[llvm] 2dd52b4 - [InstCombine] Improve logic for adding flags to shift instructions.

Noah Goldstein via llvm-commits llvm-commits at lists.llvm.org
Thu Oct 12 14:05:37 PDT 2023


Author: Noah Goldstein
Date: 2023-10-12T16:05:19-05:00
New Revision: 2dd52b4527667837cc525aa48435ab5cbfa30a0b

URL: https://github.com/llvm/llvm-project/commit/2dd52b4527667837cc525aa48435ab5cbfa30a0b
DIFF: https://github.com/llvm/llvm-project/commit/2dd52b4527667837cc525aa48435ab5cbfa30a0b.diff

LOG: [InstCombine] Improve logic for adding flags to shift instructions.

Instead of relying on constant operands, use known bits to do the
computation.

Proofs: https://alive2.llvm.org/ce/z/M-aBnw

Differential Revision: https://reviews.llvm.org/D157532

Added: 
    

Modified: 
    llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
    llvm/test/Analysis/ValueTracking/known-power-of-two.ll
    llvm/test/Transforms/InstCombine/and-add-shl.ll
    llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-pr49778.ll
    llvm/test/Transforms/InstCombine/rotate.ll
    llvm/test/Transforms/InstCombine/shift-flags.ll
    llvm/test/Transforms/InstCombine/trunc-inseltpoison.ll
    llvm/test/Transforms/InstCombine/trunc.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
index 83defd5816f5948..e178f9536b69f21 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -941,6 +941,60 @@ Instruction *InstCombinerImpl::foldLShrOverflowBit(BinaryOperator &I) {
   return new ZExtInst(Overflow, Ty);
 }
 
+// Try to set nuw/nsw flags on shl or exact flag on lshr/ashr using knownbits.
+static bool setShiftFlags(BinaryOperator &I, const SimplifyQuery &Q) {
+  assert(I.isShift() && "Expected a shift as input");
+  // We already have all the flags.
+  if (I.getOpcode() == Instruction::Shl) {
+    if (I.hasNoUnsignedWrap() && I.hasNoSignedWrap())
+      return false;
+  } else {
+    if (I.isExact())
+      return false;
+  }
+
+  // Compute what we know about shift count.
+  KnownBits KnownCnt =
+      computeKnownBits(I.getOperand(1), Q.DL, /*Depth*/ 0, Q.AC, Q.CxtI, Q.DT);
+  // If we know nothing about the shift count or it's a poison shift, we won't be
+  // able to prove anything so return before computing shift amount.
+  if (KnownCnt.isUnknown())
+    return false;
+  unsigned BitWidth = KnownCnt.getBitWidth();
+  APInt MaxCnt = KnownCnt.getMaxValue();
+  if (MaxCnt.uge(BitWidth))
+    return false;
+
+  KnownBits KnownAmt =
+      computeKnownBits(I.getOperand(0), Q.DL, /*Depth*/ 0, Q.AC, Q.CxtI, Q.DT);
+  bool Changed = false;
+
+  if (I.getOpcode() == Instruction::Shl) {
+    // If we have at least as many leading zeros as the maximum shift cnt, we have nuw.
+    if (!I.hasNoUnsignedWrap() && MaxCnt.ule(KnownAmt.countMinLeadingZeros())) {
+      I.setHasNoUnsignedWrap();
+      Changed = true;
+    }
+    // If we have more sign bits than maximum shift cnt we have nsw.
+    if (!I.hasNoSignedWrap()) {
+      if (MaxCnt.ult(KnownAmt.countMinSignBits()) ||
+          MaxCnt.ult(ComputeNumSignBits(I.getOperand(0), Q.DL, /*Depth*/ 0,
+                                        Q.AC, Q.CxtI, Q.DT))) {
+        I.setHasNoSignedWrap();
+        Changed = true;
+      }
+    }
+    return Changed;
+  }
+
+  // If we have at least as many trailing zeros as maximum count then we have
+  // exact.
+  Changed = MaxCnt.ule(KnownAmt.countMinTrailingZeros());
+  I.setIsExact(Changed);
+
+  return Changed;
+}
+
 Instruction *InstCombinerImpl::visitShl(BinaryOperator &I) {
   const SimplifyQuery Q = SQ.getWithInstruction(&I);
 
@@ -1121,22 +1175,11 @@ Instruction *InstCombinerImpl::visitShl(BinaryOperator &I) {
       Value *NewShift = Builder.CreateShl(X, Op1);
       return BinaryOperator::CreateSub(NewLHS, NewShift);
     }
-
-    // If the shifted-out value is known-zero, then this is a NUW shift.
-    if (!I.hasNoUnsignedWrap() &&
-        MaskedValueIsZero(Op0, APInt::getHighBitsSet(BitWidth, ShAmtC), 0,
-                          &I)) {
-      I.setHasNoUnsignedWrap();
-      return &I;
-    }
-
-    // If the shifted-out value is all signbits, then this is a NSW shift.
-    if (!I.hasNoSignedWrap() && ComputeNumSignBits(Op0, 0, &I) > ShAmtC) {
-      I.setHasNoSignedWrap();
-      return &I;
-    }
   }
 
+  if (setShiftFlags(I, Q))
+    return &I;
+
   // Transform  (x >> y) << y  to  x & (-1 << y)
   // Valid for any type of right-shift.
   Value *X;
@@ -1427,15 +1470,12 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
       Value *And = Builder.CreateAnd(BoolX, BoolY);
       return new ZExtInst(And, Ty);
     }
-
-    // If the shifted-out value is known-zero, then this is an exact shift.
-    if (!I.isExact() &&
-        MaskedValueIsZero(Op0, APInt::getLowBitsSet(BitWidth, ShAmtC), 0, &I)) {
-      I.setIsExact();
-      return &I;
-    }
   }
 
+  const SimplifyQuery Q = SQ.getWithInstruction(&I);
+  if (setShiftFlags(I, Q))
+    return &I;
+
   // Transform  (x << y) >> y  to  x & (-1 >> y)
   if (match(Op0, m_OneUse(m_Shl(m_Value(X), m_Specific(Op1))))) {
     Constant *AllOnes = ConstantInt::getAllOnesValue(Ty);
@@ -1594,15 +1634,12 @@ Instruction *InstCombinerImpl::visitAShr(BinaryOperator &I) {
       if (match(Op0, m_OneUse(m_NSWSub(m_Value(X), m_Value(Y)))))
         return new SExtInst(Builder.CreateICmpSLT(X, Y), Ty);
     }
-
-    // If the shifted-out value is known-zero, then this is an exact shift.
-    if (!I.isExact() &&
-        MaskedValueIsZero(Op0, APInt::getLowBitsSet(BitWidth, ShAmt), 0, &I)) {
-      I.setIsExact();
-      return &I;
-    }
   }
 
+  const SimplifyQuery Q = SQ.getWithInstruction(&I);
+  if (setShiftFlags(I, Q))
+    return &I;
+
   // Prefer `-(x & 1)` over `(x << (bitwidth(x)-1)) a>> (bitwidth(x)-1)`
   // as the pattern to splat the lowest bit.
   // FIXME: iff X is already masked, we don't need the one-use check.

diff  --git a/llvm/test/Analysis/ValueTracking/known-power-of-two.ll b/llvm/test/Analysis/ValueTracking/known-power-of-two.ll
index 12fefda31aae3ba..7bcf96065a69d99 100644
--- a/llvm/test/Analysis/ValueTracking/known-power-of-two.ll
+++ b/llvm/test/Analysis/ValueTracking/known-power-of-two.ll
@@ -413,11 +413,11 @@ define i1 @mul_is_pow2(i16 %x, i16 %y, i16 %z) {
 ; CHECK-SAME: (i16 [[X:%.*]], i16 [[Y:%.*]], i16 [[Z:%.*]]) {
 ; CHECK-NEXT:    [[XSMALL:%.*]] = and i16 [[X]], 3
 ; CHECK-NEXT:    [[ZSMALL:%.*]] = and i16 [[Z]], 3
-; CHECK-NEXT:    [[XP2:%.*]] = shl i16 4, [[XSMALL]]
-; CHECK-NEXT:    [[ZP2:%.*]] = shl i16 2, [[ZSMALL]]
-; CHECK-NEXT:    [[XX:%.*]] = mul nuw nsw i16 [[XP2]], [[ZP2]]
+; CHECK-NEXT:    [[ZP2:%.*]] = shl nuw nsw i16 2, [[ZSMALL]]
+; CHECK-NEXT:    [[TMP1:%.*]] = add nuw nsw i16 [[XSMALL]], 2
+; CHECK-NEXT:    [[XX:%.*]] = shl nuw nsw i16 [[ZP2]], [[TMP1]]
 ; CHECK-NEXT:    [[AND:%.*]] = and i16 [[XX]], [[Y]]
-; CHECK-NEXT:    [[R:%.*]] = icmp eq i16 [[AND]], [[XX]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ne i16 [[AND]], 0
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %xsmall = and i16 %x, 3
@@ -436,9 +436,9 @@ define i1 @mul_is_pow2_fail(i16 %x, i16 %y, i16 %z) {
 ; CHECK-SAME: (i16 [[X:%.*]], i16 [[Y:%.*]], i16 [[Z:%.*]]) {
 ; CHECK-NEXT:    [[XSMALL:%.*]] = and i16 [[X]], 7
 ; CHECK-NEXT:    [[ZSMALL:%.*]] = and i16 [[Z]], 7
-; CHECK-NEXT:    [[XP2:%.*]] = shl i16 4, [[XSMALL]]
-; CHECK-NEXT:    [[ZP2:%.*]] = shl i16 2, [[ZSMALL]]
-; CHECK-NEXT:    [[XX:%.*]] = mul i16 [[XP2]], [[ZP2]]
+; CHECK-NEXT:    [[ZP2:%.*]] = shl nuw nsw i16 2, [[ZSMALL]]
+; CHECK-NEXT:    [[TMP1:%.*]] = add nuw nsw i16 [[XSMALL]], 2
+; CHECK-NEXT:    [[XX:%.*]] = shl i16 [[ZP2]], [[TMP1]]
 ; CHECK-NEXT:    [[AND:%.*]] = and i16 [[XX]], [[Y]]
 ; CHECK-NEXT:    [[R:%.*]] = icmp eq i16 [[AND]], [[XX]]
 ; CHECK-NEXT:    ret i1 [[R]]
@@ -459,9 +459,9 @@ define i1 @mul_is_pow2_fail2(i16 %x, i16 %y, i16 %z) {
 ; CHECK-SAME: (i16 [[X:%.*]], i16 [[Y:%.*]], i16 [[Z:%.*]]) {
 ; CHECK-NEXT:    [[XSMALL:%.*]] = and i16 [[X]], 3
 ; CHECK-NEXT:    [[ZSMALL:%.*]] = and i16 [[Z]], 3
-; CHECK-NEXT:    [[XP2:%.*]] = shl i16 3, [[XSMALL]]
-; CHECK-NEXT:    [[ZP2:%.*]] = shl i16 2, [[ZSMALL]]
-; CHECK-NEXT:    [[XX:%.*]] = mul nuw nsw i16 [[XP2]], [[ZP2]]
+; CHECK-NEXT:    [[XP2:%.*]] = shl nuw nsw i16 3, [[XSMALL]]
+; CHECK-NEXT:    [[TMP1:%.*]] = add nuw nsw i16 [[ZSMALL]], 1
+; CHECK-NEXT:    [[XX:%.*]] = shl nuw nsw i16 [[XP2]], [[TMP1]]
 ; CHECK-NEXT:    [[AND:%.*]] = and i16 [[XX]], [[Y]]
 ; CHECK-NEXT:    [[R:%.*]] = icmp eq i16 [[AND]], [[XX]]
 ; CHECK-NEXT:    ret i1 [[R]]
@@ -481,9 +481,9 @@ define i1 @shl_is_pow2(i16 %x, i16 %y) {
 ; CHECK-LABEL: define i1 @shl_is_pow2
 ; CHECK-SAME: (i16 [[X:%.*]], i16 [[Y:%.*]]) {
 ; CHECK-NEXT:    [[XSMALL:%.*]] = and i16 [[X]], 7
-; CHECK-NEXT:    [[XX:%.*]] = shl i16 4, [[XSMALL]]
+; CHECK-NEXT:    [[XX:%.*]] = shl nuw nsw i16 4, [[XSMALL]]
 ; CHECK-NEXT:    [[AND:%.*]] = and i16 [[XX]], [[Y]]
-; CHECK-NEXT:    [[R:%.*]] = icmp eq i16 [[AND]], [[XX]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ne i16 [[AND]], 0
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %xsmall = and i16 %x, 7
@@ -515,7 +515,7 @@ define i1 @shl_is_pow2_fail2(i16 %x, i16 %y) {
 ; CHECK-LABEL: define i1 @shl_is_pow2_fail2
 ; CHECK-SAME: (i16 [[X:%.*]], i16 [[Y:%.*]]) {
 ; CHECK-NEXT:    [[XSMALL:%.*]] = and i16 [[X]], 7
-; CHECK-NEXT:    [[XX:%.*]] = shl i16 5, [[XSMALL]]
+; CHECK-NEXT:    [[XX:%.*]] = shl nuw nsw i16 5, [[XSMALL]]
 ; CHECK-NEXT:    [[AND:%.*]] = and i16 [[XX]], [[Y]]
 ; CHECK-NEXT:    [[R:%.*]] = icmp eq i16 [[AND]], [[XX]]
 ; CHECK-NEXT:    ret i1 [[R]]
@@ -532,9 +532,9 @@ define i1 @lshr_is_pow2(i16 %x, i16 %y) {
 ; CHECK-LABEL: define i1 @lshr_is_pow2
 ; CHECK-SAME: (i16 [[X:%.*]], i16 [[Y:%.*]]) {
 ; CHECK-NEXT:    [[XSMALL:%.*]] = and i16 [[X]], 7
-; CHECK-NEXT:    [[XX:%.*]] = lshr i16 512, [[XSMALL]]
+; CHECK-NEXT:    [[XX:%.*]] = lshr exact i16 512, [[XSMALL]]
 ; CHECK-NEXT:    [[AND:%.*]] = and i16 [[XX]], [[Y]]
-; CHECK-NEXT:    [[R:%.*]] = icmp eq i16 [[AND]], [[XX]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ne i16 [[AND]], 0
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %xsmall = and i16 %x, 7

diff  --git a/llvm/test/Transforms/InstCombine/and-add-shl.ll b/llvm/test/Transforms/InstCombine/and-add-shl.ll
index 28778f34137e06f..92b3a8144d62ccd 100644
--- a/llvm/test/Transforms/InstCombine/and-add-shl.ll
+++ b/llvm/test/Transforms/InstCombine/and-add-shl.ll
@@ -29,7 +29,7 @@ define i8 @and_not_shl(i8 %x) {
 ; CHECK-SAME: (i8 [[X:%.*]]) {
 ; CHECK-NEXT:    [[OP1_P2:%.*]] = icmp ult i8 [[X]], 6
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[OP1_P2]])
-; CHECK-NEXT:    [[SHIFT:%.*]] = shl i8 -1, [[X]]
+; CHECK-NEXT:    [[SHIFT:%.*]] = shl nsw i8 -1, [[X]]
 ; CHECK-NEXT:    [[NOT:%.*]] = and i8 [[SHIFT]], 32
 ; CHECK-NEXT:    [[R:%.*]] = xor i8 [[NOT]], 32
 ; CHECK-NEXT:    ret i8 [[R]]

diff  --git a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-pr49778.ll b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-pr49778.ll
index 96dc6c68f4d4fb3..b06a90e2cd99b7d 100644
--- a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-pr49778.ll
+++ b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-pr49778.ll
@@ -5,10 +5,10 @@
 define i32 @src(i1 %x2) {
 ; CHECK-LABEL: @src(
 ; CHECK-NEXT:    [[X13:%.*]] = zext i1 [[X2:%.*]] to i32
-; CHECK-NEXT:    [[_7:%.*]] = shl i32 -1, [[X13]]
+; CHECK-NEXT:    [[_7:%.*]] = shl nsw i32 -1, [[X13]]
 ; CHECK-NEXT:    [[MASK:%.*]] = xor i32 [[_7]], -1
 ; CHECK-NEXT:    [[_8:%.*]] = and i32 [[MASK]], [[X13]]
-; CHECK-NEXT:    [[_9:%.*]] = shl i32 [[_8]], [[X13]]
+; CHECK-NEXT:    [[_9:%.*]] = shl nuw nsw i32 [[_8]], [[X13]]
 ; CHECK-NEXT:    ret i32 [[_9]]
 ;
   %x13 = zext i1 %x2 to i32

diff  --git a/llvm/test/Transforms/InstCombine/rotate.ll b/llvm/test/Transforms/InstCombine/rotate.ll
index fece47534819e48..ed5145255b2f072 100644
--- a/llvm/test/Transforms/InstCombine/rotate.ll
+++ b/llvm/test/Transforms/InstCombine/rotate.ll
@@ -705,7 +705,7 @@ define i9 @rotateleft_9_neg_mask_wide_amount_commute(i9 %v, i33 %shamt) {
 ; CHECK-NEXT:    [[LSHAMT:%.*]] = and i33 [[SHAMT]], 8
 ; CHECK-NEXT:    [[RSHAMT:%.*]] = and i33 [[NEG]], 8
 ; CHECK-NEXT:    [[CONV:%.*]] = zext i9 [[V:%.*]] to i33
-; CHECK-NEXT:    [[SHL:%.*]] = shl i33 [[CONV]], [[LSHAMT]]
+; CHECK-NEXT:    [[SHL:%.*]] = shl nuw nsw i33 [[CONV]], [[LSHAMT]]
 ; CHECK-NEXT:    [[SHR:%.*]] = lshr i33 [[CONV]], [[RSHAMT]]
 ; CHECK-NEXT:    [[OR:%.*]] = or i33 [[SHL]], [[SHR]]
 ; CHECK-NEXT:    [[RET:%.*]] = trunc i33 [[OR]] to i9

diff  --git a/llvm/test/Transforms/InstCombine/shift-flags.ll b/llvm/test/Transforms/InstCombine/shift-flags.ll
index ca1c65307559a69..08cf4821d85b489 100644
--- a/llvm/test/Transforms/InstCombine/shift-flags.ll
+++ b/llvm/test/Transforms/InstCombine/shift-flags.ll
@@ -5,7 +5,7 @@ define i8 @shl_add_nuw(i8 %amt_in, i8 %cnt_in) {
 ; CHECK-LABEL: @shl_add_nuw(
 ; CHECK-NEXT:    [[AMT:%.*]] = and i8 [[AMT_IN:%.*]], 63
 ; CHECK-NEXT:    [[CNT:%.*]] = and i8 [[CNT_IN:%.*]], 2
-; CHECK-NEXT:    [[R:%.*]] = shl i8 [[AMT]], [[CNT]]
+; CHECK-NEXT:    [[R:%.*]] = shl nuw i8 [[AMT]], [[CNT]]
 ; CHECK-NEXT:    ret i8 [[R]]
 ;
   %amt = and i8 %amt_in, 63
@@ -31,7 +31,7 @@ define i8 @shl_add_nuw_and_nsw(i8 %amt_in, i8 %cnt_in) {
 ; CHECK-LABEL: @shl_add_nuw_and_nsw(
 ; CHECK-NEXT:    [[AMT:%.*]] = and i8 [[AMT_IN:%.*]], 31
 ; CHECK-NEXT:    [[CNT:%.*]] = and i8 [[CNT_IN:%.*]], 2
-; CHECK-NEXT:    [[R:%.*]] = shl i8 [[AMT]], [[CNT]]
+; CHECK-NEXT:    [[R:%.*]] = shl nuw nsw i8 [[AMT]], [[CNT]]
 ; CHECK-NEXT:    ret i8 [[R]]
 ;
   %amt = and i8 %amt_in, 31
@@ -44,7 +44,7 @@ define i8 @shl_add_nsw(i8 %amt_in, i8 %cnt_in) {
 ; CHECK-LABEL: @shl_add_nsw(
 ; CHECK-NEXT:    [[AMT:%.*]] = or i8 [[AMT_IN:%.*]], -32
 ; CHECK-NEXT:    [[CNT:%.*]] = and i8 [[CNT_IN:%.*]], 2
-; CHECK-NEXT:    [[R:%.*]] = shl i8 [[AMT]], [[CNT]]
+; CHECK-NEXT:    [[R:%.*]] = shl nsw i8 [[AMT]], [[CNT]]
 ; CHECK-NEXT:    ret i8 [[R]]
 ;
   %amt = or i8 %amt_in, 224
@@ -70,7 +70,7 @@ define i8 @lshr_add_exact(i8 %amt_in, i8 %cnt_in) {
 ; CHECK-LABEL: @lshr_add_exact(
 ; CHECK-NEXT:    [[AMT:%.*]] = and i8 [[AMT_IN:%.*]], -4
 ; CHECK-NEXT:    [[CNT:%.*]] = and i8 [[CNT_IN:%.*]], 2
-; CHECK-NEXT:    [[R:%.*]] = lshr i8 [[AMT]], [[CNT]]
+; CHECK-NEXT:    [[R:%.*]] = lshr exact i8 [[AMT]], [[CNT]]
 ; CHECK-NEXT:    ret i8 [[R]]
 ;
   %amt = and i8 %amt_in, -4
@@ -96,7 +96,7 @@ define i8 @ashr_add_exact(i8 %amt_in, i8 %cnt_in) {
 ; CHECK-LABEL: @ashr_add_exact(
 ; CHECK-NEXT:    [[AMT:%.*]] = and i8 [[AMT_IN:%.*]], -14
 ; CHECK-NEXT:    [[CNT:%.*]] = and i8 [[CNT_IN:%.*]], 1
-; CHECK-NEXT:    [[R:%.*]] = ashr i8 [[AMT]], [[CNT]]
+; CHECK-NEXT:    [[R:%.*]] = ashr exact i8 [[AMT]], [[CNT]]
 ; CHECK-NEXT:    ret i8 [[R]]
 ;
   %amt = and i8 %amt_in, -14

diff  --git a/llvm/test/Transforms/InstCombine/trunc-inseltpoison.ll b/llvm/test/Transforms/InstCombine/trunc-inseltpoison.ll
index ac0115a0f571514..b5dcb9b67d676ed 100644
--- a/llvm/test/Transforms/InstCombine/trunc-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/trunc-inseltpoison.ll
@@ -345,7 +345,7 @@ define i64 @test11(i32 %A, i32 %B) {
 ; CHECK-NEXT:    [[C:%.*]] = zext i32 [[A:%.*]] to i64
 ; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[B:%.*]], 31
 ; CHECK-NEXT:    [[E:%.*]] = zext i32 [[TMP1]] to i64
-; CHECK-NEXT:    [[F:%.*]] = shl i64 [[C]], [[E]]
+; CHECK-NEXT:    [[F:%.*]] = shl nuw nsw i64 [[C]], [[E]]
 ; CHECK-NEXT:    ret i64 [[F]]
 ;
   %C = zext i32 %A to i128
@@ -361,7 +361,7 @@ define <2 x i64> @test11_vec(<2 x i32> %A, <2 x i32> %B) {
 ; CHECK-NEXT:    [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i64>
 ; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[B:%.*]], <i32 31, i32 31>
 ; CHECK-NEXT:    [[E:%.*]] = zext <2 x i32> [[TMP1]] to <2 x i64>
-; CHECK-NEXT:    [[F:%.*]] = shl <2 x i64> [[C]], [[E]]
+; CHECK-NEXT:    [[F:%.*]] = shl nuw nsw <2 x i64> [[C]], [[E]]
 ; CHECK-NEXT:    ret <2 x i64> [[F]]
 ;
   %C = zext <2 x i32> %A to <2 x i128>
@@ -377,7 +377,7 @@ define <2 x i64> @test11_vec_nonuniform(<2 x i32> %A, <2 x i32> %B) {
 ; CHECK-NEXT:    [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i64>
 ; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[B:%.*]], <i32 31, i32 15>
 ; CHECK-NEXT:    [[E:%.*]] = zext <2 x i32> [[TMP1]] to <2 x i64>
-; CHECK-NEXT:    [[F:%.*]] = shl <2 x i64> [[C]], [[E]]
+; CHECK-NEXT:    [[F:%.*]] = shl nuw nsw <2 x i64> [[C]], [[E]]
 ; CHECK-NEXT:    ret <2 x i64> [[F]]
 ;
   %C = zext <2 x i32> %A to <2 x i128>

diff  --git a/llvm/test/Transforms/InstCombine/trunc.ll b/llvm/test/Transforms/InstCombine/trunc.ll
index e04bcaf073b64e4..33baee858493a63 100644
--- a/llvm/test/Transforms/InstCombine/trunc.ll
+++ b/llvm/test/Transforms/InstCombine/trunc.ll
@@ -345,7 +345,7 @@ define i64 @test11(i32 %A, i32 %B) {
 ; CHECK-NEXT:    [[C:%.*]] = zext i32 [[A:%.*]] to i64
 ; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[B:%.*]], 31
 ; CHECK-NEXT:    [[E:%.*]] = zext i32 [[TMP1]] to i64
-; CHECK-NEXT:    [[F:%.*]] = shl i64 [[C]], [[E]]
+; CHECK-NEXT:    [[F:%.*]] = shl nuw nsw i64 [[C]], [[E]]
 ; CHECK-NEXT:    ret i64 [[F]]
 ;
   %C = zext i32 %A to i128
@@ -361,7 +361,7 @@ define <2 x i64> @test11_vec(<2 x i32> %A, <2 x i32> %B) {
 ; CHECK-NEXT:    [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i64>
 ; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[B:%.*]], <i32 31, i32 31>
 ; CHECK-NEXT:    [[E:%.*]] = zext <2 x i32> [[TMP1]] to <2 x i64>
-; CHECK-NEXT:    [[F:%.*]] = shl <2 x i64> [[C]], [[E]]
+; CHECK-NEXT:    [[F:%.*]] = shl nuw nsw <2 x i64> [[C]], [[E]]
 ; CHECK-NEXT:    ret <2 x i64> [[F]]
 ;
   %C = zext <2 x i32> %A to <2 x i128>
@@ -377,7 +377,7 @@ define <2 x i64> @test11_vec_nonuniform(<2 x i32> %A, <2 x i32> %B) {
 ; CHECK-NEXT:    [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i64>
 ; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[B:%.*]], <i32 31, i32 15>
 ; CHECK-NEXT:    [[E:%.*]] = zext <2 x i32> [[TMP1]] to <2 x i64>
-; CHECK-NEXT:    [[F:%.*]] = shl <2 x i64> [[C]], [[E]]
+; CHECK-NEXT:    [[F:%.*]] = shl nuw nsw <2 x i64> [[C]], [[E]]
 ; CHECK-NEXT:    ret <2 x i64> [[F]]
 ;
   %C = zext <2 x i32> %A to <2 x i128>


        


More information about the llvm-commits mailing list