[llvm] b289dc5 - [CVP] Narrow SDiv/SRem to the smallest power-of-2 that's sufficient to contain its operands

Roman Lebedev via llvm-commits llvm-commits at lists.llvm.org
Tue Sep 22 11:38:11 PDT 2020


Author: Roman Lebedev
Date: 2020-09-22T21:37:30+03:00
New Revision: b289dc530632613edb3eb067895c1981cb77ccd0

URL: https://github.com/llvm/llvm-project/commit/b289dc530632613edb3eb067895c1981cb77ccd0
DIFF: https://github.com/llvm/llvm-project/commit/b289dc530632613edb3eb067895c1981cb77ccd0.diff

LOG: [CVP] Narrow SDiv/SRem to the smallest power-of-2 that's sufficient to contain its operands

This is practically identical to what we already do for UDiv/URem:
  https://rise4fun.com/Alive/04K

Name: narrow udiv
Pre: C0 u<= 255 && C1 u<= 255
%r = udiv i16 C0, C1
  =>
%t0 = trunc i16 C0 to i8
%t1 = trunc i16 C1 to i8
%t2 = udiv i8 %t0, %t1
%r = zext i8 %t2 to i16

Name: narrow exact udiv
Pre: C0 u<= 255 && C1 u<= 255
%r = udiv exact i16 C0, C1
  =>
%t0 = trunc i16 C0 to i8
%t1 = trunc i16 C1 to i8
%t2 = udiv exact i8 %t0, %t1
%r = zext i8 %t2 to i16

Name: narrow urem
Pre: C0 u<= 255 && C1 u<= 255
%r = urem i16 C0, C1
  =>
%t0 = trunc i16 C0 to i8
%t1 = trunc i16 C1 to i8
%t2 = urem i8 %t0, %t1
%r = zext i8 %t2 to i16

... only here we need to look at the 'min signed bits', not the 'active bits',
and there's a UB case to be aware of (a standalone sketch of the width
computation follows the proofs below):
  https://rise4fun.com/Alive/KG86
  https://rise4fun.com/Alive/LwR
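
For intuition on 'min signed bits' vs 'active bits' (an illustrative aside,
not part of the patch): 128 fits into 8 active bits, but as a signed value
it needs 9 bits, since i8 only covers [-128, 127]:

  #include "llvm/ADT/APInt.h"
  #include <cassert>

  using namespace llvm;

  int main() {
    // 128 fits into 8 active (unsigned) bits...
    APInt A(16, 128);
    assert(A.getActiveBits() == 8);
    // ...but needs 9 bits once a sign bit is accounted for.
    assert(A.getMinSignedBits() == 9);

    // Conversely, -1 occupies all 16 active bits, yet a single signed bit
    // suffices to represent it.
    APInt B(16, -1, /*isSigned=*/true);
    assert(B.getActiveBits() == 16);
    assert(B.getMinSignedBits() == 1);
    return 0;
  }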

Name: narrow sdiv
Pre: C0 <= 127 && C1 <= 127 && C0 >= -128 && C1 >= -128
%r = sdiv i16 C0, C1
  =>
%t0 = trunc i16 C0 to i9
%t1 = trunc i16 C1 to i9
%t2 = sdiv i9 %t0, %t1
%r = sext i9 %t2 to i16

Name: narrow exact sdiv
Pre: C0 <= 127 && C1 <= 127 && C0 >= -128 && C1 >= -128
%r = sdiv exact i16 C0, C1
  =>
%t0 = trunc i16 C0 to i9
%t1 = trunc i16 C1 to i9
%t2 = sdiv exact i9 %t0, %t1
%r = sext i9 %t2 to i16

Name: narrow srem
Pre: C0 <= 127 && C1 <= 127 && C0 >= -128 && C1 >= -128
%r = srem i16 C0, C1
  =>
%t0 = trunc i16 C0 to i9
%t1 = trunc i16 C1 to i9
%t2 = srem i9 %t0, %t1
%r = sext i9 %t2 to i16


Name: narrow sdiv
Pre: C0 <= 127 && C1 <= 127 && C0 >= -128 && C1 >= -128 && !(C0 == -128 && C1 == -1)
%r = sdiv i16 C0, C1
  =>
%t0 = trunc i16 C0 to i8
%t1 = trunc i16 C1 to i8
%t2 = sdiv i8 %t0, %t1
%r = sext i8 %t2 to i16

Name: narrow exact sdiv
Pre: C0 <= 127 && C1 <= 127 && C0 >= -128 && C1 >= -128 && !(C0 == -128 && C1 == -1)
%r = sdiv exact i16 C0, C1
  =>
%t0 = trunc i16 C0 to i8
%t1 = trunc i16 C1 to i8
%t2 = sdiv exact i8 %t0, %t1
%r = sext i8 %t2 to i16

Name: narrow srem
Pre: C0 <= 127 && C1 <= 127 && C0 >= -128 && C1 >= -128 && !(C0 == -128 && C1 == -1)
%r = srem i16 C0, C1
  =>
%t0 = trunc i16 C0 to i8
%t1 = trunc i16 C1 to i8
%t2 = srem i8 %t0, %t1
%r = sext i8 %t2 to i16
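
Putting it together, the narrowed width is computed roughly as follows
(a minimal standalone sketch using the same LLVM APIs as the patch; the
function name and shape are illustrative, not the patch's exact code):

  #include "llvm/ADT/APInt.h"
  #include "llvm/IR/ConstantRange.h"
  #include "llvm/Support/MathExtras.h"
  #include <algorithm>

  using namespace llvm;

  // Given the known ranges of the dividend (LHS) and divisor (RHS) of an
  // sdiv/srem of width OrigWidth, pick the narrowed bit width, or return
  // OrigWidth if no narrowing is possible.
  static unsigned pickNarrowedWidth(const ConstantRange &LHS,
                                    const ConstantRange &RHS,
                                    unsigned OrigWidth) {
    // Smallest signed bit width that can hold both operands' full ranges.
    unsigned MinSignedBits =
        std::max(LHS.getMinSignedBits(), RHS.getMinSignedBits());

    // sdiv/srem of INT_MIN by -1 is UB; unless that combination is provably
    // impossible, spend one more bit so the narrowed dividend can never be
    // the minimum signed value.
    if (RHS.contains(APInt::getAllOnesValue(OrigWidth)) &&
        LHS.contains(
            APInt::getSignedMinValue(MinSignedBits).sextOrSelf(OrigWidth)))
      ++MinSignedBits;

    // Round up to a power of two, but don't shrink below 8 bits.
    unsigned NewWidth = std::max<unsigned>(PowerOf2Ceil(MinSignedBits), 8);
    return NewWidth < OrigWidth ? NewWidth : OrigWidth;
  }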


The ConstantRangeTest.losslessSignedTruncationSignext test sanity-checks
the logic: we can losslessly truncate a ConstantRange to
`getMinSignedBits()` bits and sign-extend it back, and the result will be
identical to the original range.
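
For illustration, the invariant being checked could be sketched like this
(a hedged sketch of the property, not the actual unit-test code):

  #include "llvm/IR/ConstantRange.h"
  #include <cassert>

  using namespace llvm;

  // Invariant: truncating a non-empty range to its getMinSignedBits() and
  // sign-extending back must reproduce the original range.
  void checkLosslessSignedTruncationSignext(const ConstantRange &CR) {
    if (CR.isEmptySet())
      return;
    unsigned MinSignedBits = CR.getMinSignedBits();
    if (MinSignedBits == CR.getBitWidth())
      return; // already as narrow as it can get
    ConstantRange Narrow = CR.truncate(MinSignedBits);
    assert(Narrow.signExtend(CR.getBitWidth()) == CR);
  }

E.g. the i16 range [-100, 100) has getMinSignedBits() == 8, and the
truncated i8 range [-100, 100) sign-extends back to exactly the original.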

On the vanilla llvm test-suite + RawSpeed, this fires 1262 times,
while the same fold for UDiv/URem fires only 384 times. Sic!

Additionally, this results in +606.18% (+1079) extra cases of
aggressive-instcombine.NumDAGsReduced, and +473.14% (+1145) extra
aggressive-instcombine.NumInstrsReduced folds.

Added: 
    

Modified: 
    llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
    llvm/test/Transforms/CorrelatedValuePropagation/sdiv.ll
    llvm/test/Transforms/CorrelatedValuePropagation/srem.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp b/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
index db89e125e475..f21e65488418 100644
--- a/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
+++ b/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
@@ -58,6 +58,8 @@ STATISTIC(NumMemAccess, "Number of memory access targets propagated");
 STATISTIC(NumCmps,      "Number of comparisons propagated");
 STATISTIC(NumReturns,   "Number of return values propagated");
 STATISTIC(NumDeadCases, "Number of switch cases removed");
+STATISTIC(NumSDivSRemsNarrowed,
+          "Number of sdivs/srems whose width was decreased");
 STATISTIC(NumSDivs,     "Number of sdiv converted to udiv");
 STATISTIC(NumUDivURemsNarrowed,
           "Number of udivs/urems whose width was decreased");
@@ -624,6 +626,60 @@ Domain getDomain(Value *V, LazyValueInfo *LVI, Instruction *CxtI) {
   return Domain::Unknown;
 };
 
+/// Try to shrink a sdiv/srem's width down to the smallest power of two that's
+/// sufficient to contain its operands.
+static bool narrowSDivOrSRem(BinaryOperator *Instr, LazyValueInfo *LVI) {
+  assert(Instr->getOpcode() == Instruction::SDiv ||
+         Instr->getOpcode() == Instruction::SRem);
+  if (Instr->getType()->isVectorTy())
+    return false;
+
+  // Find the smallest power of two bitwidth that's sufficient to hold Instr's
+  // operands.
+  unsigned OrigWidth = Instr->getType()->getIntegerBitWidth();
+
+  // What is the smallest bit width that can accommodate the entire value ranges
+  // of both of the operands?
+  std::array<Optional<ConstantRange>, 2> CRs;
+  unsigned MinSignedBits = 0;
+  for (auto I : zip(Instr->operands(), CRs)) {
+    std::get<1>(I) = LVI->getConstantRange(std::get<0>(I), Instr->getParent());
+    MinSignedBits = std::max(std::get<1>(I)->getMinSignedBits(), MinSignedBits);
+  }
+
+  // sdiv/srem is UB if divisor is -1 and dividend is INT_MIN, so unless we can
+  // prove that such a combination is impossible, we need to bump the bitwidth.
+  if (CRs[1]->contains(APInt::getAllOnesValue(OrigWidth)) &&
+      CRs[0]->contains(
+          APInt::getSignedMinValue(MinSignedBits).sextOrSelf(OrigWidth)))
+    ++MinSignedBits;
+
+  // Don't shrink below 8 bits wide.
+  unsigned NewWidth = std::max<unsigned>(PowerOf2Ceil(MinSignedBits), 8);
+
+  // NewWidth might be greater than OrigWidth if OrigWidth is not a power of
+  // two.
+  if (NewWidth >= OrigWidth)
+    return false;
+
+  ++NumSDivSRemsNarrowed;
+  IRBuilder<> B{Instr};
+  auto *TruncTy = Type::getIntNTy(Instr->getContext(), NewWidth);
+  auto *LHS = B.CreateTruncOrBitCast(Instr->getOperand(0), TruncTy,
+                                     Instr->getName() + ".lhs.trunc");
+  auto *RHS = B.CreateTruncOrBitCast(Instr->getOperand(1), TruncTy,
+                                     Instr->getName() + ".rhs.trunc");
+  auto *BO = B.CreateBinOp(Instr->getOpcode(), LHS, RHS, Instr->getName());
+  auto *Sext = B.CreateSExt(BO, Instr->getType(), Instr->getName() + ".sext");
+  if (auto *BinOp = dyn_cast<BinaryOperator>(BO))
+    if (BinOp->getOpcode() == Instruction::SDiv)
+      BinOp->setIsExact(Instr->isExact());
+
+  Instr->replaceAllUsesWith(Sext);
+  Instr->eraseFromParent();
+  return true;
+}
+
 /// Try to shrink a udiv/urem's width down to the smallest power of two that's
 /// sufficient to contain its operands.
 static bool processUDivOrURem(BinaryOperator *Instr, LazyValueInfo *LVI) {
@@ -669,6 +725,7 @@ static bool processUDivOrURem(BinaryOperator *Instr, LazyValueInfo *LVI) {
 }
 
 static bool processSRem(BinaryOperator *SDI, LazyValueInfo *LVI) {
+  assert(SDI->getOpcode() == Instruction::SRem);
   if (SDI->getType()->isVectorTy())
     return false;
 
@@ -724,6 +781,7 @@ static bool processSRem(BinaryOperator *SDI, LazyValueInfo *LVI) {
 /// conditions, this can sometimes prove conditions instcombine can't by
 /// exploiting range information.
 static bool processSDiv(BinaryOperator *SDI, LazyValueInfo *LVI) {
+  assert(SDI->getOpcode() == Instruction::SDiv);
   if (SDI->getType()->isVectorTy())
     return false;
 
@@ -774,6 +832,23 @@ static bool processSDiv(BinaryOperator *SDI, LazyValueInfo *LVI) {
   return true;
 }
 
+static bool processSDivOrSRem(BinaryOperator *Instr, LazyValueInfo *LVI) {
+  assert(Instr->getOpcode() == Instruction::SDiv ||
+         Instr->getOpcode() == Instruction::SRem);
+  if (Instr->getType()->isVectorTy())
+    return false;
+
+  if (Instr->getOpcode() == Instruction::SDiv)
+    if (processSDiv(Instr, LVI))
+      return true;
+
+  if (Instr->getOpcode() == Instruction::SRem)
+    if (processSRem(Instr, LVI))
+      return true;
+
+  return narrowSDivOrSRem(Instr, LVI);
+}
+
 static bool processAShr(BinaryOperator *SDI, LazyValueInfo *LVI) {
   if (SDI->getType()->isVectorTy())
     return false;
@@ -935,10 +1010,8 @@ static bool runImpl(Function &F, LazyValueInfo *LVI, DominatorTree *DT,
         BBChanged |= processCallSite(cast<CallBase>(*II), LVI);
         break;
       case Instruction::SRem:
-        BBChanged |= processSRem(cast<BinaryOperator>(II), LVI);
-        break;
       case Instruction::SDiv:
-        BBChanged |= processSDiv(cast<BinaryOperator>(II), LVI);
+        BBChanged |= processSDivOrSRem(cast<BinaryOperator>(II), LVI);
         break;
       case Instruction::UDiv:
       case Instruction::URem:

diff  --git a/llvm/test/Transforms/CorrelatedValuePropagation/sdiv.ll b/llvm/test/Transforms/CorrelatedValuePropagation/sdiv.ll
index ba618f5af033..3f533030a11c 100644
--- a/llvm/test/Transforms/CorrelatedValuePropagation/sdiv.ll
+++ b/llvm/test/Transforms/CorrelatedValuePropagation/sdiv.ll
@@ -271,8 +271,11 @@ define i64 @test11_i15_i15(i64 %x, i64 %y) {
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[C3]])
 ; CHECK-NEXT:    br label [[END:%.*]]
 ; CHECK:       end:
-; CHECK-NEXT:    [[DIV:%.*]] = sdiv i64 [[X]], [[Y]]
-; CHECK-NEXT:    ret i64 [[DIV]]
+; CHECK-NEXT:    [[DIV_LHS_TRUNC:%.*]] = trunc i64 [[X]] to i16
+; CHECK-NEXT:    [[DIV_RHS_TRUNC:%.*]] = trunc i64 [[Y]] to i16
+; CHECK-NEXT:    [[DIV1:%.*]] = sdiv i16 [[DIV_LHS_TRUNC]], [[DIV_RHS_TRUNC]]
+; CHECK-NEXT:    [[DIV_SEXT:%.*]] = sext i16 [[DIV1]] to i64
+; CHECK-NEXT:    ret i64 [[DIV_SEXT]]
 ;
 entry:
   %c0 = icmp sle i64 %x, 16383
@@ -306,8 +309,11 @@ define i64 @test12_i16_i16(i64 %x, i64 %y) {
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[C3]])
 ; CHECK-NEXT:    br label [[END:%.*]]
 ; CHECK:       end:
-; CHECK-NEXT:    [[DIV:%.*]] = sdiv i64 [[X]], [[Y]]
-; CHECK-NEXT:    ret i64 [[DIV]]
+; CHECK-NEXT:    [[DIV_LHS_TRUNC:%.*]] = trunc i64 [[X]] to i32
+; CHECK-NEXT:    [[DIV_RHS_TRUNC:%.*]] = trunc i64 [[Y]] to i32
+; CHECK-NEXT:    [[DIV1:%.*]] = sdiv i32 [[DIV_LHS_TRUNC]], [[DIV_RHS_TRUNC]]
+; CHECK-NEXT:    [[DIV_SEXT:%.*]] = sext i32 [[DIV1]] to i64
+; CHECK-NEXT:    ret i64 [[DIV_SEXT]]
 ;
 entry:
   %c0 = icmp sle i64 %x, 32767
@@ -338,8 +344,11 @@ define i64 @test13_i16_u15(i64 %x, i64 %y) {
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[C2]])
 ; CHECK-NEXT:    br label [[END:%.*]]
 ; CHECK:       end:
-; CHECK-NEXT:    [[DIV:%.*]] = sdiv i64 [[X]], [[Y]]
-; CHECK-NEXT:    ret i64 [[DIV]]
+; CHECK-NEXT:    [[DIV_LHS_TRUNC:%.*]] = trunc i64 [[X]] to i16
+; CHECK-NEXT:    [[DIV_RHS_TRUNC:%.*]] = trunc i64 [[Y]] to i16
+; CHECK-NEXT:    [[DIV1:%.*]] = sdiv i16 [[DIV_LHS_TRUNC]], [[DIV_RHS_TRUNC]]
+; CHECK-NEXT:    [[DIV_SEXT:%.*]] = sext i16 [[DIV1]] to i64
+; CHECK-NEXT:    ret i64 [[DIV_SEXT]]
 ;
 entry:
   %c0 = icmp sle i64 %x, 32767
@@ -371,8 +380,11 @@ define i64 @test14_i16safe_i16(i64 %x, i64 %y) {
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[C3]])
 ; CHECK-NEXT:    br label [[END:%.*]]
 ; CHECK:       end:
-; CHECK-NEXT:    [[DIV:%.*]] = sdiv i64 [[X]], [[Y]]
-; CHECK-NEXT:    ret i64 [[DIV]]
+; CHECK-NEXT:    [[DIV_LHS_TRUNC:%.*]] = trunc i64 [[X]] to i16
+; CHECK-NEXT:    [[DIV_RHS_TRUNC:%.*]] = trunc i64 [[Y]] to i16
+; CHECK-NEXT:    [[DIV1:%.*]] = sdiv i16 [[DIV_LHS_TRUNC]], [[DIV_RHS_TRUNC]]
+; CHECK-NEXT:    [[DIV_SEXT:%.*]] = sext i16 [[DIV1]] to i64
+; CHECK-NEXT:    ret i64 [[DIV_SEXT]]
 ;
 entry:
   %c0 = icmp sle i64 %x, 32767
@@ -403,8 +415,11 @@ define i64 @test15_i16safe_u15(i64 %x, i64 %y) {
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[C2]])
 ; CHECK-NEXT:    br label [[END:%.*]]
 ; CHECK:       end:
-; CHECK-NEXT:    [[DIV:%.*]] = sdiv i64 [[X]], [[Y]]
-; CHECK-NEXT:    ret i64 [[DIV]]
+; CHECK-NEXT:    [[DIV_LHS_TRUNC:%.*]] = trunc i64 [[X]] to i16
+; CHECK-NEXT:    [[DIV_RHS_TRUNC:%.*]] = trunc i64 [[Y]] to i16
+; CHECK-NEXT:    [[DIV1:%.*]] = sdiv i16 [[DIV_LHS_TRUNC]], [[DIV_RHS_TRUNC]]
+; CHECK-NEXT:    [[DIV_SEXT:%.*]] = sext i16 [[DIV1]] to i64
+; CHECK-NEXT:    ret i64 [[DIV_SEXT]]
 ;
 entry:
   %c0 = icmp sle i64 %x, 32767
@@ -435,8 +450,11 @@ define i64 @test16_i4_i4(i64 %x, i64 %y) {
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[C3]])
 ; CHECK-NEXT:    br label [[END:%.*]]
 ; CHECK:       end:
-; CHECK-NEXT:    [[DIV:%.*]] = sdiv i64 [[X]], [[Y]]
-; CHECK-NEXT:    ret i64 [[DIV]]
+; CHECK-NEXT:    [[DIV_LHS_TRUNC:%.*]] = trunc i64 [[X]] to i8
+; CHECK-NEXT:    [[DIV_RHS_TRUNC:%.*]] = trunc i64 [[Y]] to i8
+; CHECK-NEXT:    [[DIV1:%.*]] = sdiv i8 [[DIV_LHS_TRUNC]], [[DIV_RHS_TRUNC]]
+; CHECK-NEXT:    [[DIV_SEXT:%.*]] = sext i8 [[DIV1]] to i64
+; CHECK-NEXT:    ret i64 [[DIV_SEXT]]
 ;
 entry:
   %c0 = icmp sle i64 %x, 3
@@ -469,8 +487,11 @@ define i64 @test17_i9_i9(i64 %x, i64 %y) {
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[C3]])
 ; CHECK-NEXT:    br label [[END:%.*]]
 ; CHECK:       end:
-; CHECK-NEXT:    [[DIV:%.*]] = sdiv i64 [[X]], [[Y]]
-; CHECK-NEXT:    ret i64 [[DIV]]
+; CHECK-NEXT:    [[DIV_LHS_TRUNC:%.*]] = trunc i64 [[X]] to i16
+; CHECK-NEXT:    [[DIV_RHS_TRUNC:%.*]] = trunc i64 [[Y]] to i16
+; CHECK-NEXT:    [[DIV1:%.*]] = sdiv i16 [[DIV_LHS_TRUNC]], [[DIV_RHS_TRUNC]]
+; CHECK-NEXT:    [[DIV_SEXT:%.*]] = sext i16 [[DIV1]] to i64
+; CHECK-NEXT:    ret i64 [[DIV_SEXT]]
 ;
 entry:
   %c0 = icmp sle i64 %x, 255
@@ -569,8 +590,11 @@ define i64 @test20_i16_i18(i64 %x, i64 %y) {
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[C3]])
 ; CHECK-NEXT:    br label [[END:%.*]]
 ; CHECK:       end:
-; CHECK-NEXT:    [[DIV:%.*]] = sdiv i64 [[X]], [[Y]]
-; CHECK-NEXT:    ret i64 [[DIV]]
+; CHECK-NEXT:    [[DIV_LHS_TRUNC:%.*]] = trunc i64 [[X]] to i32
+; CHECK-NEXT:    [[DIV_RHS_TRUNC:%.*]] = trunc i64 [[Y]] to i32
+; CHECK-NEXT:    [[DIV1:%.*]] = sdiv i32 [[DIV_LHS_TRUNC]], [[DIV_RHS_TRUNC]]
+; CHECK-NEXT:    [[DIV_SEXT:%.*]] = sext i32 [[DIV1]] to i64
+; CHECK-NEXT:    ret i64 [[DIV_SEXT]]
 ;
 entry:
   %c0 = icmp sle i64 %x, 16383
@@ -601,8 +625,11 @@ define i64 @test21_i18_i16(i64 %x, i64 %y) {
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[C3]])
 ; CHECK-NEXT:    br label [[END:%.*]]
 ; CHECK:       end:
-; CHECK-NEXT:    [[DIV:%.*]] = sdiv i64 [[X]], [[Y]]
-; CHECK-NEXT:    ret i64 [[DIV]]
+; CHECK-NEXT:    [[DIV_LHS_TRUNC:%.*]] = trunc i64 [[X]] to i32
+; CHECK-NEXT:    [[DIV_RHS_TRUNC:%.*]] = trunc i64 [[Y]] to i32
+; CHECK-NEXT:    [[DIV1:%.*]] = sdiv i32 [[DIV_LHS_TRUNC]], [[DIV_RHS_TRUNC]]
+; CHECK-NEXT:    [[DIV_SEXT:%.*]] = sext i32 [[DIV1]] to i64
+; CHECK-NEXT:    ret i64 [[DIV_SEXT]]
 ;
 entry:
   %c0 = icmp sle i64 %x, 65535
@@ -635,8 +662,11 @@ define i64 @test22_i16_i16(i64 %x, i64 %y) {
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[C3]])
 ; CHECK-NEXT:    br label [[END:%.*]]
 ; CHECK:       end:
-; CHECK-NEXT:    [[DIV:%.*]] = sdiv exact i64 [[X]], [[Y]]
-; CHECK-NEXT:    ret i64 [[DIV]]
+; CHECK-NEXT:    [[DIV_LHS_TRUNC:%.*]] = trunc i64 [[X]] to i32
+; CHECK-NEXT:    [[DIV_RHS_TRUNC:%.*]] = trunc i64 [[Y]] to i32
+; CHECK-NEXT:    [[DIV1:%.*]] = sdiv exact i32 [[DIV_LHS_TRUNC]], [[DIV_RHS_TRUNC]]
+; CHECK-NEXT:    [[DIV_SEXT:%.*]] = sext i32 [[DIV1]] to i64
+; CHECK-NEXT:    ret i64 [[DIV_SEXT]]
 ;
 entry:
   %c0 = icmp sle i64 %x, 32767

diff  --git a/llvm/test/Transforms/CorrelatedValuePropagation/srem.ll b/llvm/test/Transforms/CorrelatedValuePropagation/srem.ll
index 4e7e28c8e98c..8d55de9eb1eb 100644
--- a/llvm/test/Transforms/CorrelatedValuePropagation/srem.ll
+++ b/llvm/test/Transforms/CorrelatedValuePropagation/srem.ll
@@ -184,8 +184,11 @@ define i64 @test11_i15_i15(i64 %x, i64 %y) {
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[C3]])
 ; CHECK-NEXT:    br label [[END:%.*]]
 ; CHECK:       end:
-; CHECK-NEXT:    [[DIV:%.*]] = srem i64 [[X]], [[Y]]
-; CHECK-NEXT:    ret i64 [[DIV]]
+; CHECK-NEXT:    [[DIV_LHS_TRUNC:%.*]] = trunc i64 [[X]] to i16
+; CHECK-NEXT:    [[DIV_RHS_TRUNC:%.*]] = trunc i64 [[Y]] to i16
+; CHECK-NEXT:    [[DIV1:%.*]] = srem i16 [[DIV_LHS_TRUNC]], [[DIV_RHS_TRUNC]]
+; CHECK-NEXT:    [[DIV_SEXT:%.*]] = sext i16 [[DIV1]] to i64
+; CHECK-NEXT:    ret i64 [[DIV_SEXT]]
 ;
 entry:
   %c0 = icmp sle i64 %x, 16383
@@ -219,8 +222,11 @@ define i64 @test12_i16_i16(i64 %x, i64 %y) {
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[C3]])
 ; CHECK-NEXT:    br label [[END:%.*]]
 ; CHECK:       end:
-; CHECK-NEXT:    [[DIV:%.*]] = srem i64 [[X]], [[Y]]
-; CHECK-NEXT:    ret i64 [[DIV]]
+; CHECK-NEXT:    [[DIV_LHS_TRUNC:%.*]] = trunc i64 [[X]] to i32
+; CHECK-NEXT:    [[DIV_RHS_TRUNC:%.*]] = trunc i64 [[Y]] to i32
+; CHECK-NEXT:    [[DIV1:%.*]] = srem i32 [[DIV_LHS_TRUNC]], [[DIV_RHS_TRUNC]]
+; CHECK-NEXT:    [[DIV_SEXT:%.*]] = sext i32 [[DIV1]] to i64
+; CHECK-NEXT:    ret i64 [[DIV_SEXT]]
 ;
 entry:
   %c0 = icmp sle i64 %x, 32767
@@ -251,8 +257,11 @@ define i64 @test13_i16_u15(i64 %x, i64 %y) {
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[C2]])
 ; CHECK-NEXT:    br label [[END:%.*]]
 ; CHECK:       end:
-; CHECK-NEXT:    [[DIV:%.*]] = srem i64 [[X]], [[Y]]
-; CHECK-NEXT:    ret i64 [[DIV]]
+; CHECK-NEXT:    [[DIV_LHS_TRUNC:%.*]] = trunc i64 [[X]] to i16
+; CHECK-NEXT:    [[DIV_RHS_TRUNC:%.*]] = trunc i64 [[Y]] to i16
+; CHECK-NEXT:    [[DIV1:%.*]] = srem i16 [[DIV_LHS_TRUNC]], [[DIV_RHS_TRUNC]]
+; CHECK-NEXT:    [[DIV_SEXT:%.*]] = sext i16 [[DIV1]] to i64
+; CHECK-NEXT:    ret i64 [[DIV_SEXT]]
 ;
 entry:
   %c0 = icmp sle i64 %x, 32767
@@ -284,8 +293,11 @@ define i64 @test14_i16safe_i16(i64 %x, i64 %y) {
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[C3]])
 ; CHECK-NEXT:    br label [[END:%.*]]
 ; CHECK:       end:
-; CHECK-NEXT:    [[DIV:%.*]] = srem i64 [[X]], [[Y]]
-; CHECK-NEXT:    ret i64 [[DIV]]
+; CHECK-NEXT:    [[DIV_LHS_TRUNC:%.*]] = trunc i64 [[X]] to i16
+; CHECK-NEXT:    [[DIV_RHS_TRUNC:%.*]] = trunc i64 [[Y]] to i16
+; CHECK-NEXT:    [[DIV1:%.*]] = srem i16 [[DIV_LHS_TRUNC]], [[DIV_RHS_TRUNC]]
+; CHECK-NEXT:    [[DIV_SEXT:%.*]] = sext i16 [[DIV1]] to i64
+; CHECK-NEXT:    ret i64 [[DIV_SEXT]]
 ;
 entry:
   %c0 = icmp sle i64 %x, 32767
@@ -316,8 +328,11 @@ define i64 @test15_i16safe_u15(i64 %x, i64 %y) {
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[C2]])
 ; CHECK-NEXT:    br label [[END:%.*]]
 ; CHECK:       end:
-; CHECK-NEXT:    [[DIV:%.*]] = srem i64 [[X]], [[Y]]
-; CHECK-NEXT:    ret i64 [[DIV]]
+; CHECK-NEXT:    [[DIV_LHS_TRUNC:%.*]] = trunc i64 [[X]] to i16
+; CHECK-NEXT:    [[DIV_RHS_TRUNC:%.*]] = trunc i64 [[Y]] to i16
+; CHECK-NEXT:    [[DIV1:%.*]] = srem i16 [[DIV_LHS_TRUNC]], [[DIV_RHS_TRUNC]]
+; CHECK-NEXT:    [[DIV_SEXT:%.*]] = sext i16 [[DIV1]] to i64
+; CHECK-NEXT:    ret i64 [[DIV_SEXT]]
 ;
 entry:
   %c0 = icmp sle i64 %x, 32767
@@ -348,8 +363,11 @@ define i64 @test16_i4_i4(i64 %x, i64 %y) {
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[C3]])
 ; CHECK-NEXT:    br label [[END:%.*]]
 ; CHECK:       end:
-; CHECK-NEXT:    [[DIV:%.*]] = srem i64 [[X]], [[Y]]
-; CHECK-NEXT:    ret i64 [[DIV]]
+; CHECK-NEXT:    [[DIV_LHS_TRUNC:%.*]] = trunc i64 [[X]] to i8
+; CHECK-NEXT:    [[DIV_RHS_TRUNC:%.*]] = trunc i64 [[Y]] to i8
+; CHECK-NEXT:    [[DIV1:%.*]] = srem i8 [[DIV_LHS_TRUNC]], [[DIV_RHS_TRUNC]]
+; CHECK-NEXT:    [[DIV_SEXT:%.*]] = sext i8 [[DIV1]] to i64
+; CHECK-NEXT:    ret i64 [[DIV_SEXT]]
 ;
 entry:
   %c0 = icmp sle i64 %x, 3
@@ -382,8 +400,11 @@ define i64 @test17_i9_i9(i64 %x, i64 %y) {
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[C3]])
 ; CHECK-NEXT:    br label [[END:%.*]]
 ; CHECK:       end:
-; CHECK-NEXT:    [[DIV:%.*]] = srem i64 [[X]], [[Y]]
-; CHECK-NEXT:    ret i64 [[DIV]]
+; CHECK-NEXT:    [[DIV_LHS_TRUNC:%.*]] = trunc i64 [[X]] to i16
+; CHECK-NEXT:    [[DIV_RHS_TRUNC:%.*]] = trunc i64 [[Y]] to i16
+; CHECK-NEXT:    [[DIV1:%.*]] = srem i16 [[DIV_LHS_TRUNC]], [[DIV_RHS_TRUNC]]
+; CHECK-NEXT:    [[DIV_SEXT:%.*]] = sext i16 [[DIV1]] to i64
+; CHECK-NEXT:    ret i64 [[DIV_SEXT]]
 ;
 entry:
   %c0 = icmp sle i64 %x, 255
@@ -482,8 +503,11 @@ define i64 @test20_i16_i18(i64 %x, i64 %y) {
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[C3]])
 ; CHECK-NEXT:    br label [[END:%.*]]
 ; CHECK:       end:
-; CHECK-NEXT:    [[DIV:%.*]] = srem i64 [[X]], [[Y]]
-; CHECK-NEXT:    ret i64 [[DIV]]
+; CHECK-NEXT:    [[DIV_LHS_TRUNC:%.*]] = trunc i64 [[X]] to i32
+; CHECK-NEXT:    [[DIV_RHS_TRUNC:%.*]] = trunc i64 [[Y]] to i32
+; CHECK-NEXT:    [[DIV1:%.*]] = srem i32 [[DIV_LHS_TRUNC]], [[DIV_RHS_TRUNC]]
+; CHECK-NEXT:    [[DIV_SEXT:%.*]] = sext i32 [[DIV1]] to i64
+; CHECK-NEXT:    ret i64 [[DIV_SEXT]]
 ;
 entry:
   %c0 = icmp sle i64 %x, 16383
@@ -514,8 +538,11 @@ define i64 @test21_i18_i16(i64 %x, i64 %y) {
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[C3]])
 ; CHECK-NEXT:    br label [[END:%.*]]
 ; CHECK:       end:
-; CHECK-NEXT:    [[DIV:%.*]] = srem i64 [[X]], [[Y]]
-; CHECK-NEXT:    ret i64 [[DIV]]
+; CHECK-NEXT:    [[DIV_LHS_TRUNC:%.*]] = trunc i64 [[X]] to i32
+; CHECK-NEXT:    [[DIV_RHS_TRUNC:%.*]] = trunc i64 [[Y]] to i32
+; CHECK-NEXT:    [[DIV1:%.*]] = srem i32 [[DIV_LHS_TRUNC]], [[DIV_RHS_TRUNC]]
+; CHECK-NEXT:    [[DIV_SEXT:%.*]] = sext i32 [[DIV1]] to i64
+; CHECK-NEXT:    ret i64 [[DIV_SEXT]]
 ;
 entry:
   %c0 = icmp sle i64 %x, 65535

