[llvm] [profcheck] Fix profile metadata propagation for Large Integer and FP Operations (PR #173114)

Jin Huang via llvm-commits llvm-commits at lists.llvm.org
Fri Jan 9 11:01:51 PST 2026


https://github.com/jinhuang1102 updated https://github.com/llvm/llvm-project/pull/173114

>From 828c12ce3b5327b3a7684525d3d277c4898c3401 Mon Sep 17 00:00:00 2001
From: Jin Huang <jingold at google.com>
Date: Fri, 19 Dec 2025 22:49:22 +0000
Subject: [PATCH] [profcheck] Fix profile metadata missing in ExpandLargeDivRem

---
 llvm/lib/CodeGen/ExpandIRInsts.cpp            |  104 +-
 llvm/lib/Transforms/Utils/IntegerDivision.cpp |   73 +-
 llvm/test/CodeGen/AMDGPU/fptoi.i128.ll        |  868 +++---
 llvm/test/CodeGen/AMDGPU/itofp.i128.bf.ll     |    2 +-
 llvm/test/CodeGen/AMDGPU/itofp.i128.ll        |    4 +-
 llvm/test/CodeGen/RISCV/idiv_large.ll         | 2688 ++++++-----------
 .../X86/div-rem-pair-recomposition-signed.ll  |  423 +--
 .../div-rem-pair-recomposition-unsigned.ll    |  395 ++-
 .../X86/expand-large-fp-convert-fptosi129.ll  |   64 +-
 .../X86/expand-large-fp-convert-fptoui129.ll  |   64 +-
 .../X86/expand-large-fp-convert-si129tofp.ll  |   69 +-
 .../X86/expand-large-fp-convert-ui129tofp.ll  |   69 +-
 .../X86/expand-large-fp-optnone.ll            |   24 +-
 .../Transforms/ExpandIRInsts/X86/sdiv129.ll   |   19 +-
 .../Transforms/ExpandIRInsts/X86/srem129.ll   |   25 +-
 .../Transforms/ExpandIRInsts/X86/udiv129.ll   |   19 +-
 .../Transforms/ExpandIRInsts/X86/urem129.ll   |   19 +-
 .../Transforms/ExpandIRInsts/X86/vector.ll    |  106 +-
 llvm/utils/profcheck-xfail.txt                |   10 +-
 19 files changed, 2204 insertions(+), 2841 deletions(-)

diff --git a/llvm/lib/CodeGen/ExpandIRInsts.cpp b/llvm/lib/CodeGen/ExpandIRInsts.cpp
index 1d09000fbca6b..e2454a23f54d8 100644
--- a/llvm/lib/CodeGen/ExpandIRInsts.cpp
+++ b/llvm/lib/CodeGen/ExpandIRInsts.cpp
@@ -40,10 +40,14 @@
 #include "llvm/CodeGen/TargetSubtargetInfo.h"
 #include "llvm/IR/IRBuilder.h"
 #include "llvm/IR/InstIterator.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/MDBuilder.h"
 #include "llvm/IR/Module.h"
 #include "llvm/IR/PassManager.h"
+#include "llvm/IR/ProfDataUtils.h"
 #include "llvm/InitializePasses.h"
 #include "llvm/Pass.h"
+#include "llvm/Support/Casting.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Target/TargetMachine.h"
@@ -56,6 +60,10 @@
 
 using namespace llvm;
 
+namespace llvm {
+extern cl::opt<bool> ProfcheckDisableMetadataFixes;
+}
+
 static cl::opt<unsigned>
     ExpandFpConvertBits("expand-fp-convert-bits", cl::Hidden,
                         cl::init(llvm::IntegerType::MAX_INT_BITS),
@@ -569,38 +577,84 @@ static void expandFPToI(Instruction *FPToI) {
   Value *ARep = Builder.CreateZExt(ARep0, FPToI->getType());
   Value *PosOrNeg = Builder.CreateICmpSGT(
       ARep0, ConstantInt::getSigned(Builder.getIntNTy(FloatWidth), -1));
+  // Assume the sign is likely positive, although mathematically it's 50-50.
   Value *Sign = Builder.CreateSelect(PosOrNeg, ConstantInt::getSigned(IntTy, 1),
                                      ConstantInt::getSigned(IntTy, -1));
+  if (!ProfcheckDisableMetadataFixes) {
+    if (Instruction *SignInst = dyn_cast<Instruction>(Sign)) {
+      SignInst->setMetadata(
+          LLVMContext::MD_prof,
+          MDBuilder(SignInst->getContext()).createLikelyBranchWeights());
+    }
+  }
   Value *And =
       Builder.CreateLShr(ARep, Builder.getIntN(BitWidth, FPMantissaWidth));
   Value *And2 = Builder.CreateAnd(
       And, Builder.getIntN(BitWidth, (1 << ExponentWidth) - 1));
   Value *Abs = Builder.CreateAnd(ARep, SignificandMask);
   Value *Or = Builder.CreateOr(Abs, ImplicitBit);
+  // The comparison checks if the true exponent is negative (i.e., And2 <
+  // ExponentBias). We assume this case is less common, so the branch to 'End'
+  // is the unlikely path.
   Value *Cmp =
       Builder.CreateICmpULT(And2, Builder.getIntN(BitWidth, ExponentBias));
-  Builder.CreateCondBr(Cmp, End, IfEnd);
+  Value *CondBrEntry = Builder.CreateCondBr(Cmp, End, IfEnd);
+  if (!ProfcheckDisableMetadataFixes) {
+    if (Instruction *CondBrEntryInst = dyn_cast<Instruction>(CondBrEntry)) {
+      CondBrEntryInst->setMetadata(LLVMContext::MD_prof,
+                                   MDBuilder(CondBrEntryInst->getContext())
+                                       .createUnlikelyBranchWeights());
+    }
+  }
 
   // if.end:
   Builder.SetInsertPoint(IfEnd);
   Value *Add1 = Builder.CreateAdd(
       And2, ConstantInt::getSigned(
                 IntTy, -static_cast<int64_t>(ExponentBias + BitWidth)));
+  // The comparison is doing the overflow check so we assume the 'true' path is
+  // unlikely.
   Value *Cmp3 = Builder.CreateICmpULT(
       Add1, ConstantInt::getSigned(IntTy, -static_cast<int64_t>(BitWidth)));
-  Builder.CreateCondBr(Cmp3, IfThen5, IfEnd9);
+  Value *CondBrIfEnd = Builder.CreateCondBr(Cmp3, IfThen5, IfEnd9);
+  if (!ProfcheckDisableMetadataFixes) {
+    if (Instruction *CondBrIfEndInst = dyn_cast<Instruction>(CondBrIfEnd)) {
+      CondBrIfEndInst->setMetadata(LLVMContext::MD_prof,
+                                   MDBuilder(CondBrIfEndInst->getContext())
+                                       .createUnlikelyBranchWeights());
+    }
+  }
 
   // if.then5:
   Builder.SetInsertPoint(IfThen5);
   Value *PosInf = Builder.CreateXor(NegOne, NegInf);
   Value *Cond8 = Builder.CreateSelect(PosOrNeg, PosInf, NegInf);
+  if (!ProfcheckDisableMetadataFixes) {
+    if (Instruction *Cond8Inst = dyn_cast<Instruction>(Cond8)) {
+      setExplicitlyUnknownBranchWeightsIfProfiled(*Cond8Inst, DEBUG_TYPE,
+                                                  FPToI->getFunction());
+    }
+  }
   Builder.CreateBr(End);
 
   // if.end9:
   Builder.SetInsertPoint(IfEnd9);
+  // This branch determines whether the significand needs to be shifted left
+  // or right to form the integer part. In many real-world scenarios,
+  // floating-point numbers are relatively small, meaning their effective
+  // exponent is less than the mantissa width, requiring a right shift to form
+  // the integer part. Therefore, the 'true' path (right shift) is assumed to
+  // be more likely.
   Value *Cmp10 = Builder.CreateICmpULT(
       And2, Builder.getIntN(BitWidth, ExponentBias + FPMantissaWidth));
-  Builder.CreateCondBr(Cmp10, IfThen12, IfElse);
+  Value *CondBrIfEnd9 = Builder.CreateCondBr(Cmp10, IfThen12, IfElse);
+  if (!ProfcheckDisableMetadataFixes) {
+    if (Instruction *CondBrIfEnd9Inst = dyn_cast<Instruction>(CondBrIfEnd9)) {
+      CondBrIfEnd9Inst->setMetadata(LLVMContext::MD_prof,
+                                    MDBuilder(CondBrIfEnd9Inst->getContext())
+                                        .createLikelyBranchWeights());
+    }
+  }
 
   // if.then12:
   Builder.SetInsertPoint(IfThen12);
@@ -772,8 +826,17 @@ static void expandIToFP(Instruction *IToFP) {
 
   // entry:
   Builder.SetInsertPoint(Entry);
+  // We assume that the zero is an unlikely input case, so the branch to 'End'
+  // is the unlikely path.
   Value *Cmp = Builder.CreateICmpEQ(IntVal, ConstantInt::getSigned(IntTy, 0));
-  Builder.CreateCondBr(Cmp, End, IfEnd);
+  Value *CondBrEntry = Builder.CreateCondBr(Cmp, End, IfEnd);
+  if (!ProfcheckDisableMetadataFixes) {
+    if (Instruction *CondBrEntryInst = dyn_cast<Instruction>(CondBrEntry)) {
+      CondBrEntryInst->setMetadata(
+          LLVMContext::MD_prof,
+          MDBuilder(CondBrEntryInst->getContext()).createLikelyBranchWeights());
+    }
+  }
 
   // if.end:
   Builder.SetInsertPoint(IfEnd);
@@ -790,13 +853,32 @@ static void expandIToFP(Instruction *IToFP) {
                                   FloatWidth == 128 ? Call : Cast);
   Value *Cmp3 = Builder.CreateICmpSGT(
       Sub1, Builder.getIntN(BitWidthNew, FPMantissaWidth + 1));
-  Builder.CreateCondBr(Cmp3, IfThen4, IfElse);
+  // We assume the case where the input exceeds the mantissa width and
+  // proceeds to rounding logic is more likely than the case where the
+  // input fits perfectly within the mantissa width.
+  Value *CondBrIfEnd = Builder.CreateCondBr(Cmp3, IfThen4, IfElse);
+  if (!ProfcheckDisableMetadataFixes) {
+    if (Instruction *CondBrIfEndInst = dyn_cast<Instruction>(CondBrIfEnd)) {
+      CondBrIfEndInst->setMetadata(
+          LLVMContext::MD_prof,
+          MDBuilder(CondBrIfEndInst->getContext()).createLikelyBranchWeights());
+    }
+  }
 
   // if.then4:
   Builder.SetInsertPoint(IfThen4);
   llvm::SwitchInst *SI = Builder.CreateSwitch(Sub1, SwDefault);
   SI->addCase(Builder.getIntN(BitWidthNew, FPMantissaWidth + 2), SwBB);
   SI->addCase(Builder.getIntN(BitWidthNew, FPMantissaWidth + 3), SwEpilog);
+  // Add branch weights to the SwitchInst. The weights are provided for the
+  // default case first (SwDefault), followed by each explicit case in the
+  // order they were added (SwBB, then SwEpilog). Because the following cases
+  // are rare, the default case is given a likely weight.
+  if (!ProfcheckDisableMetadataFixes) {
+    SI->setMetadata(LLVMContext::MD_prof,
+                    MDBuilder(SI->getContext())
+                        .createBranchWeights({(1U << 20) - 1, 1, 1}));
+  }
 
   // sw.bb:
   Builder.SetInsertPoint(SwBB);
@@ -850,7 +932,17 @@ static void expandIToFP(Instruction *IToFP) {
     ExtractT64 = Builder.CreateTrunc(Sub2, Builder.getInt64Ty());
   else
     ExtractT64 = Builder.CreateTrunc(Extract63, Builder.getInt32Ty());
-  Builder.CreateCondBr(PosOrNeg, IfEnd26, IfThen20);
+  // Rounding usually keeps the exponent within its current magnitude and
+  // overflow is rare. The False path is unlikely to be taken.
+  Value *CondBrSwEpilog = Builder.CreateCondBr(PosOrNeg, IfEnd26, IfThen20);
+  if (!ProfcheckDisableMetadataFixes) {
+    if (Instruction *CondBrSwEpilogInst =
+            dyn_cast<Instruction>(CondBrSwEpilog)) {
+      CondBrSwEpilogInst->setMetadata(
+          LLVMContext::MD_prof, MDBuilder(CondBrSwEpilogInst->getContext())
+                                    .createLikelyBranchWeights());
+    }
+  }
 
   // if.then20
   Builder.SetInsertPoint(IfThen20);
diff --git a/llvm/lib/Transforms/Utils/IntegerDivision.cpp b/llvm/lib/Transforms/Utils/IntegerDivision.cpp
index e95a7a9ae525a..d3ab3e348bf42 100644
--- a/llvm/lib/Transforms/Utils/IntegerDivision.cpp
+++ b/llvm/lib/Transforms/Utils/IntegerDivision.cpp
@@ -16,13 +16,23 @@
 #include "llvm/Transforms/Utils/IntegerDivision.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Instruction.h"
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/MDBuilder.h"
+#include "llvm/IR/ProfDataUtils.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/CommandLine.h"
 
 using namespace llvm;
 
 #define DEBUG_TYPE "integer-division"
 
+namespace llvm {
+extern cl::opt<bool> ProfcheckDisableMetadataFixes;
+}
+
 /// Generate code to compute the remainder of two signed integers. Returns the
 /// remainder, which will have the sign of the dividend. Builder's insert point
 /// should be pointing where the caller wants code generated, e.g. at the srem
@@ -235,11 +245,51 @@ static Value *generateUnsignedDivisionCode(Value *Dividend, Value *Divisor,
   Value *Tmp1 = Builder.CreateCall(CTLZ, {Dividend, True});
   Value *SR          = Builder.CreateSub(Tmp0, Tmp1);
   Value *Ret0_4      = Builder.CreateICmpUGT(SR, MSB);
+
+  // Add 'unlikely' branch weights. We mark the case where either the divisor
+  // or the dividend is equal to zero as unlikely.
   Value *Ret0        = Builder.CreateLogicalOr(Ret0_3, Ret0_4);
+  if (!ProfcheckDisableMetadataFixes) {
+    if (Instruction *Ret0SelectI = dyn_cast<Instruction>(Ret0)) {
+      Ret0SelectI->setMetadata(
+          LLVMContext::MD_prof,
+          MDBuilder(Ret0SelectI->getContext()).createUnlikelyBranchWeights());
+    }
+  }
   Value *RetDividend = Builder.CreateICmpEQ(SR, MSB);
+
+  // Add 'unlikely' branch weights. We mark the case where the divisor is
+  // greater than the dividend as unlikely.
   Value *RetVal      = Builder.CreateSelect(Ret0, Zero, Dividend);
+  if (!ProfcheckDisableMetadataFixes) {
+    if (Instruction *RetValSelectI = dyn_cast<Instruction>(RetVal)) {
+      RetValSelectI->setMetadata(
+          LLVMContext::MD_prof,
+          MDBuilder(RetValSelectI->getContext()).createUnlikelyBranchWeights());
+    }
+  }
+  // This select instruction (EarlyRet) is used to check another edge case, and
+  // it shares the same branch weights as RetVal so we reuse the 'unlikely'
+  // weights here.
   Value *EarlyRet    = Builder.CreateLogicalOr(Ret0, RetDividend);
-  Builder.CreateCondBr(EarlyRet, End, BB1);
+  if (!ProfcheckDisableMetadataFixes) {
+    if (Instruction *EarlyRetSelectI = dyn_cast<Instruction>(EarlyRet)) {
+      EarlyRetSelectI->setMetadata(LLVMContext::MD_prof,
+                                   MDBuilder(EarlyRetSelectI->getContext())
+                                       .createUnlikelyBranchWeights());
+    }
+  }
+
+  // The condition of this branch is based on `EarlyRet`. `EarlyRet` is true
+  // only for special cases like dividend or divisor being zero, or the divisor
+  // being greater than the dividend. Thus, the branch to `End` is unlikely,
+  // and we expect to more frequently enter `BB1`.
+  Instruction *ConBrSpecialCases = Builder.CreateCondBr(EarlyRet, End, BB1);
+  if (!ProfcheckDisableMetadataFixes) {
+    ConBrSpecialCases->setMetadata(LLVMContext::MD_prof,
+                                   MDBuilder(ConBrSpecialCases->getContext())
+                                       .createUnlikelyBranchWeights());
+  }
 
   // ; bb1:                                             ; preds = %special-cases
   // ;   %sr_1     = add i32 %sr, 1
@@ -251,8 +301,17 @@ static Value *generateUnsignedDivisionCode(Value *Dividend, Value *Divisor,
   Value *SR_1     = Builder.CreateAdd(SR, One);
   Value *Tmp2     = Builder.CreateSub(MSB, SR);
   Value *Q        = Builder.CreateShl(Dividend, Tmp2);
+  // We assume that in the common case, the dividend's magnitude is larger than
+  // the divisor's magnitude such that the loop counter (SR) is non-zero.
+  // Specifically, if |dividend| >= 2 * |divisor|, then SR >= 1, ensuring SR_1
+  // >= 2. The case where SR_1 == 0 is thus considered unlikely.
   Value *SkipLoop = Builder.CreateICmpEQ(SR_1, Zero);
-  Builder.CreateCondBr(SkipLoop, LoopExit, Preheader);
+  Instruction *ConBrBB1 = Builder.CreateCondBr(SkipLoop, LoopExit, Preheader);
+  if (!ProfcheckDisableMetadataFixes) {
+    ConBrBB1->setMetadata(
+        LLVMContext::MD_prof,
+        MDBuilder(ConBrBB1->getContext()).createUnlikelyBranchWeights());
+  }
 
   // ; preheader:                                           ; preds = %bb1
   // ;   %tmp3 = lshr i32 %dividend, %sr_1
@@ -298,7 +357,15 @@ static Value *generateUnsignedDivisionCode(Value *Dividend, Value *Divisor,
   Value *R     = Builder.CreateSub(Tmp7, Tmp11);
   Value *SR_2  = Builder.CreateAdd(SR_3, NegOne);
   Value *Tmp12 = Builder.CreateICmpEQ(SR_2, Zero);
-  Builder.CreateCondBr(Tmp12, LoopExit, DoWhile);
+  // The loop implements the core bit-by-bit binary long division algorithm.
+  // The branch is unlikely to exit the loop early until it has processed all
+  // significant bits.
+  Instruction *ConBrDoWhile = Builder.CreateCondBr(Tmp12, LoopExit, DoWhile);
+  if (!ProfcheckDisableMetadataFixes) {
+    ConBrDoWhile->setMetadata(
+        LLVMContext::MD_prof,
+        MDBuilder(ConBrDoWhile->getContext()).createUnlikelyBranchWeights());
+  }
 
   // ; loop-exit:                                      ; preds = %do-while, %bb1
   // ;   %carry_2 = phi i32 [ 0, %bb1 ], [ %carry, %do-while ]
diff --git a/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll b/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
index 15619532414ea..cdd34cbde6ddd 100644
--- a/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/fptoi.i128.ll
@@ -11,8 +11,8 @@ define i128 @fptosi_f64_to_i128(double %x) {
 ; SDAG-NEXT:    v_mov_b32_e32 v7, 0
 ; SDAG-NEXT:    s_mov_b64 s[4:5], 0x3fe
 ; SDAG-NEXT:    v_mov_b32_e32 v4, v0
-; SDAG-NEXT:    v_mov_b32_e32 v0, 0
 ; SDAG-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[6:7]
+; SDAG-NEXT:    v_mov_b32_e32 v0, 0
 ; SDAG-NEXT:    v_mov_b32_e32 v2, 0
 ; SDAG-NEXT:    v_mov_b32_e32 v1, 0
 ; SDAG-NEXT:    v_mov_b32_e32 v3, 0
@@ -20,8 +20,7 @@ define i128 @fptosi_f64_to_i128(double %x) {
 ; SDAG-NEXT:    s_cbranch_execz .LBB0_10
 ; SDAG-NEXT:  ; %bb.1: ; %fp-to-i-if-end
 ; SDAG-NEXT:    v_add_co_u32_e32 v0, vcc, 0xfffffb81, v6
-; SDAG-NEXT:    v_mov_b32_e32 v1, -1
-; SDAG-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; SDAG-NEXT:    v_addc_co_u32_e32 v1, vcc, -1, v7, vcc
 ; SDAG-NEXT:    v_addc_co_u32_e32 v2, vcc, -1, v7, vcc
 ; SDAG-NEXT:    s_movk_i32 s6, 0xff7f
 ; SDAG-NEXT:    v_addc_co_u32_e32 v3, vcc, -1, v7, vcc
@@ -52,41 +51,42 @@ define i128 @fptosi_f64_to_i128(double %x) {
 ; SDAG-NEXT:  ; %bb.3: ; %fp-to-i-if-else
 ; SDAG-NEXT:    v_sub_u32_e32 v0, 0x473, v6
 ; SDAG-NEXT:    v_add_u32_e32 v2, 0xfffffb8d, v6
-; SDAG-NEXT:    v_add_u32_e32 v3, 0xfffffbcd, v6
+; SDAG-NEXT:    v_add_u32_e32 v7, 0xfffffbcd, v6
 ; SDAG-NEXT:    v_lshrrev_b64 v[0:1], v0, v[4:5]
-; SDAG-NEXT:    v_lshlrev_b64 v[6:7], v2, v[4:5]
-; SDAG-NEXT:    v_cmp_gt_u32_e64 s[4:5], 64, v3
-; SDAG-NEXT:    v_cmp_ne_u32_e64 s[6:7], 0, v3
-; SDAG-NEXT:    v_lshlrev_b64 v[3:4], v3, v[4:5]
-; SDAG-NEXT:    v_cndmask_b32_e64 v1, v7, v1, s[4:5]
-; SDAG-NEXT:    v_cndmask_b32_e64 v1, 0, v1, s[6:7]
-; SDAG-NEXT:    v_cndmask_b32_e64 v0, v6, v0, s[4:5]
-; SDAG-NEXT:    v_cndmask_b32_e64 v7, 0, v3, s[4:5]
-; SDAG-NEXT:    v_cndmask_b32_e64 v5, 0, v0, s[6:7]
-; SDAG-NEXT:    v_mul_lo_u32 v12, v10, v1
-; SDAG-NEXT:    v_mad_u64_u32 v[0:1], s[6:7], v7, v10, 0
-; SDAG-NEXT:    v_mov_b32_e32 v2, 0
-; SDAG-NEXT:    v_cndmask_b32_e64 v13, 0, v4, s[4:5]
-; SDAG-NEXT:    v_mad_u64_u32 v[3:4], s[4:5], v13, v10, v[1:2]
-; SDAG-NEXT:    v_mul_lo_u32 v11, v8, v5
-; SDAG-NEXT:    v_mad_u64_u32 v[5:6], s[6:7], v10, v5, 0
-; SDAG-NEXT:    v_mov_b32_e32 v1, v3
-; SDAG-NEXT:    v_mad_u64_u32 v[1:2], s[4:5], v7, v8, v[1:2]
-; SDAG-NEXT:    v_add3_u32 v6, v6, v12, v11
-; SDAG-NEXT:    v_mad_u64_u32 v[5:6], s[4:5], v9, v7, v[5:6]
-; SDAG-NEXT:    v_add_co_u32_e64 v2, s[4:5], v4, v2
+; SDAG-NEXT:    v_lshlrev_b64 v[2:3], v2, v[4:5]
+; SDAG-NEXT:    v_cmp_gt_u32_e64 s[4:5], 64, v7
+; SDAG-NEXT:    v_cndmask_b32_e64 v1, v3, v1, s[4:5]
+; SDAG-NEXT:    v_cmp_ne_u32_e64 s[6:7], 0, v7
+; SDAG-NEXT:    v_cndmask_b32_e64 v6, 0, v1, s[6:7]
+; SDAG-NEXT:    v_cndmask_b32_e64 v2, v2, v0, s[4:5]
+; SDAG-NEXT:    v_lshlrev_b64 v[0:1], v7, v[4:5]
+; SDAG-NEXT:    v_cndmask_b32_e64 v2, 0, v2, s[6:7]
+; SDAG-NEXT:    v_cndmask_b32_e64 v12, 0, v0, s[4:5]
+; SDAG-NEXT:    v_cndmask_b32_e64 v11, 0, v1, s[4:5]
+; SDAG-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v12, v10, 0
+; SDAG-NEXT:    v_mov_b32_e32 v3, 0
+; SDAG-NEXT:    v_mul_lo_u32 v13, v8, v2
+; SDAG-NEXT:    v_mad_u64_u32 v[4:5], s[4:5], v10, v2, 0
+; SDAG-NEXT:    v_mov_b32_e32 v2, v1
+; SDAG-NEXT:    v_mul_lo_u32 v14, v10, v6
+; SDAG-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], v11, v10, v[2:3]
+; SDAG-NEXT:    ; implicit-def: $vgpr10
+; SDAG-NEXT:    v_add3_u32 v5, v5, v14, v13
+; SDAG-NEXT:    v_mov_b32_e32 v2, v6
+; SDAG-NEXT:    v_mad_u64_u32 v[1:2], s[4:5], v12, v8, v[2:3]
+; SDAG-NEXT:    v_mad_u64_u32 v[4:5], s[4:5], v9, v12, v[4:5]
+; SDAG-NEXT:    v_add_co_u32_e64 v2, s[4:5], v7, v2
 ; SDAG-NEXT:    v_addc_co_u32_e64 v3, s[4:5], 0, 0, s[4:5]
-; SDAG-NEXT:    v_mul_lo_u32 v10, v9, v13
-; SDAG-NEXT:    v_mul_lo_u32 v7, v9, v7
-; SDAG-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], v13, v8, v[2:3]
+; SDAG-NEXT:    v_mul_lo_u32 v6, v9, v11
+; SDAG-NEXT:    v_mul_lo_u32 v9, v9, v12
+; SDAG-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], v11, v8, v[2:3]
 ; SDAG-NEXT:    ; implicit-def: $vgpr8
-; SDAG-NEXT:    ; implicit-def: $vgpr9
-; SDAG-NEXT:    v_add3_u32 v4, v7, v6, v10
-; SDAG-NEXT:    v_add_co_u32_e64 v2, s[4:5], v2, v5
-; SDAG-NEXT:    v_addc_co_u32_e64 v3, s[4:5], v3, v4, s[4:5]
+; SDAG-NEXT:    v_add3_u32 v5, v9, v5, v6
+; SDAG-NEXT:    v_add_co_u32_e64 v2, s[4:5], v2, v4
+; SDAG-NEXT:    v_addc_co_u32_e64 v3, s[4:5], v3, v5, s[4:5]
 ; SDAG-NEXT:    ; implicit-def: $vgpr6_vgpr7
 ; SDAG-NEXT:    ; implicit-def: $vgpr4_vgpr5
-; SDAG-NEXT:    ; implicit-def: $vgpr10
+; SDAG-NEXT:    ; implicit-def: $vgpr9
 ; SDAG-NEXT:  .LBB0_4: ; %Flow
 ; SDAG-NEXT:    s_andn2_saveexec_b64 s[12:13], s[12:13]
 ; SDAG-NEXT:    s_cbranch_execz .LBB0_6
@@ -117,11 +117,10 @@ define i128 @fptosi_f64_to_i128(double %x) {
 ; SDAG-NEXT:  ; %bb.8: ; %fp-to-i-if-then5
 ; SDAG-NEXT:    v_bfrev_b32_e32 v0, 1
 ; SDAG-NEXT:    v_bfrev_b32_e32 v1, -2
-; SDAG-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; SDAG-NEXT:    v_cndmask_b32_e64 v1, 0, -1, vcc
-; SDAG-NEXT:    v_mov_b32_e32 v3, v2
-; SDAG-NEXT:    v_mov_b32_e32 v0, v1
-; SDAG-NEXT:    v_mov_b32_e32 v2, v1
+; SDAG-NEXT:    v_cndmask_b32_e64 v2, 0, -1, vcc
+; SDAG-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
+; SDAG-NEXT:    v_mov_b32_e32 v0, v2
+; SDAG-NEXT:    v_mov_b32_e32 v1, v2
 ; SDAG-NEXT:  ; %bb.9: ; %Flow3
 ; SDAG-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; SDAG-NEXT:  .LBB0_10: ; %fp-to-i-cleanup
@@ -233,36 +232,37 @@ define i128 @fptosi_f64_to_i128(double %x) {
 ; GISEL-NEXT:    s_xor_b64 s[16:17], exec, s[6:7]
 ; GISEL-NEXT:    s_cbranch_execz .LBB0_4
 ; GISEL-NEXT:  ; %bb.3: ; %fp-to-i-if-else
-; GISEL-NEXT:    v_add_u32_e32 v2, 0xfffffbcd, v6
-; GISEL-NEXT:    v_lshlrev_b64 v[0:1], v2, v[4:5]
-; GISEL-NEXT:    v_cmp_gt_u32_e32 vcc, 64, v2
+; GISEL-NEXT:    v_add_u32_e32 v7, 0xfffffbcd, v6
+; GISEL-NEXT:    v_lshlrev_b64 v[0:1], v7, v[4:5]
+; GISEL-NEXT:    v_cmp_gt_u32_e32 vcc, 64, v7
 ; GISEL-NEXT:    v_cndmask_b32_e32 v10, 0, v0, vcc
 ; GISEL-NEXT:    v_cndmask_b32_e32 v11, 0, v1, vcc
 ; GISEL-NEXT:    v_mad_u64_u32 v[0:1], s[6:7], v10, v9, 0
-; GISEL-NEXT:    v_add_u32_e32 v3, 0xfffffb8d, v6
-; GISEL-NEXT:    v_sub_u32_e32 v6, 64, v2
-; GISEL-NEXT:    v_lshrrev_b64 v[6:7], v6, v[4:5]
-; GISEL-NEXT:    v_lshlrev_b64 v[3:4], v3, v[4:5]
-; GISEL-NEXT:    v_cmp_eq_u32_e64 s[6:7], 0, v2
-; GISEL-NEXT:    v_cndmask_b32_e32 v3, v3, v6, vcc
-; GISEL-NEXT:    v_mad_u64_u32 v[5:6], s[8:9], v11, v9, v[0:1]
-; GISEL-NEXT:    v_cndmask_b32_e64 v12, v3, 0, s[6:7]
+; GISEL-NEXT:    v_add_u32_e32 v6, 0xfffffb8d, v6
+; GISEL-NEXT:    v_sub_u32_e32 v2, 64, v7
+; GISEL-NEXT:    v_lshrrev_b64 v[2:3], v2, v[4:5]
+; GISEL-NEXT:    v_lshlrev_b64 v[4:5], v6, v[4:5]
+; GISEL-NEXT:    v_cmp_eq_u32_e64 s[6:7], 0, v7
+; GISEL-NEXT:    v_mad_u64_u32 v[6:7], s[8:9], v11, v9, v[0:1]
+; GISEL-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
+; GISEL-NEXT:    v_cndmask_b32_e64 v12, v2, 0, s[6:7]
+; GISEL-NEXT:    v_mad_u64_u32 v[6:7], s[8:9], v12, v8, v[6:7]
 ; GISEL-NEXT:    v_mad_u64_u32 v[0:1], s[8:9], v10, v8, 0
-; GISEL-NEXT:    v_mad_u64_u32 v[2:3], s[8:9], v12, v8, v[5:6]
-; GISEL-NEXT:    v_mul_lo_u32 v13, v11, v9
-; GISEL-NEXT:    v_mad_u64_u32 v[5:6], s[8:9], v10, v9, v[1:2]
-; GISEL-NEXT:    v_mul_lo_u32 v10, v10, v9
-; GISEL-NEXT:    v_mad_u64_u32 v[1:2], s[10:11], v11, v8, v[5:6]
-; GISEL-NEXT:    v_addc_co_u32_e64 v3, s[10:11], v3, v10, s[10:11]
-; GISEL-NEXT:    v_addc_co_u32_e64 v3, s[8:9], v3, v13, s[8:9]
-; GISEL-NEXT:    v_mad_u64_u32 v[5:6], s[8:9], v12, v9, v[3:4]
-; GISEL-NEXT:    v_cndmask_b32_e32 v3, v4, v7, vcc
-; GISEL-NEXT:    v_cndmask_b32_e64 v7, v3, 0, s[6:7]
-; GISEL-NEXT:    v_mad_u64_u32 v[3:4], s[6:7], v7, v8, v[5:6]
+; GISEL-NEXT:    v_mov_b32_e32 v2, v6
+; GISEL-NEXT:    v_mul_lo_u32 v6, v10, v9
+; GISEL-NEXT:    v_mad_u64_u32 v[1:2], s[8:9], v10, v9, v[1:2]
+; GISEL-NEXT:    v_mul_lo_u32 v4, v11, v9
+; GISEL-NEXT:    v_cndmask_b32_e32 v3, v5, v3, vcc
+; GISEL-NEXT:    v_mad_u64_u32 v[1:2], s[10:11], v11, v8, v[1:2]
+; GISEL-NEXT:    v_addc_co_u32_e64 v6, s[10:11], v7, v6, s[10:11]
+; GISEL-NEXT:    v_addc_co_u32_e64 v4, s[8:9], v6, v4, s[8:9]
+; GISEL-NEXT:    v_mad_u64_u32 v[6:7], s[8:9], v12, v9, v[4:5]
+; GISEL-NEXT:    v_cndmask_b32_e64 v3, v3, 0, s[6:7]
+; GISEL-NEXT:    ; implicit-def: $vgpr9
+; GISEL-NEXT:    v_mad_u64_u32 v[3:4], s[6:7], v3, v8, v[6:7]
 ; GISEL-NEXT:    ; implicit-def: $vgpr6
 ; GISEL-NEXT:    ; implicit-def: $vgpr4_vgpr5
 ; GISEL-NEXT:    ; implicit-def: $vgpr8
-; GISEL-NEXT:    ; implicit-def: $vgpr9
 ; GISEL-NEXT:  .LBB0_4: ; %Flow
 ; GISEL-NEXT:    s_andn2_saveexec_b64 s[8:9], s[16:17]
 ; GISEL-NEXT:    s_cbranch_execz .LBB0_6
@@ -273,17 +273,17 @@ define i128 @fptosi_f64_to_i128(double %x) {
 ; GISEL-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
 ; GISEL-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
 ; GISEL-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
-; GISEL-NEXT:    v_cndmask_b32_e32 v6, v0, v4, vcc
-; GISEL-NEXT:    v_cndmask_b32_e32 v7, v1, v5, vcc
-; GISEL-NEXT:    v_mad_u64_u32 v[4:5], s[6:7], v6, v9, 0
-; GISEL-NEXT:    v_mad_u64_u32 v[0:1], s[6:7], v6, v8, 0
-; GISEL-NEXT:    v_mad_u64_u32 v[2:3], s[6:7], v7, v9, v[4:5]
-; GISEL-NEXT:    v_mul_lo_u32 v10, v7, v9
-; GISEL-NEXT:    v_mad_u64_u32 v[4:5], vcc, v6, v9, v[1:2]
-; GISEL-NEXT:    v_mul_lo_u32 v6, v6, v9
-; GISEL-NEXT:    v_mad_u64_u32 v[1:2], s[6:7], v7, v8, v[4:5]
-; GISEL-NEXT:    v_addc_co_u32_e64 v3, s[6:7], v3, v6, s[6:7]
-; GISEL-NEXT:    v_addc_co_u32_e32 v3, vcc, v3, v10, vcc
+; GISEL-NEXT:    v_cndmask_b32_e32 v4, v0, v4, vcc
+; GISEL-NEXT:    v_mad_u64_u32 v[2:3], s[6:7], v4, v9, 0
+; GISEL-NEXT:    v_cndmask_b32_e32 v5, v1, v5, vcc
+; GISEL-NEXT:    v_mad_u64_u32 v[0:1], s[6:7], v4, v8, 0
+; GISEL-NEXT:    v_mad_u64_u32 v[2:3], s[6:7], v5, v9, v[2:3]
+; GISEL-NEXT:    v_mul_lo_u32 v6, v5, v9
+; GISEL-NEXT:    v_mad_u64_u32 v[1:2], vcc, v4, v9, v[1:2]
+; GISEL-NEXT:    v_mul_lo_u32 v4, v4, v9
+; GISEL-NEXT:    v_mad_u64_u32 v[1:2], s[6:7], v5, v8, v[1:2]
+; GISEL-NEXT:    v_addc_co_u32_e64 v3, s[6:7], v3, v4, s[6:7]
+; GISEL-NEXT:    v_addc_co_u32_e32 v3, vcc, v3, v6, vcc
 ; GISEL-NEXT:  .LBB0_6: ; %Flow1
 ; GISEL-NEXT:    s_or_b64 exec, exec, s[8:9]
 ; GISEL-NEXT:  .LBB0_7: ; %Flow2
@@ -376,8 +376,8 @@ define i128 @fptoui_f64_to_i128(double %x) {
 ; SDAG-NEXT:    v_mov_b32_e32 v7, 0
 ; SDAG-NEXT:    s_mov_b64 s[4:5], 0x3fe
 ; SDAG-NEXT:    v_mov_b32_e32 v4, v0
-; SDAG-NEXT:    v_mov_b32_e32 v0, 0
 ; SDAG-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[6:7]
+; SDAG-NEXT:    v_mov_b32_e32 v0, 0
 ; SDAG-NEXT:    v_mov_b32_e32 v2, 0
 ; SDAG-NEXT:    v_mov_b32_e32 v1, 0
 ; SDAG-NEXT:    v_mov_b32_e32 v3, 0
@@ -385,8 +385,7 @@ define i128 @fptoui_f64_to_i128(double %x) {
 ; SDAG-NEXT:    s_cbranch_execz .LBB1_10
 ; SDAG-NEXT:  ; %bb.1: ; %fp-to-i-if-end
 ; SDAG-NEXT:    v_add_co_u32_e32 v0, vcc, 0xfffffb81, v6
-; SDAG-NEXT:    v_mov_b32_e32 v1, -1
-; SDAG-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; SDAG-NEXT:    v_addc_co_u32_e32 v1, vcc, -1, v7, vcc
 ; SDAG-NEXT:    v_addc_co_u32_e32 v2, vcc, -1, v7, vcc
 ; SDAG-NEXT:    s_movk_i32 s6, 0xff7f
 ; SDAG-NEXT:    v_addc_co_u32_e32 v3, vcc, -1, v7, vcc
@@ -417,41 +416,42 @@ define i128 @fptoui_f64_to_i128(double %x) {
 ; SDAG-NEXT:  ; %bb.3: ; %fp-to-i-if-else
 ; SDAG-NEXT:    v_sub_u32_e32 v0, 0x473, v6
 ; SDAG-NEXT:    v_add_u32_e32 v2, 0xfffffb8d, v6
-; SDAG-NEXT:    v_add_u32_e32 v3, 0xfffffbcd, v6
+; SDAG-NEXT:    v_add_u32_e32 v7, 0xfffffbcd, v6
 ; SDAG-NEXT:    v_lshrrev_b64 v[0:1], v0, v[4:5]
-; SDAG-NEXT:    v_lshlrev_b64 v[6:7], v2, v[4:5]
-; SDAG-NEXT:    v_cmp_gt_u32_e64 s[4:5], 64, v3
-; SDAG-NEXT:    v_cmp_ne_u32_e64 s[6:7], 0, v3
-; SDAG-NEXT:    v_lshlrev_b64 v[3:4], v3, v[4:5]
-; SDAG-NEXT:    v_cndmask_b32_e64 v1, v7, v1, s[4:5]
-; SDAG-NEXT:    v_cndmask_b32_e64 v1, 0, v1, s[6:7]
-; SDAG-NEXT:    v_cndmask_b32_e64 v0, v6, v0, s[4:5]
-; SDAG-NEXT:    v_cndmask_b32_e64 v7, 0, v3, s[4:5]
-; SDAG-NEXT:    v_cndmask_b32_e64 v5, 0, v0, s[6:7]
-; SDAG-NEXT:    v_mul_lo_u32 v12, v10, v1
-; SDAG-NEXT:    v_mad_u64_u32 v[0:1], s[6:7], v7, v10, 0
-; SDAG-NEXT:    v_mov_b32_e32 v2, 0
-; SDAG-NEXT:    v_cndmask_b32_e64 v13, 0, v4, s[4:5]
-; SDAG-NEXT:    v_mad_u64_u32 v[3:4], s[4:5], v13, v10, v[1:2]
-; SDAG-NEXT:    v_mul_lo_u32 v11, v8, v5
-; SDAG-NEXT:    v_mad_u64_u32 v[5:6], s[6:7], v10, v5, 0
-; SDAG-NEXT:    v_mov_b32_e32 v1, v3
-; SDAG-NEXT:    v_mad_u64_u32 v[1:2], s[4:5], v7, v8, v[1:2]
-; SDAG-NEXT:    v_add3_u32 v6, v6, v12, v11
-; SDAG-NEXT:    v_mad_u64_u32 v[5:6], s[4:5], v9, v7, v[5:6]
-; SDAG-NEXT:    v_add_co_u32_e64 v2, s[4:5], v4, v2
+; SDAG-NEXT:    v_lshlrev_b64 v[2:3], v2, v[4:5]
+; SDAG-NEXT:    v_cmp_gt_u32_e64 s[4:5], 64, v7
+; SDAG-NEXT:    v_cndmask_b32_e64 v1, v3, v1, s[4:5]
+; SDAG-NEXT:    v_cmp_ne_u32_e64 s[6:7], 0, v7
+; SDAG-NEXT:    v_cndmask_b32_e64 v6, 0, v1, s[6:7]
+; SDAG-NEXT:    v_cndmask_b32_e64 v2, v2, v0, s[4:5]
+; SDAG-NEXT:    v_lshlrev_b64 v[0:1], v7, v[4:5]
+; SDAG-NEXT:    v_cndmask_b32_e64 v2, 0, v2, s[6:7]
+; SDAG-NEXT:    v_cndmask_b32_e64 v12, 0, v0, s[4:5]
+; SDAG-NEXT:    v_cndmask_b32_e64 v11, 0, v1, s[4:5]
+; SDAG-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v12, v10, 0
+; SDAG-NEXT:    v_mov_b32_e32 v3, 0
+; SDAG-NEXT:    v_mul_lo_u32 v13, v8, v2
+; SDAG-NEXT:    v_mad_u64_u32 v[4:5], s[4:5], v10, v2, 0
+; SDAG-NEXT:    v_mov_b32_e32 v2, v1
+; SDAG-NEXT:    v_mul_lo_u32 v14, v10, v6
+; SDAG-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], v11, v10, v[2:3]
+; SDAG-NEXT:    ; implicit-def: $vgpr10
+; SDAG-NEXT:    v_add3_u32 v5, v5, v14, v13
+; SDAG-NEXT:    v_mov_b32_e32 v2, v6
+; SDAG-NEXT:    v_mad_u64_u32 v[1:2], s[4:5], v12, v8, v[2:3]
+; SDAG-NEXT:    v_mad_u64_u32 v[4:5], s[4:5], v9, v12, v[4:5]
+; SDAG-NEXT:    v_add_co_u32_e64 v2, s[4:5], v7, v2
 ; SDAG-NEXT:    v_addc_co_u32_e64 v3, s[4:5], 0, 0, s[4:5]
-; SDAG-NEXT:    v_mul_lo_u32 v10, v9, v13
-; SDAG-NEXT:    v_mul_lo_u32 v7, v9, v7
-; SDAG-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], v13, v8, v[2:3]
+; SDAG-NEXT:    v_mul_lo_u32 v6, v9, v11
+; SDAG-NEXT:    v_mul_lo_u32 v9, v9, v12
+; SDAG-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], v11, v8, v[2:3]
 ; SDAG-NEXT:    ; implicit-def: $vgpr8
-; SDAG-NEXT:    ; implicit-def: $vgpr9
-; SDAG-NEXT:    v_add3_u32 v4, v7, v6, v10
-; SDAG-NEXT:    v_add_co_u32_e64 v2, s[4:5], v2, v5
-; SDAG-NEXT:    v_addc_co_u32_e64 v3, s[4:5], v3, v4, s[4:5]
+; SDAG-NEXT:    v_add3_u32 v5, v9, v5, v6
+; SDAG-NEXT:    v_add_co_u32_e64 v2, s[4:5], v2, v4
+; SDAG-NEXT:    v_addc_co_u32_e64 v3, s[4:5], v3, v5, s[4:5]
 ; SDAG-NEXT:    ; implicit-def: $vgpr6_vgpr7
 ; SDAG-NEXT:    ; implicit-def: $vgpr4_vgpr5
-; SDAG-NEXT:    ; implicit-def: $vgpr10
+; SDAG-NEXT:    ; implicit-def: $vgpr9
 ; SDAG-NEXT:  .LBB1_4: ; %Flow
 ; SDAG-NEXT:    s_andn2_saveexec_b64 s[12:13], s[12:13]
 ; SDAG-NEXT:    s_cbranch_execz .LBB1_6
@@ -482,11 +482,10 @@ define i128 @fptoui_f64_to_i128(double %x) {
 ; SDAG-NEXT:  ; %bb.8: ; %fp-to-i-if-then5
 ; SDAG-NEXT:    v_bfrev_b32_e32 v0, 1
 ; SDAG-NEXT:    v_bfrev_b32_e32 v1, -2
-; SDAG-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; SDAG-NEXT:    v_cndmask_b32_e64 v1, 0, -1, vcc
-; SDAG-NEXT:    v_mov_b32_e32 v3, v2
-; SDAG-NEXT:    v_mov_b32_e32 v0, v1
-; SDAG-NEXT:    v_mov_b32_e32 v2, v1
+; SDAG-NEXT:    v_cndmask_b32_e64 v2, 0, -1, vcc
+; SDAG-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
+; SDAG-NEXT:    v_mov_b32_e32 v0, v2
+; SDAG-NEXT:    v_mov_b32_e32 v1, v2
 ; SDAG-NEXT:  ; %bb.9: ; %Flow3
 ; SDAG-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; SDAG-NEXT:  .LBB1_10: ; %fp-to-i-cleanup
@@ -598,36 +597,37 @@ define i128 @fptoui_f64_to_i128(double %x) {
 ; GISEL-NEXT:    s_xor_b64 s[16:17], exec, s[6:7]
 ; GISEL-NEXT:    s_cbranch_execz .LBB1_4
 ; GISEL-NEXT:  ; %bb.3: ; %fp-to-i-if-else
-; GISEL-NEXT:    v_add_u32_e32 v2, 0xfffffbcd, v6
-; GISEL-NEXT:    v_lshlrev_b64 v[0:1], v2, v[4:5]
-; GISEL-NEXT:    v_cmp_gt_u32_e32 vcc, 64, v2
+; GISEL-NEXT:    v_add_u32_e32 v7, 0xfffffbcd, v6
+; GISEL-NEXT:    v_lshlrev_b64 v[0:1], v7, v[4:5]
+; GISEL-NEXT:    v_cmp_gt_u32_e32 vcc, 64, v7
 ; GISEL-NEXT:    v_cndmask_b32_e32 v10, 0, v0, vcc
 ; GISEL-NEXT:    v_cndmask_b32_e32 v11, 0, v1, vcc
 ; GISEL-NEXT:    v_mad_u64_u32 v[0:1], s[6:7], v10, v9, 0
-; GISEL-NEXT:    v_add_u32_e32 v3, 0xfffffb8d, v6
-; GISEL-NEXT:    v_sub_u32_e32 v6, 64, v2
-; GISEL-NEXT:    v_lshrrev_b64 v[6:7], v6, v[4:5]
-; GISEL-NEXT:    v_lshlrev_b64 v[3:4], v3, v[4:5]
-; GISEL-NEXT:    v_cmp_eq_u32_e64 s[6:7], 0, v2
-; GISEL-NEXT:    v_cndmask_b32_e32 v3, v3, v6, vcc
-; GISEL-NEXT:    v_mad_u64_u32 v[5:6], s[8:9], v11, v9, v[0:1]
-; GISEL-NEXT:    v_cndmask_b32_e64 v12, v3, 0, s[6:7]
+; GISEL-NEXT:    v_add_u32_e32 v6, 0xfffffb8d, v6
+; GISEL-NEXT:    v_sub_u32_e32 v2, 64, v7
+; GISEL-NEXT:    v_lshrrev_b64 v[2:3], v2, v[4:5]
+; GISEL-NEXT:    v_lshlrev_b64 v[4:5], v6, v[4:5]
+; GISEL-NEXT:    v_cmp_eq_u32_e64 s[6:7], 0, v7
+; GISEL-NEXT:    v_mad_u64_u32 v[6:7], s[8:9], v11, v9, v[0:1]
+; GISEL-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
+; GISEL-NEXT:    v_cndmask_b32_e64 v12, v2, 0, s[6:7]
+; GISEL-NEXT:    v_mad_u64_u32 v[6:7], s[8:9], v12, v8, v[6:7]
 ; GISEL-NEXT:    v_mad_u64_u32 v[0:1], s[8:9], v10, v8, 0
-; GISEL-NEXT:    v_mad_u64_u32 v[2:3], s[8:9], v12, v8, v[5:6]
-; GISEL-NEXT:    v_mul_lo_u32 v13, v11, v9
-; GISEL-NEXT:    v_mad_u64_u32 v[5:6], s[8:9], v10, v9, v[1:2]
-; GISEL-NEXT:    v_mul_lo_u32 v10, v10, v9
-; GISEL-NEXT:    v_mad_u64_u32 v[1:2], s[10:11], v11, v8, v[5:6]
-; GISEL-NEXT:    v_addc_co_u32_e64 v3, s[10:11], v3, v10, s[10:11]
-; GISEL-NEXT:    v_addc_co_u32_e64 v3, s[8:9], v3, v13, s[8:9]
-; GISEL-NEXT:    v_mad_u64_u32 v[5:6], s[8:9], v12, v9, v[3:4]
-; GISEL-NEXT:    v_cndmask_b32_e32 v3, v4, v7, vcc
-; GISEL-NEXT:    v_cndmask_b32_e64 v7, v3, 0, s[6:7]
-; GISEL-NEXT:    v_mad_u64_u32 v[3:4], s[6:7], v7, v8, v[5:6]
+; GISEL-NEXT:    v_mov_b32_e32 v2, v6
+; GISEL-NEXT:    v_mul_lo_u32 v6, v10, v9
+; GISEL-NEXT:    v_mad_u64_u32 v[1:2], s[8:9], v10, v9, v[1:2]
+; GISEL-NEXT:    v_mul_lo_u32 v4, v11, v9
+; GISEL-NEXT:    v_cndmask_b32_e32 v3, v5, v3, vcc
+; GISEL-NEXT:    v_mad_u64_u32 v[1:2], s[10:11], v11, v8, v[1:2]
+; GISEL-NEXT:    v_addc_co_u32_e64 v6, s[10:11], v7, v6, s[10:11]
+; GISEL-NEXT:    v_addc_co_u32_e64 v4, s[8:9], v6, v4, s[8:9]
+; GISEL-NEXT:    v_mad_u64_u32 v[6:7], s[8:9], v12, v9, v[4:5]
+; GISEL-NEXT:    v_cndmask_b32_e64 v3, v3, 0, s[6:7]
+; GISEL-NEXT:    ; implicit-def: $vgpr9
+; GISEL-NEXT:    v_mad_u64_u32 v[3:4], s[6:7], v3, v8, v[6:7]
 ; GISEL-NEXT:    ; implicit-def: $vgpr6
 ; GISEL-NEXT:    ; implicit-def: $vgpr4_vgpr5
 ; GISEL-NEXT:    ; implicit-def: $vgpr8
-; GISEL-NEXT:    ; implicit-def: $vgpr9
 ; GISEL-NEXT:  .LBB1_4: ; %Flow
 ; GISEL-NEXT:    s_andn2_saveexec_b64 s[8:9], s[16:17]
 ; GISEL-NEXT:    s_cbranch_execz .LBB1_6
@@ -638,17 +638,17 @@ define i128 @fptoui_f64_to_i128(double %x) {
 ; GISEL-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
 ; GISEL-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
 ; GISEL-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
-; GISEL-NEXT:    v_cndmask_b32_e32 v6, v0, v4, vcc
-; GISEL-NEXT:    v_cndmask_b32_e32 v7, v1, v5, vcc
-; GISEL-NEXT:    v_mad_u64_u32 v[4:5], s[6:7], v6, v9, 0
-; GISEL-NEXT:    v_mad_u64_u32 v[0:1], s[6:7], v6, v8, 0
-; GISEL-NEXT:    v_mad_u64_u32 v[2:3], s[6:7], v7, v9, v[4:5]
-; GISEL-NEXT:    v_mul_lo_u32 v10, v7, v9
-; GISEL-NEXT:    v_mad_u64_u32 v[4:5], vcc, v6, v9, v[1:2]
-; GISEL-NEXT:    v_mul_lo_u32 v6, v6, v9
-; GISEL-NEXT:    v_mad_u64_u32 v[1:2], s[6:7], v7, v8, v[4:5]
-; GISEL-NEXT:    v_addc_co_u32_e64 v3, s[6:7], v3, v6, s[6:7]
-; GISEL-NEXT:    v_addc_co_u32_e32 v3, vcc, v3, v10, vcc
+; GISEL-NEXT:    v_cndmask_b32_e32 v4, v0, v4, vcc
+; GISEL-NEXT:    v_mad_u64_u32 v[2:3], s[6:7], v4, v9, 0
+; GISEL-NEXT:    v_cndmask_b32_e32 v5, v1, v5, vcc
+; GISEL-NEXT:    v_mad_u64_u32 v[0:1], s[6:7], v4, v8, 0
+; GISEL-NEXT:    v_mad_u64_u32 v[2:3], s[6:7], v5, v9, v[2:3]
+; GISEL-NEXT:    v_mul_lo_u32 v6, v5, v9
+; GISEL-NEXT:    v_mad_u64_u32 v[1:2], vcc, v4, v9, v[1:2]
+; GISEL-NEXT:    v_mul_lo_u32 v4, v4, v9
+; GISEL-NEXT:    v_mad_u64_u32 v[1:2], s[6:7], v5, v8, v[1:2]
+; GISEL-NEXT:    v_addc_co_u32_e64 v3, s[6:7], v3, v4, s[6:7]
+; GISEL-NEXT:    v_addc_co_u32_e32 v3, vcc, v3, v6, vcc
 ; GISEL-NEXT:  .LBB1_6: ; %Flow1
 ; GISEL-NEXT:    s_or_b64 exec, exec, s[8:9]
 ; GISEL-NEXT:  .LBB1_7: ; %Flow2
@@ -737,10 +737,11 @@ define i128 @fptosi_f32_to_i128(float %x) {
 ; SDAG:       ; %bb.0: ; %fp-to-i-entry
 ; SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; SDAG-NEXT:    v_mov_b32_e32 v4, v0
-; SDAG-NEXT:    v_mov_b32_e32 v0, 0
 ; SDAG-NEXT:    v_bfe_u32 v5, v4, 23, 8
 ; SDAG-NEXT:    s_movk_i32 s4, 0x7e
+; SDAG-NEXT:    v_mov_b32_e32 v0, 0
 ; SDAG-NEXT:    v_mov_b32_e32 v2, 0
+; SDAG-NEXT:    v_mov_b32_e32 v6, 0
 ; SDAG-NEXT:    v_mov_b32_e32 v1, 0
 ; SDAG-NEXT:    v_mov_b32_e32 v3, 0
 ; SDAG-NEXT:    v_cmp_lt_u32_e32 vcc, s4, v5
@@ -748,9 +749,7 @@ define i128 @fptosi_f32_to_i128(float %x) {
 ; SDAG-NEXT:    s_cbranch_execz .LBB2_10
 ; SDAG-NEXT:  ; %bb.1: ; %fp-to-i-if-end
 ; SDAG-NEXT:    v_add_co_u32_e32 v0, vcc, 0xffffff01, v5
-; SDAG-NEXT:    v_mov_b32_e32 v1, -1
-; SDAG-NEXT:    v_mov_b32_e32 v6, 0
-; SDAG-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; SDAG-NEXT:    v_addc_co_u32_e32 v1, vcc, -1, v6, vcc
 ; SDAG-NEXT:    v_addc_co_u32_e32 v2, vcc, -1, v6, vcc
 ; SDAG-NEXT:    s_movk_i32 s6, 0xff7f
 ; SDAG-NEXT:    v_addc_co_u32_e32 v3, vcc, -1, v6, vcc
@@ -766,14 +765,14 @@ define i128 @fptosi_f32_to_i128(float %x) {
 ; SDAG-NEXT:    s_cbranch_execz .LBB2_7
 ; SDAG-NEXT:  ; %bb.2: ; %fp-to-i-if-end9
 ; SDAG-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; SDAG-NEXT:    v_add_co_u32_e64 v10, s[4:5], -1, v0
+; SDAG-NEXT:    v_add_co_u32_e64 v9, s[4:5], -1, v0
 ; SDAG-NEXT:    s_mov_b64 s[4:5], 0x95
 ; SDAG-NEXT:    v_and_b32_e32 v0, 0x7fffff, v4
 ; SDAG-NEXT:    v_cmp_lt_u64_e64 s[4:5], s[4:5], v[5:6]
-; SDAG-NEXT:    v_cndmask_b32_e64 v9, -1, 0, vcc
-; SDAG-NEXT:    v_cndmask_b32_e64 v11, -1, 1, vcc
-; SDAG-NEXT:    v_or_b32_e32 v7, 0x800000, v0
-; SDAG-NEXT:    v_mov_b32_e32 v8, v6
+; SDAG-NEXT:    v_mov_b32_e32 v7, 0
+; SDAG-NEXT:    v_cndmask_b32_e64 v8, -1, 0, vcc
+; SDAG-NEXT:    v_cndmask_b32_e64 v10, -1, 1, vcc
+; SDAG-NEXT:    v_or_b32_e32 v6, 0x800000, v0
 ; SDAG-NEXT:    ; implicit-def: $vgpr0_vgpr1
 ; SDAG-NEXT:    ; implicit-def: $vgpr2_vgpr3
 ; SDAG-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
@@ -783,56 +782,56 @@ define i128 @fptosi_f32_to_i128(float %x) {
 ; SDAG-NEXT:    v_sub_u32_e32 v0, 0xd6, v5
 ; SDAG-NEXT:    v_add_u32_e32 v2, 0xffffff2a, v5
 ; SDAG-NEXT:    v_add_u32_e32 v4, 0xffffff6a, v5
-; SDAG-NEXT:    v_lshrrev_b64 v[0:1], v0, v[7:8]
-; SDAG-NEXT:    v_lshlrev_b64 v[2:3], v2, v[7:8]
+; SDAG-NEXT:    v_lshrrev_b64 v[0:1], v0, v[6:7]
+; SDAG-NEXT:    v_lshlrev_b64 v[2:3], v2, v[6:7]
 ; SDAG-NEXT:    v_cmp_gt_u32_e64 s[4:5], 64, v4
 ; SDAG-NEXT:    v_cndmask_b32_e64 v1, v3, v1, s[4:5]
 ; SDAG-NEXT:    v_cmp_ne_u32_e64 s[6:7], 0, v4
 ; SDAG-NEXT:    v_cndmask_b32_e64 v3, 0, v1, s[6:7]
 ; SDAG-NEXT:    v_cndmask_b32_e64 v2, v2, v0, s[4:5]
-; SDAG-NEXT:    v_lshlrev_b64 v[0:1], v4, v[7:8]
+; SDAG-NEXT:    v_lshlrev_b64 v[0:1], v4, v[6:7]
 ; SDAG-NEXT:    v_cndmask_b32_e64 v2, 0, v2, s[6:7]
-; SDAG-NEXT:    v_cndmask_b32_e64 v7, 0, v0, s[4:5]
-; SDAG-NEXT:    v_mad_u64_u32 v[4:5], s[6:7], v7, v11, 0
-; SDAG-NEXT:    v_cndmask_b32_e64 v13, 0, v1, s[4:5]
-; SDAG-NEXT:    v_mul_lo_u32 v8, v9, v2
-; SDAG-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v13, v11, v[5:6]
-; SDAG-NEXT:    v_mul_lo_u32 v12, v11, v3
-; SDAG-NEXT:    v_mad_u64_u32 v[2:3], s[6:7], v11, v2, 0
-; SDAG-NEXT:    v_mov_b32_e32 v5, v0
-; SDAG-NEXT:    v_mad_u64_u32 v[5:6], s[4:5], v7, v9, v[5:6]
-; SDAG-NEXT:    v_add3_u32 v3, v3, v12, v8
-; SDAG-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], v10, v7, v[2:3]
-; SDAG-NEXT:    v_add_co_u32_e64 v0, s[4:5], v1, v6
-; SDAG-NEXT:    v_addc_co_u32_e64 v1, s[4:5], 0, 0, s[4:5]
-; SDAG-NEXT:    v_mul_lo_u32 v8, v10, v13
-; SDAG-NEXT:    v_mul_lo_u32 v7, v10, v7
-; SDAG-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v13, v9, v[0:1]
-; SDAG-NEXT:    ; implicit-def: $vgpr11
-; SDAG-NEXT:    ; implicit-def: $vgpr9
-; SDAG-NEXT:    ; implicit-def: $vgpr10
-; SDAG-NEXT:    v_add3_u32 v3, v7, v3, v8
-; SDAG-NEXT:    v_add_co_u32_e64 v2, s[4:5], v0, v2
-; SDAG-NEXT:    v_addc_co_u32_e64 v3, s[4:5], v1, v3, s[4:5]
-; SDAG-NEXT:    v_mov_b32_e32 v0, v4
-; SDAG-NEXT:    v_mov_b32_e32 v1, v5
+; SDAG-NEXT:    v_cndmask_b32_e64 v12, 0, v0, s[4:5]
+; SDAG-NEXT:    v_cndmask_b32_e64 v11, 0, v1, s[4:5]
+; SDAG-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v12, v10, 0
+; SDAG-NEXT:    v_mul_lo_u32 v13, v8, v2
+; SDAG-NEXT:    v_mul_lo_u32 v14, v10, v3
+; SDAG-NEXT:    v_mov_b32_e32 v6, v1
+; SDAG-NEXT:    v_mad_u64_u32 v[4:5], s[4:5], v11, v10, v[6:7]
+; SDAG-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], v10, v2, 0
+; SDAG-NEXT:    v_mov_b32_e32 v6, v4
+; SDAG-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], v12, v8, v[6:7]
+; SDAG-NEXT:    v_add3_u32 v3, v3, v14, v13
+; SDAG-NEXT:    v_mad_u64_u32 v[1:2], s[4:5], v9, v12, v[2:3]
+; SDAG-NEXT:    v_add_co_u32_e64 v3, s[4:5], v5, v7
+; SDAG-NEXT:    v_addc_co_u32_e64 v4, s[4:5], 0, 0, s[4:5]
+; SDAG-NEXT:    v_mul_lo_u32 v10, v9, v11
+; SDAG-NEXT:    v_mul_lo_u32 v9, v9, v12
+; SDAG-NEXT:    v_mad_u64_u32 v[3:4], s[4:5], v11, v8, v[3:4]
+; SDAG-NEXT:    ; implicit-def: $vgpr8
+; SDAG-NEXT:    v_add3_u32 v5, v9, v2, v10
+; SDAG-NEXT:    v_add_co_u32_e64 v2, s[4:5], v3, v1
+; SDAG-NEXT:    v_addc_co_u32_e64 v3, s[4:5], v4, v5, s[4:5]
+; SDAG-NEXT:    v_mov_b32_e32 v1, v6
 ; SDAG-NEXT:    ; implicit-def: $vgpr5_vgpr6
-; SDAG-NEXT:    ; implicit-def: $vgpr7_vgpr8
+; SDAG-NEXT:    ; implicit-def: $vgpr6_vgpr7
+; SDAG-NEXT:    ; implicit-def: $vgpr10
+; SDAG-NEXT:    ; implicit-def: $vgpr9
 ; SDAG-NEXT:  .LBB2_4: ; %Flow
 ; SDAG-NEXT:    s_andn2_saveexec_b64 s[6:7], s[12:13]
 ; SDAG-NEXT:    s_cbranch_execz .LBB2_6
 ; SDAG-NEXT:  ; %bb.5: ; %fp-to-i-if-then12
 ; SDAG-NEXT:    v_sub_u32_e32 v2, 0x96, v5
-; SDAG-NEXT:    v_lshrrev_b64 v[0:1], v2, v[7:8]
+; SDAG-NEXT:    v_lshrrev_b64 v[0:1], v2, v[6:7]
 ; SDAG-NEXT:    v_cmp_gt_u32_e64 s[4:5], 64, v2
 ; SDAG-NEXT:    v_cndmask_b32_e64 v0, 0, v0, s[4:5]
 ; SDAG-NEXT:    v_cmp_eq_u32_e64 s[4:5], 0, v2
-; SDAG-NEXT:    v_cndmask_b32_e64 v3, v0, v7, s[4:5]
-; SDAG-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v3, v11, 0
+; SDAG-NEXT:    v_cndmask_b32_e64 v3, v0, v6, s[4:5]
+; SDAG-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v3, v10, 0
 ; SDAG-NEXT:    v_mov_b32_e32 v2, 0
-; SDAG-NEXT:    v_mov_b32_e32 v6, v2
-; SDAG-NEXT:    v_mad_u64_u32 v[4:5], s[4:5], v3, v9, v[1:2]
-; SDAG-NEXT:    v_mad_i64_i32 v[2:3], s[4:5], v10, v3, v[5:6]
+; SDAG-NEXT:    v_mad_u64_u32 v[4:5], s[4:5], v3, v8, v[1:2]
+; SDAG-NEXT:    v_mov_b32_e32 v1, v5
+; SDAG-NEXT:    v_mad_i64_i32 v[2:3], s[4:5], v9, v3, v[1:2]
 ; SDAG-NEXT:    v_mov_b32_e32 v1, v4
 ; SDAG-NEXT:  .LBB2_6: ; %Flow1
 ; SDAG-NEXT:    s_or_b64 exec, exec, s[6:7]
@@ -841,11 +840,10 @@ define i128 @fptosi_f32_to_i128(float %x) {
 ; SDAG-NEXT:  ; %bb.8: ; %fp-to-i-if-then5
 ; SDAG-NEXT:    v_bfrev_b32_e32 v0, 1
 ; SDAG-NEXT:    v_bfrev_b32_e32 v1, -2
-; SDAG-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; SDAG-NEXT:    v_cndmask_b32_e64 v1, 0, -1, vcc
-; SDAG-NEXT:    v_mov_b32_e32 v3, v2
-; SDAG-NEXT:    v_mov_b32_e32 v0, v1
-; SDAG-NEXT:    v_mov_b32_e32 v2, v1
+; SDAG-NEXT:    v_cndmask_b32_e64 v2, 0, -1, vcc
+; SDAG-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
+; SDAG-NEXT:    v_mov_b32_e32 v0, v2
+; SDAG-NEXT:    v_mov_b32_e32 v1, v2
 ; SDAG-NEXT:  ; %bb.9: ; %Flow3
 ; SDAG-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; SDAG-NEXT:  .LBB2_10: ; %fp-to-i-cleanup
@@ -956,36 +954,37 @@ define i128 @fptosi_f32_to_i128(float %x) {
 ; GISEL-NEXT:    s_xor_b64 s[16:17], exec, s[6:7]
 ; GISEL-NEXT:    s_cbranch_execz .LBB2_4
 ; GISEL-NEXT:  ; %bb.3: ; %fp-to-i-if-else
-; GISEL-NEXT:    v_add_u32_e32 v2, 0xffffff6a, v6
-; GISEL-NEXT:    v_lshlrev_b64 v[0:1], v2, v[4:5]
-; GISEL-NEXT:    v_cmp_gt_u32_e32 vcc, 64, v2
+; GISEL-NEXT:    v_add_u32_e32 v7, 0xffffff6a, v6
+; GISEL-NEXT:    v_lshlrev_b64 v[0:1], v7, v[4:5]
+; GISEL-NEXT:    v_cmp_gt_u32_e32 vcc, 64, v7
 ; GISEL-NEXT:    v_cndmask_b32_e32 v10, 0, v0, vcc
 ; GISEL-NEXT:    v_cndmask_b32_e32 v11, 0, v1, vcc
 ; GISEL-NEXT:    v_mad_u64_u32 v[0:1], s[6:7], v10, v8, 0
-; GISEL-NEXT:    v_add_u32_e32 v3, 0xffffff2a, v6
-; GISEL-NEXT:    v_sub_u32_e32 v6, 64, v2
-; GISEL-NEXT:    v_lshrrev_b64 v[6:7], v6, v[4:5]
-; GISEL-NEXT:    v_lshlrev_b64 v[3:4], v3, v[4:5]
-; GISEL-NEXT:    v_cmp_eq_u32_e64 s[6:7], 0, v2
-; GISEL-NEXT:    v_cndmask_b32_e32 v3, v3, v6, vcc
-; GISEL-NEXT:    v_mad_u64_u32 v[5:6], s[8:9], v11, v8, v[0:1]
-; GISEL-NEXT:    v_cndmask_b32_e64 v12, v3, 0, s[6:7]
+; GISEL-NEXT:    v_add_u32_e32 v6, 0xffffff2a, v6
+; GISEL-NEXT:    v_sub_u32_e32 v2, 64, v7
+; GISEL-NEXT:    v_lshrrev_b64 v[2:3], v2, v[4:5]
+; GISEL-NEXT:    v_lshlrev_b64 v[4:5], v6, v[4:5]
+; GISEL-NEXT:    v_cmp_eq_u32_e64 s[6:7], 0, v7
+; GISEL-NEXT:    v_mad_u64_u32 v[6:7], s[8:9], v11, v8, v[0:1]
+; GISEL-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
+; GISEL-NEXT:    v_cndmask_b32_e64 v12, v2, 0, s[6:7]
+; GISEL-NEXT:    v_mad_u64_u32 v[6:7], s[8:9], v12, v9, v[6:7]
 ; GISEL-NEXT:    v_mad_u64_u32 v[0:1], s[8:9], v10, v9, 0
-; GISEL-NEXT:    v_mad_u64_u32 v[2:3], s[8:9], v12, v9, v[5:6]
-; GISEL-NEXT:    v_mul_lo_u32 v13, v11, v8
-; GISEL-NEXT:    v_mad_u64_u32 v[5:6], s[8:9], v10, v8, v[1:2]
-; GISEL-NEXT:    v_mul_lo_u32 v10, v10, v8
-; GISEL-NEXT:    v_mad_u64_u32 v[1:2], s[10:11], v11, v9, v[5:6]
-; GISEL-NEXT:    v_addc_co_u32_e64 v3, s[10:11], v3, v10, s[10:11]
-; GISEL-NEXT:    v_addc_co_u32_e64 v3, s[8:9], v3, v13, s[8:9]
-; GISEL-NEXT:    v_mad_u64_u32 v[5:6], s[8:9], v12, v8, v[3:4]
-; GISEL-NEXT:    v_cndmask_b32_e32 v3, v4, v7, vcc
-; GISEL-NEXT:    v_cndmask_b32_e64 v7, v3, 0, s[6:7]
-; GISEL-NEXT:    v_mad_u64_u32 v[3:4], s[6:7], v7, v9, v[5:6]
+; GISEL-NEXT:    v_mov_b32_e32 v2, v6
+; GISEL-NEXT:    v_mul_lo_u32 v6, v10, v8
+; GISEL-NEXT:    v_mad_u64_u32 v[1:2], s[8:9], v10, v8, v[1:2]
+; GISEL-NEXT:    v_mul_lo_u32 v4, v11, v8
+; GISEL-NEXT:    v_cndmask_b32_e32 v3, v5, v3, vcc
+; GISEL-NEXT:    v_mad_u64_u32 v[1:2], s[10:11], v11, v9, v[1:2]
+; GISEL-NEXT:    v_addc_co_u32_e64 v6, s[10:11], v7, v6, s[10:11]
+; GISEL-NEXT:    v_addc_co_u32_e64 v4, s[8:9], v6, v4, s[8:9]
+; GISEL-NEXT:    v_mad_u64_u32 v[6:7], s[8:9], v12, v8, v[4:5]
+; GISEL-NEXT:    v_cndmask_b32_e64 v3, v3, 0, s[6:7]
+; GISEL-NEXT:    ; implicit-def: $vgpr8
+; GISEL-NEXT:    v_mad_u64_u32 v[3:4], s[6:7], v3, v9, v[6:7]
 ; GISEL-NEXT:    ; implicit-def: $vgpr6
 ; GISEL-NEXT:    ; implicit-def: $vgpr4_vgpr5
 ; GISEL-NEXT:    ; implicit-def: $vgpr9
-; GISEL-NEXT:    ; implicit-def: $vgpr8
 ; GISEL-NEXT:  .LBB2_4: ; %Flow
 ; GISEL-NEXT:    s_andn2_saveexec_b64 s[6:7], s[16:17]
 ; GISEL-NEXT:    s_cbranch_execz .LBB2_6
@@ -995,14 +994,12 @@ define i128 @fptosi_f32_to_i128(float %x) {
 ; GISEL-NEXT:    v_cmp_gt_u32_e32 vcc, 64, v2
 ; GISEL-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
 ; GISEL-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
-; GISEL-NEXT:    v_cndmask_b32_e32 v6, v0, v4, vcc
-; GISEL-NEXT:    v_mad_u64_u32 v[0:1], s[8:9], v6, v9, 0
-; GISEL-NEXT:    v_mad_u64_u32 v[2:3], s[8:9], v6, v8, 0
-; GISEL-NEXT:    v_mul_lo_u32 v7, v6, v8
-; GISEL-NEXT:    v_mad_u64_u32 v[4:5], vcc, v6, v8, v[1:2]
-; GISEL-NEXT:    v_addc_co_u32_e32 v3, vcc, v3, v7, vcc
-; GISEL-NEXT:    v_mov_b32_e32 v1, v4
-; GISEL-NEXT:    v_mov_b32_e32 v2, v5
+; GISEL-NEXT:    v_cndmask_b32_e32 v4, v0, v4, vcc
+; GISEL-NEXT:    v_mad_u64_u32 v[0:1], s[8:9], v4, v9, 0
+; GISEL-NEXT:    v_mad_u64_u32 v[2:3], s[8:9], v4, v8, 0
+; GISEL-NEXT:    v_mul_lo_u32 v5, v4, v8
+; GISEL-NEXT:    v_mad_u64_u32 v[1:2], vcc, v4, v8, v[1:2]
+; GISEL-NEXT:    v_addc_co_u32_e32 v3, vcc, v3, v5, vcc
 ; GISEL-NEXT:  .LBB2_6: ; %Flow1
 ; GISEL-NEXT:    s_or_b64 exec, exec, s[6:7]
 ; GISEL-NEXT:  .LBB2_7: ; %Flow2
@@ -1091,10 +1088,11 @@ define i128 @fptoui_f32_to_i128(float %x) {
 ; SDAG:       ; %bb.0: ; %fp-to-i-entry
 ; SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; SDAG-NEXT:    v_mov_b32_e32 v4, v0
-; SDAG-NEXT:    v_mov_b32_e32 v0, 0
 ; SDAG-NEXT:    v_bfe_u32 v5, v4, 23, 8
 ; SDAG-NEXT:    s_movk_i32 s4, 0x7e
+; SDAG-NEXT:    v_mov_b32_e32 v0, 0
 ; SDAG-NEXT:    v_mov_b32_e32 v2, 0
+; SDAG-NEXT:    v_mov_b32_e32 v6, 0
 ; SDAG-NEXT:    v_mov_b32_e32 v1, 0
 ; SDAG-NEXT:    v_mov_b32_e32 v3, 0
 ; SDAG-NEXT:    v_cmp_lt_u32_e32 vcc, s4, v5
@@ -1102,9 +1100,7 @@ define i128 @fptoui_f32_to_i128(float %x) {
 ; SDAG-NEXT:    s_cbranch_execz .LBB3_10
 ; SDAG-NEXT:  ; %bb.1: ; %fp-to-i-if-end
 ; SDAG-NEXT:    v_add_co_u32_e32 v0, vcc, 0xffffff01, v5
-; SDAG-NEXT:    v_mov_b32_e32 v1, -1
-; SDAG-NEXT:    v_mov_b32_e32 v6, 0
-; SDAG-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; SDAG-NEXT:    v_addc_co_u32_e32 v1, vcc, -1, v6, vcc
 ; SDAG-NEXT:    v_addc_co_u32_e32 v2, vcc, -1, v6, vcc
 ; SDAG-NEXT:    s_movk_i32 s6, 0xff7f
 ; SDAG-NEXT:    v_addc_co_u32_e32 v3, vcc, -1, v6, vcc
@@ -1120,14 +1116,14 @@ define i128 @fptoui_f32_to_i128(float %x) {
 ; SDAG-NEXT:    s_cbranch_execz .LBB3_7
 ; SDAG-NEXT:  ; %bb.2: ; %fp-to-i-if-end9
 ; SDAG-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; SDAG-NEXT:    v_add_co_u32_e64 v10, s[4:5], -1, v0
+; SDAG-NEXT:    v_add_co_u32_e64 v9, s[4:5], -1, v0
 ; SDAG-NEXT:    s_mov_b64 s[4:5], 0x95
 ; SDAG-NEXT:    v_and_b32_e32 v0, 0x7fffff, v4
 ; SDAG-NEXT:    v_cmp_lt_u64_e64 s[4:5], s[4:5], v[5:6]
-; SDAG-NEXT:    v_cndmask_b32_e64 v9, -1, 0, vcc
-; SDAG-NEXT:    v_cndmask_b32_e64 v11, -1, 1, vcc
-; SDAG-NEXT:    v_or_b32_e32 v7, 0x800000, v0
-; SDAG-NEXT:    v_mov_b32_e32 v8, v6
+; SDAG-NEXT:    v_mov_b32_e32 v7, 0
+; SDAG-NEXT:    v_cndmask_b32_e64 v8, -1, 0, vcc
+; SDAG-NEXT:    v_cndmask_b32_e64 v10, -1, 1, vcc
+; SDAG-NEXT:    v_or_b32_e32 v6, 0x800000, v0
 ; SDAG-NEXT:    ; implicit-def: $vgpr0_vgpr1
 ; SDAG-NEXT:    ; implicit-def: $vgpr2_vgpr3
 ; SDAG-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
@@ -1137,56 +1133,56 @@ define i128 @fptoui_f32_to_i128(float %x) {
 ; SDAG-NEXT:    v_sub_u32_e32 v0, 0xd6, v5
 ; SDAG-NEXT:    v_add_u32_e32 v2, 0xffffff2a, v5
 ; SDAG-NEXT:    v_add_u32_e32 v4, 0xffffff6a, v5
-; SDAG-NEXT:    v_lshrrev_b64 v[0:1], v0, v[7:8]
-; SDAG-NEXT:    v_lshlrev_b64 v[2:3], v2, v[7:8]
+; SDAG-NEXT:    v_lshrrev_b64 v[0:1], v0, v[6:7]
+; SDAG-NEXT:    v_lshlrev_b64 v[2:3], v2, v[6:7]
 ; SDAG-NEXT:    v_cmp_gt_u32_e64 s[4:5], 64, v4
 ; SDAG-NEXT:    v_cndmask_b32_e64 v1, v3, v1, s[4:5]
 ; SDAG-NEXT:    v_cmp_ne_u32_e64 s[6:7], 0, v4
 ; SDAG-NEXT:    v_cndmask_b32_e64 v3, 0, v1, s[6:7]
 ; SDAG-NEXT:    v_cndmask_b32_e64 v2, v2, v0, s[4:5]
-; SDAG-NEXT:    v_lshlrev_b64 v[0:1], v4, v[7:8]
+; SDAG-NEXT:    v_lshlrev_b64 v[0:1], v4, v[6:7]
 ; SDAG-NEXT:    v_cndmask_b32_e64 v2, 0, v2, s[6:7]
-; SDAG-NEXT:    v_cndmask_b32_e64 v7, 0, v0, s[4:5]
-; SDAG-NEXT:    v_mad_u64_u32 v[4:5], s[6:7], v7, v11, 0
-; SDAG-NEXT:    v_cndmask_b32_e64 v13, 0, v1, s[4:5]
-; SDAG-NEXT:    v_mul_lo_u32 v8, v9, v2
-; SDAG-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v13, v11, v[5:6]
-; SDAG-NEXT:    v_mul_lo_u32 v12, v11, v3
-; SDAG-NEXT:    v_mad_u64_u32 v[2:3], s[6:7], v11, v2, 0
-; SDAG-NEXT:    v_mov_b32_e32 v5, v0
-; SDAG-NEXT:    v_mad_u64_u32 v[5:6], s[4:5], v7, v9, v[5:6]
-; SDAG-NEXT:    v_add3_u32 v3, v3, v12, v8
-; SDAG-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], v10, v7, v[2:3]
-; SDAG-NEXT:    v_add_co_u32_e64 v0, s[4:5], v1, v6
-; SDAG-NEXT:    v_addc_co_u32_e64 v1, s[4:5], 0, 0, s[4:5]
-; SDAG-NEXT:    v_mul_lo_u32 v8, v10, v13
-; SDAG-NEXT:    v_mul_lo_u32 v7, v10, v7
-; SDAG-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v13, v9, v[0:1]
-; SDAG-NEXT:    ; implicit-def: $vgpr11
-; SDAG-NEXT:    ; implicit-def: $vgpr9
-; SDAG-NEXT:    ; implicit-def: $vgpr10
-; SDAG-NEXT:    v_add3_u32 v3, v7, v3, v8
-; SDAG-NEXT:    v_add_co_u32_e64 v2, s[4:5], v0, v2
-; SDAG-NEXT:    v_addc_co_u32_e64 v3, s[4:5], v1, v3, s[4:5]
-; SDAG-NEXT:    v_mov_b32_e32 v0, v4
-; SDAG-NEXT:    v_mov_b32_e32 v1, v5
+; SDAG-NEXT:    v_cndmask_b32_e64 v12, 0, v0, s[4:5]
+; SDAG-NEXT:    v_cndmask_b32_e64 v11, 0, v1, s[4:5]
+; SDAG-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v12, v10, 0
+; SDAG-NEXT:    v_mul_lo_u32 v13, v8, v2
+; SDAG-NEXT:    v_mul_lo_u32 v14, v10, v3
+; SDAG-NEXT:    v_mov_b32_e32 v6, v1
+; SDAG-NEXT:    v_mad_u64_u32 v[4:5], s[4:5], v11, v10, v[6:7]
+; SDAG-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], v10, v2, 0
+; SDAG-NEXT:    v_mov_b32_e32 v6, v4
+; SDAG-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], v12, v8, v[6:7]
+; SDAG-NEXT:    v_add3_u32 v3, v3, v14, v13
+; SDAG-NEXT:    v_mad_u64_u32 v[1:2], s[4:5], v9, v12, v[2:3]
+; SDAG-NEXT:    v_add_co_u32_e64 v3, s[4:5], v5, v7
+; SDAG-NEXT:    v_addc_co_u32_e64 v4, s[4:5], 0, 0, s[4:5]
+; SDAG-NEXT:    v_mul_lo_u32 v10, v9, v11
+; SDAG-NEXT:    v_mul_lo_u32 v9, v9, v12
+; SDAG-NEXT:    v_mad_u64_u32 v[3:4], s[4:5], v11, v8, v[3:4]
+; SDAG-NEXT:    ; implicit-def: $vgpr8
+; SDAG-NEXT:    v_add3_u32 v5, v9, v2, v10
+; SDAG-NEXT:    v_add_co_u32_e64 v2, s[4:5], v3, v1
+; SDAG-NEXT:    v_addc_co_u32_e64 v3, s[4:5], v4, v5, s[4:5]
+; SDAG-NEXT:    v_mov_b32_e32 v1, v6
 ; SDAG-NEXT:    ; implicit-def: $vgpr5_vgpr6
-; SDAG-NEXT:    ; implicit-def: $vgpr7_vgpr8
+; SDAG-NEXT:    ; implicit-def: $vgpr6_vgpr7
+; SDAG-NEXT:    ; implicit-def: $vgpr10
+; SDAG-NEXT:    ; implicit-def: $vgpr9
 ; SDAG-NEXT:  .LBB3_4: ; %Flow
 ; SDAG-NEXT:    s_andn2_saveexec_b64 s[6:7], s[12:13]
 ; SDAG-NEXT:    s_cbranch_execz .LBB3_6
 ; SDAG-NEXT:  ; %bb.5: ; %fp-to-i-if-then12
 ; SDAG-NEXT:    v_sub_u32_e32 v2, 0x96, v5
-; SDAG-NEXT:    v_lshrrev_b64 v[0:1], v2, v[7:8]
+; SDAG-NEXT:    v_lshrrev_b64 v[0:1], v2, v[6:7]
 ; SDAG-NEXT:    v_cmp_gt_u32_e64 s[4:5], 64, v2
 ; SDAG-NEXT:    v_cndmask_b32_e64 v0, 0, v0, s[4:5]
 ; SDAG-NEXT:    v_cmp_eq_u32_e64 s[4:5], 0, v2
-; SDAG-NEXT:    v_cndmask_b32_e64 v3, v0, v7, s[4:5]
-; SDAG-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v3, v11, 0
+; SDAG-NEXT:    v_cndmask_b32_e64 v3, v0, v6, s[4:5]
+; SDAG-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v3, v10, 0
 ; SDAG-NEXT:    v_mov_b32_e32 v2, 0
-; SDAG-NEXT:    v_mov_b32_e32 v6, v2
-; SDAG-NEXT:    v_mad_u64_u32 v[4:5], s[4:5], v3, v9, v[1:2]
-; SDAG-NEXT:    v_mad_i64_i32 v[2:3], s[4:5], v10, v3, v[5:6]
+; SDAG-NEXT:    v_mad_u64_u32 v[4:5], s[4:5], v3, v8, v[1:2]
+; SDAG-NEXT:    v_mov_b32_e32 v1, v5
+; SDAG-NEXT:    v_mad_i64_i32 v[2:3], s[4:5], v9, v3, v[1:2]
 ; SDAG-NEXT:    v_mov_b32_e32 v1, v4
 ; SDAG-NEXT:  .LBB3_6: ; %Flow1
 ; SDAG-NEXT:    s_or_b64 exec, exec, s[6:7]
@@ -1195,11 +1191,10 @@ define i128 @fptoui_f32_to_i128(float %x) {
 ; SDAG-NEXT:  ; %bb.8: ; %fp-to-i-if-then5
 ; SDAG-NEXT:    v_bfrev_b32_e32 v0, 1
 ; SDAG-NEXT:    v_bfrev_b32_e32 v1, -2
-; SDAG-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; SDAG-NEXT:    v_cndmask_b32_e64 v1, 0, -1, vcc
-; SDAG-NEXT:    v_mov_b32_e32 v3, v2
-; SDAG-NEXT:    v_mov_b32_e32 v0, v1
-; SDAG-NEXT:    v_mov_b32_e32 v2, v1
+; SDAG-NEXT:    v_cndmask_b32_e64 v2, 0, -1, vcc
+; SDAG-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
+; SDAG-NEXT:    v_mov_b32_e32 v0, v2
+; SDAG-NEXT:    v_mov_b32_e32 v1, v2
 ; SDAG-NEXT:  ; %bb.9: ; %Flow3
 ; SDAG-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; SDAG-NEXT:  .LBB3_10: ; %fp-to-i-cleanup
@@ -1310,36 +1305,37 @@ define i128 @fptoui_f32_to_i128(float %x) {
 ; GISEL-NEXT:    s_xor_b64 s[16:17], exec, s[6:7]
 ; GISEL-NEXT:    s_cbranch_execz .LBB3_4
 ; GISEL-NEXT:  ; %bb.3: ; %fp-to-i-if-else
-; GISEL-NEXT:    v_add_u32_e32 v2, 0xffffff6a, v6
-; GISEL-NEXT:    v_lshlrev_b64 v[0:1], v2, v[4:5]
-; GISEL-NEXT:    v_cmp_gt_u32_e32 vcc, 64, v2
+; GISEL-NEXT:    v_add_u32_e32 v7, 0xffffff6a, v6
+; GISEL-NEXT:    v_lshlrev_b64 v[0:1], v7, v[4:5]
+; GISEL-NEXT:    v_cmp_gt_u32_e32 vcc, 64, v7
 ; GISEL-NEXT:    v_cndmask_b32_e32 v10, 0, v0, vcc
 ; GISEL-NEXT:    v_cndmask_b32_e32 v11, 0, v1, vcc
 ; GISEL-NEXT:    v_mad_u64_u32 v[0:1], s[6:7], v10, v8, 0
-; GISEL-NEXT:    v_add_u32_e32 v3, 0xffffff2a, v6
-; GISEL-NEXT:    v_sub_u32_e32 v6, 64, v2
-; GISEL-NEXT:    v_lshrrev_b64 v[6:7], v6, v[4:5]
-; GISEL-NEXT:    v_lshlrev_b64 v[3:4], v3, v[4:5]
-; GISEL-NEXT:    v_cmp_eq_u32_e64 s[6:7], 0, v2
-; GISEL-NEXT:    v_cndmask_b32_e32 v3, v3, v6, vcc
-; GISEL-NEXT:    v_mad_u64_u32 v[5:6], s[8:9], v11, v8, v[0:1]
-; GISEL-NEXT:    v_cndmask_b32_e64 v12, v3, 0, s[6:7]
+; GISEL-NEXT:    v_add_u32_e32 v6, 0xffffff2a, v6
+; GISEL-NEXT:    v_sub_u32_e32 v2, 64, v7
+; GISEL-NEXT:    v_lshrrev_b64 v[2:3], v2, v[4:5]
+; GISEL-NEXT:    v_lshlrev_b64 v[4:5], v6, v[4:5]
+; GISEL-NEXT:    v_cmp_eq_u32_e64 s[6:7], 0, v7
+; GISEL-NEXT:    v_mad_u64_u32 v[6:7], s[8:9], v11, v8, v[0:1]
+; GISEL-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
+; GISEL-NEXT:    v_cndmask_b32_e64 v12, v2, 0, s[6:7]
+; GISEL-NEXT:    v_mad_u64_u32 v[6:7], s[8:9], v12, v9, v[6:7]
 ; GISEL-NEXT:    v_mad_u64_u32 v[0:1], s[8:9], v10, v9, 0
-; GISEL-NEXT:    v_mad_u64_u32 v[2:3], s[8:9], v12, v9, v[5:6]
-; GISEL-NEXT:    v_mul_lo_u32 v13, v11, v8
-; GISEL-NEXT:    v_mad_u64_u32 v[5:6], s[8:9], v10, v8, v[1:2]
-; GISEL-NEXT:    v_mul_lo_u32 v10, v10, v8
-; GISEL-NEXT:    v_mad_u64_u32 v[1:2], s[10:11], v11, v9, v[5:6]
-; GISEL-NEXT:    v_addc_co_u32_e64 v3, s[10:11], v3, v10, s[10:11]
-; GISEL-NEXT:    v_addc_co_u32_e64 v3, s[8:9], v3, v13, s[8:9]
-; GISEL-NEXT:    v_mad_u64_u32 v[5:6], s[8:9], v12, v8, v[3:4]
-; GISEL-NEXT:    v_cndmask_b32_e32 v3, v4, v7, vcc
-; GISEL-NEXT:    v_cndmask_b32_e64 v7, v3, 0, s[6:7]
-; GISEL-NEXT:    v_mad_u64_u32 v[3:4], s[6:7], v7, v9, v[5:6]
+; GISEL-NEXT:    v_mov_b32_e32 v2, v6
+; GISEL-NEXT:    v_mul_lo_u32 v6, v10, v8
+; GISEL-NEXT:    v_mad_u64_u32 v[1:2], s[8:9], v10, v8, v[1:2]
+; GISEL-NEXT:    v_mul_lo_u32 v4, v11, v8
+; GISEL-NEXT:    v_cndmask_b32_e32 v3, v5, v3, vcc
+; GISEL-NEXT:    v_mad_u64_u32 v[1:2], s[10:11], v11, v9, v[1:2]
+; GISEL-NEXT:    v_addc_co_u32_e64 v6, s[10:11], v7, v6, s[10:11]
+; GISEL-NEXT:    v_addc_co_u32_e64 v4, s[8:9], v6, v4, s[8:9]
+; GISEL-NEXT:    v_mad_u64_u32 v[6:7], s[8:9], v12, v8, v[4:5]
+; GISEL-NEXT:    v_cndmask_b32_e64 v3, v3, 0, s[6:7]
+; GISEL-NEXT:    ; implicit-def: $vgpr8
+; GISEL-NEXT:    v_mad_u64_u32 v[3:4], s[6:7], v3, v9, v[6:7]
 ; GISEL-NEXT:    ; implicit-def: $vgpr6
 ; GISEL-NEXT:    ; implicit-def: $vgpr4_vgpr5
 ; GISEL-NEXT:    ; implicit-def: $vgpr9
-; GISEL-NEXT:    ; implicit-def: $vgpr8
 ; GISEL-NEXT:  .LBB3_4: ; %Flow
 ; GISEL-NEXT:    s_andn2_saveexec_b64 s[6:7], s[16:17]
 ; GISEL-NEXT:    s_cbranch_execz .LBB3_6
@@ -1349,14 +1345,12 @@ define i128 @fptoui_f32_to_i128(float %x) {
 ; GISEL-NEXT:    v_cmp_gt_u32_e32 vcc, 64, v2
 ; GISEL-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
 ; GISEL-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
-; GISEL-NEXT:    v_cndmask_b32_e32 v6, v0, v4, vcc
-; GISEL-NEXT:    v_mad_u64_u32 v[0:1], s[8:9], v6, v9, 0
-; GISEL-NEXT:    v_mad_u64_u32 v[2:3], s[8:9], v6, v8, 0
-; GISEL-NEXT:    v_mul_lo_u32 v7, v6, v8
-; GISEL-NEXT:    v_mad_u64_u32 v[4:5], vcc, v6, v8, v[1:2]
-; GISEL-NEXT:    v_addc_co_u32_e32 v3, vcc, v3, v7, vcc
-; GISEL-NEXT:    v_mov_b32_e32 v1, v4
-; GISEL-NEXT:    v_mov_b32_e32 v2, v5
+; GISEL-NEXT:    v_cndmask_b32_e32 v4, v0, v4, vcc
+; GISEL-NEXT:    v_mad_u64_u32 v[0:1], s[8:9], v4, v9, 0
+; GISEL-NEXT:    v_mad_u64_u32 v[2:3], s[8:9], v4, v8, 0
+; GISEL-NEXT:    v_mul_lo_u32 v5, v4, v8
+; GISEL-NEXT:    v_mad_u64_u32 v[1:2], vcc, v4, v8, v[1:2]
+; GISEL-NEXT:    v_addc_co_u32_e32 v3, vcc, v3, v5, vcc
 ; GISEL-NEXT:  .LBB3_6: ; %Flow1
 ; GISEL-NEXT:    s_or_b64 exec, exec, s[6:7]
 ; GISEL-NEXT:  .LBB3_7: ; %Flow2
@@ -1483,10 +1477,11 @@ define i128 @fptosi_bf16_to_i128(bfloat %x) {
 ; SDAG:       ; %bb.0: ; %fp-to-i-entry
 ; SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; SDAG-NEXT:    v_mov_b32_e32 v4, v0
-; SDAG-NEXT:    v_mov_b32_e32 v0, 0
 ; SDAG-NEXT:    v_bfe_u32 v5, v4, 7, 8
 ; SDAG-NEXT:    s_movk_i32 s4, 0x7e
+; SDAG-NEXT:    v_mov_b32_e32 v0, 0
 ; SDAG-NEXT:    v_mov_b32_e32 v2, 0
+; SDAG-NEXT:    v_mov_b32_e32 v6, 0
 ; SDAG-NEXT:    v_mov_b32_e32 v1, 0
 ; SDAG-NEXT:    v_mov_b32_e32 v3, 0
 ; SDAG-NEXT:    v_cmp_lt_u32_e32 vcc, s4, v5
@@ -1494,9 +1489,7 @@ define i128 @fptosi_bf16_to_i128(bfloat %x) {
 ; SDAG-NEXT:    s_cbranch_execz .LBB6_10
 ; SDAG-NEXT:  ; %bb.1: ; %fp-to-i-if-end
 ; SDAG-NEXT:    v_add_co_u32_e32 v0, vcc, 0xffffff01, v5
-; SDAG-NEXT:    v_mov_b32_e32 v1, -1
-; SDAG-NEXT:    v_mov_b32_e32 v6, 0
-; SDAG-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; SDAG-NEXT:    v_addc_co_u32_e32 v1, vcc, -1, v6, vcc
 ; SDAG-NEXT:    v_addc_co_u32_e32 v2, vcc, -1, v6, vcc
 ; SDAG-NEXT:    s_movk_i32 s6, 0xff7f
 ; SDAG-NEXT:    v_addc_co_u32_e32 v3, vcc, -1, v6, vcc
@@ -1515,10 +1508,10 @@ define i128 @fptosi_bf16_to_i128(bfloat %x) {
 ; SDAG-NEXT:    v_and_b32_sdwa v0, v4, s4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; SDAG-NEXT:    s_mov_b64 s[4:5], 0x85
 ; SDAG-NEXT:    v_cmp_lt_u64_e64 s[4:5], s[4:5], v[5:6]
-; SDAG-NEXT:    v_cndmask_b32_e64 v10, -1, 0, vcc
-; SDAG-NEXT:    v_cndmask_b32_e64 v9, -1, 1, vcc
-; SDAG-NEXT:    v_or_b32_e32 v7, 0x80, v0
-; SDAG-NEXT:    v_mov_b32_e32 v8, v6
+; SDAG-NEXT:    v_mov_b32_e32 v7, 0
+; SDAG-NEXT:    v_cndmask_b32_e64 v9, -1, 0, vcc
+; SDAG-NEXT:    v_cndmask_b32_e64 v8, -1, 1, vcc
+; SDAG-NEXT:    v_or_b32_e32 v6, 0x80, v0
 ; SDAG-NEXT:    ; implicit-def: $vgpr0_vgpr1
 ; SDAG-NEXT:    ; implicit-def: $vgpr2_vgpr3
 ; SDAG-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
@@ -1526,56 +1519,56 @@ define i128 @fptosi_bf16_to_i128(bfloat %x) {
 ; SDAG-NEXT:    s_cbranch_execz .LBB6_4
 ; SDAG-NEXT:  ; %bb.3: ; %fp-to-i-if-else
 ; SDAG-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; SDAG-NEXT:    v_add_co_u32_e64 v11, s[4:5], -1, v0
+; SDAG-NEXT:    v_add_co_u32_e64 v10, s[4:5], -1, v0
 ; SDAG-NEXT:    v_sub_u32_e32 v0, 0xc6, v5
 ; SDAG-NEXT:    v_add_u32_e32 v2, 0xffffff3a, v5
 ; SDAG-NEXT:    v_add_u32_e32 v4, 0xffffff7a, v5
-; SDAG-NEXT:    v_lshrrev_b64 v[0:1], v0, v[7:8]
-; SDAG-NEXT:    v_lshlrev_b64 v[2:3], v2, v[7:8]
+; SDAG-NEXT:    v_lshrrev_b64 v[0:1], v0, v[6:7]
+; SDAG-NEXT:    v_lshlrev_b64 v[2:3], v2, v[6:7]
 ; SDAG-NEXT:    v_cmp_gt_u32_e64 s[4:5], 64, v4
 ; SDAG-NEXT:    v_cndmask_b32_e64 v1, v3, v1, s[4:5]
 ; SDAG-NEXT:    v_cmp_ne_u32_e64 s[6:7], 0, v4
 ; SDAG-NEXT:    v_cndmask_b32_e64 v3, 0, v1, s[6:7]
 ; SDAG-NEXT:    v_cndmask_b32_e64 v2, v2, v0, s[4:5]
-; SDAG-NEXT:    v_lshlrev_b64 v[0:1], v4, v[7:8]
+; SDAG-NEXT:    v_lshlrev_b64 v[0:1], v4, v[6:7]
 ; SDAG-NEXT:    v_cndmask_b32_e64 v2, 0, v2, s[6:7]
-; SDAG-NEXT:    v_cndmask_b32_e64 v7, 0, v0, s[4:5]
-; SDAG-NEXT:    v_mad_u64_u32 v[4:5], s[6:7], v7, v9, 0
-; SDAG-NEXT:    v_cndmask_b32_e64 v13, 0, v1, s[4:5]
-; SDAG-NEXT:    v_mul_lo_u32 v8, v10, v2
-; SDAG-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v13, v9, v[5:6]
-; SDAG-NEXT:    v_mul_lo_u32 v12, v9, v3
-; SDAG-NEXT:    v_mad_u64_u32 v[2:3], s[6:7], v9, v2, 0
-; SDAG-NEXT:    v_mov_b32_e32 v5, v0
-; SDAG-NEXT:    v_mad_u64_u32 v[5:6], s[4:5], v7, v10, v[5:6]
-; SDAG-NEXT:    v_add3_u32 v3, v3, v12, v8
-; SDAG-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], v11, v7, v[2:3]
-; SDAG-NEXT:    v_add_co_u32_e64 v0, s[4:5], v1, v6
-; SDAG-NEXT:    v_addc_co_u32_e64 v1, s[4:5], 0, 0, s[4:5]
-; SDAG-NEXT:    v_mul_lo_u32 v8, v11, v13
-; SDAG-NEXT:    v_mul_lo_u32 v7, v11, v7
-; SDAG-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v13, v10, v[0:1]
-; SDAG-NEXT:    ; implicit-def: $vgpr9
-; SDAG-NEXT:    v_add3_u32 v3, v7, v3, v8
-; SDAG-NEXT:    v_add_co_u32_e64 v2, s[4:5], v0, v2
-; SDAG-NEXT:    v_addc_co_u32_e64 v3, s[4:5], v1, v3, s[4:5]
-; SDAG-NEXT:    v_mov_b32_e32 v0, v4
-; SDAG-NEXT:    v_mov_b32_e32 v1, v5
+; SDAG-NEXT:    v_cndmask_b32_e64 v12, 0, v0, s[4:5]
+; SDAG-NEXT:    v_cndmask_b32_e64 v11, 0, v1, s[4:5]
+; SDAG-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v12, v8, 0
+; SDAG-NEXT:    v_mul_lo_u32 v13, v9, v2
+; SDAG-NEXT:    v_mul_lo_u32 v14, v8, v3
+; SDAG-NEXT:    v_mov_b32_e32 v6, v1
+; SDAG-NEXT:    v_mad_u64_u32 v[4:5], s[4:5], v11, v8, v[6:7]
+; SDAG-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], v8, v2, 0
+; SDAG-NEXT:    v_mov_b32_e32 v6, v4
+; SDAG-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], v12, v9, v[6:7]
+; SDAG-NEXT:    v_add3_u32 v3, v3, v14, v13
+; SDAG-NEXT:    v_mad_u64_u32 v[1:2], s[4:5], v10, v12, v[2:3]
+; SDAG-NEXT:    v_add_co_u32_e64 v3, s[4:5], v5, v7
+; SDAG-NEXT:    v_addc_co_u32_e64 v4, s[4:5], 0, 0, s[4:5]
+; SDAG-NEXT:    v_mul_lo_u32 v8, v10, v11
+; SDAG-NEXT:    v_mul_lo_u32 v10, v10, v12
+; SDAG-NEXT:    v_mad_u64_u32 v[3:4], s[4:5], v11, v9, v[3:4]
+; SDAG-NEXT:    v_add3_u32 v5, v10, v2, v8
+; SDAG-NEXT:    v_add_co_u32_e64 v2, s[4:5], v3, v1
+; SDAG-NEXT:    v_addc_co_u32_e64 v3, s[4:5], v4, v5, s[4:5]
+; SDAG-NEXT:    v_mov_b32_e32 v1, v6
 ; SDAG-NEXT:    ; implicit-def: $vgpr5_vgpr6
-; SDAG-NEXT:    ; implicit-def: $vgpr7_vgpr8
+; SDAG-NEXT:    ; implicit-def: $vgpr6_vgpr7
+; SDAG-NEXT:    ; implicit-def: $vgpr8
 ; SDAG-NEXT:  .LBB6_4: ; %Flow
 ; SDAG-NEXT:    s_andn2_saveexec_b64 s[6:7], s[12:13]
 ; SDAG-NEXT:    s_cbranch_execz .LBB6_6
 ; SDAG-NEXT:  ; %bb.5: ; %fp-to-i-if-then12
 ; SDAG-NEXT:    v_sub_u32_e32 v2, 0x86, v5
-; SDAG-NEXT:    v_lshrrev_b64 v[0:1], v2, v[7:8]
+; SDAG-NEXT:    v_lshrrev_b64 v[0:1], v2, v[6:7]
 ; SDAG-NEXT:    v_cmp_gt_u32_e64 s[4:5], 64, v2
 ; SDAG-NEXT:    v_cndmask_b32_e64 v0, 0, v0, s[4:5]
 ; SDAG-NEXT:    v_cmp_eq_u32_e64 s[4:5], 0, v2
-; SDAG-NEXT:    v_cndmask_b32_e64 v0, v0, v7, s[4:5]
-; SDAG-NEXT:    v_mul_hi_i32_i24_e32 v1, v0, v9
+; SDAG-NEXT:    v_cndmask_b32_e64 v0, v0, v6, s[4:5]
+; SDAG-NEXT:    v_mul_hi_i32_i24_e32 v1, v0, v8
 ; SDAG-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
-; SDAG-NEXT:    v_mul_i32_i24_e32 v0, v0, v9
+; SDAG-NEXT:    v_mul_i32_i24_e32 v0, v0, v8
 ; SDAG-NEXT:    v_mov_b32_e32 v3, v2
 ; SDAG-NEXT:  .LBB6_6: ; %Flow1
 ; SDAG-NEXT:    s_or_b64 exec, exec, s[6:7]
@@ -1584,11 +1577,10 @@ define i128 @fptosi_bf16_to_i128(bfloat %x) {
 ; SDAG-NEXT:  ; %bb.8: ; %fp-to-i-if-then5
 ; SDAG-NEXT:    v_bfrev_b32_e32 v0, 1
 ; SDAG-NEXT:    v_bfrev_b32_e32 v1, -2
-; SDAG-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; SDAG-NEXT:    v_cndmask_b32_e64 v1, 0, -1, vcc
-; SDAG-NEXT:    v_mov_b32_e32 v3, v2
-; SDAG-NEXT:    v_mov_b32_e32 v0, v1
-; SDAG-NEXT:    v_mov_b32_e32 v2, v1
+; SDAG-NEXT:    v_cndmask_b32_e64 v2, 0, -1, vcc
+; SDAG-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
+; SDAG-NEXT:    v_mov_b32_e32 v0, v2
+; SDAG-NEXT:    v_mov_b32_e32 v1, v2
 ; SDAG-NEXT:  ; %bb.9: ; %Flow3
 ; SDAG-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; SDAG-NEXT:  .LBB6_10: ; %fp-to-i-cleanup
@@ -1700,33 +1692,34 @@ define i128 @fptosi_bf16_to_i128(bfloat %x) {
 ; GISEL-NEXT:    s_xor_b64 s[16:17], exec, s[6:7]
 ; GISEL-NEXT:    s_cbranch_execz .LBB6_4
 ; GISEL-NEXT:  ; %bb.3: ; %fp-to-i-if-else
-; GISEL-NEXT:    v_add_u32_e32 v2, 0xffffff7a, v5
-; GISEL-NEXT:    v_lshlrev_b64 v[0:1], v2, v[6:7]
-; GISEL-NEXT:    v_add_u32_e32 v5, 0xffffff3a, v5
-; GISEL-NEXT:    v_sub_u32_e32 v3, 64, v2
-; GISEL-NEXT:    v_cmp_gt_u32_e32 vcc, 64, v2
-; GISEL-NEXT:    v_lshrrev_b64 v[3:4], v3, v[6:7]
-; GISEL-NEXT:    v_lshlrev_b64 v[5:6], v5, v[6:7]
-; GISEL-NEXT:    v_cndmask_b32_e32 v7, 0, v0, vcc
+; GISEL-NEXT:    v_add_u32_e32 v10, 0xffffff7a, v5
+; GISEL-NEXT:    v_lshlrev_b64 v[0:1], v10, v[6:7]
+; GISEL-NEXT:    v_cmp_gt_u32_e32 vcc, 64, v10
+; GISEL-NEXT:    v_cndmask_b32_e32 v11, 0, v0, vcc
 ; GISEL-NEXT:    v_cndmask_b32_e32 v12, 0, v1, vcc
-; GISEL-NEXT:    v_mad_u64_u32 v[0:1], s[6:7], v7, v9, 0
+; GISEL-NEXT:    v_mad_u64_u32 v[0:1], s[6:7], v11, v9, 0
+; GISEL-NEXT:    v_add_u32_e32 v4, 0xffffff3a, v5
+; GISEL-NEXT:    v_sub_u32_e32 v2, 64, v10
+; GISEL-NEXT:    v_lshrrev_b64 v[2:3], v2, v[6:7]
+; GISEL-NEXT:    v_lshlrev_b64 v[4:5], v4, v[6:7]
+; GISEL-NEXT:    v_mad_u64_u32 v[6:7], s[8:9], v12, v9, v[0:1]
+; GISEL-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
+; GISEL-NEXT:    v_cmp_eq_u32_e64 s[6:7], 0, v10
+; GISEL-NEXT:    v_cndmask_b32_e64 v10, v2, 0, s[6:7]
+; GISEL-NEXT:    v_mad_u64_u32 v[6:7], s[8:9], v10, v8, v[6:7]
+; GISEL-NEXT:    v_mad_u64_u32 v[0:1], s[8:9], v11, v8, 0
+; GISEL-NEXT:    v_mov_b32_e32 v2, v6
+; GISEL-NEXT:    v_mul_lo_u32 v6, v11, v9
+; GISEL-NEXT:    v_mad_u64_u32 v[1:2], s[8:9], v11, v9, v[1:2]
+; GISEL-NEXT:    v_mul_lo_u32 v4, v12, v9
 ; GISEL-NEXT:    v_cndmask_b32_e32 v3, v5, v3, vcc
-; GISEL-NEXT:    v_cmp_eq_u32_e64 s[6:7], 0, v2
-; GISEL-NEXT:    v_mad_u64_u32 v[10:11], s[8:9], v12, v9, v[0:1]
-; GISEL-NEXT:    v_cndmask_b32_e64 v5, v3, 0, s[6:7]
-; GISEL-NEXT:    v_mad_u64_u32 v[0:1], s[8:9], v7, v8, 0
-; GISEL-NEXT:    v_mad_u64_u32 v[2:3], s[8:9], v5, v8, v[10:11]
-; GISEL-NEXT:    v_mul_lo_u32 v13, v12, v9
-; GISEL-NEXT:    v_mad_u64_u32 v[10:11], s[8:9], v7, v9, v[1:2]
-; GISEL-NEXT:    v_mul_lo_u32 v7, v7, v9
-; GISEL-NEXT:    v_mad_u64_u32 v[1:2], s[10:11], v12, v8, v[10:11]
-; GISEL-NEXT:    v_addc_co_u32_e64 v3, s[10:11], v3, v7, s[10:11]
-; GISEL-NEXT:    v_addc_co_u32_e64 v3, s[8:9], v3, v13, s[8:9]
-; GISEL-NEXT:    v_mad_u64_u32 v[10:11], s[8:9], v5, v9, v[3:4]
-; GISEL-NEXT:    v_cndmask_b32_e32 v3, v6, v4, vcc
-; GISEL-NEXT:    v_cndmask_b32_e64 v5, v3, 0, s[6:7]
-; GISEL-NEXT:    v_mad_u64_u32 v[3:4], s[6:7], v5, v8, v[10:11]
+; GISEL-NEXT:    v_mad_u64_u32 v[1:2], s[10:11], v12, v8, v[1:2]
+; GISEL-NEXT:    v_addc_co_u32_e64 v6, s[10:11], v7, v6, s[10:11]
+; GISEL-NEXT:    v_addc_co_u32_e64 v4, s[8:9], v6, v4, s[8:9]
+; GISEL-NEXT:    v_mad_u64_u32 v[6:7], s[8:9], v10, v9, v[4:5]
+; GISEL-NEXT:    v_cndmask_b32_e64 v3, v3, 0, s[6:7]
 ; GISEL-NEXT:    ; implicit-def: $vgpr5
+; GISEL-NEXT:    v_mad_u64_u32 v[3:4], s[6:7], v3, v8, v[6:7]
 ; GISEL-NEXT:    ; implicit-def: $vgpr6_vgpr7
 ; GISEL-NEXT:    ; implicit-def: $vgpr8
 ; GISEL-NEXT:  .LBB6_4: ; %Flow
@@ -1831,10 +1824,11 @@ define i128 @fptoui_bf16_to_i128(bfloat %x) {
 ; SDAG:       ; %bb.0: ; %fp-to-i-entry
 ; SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; SDAG-NEXT:    v_mov_b32_e32 v4, v0
-; SDAG-NEXT:    v_mov_b32_e32 v0, 0
 ; SDAG-NEXT:    v_bfe_u32 v5, v4, 7, 8
 ; SDAG-NEXT:    s_movk_i32 s4, 0x7e
+; SDAG-NEXT:    v_mov_b32_e32 v0, 0
 ; SDAG-NEXT:    v_mov_b32_e32 v2, 0
+; SDAG-NEXT:    v_mov_b32_e32 v6, 0
 ; SDAG-NEXT:    v_mov_b32_e32 v1, 0
 ; SDAG-NEXT:    v_mov_b32_e32 v3, 0
 ; SDAG-NEXT:    v_cmp_lt_u32_e32 vcc, s4, v5
@@ -1842,9 +1836,7 @@ define i128 @fptoui_bf16_to_i128(bfloat %x) {
 ; SDAG-NEXT:    s_cbranch_execz .LBB7_10
 ; SDAG-NEXT:  ; %bb.1: ; %fp-to-i-if-end
 ; SDAG-NEXT:    v_add_co_u32_e32 v0, vcc, 0xffffff01, v5
-; SDAG-NEXT:    v_mov_b32_e32 v1, -1
-; SDAG-NEXT:    v_mov_b32_e32 v6, 0
-; SDAG-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; SDAG-NEXT:    v_addc_co_u32_e32 v1, vcc, -1, v6, vcc
 ; SDAG-NEXT:    v_addc_co_u32_e32 v2, vcc, -1, v6, vcc
 ; SDAG-NEXT:    s_movk_i32 s6, 0xff7f
 ; SDAG-NEXT:    v_addc_co_u32_e32 v3, vcc, -1, v6, vcc
@@ -1863,10 +1855,10 @@ define i128 @fptoui_bf16_to_i128(bfloat %x) {
 ; SDAG-NEXT:    v_and_b32_sdwa v0, v4, s4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; SDAG-NEXT:    s_mov_b64 s[4:5], 0x85
 ; SDAG-NEXT:    v_cmp_lt_u64_e64 s[4:5], s[4:5], v[5:6]
-; SDAG-NEXT:    v_cndmask_b32_e64 v10, -1, 0, vcc
-; SDAG-NEXT:    v_cndmask_b32_e64 v9, -1, 1, vcc
-; SDAG-NEXT:    v_or_b32_e32 v7, 0x80, v0
-; SDAG-NEXT:    v_mov_b32_e32 v8, v6
+; SDAG-NEXT:    v_mov_b32_e32 v7, 0
+; SDAG-NEXT:    v_cndmask_b32_e64 v9, -1, 0, vcc
+; SDAG-NEXT:    v_cndmask_b32_e64 v8, -1, 1, vcc
+; SDAG-NEXT:    v_or_b32_e32 v6, 0x80, v0
 ; SDAG-NEXT:    ; implicit-def: $vgpr0_vgpr1
 ; SDAG-NEXT:    ; implicit-def: $vgpr2_vgpr3
 ; SDAG-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
@@ -1874,56 +1866,56 @@ define i128 @fptoui_bf16_to_i128(bfloat %x) {
 ; SDAG-NEXT:    s_cbranch_execz .LBB7_4
 ; SDAG-NEXT:  ; %bb.3: ; %fp-to-i-if-else
 ; SDAG-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; SDAG-NEXT:    v_add_co_u32_e64 v11, s[4:5], -1, v0
+; SDAG-NEXT:    v_add_co_u32_e64 v10, s[4:5], -1, v0
 ; SDAG-NEXT:    v_sub_u32_e32 v0, 0xc6, v5
 ; SDAG-NEXT:    v_add_u32_e32 v2, 0xffffff3a, v5
 ; SDAG-NEXT:    v_add_u32_e32 v4, 0xffffff7a, v5
-; SDAG-NEXT:    v_lshrrev_b64 v[0:1], v0, v[7:8]
-; SDAG-NEXT:    v_lshlrev_b64 v[2:3], v2, v[7:8]
+; SDAG-NEXT:    v_lshrrev_b64 v[0:1], v0, v[6:7]
+; SDAG-NEXT:    v_lshlrev_b64 v[2:3], v2, v[6:7]
 ; SDAG-NEXT:    v_cmp_gt_u32_e64 s[4:5], 64, v4
 ; SDAG-NEXT:    v_cndmask_b32_e64 v1, v3, v1, s[4:5]
 ; SDAG-NEXT:    v_cmp_ne_u32_e64 s[6:7], 0, v4
 ; SDAG-NEXT:    v_cndmask_b32_e64 v3, 0, v1, s[6:7]
 ; SDAG-NEXT:    v_cndmask_b32_e64 v2, v2, v0, s[4:5]
-; SDAG-NEXT:    v_lshlrev_b64 v[0:1], v4, v[7:8]
+; SDAG-NEXT:    v_lshlrev_b64 v[0:1], v4, v[6:7]
 ; SDAG-NEXT:    v_cndmask_b32_e64 v2, 0, v2, s[6:7]
-; SDAG-NEXT:    v_cndmask_b32_e64 v7, 0, v0, s[4:5]
-; SDAG-NEXT:    v_mad_u64_u32 v[4:5], s[6:7], v7, v9, 0
-; SDAG-NEXT:    v_cndmask_b32_e64 v13, 0, v1, s[4:5]
-; SDAG-NEXT:    v_mul_lo_u32 v8, v10, v2
-; SDAG-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v13, v9, v[5:6]
-; SDAG-NEXT:    v_mul_lo_u32 v12, v9, v3
-; SDAG-NEXT:    v_mad_u64_u32 v[2:3], s[6:7], v9, v2, 0
-; SDAG-NEXT:    v_mov_b32_e32 v5, v0
-; SDAG-NEXT:    v_mad_u64_u32 v[5:6], s[4:5], v7, v10, v[5:6]
-; SDAG-NEXT:    v_add3_u32 v3, v3, v12, v8
-; SDAG-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], v11, v7, v[2:3]
-; SDAG-NEXT:    v_add_co_u32_e64 v0, s[4:5], v1, v6
-; SDAG-NEXT:    v_addc_co_u32_e64 v1, s[4:5], 0, 0, s[4:5]
-; SDAG-NEXT:    v_mul_lo_u32 v8, v11, v13
-; SDAG-NEXT:    v_mul_lo_u32 v7, v11, v7
-; SDAG-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v13, v10, v[0:1]
-; SDAG-NEXT:    ; implicit-def: $vgpr9
-; SDAG-NEXT:    v_add3_u32 v3, v7, v3, v8
-; SDAG-NEXT:    v_add_co_u32_e64 v2, s[4:5], v0, v2
-; SDAG-NEXT:    v_addc_co_u32_e64 v3, s[4:5], v1, v3, s[4:5]
-; SDAG-NEXT:    v_mov_b32_e32 v0, v4
-; SDAG-NEXT:    v_mov_b32_e32 v1, v5
+; SDAG-NEXT:    v_cndmask_b32_e64 v12, 0, v0, s[4:5]
+; SDAG-NEXT:    v_cndmask_b32_e64 v11, 0, v1, s[4:5]
+; SDAG-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v12, v8, 0
+; SDAG-NEXT:    v_mul_lo_u32 v13, v9, v2
+; SDAG-NEXT:    v_mul_lo_u32 v14, v8, v3
+; SDAG-NEXT:    v_mov_b32_e32 v6, v1
+; SDAG-NEXT:    v_mad_u64_u32 v[4:5], s[4:5], v11, v8, v[6:7]
+; SDAG-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], v8, v2, 0
+; SDAG-NEXT:    v_mov_b32_e32 v6, v4
+; SDAG-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], v12, v9, v[6:7]
+; SDAG-NEXT:    v_add3_u32 v3, v3, v14, v13
+; SDAG-NEXT:    v_mad_u64_u32 v[1:2], s[4:5], v10, v12, v[2:3]
+; SDAG-NEXT:    v_add_co_u32_e64 v3, s[4:5], v5, v7
+; SDAG-NEXT:    v_addc_co_u32_e64 v4, s[4:5], 0, 0, s[4:5]
+; SDAG-NEXT:    v_mul_lo_u32 v8, v10, v11
+; SDAG-NEXT:    v_mul_lo_u32 v10, v10, v12
+; SDAG-NEXT:    v_mad_u64_u32 v[3:4], s[4:5], v11, v9, v[3:4]
+; SDAG-NEXT:    v_add3_u32 v5, v10, v2, v8
+; SDAG-NEXT:    v_add_co_u32_e64 v2, s[4:5], v3, v1
+; SDAG-NEXT:    v_addc_co_u32_e64 v3, s[4:5], v4, v5, s[4:5]
+; SDAG-NEXT:    v_mov_b32_e32 v1, v6
 ; SDAG-NEXT:    ; implicit-def: $vgpr5_vgpr6
-; SDAG-NEXT:    ; implicit-def: $vgpr7_vgpr8
+; SDAG-NEXT:    ; implicit-def: $vgpr6_vgpr7
+; SDAG-NEXT:    ; implicit-def: $vgpr8
 ; SDAG-NEXT:  .LBB7_4: ; %Flow
 ; SDAG-NEXT:    s_andn2_saveexec_b64 s[6:7], s[12:13]
 ; SDAG-NEXT:    s_cbranch_execz .LBB7_6
 ; SDAG-NEXT:  ; %bb.5: ; %fp-to-i-if-then12
 ; SDAG-NEXT:    v_sub_u32_e32 v2, 0x86, v5
-; SDAG-NEXT:    v_lshrrev_b64 v[0:1], v2, v[7:8]
+; SDAG-NEXT:    v_lshrrev_b64 v[0:1], v2, v[6:7]
 ; SDAG-NEXT:    v_cmp_gt_u32_e64 s[4:5], 64, v2
 ; SDAG-NEXT:    v_cndmask_b32_e64 v0, 0, v0, s[4:5]
 ; SDAG-NEXT:    v_cmp_eq_u32_e64 s[4:5], 0, v2
-; SDAG-NEXT:    v_cndmask_b32_e64 v0, v0, v7, s[4:5]
-; SDAG-NEXT:    v_mul_hi_i32_i24_e32 v1, v0, v9
+; SDAG-NEXT:    v_cndmask_b32_e64 v0, v0, v6, s[4:5]
+; SDAG-NEXT:    v_mul_hi_i32_i24_e32 v1, v0, v8
 ; SDAG-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
-; SDAG-NEXT:    v_mul_i32_i24_e32 v0, v0, v9
+; SDAG-NEXT:    v_mul_i32_i24_e32 v0, v0, v8
 ; SDAG-NEXT:    v_mov_b32_e32 v3, v2
 ; SDAG-NEXT:  .LBB7_6: ; %Flow1
 ; SDAG-NEXT:    s_or_b64 exec, exec, s[6:7]
@@ -1932,11 +1924,10 @@ define i128 @fptoui_bf16_to_i128(bfloat %x) {
 ; SDAG-NEXT:  ; %bb.8: ; %fp-to-i-if-then5
 ; SDAG-NEXT:    v_bfrev_b32_e32 v0, 1
 ; SDAG-NEXT:    v_bfrev_b32_e32 v1, -2
-; SDAG-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; SDAG-NEXT:    v_cndmask_b32_e64 v1, 0, -1, vcc
-; SDAG-NEXT:    v_mov_b32_e32 v3, v2
-; SDAG-NEXT:    v_mov_b32_e32 v0, v1
-; SDAG-NEXT:    v_mov_b32_e32 v2, v1
+; SDAG-NEXT:    v_cndmask_b32_e64 v2, 0, -1, vcc
+; SDAG-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
+; SDAG-NEXT:    v_mov_b32_e32 v0, v2
+; SDAG-NEXT:    v_mov_b32_e32 v1, v2
 ; SDAG-NEXT:  ; %bb.9: ; %Flow3
 ; SDAG-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; SDAG-NEXT:  .LBB7_10: ; %fp-to-i-cleanup
@@ -2048,33 +2039,34 @@ define i128 @fptoui_bf16_to_i128(bfloat %x) {
 ; GISEL-NEXT:    s_xor_b64 s[16:17], exec, s[6:7]
 ; GISEL-NEXT:    s_cbranch_execz .LBB7_4
 ; GISEL-NEXT:  ; %bb.3: ; %fp-to-i-if-else
-; GISEL-NEXT:    v_add_u32_e32 v2, 0xffffff7a, v5
-; GISEL-NEXT:    v_lshlrev_b64 v[0:1], v2, v[6:7]
-; GISEL-NEXT:    v_add_u32_e32 v5, 0xffffff3a, v5
-; GISEL-NEXT:    v_sub_u32_e32 v3, 64, v2
-; GISEL-NEXT:    v_cmp_gt_u32_e32 vcc, 64, v2
-; GISEL-NEXT:    v_lshrrev_b64 v[3:4], v3, v[6:7]
-; GISEL-NEXT:    v_lshlrev_b64 v[5:6], v5, v[6:7]
-; GISEL-NEXT:    v_cndmask_b32_e32 v7, 0, v0, vcc
+; GISEL-NEXT:    v_add_u32_e32 v10, 0xffffff7a, v5
+; GISEL-NEXT:    v_lshlrev_b64 v[0:1], v10, v[6:7]
+; GISEL-NEXT:    v_cmp_gt_u32_e32 vcc, 64, v10
+; GISEL-NEXT:    v_cndmask_b32_e32 v11, 0, v0, vcc
 ; GISEL-NEXT:    v_cndmask_b32_e32 v12, 0, v1, vcc
-; GISEL-NEXT:    v_mad_u64_u32 v[0:1], s[6:7], v7, v9, 0
+; GISEL-NEXT:    v_mad_u64_u32 v[0:1], s[6:7], v11, v9, 0
+; GISEL-NEXT:    v_add_u32_e32 v4, 0xffffff3a, v5
+; GISEL-NEXT:    v_sub_u32_e32 v2, 64, v10
+; GISEL-NEXT:    v_lshrrev_b64 v[2:3], v2, v[6:7]
+; GISEL-NEXT:    v_lshlrev_b64 v[4:5], v4, v[6:7]
+; GISEL-NEXT:    v_mad_u64_u32 v[6:7], s[8:9], v12, v9, v[0:1]
+; GISEL-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
+; GISEL-NEXT:    v_cmp_eq_u32_e64 s[6:7], 0, v10
+; GISEL-NEXT:    v_cndmask_b32_e64 v10, v2, 0, s[6:7]
+; GISEL-NEXT:    v_mad_u64_u32 v[6:7], s[8:9], v10, v8, v[6:7]
+; GISEL-NEXT:    v_mad_u64_u32 v[0:1], s[8:9], v11, v8, 0
+; GISEL-NEXT:    v_mov_b32_e32 v2, v6
+; GISEL-NEXT:    v_mul_lo_u32 v6, v11, v9
+; GISEL-NEXT:    v_mad_u64_u32 v[1:2], s[8:9], v11, v9, v[1:2]
+; GISEL-NEXT:    v_mul_lo_u32 v4, v12, v9
 ; GISEL-NEXT:    v_cndmask_b32_e32 v3, v5, v3, vcc
-; GISEL-NEXT:    v_cmp_eq_u32_e64 s[6:7], 0, v2
-; GISEL-NEXT:    v_mad_u64_u32 v[10:11], s[8:9], v12, v9, v[0:1]
-; GISEL-NEXT:    v_cndmask_b32_e64 v5, v3, 0, s[6:7]
-; GISEL-NEXT:    v_mad_u64_u32 v[0:1], s[8:9], v7, v8, 0
-; GISEL-NEXT:    v_mad_u64_u32 v[2:3], s[8:9], v5, v8, v[10:11]
-; GISEL-NEXT:    v_mul_lo_u32 v13, v12, v9
-; GISEL-NEXT:    v_mad_u64_u32 v[10:11], s[8:9], v7, v9, v[1:2]
-; GISEL-NEXT:    v_mul_lo_u32 v7, v7, v9
-; GISEL-NEXT:    v_mad_u64_u32 v[1:2], s[10:11], v12, v8, v[10:11]
-; GISEL-NEXT:    v_addc_co_u32_e64 v3, s[10:11], v3, v7, s[10:11]
-; GISEL-NEXT:    v_addc_co_u32_e64 v3, s[8:9], v3, v13, s[8:9]
-; GISEL-NEXT:    v_mad_u64_u32 v[10:11], s[8:9], v5, v9, v[3:4]
-; GISEL-NEXT:    v_cndmask_b32_e32 v3, v6, v4, vcc
-; GISEL-NEXT:    v_cndmask_b32_e64 v5, v3, 0, s[6:7]
-; GISEL-NEXT:    v_mad_u64_u32 v[3:4], s[6:7], v5, v8, v[10:11]
+; GISEL-NEXT:    v_mad_u64_u32 v[1:2], s[10:11], v12, v8, v[1:2]
+; GISEL-NEXT:    v_addc_co_u32_e64 v6, s[10:11], v7, v6, s[10:11]
+; GISEL-NEXT:    v_addc_co_u32_e64 v4, s[8:9], v6, v4, s[8:9]
+; GISEL-NEXT:    v_mad_u64_u32 v[6:7], s[8:9], v10, v9, v[4:5]
+; GISEL-NEXT:    v_cndmask_b32_e64 v3, v3, 0, s[6:7]
 ; GISEL-NEXT:    ; implicit-def: $vgpr5
+; GISEL-NEXT:    v_mad_u64_u32 v[3:4], s[6:7], v3, v8, v[6:7]
 ; GISEL-NEXT:    ; implicit-def: $vgpr6_vgpr7
 ; GISEL-NEXT:    ; implicit-def: $vgpr8
 ; GISEL-NEXT:  .LBB7_4: ; %Flow
diff --git a/llvm/test/CodeGen/AMDGPU/itofp.i128.bf.ll b/llvm/test/CodeGen/AMDGPU/itofp.i128.bf.ll
index eee3352fa7452..16cafc67ca885 100644
--- a/llvm/test/CodeGen/AMDGPU/itofp.i128.bf.ll
+++ b/llvm/test/CodeGen/AMDGPU/itofp.i128.bf.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck -check-prefixes=GCN,SDAG %s
-; RUN: not --crash llc -global-isel=1 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s 2>&1 | FileCheck -check-prefix=GISEL %s
+
 
 ; FIXME: GISEL can't handle the "fptrunc float to bfloat" that expand-ir-insts emits.
 
diff --git a/llvm/test/CodeGen/AMDGPU/itofp.i128.ll b/llvm/test/CodeGen/AMDGPU/itofp.i128.ll
index 2f9182e6e7c6a..968471287dc4d 100644
--- a/llvm/test/CodeGen/AMDGPU/itofp.i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/itofp.i128.ll
@@ -1066,13 +1066,13 @@ define double @uitofp_i128_to_f64(i128 %x) {
 ; GISEL-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[8:9]
 ; GISEL-NEXT:    v_lshlrev_b64 v[8:9], 30, v[2:3]
 ; GISEL-NEXT:    v_lshrrev_b32_e32 v5, 2, v1
-; GISEL-NEXT:    v_or_b32_e32 v9, v8, v5
+; GISEL-NEXT:    v_or_b32_e32 v9, v5, v8
 ; GISEL-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; GISEL-NEXT:  ; %bb.11: ; %itofp-if-then20
 ; GISEL-NEXT:    v_lshlrev_b64 v[2:3], 29, v[2:3]
 ; GISEL-NEXT:    v_lshrrev_b64 v[4:5], 3, v[0:1]
 ; GISEL-NEXT:    v_lshrrev_b32_e32 v0, 3, v1
-; GISEL-NEXT:    v_or_b32_e32 v9, v2, v0
+; GISEL-NEXT:    v_or_b32_e32 v9, v0, v2
 ; GISEL-NEXT:    v_mov_b32_e32 v7, v6
 ; GISEL-NEXT:  ; %bb.12: ; %Flow
 ; GISEL-NEXT:    s_or_b64 exec, exec, s[4:5]
diff --git a/llvm/test/CodeGen/RISCV/idiv_large.ll b/llvm/test/CodeGen/RISCV/idiv_large.ll
index 1d13f723ac224..9072fb9a8f1e7 100644
--- a/llvm/test/CodeGen/RISCV/idiv_large.ll
+++ b/llvm/test/CodeGen/RISCV/idiv_large.ll
@@ -21,423 +21,41 @@ define i64 @udiv_i64(i64 %x, i64 %y) nounwind {
 
 define i65 @udiv_i65(i65 %x, i65 %y) nounwind {
 ; RV32-LABEL: udiv_i65:
-; RV32:       # %bb.0: # %_udiv-special-cases
-; RV32-NEXT:    lw a3, 0(a2)
-; RV32-NEXT:    lw a4, 4(a2)
-; RV32-NEXT:    lw t1, 8(a2)
-; RV32-NEXT:    lui a2, 349525
-; RV32-NEXT:    lui a5, 209715
-; RV32-NEXT:    lui a6, 61681
-; RV32-NEXT:    addi t0, a2, 1365
-; RV32-NEXT:    addi a7, a5, 819
-; RV32-NEXT:    addi a6, a6, -241
-; RV32-NEXT:    srli a2, a4, 1
-; RV32-NEXT:    slli a5, t1, 31
-; RV32-NEXT:    slli t3, a4, 31
-; RV32-NEXT:    or t2, a5, a2
-; RV32-NEXT:    srli a2, a3, 1
-; RV32-NEXT:    or t4, a2, t3
-; RV32-NEXT:    bnez t2, .LBB1_2
-; RV32-NEXT:  # %bb.1: # %_udiv-special-cases
-; RV32-NEXT:    srli a2, t4, 1
-; RV32-NEXT:    or a2, t4, a2
-; RV32-NEXT:    srli a5, a2, 2
-; RV32-NEXT:    or a2, a2, a5
-; RV32-NEXT:    srli a5, a2, 4
-; RV32-NEXT:    or a2, a2, a5
-; RV32-NEXT:    srli a5, a2, 8
-; RV32-NEXT:    or a2, a2, a5
-; RV32-NEXT:    srli a5, a2, 16
-; RV32-NEXT:    or a2, a2, a5
-; RV32-NEXT:    not a2, a2
-; RV32-NEXT:    srli a5, a2, 1
-; RV32-NEXT:    and a5, a5, t0
-; RV32-NEXT:    sub a2, a2, a5
-; RV32-NEXT:    and a5, a2, a7
-; RV32-NEXT:    srli a2, a2, 2
-; RV32-NEXT:    and a2, a2, a7
-; RV32-NEXT:    add a2, a5, a2
-; RV32-NEXT:    srli a5, a2, 4
-; RV32-NEXT:    add a2, a2, a5
-; RV32-NEXT:    and a2, a2, a6
-; RV32-NEXT:    slli a5, a2, 8
-; RV32-NEXT:    add a2, a2, a5
-; RV32-NEXT:    slli a5, a2, 16
-; RV32-NEXT:    add a2, a2, a5
-; RV32-NEXT:    srli a2, a2, 24
-; RV32-NEXT:    addi t3, a2, 32
-; RV32-NEXT:    j .LBB1_3
-; RV32-NEXT:  .LBB1_2:
-; RV32-NEXT:    srli a2, t2, 1
-; RV32-NEXT:    or a2, t2, a2
-; RV32-NEXT:    srli a5, a2, 2
-; RV32-NEXT:    or a2, a2, a5
-; RV32-NEXT:    srli a5, a2, 4
-; RV32-NEXT:    or a2, a2, a5
-; RV32-NEXT:    srli a5, a2, 8
-; RV32-NEXT:    or a2, a2, a5
-; RV32-NEXT:    srli a5, a2, 16
-; RV32-NEXT:    or a2, a2, a5
-; RV32-NEXT:    not a2, a2
-; RV32-NEXT:    srli a5, a2, 1
-; RV32-NEXT:    and a5, a5, t0
-; RV32-NEXT:    sub a2, a2, a5
-; RV32-NEXT:    and a5, a2, a7
-; RV32-NEXT:    srli a2, a2, 2
-; RV32-NEXT:    and a2, a2, a7
-; RV32-NEXT:    add a2, a5, a2
-; RV32-NEXT:    srli a5, a2, 4
-; RV32-NEXT:    add a2, a2, a5
-; RV32-NEXT:    and a2, a2, a6
-; RV32-NEXT:    slli a5, a2, 8
-; RV32-NEXT:    add a2, a2, a5
-; RV32-NEXT:    slli a5, a2, 16
-; RV32-NEXT:    add a2, a2, a5
-; RV32-NEXT:    srli t3, a2, 24
-; RV32-NEXT:  .LBB1_3: # %_udiv-special-cases
-; RV32-NEXT:    addi sp, sp, -96
-; RV32-NEXT:    sw s0, 92(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw s1, 88(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw s2, 84(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw s3, 80(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw s4, 76(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw s5, 72(sp) # 4-byte Folded Spill
-; RV32-NEXT:    slli a2, a3, 31
-; RV32-NEXT:    li t5, 64
-; RV32-NEXT:    bnez a2, .LBB1_5
-; RV32-NEXT:  # %bb.4: # %_udiv-special-cases
-; RV32-NEXT:    li s0, 64
-; RV32-NEXT:    j .LBB1_6
-; RV32-NEXT:  .LBB1_5:
-; RV32-NEXT:    srli a5, a2, 1
-; RV32-NEXT:    or a2, a2, a5
-; RV32-NEXT:    srli a5, a2, 2
-; RV32-NEXT:    or a2, a2, a5
-; RV32-NEXT:    srli a5, a2, 4
-; RV32-NEXT:    or a2, a2, a5
-; RV32-NEXT:    srli a5, a2, 8
-; RV32-NEXT:    or a2, a2, a5
-; RV32-NEXT:    srli a5, a2, 16
-; RV32-NEXT:    or a2, a2, a5
-; RV32-NEXT:    not a2, a2
-; RV32-NEXT:    srli a5, a2, 1
-; RV32-NEXT:    and a5, a5, t0
-; RV32-NEXT:    sub a2, a2, a5
-; RV32-NEXT:    and a5, a2, a7
-; RV32-NEXT:    srli a2, a2, 2
-; RV32-NEXT:    and a2, a2, a7
-; RV32-NEXT:    add a2, a5, a2
-; RV32-NEXT:    srli a5, a2, 4
-; RV32-NEXT:    add a2, a2, a5
-; RV32-NEXT:    and a2, a2, a6
-; RV32-NEXT:    slli a5, a2, 8
-; RV32-NEXT:    add a2, a2, a5
-; RV32-NEXT:    slli a5, a2, 16
-; RV32-NEXT:    add a2, a2, a5
-; RV32-NEXT:    srli s0, a2, 24
-; RV32-NEXT:  .LBB1_6: # %_udiv-special-cases
-; RV32-NEXT:    lw a5, 0(a1)
-; RV32-NEXT:    lw a2, 4(a1)
-; RV32-NEXT:    lw s2, 8(a1)
-; RV32-NEXT:    or a1, t4, t2
-; RV32-NEXT:    addi s1, s0, 64
-; RV32-NEXT:    bnez a1, .LBB1_8
-; RV32-NEXT:  # %bb.7: # %_udiv-special-cases
-; RV32-NEXT:    mv t3, s1
-; RV32-NEXT:  .LBB1_8: # %_udiv-special-cases
-; RV32-NEXT:    snez s4, a1
-; RV32-NEXT:    srli a1, a2, 1
-; RV32-NEXT:    slli t2, s2, 31
-; RV32-NEXT:    slli t4, a2, 31
-; RV32-NEXT:    or a1, t2, a1
-; RV32-NEXT:    srli t2, a5, 1
-; RV32-NEXT:    or t6, t2, t4
-; RV32-NEXT:    bnez a1, .LBB1_10
-; RV32-NEXT:  # %bb.9: # %_udiv-special-cases
-; RV32-NEXT:    srli t2, t6, 1
-; RV32-NEXT:    or t2, t6, t2
-; RV32-NEXT:    srli t4, t2, 2
-; RV32-NEXT:    or t2, t2, t4
-; RV32-NEXT:    srli t4, t2, 4
-; RV32-NEXT:    or t2, t2, t4
-; RV32-NEXT:    srli t4, t2, 8
-; RV32-NEXT:    or t2, t2, t4
-; RV32-NEXT:    srli t4, t2, 16
-; RV32-NEXT:    or t2, t2, t4
-; RV32-NEXT:    not t2, t2
-; RV32-NEXT:    srli t4, t2, 1
-; RV32-NEXT:    and t4, t4, t0
-; RV32-NEXT:    sub t2, t2, t4
-; RV32-NEXT:    and t4, t2, a7
-; RV32-NEXT:    srli t2, t2, 2
-; RV32-NEXT:    and t2, t2, a7
-; RV32-NEXT:    add t2, t4, t2
-; RV32-NEXT:    srli t4, t2, 4
-; RV32-NEXT:    add t2, t2, t4
-; RV32-NEXT:    and t2, t2, a6
-; RV32-NEXT:    slli t4, t2, 8
-; RV32-NEXT:    add t2, t2, t4
-; RV32-NEXT:    slli t4, t2, 16
-; RV32-NEXT:    add t2, t2, t4
-; RV32-NEXT:    srli t2, t2, 24
-; RV32-NEXT:    addi s3, t2, 32
-; RV32-NEXT:    j .LBB1_11
-; RV32-NEXT:  .LBB1_10:
-; RV32-NEXT:    srli t2, a1, 1
-; RV32-NEXT:    or t2, a1, t2
-; RV32-NEXT:    srli t4, t2, 2
-; RV32-NEXT:    or t2, t2, t4
-; RV32-NEXT:    srli t4, t2, 4
-; RV32-NEXT:    or t2, t2, t4
-; RV32-NEXT:    srli t4, t2, 8
-; RV32-NEXT:    or t2, t2, t4
-; RV32-NEXT:    srli t4, t2, 16
-; RV32-NEXT:    or t2, t2, t4
-; RV32-NEXT:    not t2, t2
-; RV32-NEXT:    srli t4, t2, 1
-; RV32-NEXT:    and t4, t4, t0
-; RV32-NEXT:    sub t2, t2, t4
-; RV32-NEXT:    and t4, t2, a7
-; RV32-NEXT:    srli t2, t2, 2
-; RV32-NEXT:    and t2, t2, a7
-; RV32-NEXT:    add t2, t4, t2
-; RV32-NEXT:    srli t4, t2, 4
-; RV32-NEXT:    add t2, t2, t4
-; RV32-NEXT:    and t2, t2, a6
-; RV32-NEXT:    slli t4, t2, 8
-; RV32-NEXT:    add t2, t2, t4
-; RV32-NEXT:    slli t4, t2, 16
-; RV32-NEXT:    add t2, t2, t4
-; RV32-NEXT:    srli s3, t2, 24
-; RV32-NEXT:  .LBB1_11: # %_udiv-special-cases
-; RV32-NEXT:    andi t4, s2, 1
-; RV32-NEXT:    andi t1, t1, 1
-; RV32-NEXT:    or t2, a3, a4
-; RV32-NEXT:    or s2, a5, a2
-; RV32-NEXT:    sltu s0, s1, s0
-; RV32-NEXT:    slli s1, a5, 31
-; RV32-NEXT:    addi s4, s4, -1
-; RV32-NEXT:    beqz s1, .LBB1_13
-; RV32-NEXT:  # %bb.12:
-; RV32-NEXT:    srli t5, s1, 1
-; RV32-NEXT:    or t5, s1, t5
-; RV32-NEXT:    srli s1, t5, 2
-; RV32-NEXT:    or t5, t5, s1
-; RV32-NEXT:    srli s1, t5, 4
-; RV32-NEXT:    or t5, t5, s1
-; RV32-NEXT:    srli s1, t5, 8
-; RV32-NEXT:    or t5, t5, s1
-; RV32-NEXT:    srli s1, t5, 16
-; RV32-NEXT:    or t5, t5, s1
-; RV32-NEXT:    not t5, t5
-; RV32-NEXT:    srli s1, t5, 1
-; RV32-NEXT:    and t0, s1, t0
-; RV32-NEXT:    sub t0, t5, t0
-; RV32-NEXT:    and t5, t0, a7
-; RV32-NEXT:    srli t0, t0, 2
-; RV32-NEXT:    and a7, t0, a7
-; RV32-NEXT:    add a7, t5, a7
-; RV32-NEXT:    srli t0, a7, 4
-; RV32-NEXT:    add a7, a7, t0
-; RV32-NEXT:    and a6, a7, a6
-; RV32-NEXT:    slli a7, a6, 8
-; RV32-NEXT:    add a6, a6, a7
-; RV32-NEXT:    slli a7, a6, 16
-; RV32-NEXT:    add a6, a6, a7
-; RV32-NEXT:    srli t5, a6, 24
-; RV32-NEXT:  .LBB1_13: # %_udiv-special-cases
-; RV32-NEXT:    or t0, t2, t1
-; RV32-NEXT:    or a6, s2, t4
-; RV32-NEXT:    and a7, s4, s0
-; RV32-NEXT:    or t6, t6, a1
-; RV32-NEXT:    addi s0, t5, 64
-; RV32-NEXT:    bnez t6, .LBB1_15
-; RV32-NEXT:  # %bb.14: # %_udiv-special-cases
-; RV32-NEXT:    mv s3, s0
-; RV32-NEXT:  .LBB1_15: # %_udiv-special-cases
-; RV32-NEXT:    seqz a1, t0
-; RV32-NEXT:    sltu t0, s0, t5
-; RV32-NEXT:    snez t5, t6
-; RV32-NEXT:    addi t5, t5, -1
-; RV32-NEXT:    and t0, t5, t0
-; RV32-NEXT:    sltu t5, t3, s3
-; RV32-NEXT:    seqz a6, a6
-; RV32-NEXT:    mv t6, t5
-; RV32-NEXT:    beq a7, t0, .LBB1_17
-; RV32-NEXT:  # %bb.16: # %_udiv-special-cases
-; RV32-NEXT:    sltu t6, a7, t0
-; RV32-NEXT:  .LBB1_17: # %_udiv-special-cases
-; RV32-NEXT:    or a1, a1, a6
-; RV32-NEXT:    andi a6, t6, 1
-; RV32-NEXT:    sub a7, a7, t0
-; RV32-NEXT:    sub t0, a7, t5
-; RV32-NEXT:    sub a7, t3, s3
-; RV32-NEXT:    beqz a6, .LBB1_19
-; RV32-NEXT:  # %bb.18: # %_udiv-special-cases
-; RV32-NEXT:    mv t3, a6
-; RV32-NEXT:    j .LBB1_20
-; RV32-NEXT:  .LBB1_19:
-; RV32-NEXT:    sltiu t3, a7, 65
-; RV32-NEXT:    xori t3, t3, 1
-; RV32-NEXT:    snez t5, t0
-; RV32-NEXT:    or t3, t3, t5
-; RV32-NEXT:  .LBB1_20: # %_udiv-special-cases
-; RV32-NEXT:    or t6, a1, t3
-; RV32-NEXT:    addi t5, t6, -1
-; RV32-NEXT:    and a1, t4, t5
-; RV32-NEXT:    and t3, t5, a2
-; RV32-NEXT:    and t5, t5, a5
-; RV32-NEXT:    bnez t6, .LBB1_29
-; RV32-NEXT:  # %bb.21: # %_udiv-special-cases
-; RV32-NEXT:    xori t6, a7, 64
-; RV32-NEXT:    or t6, t6, a6
-; RV32-NEXT:    or t6, t6, t0
-; RV32-NEXT:    beqz t6, .LBB1_29
-; RV32-NEXT:  # %bb.22: # %udiv-bb1
-; RV32-NEXT:    addi a1, a7, 1
-; RV32-NEXT:    sw zero, 40(sp)
-; RV32-NEXT:    sw zero, 44(sp)
-; RV32-NEXT:    sw zero, 48(sp)
-; RV32-NEXT:    sw zero, 52(sp)
-; RV32-NEXT:    sw a5, 56(sp)
-; RV32-NEXT:    sw a2, 60(sp)
-; RV32-NEXT:    sw t4, 64(sp)
-; RV32-NEXT:    li t3, 64
-; RV32-NEXT:    addi t5, sp, 56
-; RV32-NEXT:    neg s1, a7
-; RV32-NEXT:    seqz t6, a1
-; RV32-NEXT:    sub a7, t3, a7
-; RV32-NEXT:    add t0, t0, t6
-; RV32-NEXT:    andi t3, a7, 31
-; RV32-NEXT:    srli a7, a7, 3
-; RV32-NEXT:    or t6, a1, t0
-; RV32-NEXT:    xori s2, t3, 31
-; RV32-NEXT:    andi a7, a7, 12
-; RV32-NEXT:    seqz t3, t6
-; RV32-NEXT:    sub s3, t5, a7
-; RV32-NEXT:    add a6, a6, t3
-; RV32-NEXT:    lw a7, 0(s3)
-; RV32-NEXT:    lw s4, 4(s3)
-; RV32-NEXT:    andi a6, a6, 1
-; RV32-NEXT:    or t6, t6, a6
-; RV32-NEXT:    srli t3, a7, 1
-; RV32-NEXT:    sll t5, s4, s1
-; RV32-NEXT:    srl t3, t3, s2
-; RV32-NEXT:    or t5, t5, t3
-; RV32-NEXT:    sll t3, a7, s1
-; RV32-NEXT:    li a7, 0
-; RV32-NEXT:    beqz t6, .LBB1_28
-; RV32-NEXT:  # %bb.23: # %udiv-preheader
-; RV32-NEXT:    li t6, 0
-; RV32-NEXT:    li s0, 0
-; RV32-NEXT:    srli s4, s4, 1
-; RV32-NEXT:    lw s3, 8(s3)
-; RV32-NEXT:    sw zero, 24(sp)
-; RV32-NEXT:    sw zero, 28(sp)
-; RV32-NEXT:    sw zero, 32(sp)
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -64
+; RV32-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 56(sp) # 4-byte Folded Spill
+; RV32-NEXT:    mv s0, a0
+; RV32-NEXT:    lw a0, 0(a1)
+; RV32-NEXT:    lw a3, 4(a1)
+; RV32-NEXT:    lw a1, 8(a1)
+; RV32-NEXT:    lw a4, 8(a2)
+; RV32-NEXT:    lw a5, 0(a2)
+; RV32-NEXT:    lw a2, 4(a2)
+; RV32-NEXT:    andi a6, a1, 1
+; RV32-NEXT:    andi a4, a4, 1
+; RV32-NEXT:    sw zero, 20(sp)
 ; RV32-NEXT:    sw zero, 36(sp)
 ; RV32-NEXT:    sw a5, 8(sp)
 ; RV32-NEXT:    sw a2, 12(sp)
-; RV32-NEXT:    sw t4, 16(sp)
-; RV32-NEXT:    sw zero, 20(sp)
-; RV32-NEXT:    srli a2, a1, 3
-; RV32-NEXT:    srl a5, s4, s2
-; RV32-NEXT:    addi t4, sp, 8
-; RV32-NEXT:    snez t2, t2
-; RV32-NEXT:    andi a2, a2, 12
-; RV32-NEXT:    add t1, t1, t2
-; RV32-NEXT:    add a2, t4, a2
-; RV32-NEXT:    lw t4, 0(a2)
-; RV32-NEXT:    lw t2, 4(a2)
-; RV32-NEXT:    lw a2, 8(a2)
-; RV32-NEXT:    sll s1, s3, s1
-; RV32-NEXT:    andi s2, a1, 31
-; RV32-NEXT:    xori s2, s2, 31
-; RV32-NEXT:    or s1, s1, a5
-; RV32-NEXT:    slli a2, a2, 1
-; RV32-NEXT:    slli a5, t2, 1
-; RV32-NEXT:    sll a2, a2, s2
-; RV32-NEXT:    sll s2, a5, s2
-; RV32-NEXT:    srl a5, t2, a1
-; RV32-NEXT:    or t2, a5, a2
-; RV32-NEXT:    seqz a2, a3
-; RV32-NEXT:    sub a2, a4, a2
-; RV32-NEXT:    addi a5, t1, 1
-; RV32-NEXT:    andi a5, a5, 1
-; RV32-NEXT:    andi s1, s1, 1
-; RV32-NEXT:    srl t1, t4, a1
-; RV32-NEXT:    or t4, t1, s2
-; RV32-NEXT:    addi t1, a3, -1
-; RV32-NEXT:    j .LBB1_26
-; RV32-NEXT:  .LBB1_24: # %udiv-do-while
-; RV32-NEXT:    # in Loop: Header=BB1_26 Depth=1
-; RV32-NEXT:    sltu s1, a2, s2
-; RV32-NEXT:  .LBB1_25: # %udiv-do-while
-; RV32-NEXT:    # in Loop: Header=BB1_26 Depth=1
-; RV32-NEXT:    srli t2, t2, 31
-; RV32-NEXT:    sub t2, a5, t2
-; RV32-NEXT:    sub t2, t2, s1
-; RV32-NEXT:    slli t2, t2, 31
-; RV32-NEXT:    srai t2, t2, 31
-; RV32-NEXT:    and s1, t2, a4
-; RV32-NEXT:    srli s3, t3, 31
-; RV32-NEXT:    slli s4, t5, 1
-; RV32-NEXT:    srli t5, t5, 31
-; RV32-NEXT:    slli t3, t3, 1
-; RV32-NEXT:    sub s2, s2, s1
-; RV32-NEXT:    and s5, t2, a3
-; RV32-NEXT:    or s1, s4, s3
-; RV32-NEXT:    seqz s3, a1
-; RV32-NEXT:    or t3, a7, t3
-; RV32-NEXT:    or s4, a1, t0
-; RV32-NEXT:    addi a1, a1, -1
-; RV32-NEXT:    or s0, s0, t5
-; RV32-NEXT:    andi a7, t2, 1
-; RV32-NEXT:    sltu t2, t4, s5
-; RV32-NEXT:    sub t0, t0, s3
-; RV32-NEXT:    snez s3, s4
-; RV32-NEXT:    or t5, t6, s1
-; RV32-NEXT:    andi s1, s0, 1
-; RV32-NEXT:    sub t2, s2, t2
-; RV32-NEXT:    add a6, a6, s3
-; RV32-NEXT:    addi a6, a6, 1
-; RV32-NEXT:    andi a6, a6, 1
-; RV32-NEXT:    or t6, a1, t0
-; RV32-NEXT:    or s2, t6, a6
-; RV32-NEXT:    sub t4, t4, s5
-; RV32-NEXT:    li t6, 0
-; RV32-NEXT:    li s0, 0
-; RV32-NEXT:    beqz s2, .LBB1_28
-; RV32-NEXT:  .LBB1_26: # %udiv-do-while
-; RV32-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32-NEXT:    srli s2, t4, 31
-; RV32-NEXT:    slli s3, t2, 1
-; RV32-NEXT:    slli t4, t4, 1
-; RV32-NEXT:    or s2, s3, s2
-; RV32-NEXT:    andi s1, s1, 1
-; RV32-NEXT:    or t4, t4, s1
-; RV32-NEXT:    bne a2, s2, .LBB1_24
-; RV32-NEXT:  # %bb.27: # in Loop: Header=BB1_26 Depth=1
-; RV32-NEXT:    sltu s1, t1, t4
-; RV32-NEXT:    j .LBB1_25
-; RV32-NEXT:  .LBB1_28: # %udiv-loop-exit
-; RV32-NEXT:    srli a2, t3, 31
-; RV32-NEXT:    slli a3, t5, 1
-; RV32-NEXT:    srli a1, t5, 31
-; RV32-NEXT:    slli a4, t3, 1
-; RV32-NEXT:    or t3, a3, a2
-; RV32-NEXT:    or t5, a7, a4
-; RV32-NEXT:  .LBB1_29: # %udiv-end
-; RV32-NEXT:    sw t5, 0(a0)
-; RV32-NEXT:    sw t3, 4(a0)
-; RV32-NEXT:    sb a1, 8(a0)
-; RV32-NEXT:    lw s0, 92(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw s1, 88(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw s2, 84(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw s3, 80(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw s4, 76(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw s5, 72(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 96
+; RV32-NEXT:    sw a0, 24(sp)
+; RV32-NEXT:    sw a3, 28(sp)
+; RV32-NEXT:    sw a4, 16(sp)
+; RV32-NEXT:    addi a0, sp, 40
+; RV32-NEXT:    addi a1, sp, 24
+; RV32-NEXT:    addi a2, sp, 8
+; RV32-NEXT:    sw a6, 32(sp)
+; RV32-NEXT:    call __udivti3
+; RV32-NEXT:    lw a0, 48(sp)
+; RV32-NEXT:    lw a1, 40(sp)
+; RV32-NEXT:    lw a2, 44(sp)
+; RV32-NEXT:    andi a0, a0, 1
+; RV32-NEXT:    sw a1, 0(s0)
+; RV32-NEXT:    sw a2, 4(s0)
+; RV32-NEXT:    sb a0, 8(s0)
+; RV32-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 64
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: udiv_i65:
@@ -456,552 +74,43 @@ define i65 @udiv_i65(i65 %x, i65 %y) nounwind {
 
 define i128 @udiv_i128(i128 %x, i128 %y) nounwind {
 ; RV32-LABEL: udiv_i128:
-; RV32:       # %bb.0: # %_udiv-special-cases
-; RV32-NEXT:    addi sp, sp, -144
-; RV32-NEXT:    sw ra, 140(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw s0, 136(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw s1, 132(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw s2, 128(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw s3, 124(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw s4, 120(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw s5, 116(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw s6, 112(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw s7, 108(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw s8, 104(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw s9, 100(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw s10, 96(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw s11, 92(sp) # 4-byte Folded Spill
-; RV32-NEXT:    mv a4, a0
-; RV32-NEXT:    lw ra, 0(a2)
-; RV32-NEXT:    lw a5, 4(a2)
-; RV32-NEXT:    lw s9, 8(a2)
-; RV32-NEXT:    lw s10, 12(a2)
-; RV32-NEXT:    lui t4, 349525
-; RV32-NEXT:    addi t4, t4, 1365
-; RV32-NEXT:    lui t3, 209715
-; RV32-NEXT:    addi t3, t3, 819
-; RV32-NEXT:    lui a7, 61681
-; RV32-NEXT:    addi a7, a7, -241
-; RV32-NEXT:    bnez a5, .LBB2_2
-; RV32-NEXT:  # %bb.1: # %_udiv-special-cases
-; RV32-NEXT:    srli a0, ra, 1
-; RV32-NEXT:    or a0, ra, a0
-; RV32-NEXT:    srli a6, a0, 2
-; RV32-NEXT:    or a0, a0, a6
-; RV32-NEXT:    srli a6, a0, 4
-; RV32-NEXT:    or a0, a0, a6
-; RV32-NEXT:    srli a6, a0, 8
-; RV32-NEXT:    or a0, a0, a6
-; RV32-NEXT:    srli a6, a0, 16
-; RV32-NEXT:    or a0, a0, a6
-; RV32-NEXT:    not a0, a0
-; RV32-NEXT:    srli a6, a0, 1
-; RV32-NEXT:    and a6, a6, t4
-; RV32-NEXT:    sub a0, a0, a6
-; RV32-NEXT:    and a6, a0, t3
-; RV32-NEXT:    srli a0, a0, 2
-; RV32-NEXT:    and a0, a0, t3
-; RV32-NEXT:    add a0, a6, a0
-; RV32-NEXT:    srli a6, a0, 4
-; RV32-NEXT:    add a0, a0, a6
-; RV32-NEXT:    and a0, a0, a7
-; RV32-NEXT:    slli a6, a0, 8
-; RV32-NEXT:    add a0, a0, a6
-; RV32-NEXT:    slli a6, a0, 16
-; RV32-NEXT:    add a0, a0, a6
-; RV32-NEXT:    srli a0, a0, 24
-; RV32-NEXT:    addi t6, a0, 32
-; RV32-NEXT:    j .LBB2_3
-; RV32-NEXT:  .LBB2_2:
-; RV32-NEXT:    srli a0, a5, 1
-; RV32-NEXT:    or a0, a5, a0
-; RV32-NEXT:    srli a6, a0, 2
-; RV32-NEXT:    or a0, a0, a6
-; RV32-NEXT:    srli a6, a0, 4
-; RV32-NEXT:    or a0, a0, a6
-; RV32-NEXT:    srli a6, a0, 8
-; RV32-NEXT:    or a0, a0, a6
-; RV32-NEXT:    srli a6, a0, 16
-; RV32-NEXT:    or a0, a0, a6
-; RV32-NEXT:    not a0, a0
-; RV32-NEXT:    srli a6, a0, 1
-; RV32-NEXT:    and a6, a6, t4
-; RV32-NEXT:    sub a0, a0, a6
-; RV32-NEXT:    and a6, a0, t3
-; RV32-NEXT:    srli a0, a0, 2
-; RV32-NEXT:    and a0, a0, t3
-; RV32-NEXT:    add a0, a6, a0
-; RV32-NEXT:    srli a6, a0, 4
-; RV32-NEXT:    add a0, a0, a6
-; RV32-NEXT:    and a0, a0, a7
-; RV32-NEXT:    slli a6, a0, 8
-; RV32-NEXT:    add a0, a0, a6
-; RV32-NEXT:    slli a6, a0, 16
-; RV32-NEXT:    add a0, a0, a6
-; RV32-NEXT:    srli t6, a0, 24
-; RV32-NEXT:  .LBB2_3: # %_udiv-special-cases
-; RV32-NEXT:    lw a6, 4(a1)
-; RV32-NEXT:    or s1, s9, s10
-; RV32-NEXT:    bnez s10, .LBB2_5
-; RV32-NEXT:  # %bb.4: # %_udiv-special-cases
-; RV32-NEXT:    srli a0, s9, 1
-; RV32-NEXT:    or a0, s9, a0
-; RV32-NEXT:    srli t0, a0, 2
-; RV32-NEXT:    or a0, a0, t0
-; RV32-NEXT:    srli t0, a0, 4
-; RV32-NEXT:    or a0, a0, t0
-; RV32-NEXT:    srli t0, a0, 8
-; RV32-NEXT:    or a0, a0, t0
-; RV32-NEXT:    srli t0, a0, 16
-; RV32-NEXT:    or a0, a0, t0
-; RV32-NEXT:    not a0, a0
-; RV32-NEXT:    srli t0, a0, 1
-; RV32-NEXT:    and t0, t0, t4
-; RV32-NEXT:    sub a0, a0, t0
-; RV32-NEXT:    and t0, a0, t3
-; RV32-NEXT:    srli a0, a0, 2
-; RV32-NEXT:    and a0, a0, t3
-; RV32-NEXT:    add a0, t0, a0
-; RV32-NEXT:    srli t0, a0, 4
-; RV32-NEXT:    add a0, a0, t0
-; RV32-NEXT:    and a0, a0, a7
-; RV32-NEXT:    slli t0, a0, 8
-; RV32-NEXT:    add a0, a0, t0
-; RV32-NEXT:    slli t0, a0, 16
-; RV32-NEXT:    add a0, a0, t0
-; RV32-NEXT:    srli a0, a0, 24
-; RV32-NEXT:    addi t5, a0, 32
-; RV32-NEXT:    j .LBB2_6
-; RV32-NEXT:  .LBB2_5:
-; RV32-NEXT:    srli a0, s10, 1
-; RV32-NEXT:    or a0, s10, a0
-; RV32-NEXT:    srli t0, a0, 2
-; RV32-NEXT:    or a0, a0, t0
-; RV32-NEXT:    srli t0, a0, 4
-; RV32-NEXT:    or a0, a0, t0
-; RV32-NEXT:    srli t0, a0, 8
-; RV32-NEXT:    or a0, a0, t0
-; RV32-NEXT:    srli t0, a0, 16
-; RV32-NEXT:    or a0, a0, t0
-; RV32-NEXT:    not a0, a0
-; RV32-NEXT:    srli t0, a0, 1
-; RV32-NEXT:    and t0, t0, t4
-; RV32-NEXT:    sub a0, a0, t0
-; RV32-NEXT:    and t0, a0, t3
-; RV32-NEXT:    srli a0, a0, 2
-; RV32-NEXT:    and a0, a0, t3
-; RV32-NEXT:    add a0, t0, a0
-; RV32-NEXT:    srli t0, a0, 4
-; RV32-NEXT:    add a0, a0, t0
-; RV32-NEXT:    and a0, a0, a7
-; RV32-NEXT:    slli t0, a0, 8
-; RV32-NEXT:    add a0, a0, t0
-; RV32-NEXT:    slli t0, a0, 16
-; RV32-NEXT:    add a0, a0, t0
-; RV32-NEXT:    srli t5, a0, 24
-; RV32-NEXT:  .LBB2_6: # %_udiv-special-cases
-; RV32-NEXT:    lw t0, 12(a1)
-; RV32-NEXT:    addi s0, t6, 64
-; RV32-NEXT:    bnez s1, .LBB2_8
-; RV32-NEXT:  # %bb.7: # %_udiv-special-cases
-; RV32-NEXT:    mv t5, s0
-; RV32-NEXT:  .LBB2_8: # %_udiv-special-cases
-; RV32-NEXT:    lw t2, 0(a1)
-; RV32-NEXT:    lw t1, 8(a1)
-; RV32-NEXT:    snez a1, s1
-; RV32-NEXT:    bnez a6, .LBB2_10
-; RV32-NEXT:  # %bb.9: # %_udiv-special-cases
-; RV32-NEXT:    srli a0, t2, 1
-; RV32-NEXT:    or a0, t2, a0
-; RV32-NEXT:    srli s1, a0, 2
-; RV32-NEXT:    or a0, a0, s1
-; RV32-NEXT:    srli s1, a0, 4
-; RV32-NEXT:    or a0, a0, s1
-; RV32-NEXT:    srli s1, a0, 8
-; RV32-NEXT:    or a0, a0, s1
-; RV32-NEXT:    srli s1, a0, 16
-; RV32-NEXT:    or a0, a0, s1
-; RV32-NEXT:    not a0, a0
-; RV32-NEXT:    srli s1, a0, 1
-; RV32-NEXT:    and s1, s1, t4
-; RV32-NEXT:    sub a0, a0, s1
-; RV32-NEXT:    and s1, a0, t3
-; RV32-NEXT:    srli a0, a0, 2
-; RV32-NEXT:    and a0, a0, t3
-; RV32-NEXT:    add a0, s1, a0
-; RV32-NEXT:    srli s1, a0, 4
-; RV32-NEXT:    add a0, a0, s1
-; RV32-NEXT:    and a0, a0, a7
-; RV32-NEXT:    slli s1, a0, 8
-; RV32-NEXT:    add a0, a0, s1
-; RV32-NEXT:    slli s1, a0, 16
-; RV32-NEXT:    add a0, a0, s1
-; RV32-NEXT:    srli a0, a0, 24
-; RV32-NEXT:    addi a0, a0, 32
-; RV32-NEXT:    j .LBB2_11
-; RV32-NEXT:  .LBB2_10:
-; RV32-NEXT:    srli a0, a6, 1
-; RV32-NEXT:    or a0, a6, a0
-; RV32-NEXT:    srli s1, a0, 2
-; RV32-NEXT:    or a0, a0, s1
-; RV32-NEXT:    srli s1, a0, 4
-; RV32-NEXT:    or a0, a0, s1
-; RV32-NEXT:    srli s1, a0, 8
-; RV32-NEXT:    or a0, a0, s1
-; RV32-NEXT:    srli s1, a0, 16
-; RV32-NEXT:    or a0, a0, s1
-; RV32-NEXT:    not a0, a0
-; RV32-NEXT:    srli s1, a0, 1
-; RV32-NEXT:    and s1, s1, t4
-; RV32-NEXT:    sub a0, a0, s1
-; RV32-NEXT:    and s1, a0, t3
-; RV32-NEXT:    srli a0, a0, 2
-; RV32-NEXT:    and a0, a0, t3
-; RV32-NEXT:    add a0, s1, a0
-; RV32-NEXT:    srli s1, a0, 4
-; RV32-NEXT:    add a0, a0, s1
-; RV32-NEXT:    and a0, a0, a7
-; RV32-NEXT:    slli s1, a0, 8
-; RV32-NEXT:    add a0, a0, s1
-; RV32-NEXT:    slli s1, a0, 16
-; RV32-NEXT:    add a0, a0, s1
-; RV32-NEXT:    srli a0, a0, 24
-; RV32-NEXT:  .LBB2_11: # %_udiv-special-cases
-; RV32-NEXT:    or s1, a5, s10
-; RV32-NEXT:    or s2, ra, s9
-; RV32-NEXT:    or s3, a6, t0
-; RV32-NEXT:    or s4, t2, t1
-; RV32-NEXT:    sltu t6, s0, t6
-; RV32-NEXT:    addi s0, a1, -1
-; RV32-NEXT:    addi a1, a0, 64
-; RV32-NEXT:    or s5, t1, t0
-; RV32-NEXT:    sltu s6, a1, a0
-; RV32-NEXT:    snez s7, s5
-; RV32-NEXT:    addi s7, s7, -1
-; RV32-NEXT:    bnez t0, .LBB2_13
-; RV32-NEXT:  # %bb.12: # %_udiv-special-cases
-; RV32-NEXT:    srli a0, t1, 1
-; RV32-NEXT:    or a0, t1, a0
-; RV32-NEXT:    srli s8, a0, 2
-; RV32-NEXT:    or a0, a0, s8
-; RV32-NEXT:    srli s8, a0, 4
-; RV32-NEXT:    or a0, a0, s8
-; RV32-NEXT:    srli s8, a0, 8
-; RV32-NEXT:    or a0, a0, s8
-; RV32-NEXT:    srli s8, a0, 16
-; RV32-NEXT:    or a0, a0, s8
-; RV32-NEXT:    not a0, a0
-; RV32-NEXT:    srli s8, a0, 1
-; RV32-NEXT:    and t4, s8, t4
-; RV32-NEXT:    sub a0, a0, t4
-; RV32-NEXT:    and t4, a0, t3
-; RV32-NEXT:    srli a0, a0, 2
-; RV32-NEXT:    and a0, a0, t3
-; RV32-NEXT:    add a0, t4, a0
-; RV32-NEXT:    srli t3, a0, 4
-; RV32-NEXT:    add a0, a0, t3
-; RV32-NEXT:    and a0, a0, a7
-; RV32-NEXT:    slli a7, a0, 8
-; RV32-NEXT:    add a0, a0, a7
-; RV32-NEXT:    slli a7, a0, 16
-; RV32-NEXT:    add a0, a0, a7
-; RV32-NEXT:    srli a0, a0, 24
-; RV32-NEXT:    addi a0, a0, 32
-; RV32-NEXT:    j .LBB2_14
-; RV32-NEXT:  .LBB2_13:
-; RV32-NEXT:    srli a0, t0, 1
-; RV32-NEXT:    or a0, t0, a0
-; RV32-NEXT:    srli s8, a0, 2
-; RV32-NEXT:    or a0, a0, s8
-; RV32-NEXT:    srli s8, a0, 4
-; RV32-NEXT:    or a0, a0, s8
-; RV32-NEXT:    srli s8, a0, 8
-; RV32-NEXT:    or a0, a0, s8
-; RV32-NEXT:    srli s8, a0, 16
-; RV32-NEXT:    or a0, a0, s8
-; RV32-NEXT:    not a0, a0
-; RV32-NEXT:    srli s8, a0, 1
-; RV32-NEXT:    and t4, s8, t4
-; RV32-NEXT:    sub a0, a0, t4
-; RV32-NEXT:    and t4, a0, t3
-; RV32-NEXT:    srli a0, a0, 2
-; RV32-NEXT:    and a0, a0, t3
-; RV32-NEXT:    add a0, t4, a0
-; RV32-NEXT:    srli t3, a0, 4
-; RV32-NEXT:    add a0, a0, t3
-; RV32-NEXT:    and a0, a0, a7
-; RV32-NEXT:    slli a7, a0, 8
-; RV32-NEXT:    add a0, a0, a7
-; RV32-NEXT:    slli a7, a0, 16
-; RV32-NEXT:    add a0, a0, a7
-; RV32-NEXT:    srli a0, a0, 24
-; RV32-NEXT:  .LBB2_14: # %_udiv-special-cases
-; RV32-NEXT:    or t4, s2, s1
-; RV32-NEXT:    or s1, s4, s3
-; RV32-NEXT:    and a7, s0, t6
-; RV32-NEXT:    and t3, s7, s6
-; RV32-NEXT:    bnez s5, .LBB2_16
-; RV32-NEXT:  # %bb.15: # %_udiv-special-cases
-; RV32-NEXT:    mv a0, a1
-; RV32-NEXT:  .LBB2_16: # %_udiv-special-cases
-; RV32-NEXT:    seqz a1, t4
-; RV32-NEXT:    seqz t4, s1
-; RV32-NEXT:    sltu t6, t5, a0
-; RV32-NEXT:    sub s1, a7, t3
-; RV32-NEXT:    mv s0, t6
-; RV32-NEXT:    beq a7, t3, .LBB2_18
-; RV32-NEXT:  # %bb.17: # %_udiv-special-cases
-; RV32-NEXT:    sltu s0, a7, t3
-; RV32-NEXT:  .LBB2_18: # %_udiv-special-cases
-; RV32-NEXT:    sub t3, s1, t6
-; RV32-NEXT:    or a1, a1, t4
-; RV32-NEXT:    neg t6, s0
-; RV32-NEXT:    seqz s0, s0
-; RV32-NEXT:    addi s0, s0, -1
-; RV32-NEXT:    or a7, t6, s0
-; RV32-NEXT:    sub t4, t5, a0
-; RV32-NEXT:    beqz a7, .LBB2_20
-; RV32-NEXT:  # %bb.19: # %_udiv-special-cases
-; RV32-NEXT:    snez a0, a7
-; RV32-NEXT:    j .LBB2_21
-; RV32-NEXT:  .LBB2_20:
-; RV32-NEXT:    snez a0, t3
-; RV32-NEXT:    sltiu a7, t4, 128
-; RV32-NEXT:    xori a7, a7, 1
-; RV32-NEXT:    or a0, a7, a0
-; RV32-NEXT:  .LBB2_21: # %_udiv-special-cases
-; RV32-NEXT:    or s1, a1, a0
-; RV32-NEXT:    addi a1, s1, -1
-; RV32-NEXT:    and a7, a1, t0
-; RV32-NEXT:    and t5, a1, t1
-; RV32-NEXT:    and a0, a1, a6
-; RV32-NEXT:    and a1, a1, t2
-; RV32-NEXT:    bnez s1, .LBB2_25
-; RV32-NEXT:  # %bb.22: # %_udiv-special-cases
-; RV32-NEXT:    xori s1, t4, 127
-; RV32-NEXT:    or s1, s1, t6
-; RV32-NEXT:    or s2, t3, s0
-; RV32-NEXT:    or s1, s1, s2
-; RV32-NEXT:    beqz s1, .LBB2_25
-; RV32-NEXT:  # %bb.23: # %udiv-bb1
-; RV32-NEXT:    sw a4, 8(sp) # 4-byte Folded Spill
-; RV32-NEXT:    addi a7, t4, 1
-; RV32-NEXT:    sw zero, 56(sp)
-; RV32-NEXT:    sw zero, 60(sp)
-; RV32-NEXT:    sw zero, 64(sp)
-; RV32-NEXT:    sw zero, 68(sp)
-; RV32-NEXT:    sw t2, 72(sp)
-; RV32-NEXT:    sw a6, 76(sp)
-; RV32-NEXT:    sw t1, 80(sp)
-; RV32-NEXT:    sw t0, 84(sp)
-; RV32-NEXT:    li a0, 127
-; RV32-NEXT:    addi a2, sp, 72
-; RV32-NEXT:    seqz a4, a7
-; RV32-NEXT:    sub a0, a0, t4
-; RV32-NEXT:    add t3, t3, a4
-; RV32-NEXT:    andi a4, a0, 31
-; RV32-NEXT:    srli a0, a0, 3
-; RV32-NEXT:    or t5, a7, t3
-; RV32-NEXT:    xori a4, a4, 31
-; RV32-NEXT:    andi a0, a0, 12
-; RV32-NEXT:    seqz t5, t5
-; RV32-NEXT:    sub a2, a2, a0
-; RV32-NEXT:    add t5, t6, t5
-; RV32-NEXT:    lw a0, 0(a2)
-; RV32-NEXT:    lw s1, 4(a2)
-; RV32-NEXT:    lw s3, 8(a2)
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -64
+; RV32-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 56(sp) # 4-byte Folded Spill
+; RV32-NEXT:    lw a3, 0(a1)
+; RV32-NEXT:    lw a4, 4(a1)
+; RV32-NEXT:    lw a5, 8(a1)
+; RV32-NEXT:    lw a6, 12(a1)
+; RV32-NEXT:    lw a1, 0(a2)
+; RV32-NEXT:    lw a7, 4(a2)
+; RV32-NEXT:    lw t0, 8(a2)
 ; RV32-NEXT:    lw a2, 12(a2)
-; RV32-NEXT:    sltu t6, t5, t6
-; RV32-NEXT:    or s2, a7, t5
-; RV32-NEXT:    add t6, s0, t6
-; RV32-NEXT:    or s0, t3, t6
-; RV32-NEXT:    or s0, s2, s0
-; RV32-NEXT:    srli s2, s3, 1
-; RV32-NEXT:    srli s4, s1, 1
-; RV32-NEXT:    srli s5, a0, 1
-; RV32-NEXT:    srl s2, s2, a4
-; RV32-NEXT:    srl s4, s4, a4
-; RV32-NEXT:    srl a4, s5, a4
-; RV32-NEXT:    not t4, t4
-; RV32-NEXT:    sll a2, a2, t4
-; RV32-NEXT:    or s2, a2, s2
-; RV32-NEXT:    sll a2, s3, t4
-; RV32-NEXT:    or a2, a2, s4
-; RV32-NEXT:    sll s1, s1, t4
-; RV32-NEXT:    or s1, s1, a4
-; RV32-NEXT:    sll t4, a0, t4
-; RV32-NEXT:    li a1, 0
-; RV32-NEXT:    bnez s0, .LBB2_26
-; RV32-NEXT:  .LBB2_24: # %udiv-loop-exit
-; RV32-NEXT:    srli a0, t4, 31
-; RV32-NEXT:    slli a3, s1, 1
-; RV32-NEXT:    srli s1, s1, 31
-; RV32-NEXT:    or a0, a3, a0
-; RV32-NEXT:    slli a3, a2, 1
-; RV32-NEXT:    srli s0, a2, 31
-; RV32-NEXT:    slli s2, s2, 1
-; RV32-NEXT:    slli t4, t4, 1
-; RV32-NEXT:    or t5, a3, s1
-; RV32-NEXT:    or a7, s2, s0
-; RV32-NEXT:    or a1, a1, t4
-; RV32-NEXT:    lw a4, 8(sp) # 4-byte Folded Reload
-; RV32-NEXT:  .LBB2_25: # %udiv-end
-; RV32-NEXT:    sw a1, 0(a4)
-; RV32-NEXT:    sw a0, 4(a4)
-; RV32-NEXT:    sw t5, 8(a4)
-; RV32-NEXT:    sw a7, 12(a4)
-; RV32-NEXT:    lw ra, 140(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw s0, 136(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw s1, 132(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw s2, 128(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw s3, 124(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw s4, 120(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw s5, 116(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw s6, 112(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw s7, 108(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw s8, 104(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw s9, 100(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw s10, 96(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw s11, 92(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 144
+; RV32-NEXT:    mv s0, a0
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    sw a7, 12(sp)
+; RV32-NEXT:    sw t0, 16(sp)
+; RV32-NEXT:    sw a2, 20(sp)
+; RV32-NEXT:    addi a0, sp, 40
+; RV32-NEXT:    addi a1, sp, 24
+; RV32-NEXT:    addi a2, sp, 8
+; RV32-NEXT:    sw a3, 24(sp)
+; RV32-NEXT:    sw a4, 28(sp)
+; RV32-NEXT:    sw a5, 32(sp)
+; RV32-NEXT:    sw a6, 36(sp)
+; RV32-NEXT:    call __udivti3
+; RV32-NEXT:    lw a0, 40(sp)
+; RV32-NEXT:    lw a1, 44(sp)
+; RV32-NEXT:    lw a2, 48(sp)
+; RV32-NEXT:    lw a3, 52(sp)
+; RV32-NEXT:    sw a0, 0(s0)
+; RV32-NEXT:    sw a1, 4(s0)
+; RV32-NEXT:    sw a2, 8(s0)
+; RV32-NEXT:    sw a3, 12(s0)
+; RV32-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 64
 ; RV32-NEXT:    ret
-; RV32-NEXT:  .LBB2_26: # %udiv-preheader
-; RV32-NEXT:    li s3, 0
-; RV32-NEXT:    li s4, 0
-; RV32-NEXT:    li s5, 0
-; RV32-NEXT:    sw zero, 40(sp)
-; RV32-NEXT:    sw zero, 44(sp)
-; RV32-NEXT:    sw zero, 48(sp)
-; RV32-NEXT:    sw zero, 52(sp)
-; RV32-NEXT:    sw t2, 24(sp)
-; RV32-NEXT:    sw a6, 28(sp)
-; RV32-NEXT:    sw t1, 32(sp)
-; RV32-NEXT:    sw t0, 36(sp)
-; RV32-NEXT:    srli a0, a7, 3
-; RV32-NEXT:    addi a4, sp, 24
-; RV32-NEXT:    andi a0, a0, 12
-; RV32-NEXT:    add a0, a4, a0
-; RV32-NEXT:    lw a4, 4(a0)
-; RV32-NEXT:    lw a6, 8(a0)
-; RV32-NEXT:    lw t2, 12(a0)
-; RV32-NEXT:    lw a0, 0(a0)
-; RV32-NEXT:    andi t0, a7, 31
-; RV32-NEXT:    xori t0, t0, 31
-; RV32-NEXT:    slli t1, t2, 1
-; RV32-NEXT:    slli s0, a6, 1
-; RV32-NEXT:    slli s6, a4, 1
-; RV32-NEXT:    sll t1, t1, t0
-; RV32-NEXT:    sll s0, s0, t0
-; RV32-NEXT:    sll s8, s6, t0
-; RV32-NEXT:    seqz t0, ra
-; RV32-NEXT:    srl a6, a6, a7
-; RV32-NEXT:    or s6, a6, t1
-; RV32-NEXT:    or t1, ra, a5
-; RV32-NEXT:    sw a5, 20(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sub a6, a5, t0
-; RV32-NEXT:    seqz t1, t1
-; RV32-NEXT:    srl a4, a4, a7
-; RV32-NEXT:    or s7, a4, s0
-; RV32-NEXT:    sub t0, s9, t1
-; RV32-NEXT:    mv a3, s9
-; RV32-NEXT:    sltu a4, s9, t1
-; RV32-NEXT:    mv t1, s10
-; RV32-NEXT:    sub a4, s10, a4
-; RV32-NEXT:    sw a4, 16(sp) # 4-byte Folded Spill
-; RV32-NEXT:    srl a0, a0, a7
-; RV32-NEXT:    srl s9, t2, a7
-; RV32-NEXT:    or s8, a0, s8
-; RV32-NEXT:    addi a0, ra, -1
-; RV32-NEXT:    sw a0, 12(sp) # 4-byte Folded Spill
-; RV32-NEXT:    j .LBB2_28
-; RV32-NEXT:  .LBB2_27: # %udiv-do-while
-; RV32-NEXT:    # in Loop: Header=BB2_28 Depth=1
-; RV32-NEXT:    srli s0, a2, 31
-; RV32-NEXT:    slli s2, s2, 1
-; RV32-NEXT:    sub a0, s11, a0
-; RV32-NEXT:    srli s11, s1, 31
-; RV32-NEXT:    slli a2, a2, 1
-; RV32-NEXT:    or s0, s2, s0
-; RV32-NEXT:    srli s2, t4, 31
-; RV32-NEXT:    slli s1, s1, 1
-; RV32-NEXT:    slli t4, t4, 1
-; RV32-NEXT:    or a2, a2, s11
-; RV32-NEXT:    and s11, s7, t1
-; RV32-NEXT:    or s1, s1, s2
-; RV32-NEXT:    and s2, s7, a3
-; RV32-NEXT:    or t4, a1, t4
-; RV32-NEXT:    sub a4, t2, s2
-; RV32-NEXT:    sltu t2, t2, s2
-; RV32-NEXT:    or s2, a7, t3
-; RV32-NEXT:    sub s11, s6, s11
-; RV32-NEXT:    seqz s6, a7
-; RV32-NEXT:    addi a7, a7, -1
-; RV32-NEXT:    andi a1, s7, 1
-; RV32-NEXT:    sub s7, a0, ra
-; RV32-NEXT:    seqz a0, s2
-; RV32-NEXT:    sub t3, t3, s6
-; RV32-NEXT:    or s1, s3, s1
-; RV32-NEXT:    or a2, s4, a2
-; RV32-NEXT:    or s2, s5, s0
-; RV32-NEXT:    sub s6, a4, s9
-; RV32-NEXT:    sltu a4, a4, s9
-; RV32-NEXT:    sub t2, s11, t2
-; RV32-NEXT:    sltu s0, t5, a0
-; RV32-NEXT:    sub t5, t5, a0
-; RV32-NEXT:    sub s9, t2, a4
-; RV32-NEXT:    sub t6, t6, s0
-; RV32-NEXT:    or a0, t3, t6
-; RV32-NEXT:    or a4, a7, t5
-; RV32-NEXT:    or a0, a4, a0
-; RV32-NEXT:    sub s8, s8, s10
-; RV32-NEXT:    li s3, 0
-; RV32-NEXT:    li s4, 0
-; RV32-NEXT:    li s5, 0
-; RV32-NEXT:    mv ra, a5
-; RV32-NEXT:    beqz a0, .LBB2_24
-; RV32-NEXT:  .LBB2_28: # %udiv-do-while
-; RV32-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32-NEXT:    srli a0, s8, 31
-; RV32-NEXT:    slli t2, s7, 1
-; RV32-NEXT:    slli s8, s8, 1
-; RV32-NEXT:    or s11, t2, a0
-; RV32-NEXT:    srli a0, s2, 31
-; RV32-NEXT:    or s8, s8, a0
-; RV32-NEXT:    beq a6, s11, .LBB2_30
-; RV32-NEXT:  # %bb.29: # %udiv-do-while
-; RV32-NEXT:    # in Loop: Header=BB2_28 Depth=1
-; RV32-NEXT:    sltu a0, a6, s11
-; RV32-NEXT:    j .LBB2_31
-; RV32-NEXT:  .LBB2_30: # in Loop: Header=BB2_28 Depth=1
-; RV32-NEXT:    lw a0, 12(sp) # 4-byte Folded Reload
-; RV32-NEXT:    sltu a0, a0, s8
-; RV32-NEXT:  .LBB2_31: # %udiv-do-while
-; RV32-NEXT:    # in Loop: Header=BB2_28 Depth=1
-; RV32-NEXT:    srli t2, s6, 31
-; RV32-NEXT:    slli s9, s9, 1
-; RV32-NEXT:    srli s7, s7, 31
-; RV32-NEXT:    slli s10, s6, 1
-; RV32-NEXT:    or s6, s9, t2
-; RV32-NEXT:    or t2, s10, s7
-; RV32-NEXT:    sub s7, t0, t2
-; RV32-NEXT:    sltu s9, t0, t2
-; RV32-NEXT:    lw a4, 16(sp) # 4-byte Folded Reload
-; RV32-NEXT:    sub s10, a4, s6
-; RV32-NEXT:    sltu a0, s7, a0
-; RV32-NEXT:    sub s7, s10, s9
-; RV32-NEXT:    sub a0, s7, a0
-; RV32-NEXT:    srai s7, a0, 31
-; RV32-NEXT:    mv a5, ra
-; RV32-NEXT:    and s10, s7, ra
-; RV32-NEXT:    lw a0, 20(sp) # 4-byte Folded Reload
-; RV32-NEXT:    and a0, s7, a0
-; RV32-NEXT:    sltu ra, s8, s10
-; RV32-NEXT:    mv s9, ra
-; RV32-NEXT:    beq s11, a0, .LBB2_27
-; RV32-NEXT:  # %bb.32: # %udiv-do-while
-; RV32-NEXT:    # in Loop: Header=BB2_28 Depth=1
-; RV32-NEXT:    sltu s9, s11, a0
-; RV32-NEXT:    j .LBB2_27
 ;
 ; RV64-LABEL: udiv_i128:
 ; RV64:       # %bb.0:
@@ -1050,129 +159,14 @@ define i129 @udiv_i129(i129 %x, i129 %y) nounwind {
 ; RV32-NEXT:    srli a2, a5, 1
 ; RV32-NEXT:    sw a5, 24(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    slli a3, a5, 31
-; RV32-NEXT:    or a6, a2, a0
+; RV32-NEXT:    or a0, a2, a0
 ; RV32-NEXT:    sw a4, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT:    srli a0, a4, 1
-; RV32-NEXT:    or a7, a0, a3
-; RV32-NEXT:    bnez a6, .LBB3_2
+; RV32-NEXT:    srli a2, a4, 1
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    bnez a0, .LBB3_2
 ; RV32-NEXT:  # %bb.1: # %_udiv-special-cases
-; RV32-NEXT:    srli a0, a7, 1
-; RV32-NEXT:    or a0, a7, a0
-; RV32-NEXT:    srli a2, a0, 2
-; RV32-NEXT:    or a0, a0, a2
-; RV32-NEXT:    srli a2, a0, 4
-; RV32-NEXT:    or a0, a0, a2
-; RV32-NEXT:    srli a2, a0, 8
-; RV32-NEXT:    or a0, a0, a2
-; RV32-NEXT:    srli a2, a0, 16
-; RV32-NEXT:    or a0, a0, a2
-; RV32-NEXT:    not a0, a0
-; RV32-NEXT:    srli a2, a0, 1
-; RV32-NEXT:    and a2, a2, t5
-; RV32-NEXT:    sub a0, a0, a2
-; RV32-NEXT:    and a2, a0, t4
-; RV32-NEXT:    srli a0, a0, 2
-; RV32-NEXT:    and a0, a0, t4
-; RV32-NEXT:    add a0, a2, a0
-; RV32-NEXT:    srli a2, a0, 4
-; RV32-NEXT:    add a0, a0, a2
-; RV32-NEXT:    and a0, a0, t3
-; RV32-NEXT:    slli a2, a0, 8
-; RV32-NEXT:    add a0, a0, a2
-; RV32-NEXT:    slli a2, a0, 16
-; RV32-NEXT:    add a0, a0, a2
-; RV32-NEXT:    srli a0, a0, 24
-; RV32-NEXT:    addi a4, a0, 32
-; RV32-NEXT:    j .LBB3_3
-; RV32-NEXT:  .LBB3_2:
-; RV32-NEXT:    srli a0, a6, 1
-; RV32-NEXT:    or a0, a6, a0
-; RV32-NEXT:    srli a2, a0, 2
-; RV32-NEXT:    or a0, a0, a2
-; RV32-NEXT:    srli a2, a0, 4
-; RV32-NEXT:    or a0, a0, a2
-; RV32-NEXT:    srli a2, a0, 8
-; RV32-NEXT:    or a0, a0, a2
-; RV32-NEXT:    srli a2, a0, 16
-; RV32-NEXT:    or a0, a0, a2
-; RV32-NEXT:    not a0, a0
-; RV32-NEXT:    srli a2, a0, 1
-; RV32-NEXT:    and a2, a2, t5
-; RV32-NEXT:    sub a0, a0, a2
-; RV32-NEXT:    and a2, a0, t4
-; RV32-NEXT:    srli a0, a0, 2
-; RV32-NEXT:    and a0, a0, t4
-; RV32-NEXT:    add a0, a2, a0
-; RV32-NEXT:    srli a2, a0, 4
-; RV32-NEXT:    add a0, a0, a2
-; RV32-NEXT:    and a0, a0, t3
-; RV32-NEXT:    slli a2, a0, 8
-; RV32-NEXT:    add a0, a0, a2
-; RV32-NEXT:    slli a2, a0, 16
-; RV32-NEXT:    add a0, a0, a2
-; RV32-NEXT:    srli a4, a0, 24
-; RV32-NEXT:  .LBB3_3: # %_udiv-special-cases
-; RV32-NEXT:    lw a5, 28(sp) # 4-byte Folded Reload
-; RV32-NEXT:    srli a0, a5, 1
-; RV32-NEXT:    slli a3, t2, 31
-; RV32-NEXT:    slli a5, a5, 31
-; RV32-NEXT:    lw a2, 32(sp) # 4-byte Folded Reload
-; RV32-NEXT:    srli t0, a2, 1
-; RV32-NEXT:    lw a2, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT:    slli a2, a2, 31
-; RV32-NEXT:    li s2, 64
-; RV32-NEXT:    bnez a2, .LBB3_5
-; RV32-NEXT:  # %bb.4: # %_udiv-special-cases
-; RV32-NEXT:    li t1, 64
-; RV32-NEXT:    j .LBB3_6
-; RV32-NEXT:  .LBB3_5:
-; RV32-NEXT:    srli t1, a2, 1
-; RV32-NEXT:    or t1, a2, t1
-; RV32-NEXT:    srli t6, t1, 2
-; RV32-NEXT:    or t1, t1, t6
-; RV32-NEXT:    srli t6, t1, 4
-; RV32-NEXT:    or t1, t1, t6
-; RV32-NEXT:    srli t6, t1, 8
-; RV32-NEXT:    or t1, t1, t6
-; RV32-NEXT:    srli t6, t1, 16
-; RV32-NEXT:    or t1, t1, t6
-; RV32-NEXT:    not t1, t1
-; RV32-NEXT:    srli t6, t1, 1
-; RV32-NEXT:    and t6, t6, t5
-; RV32-NEXT:    sub t1, t1, t6
-; RV32-NEXT:    and t6, t1, t4
-; RV32-NEXT:    srli t1, t1, 2
-; RV32-NEXT:    and t1, t1, t4
-; RV32-NEXT:    add t1, t6, t1
-; RV32-NEXT:    srli t6, t1, 4
-; RV32-NEXT:    add t1, t1, t6
-; RV32-NEXT:    and t1, t1, t3
-; RV32-NEXT:    slli t6, t1, 8
-; RV32-NEXT:    add t1, t1, t6
-; RV32-NEXT:    slli t6, t1, 16
-; RV32-NEXT:    add t1, t1, t6
-; RV32-NEXT:    srli t1, t1, 24
-; RV32-NEXT:  .LBB3_6: # %_udiv-special-cases
-; RV32-NEXT:    or a3, a3, a0
-; RV32-NEXT:    or a5, t0, a5
-; RV32-NEXT:    bnez a2, .LBB3_8
-; RV32-NEXT:  # %bb.7: # %_udiv-special-cases
-; RV32-NEXT:    li t1, 128
-; RV32-NEXT:  .LBB3_8: # %_udiv-special-cases
-; RV32-NEXT:    or t0, a5, a3
-; RV32-NEXT:    addi a2, a4, 64
-; RV32-NEXT:    addi a0, t1, 128
-; RV32-NEXT:    or a6, a6, a3
-; RV32-NEXT:    or a7, a7, a5
-; RV32-NEXT:    or s3, a7, a6
-; RV32-NEXT:    sltu s0, a0, t1
-; RV32-NEXT:    bnez s3, .LBB3_11
-; RV32-NEXT:  # %bb.9: # %_udiv-special-cases
-; RV32-NEXT:    mv t6, s0
-; RV32-NEXT:    beqz a3, .LBB3_12
-; RV32-NEXT:  .LBB3_10:
-; RV32-NEXT:    srli a4, a3, 1
-; RV32-NEXT:    or a3, a3, a4
+; RV32-NEXT:    srli a3, a2, 1
+; RV32-NEXT:    or a3, a2, a3
 ; RV32-NEXT:    srli a4, a3, 2
 ; RV32-NEXT:    or a3, a3, a4
 ; RV32-NEXT:    srli a4, a3, 4
@@ -1196,18 +190,12 @@ define i129 @udiv_i129(i129 %x, i129 %y) nounwind {
 ; RV32-NEXT:    add a3, a3, a4
 ; RV32-NEXT:    slli a4, a3, 16
 ; RV32-NEXT:    add a3, a3, a4
-; RV32-NEXT:    srli s1, a3, 24
-; RV32-NEXT:    beqz t0, .LBB3_13
-; RV32-NEXT:    j .LBB3_14
-; RV32-NEXT:  .LBB3_11:
-; RV32-NEXT:    snez a6, t0
-; RV32-NEXT:    sltu a4, a2, a4
-; RV32-NEXT:    addi a6, a6, -1
-; RV32-NEXT:    and t6, a6, a4
-; RV32-NEXT:    bnez a3, .LBB3_10
-; RV32-NEXT:  .LBB3_12: # %_udiv-special-cases
-; RV32-NEXT:    srli a3, a5, 1
-; RV32-NEXT:    or a3, a5, a3
+; RV32-NEXT:    srli a3, a3, 24
+; RV32-NEXT:    addi a6, a3, 32
+; RV32-NEXT:    j .LBB3_3
+; RV32-NEXT:  .LBB3_2:
+; RV32-NEXT:    srli a3, a0, 1
+; RV32-NEXT:    or a3, a0, a3
 ; RV32-NEXT:    srli a4, a3, 2
 ; RV32-NEXT:    or a3, a3, a4
 ; RV32-NEXT:    srli a4, a3, 4
@@ -1231,31 +219,69 @@ define i129 @udiv_i129(i129 %x, i129 %y) nounwind {
 ; RV32-NEXT:    add a3, a3, a4
 ; RV32-NEXT:    slli a4, a3, 16
 ; RV32-NEXT:    add a3, a3, a4
-; RV32-NEXT:    srli a3, a3, 24
-; RV32-NEXT:    addi s1, a3, 32
-; RV32-NEXT:    bnez t0, .LBB3_14
-; RV32-NEXT:  .LBB3_13: # %_udiv-special-cases
-; RV32-NEXT:    mv s1, a2
-; RV32-NEXT:  .LBB3_14: # %_udiv-special-cases
-; RV32-NEXT:    lw a7, 0(a1)
-; RV32-NEXT:    lw t0, 4(a1)
-; RV32-NEXT:    lw a6, 8(a1)
-; RV32-NEXT:    bnez s3, .LBB3_16
-; RV32-NEXT:  # %bb.15: # %_udiv-special-cases
-; RV32-NEXT:    mv s1, a0
-; RV32-NEXT:  .LBB3_16: # %_udiv-special-cases
-; RV32-NEXT:    lw t1, 12(a1)
-; RV32-NEXT:    lw a1, 16(a1)
-; RV32-NEXT:    slli a0, a6, 31
-; RV32-NEXT:    srli a2, t0, 1
-; RV32-NEXT:    or s4, a2, a0
-; RV32-NEXT:    slli a0, t0, 31
-; RV32-NEXT:    srli a2, a7, 1
-; RV32-NEXT:    or s5, a2, a0
-; RV32-NEXT:    bnez s4, .LBB3_18
-; RV32-NEXT:  # %bb.17: # %_udiv-special-cases
-; RV32-NEXT:    srli a0, s5, 1
-; RV32-NEXT:    or a0, s5, a0
+; RV32-NEXT:    srli a6, a3, 24
+; RV32-NEXT:  .LBB3_3: # %_udiv-special-cases
+; RV32-NEXT:    lw a7, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    srli a3, a7, 1
+; RV32-NEXT:    slli a5, t2, 31
+; RV32-NEXT:    slli a7, a7, 31
+; RV32-NEXT:    lw a4, 32(sp) # 4-byte Folded Reload
+; RV32-NEXT:    srli t0, a4, 1
+; RV32-NEXT:    lw a4, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT:    slli a4, a4, 31
+; RV32-NEXT:    li s3, 64
+; RV32-NEXT:    bnez a4, .LBB3_5
+; RV32-NEXT:  # %bb.4: # %_udiv-special-cases
+; RV32-NEXT:    li t6, 64
+; RV32-NEXT:    j .LBB3_6
+; RV32-NEXT:  .LBB3_5:
+; RV32-NEXT:    srli t1, a4, 1
+; RV32-NEXT:    or t1, a4, t1
+; RV32-NEXT:    srli t6, t1, 2
+; RV32-NEXT:    or t1, t1, t6
+; RV32-NEXT:    srli t6, t1, 4
+; RV32-NEXT:    or t1, t1, t6
+; RV32-NEXT:    srli t6, t1, 8
+; RV32-NEXT:    or t1, t1, t6
+; RV32-NEXT:    srli t6, t1, 16
+; RV32-NEXT:    or t1, t1, t6
+; RV32-NEXT:    not t1, t1
+; RV32-NEXT:    srli t6, t1, 1
+; RV32-NEXT:    and t6, t6, t5
+; RV32-NEXT:    sub t1, t1, t6
+; RV32-NEXT:    and t6, t1, t4
+; RV32-NEXT:    srli t1, t1, 2
+; RV32-NEXT:    and t1, t1, t4
+; RV32-NEXT:    add t1, t6, t1
+; RV32-NEXT:    srli t6, t1, 4
+; RV32-NEXT:    add t1, t1, t6
+; RV32-NEXT:    and t1, t1, t3
+; RV32-NEXT:    slli t6, t1, 8
+; RV32-NEXT:    add t1, t1, t6
+; RV32-NEXT:    slli t6, t1, 16
+; RV32-NEXT:    add t1, t1, t6
+; RV32-NEXT:    srli t6, t1, 24
+; RV32-NEXT:  .LBB3_6: # %_udiv-special-cases
+; RV32-NEXT:    or t1, a5, a3
+; RV32-NEXT:    or a7, t0, a7
+; RV32-NEXT:    bnez a4, .LBB3_8
+; RV32-NEXT:  # %bb.7: # %_udiv-special-cases
+; RV32-NEXT:    li t6, 128
+; RV32-NEXT:  .LBB3_8: # %_udiv-special-cases
+; RV32-NEXT:    or a5, a7, t1
+; RV32-NEXT:    addi a4, a6, 64
+; RV32-NEXT:    addi a3, t6, 128
+; RV32-NEXT:    or a0, a0, t1
+; RV32-NEXT:    or s1, a2, a7
+; RV32-NEXT:    or s1, s1, a0
+; RV32-NEXT:    sltu s0, a3, t6
+; RV32-NEXT:    bnez s1, .LBB3_11
+; RV32-NEXT:  # %bb.9: # %_udiv-special-cases
+; RV32-NEXT:    mv t6, s0
+; RV32-NEXT:    beqz t1, .LBB3_12
+; RV32-NEXT:  .LBB3_10:
+; RV32-NEXT:    srli a0, t1, 1
+; RV32-NEXT:    or a0, t1, a0
 ; RV32-NEXT:    srli a2, a0, 2
 ; RV32-NEXT:    or a0, a0, a2
 ; RV32-NEXT:    srli a2, a0, 4
@@ -1279,12 +305,18 @@ define i129 @udiv_i129(i129 %x, i129 %y) nounwind {
 ; RV32-NEXT:    add a0, a0, a2
 ; RV32-NEXT:    slli a2, a0, 16
 ; RV32-NEXT:    add a0, a0, a2
-; RV32-NEXT:    srli a0, a0, 24
-; RV32-NEXT:    addi a4, a0, 32
-; RV32-NEXT:    j .LBB3_19
-; RV32-NEXT:  .LBB3_18:
-; RV32-NEXT:    srli a0, s4, 1
-; RV32-NEXT:    or a0, s4, a0
+; RV32-NEXT:    srli s2, a0, 24
+; RV32-NEXT:    beqz a5, .LBB3_13
+; RV32-NEXT:    j .LBB3_14
+; RV32-NEXT:  .LBB3_11:
+; RV32-NEXT:    snez a0, a5
+; RV32-NEXT:    sltu a2, a4, a6
+; RV32-NEXT:    addi a0, a0, -1
+; RV32-NEXT:    and t6, a0, a2
+; RV32-NEXT:    bnez t1, .LBB3_10
+; RV32-NEXT:  .LBB3_12: # %_udiv-special-cases
+; RV32-NEXT:    srli a0, a7, 1
+; RV32-NEXT:    or a0, a7, a0
 ; RV32-NEXT:    srli a2, a0, 2
 ; RV32-NEXT:    or a0, a0, a2
 ; RV32-NEXT:    srli a2, a0, 4
@@ -1308,63 +340,31 @@ define i129 @udiv_i129(i129 %x, i129 %y) nounwind {
 ; RV32-NEXT:    add a0, a0, a2
 ; RV32-NEXT:    slli a2, a0, 16
 ; RV32-NEXT:    add a0, a0, a2
-; RV32-NEXT:    srli a4, a0, 24
-; RV32-NEXT:  .LBB3_19: # %_udiv-special-cases
-; RV32-NEXT:    srli a0, t1, 1
-; RV32-NEXT:    slli a2, a1, 31
-; RV32-NEXT:    slli a3, t1, 31
-; RV32-NEXT:    slli a5, a7, 31
-; RV32-NEXT:    srli s6, a6, 1
-; RV32-NEXT:    beqz a5, .LBB3_21
-; RV32-NEXT:  # %bb.20:
-; RV32-NEXT:    srli s2, a5, 1
-; RV32-NEXT:    or s2, a5, s2
-; RV32-NEXT:    srli s7, s2, 2
-; RV32-NEXT:    or s2, s2, s7
-; RV32-NEXT:    srli s7, s2, 4
-; RV32-NEXT:    or s2, s2, s7
-; RV32-NEXT:    srli s7, s2, 8
-; RV32-NEXT:    or s2, s2, s7
-; RV32-NEXT:    srli s7, s2, 16
-; RV32-NEXT:    or s2, s2, s7
-; RV32-NEXT:    not s2, s2
-; RV32-NEXT:    srli s7, s2, 1
-; RV32-NEXT:    and s7, s7, t5
-; RV32-NEXT:    sub s2, s2, s7
-; RV32-NEXT:    and s7, s2, t4
-; RV32-NEXT:    srli s2, s2, 2
-; RV32-NEXT:    and s2, s2, t4
-; RV32-NEXT:    add s2, s7, s2
-; RV32-NEXT:    srli s7, s2, 4
-; RV32-NEXT:    add s2, s2, s7
-; RV32-NEXT:    and s2, s2, t3
-; RV32-NEXT:    slli s7, s2, 8
-; RV32-NEXT:    add s2, s2, s7
-; RV32-NEXT:    slli s7, s2, 16
-; RV32-NEXT:    add s2, s2, s7
-; RV32-NEXT:    srli s2, s2, 24
-; RV32-NEXT:  .LBB3_21: # %_udiv-special-cases
-; RV32-NEXT:    or s7, a2, a0
-; RV32-NEXT:    or a3, s6, a3
-; RV32-NEXT:    bnez a5, .LBB3_23
-; RV32-NEXT:  # %bb.22: # %_udiv-special-cases
-; RV32-NEXT:    li s2, 128
-; RV32-NEXT:  .LBB3_23: # %_udiv-special-cases
-; RV32-NEXT:    or a2, a3, s7
-; RV32-NEXT:    addi a0, a4, 64
-; RV32-NEXT:    addi s6, s2, 128
-; RV32-NEXT:    or a5, s4, s7
-; RV32-NEXT:    or s4, s5, a3
-; RV32-NEXT:    or s5, s4, a5
-; RV32-NEXT:    sltu s4, s6, s2
-; RV32-NEXT:    bnez s5, .LBB3_26
-; RV32-NEXT:  # %bb.24: # %_udiv-special-cases
-; RV32-NEXT:    mv s2, s4
-; RV32-NEXT:    snez s3, s3
-; RV32-NEXT:    beqz s7, .LBB3_27
-; RV32-NEXT:  .LBB3_25:
-; RV32-NEXT:    srli a3, s7, 1
-; RV32-NEXT:    or a3, s7, a3
+; RV32-NEXT:    srli a0, a0, 24
+; RV32-NEXT:    addi s2, a0, 32
+; RV32-NEXT:    bnez a5, .LBB3_14
+; RV32-NEXT:  .LBB3_13: # %_udiv-special-cases
+; RV32-NEXT:    mv s2, a4
+; RV32-NEXT:  .LBB3_14: # %_udiv-special-cases
+; RV32-NEXT:    lw a6, 0(a1)
+; RV32-NEXT:    lw t0, 4(a1)
+; RV32-NEXT:    lw a7, 8(a1)
+; RV32-NEXT:    bnez s1, .LBB3_16
+; RV32-NEXT:  # %bb.15: # %_udiv-special-cases
+; RV32-NEXT:    mv s2, a3
+; RV32-NEXT:  .LBB3_16: # %_udiv-special-cases
+; RV32-NEXT:    lw t1, 12(a1)
+; RV32-NEXT:    lw a1, 16(a1)
+; RV32-NEXT:    slli a0, a7, 31
+; RV32-NEXT:    srli a2, t0, 1
+; RV32-NEXT:    or a0, a2, a0
+; RV32-NEXT:    slli a2, t0, 31
+; RV32-NEXT:    srli a3, a6, 1
+; RV32-NEXT:    or a2, a3, a2
+; RV32-NEXT:    bnez a0, .LBB3_18
+; RV32-NEXT:  # %bb.17: # %_udiv-special-cases
+; RV32-NEXT:    srli a3, a2, 1
+; RV32-NEXT:    or a3, a2, a3
 ; RV32-NEXT:    srli a4, a3, 2
 ; RV32-NEXT:    or a3, a3, a4
 ; RV32-NEXT:    srli a4, a3, 4
@@ -1388,18 +388,12 @@ define i129 @udiv_i129(i129 %x, i129 %y) nounwind {
 ; RV32-NEXT:    add a3, a3, a4
 ; RV32-NEXT:    slli a4, a3, 16
 ; RV32-NEXT:    add a3, a3, a4
-; RV32-NEXT:    srli a4, a3, 24
-; RV32-NEXT:    j .LBB3_28
-; RV32-NEXT:  .LBB3_26:
-; RV32-NEXT:    snez a5, a2
-; RV32-NEXT:    sltu a4, a0, a4
-; RV32-NEXT:    addi a5, a5, -1
-; RV32-NEXT:    and s2, a5, a4
-; RV32-NEXT:    snez s3, s3
-; RV32-NEXT:    bnez s7, .LBB3_25
-; RV32-NEXT:  .LBB3_27: # %_udiv-special-cases
-; RV32-NEXT:    srli a4, a3, 1
-; RV32-NEXT:    or a3, a3, a4
+; RV32-NEXT:    srli a3, a3, 24
+; RV32-NEXT:    addi s5, a3, 32
+; RV32-NEXT:    j .LBB3_19
+; RV32-NEXT:  .LBB3_18:
+; RV32-NEXT:    srli a3, a0, 1
+; RV32-NEXT:    or a3, a0, a3
 ; RV32-NEXT:    srli a4, a3, 2
 ; RV32-NEXT:    or a3, a3, a4
 ; RV32-NEXT:    srli a4, a3, 4
@@ -1423,100 +417,215 @@ define i129 @udiv_i129(i129 %x, i129 %y) nounwind {
 ; RV32-NEXT:    add a3, a3, a4
 ; RV32-NEXT:    slli a4, a3, 16
 ; RV32-NEXT:    add a3, a3, a4
-; RV32-NEXT:    srli a3, a3, 24
-; RV32-NEXT:    addi a4, a3, 32
+; RV32-NEXT:    srli s5, a3, 24
+; RV32-NEXT:  .LBB3_19: # %_udiv-special-cases
+; RV32-NEXT:    srli a3, t1, 1
+; RV32-NEXT:    slli a4, a1, 31
+; RV32-NEXT:    slli a5, t1, 31
+; RV32-NEXT:    slli s4, a6, 31
+; RV32-NEXT:    srli s6, a7, 1
+; RV32-NEXT:    beqz s4, .LBB3_21
+; RV32-NEXT:  # %bb.20:
+; RV32-NEXT:    srli s3, s4, 1
+; RV32-NEXT:    or s3, s4, s3
+; RV32-NEXT:    srli s7, s3, 2
+; RV32-NEXT:    or s3, s3, s7
+; RV32-NEXT:    srli s7, s3, 4
+; RV32-NEXT:    or s3, s3, s7
+; RV32-NEXT:    srli s7, s3, 8
+; RV32-NEXT:    or s3, s3, s7
+; RV32-NEXT:    srli s7, s3, 16
+; RV32-NEXT:    or s3, s3, s7
+; RV32-NEXT:    not s3, s3
+; RV32-NEXT:    srli s7, s3, 1
+; RV32-NEXT:    and s7, s7, t5
+; RV32-NEXT:    sub s3, s3, s7
+; RV32-NEXT:    and s7, s3, t4
+; RV32-NEXT:    srli s3, s3, 2
+; RV32-NEXT:    and s3, s3, t4
+; RV32-NEXT:    add s3, s7, s3
+; RV32-NEXT:    srli s7, s3, 4
+; RV32-NEXT:    add s3, s3, s7
+; RV32-NEXT:    and s3, s3, t3
+; RV32-NEXT:    slli s7, s3, 8
+; RV32-NEXT:    add s3, s3, s7
+; RV32-NEXT:    slli s7, s3, 16
+; RV32-NEXT:    add s3, s3, s7
+; RV32-NEXT:    srli s3, s3, 24
+; RV32-NEXT:  .LBB3_21: # %_udiv-special-cases
+; RV32-NEXT:    or s7, a4, a3
+; RV32-NEXT:    or s6, s6, a5
+; RV32-NEXT:    bnez s4, .LBB3_23
+; RV32-NEXT:  # %bb.22: # %_udiv-special-cases
+; RV32-NEXT:    li s3, 128
+; RV32-NEXT:  .LBB3_23: # %_udiv-special-cases
+; RV32-NEXT:    or s4, s6, s7
+; RV32-NEXT:    addi a5, s5, 64
+; RV32-NEXT:    addi a4, s3, 128
+; RV32-NEXT:    or a0, a0, s7
+; RV32-NEXT:    or a3, a2, s6
+; RV32-NEXT:    or a3, a3, a0
+; RV32-NEXT:    sltu a0, a4, s3
+; RV32-NEXT:    bnez a3, .LBB3_26
+; RV32-NEXT:  # %bb.24: # %_udiv-special-cases
+; RV32-NEXT:    mv a2, a0
+; RV32-NEXT:    xori s3, s0, 1
+; RV32-NEXT:    beqz s7, .LBB3_27
+; RV32-NEXT:  .LBB3_25:
+; RV32-NEXT:    srli s5, s7, 1
+; RV32-NEXT:    or s5, s7, s5
+; RV32-NEXT:    srli s6, s5, 2
+; RV32-NEXT:    or s5, s5, s6
+; RV32-NEXT:    srli s6, s5, 4
+; RV32-NEXT:    or s5, s5, s6
+; RV32-NEXT:    srli s6, s5, 8
+; RV32-NEXT:    or s5, s5, s6
+; RV32-NEXT:    srli s6, s5, 16
+; RV32-NEXT:    or s5, s5, s6
+; RV32-NEXT:    not s5, s5
+; RV32-NEXT:    srli s6, s5, 1
+; RV32-NEXT:    and t5, s6, t5
+; RV32-NEXT:    sub t5, s5, t5
+; RV32-NEXT:    and s5, t5, t4
+; RV32-NEXT:    srli t5, t5, 2
+; RV32-NEXT:    and t4, t5, t4
+; RV32-NEXT:    add t4, s5, t4
+; RV32-NEXT:    srli t5, t4, 4
+; RV32-NEXT:    add t4, t4, t5
+; RV32-NEXT:    and t3, t4, t3
+; RV32-NEXT:    slli t4, t3, 8
+; RV32-NEXT:    add t3, t3, t4
+; RV32-NEXT:    slli t4, t3, 16
+; RV32-NEXT:    add t3, t3, t4
+; RV32-NEXT:    srli t3, t3, 24
+; RV32-NEXT:    j .LBB3_28
+; RV32-NEXT:  .LBB3_26:
+; RV32-NEXT:    snez a2, s4
+; RV32-NEXT:    sltu s3, a5, s5
+; RV32-NEXT:    addi a2, a2, -1
+; RV32-NEXT:    and a2, a2, s3
+; RV32-NEXT:    xori s3, s0, 1
+; RV32-NEXT:    bnez s7, .LBB3_25
+; RV32-NEXT:  .LBB3_27: # %_udiv-special-cases
+; RV32-NEXT:    srli s5, s6, 1
+; RV32-NEXT:    or s5, s6, s5
+; RV32-NEXT:    srli s6, s5, 2
+; RV32-NEXT:    or s5, s5, s6
+; RV32-NEXT:    srli s6, s5, 4
+; RV32-NEXT:    or s5, s5, s6
+; RV32-NEXT:    srli s6, s5, 8
+; RV32-NEXT:    or s5, s5, s6
+; RV32-NEXT:    srli s6, s5, 16
+; RV32-NEXT:    or s5, s5, s6
+; RV32-NEXT:    not s5, s5
+; RV32-NEXT:    srli s6, s5, 1
+; RV32-NEXT:    and t5, s6, t5
+; RV32-NEXT:    sub t5, s5, t5
+; RV32-NEXT:    and s5, t5, t4
+; RV32-NEXT:    srli t5, t5, 2
+; RV32-NEXT:    and t4, t5, t4
+; RV32-NEXT:    add t4, s5, t4
+; RV32-NEXT:    srli t5, t4, 4
+; RV32-NEXT:    add t4, t4, t5
+; RV32-NEXT:    and t3, t4, t3
+; RV32-NEXT:    slli t4, t3, 8
+; RV32-NEXT:    add t3, t3, t4
+; RV32-NEXT:    slli t4, t3, 16
+; RV32-NEXT:    add t3, t3, t4
+; RV32-NEXT:    srli t3, t3, 24
+; RV32-NEXT:    addi t3, t3, 32
 ; RV32-NEXT:  .LBB3_28: # %_udiv-special-cases
-; RV32-NEXT:    xori a3, s0, 1
-; RV32-NEXT:    addi s3, s3, -1
-; RV32-NEXT:    bnez a2, .LBB3_30
+; RV32-NEXT:    andi s10, a1, 1
+; RV32-NEXT:    andi s7, t2, 1
+; RV32-NEXT:    neg t2, s3
+; RV32-NEXT:    snez t4, s1
+; RV32-NEXT:    bnez s4, .LBB3_30
 ; RV32-NEXT:  # %bb.29: # %_udiv-special-cases
-; RV32-NEXT:    mv a4, a0
+; RV32-NEXT:    mv t3, a5
 ; RV32-NEXT:  .LBB3_30: # %_udiv-special-cases
-; RV32-NEXT:    andi s11, a1, 1
-; RV32-NEXT:    andi a0, t2, 1
 ; RV32-NEXT:    lw a1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw a2, 32(sp) # 4-byte Folded Reload
-; RV32-NEXT:    or s9, a1, a2
-; RV32-NEXT:    or a5, a7, a6
-; RV32-NEXT:    neg a1, a3
-; RV32-NEXT:    and t2, s3, s0
-; RV32-NEXT:    bnez s5, .LBB3_32
+; RV32-NEXT:    or a5, a1, s7
+; RV32-NEXT:    or a1, a6, s10
+; RV32-NEXT:    and t2, t2, s0
+; RV32-NEXT:    addi s0, t4, -1
+; RV32-NEXT:    bnez a3, .LBB3_32
 ; RV32-NEXT:  # %bb.31: # %_udiv-special-cases
-; RV32-NEXT:    mv a4, s6
+; RV32-NEXT:    mv t3, a4
 ; RV32-NEXT:  .LBB3_32: # %_udiv-special-cases
-; RV32-NEXT:    lw a2, 24(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw a3, 28(sp) # 4-byte Folded Reload
-; RV32-NEXT:    or s10, a2, a3
-; RV32-NEXT:    or a2, s9, a0
-; RV32-NEXT:    or a3, t0, t1
-; RV32-NEXT:    or t4, a5, s11
-; RV32-NEXT:    and a1, t2, a1
-; RV32-NEXT:    xori a5, s4, 1
-; RV32-NEXT:    snez t2, s5
-; RV32-NEXT:    neg a5, a5
-; RV32-NEXT:    addi t2, t2, -1
-; RV32-NEXT:    and t3, t2, s4
-; RV32-NEXT:    sltu t2, s1, a4
-; RV32-NEXT:    and t3, t3, a5
-; RV32-NEXT:    mv a5, t2
-; RV32-NEXT:    beq t6, s2, .LBB3_34
+; RV32-NEXT:    lw a4, 24(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw t4, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    or s9, a4, t4
+; RV32-NEXT:    lw a4, 32(sp) # 4-byte Folded Reload
+; RV32-NEXT:    or a5, a5, a4
+; RV32-NEXT:    or t4, t0, t1
+; RV32-NEXT:    or t5, a1, a7
+; RV32-NEXT:    and a1, s0, t2
+; RV32-NEXT:    xori a4, a0, 1
+; RV32-NEXT:    snez a3, a3
+; RV32-NEXT:    neg a4, a4
+; RV32-NEXT:    addi t2, a3, -1
+; RV32-NEXT:    and a0, a4, a0
+; RV32-NEXT:    sltu a3, s2, t3
+; RV32-NEXT:    and t2, t2, a0
+; RV32-NEXT:    mv a4, a3
+; RV32-NEXT:    beq t6, a2, .LBB3_34
 ; RV32-NEXT:  # %bb.33: # %_udiv-special-cases
-; RV32-NEXT:    sltu a5, t6, s2
+; RV32-NEXT:    sltu a4, t6, a2
 ; RV32-NEXT:  .LBB3_34: # %_udiv-special-cases
-; RV32-NEXT:    or a2, a2, s10
-; RV32-NEXT:    or a3, t4, a3
-; RV32-NEXT:    sltu t5, a1, t3
-; RV32-NEXT:    mv t4, a5
-; RV32-NEXT:    beq a1, t3, .LBB3_36
+; RV32-NEXT:    or a0, a5, s9
+; RV32-NEXT:    or t5, t5, t4
+; RV32-NEXT:    sltu t4, a1, t2
+; RV32-NEXT:    mv s0, a4
+; RV32-NEXT:    beq a1, t2, .LBB3_36
 ; RV32-NEXT:  # %bb.35: # %_udiv-special-cases
-; RV32-NEXT:    mv t4, t5
+; RV32-NEXT:    mv s0, t4
 ; RV32-NEXT:  .LBB3_36: # %_udiv-special-cases
-; RV32-NEXT:    seqz a2, a2
-; RV32-NEXT:    seqz a3, a3
-; RV32-NEXT:    andi t4, t4, 1
-; RV32-NEXT:    sub t6, t6, s2
-; RV32-NEXT:    sub a1, a1, t3
-; RV32-NEXT:    sub t2, t6, t2
-; RV32-NEXT:    sltu t3, a1, a5
-; RV32-NEXT:    add t3, t5, t3
-; RV32-NEXT:    neg t3, t3
-; RV32-NEXT:    sub t5, a1, a5
-; RV32-NEXT:    or a1, t5, t3
-; RV32-NEXT:    sub t6, s1, a4
+; RV32-NEXT:    seqz a5, a0
+; RV32-NEXT:    seqz t5, t5
+; RV32-NEXT:    andi a0, s0, 1
+; RV32-NEXT:    sub a2, t6, a2
+; RV32-NEXT:    sub a1, a1, t2
+; RV32-NEXT:    neg t4, t4
+; RV32-NEXT:    sub t2, a2, a3
+; RV32-NEXT:    sltu a2, a1, a4
+; RV32-NEXT:    sub t4, t4, a2
+; RV32-NEXT:    sub a4, a1, a4
+; RV32-NEXT:    or a1, a4, t4
+; RV32-NEXT:    sub a3, s2, t3
 ; RV32-NEXT:    beqz a1, .LBB3_38
 ; RV32-NEXT:  # %bb.37: # %_udiv-special-cases
 ; RV32-NEXT:    snez a1, a1
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    bnez t4, .LBB3_39
+; RV32-NEXT:    or a2, a5, t5
+; RV32-NEXT:    bnez a0, .LBB3_39
 ; RV32-NEXT:    j .LBB3_40
 ; RV32-NEXT:  .LBB3_38:
 ; RV32-NEXT:    snez a1, t2
-; RV32-NEXT:    sltiu a4, t6, 129
-; RV32-NEXT:    xori a4, a4, 1
-; RV32-NEXT:    or a1, a4, a1
-; RV32-NEXT:    or a2, a2, a3
-; RV32-NEXT:    beqz t4, .LBB3_40
+; RV32-NEXT:    sltiu a2, a3, 129
+; RV32-NEXT:    xori a2, a2, 1
+; RV32-NEXT:    or a1, a2, a1
+; RV32-NEXT:    or a2, a5, t5
+; RV32-NEXT:    beqz a0, .LBB3_40
 ; RV32-NEXT:  .LBB3_39: # %_udiv-special-cases
-; RV32-NEXT:    mv a1, t4
+; RV32-NEXT:    mv a1, a0
 ; RV32-NEXT:  .LBB3_40: # %_udiv-special-cases
-; RV32-NEXT:    or a5, a2, a1
-; RV32-NEXT:    addi a4, a5, -1
-; RV32-NEXT:    and s0, s11, a4
-; RV32-NEXT:    and a3, a4, t1
-; RV32-NEXT:    and a2, a4, a6
-; RV32-NEXT:    and a1, a4, t0
-; RV32-NEXT:    and a4, a4, a7
-; RV32-NEXT:    bnez a5, .LBB3_57
+; RV32-NEXT:    or t6, a2, a1
+; RV32-NEXT:    addi a1, t6, -1
+; RV32-NEXT:    and a2, s10, a1
+; RV32-NEXT:    and a5, a1, t1
+; RV32-NEXT:    and t3, a1, a7
+; RV32-NEXT:    and t5, a1, t0
+; RV32-NEXT:    and a1, a1, a6
+; RV32-NEXT:    bnez t6, .LBB3_57
 ; RV32-NEXT:  # %bb.41: # %_udiv-special-cases
-; RV32-NEXT:    or a5, t2, t3
-; RV32-NEXT:    xori s1, t6, 128
-; RV32-NEXT:    or s1, s1, t4
-; RV32-NEXT:    or s1, s1, t5
-; RV32-NEXT:    or a5, s1, a5
-; RV32-NEXT:    beqz a5, .LBB3_57
+; RV32-NEXT:    or t6, t2, t4
+; RV32-NEXT:    xori s0, a3, 128
+; RV32-NEXT:    or s0, s0, a0
+; RV32-NEXT:    or s0, s0, a4
+; RV32-NEXT:    or t6, s0, t6
+; RV32-NEXT:    beqz t6, .LBB3_57
 ; RV32-NEXT:  # %bb.42: # %udiv-bb1
 ; RV32-NEXT:    sw s8, 8(sp) # 4-byte Folded Spill
-; RV32-NEXT:    addi a1, t6, 1
+; RV32-NEXT:    addi a1, a3, 1
 ; RV32-NEXT:    sw zero, 136(sp)
 ; RV32-NEXT:    sw zero, 140(sp)
 ; RV32-NEXT:    sw zero, 144(sp)
@@ -1525,60 +634,60 @@ define i129 @udiv_i129(i129 %x, i129 %y) nounwind {
 ; RV32-NEXT:    sw zero, 124(sp)
 ; RV32-NEXT:    sw zero, 128(sp)
 ; RV32-NEXT:    sw zero, 132(sp)
-; RV32-NEXT:    sw a7, 152(sp)
+; RV32-NEXT:    sw a6, 152(sp)
 ; RV32-NEXT:    sw t0, 156(sp)
-; RV32-NEXT:    sw a6, 160(sp)
+; RV32-NEXT:    sw a7, 160(sp)
 ; RV32-NEXT:    sw t1, 164(sp)
-; RV32-NEXT:    sw s11, 168(sp)
-; RV32-NEXT:    li a2, 128
-; RV32-NEXT:    addi a3, sp, 152
-; RV32-NEXT:    neg ra, t6
-; RV32-NEXT:    seqz a4, a1
-; RV32-NEXT:    sub a2, a2, t6
-; RV32-NEXT:    add t2, t2, a4
-; RV32-NEXT:    andi a4, a2, 31
-; RV32-NEXT:    srli a2, a2, 3
-; RV32-NEXT:    or a5, a1, t2
-; RV32-NEXT:    xori s8, a4, 31
-; RV32-NEXT:    andi a2, a2, 28
-; RV32-NEXT:    seqz t6, a5
-; RV32-NEXT:    sub a2, a3, a2
-; RV32-NEXT:    add t6, t5, t6
-; RV32-NEXT:    lw a3, 0(a2)
-; RV32-NEXT:    lw a5, 4(a2)
-; RV32-NEXT:    lw s1, 8(a2)
-; RV32-NEXT:    lw a4, 12(a2)
-; RV32-NEXT:    sltu t5, t6, t5
-; RV32-NEXT:    or s0, a1, t6
-; RV32-NEXT:    add t3, t3, t5
-; RV32-NEXT:    or t5, t2, t3
-; RV32-NEXT:    or t5, s0, t5
-; RV32-NEXT:    srli s0, s1, 1
-; RV32-NEXT:    seqz s2, t5
-; RV32-NEXT:    add t4, t4, s2
-; RV32-NEXT:    sll s2, a4, ra
-; RV32-NEXT:    srl s0, s0, s8
-; RV32-NEXT:    or s0, s2, s0
-; RV32-NEXT:    srli s2, a5, 1
-; RV32-NEXT:    sll s1, s1, ra
-; RV32-NEXT:    srl s2, s2, s8
+; RV32-NEXT:    sw s10, 168(sp)
+; RV32-NEXT:    li a5, 128
+; RV32-NEXT:    addi t3, sp, 152
+; RV32-NEXT:    neg a2, a3
+; RV32-NEXT:    seqz t5, a1
+; RV32-NEXT:    sub a5, a5, a3
+; RV32-NEXT:    add t2, t2, t5
+; RV32-NEXT:    andi a3, a5, 31
+; RV32-NEXT:    srli a5, a5, 3
+; RV32-NEXT:    or t5, a1, t2
+; RV32-NEXT:    xori a3, a3, 31
+; RV32-NEXT:    andi a5, a5, 28
+; RV32-NEXT:    seqz t6, t5
+; RV32-NEXT:    sub a5, t3, a5
+; RV32-NEXT:    add t6, a4, t6
+; RV32-NEXT:    lw t3, 0(a5)
+; RV32-NEXT:    lw s0, 4(a5)
+; RV32-NEXT:    lw s1, 8(a5)
+; RV32-NEXT:    lw s11, 12(a5)
+; RV32-NEXT:    sltu a4, t6, a4
+; RV32-NEXT:    or t5, a1, t6
+; RV32-NEXT:    add t4, t4, a4
+; RV32-NEXT:    or a4, t2, t4
+; RV32-NEXT:    or a4, t5, a4
+; RV32-NEXT:    srli t5, s1, 1
+; RV32-NEXT:    seqz s2, a4
+; RV32-NEXT:    add a0, a0, s2
+; RV32-NEXT:    sll s2, s11, a2
+; RV32-NEXT:    srl t5, t5, a3
+; RV32-NEXT:    or t5, s2, t5
+; RV32-NEXT:    srli s2, s0, 1
+; RV32-NEXT:    sll s1, s1, a2
+; RV32-NEXT:    srl s2, s2, a3
 ; RV32-NEXT:    or s2, s1, s2
-; RV32-NEXT:    srli s1, a3, 1
-; RV32-NEXT:    sll a5, a5, ra
-; RV32-NEXT:    srl s3, s1, s8
-; RV32-NEXT:    andi s1, t4, 1
-; RV32-NEXT:    or s3, a5, s3
-; RV32-NEXT:    or a5, t5, s1
-; RV32-NEXT:    sll t5, a3, ra
-; RV32-NEXT:    beqz a5, .LBB3_55
+; RV32-NEXT:    srli s1, t3, 1
+; RV32-NEXT:    sll s0, s0, a2
+; RV32-NEXT:    srl s1, s1, a3
+; RV32-NEXT:    andi s3, a0, 1
+; RV32-NEXT:    or s1, s0, s1
+; RV32-NEXT:    or a0, a4, s3
+; RV32-NEXT:    sll t3, t3, a2
+; RV32-NEXT:    beqz a0, .LBB3_55
 ; RV32-NEXT:  # %bb.43: # %udiv-preheader
 ; RV32-NEXT:    sw zero, 52(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    sw zero, 48(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    sw zero, 44(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    li s6, 0
-; RV32-NEXT:    li s7, 0
-; RV32-NEXT:    srli a4, a4, 1
-; RV32-NEXT:    lw a2, 16(a2)
+; RV32-NEXT:    li s8, 0
+; RV32-NEXT:    srli a4, s11, 1
+; RV32-NEXT:    lw a0, 16(a5)
 ; RV32-NEXT:    sw zero, 104(sp)
 ; RV32-NEXT:    sw zero, 108(sp)
 ; RV32-NEXT:    sw zero, 112(sp)
@@ -1587,217 +696,232 @@ define i129 @udiv_i129(i129 %x, i129 %y) nounwind {
 ; RV32-NEXT:    sw zero, 92(sp)
 ; RV32-NEXT:    sw zero, 96(sp)
 ; RV32-NEXT:    sw zero, 100(sp)
-; RV32-NEXT:    sw s11, 72(sp)
+; RV32-NEXT:    sw s10, 72(sp)
 ; RV32-NEXT:    sw zero, 76(sp)
 ; RV32-NEXT:    sw zero, 80(sp)
 ; RV32-NEXT:    sw zero, 84(sp)
-; RV32-NEXT:    sw a7, 56(sp)
+; RV32-NEXT:    sw a6, 56(sp)
 ; RV32-NEXT:    sw t0, 60(sp)
-; RV32-NEXT:    sw a6, 64(sp)
+; RV32-NEXT:    sw a7, 64(sp)
 ; RV32-NEXT:    sw t1, 68(sp)
-; RV32-NEXT:    srli a3, a1, 3
-; RV32-NEXT:    addi a5, sp, 56
-; RV32-NEXT:    andi a6, a1, 31
-; RV32-NEXT:    or a7, s9, s10
-; RV32-NEXT:    srl a4, a4, s8
-; RV32-NEXT:    andi a3, a3, 28
-; RV32-NEXT:    xori a6, a6, 31
-; RV32-NEXT:    snez a7, a7
-; RV32-NEXT:    add a3, a5, a3
-; RV32-NEXT:    add a0, a0, a7
-; RV32-NEXT:    lw a5, 16(a3)
-; RV32-NEXT:    lw a7, 0(a3)
-; RV32-NEXT:    lw t0, 4(a3)
-; RV32-NEXT:    lw t1, 8(a3)
-; RV32-NEXT:    lw a3, 12(a3)
-; RV32-NEXT:    sll a2, a2, ra
-; RV32-NEXT:    or a2, a2, a4
+; RV32-NEXT:    srli a5, a1, 3
+; RV32-NEXT:    addi a6, sp, 56
+; RV32-NEXT:    andi a7, a1, 31
+; RV32-NEXT:    lw s10, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s11, 32(sp) # 4-byte Folded Reload
+; RV32-NEXT:    or t0, s10, s11
+; RV32-NEXT:    srl a3, a4, a3
+; RV32-NEXT:    andi a5, a5, 28
+; RV32-NEXT:    xori a4, a7, 31
+; RV32-NEXT:    or a7, t0, s9
+; RV32-NEXT:    add a5, a6, a5
+; RV32-NEXT:    snez a6, a7
+; RV32-NEXT:    lw a7, 0(a5)
+; RV32-NEXT:    lw t0, 4(a5)
+; RV32-NEXT:    lw t1, 8(a5)
+; RV32-NEXT:    lw s0, 12(a5)
+; RV32-NEXT:    lw a5, 16(a5)
+; RV32-NEXT:    add a6, s7, a6
+; RV32-NEXT:    sll a0, a0, a2
+; RV32-NEXT:    or a3, a0, a3
 ; RV32-NEXT:    slli a5, a5, 1
-; RV32-NEXT:    slli a4, a3, 1
-; RV32-NEXT:    slli t4, t1, 1
+; RV32-NEXT:    slli a0, s0, 1
+; RV32-NEXT:    slli a2, t1, 1
 ; RV32-NEXT:    slli s4, t0, 1
-; RV32-NEXT:    sll a5, a5, a6
-; RV32-NEXT:    sll a4, a4, a6
-; RV32-NEXT:    sll t4, t4, a6
-; RV32-NEXT:    sll a6, s4, a6
-; RV32-NEXT:    srl a3, a3, a1
-; RV32-NEXT:    or s9, a3, a5
-; RV32-NEXT:    lw s4, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT:    seqz a3, s4
-; RV32-NEXT:    srl a5, t1, a1
-; RV32-NEXT:    or ra, a5, a4
-; RV32-NEXT:    lw a5, 24(sp) # 4-byte Folded Reload
-; RV32-NEXT:    or a4, s4, a5
-; RV32-NEXT:    sub a5, a5, a3
-; RV32-NEXT:    seqz a3, a4
-; RV32-NEXT:    srl a4, t0, a1
-; RV32-NEXT:    or s11, a4, t4
-; RV32-NEXT:    lw a4, 32(sp) # 4-byte Folded Reload
-; RV32-NEXT:    sub t0, a4, a3
-; RV32-NEXT:    sw t0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sltu a3, a4, a3
-; RV32-NEXT:    addi a0, a0, 1
-; RV32-NEXT:    lw a4, 28(sp) # 4-byte Folded Reload
-; RV32-NEXT:    sub s5, a4, a3
-; RV32-NEXT:    andi a0, a0, 1
-; RV32-NEXT:    sw a0, 16(sp) # 4-byte Folded Spill
-; RV32-NEXT:    andi a0, a2, 1
+; RV32-NEXT:    sll a5, a5, a4
+; RV32-NEXT:    sll a0, a0, a4
+; RV32-NEXT:    sll a2, a2, a4
+; RV32-NEXT:    sll s4, s4, a4
+; RV32-NEXT:    srl a4, s0, a1
+; RV32-NEXT:    or ra, a4, a5
+; RV32-NEXT:    seqz a5, s10
+; RV32-NEXT:    srl a4, t1, a1
+; RV32-NEXT:    or a4, a4, a0
+; RV32-NEXT:    lw t1, 24(sp) # 4-byte Folded Reload
+; RV32-NEXT:    or a0, s10, t1
+; RV32-NEXT:    sub s5, t1, a5
+; RV32-NEXT:    seqz a5, a0
+; RV32-NEXT:    srl a0, t0, a1
+; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    sub a2, s11, a5
+; RV32-NEXT:    sw a2, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sltu a2, s11, a5
+; RV32-NEXT:    lw a5, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    sub t0, a5, a2
+; RV32-NEXT:    addi a6, a6, 1
+; RV32-NEXT:    andi a2, a6, 1
+; RV32-NEXT:    sw a2, 16(sp) # 4-byte Folded Spill
+; RV32-NEXT:    andi a3, a3, 1
 ; RV32-NEXT:    srl a2, a7, a1
-; RV32-NEXT:    or s8, a2, a6
-; RV32-NEXT:    addi s4, s4, -1
-; RV32-NEXT:    sw s4, 12(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw a5, 20(sp) # 4-byte Folded Spill
+; RV32-NEXT:    or a5, a2, s4
+; RV32-NEXT:    addi s10, s10, -1
+; RV32-NEXT:    sw s10, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw t0, 20(sp) # 4-byte Folded Spill
+; RV32-NEXT:    li s11, 0
+; RV32-NEXT:    li s10, 0
 ; RV32-NEXT:    j .LBB3_45
 ; RV32-NEXT:  .LBB3_44: # %udiv-do-while
 ; RV32-NEXT:    # in Loop: Header=BB3_45 Depth=1
-; RV32-NEXT:    lw a2, 32(sp) # 4-byte Folded Reload
-; RV32-NEXT:    and t1, a0, a2
-; RV32-NEXT:    xor a2, a6, a3
-; RV32-NEXT:    xor a7, ra, t1
-; RV32-NEXT:    or a2, a7, a2
-; RV32-NEXT:    srli a2, s2, 31
-; RV32-NEXT:    sltu a7, ra, t1
-; RV32-NEXT:    sub t1, ra, t1
-; RV32-NEXT:    slli ra, s0, 1
-; RV32-NEXT:    sub a3, a6, a3
-; RV32-NEXT:    srli a6, s3, 31
+; RV32-NEXT:    lw s0, 32(sp) # 4-byte Folded Reload
+; RV32-NEXT:    and s0, a3, s0
+; RV32-NEXT:    xor a2, t1, a7
+; RV32-NEXT:    xor s9, a4, s0
+; RV32-NEXT:    or a2, s9, a2
+; RV32-NEXT:    li s9, 0
+; RV32-NEXT:    li a2, 0
+; RV32-NEXT:    sltu s4, a4, s0
+; RV32-NEXT:    sub s0, a4, s0
+; RV32-NEXT:    sub a7, t1, a7
+; RV32-NEXT:    srli a4, s2, 31
+; RV32-NEXT:    sub a0, a0, t0
+; RV32-NEXT:    slli t0, t5, 1
+; RV32-NEXT:    or t0, t0, a4
+; RV32-NEXT:    srli a4, s1, 31
 ; RV32-NEXT:    slli s2, s2, 1
-; RV32-NEXT:    sub a5, s11, a5
-; RV32-NEXT:    srli s11, t5, 31
-; RV32-NEXT:    slli s3, s3, 1
-; RV32-NEXT:    srli s0, s0, 31
-; RV32-NEXT:    slli t5, t5, 1
-; RV32-NEXT:    or a2, ra, a2
-; RV32-NEXT:    or t0, a1, t6
-; RV32-NEXT:    or a6, s2, a6
-; RV32-NEXT:    or s2, t2, t3
-; RV32-NEXT:    or s3, s3, s11
-; RV32-NEXT:    or t4, a1, t2
-; RV32-NEXT:    lw s4, 52(sp) # 4-byte Folded Reload
-; RV32-NEXT:    or t5, s4, t5
-; RV32-NEXT:    seqz s4, a1
+; RV32-NEXT:    or t1, s2, a4
+; RV32-NEXT:    srli a4, t3, 31
+; RV32-NEXT:    slli s1, s1, 1
+; RV32-NEXT:    or s1, s1, a4
+; RV32-NEXT:    slli t3, t3, 1
+; RV32-NEXT:    lw a4, 52(sp) # 4-byte Folded Reload
+; RV32-NEXT:    or t3, a4, t3
+; RV32-NEXT:    srli a4, t5, 31
+; RV32-NEXT:    or s8, s8, a4
+; RV32-NEXT:    sub a4, s0, ra
+; RV32-NEXT:    sltu s0, s0, ra
+; RV32-NEXT:    or t5, a1, t6
+; RV32-NEXT:    sub a7, a7, s4
+; RV32-NEXT:    or s2, t2, t4
+; RV32-NEXT:    sub a0, a0, a6
+; RV32-NEXT:    or a6, a1, t2
+; RV32-NEXT:    or s4, t5, s2
+; RV32-NEXT:    seqz t5, a1
 ; RV32-NEXT:    addi a1, a1, -1
-; RV32-NEXT:    or s7, s7, s0
-; RV32-NEXT:    andi a0, a0, 1
-; RV32-NEXT:    sw a0, 52(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sub ra, t1, s9
-; RV32-NEXT:    sltu t1, t1, s9
-; RV32-NEXT:    sub a3, a3, a7
-; RV32-NEXT:    sub s11, a5, a4
-; RV32-NEXT:    or a4, t0, s2
-; RV32-NEXT:    seqz a5, t4
-; RV32-NEXT:    sub t2, t2, s4
-; RV32-NEXT:    lw a0, 48(sp) # 4-byte Folded Reload
-; RV32-NEXT:    or s3, a0, s3
-; RV32-NEXT:    lw a0, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT:    or s2, a0, a6
-; RV32-NEXT:    or s0, s6, a2
-; RV32-NEXT:    andi a0, s7, 1
-; RV32-NEXT:    sub s9, a3, t1
-; RV32-NEXT:    snez a2, a4
-; RV32-NEXT:    sltu a3, t6, a5
-; RV32-NEXT:    sub t6, t6, a5
-; RV32-NEXT:    add a2, s1, a2
-; RV32-NEXT:    sub t3, t3, a3
-; RV32-NEXT:    or a3, a1, t6
-; RV32-NEXT:    addi a2, a2, 1
-; RV32-NEXT:    or a4, t2, t3
-; RV32-NEXT:    andi s1, a2, 1
-; RV32-NEXT:    or a3, a3, a4
-; RV32-NEXT:    or a3, a3, s1
-; RV32-NEXT:    sub s8, s10, s8
+; RV32-NEXT:    andi a3, a3, 1
+; RV32-NEXT:    sw a3, 52(sp) # 4-byte Folded Spill
+; RV32-NEXT:    seqz a6, a6
+; RV32-NEXT:    sub t2, t2, t5
+; RV32-NEXT:    lw a3, 48(sp) # 4-byte Folded Reload
+; RV32-NEXT:    or s1, a3, s1
+; RV32-NEXT:    lw a3, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT:    or s2, a3, t1
+; RV32-NEXT:    or t5, s6, t0
+; RV32-NEXT:    andi a3, s8, 1
+; RV32-NEXT:    sub ra, a7, s0
+; RV32-NEXT:    snez a7, s4
+; RV32-NEXT:    sltu t0, t6, a6
+; RV32-NEXT:    sub t6, t6, a6
+; RV32-NEXT:    add a7, s3, a7
+; RV32-NEXT:    sub t4, t4, t0
+; RV32-NEXT:    or a6, a1, t6
+; RV32-NEXT:    addi a7, a7, 1
+; RV32-NEXT:    or t0, t2, t4
+; RV32-NEXT:    andi s3, a7, 1
+; RV32-NEXT:    or a6, a6, t0
+; RV32-NEXT:    or a6, a6, s3
+; RV32-NEXT:    sub a5, a5, s7
 ; RV32-NEXT:    sw zero, 48(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    sw zero, 44(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    li s6, 0
-; RV32-NEXT:    li s7, 0
-; RV32-NEXT:    lw a5, 20(sp) # 4-byte Folded Reload
-; RV32-NEXT:    beqz a3, .LBB3_56
+; RV32-NEXT:    li s8, 0
+; RV32-NEXT:    lw t0, 20(sp) # 4-byte Folded Reload
+; RV32-NEXT:    beqz a6, .LBB3_56
 ; RV32-NEXT:  .LBB3_45: # %udiv-do-while
 ; RV32-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32-NEXT:    srli a2, ra, 31
-; RV32-NEXT:    slli a3, s9, 1
-; RV32-NEXT:    or a6, a3, a2
-; RV32-NEXT:    srli a3, s11, 31
-; RV32-NEXT:    slli ra, ra, 1
-; RV32-NEXT:    or ra, ra, a3
-; RV32-NEXT:    beq s5, a6, .LBB3_47
+; RV32-NEXT:    srli a2, a4, 31
+; RV32-NEXT:    slli a6, ra, 1
+; RV32-NEXT:    or t1, a6, a2
+; RV32-NEXT:    srli a2, a0, 31
+; RV32-NEXT:    slli a4, a4, 1
+; RV32-NEXT:    or a4, a4, a2
+; RV32-NEXT:    beq t0, t1, .LBB3_47
 ; RV32-NEXT:  # %bb.46: # %udiv-do-while
 ; RV32-NEXT:    # in Loop: Header=BB3_45 Depth=1
-; RV32-NEXT:    sltu a3, s5, a6
+; RV32-NEXT:    sltu a2, t0, t1
 ; RV32-NEXT:    j .LBB3_48
 ; RV32-NEXT:  .LBB3_47: # in Loop: Header=BB3_45 Depth=1
 ; RV32-NEXT:    lw a2, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT:    sltu a3, a2, ra
+; RV32-NEXT:    sltu a2, a2, a4
 ; RV32-NEXT:  .LBB3_48: # %udiv-do-while
 ; RV32-NEXT:    # in Loop: Header=BB3_45 Depth=1
-; RV32-NEXT:    srli a4, s8, 31
-; RV32-NEXT:    slli s11, s11, 1
-; RV32-NEXT:    slli s8, s8, 1
-; RV32-NEXT:    or s11, s11, a4
-; RV32-NEXT:    andi a0, a0, 1
-; RV32-NEXT:    or s10, s8, a0
-; RV32-NEXT:    beq a5, s11, .LBB3_50
+; RV32-NEXT:    srli a6, a5, 31
+; RV32-NEXT:    slli a0, a0, 1
+; RV32-NEXT:    slli a5, a5, 1
+; RV32-NEXT:    or a0, a0, a6
+; RV32-NEXT:    andi a3, a3, 1
+; RV32-NEXT:    or a5, a5, a3
+; RV32-NEXT:    beq s5, a0, .LBB3_50
 ; RV32-NEXT:  # %bb.49: # %udiv-do-while
 ; RV32-NEXT:    # in Loop: Header=BB3_45 Depth=1
-; RV32-NEXT:    sltu a0, a5, s11
+; RV32-NEXT:    sltu a3, s5, a0
 ; RV32-NEXT:    j .LBB3_51
 ; RV32-NEXT:  .LBB3_50: # in Loop: Header=BB3_45 Depth=1
-; RV32-NEXT:    lw a0, 12(sp) # 4-byte Folded Reload
-; RV32-NEXT:    sltu a0, a0, s10
+; RV32-NEXT:    lw a3, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    sltu a3, a3, a5
 ; RV32-NEXT:  .LBB3_51: # %udiv-do-while
 ; RV32-NEXT:    # in Loop: Header=BB3_45 Depth=1
-; RV32-NEXT:    lw a2, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT:    xor a4, a2, ra
-; RV32-NEXT:    xor a5, s5, a6
-; RV32-NEXT:    or a4, a4, a5
-; RV32-NEXT:    beqz a4, .LBB3_53
+; RV32-NEXT:    lw a6, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT:    xor a6, a6, a4
+; RV32-NEXT:    xor a7, t0, t1
+; RV32-NEXT:    or a6, a6, a7
+; RV32-NEXT:    beqz a6, .LBB3_53
 ; RV32-NEXT:  # %bb.52: # %udiv-do-while
 ; RV32-NEXT:    # in Loop: Header=BB3_45 Depth=1
-; RV32-NEXT:    mv a0, a3
+; RV32-NEXT:    mv a3, a2
 ; RV32-NEXT:  .LBB3_53: # %udiv-do-while
 ; RV32-NEXT:    # in Loop: Header=BB3_45 Depth=1
-; RV32-NEXT:    srli a3, s9, 31
-; RV32-NEXT:    lw a2, 16(sp) # 4-byte Folded Reload
-; RV32-NEXT:    sub a3, a2, a3
-; RV32-NEXT:    sub a3, a3, a0
-; RV32-NEXT:    slli a0, a3, 31
-; RV32-NEXT:    srai a0, a0, 31
-; RV32-NEXT:    lw a3, 28(sp) # 4-byte Folded Reload
-; RV32-NEXT:    and a3, a0, a3
+; RV32-NEXT:    srli a2, ra, 31
+; RV32-NEXT:    lw a6, 16(sp) # 4-byte Folded Reload
+; RV32-NEXT:    sub a2, a6, a2
+; RV32-NEXT:    sub a2, a2, a3
+; RV32-NEXT:    slli a2, a2, 31
+; RV32-NEXT:    srai a3, a2, 31
+; RV32-NEXT:    lw a2, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    and a7, a3, a2
 ; RV32-NEXT:    lw a2, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT:    and s8, a0, a2
-; RV32-NEXT:    lw a5, 24(sp) # 4-byte Folded Reload
-; RV32-NEXT:    and a5, a0, a5
-; RV32-NEXT:    sltu a4, s10, s8
-; RV32-NEXT:    mv s9, a4
-; RV32-NEXT:    beq s11, a5, .LBB3_44
+; RV32-NEXT:    and s7, a3, a2
+; RV32-NEXT:    lw a2, 24(sp) # 4-byte Folded Reload
+; RV32-NEXT:    and t0, a3, a2
+; RV32-NEXT:    sltu a6, a5, s7
+; RV32-NEXT:    mv ra, a6
+; RV32-NEXT:    beq a0, t0, .LBB3_44
 ; RV32-NEXT:  # %bb.54: # %udiv-do-while
 ; RV32-NEXT:    # in Loop: Header=BB3_45 Depth=1
-; RV32-NEXT:    sltu s9, s11, a5
+; RV32-NEXT:    sltu ra, a0, t0
 ; RV32-NEXT:    j .LBB3_44
 ; RV32-NEXT:  .LBB3_55:
 ; RV32-NEXT:    sw zero, 52(sp) # 4-byte Folded Spill
+; RV32-NEXT:    li s11, 0
+; RV32-NEXT:    li s9, 0
+; RV32-NEXT:    li s10, 0
+; RV32-NEXT:    li a2, 0
 ; RV32-NEXT:  .LBB3_56: # %udiv-loop-exit
-; RV32-NEXT:    srli a0, t5, 31
-; RV32-NEXT:    slli a1, s3, 1
-; RV32-NEXT:    srli a2, s3, 31
-; RV32-NEXT:    or a1, a1, a0
-; RV32-NEXT:    slli a0, s2, 1
-; RV32-NEXT:    srli a3, s2, 31
-; RV32-NEXT:    or a2, a0, a2
-; RV32-NEXT:    slli a0, s0, 1
-; RV32-NEXT:    srli s0, s0, 31
-; RV32-NEXT:    slli t5, t5, 1
-; RV32-NEXT:    or a3, a0, a3
-; RV32-NEXT:    lw a0, 52(sp) # 4-byte Folded Reload
-; RV32-NEXT:    or a4, a0, t5
+; RV32-NEXT:    srli a0, s2, 31
+; RV32-NEXT:    slli a1, t5, 1
+; RV32-NEXT:    or a0, a1, a0
+; RV32-NEXT:    srli a1, s1, 31
+; RV32-NEXT:    slli s2, s2, 1
+; RV32-NEXT:    or a3, s2, a1
+; RV32-NEXT:    srli a4, t3, 31
+; RV32-NEXT:    slli s1, s1, 1
+; RV32-NEXT:    srli a5, t5, 31
+; RV32-NEXT:    slli t3, t3, 1
+; RV32-NEXT:    lw a1, 52(sp) # 4-byte Folded Reload
+; RV32-NEXT:    or a1, a1, t3
+; RV32-NEXT:    or a4, s11, a4
+; RV32-NEXT:    or a2, a2, a5
+; RV32-NEXT:    or t5, a4, s1
+; RV32-NEXT:    or t3, s9, a3
+; RV32-NEXT:    or a5, s10, a0
+; RV32-NEXT:    andi a2, a2, 1
 ; RV32-NEXT:    lw s8, 8(sp) # 4-byte Folded Reload
 ; RV32-NEXT:  .LBB3_57: # %udiv-end
-; RV32-NEXT:    sw a4, 0(s8)
-; RV32-NEXT:    sw a1, 4(s8)
-; RV32-NEXT:    sw a2, 8(s8)
-; RV32-NEXT:    sw a3, 12(s8)
-; RV32-NEXT:    sb s0, 16(s8)
+; RV32-NEXT:    sw a1, 0(s8)
+; RV32-NEXT:    sw t5, 4(s8)
+; RV32-NEXT:    sw t3, 8(s8)
+; RV32-NEXT:    sw a5, 12(s8)
+; RV32-NEXT:    andi a2, a2, 1
+; RV32-NEXT:    sb a2, 16(s8)
 ; RV32-NEXT:    lw ra, 236(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 232(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s1, 228(sp) # 4-byte Folded Reload
@@ -1818,29 +942,29 @@ define i129 @udiv_i129(i129 %x, i129 %y) nounwind {
 ; RV64:       # %bb.0: # %_udiv-special-cases
 ; RV64-NEXT:    ld a3, 0(a2)
 ; RV64-NEXT:    ld a4, 8(a2)
-; RV64-NEXT:    ld t1, 16(a2)
+; RV64-NEXT:    ld t4, 16(a2)
 ; RV64-NEXT:    lui a2, 349525
 ; RV64-NEXT:    lui a5, 209715
 ; RV64-NEXT:    lui a6, 61681
-; RV64-NEXT:    addi t0, a2, 1365
-; RV64-NEXT:    addi a7, a5, 819
-; RV64-NEXT:    addi a6, a6, -241
-; RV64-NEXT:    slli a2, t0, 32
-; RV64-NEXT:    slli a5, a7, 32
-; RV64-NEXT:    slli t2, a6, 32
-; RV64-NEXT:    add t0, t0, a2
-; RV64-NEXT:    add a7, a7, a5
-; RV64-NEXT:    add a6, a6, t2
+; RV64-NEXT:    addi t1, a2, 1365
+; RV64-NEXT:    addi t0, a5, 819
+; RV64-NEXT:    addi a7, a6, -241
+; RV64-NEXT:    slli a2, t1, 32
+; RV64-NEXT:    slli a5, t0, 32
+; RV64-NEXT:    slli a6, a7, 32
+; RV64-NEXT:    add t1, t1, a2
+; RV64-NEXT:    add t0, t0, a5
+; RV64-NEXT:    add a7, a7, a6
 ; RV64-NEXT:    srli a2, a4, 1
-; RV64-NEXT:    slli a5, t1, 63
+; RV64-NEXT:    slli a5, t4, 63
 ; RV64-NEXT:    slli t2, a4, 63
-; RV64-NEXT:    or t3, a5, a2
+; RV64-NEXT:    or a6, a5, a2
 ; RV64-NEXT:    srli a2, a3, 1
-; RV64-NEXT:    or t4, a2, t2
-; RV64-NEXT:    bnez t3, .LBB3_2
+; RV64-NEXT:    or t5, a2, t2
+; RV64-NEXT:    bnez a6, .LBB3_2
 ; RV64-NEXT:  # %bb.1: # %_udiv-special-cases
-; RV64-NEXT:    srli a2, t4, 1
-; RV64-NEXT:    or a2, t4, a2
+; RV64-NEXT:    srli a2, t5, 1
+; RV64-NEXT:    or a2, t5, a2
 ; RV64-NEXT:    srli a5, a2, 2
 ; RV64-NEXT:    or a2, a2, a5
 ; RV64-NEXT:    srli a5, a2, 4
@@ -1853,15 +977,15 @@ define i129 @udiv_i129(i129 %x, i129 %y) nounwind {
 ; RV64-NEXT:    or a2, a2, a5
 ; RV64-NEXT:    not a2, a2
 ; RV64-NEXT:    srli a5, a2, 1
-; RV64-NEXT:    and a5, a5, t0
+; RV64-NEXT:    and a5, a5, t1
 ; RV64-NEXT:    sub a2, a2, a5
-; RV64-NEXT:    and a5, a2, a7
+; RV64-NEXT:    and a5, a2, t0
 ; RV64-NEXT:    srli a2, a2, 2
-; RV64-NEXT:    and a2, a2, a7
+; RV64-NEXT:    and a2, a2, t0
 ; RV64-NEXT:    add a2, a5, a2
 ; RV64-NEXT:    srli a5, a2, 4
 ; RV64-NEXT:    add a2, a2, a5
-; RV64-NEXT:    and a2, a2, a6
+; RV64-NEXT:    and a2, a2, a7
 ; RV64-NEXT:    slli a5, a2, 8
 ; RV64-NEXT:    add a2, a2, a5
 ; RV64-NEXT:    slli a5, a2, 16
@@ -1872,8 +996,8 @@ define i129 @udiv_i129(i129 %x, i129 %y) nounwind {
 ; RV64-NEXT:    addi t2, a2, 64
 ; RV64-NEXT:    j .LBB3_3
 ; RV64-NEXT:  .LBB3_2:
-; RV64-NEXT:    srli a2, t3, 1
-; RV64-NEXT:    or a2, t3, a2
+; RV64-NEXT:    srli a2, a6, 1
+; RV64-NEXT:    or a2, a6, a2
 ; RV64-NEXT:    srli a5, a2, 2
 ; RV64-NEXT:    or a2, a2, a5
 ; RV64-NEXT:    srli a5, a2, 4
@@ -1886,15 +1010,15 @@ define i129 @udiv_i129(i129 %x, i129 %y) nounwind {
 ; RV64-NEXT:    or a2, a2, a5
 ; RV64-NEXT:    not a2, a2
 ; RV64-NEXT:    srli a5, a2, 1
-; RV64-NEXT:    and a5, a5, t0
+; RV64-NEXT:    and a5, a5, t1
 ; RV64-NEXT:    sub a2, a2, a5
-; RV64-NEXT:    and a5, a2, a7
+; RV64-NEXT:    and a5, a2, t0
 ; RV64-NEXT:    srli a2, a2, 2
-; RV64-NEXT:    and a2, a2, a7
+; RV64-NEXT:    and a2, a2, t0
 ; RV64-NEXT:    add a2, a5, a2
 ; RV64-NEXT:    srli a5, a2, 4
 ; RV64-NEXT:    add a2, a2, a5
-; RV64-NEXT:    and a2, a2, a6
+; RV64-NEXT:    and a2, a2, a7
 ; RV64-NEXT:    slli a5, a2, 8
 ; RV64-NEXT:    add a2, a2, a5
 ; RV64-NEXT:    slli a5, a2, 16
@@ -1903,18 +1027,11 @@ define i129 @udiv_i129(i129 %x, i129 %y) nounwind {
 ; RV64-NEXT:    add a2, a2, a5
 ; RV64-NEXT:    srli t2, a2, 56
 ; RV64-NEXT:  .LBB3_3: # %_udiv-special-cases
-; RV64-NEXT:    addi sp, sp, -176
-; RV64-NEXT:    sd s0, 168(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd s1, 160(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd s2, 152(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd s3, 144(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd s4, 136(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd s5, 128(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    slli a2, a3, 63
-; RV64-NEXT:    li t5, 128
+; RV64-NEXT:    li t3, 128
 ; RV64-NEXT:    bnez a2, .LBB3_5
 ; RV64-NEXT:  # %bb.4: # %_udiv-special-cases
-; RV64-NEXT:    li s0, 128
+; RV64-NEXT:    li t6, 128
 ; RV64-NEXT:    j .LBB3_6
 ; RV64-NEXT:  .LBB3_5:
 ; RV64-NEXT:    srli a5, a2, 1
@@ -1931,337 +1048,356 @@ define i129 @udiv_i129(i129 %x, i129 %y) nounwind {
 ; RV64-NEXT:    or a2, a2, a5
 ; RV64-NEXT:    not a2, a2
 ; RV64-NEXT:    srli a5, a2, 1
-; RV64-NEXT:    and a5, a5, t0
+; RV64-NEXT:    and a5, a5, t1
 ; RV64-NEXT:    sub a2, a2, a5
-; RV64-NEXT:    and a5, a2, a7
+; RV64-NEXT:    and a5, a2, t0
 ; RV64-NEXT:    srli a2, a2, 2
-; RV64-NEXT:    and a2, a2, a7
+; RV64-NEXT:    and a2, a2, t0
 ; RV64-NEXT:    add a2, a5, a2
 ; RV64-NEXT:    srli a5, a2, 4
 ; RV64-NEXT:    add a2, a2, a5
-; RV64-NEXT:    and a2, a2, a6
+; RV64-NEXT:    and a2, a2, a7
 ; RV64-NEXT:    slli a5, a2, 8
 ; RV64-NEXT:    add a2, a2, a5
 ; RV64-NEXT:    slli a5, a2, 16
 ; RV64-NEXT:    add a2, a2, a5
 ; RV64-NEXT:    slli a5, a2, 32
 ; RV64-NEXT:    add a2, a2, a5
-; RV64-NEXT:    srli s0, a2, 56
+; RV64-NEXT:    srli t6, a2, 56
 ; RV64-NEXT:  .LBB3_6: # %_udiv-special-cases
+; RV64-NEXT:    addi sp, sp, -192
+; RV64-NEXT:    sd s0, 184(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s1, 176(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s2, 168(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s3, 160(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s4, 152(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s5, 144(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s6, 136(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    ld a5, 0(a1)
 ; RV64-NEXT:    ld a2, 8(a1)
 ; RV64-NEXT:    ld s2, 16(a1)
-; RV64-NEXT:    or a1, t4, t3
-; RV64-NEXT:    addi s1, s0, 128
-; RV64-NEXT:    bnez a1, .LBB3_8
+; RV64-NEXT:    or t5, t5, a6
+; RV64-NEXT:    addi s0, t6, 128
+; RV64-NEXT:    bnez t5, .LBB3_8
 ; RV64-NEXT:  # %bb.7: # %_udiv-special-cases
-; RV64-NEXT:    mv t2, s1
+; RV64-NEXT:    mv t2, s0
 ; RV64-NEXT:  .LBB3_8: # %_udiv-special-cases
-; RV64-NEXT:    snez s3, a1
-; RV64-NEXT:    srli a1, a2, 1
-; RV64-NEXT:    slli t3, s2, 63
-; RV64-NEXT:    slli t4, a2, 63
-; RV64-NEXT:    or a1, t3, a1
-; RV64-NEXT:    srli t3, a5, 1
-; RV64-NEXT:    or t6, t3, t4
-; RV64-NEXT:    bnez a1, .LBB3_10
+; RV64-NEXT:    andi a6, s2, 1
+; RV64-NEXT:    andi a1, t4, 1
+; RV64-NEXT:    snez s1, t5
+; RV64-NEXT:    srli t4, a2, 1
+; RV64-NEXT:    slli s2, s2, 63
+; RV64-NEXT:    slli t5, a2, 63
+; RV64-NEXT:    or t4, s2, t4
+; RV64-NEXT:    srli s2, a5, 1
+; RV64-NEXT:    or t5, s2, t5
+; RV64-NEXT:    bnez t4, .LBB3_10
 ; RV64-NEXT:  # %bb.9: # %_udiv-special-cases
-; RV64-NEXT:    srli t3, t6, 1
-; RV64-NEXT:    or t3, t6, t3
-; RV64-NEXT:    srli t4, t3, 2
-; RV64-NEXT:    or t3, t3, t4
-; RV64-NEXT:    srli t4, t3, 4
-; RV64-NEXT:    or t3, t3, t4
-; RV64-NEXT:    srli t4, t3, 8
-; RV64-NEXT:    or t3, t3, t4
-; RV64-NEXT:    srli t4, t3, 16
-; RV64-NEXT:    or t3, t3, t4
-; RV64-NEXT:    srli t4, t3, 32
-; RV64-NEXT:    or t3, t3, t4
-; RV64-NEXT:    not t3, t3
-; RV64-NEXT:    srli t4, t3, 1
-; RV64-NEXT:    and t4, t4, t0
-; RV64-NEXT:    sub t3, t3, t4
-; RV64-NEXT:    and t4, t3, a7
-; RV64-NEXT:    srli t3, t3, 2
-; RV64-NEXT:    and t3, t3, a7
-; RV64-NEXT:    add t3, t4, t3
-; RV64-NEXT:    srli t4, t3, 4
-; RV64-NEXT:    add t3, t3, t4
-; RV64-NEXT:    and t3, t3, a6
-; RV64-NEXT:    slli t4, t3, 8
-; RV64-NEXT:    add t3, t3, t4
-; RV64-NEXT:    slli t4, t3, 16
-; RV64-NEXT:    add t3, t3, t4
-; RV64-NEXT:    slli t4, t3, 32
-; RV64-NEXT:    add t3, t3, t4
-; RV64-NEXT:    srli t3, t3, 56
-; RV64-NEXT:    addi s4, t3, 64
+; RV64-NEXT:    srli s2, t5, 1
+; RV64-NEXT:    or s2, t5, s2
+; RV64-NEXT:    srli s3, s2, 2
+; RV64-NEXT:    or s2, s2, s3
+; RV64-NEXT:    srli s3, s2, 4
+; RV64-NEXT:    or s2, s2, s3
+; RV64-NEXT:    srli s3, s2, 8
+; RV64-NEXT:    or s2, s2, s3
+; RV64-NEXT:    srli s3, s2, 16
+; RV64-NEXT:    or s2, s2, s3
+; RV64-NEXT:    srli s3, s2, 32
+; RV64-NEXT:    or s2, s2, s3
+; RV64-NEXT:    not s2, s2
+; RV64-NEXT:    srli s3, s2, 1
+; RV64-NEXT:    and s3, s3, t1
+; RV64-NEXT:    sub s2, s2, s3
+; RV64-NEXT:    and s3, s2, t0
+; RV64-NEXT:    srli s2, s2, 2
+; RV64-NEXT:    and s2, s2, t0
+; RV64-NEXT:    add s2, s3, s2
+; RV64-NEXT:    srli s3, s2, 4
+; RV64-NEXT:    add s2, s2, s3
+; RV64-NEXT:    and s2, s2, a7
+; RV64-NEXT:    slli s3, s2, 8
+; RV64-NEXT:    add s2, s2, s3
+; RV64-NEXT:    slli s3, s2, 16
+; RV64-NEXT:    add s2, s2, s3
+; RV64-NEXT:    slli s3, s2, 32
+; RV64-NEXT:    add s2, s2, s3
+; RV64-NEXT:    srli s2, s2, 56
+; RV64-NEXT:    addi s2, s2, 64
 ; RV64-NEXT:    j .LBB3_11
 ; RV64-NEXT:  .LBB3_10:
-; RV64-NEXT:    srli t3, a1, 1
-; RV64-NEXT:    or t3, a1, t3
-; RV64-NEXT:    srli t4, t3, 2
-; RV64-NEXT:    or t3, t3, t4
-; RV64-NEXT:    srli t4, t3, 4
-; RV64-NEXT:    or t3, t3, t4
-; RV64-NEXT:    srli t4, t3, 8
-; RV64-NEXT:    or t3, t3, t4
-; RV64-NEXT:    srli t4, t3, 16
-; RV64-NEXT:    or t3, t3, t4
-; RV64-NEXT:    srli t4, t3, 32
-; RV64-NEXT:    or t3, t3, t4
-; RV64-NEXT:    not t3, t3
-; RV64-NEXT:    srli t4, t3, 1
-; RV64-NEXT:    and t4, t4, t0
-; RV64-NEXT:    sub t3, t3, t4
-; RV64-NEXT:    and t4, t3, a7
-; RV64-NEXT:    srli t3, t3, 2
-; RV64-NEXT:    and t3, t3, a7
-; RV64-NEXT:    add t3, t4, t3
-; RV64-NEXT:    srli t4, t3, 4
-; RV64-NEXT:    add t3, t3, t4
-; RV64-NEXT:    and t3, t3, a6
-; RV64-NEXT:    slli t4, t3, 8
-; RV64-NEXT:    add t3, t3, t4
-; RV64-NEXT:    slli t4, t3, 16
-; RV64-NEXT:    add t3, t3, t4
-; RV64-NEXT:    slli t4, t3, 32
-; RV64-NEXT:    add t3, t3, t4
-; RV64-NEXT:    srli s4, t3, 56
+; RV64-NEXT:    srli s2, t4, 1
+; RV64-NEXT:    or s2, t4, s2
+; RV64-NEXT:    srli s3, s2, 2
+; RV64-NEXT:    or s2, s2, s3
+; RV64-NEXT:    srli s3, s2, 4
+; RV64-NEXT:    or s2, s2, s3
+; RV64-NEXT:    srli s3, s2, 8
+; RV64-NEXT:    or s2, s2, s3
+; RV64-NEXT:    srli s3, s2, 16
+; RV64-NEXT:    or s2, s2, s3
+; RV64-NEXT:    srli s3, s2, 32
+; RV64-NEXT:    or s2, s2, s3
+; RV64-NEXT:    not s2, s2
+; RV64-NEXT:    srli s3, s2, 1
+; RV64-NEXT:    and s3, s3, t1
+; RV64-NEXT:    sub s2, s2, s3
+; RV64-NEXT:    and s3, s2, t0
+; RV64-NEXT:    srli s2, s2, 2
+; RV64-NEXT:    and s2, s2, t0
+; RV64-NEXT:    add s2, s3, s2
+; RV64-NEXT:    srli s3, s2, 4
+; RV64-NEXT:    add s2, s2, s3
+; RV64-NEXT:    and s2, s2, a7
+; RV64-NEXT:    slli s3, s2, 8
+; RV64-NEXT:    add s2, s2, s3
+; RV64-NEXT:    slli s3, s2, 16
+; RV64-NEXT:    add s2, s2, s3
+; RV64-NEXT:    slli s3, s2, 32
+; RV64-NEXT:    add s2, s2, s3
+; RV64-NEXT:    srli s2, s2, 56
 ; RV64-NEXT:  .LBB3_11: # %_udiv-special-cases
-; RV64-NEXT:    andi t4, s2, 1
-; RV64-NEXT:    andi t1, t1, 1
-; RV64-NEXT:    or t3, a3, a4
-; RV64-NEXT:    or s2, a5, a2
-; RV64-NEXT:    sltu s0, s1, s0
-; RV64-NEXT:    slli s1, a5, 63
-; RV64-NEXT:    addi s3, s3, -1
-; RV64-NEXT:    beqz s1, .LBB3_13
+; RV64-NEXT:    or s3, a3, a1
+; RV64-NEXT:    or s4, a5, a6
+; RV64-NEXT:    sltu t6, s0, t6
+; RV64-NEXT:    slli s0, a5, 63
+; RV64-NEXT:    addi s1, s1, -1
+; RV64-NEXT:    beqz s0, .LBB3_13
 ; RV64-NEXT:  # %bb.12:
-; RV64-NEXT:    srli t5, s1, 1
-; RV64-NEXT:    or t5, s1, t5
-; RV64-NEXT:    srli s1, t5, 2
-; RV64-NEXT:    or t5, t5, s1
-; RV64-NEXT:    srli s1, t5, 4
-; RV64-NEXT:    or t5, t5, s1
-; RV64-NEXT:    srli s1, t5, 8
-; RV64-NEXT:    or t5, t5, s1
-; RV64-NEXT:    srli s1, t5, 16
-; RV64-NEXT:    or t5, t5, s1
-; RV64-NEXT:    srli s1, t5, 32
-; RV64-NEXT:    or t5, t5, s1
-; RV64-NEXT:    not t5, t5
-; RV64-NEXT:    srli s1, t5, 1
-; RV64-NEXT:    and t0, s1, t0
-; RV64-NEXT:    sub t0, t5, t0
-; RV64-NEXT:    and t5, t0, a7
-; RV64-NEXT:    srli t0, t0, 2
+; RV64-NEXT:    srli t3, s0, 1
+; RV64-NEXT:    or t3, s0, t3
+; RV64-NEXT:    srli s0, t3, 2
+; RV64-NEXT:    or t3, t3, s0
+; RV64-NEXT:    srli s0, t3, 4
+; RV64-NEXT:    or t3, t3, s0
+; RV64-NEXT:    srli s0, t3, 8
+; RV64-NEXT:    or t3, t3, s0
+; RV64-NEXT:    srli s0, t3, 16
+; RV64-NEXT:    or t3, t3, s0
+; RV64-NEXT:    srli s0, t3, 32
+; RV64-NEXT:    or t3, t3, s0
+; RV64-NEXT:    not t3, t3
+; RV64-NEXT:    srli s0, t3, 1
+; RV64-NEXT:    and t1, s0, t1
+; RV64-NEXT:    sub t1, t3, t1
+; RV64-NEXT:    and t3, t1, t0
+; RV64-NEXT:    srli t1, t1, 2
+; RV64-NEXT:    and t0, t1, t0
+; RV64-NEXT:    add t0, t3, t0
+; RV64-NEXT:    srli t1, t0, 4
+; RV64-NEXT:    add t0, t0, t1
 ; RV64-NEXT:    and a7, t0, a7
-; RV64-NEXT:    add a7, t5, a7
-; RV64-NEXT:    srli t0, a7, 4
+; RV64-NEXT:    slli t0, a7, 8
+; RV64-NEXT:    add a7, a7, t0
+; RV64-NEXT:    slli t0, a7, 16
 ; RV64-NEXT:    add a7, a7, t0
-; RV64-NEXT:    and a6, a7, a6
-; RV64-NEXT:    slli a7, a6, 8
-; RV64-NEXT:    add a6, a6, a7
-; RV64-NEXT:    slli a7, a6, 16
-; RV64-NEXT:    add a6, a6, a7
-; RV64-NEXT:    slli a7, a6, 32
-; RV64-NEXT:    add a6, a6, a7
-; RV64-NEXT:    srli t5, a6, 56
+; RV64-NEXT:    slli t0, a7, 32
+; RV64-NEXT:    add a7, a7, t0
+; RV64-NEXT:    srli t3, a7, 56
 ; RV64-NEXT:  .LBB3_13: # %_udiv-special-cases
-; RV64-NEXT:    or t0, t3, t1
-; RV64-NEXT:    or a6, s2, t4
-; RV64-NEXT:    and a7, s3, s0
-; RV64-NEXT:    or t6, t6, a1
-; RV64-NEXT:    addi s0, t5, 128
-; RV64-NEXT:    bnez t6, .LBB3_15
+; RV64-NEXT:    or t1, s3, a4
+; RV64-NEXT:    or t0, s4, a2
+; RV64-NEXT:    and a7, s1, t6
+; RV64-NEXT:    or t4, t5, t4
+; RV64-NEXT:    addi t5, t3, 128
+; RV64-NEXT:    bnez t4, .LBB3_15
 ; RV64-NEXT:  # %bb.14: # %_udiv-special-cases
-; RV64-NEXT:    mv s4, s0
+; RV64-NEXT:    mv s2, t5
 ; RV64-NEXT:  .LBB3_15: # %_udiv-special-cases
-; RV64-NEXT:    seqz a1, t0
-; RV64-NEXT:    sltu t0, s0, t5
-; RV64-NEXT:    snez t5, t6
-; RV64-NEXT:    addi t5, t5, -1
-; RV64-NEXT:    and t0, t5, t0
-; RV64-NEXT:    sltu t5, t2, s4
-; RV64-NEXT:    seqz a6, a6
+; RV64-NEXT:    seqz t1, t1
+; RV64-NEXT:    sltu t3, t5, t3
+; RV64-NEXT:    snez t4, t4
+; RV64-NEXT:    addi t4, t4, -1
+; RV64-NEXT:    and t4, t4, t3
+; RV64-NEXT:    sltu t5, t2, s2
+; RV64-NEXT:    seqz t0, t0
 ; RV64-NEXT:    mv t6, t5
-; RV64-NEXT:    beq a7, t0, .LBB3_17
+; RV64-NEXT:    beq a7, t4, .LBB3_17
 ; RV64-NEXT:  # %bb.16: # %_udiv-special-cases
-; RV64-NEXT:    sltu t6, a7, t0
+; RV64-NEXT:    sltu t6, a7, t4
 ; RV64-NEXT:  .LBB3_17: # %_udiv-special-cases
-; RV64-NEXT:    or a1, a1, a6
-; RV64-NEXT:    andi a6, t6, 1
-; RV64-NEXT:    sub a7, a7, t0
-; RV64-NEXT:    sub t0, a7, t5
-; RV64-NEXT:    sub a7, t2, s4
-; RV64-NEXT:    beqz a6, .LBB3_19
+; RV64-NEXT:    or t3, t1, t0
+; RV64-NEXT:    andi t0, t6, 1
+; RV64-NEXT:    sub a7, a7, t4
+; RV64-NEXT:    sub t4, a7, t5
+; RV64-NEXT:    sub t1, t2, s2
+; RV64-NEXT:    beqz t0, .LBB3_19
 ; RV64-NEXT:  # %bb.18: # %_udiv-special-cases
-; RV64-NEXT:    mv t2, a6
+; RV64-NEXT:    mv a7, t0
 ; RV64-NEXT:    j .LBB3_20
 ; RV64-NEXT:  .LBB3_19:
-; RV64-NEXT:    sltiu t2, a7, 129
-; RV64-NEXT:    xori t2, t2, 1
-; RV64-NEXT:    snez t5, t0
-; RV64-NEXT:    or t2, t2, t5
+; RV64-NEXT:    sltiu a7, t1, 129
+; RV64-NEXT:    xori a7, a7, 1
+; RV64-NEXT:    snez t2, t4
+; RV64-NEXT:    or a7, a7, t2
 ; RV64-NEXT:  .LBB3_20: # %_udiv-special-cases
-; RV64-NEXT:    or t6, a1, t2
-; RV64-NEXT:    addi t5, t6, -1
-; RV64-NEXT:    and a1, t4, t5
-; RV64-NEXT:    and t2, t5, a2
-; RV64-NEXT:    and t5, t5, a5
-; RV64-NEXT:    bnez t6, .LBB3_29
+; RV64-NEXT:    or t5, t3, a7
+; RV64-NEXT:    addi a7, t5, -1
+; RV64-NEXT:    and t3, a6, a7
+; RV64-NEXT:    and t2, a7, a2
+; RV64-NEXT:    and a7, a7, a5
+; RV64-NEXT:    bnez t5, .LBB3_30
 ; RV64-NEXT:  # %bb.21: # %_udiv-special-cases
-; RV64-NEXT:    xori t6, a7, 128
-; RV64-NEXT:    or t6, t6, a6
-; RV64-NEXT:    or t6, t6, t0
-; RV64-NEXT:    beqz t6, .LBB3_29
+; RV64-NEXT:    xori t5, t1, 128
+; RV64-NEXT:    or t5, t5, t0
+; RV64-NEXT:    or t5, t5, t4
+; RV64-NEXT:    beqz t5, .LBB3_30
 ; RV64-NEXT:  # %bb.22: # %udiv-bb1
-; RV64-NEXT:    addi a1, a7, 1
+; RV64-NEXT:    addi a7, t1, 1
 ; RV64-NEXT:    sd zero, 64(sp)
 ; RV64-NEXT:    sd zero, 72(sp)
 ; RV64-NEXT:    sd zero, 80(sp)
 ; RV64-NEXT:    sd zero, 88(sp)
 ; RV64-NEXT:    sd a5, 96(sp)
 ; RV64-NEXT:    sd a2, 104(sp)
-; RV64-NEXT:    sd t4, 112(sp)
+; RV64-NEXT:    sd a6, 112(sp)
 ; RV64-NEXT:    li t2, 128
-; RV64-NEXT:    addi t5, sp, 96
-; RV64-NEXT:    neg s1, a7
-; RV64-NEXT:    seqz t6, a1
-; RV64-NEXT:    sub a7, t2, a7
-; RV64-NEXT:    add t0, t0, t6
-; RV64-NEXT:    andi t2, a7, 63
-; RV64-NEXT:    srli a7, a7, 3
-; RV64-NEXT:    or t6, a1, t0
-; RV64-NEXT:    xori s2, t2, 63
-; RV64-NEXT:    andi a7, a7, 24
-; RV64-NEXT:    seqz t2, t6
-; RV64-NEXT:    sub s3, t5, a7
-; RV64-NEXT:    add a6, a6, t2
-; RV64-NEXT:    ld a7, 0(s3)
-; RV64-NEXT:    ld s4, 8(s3)
-; RV64-NEXT:    andi a6, a6, 1
-; RV64-NEXT:    or t6, t6, a6
-; RV64-NEXT:    srli t2, a7, 1
-; RV64-NEXT:    sll t5, s4, s1
-; RV64-NEXT:    srl t2, t2, s2
-; RV64-NEXT:    or t5, t5, t2
-; RV64-NEXT:    sll t2, a7, s1
-; RV64-NEXT:    li a7, 0
-; RV64-NEXT:    beqz t6, .LBB3_28
+; RV64-NEXT:    addi t3, sp, 96
+; RV64-NEXT:    neg s0, t1
+; RV64-NEXT:    seqz t5, a7
+; RV64-NEXT:    sub t1, t2, t1
+; RV64-NEXT:    add t4, t4, t5
+; RV64-NEXT:    andi t2, t1, 63
+; RV64-NEXT:    srli t1, t1, 3
+; RV64-NEXT:    or t5, a7, t4
+; RV64-NEXT:    xori s1, t2, 63
+; RV64-NEXT:    andi t1, t1, 24
+; RV64-NEXT:    seqz t2, t5
+; RV64-NEXT:    sub s2, t3, t1
+; RV64-NEXT:    add t0, t0, t2
+; RV64-NEXT:    ld t3, 0(s2)
+; RV64-NEXT:    ld s3, 8(s2)
+; RV64-NEXT:    andi t1, t0, 1
+; RV64-NEXT:    or t5, t5, t1
+; RV64-NEXT:    srli t0, t3, 1
+; RV64-NEXT:    sll t2, s3, s0
+; RV64-NEXT:    srl t0, t0, s1
+; RV64-NEXT:    or t2, t2, t0
+; RV64-NEXT:    sll t0, t3, s0
+; RV64-NEXT:    li t3, 0
+; RV64-NEXT:    beqz t5, .LBB3_28
 ; RV64-NEXT:  # %bb.23: # %udiv-preheader
+; RV64-NEXT:    li t5, 0
 ; RV64-NEXT:    li t6, 0
-; RV64-NEXT:    li s0, 0
-; RV64-NEXT:    srli s4, s4, 1
-; RV64-NEXT:    ld s3, 16(s3)
+; RV64-NEXT:    srli s3, s3, 1
+; RV64-NEXT:    ld s2, 16(s2)
 ; RV64-NEXT:    sd zero, 32(sp)
 ; RV64-NEXT:    sd zero, 40(sp)
 ; RV64-NEXT:    sd zero, 48(sp)
 ; RV64-NEXT:    sd zero, 56(sp)
 ; RV64-NEXT:    sd a5, 0(sp)
 ; RV64-NEXT:    sd a2, 8(sp)
-; RV64-NEXT:    sd t4, 16(sp)
+; RV64-NEXT:    sd a6, 16(sp)
 ; RV64-NEXT:    sd zero, 24(sp)
-; RV64-NEXT:    srli a2, a1, 3
-; RV64-NEXT:    srl a5, s4, s2
-; RV64-NEXT:    mv t4, sp
-; RV64-NEXT:    snez t3, t3
+; RV64-NEXT:    srli a2, a7, 3
+; RV64-NEXT:    mv a5, sp
+; RV64-NEXT:    srl a6, s3, s1
+; RV64-NEXT:    or s1, a3, a4
 ; RV64-NEXT:    andi a2, a2, 24
-; RV64-NEXT:    add t1, t1, t3
-; RV64-NEXT:    add a2, t4, a2
-; RV64-NEXT:    ld t4, 0(a2)
-; RV64-NEXT:    ld t3, 8(a2)
+; RV64-NEXT:    snez s1, s1
+; RV64-NEXT:    add a2, a5, a2
+; RV64-NEXT:    add a5, a1, s1
+; RV64-NEXT:    ld s4, 0(a2)
+; RV64-NEXT:    ld a1, 8(a2)
 ; RV64-NEXT:    ld a2, 16(a2)
-; RV64-NEXT:    sll s1, s3, s1
-; RV64-NEXT:    andi s2, a1, 63
-; RV64-NEXT:    xori s2, s2, 63
-; RV64-NEXT:    or s1, s1, a5
+; RV64-NEXT:    sll s0, s2, s0
+; RV64-NEXT:    andi s1, a7, 63
+; RV64-NEXT:    xori s1, s1, 63
+; RV64-NEXT:    or a6, s0, a6
 ; RV64-NEXT:    slli a2, a2, 1
-; RV64-NEXT:    slli a5, t3, 1
-; RV64-NEXT:    sll a2, a2, s2
-; RV64-NEXT:    sll s2, a5, s2
-; RV64-NEXT:    srl a5, t3, a1
-; RV64-NEXT:    or t3, a5, a2
-; RV64-NEXT:    seqz a2, a3
-; RV64-NEXT:    sub a2, a4, a2
-; RV64-NEXT:    addi a5, t1, 1
-; RV64-NEXT:    andi a5, a5, 1
-; RV64-NEXT:    andi s1, s1, 1
-; RV64-NEXT:    srl t1, t4, a1
-; RV64-NEXT:    or t4, t1, s2
-; RV64-NEXT:    addi t1, a3, -1
+; RV64-NEXT:    slli s0, a1, 1
+; RV64-NEXT:    sll a2, a2, s1
+; RV64-NEXT:    sll s0, s0, s1
+; RV64-NEXT:    srl s1, a1, a7
+; RV64-NEXT:    or s1, s1, a2
+; RV64-NEXT:    seqz a1, a3
+; RV64-NEXT:    sub a1, a4, a1
+; RV64-NEXT:    addi a2, a5, 1
+; RV64-NEXT:    andi a2, a2, 1
+; RV64-NEXT:    andi s3, a6, 1
+; RV64-NEXT:    srl a5, s4, a7
+; RV64-NEXT:    or s2, a5, s0
+; RV64-NEXT:    addi a5, a3, -1
 ; RV64-NEXT:    j .LBB3_26
 ; RV64-NEXT:  .LBB3_24: # %udiv-do-while
 ; RV64-NEXT:    # in Loop: Header=BB3_26 Depth=1
-; RV64-NEXT:    sltu s1, a2, s2
+; RV64-NEXT:    sltu a6, a1, s4
 ; RV64-NEXT:  .LBB3_25: # %udiv-do-while
 ; RV64-NEXT:    # in Loop: Header=BB3_26 Depth=1
-; RV64-NEXT:    srli t3, t3, 63
-; RV64-NEXT:    sub t3, a5, t3
-; RV64-NEXT:    sub t3, t3, s1
-; RV64-NEXT:    slli t3, t3, 63
-; RV64-NEXT:    srai t3, t3, 63
-; RV64-NEXT:    and s1, t3, a4
-; RV64-NEXT:    srli s3, t2, 63
-; RV64-NEXT:    slli s4, t5, 1
-; RV64-NEXT:    srli t5, t5, 63
-; RV64-NEXT:    slli t2, t2, 1
-; RV64-NEXT:    sub s2, s2, s1
-; RV64-NEXT:    and s5, t3, a3
-; RV64-NEXT:    or s1, s4, s3
-; RV64-NEXT:    seqz s3, a1
-; RV64-NEXT:    or t2, a7, t2
-; RV64-NEXT:    or s4, a1, t0
-; RV64-NEXT:    addi a1, a1, -1
-; RV64-NEXT:    or s0, s0, t5
-; RV64-NEXT:    andi a7, t3, 1
-; RV64-NEXT:    sltu t3, t4, s5
-; RV64-NEXT:    sub t0, t0, s3
-; RV64-NEXT:    snez s3, s4
-; RV64-NEXT:    or t5, t6, s1
-; RV64-NEXT:    andi s1, s0, 1
-; RV64-NEXT:    sub t3, s2, t3
-; RV64-NEXT:    add a6, a6, s3
-; RV64-NEXT:    addi a6, a6, 1
-; RV64-NEXT:    andi a6, a6, 1
-; RV64-NEXT:    or t6, a1, t0
-; RV64-NEXT:    or s2, t6, a6
-; RV64-NEXT:    sub t4, t4, s5
-; RV64-NEXT:    li t6, 0
+; RV64-NEXT:    srli s1, s1, 63
+; RV64-NEXT:    sub s0, a2, s1
+; RV64-NEXT:    sub a6, s0, a6
+; RV64-NEXT:    slli a6, a6, 63
+; RV64-NEXT:    srai s1, a6, 63
+; RV64-NEXT:    and s3, s1, a4
+; RV64-NEXT:    li a6, 0
 ; RV64-NEXT:    li s0, 0
-; RV64-NEXT:    beqz s2, .LBB3_28
+; RV64-NEXT:    srli s5, t0, 63
+; RV64-NEXT:    sub s4, s4, s3
+; RV64-NEXT:    slli s3, t2, 1
+; RV64-NEXT:    or s3, s3, s5
+; RV64-NEXT:    srli t2, t2, 63
+; RV64-NEXT:    slli t0, t0, 1
+; RV64-NEXT:    or t0, t3, t0
+; RV64-NEXT:    seqz t3, a7
+; RV64-NEXT:    or t6, t6, t2
+; RV64-NEXT:    or s5, a7, t4
+; RV64-NEXT:    sub t4, t4, t3
+; RV64-NEXT:    and s6, s1, a3
+; RV64-NEXT:    addi a7, a7, -1
+; RV64-NEXT:    andi t3, s1, 1
+; RV64-NEXT:    or t2, t5, s3
+; RV64-NEXT:    sltu t5, s2, s6
+; RV64-NEXT:    snez s5, s5
+; RV64-NEXT:    andi s3, t6, 1
+; RV64-NEXT:    sub s1, s4, t5
+; RV64-NEXT:    add t1, t1, s5
+; RV64-NEXT:    addi t1, t1, 1
+; RV64-NEXT:    andi t1, t1, 1
+; RV64-NEXT:    or t5, a7, t4
+; RV64-NEXT:    or s4, t5, t1
+; RV64-NEXT:    sub s2, s2, s6
+; RV64-NEXT:    li t5, 0
+; RV64-NEXT:    li t6, 0
+; RV64-NEXT:    beqz s4, .LBB3_29
 ; RV64-NEXT:  .LBB3_26: # %udiv-do-while
 ; RV64-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV64-NEXT:    srli s2, t4, 63
-; RV64-NEXT:    slli s3, t3, 1
-; RV64-NEXT:    slli t4, t4, 1
-; RV64-NEXT:    or s2, s3, s2
-; RV64-NEXT:    andi s1, s1, 1
-; RV64-NEXT:    or t4, t4, s1
-; RV64-NEXT:    bne a2, s2, .LBB3_24
+; RV64-NEXT:    srli a6, s2, 63
+; RV64-NEXT:    slli s0, s1, 1
+; RV64-NEXT:    slli s2, s2, 1
+; RV64-NEXT:    or s4, s0, a6
+; RV64-NEXT:    andi a6, s3, 1
+; RV64-NEXT:    or s2, s2, a6
+; RV64-NEXT:    bne a1, s4, .LBB3_24
 ; RV64-NEXT:  # %bb.27: # in Loop: Header=BB3_26 Depth=1
-; RV64-NEXT:    sltu s1, t1, t4
+; RV64-NEXT:    sltu a6, a5, s2
 ; RV64-NEXT:    j .LBB3_25
-; RV64-NEXT:  .LBB3_28: # %udiv-loop-exit
-; RV64-NEXT:    srli a2, t2, 63
-; RV64-NEXT:    slli a3, t5, 1
-; RV64-NEXT:    srli a1, t5, 63
-; RV64-NEXT:    slli a4, t2, 1
-; RV64-NEXT:    or t2, a3, a2
-; RV64-NEXT:    or t5, a7, a4
-; RV64-NEXT:  .LBB3_29: # %udiv-end
-; RV64-NEXT:    sd t5, 0(a0)
+; RV64-NEXT:  .LBB3_28:
+; RV64-NEXT:    li a6, 0
+; RV64-NEXT:    li s0, 0
+; RV64-NEXT:  .LBB3_29: # %udiv-loop-exit
+; RV64-NEXT:    srli a1, t0, 63
+; RV64-NEXT:    slli a2, t2, 1
+; RV64-NEXT:    srli a3, t2, 63
+; RV64-NEXT:    slli t0, t0, 1
+; RV64-NEXT:    or a7, t3, t0
+; RV64-NEXT:    or a1, a6, a1
+; RV64-NEXT:    or a3, s0, a3
+; RV64-NEXT:    or t2, a1, a2
+; RV64-NEXT:    andi t3, a3, 1
+; RV64-NEXT:  .LBB3_30: # %udiv-end
+; RV64-NEXT:    andi a1, t3, 1
+; RV64-NEXT:    sd a7, 0(a0)
 ; RV64-NEXT:    sd t2, 8(a0)
 ; RV64-NEXT:    sb a1, 16(a0)
-; RV64-NEXT:    ld s0, 168(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld s1, 160(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld s2, 152(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld s3, 144(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld s4, 136(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld s5, 128(sp) # 8-byte Folded Reload
-; RV64-NEXT:    addi sp, sp, 176
+; RV64-NEXT:    ld s0, 184(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s1, 176(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s2, 168(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s3, 160(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s4, 152(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s5, 144(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s6, 136(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 192
 ; RV64-NEXT:    ret
   %res = udiv i129 %x, %y
   ret i129 %res
diff --git a/llvm/test/CodeGen/X86/div-rem-pair-recomposition-signed.ll b/llvm/test/CodeGen/X86/div-rem-pair-recomposition-signed.ll
index 1a696d546a1a3..455b72d16a075 100644
--- a/llvm/test/CodeGen/X86/div-rem-pair-recomposition-signed.ll
+++ b/llvm/test/CodeGen/X86/div-rem-pair-recomposition-signed.ll
@@ -152,14 +152,14 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    andl $-16, %esp
 ; X86-NEXT:    subl $176, %esp
-; X86-NEXT:    movl 32(%ebp), %ecx
-; X86-NEXT:    movl 36(%ebp), %edx
-; X86-NEXT:    movl %edx, %eax
+; X86-NEXT:    movl 32(%ebp), %edx
+; X86-NEXT:    movl 36(%ebp), %ecx
+; X86-NEXT:    movl %ecx, %eax
 ; X86-NEXT:    sarl $31, %eax
-; X86-NEXT:    xorl %eax, %edx
-; X86-NEXT:    movl %edx, %edi
 ; X86-NEXT:    xorl %eax, %ecx
-; X86-NEXT:    movl %ecx, %esi
+; X86-NEXT:    movl %ecx, %edi
+; X86-NEXT:    xorl %eax, %edx
+; X86-NEXT:    movl %edx, %esi
 ; X86-NEXT:    movl 28(%ebp), %edx
 ; X86-NEXT:    xorl %eax, %edx
 ; X86-NEXT:    movl 24(%ebp), %ecx
@@ -172,26 +172,26 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
 ; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    sbbl %eax, %edi
 ; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    movl 52(%ebp), %edi
-; X86-NEXT:    movl %edi, %edx
+; X86-NEXT:    movl 52(%ebp), %esi
+; X86-NEXT:    movl %esi, %edx
 ; X86-NEXT:    sarl $31, %edx
-; X86-NEXT:    xorl %edx, %edi
+; X86-NEXT:    xorl %edx, %esi
 ; X86-NEXT:    movl 48(%ebp), %ecx
 ; X86-NEXT:    xorl %edx, %ecx
 ; X86-NEXT:    movl 44(%ebp), %ebx
 ; X86-NEXT:    xorl %edx, %ebx
-; X86-NEXT:    movl 40(%ebp), %esi
-; X86-NEXT:    xorl %edx, %esi
-; X86-NEXT:    subl %edx, %esi
+; X86-NEXT:    movl 40(%ebp), %edi
+; X86-NEXT:    xorl %edx, %edi
+; X86-NEXT:    subl %edx, %edi
 ; X86-NEXT:    sbbl %edx, %ebx
 ; X86-NEXT:    sbbl %edx, %ecx
 ; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    sbbl %edx, %edi
+; X86-NEXT:    sbbl %edx, %esi
 ; X86-NEXT:    xorl %eax, %edx
 ; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    movl %ebx, %eax
-; X86-NEXT:    orl %edi, %eax
-; X86-NEXT:    movl %esi, %ecx
+; X86-NEXT:    orl %esi, %eax
+; X86-NEXT:    movl %edi, %ecx
 ; X86-NEXT:    orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
 ; X86-NEXT:    orl %eax, %ecx
 ; X86-NEXT:    sete %cl
@@ -203,89 +203,92 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
 ; X86-NEXT:    sete %al
 ; X86-NEXT:    orb %cl, %al
 ; X86-NEXT:    movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
-; X86-NEXT:    bsrl %edi, %edx
+; X86-NEXT:    bsrl %esi, %edx
 ; X86-NEXT:    xorl $31, %edx
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
 ; X86-NEXT:    bsrl %eax, %ecx
 ; X86-NEXT:    xorl $31, %ecx
 ; X86-NEXT:    orl $32, %ecx
-; X86-NEXT:    testl %edi, %edi
+; X86-NEXT:    testl %esi, %esi
 ; X86-NEXT:    cmovnel %edx, %ecx
 ; X86-NEXT:    bsrl %ebx, %edx
 ; X86-NEXT:    xorl $31, %edx
-; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    bsrl %esi, %esi
-; X86-NEXT:    xorl $31, %esi
-; X86-NEXT:    orl $32, %esi
+; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    bsrl %edi, %edi
+; X86-NEXT:    xorl $31, %edi
+; X86-NEXT:    orl $32, %edi
 ; X86-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    testl %ebx, %ebx
-; X86-NEXT:    movl %esi, %ebx
-; X86-NEXT:    cmovnel %edx, %ebx
-; X86-NEXT:    orl $64, %ebx
+; X86-NEXT:    cmovnel %edx, %edi
+; X86-NEXT:    orl $64, %edi
 ; X86-NEXT:    movl %eax, %edx
-; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    orl %edi, %edx
-; X86-NEXT:    cmovnel %ecx, %ebx
+; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    orl %esi, %edx
+; X86-NEXT:    cmovnel %ecx, %edi
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
 ; X86-NEXT:    bsrl %eax, %edx
 ; X86-NEXT:    xorl $31, %edx
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT:    bsrl %edi, %ecx
+; X86-NEXT:    bsrl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
 ; X86-NEXT:    xorl $31, %ecx
 ; X86-NEXT:    orl $32, %ecx
 ; X86-NEXT:    testl %eax, %eax
 ; X86-NEXT:    cmovnel %edx, %ecx
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT:    bsrl %eax, %esi
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT:    bsrl %ebx, %esi
 ; X86-NEXT:    xorl $31, %esi
 ; X86-NEXT:    bsrl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
 ; X86-NEXT:    xorl $31, %edx
 ; X86-NEXT:    orl $32, %edx
-; X86-NEXT:    testl %eax, %eax
+; X86-NEXT:    testl %ebx, %ebx
 ; X86-NEXT:    cmovnel %esi, %edx
 ; X86-NEXT:    orl $64, %edx
-; X86-NEXT:    movl %edi, %esi
-; X86-NEXT:    orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X86-NEXT:    orl %eax, %esi
 ; X86-NEXT:    cmovnel %ecx, %edx
-; X86-NEXT:    xorl %edi, %edi
-; X86-NEXT:    subl %edx, %ebx
-; X86-NEXT:    movl $0, %eax
-; X86-NEXT:    sbbl %eax, %eax
+; X86-NEXT:    xorl %ebx, %ebx
+; X86-NEXT:    subl %edx, %edi
 ; X86-NEXT:    movl $0, %edx
 ; X86-NEXT:    sbbl %edx, %edx
 ; X86-NEXT:    movl $0, %esi
 ; X86-NEXT:    sbbl %esi, %esi
+; X86-NEXT:    movl $0, %eax
+; X86-NEXT:    sbbl %eax, %eax
 ; X86-NEXT:    movl $127, %ecx
-; X86-NEXT:    cmpl %ebx, %ecx
-; X86-NEXT:    movl $0, %ecx
-; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    sbbl %eax, %ecx
+; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    cmpl %edi, %ecx
 ; X86-NEXT:    movl $0, %ecx
-; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    sbbl %edx, %ecx
 ; X86-NEXT:    movl $0, %ecx
 ; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    sbbl %esi, %ecx
+; X86-NEXT:    movl $0, %ecx
+; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    sbbl %eax, %ecx
 ; X86-NEXT:    setb %cl
 ; X86-NEXT:    orb {{[-0-9]+}}(%e{{[sb]}}p), %cl # 1-byte Folded Reload
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT:    cmovnel %edi, %esi
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT:    cmovnel %edi, %edx
+; X86-NEXT:    cmovnel %ebx, %esi
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT:    cmovnel %edi, %eax
-; X86-NEXT:    cmovel {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-NEXT:    jne .LBB4_8
-; X86-NEXT:  # %bb.1: # %_udiv-special-cases
-; X86-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    xorl $127, %ebx
-; X86-NEXT:    orl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    cmovnel %ebx, %eax
+; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT:    cmovnel %ebx, %eax
+; X86-NEXT:    cmovel {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X86-NEXT:    jne .LBB4_1
+; X86-NEXT:  # %bb.8: # %_udiv-special-cases
+; X86-NEXT:    movl %edx, %edi
+; X86-NEXT:    movl %eax, %edx
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT:    xorl $127, %eax
+; X86-NEXT:    orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X86-NEXT:    movl %edi, %ecx
 ; X86-NEXT:    orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; X86-NEXT:    orl %ebx, %ecx
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    orl %eax, %ecx
+; X86-NEXT:    movl %edx, %eax
+; X86-NEXT:    movl %ebx, %edx
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
 ; X86-NEXT:    je .LBB4_9
-; X86-NEXT:  # %bb.2: # %udiv-bb1
+; X86-NEXT:  # %bb.5: # %udiv-bb1
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
 ; X86-NEXT:    movl %eax, {{[0-9]+}}(%esp)
 ; X86-NEXT:    xorps %xmm0, %xmm0
@@ -294,252 +297,264 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
 ; X86-NEXT:    movl %eax, {{[0-9]+}}(%esp)
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
 ; X86-NEXT:    movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT:    movl %edi, {{[0-9]+}}(%esp)
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT:    movl %edx, %ecx
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT:    movl %eax, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT:    movl %ebx, %ecx
 ; X86-NEXT:    xorb $127, %cl
 ; X86-NEXT:    movl %ecx, %eax
 ; X86-NEXT:    shrb $3, %al
 ; X86-NEXT:    andb $12, %al
 ; X86-NEXT:    negb %al
 ; X86-NEXT:    movsbl %al, %eax
-; X86-NEXT:    movl 152(%esp,%eax), %ebx
-; X86-NEXT:    movl 156(%esp,%eax), %esi
-; X86-NEXT:    shldl %cl, %ebx, %esi
-; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    movl 144(%esp,%eax), %esi
+; X86-NEXT:    movl 152(%esp,%eax), %esi
+; X86-NEXT:    movl 156(%esp,%eax), %edx
+; X86-NEXT:    shldl %cl, %esi, %edx
+; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl 144(%esp,%eax), %edx
 ; X86-NEXT:    movl 148(%esp,%eax), %eax
-; X86-NEXT:    shldl %cl, %eax, %ebx
-; X86-NEXT:    shldl %cl, %esi, %eax
-; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    shll %cl, %esi
-; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    addl $1, %edx
+; X86-NEXT:    shldl %cl, %eax, %esi
+; X86-NEXT:    shldl %cl, %edx, %eax
+; X86-NEXT:    shll %cl, %edx
+; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    addl $1, %ebx
+; X86-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    adcl $0, %edi
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
 ; X86-NEXT:    adcl $0, %ecx
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT:    adcl $0, %eax
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NEXT:    adcl $0, %edx
+; X86-NEXT:    jae .LBB4_2
+; X86-NEXT:  # %bb.6:
+; X86-NEXT:    xorl %ecx, %ecx
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    movl %esi, %ebx
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT:    adcl $0, %esi
-; X86-NEXT:    jae .LBB4_5
-; X86-NEXT:  # %bb.3:
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT:    movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
 ; X86-NEXT:    jmp .LBB4_7
-; X86-NEXT:  .LBB4_5: # %udiv-preheader
+; X86-NEXT:  .LBB4_1:
+; X86-NEXT:    movl %ebx, %edx
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT:    jmp .LBB4_9
+; X86-NEXT:  .LBB4_2: # %udiv-preheader
+; X86-NEXT:    movl %edi, %ebx
+; X86-NEXT:    movl %edx, %edi
 ; X86-NEXT:    movaps %xmm0, {{[0-9]+}}(%esp)
-; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT:    movl %esi, {{[0-9]+}}(%esp)
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT:    movl %esi, {{[0-9]+}}(%esp)
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT:    movl %esi, {{[0-9]+}}(%esp)
-; X86-NEXT:    movl %edi, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NEXT:    movl %edx, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NEXT:    movl %edx, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NEXT:    movl %edx, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NEXT:    movl %edx, {{[0-9]+}}(%esp)
 ; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    movl %edx, %eax
+; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    movl %ecx, %eax
 ; X86-NEXT:    shrb $3, %al
 ; X86-NEXT:    andb $12, %al
 ; X86-NEXT:    movzbl %al, %eax
-; X86-NEXT:    movl 108(%esp,%eax), %edi
-; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    movl %edx, %ecx
+; X86-NEXT:    movl 108(%esp,%eax), %edx
+; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    movl 104(%esp,%eax), %ebx
 ; X86-NEXT:    movl %ebx, %esi
-; X86-NEXT:    shrdl %cl, %edi, %esi
-; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    movl 96(%esp,%eax), %edx
-; X86-NEXT:    movl 100(%esp,%eax), %esi
+; X86-NEXT:    shrdl %cl, %edx, %esi
 ; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    shrdl %cl, %ebx, %esi
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NEXT:    shrl %cl, %edi
-; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    # kill: def $cl killed $cl killed $ecx
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT:    shrdl %cl, %eax, %edx
+; X86-NEXT:    movl 96(%esp,%eax), %esi
+; X86-NEXT:    movl 100(%esp,%eax), %eax
+; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl %eax, %edi
+; X86-NEXT:    shrdl %cl, %ebx, %edi
+; X86-NEXT:    movl %edi, %ebx
+; X86-NEXT:    shrl %cl, %edx
 ; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X86-NEXT:    shrdl %cl, %eax, %esi
+; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
 ; X86-NEXT:    addl $-1, %eax
 ; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
 ; X86-NEXT:    adcl $-1, %eax
 ; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT:    adcl $-1, %eax
-; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT:    adcl $-1, %eax
-; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    adcl $-1, %ecx
+; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    adcl $-1, %ecx
+; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    xorl %edx, %edx
 ; X86-NEXT:    movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
 ; X86-NEXT:    .p2align 4
-; X86-NEXT:  .LBB4_6: # %udiv-do-while
+; X86-NEXT:  .LBB4_3: # %udiv-do-while
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
+; X86-NEXT:    movl %edx, %esi
+; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT:    shldl $1, %eax, %edi
-; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    movl %esi, %edx
-; X86-NEXT:    shldl $1, %esi, %eax
+; X86-NEXT:    shldl $1, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT:    movl %ebx, %edx
+; X86-NEXT:    shldl $1, %ebx, %eax
 ; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT:    shldl $1, %esi, %edx
-; X86-NEXT:    shldl $1, %ecx, %esi
-; X86-NEXT:    shldl $1, %ebx, %ecx
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT:    orl %eax, %ecx
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT:    shldl $1, %ebx, %edx
+; X86-NEXT:    shldl $1, %ecx, %ebx
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X86-NEXT:    shldl $1, %edi, %ecx
+; X86-NEXT:    orl %esi, %ecx
 ; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT:    shldl $1, %ecx, %ebx
-; X86-NEXT:    orl %eax, %ebx
-; X86-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    shldl $1, %ecx, %edi
+; X86-NEXT:    orl %esi, %edi
+; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
 ; X86-NEXT:    shldl $1, %edi, %ecx
-; X86-NEXT:    orl %eax, %ecx
+; X86-NEXT:    orl %esi, %ecx
 ; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    addl %edi, %edi
 ; X86-NEXT:    orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
 ; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    cmpl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
+; X86-NEXT:    cmpl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
 ; X86-NEXT:    sbbl %edx, %ecx
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT:    sbbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
 ; X86-NEXT:    sbbl %eax, %ecx
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    sbbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
 ; X86-NEXT:    sarl $31, %ecx
-; X86-NEXT:    movl %ecx, %edi
-; X86-NEXT:    andl $1, %edi
-; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    movl %ecx, %edi
-; X86-NEXT:    andl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    movl %ecx, %ebx
-; X86-NEXT:    andl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X86-NEXT:    movl %ecx, %esi
+; X86-NEXT:    andl $1, %esi
+; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    movl %ecx, %edi
 ; X86-NEXT:    andl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X86-NEXT:    movl %ecx, %esi
+; X86-NEXT:    andl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:    andl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
 ; X86-NEXT:    andl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; X86-NEXT:    movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT:    subl %ecx, %esi
-; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    sbbl %edi, %edx
-; X86-NEXT:    movl %edx, %esi
-; X86-NEXT:    sbbl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT:    movl %eax, %edi
-; X86-NEXT:    sbbl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X86-NEXT:    subl %ecx, %ebx
+; X86-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    sbbl %eax, %edx
+; X86-NEXT:    movl %edx, %ebx
+; X86-NEXT:    sbbl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT:    addl $-1, %edx
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT:    sbbl %edi, %eax
+; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT:    adcl $-1, %ecx
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NEXT:    adcl $-1, %ebx
+; X86-NEXT:    addl $-1, %ecx
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
 ; X86-NEXT:    adcl $-1, %eax
-; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X86-NEXT:    adcl $-1, %edi
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X86-NEXT:    adcl $-1, %esi
 ; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    orl %esi, %eax
+; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    orl %edi, %ecx
 ; X86-NEXT:    orl %eax, %ecx
-; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    orl %ebx, %edx
-; X86-NEXT:    orl %ecx, %edx
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NEXT:    jne .LBB4_6
-; X86-NEXT:  .LBB4_7: # %udiv-loop-exit
-; X86-NEXT:    shldl $1, %ebx, %ecx
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT:    shldl $1, %eax, %ebx
-; X86-NEXT:    movl %ebx, %edx
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT:    shldl $1, %esi, %eax
+; X86-NEXT:    jne .LBB4_3
+; X86-NEXT:  # %bb.4:
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT:    leal (%edi,%esi,2), %edi
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
 ; X86-NEXT:    movl %ecx, %esi
-; X86-NEXT:  .LBB4_8: # %udiv-end
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:  .LBB4_7: # %udiv-loop-exit
+; X86-NEXT:    shldl $1, %ebx, %esi
+; X86-NEXT:    orl %edx, %esi
+; X86-NEXT:    shldl $1, %eax, %ebx
+; X86-NEXT:    orl %edx, %ebx
+; X86-NEXT:    shldl $1, %edi, %eax
+; X86-NEXT:    orl %edx, %eax
+; X86-NEXT:    movl %edi, %edx
+; X86-NEXT:    addl %edi, %edx
+; X86-NEXT:    orl %ecx, %edx
 ; X86-NEXT:  .LBB4_9: # %udiv-end
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
 ; X86-NEXT:    xorl %ecx, %esi
-; X86-NEXT:    movl %edx, %ebx
 ; X86-NEXT:    xorl %ecx, %ebx
 ; X86-NEXT:    xorl %ecx, %eax
-; X86-NEXT:    xorl %ecx, %edi
-; X86-NEXT:    subl %ecx, %edi
+; X86-NEXT:    xorl %ecx, %edx
+; X86-NEXT:    subl %ecx, %edx
 ; X86-NEXT:    sbbl %ecx, %eax
-; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    sbbl %ecx, %ebx
 ; X86-NEXT:    sbbl %ecx, %esi
+; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    movl 56(%ebp), %ecx
-; X86-NEXT:    movl %edi, (%ecx)
+; X86-NEXT:    movl %edx, (%ecx)
 ; X86-NEXT:    movl %eax, 4(%ecx)
 ; X86-NEXT:    movl %ebx, 8(%ecx)
 ; X86-NEXT:    movl %esi, 12(%ecx)
-; X86-NEXT:    movl %edi, %ecx
-; X86-NEXT:    movl 40(%ebp), %edi
-; X86-NEXT:    mull %edi
+; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl 40(%ebp), %ecx
+; X86-NEXT:    movl %ebx, %edi
+; X86-NEXT:    movl %edx, %esi
 ; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    mull %ecx
+; X86-NEXT:    movl %edx, %ebx
 ; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    movl %ecx, %edi
-; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    movl %ecx, %eax
-; X86-NEXT:    mull 40(%ebp)
+; X86-NEXT:    movl %esi, %eax
+; X86-NEXT:    mull %ecx
 ; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    movl %edx, %ecx
 ; X86-NEXT:    addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; X86-NEXT:    adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT:    movl %edi, %eax
-; X86-NEXT:    movl %esi, %edi
+; X86-NEXT:    adcl $0, %ebx
+; X86-NEXT:    movl %esi, %eax
 ; X86-NEXT:    movl 44(%ebp), %esi
 ; X86-NEXT:    mull %esi
 ; X86-NEXT:    addl %ecx, %eax
 ; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X86-NEXT:    adcl %ebx, %edx
 ; X86-NEXT:    movl %edx, %ecx
-; X86-NEXT:    setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT:    movl %esi, %eax
-; X86-NEXT:    mull 44(%ebp)
+; X86-NEXT:    setb %bl
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT:    mull %esi
 ; X86-NEXT:    addl %ecx, %eax
-; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
+; X86-NEXT:    movl %eax, %ecx
+; X86-NEXT:    movzbl %bl, %eax
 ; X86-NEXT:    adcl %eax, %edx
 ; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
 ; X86-NEXT:    movl 40(%ebp), %eax
-; X86-NEXT:    imull %eax, %edi
-; X86-NEXT:    mull %ebx
+; X86-NEXT:    imull %eax, %ebx
+; X86-NEXT:    mull %edi
 ; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    imull 44(%ebp), %ebx
-; X86-NEXT:    addl %edx, %ebx
-; X86-NEXT:    addl %edi, %ebx
+; X86-NEXT:    imull %esi, %edi
+; X86-NEXT:    addl %edx, %edi
+; X86-NEXT:    addl %ebx, %edi
 ; X86-NEXT:    movl 48(%ebp), %eax
-; X86-NEXT:    movl %eax, %ecx
-; X86-NEXT:    imull %esi, %ecx
-; X86-NEXT:    movl 52(%ebp), %esi
+; X86-NEXT:    movl %eax, %esi
+; X86-NEXT:    imull {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X86-NEXT:    movl 52(%ebp), %ebx
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT:    imull %edx, %esi
+; X86-NEXT:    imull %edx, %ebx
 ; X86-NEXT:    mull %edx
-; X86-NEXT:    addl %edx, %esi
-; X86-NEXT:    addl %ecx, %esi
-; X86-NEXT:    addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT:    adcl %ebx, %esi
+; X86-NEXT:    addl %edx, %ebx
+; X86-NEXT:    addl %esi, %ebx
 ; X86-NEXT:    addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X86-NEXT:    adcl %edi, %ebx
+; X86-NEXT:    addl %ecx, %eax
+; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
 ; X86-NEXT:    movl 24(%ebp), %edx
 ; X86-NEXT:    subl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
 ; X86-NEXT:    movl 28(%ebp), %ecx
 ; X86-NEXT:    sbbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
 ; X86-NEXT:    movl 32(%ebp), %edi
 ; X86-NEXT:    sbbl %eax, %edi
-; X86-NEXT:    movl 36(%ebp), %ebx
-; X86-NEXT:    sbbl %esi, %ebx
+; X86-NEXT:    movl 36(%ebp), %esi
+; X86-NEXT:    sbbl %ebx, %esi
 ; X86-NEXT:    movl 8(%ebp), %eax
 ; X86-NEXT:    movl %edx, (%eax)
 ; X86-NEXT:    movl %ecx, 4(%eax)
 ; X86-NEXT:    movl %edi, 8(%eax)
-; X86-NEXT:    movl %ebx, 12(%eax)
+; X86-NEXT:    movl %esi, 12(%eax)
 ; X86-NEXT:    leal -12(%ebp), %esp
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    popl %edi
diff --git a/llvm/test/CodeGen/X86/div-rem-pair-recomposition-unsigned.ll b/llvm/test/CodeGen/X86/div-rem-pair-recomposition-unsigned.ll
index 7f5ede7a858d2..859e9244d29d2 100644
--- a/llvm/test/CodeGen/X86/div-rem-pair-recomposition-unsigned.ll
+++ b/llvm/test/CodeGen/X86/div-rem-pair-recomposition-unsigned.ll
@@ -152,48 +152,49 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    andl $-16, %esp
 ; X86-NEXT:    subl $160, %esp
-; X86-NEXT:    movl 48(%ebp), %esi
-; X86-NEXT:    movl 40(%ebp), %ecx
-; X86-NEXT:    movl 52(%ebp), %edi
-; X86-NEXT:    movl 44(%ebp), %eax
-; X86-NEXT:    orl %edi, %eax
-; X86-NEXT:    orl %esi, %ecx
+; X86-NEXT:    movl 40(%ebp), %ebx
+; X86-NEXT:    movl 52(%ebp), %esi
+; X86-NEXT:    movl 44(%ebp), %edi
+; X86-NEXT:    movl %edi, %eax
+; X86-NEXT:    orl %esi, %eax
+; X86-NEXT:    movl %ebx, %ecx
+; X86-NEXT:    orl 48(%ebp), %ecx
 ; X86-NEXT:    orl %eax, %ecx
-; X86-NEXT:    sete %bl
+; X86-NEXT:    sete %cl
 ; X86-NEXT:    movl 28(%ebp), %eax
 ; X86-NEXT:    orl 36(%ebp), %eax
 ; X86-NEXT:    movl 24(%ebp), %edx
 ; X86-NEXT:    orl 32(%ebp), %edx
 ; X86-NEXT:    orl %eax, %edx
 ; X86-NEXT:    sete %al
-; X86-NEXT:    orb %bl, %al
+; X86-NEXT:    orb %cl, %al
 ; X86-NEXT:    movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
-; X86-NEXT:    bsrl %edi, %edx
+; X86-NEXT:    bsrl %esi, %edx
 ; X86-NEXT:    xorl $31, %edx
-; X86-NEXT:    bsrl %esi, %ecx
+; X86-NEXT:    bsrl 48(%ebp), %ecx
 ; X86-NEXT:    xorl $31, %ecx
 ; X86-NEXT:    orl $32, %ecx
-; X86-NEXT:    testl %edi, %edi
+; X86-NEXT:    testl %esi, %esi
 ; X86-NEXT:    cmovnel %edx, %ecx
-; X86-NEXT:    movl 44(%ebp), %eax
-; X86-NEXT:    bsrl %eax, %edx
+; X86-NEXT:    bsrl %edi, %edx
 ; X86-NEXT:    xorl $31, %edx
-; X86-NEXT:    bsrl 40(%ebp), %ebx
-; X86-NEXT:    xorl $31, %ebx
-; X86-NEXT:    orl $32, %ebx
-; X86-NEXT:    testl %eax, %eax
-; X86-NEXT:    cmovnel %edx, %ebx
-; X86-NEXT:    orl $64, %ebx
-; X86-NEXT:    orl %edi, %esi
-; X86-NEXT:    cmovnel %ecx, %ebx
-; X86-NEXT:    movl 36(%ebp), %esi
-; X86-NEXT:    bsrl %esi, %edx
+; X86-NEXT:    bsrl %ebx, %eax
+; X86-NEXT:    xorl $31, %eax
+; X86-NEXT:    orl $32, %eax
+; X86-NEXT:    testl %edi, %edi
+; X86-NEXT:    cmovnel %edx, %eax
+; X86-NEXT:    orl $64, %eax
+; X86-NEXT:    movl 48(%ebp), %edx
+; X86-NEXT:    orl %esi, %edx
+; X86-NEXT:    cmovnel %ecx, %eax
+; X86-NEXT:    movl 36(%ebp), %ebx
+; X86-NEXT:    bsrl %ebx, %edx
 ; X86-NEXT:    xorl $31, %edx
-; X86-NEXT:    movl 32(%ebp), %eax
-; X86-NEXT:    bsrl %eax, %ecx
+; X86-NEXT:    movl 32(%ebp), %ecx
+; X86-NEXT:    bsrl %ecx, %ecx
 ; X86-NEXT:    xorl $31, %ecx
 ; X86-NEXT:    orl $32, %ecx
-; X86-NEXT:    testl %esi, %esi
+; X86-NEXT:    testl %ebx, %ebx
 ; X86-NEXT:    cmovnel %edx, %ecx
 ; X86-NEXT:    movl 28(%ebp), %edi
 ; X86-NEXT:    bsrl %edi, %esi
@@ -204,23 +205,23 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
 ; X86-NEXT:    testl %edi, %edi
 ; X86-NEXT:    cmovnel %esi, %edx
 ; X86-NEXT:    orl $64, %edx
-; X86-NEXT:    movl %eax, %esi
-; X86-NEXT:    movl 36(%ebp), %edi
-; X86-NEXT:    orl %edi, %esi
+; X86-NEXT:    movl 32(%ebp), %esi
+; X86-NEXT:    orl %ebx, %esi
 ; X86-NEXT:    cmovnel %ecx, %edx
-; X86-NEXT:    subl %edx, %ebx
-; X86-NEXT:    movl $0, %eax
-; X86-NEXT:    sbbl %eax, %eax
+; X86-NEXT:    xorl %edi, %edi
+; X86-NEXT:    subl %edx, %eax
+; X86-NEXT:    movl $0, %ebx
+; X86-NEXT:    sbbl %ebx, %ebx
 ; X86-NEXT:    movl $0, %ecx
 ; X86-NEXT:    sbbl %ecx, %ecx
 ; X86-NEXT:    movl $0, %esi
 ; X86-NEXT:    sbbl %esi, %esi
 ; X86-NEXT:    movl $127, %edx
-; X86-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    cmpl %ebx, %edx
-; X86-NEXT:    movl $0, %edx
 ; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    sbbl %eax, %edx
+; X86-NEXT:    cmpl %eax, %edx
+; X86-NEXT:    movl $0, %edx
+; X86-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    sbbl %ebx, %edx
 ; X86-NEXT:    movl $0, %edx
 ; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    sbbl %ecx, %edx
@@ -229,82 +230,77 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
 ; X86-NEXT:    sbbl %esi, %edx
 ; X86-NEXT:    setb %dl
 ; X86-NEXT:    orb {{[-0-9]+}}(%e{{[sb]}}p), %dl # 1-byte Folded Reload
-; X86-NEXT:    movl %edi, %edx
-; X86-NEXT:    movl $0, %eax
-; X86-NEXT:    cmovnel %eax, %edx
-; X86-NEXT:    movl 32(%ebp), %ebx
-; X86-NEXT:    cmovnel %eax, %ebx
-; X86-NEXT:    movl 28(%ebp), %edi
-; X86-NEXT:    cmovnel %eax, %edi
-; X86-NEXT:    movl $0, %ecx
-; X86-NEXT:    movl 24(%ebp), %eax
-; X86-NEXT:    cmovnel %ecx, %eax
-; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    movl 56(%ebp), %esi
-; X86-NEXT:    jne .LBB4_6
+; X86-NEXT:    movl 36(%ebp), %eax
+; X86-NEXT:    cmovnel %edi, %eax
+; X86-NEXT:    movl 32(%ebp), %esi
+; X86-NEXT:    cmovnel %edi, %esi
+; X86-NEXT:    movl 28(%ebp), %edx
+; X86-NEXT:    cmovnel %edi, %edx
+; X86-NEXT:    movl 24(%ebp), %ebx
+; X86-NEXT:    cmovnel %edi, %ebx
+; X86-NEXT:    movl 56(%ebp), %edi
+; X86-NEXT:    jne .LBB4_8
 ; X86-NEXT:  # %bb.1: # %_udiv-special-cases
+; X86-NEXT:    movl %eax, %edi
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
 ; X86-NEXT:    xorl $127, %eax
 ; X86-NEXT:    orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
 ; X86-NEXT:    orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
 ; X86-NEXT:    orl %eax, %ecx
-; X86-NEXT:    je .LBB4_6
+; X86-NEXT:    movl %edi, %eax
+; X86-NEXT:    movl 56(%ebp), %edi
+; X86-NEXT:    movl 24(%ebp), %ecx
+; X86-NEXT:    je .LBB4_8
 ; X86-NEXT:  # %bb.2: # %udiv-bb1
-; X86-NEXT:    movl 24(%ebp), %edi
-; X86-NEXT:    movl %edi, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
 ; X86-NEXT:    xorps %xmm0, %xmm0
 ; X86-NEXT:    movaps %xmm0, {{[0-9]+}}(%esp)
-; X86-NEXT:    movl 28(%ebp), %edx
-; X86-NEXT:    movl %edx, {{[0-9]+}}(%esp)
-; X86-NEXT:    movl 32(%ebp), %esi
-; X86-NEXT:    movl %esi, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl 28(%ebp), %eax
+; X86-NEXT:    movl %eax, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl 32(%ebp), %eax
+; X86-NEXT:    movl %eax, {{[0-9]+}}(%esp)
 ; X86-NEXT:    movl 36(%ebp), %eax
 ; X86-NEXT:    movl %eax, {{[0-9]+}}(%esp)
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NEXT:    movl %ebx, %ecx
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    xorb $127, %cl
 ; X86-NEXT:    movl %ecx, %eax
 ; X86-NEXT:    shrb $3, %al
 ; X86-NEXT:    andb $12, %al
 ; X86-NEXT:    negb %al
 ; X86-NEXT:    movsbl %al, %eax
-; X86-NEXT:    movl 136(%esp,%eax), %edx
-; X86-NEXT:    movl 140(%esp,%eax), %edi
-; X86-NEXT:    shldl %cl, %edx, %edi
-; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    movl 128(%esp,%eax), %esi
-; X86-NEXT:    movl 132(%esp,%eax), %eax
-; X86-NEXT:    shldl %cl, %eax, %edx
+; X86-NEXT:    movl 136(%esp,%eax), %esi
+; X86-NEXT:    movl 140(%esp,%eax), %edx
+; X86-NEXT:    shldl %cl, %esi, %edx
 ; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    shldl %cl, %esi, %eax
-; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    shll %cl, %esi
-; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT:    addl $1, %ebx
-; X86-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NEXT:    adcl $0, %ebx
+; X86-NEXT:    movl 128(%esp,%eax), %ebx
+; X86-NEXT:    movl 132(%esp,%eax), %edx
+; X86-NEXT:    shldl %cl, %edx, %esi
+; X86-NEXT:    shldl %cl, %ebx, %edx
+; X86-NEXT:    shll %cl, %ebx
+; X86-NEXT:    addl $1, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
 ; X86-NEXT:    adcl $0, %eax
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
 ; X86-NEXT:    adcl $0, %ecx
+; X86-NEXT:    adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
 ; X86-NEXT:    jae .LBB4_3
-; X86-NEXT:  # %bb.7:
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:  # %bb.6:
+; X86-NEXT:    movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT:    movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT:    jmp .LBB4_5
+; X86-NEXT:    jmp .LBB4_7
 ; X86-NEXT:  .LBB4_3: # %udiv-preheader
 ; X86-NEXT:    movaps %xmm0, {{[0-9]+}}(%esp)
-; X86-NEXT:    movl 24(%ebp), %edx
-; X86-NEXT:    movl %edx, {{[0-9]+}}(%esp)
-; X86-NEXT:    movl 28(%ebp), %edx
-; X86-NEXT:    movl %edx, {{[0-9]+}}(%esp)
-; X86-NEXT:    movl 32(%ebp), %edx
-; X86-NEXT:    movl %edx, {{[0-9]+}}(%esp)
-; X86-NEXT:    movl 36(%ebp), %edx
-; X86-NEXT:    movl %edx, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl 24(%ebp), %edi
+; X86-NEXT:    movl %edi, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl 28(%ebp), %edi
+; X86-NEXT:    movl %edi, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl 32(%ebp), %edi
+; X86-NEXT:    movl %edi, {{[0-9]+}}(%esp)
+; X86-NEXT:    movl 36(%ebp), %edi
+; X86-NEXT:    movl %edi, {{[0-9]+}}(%esp)
 ; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
 ; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
@@ -313,23 +309,23 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
 ; X86-NEXT:    andb $12, %al
 ; X86-NEXT:    movzbl %al, %eax
 ; X86-NEXT:    movl 92(%esp,%eax), %edi
-; X86-NEXT:    movl 88(%esp,%eax), %edx
-; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    shrdl %cl, %edi, %edx
+; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    movl 80(%esp,%eax), %esi
-; X86-NEXT:    movl 84(%esp,%eax), %eax
-; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl 88(%esp,%eax), %edi
+; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT:    shrdl %cl, %edx, %eax
-; X86-NEXT:    movl %eax, %edx
-; X86-NEXT:    movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT:    shrl %cl, %edi
+; X86-NEXT:    shrdl %cl, %edx, %edi
 ; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl 80(%esp,%eax), %edx
+; X86-NEXT:    movl 84(%esp,%eax), %eax
+; X86-NEXT:    movl %eax, %ebx
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X86-NEXT:    shrdl %cl, %edi, %ebx
+; X86-NEXT:    shrl %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
 ; X86-NEXT:    # kill: def $cl killed $cl killed $ecx
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT:    shrdl %cl, %eax, %esi
-; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    shrdl %cl, %eax, %edx
+; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    movl 40(%ebp), %eax
 ; X86-NEXT:    addl $-1, %eax
 ; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
@@ -343,141 +339,141 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
 ; X86-NEXT:    adcl $-1, %eax
 ; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT:    movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
 ; X86-NEXT:    .p2align 4
 ; X86-NEXT:  .LBB4_4: # %udiv-do-while
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
-; X86-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; X86-NEXT:    shldl $1, %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT:    movl %edx, %edi
-; X86-NEXT:    shldl $1, %edx, %ebx
-; X86-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT:    shldl $1, %edx, %edi
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT:    shldl $1, %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
 ; X86-NEXT:    shldl $1, %ebx, %edx
-; X86-NEXT:    shldl $1, %ecx, %ebx
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT:    orl %esi, %ebx
-; X86-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    shldl $1, %eax, %ecx
-; X86-NEXT:    orl %esi, %ecx
+; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X86-NEXT:    shldl $1, %edi, %ebx
+; X86-NEXT:    shldl $1, %ecx, %edi
+; X86-NEXT:    shldl $1, %esi, %ecx
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT:    orl %eax, %ecx
 ; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT:    shldl $1, %ecx, %eax
-; X86-NEXT:    orl %esi, %eax
-; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    addl %ecx, %ecx
-; X86-NEXT:    orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X86-NEXT:    shldl $1, %ecx, %esi
+; X86-NEXT:    orl %eax, %esi
+; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X86-NEXT:    shldl $1, %esi, %ecx
+; X86-NEXT:    orl %eax, %ecx
 ; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    cmpl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT:    sbbl %edi, %eax
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT:    sbbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X86-NEXT:    addl %esi, %esi
+; X86-NEXT:    orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    cmpl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT:    sbbl %esi, %ecx
+; X86-NEXT:    sbbl %ebx, %ecx
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    sbbl %edx, %ecx
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    sbbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
 ; X86-NEXT:    sarl $31, %ecx
 ; X86-NEXT:    movl %ecx, %eax
 ; X86-NEXT:    andl $1, %eax
 ; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    movl %ecx, %eax
-; X86-NEXT:    andl 52(%ebp), %eax
-; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl %ecx, %esi
+; X86-NEXT:    andl 52(%ebp), %esi
 ; X86-NEXT:    movl %ecx, %eax
 ; X86-NEXT:    andl 48(%ebp), %eax
-; X86-NEXT:    movl %ecx, %ebx
-; X86-NEXT:    andl 44(%ebp), %ebx
+; X86-NEXT:    movl %ecx, %edx
+; X86-NEXT:    andl 44(%ebp), %edx
 ; X86-NEXT:    andl 40(%ebp), %ecx
-; X86-NEXT:    movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT:    subl %ecx, %edx
-; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    sbbl %ebx, %edi
-; X86-NEXT:    movl %edi, %edx
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT:    subl %ecx, %edi
+; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    sbbl %edx, %ebx
 ; X86-NEXT:    sbbl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT:    movl %esi, %eax
-; X86-NEXT:    sbbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT:    sbbl %esi, %eax
 ; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
 ; X86-NEXT:    addl $-1, %ecx
-; X86-NEXT:    adcl $-1, %ebx
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-NEXT:    adcl $-1, %esi
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT:    adcl $-1, %eax
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NEXT:    adcl $-1, %edx
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
 ; X86-NEXT:    adcl $-1, %edi
-; X86-NEXT:    movl %ebx, %eax
+; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    orl %edi, %eax
 ; X86-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    orl %esi, %ecx
+; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    orl %edx, %ecx
 ; X86-NEXT:    orl %eax, %ecx
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
 ; X86-NEXT:    jne .LBB4_4
-; X86-NEXT:  .LBB4_5: # %udiv-loop-exit
+; X86-NEXT:  # %bb.5:
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-NEXT:    shldl $1, %ecx, %edx
-; X86-NEXT:    shldl $1, %eax, %ecx
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:    movl 56(%ebp), %edi
+; X86-NEXT:  .LBB4_7: # %udiv-loop-exit
 ; X86-NEXT:    shldl $1, %esi, %eax
-; X86-NEXT:    movl %eax, %edi
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT:    leal (%eax,%esi,2), %eax
-; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    movl %ecx, %ebx
-; X86-NEXT:    movl 56(%ebp), %esi
-; X86-NEXT:  .LBB4_6: # %udiv-end
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT:    movl %eax, (%esi)
-; X86-NEXT:    movl %edi, 4(%esi)
-; X86-NEXT:    movl %ebx, 8(%esi)
-; X86-NEXT:    movl %edx, 12(%esi)
-; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    movl %edx, %ecx
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT:    orl %ecx, %eax
+; X86-NEXT:    shldl $1, %edx, %esi
+; X86-NEXT:    orl %ecx, %esi
+; X86-NEXT:    shldl $1, %ebx, %edx
+; X86-NEXT:    orl %ecx, %edx
+; X86-NEXT:    addl %ebx, %ebx
+; X86-NEXT:    orl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X86-NEXT:  .LBB4_8: # %udiv-end
+; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl %ebx, (%edi)
+; X86-NEXT:    movl %edx, 4(%edi)
+; X86-NEXT:    movl %esi, 8(%edi)
+; X86-NEXT:    movl %eax, 12(%edi)
+; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    movl 48(%ebp), %eax
 ; X86-NEXT:    movl %eax, %esi
-; X86-NEXT:    imull %edi, %esi
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; X86-NEXT:    mull %edi
+; X86-NEXT:    imull %edx, %esi
+; X86-NEXT:    mull %ebx
+; X86-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    addl %esi, %edx
-; X86-NEXT:    movl 52(%ebp), %eax
-; X86-NEXT:    imull %edi, %eax
-; X86-NEXT:    addl %edx, %eax
-; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    movl 40(%ebp), %edi
-; X86-NEXT:    movl %edi, %eax
+; X86-NEXT:    movl 52(%ebp), %edi
+; X86-NEXT:    imull %ebx, %edi
+; X86-NEXT:    addl %edx, %edi
+; X86-NEXT:    movl 40(%ebp), %eax
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
 ; X86-NEXT:    mull %ebx
-; X86-NEXT:    imull %edi, %ecx
+; X86-NEXT:    movl %eax, %esi
+; X86-NEXT:    imull 40(%ebp), %ecx
 ; X86-NEXT:    addl %edx, %ecx
-; X86-NEXT:    movl 44(%ebp), %esi
-; X86-NEXT:    imull %esi, %ebx
+; X86-NEXT:    movl 44(%ebp), %eax
+; X86-NEXT:    imull %eax, %ebx
 ; X86-NEXT:    addl %ecx, %ebx
-; X86-NEXT:    addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X86-NEXT:    addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    adcl %edi, %ebx
+; X86-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
 ; X86-NEXT:    movl %esi, %eax
-; X86-NEXT:    mull %edi
-; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl 40(%ebp), %ecx
+; X86-NEXT:    mull %ecx
+; X86-NEXT:    movl %edx, %edi
 ; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT:    mull %edi
+; X86-NEXT:    mull %ecx
+; X86-NEXT:    movl %edx, %ebx
 ; X86-NEXT:    movl %eax, %ecx
-; X86-NEXT:    addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; X86-NEXT:    adcl $0, %edx
-; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    addl %edi, %ecx
+; X86-NEXT:    adcl $0, %ebx
 ; X86-NEXT:    movl %esi, %eax
 ; X86-NEXT:    mull 44(%ebp)
+; X86-NEXT:    movl 28(%ebp), %esi
 ; X86-NEXT:    movl %edx, %edi
-; X86-NEXT:    movl %eax, %esi
-; X86-NEXT:    addl %ecx, %esi
-; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X86-NEXT:    addl %ecx, %eax
+; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    adcl %ebx, %edi
 ; X86-NEXT:    setb %cl
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
 ; X86-NEXT:    mull 44(%ebp)
@@ -485,20 +481,19 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
 ; X86-NEXT:    movzbl %cl, %ecx
 ; X86-NEXT:    adcl %ecx, %edx
 ; X86-NEXT:    addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT:    adcl %ebx, %edx
-; X86-NEXT:    movl 24(%ebp), %edi
-; X86-NEXT:    subl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-NEXT:    movl 28(%ebp), %ecx
-; X86-NEXT:    sbbl %esi, %ecx
-; X86-NEXT:    movl 32(%ebp), %esi
-; X86-NEXT:    sbbl %eax, %esi
-; X86-NEXT:    movl 36(%ebp), %ebx
-; X86-NEXT:    sbbl %edx, %ebx
+; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X86-NEXT:    movl 24(%ebp), %ebx
+; X86-NEXT:    subl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X86-NEXT:    sbbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X86-NEXT:    movl 32(%ebp), %edi
+; X86-NEXT:    sbbl %eax, %edi
+; X86-NEXT:    movl 36(%ebp), %ecx
+; X86-NEXT:    sbbl %edx, %ecx
 ; X86-NEXT:    movl 8(%ebp), %eax
-; X86-NEXT:    movl %edi, (%eax)
-; X86-NEXT:    movl %ecx, 4(%eax)
-; X86-NEXT:    movl %esi, 8(%eax)
-; X86-NEXT:    movl %ebx, 12(%eax)
+; X86-NEXT:    movl %ebx, (%eax)
+; X86-NEXT:    movl %esi, 4(%eax)
+; X86-NEXT:    movl %edi, 8(%eax)
+; X86-NEXT:    movl %ecx, 12(%eax)
 ; X86-NEXT:    leal -12(%ebp), %esp
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    popl %edi
diff --git a/llvm/test/Transforms/ExpandIRInsts/X86/expand-large-fp-convert-fptosi129.ll b/llvm/test/Transforms/ExpandIRInsts/X86/expand-large-fp-convert-fptosi129.ll
index d3a77de057ef5..7413f6e8656e7 100644
--- a/llvm/test/Transforms/ExpandIRInsts/X86/expand-large-fp-convert-fptosi129.ll
+++ b/llvm/test/Transforms/ExpandIRInsts/X86/expand-large-fp-convert-fptosi129.ll
@@ -1,8 +1,8 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals
 ; RUN: opt -S -mtriple=x86_64-- --expand-ir-insts < %s | FileCheck %s
 ; RUN: opt -S -mtriple=x86_64-- -passes='require<libcall-lowering-info>,expand-ir-insts' < %s | FileCheck %s
 
-define i129 @halftosi129(half %a) {
+define i129 @halftosi129(half %a) !prof !0 {
 ; CHECK-LABEL: @halftosi129(
 ; CHECK-NEXT:    [[TMP1:%.*]] = fptosi half [[A:%.*]] to i32
 ; CHECK-NEXT:    [[TMP2:%.*]] = sext i32 [[TMP1]] to i129
@@ -12,29 +12,29 @@ define i129 @halftosi129(half %a) {
   ret i129 %conv
 }
 
-define i129 @floattosi129(float %a) {
+define i129 @floattosi129(float %a) !prof !0 {
 ; CHECK-LABEL: @floattosi129(
 ; CHECK-NEXT:  fp-to-i-entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = bitcast float [[A:%.*]] to i32
 ; CHECK-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i129
 ; CHECK-NEXT:    [[TMP2:%.*]] = icmp sgt i32 [[TMP0]], -1
-; CHECK-NEXT:    [[TMP3:%.*]] = select i1 [[TMP2]], i129 1, i129 -1
+; CHECK-NEXT:    [[TMP3:%.*]] = select i1 [[TMP2]], i129 1, i129 -1, !prof [[PROF1:![0-9]+]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = lshr i129 [[TMP1]], 23
 ; CHECK-NEXT:    [[TMP5:%.*]] = and i129 [[TMP4]], 255
 ; CHECK-NEXT:    [[TMP6:%.*]] = and i129 [[TMP1]], 8388607
 ; CHECK-NEXT:    [[TMP7:%.*]] = or i129 [[TMP6]], 8388608
 ; CHECK-NEXT:    [[TMP8:%.*]] = icmp ult i129 [[TMP5]], 127
-; CHECK-NEXT:    br i1 [[TMP8]], label [[FP_TO_I_CLEANUP:%.*]], label [[FP_TO_I_IF_END:%.*]]
+; CHECK-NEXT:    br i1 [[TMP8]], label [[FP_TO_I_CLEANUP:%.*]], label [[FP_TO_I_IF_END:%.*]], !prof [[PROF2:![0-9]+]]
 ; CHECK:       fp-to-i-if-end:
 ; CHECK-NEXT:    [[TMP9:%.*]] = add i129 [[TMP5]], -256
 ; CHECK-NEXT:    [[TMP10:%.*]] = icmp ult i129 [[TMP9]], -129
-; CHECK-NEXT:    br i1 [[TMP10]], label [[FP_TO_I_IF_THEN5:%.*]], label [[FP_TO_I_IF_END9:%.*]]
+; CHECK-NEXT:    br i1 [[TMP10]], label [[FP_TO_I_IF_THEN5:%.*]], label [[FP_TO_I_IF_END9:%.*]], !prof [[PROF2]]
 ; CHECK:       fp-to-i-if-then5:
-; CHECK-NEXT:    [[TMP11:%.*]] = select i1 [[TMP2]], i129 340282366920938463463374607431768211455, i129 -340282366920938463463374607431768211456
+; CHECK-NEXT:    [[TMP11:%.*]] = select i1 [[TMP2]], i129 340282366920938463463374607431768211455, i129 -340282366920938463463374607431768211456, !prof [[PROF3:![0-9]+]]
 ; CHECK-NEXT:    br label [[FP_TO_I_CLEANUP]]
 ; CHECK:       fp-to-i-if-end9:
 ; CHECK-NEXT:    [[TMP12:%.*]] = icmp ult i129 [[TMP5]], 150
-; CHECK-NEXT:    br i1 [[TMP12]], label [[FP_TO_I_IF_THEN12:%.*]], label [[FP_TO_I_IF_ELSE:%.*]]
+; CHECK-NEXT:    br i1 [[TMP12]], label [[FP_TO_I_IF_THEN12:%.*]], label [[FP_TO_I_IF_ELSE:%.*]], !prof [[PROF1]]
 ; CHECK:       fp-to-i-if-then12:
 ; CHECK-NEXT:    [[TMP13:%.*]] = sub i129 150, [[TMP5]]
 ; CHECK-NEXT:    [[TMP14:%.*]] = lshr i129 [[TMP7]], [[TMP13]]
@@ -59,23 +59,23 @@ define i129 @doubletosi129(double %a) {
 ; CHECK-NEXT:    [[TMP0:%.*]] = bitcast double [[A:%.*]] to i64
 ; CHECK-NEXT:    [[TMP1:%.*]] = zext i64 [[TMP0]] to i129
 ; CHECK-NEXT:    [[TMP2:%.*]] = icmp sgt i64 [[TMP0]], -1
-; CHECK-NEXT:    [[TMP3:%.*]] = select i1 [[TMP2]], i129 1, i129 -1
+; CHECK-NEXT:    [[TMP3:%.*]] = select i1 [[TMP2]], i129 1, i129 -1, !prof [[PROF1]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = lshr i129 [[TMP1]], 52
 ; CHECK-NEXT:    [[TMP5:%.*]] = and i129 [[TMP4]], 2047
 ; CHECK-NEXT:    [[TMP6:%.*]] = and i129 [[TMP1]], 4503599627370495
 ; CHECK-NEXT:    [[TMP7:%.*]] = or i129 [[TMP6]], 4503599627370496
 ; CHECK-NEXT:    [[TMP8:%.*]] = icmp ult i129 [[TMP5]], 1023
-; CHECK-NEXT:    br i1 [[TMP8]], label [[FP_TO_I_CLEANUP:%.*]], label [[FP_TO_I_IF_END:%.*]]
+; CHECK-NEXT:    br i1 [[TMP8]], label [[FP_TO_I_CLEANUP:%.*]], label [[FP_TO_I_IF_END:%.*]], !prof [[PROF2]]
 ; CHECK:       fp-to-i-if-end:
 ; CHECK-NEXT:    [[TMP9:%.*]] = add i129 [[TMP5]], -1152
 ; CHECK-NEXT:    [[TMP10:%.*]] = icmp ult i129 [[TMP9]], -129
-; CHECK-NEXT:    br i1 [[TMP10]], label [[FP_TO_I_IF_THEN5:%.*]], label [[FP_TO_I_IF_END9:%.*]]
+; CHECK-NEXT:    br i1 [[TMP10]], label [[FP_TO_I_IF_THEN5:%.*]], label [[FP_TO_I_IF_END9:%.*]], !prof [[PROF2]]
 ; CHECK:       fp-to-i-if-then5:
 ; CHECK-NEXT:    [[TMP11:%.*]] = select i1 [[TMP2]], i129 340282366920938463463374607431768211455, i129 -340282366920938463463374607431768211456
 ; CHECK-NEXT:    br label [[FP_TO_I_CLEANUP]]
 ; CHECK:       fp-to-i-if-end9:
 ; CHECK-NEXT:    [[TMP12:%.*]] = icmp ult i129 [[TMP5]], 1075
-; CHECK-NEXT:    br i1 [[TMP12]], label [[FP_TO_I_IF_THEN12:%.*]], label [[FP_TO_I_IF_ELSE:%.*]]
+; CHECK-NEXT:    br i1 [[TMP12]], label [[FP_TO_I_IF_THEN12:%.*]], label [[FP_TO_I_IF_ELSE:%.*]], !prof [[PROF1]]
 ; CHECK:       fp-to-i-if-then12:
 ; CHECK-NEXT:    [[TMP13:%.*]] = sub i129 1075, [[TMP5]]
 ; CHECK-NEXT:    [[TMP14:%.*]] = lshr i129 [[TMP7]], [[TMP13]]
@@ -101,23 +101,23 @@ define i129 @x86_fp80tosi129(x86_fp80 %a) {
 ; CHECK-NEXT:    [[TMP1:%.*]] = bitcast fp128 [[TMP0]] to i128
 ; CHECK-NEXT:    [[TMP2:%.*]] = zext i128 [[TMP1]] to i129
 ; CHECK-NEXT:    [[TMP3:%.*]] = icmp sgt i128 [[TMP1]], -1
-; CHECK-NEXT:    [[TMP4:%.*]] = select i1 [[TMP3]], i129 1, i129 -1
+; CHECK-NEXT:    [[TMP4:%.*]] = select i1 [[TMP3]], i129 1, i129 -1, !prof [[PROF1]]
 ; CHECK-NEXT:    [[TMP5:%.*]] = lshr i129 [[TMP2]], 112
 ; CHECK-NEXT:    [[TMP6:%.*]] = and i129 [[TMP5]], 32767
 ; CHECK-NEXT:    [[TMP7:%.*]] = and i129 [[TMP2]], 5192296858534827628530496329220095
 ; CHECK-NEXT:    [[TMP8:%.*]] = or i129 [[TMP7]], 5192296858534827628530496329220096
 ; CHECK-NEXT:    [[TMP9:%.*]] = icmp ult i129 [[TMP6]], 16383
-; CHECK-NEXT:    br i1 [[TMP9]], label [[FP_TO_I_CLEANUP:%.*]], label [[FP_TO_I_IF_END:%.*]]
+; CHECK-NEXT:    br i1 [[TMP9]], label [[FP_TO_I_CLEANUP:%.*]], label [[FP_TO_I_IF_END:%.*]], !prof [[PROF2]]
 ; CHECK:       fp-to-i-if-end:
 ; CHECK-NEXT:    [[TMP10:%.*]] = add i129 [[TMP6]], -16512
 ; CHECK-NEXT:    [[TMP11:%.*]] = icmp ult i129 [[TMP10]], -129
-; CHECK-NEXT:    br i1 [[TMP11]], label [[FP_TO_I_IF_THEN5:%.*]], label [[FP_TO_I_IF_END9:%.*]]
+; CHECK-NEXT:    br i1 [[TMP11]], label [[FP_TO_I_IF_THEN5:%.*]], label [[FP_TO_I_IF_END9:%.*]], !prof [[PROF2]]
 ; CHECK:       fp-to-i-if-then5:
 ; CHECK-NEXT:    [[TMP12:%.*]] = select i1 [[TMP3]], i129 340282366920938463463374607431768211455, i129 -340282366920938463463374607431768211456
 ; CHECK-NEXT:    br label [[FP_TO_I_CLEANUP]]
 ; CHECK:       fp-to-i-if-end9:
 ; CHECK-NEXT:    [[TMP13:%.*]] = icmp ult i129 [[TMP6]], 16495
-; CHECK-NEXT:    br i1 [[TMP13]], label [[FP_TO_I_IF_THEN12:%.*]], label [[FP_TO_I_IF_ELSE:%.*]]
+; CHECK-NEXT:    br i1 [[TMP13]], label [[FP_TO_I_IF_THEN12:%.*]], label [[FP_TO_I_IF_ELSE:%.*]], !prof [[PROF1]]
 ; CHECK:       fp-to-i-if-then12:
 ; CHECK-NEXT:    [[TMP14:%.*]] = sub i129 16495, [[TMP6]]
 ; CHECK-NEXT:    [[TMP15:%.*]] = lshr i129 [[TMP8]], [[TMP14]]
@@ -142,23 +142,23 @@ define i129 @fp128tosi129(fp128 %a) {
 ; CHECK-NEXT:    [[TMP0:%.*]] = bitcast fp128 [[A:%.*]] to i128
 ; CHECK-NEXT:    [[TMP1:%.*]] = zext i128 [[TMP0]] to i129
 ; CHECK-NEXT:    [[TMP2:%.*]] = icmp sgt i128 [[TMP0]], -1
-; CHECK-NEXT:    [[TMP3:%.*]] = select i1 [[TMP2]], i129 1, i129 -1
+; CHECK-NEXT:    [[TMP3:%.*]] = select i1 [[TMP2]], i129 1, i129 -1, !prof [[PROF1]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = lshr i129 [[TMP1]], 112
 ; CHECK-NEXT:    [[TMP5:%.*]] = and i129 [[TMP4]], 32767
 ; CHECK-NEXT:    [[TMP6:%.*]] = and i129 [[TMP1]], 5192296858534827628530496329220095
 ; CHECK-NEXT:    [[TMP7:%.*]] = or i129 [[TMP6]], 5192296858534827628530496329220096
 ; CHECK-NEXT:    [[TMP8:%.*]] = icmp ult i129 [[TMP5]], 16383
-; CHECK-NEXT:    br i1 [[TMP8]], label [[FP_TO_I_CLEANUP:%.*]], label [[FP_TO_I_IF_END:%.*]]
+; CHECK-NEXT:    br i1 [[TMP8]], label [[FP_TO_I_CLEANUP:%.*]], label [[FP_TO_I_IF_END:%.*]], !prof [[PROF2]]
 ; CHECK:       fp-to-i-if-end:
 ; CHECK-NEXT:    [[TMP9:%.*]] = add i129 [[TMP5]], -16512
 ; CHECK-NEXT:    [[TMP10:%.*]] = icmp ult i129 [[TMP9]], -129
-; CHECK-NEXT:    br i1 [[TMP10]], label [[FP_TO_I_IF_THEN5:%.*]], label [[FP_TO_I_IF_END9:%.*]]
+; CHECK-NEXT:    br i1 [[TMP10]], label [[FP_TO_I_IF_THEN5:%.*]], label [[FP_TO_I_IF_END9:%.*]], !prof [[PROF2]]
 ; CHECK:       fp-to-i-if-then5:
 ; CHECK-NEXT:    [[TMP11:%.*]] = select i1 [[TMP2]], i129 340282366920938463463374607431768211455, i129 -340282366920938463463374607431768211456
 ; CHECK-NEXT:    br label [[FP_TO_I_CLEANUP]]
 ; CHECK:       fp-to-i-if-end9:
 ; CHECK-NEXT:    [[TMP12:%.*]] = icmp ult i129 [[TMP5]], 16495
-; CHECK-NEXT:    br i1 [[TMP12]], label [[FP_TO_I_IF_THEN12:%.*]], label [[FP_TO_I_IF_ELSE:%.*]]
+; CHECK-NEXT:    br i1 [[TMP12]], label [[FP_TO_I_IF_THEN12:%.*]], label [[FP_TO_I_IF_ELSE:%.*]], !prof [[PROF1]]
 ; CHECK:       fp-to-i-if-then12:
 ; CHECK-NEXT:    [[TMP13:%.*]] = sub i129 16495, [[TMP5]]
 ; CHECK-NEXT:    [[TMP14:%.*]] = lshr i129 [[TMP7]], [[TMP13]]
@@ -184,23 +184,23 @@ define <2 x i129> @floattosi129v2(<2 x float> %a) {
 ; CHECK-NEXT:    [[TMP1:%.*]] = bitcast float [[TMP0]] to i32
 ; CHECK-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i129
 ; CHECK-NEXT:    [[TMP3:%.*]] = icmp sgt i32 [[TMP1]], -1
-; CHECK-NEXT:    [[TMP4:%.*]] = select i1 [[TMP3]], i129 1, i129 -1
+; CHECK-NEXT:    [[TMP4:%.*]] = select i1 [[TMP3]], i129 1, i129 -1, !prof [[PROF1]]
 ; CHECK-NEXT:    [[TMP5:%.*]] = lshr i129 [[TMP2]], 23
 ; CHECK-NEXT:    [[TMP6:%.*]] = and i129 [[TMP5]], 255
 ; CHECK-NEXT:    [[TMP7:%.*]] = and i129 [[TMP2]], 8388607
 ; CHECK-NEXT:    [[TMP8:%.*]] = or i129 [[TMP7]], 8388608
 ; CHECK-NEXT:    [[TMP9:%.*]] = icmp ult i129 [[TMP6]], 127
-; CHECK-NEXT:    br i1 [[TMP9]], label [[FP_TO_I_CLEANUP1:%.*]], label [[FP_TO_I_IF_END2:%.*]]
+; CHECK-NEXT:    br i1 [[TMP9]], label [[FP_TO_I_CLEANUP1:%.*]], label [[FP_TO_I_IF_END2:%.*]], !prof [[PROF2]]
 ; CHECK:       fp-to-i-if-end2:
 ; CHECK-NEXT:    [[TMP10:%.*]] = add i129 [[TMP6]], -256
 ; CHECK-NEXT:    [[TMP11:%.*]] = icmp ult i129 [[TMP10]], -129
-; CHECK-NEXT:    br i1 [[TMP11]], label [[FP_TO_I_IF_THEN53:%.*]], label [[FP_TO_I_IF_END94:%.*]]
+; CHECK-NEXT:    br i1 [[TMP11]], label [[FP_TO_I_IF_THEN53:%.*]], label [[FP_TO_I_IF_END94:%.*]], !prof [[PROF2]]
 ; CHECK:       fp-to-i-if-then53:
 ; CHECK-NEXT:    [[TMP12:%.*]] = select i1 [[TMP3]], i129 340282366920938463463374607431768211455, i129 -340282366920938463463374607431768211456
 ; CHECK-NEXT:    br label [[FP_TO_I_CLEANUP1]]
 ; CHECK:       fp-to-i-if-end94:
 ; CHECK-NEXT:    [[TMP13:%.*]] = icmp ult i129 [[TMP6]], 150
-; CHECK-NEXT:    br i1 [[TMP13]], label [[FP_TO_I_IF_THEN125:%.*]], label [[FP_TO_I_IF_ELSE6:%.*]]
+; CHECK-NEXT:    br i1 [[TMP13]], label [[FP_TO_I_IF_THEN125:%.*]], label [[FP_TO_I_IF_ELSE6:%.*]], !prof [[PROF1]]
 ; CHECK:       fp-to-i-if-then125:
 ; CHECK-NEXT:    [[TMP14:%.*]] = sub i129 150, [[TMP6]]
 ; CHECK-NEXT:    [[TMP15:%.*]] = lshr i129 [[TMP8]], [[TMP14]]
@@ -218,23 +218,23 @@ define <2 x i129> @floattosi129v2(<2 x float> %a) {
 ; CHECK-NEXT:    [[TMP23:%.*]] = bitcast float [[TMP22]] to i32
 ; CHECK-NEXT:    [[TMP24:%.*]] = zext i32 [[TMP23]] to i129
 ; CHECK-NEXT:    [[TMP25:%.*]] = icmp sgt i32 [[TMP23]], -1
-; CHECK-NEXT:    [[TMP26:%.*]] = select i1 [[TMP25]], i129 1, i129 -1
+; CHECK-NEXT:    [[TMP26:%.*]] = select i1 [[TMP25]], i129 1, i129 -1, !prof [[PROF1]]
 ; CHECK-NEXT:    [[TMP27:%.*]] = lshr i129 [[TMP24]], 23
 ; CHECK-NEXT:    [[TMP28:%.*]] = and i129 [[TMP27]], 255
 ; CHECK-NEXT:    [[TMP29:%.*]] = and i129 [[TMP24]], 8388607
 ; CHECK-NEXT:    [[TMP30:%.*]] = or i129 [[TMP29]], 8388608
 ; CHECK-NEXT:    [[TMP31:%.*]] = icmp ult i129 [[TMP28]], 127
-; CHECK-NEXT:    br i1 [[TMP31]], label [[FP_TO_I_CLEANUP:%.*]], label [[FP_TO_I_IF_END:%.*]]
+; CHECK-NEXT:    br i1 [[TMP31]], label [[FP_TO_I_CLEANUP:%.*]], label [[FP_TO_I_IF_END:%.*]], !prof [[PROF2]]
 ; CHECK:       fp-to-i-if-end:
 ; CHECK-NEXT:    [[TMP32:%.*]] = add i129 [[TMP28]], -256
 ; CHECK-NEXT:    [[TMP33:%.*]] = icmp ult i129 [[TMP32]], -129
-; CHECK-NEXT:    br i1 [[TMP33]], label [[FP_TO_I_IF_THEN5:%.*]], label [[FP_TO_I_IF_END9:%.*]]
+; CHECK-NEXT:    br i1 [[TMP33]], label [[FP_TO_I_IF_THEN5:%.*]], label [[FP_TO_I_IF_END9:%.*]], !prof [[PROF2]]
 ; CHECK:       fp-to-i-if-then5:
 ; CHECK-NEXT:    [[TMP34:%.*]] = select i1 [[TMP25]], i129 340282366920938463463374607431768211455, i129 -340282366920938463463374607431768211456
 ; CHECK-NEXT:    br label [[FP_TO_I_CLEANUP]]
 ; CHECK:       fp-to-i-if-end9:
 ; CHECK-NEXT:    [[TMP35:%.*]] = icmp ult i129 [[TMP28]], 150
-; CHECK-NEXT:    br i1 [[TMP35]], label [[FP_TO_I_IF_THEN12:%.*]], label [[FP_TO_I_IF_ELSE:%.*]]
+; CHECK-NEXT:    br i1 [[TMP35]], label [[FP_TO_I_IF_THEN12:%.*]], label [[FP_TO_I_IF_ELSE:%.*]], !prof [[PROF1]]
 ; CHECK:       fp-to-i-if-then12:
 ; CHECK-NEXT:    [[TMP36:%.*]] = sub i129 150, [[TMP28]]
 ; CHECK-NEXT:    [[TMP37:%.*]] = lshr i129 [[TMP30]], [[TMP36]]
@@ -253,3 +253,11 @@ define <2 x i129> @floattosi129v2(<2 x float> %a) {
   %conv = fptosi <2 x float> %a to <2 x i129>
   ret <2 x i129> %conv
 }
+
+!0 = !{!"function_entry_count", i64 1000}
+;.
+; CHECK: [[META0:![0-9]+]] = !{!"function_entry_count", i64 1000}
+; CHECK: [[PROF1]] = !{!"branch_weights", i32 1048575, i32 1}
+; CHECK: [[PROF2]] = !{!"branch_weights", i32 1, i32 1048575}
+; CHECK: [[PROF3]] = !{!"unknown", !"expand-ir-insts"}
+;.
diff --git a/llvm/test/Transforms/ExpandIRInsts/X86/expand-large-fp-convert-fptoui129.ll b/llvm/test/Transforms/ExpandIRInsts/X86/expand-large-fp-convert-fptoui129.ll
index 07de91d404988..179ae9561512d 100644
--- a/llvm/test/Transforms/ExpandIRInsts/X86/expand-large-fp-convert-fptoui129.ll
+++ b/llvm/test/Transforms/ExpandIRInsts/X86/expand-large-fp-convert-fptoui129.ll
@@ -1,8 +1,8 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals
 ; RUN: opt -S -mtriple=x86_64-- --expand-ir-insts < %s | FileCheck %s
 ; RUN: opt -S -mtriple=x86_64-- -passes='require<libcall-lowering-info>,expand-ir-insts' < %s | FileCheck %s
 
-define i129 @halftoui129(half %a) {
+define i129 @halftoui129(half %a) !prof !0 {
 ; CHECK-LABEL: @halftoui129(
 ; CHECK-NEXT:    [[TMP1:%.*]] = fptoui half [[A:%.*]] to i32
 ; CHECK-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i129
@@ -12,29 +12,29 @@ define i129 @halftoui129(half %a) {
   ret i129 %conv
 }
 
-define i129 @floattoui129(float %a) {
+define i129 @floattoui129(float %a)  !prof !0 {
 ; CHECK-LABEL: @floattoui129(
 ; CHECK-NEXT:  fp-to-i-entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = bitcast float [[A:%.*]] to i32
 ; CHECK-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i129
 ; CHECK-NEXT:    [[TMP2:%.*]] = icmp sgt i32 [[TMP0]], -1
-; CHECK-NEXT:    [[TMP3:%.*]] = select i1 [[TMP2]], i129 1, i129 -1
+; CHECK-NEXT:    [[TMP3:%.*]] = select i1 [[TMP2]], i129 1, i129 -1, !prof [[PROF1:![0-9]+]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = lshr i129 [[TMP1]], 23
 ; CHECK-NEXT:    [[TMP5:%.*]] = and i129 [[TMP4]], 255
 ; CHECK-NEXT:    [[TMP6:%.*]] = and i129 [[TMP1]], 8388607
 ; CHECK-NEXT:    [[TMP7:%.*]] = or i129 [[TMP6]], 8388608
 ; CHECK-NEXT:    [[TMP8:%.*]] = icmp ult i129 [[TMP5]], 127
-; CHECK-NEXT:    br i1 [[TMP8]], label [[FP_TO_I_CLEANUP:%.*]], label [[FP_TO_I_IF_END:%.*]]
+; CHECK-NEXT:    br i1 [[TMP8]], label [[FP_TO_I_CLEANUP:%.*]], label [[FP_TO_I_IF_END:%.*]], !prof [[PROF2:![0-9]+]]
 ; CHECK:       fp-to-i-if-end:
 ; CHECK-NEXT:    [[TMP9:%.*]] = add i129 [[TMP5]], -256
 ; CHECK-NEXT:    [[TMP10:%.*]] = icmp ult i129 [[TMP9]], -129
-; CHECK-NEXT:    br i1 [[TMP10]], label [[FP_TO_I_IF_THEN5:%.*]], label [[FP_TO_I_IF_END9:%.*]]
+; CHECK-NEXT:    br i1 [[TMP10]], label [[FP_TO_I_IF_THEN5:%.*]], label [[FP_TO_I_IF_END9:%.*]], !prof [[PROF2]]
 ; CHECK:       fp-to-i-if-then5:
-; CHECK-NEXT:    [[TMP11:%.*]] = select i1 [[TMP2]], i129 340282366920938463463374607431768211455, i129 -340282366920938463463374607431768211456
+; CHECK-NEXT:    [[TMP11:%.*]] = select i1 [[TMP2]], i129 340282366920938463463374607431768211455, i129 -340282366920938463463374607431768211456, !prof [[PROF3:![0-9]+]]
 ; CHECK-NEXT:    br label [[FP_TO_I_CLEANUP]]
 ; CHECK:       fp-to-i-if-end9:
 ; CHECK-NEXT:    [[TMP12:%.*]] = icmp ult i129 [[TMP5]], 150
-; CHECK-NEXT:    br i1 [[TMP12]], label [[FP_TO_I_IF_THEN12:%.*]], label [[FP_TO_I_IF_ELSE:%.*]]
+; CHECK-NEXT:    br i1 [[TMP12]], label [[FP_TO_I_IF_THEN12:%.*]], label [[FP_TO_I_IF_ELSE:%.*]], !prof [[PROF1]]
 ; CHECK:       fp-to-i-if-then12:
 ; CHECK-NEXT:    [[TMP13:%.*]] = sub i129 150, [[TMP5]]
 ; CHECK-NEXT:    [[TMP14:%.*]] = lshr i129 [[TMP7]], [[TMP13]]
@@ -59,23 +59,23 @@ define i129 @doubletoui129(double %a) {
 ; CHECK-NEXT:    [[TMP0:%.*]] = bitcast double [[A:%.*]] to i64
 ; CHECK-NEXT:    [[TMP1:%.*]] = zext i64 [[TMP0]] to i129
 ; CHECK-NEXT:    [[TMP2:%.*]] = icmp sgt i64 [[TMP0]], -1
-; CHECK-NEXT:    [[TMP3:%.*]] = select i1 [[TMP2]], i129 1, i129 -1
+; CHECK-NEXT:    [[TMP3:%.*]] = select i1 [[TMP2]], i129 1, i129 -1, !prof [[PROF1]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = lshr i129 [[TMP1]], 52
 ; CHECK-NEXT:    [[TMP5:%.*]] = and i129 [[TMP4]], 2047
 ; CHECK-NEXT:    [[TMP6:%.*]] = and i129 [[TMP1]], 4503599627370495
 ; CHECK-NEXT:    [[TMP7:%.*]] = or i129 [[TMP6]], 4503599627370496
 ; CHECK-NEXT:    [[TMP8:%.*]] = icmp ult i129 [[TMP5]], 1023
-; CHECK-NEXT:    br i1 [[TMP8]], label [[FP_TO_I_CLEANUP:%.*]], label [[FP_TO_I_IF_END:%.*]]
+; CHECK-NEXT:    br i1 [[TMP8]], label [[FP_TO_I_CLEANUP:%.*]], label [[FP_TO_I_IF_END:%.*]], !prof [[PROF2]]
 ; CHECK:       fp-to-i-if-end:
 ; CHECK-NEXT:    [[TMP9:%.*]] = add i129 [[TMP5]], -1152
 ; CHECK-NEXT:    [[TMP10:%.*]] = icmp ult i129 [[TMP9]], -129
-; CHECK-NEXT:    br i1 [[TMP10]], label [[FP_TO_I_IF_THEN5:%.*]], label [[FP_TO_I_IF_END9:%.*]]
+; CHECK-NEXT:    br i1 [[TMP10]], label [[FP_TO_I_IF_THEN5:%.*]], label [[FP_TO_I_IF_END9:%.*]], !prof [[PROF2]]
 ; CHECK:       fp-to-i-if-then5:
 ; CHECK-NEXT:    [[TMP11:%.*]] = select i1 [[TMP2]], i129 340282366920938463463374607431768211455, i129 -340282366920938463463374607431768211456
 ; CHECK-NEXT:    br label [[FP_TO_I_CLEANUP]]
 ; CHECK:       fp-to-i-if-end9:
 ; CHECK-NEXT:    [[TMP12:%.*]] = icmp ult i129 [[TMP5]], 1075
-; CHECK-NEXT:    br i1 [[TMP12]], label [[FP_TO_I_IF_THEN12:%.*]], label [[FP_TO_I_IF_ELSE:%.*]]
+; CHECK-NEXT:    br i1 [[TMP12]], label [[FP_TO_I_IF_THEN12:%.*]], label [[FP_TO_I_IF_ELSE:%.*]], !prof [[PROF1]]
 ; CHECK:       fp-to-i-if-then12:
 ; CHECK-NEXT:    [[TMP13:%.*]] = sub i129 1075, [[TMP5]]
 ; CHECK-NEXT:    [[TMP14:%.*]] = lshr i129 [[TMP7]], [[TMP13]]
@@ -101,23 +101,23 @@ define i129 @x86_fp80toui129(x86_fp80 %a) {
 ; CHECK-NEXT:    [[TMP1:%.*]] = bitcast fp128 [[TMP0]] to i128
 ; CHECK-NEXT:    [[TMP2:%.*]] = zext i128 [[TMP1]] to i129
 ; CHECK-NEXT:    [[TMP3:%.*]] = icmp sgt i128 [[TMP1]], -1
-; CHECK-NEXT:    [[TMP4:%.*]] = select i1 [[TMP3]], i129 1, i129 -1
+; CHECK-NEXT:    [[TMP4:%.*]] = select i1 [[TMP3]], i129 1, i129 -1, !prof [[PROF1]]
 ; CHECK-NEXT:    [[TMP5:%.*]] = lshr i129 [[TMP2]], 112
 ; CHECK-NEXT:    [[TMP6:%.*]] = and i129 [[TMP5]], 32767
 ; CHECK-NEXT:    [[TMP7:%.*]] = and i129 [[TMP2]], 5192296858534827628530496329220095
 ; CHECK-NEXT:    [[TMP8:%.*]] = or i129 [[TMP7]], 5192296858534827628530496329220096
 ; CHECK-NEXT:    [[TMP9:%.*]] = icmp ult i129 [[TMP6]], 16383
-; CHECK-NEXT:    br i1 [[TMP9]], label [[FP_TO_I_CLEANUP:%.*]], label [[FP_TO_I_IF_END:%.*]]
+; CHECK-NEXT:    br i1 [[TMP9]], label [[FP_TO_I_CLEANUP:%.*]], label [[FP_TO_I_IF_END:%.*]], !prof [[PROF2]]
 ; CHECK:       fp-to-i-if-end:
 ; CHECK-NEXT:    [[TMP10:%.*]] = add i129 [[TMP6]], -16512
 ; CHECK-NEXT:    [[TMP11:%.*]] = icmp ult i129 [[TMP10]], -129
-; CHECK-NEXT:    br i1 [[TMP11]], label [[FP_TO_I_IF_THEN5:%.*]], label [[FP_TO_I_IF_END9:%.*]]
+; CHECK-NEXT:    br i1 [[TMP11]], label [[FP_TO_I_IF_THEN5:%.*]], label [[FP_TO_I_IF_END9:%.*]], !prof [[PROF2]]
 ; CHECK:       fp-to-i-if-then5:
 ; CHECK-NEXT:    [[TMP12:%.*]] = select i1 [[TMP3]], i129 340282366920938463463374607431768211455, i129 -340282366920938463463374607431768211456
 ; CHECK-NEXT:    br label [[FP_TO_I_CLEANUP]]
 ; CHECK:       fp-to-i-if-end9:
 ; CHECK-NEXT:    [[TMP13:%.*]] = icmp ult i129 [[TMP6]], 16495
-; CHECK-NEXT:    br i1 [[TMP13]], label [[FP_TO_I_IF_THEN12:%.*]], label [[FP_TO_I_IF_ELSE:%.*]]
+; CHECK-NEXT:    br i1 [[TMP13]], label [[FP_TO_I_IF_THEN12:%.*]], label [[FP_TO_I_IF_ELSE:%.*]], !prof [[PROF1]]
 ; CHECK:       fp-to-i-if-then12:
 ; CHECK-NEXT:    [[TMP14:%.*]] = sub i129 16495, [[TMP6]]
 ; CHECK-NEXT:    [[TMP15:%.*]] = lshr i129 [[TMP8]], [[TMP14]]
@@ -142,23 +142,23 @@ define i129 @fp128toui129(fp128 %a) {
 ; CHECK-NEXT:    [[TMP0:%.*]] = bitcast fp128 [[A:%.*]] to i128
 ; CHECK-NEXT:    [[TMP1:%.*]] = zext i128 [[TMP0]] to i129
 ; CHECK-NEXT:    [[TMP2:%.*]] = icmp sgt i128 [[TMP0]], -1
-; CHECK-NEXT:    [[TMP3:%.*]] = select i1 [[TMP2]], i129 1, i129 -1
+; CHECK-NEXT:    [[TMP3:%.*]] = select i1 [[TMP2]], i129 1, i129 -1, !prof [[PROF1]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = lshr i129 [[TMP1]], 112
 ; CHECK-NEXT:    [[TMP5:%.*]] = and i129 [[TMP4]], 32767
 ; CHECK-NEXT:    [[TMP6:%.*]] = and i129 [[TMP1]], 5192296858534827628530496329220095
 ; CHECK-NEXT:    [[TMP7:%.*]] = or i129 [[TMP6]], 5192296858534827628530496329220096
 ; CHECK-NEXT:    [[TMP8:%.*]] = icmp ult i129 [[TMP5]], 16383
-; CHECK-NEXT:    br i1 [[TMP8]], label [[FP_TO_I_CLEANUP:%.*]], label [[FP_TO_I_IF_END:%.*]]
+; CHECK-NEXT:    br i1 [[TMP8]], label [[FP_TO_I_CLEANUP:%.*]], label [[FP_TO_I_IF_END:%.*]], !prof [[PROF2]]
 ; CHECK:       fp-to-i-if-end:
 ; CHECK-NEXT:    [[TMP9:%.*]] = add i129 [[TMP5]], -16512
 ; CHECK-NEXT:    [[TMP10:%.*]] = icmp ult i129 [[TMP9]], -129
-; CHECK-NEXT:    br i1 [[TMP10]], label [[FP_TO_I_IF_THEN5:%.*]], label [[FP_TO_I_IF_END9:%.*]]
+; CHECK-NEXT:    br i1 [[TMP10]], label [[FP_TO_I_IF_THEN5:%.*]], label [[FP_TO_I_IF_END9:%.*]], !prof [[PROF2]]
 ; CHECK:       fp-to-i-if-then5:
 ; CHECK-NEXT:    [[TMP11:%.*]] = select i1 [[TMP2]], i129 340282366920938463463374607431768211455, i129 -340282366920938463463374607431768211456
 ; CHECK-NEXT:    br label [[FP_TO_I_CLEANUP]]
 ; CHECK:       fp-to-i-if-end9:
 ; CHECK-NEXT:    [[TMP12:%.*]] = icmp ult i129 [[TMP5]], 16495
-; CHECK-NEXT:    br i1 [[TMP12]], label [[FP_TO_I_IF_THEN12:%.*]], label [[FP_TO_I_IF_ELSE:%.*]]
+; CHECK-NEXT:    br i1 [[TMP12]], label [[FP_TO_I_IF_THEN12:%.*]], label [[FP_TO_I_IF_ELSE:%.*]], !prof [[PROF1]]
 ; CHECK:       fp-to-i-if-then12:
 ; CHECK-NEXT:    [[TMP13:%.*]] = sub i129 16495, [[TMP5]]
 ; CHECK-NEXT:    [[TMP14:%.*]] = lshr i129 [[TMP7]], [[TMP13]]
@@ -184,23 +184,23 @@ define <2 x i129> @floattoui129v2(<2 x float> %a) {
 ; CHECK-NEXT:    [[TMP1:%.*]] = bitcast float [[TMP0]] to i32
 ; CHECK-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i129
 ; CHECK-NEXT:    [[TMP3:%.*]] = icmp sgt i32 [[TMP1]], -1
-; CHECK-NEXT:    [[TMP4:%.*]] = select i1 [[TMP3]], i129 1, i129 -1
+; CHECK-NEXT:    [[TMP4:%.*]] = select i1 [[TMP3]], i129 1, i129 -1, !prof [[PROF1]]
 ; CHECK-NEXT:    [[TMP5:%.*]] = lshr i129 [[TMP2]], 23
 ; CHECK-NEXT:    [[TMP6:%.*]] = and i129 [[TMP5]], 255
 ; CHECK-NEXT:    [[TMP7:%.*]] = and i129 [[TMP2]], 8388607
 ; CHECK-NEXT:    [[TMP8:%.*]] = or i129 [[TMP7]], 8388608
 ; CHECK-NEXT:    [[TMP9:%.*]] = icmp ult i129 [[TMP6]], 127
-; CHECK-NEXT:    br i1 [[TMP9]], label [[FP_TO_I_CLEANUP1:%.*]], label [[FP_TO_I_IF_END2:%.*]]
+; CHECK-NEXT:    br i1 [[TMP9]], label [[FP_TO_I_CLEANUP1:%.*]], label [[FP_TO_I_IF_END2:%.*]], !prof [[PROF2]]
 ; CHECK:       fp-to-i-if-end2:
 ; CHECK-NEXT:    [[TMP10:%.*]] = add i129 [[TMP6]], -256
 ; CHECK-NEXT:    [[TMP11:%.*]] = icmp ult i129 [[TMP10]], -129
-; CHECK-NEXT:    br i1 [[TMP11]], label [[FP_TO_I_IF_THEN53:%.*]], label [[FP_TO_I_IF_END94:%.*]]
+; CHECK-NEXT:    br i1 [[TMP11]], label [[FP_TO_I_IF_THEN53:%.*]], label [[FP_TO_I_IF_END94:%.*]], !prof [[PROF2]]
 ; CHECK:       fp-to-i-if-then53:
 ; CHECK-NEXT:    [[TMP12:%.*]] = select i1 [[TMP3]], i129 340282366920938463463374607431768211455, i129 -340282366920938463463374607431768211456
 ; CHECK-NEXT:    br label [[FP_TO_I_CLEANUP1]]
 ; CHECK:       fp-to-i-if-end94:
 ; CHECK-NEXT:    [[TMP13:%.*]] = icmp ult i129 [[TMP6]], 150
-; CHECK-NEXT:    br i1 [[TMP13]], label [[FP_TO_I_IF_THEN125:%.*]], label [[FP_TO_I_IF_ELSE6:%.*]]
+; CHECK-NEXT:    br i1 [[TMP13]], label [[FP_TO_I_IF_THEN125:%.*]], label [[FP_TO_I_IF_ELSE6:%.*]], !prof [[PROF1]]
 ; CHECK:       fp-to-i-if-then125:
 ; CHECK-NEXT:    [[TMP14:%.*]] = sub i129 150, [[TMP6]]
 ; CHECK-NEXT:    [[TMP15:%.*]] = lshr i129 [[TMP8]], [[TMP14]]
@@ -218,23 +218,23 @@ define <2 x i129> @floattoui129v2(<2 x float> %a) {
 ; CHECK-NEXT:    [[TMP23:%.*]] = bitcast float [[TMP22]] to i32
 ; CHECK-NEXT:    [[TMP24:%.*]] = zext i32 [[TMP23]] to i129
 ; CHECK-NEXT:    [[TMP25:%.*]] = icmp sgt i32 [[TMP23]], -1
-; CHECK-NEXT:    [[TMP26:%.*]] = select i1 [[TMP25]], i129 1, i129 -1
+; CHECK-NEXT:    [[TMP26:%.*]] = select i1 [[TMP25]], i129 1, i129 -1, !prof [[PROF1]]
 ; CHECK-NEXT:    [[TMP27:%.*]] = lshr i129 [[TMP24]], 23
 ; CHECK-NEXT:    [[TMP28:%.*]] = and i129 [[TMP27]], 255
 ; CHECK-NEXT:    [[TMP29:%.*]] = and i129 [[TMP24]], 8388607
 ; CHECK-NEXT:    [[TMP30:%.*]] = or i129 [[TMP29]], 8388608
 ; CHECK-NEXT:    [[TMP31:%.*]] = icmp ult i129 [[TMP28]], 127
-; CHECK-NEXT:    br i1 [[TMP31]], label [[FP_TO_I_CLEANUP:%.*]], label [[FP_TO_I_IF_END:%.*]]
+; CHECK-NEXT:    br i1 [[TMP31]], label [[FP_TO_I_CLEANUP:%.*]], label [[FP_TO_I_IF_END:%.*]], !prof [[PROF2]]
 ; CHECK:       fp-to-i-if-end:
 ; CHECK-NEXT:    [[TMP32:%.*]] = add i129 [[TMP28]], -256
 ; CHECK-NEXT:    [[TMP33:%.*]] = icmp ult i129 [[TMP32]], -129
-; CHECK-NEXT:    br i1 [[TMP33]], label [[FP_TO_I_IF_THEN5:%.*]], label [[FP_TO_I_IF_END9:%.*]]
+; CHECK-NEXT:    br i1 [[TMP33]], label [[FP_TO_I_IF_THEN5:%.*]], label [[FP_TO_I_IF_END9:%.*]], !prof [[PROF2]]
 ; CHECK:       fp-to-i-if-then5:
 ; CHECK-NEXT:    [[TMP34:%.*]] = select i1 [[TMP25]], i129 340282366920938463463374607431768211455, i129 -340282366920938463463374607431768211456
 ; CHECK-NEXT:    br label [[FP_TO_I_CLEANUP]]
 ; CHECK:       fp-to-i-if-end9:
 ; CHECK-NEXT:    [[TMP35:%.*]] = icmp ult i129 [[TMP28]], 150
-; CHECK-NEXT:    br i1 [[TMP35]], label [[FP_TO_I_IF_THEN12:%.*]], label [[FP_TO_I_IF_ELSE:%.*]]
+; CHECK-NEXT:    br i1 [[TMP35]], label [[FP_TO_I_IF_THEN12:%.*]], label [[FP_TO_I_IF_ELSE:%.*]], !prof [[PROF1]]
 ; CHECK:       fp-to-i-if-then12:
 ; CHECK-NEXT:    [[TMP36:%.*]] = sub i129 150, [[TMP28]]
 ; CHECK-NEXT:    [[TMP37:%.*]] = lshr i129 [[TMP30]], [[TMP36]]
@@ -253,3 +253,11 @@ define <2 x i129> @floattoui129v2(<2 x float> %a) {
   %conv = fptoui <2 x float> %a to <2 x i129>
   ret <2 x i129> %conv
 }
+
+!0 = !{!"function_entry_count", i64 1000}
+;.
+; CHECK: [[META0:![0-9]+]] = !{!"function_entry_count", i64 1000}
+; CHECK: [[PROF1]] = !{!"branch_weights", i32 1048575, i32 1}
+; CHECK: [[PROF2]] = !{!"branch_weights", i32 1, i32 1048575}
+; CHECK: [[PROF3]] = !{!"unknown", !"expand-ir-insts"}
+;.
diff --git a/llvm/test/Transforms/ExpandIRInsts/X86/expand-large-fp-convert-si129tofp.ll b/llvm/test/Transforms/ExpandIRInsts/X86/expand-large-fp-convert-si129tofp.ll
index fab6e431872e7..5c7f56caedb4f 100644
--- a/llvm/test/Transforms/ExpandIRInsts/X86/expand-large-fp-convert-si129tofp.ll
+++ b/llvm/test/Transforms/ExpandIRInsts/X86/expand-large-fp-convert-si129tofp.ll
@@ -1,12 +1,12 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals
 ; RUN: opt -S -mtriple=x86_64-- --expand-ir-insts < %s | FileCheck %s
 ; RUN: opt -S -mtriple=x86_64-- -passes='require<libcall-lowering-info>,expand-ir-insts' < %s | FileCheck %s
 
-define half @si129tohalf(i129 %a) {
+define half @si129tohalf(i129 %a) !prof !0 {
 ; CHECK-LABEL: @si129tohalf(
 ; CHECK-NEXT:  itofp-entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = icmp eq i129 [[A:%.*]], 0
-; CHECK-NEXT:    br i1 [[TMP0]], label [[ITOFP_RETURN:%.*]], label [[ITOFP_IF_END:%.*]]
+; CHECK-NEXT:    br i1 [[TMP0]], label [[ITOFP_RETURN:%.*]], label [[ITOFP_IF_END:%.*]], !prof [[PROF1:![0-9]+]]
 ; CHECK:       itofp-if-end:
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr i129 [[A]], 128
 ; CHECK-NEXT:    [[TMP2:%.*]] = xor i129 [[TMP1]], [[A]]
@@ -16,12 +16,12 @@ define half @si129tohalf(i129 %a) {
 ; CHECK-NEXT:    [[TMP6:%.*]] = sub i32 129, [[TMP5]]
 ; CHECK-NEXT:    [[TMP7:%.*]] = sub i32 128, [[TMP5]]
 ; CHECK-NEXT:    [[TMP8:%.*]] = icmp sgt i32 [[TMP6]], 24
-; CHECK-NEXT:    br i1 [[TMP8]], label [[ITOFP_IF_THEN4:%.*]], label [[ITOFP_IF_ELSE:%.*]]
+; CHECK-NEXT:    br i1 [[TMP8]], label [[ITOFP_IF_THEN4:%.*]], label [[ITOFP_IF_ELSE:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-then4:
 ; CHECK-NEXT:    switch i32 [[TMP6]], label [[ITOFP_SW_DEFAULT:%.*]] [
 ; CHECK-NEXT:      i32 25, label [[ITOFP_SW_BB:%.*]]
 ; CHECK-NEXT:      i32 26, label [[ITOFP_SW_EPILOG:%.*]]
-; CHECK-NEXT:    ]
+; CHECK-NEXT:    ], !prof [[PROF2:![0-9]+]]
 ; CHECK:       itofp-sw-bb:
 ; CHECK-NEXT:    [[TMP9:%.*]] = shl i129 [[TMP3]], 1
 ; CHECK-NEXT:    br label [[ITOFP_SW_EPILOG]]
@@ -51,7 +51,7 @@ define half @si129tohalf(i129 %a) {
 ; CHECK-NEXT:    [[TMP29:%.*]] = trunc i129 [[TMP27]] to i32
 ; CHECK-NEXT:    [[TMP30:%.*]] = lshr i129 [[TMP27]], 32
 ; CHECK-NEXT:    [[TMP31:%.*]] = trunc i129 [[TMP30]] to i32
-; CHECK-NEXT:    br i1 [[TMP28]], label [[ITOFP_IF_END26:%.*]], label [[ITOFP_IF_THEN20:%.*]]
+; CHECK-NEXT:    br i1 [[TMP28]], label [[ITOFP_IF_END26:%.*]], label [[ITOFP_IF_THEN20:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-then20:
 ; CHECK-NEXT:    [[TMP32:%.*]] = ashr i129 [[TMP26]], 3
 ; CHECK-NEXT:    [[TMP33:%.*]] = trunc i129 [[TMP32]] to i32
@@ -91,7 +91,7 @@ define float @si129tofloat(i129 %a) {
 ; CHECK-LABEL: @si129tofloat(
 ; CHECK-NEXT:  itofp-entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = icmp eq i129 [[A:%.*]], 0
-; CHECK-NEXT:    br i1 [[TMP0]], label [[ITOFP_RETURN:%.*]], label [[ITOFP_IF_END:%.*]]
+; CHECK-NEXT:    br i1 [[TMP0]], label [[ITOFP_RETURN:%.*]], label [[ITOFP_IF_END:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-end:
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr i129 [[A]], 128
 ; CHECK-NEXT:    [[TMP2:%.*]] = xor i129 [[TMP1]], [[A]]
@@ -101,12 +101,12 @@ define float @si129tofloat(i129 %a) {
 ; CHECK-NEXT:    [[TMP6:%.*]] = sub i32 129, [[TMP5]]
 ; CHECK-NEXT:    [[TMP7:%.*]] = sub i32 128, [[TMP5]]
 ; CHECK-NEXT:    [[TMP8:%.*]] = icmp sgt i32 [[TMP6]], 24
-; CHECK-NEXT:    br i1 [[TMP8]], label [[ITOFP_IF_THEN4:%.*]], label [[ITOFP_IF_ELSE:%.*]]
+; CHECK-NEXT:    br i1 [[TMP8]], label [[ITOFP_IF_THEN4:%.*]], label [[ITOFP_IF_ELSE:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-then4:
 ; CHECK-NEXT:    switch i32 [[TMP6]], label [[ITOFP_SW_DEFAULT:%.*]] [
 ; CHECK-NEXT:      i32 25, label [[ITOFP_SW_BB:%.*]]
 ; CHECK-NEXT:      i32 26, label [[ITOFP_SW_EPILOG:%.*]]
-; CHECK-NEXT:    ]
+; CHECK-NEXT:    ], !prof [[PROF2]]
 ; CHECK:       itofp-sw-bb:
 ; CHECK-NEXT:    [[TMP9:%.*]] = shl i129 [[TMP3]], 1
 ; CHECK-NEXT:    br label [[ITOFP_SW_EPILOG]]
@@ -136,7 +136,7 @@ define float @si129tofloat(i129 %a) {
 ; CHECK-NEXT:    [[TMP29:%.*]] = trunc i129 [[TMP27]] to i32
 ; CHECK-NEXT:    [[TMP30:%.*]] = lshr i129 [[TMP27]], 32
 ; CHECK-NEXT:    [[TMP31:%.*]] = trunc i129 [[TMP30]] to i32
-; CHECK-NEXT:    br i1 [[TMP28]], label [[ITOFP_IF_END26:%.*]], label [[ITOFP_IF_THEN20:%.*]]
+; CHECK-NEXT:    br i1 [[TMP28]], label [[ITOFP_IF_END26:%.*]], label [[ITOFP_IF_THEN20:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-then20:
 ; CHECK-NEXT:    [[TMP32:%.*]] = ashr i129 [[TMP26]], 3
 ; CHECK-NEXT:    [[TMP33:%.*]] = trunc i129 [[TMP32]] to i32
@@ -175,7 +175,7 @@ define double @si129todouble(i129 %a) {
 ; CHECK-LABEL: @si129todouble(
 ; CHECK-NEXT:  itofp-entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = icmp eq i129 [[A:%.*]], 0
-; CHECK-NEXT:    br i1 [[TMP0]], label [[ITOFP_RETURN:%.*]], label [[ITOFP_IF_END:%.*]]
+; CHECK-NEXT:    br i1 [[TMP0]], label [[ITOFP_RETURN:%.*]], label [[ITOFP_IF_END:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-end:
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr i129 [[A]], 128
 ; CHECK-NEXT:    [[TMP2:%.*]] = xor i129 [[TMP1]], [[A]]
@@ -185,12 +185,12 @@ define double @si129todouble(i129 %a) {
 ; CHECK-NEXT:    [[TMP6:%.*]] = sub i32 129, [[TMP5]]
 ; CHECK-NEXT:    [[TMP7:%.*]] = sub i32 128, [[TMP5]]
 ; CHECK-NEXT:    [[TMP8:%.*]] = icmp sgt i32 [[TMP6]], 53
-; CHECK-NEXT:    br i1 [[TMP8]], label [[ITOFP_IF_THEN4:%.*]], label [[ITOFP_IF_ELSE:%.*]]
+; CHECK-NEXT:    br i1 [[TMP8]], label [[ITOFP_IF_THEN4:%.*]], label [[ITOFP_IF_ELSE:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-then4:
 ; CHECK-NEXT:    switch i32 [[TMP6]], label [[ITOFP_SW_DEFAULT:%.*]] [
 ; CHECK-NEXT:      i32 54, label [[ITOFP_SW_BB:%.*]]
 ; CHECK-NEXT:      i32 55, label [[ITOFP_SW_EPILOG:%.*]]
-; CHECK-NEXT:    ]
+; CHECK-NEXT:    ], !prof [[PROF2]]
 ; CHECK:       itofp-sw-bb:
 ; CHECK-NEXT:    [[TMP9:%.*]] = shl i129 [[TMP3]], 1
 ; CHECK-NEXT:    br label [[ITOFP_SW_EPILOG]]
@@ -220,7 +220,7 @@ define double @si129todouble(i129 %a) {
 ; CHECK-NEXT:    [[TMP29:%.*]] = trunc i129 [[TMP27]] to i64
 ; CHECK-NEXT:    [[TMP30:%.*]] = lshr i129 [[TMP27]], 32
 ; CHECK-NEXT:    [[TMP31:%.*]] = trunc i129 [[TMP30]] to i32
-; CHECK-NEXT:    br i1 [[TMP28]], label [[ITOFP_IF_END26:%.*]], label [[ITOFP_IF_THEN20:%.*]]
+; CHECK-NEXT:    br i1 [[TMP28]], label [[ITOFP_IF_END26:%.*]], label [[ITOFP_IF_THEN20:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-then20:
 ; CHECK-NEXT:    [[TMP32:%.*]] = ashr i129 [[TMP26]], 3
 ; CHECK-NEXT:    [[TMP33:%.*]] = trunc i129 [[TMP32]] to i64
@@ -264,7 +264,7 @@ define x86_fp80 @si129tox86_fp80(i129 %a) {
 ; CHECK-LABEL: @si129tox86_fp80(
 ; CHECK-NEXT:  itofp-entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = icmp eq i129 [[A:%.*]], 0
-; CHECK-NEXT:    br i1 [[TMP0]], label [[ITOFP_RETURN:%.*]], label [[ITOFP_IF_END:%.*]]
+; CHECK-NEXT:    br i1 [[TMP0]], label [[ITOFP_RETURN:%.*]], label [[ITOFP_IF_END:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-end:
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr i129 [[A]], 128
 ; CHECK-NEXT:    [[TMP2:%.*]] = xor i129 [[TMP1]], [[A]]
@@ -274,12 +274,12 @@ define x86_fp80 @si129tox86_fp80(i129 %a) {
 ; CHECK-NEXT:    [[TMP6:%.*]] = sub i129 129, [[TMP4]]
 ; CHECK-NEXT:    [[TMP7:%.*]] = sub i129 128, [[TMP4]]
 ; CHECK-NEXT:    [[TMP8:%.*]] = icmp sgt i129 [[TMP6]], 113
-; CHECK-NEXT:    br i1 [[TMP8]], label [[ITOFP_IF_THEN4:%.*]], label [[ITOFP_IF_ELSE:%.*]]
+; CHECK-NEXT:    br i1 [[TMP8]], label [[ITOFP_IF_THEN4:%.*]], label [[ITOFP_IF_ELSE:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-then4:
 ; CHECK-NEXT:    switch i129 [[TMP6]], label [[ITOFP_SW_DEFAULT:%.*]] [
 ; CHECK-NEXT:      i129 114, label [[ITOFP_SW_BB:%.*]]
 ; CHECK-NEXT:      i129 115, label [[ITOFP_SW_EPILOG:%.*]]
-; CHECK-NEXT:    ]
+; CHECK-NEXT:    ], !prof [[PROF2]]
 ; CHECK:       itofp-sw-bb:
 ; CHECK-NEXT:    [[TMP9:%.*]] = shl i129 [[TMP3]], 1
 ; CHECK-NEXT:    br label [[ITOFP_SW_EPILOG]]
@@ -307,7 +307,7 @@ define x86_fp80 @si129tox86_fp80(i129 %a) {
 ; CHECK-NEXT:    [[TMP27:%.*]] = trunc i129 [[TMP25]] to i128
 ; CHECK-NEXT:    [[TMP28:%.*]] = lshr i129 [[TMP25]], 32
 ; CHECK-NEXT:    [[TMP29:%.*]] = trunc i129 [[TMP7]] to i64
-; CHECK-NEXT:    br i1 [[TMP26]], label [[ITOFP_IF_END26:%.*]], label [[ITOFP_IF_THEN20:%.*]]
+; CHECK-NEXT:    br i1 [[TMP26]], label [[ITOFP_IF_END26:%.*]], label [[ITOFP_IF_THEN20:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-then20:
 ; CHECK-NEXT:    [[TMP30:%.*]] = ashr i129 [[TMP24]], 3
 ; CHECK-NEXT:    [[TMP31:%.*]] = trunc i129 [[TMP30]] to i128
@@ -348,7 +348,7 @@ define fp128 @si129tofp128(i129 %a) {
 ; CHECK-LABEL: @si129tofp128(
 ; CHECK-NEXT:  itofp-entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = icmp eq i129 [[A:%.*]], 0
-; CHECK-NEXT:    br i1 [[TMP0]], label [[ITOFP_RETURN:%.*]], label [[ITOFP_IF_END:%.*]]
+; CHECK-NEXT:    br i1 [[TMP0]], label [[ITOFP_RETURN:%.*]], label [[ITOFP_IF_END:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-end:
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr i129 [[A]], 128
 ; CHECK-NEXT:    [[TMP2:%.*]] = xor i129 [[TMP1]], [[A]]
@@ -358,12 +358,12 @@ define fp128 @si129tofp128(i129 %a) {
 ; CHECK-NEXT:    [[TMP6:%.*]] = sub i129 129, [[TMP4]]
 ; CHECK-NEXT:    [[TMP7:%.*]] = sub i129 128, [[TMP4]]
 ; CHECK-NEXT:    [[TMP8:%.*]] = icmp sgt i129 [[TMP6]], 113
-; CHECK-NEXT:    br i1 [[TMP8]], label [[ITOFP_IF_THEN4:%.*]], label [[ITOFP_IF_ELSE:%.*]]
+; CHECK-NEXT:    br i1 [[TMP8]], label [[ITOFP_IF_THEN4:%.*]], label [[ITOFP_IF_ELSE:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-then4:
 ; CHECK-NEXT:    switch i129 [[TMP6]], label [[ITOFP_SW_DEFAULT:%.*]] [
 ; CHECK-NEXT:      i129 114, label [[ITOFP_SW_BB:%.*]]
 ; CHECK-NEXT:      i129 115, label [[ITOFP_SW_EPILOG:%.*]]
-; CHECK-NEXT:    ]
+; CHECK-NEXT:    ], !prof [[PROF2]]
 ; CHECK:       itofp-sw-bb:
 ; CHECK-NEXT:    [[TMP9:%.*]] = shl i129 [[TMP3]], 1
 ; CHECK-NEXT:    br label [[ITOFP_SW_EPILOG]]
@@ -391,7 +391,7 @@ define fp128 @si129tofp128(i129 %a) {
 ; CHECK-NEXT:    [[TMP27:%.*]] = trunc i129 [[TMP25]] to i128
 ; CHECK-NEXT:    [[TMP28:%.*]] = lshr i129 [[TMP25]], 32
 ; CHECK-NEXT:    [[TMP29:%.*]] = trunc i129 [[TMP7]] to i64
-; CHECK-NEXT:    br i1 [[TMP26]], label [[ITOFP_IF_END26:%.*]], label [[ITOFP_IF_THEN20:%.*]]
+; CHECK-NEXT:    br i1 [[TMP26]], label [[ITOFP_IF_END26:%.*]], label [[ITOFP_IF_THEN20:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-then20:
 ; CHECK-NEXT:    [[TMP30:%.*]] = ashr i129 [[TMP24]], 3
 ; CHECK-NEXT:    [[TMP31:%.*]] = trunc i129 [[TMP30]] to i128
@@ -432,7 +432,7 @@ define <2 x float> @si129tofloatv2(<2 x i129> %a) {
 ; CHECK-NEXT:  itofp-entryitofp-entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = extractelement <2 x i129> [[A:%.*]], i64 0
 ; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i129 [[TMP0]], 0
-; CHECK-NEXT:    br i1 [[TMP1]], label [[ITOFP_RETURN1:%.*]], label [[ITOFP_IF_END2:%.*]]
+; CHECK-NEXT:    br i1 [[TMP1]], label [[ITOFP_RETURN1:%.*]], label [[ITOFP_IF_END2:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-end2:
 ; CHECK-NEXT:    [[TMP2:%.*]] = ashr i129 [[TMP0]], 128
 ; CHECK-NEXT:    [[TMP3:%.*]] = xor i129 [[TMP2]], [[TMP0]]
@@ -442,12 +442,12 @@ define <2 x float> @si129tofloatv2(<2 x i129> %a) {
 ; CHECK-NEXT:    [[TMP7:%.*]] = sub i32 129, [[TMP6]]
 ; CHECK-NEXT:    [[TMP8:%.*]] = sub i32 128, [[TMP6]]
 ; CHECK-NEXT:    [[TMP9:%.*]] = icmp sgt i32 [[TMP7]], 24
-; CHECK-NEXT:    br i1 [[TMP9]], label [[ITOFP_IF_THEN43:%.*]], label [[ITOFP_IF_ELSE8:%.*]]
+; CHECK-NEXT:    br i1 [[TMP9]], label [[ITOFP_IF_THEN43:%.*]], label [[ITOFP_IF_ELSE8:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-then43:
 ; CHECK-NEXT:    switch i32 [[TMP7]], label [[ITOFP_SW_DEFAULT5:%.*]] [
 ; CHECK-NEXT:      i32 25, label [[ITOFP_SW_BB4:%.*]]
 ; CHECK-NEXT:      i32 26, label [[ITOFP_SW_EPILOG6:%.*]]
-; CHECK-NEXT:    ]
+; CHECK-NEXT:    ], !prof [[PROF2]]
 ; CHECK:       itofp-sw-bb4:
 ; CHECK-NEXT:    [[TMP10:%.*]] = shl i129 [[TMP4]], 1
 ; CHECK-NEXT:    br label [[ITOFP_SW_EPILOG6]]
@@ -477,7 +477,7 @@ define <2 x float> @si129tofloatv2(<2 x i129> %a) {
 ; CHECK-NEXT:    [[TMP30:%.*]] = trunc i129 [[TMP28]] to i32
 ; CHECK-NEXT:    [[TMP31:%.*]] = lshr i129 [[TMP28]], 32
 ; CHECK-NEXT:    [[TMP32:%.*]] = trunc i129 [[TMP31]] to i32
-; CHECK-NEXT:    br i1 [[TMP29]], label [[ITOFP_IF_END269:%.*]], label [[ITOFP_IF_THEN207:%.*]]
+; CHECK-NEXT:    br i1 [[TMP29]], label [[ITOFP_IF_END269:%.*]], label [[ITOFP_IF_THEN207:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-then207:
 ; CHECK-NEXT:    [[TMP33:%.*]] = ashr i129 [[TMP27]], 3
 ; CHECK-NEXT:    [[TMP34:%.*]] = trunc i129 [[TMP33]] to i32
@@ -509,7 +509,7 @@ define <2 x float> @si129tofloatv2(<2 x i129> %a) {
 ; CHECK-NEXT:    [[TMP54:%.*]] = insertelement <2 x float> poison, float [[TMP53]], i64 0
 ; CHECK-NEXT:    [[TMP55:%.*]] = extractelement <2 x i129> [[A]], i64 1
 ; CHECK-NEXT:    [[TMP56:%.*]] = icmp eq i129 [[TMP55]], 0
-; CHECK-NEXT:    br i1 [[TMP56]], label [[ITOFP_RETURN:%.*]], label [[ITOFP_IF_END:%.*]]
+; CHECK-NEXT:    br i1 [[TMP56]], label [[ITOFP_RETURN:%.*]], label [[ITOFP_IF_END:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-end:
 ; CHECK-NEXT:    [[TMP57:%.*]] = ashr i129 [[TMP55]], 128
 ; CHECK-NEXT:    [[TMP58:%.*]] = xor i129 [[TMP57]], [[TMP55]]
@@ -519,12 +519,12 @@ define <2 x float> @si129tofloatv2(<2 x i129> %a) {
 ; CHECK-NEXT:    [[TMP62:%.*]] = sub i32 129, [[TMP61]]
 ; CHECK-NEXT:    [[TMP63:%.*]] = sub i32 128, [[TMP61]]
 ; CHECK-NEXT:    [[TMP64:%.*]] = icmp sgt i32 [[TMP62]], 24
-; CHECK-NEXT:    br i1 [[TMP64]], label [[ITOFP_IF_THEN4:%.*]], label [[ITOFP_IF_ELSE:%.*]]
+; CHECK-NEXT:    br i1 [[TMP64]], label [[ITOFP_IF_THEN4:%.*]], label [[ITOFP_IF_ELSE:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-then4:
 ; CHECK-NEXT:    switch i32 [[TMP62]], label [[ITOFP_SW_DEFAULT:%.*]] [
 ; CHECK-NEXT:      i32 25, label [[ITOFP_SW_BB:%.*]]
 ; CHECK-NEXT:      i32 26, label [[ITOFP_SW_EPILOG:%.*]]
-; CHECK-NEXT:    ]
+; CHECK-NEXT:    ], !prof [[PROF2]]
 ; CHECK:       itofp-sw-bb:
 ; CHECK-NEXT:    [[TMP65:%.*]] = shl i129 [[TMP59]], 1
 ; CHECK-NEXT:    br label [[ITOFP_SW_EPILOG]]
@@ -554,7 +554,7 @@ define <2 x float> @si129tofloatv2(<2 x i129> %a) {
 ; CHECK-NEXT:    [[TMP85:%.*]] = trunc i129 [[TMP83]] to i32
 ; CHECK-NEXT:    [[TMP86:%.*]] = lshr i129 [[TMP83]], 32
 ; CHECK-NEXT:    [[TMP87:%.*]] = trunc i129 [[TMP86]] to i32
-; CHECK-NEXT:    br i1 [[TMP84]], label [[ITOFP_IF_END26:%.*]], label [[ITOFP_IF_THEN20:%.*]]
+; CHECK-NEXT:    br i1 [[TMP84]], label [[ITOFP_IF_END26:%.*]], label [[ITOFP_IF_THEN20:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-then20:
 ; CHECK-NEXT:    [[TMP88:%.*]] = ashr i129 [[TMP82]], 3
 ; CHECK-NEXT:    [[TMP89:%.*]] = trunc i129 [[TMP88]] to i32
@@ -589,3 +589,12 @@ define <2 x float> @si129tofloatv2(<2 x i129> %a) {
   %conv = sitofp <2 x i129> %a to <2 x float>
   ret <2 x float> %conv
 }
+
+!0 = !{!"function_entry_count", i64 1000}
+;.
+; CHECK: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+;.
+; CHECK: [[META0:![0-9]+]] = !{!"function_entry_count", i64 1000}
+; CHECK: [[PROF1]] = !{!"branch_weights", i32 1048575, i32 1}
+; CHECK: [[PROF2]] = !{!"branch_weights", i32 1048575, i32 1, i32 1}
+;.
diff --git a/llvm/test/Transforms/ExpandIRInsts/X86/expand-large-fp-convert-ui129tofp.ll b/llvm/test/Transforms/ExpandIRInsts/X86/expand-large-fp-convert-ui129tofp.ll
index 3a3a8e40ea8d1..41859b0d3b79c 100644
--- a/llvm/test/Transforms/ExpandIRInsts/X86/expand-large-fp-convert-ui129tofp.ll
+++ b/llvm/test/Transforms/ExpandIRInsts/X86/expand-large-fp-convert-ui129tofp.ll
@@ -1,12 +1,12 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals
 ; RUN: opt -S -mtriple=x86_64-- --expand-ir-insts < %s | FileCheck %s
 ; RUN: opt -S -mtriple=x86_64-- -passes='require<libcall-lowering-info>,expand-ir-insts' < %s | FileCheck %s
 
-define half @ui129tohalf(i129 %a) {
+define half @ui129tohalf(i129 %a) !prof !0 {
 ; CHECK-LABEL: @ui129tohalf(
 ; CHECK-NEXT:  itofp-entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = icmp eq i129 [[A:%.*]], 0
-; CHECK-NEXT:    br i1 [[TMP0]], label [[ITOFP_RETURN:%.*]], label [[ITOFP_IF_END:%.*]]
+; CHECK-NEXT:    br i1 [[TMP0]], label [[ITOFP_RETURN:%.*]], label [[ITOFP_IF_END:%.*]], !prof [[PROF1:![0-9]+]]
 ; CHECK:       itofp-if-end:
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr i129 [[A]], 128
 ; CHECK-NEXT:    [[TMP2:%.*]] = xor i129 [[TMP1]], [[A]]
@@ -16,12 +16,12 @@ define half @ui129tohalf(i129 %a) {
 ; CHECK-NEXT:    [[TMP6:%.*]] = sub i32 129, [[TMP5]]
 ; CHECK-NEXT:    [[TMP7:%.*]] = sub i32 128, [[TMP5]]
 ; CHECK-NEXT:    [[TMP8:%.*]] = icmp sgt i32 [[TMP6]], 24
-; CHECK-NEXT:    br i1 [[TMP8]], label [[ITOFP_IF_THEN4:%.*]], label [[ITOFP_IF_ELSE:%.*]]
+; CHECK-NEXT:    br i1 [[TMP8]], label [[ITOFP_IF_THEN4:%.*]], label [[ITOFP_IF_ELSE:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-then4:
 ; CHECK-NEXT:    switch i32 [[TMP6]], label [[ITOFP_SW_DEFAULT:%.*]] [
 ; CHECK-NEXT:      i32 25, label [[ITOFP_SW_BB:%.*]]
 ; CHECK-NEXT:      i32 26, label [[ITOFP_SW_EPILOG:%.*]]
-; CHECK-NEXT:    ]
+; CHECK-NEXT:    ], !prof [[PROF2:![0-9]+]]
 ; CHECK:       itofp-sw-bb:
 ; CHECK-NEXT:    [[TMP9:%.*]] = shl i129 [[A]], 1
 ; CHECK-NEXT:    br label [[ITOFP_SW_EPILOG]]
@@ -51,7 +51,7 @@ define half @ui129tohalf(i129 %a) {
 ; CHECK-NEXT:    [[TMP29:%.*]] = trunc i129 [[TMP27]] to i32
 ; CHECK-NEXT:    [[TMP30:%.*]] = lshr i129 [[TMP27]], 32
 ; CHECK-NEXT:    [[TMP31:%.*]] = trunc i129 [[TMP30]] to i32
-; CHECK-NEXT:    br i1 [[TMP28]], label [[ITOFP_IF_END26:%.*]], label [[ITOFP_IF_THEN20:%.*]]
+; CHECK-NEXT:    br i1 [[TMP28]], label [[ITOFP_IF_END26:%.*]], label [[ITOFP_IF_THEN20:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-then20:
 ; CHECK-NEXT:    [[TMP32:%.*]] = lshr i129 [[TMP26]], 3
 ; CHECK-NEXT:    [[TMP33:%.*]] = trunc i129 [[TMP32]] to i32
@@ -91,7 +91,7 @@ define float @ui129tofloat(i129 %a) {
 ; CHECK-LABEL: @ui129tofloat(
 ; CHECK-NEXT:  itofp-entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = icmp eq i129 [[A:%.*]], 0
-; CHECK-NEXT:    br i1 [[TMP0]], label [[ITOFP_RETURN:%.*]], label [[ITOFP_IF_END:%.*]]
+; CHECK-NEXT:    br i1 [[TMP0]], label [[ITOFP_RETURN:%.*]], label [[ITOFP_IF_END:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-end:
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr i129 [[A]], 128
 ; CHECK-NEXT:    [[TMP2:%.*]] = xor i129 [[TMP1]], [[A]]
@@ -101,12 +101,12 @@ define float @ui129tofloat(i129 %a) {
 ; CHECK-NEXT:    [[TMP6:%.*]] = sub i32 129, [[TMP5]]
 ; CHECK-NEXT:    [[TMP7:%.*]] = sub i32 128, [[TMP5]]
 ; CHECK-NEXT:    [[TMP8:%.*]] = icmp sgt i32 [[TMP6]], 24
-; CHECK-NEXT:    br i1 [[TMP8]], label [[ITOFP_IF_THEN4:%.*]], label [[ITOFP_IF_ELSE:%.*]]
+; CHECK-NEXT:    br i1 [[TMP8]], label [[ITOFP_IF_THEN4:%.*]], label [[ITOFP_IF_ELSE:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-then4:
 ; CHECK-NEXT:    switch i32 [[TMP6]], label [[ITOFP_SW_DEFAULT:%.*]] [
 ; CHECK-NEXT:      i32 25, label [[ITOFP_SW_BB:%.*]]
 ; CHECK-NEXT:      i32 26, label [[ITOFP_SW_EPILOG:%.*]]
-; CHECK-NEXT:    ]
+; CHECK-NEXT:    ], !prof [[PROF2]]
 ; CHECK:       itofp-sw-bb:
 ; CHECK-NEXT:    [[TMP9:%.*]] = shl i129 [[A]], 1
 ; CHECK-NEXT:    br label [[ITOFP_SW_EPILOG]]
@@ -136,7 +136,7 @@ define float @ui129tofloat(i129 %a) {
 ; CHECK-NEXT:    [[TMP29:%.*]] = trunc i129 [[TMP27]] to i32
 ; CHECK-NEXT:    [[TMP30:%.*]] = lshr i129 [[TMP27]], 32
 ; CHECK-NEXT:    [[TMP31:%.*]] = trunc i129 [[TMP30]] to i32
-; CHECK-NEXT:    br i1 [[TMP28]], label [[ITOFP_IF_END26:%.*]], label [[ITOFP_IF_THEN20:%.*]]
+; CHECK-NEXT:    br i1 [[TMP28]], label [[ITOFP_IF_END26:%.*]], label [[ITOFP_IF_THEN20:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-then20:
 ; CHECK-NEXT:    [[TMP32:%.*]] = lshr i129 [[TMP26]], 3
 ; CHECK-NEXT:    [[TMP33:%.*]] = trunc i129 [[TMP32]] to i32
@@ -175,7 +175,7 @@ define double @ui129todouble(i129 %a) {
 ; CHECK-LABEL: @ui129todouble(
 ; CHECK-NEXT:  itofp-entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = icmp eq i129 [[A:%.*]], 0
-; CHECK-NEXT:    br i1 [[TMP0]], label [[ITOFP_RETURN:%.*]], label [[ITOFP_IF_END:%.*]]
+; CHECK-NEXT:    br i1 [[TMP0]], label [[ITOFP_RETURN:%.*]], label [[ITOFP_IF_END:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-end:
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr i129 [[A]], 128
 ; CHECK-NEXT:    [[TMP2:%.*]] = xor i129 [[TMP1]], [[A]]
@@ -185,12 +185,12 @@ define double @ui129todouble(i129 %a) {
 ; CHECK-NEXT:    [[TMP6:%.*]] = sub i32 129, [[TMP5]]
 ; CHECK-NEXT:    [[TMP7:%.*]] = sub i32 128, [[TMP5]]
 ; CHECK-NEXT:    [[TMP8:%.*]] = icmp sgt i32 [[TMP6]], 53
-; CHECK-NEXT:    br i1 [[TMP8]], label [[ITOFP_IF_THEN4:%.*]], label [[ITOFP_IF_ELSE:%.*]]
+; CHECK-NEXT:    br i1 [[TMP8]], label [[ITOFP_IF_THEN4:%.*]], label [[ITOFP_IF_ELSE:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-then4:
 ; CHECK-NEXT:    switch i32 [[TMP6]], label [[ITOFP_SW_DEFAULT:%.*]] [
 ; CHECK-NEXT:      i32 54, label [[ITOFP_SW_BB:%.*]]
 ; CHECK-NEXT:      i32 55, label [[ITOFP_SW_EPILOG:%.*]]
-; CHECK-NEXT:    ]
+; CHECK-NEXT:    ], !prof [[PROF2]]
 ; CHECK:       itofp-sw-bb:
 ; CHECK-NEXT:    [[TMP9:%.*]] = shl i129 [[A]], 1
 ; CHECK-NEXT:    br label [[ITOFP_SW_EPILOG]]
@@ -220,7 +220,7 @@ define double @ui129todouble(i129 %a) {
 ; CHECK-NEXT:    [[TMP29:%.*]] = trunc i129 [[TMP27]] to i64
 ; CHECK-NEXT:    [[TMP30:%.*]] = lshr i129 [[TMP27]], 32
 ; CHECK-NEXT:    [[TMP31:%.*]] = trunc i129 [[TMP30]] to i32
-; CHECK-NEXT:    br i1 [[TMP28]], label [[ITOFP_IF_END26:%.*]], label [[ITOFP_IF_THEN20:%.*]]
+; CHECK-NEXT:    br i1 [[TMP28]], label [[ITOFP_IF_END26:%.*]], label [[ITOFP_IF_THEN20:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-then20:
 ; CHECK-NEXT:    [[TMP32:%.*]] = lshr i129 [[TMP26]], 3
 ; CHECK-NEXT:    [[TMP33:%.*]] = trunc i129 [[TMP32]] to i64
@@ -264,7 +264,7 @@ define x86_fp80 @ui129tox86_fp80(i129 %a) {
 ; CHECK-LABEL: @ui129tox86_fp80(
 ; CHECK-NEXT:  itofp-entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = icmp eq i129 [[A:%.*]], 0
-; CHECK-NEXT:    br i1 [[TMP0]], label [[ITOFP_RETURN:%.*]], label [[ITOFP_IF_END:%.*]]
+; CHECK-NEXT:    br i1 [[TMP0]], label [[ITOFP_RETURN:%.*]], label [[ITOFP_IF_END:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-end:
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr i129 [[A]], 128
 ; CHECK-NEXT:    [[TMP2:%.*]] = xor i129 [[TMP1]], [[A]]
@@ -274,12 +274,12 @@ define x86_fp80 @ui129tox86_fp80(i129 %a) {
 ; CHECK-NEXT:    [[TMP6:%.*]] = sub i129 129, [[TMP4]]
 ; CHECK-NEXT:    [[TMP7:%.*]] = sub i129 128, [[TMP4]]
 ; CHECK-NEXT:    [[TMP8:%.*]] = icmp sgt i129 [[TMP6]], 113
-; CHECK-NEXT:    br i1 [[TMP8]], label [[ITOFP_IF_THEN4:%.*]], label [[ITOFP_IF_ELSE:%.*]]
+; CHECK-NEXT:    br i1 [[TMP8]], label [[ITOFP_IF_THEN4:%.*]], label [[ITOFP_IF_ELSE:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-then4:
 ; CHECK-NEXT:    switch i129 [[TMP6]], label [[ITOFP_SW_DEFAULT:%.*]] [
 ; CHECK-NEXT:      i129 114, label [[ITOFP_SW_BB:%.*]]
 ; CHECK-NEXT:      i129 115, label [[ITOFP_SW_EPILOG:%.*]]
-; CHECK-NEXT:    ]
+; CHECK-NEXT:    ], !prof [[PROF2]]
 ; CHECK:       itofp-sw-bb:
 ; CHECK-NEXT:    [[TMP9:%.*]] = shl i129 [[A]], 1
 ; CHECK-NEXT:    br label [[ITOFP_SW_EPILOG]]
@@ -307,7 +307,7 @@ define x86_fp80 @ui129tox86_fp80(i129 %a) {
 ; CHECK-NEXT:    [[TMP27:%.*]] = trunc i129 [[TMP25]] to i128
 ; CHECK-NEXT:    [[TMP28:%.*]] = lshr i129 [[TMP25]], 32
 ; CHECK-NEXT:    [[TMP29:%.*]] = trunc i129 [[TMP7]] to i64
-; CHECK-NEXT:    br i1 [[TMP26]], label [[ITOFP_IF_END26:%.*]], label [[ITOFP_IF_THEN20:%.*]]
+; CHECK-NEXT:    br i1 [[TMP26]], label [[ITOFP_IF_END26:%.*]], label [[ITOFP_IF_THEN20:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-then20:
 ; CHECK-NEXT:    [[TMP30:%.*]] = lshr i129 [[TMP24]], 3
 ; CHECK-NEXT:    [[TMP31:%.*]] = trunc i129 [[TMP30]] to i128
@@ -348,7 +348,7 @@ define fp128 @ui129tofp128(i129 %a) {
 ; CHECK-LABEL: @ui129tofp128(
 ; CHECK-NEXT:  itofp-entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = icmp eq i129 [[A:%.*]], 0
-; CHECK-NEXT:    br i1 [[TMP0]], label [[ITOFP_RETURN:%.*]], label [[ITOFP_IF_END:%.*]]
+; CHECK-NEXT:    br i1 [[TMP0]], label [[ITOFP_RETURN:%.*]], label [[ITOFP_IF_END:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-end:
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr i129 [[A]], 128
 ; CHECK-NEXT:    [[TMP2:%.*]] = xor i129 [[TMP1]], [[A]]
@@ -358,12 +358,12 @@ define fp128 @ui129tofp128(i129 %a) {
 ; CHECK-NEXT:    [[TMP6:%.*]] = sub i129 129, [[TMP4]]
 ; CHECK-NEXT:    [[TMP7:%.*]] = sub i129 128, [[TMP4]]
 ; CHECK-NEXT:    [[TMP8:%.*]] = icmp sgt i129 [[TMP6]], 113
-; CHECK-NEXT:    br i1 [[TMP8]], label [[ITOFP_IF_THEN4:%.*]], label [[ITOFP_IF_ELSE:%.*]]
+; CHECK-NEXT:    br i1 [[TMP8]], label [[ITOFP_IF_THEN4:%.*]], label [[ITOFP_IF_ELSE:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-then4:
 ; CHECK-NEXT:    switch i129 [[TMP6]], label [[ITOFP_SW_DEFAULT:%.*]] [
 ; CHECK-NEXT:      i129 114, label [[ITOFP_SW_BB:%.*]]
 ; CHECK-NEXT:      i129 115, label [[ITOFP_SW_EPILOG:%.*]]
-; CHECK-NEXT:    ]
+; CHECK-NEXT:    ], !prof [[PROF2]]
 ; CHECK:       itofp-sw-bb:
 ; CHECK-NEXT:    [[TMP9:%.*]] = shl i129 [[A]], 1
 ; CHECK-NEXT:    br label [[ITOFP_SW_EPILOG]]
@@ -391,7 +391,7 @@ define fp128 @ui129tofp128(i129 %a) {
 ; CHECK-NEXT:    [[TMP27:%.*]] = trunc i129 [[TMP25]] to i128
 ; CHECK-NEXT:    [[TMP28:%.*]] = lshr i129 [[TMP25]], 32
 ; CHECK-NEXT:    [[TMP29:%.*]] = trunc i129 [[TMP7]] to i64
-; CHECK-NEXT:    br i1 [[TMP26]], label [[ITOFP_IF_END26:%.*]], label [[ITOFP_IF_THEN20:%.*]]
+; CHECK-NEXT:    br i1 [[TMP26]], label [[ITOFP_IF_END26:%.*]], label [[ITOFP_IF_THEN20:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-then20:
 ; CHECK-NEXT:    [[TMP30:%.*]] = lshr i129 [[TMP24]], 3
 ; CHECK-NEXT:    [[TMP31:%.*]] = trunc i129 [[TMP30]] to i128
@@ -432,7 +432,7 @@ define <2 x float> @ui129tofloatv2(<2 x i129> %a) {
 ; CHECK-NEXT:  itofp-entryitofp-entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = extractelement <2 x i129> [[A:%.*]], i64 0
 ; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i129 [[TMP0]], 0
-; CHECK-NEXT:    br i1 [[TMP1]], label [[ITOFP_RETURN1:%.*]], label [[ITOFP_IF_END2:%.*]]
+; CHECK-NEXT:    br i1 [[TMP1]], label [[ITOFP_RETURN1:%.*]], label [[ITOFP_IF_END2:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-end2:
 ; CHECK-NEXT:    [[TMP2:%.*]] = ashr i129 [[TMP0]], 128
 ; CHECK-NEXT:    [[TMP3:%.*]] = xor i129 [[TMP2]], [[TMP0]]
@@ -442,12 +442,12 @@ define <2 x float> @ui129tofloatv2(<2 x i129> %a) {
 ; CHECK-NEXT:    [[TMP7:%.*]] = sub i32 129, [[TMP6]]
 ; CHECK-NEXT:    [[TMP8:%.*]] = sub i32 128, [[TMP6]]
 ; CHECK-NEXT:    [[TMP9:%.*]] = icmp sgt i32 [[TMP7]], 24
-; CHECK-NEXT:    br i1 [[TMP9]], label [[ITOFP_IF_THEN43:%.*]], label [[ITOFP_IF_ELSE8:%.*]]
+; CHECK-NEXT:    br i1 [[TMP9]], label [[ITOFP_IF_THEN43:%.*]], label [[ITOFP_IF_ELSE8:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-then43:
 ; CHECK-NEXT:    switch i32 [[TMP7]], label [[ITOFP_SW_DEFAULT5:%.*]] [
 ; CHECK-NEXT:      i32 25, label [[ITOFP_SW_BB4:%.*]]
 ; CHECK-NEXT:      i32 26, label [[ITOFP_SW_EPILOG6:%.*]]
-; CHECK-NEXT:    ]
+; CHECK-NEXT:    ], !prof [[PROF2]]
 ; CHECK:       itofp-sw-bb4:
 ; CHECK-NEXT:    [[TMP10:%.*]] = shl i129 [[TMP0]], 1
 ; CHECK-NEXT:    br label [[ITOFP_SW_EPILOG6]]
@@ -477,7 +477,7 @@ define <2 x float> @ui129tofloatv2(<2 x i129> %a) {
 ; CHECK-NEXT:    [[TMP30:%.*]] = trunc i129 [[TMP28]] to i32
 ; CHECK-NEXT:    [[TMP31:%.*]] = lshr i129 [[TMP28]], 32
 ; CHECK-NEXT:    [[TMP32:%.*]] = trunc i129 [[TMP31]] to i32
-; CHECK-NEXT:    br i1 [[TMP29]], label [[ITOFP_IF_END269:%.*]], label [[ITOFP_IF_THEN207:%.*]]
+; CHECK-NEXT:    br i1 [[TMP29]], label [[ITOFP_IF_END269:%.*]], label [[ITOFP_IF_THEN207:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-then207:
 ; CHECK-NEXT:    [[TMP33:%.*]] = lshr i129 [[TMP27]], 3
 ; CHECK-NEXT:    [[TMP34:%.*]] = trunc i129 [[TMP33]] to i32
@@ -509,7 +509,7 @@ define <2 x float> @ui129tofloatv2(<2 x i129> %a) {
 ; CHECK-NEXT:    [[TMP54:%.*]] = insertelement <2 x float> poison, float [[TMP53]], i64 0
 ; CHECK-NEXT:    [[TMP55:%.*]] = extractelement <2 x i129> [[A]], i64 1
 ; CHECK-NEXT:    [[TMP56:%.*]] = icmp eq i129 [[TMP55]], 0
-; CHECK-NEXT:    br i1 [[TMP56]], label [[ITOFP_RETURN:%.*]], label [[ITOFP_IF_END:%.*]]
+; CHECK-NEXT:    br i1 [[TMP56]], label [[ITOFP_RETURN:%.*]], label [[ITOFP_IF_END:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-end:
 ; CHECK-NEXT:    [[TMP57:%.*]] = ashr i129 [[TMP55]], 128
 ; CHECK-NEXT:    [[TMP58:%.*]] = xor i129 [[TMP57]], [[TMP55]]
@@ -519,12 +519,12 @@ define <2 x float> @ui129tofloatv2(<2 x i129> %a) {
 ; CHECK-NEXT:    [[TMP62:%.*]] = sub i32 129, [[TMP61]]
 ; CHECK-NEXT:    [[TMP63:%.*]] = sub i32 128, [[TMP61]]
 ; CHECK-NEXT:    [[TMP64:%.*]] = icmp sgt i32 [[TMP62]], 24
-; CHECK-NEXT:    br i1 [[TMP64]], label [[ITOFP_IF_THEN4:%.*]], label [[ITOFP_IF_ELSE:%.*]]
+; CHECK-NEXT:    br i1 [[TMP64]], label [[ITOFP_IF_THEN4:%.*]], label [[ITOFP_IF_ELSE:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-then4:
 ; CHECK-NEXT:    switch i32 [[TMP62]], label [[ITOFP_SW_DEFAULT:%.*]] [
 ; CHECK-NEXT:      i32 25, label [[ITOFP_SW_BB:%.*]]
 ; CHECK-NEXT:      i32 26, label [[ITOFP_SW_EPILOG:%.*]]
-; CHECK-NEXT:    ]
+; CHECK-NEXT:    ], !prof [[PROF2]]
 ; CHECK:       itofp-sw-bb:
 ; CHECK-NEXT:    [[TMP65:%.*]] = shl i129 [[TMP55]], 1
 ; CHECK-NEXT:    br label [[ITOFP_SW_EPILOG]]
@@ -554,7 +554,7 @@ define <2 x float> @ui129tofloatv2(<2 x i129> %a) {
 ; CHECK-NEXT:    [[TMP85:%.*]] = trunc i129 [[TMP83]] to i32
 ; CHECK-NEXT:    [[TMP86:%.*]] = lshr i129 [[TMP83]], 32
 ; CHECK-NEXT:    [[TMP87:%.*]] = trunc i129 [[TMP86]] to i32
-; CHECK-NEXT:    br i1 [[TMP84]], label [[ITOFP_IF_END26:%.*]], label [[ITOFP_IF_THEN20:%.*]]
+; CHECK-NEXT:    br i1 [[TMP84]], label [[ITOFP_IF_END26:%.*]], label [[ITOFP_IF_THEN20:%.*]], !prof [[PROF1]]
 ; CHECK:       itofp-if-then20:
 ; CHECK-NEXT:    [[TMP88:%.*]] = lshr i129 [[TMP82]], 3
 ; CHECK-NEXT:    [[TMP89:%.*]] = trunc i129 [[TMP88]] to i32
@@ -589,3 +589,12 @@ define <2 x float> @ui129tofloatv2(<2 x i129> %a) {
   %conv = uitofp <2 x i129> %a to <2 x float>
   ret <2 x float> %conv
 }
+
+!0 = !{!"function_entry_count", i64 1000}
+;.
+; CHECK: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+;.
+; CHECK: [[META0:![0-9]+]] = !{!"function_entry_count", i64 1000}
+; CHECK: [[PROF1]] = !{!"branch_weights", i32 1048575, i32 1}
+; CHECK: [[PROF2]] = !{!"branch_weights", i32 1048575, i32 1, i32 1}
+;.
diff --git a/llvm/test/Transforms/ExpandIRInsts/X86/expand-large-fp-optnone.ll b/llvm/test/Transforms/ExpandIRInsts/X86/expand-large-fp-optnone.ll
index 5b622c1ad77eb..5bcc6491f7b57 100644
--- a/llvm/test/Transforms/ExpandIRInsts/X86/expand-large-fp-optnone.ll
+++ b/llvm/test/Transforms/ExpandIRInsts/X86/expand-large-fp-optnone.ll
@@ -1,16 +1,16 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals all --version 5
 ; RUN: opt -S -mtriple=x86_64-- --expand-ir-insts < %s | FileCheck %s
 ; RUN: opt -S -mtriple=x86_64-- -passes='require<libcall-lowering-info>,expand-ir-insts' < %s | FileCheck %s
 
 ; expand-ir-insts must also run with optnone
 
 ; Function Attrs: noinline optnone
-define double @main(i224 %0) #0 {
+define double @main(i224 %0) #0 !prof !0 {
 ; CHECK-LABEL: define double @main(
-; CHECK-SAME: i224 [[TMP0:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-SAME: i224 [[TMP0:%.*]]) #[[ATTR0:[0-9]+]] !prof [[PROF0:![0-9]+]] {
 ; CHECK-NEXT:  [[ENTRYITOFP_ENTRY:.*]]:
 ; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i224 [[TMP0]], 0
-; CHECK-NEXT:    br i1 [[TMP1]], label %[[ITOFP_RETURN:.*]], label %[[ITOFP_IF_END:.*]]
+; CHECK-NEXT:    br i1 [[TMP1]], label %[[ITOFP_RETURN:.*]], label %[[ITOFP_IF_END:.*]], !prof [[PROF1:![0-9]+]]
 ; CHECK:       [[ITOFP_IF_END]]:
 ; CHECK-NEXT:    [[TMP2:%.*]] = ashr i224 [[TMP0]], 223
 ; CHECK-NEXT:    [[TMP3:%.*]] = xor i224 [[TMP2]], [[TMP0]]
@@ -20,12 +20,12 @@ define double @main(i224 %0) #0 {
 ; CHECK-NEXT:    [[TMP7:%.*]] = sub i32 224, [[TMP6]]
 ; CHECK-NEXT:    [[TMP8:%.*]] = sub i32 223, [[TMP6]]
 ; CHECK-NEXT:    [[TMP9:%.*]] = icmp sgt i32 [[TMP7]], 53
-; CHECK-NEXT:    br i1 [[TMP9]], label %[[ITOFP_IF_THEN4:.*]], label %[[ITOFP_IF_ELSE:.*]]
+; CHECK-NEXT:    br i1 [[TMP9]], label %[[ITOFP_IF_THEN4:.*]], label %[[ITOFP_IF_ELSE:.*]], !prof [[PROF1]]
 ; CHECK:       [[ITOFP_IF_THEN4]]:
 ; CHECK-NEXT:    switch i32 [[TMP7]], label %[[ITOFP_SW_DEFAULT:.*]] [
 ; CHECK-NEXT:      i32 54, label %[[ITOFP_SW_BB:.*]]
 ; CHECK-NEXT:      i32 55, label %[[ITOFP_SW_EPILOG:.*]]
-; CHECK-NEXT:    ]
+; CHECK-NEXT:    ], !prof [[PROF2:![0-9]+]]
 ; CHECK:       [[ITOFP_SW_BB]]:
 ; CHECK-NEXT:    [[TMP10:%.*]] = shl i224 [[TMP4]], 1
 ; CHECK-NEXT:    br label %[[ITOFP_SW_EPILOG]]
@@ -55,7 +55,7 @@ define double @main(i224 %0) #0 {
 ; CHECK-NEXT:    [[TMP30:%.*]] = trunc i224 [[TMP28]] to i64
 ; CHECK-NEXT:    [[TMP31:%.*]] = lshr i224 [[TMP28]], 32
 ; CHECK-NEXT:    [[TMP32:%.*]] = trunc i224 [[TMP31]] to i32
-; CHECK-NEXT:    br i1 [[TMP29]], label %[[ITOFP_IF_END26:.*]], label %[[ITOFP_IF_THEN20:.*]]
+; CHECK-NEXT:    br i1 [[TMP29]], label %[[ITOFP_IF_END26:.*]], label %[[ITOFP_IF_THEN20:.*]], !prof [[PROF1]]
 ; CHECK:       [[ITOFP_IF_THEN20]]:
 ; CHECK-NEXT:    [[TMP33:%.*]] = ashr i224 [[TMP27]], 3
 ; CHECK-NEXT:    [[TMP34:%.*]] = trunc i224 [[TMP33]] to i64
@@ -97,3 +97,13 @@ entry:
 }
 
 attributes #0 = { noinline optnone }
+
+!0 = !{!"function_entry_count", i64 1000}
+;.
+; CHECK: attributes #[[ATTR0]] = { noinline optnone }
+; CHECK: attributes #[[ATTR1:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+;.
+; CHECK: [[PROF0]] = !{!"function_entry_count", i64 1000}
+; CHECK: [[PROF1]] = !{!"branch_weights", i32 1048575, i32 1}
+; CHECK: [[PROF2]] = !{!"branch_weights", i32 1048575, i32 1, i32 1}
+;.
diff --git a/llvm/test/Transforms/ExpandIRInsts/X86/sdiv129.ll b/llvm/test/Transforms/ExpandIRInsts/X86/sdiv129.ll
index fc823cd543144..9fa4d7a5a5cc1 100644
--- a/llvm/test/Transforms/ExpandIRInsts/X86/sdiv129.ll
+++ b/llvm/test/Transforms/ExpandIRInsts/X86/sdiv129.ll
@@ -2,8 +2,9 @@
 ; RUN: opt -S -mtriple=x86_64-- -expand-ir-insts -expand-div-rem-bits 128 < %s | FileCheck %s
 ; RUN: opt -S -mtriple=x86_64-- -passes='require<libcall-lowering-info>,expand-ir-insts' -expand-div-rem-bits 128 < %s | FileCheck %s
 
-define void @sdiv129(ptr %ptr, ptr %out) nounwind {
+define void @sdiv129(ptr %ptr, ptr %out) nounwind !prof !0 {
 ; CHECK-LABEL: @sdiv129(
+; CHECK: !prof [[PROF_0:![0-9]+]] {
 ; CHECK-NEXT:  _udiv-special-cases:
 ; CHECK-NEXT:    [[A:%.*]] = load i129, ptr [[PTR:%.*]], align 16
 ; CHECK-NEXT:    [[TMP0:%.*]] = freeze i129 [[A]]
@@ -24,11 +25,11 @@ define void @sdiv129(ptr %ptr, ptr %out) nounwind {
 ; CHECK-NEXT:    [[TMP15:%.*]] = call i129 @llvm.ctlz.i129(i129 [[TMP10]], i1 true)
 ; CHECK-NEXT:    [[TMP16:%.*]] = sub i129 [[TMP14]], [[TMP15]]
 ; CHECK-NEXT:    [[TMP17:%.*]] = icmp ugt i129 [[TMP16]], 128
-; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP13]], i1 true, i1 [[TMP17]]
+; CHECK-NEXT:    [[TMP18:%.*]] = select i1 [[TMP13]], i1 true, i1 [[TMP17]], !prof [[PROF_1:![0-9]+]]
 ; CHECK-NEXT:    [[TMP19:%.*]] = icmp eq i129 [[TMP16]], 128
-; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP18]], i129 0, i129 [[TMP10]]
-; CHECK-NEXT:    [[TMP21:%.*]] = select i1 [[TMP18]], i1 true, i1 [[TMP19]]
-; CHECK-NEXT:    br i1 [[TMP21]], label [[UDIV_END:%.*]], label [[UDIV_BB1:%.*]]
+; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP18]], i129 0, i129 [[TMP10]], !prof [[PROF_1]]
+; CHECK-NEXT:    [[TMP21:%.*]] = select i1 [[TMP18]], i1 true, i1 [[TMP19]], !prof [[PROF_1]]
+; CHECK-NEXT:    br i1 [[TMP21]], label [[UDIV_END:%.*]], label [[UDIV_BB1:%.*]], !prof [[PROF_1]]
 ; CHECK:       udiv-loop-exit:
 ; CHECK-NEXT:    [[TMP22:%.*]] = phi i129 [ 0, [[UDIV_BB1]] ], [ [[TMP37:%.*]], [[UDIV_DO_WHILE:%.*]] ]
 ; CHECK-NEXT:    [[TMP23:%.*]] = phi i129 [ [[TMP46:%.*]], [[UDIV_BB1]] ], [ [[TMP34:%.*]], [[UDIV_DO_WHILE]] ]
@@ -52,7 +53,7 @@ define void @sdiv129(ptr %ptr, ptr %out) nounwind {
 ; CHECK-NEXT:    [[TMP39]] = sub i129 [[TMP32]], [[TMP38]]
 ; CHECK-NEXT:    [[TMP40]] = add i129 [[TMP27]], -1
 ; CHECK-NEXT:    [[TMP41:%.*]] = icmp eq i129 [[TMP40]], 0
-; CHECK-NEXT:    br i1 [[TMP41]], label [[UDIV_LOOP_EXIT:%.*]], label [[UDIV_DO_WHILE]]
+; CHECK-NEXT:    br i1 [[TMP41]], label [[UDIV_LOOP_EXIT:%.*]], label [[UDIV_DO_WHILE]], !prof [[PROF_1]]
 ; CHECK:       udiv-preheader:
 ; CHECK-NEXT:    [[TMP42]] = lshr i129 [[TMP10]], [[TMP44]]
 ; CHECK-NEXT:    [[TMP43]] = add i129 [[TMP9]], -1
@@ -62,7 +63,7 @@ define void @sdiv129(ptr %ptr, ptr %out) nounwind {
 ; CHECK-NEXT:    [[TMP45:%.*]] = sub i129 128, [[TMP16]]
 ; CHECK-NEXT:    [[TMP46]] = shl i129 [[TMP10]], [[TMP45]]
 ; CHECK-NEXT:    [[TMP47:%.*]] = icmp eq i129 [[TMP44]], 0
-; CHECK-NEXT:    br i1 [[TMP47]], label [[UDIV_LOOP_EXIT]], label [[UDIV_PREHEADER]]
+; CHECK-NEXT:    br i1 [[TMP47]], label [[UDIV_LOOP_EXIT]], label [[UDIV_PREHEADER]], !prof [[PROF_1]]
 ; CHECK:       udiv-end:
 ; CHECK-NEXT:    [[TMP48:%.*]] = phi i129 [ [[TMP25]], [[UDIV_LOOP_EXIT]] ], [ [[TMP20]], [[_UDIV_SPECIAL_CASES:%.*]] ]
 ; CHECK-NEXT:    [[TMP49:%.*]] = xor i129 [[TMP48]], [[TMP8]]
@@ -75,3 +76,7 @@ define void @sdiv129(ptr %ptr, ptr %out) nounwind {
   store i129 %res, ptr %out
   ret void
 }
+
+!0 = !{!"function_entry_count", i64 1000}
+; CHECK: [[PROF_0]] = !{!"function_entry_count", i64 1000}
+; CHECK: [[PROF_1]] = !{!"branch_weights", i32 1, i32 1048575}
diff --git a/llvm/test/Transforms/ExpandIRInsts/X86/srem129.ll b/llvm/test/Transforms/ExpandIRInsts/X86/srem129.ll
index 667152228d258..57167e9d8f3a7 100644
--- a/llvm/test/Transforms/ExpandIRInsts/X86/srem129.ll
+++ b/llvm/test/Transforms/ExpandIRInsts/X86/srem129.ll
@@ -1,8 +1,8 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals
 ; RUN: opt -S -mtriple=x86_64-- -expand-ir-insts -expand-div-rem-bits 128 < %s | FileCheck %s
 ; RUN: opt -S -mtriple=x86_64-- -passes='require<libcall-lowering-info>,expand-ir-insts' -expand-div-rem-bits 128 < %s | FileCheck %s
 
-define void @test(ptr %ptr, ptr %out) nounwind {
+define void @test(ptr %ptr, ptr %out) nounwind !prof !0 {
 ; CHECK-LABEL: @test(
 ; CHECK-NEXT:  _udiv-special-cases:
 ; CHECK-NEXT:    [[A:%.*]] = load i129, ptr [[PTR:%.*]], align 16
@@ -25,11 +25,11 @@ define void @test(ptr %ptr, ptr %out) nounwind {
 ; CHECK-NEXT:    [[TMP16:%.*]] = call i129 @llvm.ctlz.i129(i129 [[TMP11]], i1 true)
 ; CHECK-NEXT:    [[TMP17:%.*]] = sub i129 [[TMP15]], [[TMP16]]
 ; CHECK-NEXT:    [[TMP18:%.*]] = icmp ugt i129 [[TMP17]], 128
-; CHECK-NEXT:    [[TMP19:%.*]] = select i1 [[TMP14]], i1 true, i1 [[TMP18]]
+; CHECK-NEXT:    [[TMP19:%.*]] = select i1 [[TMP14]], i1 true, i1 [[TMP18]], !prof [[PROF1:![0-9]+]]
 ; CHECK-NEXT:    [[TMP20:%.*]] = icmp eq i129 [[TMP17]], 128
-; CHECK-NEXT:    [[TMP21:%.*]] = select i1 [[TMP19]], i129 0, i129 [[TMP11]]
-; CHECK-NEXT:    [[TMP22:%.*]] = select i1 [[TMP19]], i1 true, i1 [[TMP20]]
-; CHECK-NEXT:    br i1 [[TMP22]], label [[UDIV_END:%.*]], label [[UDIV_BB1:%.*]]
+; CHECK-NEXT:    [[TMP21:%.*]] = select i1 [[TMP19]], i129 0, i129 [[TMP11]], !prof [[PROF1]]
+; CHECK-NEXT:    [[TMP22:%.*]] = select i1 [[TMP19]], i1 true, i1 [[TMP20]], !prof [[PROF1]]
+; CHECK-NEXT:    br i1 [[TMP22]], label [[UDIV_END:%.*]], label [[UDIV_BB1:%.*]], !prof [[PROF1]]
 ; CHECK:       udiv-loop-exit:
 ; CHECK-NEXT:    [[TMP23:%.*]] = phi i129 [ 0, [[UDIV_BB1]] ], [ [[TMP38:%.*]], [[UDIV_DO_WHILE:%.*]] ]
 ; CHECK-NEXT:    [[TMP24:%.*]] = phi i129 [ [[TMP47:%.*]], [[UDIV_BB1]] ], [ [[TMP35:%.*]], [[UDIV_DO_WHILE]] ]
@@ -53,7 +53,7 @@ define void @test(ptr %ptr, ptr %out) nounwind {
 ; CHECK-NEXT:    [[TMP40]] = sub i129 [[TMP33]], [[TMP39]]
 ; CHECK-NEXT:    [[TMP41]] = add i129 [[TMP28]], -1
 ; CHECK-NEXT:    [[TMP42:%.*]] = icmp eq i129 [[TMP41]], 0
-; CHECK-NEXT:    br i1 [[TMP42]], label [[UDIV_LOOP_EXIT:%.*]], label [[UDIV_DO_WHILE]]
+; CHECK-NEXT:    br i1 [[TMP42]], label [[UDIV_LOOP_EXIT:%.*]], label [[UDIV_DO_WHILE]], !prof [[PROF1]]
 ; CHECK:       udiv-preheader:
 ; CHECK-NEXT:    [[TMP43]] = lshr i129 [[TMP11]], [[TMP45]]
 ; CHECK-NEXT:    [[TMP44]] = add i129 [[TMP10]], -1
@@ -63,7 +63,7 @@ define void @test(ptr %ptr, ptr %out) nounwind {
 ; CHECK-NEXT:    [[TMP46:%.*]] = sub i129 128, [[TMP17]]
 ; CHECK-NEXT:    [[TMP47]] = shl i129 [[TMP11]], [[TMP46]]
 ; CHECK-NEXT:    [[TMP48:%.*]] = icmp eq i129 [[TMP45]], 0
-; CHECK-NEXT:    br i1 [[TMP48]], label [[UDIV_LOOP_EXIT]], label [[UDIV_PREHEADER]]
+; CHECK-NEXT:    br i1 [[TMP48]], label [[UDIV_LOOP_EXIT]], label [[UDIV_PREHEADER]], !prof [[PROF1]]
 ; CHECK:       udiv-end:
 ; CHECK-NEXT:    [[TMP49:%.*]] = phi i129 [ [[TMP26]], [[UDIV_LOOP_EXIT]] ], [ [[TMP21]], [[_UDIV_SPECIAL_CASES:%.*]] ]
 ; CHECK-NEXT:    [[TMP50:%.*]] = mul i129 [[TMP9]], [[TMP49]]
@@ -78,3 +78,12 @@ define void @test(ptr %ptr, ptr %out) nounwind {
   store i129 %res, ptr %out
   ret void
 }
+
+!0 = !{!"function_entry_count", i64 1000}
+;.
+; CHECK: attributes #[[ATTR0:[0-9]+]] = { nounwind }
+; CHECK: attributes #[[ATTR1:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+;.
+; CHECK: [[META0:![0-9]+]] = !{!"function_entry_count", i64 1000}
+; CHECK: [[PROF1]] = !{!"branch_weights", i32 1, i32 1048575}
+;.
diff --git a/llvm/test/Transforms/ExpandIRInsts/X86/udiv129.ll b/llvm/test/Transforms/ExpandIRInsts/X86/udiv129.ll
index b2b83815f79b0..d1db517cc7730 100644
--- a/llvm/test/Transforms/ExpandIRInsts/X86/udiv129.ll
+++ b/llvm/test/Transforms/ExpandIRInsts/X86/udiv129.ll
@@ -2,8 +2,9 @@
 ; RUN: opt -S -mtriple=x86_64-- -expand-ir-insts -expand-div-rem-bits 128 < %s | FileCheck %s
 ; RUN: opt -S -mtriple=x86_64-- -passes='require<libcall-lowering-info>,expand-ir-insts' -expand-div-rem-bits 128 < %s | FileCheck %s
 
-define void @test(ptr %ptr, ptr %out) nounwind {
+define void @test(ptr %ptr, ptr %out) nounwind !prof !0 {
 ; CHECK-LABEL: @test(
+; CHECK: !prof [[PROF_0:![0-9]+]] {
 ; CHECK-NEXT:  _udiv-special-cases:
 ; CHECK-NEXT:    [[A:%.*]] = load i129, ptr [[PTR:%.*]], align 16
 ; CHECK-NEXT:    [[TMP0:%.*]] = freeze i129 3
@@ -15,11 +16,11 @@ define void @test(ptr %ptr, ptr %out) nounwind {
 ; CHECK-NEXT:    [[TMP6:%.*]] = call i129 @llvm.ctlz.i129(i129 [[TMP1]], i1 true)
 ; CHECK-NEXT:    [[TMP7:%.*]] = sub i129 [[TMP5]], [[TMP6]]
 ; CHECK-NEXT:    [[TMP8:%.*]] = icmp ugt i129 [[TMP7]], 128
-; CHECK-NEXT:    [[TMP9:%.*]] = select i1 [[TMP4]], i1 true, i1 [[TMP8]]
+; CHECK-NEXT:    [[TMP9:%.*]] = select i1 [[TMP4]], i1 true, i1 [[TMP8]], !prof [[PROF_1:![0-9]+]]
 ; CHECK-NEXT:    [[TMP10:%.*]] = icmp eq i129 [[TMP7]], 128
-; CHECK-NEXT:    [[TMP11:%.*]] = select i1 [[TMP9]], i129 0, i129 [[TMP1]]
-; CHECK-NEXT:    [[TMP12:%.*]] = select i1 [[TMP9]], i1 true, i1 [[TMP10]]
-; CHECK-NEXT:    br i1 [[TMP12]], label [[UDIV_END:%.*]], label [[UDIV_BB1:%.*]]
+; CHECK-NEXT:    [[TMP11:%.*]] = select i1 [[TMP9]], i129 0, i129 [[TMP1]], !prof [[PROF_1]]
+; CHECK-NEXT:    [[TMP12:%.*]] = select i1 [[TMP9]], i1 true, i1 [[TMP10]], !prof [[PROF_1]]
+; CHECK-NEXT:    br i1 [[TMP12]], label [[UDIV_END:%.*]], label [[UDIV_BB1:%.*]], !prof [[PROF_1]]
 ; CHECK:       udiv-loop-exit:
 ; CHECK-NEXT:    [[TMP13:%.*]] = phi i129 [ 0, [[UDIV_BB1]] ], [ [[TMP28:%.*]], [[UDIV_DO_WHILE:%.*]] ]
 ; CHECK-NEXT:    [[TMP14:%.*]] = phi i129 [ [[TMP37:%.*]], [[UDIV_BB1]] ], [ [[TMP25:%.*]], [[UDIV_DO_WHILE]] ]
@@ -43,7 +44,7 @@ define void @test(ptr %ptr, ptr %out) nounwind {
 ; CHECK-NEXT:    [[TMP30]] = sub i129 [[TMP23]], [[TMP29]]
 ; CHECK-NEXT:    [[TMP31]] = add i129 [[TMP18]], -1
 ; CHECK-NEXT:    [[TMP32:%.*]] = icmp eq i129 [[TMP31]], 0
-; CHECK-NEXT:    br i1 [[TMP32]], label [[UDIV_LOOP_EXIT:%.*]], label [[UDIV_DO_WHILE]]
+; CHECK-NEXT:    br i1 [[TMP32]], label [[UDIV_LOOP_EXIT:%.*]], label [[UDIV_DO_WHILE]], !prof [[PROF_1]]
 ; CHECK:       udiv-preheader:
 ; CHECK-NEXT:    [[TMP33]] = lshr i129 [[TMP1]], [[TMP35]]
 ; CHECK-NEXT:    [[TMP34]] = add i129 [[TMP0]], -1
@@ -53,7 +54,7 @@ define void @test(ptr %ptr, ptr %out) nounwind {
 ; CHECK-NEXT:    [[TMP36:%.*]] = sub i129 128, [[TMP7]]
 ; CHECK-NEXT:    [[TMP37]] = shl i129 [[TMP1]], [[TMP36]]
 ; CHECK-NEXT:    [[TMP38:%.*]] = icmp eq i129 [[TMP35]], 0
-; CHECK-NEXT:    br i1 [[TMP38]], label [[UDIV_LOOP_EXIT]], label [[UDIV_PREHEADER]]
+; CHECK-NEXT:    br i1 [[TMP38]], label [[UDIV_LOOP_EXIT]], label [[UDIV_PREHEADER]], !prof [[PROF_1]]
 ; CHECK:       udiv-end:
 ; CHECK-NEXT:    [[TMP39:%.*]] = phi i129 [ [[TMP16]], [[UDIV_LOOP_EXIT]] ], [ [[TMP11]], [[_UDIV_SPECIAL_CASES:%.*]] ]
 ; CHECK-NEXT:    store i129 [[TMP39]], ptr [[OUT:%.*]], align 16
@@ -64,3 +65,7 @@ define void @test(ptr %ptr, ptr %out) nounwind {
   store i129 %res, ptr %out
   ret void
 }
+
+!0 = !{!"function_entry_count", i64 1000}
+; CHECK: [[PROF_0]] = !{!"function_entry_count", i64 1000}
+; CHECK: [[PROF_1]] = !{!"branch_weights", i32 1, i32 1048575}
diff --git a/llvm/test/Transforms/ExpandIRInsts/X86/urem129.ll b/llvm/test/Transforms/ExpandIRInsts/X86/urem129.ll
index 46e72001b2c2d..78fc40784e5f8 100644
--- a/llvm/test/Transforms/ExpandIRInsts/X86/urem129.ll
+++ b/llvm/test/Transforms/ExpandIRInsts/X86/urem129.ll
@@ -2,8 +2,9 @@
 ; RUN: opt -S -mtriple=x86_64-- -expand-ir-insts -expand-div-rem-bits 128 < %s | FileCheck %s
 ; RUN: opt -S -mtriple=x86_64-- -passes='require<libcall-lowering-info>,expand-ir-insts' -expand-div-rem-bits 128 < %s | FileCheck %s
 
-define void @test(ptr %ptr, ptr %out) nounwind {
+define void @test(ptr %ptr, ptr %out) nounwind !prof !0 {
 ; CHECK-LABEL: @test(
+; CHECK: !prof [[PROF_0:![0-9]+]] {
 ; CHECK-NEXT:  _udiv-special-cases:
 ; CHECK-NEXT:    [[A:%.*]] = load i129, ptr [[PTR:%.*]], align 16
 ; CHECK-NEXT:    [[TMP0:%.*]] = freeze i129 [[A]]
@@ -17,11 +18,11 @@ define void @test(ptr %ptr, ptr %out) nounwind {
 ; CHECK-NEXT:    [[TMP8:%.*]] = call i129 @llvm.ctlz.i129(i129 [[TMP3]], i1 true)
 ; CHECK-NEXT:    [[TMP9:%.*]] = sub i129 [[TMP7]], [[TMP8]]
 ; CHECK-NEXT:    [[TMP10:%.*]] = icmp ugt i129 [[TMP9]], 128
-; CHECK-NEXT:    [[TMP11:%.*]] = select i1 [[TMP6]], i1 true, i1 [[TMP10]]
+; CHECK-NEXT:    [[TMP11:%.*]] = select i1 [[TMP6]], i1 true, i1 [[TMP10]], !prof [[PROF_1:![0-9]+]]
 ; CHECK-NEXT:    [[TMP12:%.*]] = icmp eq i129 [[TMP9]], 128
-; CHECK-NEXT:    [[TMP13:%.*]] = select i1 [[TMP11]], i129 0, i129 [[TMP3]]
-; CHECK-NEXT:    [[TMP14:%.*]] = select i1 [[TMP11]], i1 true, i1 [[TMP12]]
-; CHECK-NEXT:    br i1 [[TMP14]], label [[UDIV_END:%.*]], label [[UDIV_BB1:%.*]]
+; CHECK-NEXT:    [[TMP13:%.*]] = select i1 [[TMP11]], i129 0, i129 [[TMP3]], !prof [[PROF_1]]
+; CHECK-NEXT:    [[TMP14:%.*]] = select i1 [[TMP11]], i1 true, i1 [[TMP12]], !prof [[PROF_1]]
+; CHECK-NEXT:    br i1 [[TMP14]], label [[UDIV_END:%.*]], label [[UDIV_BB1:%.*]], !prof [[PROF_1]]
 ; CHECK:       udiv-loop-exit:
 ; CHECK-NEXT:    [[TMP15:%.*]] = phi i129 [ 0, [[UDIV_BB1]] ], [ [[TMP30:%.*]], [[UDIV_DO_WHILE:%.*]] ]
 ; CHECK-NEXT:    [[TMP16:%.*]] = phi i129 [ [[TMP39:%.*]], [[UDIV_BB1]] ], [ [[TMP27:%.*]], [[UDIV_DO_WHILE]] ]
@@ -45,7 +46,7 @@ define void @test(ptr %ptr, ptr %out) nounwind {
 ; CHECK-NEXT:    [[TMP32]] = sub i129 [[TMP25]], [[TMP31]]
 ; CHECK-NEXT:    [[TMP33]] = add i129 [[TMP20]], -1
 ; CHECK-NEXT:    [[TMP34:%.*]] = icmp eq i129 [[TMP33]], 0
-; CHECK-NEXT:    br i1 [[TMP34]], label [[UDIV_LOOP_EXIT:%.*]], label [[UDIV_DO_WHILE]]
+; CHECK-NEXT:    br i1 [[TMP34]], label [[UDIV_LOOP_EXIT:%.*]], label [[UDIV_DO_WHILE]], !prof [[PROF_1]]
 ; CHECK:       udiv-preheader:
 ; CHECK-NEXT:    [[TMP35]] = lshr i129 [[TMP3]], [[TMP37]]
 ; CHECK-NEXT:    [[TMP36]] = add i129 [[TMP2]], -1
@@ -55,7 +56,7 @@ define void @test(ptr %ptr, ptr %out) nounwind {
 ; CHECK-NEXT:    [[TMP38:%.*]] = sub i129 128, [[TMP9]]
 ; CHECK-NEXT:    [[TMP39]] = shl i129 [[TMP3]], [[TMP38]]
 ; CHECK-NEXT:    [[TMP40:%.*]] = icmp eq i129 [[TMP37]], 0
-; CHECK-NEXT:    br i1 [[TMP40]], label [[UDIV_LOOP_EXIT]], label [[UDIV_PREHEADER]]
+; CHECK-NEXT:    br i1 [[TMP40]], label [[UDIV_LOOP_EXIT]], label [[UDIV_PREHEADER]], !prof [[PROF_1]]
 ; CHECK:       udiv-end:
 ; CHECK-NEXT:    [[TMP41:%.*]] = phi i129 [ [[TMP18]], [[UDIV_LOOP_EXIT]] ], [ [[TMP13]], [[_UDIV_SPECIAL_CASES:%.*]] ]
 ; CHECK-NEXT:    [[TMP42:%.*]] = mul i129 [[TMP1]], [[TMP41]]
@@ -68,3 +69,7 @@ define void @test(ptr %ptr, ptr %out) nounwind {
   store i129 %res, ptr %out
   ret void
 }
+
+!0 = !{!"function_entry_count", i64 1000}
+; CHECK: [[PROF_0]] = !{!"function_entry_count", i64 1000}
+; CHECK: [[PROF_1]] = !{!"branch_weights", i32 1, i32 1048575}
diff --git a/llvm/test/Transforms/ExpandIRInsts/X86/vector.ll b/llvm/test/Transforms/ExpandIRInsts/X86/vector.ll
index 58e74b8d17b55..0e7f844599c1b 100644
--- a/llvm/test/Transforms/ExpandIRInsts/X86/vector.ll
+++ b/llvm/test/Transforms/ExpandIRInsts/X86/vector.ll
@@ -2,9 +2,9 @@
 ; RUN: opt -S -mtriple=x86_64-- -expand-ir-insts -expand-div-rem-bits 128 < %s | FileCheck %s
 ; RUN: opt -S -mtriple=x86_64-- -passes='require<libcall-lowering-info>,expand-ir-insts' -expand-div-rem-bits 128 < %s | FileCheck %s
 
-define <2 x i129> @sdiv129(<2 x i129> %a, <2 x i129> %b) nounwind {
+define <2 x i129> @sdiv129(<2 x i129> %a, <2 x i129> %b) nounwind !prof !0 {
 ; CHECK-LABEL: define <2 x i129> @sdiv129(
-; CHECK-SAME: <2 x i129> [[A:%.*]], <2 x i129> [[B:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-SAME: <2 x i129> [[A:%.*]], <2 x i129> [[B:%.*]]) #[[ATTR0:[0-9]+]] !prof [[PROF0:![0-9]+]] {
 ; CHECK-NEXT:  _udiv-special-cases_udiv-special-cases:
 ; CHECK-NEXT:    [[TMP0:%.*]] = extractelement <2 x i129> [[A]], i64 0
 ; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x i129> [[B]], i64 0
@@ -26,11 +26,11 @@ define <2 x i129> @sdiv129(<2 x i129> %a, <2 x i129> %b) nounwind {
 ; CHECK-NEXT:    [[TMP17:%.*]] = call i129 @llvm.ctlz.i129(i129 [[TMP12]], i1 true)
 ; CHECK-NEXT:    [[TMP18:%.*]] = sub i129 [[TMP16]], [[TMP17]]
 ; CHECK-NEXT:    [[TMP19:%.*]] = icmp ugt i129 [[TMP18]], 128
-; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP15]], i1 true, i1 [[TMP19]]
+; CHECK-NEXT:    [[TMP20:%.*]] = select i1 [[TMP15]], i1 true, i1 [[TMP19]], !prof [[PROF1:![0-9]+]]
 ; CHECK-NEXT:    [[TMP21:%.*]] = icmp eq i129 [[TMP18]], 128
-; CHECK-NEXT:    [[TMP22:%.*]] = select i1 [[TMP20]], i129 0, i129 [[TMP12]]
-; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP20]], i1 true, i1 [[TMP21]]
-; CHECK-NEXT:    br i1 [[TMP23]], label [[UDIV_END1:%.*]], label [[UDIV_BB15:%.*]]
+; CHECK-NEXT:    [[TMP22:%.*]] = select i1 [[TMP20]], i129 0, i129 [[TMP12]], !prof [[PROF1]]
+; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP20]], i1 true, i1 [[TMP21]], !prof [[PROF1]]
+; CHECK-NEXT:    br i1 [[TMP23]], label [[UDIV_END1:%.*]], label [[UDIV_BB15:%.*]], !prof [[PROF1]]
 ; CHECK:       udiv-loop-exit2:
 ; CHECK-NEXT:    [[TMP24:%.*]] = phi i129 [ 0, [[UDIV_BB15]] ], [ [[TMP39:%.*]], [[UDIV_DO_WHILE3:%.*]] ]
 ; CHECK-NEXT:    [[TMP25:%.*]] = phi i129 [ [[TMP48:%.*]], [[UDIV_BB15]] ], [ [[TMP36:%.*]], [[UDIV_DO_WHILE3]] ]
@@ -54,7 +54,7 @@ define <2 x i129> @sdiv129(<2 x i129> %a, <2 x i129> %b) nounwind {
 ; CHECK-NEXT:    [[TMP41]] = sub i129 [[TMP34]], [[TMP40]]
 ; CHECK-NEXT:    [[TMP42]] = add i129 [[TMP29]], -1
 ; CHECK-NEXT:    [[TMP43:%.*]] = icmp eq i129 [[TMP42]], 0
-; CHECK-NEXT:    br i1 [[TMP43]], label [[UDIV_LOOP_EXIT2:%.*]], label [[UDIV_DO_WHILE3]]
+; CHECK-NEXT:    br i1 [[TMP43]], label [[UDIV_LOOP_EXIT2:%.*]], label [[UDIV_DO_WHILE3]], !prof [[PROF1]]
 ; CHECK:       udiv-preheader4:
 ; CHECK-NEXT:    [[TMP44]] = lshr i129 [[TMP12]], [[TMP46]]
 ; CHECK-NEXT:    [[TMP45]] = add i129 [[TMP11]], -1
@@ -64,7 +64,7 @@ define <2 x i129> @sdiv129(<2 x i129> %a, <2 x i129> %b) nounwind {
 ; CHECK-NEXT:    [[TMP47:%.*]] = sub i129 128, [[TMP18]]
 ; CHECK-NEXT:    [[TMP48]] = shl i129 [[TMP12]], [[TMP47]]
 ; CHECK-NEXT:    [[TMP49:%.*]] = icmp eq i129 [[TMP46]], 0
-; CHECK-NEXT:    br i1 [[TMP49]], label [[UDIV_LOOP_EXIT2]], label [[UDIV_PREHEADER4]]
+; CHECK-NEXT:    br i1 [[TMP49]], label [[UDIV_LOOP_EXIT2]], label [[UDIV_PREHEADER4]], !prof [[PROF1]]
 ; CHECK:       udiv-end1:
 ; CHECK-NEXT:    [[TMP50:%.*]] = phi i129 [ [[TMP27]], [[UDIV_LOOP_EXIT2]] ], [ [[TMP22]], [[_UDIV_SPECIAL_CASES_UDIV_SPECIAL_CASES:%.*]] ]
 ; CHECK-NEXT:    [[TMP51:%.*]] = xor i129 [[TMP50]], [[TMP10]]
@@ -90,11 +90,11 @@ define <2 x i129> @sdiv129(<2 x i129> %a, <2 x i129> %b) nounwind {
 ; CHECK-NEXT:    [[TMP71:%.*]] = call i129 @llvm.ctlz.i129(i129 [[TMP66]], i1 true)
 ; CHECK-NEXT:    [[TMP72:%.*]] = sub i129 [[TMP70]], [[TMP71]]
 ; CHECK-NEXT:    [[TMP73:%.*]] = icmp ugt i129 [[TMP72]], 128
-; CHECK-NEXT:    [[TMP74:%.*]] = select i1 [[TMP69]], i1 true, i1 [[TMP73]]
+; CHECK-NEXT:    [[TMP74:%.*]] = select i1 [[TMP69]], i1 true, i1 [[TMP73]], !prof [[PROF1]]
 ; CHECK-NEXT:    [[TMP75:%.*]] = icmp eq i129 [[TMP72]], 128
-; CHECK-NEXT:    [[TMP76:%.*]] = select i1 [[TMP74]], i129 0, i129 [[TMP66]]
-; CHECK-NEXT:    [[TMP77:%.*]] = select i1 [[TMP74]], i1 true, i1 [[TMP75]]
-; CHECK-NEXT:    br i1 [[TMP77]], label [[UDIV_END:%.*]], label [[UDIV_BB1:%.*]]
+; CHECK-NEXT:    [[TMP76:%.*]] = select i1 [[TMP74]], i129 0, i129 [[TMP66]], !prof [[PROF1]]
+; CHECK-NEXT:    [[TMP77:%.*]] = select i1 [[TMP74]], i1 true, i1 [[TMP75]], !prof [[PROF1]]
+; CHECK-NEXT:    br i1 [[TMP77]], label [[UDIV_END:%.*]], label [[UDIV_BB1:%.*]], !prof [[PROF1]]
 ; CHECK:       udiv-loop-exit:
 ; CHECK-NEXT:    [[TMP78:%.*]] = phi i129 [ 0, [[UDIV_BB1]] ], [ [[TMP93:%.*]], [[UDIV_DO_WHILE:%.*]] ]
 ; CHECK-NEXT:    [[TMP79:%.*]] = phi i129 [ [[TMP102:%.*]], [[UDIV_BB1]] ], [ [[TMP90:%.*]], [[UDIV_DO_WHILE]] ]
@@ -118,7 +118,7 @@ define <2 x i129> @sdiv129(<2 x i129> %a, <2 x i129> %b) nounwind {
 ; CHECK-NEXT:    [[TMP95]] = sub i129 [[TMP88]], [[TMP94]]
 ; CHECK-NEXT:    [[TMP96]] = add i129 [[TMP83]], -1
 ; CHECK-NEXT:    [[TMP97:%.*]] = icmp eq i129 [[TMP96]], 0
-; CHECK-NEXT:    br i1 [[TMP97]], label [[UDIV_LOOP_EXIT:%.*]], label [[UDIV_DO_WHILE]]
+; CHECK-NEXT:    br i1 [[TMP97]], label [[UDIV_LOOP_EXIT:%.*]], label [[UDIV_DO_WHILE]], !prof [[PROF1]]
 ; CHECK:       udiv-preheader:
 ; CHECK-NEXT:    [[TMP98]] = lshr i129 [[TMP66]], [[TMP100]]
 ; CHECK-NEXT:    [[TMP99]] = add i129 [[TMP65]], -1
@@ -128,7 +128,7 @@ define <2 x i129> @sdiv129(<2 x i129> %a, <2 x i129> %b) nounwind {
 ; CHECK-NEXT:    [[TMP101:%.*]] = sub i129 128, [[TMP72]]
 ; CHECK-NEXT:    [[TMP102]] = shl i129 [[TMP66]], [[TMP101]]
 ; CHECK-NEXT:    [[TMP103:%.*]] = icmp eq i129 [[TMP100]], 0
-; CHECK-NEXT:    br i1 [[TMP103]], label [[UDIV_LOOP_EXIT]], label [[UDIV_PREHEADER]]
+; CHECK-NEXT:    br i1 [[TMP103]], label [[UDIV_LOOP_EXIT]], label [[UDIV_PREHEADER]], !prof [[PROF1]]
 ; CHECK:       udiv-end:
 ; CHECK-NEXT:    [[TMP104:%.*]] = phi i129 [ [[TMP81]], [[UDIV_LOOP_EXIT]] ], [ [[TMP76]], [[UDIV_END1]] ]
 ; CHECK-NEXT:    [[TMP105:%.*]] = xor i129 [[TMP104]], [[TMP64]]
@@ -155,11 +155,11 @@ define <2 x i129> @udiv129(<2 x i129> %a, <2 x i129> %b) nounwind {
 ; CHECK-NEXT:    [[TMP8:%.*]] = call i129 @llvm.ctlz.i129(i129 [[TMP3]], i1 true)
 ; CHECK-NEXT:    [[TMP9:%.*]] = sub i129 [[TMP7]], [[TMP8]]
 ; CHECK-NEXT:    [[TMP10:%.*]] = icmp ugt i129 [[TMP9]], 128
-; CHECK-NEXT:    [[TMP11:%.*]] = select i1 [[TMP6]], i1 true, i1 [[TMP10]]
+; CHECK-NEXT:    [[TMP11:%.*]] = select i1 [[TMP6]], i1 true, i1 [[TMP10]], !prof [[PROF1]]
 ; CHECK-NEXT:    [[TMP12:%.*]] = icmp eq i129 [[TMP9]], 128
-; CHECK-NEXT:    [[TMP13:%.*]] = select i1 [[TMP11]], i129 0, i129 [[TMP3]]
-; CHECK-NEXT:    [[TMP14:%.*]] = select i1 [[TMP11]], i1 true, i1 [[TMP12]]
-; CHECK-NEXT:    br i1 [[TMP14]], label [[UDIV_END1:%.*]], label [[UDIV_BB15:%.*]]
+; CHECK-NEXT:    [[TMP13:%.*]] = select i1 [[TMP11]], i129 0, i129 [[TMP3]], !prof [[PROF1]]
+; CHECK-NEXT:    [[TMP14:%.*]] = select i1 [[TMP11]], i1 true, i1 [[TMP12]], !prof [[PROF1]]
+; CHECK-NEXT:    br i1 [[TMP14]], label [[UDIV_END1:%.*]], label [[UDIV_BB15:%.*]], !prof [[PROF1]]
 ; CHECK:       udiv-loop-exit2:
 ; CHECK-NEXT:    [[TMP15:%.*]] = phi i129 [ 0, [[UDIV_BB15]] ], [ [[TMP30:%.*]], [[UDIV_DO_WHILE3:%.*]] ]
 ; CHECK-NEXT:    [[TMP16:%.*]] = phi i129 [ [[TMP39:%.*]], [[UDIV_BB15]] ], [ [[TMP27:%.*]], [[UDIV_DO_WHILE3]] ]
@@ -183,7 +183,7 @@ define <2 x i129> @udiv129(<2 x i129> %a, <2 x i129> %b) nounwind {
 ; CHECK-NEXT:    [[TMP32]] = sub i129 [[TMP25]], [[TMP31]]
 ; CHECK-NEXT:    [[TMP33]] = add i129 [[TMP20]], -1
 ; CHECK-NEXT:    [[TMP34:%.*]] = icmp eq i129 [[TMP33]], 0
-; CHECK-NEXT:    br i1 [[TMP34]], label [[UDIV_LOOP_EXIT2:%.*]], label [[UDIV_DO_WHILE3]]
+; CHECK-NEXT:    br i1 [[TMP34]], label [[UDIV_LOOP_EXIT2:%.*]], label [[UDIV_DO_WHILE3]], !prof [[PROF1]]
 ; CHECK:       udiv-preheader4:
 ; CHECK-NEXT:    [[TMP35]] = lshr i129 [[TMP3]], [[TMP37]]
 ; CHECK-NEXT:    [[TMP36]] = add i129 [[TMP2]], -1
@@ -193,7 +193,7 @@ define <2 x i129> @udiv129(<2 x i129> %a, <2 x i129> %b) nounwind {
 ; CHECK-NEXT:    [[TMP38:%.*]] = sub i129 128, [[TMP9]]
 ; CHECK-NEXT:    [[TMP39]] = shl i129 [[TMP3]], [[TMP38]]
 ; CHECK-NEXT:    [[TMP40:%.*]] = icmp eq i129 [[TMP37]], 0
-; CHECK-NEXT:    br i1 [[TMP40]], label [[UDIV_LOOP_EXIT2]], label [[UDIV_PREHEADER4]]
+; CHECK-NEXT:    br i1 [[TMP40]], label [[UDIV_LOOP_EXIT2]], label [[UDIV_PREHEADER4]], !prof [[PROF1]]
 ; CHECK:       udiv-end1:
 ; CHECK-NEXT:    [[TMP41:%.*]] = phi i129 [ [[TMP18]], [[UDIV_LOOP_EXIT2]] ], [ [[TMP13]], [[_UDIV_SPECIAL_CASES_UDIV_SPECIAL_CASES:%.*]] ]
 ; CHECK-NEXT:    [[TMP42:%.*]] = insertelement <2 x i129> poison, i129 [[TMP41]], i64 0
@@ -208,11 +208,11 @@ define <2 x i129> @udiv129(<2 x i129> %a, <2 x i129> %b) nounwind {
 ; CHECK-NEXT:    [[TMP51:%.*]] = call i129 @llvm.ctlz.i129(i129 [[TMP46]], i1 true)
 ; CHECK-NEXT:    [[TMP52:%.*]] = sub i129 [[TMP50]], [[TMP51]]
 ; CHECK-NEXT:    [[TMP53:%.*]] = icmp ugt i129 [[TMP52]], 128
-; CHECK-NEXT:    [[TMP54:%.*]] = select i1 [[TMP49]], i1 true, i1 [[TMP53]]
+; CHECK-NEXT:    [[TMP54:%.*]] = select i1 [[TMP49]], i1 true, i1 [[TMP53]], !prof [[PROF1]]
 ; CHECK-NEXT:    [[TMP55:%.*]] = icmp eq i129 [[TMP52]], 128
-; CHECK-NEXT:    [[TMP56:%.*]] = select i1 [[TMP54]], i129 0, i129 [[TMP46]]
-; CHECK-NEXT:    [[TMP57:%.*]] = select i1 [[TMP54]], i1 true, i1 [[TMP55]]
-; CHECK-NEXT:    br i1 [[TMP57]], label [[UDIV_END:%.*]], label [[UDIV_BB1:%.*]]
+; CHECK-NEXT:    [[TMP56:%.*]] = select i1 [[TMP54]], i129 0, i129 [[TMP46]], !prof [[PROF1]]
+; CHECK-NEXT:    [[TMP57:%.*]] = select i1 [[TMP54]], i1 true, i1 [[TMP55]], !prof [[PROF1]]
+; CHECK-NEXT:    br i1 [[TMP57]], label [[UDIV_END:%.*]], label [[UDIV_BB1:%.*]], !prof [[PROF1]]
 ; CHECK:       udiv-loop-exit:
 ; CHECK-NEXT:    [[TMP58:%.*]] = phi i129 [ 0, [[UDIV_BB1]] ], [ [[TMP73:%.*]], [[UDIV_DO_WHILE:%.*]] ]
 ; CHECK-NEXT:    [[TMP59:%.*]] = phi i129 [ [[TMP82:%.*]], [[UDIV_BB1]] ], [ [[TMP70:%.*]], [[UDIV_DO_WHILE]] ]
@@ -236,7 +236,7 @@ define <2 x i129> @udiv129(<2 x i129> %a, <2 x i129> %b) nounwind {
 ; CHECK-NEXT:    [[TMP75]] = sub i129 [[TMP68]], [[TMP74]]
 ; CHECK-NEXT:    [[TMP76]] = add i129 [[TMP63]], -1
 ; CHECK-NEXT:    [[TMP77:%.*]] = icmp eq i129 [[TMP76]], 0
-; CHECK-NEXT:    br i1 [[TMP77]], label [[UDIV_LOOP_EXIT:%.*]], label [[UDIV_DO_WHILE]]
+; CHECK-NEXT:    br i1 [[TMP77]], label [[UDIV_LOOP_EXIT:%.*]], label [[UDIV_DO_WHILE]], !prof [[PROF1]]
 ; CHECK:       udiv-preheader:
 ; CHECK-NEXT:    [[TMP78]] = lshr i129 [[TMP46]], [[TMP80]]
 ; CHECK-NEXT:    [[TMP79]] = add i129 [[TMP45]], -1
@@ -246,7 +246,7 @@ define <2 x i129> @udiv129(<2 x i129> %a, <2 x i129> %b) nounwind {
 ; CHECK-NEXT:    [[TMP81:%.*]] = sub i129 128, [[TMP52]]
 ; CHECK-NEXT:    [[TMP82]] = shl i129 [[TMP46]], [[TMP81]]
 ; CHECK-NEXT:    [[TMP83:%.*]] = icmp eq i129 [[TMP80]], 0
-; CHECK-NEXT:    br i1 [[TMP83]], label [[UDIV_LOOP_EXIT]], label [[UDIV_PREHEADER]]
+; CHECK-NEXT:    br i1 [[TMP83]], label [[UDIV_LOOP_EXIT]], label [[UDIV_PREHEADER]], !prof [[PROF1]]
 ; CHECK:       udiv-end:
 ; CHECK-NEXT:    [[TMP84:%.*]] = phi i129 [ [[TMP61]], [[UDIV_LOOP_EXIT]] ], [ [[TMP56]], [[UDIV_END1]] ]
 ; CHECK-NEXT:    [[TMP85:%.*]] = insertelement <2 x i129> [[TMP42]], i129 [[TMP84]], i64 1
@@ -281,11 +281,11 @@ define <2 x i129> @srem129(<2 x i129> %a, <2 x i129> %b) nounwind {
 ; CHECK-NEXT:    [[TMP18:%.*]] = call i129 @llvm.ctlz.i129(i129 [[TMP13]], i1 true)
 ; CHECK-NEXT:    [[TMP19:%.*]] = sub i129 [[TMP17]], [[TMP18]]
 ; CHECK-NEXT:    [[TMP20:%.*]] = icmp ugt i129 [[TMP19]], 128
-; CHECK-NEXT:    [[TMP21:%.*]] = select i1 [[TMP16]], i1 true, i1 [[TMP20]]
+; CHECK-NEXT:    [[TMP21:%.*]] = select i1 [[TMP16]], i1 true, i1 [[TMP20]], !prof [[PROF1]]
 ; CHECK-NEXT:    [[TMP22:%.*]] = icmp eq i129 [[TMP19]], 128
-; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP21]], i129 0, i129 [[TMP13]]
-; CHECK-NEXT:    [[TMP24:%.*]] = select i1 [[TMP21]], i1 true, i1 [[TMP22]]
-; CHECK-NEXT:    br i1 [[TMP24]], label [[UDIV_END1:%.*]], label [[UDIV_BB15:%.*]]
+; CHECK-NEXT:    [[TMP23:%.*]] = select i1 [[TMP21]], i129 0, i129 [[TMP13]], !prof [[PROF1]]
+; CHECK-NEXT:    [[TMP24:%.*]] = select i1 [[TMP21]], i1 true, i1 [[TMP22]], !prof [[PROF1]]
+; CHECK-NEXT:    br i1 [[TMP24]], label [[UDIV_END1:%.*]], label [[UDIV_BB15:%.*]], !prof [[PROF1]]
 ; CHECK:       udiv-loop-exit2:
 ; CHECK-NEXT:    [[TMP25:%.*]] = phi i129 [ 0, [[UDIV_BB15]] ], [ [[TMP40:%.*]], [[UDIV_DO_WHILE3:%.*]] ]
 ; CHECK-NEXT:    [[TMP26:%.*]] = phi i129 [ [[TMP49:%.*]], [[UDIV_BB15]] ], [ [[TMP37:%.*]], [[UDIV_DO_WHILE3]] ]
@@ -309,7 +309,7 @@ define <2 x i129> @srem129(<2 x i129> %a, <2 x i129> %b) nounwind {
 ; CHECK-NEXT:    [[TMP42]] = sub i129 [[TMP35]], [[TMP41]]
 ; CHECK-NEXT:    [[TMP43]] = add i129 [[TMP30]], -1
 ; CHECK-NEXT:    [[TMP44:%.*]] = icmp eq i129 [[TMP43]], 0
-; CHECK-NEXT:    br i1 [[TMP44]], label [[UDIV_LOOP_EXIT2:%.*]], label [[UDIV_DO_WHILE3]]
+; CHECK-NEXT:    br i1 [[TMP44]], label [[UDIV_LOOP_EXIT2:%.*]], label [[UDIV_DO_WHILE3]], !prof [[PROF1]]
 ; CHECK:       udiv-preheader4:
 ; CHECK-NEXT:    [[TMP45]] = lshr i129 [[TMP13]], [[TMP47]]
 ; CHECK-NEXT:    [[TMP46]] = add i129 [[TMP12]], -1
@@ -319,7 +319,7 @@ define <2 x i129> @srem129(<2 x i129> %a, <2 x i129> %b) nounwind {
 ; CHECK-NEXT:    [[TMP48:%.*]] = sub i129 128, [[TMP19]]
 ; CHECK-NEXT:    [[TMP49]] = shl i129 [[TMP13]], [[TMP48]]
 ; CHECK-NEXT:    [[TMP50:%.*]] = icmp eq i129 [[TMP47]], 0
-; CHECK-NEXT:    br i1 [[TMP50]], label [[UDIV_LOOP_EXIT2]], label [[UDIV_PREHEADER4]]
+; CHECK-NEXT:    br i1 [[TMP50]], label [[UDIV_LOOP_EXIT2]], label [[UDIV_PREHEADER4]], !prof [[PROF1]]
 ; CHECK:       udiv-end1:
 ; CHECK-NEXT:    [[TMP51:%.*]] = phi i129 [ [[TMP28]], [[UDIV_LOOP_EXIT2]] ], [ [[TMP23]], [[_UDIV_SPECIAL_CASES_UDIV_SPECIAL_CASES:%.*]] ]
 ; CHECK-NEXT:    [[TMP52:%.*]] = mul i129 [[TMP11]], [[TMP51]]
@@ -348,11 +348,11 @@ define <2 x i129> @srem129(<2 x i129> %a, <2 x i129> %b) nounwind {
 ; CHECK-NEXT:    [[TMP75:%.*]] = call i129 @llvm.ctlz.i129(i129 [[TMP70]], i1 true)
 ; CHECK-NEXT:    [[TMP76:%.*]] = sub i129 [[TMP74]], [[TMP75]]
 ; CHECK-NEXT:    [[TMP77:%.*]] = icmp ugt i129 [[TMP76]], 128
-; CHECK-NEXT:    [[TMP78:%.*]] = select i1 [[TMP73]], i1 true, i1 [[TMP77]]
+; CHECK-NEXT:    [[TMP78:%.*]] = select i1 [[TMP73]], i1 true, i1 [[TMP77]], !prof [[PROF1]]
 ; CHECK-NEXT:    [[TMP79:%.*]] = icmp eq i129 [[TMP76]], 128
-; CHECK-NEXT:    [[TMP80:%.*]] = select i1 [[TMP78]], i129 0, i129 [[TMP70]]
-; CHECK-NEXT:    [[TMP81:%.*]] = select i1 [[TMP78]], i1 true, i1 [[TMP79]]
-; CHECK-NEXT:    br i1 [[TMP81]], label [[UDIV_END:%.*]], label [[UDIV_BB1:%.*]]
+; CHECK-NEXT:    [[TMP80:%.*]] = select i1 [[TMP78]], i129 0, i129 [[TMP70]], !prof [[PROF1]]
+; CHECK-NEXT:    [[TMP81:%.*]] = select i1 [[TMP78]], i1 true, i1 [[TMP79]], !prof [[PROF1]]
+; CHECK-NEXT:    br i1 [[TMP81]], label [[UDIV_END:%.*]], label [[UDIV_BB1:%.*]], !prof [[PROF1]]
 ; CHECK:       udiv-loop-exit:
 ; CHECK-NEXT:    [[TMP82:%.*]] = phi i129 [ 0, [[UDIV_BB1]] ], [ [[TMP97:%.*]], [[UDIV_DO_WHILE:%.*]] ]
 ; CHECK-NEXT:    [[TMP83:%.*]] = phi i129 [ [[TMP106:%.*]], [[UDIV_BB1]] ], [ [[TMP94:%.*]], [[UDIV_DO_WHILE]] ]
@@ -376,7 +376,7 @@ define <2 x i129> @srem129(<2 x i129> %a, <2 x i129> %b) nounwind {
 ; CHECK-NEXT:    [[TMP99]] = sub i129 [[TMP92]], [[TMP98]]
 ; CHECK-NEXT:    [[TMP100]] = add i129 [[TMP87]], -1
 ; CHECK-NEXT:    [[TMP101:%.*]] = icmp eq i129 [[TMP100]], 0
-; CHECK-NEXT:    br i1 [[TMP101]], label [[UDIV_LOOP_EXIT:%.*]], label [[UDIV_DO_WHILE]]
+; CHECK-NEXT:    br i1 [[TMP101]], label [[UDIV_LOOP_EXIT:%.*]], label [[UDIV_DO_WHILE]], !prof [[PROF1]]
 ; CHECK:       udiv-preheader:
 ; CHECK-NEXT:    [[TMP102]] = lshr i129 [[TMP70]], [[TMP104]]
 ; CHECK-NEXT:    [[TMP103]] = add i129 [[TMP69]], -1
@@ -386,7 +386,7 @@ define <2 x i129> @srem129(<2 x i129> %a, <2 x i129> %b) nounwind {
 ; CHECK-NEXT:    [[TMP105:%.*]] = sub i129 128, [[TMP76]]
 ; CHECK-NEXT:    [[TMP106]] = shl i129 [[TMP70]], [[TMP105]]
 ; CHECK-NEXT:    [[TMP107:%.*]] = icmp eq i129 [[TMP104]], 0
-; CHECK-NEXT:    br i1 [[TMP107]], label [[UDIV_LOOP_EXIT]], label [[UDIV_PREHEADER]]
+; CHECK-NEXT:    br i1 [[TMP107]], label [[UDIV_LOOP_EXIT]], label [[UDIV_PREHEADER]], !prof [[PROF1]]
 ; CHECK:       udiv-end:
 ; CHECK-NEXT:    [[TMP108:%.*]] = phi i129 [ [[TMP85]], [[UDIV_LOOP_EXIT]] ], [ [[TMP80]], [[UDIV_END1]] ]
 ; CHECK-NEXT:    [[TMP109:%.*]] = mul i129 [[TMP68]], [[TMP108]]
@@ -417,11 +417,11 @@ define <2 x i129> @urem129(<2 x i129> %a, <2 x i129> %b) nounwind {
 ; CHECK-NEXT:    [[TMP10:%.*]] = call i129 @llvm.ctlz.i129(i129 [[TMP5]], i1 true)
 ; CHECK-NEXT:    [[TMP11:%.*]] = sub i129 [[TMP9]], [[TMP10]]
 ; CHECK-NEXT:    [[TMP12:%.*]] = icmp ugt i129 [[TMP11]], 128
-; CHECK-NEXT:    [[TMP13:%.*]] = select i1 [[TMP8]], i1 true, i1 [[TMP12]]
+; CHECK-NEXT:    [[TMP13:%.*]] = select i1 [[TMP8]], i1 true, i1 [[TMP12]], !prof [[PROF1]]
 ; CHECK-NEXT:    [[TMP14:%.*]] = icmp eq i129 [[TMP11]], 128
-; CHECK-NEXT:    [[TMP15:%.*]] = select i1 [[TMP13]], i129 0, i129 [[TMP5]]
-; CHECK-NEXT:    [[TMP16:%.*]] = select i1 [[TMP13]], i1 true, i1 [[TMP14]]
-; CHECK-NEXT:    br i1 [[TMP16]], label [[UDIV_END1:%.*]], label [[UDIV_BB15:%.*]]
+; CHECK-NEXT:    [[TMP15:%.*]] = select i1 [[TMP13]], i129 0, i129 [[TMP5]], !prof [[PROF1]]
+; CHECK-NEXT:    [[TMP16:%.*]] = select i1 [[TMP13]], i1 true, i1 [[TMP14]], !prof [[PROF1]]
+; CHECK-NEXT:    br i1 [[TMP16]], label [[UDIV_END1:%.*]], label [[UDIV_BB15:%.*]], !prof [[PROF1]]
 ; CHECK:       udiv-loop-exit2:
 ; CHECK-NEXT:    [[TMP17:%.*]] = phi i129 [ 0, [[UDIV_BB15]] ], [ [[TMP32:%.*]], [[UDIV_DO_WHILE3:%.*]] ]
 ; CHECK-NEXT:    [[TMP18:%.*]] = phi i129 [ [[TMP41:%.*]], [[UDIV_BB15]] ], [ [[TMP29:%.*]], [[UDIV_DO_WHILE3]] ]
@@ -445,7 +445,7 @@ define <2 x i129> @urem129(<2 x i129> %a, <2 x i129> %b) nounwind {
 ; CHECK-NEXT:    [[TMP34]] = sub i129 [[TMP27]], [[TMP33]]
 ; CHECK-NEXT:    [[TMP35]] = add i129 [[TMP22]], -1
 ; CHECK-NEXT:    [[TMP36:%.*]] = icmp eq i129 [[TMP35]], 0
-; CHECK-NEXT:    br i1 [[TMP36]], label [[UDIV_LOOP_EXIT2:%.*]], label [[UDIV_DO_WHILE3]]
+; CHECK-NEXT:    br i1 [[TMP36]], label [[UDIV_LOOP_EXIT2:%.*]], label [[UDIV_DO_WHILE3]], !prof [[PROF1]]
 ; CHECK:       udiv-preheader4:
 ; CHECK-NEXT:    [[TMP37]] = lshr i129 [[TMP5]], [[TMP39]]
 ; CHECK-NEXT:    [[TMP38]] = add i129 [[TMP4]], -1
@@ -455,7 +455,7 @@ define <2 x i129> @urem129(<2 x i129> %a, <2 x i129> %b) nounwind {
 ; CHECK-NEXT:    [[TMP40:%.*]] = sub i129 128, [[TMP11]]
 ; CHECK-NEXT:    [[TMP41]] = shl i129 [[TMP5]], [[TMP40]]
 ; CHECK-NEXT:    [[TMP42:%.*]] = icmp eq i129 [[TMP39]], 0
-; CHECK-NEXT:    br i1 [[TMP42]], label [[UDIV_LOOP_EXIT2]], label [[UDIV_PREHEADER4]]
+; CHECK-NEXT:    br i1 [[TMP42]], label [[UDIV_LOOP_EXIT2]], label [[UDIV_PREHEADER4]], !prof [[PROF1]]
 ; CHECK:       udiv-end1:
 ; CHECK-NEXT:    [[TMP43:%.*]] = phi i129 [ [[TMP20]], [[UDIV_LOOP_EXIT2]] ], [ [[TMP15]], [[_UDIV_SPECIAL_CASES_UDIV_SPECIAL_CASES:%.*]] ]
 ; CHECK-NEXT:    [[TMP44:%.*]] = mul i129 [[TMP3]], [[TMP43]]
@@ -474,11 +474,11 @@ define <2 x i129> @urem129(<2 x i129> %a, <2 x i129> %b) nounwind {
 ; CHECK-NEXT:    [[TMP57:%.*]] = call i129 @llvm.ctlz.i129(i129 [[TMP52]], i1 true)
 ; CHECK-NEXT:    [[TMP58:%.*]] = sub i129 [[TMP56]], [[TMP57]]
 ; CHECK-NEXT:    [[TMP59:%.*]] = icmp ugt i129 [[TMP58]], 128
-; CHECK-NEXT:    [[TMP60:%.*]] = select i1 [[TMP55]], i1 true, i1 [[TMP59]]
+; CHECK-NEXT:    [[TMP60:%.*]] = select i1 [[TMP55]], i1 true, i1 [[TMP59]], !prof [[PROF1]]
 ; CHECK-NEXT:    [[TMP61:%.*]] = icmp eq i129 [[TMP58]], 128
-; CHECK-NEXT:    [[TMP62:%.*]] = select i1 [[TMP60]], i129 0, i129 [[TMP52]]
-; CHECK-NEXT:    [[TMP63:%.*]] = select i1 [[TMP60]], i1 true, i1 [[TMP61]]
-; CHECK-NEXT:    br i1 [[TMP63]], label [[UDIV_END:%.*]], label [[UDIV_BB1:%.*]]
+; CHECK-NEXT:    [[TMP62:%.*]] = select i1 [[TMP60]], i129 0, i129 [[TMP52]], !prof [[PROF1]]
+; CHECK-NEXT:    [[TMP63:%.*]] = select i1 [[TMP60]], i1 true, i1 [[TMP61]], !prof [[PROF1]]
+; CHECK-NEXT:    br i1 [[TMP63]], label [[UDIV_END:%.*]], label [[UDIV_BB1:%.*]], !prof [[PROF1]]
 ; CHECK:       udiv-loop-exit:
 ; CHECK-NEXT:    [[TMP64:%.*]] = phi i129 [ 0, [[UDIV_BB1]] ], [ [[TMP79:%.*]], [[UDIV_DO_WHILE:%.*]] ]
 ; CHECK-NEXT:    [[TMP65:%.*]] = phi i129 [ [[TMP88:%.*]], [[UDIV_BB1]] ], [ [[TMP76:%.*]], [[UDIV_DO_WHILE]] ]
@@ -502,7 +502,7 @@ define <2 x i129> @urem129(<2 x i129> %a, <2 x i129> %b) nounwind {
 ; CHECK-NEXT:    [[TMP81]] = sub i129 [[TMP74]], [[TMP80]]
 ; CHECK-NEXT:    [[TMP82]] = add i129 [[TMP69]], -1
 ; CHECK-NEXT:    [[TMP83:%.*]] = icmp eq i129 [[TMP82]], 0
-; CHECK-NEXT:    br i1 [[TMP83]], label [[UDIV_LOOP_EXIT:%.*]], label [[UDIV_DO_WHILE]]
+; CHECK-NEXT:    br i1 [[TMP83]], label [[UDIV_LOOP_EXIT:%.*]], label [[UDIV_DO_WHILE]], !prof [[PROF1]]
 ; CHECK:       udiv-preheader:
 ; CHECK-NEXT:    [[TMP84]] = lshr i129 [[TMP52]], [[TMP86]]
 ; CHECK-NEXT:    [[TMP85]] = add i129 [[TMP51]], -1
@@ -512,7 +512,7 @@ define <2 x i129> @urem129(<2 x i129> %a, <2 x i129> %b) nounwind {
 ; CHECK-NEXT:    [[TMP87:%.*]] = sub i129 128, [[TMP58]]
 ; CHECK-NEXT:    [[TMP88]] = shl i129 [[TMP52]], [[TMP87]]
 ; CHECK-NEXT:    [[TMP89:%.*]] = icmp eq i129 [[TMP86]], 0
-; CHECK-NEXT:    br i1 [[TMP89]], label [[UDIV_LOOP_EXIT]], label [[UDIV_PREHEADER]]
+; CHECK-NEXT:    br i1 [[TMP89]], label [[UDIV_LOOP_EXIT]], label [[UDIV_PREHEADER]], !prof [[PROF1]]
 ; CHECK:       udiv-end:
 ; CHECK-NEXT:    [[TMP90:%.*]] = phi i129 [ [[TMP67]], [[UDIV_LOOP_EXIT]] ], [ [[TMP62]], [[UDIV_END1]] ]
 ; CHECK-NEXT:    [[TMP91:%.*]] = mul i129 [[TMP50]], [[TMP90]]
@@ -534,3 +534,9 @@ define <vscale x 2 x i129> @sdiv129_scalable(<vscale x 2 x i129> %a, <vscale x 2
   %res = sdiv <vscale x 2 x i129> %a, %b
   ret <vscale x 2 x i129> %res
 }
+
+!0 = !{!"function_entry_count", i64 1000}
+;.
+; CHECK: [[PROF0]] = !{!"function_entry_count", i64 1000}
+; CHECK: [[PROF1]] = !{!"branch_weights", i32 1, i32 1048575}
+;.
diff --git a/llvm/utils/profcheck-xfail.txt b/llvm/utils/profcheck-xfail.txt
index 6c4ad00f4650c..f7c128cd44fe5 100644
--- a/llvm/utils/profcheck-xfail.txt
+++ b/llvm/utils/profcheck-xfail.txt
@@ -134,15 +134,7 @@ Transforms/CorrelatedValuePropagation/urem.ll
 Transforms/CrossDSOCFI/basic.ll
 Transforms/CrossDSOCFI/cfi_functions.ll
 Transforms/CrossDSOCFI/thumb.ll
-Transforms/ExpandIRInsts/X86/sdiv129.ll
-Transforms/ExpandIRInsts/X86/srem129.ll
-Transforms/ExpandIRInsts/X86/udiv129.ll
-Transforms/ExpandIRInsts/X86/urem129.ll
-Transforms/ExpandIRInsts/X86/vector.ll
-Transforms/ExpandIRInsts/X86/expand-large-fp-convert-fptosi129.ll
-Transforms/ExpandIRInsts/X86/expand-large-fp-convert-fptoui129.ll
-Transforms/ExpandIRInsts/X86/expand-large-fp-convert-si129tofp.ll
-Transforms/ExpandIRInsts/X86/expand-large-fp-convert-ui129tofp.ll
+
 Transforms/FixIrreducible/basic.ll
 Transforms/FixIrreducible/bug45623.ll
 Transforms/FixIrreducible/callbr.ll



More information about the llvm-commits mailing list