[llvm] [DAGCombiner][LegalizeTypes] Fuse i128 sdiv+srem / udiv+urem into single __divmodti4 / __udivmodti4 call (PR #187908)
Takashi Idobe via llvm-commits
llvm-commits at lists.llvm.org
Sat Apr 4 19:08:47 PDT 2026
https://github.com/Takashiidobe updated https://github.com/llvm/llvm-project/pull/187908
>From 41fad40dfb8c6abbb0cdbf1620473700dc68a308 Mon Sep 17 00:00:00 2001
From: Takashiidobe <idobetakashi at gmail.com>
Date: Sat, 21 Mar 2026 23:34:25 -0400
Subject: [PATCH 01/15] [test] Add i128 sdiv+srem missed-optimization test for
divmod fusion
Add a lit test documenting the current (unoptimized) codegen for i128
sdiv+srem and udiv+urem pairs on x86_64 and AArch64. Both targets
currently emit two separate helper calls (__divti3 + __modti3 or
__udivti3 + __umodti3) rather than a single fused __divmodti4 /
__udivmodti4 call.
The test serves as a baseline and will be updated when the optimization
lands in a follow-up commit.
---
llvm/test/CodeGen/X86/i128-divrem-libcall.ll | 170 +++++++++++++++++++
1 file changed, 170 insertions(+)
create mode 100644 llvm/test/CodeGen/X86/i128-divrem-libcall.ll
diff --git a/llvm/test/CodeGen/X86/i128-divrem-libcall.ll b/llvm/test/CodeGen/X86/i128-divrem-libcall.ll
new file mode 100644
index 0000000000000..c8258ec7873de
--- /dev/null
+++ b/llvm/test/CodeGen/X86/i128-divrem-libcall.ll
@@ -0,0 +1,170 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s --check-prefix=A64
+
+; Test that sdiv+srem / udiv+urem on i128 with the same operands are candidates
+; for fusing into a single __divmodti4 / __udivmodti4 call.
+;
+; Currently this is a missed optimization: two separate helper calls are emitted
+; (__divti3 + __modti3, or __udivti3 + __umodti3) instead of one fused call.
+; See: DAGCombiner::useDivRem, ExpandIntRes_DIVREM, RuntimeLibcalls SDIVREM_I128.
+
+define void @sdivrem_i128(ptr %out, i128 %n, i128 %d) nounwind {
+; X64-LABEL: sdivrem_i128:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rbp
+; X64-NEXT: pushq %r15
+; X64-NEXT: pushq %r14
+; X64-NEXT: pushq %r13
+; X64-NEXT: pushq %r12
+; X64-NEXT: pushq %rbx
+; X64-NEXT: pushq %rax
+; X64-NEXT: movq %r8, %rbx
+; X64-NEXT: movq %rcx, %r14
+; X64-NEXT: movq %rdx, %r15
+; X64-NEXT: movq %rsi, %r12
+; X64-NEXT: movq %rdi, %r13
+; X64-NEXT: movq %rsi, %rdi
+; X64-NEXT: movq %rdx, %rsi
+; X64-NEXT: movq %rcx, %rdx
+; X64-NEXT: movq %r8, %rcx
+; X64-NEXT: callq __divti3 at PLT
+; X64-NEXT: movq %rax, (%rsp) # 8-byte Spill
+; X64-NEXT: movq %rdx, %rbp
+; X64-NEXT: movq %r12, %rdi
+; X64-NEXT: movq %r15, %rsi
+; X64-NEXT: movq %r14, %rdx
+; X64-NEXT: movq %rbx, %rcx
+; X64-NEXT: callq __modti3 at PLT
+; X64-NEXT: movq %rbp, 8(%r13)
+; X64-NEXT: movq (%rsp), %rcx # 8-byte Reload
+; X64-NEXT: movq %rcx, (%r13)
+; X64-NEXT: movq %rdx, 24(%r13)
+; X64-NEXT: movq %rax, 16(%r13)
+; X64-NEXT: addq $8, %rsp
+; X64-NEXT: popq %rbx
+; X64-NEXT: popq %r12
+; X64-NEXT: popq %r13
+; X64-NEXT: popq %r14
+; X64-NEXT: popq %r15
+; X64-NEXT: popq %rbp
+; X64-NEXT: retq
+;
+; A64-LABEL: sdivrem_i128:
+; A64: // %bb.0:
+; A64-NEXT: stp x30, x25, [sp, #-64]! // 16-byte Folded Spill
+; A64-NEXT: stp x24, x23, [sp, #16] // 16-byte Folded Spill
+; A64-NEXT: mov x23, x0
+; A64-NEXT: mov x0, x2
+; A64-NEXT: stp x22, x21, [sp, #32] // 16-byte Folded Spill
+; A64-NEXT: mov x21, x3
+; A64-NEXT: mov x22, x2
+; A64-NEXT: mov x1, x3
+; A64-NEXT: mov x2, x4
+; A64-NEXT: mov x3, x5
+; A64-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
+; A64-NEXT: mov x19, x5
+; A64-NEXT: mov x20, x4
+; A64-NEXT: bl __divti3
+; A64-NEXT: mov x24, x0
+; A64-NEXT: mov x25, x1
+; A64-NEXT: mov x0, x22
+; A64-NEXT: mov x1, x21
+; A64-NEXT: mov x2, x20
+; A64-NEXT: mov x3, x19
+; A64-NEXT: bl __modti3
+; A64-NEXT: stp x24, x25, [x23]
+; A64-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload
+; A64-NEXT: stp x0, x1, [x23, #16]
+; A64-NEXT: ldp x22, x21, [sp, #32] // 16-byte Folded Reload
+; A64-NEXT: ldp x24, x23, [sp, #16] // 16-byte Folded Reload
+; A64-NEXT: ldp x30, x25, [sp], #64 // 16-byte Folded Reload
+; A64-NEXT: ret
+ %q = sdiv i128 %n, %d
+ %r = srem i128 %n, %d
+ %p0 = getelementptr inbounds {i128, i128}, ptr %out, i32 0, i32 0
+ %p1 = getelementptr inbounds {i128, i128}, ptr %out, i32 0, i32 1
+ store i128 %q, ptr %p0, align 16
+ store i128 %r, ptr %p1, align 16
+ ret void
+}
+
+define void @udivrem_i128(ptr %out, i128 %n, i128 %d) nounwind {
+; X64-LABEL: udivrem_i128:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rbp
+; X64-NEXT: pushq %r15
+; X64-NEXT: pushq %r14
+; X64-NEXT: pushq %r13
+; X64-NEXT: pushq %r12
+; X64-NEXT: pushq %rbx
+; X64-NEXT: pushq %rax
+; X64-NEXT: movq %r8, %rbx
+; X64-NEXT: movq %rcx, %r14
+; X64-NEXT: movq %rdx, %r15
+; X64-NEXT: movq %rsi, %r12
+; X64-NEXT: movq %rdi, %r13
+; X64-NEXT: movq %rsi, %rdi
+; X64-NEXT: movq %rdx, %rsi
+; X64-NEXT: movq %rcx, %rdx
+; X64-NEXT: movq %r8, %rcx
+; X64-NEXT: callq __udivti3 at PLT
+; X64-NEXT: movq %rax, (%rsp) # 8-byte Spill
+; X64-NEXT: movq %rdx, %rbp
+; X64-NEXT: movq %r12, %rdi
+; X64-NEXT: movq %r15, %rsi
+; X64-NEXT: movq %r14, %rdx
+; X64-NEXT: movq %rbx, %rcx
+; X64-NEXT: callq __umodti3 at PLT
+; X64-NEXT: movq %rbp, 8(%r13)
+; X64-NEXT: movq (%rsp), %rcx # 8-byte Reload
+; X64-NEXT: movq %rcx, (%r13)
+; X64-NEXT: movq %rdx, 24(%r13)
+; X64-NEXT: movq %rax, 16(%r13)
+; X64-NEXT: addq $8, %rsp
+; X64-NEXT: popq %rbx
+; X64-NEXT: popq %r12
+; X64-NEXT: popq %r13
+; X64-NEXT: popq %r14
+; X64-NEXT: popq %r15
+; X64-NEXT: popq %rbp
+; X64-NEXT: retq
+;
+; A64-LABEL: udivrem_i128:
+; A64: // %bb.0:
+; A64-NEXT: stp x30, x25, [sp, #-64]! // 16-byte Folded Spill
+; A64-NEXT: stp x24, x23, [sp, #16] // 16-byte Folded Spill
+; A64-NEXT: mov x23, x0
+; A64-NEXT: mov x0, x2
+; A64-NEXT: stp x22, x21, [sp, #32] // 16-byte Folded Spill
+; A64-NEXT: mov x21, x3
+; A64-NEXT: mov x22, x2
+; A64-NEXT: mov x1, x3
+; A64-NEXT: mov x2, x4
+; A64-NEXT: mov x3, x5
+; A64-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
+; A64-NEXT: mov x19, x5
+; A64-NEXT: mov x20, x4
+; A64-NEXT: bl __udivti3
+; A64-NEXT: mov x24, x0
+; A64-NEXT: mov x25, x1
+; A64-NEXT: mov x0, x22
+; A64-NEXT: mov x1, x21
+; A64-NEXT: mov x2, x20
+; A64-NEXT: mov x3, x19
+; A64-NEXT: bl __umodti3
+; A64-NEXT: stp x24, x25, [x23]
+; A64-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload
+; A64-NEXT: stp x0, x1, [x23, #16]
+; A64-NEXT: ldp x22, x21, [sp, #32] // 16-byte Folded Reload
+; A64-NEXT: ldp x24, x23, [sp, #16] // 16-byte Folded Reload
+; A64-NEXT: ldp x30, x25, [sp], #64 // 16-byte Folded Reload
+; A64-NEXT: ret
+ %q = udiv i128 %n, %d
+ %r = urem i128 %n, %d
+ %p0 = getelementptr inbounds {i128, i128}, ptr %out, i32 0, i32 0
+ %p1 = getelementptr inbounds {i128, i128}, ptr %out, i32 0, i32 1
+ store i128 %q, ptr %p0, align 16
+ store i128 %r, ptr %p1, align 16
+ ret void
+}
>From 7a1f34d512d5992182cc508db76633bbe7df58d6 Mon Sep 17 00:00:00 2001
From: Takashiidobe <idobetakashi at gmail.com>
Date: Sat, 21 Mar 2026 23:35:11 -0400
Subject: [PATCH 02/15] [DAGCombiner][LegalizeTypes] Fuse i128 sdiv+srem into
single __divmodti4 call
When both the quotient and remainder of a signed (or unsigned) 128-bit
division are needed, LLVM previously emitted two separate helper calls
(__divti3 + __modti3). This patch fuses them into a single call to
__divmodti4 / __udivmodti4, which is already provided by compiler-rt
and libgcc for 64-bit targets.
Three changes are required:
1. RuntimeLibcalls.td: Register __divmodti4 and __udivmodti4 as the
libcall implementations for SDIVREM_I128 and UDIVREM_I128 in
Int128RTLibcalls. This set is already gated to 64-bit targets
(AArch64 ILP64, RISC-V64, PPC64, x86_64, Wasm, etc.) following
the same pattern as __multi3.
2. DAGCombiner.cpp: Fix the early-exit guard in useDivRem() that
unconditionally bailed for non-legal types (including i128).
The condition now allows the combination to proceed when a fused
divrem libcall is available, consistent with the comment above it
("DivMod lib calls can still work on non-legal types").
3. LegalizeIntegerTypes.cpp: Add ExpandIntRes_DIVREM() to handle
ISD::SDIVREM and ISD::UDIVREM during type legalization. Without
this handler the type legalizer would crash ("Do not know how to
expand the result of this operator!") because SDIVREM with an i128
result type had no expansion path. The new handler emits the fused
libcall (using the stack-temp ABI expected by __divmodti4) and falls
back to separate SDIV + SREM nodes when no fused libcall is
registered (e.g. on 32-bit targets).
Fixes the missed optimization tracked in llvm/llvm-project#46350.
---
llvm/include/llvm/IR/RuntimeLibcalls.td | 2 +
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 3 +-
.../SelectionDAG/LegalizeIntegerTypes.cpp | 69 +++++++
llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h | 1 +
llvm/test/CodeGen/X86/i128-divrem-libcall.ll | 171 ++++++------------
5 files changed, 127 insertions(+), 119 deletions(-)
diff --git a/llvm/include/llvm/IR/RuntimeLibcalls.td b/llvm/include/llvm/IR/RuntimeLibcalls.td
index a0f505f1fda2f..0e2f20a74aa23 100644
--- a/llvm/include/llvm/IR/RuntimeLibcalls.td
+++ b/llvm/include/llvm/IR/RuntimeLibcalls.td
@@ -1788,6 +1788,8 @@ defset list<RuntimeLibcallImpl> Int128RTLibcalls = {
def __lshrti3 : RuntimeLibcallImpl<SRL_I128>;
def __ashrti3 : RuntimeLibcallImpl<SRA_I128>;
def __multi3 : RuntimeLibcallImpl<MUL_I128>;
+ def __divmodti4 : RuntimeLibcallImpl<SDIVREM_I128>;
+ def __udivmodti4 : RuntimeLibcallImpl<UDIVREM_I128>;
}
//--------------------------------------------------------------------
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 383e45c5ea3a8..22cb979c9d8c3 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -5057,7 +5057,8 @@ SDValue DAGCombiner::useDivRem(SDNode *Node) {
if (VT.isVector() || !VT.isInteger())
return SDValue();
- if (!TLI.isTypeLegal(VT) && !TLI.isOperationCustom(DivRemOpc, VT))
+ if (!TLI.isTypeLegal(VT) && !TLI.isOperationCustom(DivRemOpc, VT) &&
+ !isDivRemLibcallAvailable(Node, isSigned, DAG))
return SDValue();
// If DIVREM is going to get expanded into a libcall,
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index c6a4fe0b64cd7..0b036ce546be6 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -3114,6 +3114,10 @@ void DAGTypeLegalizer::ExpandIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::READCYCLECOUNTER:
case ISD::READSTEADYCOUNTER: ExpandIntRes_READCOUNTER(N, Lo, Hi); break;
case ISD::SDIV: ExpandIntRes_SDIV(N, Lo, Hi); break;
+ case ISD::SDIVREM:
+ case ISD::UDIVREM:
+ ExpandIntRes_DIVREM(N, Lo, Hi);
+ break;
case ISD::SIGN_EXTEND: ExpandIntRes_SIGN_EXTEND(N, Lo, Hi); break;
case ISD::SIGN_EXTEND_INREG: ExpandIntRes_SIGN_EXTEND_INREG(N, Lo, Hi); break;
case ISD::SREM: ExpandIntRes_SREM(N, Lo, Hi); break;
@@ -4901,6 +4905,71 @@ void DAGTypeLegalizer::ExpandIntRes_SADDSUBO(SDNode *Node,
ReplaceValueWith(SDValue(Node, 1), Ovf);
}
+void DAGTypeLegalizer::ExpandIntRes_DIVREM(SDNode *N, SDValue &Lo,
+ SDValue &Hi) {
+ SDLoc dl(N);
+ EVT VT = N->getValueType(0);
+ bool isSigned = (N->getOpcode() == ISD::SDIVREM);
+ RTLIB::Libcall LC = isSigned ? RTLIB::SDIVREM_I128 : RTLIB::UDIVREM_I128;
+
+ // If no fused divrem libcall is available, fall back to separate div and rem
+ // nodes that the existing type-legalization handlers can expand
+ // independently.
+ if (DAG.getLibcalls().getLibcallImpl(LC) == RTLIB::Unsupported) {
+ unsigned DivOp = isSigned ? ISD::SDIV : ISD::UDIV;
+ unsigned RemOp = isSigned ? ISD::SREM : ISD::UREM;
+ SDValue Ops[2] = {N->getOperand(0), N->getOperand(1)};
+ SDValue Q = DAG.getNode(DivOp, dl, VT, Ops);
+ SDValue R = DAG.getNode(RemOp, dl, VT, Ops);
+ SplitInteger(Q, Lo, Hi);
+ ReplaceValueWith(SDValue(N, 1), R);
+ return;
+ }
+
+ // Emit __divmodti4 / __udivmodti4:
+ // RetTy libcall(RetTy a, RetTy b, RetTy *rem)
+ // The quotient is the return value; the remainder is written via the pointer.
+ Type *RetTy = VT.getTypeForEVT(*DAG.getContext());
+ TargetLowering::ArgListTy Args;
+ for (const SDValue &Op : N->op_values()) {
+ TargetLowering::ArgListEntry Entry(
+ Op, Op.getValueType().getTypeForEVT(*DAG.getContext()));
+ Entry.IsSExt = isSigned;
+ Entry.IsZExt = !isSigned;
+ Args.push_back(Entry);
+ }
+
+ SDValue FIPtr = DAG.CreateStackTemporary(VT);
+ TargetLowering::ArgListEntry PtrEntry(
+ FIPtr, PointerType::getUnqual(RetTy->getContext()));
+ PtrEntry.IsSExt = PtrEntry.IsZExt = false;
+ Args.push_back(PtrEntry);
+
+ RTLIB::LibcallImpl LCImpl = DAG.getLibcalls().getLibcallImpl(LC);
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ CLI.setDebugLoc(dl)
+ .setChain(DAG.getEntryNode())
+ .setLibCallee(
+ DAG.getLibcalls().getLibcallImplCallingConv(LCImpl), RetTy,
+ DAG.getExternalSymbol(LCImpl, TLI.getPointerTy(DAG.getDataLayout())),
+ std::move(Args))
+ .setSExtResult(isSigned)
+ .setZExtResult(!isSigned);
+
+ std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
+
+ // Quotient is the return value; split it into Lo/Hi for the expanded type.
+ SplitInteger(CallInfo.first, Lo, Hi);
+
+ // Remainder is written to the stack temporary; load it back and register
+ // it as the replacement for result 1 of the original SDIVREM/UDIVREM node.
+ int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex();
+ SDValue Rem = DAG.getLoad(
+ VT, dl, CallInfo.second, FIPtr,
+ MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
+ ReplaceValueWith(SDValue(N, 1), Rem);
+}
+
void DAGTypeLegalizer::ExpandIntRes_SDIV(SDNode *N,
SDValue &Lo, SDValue &Hi) {
EVT VT = N->getValueType(0);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index 4362845450acf..e1c11e1c35a31 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -472,6 +472,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
void ExpandIntRes_BSWAP (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_PARITY (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_MUL (SDNode *N, SDValue &Lo, SDValue &Hi);
+ void ExpandIntRes_DIVREM(SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_SDIV (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_SREM (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_UDIV (SDNode *N, SDValue &Lo, SDValue &Hi);
diff --git a/llvm/test/CodeGen/X86/i128-divrem-libcall.ll b/llvm/test/CodeGen/X86/i128-divrem-libcall.ll
index c8258ec7873de..ee40d39bbd0e0 100644
--- a/llvm/test/CodeGen/X86/i128-divrem-libcall.ll
+++ b/llvm/test/CodeGen/X86/i128-divrem-libcall.ll
@@ -2,83 +2,50 @@
; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefix=X64
; RUN: llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s --check-prefix=A64
-; Test that sdiv+srem / udiv+urem on i128 with the same operands are candidates
-; for fusing into a single __divmodti4 / __udivmodti4 call.
-;
-; Currently this is a missed optimization: two separate helper calls are emitted
-; (__divti3 + __modti3, or __udivti3 + __umodti3) instead of one fused call.
-; See: DAGCombiner::useDivRem, ExpandIntRes_DIVREM, RuntimeLibcalls SDIVREM_I128.
+; Verify that sdiv+srem / udiv+urem on i128 with the same operands lower to a
+; single __divmodti4 / __udivmodti4 call rather than two separate helper calls.
+; DAGCombiner::useDivRem fuses the pair into ISD::SDIVREM/UDIVREM, which is
+; then expanded to the fused libcall via ExpandIntRes_DIVREM in the type
+; legalizer.
define void @sdivrem_i128(ptr %out, i128 %n, i128 %d) nounwind {
; X64-LABEL: sdivrem_i128:
; X64: # %bb.0:
-; X64-NEXT: pushq %rbp
-; X64-NEXT: pushq %r15
-; X64-NEXT: pushq %r14
-; X64-NEXT: pushq %r13
-; X64-NEXT: pushq %r12
; X64-NEXT: pushq %rbx
-; X64-NEXT: pushq %rax
-; X64-NEXT: movq %r8, %rbx
-; X64-NEXT: movq %rcx, %r14
-; X64-NEXT: movq %rdx, %r15
-; X64-NEXT: movq %rsi, %r12
-; X64-NEXT: movq %rdi, %r13
+; X64-NEXT: subq $16, %rsp
+; X64-NEXT: movq %r8, %rax
+; X64-NEXT: movq %rdi, %rbx
+; X64-NEXT: movq %rsp, %r8
; X64-NEXT: movq %rsi, %rdi
; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rcx, %rdx
-; X64-NEXT: movq %r8, %rcx
-; X64-NEXT: callq __divti3 at PLT
-; X64-NEXT: movq %rax, (%rsp) # 8-byte Spill
-; X64-NEXT: movq %rdx, %rbp
-; X64-NEXT: movq %r12, %rdi
-; X64-NEXT: movq %r15, %rsi
-; X64-NEXT: movq %r14, %rdx
-; X64-NEXT: movq %rbx, %rcx
-; X64-NEXT: callq __modti3 at PLT
-; X64-NEXT: movq %rbp, 8(%r13)
-; X64-NEXT: movq (%rsp), %rcx # 8-byte Reload
-; X64-NEXT: movq %rcx, (%r13)
-; X64-NEXT: movq %rdx, 24(%r13)
-; X64-NEXT: movq %rax, 16(%r13)
-; X64-NEXT: addq $8, %rsp
+; X64-NEXT: movq %rax, %rcx
+; X64-NEXT: callq __divmodti4 at PLT
+; X64-NEXT: movaps (%rsp), %xmm0
+; X64-NEXT: movq %rdx, 8(%rbx)
+; X64-NEXT: movq %rax, (%rbx)
+; X64-NEXT: movaps %xmm0, 16(%rbx)
+; X64-NEXT: addq $16, %rsp
; X64-NEXT: popq %rbx
-; X64-NEXT: popq %r12
-; X64-NEXT: popq %r13
-; X64-NEXT: popq %r14
-; X64-NEXT: popq %r15
-; X64-NEXT: popq %rbp
; X64-NEXT: retq
;
; A64-LABEL: sdivrem_i128:
; A64: // %bb.0:
-; A64-NEXT: stp x30, x25, [sp, #-64]! // 16-byte Folded Spill
-; A64-NEXT: stp x24, x23, [sp, #16] // 16-byte Folded Spill
-; A64-NEXT: mov x23, x0
-; A64-NEXT: mov x0, x2
-; A64-NEXT: stp x22, x21, [sp, #32] // 16-byte Folded Spill
-; A64-NEXT: mov x21, x3
-; A64-NEXT: mov x22, x2
+; A64-NEXT: sub sp, sp, #32
+; A64-NEXT: mov x8, x4
+; A64-NEXT: stp x30, x19, [sp, #16] // 16-byte Folded Spill
; A64-NEXT: mov x1, x3
-; A64-NEXT: mov x2, x4
+; A64-NEXT: mov x19, x0
+; A64-NEXT: mov x4, sp
+; A64-NEXT: mov x0, x2
+; A64-NEXT: mov x2, x8
; A64-NEXT: mov x3, x5
-; A64-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
-; A64-NEXT: mov x19, x5
-; A64-NEXT: mov x20, x4
-; A64-NEXT: bl __divti3
-; A64-NEXT: mov x24, x0
-; A64-NEXT: mov x25, x1
-; A64-NEXT: mov x0, x22
-; A64-NEXT: mov x1, x21
-; A64-NEXT: mov x2, x20
-; A64-NEXT: mov x3, x19
-; A64-NEXT: bl __modti3
-; A64-NEXT: stp x24, x25, [x23]
-; A64-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload
-; A64-NEXT: stp x0, x1, [x23, #16]
-; A64-NEXT: ldp x22, x21, [sp, #32] // 16-byte Folded Reload
-; A64-NEXT: ldp x24, x23, [sp, #16] // 16-byte Folded Reload
-; A64-NEXT: ldp x30, x25, [sp], #64 // 16-byte Folded Reload
+; A64-NEXT: bl __divmodti4
+; A64-NEXT: ldp x8, x9, [sp]
+; A64-NEXT: stp x0, x1, [x19]
+; A64-NEXT: stp x8, x9, [x19, #16]
+; A64-NEXT: ldp x30, x19, [sp, #16] // 16-byte Folded Reload
+; A64-NEXT: add sp, sp, #32
; A64-NEXT: ret
%q = sdiv i128 %n, %d
%r = srem i128 %n, %d
@@ -92,73 +59,41 @@ define void @sdivrem_i128(ptr %out, i128 %n, i128 %d) nounwind {
define void @udivrem_i128(ptr %out, i128 %n, i128 %d) nounwind {
; X64-LABEL: udivrem_i128:
; X64: # %bb.0:
-; X64-NEXT: pushq %rbp
-; X64-NEXT: pushq %r15
-; X64-NEXT: pushq %r14
-; X64-NEXT: pushq %r13
-; X64-NEXT: pushq %r12
; X64-NEXT: pushq %rbx
-; X64-NEXT: pushq %rax
-; X64-NEXT: movq %r8, %rbx
-; X64-NEXT: movq %rcx, %r14
-; X64-NEXT: movq %rdx, %r15
-; X64-NEXT: movq %rsi, %r12
-; X64-NEXT: movq %rdi, %r13
+; X64-NEXT: subq $16, %rsp
+; X64-NEXT: movq %r8, %rax
+; X64-NEXT: movq %rdi, %rbx
+; X64-NEXT: movq %rsp, %r8
; X64-NEXT: movq %rsi, %rdi
; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rcx, %rdx
-; X64-NEXT: movq %r8, %rcx
-; X64-NEXT: callq __udivti3 at PLT
-; X64-NEXT: movq %rax, (%rsp) # 8-byte Spill
-; X64-NEXT: movq %rdx, %rbp
-; X64-NEXT: movq %r12, %rdi
-; X64-NEXT: movq %r15, %rsi
-; X64-NEXT: movq %r14, %rdx
-; X64-NEXT: movq %rbx, %rcx
-; X64-NEXT: callq __umodti3 at PLT
-; X64-NEXT: movq %rbp, 8(%r13)
-; X64-NEXT: movq (%rsp), %rcx # 8-byte Reload
-; X64-NEXT: movq %rcx, (%r13)
-; X64-NEXT: movq %rdx, 24(%r13)
-; X64-NEXT: movq %rax, 16(%r13)
-; X64-NEXT: addq $8, %rsp
+; X64-NEXT: movq %rax, %rcx
+; X64-NEXT: callq __udivmodti4 at PLT
+; X64-NEXT: movaps (%rsp), %xmm0
+; X64-NEXT: movq %rdx, 8(%rbx)
+; X64-NEXT: movq %rax, (%rbx)
+; X64-NEXT: movaps %xmm0, 16(%rbx)
+; X64-NEXT: addq $16, %rsp
; X64-NEXT: popq %rbx
-; X64-NEXT: popq %r12
-; X64-NEXT: popq %r13
-; X64-NEXT: popq %r14
-; X64-NEXT: popq %r15
-; X64-NEXT: popq %rbp
; X64-NEXT: retq
;
; A64-LABEL: udivrem_i128:
; A64: // %bb.0:
-; A64-NEXT: stp x30, x25, [sp, #-64]! // 16-byte Folded Spill
-; A64-NEXT: stp x24, x23, [sp, #16] // 16-byte Folded Spill
-; A64-NEXT: mov x23, x0
-; A64-NEXT: mov x0, x2
-; A64-NEXT: stp x22, x21, [sp, #32] // 16-byte Folded Spill
-; A64-NEXT: mov x21, x3
-; A64-NEXT: mov x22, x2
+; A64-NEXT: sub sp, sp, #32
+; A64-NEXT: mov x8, x4
+; A64-NEXT: stp x30, x19, [sp, #16] // 16-byte Folded Spill
; A64-NEXT: mov x1, x3
-; A64-NEXT: mov x2, x4
+; A64-NEXT: mov x19, x0
+; A64-NEXT: mov x4, sp
+; A64-NEXT: mov x0, x2
+; A64-NEXT: mov x2, x8
; A64-NEXT: mov x3, x5
-; A64-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
-; A64-NEXT: mov x19, x5
-; A64-NEXT: mov x20, x4
-; A64-NEXT: bl __udivti3
-; A64-NEXT: mov x24, x0
-; A64-NEXT: mov x25, x1
-; A64-NEXT: mov x0, x22
-; A64-NEXT: mov x1, x21
-; A64-NEXT: mov x2, x20
-; A64-NEXT: mov x3, x19
-; A64-NEXT: bl __umodti3
-; A64-NEXT: stp x24, x25, [x23]
-; A64-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload
-; A64-NEXT: stp x0, x1, [x23, #16]
-; A64-NEXT: ldp x22, x21, [sp, #32] // 16-byte Folded Reload
-; A64-NEXT: ldp x24, x23, [sp, #16] // 16-byte Folded Reload
-; A64-NEXT: ldp x30, x25, [sp], #64 // 16-byte Folded Reload
+; A64-NEXT: bl __udivmodti4
+; A64-NEXT: ldp x8, x9, [sp]
+; A64-NEXT: stp x0, x1, [x19]
+; A64-NEXT: stp x8, x9, [x19, #16]
+; A64-NEXT: ldp x30, x19, [sp, #16] // 16-byte Folded Reload
+; A64-NEXT: add sp, sp, #32
; A64-NEXT: ret
%q = udiv i128 %n, %d
%r = urem i128 %n, %d
>From 14541127894b7ff43fee4e4015208212a6dcaad8 Mon Sep 17 00:00:00 2001
From: Takashiidobe <idobetakashi at gmail.com>
Date: Sat, 21 Mar 2026 23:41:26 -0400
Subject: [PATCH 03/15] [test] Add i686 fallback check and Windows validation
for i128 divrem
Extend the i128-divrem-libcall.ll test with:
- A third RUN line for i686-linux-gnu using CHECK-NOT to verify that
__divmodti4/__udivmodti4 are never emitted on 32-bit targets where
Int128RTLibcalls (and therefore the SDIVREM_I128 libcall) is not
registered. This exercises the fallback path in ExpandIntRes_DIVREM
that replaces the SDIVREM node with separate SDIV + SREM.
- Confirmed that x86_64-pc-windows-msvc and x86_64-win32 triples emit
__divmodti4 correctly: compiler-rt includes divmodti4.c in
GENERIC_SOURCES for all targets, and the existing divmod128.ll
Windows tests continue to pass without modification.
---
llvm/test/CodeGen/X86/i128-divrem-libcall.ll | 57 ++++++++++++++++++++
1 file changed, 57 insertions(+)
diff --git a/llvm/test/CodeGen/X86/i128-divrem-libcall.ll b/llvm/test/CodeGen/X86/i128-divrem-libcall.ll
index ee40d39bbd0e0..2486ddb6cbbde 100644
--- a/llvm/test/CodeGen/X86/i128-divrem-libcall.ll
+++ b/llvm/test/CodeGen/X86/i128-divrem-libcall.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefix=X64
; RUN: llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s --check-prefix=A64
+; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefix=X86-32
; Verify that sdiv+srem / udiv+urem on i128 with the same operands lower to a
; single __divmodti4 / __udivmodti4 call rather than two separate helper calls.
@@ -103,3 +104,59 @@ define void @udivrem_i128(ptr %out, i128 %n, i128 %d) nounwind {
store i128 %r, ptr %p1, align 16
ret void
}
+
+; On 32-bit targets __divmodti4/__udivmodti4 are not registered (Int128RTLibcalls
+; is only added to 64-bit targets), so the fallback path in ExpandIntRes_DIVREM
+; fires: the SDIVREM node is replaced by separate SDIV + SREM, which then expand
+; inline. Verify no fused call is emitted and the code compiles without crashing.
+
+; X86-32-NOT: __divmodti4
+; X86-32-NOT: __udivmodti4
+
+define void @sdivrem_i128_fallback(ptr %out, i128 %n, i128 %d) nounwind {
+; X64-LABEL: sdivrem_i128_fallback:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rbx
+; X64-NEXT: subq $16, %rsp
+; X64-NEXT: movq %r8, %rax
+; X64-NEXT: movq %rdi, %rbx
+; X64-NEXT: movq %rsp, %r8
+; X64-NEXT: movq %rsi, %rdi
+; X64-NEXT: movq %rdx, %rsi
+; X64-NEXT: movq %rcx, %rdx
+; X64-NEXT: movq %rax, %rcx
+; X64-NEXT: callq __divmodti4 at PLT
+; X64-NEXT: movaps (%rsp), %xmm0
+; X64-NEXT: movq %rdx, 8(%rbx)
+; X64-NEXT: movq %rax, (%rbx)
+; X64-NEXT: movaps %xmm0, 16(%rbx)
+; X64-NEXT: addq $16, %rsp
+; X64-NEXT: popq %rbx
+; X64-NEXT: retq
+;
+; A64-LABEL: sdivrem_i128_fallback:
+; A64: // %bb.0:
+; A64-NEXT: sub sp, sp, #32
+; A64-NEXT: mov x8, x4
+; A64-NEXT: stp x30, x19, [sp, #16] // 16-byte Folded Spill
+; A64-NEXT: mov x1, x3
+; A64-NEXT: mov x19, x0
+; A64-NEXT: mov x4, sp
+; A64-NEXT: mov x0, x2
+; A64-NEXT: mov x2, x8
+; A64-NEXT: mov x3, x5
+; A64-NEXT: bl __divmodti4
+; A64-NEXT: ldp x8, x9, [sp]
+; A64-NEXT: stp x0, x1, [x19]
+; A64-NEXT: stp x8, x9, [x19, #16]
+; A64-NEXT: ldp x30, x19, [sp, #16] // 16-byte Folded Reload
+; A64-NEXT: add sp, sp, #32
+; A64-NEXT: ret
+ %q = sdiv i128 %n, %d
+ %r = srem i128 %n, %d
+ %p0 = getelementptr inbounds {i128, i128}, ptr %out, i32 0, i32 0
+ %p1 = getelementptr inbounds {i128, i128}, ptr %out, i32 0, i32 1
+ store i128 %q, ptr %p0, align 16
+ store i128 %r, ptr %p1, align 16
+ ret void
+}
>From 0f8e2005fd89cd6797d091abdd6e173738cd5562 Mon Sep 17 00:00:00 2001
From: Takashiidobe <idobetakashi at gmail.com>
Date: Sun, 22 Mar 2026 00:01:07 -0400
Subject: [PATCH 04/15] [test] Add RISC-V64 coverage to i128 divrem libcall
test
Add a riscv64-linux-gnu RUN line (-mattr=+m) and corresponding RV64
check blocks for sdivrem_i128, udivrem_i128, and sdivrem_i128_fallback.
Verifies that the divmod fusion fires on RISC-V64 just as on x86_64 and
AArch64: a single `call __divmodti4` / `call __udivmodti4` is emitted
instead of separate __divti3 + __modti3 calls.
---
llvm/test/CodeGen/X86/i128-divrem-libcall.ll | 79 +++++++++++++++++++-
1 file changed, 76 insertions(+), 3 deletions(-)
diff --git a/llvm/test/CodeGen/X86/i128-divrem-libcall.ll b/llvm/test/CodeGen/X86/i128-divrem-libcall.ll
index 2486ddb6cbbde..c50c3dda5e02f 100644
--- a/llvm/test/CodeGen/X86/i128-divrem-libcall.ll
+++ b/llvm/test/CodeGen/X86/i128-divrem-libcall.ll
@@ -1,7 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
-; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefix=X64
-; RUN: llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s --check-prefix=A64
-; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefix=X86-32
+; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s --check-prefix=A64
+; RUN: llc < %s -mtriple=riscv64-linux-gnu -mattr=+m | FileCheck %s --check-prefix=RV64
+; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefix=X86-32
; Verify that sdiv+srem / udiv+urem on i128 with the same operands lower to a
; single __divmodti4 / __udivmodti4 call rather than two separate helper calls.
@@ -48,6 +49,30 @@ define void @sdivrem_i128(ptr %out, i128 %n, i128 %d) nounwind {
; A64-NEXT: ldp x30, x19, [sp, #16] // 16-byte Folded Reload
; A64-NEXT: add sp, sp, #32
; A64-NEXT: ret
+;
+; RV64-LABEL: sdivrem_i128:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -32
+; RV64-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT: mv a5, a4
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: mv a4, sp
+; RV64-NEXT: mv a0, a1
+; RV64-NEXT: mv a1, a2
+; RV64-NEXT: mv a2, a3
+; RV64-NEXT: mv a3, a5
+; RV64-NEXT: call __divmodti4
+; RV64-NEXT: ld a2, 0(sp)
+; RV64-NEXT: ld a3, 8(sp)
+; RV64-NEXT: sd a0, 0(s0)
+; RV64-NEXT: sd a1, 8(s0)
+; RV64-NEXT: sd a2, 16(s0)
+; RV64-NEXT: sd a3, 24(s0)
+; RV64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 32
+; RV64-NEXT: ret
%q = sdiv i128 %n, %d
%r = srem i128 %n, %d
%p0 = getelementptr inbounds {i128, i128}, ptr %out, i32 0, i32 0
@@ -96,6 +121,30 @@ define void @udivrem_i128(ptr %out, i128 %n, i128 %d) nounwind {
; A64-NEXT: ldp x30, x19, [sp, #16] // 16-byte Folded Reload
; A64-NEXT: add sp, sp, #32
; A64-NEXT: ret
+;
+; RV64-LABEL: udivrem_i128:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -32
+; RV64-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT: mv a5, a4
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: mv a4, sp
+; RV64-NEXT: mv a0, a1
+; RV64-NEXT: mv a1, a2
+; RV64-NEXT: mv a2, a3
+; RV64-NEXT: mv a3, a5
+; RV64-NEXT: call __udivmodti4
+; RV64-NEXT: ld a2, 0(sp)
+; RV64-NEXT: ld a3, 8(sp)
+; RV64-NEXT: sd a0, 0(s0)
+; RV64-NEXT: sd a1, 8(s0)
+; RV64-NEXT: sd a2, 16(s0)
+; RV64-NEXT: sd a3, 24(s0)
+; RV64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 32
+; RV64-NEXT: ret
%q = udiv i128 %n, %d
%r = urem i128 %n, %d
%p0 = getelementptr inbounds {i128, i128}, ptr %out, i32 0, i32 0
@@ -152,6 +201,30 @@ define void @sdivrem_i128_fallback(ptr %out, i128 %n, i128 %d) nounwind {
; A64-NEXT: ldp x30, x19, [sp, #16] // 16-byte Folded Reload
; A64-NEXT: add sp, sp, #32
; A64-NEXT: ret
+;
+; RV64-LABEL: sdivrem_i128_fallback:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -32
+; RV64-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT: mv a5, a4
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: mv a4, sp
+; RV64-NEXT: mv a0, a1
+; RV64-NEXT: mv a1, a2
+; RV64-NEXT: mv a2, a3
+; RV64-NEXT: mv a3, a5
+; RV64-NEXT: call __divmodti4
+; RV64-NEXT: ld a2, 0(sp)
+; RV64-NEXT: ld a3, 8(sp)
+; RV64-NEXT: sd a0, 0(s0)
+; RV64-NEXT: sd a1, 8(s0)
+; RV64-NEXT: sd a2, 16(s0)
+; RV64-NEXT: sd a3, 24(s0)
+; RV64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 32
+; RV64-NEXT: ret
%q = sdiv i128 %n, %d
%r = srem i128 %n, %d
%p0 = getelementptr inbounds {i128, i128}, ptr %out, i32 0, i32 0
>From ab85461d89ee6fc01f244b3f6ee54df61139572b Mon Sep 17 00:00:00 2001
From: Takashiidobe <idobetakashi at gmail.com>
Date: Sun, 22 Mar 2026 09:04:52 -0400
Subject: [PATCH 05/15] update tests to handle more targets.
---
.../SelectionDAG/LegalizeIntegerTypes.cpp | 25 +-
.../CodeGen/Generic/i128-divrem-libcall.ll | 53 ++++
llvm/test/CodeGen/X86/i128-divrem-libcall.ll | 235 ------------------
3 files changed, 65 insertions(+), 248 deletions(-)
create mode 100644 llvm/test/CodeGen/Generic/i128-divrem-libcall.ll
delete mode 100644 llvm/test/CodeGen/X86/i128-divrem-libcall.ll
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 0b036ce546be6..6318a077da6ed 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -4909,15 +4909,13 @@ void DAGTypeLegalizer::ExpandIntRes_DIVREM(SDNode *N, SDValue &Lo,
SDValue &Hi) {
SDLoc dl(N);
EVT VT = N->getValueType(0);
- bool isSigned = (N->getOpcode() == ISD::SDIVREM);
- RTLIB::Libcall LC = isSigned ? RTLIB::SDIVREM_I128 : RTLIB::UDIVREM_I128;
+ bool IsSigned = (N->getOpcode() == ISD::SDIVREM);
+ RTLIB::Libcall LC = IsSigned ? RTLIB::SDIVREM_I128 : RTLIB::UDIVREM_I128;
- // If no fused divrem libcall is available, fall back to separate div and rem
- // nodes that the existing type-legalization handlers can expand
- // independently.
+ // If no fused divrem libcall is available, fall back to separate div and rem.
if (DAG.getLibcalls().getLibcallImpl(LC) == RTLIB::Unsupported) {
- unsigned DivOp = isSigned ? ISD::SDIV : ISD::UDIV;
- unsigned RemOp = isSigned ? ISD::SREM : ISD::UREM;
+ unsigned DivOp = IsSigned ? ISD::SDIV : ISD::UDIV;
+ unsigned RemOp = IsSigned ? ISD::SREM : ISD::UREM;
SDValue Ops[2] = {N->getOperand(0), N->getOperand(1)};
SDValue Q = DAG.getNode(DivOp, dl, VT, Ops);
SDValue R = DAG.getNode(RemOp, dl, VT, Ops);
@@ -4928,21 +4926,22 @@ void DAGTypeLegalizer::ExpandIntRes_DIVREM(SDNode *N, SDValue &Lo,
// Emit __divmodti4 / __udivmodti4:
// RetTy libcall(RetTy a, RetTy b, RetTy *rem)
- // The quotient is the return value; the remainder is written via the pointer.
+ // The quotient is the return value; the remainder is written via pointer.
Type *RetTy = VT.getTypeForEVT(*DAG.getContext());
TargetLowering::ArgListTy Args;
for (const SDValue &Op : N->op_values()) {
TargetLowering::ArgListEntry Entry(
Op, Op.getValueType().getTypeForEVT(*DAG.getContext()));
- Entry.IsSExt = isSigned;
- Entry.IsZExt = !isSigned;
+ Entry.IsSExt = IsSigned;
+ Entry.IsZExt = !IsSigned;
Args.push_back(Entry);
}
+ // The libcall writes the remainder via a pointer argument; allocate a stack
+ // slot for it and pass its address as the third argument.
SDValue FIPtr = DAG.CreateStackTemporary(VT);
TargetLowering::ArgListEntry PtrEntry(
FIPtr, PointerType::getUnqual(RetTy->getContext()));
- PtrEntry.IsSExt = PtrEntry.IsZExt = false;
Args.push_back(PtrEntry);
RTLIB::LibcallImpl LCImpl = DAG.getLibcalls().getLibcallImpl(LC);
@@ -4953,8 +4952,8 @@ void DAGTypeLegalizer::ExpandIntRes_DIVREM(SDNode *N, SDValue &Lo,
DAG.getLibcalls().getLibcallImplCallingConv(LCImpl), RetTy,
DAG.getExternalSymbol(LCImpl, TLI.getPointerTy(DAG.getDataLayout())),
std::move(Args))
- .setSExtResult(isSigned)
- .setZExtResult(!isSigned);
+ .setSExtResult(IsSigned)
+ .setZExtResult(!IsSigned);
std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
diff --git a/llvm/test/CodeGen/Generic/i128-divrem-libcall.ll b/llvm/test/CodeGen/Generic/i128-divrem-libcall.ll
new file mode 100644
index 0000000000000..759ae41fa2226
--- /dev/null
+++ b/llvm/test/CodeGen/Generic/i128-divrem-libcall.ll
@@ -0,0 +1,53 @@
+; 64-bit targets: fused __divmodti4 / __udivmodti4
+; RUN: %if x86-registered-target %{ llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=CHECK,FUSED %}
+; RUN: %if x86-registered-target %{ llc < %s -mtriple=x86_64-pc-windows-msvc | FileCheck %s --check-prefixes=CHECK,FUSED %}
+; RUN: %if aarch64-registered-target %{ llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s --check-prefixes=CHECK,FUSED %}
+; RUN: %if riscv-registered-target %{ llc < %s -mtriple=riscv64-linux-gnu | FileCheck %s --check-prefixes=CHECK,FUSED %}
+; RUN: %if riscv-registered-target %{ llc < %s -mtriple=riscv64-linux-gnu -mattr=+m | FileCheck %s --check-prefixes=CHECK,FUSED %}
+; RUN: %if powerpc-registered-target %{ llc < %s -mtriple=powerpc64-linux-gnu | FileCheck %s --check-prefixes=CHECK,FUSED %}
+; RUN: %if sparc-registered-target %{ llc < %s -mtriple=sparcv9-linux-gnu | FileCheck %s --check-prefixes=CHECK,FUSED %}
+; RUN: %if loongarch-registered-target %{ llc < %s -mtriple=loongarch64-linux-gnu | FileCheck %s --check-prefixes=CHECK,FUSED %}
+; RUN: %if webassembly-registered-target %{ llc < %s -mtriple=wasm32-unknown-unknown | FileCheck %s --check-prefixes=CHECK,FUSED %}
+; RUN: %if webassembly-registered-target %{ llc < %s -mtriple=wasm64-unknown-unknown | FileCheck %s --check-prefixes=CHECK,FUSED %}
+
+; 32-bit / ILP32 targets: no fused libcall
+; RUN: %if x86-registered-target %{ llc < %s -mtriple=i386-linux-gnu | FileCheck %s --check-prefixes=CHECK,SPLIT %}
+; RUN: %if x86-registered-target %{ llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=CHECK,SPLIT %}
+; RUN: %if arm-registered-target %{ llc < %s -mtriple=armv6-linux-gnueabihf | FileCheck %s --check-prefixes=CHECK,SPLIT %}
+; RUN: %if arm-registered-target %{ llc < %s -mtriple=armv7-linux-gnueabi | FileCheck %s --check-prefixes=CHECK,SPLIT %}
+; RUN: %if aarch64-registered-target %{ llc < %s -mtriple=aarch64_32-apple-watchos | FileCheck %s --check-prefixes=CHECK,SPLIT %}
+; RUN: %if riscv-registered-target %{ llc < %s -mtriple=riscv32-linux-gnu | FileCheck %s --check-prefixes=CHECK,SPLIT %}
+; RUN: %if riscv-registered-target %{ llc < %s -mtriple=riscv32-linux-gnu -mattr=+m | FileCheck %s --check-prefixes=CHECK,SPLIT %}
+; RUN: %if arm-registered-target %{ llc < %s -mtriple=armv7-none-eabi | FileCheck %s --check-prefixes=CHECK,SPLIT %}
+
+; Verify that sdiv+srem / udiv+urem on i128 fuse into a single __divmodti4 /
+; __udivmodti4 call on targets where the libcall is available (64-bit targets
+; and wasm), and do not on targets where it is not (32-bit / ILP32).
+;
+; The lowering varies by target:
+; 64-bit targets and wasm: fused __divmodti4 / __udivmodti4
+; 32-bit targets that lack the fused call may lower to:
+; - separate __divti3 + __modti3 / __udivti3 + __umodti3 calls, or
+; - fully inline expansion (e.g. i686)
+
+define void @sdivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
+; CHECK-LABEL: sdivrem_i128:
+; FUSED: __divmodti4
+; SPLIT-NOT: __divmodti4
+ %q = sdiv i128 %n, %d
+ %r = srem i128 %n, %d
+ store i128 %q, ptr %q_out
+ store i128 %r, ptr %r_out
+ ret void
+}
+
+define void @udivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
+; CHECK-LABEL: udivrem_i128:
+; FUSED: __udivmodti4
+; SPLIT-NOT: __udivmodti4
+ %q = udiv i128 %n, %d
+ %r = urem i128 %n, %d
+ store i128 %q, ptr %q_out
+ store i128 %r, ptr %r_out
+ ret void
+}
diff --git a/llvm/test/CodeGen/X86/i128-divrem-libcall.ll b/llvm/test/CodeGen/X86/i128-divrem-libcall.ll
deleted file mode 100644
index c50c3dda5e02f..0000000000000
--- a/llvm/test/CodeGen/X86/i128-divrem-libcall.ll
+++ /dev/null
@@ -1,235 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
-; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefix=X64
-; RUN: llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s --check-prefix=A64
-; RUN: llc < %s -mtriple=riscv64-linux-gnu -mattr=+m | FileCheck %s --check-prefix=RV64
-; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefix=X86-32
-
-; Verify that sdiv+srem / udiv+urem on i128 with the same operands lower to a
-; single __divmodti4 / __udivmodti4 call rather than two separate helper calls.
-; DAGCombiner::useDivRem fuses the pair into ISD::SDIVREM/UDIVREM, which is
-; then expanded to the fused libcall via ExpandIntRes_DIVREM in the type
-; legalizer.
-
-define void @sdivrem_i128(ptr %out, i128 %n, i128 %d) nounwind {
-; X64-LABEL: sdivrem_i128:
-; X64: # %bb.0:
-; X64-NEXT: pushq %rbx
-; X64-NEXT: subq $16, %rsp
-; X64-NEXT: movq %r8, %rax
-; X64-NEXT: movq %rdi, %rbx
-; X64-NEXT: movq %rsp, %r8
-; X64-NEXT: movq %rsi, %rdi
-; X64-NEXT: movq %rdx, %rsi
-; X64-NEXT: movq %rcx, %rdx
-; X64-NEXT: movq %rax, %rcx
-; X64-NEXT: callq __divmodti4 at PLT
-; X64-NEXT: movaps (%rsp), %xmm0
-; X64-NEXT: movq %rdx, 8(%rbx)
-; X64-NEXT: movq %rax, (%rbx)
-; X64-NEXT: movaps %xmm0, 16(%rbx)
-; X64-NEXT: addq $16, %rsp
-; X64-NEXT: popq %rbx
-; X64-NEXT: retq
-;
-; A64-LABEL: sdivrem_i128:
-; A64: // %bb.0:
-; A64-NEXT: sub sp, sp, #32
-; A64-NEXT: mov x8, x4
-; A64-NEXT: stp x30, x19, [sp, #16] // 16-byte Folded Spill
-; A64-NEXT: mov x1, x3
-; A64-NEXT: mov x19, x0
-; A64-NEXT: mov x4, sp
-; A64-NEXT: mov x0, x2
-; A64-NEXT: mov x2, x8
-; A64-NEXT: mov x3, x5
-; A64-NEXT: bl __divmodti4
-; A64-NEXT: ldp x8, x9, [sp]
-; A64-NEXT: stp x0, x1, [x19]
-; A64-NEXT: stp x8, x9, [x19, #16]
-; A64-NEXT: ldp x30, x19, [sp, #16] // 16-byte Folded Reload
-; A64-NEXT: add sp, sp, #32
-; A64-NEXT: ret
-;
-; RV64-LABEL: sdivrem_i128:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -32
-; RV64-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
-; RV64-NEXT: mv a5, a4
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: mv a4, sp
-; RV64-NEXT: mv a0, a1
-; RV64-NEXT: mv a1, a2
-; RV64-NEXT: mv a2, a3
-; RV64-NEXT: mv a3, a5
-; RV64-NEXT: call __divmodti4
-; RV64-NEXT: ld a2, 0(sp)
-; RV64-NEXT: ld a3, 8(sp)
-; RV64-NEXT: sd a0, 0(s0)
-; RV64-NEXT: sd a1, 8(s0)
-; RV64-NEXT: sd a2, 16(s0)
-; RV64-NEXT: sd a3, 24(s0)
-; RV64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 32
-; RV64-NEXT: ret
- %q = sdiv i128 %n, %d
- %r = srem i128 %n, %d
- %p0 = getelementptr inbounds {i128, i128}, ptr %out, i32 0, i32 0
- %p1 = getelementptr inbounds {i128, i128}, ptr %out, i32 0, i32 1
- store i128 %q, ptr %p0, align 16
- store i128 %r, ptr %p1, align 16
- ret void
-}
-
-define void @udivrem_i128(ptr %out, i128 %n, i128 %d) nounwind {
-; X64-LABEL: udivrem_i128:
-; X64: # %bb.0:
-; X64-NEXT: pushq %rbx
-; X64-NEXT: subq $16, %rsp
-; X64-NEXT: movq %r8, %rax
-; X64-NEXT: movq %rdi, %rbx
-; X64-NEXT: movq %rsp, %r8
-; X64-NEXT: movq %rsi, %rdi
-; X64-NEXT: movq %rdx, %rsi
-; X64-NEXT: movq %rcx, %rdx
-; X64-NEXT: movq %rax, %rcx
-; X64-NEXT: callq __udivmodti4 at PLT
-; X64-NEXT: movaps (%rsp), %xmm0
-; X64-NEXT: movq %rdx, 8(%rbx)
-; X64-NEXT: movq %rax, (%rbx)
-; X64-NEXT: movaps %xmm0, 16(%rbx)
-; X64-NEXT: addq $16, %rsp
-; X64-NEXT: popq %rbx
-; X64-NEXT: retq
-;
-; A64-LABEL: udivrem_i128:
-; A64: // %bb.0:
-; A64-NEXT: sub sp, sp, #32
-; A64-NEXT: mov x8, x4
-; A64-NEXT: stp x30, x19, [sp, #16] // 16-byte Folded Spill
-; A64-NEXT: mov x1, x3
-; A64-NEXT: mov x19, x0
-; A64-NEXT: mov x4, sp
-; A64-NEXT: mov x0, x2
-; A64-NEXT: mov x2, x8
-; A64-NEXT: mov x3, x5
-; A64-NEXT: bl __udivmodti4
-; A64-NEXT: ldp x8, x9, [sp]
-; A64-NEXT: stp x0, x1, [x19]
-; A64-NEXT: stp x8, x9, [x19, #16]
-; A64-NEXT: ldp x30, x19, [sp, #16] // 16-byte Folded Reload
-; A64-NEXT: add sp, sp, #32
-; A64-NEXT: ret
-;
-; RV64-LABEL: udivrem_i128:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -32
-; RV64-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
-; RV64-NEXT: mv a5, a4
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: mv a4, sp
-; RV64-NEXT: mv a0, a1
-; RV64-NEXT: mv a1, a2
-; RV64-NEXT: mv a2, a3
-; RV64-NEXT: mv a3, a5
-; RV64-NEXT: call __udivmodti4
-; RV64-NEXT: ld a2, 0(sp)
-; RV64-NEXT: ld a3, 8(sp)
-; RV64-NEXT: sd a0, 0(s0)
-; RV64-NEXT: sd a1, 8(s0)
-; RV64-NEXT: sd a2, 16(s0)
-; RV64-NEXT: sd a3, 24(s0)
-; RV64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 32
-; RV64-NEXT: ret
- %q = udiv i128 %n, %d
- %r = urem i128 %n, %d
- %p0 = getelementptr inbounds {i128, i128}, ptr %out, i32 0, i32 0
- %p1 = getelementptr inbounds {i128, i128}, ptr %out, i32 0, i32 1
- store i128 %q, ptr %p0, align 16
- store i128 %r, ptr %p1, align 16
- ret void
-}
-
-; On 32-bit targets __divmodti4/__udivmodti4 are not registered (Int128RTLibcalls
-; is only added to 64-bit targets), so the fallback path in ExpandIntRes_DIVREM
-; fires: the SDIVREM node is replaced by separate SDIV + SREM, which then expand
-; inline. Verify no fused call is emitted and the code compiles without crashing.
-
-; X86-32-NOT: __divmodti4
-; X86-32-NOT: __udivmodti4
-
-define void @sdivrem_i128_fallback(ptr %out, i128 %n, i128 %d) nounwind {
-; X64-LABEL: sdivrem_i128_fallback:
-; X64: # %bb.0:
-; X64-NEXT: pushq %rbx
-; X64-NEXT: subq $16, %rsp
-; X64-NEXT: movq %r8, %rax
-; X64-NEXT: movq %rdi, %rbx
-; X64-NEXT: movq %rsp, %r8
-; X64-NEXT: movq %rsi, %rdi
-; X64-NEXT: movq %rdx, %rsi
-; X64-NEXT: movq %rcx, %rdx
-; X64-NEXT: movq %rax, %rcx
-; X64-NEXT: callq __divmodti4 at PLT
-; X64-NEXT: movaps (%rsp), %xmm0
-; X64-NEXT: movq %rdx, 8(%rbx)
-; X64-NEXT: movq %rax, (%rbx)
-; X64-NEXT: movaps %xmm0, 16(%rbx)
-; X64-NEXT: addq $16, %rsp
-; X64-NEXT: popq %rbx
-; X64-NEXT: retq
-;
-; A64-LABEL: sdivrem_i128_fallback:
-; A64: // %bb.0:
-; A64-NEXT: sub sp, sp, #32
-; A64-NEXT: mov x8, x4
-; A64-NEXT: stp x30, x19, [sp, #16] // 16-byte Folded Spill
-; A64-NEXT: mov x1, x3
-; A64-NEXT: mov x19, x0
-; A64-NEXT: mov x4, sp
-; A64-NEXT: mov x0, x2
-; A64-NEXT: mov x2, x8
-; A64-NEXT: mov x3, x5
-; A64-NEXT: bl __divmodti4
-; A64-NEXT: ldp x8, x9, [sp]
-; A64-NEXT: stp x0, x1, [x19]
-; A64-NEXT: stp x8, x9, [x19, #16]
-; A64-NEXT: ldp x30, x19, [sp, #16] // 16-byte Folded Reload
-; A64-NEXT: add sp, sp, #32
-; A64-NEXT: ret
-;
-; RV64-LABEL: sdivrem_i128_fallback:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -32
-; RV64-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
-; RV64-NEXT: mv a5, a4
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: mv a4, sp
-; RV64-NEXT: mv a0, a1
-; RV64-NEXT: mv a1, a2
-; RV64-NEXT: mv a2, a3
-; RV64-NEXT: mv a3, a5
-; RV64-NEXT: call __divmodti4
-; RV64-NEXT: ld a2, 0(sp)
-; RV64-NEXT: ld a3, 8(sp)
-; RV64-NEXT: sd a0, 0(s0)
-; RV64-NEXT: sd a1, 8(s0)
-; RV64-NEXT: sd a2, 16(s0)
-; RV64-NEXT: sd a3, 24(s0)
-; RV64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 32
-; RV64-NEXT: ret
- %q = sdiv i128 %n, %d
- %r = srem i128 %n, %d
- %p0 = getelementptr inbounds {i128, i128}, ptr %out, i32 0, i32 0
- %p1 = getelementptr inbounds {i128, i128}, ptr %out, i32 0, i32 1
- store i128 %q, ptr %p0, align 16
- store i128 %r, ptr %p1, align 16
- ret void
-}
>From 5872c8063098bb85993028d8f8bef7a8d09e01de Mon Sep 17 00:00:00 2001
From: Takashiidobe <idobetakashi at gmail.com>
Date: Sun, 22 Mar 2026 09:20:55 -0400
Subject: [PATCH 06/15] Add extra testing, especially for macOS
---
llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h | 2 +-
.../CodeGen/Generic/i128-divrem-libcall.ll | 44 +++++++++++++------
2 files changed, 32 insertions(+), 14 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index e1c11e1c35a31..eabca009c84ea 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -472,7 +472,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
void ExpandIntRes_BSWAP (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_PARITY (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_MUL (SDNode *N, SDValue &Lo, SDValue &Hi);
- void ExpandIntRes_DIVREM(SDNode *N, SDValue &Lo, SDValue &Hi);
+ void ExpandIntRes_DIVREM (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_SDIV (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_SREM (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_UDIV (SDNode *N, SDValue &Lo, SDValue &Hi);
diff --git a/llvm/test/CodeGen/Generic/i128-divrem-libcall.ll b/llvm/test/CodeGen/Generic/i128-divrem-libcall.ll
index 759ae41fa2226..36ffdd5f7e563 100644
--- a/llvm/test/CodeGen/Generic/i128-divrem-libcall.ll
+++ b/llvm/test/CodeGen/Generic/i128-divrem-libcall.ll
@@ -1,6 +1,8 @@
; 64-bit targets: fused __divmodti4 / __udivmodti4
-; RUN: %if x86-registered-target %{ llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=CHECK,FUSED %}
+; RUN: %if x86-registered-target %{ llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=CHECK,FUSED %}
+; RUN: %if x86-registered-target %{ llc < %s -mtriple=x86_64-linux-gnux32 | FileCheck %s --check-prefixes=CHECK,FUSED %}
; RUN: %if x86-registered-target %{ llc < %s -mtriple=x86_64-pc-windows-msvc | FileCheck %s --check-prefixes=CHECK,FUSED %}
+; RUN: %if x86-registered-target %{ llc < %s -mtriple=x86_64-w64-mingw32 | FileCheck %s --check-prefixes=CHECK,FUSED %}
; RUN: %if aarch64-registered-target %{ llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s --check-prefixes=CHECK,FUSED %}
; RUN: %if riscv-registered-target %{ llc < %s -mtriple=riscv64-linux-gnu | FileCheck %s --check-prefixes=CHECK,FUSED %}
; RUN: %if riscv-registered-target %{ llc < %s -mtriple=riscv64-linux-gnu -mattr=+m | FileCheck %s --check-prefixes=CHECK,FUSED %}
@@ -10,15 +12,21 @@
; RUN: %if webassembly-registered-target %{ llc < %s -mtriple=wasm32-unknown-unknown | FileCheck %s --check-prefixes=CHECK,FUSED %}
; RUN: %if webassembly-registered-target %{ llc < %s -mtriple=wasm64-unknown-unknown | FileCheck %s --check-prefixes=CHECK,FUSED %}
-; 32-bit / ILP32 targets: no fused libcall
-; RUN: %if x86-registered-target %{ llc < %s -mtriple=i386-linux-gnu | FileCheck %s --check-prefixes=CHECK,SPLIT %}
-; RUN: %if x86-registered-target %{ llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=CHECK,SPLIT %}
-; RUN: %if arm-registered-target %{ llc < %s -mtriple=armv6-linux-gnueabihf | FileCheck %s --check-prefixes=CHECK,SPLIT %}
-; RUN: %if arm-registered-target %{ llc < %s -mtriple=armv7-linux-gnueabi | FileCheck %s --check-prefixes=CHECK,SPLIT %}
-; RUN: %if aarch64-registered-target %{ llc < %s -mtriple=aarch64_32-apple-watchos | FileCheck %s --check-prefixes=CHECK,SPLIT %}
-; RUN: %if riscv-registered-target %{ llc < %s -mtriple=riscv32-linux-gnu | FileCheck %s --check-prefixes=CHECK,SPLIT %}
-; RUN: %if riscv-registered-target %{ llc < %s -mtriple=riscv32-linux-gnu -mattr=+m | FileCheck %s --check-prefixes=CHECK,SPLIT %}
-; RUN: %if arm-registered-target %{ llc < %s -mtriple=armv7-none-eabi | FileCheck %s --check-prefixes=CHECK,SPLIT %}
+; 32-bit / ILP32 targets that expand inline (no runtime library or no libcall)
+; RUN: %if x86-registered-target %{ llc < %s -mtriple=i386-linux-gnu | FileCheck %s --check-prefixes=CHECK,INLINE %}
+; RUN: %if x86-registered-target %{ llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=CHECK,INLINE %}
+; RUN: %if riscv-registered-target %{ llc < %s -mtriple=riscv32-linux-gnu | FileCheck %s --check-prefixes=CHECK,INLINE %}
+; RUN: %if riscv-registered-target %{ llc < %s -mtriple=riscv32-linux-gnu -mattr=+m | FileCheck %s --check-prefixes=CHECK,INLINE %}
+; RUN: %if arm-registered-target %{ llc < %s -mtriple=armv7-none-eabi | FileCheck %s --check-prefixes=CHECK,INLINE %}
+
+; 32-bit / ILP32 targets that fall back to separate __divti3 + __modti3 calls
+; RUN: %if arm-registered-target %{ llc < %s -mtriple=armv6-linux-gnueabihf | FileCheck %s --check-prefixes=CHECK,DIVMOD %}
+; RUN: %if arm-registered-target %{ llc < %s -mtriple=armv7-linux-gnueabi | FileCheck %s --check-prefixes=CHECK,DIVMOD %}
+; RUN: %if aarch64-registered-target %{ llc < %s -mtriple=aarch64_32-apple-watchos | FileCheck %s --check-prefixes=CHECK,DIVMOD %}
+
+; 64-bit Mac OS: fused ___divmodti4 (extra underscore)
+; RUN: %if x86-registered-target %{ llc < %s -mtriple=x86_64-apple-macosx | FileCheck %s --check-prefixes=CHECK,FUSED-DARWIN %}
+; RUN: %if aarch64-registered-target %{ llc < %s -mtriple=arm64-apple-macosx | FileCheck %s --check-prefixes=CHECK,FUSED-DARWIN %}
; Verify that sdiv+srem / udiv+urem on i128 fuse into a single __divmodti4 /
; __udivmodti4 call on targets where the libcall is available (64-bit targets
@@ -28,12 +36,17 @@
; 64-bit targets and wasm: fused __divmodti4 / __udivmodti4
; 32-bit targets that lack the fused call may lower to:
; - separate __divti3 + __modti3 / __udivti3 + __umodti3 calls, or
-; - fully inline expansion (e.g. i686)
+; - fully inline expansion (e.g. i686, bare metal)
define void @sdivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
; CHECK-LABEL: sdivrem_i128:
; FUSED: __divmodti4
-; SPLIT-NOT: __divmodti4
+; FUSED-DARWIN: ___divmodti4
+; DIVMOD: __divti3
+; DIVMOD: __modti3
+; INLINE-NOT: __divmodti4
+; INLINE-NOT: __divti3
+; INLINE-NOT: __modti3
%q = sdiv i128 %n, %d
%r = srem i128 %n, %d
store i128 %q, ptr %q_out
@@ -44,7 +57,12 @@ define void @sdivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
define void @udivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
; CHECK-LABEL: udivrem_i128:
; FUSED: __udivmodti4
-; SPLIT-NOT: __udivmodti4
+; FUSED-DARWIN: ___udivmodti4
+; DIVMOD: __udivti3
+; DIVMOD: __umodti3
+; INLINE-NOT: __udivmodti4
+; INLINE-NOT: __udivti3
+; INLINE-NOT: __umodti3
%q = udiv i128 %n, %d
%r = urem i128 %n, %d
store i128 %q, ptr %q_out
>From fff7c1ec493a26bb89da4cc9d0b082ab1f964fd4 Mon Sep 17 00:00:00 2001
From: Takashiidobe <idobetakashi at gmail.com>
Date: Sun, 22 Mar 2026 10:08:40 -0400
Subject: [PATCH 07/15] Fix failing armv6 + armv7 tests
---
llvm/test/CodeGen/Generic/i128-divrem-libcall.ll | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/llvm/test/CodeGen/Generic/i128-divrem-libcall.ll b/llvm/test/CodeGen/Generic/i128-divrem-libcall.ll
index 36ffdd5f7e563..057270d1aaf07 100644
--- a/llvm/test/CodeGen/Generic/i128-divrem-libcall.ll
+++ b/llvm/test/CodeGen/Generic/i128-divrem-libcall.ll
@@ -17,11 +17,11 @@
; RUN: %if x86-registered-target %{ llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=CHECK,INLINE %}
; RUN: %if riscv-registered-target %{ llc < %s -mtriple=riscv32-linux-gnu | FileCheck %s --check-prefixes=CHECK,INLINE %}
; RUN: %if riscv-registered-target %{ llc < %s -mtriple=riscv32-linux-gnu -mattr=+m | FileCheck %s --check-prefixes=CHECK,INLINE %}
+; RUN: %if arm-registered-target %{ llc < %s -mtriple=armv6-linux-gnueabihf | FileCheck %s --check-prefixes=CHECK,INLINE %}
+; RUN: %if arm-registered-target %{ llc < %s -mtriple=armv7-linux-gnueabi | FileCheck %s --check-prefixes=CHECK,INLINE %}
; RUN: %if arm-registered-target %{ llc < %s -mtriple=armv7-none-eabi | FileCheck %s --check-prefixes=CHECK,INLINE %}
-; 32-bit / ILP32 targets that fall back to separate __divti3 + __modti3 calls
-; RUN: %if arm-registered-target %{ llc < %s -mtriple=armv6-linux-gnueabihf | FileCheck %s --check-prefixes=CHECK,DIVMOD %}
-; RUN: %if arm-registered-target %{ llc < %s -mtriple=armv7-linux-gnueabi | FileCheck %s --check-prefixes=CHECK,DIVMOD %}
+; ILP32 targets that fall back to separate __divti3 + __modti3 calls
; RUN: %if aarch64-registered-target %{ llc < %s -mtriple=aarch64_32-apple-watchos | FileCheck %s --check-prefixes=CHECK,DIVMOD %}
; 64-bit Mac OS: fused ___divmodti4 (extra underscore)
>From f38fdb5f10e35c6700ca304f00e152f847a0068d Mon Sep 17 00:00:00 2001
From: Takashiidobe <idobetakashi at gmail.com>
Date: Sun, 22 Mar 2026 19:25:00 -0400
Subject: [PATCH 08/15] Add support for the Win64 i128 ABI and add more testing
---
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 6 +-
.../SelectionDAG/LegalizeIntegerTypes.cpp | 11 ++-
llvm/lib/Target/X86/X86ISelLowering.cpp | 77 +++++++++++++++++++
llvm/lib/Target/X86/X86ISelLowering.h | 2 +
.../CodeGen/Generic/i128-divrem-libcall.ll | 65 ++++++++++++++--
5 files changed, 151 insertions(+), 10 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 22cb979c9d8c3..bd1a364696c30 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -5057,8 +5057,12 @@ SDValue DAGCombiner::useDivRem(SDNode *Node) {
if (VT.isVector() || !VT.isInteger())
return SDValue();
+ // For non-legal types, only allow the DIVREM node to form when a fused
+ // libcall is available. ExpandIntRes_DIVREM currently only handles i128;
+ // extending to other widths requires generalizing it to select the libcall
+ // by VT.
if (!TLI.isTypeLegal(VT) && !TLI.isOperationCustom(DivRemOpc, VT) &&
- !isDivRemLibcallAvailable(Node, isSigned, DAG))
+ !(VT == MVT::i128 && isDivRemLibcallAvailable(Node, isSigned, DAG)))
return SDValue();
// If DIVREM is going to get expanded into a libcall,
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 6318a077da6ed..6838dfb746ebe 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -4910,10 +4910,17 @@ void DAGTypeLegalizer::ExpandIntRes_DIVREM(SDNode *N, SDValue &Lo,
SDLoc dl(N);
EVT VT = N->getValueType(0);
bool IsSigned = (N->getOpcode() == ISD::SDIVREM);
+
+ // Only i128 is handled here; other widths require generalizing this
+ // function to select the libcall by VT.
RTLIB::Libcall LC = IsSigned ? RTLIB::SDIVREM_I128 : RTLIB::UDIVREM_I128;
+ assert(VT == MVT::i128 &&
+ "ExpandIntRes_DIVREM only handles i128; generalize by VT first");
- // If no fused divrem libcall is available, fall back to separate div and rem.
- if (DAG.getLibcalls().getLibcallImpl(LC) == RTLIB::Unsupported) {
+ // If no fused divrem libcall is available (or VT is not i128 in release
+ // builds), fall back to separate div and rem.
+ if (VT != MVT::i128 ||
+ DAG.getLibcalls().getLibcallImpl(LC) == RTLIB::Unsupported) {
unsigned DivOp = IsSigned ? ISD::SDIV : ISD::UDIV;
unsigned RemOp = IsSigned ? ISD::SREM : ISD::UREM;
SDValue Ops[2] = {N->getOperand(0), N->getOperand(1)};
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 67d25e4eabbac..f1be013412c69 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -2665,6 +2665,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::UDIV, MVT::i128, Custom);
setOperationAction(ISD::SREM, MVT::i128, Custom);
setOperationAction(ISD::UREM, MVT::i128, Custom);
+ setOperationAction(ISD::SDIVREM, MVT::i128, Custom);
+ setOperationAction(ISD::UDIVREM, MVT::i128, Custom);
setOperationAction(ISD::FP_TO_SINT, MVT::i128, Custom);
setOperationAction(ISD::FP_TO_UINT, MVT::i128, Custom);
setOperationAction(ISD::SINT_TO_FP, MVT::i128, Custom);
@@ -30626,6 +30628,72 @@ SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) cons
return DAG.getBitcast(VT, CallInfo.first);
}
+void X86TargetLowering::LowerWin64_i128DIVREM(SDNode *N, SelectionDAG &DAG,
+ SDValue &Quot,
+ SDValue &Rem) const {
+ assert(Subtarget.isTargetWin64() && "Unexpected target");
+ EVT VT = N->getValueType(0);
+ assert(VT == MVT::i128 && "Unexpected type");
+
+ bool isSigned = N->getOpcode() == ISD::SDIVREM;
+ RTLIB::Libcall LC = isSigned ? RTLIB::SDIVREM_I128 : RTLIB::UDIVREM_I128;
+ RTLIB::LibcallImpl LCImpl = DAG.getLibcalls().getLibcallImpl(LC);
+
+ SDLoc dl(N);
+
+ // If no fused divrem libcall is available, fall back to separate div and rem.
+ // This goes through LowerWin64_i128OP with the correct pointer-arg ABI.
+ if (LCImpl == RTLIB::Unsupported) {
+ unsigned DivOp = isSigned ? ISD::SDIV : ISD::UDIV;
+ unsigned RemOp = isSigned ? ISD::SREM : ISD::UREM;
+ Quot = DAG.getNode(DivOp, dl, VT, N->getOperand(0), N->getOperand(1));
+ Rem = DAG.getNode(RemOp, dl, VT, N->getOperand(0), N->getOperand(1));
+ return;
+ }
+ SDValue InChain = DAG.getEntryNode();
+
+ TargetLowering::ArgListTy Args;
+
+ // Spill both i128 inputs to stack temporaries and pass as pointers as per
+ // Win64 CC (Win64 has no calling convention for passing i128 by value).
+ for (unsigned i = 0; i < 2; ++i) {
+ EVT ArgVT = N->getOperand(i).getValueType();
+ assert(ArgVT == MVT::i128 && "Unexpected argument type");
+ SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
+ int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
+ MachinePointerInfo MPI =
+ MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
+ InChain =
+ DAG.getStore(InChain, dl, N->getOperand(i), StackPtr, MPI, Align(16));
+ Args.emplace_back(StackPtr, PointerType::get(*DAG.getContext(), 0));
+ }
+
+ // Allocate a stack slot for the remainder output pointer.
+ MachineFunction &MF = DAG.getMachineFunction();
+ int RemFI = MF.getFrameInfo().CreateStackObject(16, Align(16), false);
+ SDValue RemPtr = DAG.getFrameIndex(RemFI, getPointerTy(DAG.getDataLayout()));
+ Args.emplace_back(RemPtr, PointerType::get(*DAG.getContext(), 0));
+
+ SDValue Callee =
+ DAG.getExternalSymbol(LCImpl, getPointerTy(DAG.getDataLayout()));
+
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ CLI.setDebugLoc(dl)
+ .setChain(InChain)
+ .setLibCallee(
+ DAG.getLibcalls().getLibcallImplCallingConv(LCImpl),
+ static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()), Callee,
+ std::move(Args))
+ .setInRegister()
+ .setSExtResult(isSigned)
+ .setZExtResult(!isSigned);
+
+ std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
+ Quot = DAG.getBitcast(VT, CallInfo.first);
+ Rem = DAG.getLoad(VT, dl, CallInfo.second, RemPtr,
+ MachinePointerInfo::getFixedStack(MF, RemFI), Align(16));
+}
+
SDValue X86TargetLowering::LowerWin64_FP_TO_INT128(SDValue Op,
SelectionDAG &DAG,
SDValue &Chain) const {
@@ -34927,6 +34995,15 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
Results.push_back(V);
return;
}
+ case ISD::SDIVREM:
+ case ISD::UDIVREM: {
+ assert(N->getValueType(0) == MVT::i128 && Subtarget.isTargetWin64());
+ SDValue Q, R;
+ LowerWin64_i128DIVREM(N, DAG, Q, R);
+ Results.push_back(Q);
+ Results.push_back(R);
+ return;
+ }
case ISD::TRUNCATE: {
MVT VT = N->getSimpleValueType(0);
if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index 5c7c54cacd239..80781e9cac406 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -832,6 +832,8 @@ namespace llvm {
SDValue LowerSET_FPENV_MEM(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerRESET_FPENV(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const;
+ void LowerWin64_i128DIVREM(SDNode *N, SelectionDAG &DAG, SDValue &Quot,
+ SDValue &Rem) const;
SDValue LowerWin64_FP_TO_INT128(SDValue Op, SelectionDAG &DAG,
SDValue &Chain) const;
SDValue LowerWin64_INT128_TO_FP(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/test/CodeGen/Generic/i128-divrem-libcall.ll b/llvm/test/CodeGen/Generic/i128-divrem-libcall.ll
index 057270d1aaf07..594b4c73d4b06 100644
--- a/llvm/test/CodeGen/Generic/i128-divrem-libcall.ll
+++ b/llvm/test/CodeGen/Generic/i128-divrem-libcall.ll
@@ -1,9 +1,9 @@
; 64-bit targets: fused __divmodti4 / __udivmodti4
-; RUN: %if x86-registered-target %{ llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=CHECK,FUSED %}
+; RUN: %if x86-registered-target %{ llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=CHECK,FUSED,SYSV-X64 %}
; RUN: %if x86-registered-target %{ llc < %s -mtriple=x86_64-linux-gnux32 | FileCheck %s --check-prefixes=CHECK,FUSED %}
-; RUN: %if x86-registered-target %{ llc < %s -mtriple=x86_64-pc-windows-msvc | FileCheck %s --check-prefixes=CHECK,FUSED %}
+; RUN: %if x86-registered-target %{ llc < %s -mtriple=x86_64-pc-windows-msvc | FileCheck %s --check-prefixes=CHECK,WIN64 %}
; RUN: %if x86-registered-target %{ llc < %s -mtriple=x86_64-w64-mingw32 | FileCheck %s --check-prefixes=CHECK,FUSED %}
-; RUN: %if aarch64-registered-target %{ llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s --check-prefixes=CHECK,FUSED %}
+; RUN: %if aarch64-registered-target %{ llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s --check-prefixes=CHECK,FUSED,SYSV-A64 %}
; RUN: %if riscv-registered-target %{ llc < %s -mtriple=riscv64-linux-gnu | FileCheck %s --check-prefixes=CHECK,FUSED %}
; RUN: %if riscv-registered-target %{ llc < %s -mtriple=riscv64-linux-gnu -mattr=+m | FileCheck %s --check-prefixes=CHECK,FUSED %}
; RUN: %if powerpc-registered-target %{ llc < %s -mtriple=powerpc64-linux-gnu | FileCheck %s --check-prefixes=CHECK,FUSED %}
@@ -21,27 +21,58 @@
; RUN: %if arm-registered-target %{ llc < %s -mtriple=armv7-linux-gnueabi | FileCheck %s --check-prefixes=CHECK,INLINE %}
; RUN: %if arm-registered-target %{ llc < %s -mtriple=armv7-none-eabi | FileCheck %s --check-prefixes=CHECK,INLINE %}
+; Win32: i128 fully inline-expanded, no libcalls registered
+; RUN: %if x86-registered-target %{ llc < %s -mtriple=i686-pc-windows-msvc | FileCheck %s --check-prefixes=CHECK,WIN32 %}
+
; ILP32 targets that fall back to separate __divti3 + __modti3 calls
; RUN: %if aarch64-registered-target %{ llc < %s -mtriple=aarch64_32-apple-watchos | FileCheck %s --check-prefixes=CHECK,DIVMOD %}
-; 64-bit Mac OS: fused ___divmodti4 (extra underscore)
+; 64-bit Mac OS: fused ___divmodti4 (extra underscore, same ABI as Linux AArch64)
; RUN: %if x86-registered-target %{ llc < %s -mtriple=x86_64-apple-macosx | FileCheck %s --check-prefixes=CHECK,FUSED-DARWIN %}
-; RUN: %if aarch64-registered-target %{ llc < %s -mtriple=arm64-apple-macosx | FileCheck %s --check-prefixes=CHECK,FUSED-DARWIN %}
+; RUN: %if aarch64-registered-target %{ llc < %s -mtriple=arm64-apple-macosx | FileCheck %s --check-prefixes=CHECK,DARWIN-A64 %}
; Verify that sdiv+srem / udiv+urem on i128 fuse into a single __divmodti4 /
; __udivmodti4 call on targets where the libcall is available (64-bit targets
; and wasm), and do not on targets where it is not (32-bit / ILP32).
;
-; The lowering varies by target:
-; 64-bit targets and wasm: fused __divmodti4 / __udivmodti4
+; Detailed ABI checks for the four most popular calling conventions:
+; WIN64 (x86_64 Windows): all args spilled to stack and passed as pointers
+; in %rcx/%rdx/%r8, quotient returned in %xmm0.
+; DARWIN-A64 (AArch64 macOS): identical to SYSV-A64 but symbol has an extra
+; leading underscore (___divmodti4).
+; SYSV-X64 (x86_64 Linux/BSD): i128 args in register pairs, rem pointer via
+; %rsp in %r8, quotient returned in %rax:%rdx.
+; SYSV-A64 (AArch64 Linux): i128 args in x0:x1/x2:x3, rem pointer via sp in
+; x4, quotient returned in x0:x1.
+; Win32 (i686-windows-msvc): no i128 libcalls registered, fully inline.
; 32-bit targets that lack the fused call may lower to:
; - separate __divti3 + __modti3 / __udivti3 + __umodti3 calls, or
; - fully inline expansion (e.g. i686, bare metal)
define void @sdivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
; CHECK-LABEL: sdivrem_i128:
+; SYSV-X64: movq %rsp, %r8
+; SYSV-A64: mov x4, sp
; FUSED: __divmodti4
; FUSED-DARWIN: ___divmodti4
+; SYSV-X64: movq (%rsp),
+; SYSV-X64: movq %rax,
+; SYSV-X64: movq %rdx,
+; SYSV-A64: ldp {{.*}}, [sp]
+; SYSV-A64: stp x0, x1,
+; DARWIN-A64: mov x4, sp
+; DARWIN-A64: bl ___divmodti4
+; DARWIN-A64: ldp {{.*}}, [sp]
+; DARWIN-A64: stp x0, x1,
+; WIN64: leaq {{[0-9]+}}(%rsp), %rcx
+; WIN64: leaq {{[0-9]+}}(%rsp), %rdx
+; WIN64: leaq {{[0-9]+}}(%rsp), %r8
+; WIN64: callq __divmodti4
+; WIN64: movaps {{[0-9]+}}(%rsp), %xmm1
+; WIN64: movaps %xmm0,
+; WIN32-NOT: __divmodti4
+; WIN32-NOT: __divti3
+; WIN32-NOT: __modti3
; DIVMOD: __divti3
; DIVMOD: __modti3
; INLINE-NOT: __divmodti4
@@ -56,8 +87,28 @@ define void @sdivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
define void @udivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
; CHECK-LABEL: udivrem_i128:
+; SYSV-X64: movq %rsp, %r8
+; SYSV-A64: mov x4, sp
; FUSED: __udivmodti4
; FUSED-DARWIN: ___udivmodti4
+; SYSV-X64: movq (%rsp),
+; SYSV-X64: movq %rax,
+; SYSV-X64: movq %rdx,
+; SYSV-A64: ldp {{.*}}, [sp]
+; SYSV-A64: stp x0, x1,
+; DARWIN-A64: mov x4, sp
+; DARWIN-A64: bl ___udivmodti4
+; DARWIN-A64: ldp {{.*}}, [sp]
+; DARWIN-A64: stp x0, x1,
+; WIN64: leaq {{[0-9]+}}(%rsp), %rcx
+; WIN64: leaq {{[0-9]+}}(%rsp), %rdx
+; WIN64: leaq {{[0-9]+}}(%rsp), %r8
+; WIN64: callq __udivmodti4
+; WIN64: movaps {{[0-9]+}}(%rsp), %xmm1
+; WIN64: movaps %xmm0,
+; WIN32-NOT: __udivmodti4
+; WIN32-NOT: __udivti3
+; WIN32-NOT: __umodti3
; DIVMOD: __udivti3
; DIVMOD: __umodti3
; INLINE-NOT: __udivmodti4
>From 249b10b34c511efee490cf33854b6ba5c3cefb02 Mon Sep 17 00:00:00 2001
From: Takashiidobe <idobetakashi at gmail.com>
Date: Thu, 26 Mar 2026 21:34:16 -0400
Subject: [PATCH 09/15] add clang-format exemption lines around the declarations
that don't follow clang-format rules
---
llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h | 2 ++
1 file changed, 2 insertions(+)
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index eabca009c84ea..d8c0ce41871a6 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -441,6 +441,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
// Integer Result Expansion.
void ExpandIntegerResult(SDNode *N, unsigned ResNo);
+ // clang-format off
void ExpandIntRes_ANY_EXTEND (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_AssertSext (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_AssertZext (SDNode *N, SDValue &Lo, SDValue &Hi);
@@ -502,6 +503,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
void ExpandIntRes_CLMUL(SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_VSCALE (SDNode *N, SDValue &Lo, SDValue &Hi);
+ // clang-format on
void ExpandIntRes_READ_REGISTER(SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_CTTZ_ELTS(SDNode *N, SDValue &Lo, SDValue &Hi);
>From 33f4565393d22845a4a1dad46233141e9d246bb0 Mon Sep 17 00:00:00 2001
From: Takashiidobe <idobetakashi at gmail.com>
Date: Sun, 29 Mar 2026 18:49:12 -0400
Subject: [PATCH 10/15] code review feedback
---
.../SelectionDAG/LegalizeIntegerTypes.cpp | 32 ++++++++++++-------
llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h | 2 +-
llvm/lib/Target/X86/X86ISelLowering.cpp | 9 +++---
3 files changed, 27 insertions(+), 16 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 6838dfb746ebe..442243cf5fcd5 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -3116,7 +3116,7 @@ void DAGTypeLegalizer::ExpandIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::SDIV: ExpandIntRes_SDIV(N, Lo, Hi); break;
case ISD::SDIVREM:
case ISD::UDIVREM:
- ExpandIntRes_DIVREM(N, Lo, Hi);
+ ExpandIntRes_DIVREM(N, ResNo, Lo, Hi);
break;
case ISD::SIGN_EXTEND: ExpandIntRes_SIGN_EXTEND(N, Lo, Hi); break;
case ISD::SIGN_EXTEND_INREG: ExpandIntRes_SIGN_EXTEND_INREG(N, Lo, Hi); break;
@@ -4905,8 +4905,8 @@ void DAGTypeLegalizer::ExpandIntRes_SADDSUBO(SDNode *Node,
ReplaceValueWith(SDValue(Node, 1), Ovf);
}
-void DAGTypeLegalizer::ExpandIntRes_DIVREM(SDNode *N, SDValue &Lo,
- SDValue &Hi) {
+void DAGTypeLegalizer::ExpandIntRes_DIVREM(SDNode *N, unsigned ResNo,
+ SDValue &Lo, SDValue &Hi) {
SDLoc dl(N);
EVT VT = N->getValueType(0);
bool IsSigned = (N->getOpcode() == ISD::SDIVREM);
@@ -4926,8 +4926,13 @@ void DAGTypeLegalizer::ExpandIntRes_DIVREM(SDNode *N, SDValue &Lo,
SDValue Ops[2] = {N->getOperand(0), N->getOperand(1)};
SDValue Q = DAG.getNode(DivOp, dl, VT, Ops);
SDValue R = DAG.getNode(RemOp, dl, VT, Ops);
- SplitInteger(Q, Lo, Hi);
- ReplaceValueWith(SDValue(N, 1), R);
+ if (ResNo == 0) {
+ SplitInteger(Q, Lo, Hi);
+ ReplaceValueWith(SDValue(N, 1), R);
+ } else {
+ SplitInteger(R, Lo, Hi);
+ ReplaceValueWith(SDValue(N, 0), Q);
+ }
return;
}
@@ -4964,16 +4969,21 @@ void DAGTypeLegalizer::ExpandIntRes_DIVREM(SDNode *N, SDValue &Lo,
std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
- // Quotient is the return value; split it into Lo/Hi for the expanded type.
- SplitInteger(CallInfo.first, Lo, Hi);
-
- // Remainder is written to the stack temporary; load it back and register
- // it as the replacement for result 1 of the original SDIVREM/UDIVREM node.
+ // Load the remainder from the stack temporary.
int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex();
SDValue Rem = DAG.getLoad(
VT, dl, CallInfo.second, FIPtr,
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
- ReplaceValueWith(SDValue(N, 1), Rem);
+
+ // Split the requested result into Lo/Hi and register the other result as its
+ // replacement.
+ if (ResNo == 0) {
+ SplitInteger(CallInfo.first, Lo, Hi);
+ ReplaceValueWith(SDValue(N, 1), Rem);
+ } else {
+ SplitInteger(Rem, Lo, Hi);
+ ReplaceValueWith(SDValue(N, 0), CallInfo.first);
+ }
}
void DAGTypeLegalizer::ExpandIntRes_SDIV(SDNode *N,
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index d8c0ce41871a6..34ba1db4ebbc8 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -473,7 +473,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
void ExpandIntRes_BSWAP (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_PARITY (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_MUL (SDNode *N, SDValue &Lo, SDValue &Hi);
- void ExpandIntRes_DIVREM (SDNode *N, SDValue &Lo, SDValue &Hi);
+ void ExpandIntRes_DIVREM (SDNode *N, unsigned ResNo, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_SDIV (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_SREM (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_UDIV (SDNode *N, SDValue &Lo, SDValue &Hi);
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index f1be013412c69..caa474d6f86de 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -30617,9 +30617,10 @@ SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) cons
TargetLowering::CallLoweringInfo CLI(DAG);
CLI.setDebugLoc(dl)
.setChain(InChain)
- .setLibCallee(DAG.getLibcalls().getLibcallImplCallingConv(LCImpl),
- EVT(MVT::v2i64).getTypeForEVT(*DAG.getContext()), Callee,
- std::move(Args))
+ .setLibCallee(
+ DAG.getLibcalls().getLibcallImplCallingConv(LCImpl),
+ EVT(MVT::v2i64).getTypeForEVT(*DAG.getContext()), Callee,
+ std::move(Args))
.setInRegister()
.setSExtResult(isSigned)
.setZExtResult(!isSigned);
@@ -30682,7 +30683,7 @@ void X86TargetLowering::LowerWin64_i128DIVREM(SDNode *N, SelectionDAG &DAG,
.setChain(InChain)
.setLibCallee(
DAG.getLibcalls().getLibcallImplCallingConv(LCImpl),
- static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()), Callee,
+ EVT(MVT::v2i64).getTypeForEVT(*DAG.getContext()), Callee,
std::move(Args))
.setInRegister()
.setSExtResult(isSigned)
>From 7890634f5628db4e5255f3bb89aaf3e798e99d0a Mon Sep 17 00:00:00 2001
From: Takashiidobe <idobetakashi at gmail.com>
Date: Mon, 30 Mar 2026 18:06:30 -0400
Subject: [PATCH 11/15] code review feedback: apply De Morgan to if condition
---
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index bd1a364696c30..43183e4a849b6 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -5061,8 +5061,8 @@ SDValue DAGCombiner::useDivRem(SDNode *Node) {
// libcall is available. ExpandIntRes_DIVREM currently only handles i128;
// extending to other widths requires generalizing it to select the libcall
// by VT.
- if (!TLI.isTypeLegal(VT) && !TLI.isOperationCustom(DivRemOpc, VT) &&
- !(VT == MVT::i128 && isDivRemLibcallAvailable(Node, isSigned, DAG)))
+ if (!(TLI.isTypeLegal(VT) || TLI.isOperationCustom(DivRemOpc, VT) ||
+ (VT == MVT::i128 && isDivRemLibcallAvailable(Node, isSigned, DAG))))
return SDValue();
// If DIVREM is going to get expanded into a libcall,
>From 966d579b863e7cd98bfce3282435b2cdf0d65858 Mon Sep 17 00:00:00 2001
From: Takashiidobe <idobetakashi at gmail.com>
Date: Tue, 31 Mar 2026 17:47:45 -0400
Subject: [PATCH 12/15] more feedback: apply De Morgan to another condition
---
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 43183e4a849b6..202672e19c4ab 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -5061,8 +5061,8 @@ SDValue DAGCombiner::useDivRem(SDNode *Node) {
// libcall is available. ExpandIntRes_DIVREM currently only handles i128;
// extending to other widths requires generalizing it to select the libcall
// by VT.
- if (!(TLI.isTypeLegal(VT) || TLI.isOperationCustom(DivRemOpc, VT) ||
- (VT == MVT::i128 && isDivRemLibcallAvailable(Node, isSigned, DAG))))
+ if (!TLI.isTypeLegal(VT) && !TLI.isOperationCustom(DivRemOpc, VT) &&
+ (VT != MVT::i128 || !isDivRemLibcallAvailable(Node, isSigned, DAG)))
return SDValue();
// If DIVREM is going to get expanded into a libcall,
>From 4740cf0407e3b1d1c067b1a443b061f9730708fe Mon Sep 17 00:00:00 2001
From: Takashiidobe <idobetakashi at gmail.com>
Date: Sat, 4 Apr 2026 18:30:58 -0400
Subject: [PATCH 13/15] split out divrem test from generic
---
.../CodeGen/AArch64/i128-divrem-libcall.ll | 214 +
llvm/test/CodeGen/ARM/i128-divrem-libcall.ll | 2414 ++++++++
.../CodeGen/Generic/i128-divrem-libcall.ll | 122 -
.../CodeGen/LoongArch/i128-divrem-libcall.ll | 80 +
.../CodeGen/PowerPC/i128-divrem-libcall.ll | 84 +
.../test/CodeGen/RISCV/i128-divrem-libcall.ll | 5015 +++++++++++++++++
.../test/CodeGen/SPARC/i128-divrem-libcall.ll | 64 +
.../WebAssembly/i128-divrem-libcall.ll | 177 +
llvm/test/CodeGen/X86/i128-divrem-libcall.ll | 3393 +++++++++++
9 files changed, 11441 insertions(+), 122 deletions(-)
create mode 100644 llvm/test/CodeGen/AArch64/i128-divrem-libcall.ll
create mode 100644 llvm/test/CodeGen/ARM/i128-divrem-libcall.ll
delete mode 100644 llvm/test/CodeGen/Generic/i128-divrem-libcall.ll
create mode 100644 llvm/test/CodeGen/LoongArch/i128-divrem-libcall.ll
create mode 100644 llvm/test/CodeGen/PowerPC/i128-divrem-libcall.ll
create mode 100644 llvm/test/CodeGen/RISCV/i128-divrem-libcall.ll
create mode 100644 llvm/test/CodeGen/SPARC/i128-divrem-libcall.ll
create mode 100644 llvm/test/CodeGen/WebAssembly/i128-divrem-libcall.ll
create mode 100644 llvm/test/CodeGen/X86/i128-divrem-libcall.ll
diff --git a/llvm/test/CodeGen/AArch64/i128-divrem-libcall.ll b/llvm/test/CodeGen/AArch64/i128-divrem-libcall.ll
new file mode 100644
index 0000000000000..9fa2f4754acc0
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/i128-divrem-libcall.ll
@@ -0,0 +1,214 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s --check-prefix=LINUX-A64
+; RUN: llc < %s -mtriple=arm64-apple-macosx | FileCheck %s --check-prefix=DARWIN-A64
+; RUN: llc < %s -mtriple=arm64_32-apple-watchos | FileCheck %s --check-prefix=WATCHOS
+
+define void @sdivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
+; LINUX-A64-LABEL: sdivrem_i128:
+; LINUX-A64: // %bb.0:
+; LINUX-A64-NEXT: sub sp, sp, #48
+; LINUX-A64-NEXT: str x30, [sp, #16] // 8-byte Spill
+; LINUX-A64-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill
+; LINUX-A64-NEXT: .cfi_def_cfa_offset 48
+; LINUX-A64-NEXT: .cfi_offset w19, -8
+; LINUX-A64-NEXT: .cfi_offset w20, -16
+; LINUX-A64-NEXT: .cfi_offset w30, -32
+; LINUX-A64-NEXT: mov x8, x4
+; LINUX-A64-NEXT: mov x19, x1
+; LINUX-A64-NEXT: mov x20, x0
+; LINUX-A64-NEXT: mov x4, sp
+; LINUX-A64-NEXT: mov x0, x2
+; LINUX-A64-NEXT: mov x1, x3
+; LINUX-A64-NEXT: mov x2, x8
+; LINUX-A64-NEXT: mov x3, x5
+; LINUX-A64-NEXT: bl __divmodti4
+; LINUX-A64-NEXT: ldp x9, x8, [sp]
+; LINUX-A64-NEXT: stp x0, x1, [x20]
+; LINUX-A64-NEXT: ldr x30, [sp, #16] // 8-byte Reload
+; LINUX-A64-NEXT: stp x9, x8, [x19]
+; LINUX-A64-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload
+; LINUX-A64-NEXT: add sp, sp, #48
+; LINUX-A64-NEXT: ret
+;
+; DARWIN-A64-LABEL: sdivrem_i128:
+; DARWIN-A64: ; %bb.0:
+; DARWIN-A64-NEXT: sub sp, sp, #48
+; DARWIN-A64-NEXT: stp x20, x19, [sp, #16] ; 16-byte Folded Spill
+; DARWIN-A64-NEXT: stp x29, x30, [sp, #32] ; 16-byte Folded Spill
+; DARWIN-A64-NEXT: .cfi_def_cfa_offset 48
+; DARWIN-A64-NEXT: .cfi_offset w30, -8
+; DARWIN-A64-NEXT: .cfi_offset w29, -16
+; DARWIN-A64-NEXT: .cfi_offset w19, -24
+; DARWIN-A64-NEXT: .cfi_offset w20, -32
+; DARWIN-A64-NEXT: mov x8, x4
+; DARWIN-A64-NEXT: mov x19, x1
+; DARWIN-A64-NEXT: mov x20, x0
+; DARWIN-A64-NEXT: mov x4, sp
+; DARWIN-A64-NEXT: mov x0, x2
+; DARWIN-A64-NEXT: mov x1, x3
+; DARWIN-A64-NEXT: mov x2, x8
+; DARWIN-A64-NEXT: mov x3, x5
+; DARWIN-A64-NEXT: bl ___divmodti4
+; DARWIN-A64-NEXT: ldp x9, x8, [sp]
+; DARWIN-A64-NEXT: stp x0, x1, [x20]
+; DARWIN-A64-NEXT: ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
+; DARWIN-A64-NEXT: stp x9, x8, [x19]
+; DARWIN-A64-NEXT: ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
+; DARWIN-A64-NEXT: add sp, sp, #48
+; DARWIN-A64-NEXT: ret
+;
+; WATCHOS-LABEL: sdivrem_i128:
+; WATCHOS: ; %bb.0:
+; WATCHOS-NEXT: stp x26, x25, [sp, #-80]! ; 16-byte Folded Spill
+; WATCHOS-NEXT: stp x24, x23, [sp, #16] ; 16-byte Folded Spill
+; WATCHOS-NEXT: stp x22, x21, [sp, #32] ; 16-byte Folded Spill
+; WATCHOS-NEXT: stp x20, x19, [sp, #48] ; 16-byte Folded Spill
+; WATCHOS-NEXT: stp x29, x30, [sp, #64] ; 16-byte Folded Spill
+; WATCHOS-NEXT: .cfi_def_cfa_offset 80
+; WATCHOS-NEXT: .cfi_offset w30, -8
+; WATCHOS-NEXT: .cfi_offset w29, -16
+; WATCHOS-NEXT: .cfi_offset w19, -24
+; WATCHOS-NEXT: .cfi_offset w20, -32
+; WATCHOS-NEXT: .cfi_offset w21, -40
+; WATCHOS-NEXT: .cfi_offset w22, -48
+; WATCHOS-NEXT: .cfi_offset w23, -56
+; WATCHOS-NEXT: .cfi_offset w24, -64
+; WATCHOS-NEXT: .cfi_offset w25, -72
+; WATCHOS-NEXT: .cfi_offset w26, -80
+; WATCHOS-NEXT: mov x21, x3
+; WATCHOS-NEXT: mov x22, x2
+; WATCHOS-NEXT: mov x23, x1
+; WATCHOS-NEXT: mov x24, x0
+; WATCHOS-NEXT: mov x0, x2
+; WATCHOS-NEXT: mov x1, x3
+; WATCHOS-NEXT: mov x2, x4
+; WATCHOS-NEXT: mov x3, x5
+; WATCHOS-NEXT: mov x19, x5
+; WATCHOS-NEXT: mov x20, x4
+; WATCHOS-NEXT: bl ___divti3
+; WATCHOS-NEXT: mov x25, x0
+; WATCHOS-NEXT: mov x26, x1
+; WATCHOS-NEXT: mov x0, x22
+; WATCHOS-NEXT: mov x1, x21
+; WATCHOS-NEXT: mov x2, x20
+; WATCHOS-NEXT: mov x3, x19
+; WATCHOS-NEXT: bl ___modti3
+; WATCHOS-NEXT: stp x25, x26, [x24]
+; WATCHOS-NEXT: ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
+; WATCHOS-NEXT: stp x0, x1, [x23]
+; WATCHOS-NEXT: ldp x20, x19, [sp, #48] ; 16-byte Folded Reload
+; WATCHOS-NEXT: ldp x22, x21, [sp, #32] ; 16-byte Folded Reload
+; WATCHOS-NEXT: ldp x24, x23, [sp, #16] ; 16-byte Folded Reload
+; WATCHOS-NEXT: ldp x26, x25, [sp], #80 ; 16-byte Folded Reload
+; WATCHOS-NEXT: ret
+ %q = sdiv i128 %n, %d
+ %r = srem i128 %n, %d
+ store i128 %q, ptr %q_out
+ store i128 %r, ptr %r_out
+ ret void
+}
+
+define void @udivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
+; LINUX-A64-LABEL: udivrem_i128:
+; LINUX-A64: // %bb.0:
+; LINUX-A64-NEXT: sub sp, sp, #48
+; LINUX-A64-NEXT: str x30, [sp, #16] // 8-byte Spill
+; LINUX-A64-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill
+; LINUX-A64-NEXT: .cfi_def_cfa_offset 48
+; LINUX-A64-NEXT: .cfi_offset w19, -8
+; LINUX-A64-NEXT: .cfi_offset w20, -16
+; LINUX-A64-NEXT: .cfi_offset w30, -32
+; LINUX-A64-NEXT: mov x8, x4
+; LINUX-A64-NEXT: mov x19, x1
+; LINUX-A64-NEXT: mov x20, x0
+; LINUX-A64-NEXT: mov x4, sp
+; LINUX-A64-NEXT: mov x0, x2
+; LINUX-A64-NEXT: mov x1, x3
+; LINUX-A64-NEXT: mov x2, x8
+; LINUX-A64-NEXT: mov x3, x5
+; LINUX-A64-NEXT: bl __udivmodti4
+; LINUX-A64-NEXT: ldp x9, x8, [sp]
+; LINUX-A64-NEXT: stp x0, x1, [x20]
+; LINUX-A64-NEXT: ldr x30, [sp, #16] // 8-byte Reload
+; LINUX-A64-NEXT: stp x9, x8, [x19]
+; LINUX-A64-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload
+; LINUX-A64-NEXT: add sp, sp, #48
+; LINUX-A64-NEXT: ret
+;
+; DARWIN-A64-LABEL: udivrem_i128:
+; DARWIN-A64: ; %bb.0:
+; DARWIN-A64-NEXT: sub sp, sp, #48
+; DARWIN-A64-NEXT: stp x20, x19, [sp, #16] ; 16-byte Folded Spill
+; DARWIN-A64-NEXT: stp x29, x30, [sp, #32] ; 16-byte Folded Spill
+; DARWIN-A64-NEXT: .cfi_def_cfa_offset 48
+; DARWIN-A64-NEXT: .cfi_offset w30, -8
+; DARWIN-A64-NEXT: .cfi_offset w29, -16
+; DARWIN-A64-NEXT: .cfi_offset w19, -24
+; DARWIN-A64-NEXT: .cfi_offset w20, -32
+; DARWIN-A64-NEXT: mov x8, x4
+; DARWIN-A64-NEXT: mov x19, x1
+; DARWIN-A64-NEXT: mov x20, x0
+; DARWIN-A64-NEXT: mov x4, sp
+; DARWIN-A64-NEXT: mov x0, x2
+; DARWIN-A64-NEXT: mov x1, x3
+; DARWIN-A64-NEXT: mov x2, x8
+; DARWIN-A64-NEXT: mov x3, x5
+; DARWIN-A64-NEXT: bl ___udivmodti4
+; DARWIN-A64-NEXT: ldp x9, x8, [sp]
+; DARWIN-A64-NEXT: stp x0, x1, [x20]
+; DARWIN-A64-NEXT: ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
+; DARWIN-A64-NEXT: stp x9, x8, [x19]
+; DARWIN-A64-NEXT: ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
+; DARWIN-A64-NEXT: add sp, sp, #48
+; DARWIN-A64-NEXT: ret
+;
+; WATCHOS-LABEL: udivrem_i128:
+; WATCHOS: ; %bb.0:
+; WATCHOS-NEXT: stp x26, x25, [sp, #-80]! ; 16-byte Folded Spill
+; WATCHOS-NEXT: stp x24, x23, [sp, #16] ; 16-byte Folded Spill
+; WATCHOS-NEXT: stp x22, x21, [sp, #32] ; 16-byte Folded Spill
+; WATCHOS-NEXT: stp x20, x19, [sp, #48] ; 16-byte Folded Spill
+; WATCHOS-NEXT: stp x29, x30, [sp, #64] ; 16-byte Folded Spill
+; WATCHOS-NEXT: .cfi_def_cfa_offset 80
+; WATCHOS-NEXT: .cfi_offset w30, -8
+; WATCHOS-NEXT: .cfi_offset w29, -16
+; WATCHOS-NEXT: .cfi_offset w19, -24
+; WATCHOS-NEXT: .cfi_offset w20, -32
+; WATCHOS-NEXT: .cfi_offset w21, -40
+; WATCHOS-NEXT: .cfi_offset w22, -48
+; WATCHOS-NEXT: .cfi_offset w23, -56
+; WATCHOS-NEXT: .cfi_offset w24, -64
+; WATCHOS-NEXT: .cfi_offset w25, -72
+; WATCHOS-NEXT: .cfi_offset w26, -80
+; WATCHOS-NEXT: mov x21, x3
+; WATCHOS-NEXT: mov x22, x2
+; WATCHOS-NEXT: mov x23, x1
+; WATCHOS-NEXT: mov x24, x0
+; WATCHOS-NEXT: mov x0, x2
+; WATCHOS-NEXT: mov x1, x3
+; WATCHOS-NEXT: mov x2, x4
+; WATCHOS-NEXT: mov x3, x5
+; WATCHOS-NEXT: mov x19, x5
+; WATCHOS-NEXT: mov x20, x4
+; WATCHOS-NEXT: bl ___udivti3
+; WATCHOS-NEXT: mov x25, x0
+; WATCHOS-NEXT: mov x26, x1
+; WATCHOS-NEXT: mov x0, x22
+; WATCHOS-NEXT: mov x1, x21
+; WATCHOS-NEXT: mov x2, x20
+; WATCHOS-NEXT: mov x3, x19
+; WATCHOS-NEXT: bl ___umodti3
+; WATCHOS-NEXT: stp x25, x26, [x24]
+; WATCHOS-NEXT: ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
+; WATCHOS-NEXT: stp x0, x1, [x23]
+; WATCHOS-NEXT: ldp x20, x19, [sp, #48] ; 16-byte Folded Reload
+; WATCHOS-NEXT: ldp x22, x21, [sp, #32] ; 16-byte Folded Reload
+; WATCHOS-NEXT: ldp x24, x23, [sp, #16] ; 16-byte Folded Reload
+; WATCHOS-NEXT: ldp x26, x25, [sp], #80 ; 16-byte Folded Reload
+; WATCHOS-NEXT: ret
+ %q = udiv i128 %n, %d
+ %r = urem i128 %n, %d
+ store i128 %q, ptr %q_out
+ store i128 %r, ptr %r_out
+ ret void
+}
diff --git a/llvm/test/CodeGen/ARM/i128-divrem-libcall.ll b/llvm/test/CodeGen/ARM/i128-divrem-libcall.ll
new file mode 100644
index 0000000000000..af4fee06c24f6
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/i128-divrem-libcall.ll
@@ -0,0 +1,2414 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=armv6-linux-gnueabihf | FileCheck %s --check-prefix=ARMV6
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi | FileCheck %s --check-prefix=ARMV7
+; RUN: llc < %s -mtriple=armv7-none-eabi | FileCheck %s --check-prefix=ARMV7
+
+define void @sdivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
+; ARMV6-LABEL: sdivrem_i128:
+; ARMV6: @ %bb.0: @ %_udiv-special-cases_udiv-special-cases
+; ARMV6-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; ARMV6-NEXT: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; ARMV6-NEXT: .pad #252
+; ARMV6-NEXT: sub sp, sp, #252
+; ARMV6-NEXT: ldr r10, [sp, #292]
+; ARMV6-NEXT: str r1, [sp, #44] @ 4-byte Spill
+; ARMV6-NEXT: str r0, [sp, #40] @ 4-byte Spill
+; ARMV6-NEXT: ldr r0, [sp, #288]
+; ARMV6-NEXT: eor r1, r3, r10, asr #31
+; ARMV6-NEXT: eor r3, r2, r10, asr #31
+; ARMV6-NEXT: eor r2, r10, r10, asr #31
+; ARMV6-NEXT: subs r8, r3, r10, asr #31
+; ARMV6-NEXT: eor r0, r0, r10, asr #31
+; ARMV6-NEXT: sbcs r9, r1, r10, asr #31
+; ARMV6-NEXT: str r0, [sp, #36] @ 4-byte Spill
+; ARMV6-NEXT: sbcs lr, r0, r10, asr #31
+; ARMV6-NEXT: str r1, [sp, #32] @ 4-byte Spill
+; ARMV6-NEXT: sbc r11, r2, r10, asr #31
+; ARMV6-NEXT: clz r0, lr
+; ARMV6-NEXT: clz r1, r8
+; ARMV6-NEXT: add r0, r0, #32
+; ARMV6-NEXT: cmp r11, #0
+; ARMV6-NEXT: add r1, r1, #32
+; ARMV6-NEXT: clzne r0, r11
+; ARMV6-NEXT: cmp r9, #0
+; ARMV6-NEXT: str r2, [sp, #28] @ 4-byte Spill
+; ARMV6-NEXT: clzne r1, r9
+; ARMV6-NEXT: orrs r2, lr, r11
+; ARMV6-NEXT: ldr r6, [sp, #308]
+; ARMV6-NEXT: addeq r0, r1, #64
+; ARMV6-NEXT: ldr r1, [sp, #304]
+; ARMV6-NEXT: str r3, [sp, #24] @ 4-byte Spill
+; ARMV6-NEXT: eor r2, r1, r6, asr #31
+; ARMV6-NEXT: ldr r1, [sp, #300]
+; ARMV6-NEXT: str r2, [sp, #20] @ 4-byte Spill
+; ARMV6-NEXT: eor r3, r1, r6, asr #31
+; ARMV6-NEXT: ldr r1, [sp, #296]
+; ARMV6-NEXT: str r3, [sp, #52] @ 4-byte Spill
+; ARMV6-NEXT: eor r7, r1, r6, asr #31
+; ARMV6-NEXT: str r7, [sp, #48] @ 4-byte Spill
+; ARMV6-NEXT: subs r7, r7, r6, asr #31
+; ARMV6-NEXT: eor r1, r6, r6, asr #31
+; ARMV6-NEXT: sbcs r5, r3, r6, asr #31
+; ARMV6-NEXT: str r1, [sp, #16] @ 4-byte Spill
+; ARMV6-NEXT: sbcs r4, r2, r6, asr #31
+; ARMV6-NEXT: clz r2, r7
+; ARMV6-NEXT: sbc r12, r1, r6, asr #31
+; ARMV6-NEXT: clz r1, r4
+; ARMV6-NEXT: add r1, r1, #32
+; ARMV6-NEXT: cmp r12, #0
+; ARMV6-NEXT: clzne r1, r12
+; ARMV6-NEXT: add r2, r2, #32
+; ARMV6-NEXT: cmp r5, #0
+; ARMV6-NEXT: str r5, [sp, #80] @ 4-byte Spill
+; ARMV6-NEXT: clzne r2, r5
+; ARMV6-NEXT: orrs r3, r4, r12
+; ARMV6-NEXT: addeq r1, r2, #64
+; ARMV6-NEXT: mov r2, #0
+; ARMV6-NEXT: subs r0, r1, r0
+; ARMV6-NEXT: str r0, [sp, #104] @ 4-byte Spill
+; ARMV6-NEXT: sbcs r1, r2, #0
+; ARMV6-NEXT: str r1, [sp, #100] @ 4-byte Spill
+; ARMV6-NEXT: sbcs r3, r2, #0
+; ARMV6-NEXT: str r3, [sp, #112] @ 4-byte Spill
+; ARMV6-NEXT: sbc r2, r2, #0
+; ARMV6-NEXT: rsbs r0, r0, #127
+; ARMV6-NEXT: rscs r0, r1, #0
+; ARMV6-NEXT: str r2, [sp, #108] @ 4-byte Spill
+; ARMV6-NEXT: rscs r0, r3, #0
+; ARMV6-NEXT: orr r1, r9, r11
+; ARMV6-NEXT: rscs r0, r2, #0
+; ARMV6-NEXT: orr r2, r8, lr
+; ARMV6-NEXT: orr r1, r2, r1
+; ARMV6-NEXT: orr r2, r5, r12
+; ARMV6-NEXT: orr r3, r7, r4
+; ARMV6-NEXT: clz r1, r1
+; ARMV6-NEXT: orr r2, r3, r2
+; ARMV6-NEXT: mov r0, #0
+; ARMV6-NEXT: clz r2, r2
+; ARMV6-NEXT: lsr r1, r1, #5
+; ARMV6-NEXT: movlo r0, #1
+; ARMV6-NEXT: str r4, [sp, #76] @ 4-byte Spill
+; ARMV6-NEXT: lsr r2, r2, #5
+; ARMV6-NEXT: orr r1, r2, r1
+; ARMV6-NEXT: orrs r0, r1, r0
+; ARMV6-NEXT: mov r3, r11
+; ARMV6-NEXT: mov r4, lr
+; ARMV6-NEXT: mov r5, r9
+; ARMV6-NEXT: mov r1, r8
+; ARMV6-NEXT: movne r3, #0
+; ARMV6-NEXT: movne r4, #0
+; ARMV6-NEXT: movne r5, #0
+; ARMV6-NEXT: movne r8, #0
+; ARMV6-NEXT: str r7, [sp, #84] @ 4-byte Spill
+; ARMV6-NEXT: asr r7, r6, #31
+; ARMV6-NEXT: eor r0, r7, r10, asr #31
+; ARMV6-NEXT: str r12, [sp, #72] @ 4-byte Spill
+; ARMV6-NEXT: str r0, [sp, #12] @ 4-byte Spill
+; ARMV6-NEXT: bne .LBB0_6
+; ARMV6-NEXT: @ %bb.1: @ %_udiv-special-cases_udiv-special-cases
+; ARMV6-NEXT: ldr r0, [sp, #104] @ 4-byte Reload
+; ARMV6-NEXT: ldr r10, [sp, #100] @ 4-byte Reload
+; ARMV6-NEXT: eor r7, r0, #127
+; ARMV6-NEXT: ldr r0, [sp, #112] @ 4-byte Reload
+; ARMV6-NEXT: orr r7, r7, r0
+; ARMV6-NEXT: ldr r0, [sp, #108] @ 4-byte Reload
+; ARMV6-NEXT: orr r6, r10, r0
+; ARMV6-NEXT: ldr r0, [sp, #52] @ 4-byte Reload
+; ARMV6-NEXT: orrs r7, r7, r6
+; ARMV6-NEXT: ldr r6, [sp, #48] @ 4-byte Reload
+; ARMV6-NEXT: beq .LBB0_7
+; ARMV6-NEXT: @ %bb.2: @ %udiv-bb15
+; ARMV6-NEXT: mov r5, #0
+; ARMV6-NEXT: mov r2, r9
+; ARMV6-NEXT: str r1, [sp, #232]
+; ARMV6-NEXT: add r3, sp, #216
+; ARMV6-NEXT: str r5, [sp, #228]
+; ARMV6-NEXT: mov r9, r1
+; ARMV6-NEXT: str r5, [sp, #224]
+; ARMV6-NEXT: mov r1, #12
+; ARMV6-NEXT: str r5, [sp, #220]
+; ARMV6-NEXT: mov r8, r2
+; ARMV6-NEXT: str r5, [sp, #216]
+; ARMV6-NEXT: add r3, r3, #16
+; ARMV6-NEXT: str r2, [sp, #236]
+; ARMV6-NEXT: str lr, [sp, #240]
+; ARMV6-NEXT: str r11, [sp, #244]
+; ARMV6-NEXT: ldr r4, [sp, #104] @ 4-byte Reload
+; ARMV6-NEXT: rsb r0, r4, #127
+; ARMV6-NEXT: and r2, r1, r0, lsr #3
+; ARMV6-NEXT: and r0, r0, #31
+; ARMV6-NEXT: eor r12, r0, #31
+; ARMV6-NEXT: ldr r2, [r3, -r2]!
+; ARMV6-NEXT: ldr r6, [r3, #8]
+; ARMV6-NEXT: ldr r7, [r3, #4]
+; ARMV6-NEXT: ldr r3, [r3, #12]
+; ARMV6-NEXT: lsr r1, r6, #1
+; ARMV6-NEXT: lsl r3, r3, r0
+; ARMV6-NEXT: orr r1, r3, r1, lsr r12
+; ARMV6-NEXT: str r1, [sp, #116] @ 4-byte Spill
+; ARMV6-NEXT: lsl r1, r6, r0
+; ARMV6-NEXT: lsrs r3, r7, #1
+; ARMV6-NEXT: lsr r6, r2, #1
+; ARMV6-NEXT: orr r1, r1, r3, lsr r12
+; ARMV6-NEXT: str r1, [sp, #92] @ 4-byte Spill
+; ARMV6-NEXT: lsl r1, r7, r0
+; ARMV6-NEXT: lsl r0, r2, r0
+; ARMV6-NEXT: adds r7, r4, #1
+; ARMV6-NEXT: str r0, [sp, #96] @ 4-byte Spill
+; ARMV6-NEXT: adcs r0, r10, #0
+; ARMV6-NEXT: str r0, [sp, #100] @ 4-byte Spill
+; ARMV6-NEXT: ldr r0, [sp, #112] @ 4-byte Reload
+; ARMV6-NEXT: orr r1, r1, r6, lsr r12
+; ARMV6-NEXT: str r1, [sp, #88] @ 4-byte Spill
+; ARMV6-NEXT: adcs r12, r0, #0
+; ARMV6-NEXT: ldr r0, [sp, #108] @ 4-byte Reload
+; ARMV6-NEXT: adcs r0, r0, #0
+; ARMV6-NEXT: str r0, [sp, #104] @ 4-byte Spill
+; ARMV6-NEXT: adcs r0, r5, #0
+; ARMV6-NEXT: bne .LBB0_14
+; ARMV6-NEXT: @ %bb.3: @ %udiv-preheader4
+; ARMV6-NEXT: str r9, [sp, #184]
+; ARMV6-NEXT: mov r0, #12
+; ARMV6-NEXT: and r1, r0, r7, lsr #3
+; ARMV6-NEXT: add r9, sp, #184
+; ARMV6-NEXT: add r0, r9, r1
+; ARMV6-NEXT: str r5, [sp, #212]
+; ARMV6-NEXT: str r5, [sp, #208]
+; ARMV6-NEXT: and r4, r7, #31
+; ARMV6-NEXT: str r11, [sp, #196]
+; ARMV6-NEXT: eor r11, r4, #31
+; ARMV6-NEXT: str r5, [sp, #204]
+; ARMV6-NEXT: mov r6, #0
+; ARMV6-NEXT: str r5, [sp, #200]
+; ARMV6-NEXT: str r8, [sp, #188]
+; ARMV6-NEXT: str lr, [sp, #192]
+; ARMV6-NEXT: ldr r5, [r0, #4]
+; ARMV6-NEXT: ldr r2, [r0, #8]
+; ARMV6-NEXT: ldr r3, [r0, #12]
+; ARMV6-NEXT: str r7, [sp, #112] @ 4-byte Spill
+; ARMV6-NEXT: ldr r1, [r9, r1]
+; ARMV6-NEXT: lsr r0, r2, r4
+; ARMV6-NEXT: lsl r7, r3, #1
+; ARMV6-NEXT: orr r0, r0, r7, lsl r11
+; ARMV6-NEXT: lsr r7, r5, r4
+; ARMV6-NEXT: lsl r5, r5, #1
+; ARMV6-NEXT: lsl r2, r2, #1
+; ARMV6-NEXT: lsr r1, r1, r4
+; ARMV6-NEXT: orr r8, r1, r5, lsl r11
+; ARMV6-NEXT: ldr r1, [sp, #84] @ 4-byte Reload
+; ARMV6-NEXT: orr r2, r7, r2, lsl r11
+; ARMV6-NEXT: ldr r11, [sp, #116] @ 4-byte Reload
+; ARMV6-NEXT: subs r1, r1, #1
+; ARMV6-NEXT: str r1, [sp, #68] @ 4-byte Spill
+; ARMV6-NEXT: ldr r1, [sp, #80] @ 4-byte Reload
+; ARMV6-NEXT: ldr lr, [sp, #100] @ 4-byte Reload
+; ARMV6-NEXT: sbcs r1, r1, #0
+; ARMV6-NEXT: str r1, [sp, #64] @ 4-byte Spill
+; ARMV6-NEXT: ldr r1, [sp, #76] @ 4-byte Reload
+; ARMV6-NEXT: ldr r7, [sp, #96] @ 4-byte Reload
+; ARMV6-NEXT: sbcs r1, r1, #0
+; ARMV6-NEXT: str r1, [sp, #60] @ 4-byte Spill
+; ARMV6-NEXT: ldr r1, [sp, #72] @ 4-byte Reload
+; ARMV6-NEXT: ldr r10, [sp, #92] @ 4-byte Reload
+; ARMV6-NEXT: sbc r1, r1, #0
+; ARMV6-NEXT: str r1, [sp, #56] @ 4-byte Spill
+; ARMV6-NEXT: lsr r1, r3, r4
+; ARMV6-NEXT: ldr r4, [sp, #104] @ 4-byte Reload
+; ARMV6-NEXT: ldr r9, [sp, #88] @ 4-byte Reload
+; ARMV6-NEXT: str r1, [sp, #108] @ 4-byte Spill
+; ARMV6-NEXT: mov r1, #0
+; ARMV6-NEXT: .LBB0_4: @ %udiv-do-while3
+; ARMV6-NEXT: @ =>This Inner Loop Header: Depth=1
+; ARMV6-NEXT: mov r5, r1
+; ARMV6-NEXT: ldr r1, [sp, #108] @ 4-byte Reload
+; ARMV6-NEXT: add r3, sp, #92
+; ARMV6-NEXT: str r11, [sp, #116] @ 4-byte Spill
+; ARMV6-NEXT: stm r3, {r6, r7, r9, r10} @ 16-byte Folded Spill
+; ARMV6-NEXT: lsl r7, r8, #1
+; ARMV6-NEXT: lsl r1, r1, #1
+; ARMV6-NEXT: ldr r3, [sp, #68] @ 4-byte Reload
+; ARMV6-NEXT: orr r7, r7, r11, lsr #31
+; ARMV6-NEXT: orr r1, r1, r0, lsr #31
+; ARMV6-NEXT: lsl r0, r0, #1
+; ARMV6-NEXT: subs r3, r3, r7
+; ARMV6-NEXT: orr r0, r0, r2, lsr #31
+; ARMV6-NEXT: lsl r2, r2, #1
+; ARMV6-NEXT: ldr r3, [sp, #64] @ 4-byte Reload
+; ARMV6-NEXT: orr r2, r2, r8, lsr #31
+; ARMV6-NEXT: ldr r6, [sp, #72] @ 4-byte Reload
+; ARMV6-NEXT: sbcs r3, r3, r2
+; ARMV6-NEXT: ldr r3, [sp, #60] @ 4-byte Reload
+; ARMV6-NEXT: sbcs r3, r3, r0
+; ARMV6-NEXT: ldr r3, [sp, #56] @ 4-byte Reload
+; ARMV6-NEXT: sbc r3, r3, r1
+; ARMV6-NEXT: and r10, r6, r3, asr #31
+; ARMV6-NEXT: ldr r6, [sp, #76] @ 4-byte Reload
+; ARMV6-NEXT: and r8, r6, r3, asr #31
+; ARMV6-NEXT: ldr r6, [sp, #80] @ 4-byte Reload
+; ARMV6-NEXT: and r9, r6, r3, asr #31
+; ARMV6-NEXT: ldr r6, [sp, #84] @ 4-byte Reload
+; ARMV6-NEXT: and r11, r6, r3, asr #31
+; ARMV6-NEXT: subs r6, r7, r11
+; ARMV6-NEXT: str r6, [sp, #88] @ 4-byte Spill
+; ARMV6-NEXT: sbcs r2, r2, r9
+; ARMV6-NEXT: sbcs r11, r0, r8
+; ARMV6-NEXT: ldr r8, [sp, #88] @ 4-byte Reload
+; ARMV6-NEXT: sbc r0, r1, r10
+; ARMV6-NEXT: mov r1, #1
+; ARMV6-NEXT: and r6, r1, r3, asr #31
+; ARMV6-NEXT: ldr r1, [sp, #112] @ 4-byte Reload
+; ARMV6-NEXT: str r0, [sp, #108] @ 4-byte Spill
+; ARMV6-NEXT: subs r1, r1, #1
+; ARMV6-NEXT: str r1, [sp, #112] @ 4-byte Spill
+; ARMV6-NEXT: sbcs lr, lr, #0
+; ARMV6-NEXT: ldr r0, [sp, #96] @ 4-byte Reload
+; ARMV6-NEXT: sbcs r12, r12, #0
+; ARMV6-NEXT: sbc r4, r4, #0
+; ARMV6-NEXT: orr r1, r1, r12
+; ARMV6-NEXT: orr r3, lr, r4
+; ARMV6-NEXT: orrs r1, r1, r3
+; ARMV6-NEXT: ldr r3, [sp, #100] @ 4-byte Reload
+; ARMV6-NEXT: ldr r1, [sp, #92] @ 4-byte Reload
+; ARMV6-NEXT: orr r7, r1, r0, lsl #1
+; ARMV6-NEXT: lsl r1, r3, #1
+; ARMV6-NEXT: orr r1, r1, r0, lsr #31
+; ARMV6-NEXT: ldr r0, [sp, #104] @ 4-byte Reload
+; ARMV6-NEXT: orr r9, r5, r1
+; ARMV6-NEXT: lsl r1, r0, #1
+; ARMV6-NEXT: orr r1, r1, r3, lsr #31
+; ARMV6-NEXT: orr r10, r5, r1
+; ARMV6-NEXT: ldr r1, [sp, #116] @ 4-byte Reload
+; ARMV6-NEXT: lsl r1, r1, #1
+; ARMV6-NEXT: orr r1, r1, r0, lsr #31
+; ARMV6-NEXT: mov r0, r11
+; ARMV6-NEXT: orr r11, r5, r1
+; ARMV6-NEXT: mov r1, #0
+; ARMV6-NEXT: bne .LBB0_4
+; ARMV6-NEXT: .LBB0_5: @ %udiv-loop-exit2
+; ARMV6-NEXT: lsl r1, r9, #1
+; ARMV6-NEXT: orr r5, r1, r7, lsr #31
+; ARMV6-NEXT: lsl r1, r10, #1
+; ARMV6-NEXT: orr r4, r1, r9, lsr #31
+; ARMV6-NEXT: lsl r1, r11, #1
+; ARMV6-NEXT: orr r8, r6, r7, lsl #1
+; ARMV6-NEXT: orr r3, r1, r10, lsr #31
+; ARMV6-NEXT: .LBB0_6:
+; ARMV6-NEXT: ldr r0, [sp, #52] @ 4-byte Reload
+; ARMV6-NEXT: ldr r6, [sp, #48] @ 4-byte Reload
+; ARMV6-NEXT: .LBB0_7: @ %udiv-end1
+; ARMV6-NEXT: ldr r2, [sp, #292]
+; ARMV6-NEXT: ldr r1, [sp, #24] @ 4-byte Reload
+; ARMV6-NEXT: str r5, [sp, #76] @ 4-byte Spill
+; ARMV6-NEXT: subs r10, r1, r2, asr #31
+; ARMV6-NEXT: ldr r1, [sp, #32] @ 4-byte Reload
+; ARMV6-NEXT: str r3, [sp, #96] @ 4-byte Spill
+; ARMV6-NEXT: sbcs r11, r1, r2, asr #31
+; ARMV6-NEXT: ldr r1, [sp, #36] @ 4-byte Reload
+; ARMV6-NEXT: str r8, [sp, #116] @ 4-byte Spill
+; ARMV6-NEXT: sbcs r5, r1, r2, asr #31
+; ARMV6-NEXT: ldr r1, [sp, #28] @ 4-byte Reload
+; ARMV6-NEXT: str r4, [sp, #100] @ 4-byte Spill
+; ARMV6-NEXT: sbc r1, r1, r2, asr #31
+; ARMV6-NEXT: clz r2, r5
+; ARMV6-NEXT: add r7, r2, #32
+; ARMV6-NEXT: clz r2, r10
+; ARMV6-NEXT: cmp r1, #0
+; ARMV6-NEXT: add r2, r2, #32
+; ARMV6-NEXT: clzne r7, r1
+; ARMV6-NEXT: cmp r11, #0
+; ARMV6-NEXT: clzne r2, r11
+; ARMV6-NEXT: orrs r3, r5, r1
+; ARMV6-NEXT: addeq r7, r2, #64
+; ARMV6-NEXT: ldr r2, [sp, #308]
+; ARMV6-NEXT: ldr r3, [sp, #20] @ 4-byte Reload
+; ARMV6-NEXT: str r11, [sp, #60] @ 4-byte Spill
+; ARMV6-NEXT: subs lr, r6, r2, asr #31
+; ARMV6-NEXT: str r5, [sp, #52] @ 4-byte Spill
+; ARMV6-NEXT: sbcs r9, r0, r2, asr #31
+; ARMV6-NEXT: ldr r0, [sp, #16] @ 4-byte Reload
+; ARMV6-NEXT: sbcs r8, r3, r2, asr #31
+; ARMV6-NEXT: clz r3, lr
+; ARMV6-NEXT: sbc r0, r0, r2, asr #31
+; ARMV6-NEXT: clz r2, r8
+; ARMV6-NEXT: add r2, r2, #32
+; ARMV6-NEXT: cmp r0, #0
+; ARMV6-NEXT: clzne r2, r0
+; ARMV6-NEXT: add r3, r3, #32
+; ARMV6-NEXT: cmp r9, #0
+; ARMV6-NEXT: str r0, [sp, #80] @ 4-byte Spill
+; ARMV6-NEXT: clzne r3, r9
+; ARMV6-NEXT: orrs r6, r8, r0
+; ARMV6-NEXT: addeq r2, r3, #64
+; ARMV6-NEXT: mov r6, #0
+; ARMV6-NEXT: subs r2, r2, r7
+; ARMV6-NEXT: str r9, [sp, #88] @ 4-byte Spill
+; ARMV6-NEXT: sbcs r4, r6, #0
+; ARMV6-NEXT: str r4, [sp, #108] @ 4-byte Spill
+; ARMV6-NEXT: sbcs r7, r6, #0
+; ARMV6-NEXT: mov r12, r2
+; ARMV6-NEXT: sbc r3, r6, #0
+; ARMV6-NEXT: rsbs r2, r2, #127
+; ARMV6-NEXT: rscs r2, r4, #0
+; ARMV6-NEXT: str r3, [sp, #112] @ 4-byte Spill
+; ARMV6-NEXT: rscs r2, r7, #0
+; ARMV6-NEXT: orr r4, lr, r8
+; ARMV6-NEXT: rscs r2, r3, #0
+; ARMV6-NEXT: orr r3, r10, r5
+; ARMV6-NEXT: orr r2, r11, r1
+; ARMV6-NEXT: str r7, [sp, #104] @ 4-byte Spill
+; ARMV6-NEXT: orr r2, r3, r2
+; ARMV6-NEXT: orr r3, r9, r0
+; ARMV6-NEXT: orr r3, r4, r3
+; ARMV6-NEXT: clz r2, r2
+; ARMV6-NEXT: clz r3, r3
+; ARMV6-NEXT: ldr r7, [sp, #12] @ 4-byte Reload
+; ARMV6-NEXT: ldr r4, [sp, #116] @ 4-byte Reload
+; ARMV6-NEXT: lsr r2, r2, #5
+; ARMV6-NEXT: lsr r3, r3, #5
+; ARMV6-NEXT: ldr r0, [sp, #96] @ 4-byte Reload
+; ARMV6-NEXT: orr r2, r3, r2
+; ARMV6-NEXT: ldr r3, [sp, #76] @ 4-byte Reload
+; ARMV6-NEXT: movlo r6, #1
+; ARMV6-NEXT: eor r4, r4, r7
+; ARMV6-NEXT: orr r2, r2, r6
+; ARMV6-NEXT: eor r6, r0, r7
+; ARMV6-NEXT: ldr r0, [sp, #100] @ 4-byte Reload
+; ARMV6-NEXT: subs r4, r4, r7
+; ARMV6-NEXT: eor r3, r3, r7
+; ARMV6-NEXT: mov r9, r10
+; ARMV6-NEXT: sbcs r3, r3, r7
+; ARMV6-NEXT: eor r0, r0, r7
+; ARMV6-NEXT: sbcs r0, r0, r7
+; ARMV6-NEXT: str r0, [sp, #28] @ 4-byte Spill
+; ARMV6-NEXT: sbc r0, r6, r7
+; ARMV6-NEXT: str r0, [sp, #24] @ 4-byte Spill
+; ARMV6-NEXT: ldr r0, [sp, #292]
+; ARMV6-NEXT: cmp r2, #0
+; ARMV6-NEXT: str r4, [sp, #36] @ 4-byte Spill
+; ARMV6-NEXT: mov r4, r11
+; ARMV6-NEXT: mov r10, r1
+; ARMV6-NEXT: mov r11, r5
+; ARMV6-NEXT: mov r6, r9
+; ARMV6-NEXT: str r3, [sp, #32] @ 4-byte Spill
+; ARMV6-NEXT: mov r3, r4
+; ARMV6-NEXT: asr r0, r0, #31
+; ARMV6-NEXT: movne r10, #0
+; ARMV6-NEXT: movne r11, #0
+; ARMV6-NEXT: movne r3, #0
+; ARMV6-NEXT: movne r6, #0
+; ARMV6-NEXT: str r1, [sp, #56] @ 4-byte Spill
+; ARMV6-NEXT: str r8, [sp, #84] @ 4-byte Spill
+; ARMV6-NEXT: str r0, [sp, #48] @ 4-byte Spill
+; ARMV6-NEXT: str lr, [sp, #92] @ 4-byte Spill
+; ARMV6-NEXT: bne .LBB0_13
+; ARMV6-NEXT: @ %bb.8: @ %udiv-end1
+; ARMV6-NEXT: ldr r0, [sp, #104] @ 4-byte Reload
+; ARMV6-NEXT: eor r1, r12, #127
+; ARMV6-NEXT: ldr r2, [sp, #108] @ 4-byte Reload
+; ARMV6-NEXT: orr r1, r1, r0
+; ARMV6-NEXT: ldr r0, [sp, #112] @ 4-byte Reload
+; ARMV6-NEXT: orr r5, r2, r0
+; ARMV6-NEXT: orrs r1, r1, r5
+; ARMV6-NEXT: beq .LBB0_13
+; ARMV6-NEXT: @ %bb.9: @ %udiv-bb1
+; ARMV6-NEXT: mov r10, #0
+; ARMV6-NEXT: str r9, [sp, #168]
+; ARMV6-NEXT: str r10, [sp, #164]
+; ARMV6-NEXT: add r3, sp, #152
+; ARMV6-NEXT: str r10, [sp, #160]
+; ARMV6-NEXT: mov r1, #12
+; ARMV6-NEXT: str r10, [sp, #156]
+; ARMV6-NEXT: add r3, r3, #16
+; ARMV6-NEXT: str r10, [sp, #152]
+; ARMV6-NEXT: ldr r0, [sp, #60] @ 4-byte Reload
+; ARMV6-NEXT: str r0, [sp, #172]
+; ARMV6-NEXT: ldr r8, [sp, #52] @ 4-byte Reload
+; ARMV6-NEXT: str r8, [sp, #176]
+; ARMV6-NEXT: ldr r0, [sp, #56] @ 4-byte Reload
+; ARMV6-NEXT: str r0, [sp, #180]
+; ARMV6-NEXT: rsb r0, r12, #127
+; ARMV6-NEXT: and r1, r1, r0, lsr #3
+; ARMV6-NEXT: and r0, r0, #31
+; ARMV6-NEXT: eor r2, r0, #31
+; ARMV6-NEXT: ldr r6, [r3, -r1]!
+; ARMV6-NEXT: ldmib r3, {r1, r5}
+; ARMV6-NEXT: lsr r4, r5, #1
+; ARMV6-NEXT: lsl r7, r6, r0
+; ARMV6-NEXT: ldr r3, [r3, #12]
+; ARMV6-NEXT: str r9, [sp, #8] @ 4-byte Spill
+; ARMV6-NEXT: lsl r3, r3, r0
+; ARMV6-NEXT: orr r3, r3, r4, lsr r2
+; ARMV6-NEXT: str r3, [sp, #116] @ 4-byte Spill
+; ARMV6-NEXT: lsl r3, r5, r0
+; ARMV6-NEXT: lsrs r5, r1, #1
+; ARMV6-NEXT: orr r4, r3, r5, lsr r2
+; ARMV6-NEXT: lsl r1, r1, r0
+; ARMV6-NEXT: lsr r3, r6, #1
+; ARMV6-NEXT: adds r0, r12, #1
+; ARMV6-NEXT: orr r11, r1, r3, lsr r2
+; ARMV6-NEXT: ldr r1, [sp, #108] @ 4-byte Reload
+; ARMV6-NEXT: adcs r1, r1, #0
+; ARMV6-NEXT: str r1, [sp, #100] @ 4-byte Spill
+; ARMV6-NEXT: ldr r1, [sp, #104] @ 4-byte Reload
+; ARMV6-NEXT: adcs lr, r1, #0
+; ARMV6-NEXT: ldr r1, [sp, #112] @ 4-byte Reload
+; ARMV6-NEXT: adcs r1, r1, #0
+; ARMV6-NEXT: str r1, [sp, #108] @ 4-byte Spill
+; ARMV6-NEXT: adcs r2, r10, #0
+; ARMV6-NEXT: bne .LBB0_15
+; ARMV6-NEXT: @ %bb.10: @ %udiv-preheader
+; ARMV6-NEXT: str r9, [sp, #120]
+; ARMV6-NEXT: add r9, sp, #120
+; ARMV6-NEXT: str r10, [sp, #148]
+; ARMV6-NEXT: str r10, [sp, #144]
+; ARMV6-NEXT: str r10, [sp, #140]
+; ARMV6-NEXT: str r10, [sp, #136]
+; ARMV6-NEXT: ldr r1, [sp, #60] @ 4-byte Reload
+; ARMV6-NEXT: str r8, [sp, #128]
+; ARMV6-NEXT: and r8, r0, #31
+; ARMV6-NEXT: str r1, [sp, #124]
+; ARMV6-NEXT: ldr r1, [sp, #56] @ 4-byte Reload
+; ARMV6-NEXT: str r1, [sp, #132]
+; ARMV6-NEXT: mov r1, #12
+; ARMV6-NEXT: and r2, r1, r0, lsr #3
+; ARMV6-NEXT: add r5, r9, r2
+; ARMV6-NEXT: ldmib r5, {r1, r3, r10}
+; ARMV6-NEXT: lsr r5, r3, r8
+; ARMV6-NEXT: lsl r6, r10, #1
+; ARMV6-NEXT: str r0, [sp, #112] @ 4-byte Spill
+; ARMV6-NEXT: eor r0, r8, #31
+; ARMV6-NEXT: ldr r2, [r9, r2]
+; ARMV6-NEXT: lsl r3, r3, #1
+; ARMV6-NEXT: orr r6, r5, r6, lsl r0
+; ARMV6-NEXT: lsr r5, r1, r8
+; ARMV6-NEXT: orr r12, r5, r3, lsl r0
+; ARMV6-NEXT: lsl r3, r1, #1
+; ARMV6-NEXT: lsr r10, r10, r8
+; ARMV6-NEXT: lsr r2, r2, r8
+; ARMV6-NEXT: orr r5, r2, r3, lsl r0
+; ARMV6-NEXT: ldr r0, [sp, #92] @ 4-byte Reload
+; ARMV6-NEXT: mov r9, #0
+; ARMV6-NEXT: ldr r2, [sp, #80] @ 4-byte Reload
+; ARMV6-NEXT: subs r0, r0, #1
+; ARMV6-NEXT: str r0, [sp, #76] @ 4-byte Spill
+; ARMV6-NEXT: ldr r0, [sp, #88] @ 4-byte Reload
+; ARMV6-NEXT: ldr r1, [sp, #116] @ 4-byte Reload
+; ARMV6-NEXT: sbcs r0, r0, #0
+; ARMV6-NEXT: str r0, [sp, #72] @ 4-byte Spill
+; ARMV6-NEXT: ldr r0, [sp, #84] @ 4-byte Reload
+; ARMV6-NEXT: ldr r8, [sp, #100] @ 4-byte Reload
+; ARMV6-NEXT: sbcs r0, r0, #0
+; ARMV6-NEXT: str r0, [sp, #68] @ 4-byte Spill
+; ARMV6-NEXT: sbc r0, r2, #0
+; ARMV6-NEXT: str r0, [sp, #64] @ 4-byte Spill
+; ARMV6-NEXT: ldr r0, [sp, #108] @ 4-byte Reload
+; ARMV6-NEXT: mov r2, #0
+; ARMV6-NEXT: .LBB0_11: @ %udiv-do-while
+; ARMV6-NEXT: @ =>This Inner Loop Header: Depth=1
+; ARMV6-NEXT: add r3, sp, #96
+; ARMV6-NEXT: str r4, [sp, #108] @ 4-byte Spill
+; ARMV6-NEXT: stm r3, {r2, r7, r11} @ 12-byte Folded Spill
+; ARMV6-NEXT: lsl r2, r10, #1
+; ARMV6-NEXT: lsl r3, r6, #1
+; ARMV6-NEXT: orr r2, r2, r6, lsr #31
+; ARMV6-NEXT: lsl r6, r12, #1
+; ARMV6-NEXT: orr r6, r6, r5, lsr #31
+; ARMV6-NEXT: lsl r5, r5, #1
+; ARMV6-NEXT: ldr r4, [sp, #76] @ 4-byte Reload
+; ARMV6-NEXT: orr r5, r5, r1, lsr #31
+; ARMV6-NEXT: orr r3, r3, r12, lsr #31
+; ARMV6-NEXT: ldr r7, [sp, #64] @ 4-byte Reload
+; ARMV6-NEXT: subs r4, r4, r5
+; ARMV6-NEXT: str r1, [sp, #116] @ 4-byte Spill
+; ARMV6-NEXT: ldr r4, [sp, #72] @ 4-byte Reload
+; ARMV6-NEXT: ldr r1, [sp, #84] @ 4-byte Reload
+; ARMV6-NEXT: sbcs r4, r4, r6
+; ARMV6-NEXT: ldr r4, [sp, #68] @ 4-byte Reload
+; ARMV6-NEXT: sbcs r4, r4, r3
+; ARMV6-NEXT: sbc r4, r7, r2
+; ARMV6-NEXT: ldr r7, [sp, #80] @ 4-byte Reload
+; ARMV6-NEXT: and r10, r7, r4, asr #31
+; ARMV6-NEXT: and r7, r1, r4, asr #31
+; ARMV6-NEXT: ldr r1, [sp, #88] @ 4-byte Reload
+; ARMV6-NEXT: and r11, r1, r4, asr #31
+; ARMV6-NEXT: ldr r1, [sp, #92] @ 4-byte Reload
+; ARMV6-NEXT: and r12, r1, r4, asr #31
+; ARMV6-NEXT: mov r1, #1
+; ARMV6-NEXT: subs r5, r5, r12
+; ARMV6-NEXT: sbcs r12, r6, r11
+; ARMV6-NEXT: sbcs r6, r3, r7
+; ARMV6-NEXT: sbc r10, r2, r10
+; ARMV6-NEXT: and r2, r1, r4, asr #31
+; ARMV6-NEXT: ldr r1, [sp, #112] @ 4-byte Reload
+; ARMV6-NEXT: subs r1, r1, #1
+; ARMV6-NEXT: str r1, [sp, #112] @ 4-byte Spill
+; ARMV6-NEXT: sbcs r8, r8, #0
+; ARMV6-NEXT: sbcs lr, lr, #0
+; ARMV6-NEXT: sbc r0, r0, #0
+; ARMV6-NEXT: orr r4, r1, lr
+; ARMV6-NEXT: orr r3, r8, r0
+; ARMV6-NEXT: ldr r1, [sp, #100] @ 4-byte Reload
+; ARMV6-NEXT: orrs r3, r4, r3
+; ARMV6-NEXT: ldr r3, [sp, #96] @ 4-byte Reload
+; ARMV6-NEXT: orr r7, r3, r1, lsl #1
+; ARMV6-NEXT: ldr r3, [sp, #104] @ 4-byte Reload
+; ARMV6-NEXT: lsl r4, r3, #1
+; ARMV6-NEXT: orr r1, r4, r1, lsr #31
+; ARMV6-NEXT: orr r11, r9, r1
+; ARMV6-NEXT: ldr r1, [sp, #108] @ 4-byte Reload
+; ARMV6-NEXT: lsl r4, r1, #1
+; ARMV6-NEXT: orr r4, r4, r3, lsr #31
+; ARMV6-NEXT: ldr r3, [sp, #116] @ 4-byte Reload
+; ARMV6-NEXT: orr r4, r9, r4
+; ARMV6-NEXT: lsl r3, r3, #1
+; ARMV6-NEXT: orr r3, r3, r1, lsr #31
+; ARMV6-NEXT: orr r1, r9, r3
+; ARMV6-NEXT: mov r9, #0
+; ARMV6-NEXT: bne .LBB0_11
+; ARMV6-NEXT: .LBB0_12: @ %udiv-loop-exit
+; ARMV6-NEXT: lsl r0, r11, #1
+; ARMV6-NEXT: orr r3, r0, r7, lsr #31
+; ARMV6-NEXT: lsl r0, r4, #1
+; ARMV6-NEXT: ldr r9, [sp, #8] @ 4-byte Reload
+; ARMV6-NEXT: orr r11, r0, r11, lsr #31
+; ARMV6-NEXT: lsl r0, r1, #1
+; ARMV6-NEXT: orr r6, r2, r7, lsl #1
+; ARMV6-NEXT: orr r10, r0, r4, lsr #31
+; ARMV6-NEXT: .LBB0_13: @ %udiv-end
+; ARMV6-NEXT: ldr r1, [sp, #40] @ 4-byte Reload
+; ARMV6-NEXT: mov r4, #0
+; ARMV6-NEXT: ldr r0, [sp, #36] @ 4-byte Reload
+; ARMV6-NEXT: mov r12, #0
+; ARMV6-NEXT: ldr r2, [sp, #88] @ 4-byte Reload
+; ARMV6-NEXT: str r0, [r1]
+; ARMV6-NEXT: ldr r0, [sp, #32] @ 4-byte Reload
+; ARMV6-NEXT: str r0, [r1, #4]
+; ARMV6-NEXT: ldr r0, [sp, #28] @ 4-byte Reload
+; ARMV6-NEXT: str r0, [r1, #8]
+; ARMV6-NEXT: ldr r0, [sp, #24] @ 4-byte Reload
+; ARMV6-NEXT: str r0, [r1, #12]
+; ARMV6-NEXT: ldr r0, [sp, #92] @ 4-byte Reload
+; ARMV6-NEXT: ldr lr, [sp, #80] @ 4-byte Reload
+; ARMV6-NEXT: umull r1, r7, r0, r6
+; ARMV6-NEXT: umlal r7, r4, r2, r6
+; ARMV6-NEXT: str r1, [sp, #116] @ 4-byte Spill
+; ARMV6-NEXT: umull r5, r1, r0, r3
+; ARMV6-NEXT: adds r8, r5, r7
+; ARMV6-NEXT: adcs r1, r4, r1
+; ARMV6-NEXT: ldr r4, [sp, #84] @ 4-byte Reload
+; ARMV6-NEXT: adc r7, r12, #0
+; ARMV6-NEXT: umlal r1, r7, r2, r3
+; ARMV6-NEXT: umull r12, r5, r6, r4
+; ARMV6-NEXT: mla r6, r6, lr, r5
+; ARMV6-NEXT: mla r3, r3, r4, r6
+; ARMV6-NEXT: umull r6, r5, r11, r0
+; ARMV6-NEXT: mla r2, r11, r2, r5
+; ARMV6-NEXT: mla r0, r10, r0, r2
+; ARMV6-NEXT: adds r2, r6, r12
+; ARMV6-NEXT: adc r0, r0, r3
+; ARMV6-NEXT: adds r1, r1, r2
+; ARMV6-NEXT: ldr r2, [sp, #116] @ 4-byte Reload
+; ARMV6-NEXT: adc r0, r7, r0
+; ARMV6-NEXT: ldr r3, [sp, #60] @ 4-byte Reload
+; ARMV6-NEXT: subs r2, r9, r2
+; ARMV6-NEXT: ldr r7, [sp, #52] @ 4-byte Reload
+; ARMV6-NEXT: sbcs r3, r3, r8
+; ARMV6-NEXT: sbcs r1, r7, r1
+; ARMV6-NEXT: ldr r7, [sp, #56] @ 4-byte Reload
+; ARMV6-NEXT: sbc r0, r7, r0
+; ARMV6-NEXT: ldr r7, [sp, #48] @ 4-byte Reload
+; ARMV6-NEXT: eor r2, r2, r7
+; ARMV6-NEXT: eor r3, r3, r7
+; ARMV6-NEXT: subs r2, r2, r7
+; ARMV6-NEXT: eor r1, r1, r7
+; ARMV6-NEXT: sbcs r3, r3, r7
+; ARMV6-NEXT: eor r0, r0, r7
+; ARMV6-NEXT: sbcs r1, r1, r7
+; ARMV6-NEXT: sbc r0, r0, r7
+; ARMV6-NEXT: ldr r7, [sp, #44] @ 4-byte Reload
+; ARMV6-NEXT: stm r7, {r2, r3}
+; ARMV6-NEXT: str r1, [r7, #8]
+; ARMV6-NEXT: str r0, [r7, #12]
+; ARMV6-NEXT: add sp, sp, #252
+; ARMV6-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+; ARMV6-NEXT: .LBB0_14:
+; ARMV6-NEXT: ldr r11, [sp, #116] @ 4-byte Reload
+; ARMV6-NEXT: mov r6, #0
+; ARMV6-NEXT: ldr r7, [sp, #96] @ 4-byte Reload
+; ARMV6-NEXT: ldr r10, [sp, #92] @ 4-byte Reload
+; ARMV6-NEXT: ldr r9, [sp, #88] @ 4-byte Reload
+; ARMV6-NEXT: b .LBB0_5
+; ARMV6-NEXT: .LBB0_15:
+; ARMV6-NEXT: ldr r1, [sp, #116] @ 4-byte Reload
+; ARMV6-NEXT: mov r2, #0
+; ARMV6-NEXT: b .LBB0_12
+;
+; ARMV7-LABEL: sdivrem_i128:
+; ARMV7: @ %bb.0: @ %_udiv-special-cases_udiv-special-cases
+; ARMV7-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; ARMV7-NEXT: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; ARMV7-NEXT: .pad #244
+; ARMV7-NEXT: sub sp, sp, #244
+; ARMV7-NEXT: ldr r10, [sp, #284]
+; ARMV7-NEXT: str r0, [sp, #28] @ 4-byte Spill
+; ARMV7-NEXT: ldr r0, [sp, #280]
+; ARMV7-NEXT: eor r2, r2, r10, asr #31
+; ARMV7-NEXT: str r1, [sp, #32] @ 4-byte Spill
+; ARMV7-NEXT: eor r1, r3, r10, asr #31
+; ARMV7-NEXT: str r2, [sp, #44] @ 4-byte Spill
+; ARMV7-NEXT: subs r2, r2, r10, asr #31
+; ARMV7-NEXT: eor r0, r0, r10, asr #31
+; ARMV7-NEXT: sbcs r11, r1, r10, asr #31
+; ARMV7-NEXT: str r1, [sp, #36] @ 4-byte Spill
+; ARMV7-NEXT: eor r1, r10, r10, asr #31
+; ARMV7-NEXT: sbcs r9, r0, r10, asr #31
+; ARMV7-NEXT: sbc r12, r1, r10, asr #31
+; ARMV7-NEXT: str r0, [sp, #40] @ 4-byte Spill
+; ARMV7-NEXT: clz r0, r9
+; ARMV7-NEXT: str r1, [sp, #24] @ 4-byte Spill
+; ARMV7-NEXT: clz r1, r2
+; ARMV7-NEXT: add r0, r0, #32
+; ARMV7-NEXT: cmp r12, #0
+; ARMV7-NEXT: add r1, r1, #32
+; ARMV7-NEXT: clzne r0, r12
+; ARMV7-NEXT: cmp r11, #0
+; ARMV7-NEXT: ldr r6, [sp, #300]
+; ARMV7-NEXT: clzne r1, r11
+; ARMV7-NEXT: ldr r3, [sp, #288]
+; ARMV7-NEXT: str r2, [sp, #88] @ 4-byte Spill
+; ARMV7-NEXT: orrs r2, r9, r12
+; ARMV7-NEXT: ldr r2, [sp, #292]
+; ARMV7-NEXT: addeq r0, r1, #64
+; ARMV7-NEXT: ldr r1, [sp, #296]
+; ARMV7-NEXT: eor r3, r3, r6, asr #31
+; ARMV7-NEXT: eor r2, r2, r6, asr #31
+; ARMV7-NEXT: subs lr, r3, r6, asr #31
+; ARMV7-NEXT: eor r1, r1, r6, asr #31
+; ARMV7-NEXT: sbcs r4, r2, r6, asr #31
+; ARMV7-NEXT: str r2, [sp, #12] @ 4-byte Spill
+; ARMV7-NEXT: eor r2, r6, r6, asr #31
+; ARMV7-NEXT: sbcs r5, r1, r6, asr #31
+; ARMV7-NEXT: str r1, [sp, #48] @ 4-byte Spill
+; ARMV7-NEXT: sbc r7, r2, r6, asr #31
+; ARMV7-NEXT: clz r1, r5
+; ARMV7-NEXT: str r2, [sp, #8] @ 4-byte Spill
+; ARMV7-NEXT: clz r2, lr
+; ARMV7-NEXT: add r1, r1, #32
+; ARMV7-NEXT: cmp r7, #0
+; ARMV7-NEXT: clzne r1, r7
+; ARMV7-NEXT: add r2, r2, #32
+; ARMV7-NEXT: cmp r4, #0
+; ARMV7-NEXT: str r3, [sp, #16] @ 4-byte Spill
+; ARMV7-NEXT: clzne r2, r4
+; ARMV7-NEXT: orrs r3, r5, r7
+; ARMV7-NEXT: addeq r1, r2, #64
+; ARMV7-NEXT: str r7, [sp, #68] @ 4-byte Spill
+; ARMV7-NEXT: subs r8, r1, r0
+; ARMV7-NEXT: mov r0, #0
+; ARMV7-NEXT: sbcs r1, r0, #0
+; ARMV7-NEXT: str r1, [sp, #100] @ 4-byte Spill
+; ARMV7-NEXT: sbcs r2, r0, #0
+; ARMV7-NEXT: str r2, [sp, #96] @ 4-byte Spill
+; ARMV7-NEXT: sbc r3, r0, #0
+; ARMV7-NEXT: rsbs r0, r8, #127
+; ARMV7-NEXT: rscs r0, r1, #0
+; ARMV7-NEXT: orr r1, r4, r7
+; ARMV7-NEXT: ldr r7, [sp, #88] @ 4-byte Reload
+; ARMV7-NEXT: rscs r0, r2, #0
+; ARMV7-NEXT: orr r2, lr, r5
+; ARMV7-NEXT: rscs r0, r3, #0
+; ARMV7-NEXT: orr r1, r2, r1
+; ARMV7-NEXT: str r3, [sp, #92] @ 4-byte Spill
+; ARMV7-NEXT: orr r2, r11, r12
+; ARMV7-NEXT: orr r3, r7, r9
+; ARMV7-NEXT: orr r2, r3, r2
+; ARMV7-NEXT: clz r1, r1
+; ARMV7-NEXT: clz r2, r2
+; ARMV7-NEXT: mov r0, #0
+; ARMV7-NEXT: lsr r1, r1, #5
+; ARMV7-NEXT: movwlo r0, #1
+; ARMV7-NEXT: lsr r2, r2, #5
+; ARMV7-NEXT: orr r1, r1, r2
+; ARMV7-NEXT: orrs r0, r1, r0
+; ARMV7-NEXT: mov r3, r9
+; ARMV7-NEXT: asr r0, r6, #31
+; ARMV7-NEXT: mov r6, r7
+; ARMV7-NEXT: eor r0, r0, r10, asr #31
+; ARMV7-NEXT: mov r1, r12
+; ARMV7-NEXT: str r0, [sp, #20] @ 4-byte Spill
+; ARMV7-NEXT: mov r0, r11
+; ARMV7-NEXT: movwne r0, #0
+; ARMV7-NEXT: movwne r6, #0
+; ARMV7-NEXT: movwne r3, #0
+; ARMV7-NEXT: movwne r1, #0
+; ARMV7-NEXT: str r0, [sp, #108] @ 4-byte Spill
+; ARMV7-NEXT: bne .LBB0_15
+; ARMV7-NEXT: @ %bb.1: @ %_udiv-special-cases_udiv-special-cases
+; ARMV7-NEXT: mov r10, r7
+; ARMV7-NEXT: ldr r7, [sp, #96] @ 4-byte Reload
+; ARMV7-NEXT: eor r0, r8, #127
+; ARMV7-NEXT: mov r2, r11
+; ARMV7-NEXT: str r5, [sp, #72] @ 4-byte Spill
+; ARMV7-NEXT: orr r0, r0, r7
+; ARMV7-NEXT: ldr r7, [sp, #100] @ 4-byte Reload
+; ARMV7-NEXT: ldr r5, [sp, #92] @ 4-byte Reload
+; ARMV7-NEXT: ldr r11, [sp, #48] @ 4-byte Reload
+; ARMV7-NEXT: orr r7, r7, r5
+; ARMV7-NEXT: str r4, [sp, #76] @ 4-byte Spill
+; ARMV7-NEXT: orrs r0, r0, r7
+; ARMV7-NEXT: str lr, [sp, #80] @ 4-byte Spill
+; ARMV7-NEXT: beq .LBB0_6
+; ARMV7-NEXT: @ %bb.2: @ %udiv-bb15
+; ARMV7-NEXT: mov r0, #0
+; ARMV7-NEXT: mov r1, #12
+; ARMV7-NEXT: str r0, [sp, #220]
+; ARMV7-NEXT: mov r11, r2
+; ARMV7-NEXT: str r0, [sp, #216]
+; ARMV7-NEXT: str r0, [sp, #212]
+; ARMV7-NEXT: str r0, [sp, #208]
+; ARMV7-NEXT: add r0, sp, #228
+; ARMV7-NEXT: stm r0, {r2, r9, r12}
+; ARMV7-NEXT: rsb r0, r8, #127
+; ARMV7-NEXT: add r2, sp, #208
+; ARMV7-NEXT: and r1, r1, r0, lsr #3
+; ARMV7-NEXT: add r2, r2, #16
+; ARMV7-NEXT: str r10, [sp, #224]
+; ARMV7-NEXT: and r6, r0, #31
+; ARMV7-NEXT: ldr r1, [r2, -r1]!
+; ARMV7-NEXT: eor r4, r6, #31
+; ARMV7-NEXT: str r9, [sp, #64] @ 4-byte Spill
+; ARMV7-NEXT: ldmib r2, {r3, r7}
+; ARMV7-NEXT: lsr r5, r1, #1
+; ARMV7-NEXT: ldr r2, [r2, #12]
+; ARMV7-NEXT: lsl r0, r3, r6
+; ARMV7-NEXT: orr r9, r0, r5, lsr r4
+; ARMV7-NEXT: lsr r5, r7, #1
+; ARMV7-NEXT: lsrs r3, r3, #1
+; ARMV7-NEXT: lsl r2, r2, r6
+; ARMV7-NEXT: orr r0, r2, r5, lsr r4
+; ARMV7-NEXT: str r0, [sp, #108] @ 4-byte Spill
+; ARMV7-NEXT: lsl r2, r7, r6
+; ARMV7-NEXT: ldr r0, [sp, #100] @ 4-byte Reload
+; ARMV7-NEXT: orr lr, r2, r3, lsr r4
+; ARMV7-NEXT: adds r4, r8, #1
+; ARMV7-NEXT: lsl r2, r1, r6
+; ARMV7-NEXT: adcs r8, r0, #0
+; ARMV7-NEXT: ldr r0, [sp, #96] @ 4-byte Reload
+; ARMV7-NEXT: mov r3, #0
+; ARMV7-NEXT: adcs r0, r0, #0
+; ARMV7-NEXT: str r0, [sp, #84] @ 4-byte Spill
+; ARMV7-NEXT: ldr r0, [sp, #92] @ 4-byte Reload
+; ARMV7-NEXT: adcs r0, r0, #0
+; ARMV7-NEXT: str r0, [sp, #96] @ 4-byte Spill
+; ARMV7-NEXT: mov r0, #0
+; ARMV7-NEXT: adcs r1, r3, #0
+; ARMV7-NEXT: str r0, [sp, #104] @ 4-byte Spill
+; ARMV7-NEXT: bne .LBB0_13
+; ARMV7-NEXT: @ %bb.3: @ %udiv-preheader4
+; ARMV7-NEXT: mov r1, #0
+; ARMV7-NEXT: str r10, [sp, #176]
+; ARMV7-NEXT: str r1, [sp, #204]
+; ARMV7-NEXT: add r10, sp, #176
+; ARMV7-NEXT: str r1, [sp, #200]
+; ARMV7-NEXT: str r1, [sp, #196]
+; ARMV7-NEXT: str r1, [sp, #192]
+; ARMV7-NEXT: ubfx r1, r4, #5, #2
+; ARMV7-NEXT: str r11, [sp, #180]
+; ARMV7-NEXT: ldr r0, [sp, #64] @ 4-byte Reload
+; ARMV7-NEXT: add r3, r10, r1, lsl #2
+; ARMV7-NEXT: str r0, [sp, #184]
+; ARMV7-NEXT: and r0, r4, #31
+; ARMV7-NEXT: str r12, [sp, #188]
+; ARMV7-NEXT: ldmib r3, {r5, r6, r12}
+; ARMV7-NEXT: lsr r3, r6, r0
+; ARMV7-NEXT: lsl r7, r12, #1
+; ARMV7-NEXT: str r4, [sp, #100] @ 4-byte Spill
+; ARMV7-NEXT: eor r4, r0, #31
+; ARMV7-NEXT: ldr r1, [r10, r1, lsl #2]
+; ARMV7-NEXT: lsl r6, r6, #1
+; ARMV7-NEXT: orr r3, r3, r7, lsl r4
+; ARMV7-NEXT: lsr r7, r5, r0
+; ARMV7-NEXT: lsl r5, r5, #1
+; ARMV7-NEXT: orr r6, r7, r6, lsl r4
+; ARMV7-NEXT: lsr r12, r12, r0
+; ARMV7-NEXT: lsr r1, r1, r0
+; ARMV7-NEXT: orr r7, r1, r5, lsl r4
+; ARMV7-NEXT: ldr r1, [sp, #80] @ 4-byte Reload
+; ARMV7-NEXT: mov r10, #0
+; ARMV7-NEXT: ldr r0, [sp, #108] @ 4-byte Reload
+; ARMV7-NEXT: subs r1, r1, #1
+; ARMV7-NEXT: str r1, [sp, #64] @ 4-byte Spill
+; ARMV7-NEXT: ldr r1, [sp, #76] @ 4-byte Reload
+; ARMV7-NEXT: ldr r11, [sp, #84] @ 4-byte Reload
+; ARMV7-NEXT: sbcs r1, r1, #0
+; ARMV7-NEXT: str r1, [sp, #60] @ 4-byte Spill
+; ARMV7-NEXT: ldr r1, [sp, #72] @ 4-byte Reload
+; ARMV7-NEXT: sbcs r1, r1, #0
+; ARMV7-NEXT: str r1, [sp, #56] @ 4-byte Spill
+; ARMV7-NEXT: ldr r1, [sp, #68] @ 4-byte Reload
+; ARMV7-NEXT: sbc r1, r1, #0
+; ARMV7-NEXT: str r1, [sp, #52] @ 4-byte Spill
+; ARMV7-NEXT: ldr r1, [sp, #96] @ 4-byte Reload
+; ARMV7-NEXT: .LBB0_4: @ %udiv-do-while3
+; ARMV7-NEXT: @ =>This Inner Loop Header: Depth=1
+; ARMV7-NEXT: lsl r5, r12, #1
+; ARMV7-NEXT: add r4, sp, #88
+; ARMV7-NEXT: orr r5, r5, r3, lsr #31
+; ARMV7-NEXT: lsl r3, r3, #1
+; ARMV7-NEXT: stm r4, {r2, r9, lr} @ 12-byte Folded Spill
+; ARMV7-NEXT: orr r3, r3, r6, lsr #31
+; ARMV7-NEXT: lsl r6, r6, #1
+; ARMV7-NEXT: ldr r2, [sp, #104] @ 4-byte Reload
+; ARMV7-NEXT: orr r6, r6, r7, lsr #31
+; ARMV7-NEXT: lsl r7, r7, #1
+; ARMV7-NEXT: str r2, [sp, #84] @ 4-byte Spill
+; ARMV7-NEXT: ldr r2, [sp, #64] @ 4-byte Reload
+; ARMV7-NEXT: orr r7, r7, r0, lsr #31
+; ARMV7-NEXT: str r0, [sp, #108] @ 4-byte Spill
+; ARMV7-NEXT: subs r2, r2, r7
+; ARMV7-NEXT: ldr r0, [sp, #72] @ 4-byte Reload
+; ARMV7-NEXT: ldr r2, [sp, #60] @ 4-byte Reload
+; ARMV7-NEXT: ldr r4, [sp, #68] @ 4-byte Reload
+; ARMV7-NEXT: sbcs r2, r2, r6
+; ARMV7-NEXT: ldr r2, [sp, #56] @ 4-byte Reload
+; ARMV7-NEXT: sbcs r2, r2, r3
+; ARMV7-NEXT: ldr r2, [sp, #52] @ 4-byte Reload
+; ARMV7-NEXT: sbc r2, r2, r5
+; ARMV7-NEXT: and r9, r0, r2, asr #31
+; ARMV7-NEXT: ldr r0, [sp, #76] @ 4-byte Reload
+; ARMV7-NEXT: and r4, r4, r2, asr #31
+; ARMV7-NEXT: and lr, r0, r2, asr #31
+; ARMV7-NEXT: ldr r0, [sp, #80] @ 4-byte Reload
+; ARMV7-NEXT: and r12, r0, r2, asr #31
+; ARMV7-NEXT: mov r0, #1
+; ARMV7-NEXT: subs r7, r7, r12
+; ARMV7-NEXT: and r0, r0, r2, asr #31
+; ARMV7-NEXT: sbcs r6, r6, lr
+; ARMV7-NEXT: str r0, [sp, #104] @ 4-byte Spill
+; ARMV7-NEXT: ldr r0, [sp, #100] @ 4-byte Reload
+; ARMV7-NEXT: sbcs r3, r3, r9
+; ARMV7-NEXT: sbc r12, r5, r4
+; ARMV7-NEXT: ldr r4, [sp, #92] @ 4-byte Reload
+; ARMV7-NEXT: subs r0, r0, #1
+; ARMV7-NEXT: str r0, [sp, #100] @ 4-byte Spill
+; ARMV7-NEXT: sbcs r8, r8, #0
+; ARMV7-NEXT: sbcs r11, r11, #0
+; ARMV7-NEXT: sbc r1, r1, #0
+; ARMV7-NEXT: orr r5, r0, r11
+; ARMV7-NEXT: orr r2, r8, r1
+; ARMV7-NEXT: ldr r0, [sp, #88] @ 4-byte Reload
+; ARMV7-NEXT: orrs r2, r5, r2
+; ARMV7-NEXT: lsl r5, r4, #1
+; ARMV7-NEXT: ldr r2, [sp, #84] @ 4-byte Reload
+; ARMV7-NEXT: orr r2, r2, r0, lsl #1
+; ARMV7-NEXT: orr r0, r5, r0, lsr #31
+; ARMV7-NEXT: orr r9, r10, r0
+; ARMV7-NEXT: ldr r0, [sp, #96] @ 4-byte Reload
+; ARMV7-NEXT: lsl r5, r0, #1
+; ARMV7-NEXT: orr r5, r5, r4, lsr #31
+; ARMV7-NEXT: ldr r4, [sp, #108] @ 4-byte Reload
+; ARMV7-NEXT: orr lr, r10, r5
+; ARMV7-NEXT: lsl r5, r4, #1
+; ARMV7-NEXT: orr r5, r5, r0, lsr #31
+; ARMV7-NEXT: orr r0, r10, r5
+; ARMV7-NEXT: mov r10, #0
+; ARMV7-NEXT: bne .LBB0_4
+; ARMV7-NEXT: .LBB0_5: @ %udiv-loop-exit2
+; ARMV7-NEXT: ldr r1, [sp, #104] @ 4-byte Reload
+; ARMV7-NEXT: lsl r0, r0, #1
+; ARMV7-NEXT: ldr r11, [sp, #48] @ 4-byte Reload
+; ARMV7-NEXT: orr r6, r1, r2, lsl #1
+; ARMV7-NEXT: lsl r1, r9, #1
+; ARMV7-NEXT: orr r1, r1, r2, lsr #31
+; ARMV7-NEXT: str r1, [sp, #108] @ 4-byte Spill
+; ARMV7-NEXT: lsl r2, lr, #1
+; ARMV7-NEXT: orr r3, r2, r9, lsr #31
+; ARMV7-NEXT: orr r1, r0, lr, lsr #31
+; ARMV7-NEXT: .LBB0_6: @ %udiv-end1
+; ARMV7-NEXT: ldr r2, [sp, #284]
+; ARMV7-NEXT: ldr r0, [sp, #44] @ 4-byte Reload
+; ARMV7-NEXT: ldr r7, [sp, #24] @ 4-byte Reload
+; ARMV7-NEXT: subs r8, r0, r2, asr #31
+; ARMV7-NEXT: ldr r0, [sp, #36] @ 4-byte Reload
+; ARMV7-NEXT: ldr r4, [sp, #300]
+; ARMV7-NEXT: sbcs r10, r0, r2, asr #31
+; ARMV7-NEXT: ldr r0, [sp, #40] @ 4-byte Reload
+; ARMV7-NEXT: str r3, [sp, #96] @ 4-byte Spill
+; ARMV7-NEXT: sbcs lr, r0, r2, asr #31
+; ARMV7-NEXT: ldr r3, [sp, #16] @ 4-byte Reload
+; ARMV7-NEXT: sbc r9, r7, r2, asr #31
+; ARMV7-NEXT: clz r0, lr
+; ARMV7-NEXT: clz r7, r8
+; ARMV7-NEXT: add r0, r0, #32
+; ARMV7-NEXT: cmp r9, #0
+; ARMV7-NEXT: add r7, r7, #32
+; ARMV7-NEXT: clzne r0, r9
+; ARMV7-NEXT: cmp r10, #0
+; ARMV7-NEXT: clzne r7, r10
+; ARMV7-NEXT: orrs r5, lr, r9
+; ARMV7-NEXT: addeq r0, r7, #64
+; ARMV7-NEXT: subs r12, r3, r4, asr #31
+; ARMV7-NEXT: ldr r3, [sp, #12] @ 4-byte Reload
+; ARMV7-NEXT: clz r5, r12
+; ARMV7-NEXT: str r6, [sp, #92] @ 4-byte Spill
+; ARMV7-NEXT: add r5, r5, #32
+; ARMV7-NEXT: sbcs r6, r3, r4, asr #31
+; ARMV7-NEXT: ldr r3, [sp, #8] @ 4-byte Reload
+; ARMV7-NEXT: sbcs r2, r11, r4, asr #31
+; ARMV7-NEXT: str r1, [sp, #88] @ 4-byte Spill
+; ARMV7-NEXT: sbc r3, r3, r4, asr #31
+; ARMV7-NEXT: clz r7, r2
+; ARMV7-NEXT: add r7, r7, #32
+; ARMV7-NEXT: cmp r3, #0
+; ARMV7-NEXT: clzne r7, r3
+; ARMV7-NEXT: cmp r6, #0
+; ARMV7-NEXT: clzne r5, r6
+; ARMV7-NEXT: orrs r4, r2, r3
+; ARMV7-NEXT: addeq r7, r5, #64
+; ARMV7-NEXT: str r6, [sp, #72] @ 4-byte Spill
+; ARMV7-NEXT: subs r11, r7, r0
+; ARMV7-NEXT: mov r7, #0
+; ARMV7-NEXT: sbcs r4, r7, #0
+; ARMV7-NEXT: str r4, [sp, #84] @ 4-byte Spill
+; ARMV7-NEXT: sbcs r5, r7, #0
+; ARMV7-NEXT: str r5, [sp, #104] @ 4-byte Spill
+; ARMV7-NEXT: sbc r1, r7, #0
+; ARMV7-NEXT: rsbs r0, r11, #127
+; ARMV7-NEXT: rscs r0, r4, #0
+; ARMV7-NEXT: orr r4, r8, lr
+; ARMV7-NEXT: rscs r0, r5, #0
+; ARMV7-NEXT: orr r5, r12, r2
+; ARMV7-NEXT: rscs r0, r1, #0
+; ARMV7-NEXT: str r1, [sp, #100] @ 4-byte Spill
+; ARMV7-NEXT: orr r0, r6, r3
+; ARMV7-NEXT: ldr r6, [sp, #20] @ 4-byte Reload
+; ARMV7-NEXT: orr r0, r5, r0
+; ARMV7-NEXT: orr r5, r10, r9
+; ARMV7-NEXT: clz r0, r0
+; ARMV7-NEXT: orr r4, r4, r5
+; ARMV7-NEXT: clz r4, r4
+; ARMV7-NEXT: ldr r1, [sp, #88] @ 4-byte Reload
+; ARMV7-NEXT: lsr r0, r0, #5
+; ARMV7-NEXT: movwlo r7, #1
+; ARMV7-NEXT: lsr r4, r4, #5
+; ARMV7-NEXT: orr r0, r0, r4
+; ARMV7-NEXT: orr r0, r0, r7
+; ARMV7-NEXT: eor r7, r1, r6
+; ARMV7-NEXT: ldr r1, [sp, #96] @ 4-byte Reload
+; ARMV7-NEXT: str r3, [sp, #64] @ 4-byte Spill
+; ARMV7-NEXT: ldr r3, [sp, #92] @ 4-byte Reload
+; ARMV7-NEXT: str r2, [sp, #68] @ 4-byte Spill
+; ARMV7-NEXT: eor r2, r1, r6
+; ARMV7-NEXT: ldr r1, [sp, #108] @ 4-byte Reload
+; ARMV7-NEXT: eor r4, r3, r6
+; ARMV7-NEXT: subs r3, r4, r6
+; ARMV7-NEXT: str r9, [sp, #36] @ 4-byte Spill
+; ARMV7-NEXT: eor r1, r1, r6
+; ARMV7-NEXT: str lr, [sp, #40] @ 4-byte Spill
+; ARMV7-NEXT: sbcs r1, r1, r6
+; ARMV7-NEXT: str r1, [sp, #12] @ 4-byte Spill
+; ARMV7-NEXT: sbcs r1, r2, r6
+; ARMV7-NEXT: str r1, [sp, #8] @ 4-byte Spill
+; ARMV7-NEXT: sbc r1, r7, r6
+; ARMV7-NEXT: str r1, [sp, #20] @ 4-byte Spill
+; ARMV7-NEXT: ldr r1, [sp, #284]
+; ARMV7-NEXT: cmp r0, #0
+; ARMV7-NEXT: mov r6, r8
+; ARMV7-NEXT: mov r2, r10
+; ARMV7-NEXT: movwne r6, #0
+; ARMV7-NEXT: movwne r2, #0
+; ARMV7-NEXT: asr r1, r1, #31
+; ARMV7-NEXT: movwne lr, #0
+; ARMV7-NEXT: movwne r9, #0
+; ARMV7-NEXT: str r3, [sp, #16] @ 4-byte Spill
+; ARMV7-NEXT: str r1, [sp, #24] @ 4-byte Spill
+; ARMV7-NEXT: str r8, [sp, #44] @ 4-byte Spill
+; ARMV7-NEXT: bne .LBB0_12
+; ARMV7-NEXT: @ %bb.7: @ %udiv-end1
+; ARMV7-NEXT: ldr r3, [sp, #104] @ 4-byte Reload
+; ARMV7-NEXT: eor r0, r11, #127
+; ARMV7-NEXT: mov r1, r8
+; ARMV7-NEXT: ldr r8, [sp, #84] @ 4-byte Reload
+; ARMV7-NEXT: orr r0, r0, r3
+; ARMV7-NEXT: ldr r3, [sp, #100] @ 4-byte Reload
+; ARMV7-NEXT: orr r4, r8, r3
+; ARMV7-NEXT: orrs r0, r0, r4
+; ARMV7-NEXT: beq .LBB0_12
+; ARMV7-NEXT: @ %bb.8: @ %udiv-bb1
+; ARMV7-NEXT: mov r7, #0
+; ARMV7-NEXT: str r12, [sp, #76] @ 4-byte Spill
+; ARMV7-NEXT: str r1, [sp, #160]
+; ARMV7-NEXT: rsb r0, r11, #127
+; ARMV7-NEXT: str r7, [sp, #156]
+; ARMV7-NEXT: mov r1, #12
+; ARMV7-NEXT: str r7, [sp, #152]
+; ARMV7-NEXT: add r2, sp, #144
+; ARMV7-NEXT: str r7, [sp, #148]
+; ARMV7-NEXT: and r1, r1, r0, lsr #3
+; ARMV7-NEXT: str r7, [sp, #144]
+; ARMV7-NEXT: add r2, r2, #16
+; ARMV7-NEXT: str r10, [sp, #164]
+; ARMV7-NEXT: and r0, r0, #31
+; ARMV7-NEXT: ldr r9, [sp, #40] @ 4-byte Reload
+; ARMV7-NEXT: eor lr, r0, #31
+; ARMV7-NEXT: str r9, [sp, #168]
+; ARMV7-NEXT: ldr r12, [sp, #36] @ 4-byte Reload
+; ARMV7-NEXT: str r12, [sp, #172]
+; ARMV7-NEXT: ldr r1, [r2, -r1]!
+; ARMV7-NEXT: ldmib r2, {r3, r6}
+; ARMV7-NEXT: lsr r4, r1, #1
+; ARMV7-NEXT: ldr r2, [r2, #12]
+; ARMV7-NEXT: lsl r5, r3, r0
+; ARMV7-NEXT: orr r4, r5, r4, lsr lr
+; ARMV7-NEXT: str r4, [sp, #96] @ 4-byte Spill
+; ARMV7-NEXT: lsr r4, r6, #1
+; ARMV7-NEXT: lsl r2, r2, r0
+; ARMV7-NEXT: lsrs r3, r3, #1
+; ARMV7-NEXT: orr r2, r2, r4, lsr lr
+; ARMV7-NEXT: str r2, [sp, #108] @ 4-byte Spill
+; ARMV7-NEXT: lsl r2, r6, r0
+; ARMV7-NEXT: orr r2, r2, r3, lsr lr
+; ARMV7-NEXT: adds r3, r11, #1
+; ARMV7-NEXT: str r2, [sp, #92] @ 4-byte Spill
+; ARMV7-NEXT: mov r2, r10
+; ARMV7-NEXT: lsl r10, r1, r0
+; ARMV7-NEXT: ldr r0, [sp, #104] @ 4-byte Reload
+; ARMV7-NEXT: adcs r8, r8, #0
+; ARMV7-NEXT: str r2, [sp, #4] @ 4-byte Spill
+; ARMV7-NEXT: adcs lr, r0, #0
+; ARMV7-NEXT: ldr r0, [sp, #100] @ 4-byte Reload
+; ARMV7-NEXT: adcs r0, r0, #0
+; ARMV7-NEXT: str r0, [sp, #88] @ 4-byte Spill
+; ARMV7-NEXT: adcs r0, r7, #0
+; ARMV7-NEXT: ldr r0, [sp, #44] @ 4-byte Reload
+; ARMV7-NEXT: bne .LBB0_14
+; ARMV7-NEXT: @ %bb.9: @ %udiv-preheader
+; ARMV7-NEXT: add r1, sp, #112
+; ARMV7-NEXT: str r7, [sp, #140]
+; ARMV7-NEXT: stm r1, {r0, r2, r9, r12}
+; ARMV7-NEXT: ubfx r0, r3, #5, #2
+; ARMV7-NEXT: add r12, sp, #112
+; ARMV7-NEXT: add r2, r12, r0, lsl #2
+; ARMV7-NEXT: str r7, [sp, #136]
+; ARMV7-NEXT: str r7, [sp, #132]
+; ARMV7-NEXT: str r7, [sp, #128]
+; ARMV7-NEXT: ldr r9, [r2, #12]
+; ARMV7-NEXT: ldr r1, [r2, #8]
+; ARMV7-NEXT: ldr r7, [r2, #4]
+; ARMV7-NEXT: and r2, r3, #31
+; ARMV7-NEXT: eor r4, r2, #31
+; ARMV7-NEXT: str r3, [sp, #104] @ 4-byte Spill
+; ARMV7-NEXT: lsr r3, r1, r2
+; ARMV7-NEXT: lsl r6, r9, #1
+; ARMV7-NEXT: lsl r1, r1, #1
+; ARMV7-NEXT: orr r6, r3, r6, lsl r4
+; ARMV7-NEXT: lsr r3, r7, r2
+; ARMV7-NEXT: orr r1, r3, r1, lsl r4
+; ARMV7-NEXT: str r1, [sp, #100] @ 4-byte Spill
+; ARMV7-NEXT: lsl r1, r7, #1
+; ARMV7-NEXT: ldr r0, [r12, r0, lsl #2]
+; ARMV7-NEXT: lsr r11, r9, r2
+; ARMV7-NEXT: ldr r3, [sp, #64] @ 4-byte Reload
+; ARMV7-NEXT: ldr r7, [sp, #88] @ 4-byte Reload
+; ARMV7-NEXT: ldr r5, [sp, #92] @ 4-byte Reload
+; ARMV7-NEXT: lsr r0, r0, r2
+; ARMV7-NEXT: mov r2, #0
+; ARMV7-NEXT: orr r0, r0, r1, lsl r4
+; ARMV7-NEXT: ldr r1, [sp, #76] @ 4-byte Reload
+; ARMV7-NEXT: ldr r4, [sp, #96] @ 4-byte Reload
+; ARMV7-NEXT: subs r1, r1, #1
+; ARMV7-NEXT: str r1, [sp, #60] @ 4-byte Spill
+; ARMV7-NEXT: ldr r1, [sp, #72] @ 4-byte Reload
+; ARMV7-NEXT: sbcs r1, r1, #0
+; ARMV7-NEXT: str r1, [sp, #56] @ 4-byte Spill
+; ARMV7-NEXT: ldr r1, [sp, #68] @ 4-byte Reload
+; ARMV7-NEXT: sbcs r1, r1, #0
+; ARMV7-NEXT: str r1, [sp, #52] @ 4-byte Spill
+; ARMV7-NEXT: sbc r1, r3, #0
+; ARMV7-NEXT: ldr r3, [sp, #108] @ 4-byte Reload
+; ARMV7-NEXT: str r1, [sp, #48] @ 4-byte Spill
+; ARMV7-NEXT: mov r1, #0
+; ARMV7-NEXT: .LBB0_10: @ %udiv-do-while
+; ARMV7-NEXT: @ =>This Inner Loop Header: Depth=1
+; ARMV7-NEXT: str r2, [sp, #80] @ 4-byte Spill
+; ARMV7-NEXT: lsl r2, r11, #1
+; ARMV7-NEXT: ldr r11, [sp, #100] @ 4-byte Reload
+; ARMV7-NEXT: orr r2, r2, r6, lsr #31
+; ARMV7-NEXT: str r4, [sp, #88] @ 4-byte Spill
+; ARMV7-NEXT: lsl r4, r6, #1
+; ARMV7-NEXT: str r5, [sp, #96] @ 4-byte Spill
+; ARMV7-NEXT: lsl r6, r11, #1
+; ARMV7-NEXT: ldr r5, [sp, #60] @ 4-byte Reload
+; ARMV7-NEXT: orr r6, r6, r0, lsr #31
+; ARMV7-NEXT: lsl r0, r0, #1
+; ARMV7-NEXT: orr r0, r0, r3, lsr #31
+; ARMV7-NEXT: orr r4, r4, r11, lsr #31
+; ARMV7-NEXT: subs r5, r5, r0
+; ARMV7-NEXT: str r3, [sp, #108] @ 4-byte Spill
+; ARMV7-NEXT: ldr r5, [sp, #56] @ 4-byte Reload
+; ARMV7-NEXT: ldr r3, [sp, #68] @ 4-byte Reload
+; ARMV7-NEXT: sbcs r5, r5, r6
+; ARMV7-NEXT: str r1, [sp, #92] @ 4-byte Spill
+; ARMV7-NEXT: ldr r5, [sp, #52] @ 4-byte Reload
+; ARMV7-NEXT: ldr r1, [sp, #64] @ 4-byte Reload
+; ARMV7-NEXT: sbcs r5, r5, r4
+; ARMV7-NEXT: str r10, [sp, #84] @ 4-byte Spill
+; ARMV7-NEXT: ldr r5, [sp, #48] @ 4-byte Reload
+; ARMV7-NEXT: sbc r5, r5, r2
+; ARMV7-NEXT: and r9, r3, r5, asr #31
+; ARMV7-NEXT: ldr r3, [sp, #72] @ 4-byte Reload
+; ARMV7-NEXT: and r10, r1, r5, asr #31
+; ARMV7-NEXT: and r11, r3, r5, asr #31
+; ARMV7-NEXT: ldr r3, [sp, #76] @ 4-byte Reload
+; ARMV7-NEXT: and r12, r3, r5, asr #31
+; ARMV7-NEXT: ldr r3, [sp, #104] @ 4-byte Reload
+; ARMV7-NEXT: subs r0, r0, r12
+; ARMV7-NEXT: sbcs r1, r6, r11
+; ARMV7-NEXT: str r1, [sp, #100] @ 4-byte Spill
+; ARMV7-NEXT: sbcs r6, r4, r9
+; ARMV7-NEXT: ldr r1, [sp, #84] @ 4-byte Reload
+; ARMV7-NEXT: sbc r11, r2, r10
+; ARMV7-NEXT: subs r3, r3, #1
+; ARMV7-NEXT: sbcs r8, r8, #0
+; ARMV7-NEXT: str r3, [sp, #104] @ 4-byte Spill
+; ARMV7-NEXT: sbcs lr, lr, #0
+; ARMV7-NEXT: mov r2, #1
+; ARMV7-NEXT: orr r4, r3, lr
+; ARMV7-NEXT: ldr r3, [sp, #80] @ 4-byte Reload
+; ARMV7-NEXT: sbc r7, r7, #0
+; ARMV7-NEXT: ldr r9, [sp, #88] @ 4-byte Reload
+; ARMV7-NEXT: and r2, r2, r5, asr #31
+; ARMV7-NEXT: orr r5, r8, r7
+; ARMV7-NEXT: orr r12, r3, r1, lsl #1
+; ARMV7-NEXT: ldr r3, [sp, #96] @ 4-byte Reload
+; ARMV7-NEXT: orrs r5, r4, r5
+; ARMV7-NEXT: ldr r10, [sp, #92] @ 4-byte Reload
+; ARMV7-NEXT: lsl r5, r9, #1
+; ARMV7-NEXT: orr r1, r5, r1, lsr #31
+; ARMV7-NEXT: orr r4, r10, r1
+; ARMV7-NEXT: lsl r1, r3, #1
+; ARMV7-NEXT: orr r1, r1, r9, lsr #31
+; ARMV7-NEXT: orr r5, r10, r1
+; ARMV7-NEXT: ldr r1, [sp, #108] @ 4-byte Reload
+; ARMV7-NEXT: lsl r1, r1, #1
+; ARMV7-NEXT: orr r1, r1, r3, lsr #31
+; ARMV7-NEXT: orr r3, r10, r1
+; ARMV7-NEXT: mov r10, r12
+; ARMV7-NEXT: mov r1, #0
+; ARMV7-NEXT: bne .LBB0_10
+; ARMV7-NEXT: .LBB0_11: @ %udiv-loop-exit
+; ARMV7-NEXT: lsl r1, r4, #1
+; ARMV7-NEXT: orr r6, r2, r10, lsl #1
+; ARMV7-NEXT: orr r2, r1, r10, lsr #31
+; ARMV7-NEXT: lsl r1, r5, #1
+; ARMV7-NEXT: ldr r10, [sp, #4] @ 4-byte Reload
+; ARMV7-NEXT: orr lr, r1, r4, lsr #31
+; ARMV7-NEXT: ldr r12, [sp, #76] @ 4-byte Reload
+; ARMV7-NEXT: lsl r1, r3, #1
+; ARMV7-NEXT: orr r9, r1, r5, lsr #31
+; ARMV7-NEXT: .LBB0_12: @ %udiv-end
+; ARMV7-NEXT: umull r0, r7, r12, r6
+; ARMV7-NEXT: ldr r5, [sp, #16] @ 4-byte Reload
+; ARMV7-NEXT: ldr r11, [sp, #72] @ 4-byte Reload
+; ARMV7-NEXT: mov r4, #0
+; ARMV7-NEXT: mov r1, r12
+; ARMV7-NEXT: ldr r3, [sp, #68] @ 4-byte Reload
+; ARMV7-NEXT: str r0, [sp, #108] @ 4-byte Spill
+; ARMV7-NEXT: umlal r7, r4, r11, r6
+; ARMV7-NEXT: ldr r0, [sp, #28] @ 4-byte Reload
+; ARMV7-NEXT: mov r12, #0
+; ARMV7-NEXT: str r5, [r0]
+; ARMV7-NEXT: ldr r5, [sp, #12] @ 4-byte Reload
+; ARMV7-NEXT: str r5, [r0, #4]
+; ARMV7-NEXT: ldr r5, [sp, #8] @ 4-byte Reload
+; ARMV7-NEXT: str r5, [r0, #8]
+; ARMV7-NEXT: ldr r5, [sp, #20] @ 4-byte Reload
+; ARMV7-NEXT: str r5, [r0, #12]
+; ARMV7-NEXT: umull r5, r0, r1, r2
+; ARMV7-NEXT: adds r8, r5, r7
+; ARMV7-NEXT: adcs r0, r4, r0
+; ARMV7-NEXT: ldr r4, [sp, #64] @ 4-byte Reload
+; ARMV7-NEXT: adc r7, r12, #0
+; ARMV7-NEXT: umull r12, r5, r6, r3
+; ARMV7-NEXT: mla r6, r6, r4, r5
+; ARMV7-NEXT: umlal r0, r7, r11, r2
+; ARMV7-NEXT: mla r2, r2, r3, r6
+; ARMV7-NEXT: umull r6, r5, lr, r1
+; ARMV7-NEXT: mla r3, lr, r11, r5
+; ARMV7-NEXT: mla r1, r9, r1, r3
+; ARMV7-NEXT: adds r3, r6, r12
+; ARMV7-NEXT: adc r1, r1, r2
+; ARMV7-NEXT: adds r0, r0, r3
+; ARMV7-NEXT: ldr r2, [sp, #108] @ 4-byte Reload
+; ARMV7-NEXT: adc r1, r7, r1
+; ARMV7-NEXT: ldr r3, [sp, #44] @ 4-byte Reload
+; ARMV7-NEXT: ldr r7, [sp, #40] @ 4-byte Reload
+; ARMV7-NEXT: subs r2, r3, r2
+; ARMV7-NEXT: sbcs r3, r10, r8
+; ARMV7-NEXT: sbcs r0, r7, r0
+; ARMV7-NEXT: ldr r7, [sp, #36] @ 4-byte Reload
+; ARMV7-NEXT: sbc r1, r7, r1
+; ARMV7-NEXT: ldr r7, [sp, #24] @ 4-byte Reload
+; ARMV7-NEXT: eor r2, r2, r7
+; ARMV7-NEXT: eor r3, r3, r7
+; ARMV7-NEXT: subs r2, r2, r7
+; ARMV7-NEXT: eor r0, r0, r7
+; ARMV7-NEXT: sbcs r3, r3, r7
+; ARMV7-NEXT: eor r1, r1, r7
+; ARMV7-NEXT: sbcs r0, r0, r7
+; ARMV7-NEXT: sbc r1, r1, r7
+; ARMV7-NEXT: ldr r7, [sp, #32] @ 4-byte Reload
+; ARMV7-NEXT: stm r7, {r2, r3}
+; ARMV7-NEXT: str r0, [r7, #8]
+; ARMV7-NEXT: str r1, [r7, #12]
+; ARMV7-NEXT: add sp, sp, #244
+; ARMV7-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+; ARMV7-NEXT: .LBB0_13:
+; ARMV7-NEXT: ldr r0, [sp, #108] @ 4-byte Reload
+; ARMV7-NEXT: b .LBB0_5
+; ARMV7-NEXT: .LBB0_14:
+; ARMV7-NEXT: mov r2, #0
+; ARMV7-NEXT: ldr r3, [sp, #108] @ 4-byte Reload
+; ARMV7-NEXT: ldr r4, [sp, #96] @ 4-byte Reload
+; ARMV7-NEXT: ldr r5, [sp, #92] @ 4-byte Reload
+; ARMV7-NEXT: b .LBB0_11
+; ARMV7-NEXT: .LBB0_15:
+; ARMV7-NEXT: ldr r11, [sp, #48] @ 4-byte Reload
+; ARMV7-NEXT: b .LBB0_6
+ %q = sdiv i128 %n, %d
+ %r = srem i128 %n, %d
+ store i128 %q, ptr %q_out
+ store i128 %r, ptr %r_out
+ ret void
+}
+
+define void @udivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
+; ARMV6-LABEL: udivrem_i128:
+; ARMV6: @ %bb.0: @ %_udiv-special-cases_udiv-special-cases
+; ARMV6-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; ARMV6-NEXT: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; ARMV6-NEXT: .pad #228
+; ARMV6-NEXT: sub sp, sp, #228
+; ARMV6-NEXT: ldr r11, [sp, #264]
+; ARMV6-NEXT: mov r8, r2
+; ARMV6-NEXT: ldr r4, [sp, #268]
+; ARMV6-NEXT: clz r2, r3
+; ARMV6-NEXT: str r0, [sp, #28] @ 4-byte Spill
+; ARMV6-NEXT: clz r0, r11
+; ARMV6-NEXT: str r1, [sp, #32] @ 4-byte Spill
+; ARMV6-NEXT: clz r1, r8
+; ARMV6-NEXT: add r0, r0, #32
+; ARMV6-NEXT: cmp r4, #0
+; ARMV6-NEXT: clzne r0, r4
+; ARMV6-NEXT: add r1, r1, #32
+; ARMV6-NEXT: cmp r3, #0
+; ARMV6-NEXT: str r1, [sp, #8] @ 4-byte Spill
+; ARMV6-NEXT: str r2, [sp, #4] @ 4-byte Spill
+; ARMV6-NEXT: movne r1, r2
+; ARMV6-NEXT: orrs r2, r11, r4
+; ARMV6-NEXT: ldr lr, [sp, #280]
+; ARMV6-NEXT: ldr r2, [sp, #272]
+; ARMV6-NEXT: addeq r0, r1, #64
+; ARMV6-NEXT: ldr r7, [sp, #284]
+; ARMV6-NEXT: clz r1, lr
+; ARMV6-NEXT: ldr r12, [sp, #276]
+; ARMV6-NEXT: clz r2, r2
+; ARMV6-NEXT: add r1, r1, #32
+; ARMV6-NEXT: cmp r7, #0
+; ARMV6-NEXT: clzne r1, r7
+; ARMV6-NEXT: add r2, r2, #32
+; ARMV6-NEXT: cmp r12, #0
+; ARMV6-NEXT: mov r10, r3
+; ARMV6-NEXT: clzne r2, r12
+; ARMV6-NEXT: orrs r3, lr, r7
+; ARMV6-NEXT: addeq r1, r2, #64
+; ARMV6-NEXT: mov r2, #0
+; ARMV6-NEXT: subs r9, r1, r0
+; ARMV6-NEXT: ldr r3, [sp, #272]
+; ARMV6-NEXT: sbcs r6, r2, #0
+; ARMV6-NEXT: str r4, [sp, #48] @ 4-byte Spill
+; ARMV6-NEXT: sbcs r5, r2, #0
+; ARMV6-NEXT: orr r3, r3, lr
+; ARMV6-NEXT: sbc r1, r2, #0
+; ARMV6-NEXT: rsbs r0, r9, #127
+; ARMV6-NEXT: rscs r0, r6, #0
+; ARMV6-NEXT: str r1, [sp, #84] @ 4-byte Spill
+; ARMV6-NEXT: rscs r0, r5, #0
+; ARMV6-NEXT: orr r2, r8, r11
+; ARMV6-NEXT: rscs r0, r1, #0
+; ARMV6-NEXT: orr r1, r10, r4
+; ARMV6-NEXT: orr r1, r2, r1
+; ARMV6-NEXT: orr r2, r12, r7
+; ARMV6-NEXT: orr r2, r3, r2
+; ARMV6-NEXT: clz r1, r1
+; ARMV6-NEXT: clz r2, r2
+; ARMV6-NEXT: mov r0, #0
+; ARMV6-NEXT: lsr r1, r1, #5
+; ARMV6-NEXT: movlo r0, #1
+; ARMV6-NEXT: lsr r2, r2, #5
+; ARMV6-NEXT: orr r1, r2, r1
+; ARMV6-NEXT: orrs r0, r1, r0
+; ARMV6-NEXT: mov r3, r10
+; ARMV6-NEXT: mov r7, r8
+; ARMV6-NEXT: str r11, [sp, #44] @ 4-byte Spill
+; ARMV6-NEXT: movne r4, #0
+; ARMV6-NEXT: movne r11, #0
+; ARMV6-NEXT: movne r3, #0
+; ARMV6-NEXT: movne r7, #0
+; ARMV6-NEXT: str r10, [sp, #40] @ 4-byte Spill
+; ARMV6-NEXT: str r8, [sp, #36] @ 4-byte Spill
+; ARMV6-NEXT: bne .LBB1_6
+; ARMV6-NEXT: @ %bb.1: @ %_udiv-special-cases_udiv-special-cases
+; ARMV6-NEXT: ldr r1, [sp, #84] @ 4-byte Reload
+; ARMV6-NEXT: eor r0, r9, #127
+; ARMV6-NEXT: orr r0, r0, r5
+; ARMV6-NEXT: str r5, [sp, #76] @ 4-byte Spill
+; ARMV6-NEXT: orr r1, r6, r1
+; ARMV6-NEXT: str r6, [sp, #80] @ 4-byte Spill
+; ARMV6-NEXT: orrs r0, r0, r1
+; ARMV6-NEXT: beq .LBB1_6
+; ARMV6-NEXT: @ %bb.2: @ %udiv-bb15
+; ARMV6-NEXT: mov r0, #0
+; ARMV6-NEXT: str r8, [sp, #208]
+; ARMV6-NEXT: str r0, [sp, #204]
+; ARMV6-NEXT: add r2, sp, #192
+; ARMV6-NEXT: str r0, [sp, #200]
+; ARMV6-NEXT: mov r11, #12
+; ARMV6-NEXT: str r0, [sp, #196]
+; ARMV6-NEXT: add r2, r2, #16
+; ARMV6-NEXT: str r0, [sp, #192]
+; ARMV6-NEXT: rsb r0, r9, #127
+; ARMV6-NEXT: str r10, [sp, #212]
+; ARMV6-NEXT: mov r6, r8
+; ARMV6-NEXT: ldr lr, [sp, #44] @ 4-byte Reload
+; ARMV6-NEXT: and r1, r11, r0, lsr #3
+; ARMV6-NEXT: str lr, [sp, #216]
+; ARMV6-NEXT: and r0, r0, #31
+; ARMV6-NEXT: ldr r12, [sp, #48] @ 4-byte Reload
+; ARMV6-NEXT: eor r5, r0, #31
+; ARMV6-NEXT: str r12, [sp, #220]
+; ARMV6-NEXT: ldr r1, [r2, -r1]!
+; ARMV6-NEXT: ldr r4, [r2, #8]
+; ARMV6-NEXT: ldr r7, [r2, #4]
+; ARMV6-NEXT: ldr r2, [r2, #12]
+; ARMV6-NEXT: lsr r3, r4, #1
+; ARMV6-NEXT: lsl r2, r2, r0
+; ARMV6-NEXT: orr r2, r2, r3, lsr r5
+; ARMV6-NEXT: str r2, [sp, #92] @ 4-byte Spill
+; ARMV6-NEXT: lsl r2, r4, r0
+; ARMV6-NEXT: lsrs r3, r7, #1
+; ARMV6-NEXT: orr r8, r2, r3, lsr r5
+; ARMV6-NEXT: lsl r2, r7, r0
+; ARMV6-NEXT: lsr r3, r1, #1
+; ARMV6-NEXT: lsl r4, r1, r0
+; ARMV6-NEXT: ldr r0, [sp, #80] @ 4-byte Reload
+; ARMV6-NEXT: orr r2, r2, r3, lsr r5
+; ARMV6-NEXT: adds r5, r9, #1
+; ARMV6-NEXT: str r2, [sp, #72] @ 4-byte Spill
+; ARMV6-NEXT: adcs r0, r0, #0
+; ARMV6-NEXT: str r0, [sp, #80] @ 4-byte Spill
+; ARMV6-NEXT: ldr r0, [sp, #76] @ 4-byte Reload
+; ARMV6-NEXT: mov r2, #0
+; ARMV6-NEXT: adcs r0, r0, #0
+; ARMV6-NEXT: str r0, [sp, #76] @ 4-byte Spill
+; ARMV6-NEXT: ldr r0, [sp, #84] @ 4-byte Reload
+; ARMV6-NEXT: adcs r7, r0, #0
+; ARMV6-NEXT: mov r0, #0
+; ARMV6-NEXT: str r0, [sp, #88] @ 4-byte Spill
+; ARMV6-NEXT: adcs r0, r2, #0
+; ARMV6-NEXT: bne .LBB1_13
+; ARMV6-NEXT: @ %bb.3: @ %udiv-preheader4
+; ARMV6-NEXT: mov r0, #0
+; ARMV6-NEXT: str r12, [sp, #172]
+; ARMV6-NEXT: str r0, [sp, #188]
+; ARMV6-NEXT: str r0, [sp, #184]
+; ARMV6-NEXT: str r0, [sp, #180]
+; ARMV6-NEXT: str r0, [sp, #176]
+; ARMV6-NEXT: add r0, sp, #160
+; ARMV6-NEXT: stm r0, {r6, r10, lr}
+; ARMV6-NEXT: and r0, r11, r5, lsr #3
+; ARMV6-NEXT: add r6, sp, #160
+; ARMV6-NEXT: add r2, r6, r0
+; ARMV6-NEXT: ldr r10, [sp, #80] @ 4-byte Reload
+; ARMV6-NEXT: ldr r11, [sp, #72] @ 4-byte Reload
+; ARMV6-NEXT: ldmib r2, {r1, r3, lr}
+; ARMV6-NEXT: lsl r2, lr, #1
+; ARMV6-NEXT: str r5, [sp, #84] @ 4-byte Spill
+; ARMV6-NEXT: and r5, r5, #31
+; ARMV6-NEXT: ldr r0, [r6, r0]
+; ARMV6-NEXT: eor r12, r5, #31
+; ARMV6-NEXT: lsr r9, r3, r5
+; ARMV6-NEXT: orr r9, r9, r2, lsl r12
+; ARMV6-NEXT: lsr r2, r1, r5
+; ARMV6-NEXT: lsl r1, r1, #1
+; ARMV6-NEXT: lsl r3, r3, #1
+; ARMV6-NEXT: lsr r0, r0, r5
+; ARMV6-NEXT: orr r1, r0, r1, lsl r12
+; ARMV6-NEXT: ldr r0, [sp, #272]
+; ARMV6-NEXT: orr r3, r2, r3, lsl r12
+; ARMV6-NEXT: ldr r6, [sp, #92] @ 4-byte Reload
+; ARMV6-NEXT: lsr lr, lr, r5
+; ARMV6-NEXT: subs r0, r0, #1
+; ARMV6-NEXT: str r0, [sp, #64] @ 4-byte Spill
+; ARMV6-NEXT: ldr r0, [sp, #276]
+; ARMV6-NEXT: mov r5, #0
+; ARMV6-NEXT: ldr r12, [sp, #76] @ 4-byte Reload
+; ARMV6-NEXT: sbcs r0, r0, #0
+; ARMV6-NEXT: str r0, [sp, #60] @ 4-byte Spill
+; ARMV6-NEXT: ldr r0, [sp, #280]
+; ARMV6-NEXT: sbcs r0, r0, #0
+; ARMV6-NEXT: str r0, [sp, #56] @ 4-byte Spill
+; ARMV6-NEXT: ldr r0, [sp, #284]
+; ARMV6-NEXT: sbc r0, r0, #0
+; ARMV6-NEXT: str r0, [sp, #52] @ 4-byte Spill
+; ARMV6-NEXT: .LBB1_4: @ %udiv-do-while3
+; ARMV6-NEXT: @ =>This Inner Loop Header: Depth=1
+; ARMV6-NEXT: ldr r2, [sp, #88] @ 4-byte Reload
+; ARMV6-NEXT: lsl r0, lr, #1
+; ARMV6-NEXT: str r2, [sp, #68] @ 4-byte Spill
+; ARMV6-NEXT: lsl r2, r9, #1
+; ARMV6-NEXT: orr r2, r2, r3, lsr #31
+; ARMV6-NEXT: lsl r3, r3, #1
+; ARMV6-NEXT: orr r3, r3, r1, lsr #31
+; ARMV6-NEXT: lsl r1, r1, #1
+; ARMV6-NEXT: str r6, [sp, #92] @ 4-byte Spill
+; ARMV6-NEXT: orr r1, r1, r6, lsr #31
+; ARMV6-NEXT: ldr r6, [sp, #64] @ 4-byte Reload
+; ARMV6-NEXT: orr r0, r0, r9, lsr #31
+; ARMV6-NEXT: str r4, [sp, #72] @ 4-byte Spill
+; ARMV6-NEXT: subs r4, r6, r1
+; ARMV6-NEXT: ldr r6, [sp, #60] @ 4-byte Reload
+; ARMV6-NEXT: str r11, [sp, #76] @ 4-byte Spill
+; ARMV6-NEXT: sbcs r4, r6, r3
+; ARMV6-NEXT: ldr r6, [sp, #56] @ 4-byte Reload
+; ARMV6-NEXT: str r8, [sp, #80] @ 4-byte Spill
+; ARMV6-NEXT: sbcs r4, r6, r2
+; ARMV6-NEXT: ldr r6, [sp, #52] @ 4-byte Reload
+; ARMV6-NEXT: sbc r4, r6, r0
+; ARMV6-NEXT: ldr r6, [sp, #284]
+; ARMV6-NEXT: and r11, r6, r4, asr #31
+; ARMV6-NEXT: ldr r6, [sp, #280]
+; ARMV6-NEXT: and lr, r6, r4, asr #31
+; ARMV6-NEXT: ldr r6, [sp, #276]
+; ARMV6-NEXT: and r8, r6, r4, asr #31
+; ARMV6-NEXT: ldr r6, [sp, #272]
+; ARMV6-NEXT: and r9, r6, r4, asr #31
+; ARMV6-NEXT: ldr r6, [sp, #72] @ 4-byte Reload
+; ARMV6-NEXT: subs r1, r1, r9
+; ARMV6-NEXT: sbcs r3, r3, r8
+; ARMV6-NEXT: sbcs r9, r2, lr
+; ARMV6-NEXT: sbc lr, r0, r11
+; ARMV6-NEXT: mov r0, #1
+; ARMV6-NEXT: and r0, r0, r4, asr #31
+; ARMV6-NEXT: str r0, [sp, #88] @ 4-byte Spill
+; ARMV6-NEXT: ldr r0, [sp, #84] @ 4-byte Reload
+; ARMV6-NEXT: subs r0, r0, #1
+; ARMV6-NEXT: str r0, [sp, #84] @ 4-byte Spill
+; ARMV6-NEXT: sbcs r10, r10, #0
+; ARMV6-NEXT: sbcs r12, r12, #0
+; ARMV6-NEXT: sbc r7, r7, #0
+; ARMV6-NEXT: orr r0, r0, r12
+; ARMV6-NEXT: orr r4, r10, r7
+; ARMV6-NEXT: orrs r0, r0, r4
+; ARMV6-NEXT: ldr r4, [sp, #76] @ 4-byte Reload
+; ARMV6-NEXT: ldr r0, [sp, #68] @ 4-byte Reload
+; ARMV6-NEXT: orr r2, r0, r6, lsl #1
+; ARMV6-NEXT: lsl r0, r4, #1
+; ARMV6-NEXT: orr r0, r0, r6, lsr #31
+; ARMV6-NEXT: ldr r6, [sp, #80] @ 4-byte Reload
+; ARMV6-NEXT: orr r11, r5, r0
+; ARMV6-NEXT: lsl r0, r6, #1
+; ARMV6-NEXT: orr r0, r0, r4, lsr #31
+; ARMV6-NEXT: mov r4, r2
+; ARMV6-NEXT: orr r8, r5, r0
+; ARMV6-NEXT: ldr r0, [sp, #92] @ 4-byte Reload
+; ARMV6-NEXT: lsl r0, r0, #1
+; ARMV6-NEXT: orr r0, r0, r6, lsr #31
+; ARMV6-NEXT: orr r6, r5, r0
+; ARMV6-NEXT: mov r5, #0
+; ARMV6-NEXT: bne .LBB1_4
+; ARMV6-NEXT: .LBB1_5: @ %udiv-loop-exit2
+; ARMV6-NEXT: ldr r0, [sp, #88] @ 4-byte Reload
+; ARMV6-NEXT: lsl r3, r11, #1
+; ARMV6-NEXT: lsl r2, r8, #1
+; ARMV6-NEXT: orr r3, r3, r4, lsr #31
+; ARMV6-NEXT: orr r7, r0, r4, lsl #1
+; ARMV6-NEXT: lsl r0, r6, #1
+; ARMV6-NEXT: orr r4, r0, r8, lsr #31
+; ARMV6-NEXT: ldr r10, [sp, #40] @ 4-byte Reload
+; ARMV6-NEXT: ldr r8, [sp, #36] @ 4-byte Reload
+; ARMV6-NEXT: orr r11, r2, r11, lsr #31
+; ARMV6-NEXT: .LBB1_6: @ %udiv-end1
+; ARMV6-NEXT: ldr r6, [sp, #44] @ 4-byte Reload
+; ARMV6-NEXT: mov lr, #0
+; ARMV6-NEXT: str r7, [sp, #12] @ 4-byte Spill
+; ARMV6-NEXT: ldr r7, [sp, #48] @ 4-byte Reload
+; ARMV6-NEXT: clz r0, r6
+; ARMV6-NEXT: ldmib sp, {r1, r2} @ 8-byte Folded Reload
+; ARMV6-NEXT: add r0, r0, #32
+; ARMV6-NEXT: cmp r7, #0
+; ARMV6-NEXT: clzne r0, r7
+; ARMV6-NEXT: cmp r10, #0
+; ARMV6-NEXT: movne r2, r1
+; ARMV6-NEXT: orrs r1, r6, r7
+; ARMV6-NEXT: ldr r5, [sp, #280]
+; ARMV6-NEXT: addeq r0, r2, #64
+; ARMV6-NEXT: ldr r2, [sp, #272]
+; ARMV6-NEXT: str r4, [sp, #24] @ 4-byte Spill
+; ARMV6-NEXT: clz r1, r5
+; ARMV6-NEXT: ldr r4, [sp, #284]
+; ARMV6-NEXT: clz r2, r2
+; ARMV6-NEXT: ldr r12, [sp, #276]
+; ARMV6-NEXT: add r1, r1, #32
+; ARMV6-NEXT: cmp r4, #0
+; ARMV6-NEXT: add r2, r2, #32
+; ARMV6-NEXT: clzne r1, r4
+; ARMV6-NEXT: cmp r12, #0
+; ARMV6-NEXT: str r3, [sp, #16] @ 4-byte Spill
+; ARMV6-NEXT: clzne r2, r12
+; ARMV6-NEXT: orrs r3, r5, r4
+; ARMV6-NEXT: str r11, [sp, #20] @ 4-byte Spill
+; ARMV6-NEXT: addeq r1, r2, #64
+; ARMV6-NEXT: subs r9, r1, r0
+; ARMV6-NEXT: sbcs r2, lr, #0
+; ARMV6-NEXT: sbcs r1, lr, #0
+; ARMV6-NEXT: sbc r3, lr, #0
+; ARMV6-NEXT: rsbs r0, r9, #127
+; ARMV6-NEXT: rscs r0, r2, #0
+; ARMV6-NEXT: mov r11, r1
+; ARMV6-NEXT: rscs r0, r1, #0
+; ARMV6-NEXT: orr r1, r10, r7
+; ARMV6-NEXT: rscs r0, r3, #0
+; ARMV6-NEXT: str r3, [sp, #84] @ 4-byte Spill
+; ARMV6-NEXT: orr r0, r8, r6
+; ARMV6-NEXT: orr r3, r12, r4
+; ARMV6-NEXT: orr r0, r0, r1
+; ARMV6-NEXT: ldr r1, [sp, #272]
+; ARMV6-NEXT: clz r0, r0
+; ARMV6-NEXT: movlo lr, #1
+; ARMV6-NEXT: orr r1, r1, r5
+; ARMV6-NEXT: mov r4, r8
+; ARMV6-NEXT: orr r1, r1, r3
+; ARMV6-NEXT: lsr r0, r0, #5
+; ARMV6-NEXT: clz r1, r1
+; ARMV6-NEXT: mov r3, r10
+; ARMV6-NEXT: lsr r1, r1, #5
+; ARMV6-NEXT: orr r0, r1, r0
+; ARMV6-NEXT: orrs r0, r0, lr
+; ARMV6-NEXT: movne r7, #0
+; ARMV6-NEXT: movne r6, #0
+; ARMV6-NEXT: movne r3, #0
+; ARMV6-NEXT: movne r4, #0
+; ARMV6-NEXT: bne .LBB1_12
+; ARMV6-NEXT: @ %bb.7: @ %udiv-end1
+; ARMV6-NEXT: ldr r1, [sp, #84] @ 4-byte Reload
+; ARMV6-NEXT: eor r0, r9, #127
+; ARMV6-NEXT: orr r0, r0, r11
+; ARMV6-NEXT: orr r1, r2, r1
+; ARMV6-NEXT: orrs r0, r0, r1
+; ARMV6-NEXT: beq .LBB1_12
+; ARMV6-NEXT: @ %bb.8: @ %udiv-bb1
+; ARMV6-NEXT: rsb r1, r9, #127
+; ARMV6-NEXT: add r4, sp, #128
+; ARMV6-NEXT: ldr r0, [sp, #264]
+; ARMV6-NEXT: mov lr, r11
+; ARMV6-NEXT: mov r12, r2
+; ARMV6-NEXT: mov r2, #0
+; ARMV6-NEXT: str r0, [sp, #152]
+; ARMV6-NEXT: mov r0, #12
+; ARMV6-NEXT: ldr r11, [sp, #268]
+; ARMV6-NEXT: and r3, r0, r1, lsr #3
+; ARMV6-NEXT: add r4, r4, #16
+; ARMV6-NEXT: str r8, [sp, #144]
+; ARMV6-NEXT: str r2, [sp, #140]
+; ARMV6-NEXT: and r0, r1, #31
+; ARMV6-NEXT: str r2, [sp, #136]
+; ARMV6-NEXT: eor r6, r0, #31
+; ARMV6-NEXT: str r2, [sp, #132]
+; ARMV6-NEXT: str r2, [sp, #128]
+; ARMV6-NEXT: str r10, [sp, #148]
+; ARMV6-NEXT: str r11, [sp, #156]
+; ARMV6-NEXT: ldr r3, [r4, -r3]!
+; ARMV6-NEXT: ldr r7, [r4, #4]
+; ARMV6-NEXT: ldr r5, [r4, #8]
+; ARMV6-NEXT: ldr r4, [r4, #12]
+; ARMV6-NEXT: lsl r1, r4, r0
+; ARMV6-NEXT: lsr r4, r5, #1
+; ARMV6-NEXT: orr r1, r1, r4, lsr r6
+; ARMV6-NEXT: str r1, [sp, #92] @ 4-byte Spill
+; ARMV6-NEXT: lsl r1, r5, r0
+; ARMV6-NEXT: lsrs r4, r7, #1
+; ARMV6-NEXT: orr r1, r1, r4, lsr r6
+; ARMV6-NEXT: str r1, [sp, #88] @ 4-byte Spill
+; ARMV6-NEXT: lsl r1, r7, r0
+; ARMV6-NEXT: lsr r4, r3, #1
+; ARMV6-NEXT: orr r1, r1, r4, lsr r6
+; ARMV6-NEXT: adds r7, r9, #1
+; ARMV6-NEXT: adcs r6, r12, #0
+; ARMV6-NEXT: str r1, [sp, #80] @ 4-byte Spill
+; ARMV6-NEXT: lsl r1, r3, r0
+; ARMV6-NEXT: ldr r0, [sp, #84] @ 4-byte Reload
+; ARMV6-NEXT: adcs r9, lr, #0
+; ARMV6-NEXT: adcs r0, r0, #0
+; ARMV6-NEXT: str r0, [sp, #76] @ 4-byte Spill
+; ARMV6-NEXT: adcs r0, r2, #0
+; ARMV6-NEXT: bne .LBB1_14
+; ARMV6-NEXT: @ %bb.9: @ %udiv-preheader
+; ARMV6-NEXT: ldr r0, [sp, #264]
+; ARMV6-NEXT: add r5, sp, #96
+; ARMV6-NEXT: str r0, [sp, #104]
+; ARMV6-NEXT: mov r0, #12
+; ARMV6-NEXT: str r2, [sp, #124]
+; ARMV6-NEXT: str r2, [sp, #120]
+; ARMV6-NEXT: str r2, [sp, #116]
+; ARMV6-NEXT: str r2, [sp, #112]
+; ARMV6-NEXT: and r2, r0, r7, lsr #3
+; ARMV6-NEXT: add r0, r5, r2
+; ARMV6-NEXT: str r11, [sp, #108]
+; ARMV6-NEXT: str r8, [sp, #96]
+; ARMV6-NEXT: and r11, r7, #31
+; ARMV6-NEXT: str r10, [sp, #100]
+; ARMV6-NEXT: eor r12, r11, #31
+; ARMV6-NEXT: ldr r3, [r0, #8]
+; ARMV6-NEXT: ldr r8, [r0, #12]
+; ARMV6-NEXT: ldr r4, [r0, #4]
+; ARMV6-NEXT: str r7, [sp, #84] @ 4-byte Spill
+; ARMV6-NEXT: lsr r0, r3, r11
+; ARMV6-NEXT: lsl r7, r8, #1
+; ARMV6-NEXT: lsl r3, r3, #1
+; ARMV6-NEXT: orr lr, r0, r7, lsl r12
+; ARMV6-NEXT: lsr r0, r4, r11
+; ARMV6-NEXT: orr r10, r0, r3, lsl r12
+; ARMV6-NEXT: ldr r3, [r5, r2]
+; ARMV6-NEXT: lsl r4, r4, #1
+; ARMV6-NEXT: lsr r8, r8, r11
+; ARMV6-NEXT: ldr r5, [sp, #92] @ 4-byte Reload
+; ARMV6-NEXT: lsr r3, r3, r11
+; ARMV6-NEXT: orr r4, r3, r4, lsl r12
+; ARMV6-NEXT: ldr r3, [sp, #272]
+; ARMV6-NEXT: ldr r12, [sp, #76] @ 4-byte Reload
+; ARMV6-NEXT: subs r0, r3, #1
+; ARMV6-NEXT: ldr r3, [sp, #276]
+; ARMV6-NEXT: str r0, [sp, #64] @ 4-byte Spill
+; ARMV6-NEXT: sbcs r0, r3, #0
+; ARMV6-NEXT: ldr r3, [sp, #280]
+; ARMV6-NEXT: str r0, [sp, #60] @ 4-byte Spill
+; ARMV6-NEXT: sbcs r0, r3, #0
+; ARMV6-NEXT: ldr r3, [sp, #284]
+; ARMV6-NEXT: ldr r11, [sp, #80] @ 4-byte Reload
+; ARMV6-NEXT: str r0, [sp, #56] @ 4-byte Spill
+; ARMV6-NEXT: sbc r0, r3, #0
+; ARMV6-NEXT: str r0, [sp, #52] @ 4-byte Spill
+; ARMV6-NEXT: mov r3, #0
+; ARMV6-NEXT: mov r0, #0
+; ARMV6-NEXT: .LBB1_10: @ %udiv-do-while
+; ARMV6-NEXT: @ =>This Inner Loop Header: Depth=1
+; ARMV6-NEXT: add r7, sp, #72
+; ARMV6-NEXT: ldr r2, [sp, #88] @ 4-byte Reload
+; ARMV6-NEXT: stm r7, {r0, r1, r2} @ 12-byte Folded Spill
+; ARMV6-NEXT: lsl r0, r10, #1
+; ARMV6-NEXT: orr r0, r0, r4, lsr #31
+; ARMV6-NEXT: lsl r4, r4, #1
+; ARMV6-NEXT: ldr r1, [sp, #64] @ 4-byte Reload
+; ARMV6-NEXT: orr r4, r4, r5, lsr #31
+; ARMV6-NEXT: str r5, [sp, #92] @ 4-byte Spill
+; ARMV6-NEXT: subs r5, r1, r4
+; ARMV6-NEXT: ldr r1, [sp, #60] @ 4-byte Reload
+; ARMV6-NEXT: lsl r7, lr, #1
+; ARMV6-NEXT: mov r2, r3
+; ARMV6-NEXT: sbcs r5, r1, r0
+; ARMV6-NEXT: ldr r1, [sp, #56] @ 4-byte Reload
+; ARMV6-NEXT: orr r7, r7, r10, lsr #31
+; ARMV6-NEXT: lsl r3, r8, #1
+; ARMV6-NEXT: sbcs r5, r1, r7
+; ARMV6-NEXT: ldr r1, [sp, #52] @ 4-byte Reload
+; ARMV6-NEXT: orr r3, r3, lr, lsr #31
+; ARMV6-NEXT: str r11, [sp, #88] @ 4-byte Spill
+; ARMV6-NEXT: sbc r5, r1, r3
+; ARMV6-NEXT: ldr r1, [sp, #284]
+; ARMV6-NEXT: and r11, r1, r5, asr #31
+; ARMV6-NEXT: ldr r1, [sp, #280]
+; ARMV6-NEXT: and lr, r1, r5, asr #31
+; ARMV6-NEXT: ldr r1, [sp, #276]
+; ARMV6-NEXT: and r10, r1, r5, asr #31
+; ARMV6-NEXT: ldr r1, [sp, #272]
+; ARMV6-NEXT: and r8, r1, r5, asr #31
+; ARMV6-NEXT: subs r1, r4, r8
+; ARMV6-NEXT: str r1, [sp, #68] @ 4-byte Spill
+; ARMV6-NEXT: sbcs r4, r0, r10
+; ARMV6-NEXT: mov r0, #1
+; ARMV6-NEXT: sbcs r10, r7, lr
+; ARMV6-NEXT: and r0, r0, r5, asr #31
+; ARMV6-NEXT: sbc r8, r3, r11
+; ARMV6-NEXT: ldr r3, [sp, #84] @ 4-byte Reload
+; ARMV6-NEXT: ldr r1, [sp, #76] @ 4-byte Reload
+; ARMV6-NEXT: subs r3, r3, #1
+; ARMV6-NEXT: str r3, [sp, #84] @ 4-byte Spill
+; ARMV6-NEXT: sbcs r6, r6, #0
+; ARMV6-NEXT: ldr lr, [sp, #88] @ 4-byte Reload
+; ARMV6-NEXT: sbcs r9, r9, #0
+; ARMV6-NEXT: sbc r12, r12, #0
+; ARMV6-NEXT: orr r3, r3, r9
+; ARMV6-NEXT: orr r5, r6, r12
+; ARMV6-NEXT: orrs r3, r3, r5
+; ARMV6-NEXT: ldr r5, [sp, #80] @ 4-byte Reload
+; ARMV6-NEXT: ldr r3, [sp, #72] @ 4-byte Reload
+; ARMV6-NEXT: orr r7, r3, r1, lsl #1
+; ARMV6-NEXT: lsl r3, lr, #1
+; ARMV6-NEXT: orr r1, r3, r1, lsr #31
+; ARMV6-NEXT: lsl r3, r5, #1
+; ARMV6-NEXT: orr r3, r3, lr, lsr #31
+; ARMV6-NEXT: orr r11, r2, r1
+; ARMV6-NEXT: orr r1, r2, r3
+; ARMV6-NEXT: str r1, [sp, #88] @ 4-byte Spill
+; ARMV6-NEXT: ldr r1, [sp, #92] @ 4-byte Reload
+; ARMV6-NEXT: mov lr, r10
+; ARMV6-NEXT: mov r10, r4
+; ARMV6-NEXT: ldr r4, [sp, #68] @ 4-byte Reload
+; ARMV6-NEXT: lsl r3, r1, #1
+; ARMV6-NEXT: mov r1, r7
+; ARMV6-NEXT: orr r3, r3, r5, lsr #31
+; ARMV6-NEXT: orr r5, r2, r3
+; ARMV6-NEXT: mov r3, #0
+; ARMV6-NEXT: bne .LBB1_10
+; ARMV6-NEXT: .LBB1_11: @ %udiv-loop-exit
+; ARMV6-NEXT: orr r4, r0, r1, lsl #1
+; ARMV6-NEXT: lsl r0, r11, #1
+; ARMV6-NEXT: orr r3, r0, r1, lsr #31
+; ARMV6-NEXT: ldr r1, [sp, #88] @ 4-byte Reload
+; ARMV6-NEXT: ldr r10, [sp, #40] @ 4-byte Reload
+; ARMV6-NEXT: ldr r8, [sp, #36] @ 4-byte Reload
+; ARMV6-NEXT: lsl r0, r1, #1
+; ARMV6-NEXT: orr r6, r0, r11, lsr #31
+; ARMV6-NEXT: lsl r0, r5, #1
+; ARMV6-NEXT: orr r7, r0, r1, lsr #31
+; ARMV6-NEXT: .LBB1_12: @ %udiv-end
+; ARMV6-NEXT: ldr r0, [sp, #28] @ 4-byte Reload
+; ARMV6-NEXT: mov lr, r7
+; ARMV6-NEXT: ldr r1, [sp, #12] @ 4-byte Reload
+; ARMV6-NEXT: mov r7, #0
+; ARMV6-NEXT: ldr r11, [sp, #272]
+; ARMV6-NEXT: mov r12, r6
+; ARMV6-NEXT: str r1, [r0]
+; ARMV6-NEXT: mov r2, #0
+; ARMV6-NEXT: ldr r1, [sp, #16] @ 4-byte Reload
+; ARMV6-NEXT: str r1, [r0, #4]
+; ARMV6-NEXT: umull r6, r5, r11, r3
+; ARMV6-NEXT: ldr r1, [sp, #20] @ 4-byte Reload
+; ARMV6-NEXT: str r1, [r0, #8]
+; ARMV6-NEXT: ldr r1, [sp, #24] @ 4-byte Reload
+; ARMV6-NEXT: str r1, [r0, #12]
+; ARMV6-NEXT: umull r0, r1, r11, r4
+; ARMV6-NEXT: ldr r9, [sp, #276]
+; ARMV6-NEXT: umlal r1, r7, r9, r4
+; ARMV6-NEXT: str r0, [sp, #92] @ 4-byte Spill
+; ARMV6-NEXT: adds r0, r6, r1
+; ARMV6-NEXT: ldr r1, [sp, #280]
+; ARMV6-NEXT: adcs r7, r7, r5
+; ARMV6-NEXT: str r0, [sp, #88] @ 4-byte Spill
+; ARMV6-NEXT: ldr r0, [sp, #284]
+; ARMV6-NEXT: adc r2, r2, #0
+; ARMV6-NEXT: umull r6, r5, r4, r1
+; ARMV6-NEXT: mla r5, r4, r0, r5
+; ARMV6-NEXT: ldr r0, [sp, #92] @ 4-byte Reload
+; ARMV6-NEXT: umlal r7, r2, r9, r3
+; ARMV6-NEXT: mla r3, r3, r1, r5
+; ARMV6-NEXT: ldr r1, [sp, #88] @ 4-byte Reload
+; ARMV6-NEXT: umull r5, r4, r12, r11
+; ARMV6-NEXT: mla r4, r12, r9, r4
+; ARMV6-NEXT: adds r6, r5, r6
+; ARMV6-NEXT: mla r4, lr, r11, r4
+; ARMV6-NEXT: adc r3, r4, r3
+; ARMV6-NEXT: adds r7, r7, r6
+; ARMV6-NEXT: adc r2, r2, r3
+; ARMV6-NEXT: subs r0, r8, r0
+; ARMV6-NEXT: ldr r3, [sp, #264]
+; ARMV6-NEXT: sbcs r1, r10, r1
+; ARMV6-NEXT: sbcs r3, r3, r7
+; ARMV6-NEXT: ldr r7, [sp, #268]
+; ARMV6-NEXT: sbc r2, r7, r2
+; ARMV6-NEXT: ldr r7, [sp, #32] @ 4-byte Reload
+; ARMV6-NEXT: stm r7, {r0, r1, r3}
+; ARMV6-NEXT: str r2, [r7, #12]
+; ARMV6-NEXT: add sp, sp, #228
+; ARMV6-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+; ARMV6-NEXT: .LBB1_13:
+; ARMV6-NEXT: ldr r6, [sp, #92] @ 4-byte Reload
+; ARMV6-NEXT: ldr r11, [sp, #72] @ 4-byte Reload
+; ARMV6-NEXT: b .LBB1_5
+; ARMV6-NEXT: .LBB1_14:
+; ARMV6-NEXT: ldr r5, [sp, #92] @ 4-byte Reload
+; ARMV6-NEXT: mov r0, #0
+; ARMV6-NEXT: ldr r11, [sp, #80] @ 4-byte Reload
+; ARMV6-NEXT: b .LBB1_11
+;
+; ARMV7-LABEL: udivrem_i128:
+; ARMV7: @ %bb.0: @ %_udiv-special-cases_udiv-special-cases
+; ARMV7-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; ARMV7-NEXT: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; ARMV7-NEXT: .pad #228
+; ARMV7-NEXT: sub sp, sp, #228
+; ARMV7-NEXT: ldr r4, [sp, #264]
+; ARMV7-NEXT: mov r11, r3
+; ARMV7-NEXT: ldr r5, [sp, #268]
+; ARMV7-NEXT: clz r3, r3
+; ARMV7-NEXT: str r0, [sp, #24] @ 4-byte Spill
+; ARMV7-NEXT: clz r0, r4
+; ARMV7-NEXT: str r1, [sp, #28] @ 4-byte Spill
+; ARMV7-NEXT: clz r1, r2
+; ARMV7-NEXT: add r0, r0, #32
+; ARMV7-NEXT: cmp r5, #0
+; ARMV7-NEXT: clzne r0, r5
+; ARMV7-NEXT: add r1, r1, #32
+; ARMV7-NEXT: ldr r10, [sp, #280]
+; ARMV7-NEXT: cmp r11, #0
+; ARMV7-NEXT: str r1, [sp, #8] @ 4-byte Spill
+; ARMV7-NEXT: movne r1, r3
+; ARMV7-NEXT: str r3, [sp, #4] @ 4-byte Spill
+; ARMV7-NEXT: orrs r3, r4, r5
+; ARMV7-NEXT: ldr r12, [sp, #272]
+; ARMV7-NEXT: addeq r0, r1, #64
+; ARMV7-NEXT: ldr r6, [sp, #284]
+; ARMV7-NEXT: clz r1, r10
+; ARMV7-NEXT: ldr lr, [sp, #276]
+; ARMV7-NEXT: clz r3, r12
+; ARMV7-NEXT: add r1, r1, #32
+; ARMV7-NEXT: cmp r6, #0
+; ARMV7-NEXT: clzne r1, r6
+; ARMV7-NEXT: add r7, r3, #32
+; ARMV7-NEXT: cmp lr, #0
+; ARMV7-NEXT: str r4, [sp, #44] @ 4-byte Spill
+; ARMV7-NEXT: clzne r7, lr
+; ARMV7-NEXT: orrs r3, r10, r6
+; ARMV7-NEXT: addeq r1, r7, #64
+; ARMV7-NEXT: orr r7, r2, r4
+; ARMV7-NEXT: subs r9, r1, r0
+; ARMV7-NEXT: mov r0, #0
+; ARMV7-NEXT: sbcs r3, r0, #0
+; ARMV7-NEXT: str r3, [sp, #80] @ 4-byte Spill
+; ARMV7-NEXT: sbcs r8, r0, #0
+; ARMV7-NEXT: str r5, [sp, #40] @ 4-byte Spill
+; ARMV7-NEXT: sbc r1, r0, #0
+; ARMV7-NEXT: rsbs r0, r9, #127
+; ARMV7-NEXT: rscs r0, r3, #0
+; ARMV7-NEXT: str r1, [sp, #84] @ 4-byte Spill
+; ARMV7-NEXT: rscs r0, r8, #0
+; ARMV7-NEXT: orr r3, r12, r10
+; ARMV7-NEXT: rscs r0, r1, #0
+; ARMV7-NEXT: orr r1, lr, r6
+; ARMV7-NEXT: orr r1, r3, r1
+; ARMV7-NEXT: orr r3, r11, r5
+; ARMV7-NEXT: orr r3, r7, r3
+; ARMV7-NEXT: clz r1, r1
+; ARMV7-NEXT: clz r3, r3
+; ARMV7-NEXT: mov r0, #0
+; ARMV7-NEXT: lsr r1, r1, #5
+; ARMV7-NEXT: movwlo r0, #1
+; ARMV7-NEXT: lsr r3, r3, #5
+; ARMV7-NEXT: orr r1, r1, r3
+; ARMV7-NEXT: orrs r0, r1, r0
+; ARMV7-NEXT: mov r6, r11
+; ARMV7-NEXT: mov r7, r2
+; ARMV7-NEXT: movwne r5, #0
+; ARMV7-NEXT: movwne r4, #0
+; ARMV7-NEXT: movwne r6, #0
+; ARMV7-NEXT: movwne r7, #0
+; ARMV7-NEXT: str r2, [sp, #36] @ 4-byte Spill
+; ARMV7-NEXT: str r11, [sp, #32] @ 4-byte Spill
+; ARMV7-NEXT: bne .LBB1_6
+; ARMV7-NEXT: @ %bb.1: @ %_udiv-special-cases_udiv-special-cases
+; ARMV7-NEXT: ldr r1, [sp, #84] @ 4-byte Reload
+; ARMV7-NEXT: eor r0, r9, #127
+; ARMV7-NEXT: ldr r2, [sp, #80] @ 4-byte Reload
+; ARMV7-NEXT: orr r0, r0, r8
+; ARMV7-NEXT: str r8, [sp, #76] @ 4-byte Spill
+; ARMV7-NEXT: orr r2, r2, r1
+; ARMV7-NEXT: orrs r0, r0, r2
+; ARMV7-NEXT: beq .LBB1_6
+; ARMV7-NEXT: @ %bb.2: @ %udiv-bb15
+; ARMV7-NEXT: ldr r8, [sp, #36] @ 4-byte Reload
+; ARMV7-NEXT: mov r0, #0
+; ARMV7-NEXT: str r8, [sp, #208]
+; ARMV7-NEXT: add r3, sp, #192
+; ARMV7-NEXT: str r0, [sp, #204]
+; ARMV7-NEXT: mov r2, #12
+; ARMV7-NEXT: str r0, [sp, #200]
+; ARMV7-NEXT: add r3, r3, #16
+; ARMV7-NEXT: str r0, [sp, #196]
+; ARMV7-NEXT: str r0, [sp, #192]
+; ARMV7-NEXT: rsb r0, r9, #127
+; ARMV7-NEXT: str r11, [sp, #212]
+; ARMV7-NEXT: ldr r12, [sp, #44] @ 4-byte Reload
+; ARMV7-NEXT: and r2, r2, r0, lsr #3
+; ARMV7-NEXT: str r12, [sp, #216]
+; ARMV7-NEXT: and r0, r0, #31
+; ARMV7-NEXT: ldr r10, [sp, #40] @ 4-byte Reload
+; ARMV7-NEXT: eor r1, r0, #31
+; ARMV7-NEXT: str r10, [sp, #220]
+; ARMV7-NEXT: ldr r7, [r3, -r2]!
+; ARMV7-NEXT: ldmib r3, {r2, r4}
+; ARMV7-NEXT: lsr r6, r7, #1
+; ARMV7-NEXT: ldr r3, [r3, #12]
+; ARMV7-NEXT: lsl r5, r2, r0
+; ARMV7-NEXT: lsrs r2, r2, #1
+; ARMV7-NEXT: orr r5, r5, r6, lsr r1
+; ARMV7-NEXT: str r5, [sp, #72] @ 4-byte Spill
+; ARMV7-NEXT: lsl r3, r3, r0
+; ARMV7-NEXT: lsr r5, r4, #1
+; ARMV7-NEXT: orr r3, r3, r5, lsr r1
+; ARMV7-NEXT: str r3, [sp, #92] @ 4-byte Spill
+; ARMV7-NEXT: lsl r3, r4, r0
+; ARMV7-NEXT: lsl r0, r7, r0
+; ARMV7-NEXT: str r0, [sp, #68] @ 4-byte Spill
+; ARMV7-NEXT: adds r5, r9, #1
+; ARMV7-NEXT: ldr r0, [sp, #80] @ 4-byte Reload
+; ARMV7-NEXT: orr r1, r3, r2, lsr r1
+; ARMV7-NEXT: str r1, [sp, #64] @ 4-byte Spill
+; ARMV7-NEXT: mov r3, #0
+; ARMV7-NEXT: adcs r0, r0, #0
+; ARMV7-NEXT: str r0, [sp, #20] @ 4-byte Spill
+; ARMV7-NEXT: ldr r0, [sp, #76] @ 4-byte Reload
+; ARMV7-NEXT: adcs r6, r0, #0
+; ARMV7-NEXT: ldr r0, [sp, #84] @ 4-byte Reload
+; ARMV7-NEXT: adcs r0, r0, #0
+; ARMV7-NEXT: str r0, [sp, #76] @ 4-byte Spill
+; ARMV7-NEXT: mov r0, #0
+; ARMV7-NEXT: str r0, [sp, #88] @ 4-byte Spill
+; ARMV7-NEXT: adcs r0, r3, #0
+; ARMV7-NEXT: bne .LBB1_13
+; ARMV7-NEXT: @ %bb.3: @ %udiv-preheader4
+; ARMV7-NEXT: mov r0, #0
+; ARMV7-NEXT: ubfx r1, r5, #5, #2
+; ARMV7-NEXT: str r0, [sp, #188]
+; ARMV7-NEXT: and r9, r5, #31
+; ARMV7-NEXT: str r0, [sp, #184]
+; ARMV7-NEXT: eor r3, r9, #31
+; ARMV7-NEXT: str r0, [sp, #180]
+; ARMV7-NEXT: str r0, [sp, #176]
+; ARMV7-NEXT: add r0, sp, #160
+; ARMV7-NEXT: stm r0, {r8, r11, r12}
+; ARMV7-NEXT: add r12, sp, #160
+; ARMV7-NEXT: add r0, r12, r1, lsl #2
+; ARMV7-NEXT: str r10, [sp, #172]
+; ARMV7-NEXT: ldmib r0, {r4, r7, r8}
+; ARMV7-NEXT: lsr r10, r7, r9
+; ARMV7-NEXT: lsl r0, r8, #1
+; ARMV7-NEXT: lsl r7, r7, #1
+; ARMV7-NEXT: str r5, [sp, #84] @ 4-byte Spill
+; ARMV7-NEXT: orr lr, r10, r0, lsl r3
+; ARMV7-NEXT: lsr r10, r4, r9
+; ARMV7-NEXT: orr r0, r10, r7, lsl r3
+; ARMV7-NEXT: str r0, [sp, #80] @ 4-byte Spill
+; ARMV7-NEXT: ldr r1, [r12, r1, lsl #2]
+; ARMV7-NEXT: lsl r7, r4, #1
+; ARMV7-NEXT: ldr r12, [sp, #276]
+; ARMV7-NEXT: lsr r11, r8, r9
+; ARMV7-NEXT: ldr r10, [sp, #76] @ 4-byte Reload
+; ARMV7-NEXT: ldr r5, [sp, #68] @ 4-byte Reload
+; ARMV7-NEXT: mov r8, #0
+; ARMV7-NEXT: lsr r1, r1, r9
+; ARMV7-NEXT: ldr r9, [sp, #20] @ 4-byte Reload
+; ARMV7-NEXT: orr r1, r1, r7, lsl r3
+; ARMV7-NEXT: ldr r7, [sp, #272]
+; ARMV7-NEXT: ldr r2, [sp, #64] @ 4-byte Reload
+; ARMV7-NEXT: subs r3, r7, #1
+; ARMV7-NEXT: ldr r7, [sp, #280]
+; ARMV7-NEXT: str r3, [sp, #60] @ 4-byte Spill
+; ARMV7-NEXT: sbcs r3, r12, #0
+; ARMV7-NEXT: str r3, [sp, #56] @ 4-byte Spill
+; ARMV7-NEXT: sbcs r3, r7, #0
+; ARMV7-NEXT: ldr r7, [sp, #284]
+; ARMV7-NEXT: str r3, [sp, #52] @ 4-byte Spill
+; ARMV7-NEXT: sbc r3, r7, #0
+; ARMV7-NEXT: str r3, [sp, #48] @ 4-byte Spill
+; ARMV7-NEXT: ldr r12, [sp, #92] @ 4-byte Reload
+; ARMV7-NEXT: ldr r3, [sp, #72] @ 4-byte Reload
+; ARMV7-NEXT: .LBB1_4: @ %udiv-do-while3
+; ARMV7-NEXT: @ =>This Inner Loop Header: Depth=1
+; ARMV7-NEXT: str r3, [sp, #72] @ 4-byte Spill
+; ARMV7-NEXT: lsl r3, r11, #1
+; ARMV7-NEXT: ldr r11, [sp, #80] @ 4-byte Reload
+; ARMV7-NEXT: lsl r7, lr, #1
+; ARMV7-NEXT: ldr r0, [sp, #88] @ 4-byte Reload
+; ARMV7-NEXT: orr r3, r3, lr, lsr #31
+; ARMV7-NEXT: str r0, [sp, #64] @ 4-byte Spill
+; ARMV7-NEXT: lsl r0, r11, #1
+; ARMV7-NEXT: str r2, [sp, #76] @ 4-byte Spill
+; ARMV7-NEXT: orr r0, r0, r1, lsr #31
+; ARMV7-NEXT: lsl r1, r1, #1
+; ARMV7-NEXT: ldr r2, [sp, #60] @ 4-byte Reload
+; ARMV7-NEXT: orr r1, r1, r12, lsr #31
+; ARMV7-NEXT: orr r7, r7, r11, lsr #31
+; ARMV7-NEXT: str r5, [sp, #68] @ 4-byte Spill
+; ARMV7-NEXT: subs r4, r2, r1
+; ARMV7-NEXT: ldr r2, [sp, #56] @ 4-byte Reload
+; ARMV7-NEXT: str r12, [sp, #92] @ 4-byte Spill
+; ARMV7-NEXT: sbcs r4, r2, r0
+; ARMV7-NEXT: ldr r2, [sp, #52] @ 4-byte Reload
+; ARMV7-NEXT: sbcs r4, r2, r7
+; ARMV7-NEXT: ldr r2, [sp, #48] @ 4-byte Reload
+; ARMV7-NEXT: sbc r4, r2, r3
+; ARMV7-NEXT: ldr r2, [sp, #284]
+; ARMV7-NEXT: and r5, r2, r4, asr #31
+; ARMV7-NEXT: ldr r2, [sp, #280]
+; ARMV7-NEXT: and lr, r2, r4, asr #31
+; ARMV7-NEXT: ldr r2, [sp, #276]
+; ARMV7-NEXT: and r11, r2, r4, asr #31
+; ARMV7-NEXT: ldr r2, [sp, #272]
+; ARMV7-NEXT: and r12, r2, r4, asr #31
+; ARMV7-NEXT: mov r2, #1
+; ARMV7-NEXT: subs r1, r1, r12
+; ARMV7-NEXT: and r2, r2, r4, asr #31
+; ARMV7-NEXT: sbcs r0, r0, r11
+; ARMV7-NEXT: ldr r4, [sp, #84] @ 4-byte Reload
+; ARMV7-NEXT: sbcs lr, r7, lr
+; ARMV7-NEXT: str r2, [sp, #88] @ 4-byte Spill
+; ARMV7-NEXT: ldr r2, [sp, #68] @ 4-byte Reload
+; ARMV7-NEXT: sbc r11, r3, r5
+; ARMV7-NEXT: subs r4, r4, #1
+; ARMV7-NEXT: str r0, [sp, #80] @ 4-byte Spill
+; ARMV7-NEXT: ldr r0, [sp, #64] @ 4-byte Reload
+; ARMV7-NEXT: sbcs r9, r9, #0
+; ARMV7-NEXT: sbcs r6, r6, #0
+; ARMV7-NEXT: ldr r7, [sp, #76] @ 4-byte Reload
+; ARMV7-NEXT: orr r5, r0, r2, lsl #1
+; ARMV7-NEXT: ldr r0, [sp, #72] @ 4-byte Reload
+; ARMV7-NEXT: sbc r10, r10, #0
+; ARMV7-NEXT: str r4, [sp, #84] @ 4-byte Spill
+; ARMV7-NEXT: orr r3, r9, r10
+; ARMV7-NEXT: orr r4, r4, r6
+; ARMV7-NEXT: orrs r3, r4, r3
+; ARMV7-NEXT: lsl r4, r0, #1
+; ARMV7-NEXT: orr r2, r4, r2, lsr #31
+; ARMV7-NEXT: orr r3, r8, r2
+; ARMV7-NEXT: lsl r2, r7, #1
+; ARMV7-NEXT: orr r2, r2, r0, lsr #31
+; ARMV7-NEXT: ldr r0, [sp, #92] @ 4-byte Reload
+; ARMV7-NEXT: orr r2, r8, r2
+; ARMV7-NEXT: lsl r4, r0, #1
+; ARMV7-NEXT: orr r4, r4, r7, lsr #31
+; ARMV7-NEXT: orr r12, r8, r4
+; ARMV7-NEXT: mov r8, #0
+; ARMV7-NEXT: bne .LBB1_4
+; ARMV7-NEXT: .LBB1_5: @ %udiv-loop-exit2
+; ARMV7-NEXT: ldr r0, [sp, #88] @ 4-byte Reload
+; ARMV7-NEXT: lsl r1, r3, #1
+; ARMV7-NEXT: orr r6, r1, r5, lsr #31
+; ARMV7-NEXT: lsl r1, r2, #1
+; ARMV7-NEXT: ldr r11, [sp, #32] @ 4-byte Reload
+; ARMV7-NEXT: orr r4, r1, r3, lsr #31
+; ARMV7-NEXT: orr r7, r0, r5, lsl #1
+; ARMV7-NEXT: lsl r1, r12, #1
+; ARMV7-NEXT: orr r5, r1, r2, lsr #31
+; ARMV7-NEXT: .LBB1_6: @ %udiv-end1
+; ARMV7-NEXT: str r5, [sp, #48] @ 4-byte Spill
+; ARMV7-NEXT: ldr r5, [sp, #44] @ 4-byte Reload
+; ARMV7-NEXT: str r4, [sp, #20] @ 4-byte Spill
+; ARMV7-NEXT: ldr r4, [sp, #40] @ 4-byte Reload
+; ARMV7-NEXT: clz r0, r5
+; ARMV7-NEXT: ldmib sp, {r1, r2} @ 8-byte Folded Reload
+; ARMV7-NEXT: add r0, r0, #32
+; ARMV7-NEXT: cmp r4, #0
+; ARMV7-NEXT: clzne r0, r4
+; ARMV7-NEXT: cmp r11, #0
+; ARMV7-NEXT: ldr lr, [sp, #280]
+; ARMV7-NEXT: movne r2, r1
+; ARMV7-NEXT: orrs r1, r5, r4
+; ARMV7-NEXT: str r6, [sp, #16] @ 4-byte Spill
+; ARMV7-NEXT: ldr r6, [sp, #284]
+; ARMV7-NEXT: clz r1, lr
+; ARMV7-NEXT: ldr r12, [sp, #272]
+; ARMV7-NEXT: addeq r0, r2, #64
+; ARMV7-NEXT: str r7, [sp, #12] @ 4-byte Spill
+; ARMV7-NEXT: add r1, r1, #32
+; ARMV7-NEXT: ldr r7, [sp, #276]
+; ARMV7-NEXT: clz r2, r12
+; ARMV7-NEXT: cmp r6, #0
+; ARMV7-NEXT: add r2, r2, #32
+; ARMV7-NEXT: clzne r1, r6
+; ARMV7-NEXT: cmp r7, #0
+; ARMV7-NEXT: clzne r2, r7
+; ARMV7-NEXT: orrs r3, lr, r6
+; ARMV7-NEXT: addeq r1, r2, #64
+; ARMV7-NEXT: mov r2, #0
+; ARMV7-NEXT: subs r10, r1, r0
+; ARMV7-NEXT: orr r3, r11, r4
+; ARMV7-NEXT: sbcs r9, r2, #0
+; ARMV7-NEXT: sbcs r1, r2, #0
+; ARMV7-NEXT: str r1, [sp, #80] @ 4-byte Spill
+; ARMV7-NEXT: sbc r8, r2, #0
+; ARMV7-NEXT: rsbs r0, r10, #127
+; ARMV7-NEXT: rscs r0, r9, #0
+; ARMV7-NEXT: rscs r0, r1, #0
+; ARMV7-NEXT: orr r1, r7, r6
+; ARMV7-NEXT: rscs r0, r8, #0
+; ARMV7-NEXT: mov r7, r11
+; ARMV7-NEXT: orr r0, r12, lr
+; ARMV7-NEXT: ldr r12, [sp, #36] @ 4-byte Reload
+; ARMV7-NEXT: orr r0, r0, r1
+; ARMV7-NEXT: movwlo r2, #1
+; ARMV7-NEXT: orr r1, r12, r5
+; ARMV7-NEXT: clz r0, r0
+; ARMV7-NEXT: orr r1, r1, r3
+; ARMV7-NEXT: mov r3, r12
+; ARMV7-NEXT: clz r1, r1
+; ARMV7-NEXT: lsr r0, r0, #5
+; ARMV7-NEXT: lsr r1, r1, #5
+; ARMV7-NEXT: orr r0, r0, r1
+; ARMV7-NEXT: orrs r0, r0, r2
+; ARMV7-NEXT: movwne r5, #0
+; ARMV7-NEXT: movwne r4, #0
+; ARMV7-NEXT: movwne r3, #0
+; ARMV7-NEXT: movwne r7, #0
+; ARMV7-NEXT: str r5, [sp, #44] @ 4-byte Spill
+; ARMV7-NEXT: str r4, [sp, #40] @ 4-byte Spill
+; ARMV7-NEXT: bne .LBB1_15
+; ARMV7-NEXT: @ %bb.7: @ %udiv-end1
+; ARMV7-NEXT: ldr r1, [sp, #80] @ 4-byte Reload
+; ARMV7-NEXT: eor r0, r10, #127
+; ARMV7-NEXT: ldr lr, [sp, #264]
+; ARMV7-NEXT: orr r0, r0, r1
+; ARMV7-NEXT: orr r1, r9, r8
+; ARMV7-NEXT: orrs r0, r0, r1
+; ARMV7-NEXT: beq .LBB1_12
+; ARMV7-NEXT: @ %bb.8: @ %udiv-bb1
+; ARMV7-NEXT: mov r0, #0
+; ARMV7-NEXT: add r3, sp, #128
+; ARMV7-NEXT: str r0, [sp, #140]
+; ARMV7-NEXT: mov r1, #12
+; ARMV7-NEXT: str r0, [sp, #136]
+; ARMV7-NEXT: add r3, r3, #16
+; ARMV7-NEXT: str r0, [sp, #132]
+; ARMV7-NEXT: str r0, [sp, #128]
+; ARMV7-NEXT: ldr r0, [sp, #268]
+; ARMV7-NEXT: str r0, [sp, #156]
+; ARMV7-NEXT: rsb r0, r10, #127
+; ARMV7-NEXT: str r12, [sp, #144]
+; ARMV7-NEXT: and r1, r1, r0, lsr #3
+; ARMV7-NEXT: str r11, [sp, #148]
+; ARMV7-NEXT: str lr, [sp, #152]
+; ARMV7-NEXT: and r0, r0, #31
+; ARMV7-NEXT: ldr r1, [r3, -r1]!
+; ARMV7-NEXT: eor r2, r0, #31
+; ARMV7-NEXT: ldmib r3, {r5, r7}
+; ARMV7-NEXT: lsr r6, r1, #1
+; ARMV7-NEXT: ldr r3, [r3, #12]
+; ARMV7-NEXT: lsl r4, r5, r0
+; ARMV7-NEXT: orr r6, r4, r6, lsr r2
+; ARMV7-NEXT: lsr r4, r7, #1
+; ARMV7-NEXT: str r6, [sp, #84] @ 4-byte Spill
+; ARMV7-NEXT: lsl r3, r3, r0
+; ARMV7-NEXT: orr r3, r3, r4, lsr r2
+; ARMV7-NEXT: lsrs r4, r5, #1
+; ARMV7-NEXT: str r3, [sp, #92] @ 4-byte Spill
+; ARMV7-NEXT: lsl r3, r7, r0
+; ARMV7-NEXT: orr r5, r3, r4, lsr r2
+; ARMV7-NEXT: adds r4, r10, #1
+; ARMV7-NEXT: lsl r7, r1, r0
+; ARMV7-NEXT: adcs r0, r9, #0
+; ARMV7-NEXT: str r0, [sp, #72] @ 4-byte Spill
+; ARMV7-NEXT: mov r1, #0
+; ARMV7-NEXT: ldr r0, [sp, #80] @ 4-byte Reload
+; ARMV7-NEXT: adcs r2, r0, #0
+; ARMV7-NEXT: adcs r0, r8, #0
+; ARMV7-NEXT: str r0, [sp, #76] @ 4-byte Spill
+; ARMV7-NEXT: mov r0, #0
+; ARMV7-NEXT: str r0, [sp, #88] @ 4-byte Spill
+; ARMV7-NEXT: adcs r0, r1, #0
+; ARMV7-NEXT: bne .LBB1_14
+; ARMV7-NEXT: @ %bb.9: @ %udiv-preheader
+; ARMV7-NEXT: mov r0, #0
+; ARMV7-NEXT: add r6, sp, #96
+; ARMV7-NEXT: str r0, [sp, #124]
+; ARMV7-NEXT: and r10, r4, #31
+; ARMV7-NEXT: str r0, [sp, #120]
+; ARMV7-NEXT: str r0, [sp, #116]
+; ARMV7-NEXT: str r0, [sp, #112]
+; ARMV7-NEXT: ldr r0, [sp, #264]
+; ARMV7-NEXT: str r0, [sp, #104]
+; ARMV7-NEXT: ldr r0, [sp, #268]
+; ARMV7-NEXT: str r0, [sp, #108]
+; ARMV7-NEXT: ubfx r0, r4, #5, #2
+; ARMV7-NEXT: str r12, [sp, #96]
+; ARMV7-NEXT: add r3, r6, r0, lsl #2
+; ARMV7-NEXT: str r11, [sp, #100]
+; ARMV7-NEXT: ldr r1, [r3, #8]
+; ARMV7-NEXT: ldr r12, [r3, #4]
+; ARMV7-NEXT: ldr r9, [r3, #12]
+; ARMV7-NEXT: str r4, [sp, #80] @ 4-byte Spill
+; ARMV7-NEXT: eor r4, r10, #31
+; ARMV7-NEXT: ldr r0, [r6, r0, lsl #2]
+; ARMV7-NEXT: lsr r11, r1, r10
+; ARMV7-NEXT: lsl r3, r9, #1
+; ARMV7-NEXT: lsl r1, r1, #1
+; ARMV7-NEXT: orr lr, r11, r3, lsl r4
+; ARMV7-NEXT: lsr r11, r12, r10
+; ARMV7-NEXT: orr r3, r11, r1, lsl r4
+; ARMV7-NEXT: lsr r0, r0, r10
+; ARMV7-NEXT: lsl r1, r12, #1
+; ARMV7-NEXT: orr r8, r0, r1, lsl r4
+; ARMV7-NEXT: ldr r0, [sp, #272]
+; ARMV7-NEXT: lsr r1, r9, r10
+; ARMV7-NEXT: ldr r6, [sp, #92] @ 4-byte Reload
+; ARMV7-NEXT: subs r0, r0, #1
+; ARMV7-NEXT: str r0, [sp, #64] @ 4-byte Spill
+; ARMV7-NEXT: ldr r0, [sp, #276]
+; ARMV7-NEXT: mov r10, #0
+; ARMV7-NEXT: ldr r9, [sp, #72] @ 4-byte Reload
+; ARMV7-NEXT: sbcs r0, r0, #0
+; ARMV7-NEXT: str r0, [sp, #60] @ 4-byte Spill
+; ARMV7-NEXT: ldr r0, [sp, #280]
+; ARMV7-NEXT: ldr r11, [sp, #76] @ 4-byte Reload
+; ARMV7-NEXT: sbcs r0, r0, #0
+; ARMV7-NEXT: str r0, [sp, #56] @ 4-byte Spill
+; ARMV7-NEXT: ldr r0, [sp, #284]
+; ARMV7-NEXT: sbc r0, r0, #0
+; ARMV7-NEXT: str r0, [sp, #52] @ 4-byte Spill
+; ARMV7-NEXT: .LBB1_10: @ %udiv-do-while
+; ARMV7-NEXT: @ =>This Inner Loop Header: Depth=1
+; ARMV7-NEXT: ldr r4, [sp, #88] @ 4-byte Reload
+; ARMV7-NEXT: str r4, [sp, #68] @ 4-byte Spill
+; ARMV7-NEXT: lsl r4, r8, #1
+; ARMV7-NEXT: ldr r0, [sp, #64] @ 4-byte Reload
+; ARMV7-NEXT: orr r4, r4, r6, lsr #31
+; ARMV7-NEXT: str r5, [sp, #76] @ 4-byte Spill
+; ARMV7-NEXT: ldr r5, [sp, #84] @ 4-byte Reload
+; ARMV7-NEXT: str r7, [sp, #84] @ 4-byte Spill
+; ARMV7-NEXT: lsl r7, r1, #1
+; ARMV7-NEXT: lsl r1, lr, #1
+; ARMV7-NEXT: str r5, [sp, #72] @ 4-byte Spill
+; ARMV7-NEXT: orr r1, r1, r3, lsr #31
+; ARMV7-NEXT: lsl r3, r3, #1
+; ARMV7-NEXT: subs r5, r0, r4
+; ARMV7-NEXT: ldr r0, [sp, #60] @ 4-byte Reload
+; ARMV7-NEXT: orr r3, r3, r8, lsr #31
+; ARMV7-NEXT: orr r7, r7, lr, lsr #31
+; ARMV7-NEXT: sbcs r5, r0, r3
+; ARMV7-NEXT: ldr r0, [sp, #56] @ 4-byte Reload
+; ARMV7-NEXT: str r6, [sp, #92] @ 4-byte Spill
+; ARMV7-NEXT: sbcs r5, r0, r1
+; ARMV7-NEXT: ldr r0, [sp, #52] @ 4-byte Reload
+; ARMV7-NEXT: sbc r5, r0, r7
+; ARMV7-NEXT: ldr r0, [sp, #284]
+; ARMV7-NEXT: and r6, r0, r5, asr #31
+; ARMV7-NEXT: ldr r0, [sp, #280]
+; ARMV7-NEXT: and lr, r0, r5, asr #31
+; ARMV7-NEXT: ldr r0, [sp, #276]
+; ARMV7-NEXT: and r12, r0, r5, asr #31
+; ARMV7-NEXT: ldr r0, [sp, #272]
+; ARMV7-NEXT: and r8, r0, r5, asr #31
+; ARMV7-NEXT: mov r0, #1
+; ARMV7-NEXT: subs r8, r4, r8
+; ARMV7-NEXT: and r0, r0, r5, asr #31
+; ARMV7-NEXT: sbcs r3, r3, r12
+; ARMV7-NEXT: ldr r5, [sp, #80] @ 4-byte Reload
+; ARMV7-NEXT: sbcs lr, r1, lr
+; ARMV7-NEXT: str r0, [sp, #88] @ 4-byte Spill
+; ARMV7-NEXT: sbc r12, r7, r6
+; ARMV7-NEXT: subs r5, r5, #1
+; ARMV7-NEXT: sbcs r9, r9, #0
+; ARMV7-NEXT: ldr r0, [sp, #84] @ 4-byte Reload
+; ARMV7-NEXT: sbcs r2, r2, #0
+; ARMV7-NEXT: ldr r4, [sp, #72] @ 4-byte Reload
+; ARMV7-NEXT: sbc r11, r11, #0
+; ARMV7-NEXT: str r5, [sp, #80] @ 4-byte Spill
+; ARMV7-NEXT: orr r7, r9, r11
+; ARMV7-NEXT: orr r5, r5, r2
+; ARMV7-NEXT: ldr r1, [sp, #68] @ 4-byte Reload
+; ARMV7-NEXT: orrs r7, r5, r7
+; ARMV7-NEXT: lsl r5, r4, #1
+; ARMV7-NEXT: ldr r6, [sp, #76] @ 4-byte Reload
+; ARMV7-NEXT: orr r7, r1, r0, lsl #1
+; ARMV7-NEXT: orr r0, r5, r0, lsr #31
+; ARMV7-NEXT: orr r0, r10, r0
+; ARMV7-NEXT: str r0, [sp, #84] @ 4-byte Spill
+; ARMV7-NEXT: ldr r0, [sp, #92] @ 4-byte Reload
+; ARMV7-NEXT: lsl r5, r6, #1
+; ARMV7-NEXT: orr r5, r5, r4, lsr #31
+; ARMV7-NEXT: mov r1, r12
+; ARMV7-NEXT: orr r5, r10, r5
+; ARMV7-NEXT: lsl r0, r0, #1
+; ARMV7-NEXT: orr r0, r0, r6, lsr #31
+; ARMV7-NEXT: orr r6, r10, r0
+; ARMV7-NEXT: mov r10, #0
+; ARMV7-NEXT: bne .LBB1_10
+; ARMV7-NEXT: .LBB1_11: @ %udiv-loop-exit
+; ARMV7-NEXT: ldr r0, [sp, #88] @ 4-byte Reload
+; ARMV7-NEXT: ldr r11, [sp, #32] @ 4-byte Reload
+; ARMV7-NEXT: orr r3, r0, r7, lsl #1
+; ARMV7-NEXT: ldr r0, [sp, #84] @ 4-byte Reload
+; ARMV7-NEXT: ldr r12, [sp, #36] @ 4-byte Reload
+; ARMV7-NEXT: ldr lr, [sp, #264]
+; ARMV7-NEXT: lsl r1, r0, #1
+; ARMV7-NEXT: orr r7, r1, r7, lsr #31
+; ARMV7-NEXT: lsl r1, r5, #1
+; ARMV7-NEXT: orr r0, r1, r0, lsr #31
+; ARMV7-NEXT: str r0, [sp, #44] @ 4-byte Spill
+; ARMV7-NEXT: lsl r0, r6, #1
+; ARMV7-NEXT: orr r0, r0, r5, lsr #31
+; ARMV7-NEXT: str r0, [sp, #40] @ 4-byte Spill
+; ARMV7-NEXT: .LBB1_12: @ %udiv-end
+; ARMV7-NEXT: ldr r10, [sp, #272]
+; ARMV7-NEXT: mov r6, #0
+; ARMV7-NEXT: ldr r4, [sp, #12] @ 4-byte Reload
+; ARMV7-NEXT: mov r2, #0
+; ARMV7-NEXT: ldr r8, [sp, #276]
+; ARMV7-NEXT: umull r0, r1, r10, r3
+; ARMV7-NEXT: umlal r1, r6, r8, r3
+; ARMV7-NEXT: str r0, [sp, #92] @ 4-byte Spill
+; ARMV7-NEXT: ldr r0, [sp, #24] @ 4-byte Reload
+; ARMV7-NEXT: str r4, [r0]
+; ARMV7-NEXT: ldr r4, [sp, #16] @ 4-byte Reload
+; ARMV7-NEXT: str r4, [r0, #4]
+; ARMV7-NEXT: ldr r4, [sp, #20] @ 4-byte Reload
+; ARMV7-NEXT: str r4, [r0, #8]
+; ARMV7-NEXT: ldr r4, [sp, #48] @ 4-byte Reload
+; ARMV7-NEXT: str r4, [r0, #12]
+; ARMV7-NEXT: umull r5, r4, r10, r7
+; ARMV7-NEXT: ldr r0, [sp, #280]
+; ARMV7-NEXT: adds r9, r5, r1
+; ARMV7-NEXT: ldr r1, [sp, #284]
+; ARMV7-NEXT: adcs r6, r6, r4
+; ARMV7-NEXT: umull r5, r4, r3, r0
+; ARMV7-NEXT: adc r2, r2, #0
+; ARMV7-NEXT: mla r3, r3, r1, r4
+; ARMV7-NEXT: umlal r6, r2, r8, r7
+; ARMV7-NEXT: mla r3, r7, r0, r3
+; ARMV7-NEXT: ldr r0, [sp, #44] @ 4-byte Reload
+; ARMV7-NEXT: umull r7, r4, r0, r10
+; ARMV7-NEXT: mla r4, r0, r8, r4
+; ARMV7-NEXT: ldr r0, [sp, #40] @ 4-byte Reload
+; ARMV7-NEXT: adds r7, r7, r5
+; ARMV7-NEXT: mla r4, r0, r10, r4
+; ARMV7-NEXT: ldr r0, [sp, #92] @ 4-byte Reload
+; ARMV7-NEXT: adc r3, r4, r3
+; ARMV7-NEXT: adds r7, r6, r7
+; ARMV7-NEXT: adc r2, r2, r3
+; ARMV7-NEXT: subs r0, r12, r0
+; ARMV7-NEXT: sbcs r1, r11, r9
+; ARMV7-NEXT: sbcs r3, lr, r7
+; ARMV7-NEXT: ldr r7, [sp, #268]
+; ARMV7-NEXT: sbc r2, r7, r2
+; ARMV7-NEXT: ldr r7, [sp, #28] @ 4-byte Reload
+; ARMV7-NEXT: stm r7, {r0, r1, r3}
+; ARMV7-NEXT: str r2, [r7, #12]
+; ARMV7-NEXT: add sp, sp, #228
+; ARMV7-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+; ARMV7-NEXT: .LBB1_13:
+; ARMV7-NEXT: ldr r12, [sp, #92] @ 4-byte Reload
+; ARMV7-NEXT: ldr r3, [sp, #72] @ 4-byte Reload
+; ARMV7-NEXT: ldr r5, [sp, #68] @ 4-byte Reload
+; ARMV7-NEXT: ldr r2, [sp, #64] @ 4-byte Reload
+; ARMV7-NEXT: b .LBB1_5
+; ARMV7-NEXT: .LBB1_14:
+; ARMV7-NEXT: ldr r6, [sp, #92] @ 4-byte Reload
+; ARMV7-NEXT: b .LBB1_11
+; ARMV7-NEXT: .LBB1_15:
+; ARMV7-NEXT: ldr lr, [sp, #264]
+; ARMV7-NEXT: b .LBB1_12
+ %q = udiv i128 %n, %d
+ %r = urem i128 %n, %d
+ store i128 %q, ptr %q_out
+ store i128 %r, ptr %r_out
+ ret void
+}
diff --git a/llvm/test/CodeGen/Generic/i128-divrem-libcall.ll b/llvm/test/CodeGen/Generic/i128-divrem-libcall.ll
deleted file mode 100644
index 594b4c73d4b06..0000000000000
--- a/llvm/test/CodeGen/Generic/i128-divrem-libcall.ll
+++ /dev/null
@@ -1,122 +0,0 @@
-; 64-bit targets: fused __divmodti4 / __udivmodti4
-; RUN: %if x86-registered-target %{ llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=CHECK,FUSED,SYSV-X64 %}
-; RUN: %if x86-registered-target %{ llc < %s -mtriple=x86_64-linux-gnux32 | FileCheck %s --check-prefixes=CHECK,FUSED %}
-; RUN: %if x86-registered-target %{ llc < %s -mtriple=x86_64-pc-windows-msvc | FileCheck %s --check-prefixes=CHECK,WIN64 %}
-; RUN: %if x86-registered-target %{ llc < %s -mtriple=x86_64-w64-mingw32 | FileCheck %s --check-prefixes=CHECK,FUSED %}
-; RUN: %if aarch64-registered-target %{ llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s --check-prefixes=CHECK,FUSED,SYSV-A64 %}
-; RUN: %if riscv-registered-target %{ llc < %s -mtriple=riscv64-linux-gnu | FileCheck %s --check-prefixes=CHECK,FUSED %}
-; RUN: %if riscv-registered-target %{ llc < %s -mtriple=riscv64-linux-gnu -mattr=+m | FileCheck %s --check-prefixes=CHECK,FUSED %}
-; RUN: %if powerpc-registered-target %{ llc < %s -mtriple=powerpc64-linux-gnu | FileCheck %s --check-prefixes=CHECK,FUSED %}
-; RUN: %if sparc-registered-target %{ llc < %s -mtriple=sparcv9-linux-gnu | FileCheck %s --check-prefixes=CHECK,FUSED %}
-; RUN: %if loongarch-registered-target %{ llc < %s -mtriple=loongarch64-linux-gnu | FileCheck %s --check-prefixes=CHECK,FUSED %}
-; RUN: %if webassembly-registered-target %{ llc < %s -mtriple=wasm32-unknown-unknown | FileCheck %s --check-prefixes=CHECK,FUSED %}
-; RUN: %if webassembly-registered-target %{ llc < %s -mtriple=wasm64-unknown-unknown | FileCheck %s --check-prefixes=CHECK,FUSED %}
-
-; 32-bit / ILP32 targets that expand inline (no runtime library or no libcall)
-; RUN: %if x86-registered-target %{ llc < %s -mtriple=i386-linux-gnu | FileCheck %s --check-prefixes=CHECK,INLINE %}
-; RUN: %if x86-registered-target %{ llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefixes=CHECK,INLINE %}
-; RUN: %if riscv-registered-target %{ llc < %s -mtriple=riscv32-linux-gnu | FileCheck %s --check-prefixes=CHECK,INLINE %}
-; RUN: %if riscv-registered-target %{ llc < %s -mtriple=riscv32-linux-gnu -mattr=+m | FileCheck %s --check-prefixes=CHECK,INLINE %}
-; RUN: %if arm-registered-target %{ llc < %s -mtriple=armv6-linux-gnueabihf | FileCheck %s --check-prefixes=CHECK,INLINE %}
-; RUN: %if arm-registered-target %{ llc < %s -mtriple=armv7-linux-gnueabi | FileCheck %s --check-prefixes=CHECK,INLINE %}
-; RUN: %if arm-registered-target %{ llc < %s -mtriple=armv7-none-eabi | FileCheck %s --check-prefixes=CHECK,INLINE %}
-
-; Win32: i128 fully inline-expanded, no libcalls registered
-; RUN: %if x86-registered-target %{ llc < %s -mtriple=i686-pc-windows-msvc | FileCheck %s --check-prefixes=CHECK,WIN32 %}
-
-; ILP32 targets that fall back to separate __divti3 + __modti3 calls
-; RUN: %if aarch64-registered-target %{ llc < %s -mtriple=aarch64_32-apple-watchos | FileCheck %s --check-prefixes=CHECK,DIVMOD %}
-
-; 64-bit Mac OS: fused ___divmodti4 (extra underscore, same ABI as Linux AArch64)
-; RUN: %if x86-registered-target %{ llc < %s -mtriple=x86_64-apple-macosx | FileCheck %s --check-prefixes=CHECK,FUSED-DARWIN %}
-; RUN: %if aarch64-registered-target %{ llc < %s -mtriple=arm64-apple-macosx | FileCheck %s --check-prefixes=CHECK,DARWIN-A64 %}
-
-; Verify that sdiv+srem / udiv+urem on i128 fuse into a single __divmodti4 /
-; __udivmodti4 call on targets where the libcall is available (64-bit targets
-; and wasm), and do not on targets where it is not (32-bit / ILP32).
-;
-; Detailed ABI checks for the four most popular calling conventions:
-; WIN64 (x86_64 Windows): all args spilled to stack and passed as pointers
-; in %rcx/%rdx/%r8, quotient returned in %xmm0.
-; DARWIN-A64 (AArch64 macOS): identical to SYSV-A64 but symbol has an extra
-; leading underscore (___divmodti4).
-; SYSV-X64 (x86_64 Linux/BSD): i128 args in register pairs, rem pointer via
-; %rsp in %r8, quotient returned in %rax:%rdx.
-; SYSV-A64 (AArch64 Linux): i128 args in x0:x1/x2:x3, rem pointer via sp in
-; x4, quotient returned in x0:x1.
-; Win32 (i686-windows-msvc): no i128 libcalls registered, fully inline.
-; 32-bit targets that lack the fused call may lower to:
-; - separate __divti3 + __modti3 / __udivti3 + __umodti3 calls, or
-; - fully inline expansion (e.g. i686, bare metal)
-
-define void @sdivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
-; CHECK-LABEL: sdivrem_i128:
-; SYSV-X64: movq %rsp, %r8
-; SYSV-A64: mov x4, sp
-; FUSED: __divmodti4
-; FUSED-DARWIN: ___divmodti4
-; SYSV-X64: movq (%rsp),
-; SYSV-X64: movq %rax,
-; SYSV-X64: movq %rdx,
-; SYSV-A64: ldp {{.*}}, [sp]
-; SYSV-A64: stp x0, x1,
-; DARWIN-A64: mov x4, sp
-; DARWIN-A64: bl ___divmodti4
-; DARWIN-A64: ldp {{.*}}, [sp]
-; DARWIN-A64: stp x0, x1,
-; WIN64: leaq {{[0-9]+}}(%rsp), %rcx
-; WIN64: leaq {{[0-9]+}}(%rsp), %rdx
-; WIN64: leaq {{[0-9]+}}(%rsp), %r8
-; WIN64: callq __divmodti4
-; WIN64: movaps {{[0-9]+}}(%rsp), %xmm1
-; WIN64: movaps %xmm0,
-; WIN32-NOT: __divmodti4
-; WIN32-NOT: __divti3
-; WIN32-NOT: __modti3
-; DIVMOD: __divti3
-; DIVMOD: __modti3
-; INLINE-NOT: __divmodti4
-; INLINE-NOT: __divti3
-; INLINE-NOT: __modti3
- %q = sdiv i128 %n, %d
- %r = srem i128 %n, %d
- store i128 %q, ptr %q_out
- store i128 %r, ptr %r_out
- ret void
-}
-
-define void @udivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
-; CHECK-LABEL: udivrem_i128:
-; SYSV-X64: movq %rsp, %r8
-; SYSV-A64: mov x4, sp
-; FUSED: __udivmodti4
-; FUSED-DARWIN: ___udivmodti4
-; SYSV-X64: movq (%rsp),
-; SYSV-X64: movq %rax,
-; SYSV-X64: movq %rdx,
-; SYSV-A64: ldp {{.*}}, [sp]
-; SYSV-A64: stp x0, x1,
-; DARWIN-A64: mov x4, sp
-; DARWIN-A64: bl ___udivmodti4
-; DARWIN-A64: ldp {{.*}}, [sp]
-; DARWIN-A64: stp x0, x1,
-; WIN64: leaq {{[0-9]+}}(%rsp), %rcx
-; WIN64: leaq {{[0-9]+}}(%rsp), %rdx
-; WIN64: leaq {{[0-9]+}}(%rsp), %r8
-; WIN64: callq __udivmodti4
-; WIN64: movaps {{[0-9]+}}(%rsp), %xmm1
-; WIN64: movaps %xmm0,
-; WIN32-NOT: __udivmodti4
-; WIN32-NOT: __udivti3
-; WIN32-NOT: __umodti3
-; DIVMOD: __udivti3
-; DIVMOD: __umodti3
-; INLINE-NOT: __udivmodti4
-; INLINE-NOT: __udivti3
-; INLINE-NOT: __umodti3
- %q = udiv i128 %n, %d
- %r = urem i128 %n, %d
- store i128 %q, ptr %q_out
- store i128 %r, ptr %r_out
- ret void
-}
diff --git a/llvm/test/CodeGen/LoongArch/i128-divrem-libcall.ll b/llvm/test/CodeGen/LoongArch/i128-divrem-libcall.ll
new file mode 100644
index 0000000000000..8365395f357a6
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/i128-divrem-libcall.ll
@@ -0,0 +1,80 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=loongarch64-linux-gnu | FileCheck %s
+
+define void @sdivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
+; CHECK-LABEL: sdivrem_i128:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi.d $sp, $sp, -48
+; CHECK-NEXT: .cfi_def_cfa_offset 48
+; CHECK-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
+; CHECK-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
+; CHECK-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
+; CHECK-NEXT: .cfi_offset 1, -8
+; CHECK-NEXT: .cfi_offset 22, -16
+; CHECK-NEXT: .cfi_offset 23, -24
+; CHECK-NEXT: move $a6, $a4
+; CHECK-NEXT: move $fp, $a1
+; CHECK-NEXT: move $s0, $a0
+; CHECK-NEXT: addi.d $a4, $sp, 0
+; CHECK-NEXT: move $a0, $a2
+; CHECK-NEXT: move $a1, $a3
+; CHECK-NEXT: move $a2, $a6
+; CHECK-NEXT: move $a3, $a5
+; CHECK-NEXT: pcaddu18i $ra, %call36(__divmodti4)
+; CHECK-NEXT: jirl $ra, $ra, 0
+; CHECK-NEXT: ld.d $a2, $sp, 0
+; CHECK-NEXT: ld.d $a3, $sp, 8
+; CHECK-NEXT: st.d $a0, $s0, 0
+; CHECK-NEXT: st.d $a1, $s0, 8
+; CHECK-NEXT: st.d $a2, $fp, 0
+; CHECK-NEXT: st.d $a3, $fp, 8
+; CHECK-NEXT: ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; CHECK-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; CHECK-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; CHECK-NEXT: addi.d $sp, $sp, 48
+; CHECK-NEXT: ret
+ %q = sdiv i128 %n, %d
+ %r = srem i128 %n, %d
+ store i128 %q, ptr %q_out
+ store i128 %r, ptr %r_out
+ ret void
+}
+
+define void @udivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
+; CHECK-LABEL: udivrem_i128:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi.d $sp, $sp, -48
+; CHECK-NEXT: .cfi_def_cfa_offset 48
+; CHECK-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
+; CHECK-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
+; CHECK-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
+; CHECK-NEXT: .cfi_offset 1, -8
+; CHECK-NEXT: .cfi_offset 22, -16
+; CHECK-NEXT: .cfi_offset 23, -24
+; CHECK-NEXT: move $a6, $a4
+; CHECK-NEXT: move $fp, $a1
+; CHECK-NEXT: move $s0, $a0
+; CHECK-NEXT: addi.d $a4, $sp, 0
+; CHECK-NEXT: move $a0, $a2
+; CHECK-NEXT: move $a1, $a3
+; CHECK-NEXT: move $a2, $a6
+; CHECK-NEXT: move $a3, $a5
+; CHECK-NEXT: pcaddu18i $ra, %call36(__udivmodti4)
+; CHECK-NEXT: jirl $ra, $ra, 0
+; CHECK-NEXT: ld.d $a2, $sp, 0
+; CHECK-NEXT: ld.d $a3, $sp, 8
+; CHECK-NEXT: st.d $a0, $s0, 0
+; CHECK-NEXT: st.d $a1, $s0, 8
+; CHECK-NEXT: st.d $a2, $fp, 0
+; CHECK-NEXT: st.d $a3, $fp, 8
+; CHECK-NEXT: ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; CHECK-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; CHECK-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; CHECK-NEXT: addi.d $sp, $sp, 48
+; CHECK-NEXT: ret
+ %q = udiv i128 %n, %d
+ %r = urem i128 %n, %d
+ store i128 %q, ptr %q_out
+ store i128 %r, ptr %r_out
+ ret void
+}
diff --git a/llvm/test/CodeGen/PowerPC/i128-divrem-libcall.ll b/llvm/test/CodeGen/PowerPC/i128-divrem-libcall.ll
new file mode 100644
index 0000000000000..7e26ac0384c3b
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/i128-divrem-libcall.ll
@@ -0,0 +1,84 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=powerpc64-linux-gnu | FileCheck %s
+
+define void @sdivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
+; CHECK-LABEL: sdivrem_i128:
+; CHECK: # %bb.0:
+; CHECK-NEXT: mflr 0
+; CHECK-NEXT: stdu 1, -160(1)
+; CHECK-NEXT: std 0, 176(1)
+; CHECK-NEXT: .cfi_def_cfa_offset 160
+; CHECK-NEXT: .cfi_offset lr, 16
+; CHECK-NEXT: .cfi_offset r29, -24
+; CHECK-NEXT: .cfi_offset r30, -16
+; CHECK-NEXT: mr 9, 7
+; CHECK-NEXT: std 29, 136(1) # 8-byte Folded Spill
+; CHECK-NEXT: mr 29, 3
+; CHECK-NEXT: addi 7, 1, 112
+; CHECK-NEXT: mr 3, 5
+; CHECK-NEXT: std 30, 144(1) # 8-byte Folded Spill
+; CHECK-NEXT: mr 30, 4
+; CHECK-NEXT: mr 4, 6
+; CHECK-NEXT: mr 5, 9
+; CHECK-NEXT: mr 6, 8
+; CHECK-NEXT: bl __divmodti4
+; CHECK-NEXT: nop
+; CHECK-NEXT: ld 5, 120(1)
+; CHECK-NEXT: ld 6, 112(1)
+; CHECK-NEXT: std 3, 0(29)
+; CHECK-NEXT: std 4, 8(29)
+; CHECK-NEXT: std 6, 0(30)
+; CHECK-NEXT: std 5, 8(30)
+; CHECK-NEXT: ld 30, 144(1) # 8-byte Folded Reload
+; CHECK-NEXT: ld 29, 136(1) # 8-byte Folded Reload
+; CHECK-NEXT: addi 1, 1, 160
+; CHECK-NEXT: ld 0, 16(1)
+; CHECK-NEXT: mtlr 0
+; CHECK-NEXT: blr
+ %q = sdiv i128 %n, %d
+ %r = srem i128 %n, %d
+ store i128 %q, ptr %q_out
+ store i128 %r, ptr %r_out
+ ret void
+}
+
+define void @udivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
+; CHECK-LABEL: udivrem_i128:
+; CHECK: # %bb.0:
+; CHECK-NEXT: mflr 0
+; CHECK-NEXT: stdu 1, -160(1)
+; CHECK-NEXT: std 0, 176(1)
+; CHECK-NEXT: .cfi_def_cfa_offset 160
+; CHECK-NEXT: .cfi_offset lr, 16
+; CHECK-NEXT: .cfi_offset r29, -24
+; CHECK-NEXT: .cfi_offset r30, -16
+; CHECK-NEXT: mr 9, 7
+; CHECK-NEXT: std 29, 136(1) # 8-byte Folded Spill
+; CHECK-NEXT: mr 29, 3
+; CHECK-NEXT: addi 7, 1, 112
+; CHECK-NEXT: mr 3, 5
+; CHECK-NEXT: std 30, 144(1) # 8-byte Folded Spill
+; CHECK-NEXT: mr 30, 4
+; CHECK-NEXT: mr 4, 6
+; CHECK-NEXT: mr 5, 9
+; CHECK-NEXT: mr 6, 8
+; CHECK-NEXT: bl __udivmodti4
+; CHECK-NEXT: nop
+; CHECK-NEXT: ld 5, 120(1)
+; CHECK-NEXT: ld 6, 112(1)
+; CHECK-NEXT: std 3, 0(29)
+; CHECK-NEXT: std 4, 8(29)
+; CHECK-NEXT: std 6, 0(30)
+; CHECK-NEXT: std 5, 8(30)
+; CHECK-NEXT: ld 30, 144(1) # 8-byte Folded Reload
+; CHECK-NEXT: ld 29, 136(1) # 8-byte Folded Reload
+; CHECK-NEXT: addi 1, 1, 160
+; CHECK-NEXT: ld 0, 16(1)
+; CHECK-NEXT: mtlr 0
+; CHECK-NEXT: blr
+ %q = udiv i128 %n, %d
+ %r = urem i128 %n, %d
+ store i128 %q, ptr %q_out
+ store i128 %r, ptr %r_out
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/i128-divrem-libcall.ll b/llvm/test/CodeGen/RISCV/i128-divrem-libcall.ll
new file mode 100644
index 0000000000000..952a989a335d7
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/i128-divrem-libcall.ll
@@ -0,0 +1,5015 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=riscv64-linux-gnu | FileCheck %s --check-prefix=RV64
+; RUN: llc < %s -mtriple=riscv64-linux-gnu -mattr=+m | FileCheck %s --check-prefix=RV64
+; RUN: llc < %s -mtriple=riscv32-linux-gnu | FileCheck %s --check-prefix=RV32I
+; RUN: llc < %s -mtriple=riscv32-linux-gnu -mattr=+m | FileCheck %s --check-prefix=RV32M
+
+define void @sdivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
+; RV64-LABEL: sdivrem_i128:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -48
+; RV64-NEXT: .cfi_def_cfa_offset 48
+; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: mv a6, a4
+; RV64-NEXT: mv s0, a1
+; RV64-NEXT: mv s1, a0
+; RV64-NEXT: mv a4, sp
+; RV64-NEXT: mv a0, a2
+; RV64-NEXT: mv a1, a3
+; RV64-NEXT: mv a2, a6
+; RV64-NEXT: mv a3, a5
+; RV64-NEXT: call __divmodti4
+; RV64-NEXT: ld a2, 0(sp)
+; RV64-NEXT: ld a3, 8(sp)
+; RV64-NEXT: sd a0, 0(s1)
+; RV64-NEXT: sd a1, 8(s1)
+; RV64-NEXT: sd a2, 0(s0)
+; RV64-NEXT: sd a3, 8(s0)
+; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT: .cfi_restore ra
+; RV64-NEXT: .cfi_restore s0
+; RV64-NEXT: .cfi_restore s1
+; RV64-NEXT: addi sp, sp, 48
+; RV64-NEXT: .cfi_def_cfa_offset 0
+; RV64-NEXT: ret
+;
+; RV32I-LABEL: sdivrem_i128:
+; RV32I: # %bb.0: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: addi sp, sp, -288
+; RV32I-NEXT: .cfi_def_cfa_offset 288
+; RV32I-NEXT: sw ra, 284(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s0, 280(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s1, 276(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s2, 272(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s3, 268(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s4, 264(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s5, 260(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s6, 256(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s7, 252(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s8, 248(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s9, 244(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s10, 240(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s11, 236(sp) # 4-byte Folded Spill
+; RV32I-NEXT: .cfi_offset ra, -4
+; RV32I-NEXT: .cfi_offset s0, -8
+; RV32I-NEXT: .cfi_offset s1, -12
+; RV32I-NEXT: .cfi_offset s2, -16
+; RV32I-NEXT: .cfi_offset s3, -20
+; RV32I-NEXT: .cfi_offset s4, -24
+; RV32I-NEXT: .cfi_offset s5, -28
+; RV32I-NEXT: .cfi_offset s6, -32
+; RV32I-NEXT: .cfi_offset s7, -36
+; RV32I-NEXT: .cfi_offset s8, -40
+; RV32I-NEXT: .cfi_offset s9, -44
+; RV32I-NEXT: .cfi_offset s10, -48
+; RV32I-NEXT: .cfi_offset s11, -52
+; RV32I-NEXT: sw a1, 52(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw a0, 48(sp) # 4-byte Folded Spill
+; RV32I-NEXT: lw ra, 0(a2)
+; RV32I-NEXT: lw s3, 4(a2)
+; RV32I-NEXT: lw s11, 8(a2)
+; RV32I-NEXT: lw t4, 0(a3)
+; RV32I-NEXT: lw t3, 4(a3)
+; RV32I-NEXT: lw a7, 8(a3)
+; RV32I-NEXT: lw t0, 12(a3)
+; RV32I-NEXT: lw t2, 12(a2)
+; RV32I-NEXT: or a0, ra, s3
+; RV32I-NEXT: snez a1, s11
+; RV32I-NEXT: snez a4, a0
+; RV32I-NEXT: add a1, t2, a1
+; RV32I-NEXT: snez t1, ra
+; RV32I-NEXT: sw a1, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT: bltz t2, .LBB0_2
+; RV32I-NEXT: # %bb.1: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: mv s4, t2
+; RV32I-NEXT: mv t6, s11
+; RV32I-NEXT: mv s5, s3
+; RV32I-NEXT: mv s6, ra
+; RV32I-NEXT: j .LBB0_3
+; RV32I-NEXT: .LBB0_2:
+; RV32I-NEXT: neg a0, s11
+; RV32I-NEXT: neg a1, a1
+; RV32I-NEXT: neg a2, s3
+; RV32I-NEXT: sltu a3, a0, a4
+; RV32I-NEXT: sub t6, a0, a4
+; RV32I-NEXT: sub s5, a2, t1
+; RV32I-NEXT: sub s4, a1, a3
+; RV32I-NEXT: neg s6, ra
+; RV32I-NEXT: .LBB0_3: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: or a0, t4, t3
+; RV32I-NEXT: snez a1, a7
+; RV32I-NEXT: snez s2, a0
+; RV32I-NEXT: add a1, t0, a1
+; RV32I-NEXT: snez a5, t4
+; RV32I-NEXT: sw a5, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw a1, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT: bltz t0, .LBB0_5
+; RV32I-NEXT: # %bb.4: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: mv s9, t0
+; RV32I-NEXT: mv s8, a7
+; RV32I-NEXT: mv t5, t3
+; RV32I-NEXT: sw t4, 88(sp) # 4-byte Folded Spill
+; RV32I-NEXT: j .LBB0_6
+; RV32I-NEXT: .LBB0_5:
+; RV32I-NEXT: neg a0, a7
+; RV32I-NEXT: neg a1, a1
+; RV32I-NEXT: neg a2, t3
+; RV32I-NEXT: sltu a3, a0, s2
+; RV32I-NEXT: sub s8, a0, s2
+; RV32I-NEXT: sub t5, a2, a5
+; RV32I-NEXT: sub s9, a1, a3
+; RV32I-NEXT: neg a0, t4
+; RV32I-NEXT: sw a0, 88(sp) # 4-byte Folded Spill
+; RV32I-NEXT: .LBB0_6: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: lui a0, 349525
+; RV32I-NEXT: lui a1, 209715
+; RV32I-NEXT: lui a2, 61681
+; RV32I-NEXT: addi s7, a0, 1365
+; RV32I-NEXT: addi s1, a1, 819
+; RV32I-NEXT: addi a1, a2, -241
+; RV32I-NEXT: bnez t5, .LBB0_8
+; RV32I-NEXT: # %bb.7: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: lw a2, 88(sp) # 4-byte Folded Reload
+; RV32I-NEXT: srli a0, a2, 1
+; RV32I-NEXT: or a0, a2, a0
+; RV32I-NEXT: srli a2, a0, 2
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: srli a2, a0, 4
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: srli a2, a0, 8
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: srli a2, a0, 16
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: not a0, a0
+; RV32I-NEXT: srli a2, a0, 1
+; RV32I-NEXT: and a2, a2, s7
+; RV32I-NEXT: sub a0, a0, a2
+; RV32I-NEXT: and a2, a0, s1
+; RV32I-NEXT: srli a0, a0, 2
+; RV32I-NEXT: and a0, a0, s1
+; RV32I-NEXT: add a0, a2, a0
+; RV32I-NEXT: srli a2, a0, 4
+; RV32I-NEXT: add a0, a0, a2
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: slli a2, a0, 8
+; RV32I-NEXT: add a0, a0, a2
+; RV32I-NEXT: slli a2, a0, 16
+; RV32I-NEXT: add a0, a0, a2
+; RV32I-NEXT: srli a0, a0, 24
+; RV32I-NEXT: addi a5, a0, 32
+; RV32I-NEXT: j .LBB0_9
+; RV32I-NEXT: .LBB0_8:
+; RV32I-NEXT: srli a0, t5, 1
+; RV32I-NEXT: or a0, t5, a0
+; RV32I-NEXT: srli a2, a0, 2
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: srli a2, a0, 4
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: srli a2, a0, 8
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: srli a2, a0, 16
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: not a0, a0
+; RV32I-NEXT: srli a2, a0, 1
+; RV32I-NEXT: and a2, a2, s7
+; RV32I-NEXT: sub a0, a0, a2
+; RV32I-NEXT: and a2, a0, s1
+; RV32I-NEXT: srli a0, a0, 2
+; RV32I-NEXT: and a0, a0, s1
+; RV32I-NEXT: add a0, a2, a0
+; RV32I-NEXT: srli a2, a0, 4
+; RV32I-NEXT: add a0, a0, a2
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: slli a2, a0, 8
+; RV32I-NEXT: add a0, a0, a2
+; RV32I-NEXT: slli a2, a0, 16
+; RV32I-NEXT: add a0, a0, a2
+; RV32I-NEXT: srli a5, a0, 24
+; RV32I-NEXT: .LBB0_9: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: sw a4, 32(sp) # 4-byte Folded Spill
+; RV32I-NEXT: or a6, s8, s9
+; RV32I-NEXT: bnez s9, .LBB0_11
+; RV32I-NEXT: # %bb.10: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: srli a0, s8, 1
+; RV32I-NEXT: or a0, s8, a0
+; RV32I-NEXT: srli a2, a0, 2
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: srli a2, a0, 4
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: srli a2, a0, 8
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: srli a2, a0, 16
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: not a0, a0
+; RV32I-NEXT: srli a2, a0, 1
+; RV32I-NEXT: and a2, a2, s7
+; RV32I-NEXT: sub a0, a0, a2
+; RV32I-NEXT: and a2, a0, s1
+; RV32I-NEXT: srli a0, a0, 2
+; RV32I-NEXT: and a0, a0, s1
+; RV32I-NEXT: add a0, a2, a0
+; RV32I-NEXT: srli a2, a0, 4
+; RV32I-NEXT: add a0, a0, a2
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: slli a2, a0, 8
+; RV32I-NEXT: add a0, a0, a2
+; RV32I-NEXT: slli a2, a0, 16
+; RV32I-NEXT: add a0, a0, a2
+; RV32I-NEXT: srli a0, a0, 24
+; RV32I-NEXT: addi a4, a0, 32
+; RV32I-NEXT: j .LBB0_12
+; RV32I-NEXT: .LBB0_11:
+; RV32I-NEXT: srli a0, s9, 1
+; RV32I-NEXT: or a0, s9, a0
+; RV32I-NEXT: srli a2, a0, 2
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: srli a2, a0, 4
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: srli a2, a0, 8
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: srli a2, a0, 16
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: not a0, a0
+; RV32I-NEXT: srli a2, a0, 1
+; RV32I-NEXT: and a2, a2, s7
+; RV32I-NEXT: sub a0, a0, a2
+; RV32I-NEXT: and a2, a0, s1
+; RV32I-NEXT: srli a0, a0, 2
+; RV32I-NEXT: and a0, a0, s1
+; RV32I-NEXT: add a0, a2, a0
+; RV32I-NEXT: srli a2, a0, 4
+; RV32I-NEXT: add a0, a0, a2
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: slli a2, a0, 8
+; RV32I-NEXT: add a0, a0, a2
+; RV32I-NEXT: slli a2, a0, 16
+; RV32I-NEXT: add a0, a0, a2
+; RV32I-NEXT: srli a4, a0, 24
+; RV32I-NEXT: .LBB0_12: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: sw t0, 100(sp) # 4-byte Folded Spill
+; RV32I-NEXT: addi a0, a5, 64
+; RV32I-NEXT: bnez a6, .LBB0_14
+; RV32I-NEXT: # %bb.13: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: mv a4, a0
+; RV32I-NEXT: .LBB0_14: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: snez s0, a6
+; RV32I-NEXT: sw a7, 44(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw t1, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT: bnez s5, .LBB0_16
+; RV32I-NEXT: # %bb.15: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: srli a2, s6, 1
+; RV32I-NEXT: or a2, s6, a2
+; RV32I-NEXT: srli a3, a2, 2
+; RV32I-NEXT: or a2, a2, a3
+; RV32I-NEXT: srli a3, a2, 4
+; RV32I-NEXT: or a2, a2, a3
+; RV32I-NEXT: srli a3, a2, 8
+; RV32I-NEXT: or a2, a2, a3
+; RV32I-NEXT: srli a3, a2, 16
+; RV32I-NEXT: or a2, a2, a3
+; RV32I-NEXT: not a2, a2
+; RV32I-NEXT: srli a3, a2, 1
+; RV32I-NEXT: and a3, a3, s7
+; RV32I-NEXT: sub a2, a2, a3
+; RV32I-NEXT: and a3, a2, s1
+; RV32I-NEXT: srli a2, a2, 2
+; RV32I-NEXT: and a2, a2, s1
+; RV32I-NEXT: add a2, a3, a2
+; RV32I-NEXT: srli a3, a2, 4
+; RV32I-NEXT: add a2, a2, a3
+; RV32I-NEXT: and a2, a2, a1
+; RV32I-NEXT: slli a3, a2, 8
+; RV32I-NEXT: add a2, a2, a3
+; RV32I-NEXT: slli a3, a2, 16
+; RV32I-NEXT: add a2, a2, a3
+; RV32I-NEXT: srli a2, a2, 24
+; RV32I-NEXT: addi a2, a2, 32
+; RV32I-NEXT: j .LBB0_17
+; RV32I-NEXT: .LBB0_16:
+; RV32I-NEXT: srli a2, s5, 1
+; RV32I-NEXT: or a2, s5, a2
+; RV32I-NEXT: srli a3, a2, 2
+; RV32I-NEXT: or a2, a2, a3
+; RV32I-NEXT: srli a3, a2, 4
+; RV32I-NEXT: or a2, a2, a3
+; RV32I-NEXT: srli a3, a2, 8
+; RV32I-NEXT: or a2, a2, a3
+; RV32I-NEXT: srli a3, a2, 16
+; RV32I-NEXT: or a2, a2, a3
+; RV32I-NEXT: not a2, a2
+; RV32I-NEXT: srli a3, a2, 1
+; RV32I-NEXT: and a3, a3, s7
+; RV32I-NEXT: sub a2, a2, a3
+; RV32I-NEXT: and a3, a2, s1
+; RV32I-NEXT: srli a2, a2, 2
+; RV32I-NEXT: and a2, a2, s1
+; RV32I-NEXT: add a2, a3, a2
+; RV32I-NEXT: srli a3, a2, 4
+; RV32I-NEXT: add a2, a2, a3
+; RV32I-NEXT: and a2, a2, a1
+; RV32I-NEXT: slli a3, a2, 8
+; RV32I-NEXT: add a2, a2, a3
+; RV32I-NEXT: slli a3, a2, 16
+; RV32I-NEXT: add a2, a2, a3
+; RV32I-NEXT: srli a2, a2, 24
+; RV32I-NEXT: .LBB0_17: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: sw s8, 84(sp) # 4-byte Folded Spill
+; RV32I-NEXT: lw a3, 88(sp) # 4-byte Folded Reload
+; RV32I-NEXT: or a6, a3, s8
+; RV32I-NEXT: sw s9, 80(sp) # 4-byte Folded Spill
+; RV32I-NEXT: or a7, t5, s9
+; RV32I-NEXT: or t0, s6, t6
+; RV32I-NEXT: or t1, s5, s4
+; RV32I-NEXT: sltu a5, a0, a5
+; RV32I-NEXT: addi s0, s0, -1
+; RV32I-NEXT: addi a0, a2, 64
+; RV32I-NEXT: or s2, t6, s4
+; RV32I-NEXT: sltu s8, a0, a2
+; RV32I-NEXT: snez s9, s2
+; RV32I-NEXT: addi s9, s9, -1
+; RV32I-NEXT: bnez s4, .LBB0_19
+; RV32I-NEXT: # %bb.18: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: srli a2, t6, 1
+; RV32I-NEXT: or a2, t6, a2
+; RV32I-NEXT: srli a3, a2, 2
+; RV32I-NEXT: or a2, a2, a3
+; RV32I-NEXT: srli a3, a2, 4
+; RV32I-NEXT: or a2, a2, a3
+; RV32I-NEXT: srli a3, a2, 8
+; RV32I-NEXT: or a2, a2, a3
+; RV32I-NEXT: srli a3, a2, 16
+; RV32I-NEXT: or a2, a2, a3
+; RV32I-NEXT: not a2, a2
+; RV32I-NEXT: srli a3, a2, 1
+; RV32I-NEXT: and a3, a3, s7
+; RV32I-NEXT: sub a2, a2, a3
+; RV32I-NEXT: and a3, a2, s1
+; RV32I-NEXT: srli a2, a2, 2
+; RV32I-NEXT: and a2, a2, s1
+; RV32I-NEXT: add a2, a3, a2
+; RV32I-NEXT: srli a3, a2, 4
+; RV32I-NEXT: add a2, a2, a3
+; RV32I-NEXT: and a1, a2, a1
+; RV32I-NEXT: slli a2, a1, 8
+; RV32I-NEXT: add a1, a1, a2
+; RV32I-NEXT: slli a2, a1, 16
+; RV32I-NEXT: add a1, a1, a2
+; RV32I-NEXT: srli a1, a1, 24
+; RV32I-NEXT: addi a2, a1, 32
+; RV32I-NEXT: j .LBB0_20
+; RV32I-NEXT: .LBB0_19:
+; RV32I-NEXT: srli a2, s4, 1
+; RV32I-NEXT: or a2, s4, a2
+; RV32I-NEXT: srli a3, a2, 2
+; RV32I-NEXT: or a2, a2, a3
+; RV32I-NEXT: srli a3, a2, 4
+; RV32I-NEXT: or a2, a2, a3
+; RV32I-NEXT: srli a3, a2, 8
+; RV32I-NEXT: or a2, a2, a3
+; RV32I-NEXT: srli a3, a2, 16
+; RV32I-NEXT: or a2, a2, a3
+; RV32I-NEXT: not a2, a2
+; RV32I-NEXT: srli a3, a2, 1
+; RV32I-NEXT: and a3, a3, s7
+; RV32I-NEXT: sub a2, a2, a3
+; RV32I-NEXT: and a3, a2, s1
+; RV32I-NEXT: srli a2, a2, 2
+; RV32I-NEXT: and a2, a2, s1
+; RV32I-NEXT: add a2, a3, a2
+; RV32I-NEXT: srli a3, a2, 4
+; RV32I-NEXT: add a2, a2, a3
+; RV32I-NEXT: and a1, a2, a1
+; RV32I-NEXT: slli a2, a1, 8
+; RV32I-NEXT: add a1, a1, a2
+; RV32I-NEXT: slli a2, a1, 16
+; RV32I-NEXT: add a1, a1, a2
+; RV32I-NEXT: srli a2, a1, 24
+; RV32I-NEXT: .LBB0_20: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: or a3, a6, a7
+; RV32I-NEXT: or a6, t0, t1
+; RV32I-NEXT: and a5, s0, a5
+; RV32I-NEXT: and a1, s9, s8
+; RV32I-NEXT: bnez s2, .LBB0_22
+; RV32I-NEXT: # %bb.21: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: mv a2, a0
+; RV32I-NEXT: .LBB0_22: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: seqz a0, a3
+; RV32I-NEXT: seqz a3, a6
+; RV32I-NEXT: sltu a7, a4, a2
+; RV32I-NEXT: sub t0, a5, a1
+; RV32I-NEXT: mv a6, a7
+; RV32I-NEXT: lw t1, 100(sp) # 4-byte Folded Reload
+; RV32I-NEXT: beq a5, a1, .LBB0_24
+; RV32I-NEXT: # %bb.23: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: sltu a6, a5, a1
+; RV32I-NEXT: .LBB0_24: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: sub a1, t0, a7
+; RV32I-NEXT: or a3, a0, a3
+; RV32I-NEXT: neg a0, a6
+; RV32I-NEXT: seqz s10, a6
+; RV32I-NEXT: addi s10, s10, -1
+; RV32I-NEXT: sub a2, a4, a2
+; RV32I-NEXT: or a5, a0, s10
+; RV32I-NEXT: xor a4, t1, t2
+; RV32I-NEXT: beqz a5, .LBB0_26
+; RV32I-NEXT: # %bb.25: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: snez a5, a5
+; RV32I-NEXT: j .LBB0_27
+; RV32I-NEXT: .LBB0_26:
+; RV32I-NEXT: snez a5, a1
+; RV32I-NEXT: sltiu a6, a2, 128
+; RV32I-NEXT: xori a6, a6, 1
+; RV32I-NEXT: or a5, a6, a5
+; RV32I-NEXT: .LBB0_27: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: srai a4, a4, 31
+; RV32I-NEXT: sw a4, 60(sp) # 4-byte Folded Spill
+; RV32I-NEXT: or a5, a3, a5
+; RV32I-NEXT: addi a4, a5, -1
+; RV32I-NEXT: and s8, a4, s4
+; RV32I-NEXT: and s7, a4, t6
+; RV32I-NEXT: and a3, a4, s5
+; RV32I-NEXT: and a4, a4, s6
+; RV32I-NEXT: sw t2, 56(sp) # 4-byte Folded Spill
+; RV32I-NEXT: bnez a5, .LBB0_38
+; RV32I-NEXT: # %bb.28: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: xori a5, a2, 127
+; RV32I-NEXT: or a5, a5, a0
+; RV32I-NEXT: or a6, a1, s10
+; RV32I-NEXT: or a5, a5, a6
+; RV32I-NEXT: beqz a5, .LBB0_38
+; RV32I-NEXT: # %bb.29: # %udiv-bb15
+; RV32I-NEXT: sw t4, 92(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw t3, 96(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s11, 64(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s3, 68(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw ra, 72(sp) # 4-byte Folded Spill
+; RV32I-NEXT: addi s7, a2, 1
+; RV32I-NEXT: li a4, 127
+; RV32I-NEXT: sub a4, a4, a2
+; RV32I-NEXT: sw zero, 200(sp)
+; RV32I-NEXT: sw zero, 204(sp)
+; RV32I-NEXT: sw zero, 208(sp)
+; RV32I-NEXT: sw zero, 212(sp)
+; RV32I-NEXT: sw s6, 216(sp)
+; RV32I-NEXT: sw s5, 220(sp)
+; RV32I-NEXT: sw t6, 224(sp)
+; RV32I-NEXT: sw s4, 228(sp)
+; RV32I-NEXT: addi a2, sp, 216
+; RV32I-NEXT: seqz s8, s7
+; RV32I-NEXT: srli a3, a4, 3
+; RV32I-NEXT: andi a5, a4, 31
+; RV32I-NEXT: add s8, a1, s8
+; RV32I-NEXT: andi a3, a3, 12
+; RV32I-NEXT: xori a5, a5, 31
+; RV32I-NEXT: or a1, s7, s8
+; RV32I-NEXT: sub a2, a2, a3
+; RV32I-NEXT: seqz a1, a1
+; RV32I-NEXT: lw a3, 0(a2)
+; RV32I-NEXT: lw a6, 4(a2)
+; RV32I-NEXT: lw a7, 8(a2)
+; RV32I-NEXT: lw a2, 12(a2)
+; RV32I-NEXT: add a1, a0, a1
+; RV32I-NEXT: sltu a0, a1, a0
+; RV32I-NEXT: or t0, s7, a1
+; RV32I-NEXT: add s10, s10, a0
+; RV32I-NEXT: or a0, s8, s10
+; RV32I-NEXT: srli t1, a7, 1
+; RV32I-NEXT: srli t2, a6, 1
+; RV32I-NEXT: or a0, t0, a0
+; RV32I-NEXT: srli t0, a3, 1
+; RV32I-NEXT: srl t1, t1, a5
+; RV32I-NEXT: srl t2, t2, a5
+; RV32I-NEXT: srl a5, t0, a5
+; RV32I-NEXT: sll a2, a2, a4
+; RV32I-NEXT: or t0, a2, t1
+; RV32I-NEXT: sll a2, a7, a4
+; RV32I-NEXT: sll a6, a6, a4
+; RV32I-NEXT: or t2, a2, t2
+; RV32I-NEXT: or a6, a6, a5
+; RV32I-NEXT: sll s9, a3, a4
+; RV32I-NEXT: li ra, 0
+; RV32I-NEXT: beqz a0, .LBB0_37
+; RV32I-NEXT: # %bb.30: # %udiv-preheader4
+; RV32I-NEXT: li t1, 0
+; RV32I-NEXT: li a4, 0
+; RV32I-NEXT: li a7, 0
+; RV32I-NEXT: sw zero, 184(sp)
+; RV32I-NEXT: sw zero, 188(sp)
+; RV32I-NEXT: sw zero, 192(sp)
+; RV32I-NEXT: sw zero, 196(sp)
+; RV32I-NEXT: sw s6, 168(sp)
+; RV32I-NEXT: sw s5, 172(sp)
+; RV32I-NEXT: sw t6, 176(sp)
+; RV32I-NEXT: sw s4, 180(sp)
+; RV32I-NEXT: srli a0, s7, 3
+; RV32I-NEXT: addi a2, sp, 168
+; RV32I-NEXT: andi a0, a0, 12
+; RV32I-NEXT: add a0, a2, a0
+; RV32I-NEXT: lw a2, 4(a0)
+; RV32I-NEXT: lw a3, 8(a0)
+; RV32I-NEXT: lw a5, 12(a0)
+; RV32I-NEXT: lw t3, 0(a0)
+; RV32I-NEXT: andi a0, s7, 31
+; RV32I-NEXT: xori a0, a0, 31
+; RV32I-NEXT: slli t4, a5, 1
+; RV32I-NEXT: slli t6, a3, 1
+; RV32I-NEXT: slli s0, a2, 1
+; RV32I-NEXT: sll t4, t4, a0
+; RV32I-NEXT: sll s1, t6, a0
+; RV32I-NEXT: sll s0, s0, a0
+; RV32I-NEXT: lw s2, 88(sp) # 4-byte Folded Reload
+; RV32I-NEXT: seqz a0, s2
+; RV32I-NEXT: srl a3, a3, s7
+; RV32I-NEXT: or s11, a3, t4
+; RV32I-NEXT: or a3, s2, t5
+; RV32I-NEXT: sub t6, t5, a0
+; RV32I-NEXT: seqz a3, a3
+; RV32I-NEXT: srl a0, a2, s7
+; RV32I-NEXT: or a0, a0, s1
+; RV32I-NEXT: lw a2, 84(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sub s4, a2, a3
+; RV32I-NEXT: sltu a2, a2, a3
+; RV32I-NEXT: lw a3, 80(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sub s5, a3, a2
+; RV32I-NEXT: srl a2, t3, s7
+; RV32I-NEXT: srl s1, a5, s7
+; RV32I-NEXT: or s0, a2, s0
+; RV32I-NEXT: addi s2, s2, -1
+; RV32I-NEXT: sw s2, 76(sp) # 4-byte Folded Spill
+; RV32I-NEXT: j .LBB0_32
+; RV32I-NEXT: .LBB0_31: # %udiv-do-while3
+; RV32I-NEXT: # in Loop: Header=BB0_32 Depth=1
+; RV32I-NEXT: srli a5, t2, 31
+; RV32I-NEXT: slli t0, t0, 1
+; RV32I-NEXT: sub a3, s3, a3
+; RV32I-NEXT: srli s3, a6, 31
+; RV32I-NEXT: slli t2, t2, 1
+; RV32I-NEXT: or a5, t0, a5
+; RV32I-NEXT: srli t0, s9, 31
+; RV32I-NEXT: slli a6, a6, 1
+; RV32I-NEXT: slli s9, s9, 1
+; RV32I-NEXT: or t2, t2, s3
+; RV32I-NEXT: lw t3, 80(sp) # 4-byte Folded Reload
+; RV32I-NEXT: and s3, s11, t3
+; RV32I-NEXT: or a6, a6, t0
+; RV32I-NEXT: lw t0, 84(sp) # 4-byte Folded Reload
+; RV32I-NEXT: and t0, s11, t0
+; RV32I-NEXT: or s9, ra, s9
+; RV32I-NEXT: sub t3, a2, t0
+; RV32I-NEXT: sltu a2, a2, t0
+; RV32I-NEXT: or t0, s7, s8
+; RV32I-NEXT: sub s3, a0, s3
+; RV32I-NEXT: seqz t4, s7
+; RV32I-NEXT: addi s7, s7, -1
+; RV32I-NEXT: andi ra, s11, 1
+; RV32I-NEXT: sub a0, a3, s6
+; RV32I-NEXT: seqz a3, t0
+; RV32I-NEXT: sub s8, s8, t4
+; RV32I-NEXT: or a6, t1, a6
+; RV32I-NEXT: or t2, a4, t2
+; RV32I-NEXT: or t0, a7, a5
+; RV32I-NEXT: sub s11, t3, s1
+; RV32I-NEXT: sltu a4, t3, s1
+; RV32I-NEXT: sub s1, s3, a2
+; RV32I-NEXT: sltu a2, a1, a3
+; RV32I-NEXT: sub a1, a1, a3
+; RV32I-NEXT: sub s1, s1, a4
+; RV32I-NEXT: sub s10, s10, a2
+; RV32I-NEXT: or a2, s8, s10
+; RV32I-NEXT: or a3, s7, a1
+; RV32I-NEXT: or a2, a3, a2
+; RV32I-NEXT: sub s0, s0, s2
+; RV32I-NEXT: li t1, 0
+; RV32I-NEXT: li a4, 0
+; RV32I-NEXT: li a7, 0
+; RV32I-NEXT: beqz a2, .LBB0_37
+; RV32I-NEXT: .LBB0_32: # %udiv-do-while3
+; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT: srli a2, s0, 31
+; RV32I-NEXT: slli a3, a0, 1
+; RV32I-NEXT: slli s0, s0, 1
+; RV32I-NEXT: or s3, a3, a2
+; RV32I-NEXT: srli a2, t0, 31
+; RV32I-NEXT: or s0, s0, a2
+; RV32I-NEXT: beq t6, s3, .LBB0_34
+; RV32I-NEXT: # %bb.33: # %udiv-do-while3
+; RV32I-NEXT: # in Loop: Header=BB0_32 Depth=1
+; RV32I-NEXT: sltu a3, t6, s3
+; RV32I-NEXT: j .LBB0_35
+; RV32I-NEXT: .LBB0_34: # in Loop: Header=BB0_32 Depth=1
+; RV32I-NEXT: lw a2, 76(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sltu a3, a2, s0
+; RV32I-NEXT: .LBB0_35: # %udiv-do-while3
+; RV32I-NEXT: # in Loop: Header=BB0_32 Depth=1
+; RV32I-NEXT: srli a2, s11, 31
+; RV32I-NEXT: slli s1, s1, 1
+; RV32I-NEXT: srli s2, a0, 31
+; RV32I-NEXT: slli s11, s11, 1
+; RV32I-NEXT: or a0, s1, a2
+; RV32I-NEXT: or a2, s11, s2
+; RV32I-NEXT: sub s1, s4, a2
+; RV32I-NEXT: sltu s2, s4, a2
+; RV32I-NEXT: sub s6, s5, a0
+; RV32I-NEXT: sltu a3, s1, a3
+; RV32I-NEXT: sub s1, s6, s2
+; RV32I-NEXT: sub s1, s1, a3
+; RV32I-NEXT: srai s11, s1, 31
+; RV32I-NEXT: lw a3, 88(sp) # 4-byte Folded Reload
+; RV32I-NEXT: and s2, s11, a3
+; RV32I-NEXT: and a3, s11, t5
+; RV32I-NEXT: sltu s6, s0, s2
+; RV32I-NEXT: mv s1, s6
+; RV32I-NEXT: beq s3, a3, .LBB0_31
+; RV32I-NEXT: # %bb.36: # %udiv-do-while3
+; RV32I-NEXT: # in Loop: Header=BB0_32 Depth=1
+; RV32I-NEXT: sltu s1, s3, a3
+; RV32I-NEXT: j .LBB0_31
+; RV32I-NEXT: .LBB0_37: # %udiv-loop-exit2
+; RV32I-NEXT: srli a0, s9, 31
+; RV32I-NEXT: slli a3, a6, 1
+; RV32I-NEXT: srli a1, a6, 31
+; RV32I-NEXT: or a3, a3, a0
+; RV32I-NEXT: slli a0, t2, 1
+; RV32I-NEXT: srli a5, t2, 31
+; RV32I-NEXT: slli t0, t0, 1
+; RV32I-NEXT: slli s9, s9, 1
+; RV32I-NEXT: or s7, a0, a1
+; RV32I-NEXT: or s8, t0, a5
+; RV32I-NEXT: or a4, ra, s9
+; RV32I-NEXT: lw ra, 72(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s3, 68(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s11, 64(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw t3, 96(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw t4, 92(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw t2, 56(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw t1, 100(sp) # 4-byte Folded Reload
+; RV32I-NEXT: .LBB0_38: # %udiv-end1
+; RV32I-NEXT: lw s2, 60(sp) # 4-byte Folded Reload
+; RV32I-NEXT: xor a0, a4, s2
+; RV32I-NEXT: sw a0, 40(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sltu a0, a0, s2
+; RV32I-NEXT: xor a1, a3, s2
+; RV32I-NEXT: sw a0, 36(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw a0, 84(sp) # 4-byte Folded Spill
+; RV32I-NEXT: lw s9, 44(sp) # 4-byte Folded Reload
+; RV32I-NEXT: beqz a3, .LBB0_40
+; RV32I-NEXT: # %bb.39: # %udiv-end1
+; RV32I-NEXT: sltu a0, a1, s2
+; RV32I-NEXT: sw a0, 84(sp) # 4-byte Folded Spill
+; RV32I-NEXT: .LBB0_40: # %udiv-end1
+; RV32I-NEXT: neg a0, s11
+; RV32I-NEXT: lw a3, 32(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sw a1, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT: bltz t2, .LBB0_42
+; RV32I-NEXT: # %bb.41: # %udiv-end1
+; RV32I-NEXT: mv s4, t2
+; RV32I-NEXT: bltz t2, .LBB0_43
+; RV32I-NEXT: j .LBB0_44
+; RV32I-NEXT: .LBB0_42:
+; RV32I-NEXT: sltu a1, a0, a3
+; RV32I-NEXT: lw a2, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT: neg a2, a2
+; RV32I-NEXT: sub s4, a2, a1
+; RV32I-NEXT: bgez t2, .LBB0_44
+; RV32I-NEXT: .LBB0_43:
+; RV32I-NEXT: sub s11, a0, a3
+; RV32I-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT: add a0, s3, a0
+; RV32I-NEXT: neg s3, a0
+; RV32I-NEXT: neg ra, ra
+; RV32I-NEXT: .LBB0_44: # %udiv-end1
+; RV32I-NEXT: bgez t1, .LBB0_46
+; RV32I-NEXT: # %bb.45:
+; RV32I-NEXT: neg a0, s9
+; RV32I-NEXT: lw a1, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT: neg a1, a1
+; RV32I-NEXT: lw a2, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: add a2, t3, a2
+; RV32I-NEXT: lw a4, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sltu a3, a0, a4
+; RV32I-NEXT: sub s9, a0, a4
+; RV32I-NEXT: neg t3, a2
+; RV32I-NEXT: sub t1, a1, a3
+; RV32I-NEXT: neg t4, t4
+; RV32I-NEXT: .LBB0_46: # %udiv-end1
+; RV32I-NEXT: lui a0, 349525
+; RV32I-NEXT: lui a2, 209715
+; RV32I-NEXT: lui a3, 61681
+; RV32I-NEXT: addi a1, a0, 1365
+; RV32I-NEXT: addi a5, a2, 819
+; RV32I-NEXT: addi a4, a3, -241
+; RV32I-NEXT: bnez t3, .LBB0_49
+; RV32I-NEXT: # %bb.47: # %udiv-end1
+; RV32I-NEXT: srli a0, t4, 1
+; RV32I-NEXT: or a0, t4, a0
+; RV32I-NEXT: srli a2, a0, 2
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: srli a2, a0, 4
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: srli a2, a0, 8
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: srli a2, a0, 16
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: not a0, a0
+; RV32I-NEXT: srli a2, a0, 1
+; RV32I-NEXT: and a2, a2, a1
+; RV32I-NEXT: sub a0, a0, a2
+; RV32I-NEXT: and a2, a0, a5
+; RV32I-NEXT: srli a0, a0, 2
+; RV32I-NEXT: and a0, a0, a5
+; RV32I-NEXT: add a0, a2, a0
+; RV32I-NEXT: srli a2, a0, 4
+; RV32I-NEXT: add a0, a0, a2
+; RV32I-NEXT: and a0, a0, a4
+; RV32I-NEXT: slli a2, a0, 8
+; RV32I-NEXT: add a0, a0, a2
+; RV32I-NEXT: slli a2, a0, 16
+; RV32I-NEXT: add a0, a0, a2
+; RV32I-NEXT: srli a0, a0, 24
+; RV32I-NEXT: addi a7, a0, 32
+; RV32I-NEXT: or t0, s9, t1
+; RV32I-NEXT: beqz t1, .LBB0_50
+; RV32I-NEXT: .LBB0_48:
+; RV32I-NEXT: srli a0, t1, 1
+; RV32I-NEXT: or a0, t1, a0
+; RV32I-NEXT: srli a2, a0, 2
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: srli a2, a0, 4
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: srli a2, a0, 8
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: srli a2, a0, 16
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: not a0, a0
+; RV32I-NEXT: srli a2, a0, 1
+; RV32I-NEXT: and a2, a2, a1
+; RV32I-NEXT: sub a0, a0, a2
+; RV32I-NEXT: and a2, a0, a5
+; RV32I-NEXT: srli a0, a0, 2
+; RV32I-NEXT: and a0, a0, a5
+; RV32I-NEXT: add a0, a2, a0
+; RV32I-NEXT: srli a2, a0, 4
+; RV32I-NEXT: add a0, a0, a2
+; RV32I-NEXT: and a0, a0, a4
+; RV32I-NEXT: slli a2, a0, 8
+; RV32I-NEXT: add a0, a0, a2
+; RV32I-NEXT: slli a2, a0, 16
+; RV32I-NEXT: add a0, a0, a2
+; RV32I-NEXT: srli a6, a0, 24
+; RV32I-NEXT: addi a0, a7, 64
+; RV32I-NEXT: beqz t0, .LBB0_51
+; RV32I-NEXT: j .LBB0_52
+; RV32I-NEXT: .LBB0_49:
+; RV32I-NEXT: srli a0, t3, 1
+; RV32I-NEXT: or a0, t3, a0
+; RV32I-NEXT: srli a2, a0, 2
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: srli a2, a0, 4
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: srli a2, a0, 8
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: srli a2, a0, 16
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: not a0, a0
+; RV32I-NEXT: srli a2, a0, 1
+; RV32I-NEXT: and a2, a2, a1
+; RV32I-NEXT: sub a0, a0, a2
+; RV32I-NEXT: and a2, a0, a5
+; RV32I-NEXT: srli a0, a0, 2
+; RV32I-NEXT: and a0, a0, a5
+; RV32I-NEXT: add a0, a2, a0
+; RV32I-NEXT: srli a2, a0, 4
+; RV32I-NEXT: add a0, a0, a2
+; RV32I-NEXT: and a0, a0, a4
+; RV32I-NEXT: slli a2, a0, 8
+; RV32I-NEXT: add a0, a0, a2
+; RV32I-NEXT: slli a2, a0, 16
+; RV32I-NEXT: add a0, a0, a2
+; RV32I-NEXT: srli a7, a0, 24
+; RV32I-NEXT: or t0, s9, t1
+; RV32I-NEXT: bnez t1, .LBB0_48
+; RV32I-NEXT: .LBB0_50: # %udiv-end1
+; RV32I-NEXT: srli a0, s9, 1
+; RV32I-NEXT: or a0, s9, a0
+; RV32I-NEXT: srli a2, a0, 2
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: srli a2, a0, 4
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: srli a2, a0, 8
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: srli a2, a0, 16
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: not a0, a0
+; RV32I-NEXT: srli a2, a0, 1
+; RV32I-NEXT: and a2, a2, a1
+; RV32I-NEXT: sub a0, a0, a2
+; RV32I-NEXT: and a2, a0, a5
+; RV32I-NEXT: srli a0, a0, 2
+; RV32I-NEXT: and a0, a0, a5
+; RV32I-NEXT: add a0, a2, a0
+; RV32I-NEXT: srli a2, a0, 4
+; RV32I-NEXT: add a0, a0, a2
+; RV32I-NEXT: and a0, a0, a4
+; RV32I-NEXT: slli a2, a0, 8
+; RV32I-NEXT: add a0, a0, a2
+; RV32I-NEXT: slli a2, a0, 16
+; RV32I-NEXT: add a0, a0, a2
+; RV32I-NEXT: srli a0, a0, 24
+; RV32I-NEXT: addi a6, a0, 32
+; RV32I-NEXT: addi a0, a7, 64
+; RV32I-NEXT: bnez t0, .LBB0_52
+; RV32I-NEXT: .LBB0_51: # %udiv-end1
+; RV32I-NEXT: mv a6, a0
+; RV32I-NEXT: .LBB0_52: # %udiv-end1
+; RV32I-NEXT: snez t5, t0
+; RV32I-NEXT: bnez s3, .LBB0_54
+; RV32I-NEXT: # %bb.53: # %udiv-end1
+; RV32I-NEXT: srli a2, ra, 1
+; RV32I-NEXT: or a2, ra, a2
+; RV32I-NEXT: srli a3, a2, 2
+; RV32I-NEXT: or a2, a2, a3
+; RV32I-NEXT: srli a3, a2, 4
+; RV32I-NEXT: or a2, a2, a3
+; RV32I-NEXT: srli a3, a2, 8
+; RV32I-NEXT: or a2, a2, a3
+; RV32I-NEXT: srli a3, a2, 16
+; RV32I-NEXT: or a2, a2, a3
+; RV32I-NEXT: not a2, a2
+; RV32I-NEXT: srli a3, a2, 1
+; RV32I-NEXT: and a3, a3, a1
+; RV32I-NEXT: sub a2, a2, a3
+; RV32I-NEXT: and a3, a2, a5
+; RV32I-NEXT: srli a2, a2, 2
+; RV32I-NEXT: and a2, a2, a5
+; RV32I-NEXT: add a2, a3, a2
+; RV32I-NEXT: srli a3, a2, 4
+; RV32I-NEXT: add a2, a2, a3
+; RV32I-NEXT: and a2, a2, a4
+; RV32I-NEXT: slli a3, a2, 8
+; RV32I-NEXT: add a2, a2, a3
+; RV32I-NEXT: slli a3, a2, 16
+; RV32I-NEXT: add a2, a2, a3
+; RV32I-NEXT: srli a2, a2, 24
+; RV32I-NEXT: addi a2, a2, 32
+; RV32I-NEXT: j .LBB0_55
+; RV32I-NEXT: .LBB0_54:
+; RV32I-NEXT: srli a2, s3, 1
+; RV32I-NEXT: or a2, s3, a2
+; RV32I-NEXT: srli a3, a2, 2
+; RV32I-NEXT: or a2, a2, a3
+; RV32I-NEXT: srli a3, a2, 4
+; RV32I-NEXT: or a2, a2, a3
+; RV32I-NEXT: srli a3, a2, 8
+; RV32I-NEXT: or a2, a2, a3
+; RV32I-NEXT: srli a3, a2, 16
+; RV32I-NEXT: or a2, a2, a3
+; RV32I-NEXT: not a2, a2
+; RV32I-NEXT: srli a3, a2, 1
+; RV32I-NEXT: and a3, a3, a1
+; RV32I-NEXT: sub a2, a2, a3
+; RV32I-NEXT: and a3, a2, a5
+; RV32I-NEXT: srli a2, a2, 2
+; RV32I-NEXT: and a2, a2, a5
+; RV32I-NEXT: add a2, a3, a2
+; RV32I-NEXT: srli a3, a2, 4
+; RV32I-NEXT: add a2, a2, a3
+; RV32I-NEXT: and a2, a2, a4
+; RV32I-NEXT: slli a3, a2, 8
+; RV32I-NEXT: add a2, a2, a3
+; RV32I-NEXT: slli a3, a2, 16
+; RV32I-NEXT: add a2, a2, a3
+; RV32I-NEXT: srli a2, a2, 24
+; RV32I-NEXT: .LBB0_55: # %udiv-end1
+; RV32I-NEXT: sw t4, 92(sp) # 4-byte Folded Spill
+; RV32I-NEXT: or t0, t4, s9
+; RV32I-NEXT: sw t3, 96(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw t1, 100(sp) # 4-byte Folded Spill
+; RV32I-NEXT: or t1, t3, t1
+; RV32I-NEXT: or t3, ra, s11
+; RV32I-NEXT: or t4, s3, s4
+; RV32I-NEXT: sltu a7, a0, a7
+; RV32I-NEXT: addi t5, t5, -1
+; RV32I-NEXT: addi a0, a2, 64
+; RV32I-NEXT: or t6, s11, s4
+; RV32I-NEXT: sltu s0, a0, a2
+; RV32I-NEXT: snez s1, t6
+; RV32I-NEXT: addi s1, s1, -1
+; RV32I-NEXT: bnez s4, .LBB0_57
+; RV32I-NEXT: # %bb.56: # %udiv-end1
+; RV32I-NEXT: srli a2, s11, 1
+; RV32I-NEXT: or a2, s11, a2
+; RV32I-NEXT: srli a3, a2, 2
+; RV32I-NEXT: or a2, a2, a3
+; RV32I-NEXT: srli a3, a2, 4
+; RV32I-NEXT: or a2, a2, a3
+; RV32I-NEXT: srli a3, a2, 8
+; RV32I-NEXT: or a2, a2, a3
+; RV32I-NEXT: srli a3, a2, 16
+; RV32I-NEXT: or a2, a2, a3
+; RV32I-NEXT: not a2, a2
+; RV32I-NEXT: srli a3, a2, 1
+; RV32I-NEXT: and a1, a3, a1
+; RV32I-NEXT: sub a2, a2, a1
+; RV32I-NEXT: and a1, a2, a5
+; RV32I-NEXT: srli a2, a2, 2
+; RV32I-NEXT: and a2, a2, a5
+; RV32I-NEXT: add a1, a1, a2
+; RV32I-NEXT: srli a2, a1, 4
+; RV32I-NEXT: add a1, a1, a2
+; RV32I-NEXT: and a1, a1, a4
+; RV32I-NEXT: slli a2, a1, 8
+; RV32I-NEXT: add a1, a1, a2
+; RV32I-NEXT: slli a2, a1, 16
+; RV32I-NEXT: add a1, a1, a2
+; RV32I-NEXT: srli a1, a1, 24
+; RV32I-NEXT: addi a2, a1, 32
+; RV32I-NEXT: j .LBB0_58
+; RV32I-NEXT: .LBB0_57:
+; RV32I-NEXT: srli a2, s4, 1
+; RV32I-NEXT: or a2, s4, a2
+; RV32I-NEXT: srli a3, a2, 2
+; RV32I-NEXT: or a2, a2, a3
+; RV32I-NEXT: srli a3, a2, 4
+; RV32I-NEXT: or a2, a2, a3
+; RV32I-NEXT: srli a3, a2, 8
+; RV32I-NEXT: or a2, a2, a3
+; RV32I-NEXT: srli a3, a2, 16
+; RV32I-NEXT: or a2, a2, a3
+; RV32I-NEXT: not a2, a2
+; RV32I-NEXT: srli a3, a2, 1
+; RV32I-NEXT: and a1, a3, a1
+; RV32I-NEXT: sub a2, a2, a1
+; RV32I-NEXT: and a1, a2, a5
+; RV32I-NEXT: srli a2, a2, 2
+; RV32I-NEXT: and a2, a2, a5
+; RV32I-NEXT: add a1, a1, a2
+; RV32I-NEXT: srli a2, a1, 4
+; RV32I-NEXT: add a1, a1, a2
+; RV32I-NEXT: and a1, a1, a4
+; RV32I-NEXT: slli a2, a1, 8
+; RV32I-NEXT: add a1, a1, a2
+; RV32I-NEXT: slli a2, a1, 16
+; RV32I-NEXT: add a1, a1, a2
+; RV32I-NEXT: srli a2, a1, 24
+; RV32I-NEXT: .LBB0_58: # %udiv-end1
+; RV32I-NEXT: or a3, t0, t1
+; RV32I-NEXT: or a4, t3, t4
+; RV32I-NEXT: and a1, t5, a7
+; RV32I-NEXT: and s0, s1, s0
+; RV32I-NEXT: bnez t6, .LBB0_60
+; RV32I-NEXT: # %bb.59: # %udiv-end1
+; RV32I-NEXT: mv a2, a0
+; RV32I-NEXT: .LBB0_60: # %udiv-end1
+; RV32I-NEXT: seqz a0, a3
+; RV32I-NEXT: seqz a5, a4
+; RV32I-NEXT: sltu a7, a6, a2
+; RV32I-NEXT: sub t1, a1, s0
+; RV32I-NEXT: mv t0, a7
+; RV32I-NEXT: beq a1, s0, .LBB0_62
+; RV32I-NEXT: # %bb.61: # %udiv-end1
+; RV32I-NEXT: sltu t0, a1, s0
+; RV32I-NEXT: .LBB0_62: # %udiv-end1
+; RV32I-NEXT: xor a3, s8, s2
+; RV32I-NEXT: xor a4, s7, s2
+; RV32I-NEXT: sub a1, t1, a7
+; RV32I-NEXT: or a7, a0, a5
+; RV32I-NEXT: neg a0, t0
+; RV32I-NEXT: seqz t0, t0
+; RV32I-NEXT: addi t0, t0, -1
+; RV32I-NEXT: or a5, a0, t0
+; RV32I-NEXT: sub a2, a6, a2
+; RV32I-NEXT: beqz a5, .LBB0_64
+; RV32I-NEXT: # %bb.63: # %udiv-end1
+; RV32I-NEXT: snez a6, a5
+; RV32I-NEXT: j .LBB0_65
+; RV32I-NEXT: .LBB0_64:
+; RV32I-NEXT: snez a5, a1
+; RV32I-NEXT: sltiu a6, a2, 128
+; RV32I-NEXT: xori a6, a6, 1
+; RV32I-NEXT: or a6, a6, a5
+; RV32I-NEXT: .LBB0_65: # %udiv-end1
+; RV32I-NEXT: sub a5, a4, s2
+; RV32I-NEXT: sw a5, 80(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sltu a4, a4, s2
+; RV32I-NEXT: sub a5, a3, s2
+; RV32I-NEXT: or a3, a7, a6
+; RV32I-NEXT: addi a6, a3, -1
+; RV32I-NEXT: sw s4, 76(sp) # 4-byte Folded Spill
+; RV32I-NEXT: and s5, a6, s4
+; RV32I-NEXT: and s7, a6, s11
+; RV32I-NEXT: and s4, a6, s3
+; RV32I-NEXT: and s6, a6, ra
+; RV32I-NEXT: sw ra, 72(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s3, 68(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s11, 64(sp) # 4-byte Folded Spill
+; RV32I-NEXT: bnez a3, .LBB0_77
+; RV32I-NEXT: # %bb.66: # %udiv-end1
+; RV32I-NEXT: xori a3, a2, 127
+; RV32I-NEXT: or a3, a3, a0
+; RV32I-NEXT: or a6, a1, t0
+; RV32I-NEXT: or a3, a3, a6
+; RV32I-NEXT: beqz a3, .LBB0_77
+; RV32I-NEXT: # %bb.67: # %udiv-bb1
+; RV32I-NEXT: sw a5, 32(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw a4, 44(sp) # 4-byte Folded Spill
+; RV32I-NEXT: addi a6, a2, 1
+; RV32I-NEXT: li a3, 127
+; RV32I-NEXT: sub a7, a3, a2
+; RV32I-NEXT: sw zero, 136(sp)
+; RV32I-NEXT: sw zero, 140(sp)
+; RV32I-NEXT: sw zero, 144(sp)
+; RV32I-NEXT: sw zero, 148(sp)
+; RV32I-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sw s0, 152(sp)
+; RV32I-NEXT: lw s1, 68(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sw s1, 156(sp)
+; RV32I-NEXT: lw s2, 64(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sw s2, 160(sp)
+; RV32I-NEXT: lw s3, 76(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sw s3, 164(sp)
+; RV32I-NEXT: addi a2, sp, 152
+; RV32I-NEXT: seqz a3, a6
+; RV32I-NEXT: srli a4, a7, 3
+; RV32I-NEXT: andi a5, a7, 31
+; RV32I-NEXT: add a1, a1, a3
+; RV32I-NEXT: andi a4, a4, 12
+; RV32I-NEXT: xori a3, a5, 31
+; RV32I-NEXT: or a5, a6, a1
+; RV32I-NEXT: sub a2, a2, a4
+; RV32I-NEXT: seqz t3, a5
+; RV32I-NEXT: lw a4, 0(a2)
+; RV32I-NEXT: lw a5, 4(a2)
+; RV32I-NEXT: lw t1, 8(a2)
+; RV32I-NEXT: lw a2, 12(a2)
+; RV32I-NEXT: add t3, a0, t3
+; RV32I-NEXT: sltu a0, t3, a0
+; RV32I-NEXT: or t2, a6, t3
+; RV32I-NEXT: add t0, t0, a0
+; RV32I-NEXT: or a0, a1, t0
+; RV32I-NEXT: srli t4, t1, 1
+; RV32I-NEXT: srli t5, a5, 1
+; RV32I-NEXT: or a0, t2, a0
+; RV32I-NEXT: srli t2, a4, 1
+; RV32I-NEXT: srl t4, t4, a3
+; RV32I-NEXT: srl t5, t5, a3
+; RV32I-NEXT: srl a3, t2, a3
+; RV32I-NEXT: sll a2, a2, a7
+; RV32I-NEXT: or t6, a2, t4
+; RV32I-NEXT: sll a2, t1, a7
+; RV32I-NEXT: sll a5, a5, a7
+; RV32I-NEXT: or t4, a2, t5
+; RV32I-NEXT: or t5, a5, a3
+; RV32I-NEXT: sll a7, a4, a7
+; RV32I-NEXT: beqz a0, .LBB0_75
+; RV32I-NEXT: # %bb.68: # %udiv-preheader
+; RV32I-NEXT: li t1, 0
+; RV32I-NEXT: li s4, 0
+; RV32I-NEXT: li s5, 0
+; RV32I-NEXT: li s6, 0
+; RV32I-NEXT: sw zero, 120(sp)
+; RV32I-NEXT: sw zero, 124(sp)
+; RV32I-NEXT: sw zero, 128(sp)
+; RV32I-NEXT: sw zero, 132(sp)
+; RV32I-NEXT: sw s0, 104(sp)
+; RV32I-NEXT: sw s1, 108(sp)
+; RV32I-NEXT: sw s2, 112(sp)
+; RV32I-NEXT: sw s3, 116(sp)
+; RV32I-NEXT: srli a0, a6, 3
+; RV32I-NEXT: addi a2, sp, 104
+; RV32I-NEXT: andi a0, a0, 12
+; RV32I-NEXT: add a0, a2, a0
+; RV32I-NEXT: lw a2, 4(a0)
+; RV32I-NEXT: lw a3, 8(a0)
+; RV32I-NEXT: lw a4, 12(a0)
+; RV32I-NEXT: lw a5, 0(a0)
+; RV32I-NEXT: andi a0, a6, 31
+; RV32I-NEXT: xori a0, a0, 31
+; RV32I-NEXT: slli t2, a4, 1
+; RV32I-NEXT: slli s0, a3, 1
+; RV32I-NEXT: slli s1, a2, 1
+; RV32I-NEXT: sll t2, t2, a0
+; RV32I-NEXT: sll s0, s0, a0
+; RV32I-NEXT: sll s2, s1, a0
+; RV32I-NEXT: lw s3, 92(sp) # 4-byte Folded Reload
+; RV32I-NEXT: seqz a0, s3
+; RV32I-NEXT: srl a3, a3, a6
+; RV32I-NEXT: or s11, a3, t2
+; RV32I-NEXT: lw t2, 96(sp) # 4-byte Folded Reload
+; RV32I-NEXT: or a3, s3, t2
+; RV32I-NEXT: sub s7, t2, a0
+; RV32I-NEXT: seqz a3, a3
+; RV32I-NEXT: srl a0, a2, a6
+; RV32I-NEXT: or a0, a0, s0
+; RV32I-NEXT: sub s8, s9, a3
+; RV32I-NEXT: mv ra, s9
+; RV32I-NEXT: sltu a2, s9, a3
+; RV32I-NEXT: lw a3, 100(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sub s9, a3, a2
+; RV32I-NEXT: srl a2, a5, a6
+; RV32I-NEXT: srl s1, a4, a6
+; RV32I-NEXT: or s0, a2, s2
+; RV32I-NEXT: addi s3, s3, -1
+; RV32I-NEXT: sw s3, 88(sp) # 4-byte Folded Spill
+; RV32I-NEXT: j .LBB0_70
+; RV32I-NEXT: .LBB0_69: # %udiv-do-while
+; RV32I-NEXT: # in Loop: Header=BB0_70 Depth=1
+; RV32I-NEXT: srli t2, t4, 31
+; RV32I-NEXT: slli t6, t6, 1
+; RV32I-NEXT: sub s3, s3, s11
+; RV32I-NEXT: srli s11, t5, 31
+; RV32I-NEXT: slli t4, t4, 1
+; RV32I-NEXT: or t2, t6, t2
+; RV32I-NEXT: srli t6, a7, 31
+; RV32I-NEXT: slli t5, t5, 1
+; RV32I-NEXT: slli a7, a7, 1
+; RV32I-NEXT: or t4, t4, s11
+; RV32I-NEXT: lw a4, 100(sp) # 4-byte Folded Reload
+; RV32I-NEXT: and s11, s10, a4
+; RV32I-NEXT: or t5, t5, t6
+; RV32I-NEXT: and t6, s10, ra
+; RV32I-NEXT: or a7, t1, a7
+; RV32I-NEXT: sub a4, a3, t6
+; RV32I-NEXT: sltu a3, a3, t6
+; RV32I-NEXT: or t6, a6, a1
+; RV32I-NEXT: sub a5, a0, s11
+; RV32I-NEXT: seqz s11, a6
+; RV32I-NEXT: addi a6, a6, -1
+; RV32I-NEXT: andi t1, s10, 1
+; RV32I-NEXT: sub a0, s3, a2
+; RV32I-NEXT: seqz a2, t6
+; RV32I-NEXT: sub a1, a1, s11
+; RV32I-NEXT: or t5, s4, t5
+; RV32I-NEXT: or t4, s5, t4
+; RV32I-NEXT: or t6, s6, t2
+; RV32I-NEXT: sub s11, a4, s1
+; RV32I-NEXT: sltu a4, a4, s1
+; RV32I-NEXT: sub a5, a5, a3
+; RV32I-NEXT: sltu a3, t3, a2
+; RV32I-NEXT: sub t3, t3, a2
+; RV32I-NEXT: sub s1, a5, a4
+; RV32I-NEXT: sub t0, t0, a3
+; RV32I-NEXT: or a2, a1, t0
+; RV32I-NEXT: or a3, a6, t3
+; RV32I-NEXT: or a2, a3, a2
+; RV32I-NEXT: sub s0, s0, s2
+; RV32I-NEXT: li s4, 0
+; RV32I-NEXT: li s5, 0
+; RV32I-NEXT: li s6, 0
+; RV32I-NEXT: beqz a2, .LBB0_76
+; RV32I-NEXT: .LBB0_70: # %udiv-do-while
+; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT: srli a2, s0, 31
+; RV32I-NEXT: slli a3, a0, 1
+; RV32I-NEXT: slli s0, s0, 1
+; RV32I-NEXT: or s3, a3, a2
+; RV32I-NEXT: srli a2, t6, 31
+; RV32I-NEXT: or s0, s0, a2
+; RV32I-NEXT: beq s7, s3, .LBB0_72
+; RV32I-NEXT: # %bb.71: # %udiv-do-while
+; RV32I-NEXT: # in Loop: Header=BB0_70 Depth=1
+; RV32I-NEXT: sltu a2, s7, s3
+; RV32I-NEXT: j .LBB0_73
+; RV32I-NEXT: .LBB0_72: # in Loop: Header=BB0_70 Depth=1
+; RV32I-NEXT: lw a2, 88(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sltu a2, a2, s0
+; RV32I-NEXT: .LBB0_73: # %udiv-do-while
+; RV32I-NEXT: # in Loop: Header=BB0_70 Depth=1
+; RV32I-NEXT: srli a3, s11, 31
+; RV32I-NEXT: slli s1, s1, 1
+; RV32I-NEXT: srli s2, a0, 31
+; RV32I-NEXT: slli s11, s11, 1
+; RV32I-NEXT: or a0, s1, a3
+; RV32I-NEXT: or a3, s11, s2
+; RV32I-NEXT: sub s1, s8, a3
+; RV32I-NEXT: sltu s2, s8, a3
+; RV32I-NEXT: sub s10, s9, a0
+; RV32I-NEXT: sltu a2, s1, a2
+; RV32I-NEXT: sub s1, s10, s2
+; RV32I-NEXT: sub s1, s1, a2
+; RV32I-NEXT: srai s10, s1, 31
+; RV32I-NEXT: lw a2, 92(sp) # 4-byte Folded Reload
+; RV32I-NEXT: and s2, s10, a2
+; RV32I-NEXT: lw a2, 96(sp) # 4-byte Folded Reload
+; RV32I-NEXT: and s11, s10, a2
+; RV32I-NEXT: sltu a2, s0, s2
+; RV32I-NEXT: mv s1, a2
+; RV32I-NEXT: beq s3, s11, .LBB0_69
+; RV32I-NEXT: # %bb.74: # %udiv-do-while
+; RV32I-NEXT: # in Loop: Header=BB0_70 Depth=1
+; RV32I-NEXT: sltu s1, s3, s11
+; RV32I-NEXT: j .LBB0_69
+; RV32I-NEXT: .LBB0_75:
+; RV32I-NEXT: mv ra, s9
+; RV32I-NEXT: li t1, 0
+; RV32I-NEXT: .LBB0_76: # %udiv-loop-exit
+; RV32I-NEXT: srli a0, a7, 31
+; RV32I-NEXT: slli a1, t5, 1
+; RV32I-NEXT: srli a2, t5, 31
+; RV32I-NEXT: or s4, a1, a0
+; RV32I-NEXT: slli a0, t4, 1
+; RV32I-NEXT: srli a1, t4, 31
+; RV32I-NEXT: slli t6, t6, 1
+; RV32I-NEXT: slli a7, a7, 1
+; RV32I-NEXT: or s7, a0, a2
+; RV32I-NEXT: or s5, t6, a1
+; RV32I-NEXT: or s6, t1, a7
+; RV32I-NEXT: lw t2, 56(sp) # 4-byte Folded Reload
+; RV32I-NEXT: mv s9, ra
+; RV32I-NEXT: lw a4, 44(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw a5, 32(sp) # 4-byte Folded Reload
+; RV32I-NEXT: .LBB0_77: # %udiv-end
+; RV32I-NEXT: lw a0, 84(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw a1, 80(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sltu a0, a1, a0
+; RV32I-NEXT: sw a0, 88(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sub a5, a5, a4
+; RV32I-NEXT: sw a5, 44(sp) # 4-byte Folded Spill
+; RV32I-NEXT: lw a0, 60(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw a1, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sub s10, a1, a0
+; RV32I-NEXT: srai s11, t2, 31
+; RV32I-NEXT: mv a0, s6
+; RV32I-NEXT: mv a1, s4
+; RV32I-NEXT: mv a2, s9
+; RV32I-NEXT: lw a3, 100(sp) # 4-byte Folded Reload
+; RV32I-NEXT: call __muldi3
+; RV32I-NEXT: mv s8, a0
+; RV32I-NEXT: mv s9, a1
+; RV32I-NEXT: mv a0, s7
+; RV32I-NEXT: mv a1, s5
+; RV32I-NEXT: lw s1, 92(sp) # 4-byte Folded Reload
+; RV32I-NEXT: mv a2, s1
+; RV32I-NEXT: lw s0, 96(sp) # 4-byte Folded Reload
+; RV32I-NEXT: mv a3, s0
+; RV32I-NEXT: call __muldi3
+; RV32I-NEXT: add a1, a1, s9
+; RV32I-NEXT: add s8, a0, s8
+; RV32I-NEXT: sltu s2, s8, a0
+; RV32I-NEXT: add s2, a1, s2
+; RV32I-NEXT: mv a0, s1
+; RV32I-NEXT: li a1, 0
+; RV32I-NEXT: mv a2, s6
+; RV32I-NEXT: li a3, 0
+; RV32I-NEXT: call __muldi3
+; RV32I-NEXT: mv s5, a0
+; RV32I-NEXT: mv s7, a1
+; RV32I-NEXT: mv a0, s0
+; RV32I-NEXT: li a1, 0
+; RV32I-NEXT: mv a2, s6
+; RV32I-NEXT: li a3, 0
+; RV32I-NEXT: call __muldi3
+; RV32I-NEXT: add s3, a0, s7
+; RV32I-NEXT: sltu a0, s3, a0
+; RV32I-NEXT: add s6, a1, a0
+; RV32I-NEXT: mv a0, s1
+; RV32I-NEXT: li a1, 0
+; RV32I-NEXT: mv a2, s4
+; RV32I-NEXT: li a3, 0
+; RV32I-NEXT: call __muldi3
+; RV32I-NEXT: add s3, a0, s3
+; RV32I-NEXT: sltu a0, s3, a0
+; RV32I-NEXT: add a0, a1, a0
+; RV32I-NEXT: add s7, s6, a0
+; RV32I-NEXT: mv a0, s0
+; RV32I-NEXT: li a1, 0
+; RV32I-NEXT: mv a2, s4
+; RV32I-NEXT: li a3, 0
+; RV32I-NEXT: call __muldi3
+; RV32I-NEXT: add a2, a0, s7
+; RV32I-NEXT: sltu a4, s7, s6
+; RV32I-NEXT: lw t0, 72(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sltu a3, t0, s5
+; RV32I-NEXT: sltu a0, a2, a0
+; RV32I-NEXT: add a1, a1, a4
+; RV32I-NEXT: add s8, a2, s8
+; RV32I-NEXT: add a0, a1, a0
+; RV32I-NEXT: sltu a1, s8, a2
+; RV32I-NEXT: lw a5, 64(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sltu a2, a5, s8
+; RV32I-NEXT: add a0, a0, s2
+; RV32I-NEXT: lw a4, 76(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sub a1, a4, a1
+; RV32I-NEXT: sub a1, a1, a0
+; RV32I-NEXT: sub a4, a1, a2
+; RV32I-NEXT: sub a5, a5, s8
+; RV32I-NEXT: mv a6, a3
+; RV32I-NEXT: lw t1, 68(sp) # 4-byte Folded Reload
+; RV32I-NEXT: beq t1, s3, .LBB0_79
+; RV32I-NEXT: # %bb.78: # %udiv-end
+; RV32I-NEXT: sltu a6, t1, s3
+; RV32I-NEXT: .LBB0_79: # %udiv-end
+; RV32I-NEXT: lw a0, 88(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s1, 44(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sub s1, s1, a0
+; RV32I-NEXT: lw a0, 36(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sub a0, s10, a0
+; RV32I-NEXT: lw a1, 84(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw a2, 80(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sub a1, a2, a1
+; RV32I-NEXT: lw a2, 60(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw a7, 40(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sub a2, a7, a2
+; RV32I-NEXT: sltu a7, a5, a6
+; RV32I-NEXT: sub a5, a5, a6
+; RV32I-NEXT: sub a6, t1, s3
+; RV32I-NEXT: sub t0, t0, s5
+; RV32I-NEXT: sub a4, a4, a7
+; RV32I-NEXT: sub t1, a6, a3
+; RV32I-NEXT: xor a7, a5, s11
+; RV32I-NEXT: xor a3, t0, s11
+; RV32I-NEXT: xor a5, a4, s11
+; RV32I-NEXT: xor a6, t1, s11
+; RV32I-NEXT: sltu t0, a7, s11
+; RV32I-NEXT: sltu a4, a3, s11
+; RV32I-NEXT: add t0, s11, t0
+; RV32I-NEXT: sub a5, a5, t0
+; RV32I-NEXT: sub a7, a7, s11
+; RV32I-NEXT: mv t0, a4
+; RV32I-NEXT: beqz t1, .LBB0_81
+; RV32I-NEXT: # %bb.80: # %udiv-end
+; RV32I-NEXT: sltu t0, a6, s11
+; RV32I-NEXT: .LBB0_81: # %udiv-end
+; RV32I-NEXT: sltu t1, a7, t0
+; RV32I-NEXT: sub a7, a7, t0
+; RV32I-NEXT: sub a6, a6, s11
+; RV32I-NEXT: sub a3, a3, s11
+; RV32I-NEXT: lw t0, 48(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sw a2, 0(t0)
+; RV32I-NEXT: sw a0, 4(t0)
+; RV32I-NEXT: sw a1, 8(t0)
+; RV32I-NEXT: sw s1, 12(t0)
+; RV32I-NEXT: sub a0, a5, t1
+; RV32I-NEXT: sub a1, a6, a4
+; RV32I-NEXT: lw a2, 52(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sw a3, 0(a2)
+; RV32I-NEXT: sw a1, 4(a2)
+; RV32I-NEXT: sw a7, 8(a2)
+; RV32I-NEXT: sw a0, 12(a2)
+; RV32I-NEXT: lw ra, 284(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s0, 280(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s1, 276(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s2, 272(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s3, 268(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s4, 264(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s5, 260(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s6, 256(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s7, 252(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s8, 248(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s9, 244(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s10, 240(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s11, 236(sp) # 4-byte Folded Reload
+; RV32I-NEXT: .cfi_restore ra
+; RV32I-NEXT: .cfi_restore s0
+; RV32I-NEXT: .cfi_restore s1
+; RV32I-NEXT: .cfi_restore s2
+; RV32I-NEXT: .cfi_restore s3
+; RV32I-NEXT: .cfi_restore s4
+; RV32I-NEXT: .cfi_restore s5
+; RV32I-NEXT: .cfi_restore s6
+; RV32I-NEXT: .cfi_restore s7
+; RV32I-NEXT: .cfi_restore s8
+; RV32I-NEXT: .cfi_restore s9
+; RV32I-NEXT: .cfi_restore s10
+; RV32I-NEXT: .cfi_restore s11
+; RV32I-NEXT: addi sp, sp, 288
+; RV32I-NEXT: .cfi_def_cfa_offset 0
+; RV32I-NEXT: ret
+;
+; RV32M-LABEL: sdivrem_i128:
+; RV32M: # %bb.0: # %_udiv-special-cases_udiv-special-cases
+; RV32M-NEXT: addi sp, sp, -272
+; RV32M-NEXT: .cfi_def_cfa_offset 272
+; RV32M-NEXT: sw ra, 268(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw s0, 264(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw s1, 260(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw s2, 256(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw s3, 252(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw s4, 248(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw s5, 244(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw s6, 240(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw s7, 236(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw s8, 232(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw s9, 228(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw s10, 224(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw s11, 220(sp) # 4-byte Folded Spill
+; RV32M-NEXT: .cfi_offset ra, -4
+; RV32M-NEXT: .cfi_offset s0, -8
+; RV32M-NEXT: .cfi_offset s1, -12
+; RV32M-NEXT: .cfi_offset s2, -16
+; RV32M-NEXT: .cfi_offset s3, -20
+; RV32M-NEXT: .cfi_offset s4, -24
+; RV32M-NEXT: .cfi_offset s5, -28
+; RV32M-NEXT: .cfi_offset s6, -32
+; RV32M-NEXT: .cfi_offset s7, -36
+; RV32M-NEXT: .cfi_offset s8, -40
+; RV32M-NEXT: .cfi_offset s9, -44
+; RV32M-NEXT: .cfi_offset s10, -48
+; RV32M-NEXT: .cfi_offset s11, -52
+; RV32M-NEXT: sw a1, 32(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
+; RV32M-NEXT: lw a4, 0(a2)
+; RV32M-NEXT: lw s9, 4(a2)
+; RV32M-NEXT: lw s7, 8(a2)
+; RV32M-NEXT: lw s6, 0(a3)
+; RV32M-NEXT: lw a6, 4(a3)
+; RV32M-NEXT: lw a5, 8(a3)
+; RV32M-NEXT: lw t3, 12(a3)
+; RV32M-NEXT: lw t4, 12(a2)
+; RV32M-NEXT: or a0, a4, s9
+; RV32M-NEXT: snez a1, s7
+; RV32M-NEXT: snez a7, a0
+; RV32M-NEXT: add a1, t4, a1
+; RV32M-NEXT: snez t0, a4
+; RV32M-NEXT: sw s9, 56(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw s7, 52(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw a1, 12(sp) # 4-byte Folded Spill
+; RV32M-NEXT: bltz t4, .LBB0_2
+; RV32M-NEXT: # %bb.1: # %_udiv-special-cases_udiv-special-cases
+; RV32M-NEXT: mv s8, t4
+; RV32M-NEXT: mv s10, a4
+; RV32M-NEXT: j .LBB0_3
+; RV32M-NEXT: .LBB0_2:
+; RV32M-NEXT: neg a0, s7
+; RV32M-NEXT: neg a1, a1
+; RV32M-NEXT: neg a2, s9
+; RV32M-NEXT: sltu a3, a0, a7
+; RV32M-NEXT: sub s7, a0, a7
+; RV32M-NEXT: sub s9, a2, t0
+; RV32M-NEXT: sub s8, a1, a3
+; RV32M-NEXT: neg s10, a4
+; RV32M-NEXT: .LBB0_3: # %_udiv-special-cases_udiv-special-cases
+; RV32M-NEXT: sw a4, 60(sp) # 4-byte Folded Spill
+; RV32M-NEXT: or a0, s6, a6
+; RV32M-NEXT: snez a1, a5
+; RV32M-NEXT: snez a4, a0
+; RV32M-NEXT: add a1, t3, a1
+; RV32M-NEXT: snez t1, s6
+; RV32M-NEXT: sw s6, 40(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw a7, 24(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw t0, 16(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw a4, 8(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw t1, 4(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw a1, 0(sp) # 4-byte Folded Spill
+; RV32M-NEXT: bltz t3, .LBB0_5
+; RV32M-NEXT: # %bb.4: # %_udiv-special-cases_udiv-special-cases
+; RV32M-NEXT: mv s5, t3
+; RV32M-NEXT: mv t5, a5
+; RV32M-NEXT: mv a7, a6
+; RV32M-NEXT: sw s6, 76(sp) # 4-byte Folded Spill
+; RV32M-NEXT: j .LBB0_6
+; RV32M-NEXT: .LBB0_5:
+; RV32M-NEXT: neg a0, a5
+; RV32M-NEXT: neg a1, a1
+; RV32M-NEXT: neg a2, a6
+; RV32M-NEXT: sltu a3, a0, a4
+; RV32M-NEXT: sub t5, a0, a4
+; RV32M-NEXT: sub a7, a2, t1
+; RV32M-NEXT: sub s5, a1, a3
+; RV32M-NEXT: neg a0, s6
+; RV32M-NEXT: sw a0, 76(sp) # 4-byte Folded Spill
+; RV32M-NEXT: .LBB0_6: # %_udiv-special-cases_udiv-special-cases
+; RV32M-NEXT: lui a0, 349525
+; RV32M-NEXT: lui a1, 209715
+; RV32M-NEXT: lui a2, 61681
+; RV32M-NEXT: lui a3, 4112
+; RV32M-NEXT: addi s1, a0, 1365
+; RV32M-NEXT: addi s0, a1, 819
+; RV32M-NEXT: addi t6, a2, -241
+; RV32M-NEXT: addi t2, a3, 257
+; RV32M-NEXT: bnez a7, .LBB0_9
+; RV32M-NEXT: # %bb.7: # %_udiv-special-cases_udiv-special-cases
+; RV32M-NEXT: lw a1, 76(sp) # 4-byte Folded Reload
+; RV32M-NEXT: srli a0, a1, 1
+; RV32M-NEXT: or a0, a1, a0
+; RV32M-NEXT: srli a1, a0, 2
+; RV32M-NEXT: or a0, a0, a1
+; RV32M-NEXT: srli a1, a0, 4
+; RV32M-NEXT: or a0, a0, a1
+; RV32M-NEXT: srli a1, a0, 8
+; RV32M-NEXT: or a0, a0, a1
+; RV32M-NEXT: srli a1, a0, 16
+; RV32M-NEXT: or a0, a0, a1
+; RV32M-NEXT: not a0, a0
+; RV32M-NEXT: srli a1, a0, 1
+; RV32M-NEXT: and a1, a1, s1
+; RV32M-NEXT: sub a0, a0, a1
+; RV32M-NEXT: and a1, a0, s0
+; RV32M-NEXT: srli a0, a0, 2
+; RV32M-NEXT: and a0, a0, s0
+; RV32M-NEXT: add a0, a1, a0
+; RV32M-NEXT: srli a1, a0, 4
+; RV32M-NEXT: add a0, a0, a1
+; RV32M-NEXT: and a0, a0, t6
+; RV32M-NEXT: mul a0, a0, t2
+; RV32M-NEXT: srli a0, a0, 24
+; RV32M-NEXT: addi a1, a0, 32
+; RV32M-NEXT: or a2, t5, s5
+; RV32M-NEXT: beqz s5, .LBB0_10
+; RV32M-NEXT: .LBB0_8:
+; RV32M-NEXT: srli a0, s5, 1
+; RV32M-NEXT: or a0, s5, a0
+; RV32M-NEXT: srli a3, a0, 2
+; RV32M-NEXT: or a0, a0, a3
+; RV32M-NEXT: srli a3, a0, 4
+; RV32M-NEXT: or a0, a0, a3
+; RV32M-NEXT: srli a3, a0, 8
+; RV32M-NEXT: or a0, a0, a3
+; RV32M-NEXT: srli a3, a0, 16
+; RV32M-NEXT: or a0, a0, a3
+; RV32M-NEXT: not a0, a0
+; RV32M-NEXT: srli a3, a0, 1
+; RV32M-NEXT: and a3, a3, s1
+; RV32M-NEXT: sub a0, a0, a3
+; RV32M-NEXT: and a3, a0, s0
+; RV32M-NEXT: srli a0, a0, 2
+; RV32M-NEXT: and a0, a0, s0
+; RV32M-NEXT: add a0, a3, a0
+; RV32M-NEXT: srli a3, a0, 4
+; RV32M-NEXT: add a0, a0, a3
+; RV32M-NEXT: and a0, a0, t6
+; RV32M-NEXT: mul a0, a0, t2
+; RV32M-NEXT: srli a0, a0, 24
+; RV32M-NEXT: addi a4, a1, 64
+; RV32M-NEXT: beqz a2, .LBB0_11
+; RV32M-NEXT: j .LBB0_12
+; RV32M-NEXT: .LBB0_9:
+; RV32M-NEXT: srli a0, a7, 1
+; RV32M-NEXT: or a0, a7, a0
+; RV32M-NEXT: srli a1, a0, 2
+; RV32M-NEXT: or a0, a0, a1
+; RV32M-NEXT: srli a1, a0, 4
+; RV32M-NEXT: or a0, a0, a1
+; RV32M-NEXT: srli a1, a0, 8
+; RV32M-NEXT: or a0, a0, a1
+; RV32M-NEXT: srli a1, a0, 16
+; RV32M-NEXT: or a0, a0, a1
+; RV32M-NEXT: not a0, a0
+; RV32M-NEXT: srli a1, a0, 1
+; RV32M-NEXT: and a1, a1, s1
+; RV32M-NEXT: sub a0, a0, a1
+; RV32M-NEXT: and a1, a0, s0
+; RV32M-NEXT: srli a0, a0, 2
+; RV32M-NEXT: and a0, a0, s0
+; RV32M-NEXT: add a0, a1, a0
+; RV32M-NEXT: srli a1, a0, 4
+; RV32M-NEXT: add a0, a0, a1
+; RV32M-NEXT: and a0, a0, t6
+; RV32M-NEXT: mul a1, a0, t2
+; RV32M-NEXT: srli a1, a1, 24
+; RV32M-NEXT: or a2, t5, s5
+; RV32M-NEXT: bnez s5, .LBB0_8
+; RV32M-NEXT: .LBB0_10: # %_udiv-special-cases_udiv-special-cases
+; RV32M-NEXT: srli a0, t5, 1
+; RV32M-NEXT: or a0, t5, a0
+; RV32M-NEXT: srli a3, a0, 2
+; RV32M-NEXT: or a0, a0, a3
+; RV32M-NEXT: srli a3, a0, 4
+; RV32M-NEXT: or a0, a0, a3
+; RV32M-NEXT: srli a3, a0, 8
+; RV32M-NEXT: or a0, a0, a3
+; RV32M-NEXT: srli a3, a0, 16
+; RV32M-NEXT: or a0, a0, a3
+; RV32M-NEXT: not a0, a0
+; RV32M-NEXT: srli a3, a0, 1
+; RV32M-NEXT: and a3, a3, s1
+; RV32M-NEXT: sub a0, a0, a3
+; RV32M-NEXT: and a3, a0, s0
+; RV32M-NEXT: srli a0, a0, 2
+; RV32M-NEXT: and a0, a0, s0
+; RV32M-NEXT: add a0, a3, a0
+; RV32M-NEXT: srli a3, a0, 4
+; RV32M-NEXT: add a0, a0, a3
+; RV32M-NEXT: and a0, a0, t6
+; RV32M-NEXT: mul a0, a0, t2
+; RV32M-NEXT: srli a0, a0, 24
+; RV32M-NEXT: addi a0, a0, 32
+; RV32M-NEXT: addi a4, a1, 64
+; RV32M-NEXT: bnez a2, .LBB0_12
+; RV32M-NEXT: .LBB0_11: # %_udiv-special-cases_udiv-special-cases
+; RV32M-NEXT: mv a0, a4
+; RV32M-NEXT: .LBB0_12: # %_udiv-special-cases_udiv-special-cases
+; RV32M-NEXT: snez s2, a2
+; RV32M-NEXT: sw a6, 36(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw a5, 84(sp) # 4-byte Folded Spill
+; RV32M-NEXT: bnez s9, .LBB0_14
+; RV32M-NEXT: # %bb.13: # %_udiv-special-cases_udiv-special-cases
+; RV32M-NEXT: srli a2, s10, 1
+; RV32M-NEXT: or a2, s10, a2
+; RV32M-NEXT: srli a3, a2, 2
+; RV32M-NEXT: or a2, a2, a3
+; RV32M-NEXT: srli a3, a2, 4
+; RV32M-NEXT: or a2, a2, a3
+; RV32M-NEXT: srli a3, a2, 8
+; RV32M-NEXT: or a2, a2, a3
+; RV32M-NEXT: srli a3, a2, 16
+; RV32M-NEXT: or a2, a2, a3
+; RV32M-NEXT: not a2, a2
+; RV32M-NEXT: srli a3, a2, 1
+; RV32M-NEXT: and a3, a3, s1
+; RV32M-NEXT: sub a2, a2, a3
+; RV32M-NEXT: and a3, a2, s0
+; RV32M-NEXT: srli a2, a2, 2
+; RV32M-NEXT: and a2, a2, s0
+; RV32M-NEXT: add a2, a3, a2
+; RV32M-NEXT: srli a3, a2, 4
+; RV32M-NEXT: add a2, a2, a3
+; RV32M-NEXT: and a2, a2, t6
+; RV32M-NEXT: mul a2, a2, t2
+; RV32M-NEXT: srli a2, a2, 24
+; RV32M-NEXT: addi a3, a2, 32
+; RV32M-NEXT: j .LBB0_15
+; RV32M-NEXT: .LBB0_14:
+; RV32M-NEXT: srli a2, s9, 1
+; RV32M-NEXT: or a2, s9, a2
+; RV32M-NEXT: srli a3, a2, 2
+; RV32M-NEXT: or a2, a2, a3
+; RV32M-NEXT: srli a3, a2, 4
+; RV32M-NEXT: or a2, a2, a3
+; RV32M-NEXT: srli a3, a2, 8
+; RV32M-NEXT: or a2, a2, a3
+; RV32M-NEXT: srli a3, a2, 16
+; RV32M-NEXT: or a2, a2, a3
+; RV32M-NEXT: not a2, a2
+; RV32M-NEXT: srli a3, a2, 1
+; RV32M-NEXT: and a3, a3, s1
+; RV32M-NEXT: sub a2, a2, a3
+; RV32M-NEXT: and a3, a2, s0
+; RV32M-NEXT: srli a2, a2, 2
+; RV32M-NEXT: and a2, a2, s0
+; RV32M-NEXT: add a2, a3, a2
+; RV32M-NEXT: srli a3, a2, 4
+; RV32M-NEXT: add a2, a2, a3
+; RV32M-NEXT: and a2, a2, t6
+; RV32M-NEXT: mul a3, a2, t2
+; RV32M-NEXT: srli a3, a3, 24
+; RV32M-NEXT: .LBB0_15: # %_udiv-special-cases_udiv-special-cases
+; RV32M-NEXT: lw a2, 76(sp) # 4-byte Folded Reload
+; RV32M-NEXT: or a2, a2, t5
+; RV32M-NEXT: sw a7, 68(sp) # 4-byte Folded Spill
+; RV32M-NEXT: or a5, a7, s5
+; RV32M-NEXT: or a6, s10, s7
+; RV32M-NEXT: or a7, s9, s8
+; RV32M-NEXT: sltu a4, a4, a1
+; RV32M-NEXT: addi s2, s2, -1
+; RV32M-NEXT: addi a1, a3, 64
+; RV32M-NEXT: or s3, s7, s8
+; RV32M-NEXT: sltu t0, a1, a3
+; RV32M-NEXT: snez a3, s3
+; RV32M-NEXT: addi a3, a3, -1
+; RV32M-NEXT: bnez s8, .LBB0_17
+; RV32M-NEXT: # %bb.16: # %_udiv-special-cases_udiv-special-cases
+; RV32M-NEXT: srli t1, s7, 1
+; RV32M-NEXT: or t1, s7, t1
+; RV32M-NEXT: srli s4, t1, 2
+; RV32M-NEXT: or t1, t1, s4
+; RV32M-NEXT: srli s4, t1, 4
+; RV32M-NEXT: or t1, t1, s4
+; RV32M-NEXT: srli s4, t1, 8
+; RV32M-NEXT: or t1, t1, s4
+; RV32M-NEXT: srli s4, t1, 16
+; RV32M-NEXT: or t1, t1, s4
+; RV32M-NEXT: not t1, t1
+; RV32M-NEXT: srli s4, t1, 1
+; RV32M-NEXT: and s1, s4, s1
+; RV32M-NEXT: sub t1, t1, s1
+; RV32M-NEXT: and s1, t1, s0
+; RV32M-NEXT: srli t1, t1, 2
+; RV32M-NEXT: and t1, t1, s0
+; RV32M-NEXT: add t1, s1, t1
+; RV32M-NEXT: srli s0, t1, 4
+; RV32M-NEXT: add t1, t1, s0
+; RV32M-NEXT: and t1, t1, t6
+; RV32M-NEXT: mul t1, t1, t2
+; RV32M-NEXT: srli t1, t1, 24
+; RV32M-NEXT: addi t1, t1, 32
+; RV32M-NEXT: j .LBB0_18
+; RV32M-NEXT: .LBB0_17:
+; RV32M-NEXT: srli t1, s8, 1
+; RV32M-NEXT: or t1, s8, t1
+; RV32M-NEXT: srli s4, t1, 2
+; RV32M-NEXT: or t1, t1, s4
+; RV32M-NEXT: srli s4, t1, 4
+; RV32M-NEXT: or t1, t1, s4
+; RV32M-NEXT: srli s4, t1, 8
+; RV32M-NEXT: or t1, t1, s4
+; RV32M-NEXT: srli s4, t1, 16
+; RV32M-NEXT: or t1, t1, s4
+; RV32M-NEXT: not t1, t1
+; RV32M-NEXT: srli s4, t1, 1
+; RV32M-NEXT: and s1, s4, s1
+; RV32M-NEXT: sub t1, t1, s1
+; RV32M-NEXT: and s1, t1, s0
+; RV32M-NEXT: srli t1, t1, 2
+; RV32M-NEXT: and t1, t1, s0
+; RV32M-NEXT: add t1, s1, t1
+; RV32M-NEXT: srli s0, t1, 4
+; RV32M-NEXT: add t1, t1, s0
+; RV32M-NEXT: and t1, t1, t6
+; RV32M-NEXT: mul t1, t1, t2
+; RV32M-NEXT: srli t1, t1, 24
+; RV32M-NEXT: .LBB0_18: # %_udiv-special-cases_udiv-special-cases
+; RV32M-NEXT: or a5, a2, a5
+; RV32M-NEXT: or a6, a6, a7
+; RV32M-NEXT: and a2, s2, a4
+; RV32M-NEXT: and a3, a3, t0
+; RV32M-NEXT: lw t6, 40(sp) # 4-byte Folded Reload
+; RV32M-NEXT: bnez s3, .LBB0_20
+; RV32M-NEXT: # %bb.19: # %_udiv-special-cases_udiv-special-cases
+; RV32M-NEXT: mv t1, a1
+; RV32M-NEXT: .LBB0_20: # %_udiv-special-cases_udiv-special-cases
+; RV32M-NEXT: seqz a1, a5
+; RV32M-NEXT: seqz a4, a6
+; RV32M-NEXT: sltu a6, a0, t1
+; RV32M-NEXT: sub a7, a2, a3
+; RV32M-NEXT: mv a5, a6
+; RV32M-NEXT: beq a2, a3, .LBB0_22
+; RV32M-NEXT: # %bb.21: # %_udiv-special-cases_udiv-special-cases
+; RV32M-NEXT: sltu a5, a2, a3
+; RV32M-NEXT: .LBB0_22: # %_udiv-special-cases_udiv-special-cases
+; RV32M-NEXT: sub a2, a7, a6
+; RV32M-NEXT: or a3, a1, a4
+; RV32M-NEXT: neg a1, a5
+; RV32M-NEXT: seqz s2, a5
+; RV32M-NEXT: addi s2, s2, -1
+; RV32M-NEXT: sub a0, a0, t1
+; RV32M-NEXT: or a5, a1, s2
+; RV32M-NEXT: xor a4, t3, t4
+; RV32M-NEXT: lw s3, 36(sp) # 4-byte Folded Reload
+; RV32M-NEXT: beqz a5, .LBB0_24
+; RV32M-NEXT: # %bb.23: # %_udiv-special-cases_udiv-special-cases
+; RV32M-NEXT: snez a5, a5
+; RV32M-NEXT: j .LBB0_25
+; RV32M-NEXT: .LBB0_24:
+; RV32M-NEXT: snez a5, a2
+; RV32M-NEXT: sltiu a6, a0, 128
+; RV32M-NEXT: xori a6, a6, 1
+; RV32M-NEXT: or a5, a6, a5
+; RV32M-NEXT: .LBB0_25: # %_udiv-special-cases_udiv-special-cases
+; RV32M-NEXT: sw t4, 48(sp) # 4-byte Folded Spill
+; RV32M-NEXT: srai a4, a4, 31
+; RV32M-NEXT: sw a4, 44(sp) # 4-byte Folded Spill
+; RV32M-NEXT: or a5, a3, a5
+; RV32M-NEXT: addi a4, a5, -1
+; RV32M-NEXT: and ra, a4, s8
+; RV32M-NEXT: and s11, a4, s7
+; RV32M-NEXT: and a3, a4, s9
+; RV32M-NEXT: and a4, a4, s10
+; RV32M-NEXT: bnez a5, .LBB0_36
+; RV32M-NEXT: # %bb.26: # %_udiv-special-cases_udiv-special-cases
+; RV32M-NEXT: sw s5, 72(sp) # 4-byte Folded Spill
+; RV32M-NEXT: xori a5, a0, 127
+; RV32M-NEXT: or a5, a5, a1
+; RV32M-NEXT: or a6, a2, s2
+; RV32M-NEXT: or a5, a5, a6
+; RV32M-NEXT: beqz a5, .LBB0_36
+; RV32M-NEXT: # %bb.27: # %udiv-bb15
+; RV32M-NEXT: sw t3, 80(sp) # 4-byte Folded Spill
+; RV32M-NEXT: addi s11, a0, 1
+; RV32M-NEXT: li a3, 127
+; RV32M-NEXT: sub a0, a3, a0
+; RV32M-NEXT: sw zero, 184(sp)
+; RV32M-NEXT: sw zero, 188(sp)
+; RV32M-NEXT: sw zero, 192(sp)
+; RV32M-NEXT: sw zero, 196(sp)
+; RV32M-NEXT: sw s10, 200(sp)
+; RV32M-NEXT: sw s9, 204(sp)
+; RV32M-NEXT: sw s7, 208(sp)
+; RV32M-NEXT: sw s8, 212(sp)
+; RV32M-NEXT: addi a3, sp, 200
+; RV32M-NEXT: seqz ra, s11
+; RV32M-NEXT: srli a4, a0, 3
+; RV32M-NEXT: andi a5, a0, 31
+; RV32M-NEXT: add ra, a2, ra
+; RV32M-NEXT: andi a4, a4, 12
+; RV32M-NEXT: xori a2, a5, 31
+; RV32M-NEXT: or a5, s11, ra
+; RV32M-NEXT: sub a3, a3, a4
+; RV32M-NEXT: seqz s4, a5
+; RV32M-NEXT: lw a4, 0(a3)
+; RV32M-NEXT: lw a5, 4(a3)
+; RV32M-NEXT: lw a6, 8(a3)
+; RV32M-NEXT: lw a3, 12(a3)
+; RV32M-NEXT: add s4, a1, s4
+; RV32M-NEXT: sltu a1, s4, a1
+; RV32M-NEXT: or a7, s11, s4
+; RV32M-NEXT: add s2, s2, a1
+; RV32M-NEXT: or a1, ra, s2
+; RV32M-NEXT: srli t0, a6, 1
+; RV32M-NEXT: srli t1, a5, 1
+; RV32M-NEXT: or a7, a7, a1
+; RV32M-NEXT: srli a1, a4, 1
+; RV32M-NEXT: srl t0, t0, a2
+; RV32M-NEXT: srl t1, t1, a2
+; RV32M-NEXT: srl a2, a1, a2
+; RV32M-NEXT: sll a1, a3, a0
+; RV32M-NEXT: or a1, a1, t0
+; RV32M-NEXT: sll a3, a6, a0
+; RV32M-NEXT: sll s1, a5, a0
+; RV32M-NEXT: or s5, a3, t1
+; RV32M-NEXT: or s1, s1, a2
+; RV32M-NEXT: sll s0, a4, a0
+; RV32M-NEXT: li t6, 0
+; RV32M-NEXT: beqz a7, .LBB0_35
+; RV32M-NEXT: # %bb.28: # %udiv-preheader4
+; RV32M-NEXT: li a0, 0
+; RV32M-NEXT: li s3, 0
+; RV32M-NEXT: li a2, 0
+; RV32M-NEXT: sw zero, 168(sp)
+; RV32M-NEXT: sw zero, 172(sp)
+; RV32M-NEXT: sw zero, 176(sp)
+; RV32M-NEXT: sw zero, 180(sp)
+; RV32M-NEXT: sw s10, 152(sp)
+; RV32M-NEXT: sw s9, 156(sp)
+; RV32M-NEXT: sw s7, 160(sp)
+; RV32M-NEXT: sw s8, 164(sp)
+; RV32M-NEXT: srli a3, s11, 3
+; RV32M-NEXT: addi a4, sp, 152
+; RV32M-NEXT: andi a3, a3, 12
+; RV32M-NEXT: add a3, a4, a3
+; RV32M-NEXT: lw a4, 4(a3)
+; RV32M-NEXT: lw a5, 8(a3)
+; RV32M-NEXT: lw a6, 12(a3)
+; RV32M-NEXT: lw a3, 0(a3)
+; RV32M-NEXT: andi a7, s11, 31
+; RV32M-NEXT: xori a7, a7, 31
+; RV32M-NEXT: slli t0, a6, 1
+; RV32M-NEXT: slli t1, a5, 1
+; RV32M-NEXT: slli t2, a4, 1
+; RV32M-NEXT: sll t0, t0, a7
+; RV32M-NEXT: sll t1, t1, a7
+; RV32M-NEXT: sll t3, t2, a7
+; RV32M-NEXT: lw t4, 76(sp) # 4-byte Folded Reload
+; RV32M-NEXT: seqz t2, t4
+; RV32M-NEXT: srl a5, a5, s11
+; RV32M-NEXT: or a7, a5, t0
+; RV32M-NEXT: lw t0, 68(sp) # 4-byte Folded Reload
+; RV32M-NEXT: or a5, t4, t0
+; RV32M-NEXT: sub s7, t0, t2
+; RV32M-NEXT: seqz t0, a5
+; RV32M-NEXT: srl a4, a4, s11
+; RV32M-NEXT: or a5, a4, t1
+; RV32M-NEXT: sub s8, t5, t0
+; RV32M-NEXT: mv s6, t5
+; RV32M-NEXT: sltu a4, t5, t0
+; RV32M-NEXT: lw t0, 72(sp) # 4-byte Folded Reload
+; RV32M-NEXT: sub s9, t0, a4
+; RV32M-NEXT: srl a3, a3, s11
+; RV32M-NEXT: srl t2, a6, s11
+; RV32M-NEXT: or a4, a3, t3
+; RV32M-NEXT: addi t4, t4, -1
+; RV32M-NEXT: sw t4, 64(sp) # 4-byte Folded Spill
+; RV32M-NEXT: j .LBB0_30
+; RV32M-NEXT: .LBB0_29: # %udiv-do-while3
+; RV32M-NEXT: # in Loop: Header=BB0_30 Depth=1
+; RV32M-NEXT: srli t3, s5, 31
+; RV32M-NEXT: slli a1, a1, 1
+; RV32M-NEXT: sub t0, t0, s10
+; RV32M-NEXT: srli s10, s1, 31
+; RV32M-NEXT: slli s5, s5, 1
+; RV32M-NEXT: or a1, a1, t3
+; RV32M-NEXT: srli t3, s0, 31
+; RV32M-NEXT: slli s1, s1, 1
+; RV32M-NEXT: slli s0, s0, 1
+; RV32M-NEXT: or s5, s5, s10
+; RV32M-NEXT: lw t5, 72(sp) # 4-byte Folded Reload
+; RV32M-NEXT: and s10, a7, t5
+; RV32M-NEXT: or s1, s1, t3
+; RV32M-NEXT: and t3, a7, t4
+; RV32M-NEXT: or s0, t6, s0
+; RV32M-NEXT: sub t4, t1, t3
+; RV32M-NEXT: sltu t1, t1, t3
+; RV32M-NEXT: or t3, s11, ra
+; RV32M-NEXT: sub s10, a5, s10
+; RV32M-NEXT: seqz t5, s11
+; RV32M-NEXT: addi s11, s11, -1
+; RV32M-NEXT: andi t6, a7, 1
+; RV32M-NEXT: sub a5, t0, a3
+; RV32M-NEXT: seqz a3, t3
+; RV32M-NEXT: sub ra, ra, t5
+; RV32M-NEXT: or s1, a0, s1
+; RV32M-NEXT: or s5, s3, s5
+; RV32M-NEXT: or a1, a2, a1
+; RV32M-NEXT: sub a7, t4, t2
+; RV32M-NEXT: sltu a0, t4, t2
+; RV32M-NEXT: sub a2, s10, t1
+; RV32M-NEXT: sltu t0, s4, a3
+; RV32M-NEXT: sub s4, s4, a3
+; RV32M-NEXT: sub t2, a2, a0
+; RV32M-NEXT: sub s2, s2, t0
+; RV32M-NEXT: or a0, ra, s2
+; RV32M-NEXT: or a2, s11, s4
+; RV32M-NEXT: or a3, a2, a0
+; RV32M-NEXT: sub a4, a6, a4
+; RV32M-NEXT: li a0, 0
+; RV32M-NEXT: li s3, 0
+; RV32M-NEXT: li a2, 0
+; RV32M-NEXT: beqz a3, .LBB0_35
+; RV32M-NEXT: .LBB0_30: # %udiv-do-while3
+; RV32M-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32M-NEXT: srli a3, a4, 31
+; RV32M-NEXT: slli a6, a5, 1
+; RV32M-NEXT: slli a4, a4, 1
+; RV32M-NEXT: or t0, a6, a3
+; RV32M-NEXT: srli a3, a1, 31
+; RV32M-NEXT: or a6, a4, a3
+; RV32M-NEXT: beq s7, t0, .LBB0_32
+; RV32M-NEXT: # %bb.31: # %udiv-do-while3
+; RV32M-NEXT: # in Loop: Header=BB0_30 Depth=1
+; RV32M-NEXT: sltu a3, s7, t0
+; RV32M-NEXT: j .LBB0_33
+; RV32M-NEXT: .LBB0_32: # in Loop: Header=BB0_30 Depth=1
+; RV32M-NEXT: lw a3, 64(sp) # 4-byte Folded Reload
+; RV32M-NEXT: sltu a3, a3, a6
+; RV32M-NEXT: .LBB0_33: # %udiv-do-while3
+; RV32M-NEXT: # in Loop: Header=BB0_30 Depth=1
+; RV32M-NEXT: mv t4, s6
+; RV32M-NEXT: srli a4, a7, 31
+; RV32M-NEXT: slli t2, t2, 1
+; RV32M-NEXT: srli t1, a5, 31
+; RV32M-NEXT: slli a7, a7, 1
+; RV32M-NEXT: or a5, t2, a4
+; RV32M-NEXT: or t1, a7, t1
+; RV32M-NEXT: sub a4, s8, t1
+; RV32M-NEXT: sltu a7, s8, t1
+; RV32M-NEXT: sub t2, s9, a5
+; RV32M-NEXT: sltu a3, a4, a3
+; RV32M-NEXT: sub a4, t2, a7
+; RV32M-NEXT: sub a4, a4, a3
+; RV32M-NEXT: srai a7, a4, 31
+; RV32M-NEXT: lw a3, 76(sp) # 4-byte Folded Reload
+; RV32M-NEXT: and a4, a7, a3
+; RV32M-NEXT: lw a3, 68(sp) # 4-byte Folded Reload
+; RV32M-NEXT: and s10, a7, a3
+; RV32M-NEXT: sltu a3, a6, a4
+; RV32M-NEXT: mv t2, a3
+; RV32M-NEXT: beq t0, s10, .LBB0_29
+; RV32M-NEXT: # %bb.34: # %udiv-do-while3
+; RV32M-NEXT: # in Loop: Header=BB0_30 Depth=1
+; RV32M-NEXT: sltu t2, t0, s10
+; RV32M-NEXT: j .LBB0_29
+; RV32M-NEXT: .LBB0_35: # %udiv-loop-exit2
+; RV32M-NEXT: srli a0, s0, 31
+; RV32M-NEXT: slli a3, s1, 1
+; RV32M-NEXT: srli s1, s1, 31
+; RV32M-NEXT: or a3, a3, a0
+; RV32M-NEXT: slli a0, s5, 1
+; RV32M-NEXT: srli a2, s5, 31
+; RV32M-NEXT: slli a1, a1, 1
+; RV32M-NEXT: slli s0, s0, 1
+; RV32M-NEXT: or s11, a0, s1
+; RV32M-NEXT: or ra, a1, a2
+; RV32M-NEXT: or a4, t6, s0
+; RV32M-NEXT: lw t6, 40(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw t3, 80(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw s3, 36(sp) # 4-byte Folded Reload
+; RV32M-NEXT: .LBB0_36: # %udiv-end1
+; RV32M-NEXT: lw s4, 44(sp) # 4-byte Folded Reload
+; RV32M-NEXT: xor a0, a4, s4
+; RV32M-NEXT: sw a0, 64(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sltu s5, a0, s4
+; RV32M-NEXT: xor a1, a3, s4
+; RV32M-NEXT: sw s5, 40(sp) # 4-byte Folded Spill
+; RV32M-NEXT: lw a6, 84(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw a4, 24(sp) # 4-byte Folded Reload
+; RV32M-NEXT: beqz a3, .LBB0_38
+; RV32M-NEXT: # %bb.37: # %udiv-end1
+; RV32M-NEXT: sltu s5, a1, s4
+; RV32M-NEXT: .LBB0_38: # %udiv-end1
+; RV32M-NEXT: lw s10, 52(sp) # 4-byte Folded Reload
+; RV32M-NEXT: neg a0, s10
+; RV32M-NEXT: lw s6, 60(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw s8, 56(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw a3, 48(sp) # 4-byte Folded Reload
+; RV32M-NEXT: sw a1, 20(sp) # 4-byte Folded Spill
+; RV32M-NEXT: bltz a3, .LBB0_40
+; RV32M-NEXT: # %bb.39: # %udiv-end1
+; RV32M-NEXT: mv s9, a3
+; RV32M-NEXT: bltz a3, .LBB0_41
+; RV32M-NEXT: j .LBB0_42
+; RV32M-NEXT: .LBB0_40:
+; RV32M-NEXT: sltu a1, a0, a4
+; RV32M-NEXT: lw a2, 12(sp) # 4-byte Folded Reload
+; RV32M-NEXT: neg a2, a2
+; RV32M-NEXT: sub s9, a2, a1
+; RV32M-NEXT: bgez a3, .LBB0_42
+; RV32M-NEXT: .LBB0_41:
+; RV32M-NEXT: sub s10, a0, a4
+; RV32M-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
+; RV32M-NEXT: add a0, s8, a0
+; RV32M-NEXT: neg s8, a0
+; RV32M-NEXT: neg s6, s6
+; RV32M-NEXT: .LBB0_42: # %udiv-end1
+; RV32M-NEXT: bgez t3, .LBB0_44
+; RV32M-NEXT: # %bb.43:
+; RV32M-NEXT: neg a0, a6
+; RV32M-NEXT: lw a1, 0(sp) # 4-byte Folded Reload
+; RV32M-NEXT: neg a1, a1
+; RV32M-NEXT: lw a2, 4(sp) # 4-byte Folded Reload
+; RV32M-NEXT: add a2, s3, a2
+; RV32M-NEXT: lw a4, 8(sp) # 4-byte Folded Reload
+; RV32M-NEXT: sltu a3, a0, a4
+; RV32M-NEXT: sub a6, a0, a4
+; RV32M-NEXT: neg s3, a2
+; RV32M-NEXT: sub t3, a1, a3
+; RV32M-NEXT: neg t6, t6
+; RV32M-NEXT: .LBB0_44: # %udiv-end1
+; RV32M-NEXT: lui a0, 349525
+; RV32M-NEXT: lui a1, 209715
+; RV32M-NEXT: lui a2, 61681
+; RV32M-NEXT: lui a3, 4112
+; RV32M-NEXT: addi s2, a0, 1365
+; RV32M-NEXT: addi s1, a1, 819
+; RV32M-NEXT: addi a1, a2, -241
+; RV32M-NEXT: addi s0, a3, 257
+; RV32M-NEXT: bnez s3, .LBB0_47
+; RV32M-NEXT: # %bb.45: # %udiv-end1
+; RV32M-NEXT: srli a0, t6, 1
+; RV32M-NEXT: or a0, t6, a0
+; RV32M-NEXT: srli a2, a0, 2
+; RV32M-NEXT: or a0, a0, a2
+; RV32M-NEXT: srli a2, a0, 4
+; RV32M-NEXT: or a0, a0, a2
+; RV32M-NEXT: srli a2, a0, 8
+; RV32M-NEXT: or a0, a0, a2
+; RV32M-NEXT: srli a2, a0, 16
+; RV32M-NEXT: or a0, a0, a2
+; RV32M-NEXT: not a0, a0
+; RV32M-NEXT: srli a2, a0, 1
+; RV32M-NEXT: and a2, a2, s2
+; RV32M-NEXT: sub a0, a0, a2
+; RV32M-NEXT: and a2, a0, s1
+; RV32M-NEXT: srli a0, a0, 2
+; RV32M-NEXT: and a0, a0, s1
+; RV32M-NEXT: add a0, a2, a0
+; RV32M-NEXT: srli a2, a0, 4
+; RV32M-NEXT: add a0, a0, a2
+; RV32M-NEXT: and a0, a0, a1
+; RV32M-NEXT: mul a0, a0, s0
+; RV32M-NEXT: srli a0, a0, 24
+; RV32M-NEXT: addi a2, a0, 32
+; RV32M-NEXT: or a5, a6, t3
+; RV32M-NEXT: beqz t3, .LBB0_48
+; RV32M-NEXT: .LBB0_46:
+; RV32M-NEXT: srli a0, t3, 1
+; RV32M-NEXT: or a0, t3, a0
+; RV32M-NEXT: srli a3, a0, 2
+; RV32M-NEXT: or a0, a0, a3
+; RV32M-NEXT: srli a3, a0, 4
+; RV32M-NEXT: or a0, a0, a3
+; RV32M-NEXT: srli a3, a0, 8
+; RV32M-NEXT: or a0, a0, a3
+; RV32M-NEXT: srli a3, a0, 16
+; RV32M-NEXT: or a0, a0, a3
+; RV32M-NEXT: not a0, a0
+; RV32M-NEXT: srli a3, a0, 1
+; RV32M-NEXT: and a3, a3, s2
+; RV32M-NEXT: sub a0, a0, a3
+; RV32M-NEXT: and a3, a0, s1
+; RV32M-NEXT: srli a0, a0, 2
+; RV32M-NEXT: and a0, a0, s1
+; RV32M-NEXT: add a0, a3, a0
+; RV32M-NEXT: srli a3, a0, 4
+; RV32M-NEXT: add a0, a0, a3
+; RV32M-NEXT: and a0, a0, a1
+; RV32M-NEXT: mul a0, a0, s0
+; RV32M-NEXT: srli a0, a0, 24
+; RV32M-NEXT: addi a4, a2, 64
+; RV32M-NEXT: beqz a5, .LBB0_49
+; RV32M-NEXT: j .LBB0_50
+; RV32M-NEXT: .LBB0_47:
+; RV32M-NEXT: srli a0, s3, 1
+; RV32M-NEXT: or a0, s3, a0
+; RV32M-NEXT: srli a2, a0, 2
+; RV32M-NEXT: or a0, a0, a2
+; RV32M-NEXT: srli a2, a0, 4
+; RV32M-NEXT: or a0, a0, a2
+; RV32M-NEXT: srli a2, a0, 8
+; RV32M-NEXT: or a0, a0, a2
+; RV32M-NEXT: srli a2, a0, 16
+; RV32M-NEXT: or a0, a0, a2
+; RV32M-NEXT: not a0, a0
+; RV32M-NEXT: srli a2, a0, 1
+; RV32M-NEXT: and a2, a2, s2
+; RV32M-NEXT: sub a0, a0, a2
+; RV32M-NEXT: and a2, a0, s1
+; RV32M-NEXT: srli a0, a0, 2
+; RV32M-NEXT: and a0, a0, s1
+; RV32M-NEXT: add a0, a2, a0
+; RV32M-NEXT: srli a2, a0, 4
+; RV32M-NEXT: add a0, a0, a2
+; RV32M-NEXT: and a0, a0, a1
+; RV32M-NEXT: mul a2, a0, s0
+; RV32M-NEXT: srli a2, a2, 24
+; RV32M-NEXT: or a5, a6, t3
+; RV32M-NEXT: bnez t3, .LBB0_46
+; RV32M-NEXT: .LBB0_48: # %udiv-end1
+; RV32M-NEXT: srli a0, a6, 1
+; RV32M-NEXT: or a0, a6, a0
+; RV32M-NEXT: srli a3, a0, 2
+; RV32M-NEXT: or a0, a0, a3
+; RV32M-NEXT: srli a3, a0, 4
+; RV32M-NEXT: or a0, a0, a3
+; RV32M-NEXT: srli a3, a0, 8
+; RV32M-NEXT: or a0, a0, a3
+; RV32M-NEXT: srli a3, a0, 16
+; RV32M-NEXT: or a0, a0, a3
+; RV32M-NEXT: not a0, a0
+; RV32M-NEXT: srli a3, a0, 1
+; RV32M-NEXT: and a3, a3, s2
+; RV32M-NEXT: sub a0, a0, a3
+; RV32M-NEXT: and a3, a0, s1
+; RV32M-NEXT: srli a0, a0, 2
+; RV32M-NEXT: and a0, a0, s1
+; RV32M-NEXT: add a0, a3, a0
+; RV32M-NEXT: srli a3, a0, 4
+; RV32M-NEXT: add a0, a0, a3
+; RV32M-NEXT: and a0, a0, a1
+; RV32M-NEXT: mul a0, a0, s0
+; RV32M-NEXT: srli a0, a0, 24
+; RV32M-NEXT: addi a0, a0, 32
+; RV32M-NEXT: addi a4, a2, 64
+; RV32M-NEXT: bnez a5, .LBB0_50
+; RV32M-NEXT: .LBB0_49: # %udiv-end1
+; RV32M-NEXT: mv a0, a4
+; RV32M-NEXT: .LBB0_50: # %udiv-end1
+; RV32M-NEXT: snez t0, a5
+; RV32M-NEXT: bnez s8, .LBB0_52
+; RV32M-NEXT: # %bb.51: # %udiv-end1
+; RV32M-NEXT: srli a3, s6, 1
+; RV32M-NEXT: or a3, s6, a3
+; RV32M-NEXT: srli a5, a3, 2
+; RV32M-NEXT: or a3, a3, a5
+; RV32M-NEXT: srli a5, a3, 4
+; RV32M-NEXT: or a3, a3, a5
+; RV32M-NEXT: srli a5, a3, 8
+; RV32M-NEXT: or a3, a3, a5
+; RV32M-NEXT: srli a5, a3, 16
+; RV32M-NEXT: or a3, a3, a5
+; RV32M-NEXT: not a3, a3
+; RV32M-NEXT: srli a5, a3, 1
+; RV32M-NEXT: and a5, a5, s2
+; RV32M-NEXT: sub a3, a3, a5
+; RV32M-NEXT: and a5, a3, s1
+; RV32M-NEXT: srli a3, a3, 2
+; RV32M-NEXT: and a3, a3, s1
+; RV32M-NEXT: add a3, a5, a3
+; RV32M-NEXT: srli a5, a3, 4
+; RV32M-NEXT: add a3, a3, a5
+; RV32M-NEXT: and a3, a3, a1
+; RV32M-NEXT: mul a3, a3, s0
+; RV32M-NEXT: srli a3, a3, 24
+; RV32M-NEXT: addi a3, a3, 32
+; RV32M-NEXT: j .LBB0_53
+; RV32M-NEXT: .LBB0_52:
+; RV32M-NEXT: srli a3, s8, 1
+; RV32M-NEXT: or a3, s8, a3
+; RV32M-NEXT: srli a5, a3, 2
+; RV32M-NEXT: or a3, a3, a5
+; RV32M-NEXT: srli a5, a3, 4
+; RV32M-NEXT: or a3, a3, a5
+; RV32M-NEXT: srli a5, a3, 8
+; RV32M-NEXT: or a3, a3, a5
+; RV32M-NEXT: srli a5, a3, 16
+; RV32M-NEXT: or a3, a3, a5
+; RV32M-NEXT: not a3, a3
+; RV32M-NEXT: srli a5, a3, 1
+; RV32M-NEXT: and a5, a5, s2
+; RV32M-NEXT: sub a3, a3, a5
+; RV32M-NEXT: and a5, a3, s1
+; RV32M-NEXT: srli a3, a3, 2
+; RV32M-NEXT: and a3, a3, s1
+; RV32M-NEXT: add a3, a5, a3
+; RV32M-NEXT: srli a5, a3, 4
+; RV32M-NEXT: add a3, a3, a5
+; RV32M-NEXT: and a3, a3, a1
+; RV32M-NEXT: mul a3, a3, s0
+; RV32M-NEXT: srli a3, a3, 24
+; RV32M-NEXT: .LBB0_53: # %udiv-end1
+; RV32M-NEXT: sw a6, 84(sp) # 4-byte Folded Spill
+; RV32M-NEXT: or a5, t6, a6
+; RV32M-NEXT: sw t3, 80(sp) # 4-byte Folded Spill
+; RV32M-NEXT: or a6, s3, t3
+; RV32M-NEXT: or a7, s6, s10
+; RV32M-NEXT: or t2, s8, s9
+; RV32M-NEXT: sltu a4, a4, a2
+; RV32M-NEXT: addi t0, t0, -1
+; RV32M-NEXT: addi a2, a3, 64
+; RV32M-NEXT: or t3, s10, s9
+; RV32M-NEXT: sltu t4, a2, a3
+; RV32M-NEXT: snez a3, t3
+; RV32M-NEXT: addi a3, a3, -1
+; RV32M-NEXT: bnez s9, .LBB0_55
+; RV32M-NEXT: # %bb.54: # %udiv-end1
+; RV32M-NEXT: srli t1, s10, 1
+; RV32M-NEXT: or t1, s10, t1
+; RV32M-NEXT: srli t5, t1, 2
+; RV32M-NEXT: or t1, t1, t5
+; RV32M-NEXT: srli t5, t1, 4
+; RV32M-NEXT: or t1, t1, t5
+; RV32M-NEXT: srli t5, t1, 8
+; RV32M-NEXT: or t1, t1, t5
+; RV32M-NEXT: srli t5, t1, 16
+; RV32M-NEXT: or t1, t1, t5
+; RV32M-NEXT: not t1, t1
+; RV32M-NEXT: srli t5, t1, 1
+; RV32M-NEXT: and t5, t5, s2
+; RV32M-NEXT: sub t1, t1, t5
+; RV32M-NEXT: and t5, t1, s1
+; RV32M-NEXT: srli t1, t1, 2
+; RV32M-NEXT: and t1, t1, s1
+; RV32M-NEXT: add t1, t5, t1
+; RV32M-NEXT: srli t5, t1, 4
+; RV32M-NEXT: add t1, t1, t5
+; RV32M-NEXT: and a1, t1, a1
+; RV32M-NEXT: mul a1, a1, s0
+; RV32M-NEXT: srli a1, a1, 24
+; RV32M-NEXT: addi t1, a1, 32
+; RV32M-NEXT: j .LBB0_56
+; RV32M-NEXT: .LBB0_55:
+; RV32M-NEXT: srli t1, s9, 1
+; RV32M-NEXT: or t1, s9, t1
+; RV32M-NEXT: srli t5, t1, 2
+; RV32M-NEXT: or t1, t1, t5
+; RV32M-NEXT: srli t5, t1, 4
+; RV32M-NEXT: or t1, t1, t5
+; RV32M-NEXT: srli t5, t1, 8
+; RV32M-NEXT: or t1, t1, t5
+; RV32M-NEXT: srli t5, t1, 16
+; RV32M-NEXT: or t1, t1, t5
+; RV32M-NEXT: not t1, t1
+; RV32M-NEXT: srli t5, t1, 1
+; RV32M-NEXT: and t5, t5, s2
+; RV32M-NEXT: sub t1, t1, t5
+; RV32M-NEXT: and t5, t1, s1
+; RV32M-NEXT: srli t1, t1, 2
+; RV32M-NEXT: and t1, t1, s1
+; RV32M-NEXT: add t1, t5, t1
+; RV32M-NEXT: srli t5, t1, 4
+; RV32M-NEXT: add t1, t1, t5
+; RV32M-NEXT: and a1, t1, a1
+; RV32M-NEXT: mul a1, a1, s0
+; RV32M-NEXT: srli t1, a1, 24
+; RV32M-NEXT: .LBB0_56: # %udiv-end1
+; RV32M-NEXT: or a5, a5, a6
+; RV32M-NEXT: or a6, a7, t2
+; RV32M-NEXT: and a1, t0, a4
+; RV32M-NEXT: and a3, a3, t4
+; RV32M-NEXT: bnez t3, .LBB0_58
+; RV32M-NEXT: # %bb.57: # %udiv-end1
+; RV32M-NEXT: mv t1, a2
+; RV32M-NEXT: .LBB0_58: # %udiv-end1
+; RV32M-NEXT: seqz a4, a5
+; RV32M-NEXT: seqz a5, a6
+; RV32M-NEXT: sltu a6, a0, t1
+; RV32M-NEXT: sub t0, a1, a3
+; RV32M-NEXT: mv a7, a6
+; RV32M-NEXT: beq a1, a3, .LBB0_60
+; RV32M-NEXT: # %bb.59: # %udiv-end1
+; RV32M-NEXT: sltu a7, a1, a3
+; RV32M-NEXT: .LBB0_60: # %udiv-end1
+; RV32M-NEXT: xor a2, ra, s4
+; RV32M-NEXT: xor a3, s11, s4
+; RV32M-NEXT: sub s0, t0, a6
+; RV32M-NEXT: or a4, a4, a5
+; RV32M-NEXT: neg a1, a7
+; RV32M-NEXT: seqz s7, a7
+; RV32M-NEXT: addi s7, s7, -1
+; RV32M-NEXT: or a5, a1, s7
+; RV32M-NEXT: sub a0, a0, t1
+; RV32M-NEXT: beqz a5, .LBB0_62
+; RV32M-NEXT: # %bb.61: # %udiv-end1
+; RV32M-NEXT: snez a5, a5
+; RV32M-NEXT: j .LBB0_63
+; RV32M-NEXT: .LBB0_62:
+; RV32M-NEXT: snez a5, s0
+; RV32M-NEXT: sltiu a6, a0, 128
+; RV32M-NEXT: xori a6, a6, 1
+; RV32M-NEXT: or a5, a6, a5
+; RV32M-NEXT: .LBB0_63: # %udiv-end1
+; RV32M-NEXT: sub s11, a3, s4
+; RV32M-NEXT: sltu t1, a3, s4
+; RV32M-NEXT: sub t2, a2, s4
+; RV32M-NEXT: or a5, a4, a5
+; RV32M-NEXT: addi a2, a5, -1
+; RV32M-NEXT: and a7, a2, s9
+; RV32M-NEXT: and t0, a2, s10
+; RV32M-NEXT: and a4, a2, s8
+; RV32M-NEXT: and a6, a2, s6
+; RV32M-NEXT: sw s9, 68(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw s5, 36(sp) # 4-byte Folded Spill
+; RV32M-NEXT: bnez a5, .LBB0_75
+; RV32M-NEXT: # %bb.64: # %udiv-end1
+; RV32M-NEXT: xori a2, a0, 127
+; RV32M-NEXT: or a2, a2, a1
+; RV32M-NEXT: or a3, s0, s7
+; RV32M-NEXT: or a2, a2, a3
+; RV32M-NEXT: beqz a2, .LBB0_75
+; RV32M-NEXT: # %bb.65: # %udiv-bb1
+; RV32M-NEXT: sw t2, 12(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw t1, 16(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw s11, 24(sp) # 4-byte Folded Spill
+; RV32M-NEXT: addi s4, a0, 1
+; RV32M-NEXT: li a2, 127
+; RV32M-NEXT: sub a2, a2, a0
+; RV32M-NEXT: sw zero, 120(sp)
+; RV32M-NEXT: sw zero, 124(sp)
+; RV32M-NEXT: sw zero, 128(sp)
+; RV32M-NEXT: sw zero, 132(sp)
+; RV32M-NEXT: sw s6, 136(sp)
+; RV32M-NEXT: sw s8, 140(sp)
+; RV32M-NEXT: sw s10, 144(sp)
+; RV32M-NEXT: lw t3, 68(sp) # 4-byte Folded Reload
+; RV32M-NEXT: sw t3, 148(sp)
+; RV32M-NEXT: addi a0, sp, 136
+; RV32M-NEXT: seqz a3, s4
+; RV32M-NEXT: srli a4, a2, 3
+; RV32M-NEXT: andi a5, a2, 31
+; RV32M-NEXT: add s0, s0, a3
+; RV32M-NEXT: andi a4, a4, 12
+; RV32M-NEXT: xori a3, a5, 31
+; RV32M-NEXT: or a5, s4, s0
+; RV32M-NEXT: sub a0, a0, a4
+; RV32M-NEXT: seqz s9, a5
+; RV32M-NEXT: lw a4, 0(a0)
+; RV32M-NEXT: lw a5, 4(a0)
+; RV32M-NEXT: lw a6, 8(a0)
+; RV32M-NEXT: lw a0, 12(a0)
+; RV32M-NEXT: add s9, a1, s9
+; RV32M-NEXT: sltu a1, s9, a1
+; RV32M-NEXT: or a7, s4, s9
+; RV32M-NEXT: add s7, s7, a1
+; RV32M-NEXT: or a1, s0, s7
+; RV32M-NEXT: srli t0, a6, 1
+; RV32M-NEXT: srli t1, a5, 1
+; RV32M-NEXT: or a7, a7, a1
+; RV32M-NEXT: srli a1, a4, 1
+; RV32M-NEXT: srl t0, t0, a3
+; RV32M-NEXT: srl t1, t1, a3
+; RV32M-NEXT: srl a1, a1, a3
+; RV32M-NEXT: sll a0, a0, a2
+; RV32M-NEXT: or a0, a0, t0
+; RV32M-NEXT: sll a3, a6, a2
+; RV32M-NEXT: sll a5, a5, a2
+; RV32M-NEXT: mv a6, s10
+; RV32M-NEXT: or s10, a3, t1
+; RV32M-NEXT: or a1, a5, a1
+; RV32M-NEXT: sll s5, a4, a2
+; RV32M-NEXT: sw s6, 60(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw s8, 56(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw a6, 52(sp) # 4-byte Folded Spill
+; RV32M-NEXT: beqz a7, .LBB0_73
+; RV32M-NEXT: # %bb.66: # %udiv-preheader
+; RV32M-NEXT: mv t2, s8
+; RV32M-NEXT: li s8, 0
+; RV32M-NEXT: li s11, 0
+; RV32M-NEXT: li ra, 0
+; RV32M-NEXT: li a2, 0
+; RV32M-NEXT: sw zero, 104(sp)
+; RV32M-NEXT: sw zero, 108(sp)
+; RV32M-NEXT: sw zero, 112(sp)
+; RV32M-NEXT: sw zero, 116(sp)
+; RV32M-NEXT: sw s6, 88(sp)
+; RV32M-NEXT: sw t2, 92(sp)
+; RV32M-NEXT: sw a6, 96(sp)
+; RV32M-NEXT: sw t3, 100(sp)
+; RV32M-NEXT: srli a3, s4, 3
+; RV32M-NEXT: addi a4, sp, 88
+; RV32M-NEXT: andi a3, a3, 12
+; RV32M-NEXT: add a3, a4, a3
+; RV32M-NEXT: lw a4, 4(a3)
+; RV32M-NEXT: lw a5, 8(a3)
+; RV32M-NEXT: lw a6, 12(a3)
+; RV32M-NEXT: lw a3, 0(a3)
+; RV32M-NEXT: andi a7, s4, 31
+; RV32M-NEXT: xori a7, a7, 31
+; RV32M-NEXT: slli t0, a6, 1
+; RV32M-NEXT: slli t1, a5, 1
+; RV32M-NEXT: slli t2, a4, 1
+; RV32M-NEXT: sll t0, t0, a7
+; RV32M-NEXT: sll t1, t1, a7
+; RV32M-NEXT: sll a7, t2, a7
+; RV32M-NEXT: seqz t2, t6
+; RV32M-NEXT: srl a5, a5, s4
+; RV32M-NEXT: or t5, a5, t0
+; RV32M-NEXT: or a5, t6, s3
+; RV32M-NEXT: sub s6, s3, t2
+; RV32M-NEXT: seqz t0, a5
+; RV32M-NEXT: srl a4, a4, s4
+; RV32M-NEXT: or a5, a4, t1
+; RV32M-NEXT: lw a4, 84(sp) # 4-byte Folded Reload
+; RV32M-NEXT: sub t3, a4, t0
+; RV32M-NEXT: sltu a4, a4, t0
+; RV32M-NEXT: lw t0, 80(sp) # 4-byte Folded Reload
+; RV32M-NEXT: sub a4, t0, a4
+; RV32M-NEXT: sw a4, 76(sp) # 4-byte Folded Spill
+; RV32M-NEXT: srl a3, a3, s4
+; RV32M-NEXT: srl t2, a6, s4
+; RV32M-NEXT: or a4, a3, a7
+; RV32M-NEXT: addi a3, t6, -1
+; RV32M-NEXT: sw a3, 72(sp) # 4-byte Folded Spill
+; RV32M-NEXT: j .LBB0_68
+; RV32M-NEXT: .LBB0_67: # %udiv-do-while
+; RV32M-NEXT: # in Loop: Header=BB0_68 Depth=1
+; RV32M-NEXT: srli s3, s10, 31
+; RV32M-NEXT: slli a0, a0, 1
+; RV32M-NEXT: sub t0, t0, s2
+; RV32M-NEXT: srli s2, a1, 31
+; RV32M-NEXT: slli s10, s10, 1
+; RV32M-NEXT: or a0, a0, s3
+; RV32M-NEXT: srli s3, s5, 31
+; RV32M-NEXT: slli a1, a1, 1
+; RV32M-NEXT: slli s5, s5, 1
+; RV32M-NEXT: or s2, s10, s2
+; RV32M-NEXT: lw t6, 80(sp) # 4-byte Folded Reload
+; RV32M-NEXT: and s10, t5, t6
+; RV32M-NEXT: or a1, a1, s3
+; RV32M-NEXT: lw t6, 84(sp) # 4-byte Folded Reload
+; RV32M-NEXT: and s3, t5, t6
+; RV32M-NEXT: or s5, s8, s5
+; RV32M-NEXT: sub s1, t1, s3
+; RV32M-NEXT: sltu t1, t1, s3
+; RV32M-NEXT: or s3, s4, s0
+; RV32M-NEXT: sub t6, a5, s10
+; RV32M-NEXT: seqz s10, s4
+; RV32M-NEXT: addi s4, s4, -1
+; RV32M-NEXT: andi s8, t5, 1
+; RV32M-NEXT: sub a5, t0, a3
+; RV32M-NEXT: seqz a3, s3
+; RV32M-NEXT: sub s0, s0, s10
+; RV32M-NEXT: or a1, s11, a1
+; RV32M-NEXT: or s10, ra, s2
+; RV32M-NEXT: or a0, a2, a0
+; RV32M-NEXT: sub t5, s1, t2
+; RV32M-NEXT: sltu a2, s1, t2
+; RV32M-NEXT: sub t0, t6, t1
+; RV32M-NEXT: sltu t1, s9, a3
+; RV32M-NEXT: sub s9, s9, a3
+; RV32M-NEXT: sub t2, t0, a2
+; RV32M-NEXT: sub s7, s7, t1
+; RV32M-NEXT: or a2, s0, s7
+; RV32M-NEXT: or a3, s4, s9
+; RV32M-NEXT: or a3, a3, a2
+; RV32M-NEXT: sub a4, a6, a4
+; RV32M-NEXT: li s11, 0
+; RV32M-NEXT: li ra, 0
+; RV32M-NEXT: li a2, 0
+; RV32M-NEXT: mv t6, a7
+; RV32M-NEXT: mv s3, t4
+; RV32M-NEXT: beqz a3, .LBB0_74
+; RV32M-NEXT: .LBB0_68: # %udiv-do-while
+; RV32M-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32M-NEXT: srli a3, a4, 31
+; RV32M-NEXT: slli a6, a5, 1
+; RV32M-NEXT: slli a4, a4, 1
+; RV32M-NEXT: or t0, a6, a3
+; RV32M-NEXT: srli a3, a0, 31
+; RV32M-NEXT: or a6, a4, a3
+; RV32M-NEXT: beq s6, t0, .LBB0_70
+; RV32M-NEXT: # %bb.69: # %udiv-do-while
+; RV32M-NEXT: # in Loop: Header=BB0_68 Depth=1
+; RV32M-NEXT: sltu a3, s6, t0
+; RV32M-NEXT: j .LBB0_71
+; RV32M-NEXT: .LBB0_70: # in Loop: Header=BB0_68 Depth=1
+; RV32M-NEXT: lw a3, 72(sp) # 4-byte Folded Reload
+; RV32M-NEXT: sltu a3, a3, a6
+; RV32M-NEXT: .LBB0_71: # %udiv-do-while
+; RV32M-NEXT: # in Loop: Header=BB0_68 Depth=1
+; RV32M-NEXT: srli a4, t5, 31
+; RV32M-NEXT: slli t2, t2, 1
+; RV32M-NEXT: srli t1, a5, 31
+; RV32M-NEXT: slli t5, t5, 1
+; RV32M-NEXT: or a5, t2, a4
+; RV32M-NEXT: or t1, t5, t1
+; RV32M-NEXT: sub a4, t3, t1
+; RV32M-NEXT: sltu t2, t3, t1
+; RV32M-NEXT: lw a7, 76(sp) # 4-byte Folded Reload
+; RV32M-NEXT: sub t5, a7, a5
+; RV32M-NEXT: sltu a3, a4, a3
+; RV32M-NEXT: sub a4, t5, t2
+; RV32M-NEXT: sub a4, a4, a3
+; RV32M-NEXT: srai t5, a4, 31
+; RV32M-NEXT: mv a7, t6
+; RV32M-NEXT: and a4, t5, t6
+; RV32M-NEXT: mv t4, s3
+; RV32M-NEXT: and s2, t5, s3
+; RV32M-NEXT: sltu a3, a6, a4
+; RV32M-NEXT: mv t2, a3
+; RV32M-NEXT: beq t0, s2, .LBB0_67
+; RV32M-NEXT: # %bb.72: # %udiv-do-while
+; RV32M-NEXT: # in Loop: Header=BB0_68 Depth=1
+; RV32M-NEXT: sltu t2, t0, s2
+; RV32M-NEXT: j .LBB0_67
+; RV32M-NEXT: .LBB0_73:
+; RV32M-NEXT: li s8, 0
+; RV32M-NEXT: .LBB0_74: # %udiv-loop-exit
+; RV32M-NEXT: srli a2, s5, 31
+; RV32M-NEXT: slli a4, a1, 1
+; RV32M-NEXT: srli a1, a1, 31
+; RV32M-NEXT: or a4, a4, a2
+; RV32M-NEXT: slli a2, s10, 1
+; RV32M-NEXT: srli a3, s10, 31
+; RV32M-NEXT: slli a0, a0, 1
+; RV32M-NEXT: slli s5, s5, 1
+; RV32M-NEXT: or t0, a2, a1
+; RV32M-NEXT: or a7, a0, a3
+; RV32M-NEXT: or a6, s8, s5
+; RV32M-NEXT: lw s6, 60(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw s8, 56(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw s10, 52(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw s11, 24(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw t1, 16(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw t2, 12(sp) # 4-byte Folded Reload
+; RV32M-NEXT: .LBB0_75: # %udiv-end
+; RV32M-NEXT: lw s9, 36(sp) # 4-byte Folded Reload
+; RV32M-NEXT: sltu a1, s11, s9
+; RV32M-NEXT: sub a2, t2, t1
+; RV32M-NEXT: lw s7, 44(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
+; RV32M-NEXT: sub a5, a0, s7
+; RV32M-NEXT: lw a0, 48(sp) # 4-byte Folded Reload
+; RV32M-NEXT: srai a0, a0, 31
+; RV32M-NEXT: mulhu t1, t6, a6
+; RV32M-NEXT: mul t2, s3, a6
+; RV32M-NEXT: mulhu t3, s3, a6
+; RV32M-NEXT: mul t4, t6, a4
+; RV32M-NEXT: mulhu t5, t6, a4
+; RV32M-NEXT: mv a3, t6
+; RV32M-NEXT: mul t6, s3, a4
+; RV32M-NEXT: lw s4, 84(sp) # 4-byte Folded Reload
+; RV32M-NEXT: mul s0, a6, s4
+; RV32M-NEXT: mul s1, t0, a3
+; RV32M-NEXT: mulhu s2, s3, a4
+; RV32M-NEXT: mul s3, t0, s3
+; RV32M-NEXT: mulhu t0, t0, a3
+; RV32M-NEXT: mul a7, a7, a3
+; RV32M-NEXT: mul a4, a4, s4
+; RV32M-NEXT: mulhu s4, a6, s4
+; RV32M-NEXT: lw s5, 80(sp) # 4-byte Folded Reload
+; RV32M-NEXT: mul s5, a6, s5
+; RV32M-NEXT: mul a3, a3, a6
+; RV32M-NEXT: add a6, t2, t1
+; RV32M-NEXT: add s0, s1, s0
+; RV32M-NEXT: add t0, t0, s3
+; RV32M-NEXT: add s4, s4, s5
+; RV32M-NEXT: sltu t1, a6, t2
+; RV32M-NEXT: add a6, t4, a6
+; RV32M-NEXT: add a7, t0, a7
+; RV32M-NEXT: add a4, s4, a4
+; RV32M-NEXT: sltu t0, s0, s1
+; RV32M-NEXT: add t1, t3, t1
+; RV32M-NEXT: sltu t2, a6, t4
+; RV32M-NEXT: add a4, a7, a4
+; RV32M-NEXT: add t2, t5, t2
+; RV32M-NEXT: add a4, a4, t0
+; RV32M-NEXT: add t2, t1, t2
+; RV32M-NEXT: add a7, t6, t2
+; RV32M-NEXT: sltu t1, t2, t1
+; RV32M-NEXT: add t0, a7, s0
+; RV32M-NEXT: sltu t2, a7, t6
+; RV32M-NEXT: add t1, s2, t1
+; RV32M-NEXT: sltu a7, t0, a7
+; RV32M-NEXT: add t1, t1, t2
+; RV32M-NEXT: sltu t2, s10, t0
+; RV32M-NEXT: add a4, t1, a4
+; RV32M-NEXT: lw t1, 68(sp) # 4-byte Folded Reload
+; RV32M-NEXT: sub a7, t1, a7
+; RV32M-NEXT: sub a4, a7, a4
+; RV32M-NEXT: sltu t1, s6, a3
+; RV32M-NEXT: sub a7, a4, t2
+; RV32M-NEXT: mv t2, t1
+; RV32M-NEXT: beq s8, a6, .LBB0_77
+; RV32M-NEXT: # %bb.76: # %udiv-end
+; RV32M-NEXT: sltu t2, s8, a6
+; RV32M-NEXT: .LBB0_77: # %udiv-end
+; RV32M-NEXT: sub a2, a2, a1
+; RV32M-NEXT: lw a1, 40(sp) # 4-byte Folded Reload
+; RV32M-NEXT: sub a1, a5, a1
+; RV32M-NEXT: sub a4, s11, s9
+; RV32M-NEXT: lw a5, 64(sp) # 4-byte Folded Reload
+; RV32M-NEXT: sub a5, a5, s7
+; RV32M-NEXT: sub t0, s10, t0
+; RV32M-NEXT: sub a6, s8, a6
+; RV32M-NEXT: sub a3, s6, a3
+; RV32M-NEXT: sltu t3, t0, t2
+; RV32M-NEXT: sub t0, t0, t2
+; RV32M-NEXT: sub t4, a6, t1
+; RV32M-NEXT: xor a3, a3, a0
+; RV32M-NEXT: sub t1, a7, t3
+; RV32M-NEXT: xor t2, t0, a0
+; RV32M-NEXT: xor a7, t4, a0
+; RV32M-NEXT: sltu a6, a3, a0
+; RV32M-NEXT: xor t0, t1, a0
+; RV32M-NEXT: sltu t1, t2, a0
+; RV32M-NEXT: add t1, a0, t1
+; RV32M-NEXT: sub t0, t0, t1
+; RV32M-NEXT: sub t1, t2, a0
+; RV32M-NEXT: mv t2, a6
+; RV32M-NEXT: beqz t4, .LBB0_79
+; RV32M-NEXT: # %bb.78: # %udiv-end
+; RV32M-NEXT: sltu t2, a7, a0
+; RV32M-NEXT: .LBB0_79: # %udiv-end
+; RV32M-NEXT: sltu t3, t1, t2
+; RV32M-NEXT: sub t1, t1, t2
+; RV32M-NEXT: sub a7, a7, a0
+; RV32M-NEXT: sub a3, a3, a0
+; RV32M-NEXT: lw a0, 28(sp) # 4-byte Folded Reload
+; RV32M-NEXT: sw a5, 0(a0)
+; RV32M-NEXT: sw a1, 4(a0)
+; RV32M-NEXT: sw a4, 8(a0)
+; RV32M-NEXT: sw a2, 12(a0)
+; RV32M-NEXT: sub a0, t0, t3
+; RV32M-NEXT: sub a1, a7, a6
+; RV32M-NEXT: lw a2, 32(sp) # 4-byte Folded Reload
+; RV32M-NEXT: sw a3, 0(a2)
+; RV32M-NEXT: sw a1, 4(a2)
+; RV32M-NEXT: sw t1, 8(a2)
+; RV32M-NEXT: sw a0, 12(a2)
+; RV32M-NEXT: lw ra, 268(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw s0, 264(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw s1, 260(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw s2, 256(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw s3, 252(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw s4, 248(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw s5, 244(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw s6, 240(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw s7, 236(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw s8, 232(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw s9, 228(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw s10, 224(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw s11, 220(sp) # 4-byte Folded Reload
+; RV32M-NEXT: .cfi_restore ra
+; RV32M-NEXT: .cfi_restore s0
+; RV32M-NEXT: .cfi_restore s1
+; RV32M-NEXT: .cfi_restore s2
+; RV32M-NEXT: .cfi_restore s3
+; RV32M-NEXT: .cfi_restore s4
+; RV32M-NEXT: .cfi_restore s5
+; RV32M-NEXT: .cfi_restore s6
+; RV32M-NEXT: .cfi_restore s7
+; RV32M-NEXT: .cfi_restore s8
+; RV32M-NEXT: .cfi_restore s9
+; RV32M-NEXT: .cfi_restore s10
+; RV32M-NEXT: .cfi_restore s11
+; RV32M-NEXT: addi sp, sp, 272
+; RV32M-NEXT: .cfi_def_cfa_offset 0
+; RV32M-NEXT: ret
+ %q = sdiv i128 %n, %d
+ %r = srem i128 %n, %d
+ store i128 %q, ptr %q_out
+ store i128 %r, ptr %r_out
+ ret void
+}
+
+define void @udivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
+; RV64-LABEL: udivrem_i128:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -48
+; RV64-NEXT: .cfi_def_cfa_offset 48
+; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: mv a6, a4
+; RV64-NEXT: mv s0, a1
+; RV64-NEXT: mv s1, a0
+; RV64-NEXT: mv a4, sp
+; RV64-NEXT: mv a0, a2
+; RV64-NEXT: mv a1, a3
+; RV64-NEXT: mv a2, a6
+; RV64-NEXT: mv a3, a5
+; RV64-NEXT: call __udivmodti4
+; RV64-NEXT: ld a2, 0(sp)
+; RV64-NEXT: ld a3, 8(sp)
+; RV64-NEXT: sd a0, 0(s1)
+; RV64-NEXT: sd a1, 8(s1)
+; RV64-NEXT: sd a2, 0(s0)
+; RV64-NEXT: sd a3, 8(s0)
+; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT: .cfi_restore ra
+; RV64-NEXT: .cfi_restore s0
+; RV64-NEXT: .cfi_restore s1
+; RV64-NEXT: addi sp, sp, 48
+; RV64-NEXT: .cfi_def_cfa_offset 0
+; RV64-NEXT: ret
+;
+; RV32I-LABEL: udivrem_i128:
+; RV32I: # %bb.0: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: addi sp, sp, -240
+; RV32I-NEXT: .cfi_def_cfa_offset 240
+; RV32I-NEXT: sw ra, 236(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s0, 232(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s1, 228(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s2, 224(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s3, 220(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s4, 216(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s5, 212(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s6, 208(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s7, 204(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s8, 200(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s9, 196(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s10, 192(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s11, 188(sp) # 4-byte Folded Spill
+; RV32I-NEXT: .cfi_offset ra, -4
+; RV32I-NEXT: .cfi_offset s0, -8
+; RV32I-NEXT: .cfi_offset s1, -12
+; RV32I-NEXT: .cfi_offset s2, -16
+; RV32I-NEXT: .cfi_offset s3, -20
+; RV32I-NEXT: .cfi_offset s4, -24
+; RV32I-NEXT: .cfi_offset s5, -28
+; RV32I-NEXT: .cfi_offset s6, -32
+; RV32I-NEXT: .cfi_offset s7, -36
+; RV32I-NEXT: .cfi_offset s8, -40
+; RV32I-NEXT: .cfi_offset s9, -44
+; RV32I-NEXT: .cfi_offset s10, -48
+; RV32I-NEXT: .cfi_offset s11, -52
+; RV32I-NEXT: mv a4, a2
+; RV32I-NEXT: sw a1, 24(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
+; RV32I-NEXT: lw s11, 0(a3)
+; RV32I-NEXT: lw s10, 4(a3)
+; RV32I-NEXT: lw s9, 8(a3)
+; RV32I-NEXT: lw ra, 12(a3)
+; RV32I-NEXT: lui a5, 349525
+; RV32I-NEXT: addi a5, a5, 1365
+; RV32I-NEXT: lui a1, 209715
+; RV32I-NEXT: addi a1, a1, 819
+; RV32I-NEXT: lui a0, 61681
+; RV32I-NEXT: addi a0, a0, -241
+; RV32I-NEXT: bnez s10, .LBB1_2
+; RV32I-NEXT: # %bb.1: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: srli a6, s11, 1
+; RV32I-NEXT: or a6, s11, a6
+; RV32I-NEXT: srli a7, a6, 2
+; RV32I-NEXT: or a6, a6, a7
+; RV32I-NEXT: srli a7, a6, 4
+; RV32I-NEXT: or a6, a6, a7
+; RV32I-NEXT: srli a7, a6, 8
+; RV32I-NEXT: or a6, a6, a7
+; RV32I-NEXT: srli a7, a6, 16
+; RV32I-NEXT: or a6, a6, a7
+; RV32I-NEXT: not a6, a6
+; RV32I-NEXT: srli a7, a6, 1
+; RV32I-NEXT: and a7, a7, a5
+; RV32I-NEXT: sub a6, a6, a7
+; RV32I-NEXT: and a7, a6, a1
+; RV32I-NEXT: srli a6, a6, 2
+; RV32I-NEXT: and a6, a6, a1
+; RV32I-NEXT: add a6, a7, a6
+; RV32I-NEXT: srli a7, a6, 4
+; RV32I-NEXT: add a6, a6, a7
+; RV32I-NEXT: and a6, a6, a0
+; RV32I-NEXT: slli a7, a6, 8
+; RV32I-NEXT: add a6, a6, a7
+; RV32I-NEXT: slli a7, a6, 16
+; RV32I-NEXT: add a6, a6, a7
+; RV32I-NEXT: srli a6, a6, 24
+; RV32I-NEXT: addi a7, a6, 32
+; RV32I-NEXT: j .LBB1_3
+; RV32I-NEXT: .LBB1_2:
+; RV32I-NEXT: srli a6, s10, 1
+; RV32I-NEXT: or a6, s10, a6
+; RV32I-NEXT: srli a7, a6, 2
+; RV32I-NEXT: or a6, a6, a7
+; RV32I-NEXT: srli a7, a6, 4
+; RV32I-NEXT: or a6, a6, a7
+; RV32I-NEXT: srli a7, a6, 8
+; RV32I-NEXT: or a6, a6, a7
+; RV32I-NEXT: srli a7, a6, 16
+; RV32I-NEXT: or a6, a6, a7
+; RV32I-NEXT: not a6, a6
+; RV32I-NEXT: srli a7, a6, 1
+; RV32I-NEXT: and a7, a7, a5
+; RV32I-NEXT: sub a6, a6, a7
+; RV32I-NEXT: and a7, a6, a1
+; RV32I-NEXT: srli a6, a6, 2
+; RV32I-NEXT: and a6, a6, a1
+; RV32I-NEXT: add a6, a7, a6
+; RV32I-NEXT: srli a7, a6, 4
+; RV32I-NEXT: add a6, a6, a7
+; RV32I-NEXT: and a6, a6, a0
+; RV32I-NEXT: slli a7, a6, 8
+; RV32I-NEXT: add a6, a6, a7
+; RV32I-NEXT: slli a7, a6, 16
+; RV32I-NEXT: add a6, a6, a7
+; RV32I-NEXT: srli a7, a6, 24
+; RV32I-NEXT: .LBB1_3: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: lw s3, 4(a4)
+; RV32I-NEXT: or t1, s9, ra
+; RV32I-NEXT: bnez ra, .LBB1_5
+; RV32I-NEXT: # %bb.4: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: srli a6, s9, 1
+; RV32I-NEXT: or a6, s9, a6
+; RV32I-NEXT: srli t0, a6, 2
+; RV32I-NEXT: or a6, a6, t0
+; RV32I-NEXT: srli t0, a6, 4
+; RV32I-NEXT: or a6, a6, t0
+; RV32I-NEXT: srli t0, a6, 8
+; RV32I-NEXT: or a6, a6, t0
+; RV32I-NEXT: srli t0, a6, 16
+; RV32I-NEXT: or a6, a6, t0
+; RV32I-NEXT: not a6, a6
+; RV32I-NEXT: srli t0, a6, 1
+; RV32I-NEXT: and t0, t0, a5
+; RV32I-NEXT: sub a6, a6, t0
+; RV32I-NEXT: and t0, a6, a1
+; RV32I-NEXT: srli a6, a6, 2
+; RV32I-NEXT: and a6, a6, a1
+; RV32I-NEXT: add a6, t0, a6
+; RV32I-NEXT: srli t0, a6, 4
+; RV32I-NEXT: add a6, a6, t0
+; RV32I-NEXT: and a6, a6, a0
+; RV32I-NEXT: slli t0, a6, 8
+; RV32I-NEXT: add a6, a6, t0
+; RV32I-NEXT: slli t0, a6, 16
+; RV32I-NEXT: add a6, a6, t0
+; RV32I-NEXT: srli a6, a6, 24
+; RV32I-NEXT: addi a6, a6, 32
+; RV32I-NEXT: j .LBB1_6
+; RV32I-NEXT: .LBB1_5:
+; RV32I-NEXT: srli a6, ra, 1
+; RV32I-NEXT: or a6, ra, a6
+; RV32I-NEXT: srli t0, a6, 2
+; RV32I-NEXT: or a6, a6, t0
+; RV32I-NEXT: srli t0, a6, 4
+; RV32I-NEXT: or a6, a6, t0
+; RV32I-NEXT: srli t0, a6, 8
+; RV32I-NEXT: or a6, a6, t0
+; RV32I-NEXT: srli t0, a6, 16
+; RV32I-NEXT: or a6, a6, t0
+; RV32I-NEXT: not a6, a6
+; RV32I-NEXT: srli t0, a6, 1
+; RV32I-NEXT: and t0, t0, a5
+; RV32I-NEXT: sub a6, a6, t0
+; RV32I-NEXT: and t0, a6, a1
+; RV32I-NEXT: srli a6, a6, 2
+; RV32I-NEXT: and a6, a6, a1
+; RV32I-NEXT: add a6, t0, a6
+; RV32I-NEXT: srli t0, a6, 4
+; RV32I-NEXT: add a6, a6, t0
+; RV32I-NEXT: and a6, a6, a0
+; RV32I-NEXT: slli t0, a6, 8
+; RV32I-NEXT: add a6, a6, t0
+; RV32I-NEXT: slli t0, a6, 16
+; RV32I-NEXT: add a6, a6, t0
+; RV32I-NEXT: srli a6, a6, 24
+; RV32I-NEXT: .LBB1_6: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: lw s5, 12(a4)
+; RV32I-NEXT: addi t0, a7, 64
+; RV32I-NEXT: bnez t1, .LBB1_8
+; RV32I-NEXT: # %bb.7: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: mv a6, t0
+; RV32I-NEXT: .LBB1_8: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: lw s2, 0(a4)
+; RV32I-NEXT: lw s6, 8(a4)
+; RV32I-NEXT: snez a4, t1
+; RV32I-NEXT: bnez s3, .LBB1_10
+; RV32I-NEXT: # %bb.9: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: srli t1, s2, 1
+; RV32I-NEXT: or t1, s2, t1
+; RV32I-NEXT: srli t2, t1, 2
+; RV32I-NEXT: or t1, t1, t2
+; RV32I-NEXT: srli t2, t1, 4
+; RV32I-NEXT: or t1, t1, t2
+; RV32I-NEXT: srli t2, t1, 8
+; RV32I-NEXT: or t1, t1, t2
+; RV32I-NEXT: srli t2, t1, 16
+; RV32I-NEXT: or t1, t1, t2
+; RV32I-NEXT: not t1, t1
+; RV32I-NEXT: srli t2, t1, 1
+; RV32I-NEXT: and t2, t2, a5
+; RV32I-NEXT: sub t1, t1, t2
+; RV32I-NEXT: and t2, t1, a1
+; RV32I-NEXT: srli t1, t1, 2
+; RV32I-NEXT: and t1, t1, a1
+; RV32I-NEXT: add t1, t2, t1
+; RV32I-NEXT: srli t2, t1, 4
+; RV32I-NEXT: add t1, t1, t2
+; RV32I-NEXT: and t1, t1, a0
+; RV32I-NEXT: slli t2, t1, 8
+; RV32I-NEXT: add t1, t1, t2
+; RV32I-NEXT: slli t2, t1, 16
+; RV32I-NEXT: add t1, t1, t2
+; RV32I-NEXT: srli t1, t1, 24
+; RV32I-NEXT: addi t6, t1, 32
+; RV32I-NEXT: j .LBB1_11
+; RV32I-NEXT: .LBB1_10:
+; RV32I-NEXT: srli t1, s3, 1
+; RV32I-NEXT: or t1, s3, t1
+; RV32I-NEXT: srli t2, t1, 2
+; RV32I-NEXT: or t1, t1, t2
+; RV32I-NEXT: srli t2, t1, 4
+; RV32I-NEXT: or t1, t1, t2
+; RV32I-NEXT: srli t2, t1, 8
+; RV32I-NEXT: or t1, t1, t2
+; RV32I-NEXT: srli t2, t1, 16
+; RV32I-NEXT: or t1, t1, t2
+; RV32I-NEXT: not t1, t1
+; RV32I-NEXT: srli t2, t1, 1
+; RV32I-NEXT: and t2, t2, a5
+; RV32I-NEXT: sub t1, t1, t2
+; RV32I-NEXT: and t2, t1, a1
+; RV32I-NEXT: srli t1, t1, 2
+; RV32I-NEXT: and t1, t1, a1
+; RV32I-NEXT: add t1, t2, t1
+; RV32I-NEXT: srli t2, t1, 4
+; RV32I-NEXT: add t1, t1, t2
+; RV32I-NEXT: and t1, t1, a0
+; RV32I-NEXT: slli t2, t1, 8
+; RV32I-NEXT: add t1, t1, t2
+; RV32I-NEXT: slli t2, t1, 16
+; RV32I-NEXT: add t1, t1, t2
+; RV32I-NEXT: srli t6, t1, 24
+; RV32I-NEXT: .LBB1_11: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: or t1, s10, ra
+; RV32I-NEXT: or t2, s11, s9
+; RV32I-NEXT: or t3, s3, s5
+; RV32I-NEXT: or t4, s2, s6
+; RV32I-NEXT: sltu a7, t0, a7
+; RV32I-NEXT: addi t0, a4, -1
+; RV32I-NEXT: addi a4, t6, 64
+; RV32I-NEXT: or t5, s6, s5
+; RV32I-NEXT: sltu t6, a4, t6
+; RV32I-NEXT: snez s4, t5
+; RV32I-NEXT: addi s4, s4, -1
+; RV32I-NEXT: bnez s5, .LBB1_13
+; RV32I-NEXT: # %bb.12: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: srli s0, s6, 1
+; RV32I-NEXT: or s0, s6, s0
+; RV32I-NEXT: srli s1, s0, 2
+; RV32I-NEXT: or s0, s0, s1
+; RV32I-NEXT: srli s1, s0, 4
+; RV32I-NEXT: or s0, s0, s1
+; RV32I-NEXT: srli s1, s0, 8
+; RV32I-NEXT: or s0, s0, s1
+; RV32I-NEXT: srli s1, s0, 16
+; RV32I-NEXT: or s0, s0, s1
+; RV32I-NEXT: not s0, s0
+; RV32I-NEXT: srli s1, s0, 1
+; RV32I-NEXT: and a5, s1, a5
+; RV32I-NEXT: sub s0, s0, a5
+; RV32I-NEXT: and a5, s0, a1
+; RV32I-NEXT: srli s0, s0, 2
+; RV32I-NEXT: and a1, s0, a1
+; RV32I-NEXT: add a1, a5, a1
+; RV32I-NEXT: srli a5, a1, 4
+; RV32I-NEXT: add a1, a1, a5
+; RV32I-NEXT: and a0, a1, a0
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: srli a0, a0, 24
+; RV32I-NEXT: addi a0, a0, 32
+; RV32I-NEXT: j .LBB1_14
+; RV32I-NEXT: .LBB1_13:
+; RV32I-NEXT: srli s0, s5, 1
+; RV32I-NEXT: or s0, s5, s0
+; RV32I-NEXT: srli s1, s0, 2
+; RV32I-NEXT: or s0, s0, s1
+; RV32I-NEXT: srli s1, s0, 4
+; RV32I-NEXT: or s0, s0, s1
+; RV32I-NEXT: srli s1, s0, 8
+; RV32I-NEXT: or s0, s0, s1
+; RV32I-NEXT: srli s1, s0, 16
+; RV32I-NEXT: or s0, s0, s1
+; RV32I-NEXT: not s0, s0
+; RV32I-NEXT: srli s1, s0, 1
+; RV32I-NEXT: and a5, s1, a5
+; RV32I-NEXT: sub s0, s0, a5
+; RV32I-NEXT: and a5, s0, a1
+; RV32I-NEXT: srli s0, s0, 2
+; RV32I-NEXT: and a1, s0, a1
+; RV32I-NEXT: add a1, a5, a1
+; RV32I-NEXT: srli a5, a1, 4
+; RV32I-NEXT: add a1, a1, a5
+; RV32I-NEXT: and a0, a1, a0
+; RV32I-NEXT: slli a1, a0, 8
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: slli a1, a0, 16
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: srli a0, a0, 24
+; RV32I-NEXT: .LBB1_14: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: or t1, t2, t1
+; RV32I-NEXT: or t2, t4, t3
+; RV32I-NEXT: and a1, t0, a7
+; RV32I-NEXT: and a5, s4, t6
+; RV32I-NEXT: bnez t5, .LBB1_16
+; RV32I-NEXT: # %bb.15: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: mv a0, a4
+; RV32I-NEXT: .LBB1_16: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: seqz a7, t1
+; RV32I-NEXT: seqz t0, t2
+; RV32I-NEXT: sltu a4, a6, a0
+; RV32I-NEXT: sub t2, a1, a5
+; RV32I-NEXT: mv t1, a4
+; RV32I-NEXT: beq a1, a5, .LBB1_18
+; RV32I-NEXT: # %bb.17: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: sltu t1, a1, a5
+; RV32I-NEXT: .LBB1_18: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: sub a4, t2, a4
+; RV32I-NEXT: or a1, a7, t0
+; RV32I-NEXT: neg a5, t1
+; RV32I-NEXT: seqz a7, t1
+; RV32I-NEXT: addi a7, a7, -1
+; RV32I-NEXT: or t0, a5, a7
+; RV32I-NEXT: sub a0, a6, a0
+; RV32I-NEXT: beqz t0, .LBB1_20
+; RV32I-NEXT: # %bb.19: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: snez a6, t0
+; RV32I-NEXT: j .LBB1_21
+; RV32I-NEXT: .LBB1_20:
+; RV32I-NEXT: snez a6, a4
+; RV32I-NEXT: sltiu t0, a0, 128
+; RV32I-NEXT: xori t0, t0, 1
+; RV32I-NEXT: or a6, t0, a6
+; RV32I-NEXT: .LBB1_21: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: li s8, 127
+; RV32I-NEXT: or a1, a1, a6
+; RV32I-NEXT: addi a6, a1, -1
+; RV32I-NEXT: and t1, a6, s5
+; RV32I-NEXT: and t2, a6, s6
+; RV32I-NEXT: and t0, a6, s3
+; RV32I-NEXT: and t3, a6, s2
+; RV32I-NEXT: sw s10, 52(sp) # 4-byte Folded Spill
+; RV32I-NEXT: mv a2, s11
+; RV32I-NEXT: sw s3, 28(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw ra, 48(sp) # 4-byte Folded Spill
+; RV32I-NEXT: mv a3, s9
+; RV32I-NEXT: sw s2, 40(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s5, 36(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s6, 32(sp) # 4-byte Folded Spill
+; RV32I-NEXT: bnez a1, .LBB1_32
+; RV32I-NEXT: # %bb.22: # %_udiv-special-cases_udiv-special-cases
+; RV32I-NEXT: xori a1, a0, 127
+; RV32I-NEXT: or a1, a1, a5
+; RV32I-NEXT: or a6, a4, a7
+; RV32I-NEXT: or a1, a1, a6
+; RV32I-NEXT: beqz a1, .LBB1_32
+; RV32I-NEXT: # %bb.23: # %udiv-bb15
+; RV32I-NEXT: addi a1, a0, 1
+; RV32I-NEXT: sub a0, s8, a0
+; RV32I-NEXT: sw zero, 152(sp)
+; RV32I-NEXT: sw zero, 156(sp)
+; RV32I-NEXT: sw zero, 160(sp)
+; RV32I-NEXT: sw zero, 164(sp)
+; RV32I-NEXT: sw s2, 168(sp)
+; RV32I-NEXT: sw s3, 172(sp)
+; RV32I-NEXT: sw s6, 176(sp)
+; RV32I-NEXT: sw s5, 180(sp)
+; RV32I-NEXT: addi a6, sp, 168
+; RV32I-NEXT: seqz t0, a1
+; RV32I-NEXT: srli t1, a0, 3
+; RV32I-NEXT: andi t2, a0, 31
+; RV32I-NEXT: add a4, a4, t0
+; RV32I-NEXT: andi t0, t1, 12
+; RV32I-NEXT: xori t1, t2, 31
+; RV32I-NEXT: or t2, a1, a4
+; RV32I-NEXT: sub a6, a6, t0
+; RV32I-NEXT: seqz t0, t2
+; RV32I-NEXT: lw t4, 0(a6)
+; RV32I-NEXT: lw t2, 4(a6)
+; RV32I-NEXT: lw t5, 8(a6)
+; RV32I-NEXT: lw t3, 12(a6)
+; RV32I-NEXT: add t0, a5, t0
+; RV32I-NEXT: sltu a6, t0, a5
+; RV32I-NEXT: or a5, a1, t0
+; RV32I-NEXT: add a6, a7, a6
+; RV32I-NEXT: or a7, a4, a6
+; RV32I-NEXT: srli t6, t5, 1
+; RV32I-NEXT: srli s0, t2, 1
+; RV32I-NEXT: or a7, a5, a7
+; RV32I-NEXT: srli a5, t4, 1
+; RV32I-NEXT: srl t6, t6, t1
+; RV32I-NEXT: srl s0, s0, t1
+; RV32I-NEXT: srl a5, a5, t1
+; RV32I-NEXT: sll t1, t3, a0
+; RV32I-NEXT: or t3, t1, t6
+; RV32I-NEXT: sll t1, t5, a0
+; RV32I-NEXT: sll t2, t2, a0
+; RV32I-NEXT: or t1, t1, s0
+; RV32I-NEXT: or t2, t2, a5
+; RV32I-NEXT: sll a5, t4, a0
+; RV32I-NEXT: beqz a7, .LBB1_31
+; RV32I-NEXT: # %bb.24: # %udiv-preheader4
+; RV32I-NEXT: li a7, 0
+; RV32I-NEXT: li t4, 0
+; RV32I-NEXT: li t5, 0
+; RV32I-NEXT: li t6, 0
+; RV32I-NEXT: sw zero, 136(sp)
+; RV32I-NEXT: sw zero, 140(sp)
+; RV32I-NEXT: sw zero, 144(sp)
+; RV32I-NEXT: sw zero, 148(sp)
+; RV32I-NEXT: sw s2, 120(sp)
+; RV32I-NEXT: sw s3, 124(sp)
+; RV32I-NEXT: sw s6, 128(sp)
+; RV32I-NEXT: sw s5, 132(sp)
+; RV32I-NEXT: srli a0, a1, 3
+; RV32I-NEXT: addi s0, sp, 120
+; RV32I-NEXT: andi a0, a0, 12
+; RV32I-NEXT: add a0, s0, a0
+; RV32I-NEXT: lw s0, 4(a0)
+; RV32I-NEXT: lw s1, 8(a0)
+; RV32I-NEXT: lw s2, 12(a0)
+; RV32I-NEXT: lw a0, 0(a0)
+; RV32I-NEXT: andi s3, a1, 31
+; RV32I-NEXT: xori s3, s3, 31
+; RV32I-NEXT: slli s4, s2, 1
+; RV32I-NEXT: slli s5, s1, 1
+; RV32I-NEXT: slli s6, s0, 1
+; RV32I-NEXT: sll s4, s4, s3
+; RV32I-NEXT: sll s5, s5, s3
+; RV32I-NEXT: sll s3, s6, s3
+; RV32I-NEXT: seqz s6, s11
+; RV32I-NEXT: srl s1, s1, a1
+; RV32I-NEXT: or s8, s1, s4
+; RV32I-NEXT: or s1, s11, s10
+; RV32I-NEXT: sub s4, s10, s6
+; RV32I-NEXT: seqz s1, s1
+; RV32I-NEXT: srl s0, s0, a1
+; RV32I-NEXT: or s9, s0, s5
+; RV32I-NEXT: sub s5, a3, s1
+; RV32I-NEXT: sltu s0, a3, s1
+; RV32I-NEXT: sub s6, ra, s0
+; RV32I-NEXT: srl s0, a0, a1
+; RV32I-NEXT: srl a0, s2, a1
+; RV32I-NEXT: mv s1, s11
+; RV32I-NEXT: or s11, s0, s3
+; RV32I-NEXT: addi s1, s1, -1
+; RV32I-NEXT: sw s1, 44(sp) # 4-byte Folded Spill
+; RV32I-NEXT: j .LBB1_26
+; RV32I-NEXT: .LBB1_25: # %udiv-do-while3
+; RV32I-NEXT: # in Loop: Header=BB1_26 Depth=1
+; RV32I-NEXT: srli s2, t1, 31
+; RV32I-NEXT: slli t3, t3, 1
+; RV32I-NEXT: sub s7, s10, s7
+; RV32I-NEXT: srli s10, t2, 31
+; RV32I-NEXT: slli t1, t1, 1
+; RV32I-NEXT: or t3, t3, s2
+; RV32I-NEXT: srli s2, a5, 31
+; RV32I-NEXT: slli t2, t2, 1
+; RV32I-NEXT: slli a5, a5, 1
+; RV32I-NEXT: or t1, t1, s10
+; RV32I-NEXT: lw s3, 48(sp) # 4-byte Folded Reload
+; RV32I-NEXT: and s10, s9, s3
+; RV32I-NEXT: or t2, t2, s2
+; RV32I-NEXT: and s2, s9, a3
+; RV32I-NEXT: or a5, a7, a5
+; RV32I-NEXT: sub s3, s0, s2
+; RV32I-NEXT: sltu s0, s0, s2
+; RV32I-NEXT: or s2, a1, a4
+; RV32I-NEXT: sub s10, s8, s10
+; RV32I-NEXT: seqz s8, a1
+; RV32I-NEXT: addi a1, a1, -1
+; RV32I-NEXT: andi a7, s9, 1
+; RV32I-NEXT: sub s9, s7, s1
+; RV32I-NEXT: seqz s1, s2
+; RV32I-NEXT: sub a4, a4, s8
+; RV32I-NEXT: or t2, t4, t2
+; RV32I-NEXT: or t1, t5, t1
+; RV32I-NEXT: or t3, t6, t3
+; RV32I-NEXT: sub s8, s3, a0
+; RV32I-NEXT: sltu a0, s3, a0
+; RV32I-NEXT: sub t4, s10, s0
+; RV32I-NEXT: sltu t5, t0, s1
+; RV32I-NEXT: sub t0, t0, s1
+; RV32I-NEXT: sub a0, t4, a0
+; RV32I-NEXT: sub a6, a6, t5
+; RV32I-NEXT: or t4, a4, a6
+; RV32I-NEXT: or t5, a1, t0
+; RV32I-NEXT: or s0, t5, t4
+; RV32I-NEXT: sub s11, ra, s11
+; RV32I-NEXT: li t4, 0
+; RV32I-NEXT: li t5, 0
+; RV32I-NEXT: li t6, 0
+; RV32I-NEXT: beqz s0, .LBB1_31
+; RV32I-NEXT: .LBB1_26: # %udiv-do-while3
+; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT: srli s0, s11, 31
+; RV32I-NEXT: slli s1, s9, 1
+; RV32I-NEXT: slli s11, s11, 1
+; RV32I-NEXT: or s10, s1, s0
+; RV32I-NEXT: srli s0, t3, 31
+; RV32I-NEXT: or ra, s11, s0
+; RV32I-NEXT: beq s4, s10, .LBB1_28
+; RV32I-NEXT: # %bb.27: # %udiv-do-while3
+; RV32I-NEXT: # in Loop: Header=BB1_26 Depth=1
+; RV32I-NEXT: sltu s1, s4, s10
+; RV32I-NEXT: j .LBB1_29
+; RV32I-NEXT: .LBB1_28: # in Loop: Header=BB1_26 Depth=1
+; RV32I-NEXT: lw s0, 44(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sltu s1, s0, ra
+; RV32I-NEXT: .LBB1_29: # %udiv-do-while3
+; RV32I-NEXT: # in Loop: Header=BB1_26 Depth=1
+; RV32I-NEXT: srli s0, s8, 31
+; RV32I-NEXT: slli a0, a0, 1
+; RV32I-NEXT: srli s7, s9, 31
+; RV32I-NEXT: slli s9, s8, 1
+; RV32I-NEXT: or s8, a0, s0
+; RV32I-NEXT: or s0, s9, s7
+; RV32I-NEXT: sub a0, s5, s0
+; RV32I-NEXT: sltu s7, s5, s0
+; RV32I-NEXT: sub s9, s6, s8
+; RV32I-NEXT: sltu a0, a0, s1
+; RV32I-NEXT: sub s1, s9, s7
+; RV32I-NEXT: sub s1, s1, a0
+; RV32I-NEXT: srai s9, s1, 31
+; RV32I-NEXT: and s11, s9, a2
+; RV32I-NEXT: lw a0, 52(sp) # 4-byte Folded Reload
+; RV32I-NEXT: and s7, s9, a0
+; RV32I-NEXT: sltu s1, ra, s11
+; RV32I-NEXT: mv a0, s1
+; RV32I-NEXT: beq s10, s7, .LBB1_25
+; RV32I-NEXT: # %bb.30: # %udiv-do-while3
+; RV32I-NEXT: # in Loop: Header=BB1_26 Depth=1
+; RV32I-NEXT: sltu a0, s10, s7
+; RV32I-NEXT: j .LBB1_25
+; RV32I-NEXT: .LBB1_31: # %udiv-loop-exit2
+; RV32I-NEXT: srli a0, a5, 31
+; RV32I-NEXT: slli a1, t2, 1
+; RV32I-NEXT: srli a4, t2, 31
+; RV32I-NEXT: or t0, a1, a0
+; RV32I-NEXT: slli a0, t1, 1
+; RV32I-NEXT: srli a1, t1, 31
+; RV32I-NEXT: slli t3, t3, 1
+; RV32I-NEXT: slli a5, a5, 1
+; RV32I-NEXT: or t2, a0, a4
+; RV32I-NEXT: or t1, t3, a1
+; RV32I-NEXT: or t3, a7, a5
+; RV32I-NEXT: lw s10, 52(sp) # 4-byte Folded Reload
+; RV32I-NEXT: mv s11, a2
+; RV32I-NEXT: lw s3, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw ra, 48(sp) # 4-byte Folded Reload
+; RV32I-NEXT: mv s9, a3
+; RV32I-NEXT: lw s2, 40(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s5, 36(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s6, 32(sp) # 4-byte Folded Reload
+; RV32I-NEXT: li s8, 127
+; RV32I-NEXT: .LBB1_32: # %udiv-end1
+; RV32I-NEXT: lui a0, 349525
+; RV32I-NEXT: lui a1, 209715
+; RV32I-NEXT: lui a6, 61681
+; RV32I-NEXT: addi a5, a0, 1365
+; RV32I-NEXT: addi a4, a1, 819
+; RV32I-NEXT: addi a1, a6, -241
+; RV32I-NEXT: sw t0, 16(sp) # 4-byte Folded Spill
+; RV32I-NEXT: bnez s10, .LBB1_35
+; RV32I-NEXT: # %bb.33: # %udiv-end1
+; RV32I-NEXT: srli a0, s11, 1
+; RV32I-NEXT: or a0, s11, a0
+; RV32I-NEXT: srli a6, a0, 2
+; RV32I-NEXT: or a0, a0, a6
+; RV32I-NEXT: srli a6, a0, 4
+; RV32I-NEXT: or a0, a0, a6
+; RV32I-NEXT: srli a6, a0, 8
+; RV32I-NEXT: or a0, a0, a6
+; RV32I-NEXT: srli a6, a0, 16
+; RV32I-NEXT: or a0, a0, a6
+; RV32I-NEXT: not a0, a0
+; RV32I-NEXT: srli a6, a0, 1
+; RV32I-NEXT: and a6, a6, a5
+; RV32I-NEXT: sub a0, a0, a6
+; RV32I-NEXT: and a6, a0, a4
+; RV32I-NEXT: srli a0, a0, 2
+; RV32I-NEXT: and a0, a0, a4
+; RV32I-NEXT: add a0, a6, a0
+; RV32I-NEXT: srli a6, a0, 4
+; RV32I-NEXT: add a0, a0, a6
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: slli a6, a0, 8
+; RV32I-NEXT: add a0, a0, a6
+; RV32I-NEXT: slli a6, a0, 16
+; RV32I-NEXT: add a0, a0, a6
+; RV32I-NEXT: srli a0, a0, 24
+; RV32I-NEXT: addi a7, a0, 32
+; RV32I-NEXT: or t0, s9, ra
+; RV32I-NEXT: beqz ra, .LBB1_36
+; RV32I-NEXT: .LBB1_34:
+; RV32I-NEXT: srli a0, ra, 1
+; RV32I-NEXT: or a0, ra, a0
+; RV32I-NEXT: srli a6, a0, 2
+; RV32I-NEXT: or a0, a0, a6
+; RV32I-NEXT: srli a6, a0, 4
+; RV32I-NEXT: or a0, a0, a6
+; RV32I-NEXT: srli a6, a0, 8
+; RV32I-NEXT: or a0, a0, a6
+; RV32I-NEXT: srli a6, a0, 16
+; RV32I-NEXT: or a0, a0, a6
+; RV32I-NEXT: not a0, a0
+; RV32I-NEXT: srli a6, a0, 1
+; RV32I-NEXT: and a6, a6, a5
+; RV32I-NEXT: sub a0, a0, a6
+; RV32I-NEXT: and a6, a0, a4
+; RV32I-NEXT: srli a0, a0, 2
+; RV32I-NEXT: and a0, a0, a4
+; RV32I-NEXT: add a0, a6, a0
+; RV32I-NEXT: srli a6, a0, 4
+; RV32I-NEXT: add a0, a0, a6
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: slli a6, a0, 8
+; RV32I-NEXT: add a0, a0, a6
+; RV32I-NEXT: slli a6, a0, 16
+; RV32I-NEXT: add a0, a0, a6
+; RV32I-NEXT: srli a6, a0, 24
+; RV32I-NEXT: addi a0, a7, 64
+; RV32I-NEXT: beqz t0, .LBB1_37
+; RV32I-NEXT: j .LBB1_38
+; RV32I-NEXT: .LBB1_35:
+; RV32I-NEXT: srli a0, s10, 1
+; RV32I-NEXT: or a0, s10, a0
+; RV32I-NEXT: srli a6, a0, 2
+; RV32I-NEXT: or a0, a0, a6
+; RV32I-NEXT: srli a6, a0, 4
+; RV32I-NEXT: or a0, a0, a6
+; RV32I-NEXT: srli a6, a0, 8
+; RV32I-NEXT: or a0, a0, a6
+; RV32I-NEXT: srli a6, a0, 16
+; RV32I-NEXT: or a0, a0, a6
+; RV32I-NEXT: not a0, a0
+; RV32I-NEXT: srli a6, a0, 1
+; RV32I-NEXT: and a6, a6, a5
+; RV32I-NEXT: sub a0, a0, a6
+; RV32I-NEXT: and a6, a0, a4
+; RV32I-NEXT: srli a0, a0, 2
+; RV32I-NEXT: and a0, a0, a4
+; RV32I-NEXT: add a0, a6, a0
+; RV32I-NEXT: srli a6, a0, 4
+; RV32I-NEXT: add a0, a0, a6
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: slli a6, a0, 8
+; RV32I-NEXT: add a0, a0, a6
+; RV32I-NEXT: slli a6, a0, 16
+; RV32I-NEXT: add a0, a0, a6
+; RV32I-NEXT: srli a7, a0, 24
+; RV32I-NEXT: or t0, s9, ra
+; RV32I-NEXT: bnez ra, .LBB1_34
+; RV32I-NEXT: .LBB1_36: # %udiv-end1
+; RV32I-NEXT: srli a0, s9, 1
+; RV32I-NEXT: or a0, s9, a0
+; RV32I-NEXT: srli a6, a0, 2
+; RV32I-NEXT: or a0, a0, a6
+; RV32I-NEXT: srli a6, a0, 4
+; RV32I-NEXT: or a0, a0, a6
+; RV32I-NEXT: srli a6, a0, 8
+; RV32I-NEXT: or a0, a0, a6
+; RV32I-NEXT: srli a6, a0, 16
+; RV32I-NEXT: or a0, a0, a6
+; RV32I-NEXT: not a0, a0
+; RV32I-NEXT: srli a6, a0, 1
+; RV32I-NEXT: and a6, a6, a5
+; RV32I-NEXT: sub a0, a0, a6
+; RV32I-NEXT: and a6, a0, a4
+; RV32I-NEXT: srli a0, a0, 2
+; RV32I-NEXT: and a0, a0, a4
+; RV32I-NEXT: add a0, a6, a0
+; RV32I-NEXT: srli a6, a0, 4
+; RV32I-NEXT: add a0, a0, a6
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: slli a6, a0, 8
+; RV32I-NEXT: add a0, a0, a6
+; RV32I-NEXT: slli a6, a0, 16
+; RV32I-NEXT: add a0, a0, a6
+; RV32I-NEXT: srli a0, a0, 24
+; RV32I-NEXT: addi a6, a0, 32
+; RV32I-NEXT: addi a0, a7, 64
+; RV32I-NEXT: bnez t0, .LBB1_38
+; RV32I-NEXT: .LBB1_37: # %udiv-end1
+; RV32I-NEXT: mv a6, a0
+; RV32I-NEXT: .LBB1_38: # %udiv-end1
+; RV32I-NEXT: sw t1, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: snez t4, t0
+; RV32I-NEXT: sw t2, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw t3, 4(sp) # 4-byte Folded Spill
+; RV32I-NEXT: bnez s3, .LBB1_40
+; RV32I-NEXT: # %bb.39: # %udiv-end1
+; RV32I-NEXT: srli t0, s2, 1
+; RV32I-NEXT: or t0, s2, t0
+; RV32I-NEXT: srli t1, t0, 2
+; RV32I-NEXT: or t0, t0, t1
+; RV32I-NEXT: srli t1, t0, 4
+; RV32I-NEXT: or t0, t0, t1
+; RV32I-NEXT: srli t1, t0, 8
+; RV32I-NEXT: or t0, t0, t1
+; RV32I-NEXT: srli t1, t0, 16
+; RV32I-NEXT: or t0, t0, t1
+; RV32I-NEXT: not t0, t0
+; RV32I-NEXT: srli t1, t0, 1
+; RV32I-NEXT: and t1, t1, a5
+; RV32I-NEXT: sub t0, t0, t1
+; RV32I-NEXT: and t1, t0, a4
+; RV32I-NEXT: srli t0, t0, 2
+; RV32I-NEXT: and t0, t0, a4
+; RV32I-NEXT: add t0, t1, t0
+; RV32I-NEXT: srli t1, t0, 4
+; RV32I-NEXT: add t0, t0, t1
+; RV32I-NEXT: and t0, t0, a1
+; RV32I-NEXT: slli t1, t0, 8
+; RV32I-NEXT: add t0, t0, t1
+; RV32I-NEXT: slli t1, t0, 16
+; RV32I-NEXT: add t0, t0, t1
+; RV32I-NEXT: srli t0, t0, 24
+; RV32I-NEXT: addi t6, t0, 32
+; RV32I-NEXT: j .LBB1_41
+; RV32I-NEXT: .LBB1_40:
+; RV32I-NEXT: srli t0, s3, 1
+; RV32I-NEXT: or t0, s3, t0
+; RV32I-NEXT: srli t1, t0, 2
+; RV32I-NEXT: or t0, t0, t1
+; RV32I-NEXT: srli t1, t0, 4
+; RV32I-NEXT: or t0, t0, t1
+; RV32I-NEXT: srli t1, t0, 8
+; RV32I-NEXT: or t0, t0, t1
+; RV32I-NEXT: srli t1, t0, 16
+; RV32I-NEXT: or t0, t0, t1
+; RV32I-NEXT: not t0, t0
+; RV32I-NEXT: srli t1, t0, 1
+; RV32I-NEXT: and t1, t1, a5
+; RV32I-NEXT: sub t0, t0, t1
+; RV32I-NEXT: and t1, t0, a4
+; RV32I-NEXT: srli t0, t0, 2
+; RV32I-NEXT: and t0, t0, a4
+; RV32I-NEXT: add t0, t1, t0
+; RV32I-NEXT: srli t1, t0, 4
+; RV32I-NEXT: add t0, t0, t1
+; RV32I-NEXT: and t0, t0, a1
+; RV32I-NEXT: slli t1, t0, 8
+; RV32I-NEXT: add t0, t0, t1
+; RV32I-NEXT: slli t1, t0, 16
+; RV32I-NEXT: add t0, t0, t1
+; RV32I-NEXT: srli t6, t0, 24
+; RV32I-NEXT: .LBB1_41: # %udiv-end1
+; RV32I-NEXT: or t0, s11, s9
+; RV32I-NEXT: or t1, s10, ra
+; RV32I-NEXT: or t2, s2, s6
+; RV32I-NEXT: or t3, s3, s5
+; RV32I-NEXT: sltu a7, a0, a7
+; RV32I-NEXT: addi t4, t4, -1
+; RV32I-NEXT: addi a0, t6, 64
+; RV32I-NEXT: or t5, s6, s5
+; RV32I-NEXT: sltu t6, a0, t6
+; RV32I-NEXT: snez s4, t5
+; RV32I-NEXT: addi s4, s4, -1
+; RV32I-NEXT: bnez s5, .LBB1_43
+; RV32I-NEXT: # %bb.42: # %udiv-end1
+; RV32I-NEXT: srli s0, s6, 1
+; RV32I-NEXT: or s0, s6, s0
+; RV32I-NEXT: srli s1, s0, 2
+; RV32I-NEXT: or s0, s0, s1
+; RV32I-NEXT: srli s1, s0, 4
+; RV32I-NEXT: or s0, s0, s1
+; RV32I-NEXT: srli s1, s0, 8
+; RV32I-NEXT: or s0, s0, s1
+; RV32I-NEXT: srli s1, s0, 16
+; RV32I-NEXT: or s0, s0, s1
+; RV32I-NEXT: not s0, s0
+; RV32I-NEXT: srli s1, s0, 1
+; RV32I-NEXT: and a5, s1, a5
+; RV32I-NEXT: sub s0, s0, a5
+; RV32I-NEXT: and a5, s0, a4
+; RV32I-NEXT: srli s0, s0, 2
+; RV32I-NEXT: and a4, s0, a4
+; RV32I-NEXT: add a4, a5, a4
+; RV32I-NEXT: srli a5, a4, 4
+; RV32I-NEXT: add a4, a4, a5
+; RV32I-NEXT: and a1, a4, a1
+; RV32I-NEXT: slli a4, a1, 8
+; RV32I-NEXT: add a1, a1, a4
+; RV32I-NEXT: slli a4, a1, 16
+; RV32I-NEXT: add a1, a1, a4
+; RV32I-NEXT: srli a1, a1, 24
+; RV32I-NEXT: addi a1, a1, 32
+; RV32I-NEXT: j .LBB1_44
+; RV32I-NEXT: .LBB1_43:
+; RV32I-NEXT: srli s0, s5, 1
+; RV32I-NEXT: or s0, s5, s0
+; RV32I-NEXT: srli s1, s0, 2
+; RV32I-NEXT: or s0, s0, s1
+; RV32I-NEXT: srli s1, s0, 4
+; RV32I-NEXT: or s0, s0, s1
+; RV32I-NEXT: srli s1, s0, 8
+; RV32I-NEXT: or s0, s0, s1
+; RV32I-NEXT: srli s1, s0, 16
+; RV32I-NEXT: or s0, s0, s1
+; RV32I-NEXT: not s0, s0
+; RV32I-NEXT: srli s1, s0, 1
+; RV32I-NEXT: and a5, s1, a5
+; RV32I-NEXT: sub s0, s0, a5
+; RV32I-NEXT: and a5, s0, a4
+; RV32I-NEXT: srli s0, s0, 2
+; RV32I-NEXT: and a4, s0, a4
+; RV32I-NEXT: add a4, a5, a4
+; RV32I-NEXT: srli a5, a4, 4
+; RV32I-NEXT: add a4, a4, a5
+; RV32I-NEXT: and a1, a4, a1
+; RV32I-NEXT: slli a4, a1, 8
+; RV32I-NEXT: add a1, a1, a4
+; RV32I-NEXT: slli a4, a1, 16
+; RV32I-NEXT: add a1, a1, a4
+; RV32I-NEXT: srli a1, a1, 24
+; RV32I-NEXT: .LBB1_44: # %udiv-end1
+; RV32I-NEXT: or t0, t0, t1
+; RV32I-NEXT: or t1, t2, t3
+; RV32I-NEXT: and a4, t4, a7
+; RV32I-NEXT: and a5, s4, t6
+; RV32I-NEXT: bnez t5, .LBB1_46
+; RV32I-NEXT: # %bb.45: # %udiv-end1
+; RV32I-NEXT: mv a1, a0
+; RV32I-NEXT: .LBB1_46: # %udiv-end1
+; RV32I-NEXT: seqz a0, t0
+; RV32I-NEXT: seqz t0, t1
+; RV32I-NEXT: sltu a7, a6, a1
+; RV32I-NEXT: sub t2, a4, a5
+; RV32I-NEXT: mv t1, a7
+; RV32I-NEXT: beq a4, a5, .LBB1_48
+; RV32I-NEXT: # %bb.47: # %udiv-end1
+; RV32I-NEXT: sltu t1, a4, a5
+; RV32I-NEXT: .LBB1_48: # %udiv-end1
+; RV32I-NEXT: sub a7, t2, a7
+; RV32I-NEXT: or t0, a0, t0
+; RV32I-NEXT: neg a4, t1
+; RV32I-NEXT: seqz a5, t1
+; RV32I-NEXT: addi a5, a5, -1
+; RV32I-NEXT: or t1, a4, a5
+; RV32I-NEXT: sub a0, a6, a1
+; RV32I-NEXT: beqz t1, .LBB1_50
+; RV32I-NEXT: # %bb.49: # %udiv-end1
+; RV32I-NEXT: snez a1, t1
+; RV32I-NEXT: j .LBB1_51
+; RV32I-NEXT: .LBB1_50:
+; RV32I-NEXT: snez a1, a7
+; RV32I-NEXT: sltiu a6, a0, 128
+; RV32I-NEXT: xori a6, a6, 1
+; RV32I-NEXT: or a1, a6, a1
+; RV32I-NEXT: .LBB1_51: # %udiv-end1
+; RV32I-NEXT: or a1, t0, a1
+; RV32I-NEXT: addi a6, a1, -1
+; RV32I-NEXT: and s5, a6, s5
+; RV32I-NEXT: and s7, a6, s6
+; RV32I-NEXT: and s4, a6, s3
+; RV32I-NEXT: and s6, a6, s2
+; RV32I-NEXT: bnez a1, .LBB1_62
+; RV32I-NEXT: # %bb.52: # %udiv-end1
+; RV32I-NEXT: xori a1, a0, 127
+; RV32I-NEXT: or a1, a1, a4
+; RV32I-NEXT: or a6, a7, a5
+; RV32I-NEXT: or a1, a1, a6
+; RV32I-NEXT: beqz a1, .LBB1_62
+; RV32I-NEXT: # %bb.53: # %udiv-bb1
+; RV32I-NEXT: addi a1, a0, 1
+; RV32I-NEXT: sub a6, s8, a0
+; RV32I-NEXT: sw zero, 88(sp)
+; RV32I-NEXT: sw zero, 92(sp)
+; RV32I-NEXT: sw zero, 96(sp)
+; RV32I-NEXT: sw zero, 100(sp)
+; RV32I-NEXT: lw s2, 40(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sw s2, 104(sp)
+; RV32I-NEXT: sw s3, 108(sp)
+; RV32I-NEXT: lw s5, 32(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sw s5, 112(sp)
+; RV32I-NEXT: lw s4, 36(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sw s4, 116(sp)
+; RV32I-NEXT: addi t0, sp, 104
+; RV32I-NEXT: seqz a0, a1
+; RV32I-NEXT: srli t1, a6, 3
+; RV32I-NEXT: andi t2, a6, 31
+; RV32I-NEXT: add a0, a7, a0
+; RV32I-NEXT: andi a7, t1, 12
+; RV32I-NEXT: xori t1, t2, 31
+; RV32I-NEXT: or t2, a1, a0
+; RV32I-NEXT: sub a7, t0, a7
+; RV32I-NEXT: seqz t0, t2
+; RV32I-NEXT: lw t3, 0(a7)
+; RV32I-NEXT: lw t4, 4(a7)
+; RV32I-NEXT: lw t5, 8(a7)
+; RV32I-NEXT: lw t2, 12(a7)
+; RV32I-NEXT: add a7, a4, t0
+; RV32I-NEXT: sltu a4, a7, a4
+; RV32I-NEXT: or t0, a1, a7
+; RV32I-NEXT: add a5, a5, a4
+; RV32I-NEXT: or a4, a0, a5
+; RV32I-NEXT: srli t6, t5, 1
+; RV32I-NEXT: srli s0, t4, 1
+; RV32I-NEXT: or s1, t0, a4
+; RV32I-NEXT: srli a4, t3, 1
+; RV32I-NEXT: srl t0, t6, t1
+; RV32I-NEXT: srl t6, s0, t1
+; RV32I-NEXT: srl a4, a4, t1
+; RV32I-NEXT: sll t1, t2, a6
+; RV32I-NEXT: or t2, t1, t0
+; RV32I-NEXT: sll t0, t5, a6
+; RV32I-NEXT: sll t1, t4, a6
+; RV32I-NEXT: or t0, t0, t6
+; RV32I-NEXT: or t1, t1, a4
+; RV32I-NEXT: sll a4, t3, a6
+; RV32I-NEXT: li a6, 0
+; RV32I-NEXT: beqz s1, .LBB1_61
+; RV32I-NEXT: # %bb.54: # %udiv-preheader
+; RV32I-NEXT: li t3, 0
+; RV32I-NEXT: li t4, 0
+; RV32I-NEXT: li t5, 0
+; RV32I-NEXT: sw zero, 72(sp)
+; RV32I-NEXT: sw zero, 76(sp)
+; RV32I-NEXT: sw zero, 80(sp)
+; RV32I-NEXT: sw zero, 84(sp)
+; RV32I-NEXT: sw s2, 56(sp)
+; RV32I-NEXT: sw s3, 60(sp)
+; RV32I-NEXT: sw s5, 64(sp)
+; RV32I-NEXT: sw s4, 68(sp)
+; RV32I-NEXT: srli t6, a1, 3
+; RV32I-NEXT: addi s0, sp, 56
+; RV32I-NEXT: andi t6, t6, 12
+; RV32I-NEXT: add t6, s0, t6
+; RV32I-NEXT: lw s0, 4(t6)
+; RV32I-NEXT: lw s1, 8(t6)
+; RV32I-NEXT: lw s2, 12(t6)
+; RV32I-NEXT: lw s3, 0(t6)
+; RV32I-NEXT: andi t6, a1, 31
+; RV32I-NEXT: xori t6, t6, 31
+; RV32I-NEXT: slli s4, s2, 1
+; RV32I-NEXT: slli s5, s1, 1
+; RV32I-NEXT: slli s6, s0, 1
+; RV32I-NEXT: sll s4, s4, t6
+; RV32I-NEXT: sll s5, s5, t6
+; RV32I-NEXT: sll s6, s6, t6
+; RV32I-NEXT: seqz t6, s11
+; RV32I-NEXT: srl s1, s1, a1
+; RV32I-NEXT: or s7, s1, s4
+; RV32I-NEXT: or s1, s11, s10
+; RV32I-NEXT: sub t6, s10, t6
+; RV32I-NEXT: seqz s1, s1
+; RV32I-NEXT: srl s0, s0, a1
+; RV32I-NEXT: or s8, s0, s5
+; RV32I-NEXT: sub s4, s9, s1
+; RV32I-NEXT: sltu s0, s9, s1
+; RV32I-NEXT: sub s5, ra, s0
+; RV32I-NEXT: srl s0, s3, a1
+; RV32I-NEXT: srl ra, s2, a1
+; RV32I-NEXT: or s9, s0, s6
+; RV32I-NEXT: addi s0, s11, -1
+; RV32I-NEXT: sw s0, 44(sp) # 4-byte Folded Spill
+; RV32I-NEXT: j .LBB1_56
+; RV32I-NEXT: .LBB1_55: # %udiv-do-while
+; RV32I-NEXT: # in Loop: Header=BB1_56 Depth=1
+; RV32I-NEXT: srli s2, t0, 31
+; RV32I-NEXT: slli t2, t2, 1
+; RV32I-NEXT: sub s3, s10, s6
+; RV32I-NEXT: srli s6, t1, 31
+; RV32I-NEXT: slli t0, t0, 1
+; RV32I-NEXT: or t2, t2, s2
+; RV32I-NEXT: srli s2, a4, 31
+; RV32I-NEXT: slli t1, t1, 1
+; RV32I-NEXT: slli a4, a4, 1
+; RV32I-NEXT: or t0, t0, s6
+; RV32I-NEXT: lw s6, 48(sp) # 4-byte Folded Reload
+; RV32I-NEXT: and s6, s8, s6
+; RV32I-NEXT: or t1, t1, s2
+; RV32I-NEXT: and s2, s8, a3
+; RV32I-NEXT: or a4, a6, a4
+; RV32I-NEXT: sub s10, s0, s2
+; RV32I-NEXT: sltu s0, s0, s2
+; RV32I-NEXT: or s2, a1, a0
+; RV32I-NEXT: sub s6, s7, s6
+; RV32I-NEXT: seqz s7, a1
+; RV32I-NEXT: addi a1, a1, -1
+; RV32I-NEXT: andi a6, s8, 1
+; RV32I-NEXT: sub s8, s3, s1
+; RV32I-NEXT: seqz s1, s2
+; RV32I-NEXT: sub a0, a0, s7
+; RV32I-NEXT: or t1, t3, t1
+; RV32I-NEXT: or t0, t4, t0
+; RV32I-NEXT: or t2, t5, t2
+; RV32I-NEXT: sub s7, s10, ra
+; RV32I-NEXT: sltu t3, s10, ra
+; RV32I-NEXT: sub t4, s6, s0
+; RV32I-NEXT: sltu t5, a7, s1
+; RV32I-NEXT: sub a7, a7, s1
+; RV32I-NEXT: sub ra, t4, t3
+; RV32I-NEXT: sub a5, a5, t5
+; RV32I-NEXT: or t3, a0, a5
+; RV32I-NEXT: or t4, a1, a7
+; RV32I-NEXT: or s0, t4, t3
+; RV32I-NEXT: sub s9, s9, s11
+; RV32I-NEXT: li t3, 0
+; RV32I-NEXT: li t4, 0
+; RV32I-NEXT: li t5, 0
+; RV32I-NEXT: mv s11, a2
+; RV32I-NEXT: beqz s0, .LBB1_61
+; RV32I-NEXT: .LBB1_56: # %udiv-do-while
+; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-NEXT: srli s0, s9, 31
+; RV32I-NEXT: slli s1, s8, 1
+; RV32I-NEXT: slli s9, s9, 1
+; RV32I-NEXT: or s10, s1, s0
+; RV32I-NEXT: srli s0, t2, 31
+; RV32I-NEXT: or s9, s9, s0
+; RV32I-NEXT: beq t6, s10, .LBB1_58
+; RV32I-NEXT: # %bb.57: # %udiv-do-while
+; RV32I-NEXT: # in Loop: Header=BB1_56 Depth=1
+; RV32I-NEXT: sltu s1, t6, s10
+; RV32I-NEXT: j .LBB1_59
+; RV32I-NEXT: .LBB1_58: # in Loop: Header=BB1_56 Depth=1
+; RV32I-NEXT: lw s0, 44(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sltu s1, s0, s9
+; RV32I-NEXT: .LBB1_59: # %udiv-do-while
+; RV32I-NEXT: # in Loop: Header=BB1_56 Depth=1
+; RV32I-NEXT: srli s0, s7, 31
+; RV32I-NEXT: slli ra, ra, 1
+; RV32I-NEXT: srli s2, s8, 31
+; RV32I-NEXT: slli s3, s7, 1
+; RV32I-NEXT: or s7, ra, s0
+; RV32I-NEXT: or s0, s3, s2
+; RV32I-NEXT: sub s2, s4, s0
+; RV32I-NEXT: sltu s3, s4, s0
+; RV32I-NEXT: sub s6, s5, s7
+; RV32I-NEXT: sltu s1, s2, s1
+; RV32I-NEXT: sub s2, s6, s3
+; RV32I-NEXT: sub s1, s2, s1
+; RV32I-NEXT: srai s8, s1, 31
+; RV32I-NEXT: and s11, s8, s11
+; RV32I-NEXT: lw s1, 52(sp) # 4-byte Folded Reload
+; RV32I-NEXT: and s6, s8, s1
+; RV32I-NEXT: sltu s1, s9, s11
+; RV32I-NEXT: mv ra, s1
+; RV32I-NEXT: beq s10, s6, .LBB1_55
+; RV32I-NEXT: # %bb.60: # %udiv-do-while
+; RV32I-NEXT: # in Loop: Header=BB1_56 Depth=1
+; RV32I-NEXT: sltu ra, s10, s6
+; RV32I-NEXT: j .LBB1_55
+; RV32I-NEXT: .LBB1_61: # %udiv-loop-exit
+; RV32I-NEXT: srli a0, a4, 31
+; RV32I-NEXT: slli a1, t1, 1
+; RV32I-NEXT: srli a5, t1, 31
+; RV32I-NEXT: or s4, a1, a0
+; RV32I-NEXT: slli a0, t0, 1
+; RV32I-NEXT: srli a1, t0, 31
+; RV32I-NEXT: slli t2, t2, 1
+; RV32I-NEXT: slli a4, a4, 1
+; RV32I-NEXT: or s7, a0, a5
+; RV32I-NEXT: or s5, t2, a1
+; RV32I-NEXT: or s6, a6, a4
+; RV32I-NEXT: lw s10, 52(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s3, 28(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw ra, 48(sp) # 4-byte Folded Reload
+; RV32I-NEXT: mv s9, a3
+; RV32I-NEXT: .LBB1_62: # %udiv-end
+; RV32I-NEXT: mv a0, s6
+; RV32I-NEXT: mv a1, s4
+; RV32I-NEXT: mv a2, s9
+; RV32I-NEXT: mv a3, ra
+; RV32I-NEXT: call __muldi3
+; RV32I-NEXT: mv s8, a0
+; RV32I-NEXT: mv s9, a1
+; RV32I-NEXT: mv a0, s7
+; RV32I-NEXT: mv a1, s5
+; RV32I-NEXT: mv a2, s11
+; RV32I-NEXT: mv a3, s10
+; RV32I-NEXT: call __muldi3
+; RV32I-NEXT: add a1, a1, s9
+; RV32I-NEXT: add s9, a0, s8
+; RV32I-NEXT: sltu s8, s9, a0
+; RV32I-NEXT: add s8, a1, s8
+; RV32I-NEXT: mv a0, s11
+; RV32I-NEXT: li a1, 0
+; RV32I-NEXT: mv a2, s6
+; RV32I-NEXT: li a3, 0
+; RV32I-NEXT: call __muldi3
+; RV32I-NEXT: mv s5, a0
+; RV32I-NEXT: mv s7, a1
+; RV32I-NEXT: mv a0, s10
+; RV32I-NEXT: li a1, 0
+; RV32I-NEXT: mv a2, s6
+; RV32I-NEXT: li a3, 0
+; RV32I-NEXT: call __muldi3
+; RV32I-NEXT: add s0, a0, s7
+; RV32I-NEXT: sltu a0, s0, a0
+; RV32I-NEXT: add s1, a1, a0
+; RV32I-NEXT: mv a0, s11
+; RV32I-NEXT: li a1, 0
+; RV32I-NEXT: mv a2, s4
+; RV32I-NEXT: li a3, 0
+; RV32I-NEXT: call __muldi3
+; RV32I-NEXT: add s0, a0, s0
+; RV32I-NEXT: sltu a0, s0, a0
+; RV32I-NEXT: add a0, a1, a0
+; RV32I-NEXT: add s2, s1, a0
+; RV32I-NEXT: mv a0, s10
+; RV32I-NEXT: li a1, 0
+; RV32I-NEXT: mv a2, s4
+; RV32I-NEXT: li a3, 0
+; RV32I-NEXT: call __muldi3
+; RV32I-NEXT: add a3, a0, s2
+; RV32I-NEXT: sltu a4, s2, s1
+; RV32I-NEXT: lw a5, 40(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sltu a2, a5, s5
+; RV32I-NEXT: sltu a0, a3, a0
+; RV32I-NEXT: add a1, a1, a4
+; RV32I-NEXT: add s9, a3, s9
+; RV32I-NEXT: add a0, a1, a0
+; RV32I-NEXT: sltu a1, s9, a3
+; RV32I-NEXT: lw a6, 32(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sltu a3, a6, s9
+; RV32I-NEXT: add a0, a0, s8
+; RV32I-NEXT: lw a4, 36(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sub a1, a4, a1
+; RV32I-NEXT: sub a0, a1, a0
+; RV32I-NEXT: sub a0, a0, a3
+; RV32I-NEXT: sub a1, a6, s9
+; RV32I-NEXT: mv a3, a2
+; RV32I-NEXT: beq s3, s0, .LBB1_64
+; RV32I-NEXT: # %bb.63: # %udiv-end
+; RV32I-NEXT: sltu a3, s3, s0
+; RV32I-NEXT: .LBB1_64: # %udiv-end
+; RV32I-NEXT: sltu a4, a1, a3
+; RV32I-NEXT: sub a1, a1, a3
+; RV32I-NEXT: sub a3, s3, s0
+; RV32I-NEXT: sub a5, a5, s5
+; RV32I-NEXT: lw a6, 20(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw a7, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sw a7, 0(a6)
+; RV32I-NEXT: lw a7, 16(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sw a7, 4(a6)
+; RV32I-NEXT: lw a7, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sw a7, 8(a6)
+; RV32I-NEXT: lw a7, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sw a7, 12(a6)
+; RV32I-NEXT: sub a0, a0, a4
+; RV32I-NEXT: sub a3, a3, a2
+; RV32I-NEXT: lw a2, 24(sp) # 4-byte Folded Reload
+; RV32I-NEXT: sw a5, 0(a2)
+; RV32I-NEXT: sw a3, 4(a2)
+; RV32I-NEXT: sw a1, 8(a2)
+; RV32I-NEXT: sw a0, 12(a2)
+; RV32I-NEXT: lw ra, 236(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s0, 232(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s1, 228(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s2, 224(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s3, 220(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s4, 216(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s5, 212(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s6, 208(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s7, 204(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s8, 200(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s9, 196(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s10, 192(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s11, 188(sp) # 4-byte Folded Reload
+; RV32I-NEXT: .cfi_restore ra
+; RV32I-NEXT: .cfi_restore s0
+; RV32I-NEXT: .cfi_restore s1
+; RV32I-NEXT: .cfi_restore s2
+; RV32I-NEXT: .cfi_restore s3
+; RV32I-NEXT: .cfi_restore s4
+; RV32I-NEXT: .cfi_restore s5
+; RV32I-NEXT: .cfi_restore s6
+; RV32I-NEXT: .cfi_restore s7
+; RV32I-NEXT: .cfi_restore s8
+; RV32I-NEXT: .cfi_restore s9
+; RV32I-NEXT: .cfi_restore s10
+; RV32I-NEXT: .cfi_restore s11
+; RV32I-NEXT: addi sp, sp, 240
+; RV32I-NEXT: .cfi_def_cfa_offset 0
+; RV32I-NEXT: ret
+;
+; RV32M-LABEL: udivrem_i128:
+; RV32M: # %bb.0: # %_udiv-special-cases_udiv-special-cases
+; RV32M-NEXT: addi sp, sp, -256
+; RV32M-NEXT: .cfi_def_cfa_offset 256
+; RV32M-NEXT: sw ra, 252(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw s0, 248(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw s1, 244(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw s2, 240(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw s3, 236(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw s4, 232(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw s5, 228(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw s6, 224(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw s7, 220(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw s8, 216(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw s9, 212(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw s10, 208(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw s11, 204(sp) # 4-byte Folded Spill
+; RV32M-NEXT: .cfi_offset ra, -4
+; RV32M-NEXT: .cfi_offset s0, -8
+; RV32M-NEXT: .cfi_offset s1, -12
+; RV32M-NEXT: .cfi_offset s2, -16
+; RV32M-NEXT: .cfi_offset s3, -20
+; RV32M-NEXT: .cfi_offset s4, -24
+; RV32M-NEXT: .cfi_offset s5, -28
+; RV32M-NEXT: .cfi_offset s6, -32
+; RV32M-NEXT: .cfi_offset s7, -36
+; RV32M-NEXT: .cfi_offset s8, -40
+; RV32M-NEXT: .cfi_offset s9, -44
+; RV32M-NEXT: .cfi_offset s10, -48
+; RV32M-NEXT: .cfi_offset s11, -52
+; RV32M-NEXT: sw a1, 32(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
+; RV32M-NEXT: lw a6, 0(a3)
+; RV32M-NEXT: lw s11, 4(a3)
+; RV32M-NEXT: lw ra, 8(a3)
+; RV32M-NEXT: lw s10, 12(a3)
+; RV32M-NEXT: lui a0, 349525
+; RV32M-NEXT: lui a1, 209715
+; RV32M-NEXT: addi t5, a0, 1365
+; RV32M-NEXT: addi t4, a1, 819
+; RV32M-NEXT: lui t2, 61681
+; RV32M-NEXT: addi t2, t2, -241
+; RV32M-NEXT: lui t3, 4112
+; RV32M-NEXT: addi t3, t3, 257
+; RV32M-NEXT: bnez s11, .LBB1_2
+; RV32M-NEXT: # %bb.1: # %_udiv-special-cases_udiv-special-cases
+; RV32M-NEXT: srli a0, a6, 1
+; RV32M-NEXT: or a0, a6, a0
+; RV32M-NEXT: srli a1, a0, 2
+; RV32M-NEXT: or a0, a0, a1
+; RV32M-NEXT: srli a1, a0, 4
+; RV32M-NEXT: or a0, a0, a1
+; RV32M-NEXT: srli a1, a0, 8
+; RV32M-NEXT: or a0, a0, a1
+; RV32M-NEXT: srli a1, a0, 16
+; RV32M-NEXT: or a0, a0, a1
+; RV32M-NEXT: not a0, a0
+; RV32M-NEXT: srli a1, a0, 1
+; RV32M-NEXT: and a1, a1, t5
+; RV32M-NEXT: sub a0, a0, a1
+; RV32M-NEXT: and a1, a0, t4
+; RV32M-NEXT: srli a0, a0, 2
+; RV32M-NEXT: and a0, a0, t4
+; RV32M-NEXT: add a0, a1, a0
+; RV32M-NEXT: srli a1, a0, 4
+; RV32M-NEXT: add a0, a0, a1
+; RV32M-NEXT: and a0, a0, t2
+; RV32M-NEXT: mul a0, a0, t3
+; RV32M-NEXT: srli a0, a0, 24
+; RV32M-NEXT: addi a1, a0, 32
+; RV32M-NEXT: j .LBB1_3
+; RV32M-NEXT: .LBB1_2:
+; RV32M-NEXT: srli a0, s11, 1
+; RV32M-NEXT: or a0, s11, a0
+; RV32M-NEXT: srli a1, a0, 2
+; RV32M-NEXT: or a0, a0, a1
+; RV32M-NEXT: srli a1, a0, 4
+; RV32M-NEXT: or a0, a0, a1
+; RV32M-NEXT: srli a1, a0, 8
+; RV32M-NEXT: or a0, a0, a1
+; RV32M-NEXT: srli a1, a0, 16
+; RV32M-NEXT: or a0, a0, a1
+; RV32M-NEXT: not a0, a0
+; RV32M-NEXT: srli a1, a0, 1
+; RV32M-NEXT: and a1, a1, t5
+; RV32M-NEXT: sub a0, a0, a1
+; RV32M-NEXT: and a1, a0, t4
+; RV32M-NEXT: srli a0, a0, 2
+; RV32M-NEXT: and a0, a0, t4
+; RV32M-NEXT: add a0, a1, a0
+; RV32M-NEXT: srli a1, a0, 4
+; RV32M-NEXT: add a0, a0, a1
+; RV32M-NEXT: and a0, a0, t2
+; RV32M-NEXT: mul a1, a0, t3
+; RV32M-NEXT: srli a1, a1, 24
+; RV32M-NEXT: .LBB1_3: # %_udiv-special-cases_udiv-special-cases
+; RV32M-NEXT: lw s8, 4(a2)
+; RV32M-NEXT: or t6, ra, s10
+; RV32M-NEXT: bnez s10, .LBB1_5
+; RV32M-NEXT: # %bb.4: # %_udiv-special-cases_udiv-special-cases
+; RV32M-NEXT: srli a0, ra, 1
+; RV32M-NEXT: or a0, ra, a0
+; RV32M-NEXT: srli a7, a0, 2
+; RV32M-NEXT: or a0, a0, a7
+; RV32M-NEXT: srli a7, a0, 4
+; RV32M-NEXT: or a0, a0, a7
+; RV32M-NEXT: srli a7, a0, 8
+; RV32M-NEXT: or a0, a0, a7
+; RV32M-NEXT: srli a7, a0, 16
+; RV32M-NEXT: or a0, a0, a7
+; RV32M-NEXT: not a0, a0
+; RV32M-NEXT: srli a7, a0, 1
+; RV32M-NEXT: and a7, a7, t5
+; RV32M-NEXT: sub a0, a0, a7
+; RV32M-NEXT: and a7, a0, t4
+; RV32M-NEXT: srli a0, a0, 2
+; RV32M-NEXT: and a0, a0, t4
+; RV32M-NEXT: add a0, a7, a0
+; RV32M-NEXT: srli a7, a0, 4
+; RV32M-NEXT: add a0, a0, a7
+; RV32M-NEXT: and a0, a0, t2
+; RV32M-NEXT: mul a0, a0, t3
+; RV32M-NEXT: srli a0, a0, 24
+; RV32M-NEXT: addi a0, a0, 32
+; RV32M-NEXT: j .LBB1_6
+; RV32M-NEXT: .LBB1_5:
+; RV32M-NEXT: srli a0, s10, 1
+; RV32M-NEXT: or a0, s10, a0
+; RV32M-NEXT: srli a7, a0, 2
+; RV32M-NEXT: or a0, a0, a7
+; RV32M-NEXT: srli a7, a0, 4
+; RV32M-NEXT: or a0, a0, a7
+; RV32M-NEXT: srli a7, a0, 8
+; RV32M-NEXT: or a0, a0, a7
+; RV32M-NEXT: srli a7, a0, 16
+; RV32M-NEXT: or a0, a0, a7
+; RV32M-NEXT: not a0, a0
+; RV32M-NEXT: srli a7, a0, 1
+; RV32M-NEXT: and a7, a7, t5
+; RV32M-NEXT: sub a0, a0, a7
+; RV32M-NEXT: and a7, a0, t4
+; RV32M-NEXT: srli a0, a0, 2
+; RV32M-NEXT: and a0, a0, t4
+; RV32M-NEXT: add a0, a7, a0
+; RV32M-NEXT: srli a7, a0, 4
+; RV32M-NEXT: add a0, a0, a7
+; RV32M-NEXT: and a0, a0, t2
+; RV32M-NEXT: mul a0, a0, t3
+; RV32M-NEXT: srli a0, a0, 24
+; RV32M-NEXT: .LBB1_6: # %_udiv-special-cases_udiv-special-cases
+; RV32M-NEXT: lw s7, 12(a2)
+; RV32M-NEXT: addi t0, a1, 64
+; RV32M-NEXT: bnez t6, .LBB1_8
+; RV32M-NEXT: # %bb.7: # %_udiv-special-cases_udiv-special-cases
+; RV32M-NEXT: mv a0, t0
+; RV32M-NEXT: .LBB1_8: # %_udiv-special-cases_udiv-special-cases
+; RV32M-NEXT: lw s9, 0(a2)
+; RV32M-NEXT: lw s6, 8(a2)
+; RV32M-NEXT: snez s3, t6
+; RV32M-NEXT: bnez s8, .LBB1_10
+; RV32M-NEXT: # %bb.9: # %_udiv-special-cases_udiv-special-cases
+; RV32M-NEXT: srli a2, s9, 1
+; RV32M-NEXT: or a2, s9, a2
+; RV32M-NEXT: srli a7, a2, 2
+; RV32M-NEXT: or a2, a2, a7
+; RV32M-NEXT: srli a7, a2, 4
+; RV32M-NEXT: or a2, a2, a7
+; RV32M-NEXT: srli a7, a2, 8
+; RV32M-NEXT: or a2, a2, a7
+; RV32M-NEXT: srli a7, a2, 16
+; RV32M-NEXT: or a2, a2, a7
+; RV32M-NEXT: not a2, a2
+; RV32M-NEXT: srli a7, a2, 1
+; RV32M-NEXT: and a7, a7, t5
+; RV32M-NEXT: sub a2, a2, a7
+; RV32M-NEXT: and a7, a2, t4
+; RV32M-NEXT: srli a2, a2, 2
+; RV32M-NEXT: and a2, a2, t4
+; RV32M-NEXT: add a2, a7, a2
+; RV32M-NEXT: srli a7, a2, 4
+; RV32M-NEXT: add a2, a2, a7
+; RV32M-NEXT: and a2, a2, t2
+; RV32M-NEXT: mul a2, a2, t3
+; RV32M-NEXT: srli a2, a2, 24
+; RV32M-NEXT: addi a2, a2, 32
+; RV32M-NEXT: j .LBB1_11
+; RV32M-NEXT: .LBB1_10:
+; RV32M-NEXT: srli a2, s8, 1
+; RV32M-NEXT: or a2, s8, a2
+; RV32M-NEXT: srli a7, a2, 2
+; RV32M-NEXT: or a2, a2, a7
+; RV32M-NEXT: srli a7, a2, 4
+; RV32M-NEXT: or a2, a2, a7
+; RV32M-NEXT: srli a7, a2, 8
+; RV32M-NEXT: or a2, a2, a7
+; RV32M-NEXT: srli a7, a2, 16
+; RV32M-NEXT: or a2, a2, a7
+; RV32M-NEXT: not a2, a2
+; RV32M-NEXT: srli a7, a2, 1
+; RV32M-NEXT: and a7, a7, t5
+; RV32M-NEXT: sub a2, a2, a7
+; RV32M-NEXT: and a7, a2, t4
+; RV32M-NEXT: srli a2, a2, 2
+; RV32M-NEXT: and a2, a2, t4
+; RV32M-NEXT: add a2, a7, a2
+; RV32M-NEXT: srli a7, a2, 4
+; RV32M-NEXT: add a2, a2, a7
+; RV32M-NEXT: and a2, a2, t2
+; RV32M-NEXT: mul a2, a2, t3
+; RV32M-NEXT: srli a2, a2, 24
+; RV32M-NEXT: .LBB1_11: # %_udiv-special-cases_udiv-special-cases
+; RV32M-NEXT: or t6, s11, s10
+; RV32M-NEXT: or s0, a6, ra
+; RV32M-NEXT: or s1, s8, s7
+; RV32M-NEXT: or s2, s9, s6
+; RV32M-NEXT: sltu t0, t0, a1
+; RV32M-NEXT: addi s3, s3, -1
+; RV32M-NEXT: addi a1, a2, 64
+; RV32M-NEXT: or s4, s6, s7
+; RV32M-NEXT: sltu a7, a1, a2
+; RV32M-NEXT: snez a2, s4
+; RV32M-NEXT: addi a2, a2, -1
+; RV32M-NEXT: bnez s7, .LBB1_13
+; RV32M-NEXT: # %bb.12: # %_udiv-special-cases_udiv-special-cases
+; RV32M-NEXT: srli t1, s6, 1
+; RV32M-NEXT: or t1, s6, t1
+; RV32M-NEXT: srli s5, t1, 2
+; RV32M-NEXT: or t1, t1, s5
+; RV32M-NEXT: srli s5, t1, 4
+; RV32M-NEXT: or t1, t1, s5
+; RV32M-NEXT: srli s5, t1, 8
+; RV32M-NEXT: or t1, t1, s5
+; RV32M-NEXT: srli s5, t1, 16
+; RV32M-NEXT: or t1, t1, s5
+; RV32M-NEXT: not t1, t1
+; RV32M-NEXT: srli s5, t1, 1
+; RV32M-NEXT: and t5, s5, t5
+; RV32M-NEXT: sub t1, t1, t5
+; RV32M-NEXT: and t5, t1, t4
+; RV32M-NEXT: srli t1, t1, 2
+; RV32M-NEXT: and t1, t1, t4
+; RV32M-NEXT: add t1, t5, t1
+; RV32M-NEXT: srli t4, t1, 4
+; RV32M-NEXT: add t1, t1, t4
+; RV32M-NEXT: and t1, t1, t2
+; RV32M-NEXT: mul t1, t1, t3
+; RV32M-NEXT: srli t1, t1, 24
+; RV32M-NEXT: addi t1, t1, 32
+; RV32M-NEXT: j .LBB1_14
+; RV32M-NEXT: .LBB1_13:
+; RV32M-NEXT: srli t1, s7, 1
+; RV32M-NEXT: or t1, s7, t1
+; RV32M-NEXT: srli s5, t1, 2
+; RV32M-NEXT: or t1, t1, s5
+; RV32M-NEXT: srli s5, t1, 4
+; RV32M-NEXT: or t1, t1, s5
+; RV32M-NEXT: srli s5, t1, 8
+; RV32M-NEXT: or t1, t1, s5
+; RV32M-NEXT: srli s5, t1, 16
+; RV32M-NEXT: or t1, t1, s5
+; RV32M-NEXT: not t1, t1
+; RV32M-NEXT: srli s5, t1, 1
+; RV32M-NEXT: and t5, s5, t5
+; RV32M-NEXT: sub t1, t1, t5
+; RV32M-NEXT: and t5, t1, t4
+; RV32M-NEXT: srli t1, t1, 2
+; RV32M-NEXT: and t1, t1, t4
+; RV32M-NEXT: add t1, t5, t1
+; RV32M-NEXT: srli t4, t1, 4
+; RV32M-NEXT: add t1, t1, t4
+; RV32M-NEXT: and t1, t1, t2
+; RV32M-NEXT: mul t1, t1, t3
+; RV32M-NEXT: srli t1, t1, 24
+; RV32M-NEXT: .LBB1_14: # %_udiv-special-cases_udiv-special-cases
+; RV32M-NEXT: or t2, s0, t6
+; RV32M-NEXT: or t3, s2, s1
+; RV32M-NEXT: and t0, s3, t0
+; RV32M-NEXT: and a2, a2, a7
+; RV32M-NEXT: bnez s4, .LBB1_16
+; RV32M-NEXT: # %bb.15: # %_udiv-special-cases_udiv-special-cases
+; RV32M-NEXT: mv t1, a1
+; RV32M-NEXT: .LBB1_16: # %_udiv-special-cases_udiv-special-cases
+; RV32M-NEXT: seqz a1, t2
+; RV32M-NEXT: seqz a7, t3
+; RV32M-NEXT: sltu t2, a0, t1
+; RV32M-NEXT: sub t3, t0, a2
+; RV32M-NEXT: mv t4, t2
+; RV32M-NEXT: beq t0, a2, .LBB1_18
+; RV32M-NEXT: # %bb.17: # %_udiv-special-cases_udiv-special-cases
+; RV32M-NEXT: sltu t4, t0, a2
+; RV32M-NEXT: .LBB1_18: # %_udiv-special-cases_udiv-special-cases
+; RV32M-NEXT: sub t3, t3, t2
+; RV32M-NEXT: or a2, a1, a7
+; RV32M-NEXT: neg a1, t4
+; RV32M-NEXT: seqz t4, t4
+; RV32M-NEXT: addi t4, t4, -1
+; RV32M-NEXT: or a7, a1, t4
+; RV32M-NEXT: sub a0, a0, t1
+; RV32M-NEXT: beqz a7, .LBB1_20
+; RV32M-NEXT: # %bb.19: # %_udiv-special-cases_udiv-special-cases
+; RV32M-NEXT: snez a7, a7
+; RV32M-NEXT: j .LBB1_21
+; RV32M-NEXT: .LBB1_20:
+; RV32M-NEXT: snez a7, t3
+; RV32M-NEXT: sltiu t0, a0, 128
+; RV32M-NEXT: xori t0, t0, 1
+; RV32M-NEXT: or a7, t0, a7
+; RV32M-NEXT: .LBB1_21: # %_udiv-special-cases_udiv-special-cases
+; RV32M-NEXT: li s5, 127
+; RV32M-NEXT: or a2, a2, a7
+; RV32M-NEXT: addi a7, a2, -1
+; RV32M-NEXT: and a4, a7, s7
+; RV32M-NEXT: and a5, a7, s6
+; RV32M-NEXT: and a3, a7, s8
+; RV32M-NEXT: and t0, a7, s9
+; RV32M-NEXT: sw s10, 68(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw ra, 64(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw s11, 60(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw s8, 48(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw s9, 44(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw s6, 40(sp) # 4-byte Folded Spill
+; RV32M-NEXT: sw s7, 36(sp) # 4-byte Folded Spill
+; RV32M-NEXT: bnez a2, .LBB1_33
+; RV32M-NEXT: # %bb.22: # %_udiv-special-cases_udiv-special-cases
+; RV32M-NEXT: xori a2, a0, 127
+; RV32M-NEXT: or a2, a2, a1
+; RV32M-NEXT: or a7, t3, t4
+; RV32M-NEXT: or a2, a2, a7
+; RV32M-NEXT: beqz a2, .LBB1_33
+; RV32M-NEXT: # %bb.23: # %udiv-bb15
+; RV32M-NEXT: addi t2, a0, 1
+; RV32M-NEXT: sub a0, s5, a0
+; RV32M-NEXT: sw zero, 168(sp)
+; RV32M-NEXT: sw zero, 172(sp)
+; RV32M-NEXT: sw zero, 176(sp)
+; RV32M-NEXT: sw zero, 180(sp)
+; RV32M-NEXT: sw s9, 184(sp)
+; RV32M-NEXT: sw s8, 188(sp)
+; RV32M-NEXT: sw s6, 192(sp)
+; RV32M-NEXT: sw s7, 196(sp)
+; RV32M-NEXT: addi a2, sp, 184
+; RV32M-NEXT: seqz a3, t2
+; RV32M-NEXT: srli a4, a0, 3
+; RV32M-NEXT: andi a5, a0, 31
+; RV32M-NEXT: add t3, t3, a3
+; RV32M-NEXT: andi a4, a4, 12
+; RV32M-NEXT: xori a3, a5, 31
+; RV32M-NEXT: or a5, t2, t3
+; RV32M-NEXT: sub a2, a2, a4
+; RV32M-NEXT: seqz s1, a5
+; RV32M-NEXT: lw a4, 0(a2)
+; RV32M-NEXT: lw a5, 4(a2)
+; RV32M-NEXT: lw a7, 8(a2)
+; RV32M-NEXT: lw a2, 12(a2)
+; RV32M-NEXT: add s1, a1, s1
+; RV32M-NEXT: sltu t5, s1, a1
+; RV32M-NEXT: or a1, t2, s1
+; RV32M-NEXT: add t5, t4, t5
+; RV32M-NEXT: or t0, t3, t5
+; RV32M-NEXT: srli t1, a7, 1
+; RV32M-NEXT: srli t4, a5, 1
+; RV32M-NEXT: or a1, a1, t0
+; RV32M-NEXT: srli t0, a4, 1
+; RV32M-NEXT: srl t1, t1, a3
+; RV32M-NEXT: srl t4, t4, a3
+; RV32M-NEXT: srl a3, t0, a3
+; RV32M-NEXT: sll a2, a2, a0
+; RV32M-NEXT: or s4, a2, t1
+; RV32M-NEXT: sll a2, a7, a0
+; RV32M-NEXT: sll a5, a5, a0
+; RV32M-NEXT: or s2, a2, t4
+; RV32M-NEXT: or s3, a5, a3
+; RV32M-NEXT: sll t4, a4, a0
+; RV32M-NEXT: beqz a1, .LBB1_31
+; RV32M-NEXT: # %bb.24: # %udiv-preheader4
+; RV32M-NEXT: mv a0, s6
+; RV32M-NEXT: mv a2, s7
+; RV32M-NEXT: li s0, 0
+; RV32M-NEXT: li s5, 0
+; RV32M-NEXT: li s6, 0
+; RV32M-NEXT: li s7, 0
+; RV32M-NEXT: sw zero, 152(sp)
+; RV32M-NEXT: sw zero, 156(sp)
+; RV32M-NEXT: sw zero, 160(sp)
+; RV32M-NEXT: sw zero, 164(sp)
+; RV32M-NEXT: sw s9, 136(sp)
+; RV32M-NEXT: sw s8, 140(sp)
+; RV32M-NEXT: sw a0, 144(sp)
+; RV32M-NEXT: sw a2, 148(sp)
+; RV32M-NEXT: srli a0, t2, 3
+; RV32M-NEXT: addi a1, sp, 136
+; RV32M-NEXT: andi a0, a0, 12
+; RV32M-NEXT: add a0, a1, a0
+; RV32M-NEXT: lw a1, 4(a0)
+; RV32M-NEXT: lw a2, 8(a0)
+; RV32M-NEXT: lw a3, 12(a0)
+; RV32M-NEXT: lw a4, 0(a0)
+; RV32M-NEXT: andi a0, t2, 31
+; RV32M-NEXT: xori a0, a0, 31
+; RV32M-NEXT: slli a5, a3, 1
+; RV32M-NEXT: slli a7, a2, 1
+; RV32M-NEXT: slli t0, a1, 1
+; RV32M-NEXT: sll a5, a5, a0
+; RV32M-NEXT: sll a7, a7, a0
+; RV32M-NEXT: sll t0, t0, a0
+; RV32M-NEXT: seqz t1, a6
+; RV32M-NEXT: srl a0, a2, t2
+; RV32M-NEXT: or a0, a0, a5
+; RV32M-NEXT: or a2, a6, s11
+; RV32M-NEXT: sub s8, s11, t1
+; RV32M-NEXT: seqz a2, a2
+; RV32M-NEXT: srl a1, a1, t2
+; RV32M-NEXT: or s11, a1, a7
+; RV32M-NEXT: sub s9, ra, a2
+; RV32M-NEXT: sltu a1, ra, a2
+; RV32M-NEXT: sub s10, s10, a1
+; RV32M-NEXT: srl a1, a4, t2
+; RV32M-NEXT: srl t6, a3, t2
+; RV32M-NEXT: or t0, a1, t0
+; RV32M-NEXT: j .LBB1_26
+; RV32M-NEXT: .LBB1_25: # %udiv-do-while3
+; RV32M-NEXT: # in Loop: Header=BB1_26 Depth=1
+; RV32M-NEXT: srli a4, s2, 31
+; RV32M-NEXT: slli s4, s4, 1
+; RV32M-NEXT: sub a1, a7, a1
+; RV32M-NEXT: srli a7, s3, 31
+; RV32M-NEXT: slli s2, s2, 1
+; RV32M-NEXT: or a4, s4, a4
+; RV32M-NEXT: srli s4, t4, 31
+; RV32M-NEXT: slli s3, s3, 1
+; RV32M-NEXT: slli t4, t4, 1
+; RV32M-NEXT: or a7, s2, a7
+; RV32M-NEXT: lw a3, 68(sp) # 4-byte Folded Reload
+; RV32M-NEXT: and s2, s11, a3
+; RV32M-NEXT: or s3, s3, s4
+; RV32M-NEXT: lw a3, 64(sp) # 4-byte Folded Reload
+; RV32M-NEXT: and s4, s11, a3
+; RV32M-NEXT: or t4, s0, t4
+; RV32M-NEXT: sub a3, t1, s4
+; RV32M-NEXT: sltu t1, t1, s4
+; RV32M-NEXT: or s4, t2, t3
+; RV32M-NEXT: sub a5, a0, s2
+; RV32M-NEXT: seqz a0, t2
+; RV32M-NEXT: addi t2, t2, -1
+; RV32M-NEXT: andi s0, s11, 1
+; RV32M-NEXT: sub s11, a1, a2
+; RV32M-NEXT: seqz a1, s4
+; RV32M-NEXT: sub t3, t3, a0
+; RV32M-NEXT: or s3, s5, s3
+; RV32M-NEXT: or s2, s6, a7
+; RV32M-NEXT: or s4, s7, a4
+; RV32M-NEXT: sub a0, a3, t6
+; RV32M-NEXT: sltu a2, a3, t6
+; RV32M-NEXT: sub a3, a5, t1
+; RV32M-NEXT: sltu a4, s1, a1
+; RV32M-NEXT: sub s1, s1, a1
+; RV32M-NEXT: sub t6, a3, a2
+; RV32M-NEXT: sub t5, t5, a4
+; RV32M-NEXT: or a1, t3, t5
+; RV32M-NEXT: or a2, t2, s1
+; RV32M-NEXT: or a1, a2, a1
+; RV32M-NEXT: sub t0, ra, t0
+; RV32M-NEXT: li s5, 0
+; RV32M-NEXT: li s6, 0
+; RV32M-NEXT: li s7, 0
+; RV32M-NEXT: beqz a1, .LBB1_32
+; RV32M-NEXT: .LBB1_26: # %udiv-do-while3
+; RV32M-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32M-NEXT: srli a1, t0, 31
+; RV32M-NEXT: slli a2, s11, 1
+; RV32M-NEXT: slli t0, t0, 1
+; RV32M-NEXT: or a7, a2, a1
+; RV32M-NEXT: srli a1, s4, 31
+; RV32M-NEXT: or ra, t0, a1
+; RV32M-NEXT: beq s8, a7, .LBB1_28
+; RV32M-NEXT: # %bb.27: # %udiv-do-while3
+; RV32M-NEXT: # in Loop: Header=BB1_26 Depth=1
+; RV32M-NEXT: sltu a1, s8, a7
+; RV32M-NEXT: j .LBB1_29
+; RV32M-NEXT: .LBB1_28: # in Loop: Header=BB1_26 Depth=1
+; RV32M-NEXT: addi a1, a6, -1
+; RV32M-NEXT: sltu a1, a1, ra
+; RV32M-NEXT: .LBB1_29: # %udiv-do-while3
+; RV32M-NEXT: # in Loop: Header=BB1_26 Depth=1
+; RV32M-NEXT: srli a2, a0, 31
+; RV32M-NEXT: slli t6, t6, 1
+; RV32M-NEXT: srli t0, s11, 31
+; RV32M-NEXT: slli t1, a0, 1
+; RV32M-NEXT: or a0, t6, a2
+; RV32M-NEXT: or t1, t1, t0
+; RV32M-NEXT: sub a2, s9, t1
+; RV32M-NEXT: sltu t0, s9, t1
+; RV32M-NEXT: sub t6, s10, a0
+; RV32M-NEXT: sltu a1, a2, a1
+; RV32M-NEXT: sub a2, t6, t0
+; RV32M-NEXT: sub a2, a2, a1
+; RV32M-NEXT: srai s11, a2, 31
+; RV32M-NEXT: and t0, s11, a6
+; RV32M-NEXT: lw a1, 60(sp) # 4-byte Folded Reload
+; RV32M-NEXT: and a1, s11, a1
+; RV32M-NEXT: sltu a2, ra, t0
+; RV32M-NEXT: mv t6, a2
+; RV32M-NEXT: beq a7, a1, .LBB1_25
+; RV32M-NEXT: # %bb.30: # %udiv-do-while3
+; RV32M-NEXT: # in Loop: Header=BB1_26 Depth=1
+; RV32M-NEXT: sltu t6, a7, a1
+; RV32M-NEXT: j .LBB1_25
+; RV32M-NEXT: .LBB1_31:
+; RV32M-NEXT: li s0, 0
+; RV32M-NEXT: .LBB1_32: # %udiv-loop-exit2
+; RV32M-NEXT: srli a0, t4, 31
+; RV32M-NEXT: slli a1, s3, 1
+; RV32M-NEXT: srli a2, s3, 31
+; RV32M-NEXT: or a3, a1, a0
+; RV32M-NEXT: slli a0, s2, 1
+; RV32M-NEXT: srli a1, s2, 31
+; RV32M-NEXT: slli s4, s4, 1
+; RV32M-NEXT: slli t4, t4, 1
+; RV32M-NEXT: or a5, a0, a2
+; RV32M-NEXT: or a4, s4, a1
+; RV32M-NEXT: or t0, s0, t4
+; RV32M-NEXT: lw s10, 68(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw ra, 64(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw s11, 60(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw s8, 48(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw s9, 44(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw s6, 40(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw s7, 36(sp) # 4-byte Folded Reload
+; RV32M-NEXT: li s5, 127
+; RV32M-NEXT: .LBB1_33: # %udiv-end1
+; RV32M-NEXT: sw a3, 24(sp) # 4-byte Folded Spill
+; RV32M-NEXT: lui a0, 349525
+; RV32M-NEXT: lui a1, 209715
+; RV32M-NEXT: lui a2, 61681
+; RV32M-NEXT: lui a3, 4112
+; RV32M-NEXT: addi s3, a0, 1365
+; RV32M-NEXT: addi s2, a1, 819
+; RV32M-NEXT: addi s1, a2, -241
+; RV32M-NEXT: addi s0, a3, 257
+; RV32M-NEXT: bnez s11, .LBB1_36
+; RV32M-NEXT: # %bb.34: # %udiv-end1
+; RV32M-NEXT: srli a0, a6, 1
+; RV32M-NEXT: or a0, a6, a0
+; RV32M-NEXT: srli a1, a0, 2
+; RV32M-NEXT: or a0, a0, a1
+; RV32M-NEXT: srli a1, a0, 4
+; RV32M-NEXT: or a0, a0, a1
+; RV32M-NEXT: srli a1, a0, 8
+; RV32M-NEXT: or a0, a0, a1
+; RV32M-NEXT: srli a1, a0, 16
+; RV32M-NEXT: or a0, a0, a1
+; RV32M-NEXT: not a0, a0
+; RV32M-NEXT: srli a1, a0, 1
+; RV32M-NEXT: and a1, a1, s3
+; RV32M-NEXT: sub a0, a0, a1
+; RV32M-NEXT: and a1, a0, s2
+; RV32M-NEXT: srli a0, a0, 2
+; RV32M-NEXT: and a0, a0, s2
+; RV32M-NEXT: add a0, a1, a0
+; RV32M-NEXT: srli a1, a0, 4
+; RV32M-NEXT: add a0, a0, a1
+; RV32M-NEXT: and a0, a0, s1
+; RV32M-NEXT: mul a0, a0, s0
+; RV32M-NEXT: srli a0, a0, 24
+; RV32M-NEXT: addi t2, a0, 32
+; RV32M-NEXT: or a7, ra, s10
+; RV32M-NEXT: beqz s10, .LBB1_37
+; RV32M-NEXT: .LBB1_35:
+; RV32M-NEXT: srli a0, s10, 1
+; RV32M-NEXT: or a0, s10, a0
+; RV32M-NEXT: srli a1, a0, 2
+; RV32M-NEXT: or a0, a0, a1
+; RV32M-NEXT: srli a1, a0, 4
+; RV32M-NEXT: or a0, a0, a1
+; RV32M-NEXT: srli a1, a0, 8
+; RV32M-NEXT: or a0, a0, a1
+; RV32M-NEXT: srli a1, a0, 16
+; RV32M-NEXT: or a0, a0, a1
+; RV32M-NEXT: not a0, a0
+; RV32M-NEXT: srli a1, a0, 1
+; RV32M-NEXT: and a1, a1, s3
+; RV32M-NEXT: sub a0, a0, a1
+; RV32M-NEXT: and a1, a0, s2
+; RV32M-NEXT: srli a0, a0, 2
+; RV32M-NEXT: and a0, a0, s2
+; RV32M-NEXT: add a0, a1, a0
+; RV32M-NEXT: srli a1, a0, 4
+; RV32M-NEXT: add a0, a0, a1
+; RV32M-NEXT: and a0, a0, s1
+; RV32M-NEXT: mul a1, a0, s0
+; RV32M-NEXT: srli a1, a1, 24
+; RV32M-NEXT: addi a0, t2, 64
+; RV32M-NEXT: beqz a7, .LBB1_38
+; RV32M-NEXT: j .LBB1_39
+; RV32M-NEXT: .LBB1_36:
+; RV32M-NEXT: srli a0, s11, 1
+; RV32M-NEXT: or a0, s11, a0
+; RV32M-NEXT: srli a1, a0, 2
+; RV32M-NEXT: or a0, a0, a1
+; RV32M-NEXT: srli a1, a0, 4
+; RV32M-NEXT: or a0, a0, a1
+; RV32M-NEXT: srli a1, a0, 8
+; RV32M-NEXT: or a0, a0, a1
+; RV32M-NEXT: srli a1, a0, 16
+; RV32M-NEXT: or a0, a0, a1
+; RV32M-NEXT: not a0, a0
+; RV32M-NEXT: srli a1, a0, 1
+; RV32M-NEXT: and a1, a1, s3
+; RV32M-NEXT: sub a0, a0, a1
+; RV32M-NEXT: and a1, a0, s2
+; RV32M-NEXT: srli a0, a0, 2
+; RV32M-NEXT: and a0, a0, s2
+; RV32M-NEXT: add a0, a1, a0
+; RV32M-NEXT: srli a1, a0, 4
+; RV32M-NEXT: add a0, a0, a1
+; RV32M-NEXT: and a0, a0, s1
+; RV32M-NEXT: mul a0, a0, s0
+; RV32M-NEXT: srli t2, a0, 24
+; RV32M-NEXT: or a7, ra, s10
+; RV32M-NEXT: bnez s10, .LBB1_35
+; RV32M-NEXT: .LBB1_37: # %udiv-end1
+; RV32M-NEXT: srli a0, ra, 1
+; RV32M-NEXT: or a0, ra, a0
+; RV32M-NEXT: srli a1, a0, 2
+; RV32M-NEXT: or a0, a0, a1
+; RV32M-NEXT: srli a1, a0, 4
+; RV32M-NEXT: or a0, a0, a1
+; RV32M-NEXT: srli a1, a0, 8
+; RV32M-NEXT: or a0, a0, a1
+; RV32M-NEXT: srli a1, a0, 16
+; RV32M-NEXT: or a0, a0, a1
+; RV32M-NEXT: not a0, a0
+; RV32M-NEXT: srli a1, a0, 1
+; RV32M-NEXT: and a1, a1, s3
+; RV32M-NEXT: sub a0, a0, a1
+; RV32M-NEXT: and a1, a0, s2
+; RV32M-NEXT: srli a0, a0, 2
+; RV32M-NEXT: and a0, a0, s2
+; RV32M-NEXT: add a0, a1, a0
+; RV32M-NEXT: srli a1, a0, 4
+; RV32M-NEXT: add a0, a0, a1
+; RV32M-NEXT: and a0, a0, s1
+; RV32M-NEXT: mul a0, a0, s0
+; RV32M-NEXT: srli a0, a0, 24
+; RV32M-NEXT: addi a1, a0, 32
+; RV32M-NEXT: addi a0, t2, 64
+; RV32M-NEXT: bnez a7, .LBB1_39
+; RV32M-NEXT: .LBB1_38: # %udiv-end1
+; RV32M-NEXT: mv a1, a0
+; RV32M-NEXT: .LBB1_39: # %udiv-end1
+; RV32M-NEXT: snez a7, a7
+; RV32M-NEXT: sw t0, 12(sp) # 4-byte Folded Spill
+; RV32M-NEXT: bnez s8, .LBB1_41
+; RV32M-NEXT: # %bb.40: # %udiv-end1
+; RV32M-NEXT: srli a2, s9, 1
+; RV32M-NEXT: or a2, s9, a2
+; RV32M-NEXT: srli a3, a2, 2
+; RV32M-NEXT: or a2, a2, a3
+; RV32M-NEXT: srli a3, a2, 4
+; RV32M-NEXT: or a2, a2, a3
+; RV32M-NEXT: srli a3, a2, 8
+; RV32M-NEXT: or a2, a2, a3
+; RV32M-NEXT: srli a3, a2, 16
+; RV32M-NEXT: or a2, a2, a3
+; RV32M-NEXT: not a2, a2
+; RV32M-NEXT: srli a3, a2, 1
+; RV32M-NEXT: and a3, a3, s3
+; RV32M-NEXT: sub a2, a2, a3
+; RV32M-NEXT: and a3, a2, s2
+; RV32M-NEXT: srli a2, a2, 2
+; RV32M-NEXT: and a2, a2, s2
+; RV32M-NEXT: add a2, a3, a2
+; RV32M-NEXT: srli a3, a2, 4
+; RV32M-NEXT: add a2, a2, a3
+; RV32M-NEXT: and a2, a2, s1
+; RV32M-NEXT: mul a2, a2, s0
+; RV32M-NEXT: srli a2, a2, 24
+; RV32M-NEXT: addi a2, a2, 32
+; RV32M-NEXT: j .LBB1_42
+; RV32M-NEXT: .LBB1_41:
+; RV32M-NEXT: srli a2, s8, 1
+; RV32M-NEXT: or a2, s8, a2
+; RV32M-NEXT: srli a3, a2, 2
+; RV32M-NEXT: or a2, a2, a3
+; RV32M-NEXT: srli a3, a2, 4
+; RV32M-NEXT: or a2, a2, a3
+; RV32M-NEXT: srli a3, a2, 8
+; RV32M-NEXT: or a2, a2, a3
+; RV32M-NEXT: srli a3, a2, 16
+; RV32M-NEXT: or a2, a2, a3
+; RV32M-NEXT: not a2, a2
+; RV32M-NEXT: srli a3, a2, 1
+; RV32M-NEXT: and a3, a3, s3
+; RV32M-NEXT: sub a2, a2, a3
+; RV32M-NEXT: and a3, a2, s2
+; RV32M-NEXT: srli a2, a2, 2
+; RV32M-NEXT: and a2, a2, s2
+; RV32M-NEXT: add a2, a3, a2
+; RV32M-NEXT: srli a3, a2, 4
+; RV32M-NEXT: add a2, a2, a3
+; RV32M-NEXT: and a2, a2, s1
+; RV32M-NEXT: mul a2, a2, s0
+; RV32M-NEXT: srli a2, a2, 24
+; RV32M-NEXT: .LBB1_42: # %udiv-end1
+; RV32M-NEXT: sw a4, 20(sp) # 4-byte Folded Spill
+; RV32M-NEXT: or t0, a6, ra
+; RV32M-NEXT: or t3, s11, s10
+; RV32M-NEXT: or t4, s9, s6
+; RV32M-NEXT: or t5, s8, s7
+; RV32M-NEXT: sltu t2, a0, t2
+; RV32M-NEXT: addi a7, a7, -1
+; RV32M-NEXT: addi a0, a2, 64
+; RV32M-NEXT: or t6, s6, s7
+; RV32M-NEXT: sltu s4, a0, a2
+; RV32M-NEXT: snez a2, t6
+; RV32M-NEXT: addi a2, a2, -1
+; RV32M-NEXT: bnez s7, .LBB1_44
+; RV32M-NEXT: # %bb.43: # %udiv-end1
+; RV32M-NEXT: srli a3, s6, 1
+; RV32M-NEXT: or a3, s6, a3
+; RV32M-NEXT: srli a4, a3, 2
+; RV32M-NEXT: or a3, a3, a4
+; RV32M-NEXT: srli a4, a3, 4
+; RV32M-NEXT: or a3, a3, a4
+; RV32M-NEXT: srli a4, a3, 8
+; RV32M-NEXT: or a3, a3, a4
+; RV32M-NEXT: srli a4, a3, 16
+; RV32M-NEXT: or a3, a3, a4
+; RV32M-NEXT: not a3, a3
+; RV32M-NEXT: srli a4, a3, 1
+; RV32M-NEXT: and a4, a4, s3
+; RV32M-NEXT: sub a3, a3, a4
+; RV32M-NEXT: and a4, a3, s2
+; RV32M-NEXT: srli a3, a3, 2
+; RV32M-NEXT: and a3, a3, s2
+; RV32M-NEXT: add a3, a4, a3
+; RV32M-NEXT: srli a4, a3, 4
+; RV32M-NEXT: add a3, a3, a4
+; RV32M-NEXT: and a3, a3, s1
+; RV32M-NEXT: mul a3, a3, s0
+; RV32M-NEXT: srli a3, a3, 24
+; RV32M-NEXT: addi t1, a3, 32
+; RV32M-NEXT: j .LBB1_45
+; RV32M-NEXT: .LBB1_44:
+; RV32M-NEXT: srli a3, s7, 1
+; RV32M-NEXT: or a3, s7, a3
+; RV32M-NEXT: srli a4, a3, 2
+; RV32M-NEXT: or a3, a3, a4
+; RV32M-NEXT: srli a4, a3, 4
+; RV32M-NEXT: or a3, a3, a4
+; RV32M-NEXT: srli a4, a3, 8
+; RV32M-NEXT: or a3, a3, a4
+; RV32M-NEXT: srli a4, a3, 16
+; RV32M-NEXT: or a3, a3, a4
+; RV32M-NEXT: not a3, a3
+; RV32M-NEXT: srli a4, a3, 1
+; RV32M-NEXT: and a4, a4, s3
+; RV32M-NEXT: sub a3, a3, a4
+; RV32M-NEXT: and a4, a3, s2
+; RV32M-NEXT: srli a3, a3, 2
+; RV32M-NEXT: and a3, a3, s2
+; RV32M-NEXT: add a3, a4, a3
+; RV32M-NEXT: srli a4, a3, 4
+; RV32M-NEXT: add a3, a3, a4
+; RV32M-NEXT: and a3, a3, s1
+; RV32M-NEXT: mul a3, a3, s0
+; RV32M-NEXT: srli t1, a3, 24
+; RV32M-NEXT: .LBB1_45: # %udiv-end1
+; RV32M-NEXT: or t0, t0, t3
+; RV32M-NEXT: or t3, t4, t5
+; RV32M-NEXT: and a7, a7, t2
+; RV32M-NEXT: and a2, a2, s4
+; RV32M-NEXT: bnez t6, .LBB1_47
+; RV32M-NEXT: # %bb.46: # %udiv-end1
+; RV32M-NEXT: mv t1, a0
+; RV32M-NEXT: .LBB1_47: # %udiv-end1
+; RV32M-NEXT: seqz a0, t0
+; RV32M-NEXT: seqz t0, t3
+; RV32M-NEXT: sltu t3, a1, t1
+; RV32M-NEXT: sub t4, a7, a2
+; RV32M-NEXT: mv t2, t3
+; RV32M-NEXT: beq a7, a2, .LBB1_49
+; RV32M-NEXT: # %bb.48: # %udiv-end1
+; RV32M-NEXT: sltu t2, a7, a2
+; RV32M-NEXT: .LBB1_49: # %udiv-end1
+; RV32M-NEXT: sub t6, t4, t3
+; RV32M-NEXT: or a2, a0, t0
+; RV32M-NEXT: neg a0, t2
+; RV32M-NEXT: seqz s2, t2
+; RV32M-NEXT: addi s2, s2, -1
+; RV32M-NEXT: or a7, a0, s2
+; RV32M-NEXT: sub t1, a1, t1
+; RV32M-NEXT: beqz a7, .LBB1_51
+; RV32M-NEXT: # %bb.50: # %udiv-end1
+; RV32M-NEXT: snez a1, a7
+; RV32M-NEXT: j .LBB1_52
+; RV32M-NEXT: .LBB1_51:
+; RV32M-NEXT: snez a1, t6
+; RV32M-NEXT: sltiu a3, t1, 128
+; RV32M-NEXT: xori a3, a3, 1
+; RV32M-NEXT: or a1, a3, a1
+; RV32M-NEXT: .LBB1_52: # %udiv-end1
+; RV32M-NEXT: or a3, a2, a1
+; RV32M-NEXT: addi a4, a3, -1
+; RV32M-NEXT: and a7, a4, s7
+; RV32M-NEXT: and a2, a4, s6
+; RV32M-NEXT: and a1, a4, s8
+; RV32M-NEXT: and t0, a4, s9
+; RV32M-NEXT: sw a5, 16(sp) # 4-byte Folded Spill
+; RV32M-NEXT: bnez a3, .LBB1_64
+; RV32M-NEXT: # %bb.53: # %udiv-end1
+; RV32M-NEXT: xori a3, t1, 127
+; RV32M-NEXT: or a3, a3, a0
+; RV32M-NEXT: or a4, t6, s2
+; RV32M-NEXT: or a3, a3, a4
+; RV32M-NEXT: beqz a3, .LBB1_64
+; RV32M-NEXT: # %bb.54: # %udiv-bb1
+; RV32M-NEXT: sw a6, 56(sp) # 4-byte Folded Spill
+; RV32M-NEXT: addi s0, t1, 1
+; RV32M-NEXT: sub a1, s5, t1
+; RV32M-NEXT: sw zero, 104(sp)
+; RV32M-NEXT: sw zero, 108(sp)
+; RV32M-NEXT: sw zero, 112(sp)
+; RV32M-NEXT: sw zero, 116(sp)
+; RV32M-NEXT: sw s9, 120(sp)
+; RV32M-NEXT: sw s8, 124(sp)
+; RV32M-NEXT: sw s6, 128(sp)
+; RV32M-NEXT: sw s7, 132(sp)
+; RV32M-NEXT: addi a2, sp, 120
+; RV32M-NEXT: seqz a3, s0
+; RV32M-NEXT: srli a4, a1, 3
+; RV32M-NEXT: andi a5, a1, 31
+; RV32M-NEXT: add t6, t6, a3
+; RV32M-NEXT: andi a4, a4, 12
+; RV32M-NEXT: xori a3, a5, 31
+; RV32M-NEXT: or a5, s0, t6
+; RV32M-NEXT: sub a2, a2, a4
+; RV32M-NEXT: seqz s4, a5
+; RV32M-NEXT: lw a4, 0(a2)
+; RV32M-NEXT: lw a5, 4(a2)
+; RV32M-NEXT: lw a6, 8(a2)
+; RV32M-NEXT: lw a2, 12(a2)
+; RV32M-NEXT: add s4, a0, s4
+; RV32M-NEXT: sltu a0, s4, a0
+; RV32M-NEXT: or a7, s0, s4
+; RV32M-NEXT: add s2, s2, a0
+; RV32M-NEXT: or a0, t6, s2
+; RV32M-NEXT: srli t0, a6, 1
+; RV32M-NEXT: srli t1, a5, 1
+; RV32M-NEXT: or a0, a7, a0
+; RV32M-NEXT: srli a7, a4, 1
+; RV32M-NEXT: srl t0, t0, a3
+; RV32M-NEXT: srl t1, t1, a3
+; RV32M-NEXT: srl a3, a7, a3
+; RV32M-NEXT: sll a2, a2, a1
+; RV32M-NEXT: mv a7, s7
+; RV32M-NEXT: or s7, a2, t0
+; RV32M-NEXT: sll a2, a6, a1
+; RV32M-NEXT: sll a5, a5, a1
+; RV32M-NEXT: or s5, a2, t1
+; RV32M-NEXT: mv a6, s6
+; RV32M-NEXT: or s6, a5, a3
+; RV32M-NEXT: sll s1, a4, a1
+; RV32M-NEXT: beqz a0, .LBB1_62
+; RV32M-NEXT: # %bb.55: # %udiv-preheader
+; RV32M-NEXT: mv a1, s8
+; RV32M-NEXT: mv a2, s9
+; RV32M-NEXT: li s3, 0
+; RV32M-NEXT: li s8, 0
+; RV32M-NEXT: li s9, 0
+; RV32M-NEXT: li s10, 0
+; RV32M-NEXT: sw zero, 88(sp)
+; RV32M-NEXT: sw zero, 92(sp)
+; RV32M-NEXT: sw zero, 96(sp)
+; RV32M-NEXT: sw zero, 100(sp)
+; RV32M-NEXT: sw a2, 72(sp)
+; RV32M-NEXT: sw a1, 76(sp)
+; RV32M-NEXT: sw a6, 80(sp)
+; RV32M-NEXT: sw a7, 84(sp)
+; RV32M-NEXT: srli a0, s0, 3
+; RV32M-NEXT: addi a1, sp, 72
+; RV32M-NEXT: andi a0, a0, 12
+; RV32M-NEXT: add a0, a1, a0
+; RV32M-NEXT: lw a1, 4(a0)
+; RV32M-NEXT: lw a2, 8(a0)
+; RV32M-NEXT: lw a3, 12(a0)
+; RV32M-NEXT: lw a4, 0(a0)
+; RV32M-NEXT: andi a0, s0, 31
+; RV32M-NEXT: xori a0, a0, 31
+; RV32M-NEXT: slli a5, a3, 1
+; RV32M-NEXT: slli a6, a2, 1
+; RV32M-NEXT: slli a7, a1, 1
+; RV32M-NEXT: sll a5, a5, a0
+; RV32M-NEXT: sll a6, a6, a0
+; RV32M-NEXT: sll a7, a7, a0
+; RV32M-NEXT: lw t1, 56(sp) # 4-byte Folded Reload
+; RV32M-NEXT: seqz t0, t1
+; RV32M-NEXT: srl a0, a2, s0
+; RV32M-NEXT: or a0, a0, a5
+; RV32M-NEXT: or a2, t1, s11
+; RV32M-NEXT: sub s11, s11, t0
+; RV32M-NEXT: seqz a2, a2
+; RV32M-NEXT: srl a1, a1, s0
+; RV32M-NEXT: or t3, a1, a6
+; RV32M-NEXT: mv a6, t1
+; RV32M-NEXT: mv a1, ra
+; RV32M-NEXT: sub ra, ra, a2
+; RV32M-NEXT: sltu a1, a1, a2
+; RV32M-NEXT: lw a2, 68(sp) # 4-byte Folded Reload
+; RV32M-NEXT: sub t2, a2, a1
+; RV32M-NEXT: srl a1, a4, s0
+; RV32M-NEXT: srl t5, a3, s0
+; RV32M-NEXT: or t0, a1, a7
+; RV32M-NEXT: addi a1, t1, -1
+; RV32M-NEXT: sw a1, 52(sp) # 4-byte Folded Spill
+; RV32M-NEXT: j .LBB1_57
+; RV32M-NEXT: .LBB1_56: # %udiv-do-while
+; RV32M-NEXT: # in Loop: Header=BB1_57 Depth=1
+; RV32M-NEXT: srli a3, s5, 31
+; RV32M-NEXT: slli s7, s7, 1
+; RV32M-NEXT: sub a1, a7, a1
+; RV32M-NEXT: srli a4, s6, 31
+; RV32M-NEXT: slli s5, s5, 1
+; RV32M-NEXT: or a3, s7, a3
+; RV32M-NEXT: srli a5, s1, 31
+; RV32M-NEXT: slli s6, s6, 1
+; RV32M-NEXT: slli s1, s1, 1
+; RV32M-NEXT: or a4, s5, a4
+; RV32M-NEXT: lw a6, 68(sp) # 4-byte Folded Reload
+; RV32M-NEXT: and a7, t3, a6
+; RV32M-NEXT: or a5, s6, a5
+; RV32M-NEXT: lw a6, 64(sp) # 4-byte Folded Reload
+; RV32M-NEXT: and s5, t3, a6
+; RV32M-NEXT: or s1, s3, s1
+; RV32M-NEXT: sub a6, t1, s5
+; RV32M-NEXT: sltu t1, t1, s5
+; RV32M-NEXT: or s5, s0, t6
+; RV32M-NEXT: sub a7, a0, a7
+; RV32M-NEXT: seqz a0, s0
+; RV32M-NEXT: addi s0, s0, -1
+; RV32M-NEXT: andi s3, t3, 1
+; RV32M-NEXT: sub t3, a1, a2
+; RV32M-NEXT: seqz a1, s5
+; RV32M-NEXT: sub t6, t6, a0
+; RV32M-NEXT: or s6, s8, a5
+; RV32M-NEXT: or s5, s9, a4
+; RV32M-NEXT: or s7, s10, a3
+; RV32M-NEXT: sub a0, a6, t5
+; RV32M-NEXT: sltu a2, a6, t5
+; RV32M-NEXT: sub a3, a7, t1
+; RV32M-NEXT: sltu a4, s4, a1
+; RV32M-NEXT: sub s4, s4, a1
+; RV32M-NEXT: sub t5, a3, a2
+; RV32M-NEXT: sub s2, s2, a4
+; RV32M-NEXT: or a1, t6, s2
+; RV32M-NEXT: or a2, s0, s4
+; RV32M-NEXT: or a1, a2, a1
+; RV32M-NEXT: sub t0, t4, t0
+; RV32M-NEXT: li s8, 0
+; RV32M-NEXT: li s9, 0
+; RV32M-NEXT: li s10, 0
+; RV32M-NEXT: lw a6, 56(sp) # 4-byte Folded Reload
+; RV32M-NEXT: beqz a1, .LBB1_63
+; RV32M-NEXT: .LBB1_57: # %udiv-do-while
+; RV32M-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32M-NEXT: srli a1, t0, 31
+; RV32M-NEXT: slli a2, t3, 1
+; RV32M-NEXT: slli t0, t0, 1
+; RV32M-NEXT: or a7, a2, a1
+; RV32M-NEXT: srli a1, s7, 31
+; RV32M-NEXT: or t4, t0, a1
+; RV32M-NEXT: beq s11, a7, .LBB1_59
+; RV32M-NEXT: # %bb.58: # %udiv-do-while
+; RV32M-NEXT: # in Loop: Header=BB1_57 Depth=1
+; RV32M-NEXT: sltu a1, s11, a7
+; RV32M-NEXT: j .LBB1_60
+; RV32M-NEXT: .LBB1_59: # in Loop: Header=BB1_57 Depth=1
+; RV32M-NEXT: lw a1, 52(sp) # 4-byte Folded Reload
+; RV32M-NEXT: sltu a1, a1, t4
+; RV32M-NEXT: .LBB1_60: # %udiv-do-while
+; RV32M-NEXT: # in Loop: Header=BB1_57 Depth=1
+; RV32M-NEXT: srli a2, a0, 31
+; RV32M-NEXT: slli t5, t5, 1
+; RV32M-NEXT: srli a3, t3, 31
+; RV32M-NEXT: slli a4, a0, 1
+; RV32M-NEXT: or a0, t5, a2
+; RV32M-NEXT: or t1, a4, a3
+; RV32M-NEXT: sub a2, ra, t1
+; RV32M-NEXT: sltu a3, ra, t1
+; RV32M-NEXT: sub a4, t2, a0
+; RV32M-NEXT: sltu a1, a2, a1
+; RV32M-NEXT: sub a4, a4, a3
+; RV32M-NEXT: sub a4, a4, a1
+; RV32M-NEXT: srai t3, a4, 31
+; RV32M-NEXT: and t0, t3, a6
+; RV32M-NEXT: lw a1, 60(sp) # 4-byte Folded Reload
+; RV32M-NEXT: and a1, t3, a1
+; RV32M-NEXT: sltu a2, t4, t0
+; RV32M-NEXT: mv t5, a2
+; RV32M-NEXT: beq a7, a1, .LBB1_56
+; RV32M-NEXT: # %bb.61: # %udiv-do-while
+; RV32M-NEXT: # in Loop: Header=BB1_57 Depth=1
+; RV32M-NEXT: sltu t5, a7, a1
+; RV32M-NEXT: j .LBB1_56
+; RV32M-NEXT: .LBB1_62:
+; RV32M-NEXT: li s3, 0
+; RV32M-NEXT: lw a6, 56(sp) # 4-byte Folded Reload
+; RV32M-NEXT: .LBB1_63: # %udiv-loop-exit
+; RV32M-NEXT: srli a0, s1, 31
+; RV32M-NEXT: slli a1, s6, 1
+; RV32M-NEXT: srli a2, s6, 31
+; RV32M-NEXT: or a1, a1, a0
+; RV32M-NEXT: slli a0, s5, 1
+; RV32M-NEXT: srli a3, s5, 31
+; RV32M-NEXT: slli s7, s7, 1
+; RV32M-NEXT: slli s1, s1, 1
+; RV32M-NEXT: or a2, a0, a2
+; RV32M-NEXT: or a7, s7, a3
+; RV32M-NEXT: or t0, s3, s1
+; RV32M-NEXT: lw ra, 64(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw s11, 60(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw s8, 48(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw s9, 44(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw s6, 40(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw s7, 36(sp) # 4-byte Folded Reload
+; RV32M-NEXT: .LBB1_64: # %udiv-end
+; RV32M-NEXT: mulhu a3, a6, t0
+; RV32M-NEXT: mul a4, s11, t0
+; RV32M-NEXT: mulhu a5, s11, t0
+; RV32M-NEXT: mv a0, a6
+; RV32M-NEXT: mul a6, a6, a1
+; RV32M-NEXT: mulhu t1, a0, a1
+; RV32M-NEXT: mul t2, s11, a1
+; RV32M-NEXT: mul t3, t0, ra
+; RV32M-NEXT: mul t4, a2, a0
+; RV32M-NEXT: mulhu t5, s11, a1
+; RV32M-NEXT: mul t6, a2, s11
+; RV32M-NEXT: mulhu a2, a2, a0
+; RV32M-NEXT: mul a7, a7, a0
+; RV32M-NEXT: mul s0, a1, ra
+; RV32M-NEXT: mulhu a1, t0, ra
+; RV32M-NEXT: lw s1, 68(sp) # 4-byte Folded Reload
+; RV32M-NEXT: mul s1, t0, s1
+; RV32M-NEXT: mul a0, a0, t0
+; RV32M-NEXT: add a3, a4, a3
+; RV32M-NEXT: add t3, t4, t3
+; RV32M-NEXT: add a2, a2, t6
+; RV32M-NEXT: add s1, a1, s1
+; RV32M-NEXT: sltu a4, a3, a4
+; RV32M-NEXT: add a1, a6, a3
+; RV32M-NEXT: add a2, a2, a7
+; RV32M-NEXT: add s0, s1, s0
+; RV32M-NEXT: sltu a3, t3, t4
+; RV32M-NEXT: add a4, a5, a4
+; RV32M-NEXT: sltu a5, a1, a6
+; RV32M-NEXT: add a2, a2, s0
+; RV32M-NEXT: add a5, t1, a5
+; RV32M-NEXT: add a2, a2, a3
+; RV32M-NEXT: add a5, a4, a5
+; RV32M-NEXT: add a3, t2, a5
+; RV32M-NEXT: sltu a5, a5, a4
+; RV32M-NEXT: add a4, a3, t3
+; RV32M-NEXT: sltu a6, a3, t2
+; RV32M-NEXT: add a5, t5, a5
+; RV32M-NEXT: sltu a3, a4, a3
+; RV32M-NEXT: add a5, a5, a6
+; RV32M-NEXT: sltu a6, s6, a4
+; RV32M-NEXT: add a2, a5, a2
+; RV32M-NEXT: sub a3, s7, a3
+; RV32M-NEXT: sub a2, a3, a2
+; RV32M-NEXT: sltu a3, s9, a0
+; RV32M-NEXT: sub a2, a2, a6
+; RV32M-NEXT: mv a5, a3
+; RV32M-NEXT: beq s8, a1, .LBB1_66
+; RV32M-NEXT: # %bb.65: # %udiv-end
+; RV32M-NEXT: sltu a5, s8, a1
+; RV32M-NEXT: .LBB1_66: # %udiv-end
+; RV32M-NEXT: sub a4, s6, a4
+; RV32M-NEXT: sub a1, s8, a1
+; RV32M-NEXT: sub a0, s9, a0
+; RV32M-NEXT: lw a6, 28(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw a7, 12(sp) # 4-byte Folded Reload
+; RV32M-NEXT: sw a7, 0(a6)
+; RV32M-NEXT: lw a7, 24(sp) # 4-byte Folded Reload
+; RV32M-NEXT: sw a7, 4(a6)
+; RV32M-NEXT: lw a7, 16(sp) # 4-byte Folded Reload
+; RV32M-NEXT: sw a7, 8(a6)
+; RV32M-NEXT: lw a7, 20(sp) # 4-byte Folded Reload
+; RV32M-NEXT: sw a7, 12(a6)
+; RV32M-NEXT: sltu a6, a4, a5
+; RV32M-NEXT: sub a4, a4, a5
+; RV32M-NEXT: sub a1, a1, a3
+; RV32M-NEXT: sub a2, a2, a6
+; RV32M-NEXT: lw a3, 32(sp) # 4-byte Folded Reload
+; RV32M-NEXT: sw a0, 0(a3)
+; RV32M-NEXT: sw a1, 4(a3)
+; RV32M-NEXT: sw a4, 8(a3)
+; RV32M-NEXT: sw a2, 12(a3)
+; RV32M-NEXT: lw ra, 252(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw s0, 248(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw s1, 244(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw s2, 240(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw s3, 236(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw s4, 232(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw s5, 228(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw s6, 224(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw s7, 220(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw s8, 216(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw s9, 212(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw s10, 208(sp) # 4-byte Folded Reload
+; RV32M-NEXT: lw s11, 204(sp) # 4-byte Folded Reload
+; RV32M-NEXT: .cfi_restore ra
+; RV32M-NEXT: .cfi_restore s0
+; RV32M-NEXT: .cfi_restore s1
+; RV32M-NEXT: .cfi_restore s2
+; RV32M-NEXT: .cfi_restore s3
+; RV32M-NEXT: .cfi_restore s4
+; RV32M-NEXT: .cfi_restore s5
+; RV32M-NEXT: .cfi_restore s6
+; RV32M-NEXT: .cfi_restore s7
+; RV32M-NEXT: .cfi_restore s8
+; RV32M-NEXT: .cfi_restore s9
+; RV32M-NEXT: .cfi_restore s10
+; RV32M-NEXT: .cfi_restore s11
+; RV32M-NEXT: addi sp, sp, 256
+; RV32M-NEXT: .cfi_def_cfa_offset 0
+; RV32M-NEXT: ret
+ %q = udiv i128 %n, %d
+ %r = urem i128 %n, %d
+ store i128 %q, ptr %q_out
+ store i128 %r, ptr %r_out
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPARC/i128-divrem-libcall.ll b/llvm/test/CodeGen/SPARC/i128-divrem-libcall.ll
new file mode 100644
index 0000000000000..932e9a6ccbc90
--- /dev/null
+++ b/llvm/test/CodeGen/SPARC/i128-divrem-libcall.ll
@@ -0,0 +1,64 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=sparcv9-linux-gnu | FileCheck %s
+
+define void @sdivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
+; CHECK-LABEL: sdivrem_i128:
+; CHECK: .cfi_startproc
+; CHECK-NEXT: ! %bb.0:
+; CHECK-NEXT: save %sp, -192, %sp
+; CHECK-NEXT: .cfi_def_cfa_register %fp
+; CHECK-NEXT: .cfi_window_save
+; CHECK-NEXT: .cfi_register %o7, %i7
+; CHECK-NEXT: mov %i5, %o3
+; CHECK-NEXT: mov %i4, %o2
+; CHECK-NEXT: mov %i3, %o1
+; CHECK-NEXT: mov %i2, %o0
+; CHECK-NEXT: add %fp, 2031, %i2
+; CHECK-NEXT: call __divmodti4
+; CHECK-NEXT: mov %i2, %o4
+; CHECK-NEXT: or %i2, 8, %i2
+; CHECK-NEXT: ldx [%fp+2031], %i3
+; CHECK-NEXT: ldx [%i2], %i2
+; CHECK-NEXT: stx %o0, [%i0]
+; CHECK-NEXT: stx %o1, [%i0+8]
+; CHECK-NEXT: stx %i3, [%i1]
+; CHECK-NEXT: stx %i2, [%i1+8]
+; CHECK-NEXT: ret
+; CHECK-NEXT: restore
+ %q = sdiv i128 %n, %d
+ %r = srem i128 %n, %d
+ store i128 %q, ptr %q_out
+ store i128 %r, ptr %r_out
+ ret void
+}
+
+define void @udivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
+; CHECK-LABEL: udivrem_i128:
+; CHECK: .cfi_startproc
+; CHECK-NEXT: ! %bb.0:
+; CHECK-NEXT: save %sp, -192, %sp
+; CHECK-NEXT: .cfi_def_cfa_register %fp
+; CHECK-NEXT: .cfi_window_save
+; CHECK-NEXT: .cfi_register %o7, %i7
+; CHECK-NEXT: mov %i5, %o3
+; CHECK-NEXT: mov %i4, %o2
+; CHECK-NEXT: mov %i3, %o1
+; CHECK-NEXT: mov %i2, %o0
+; CHECK-NEXT: add %fp, 2031, %i2
+; CHECK-NEXT: call __udivmodti4
+; CHECK-NEXT: mov %i2, %o4
+; CHECK-NEXT: or %i2, 8, %i2
+; CHECK-NEXT: ldx [%fp+2031], %i3
+; CHECK-NEXT: ldx [%i2], %i2
+; CHECK-NEXT: stx %o0, [%i0]
+; CHECK-NEXT: stx %o1, [%i0+8]
+; CHECK-NEXT: stx %i3, [%i1]
+; CHECK-NEXT: stx %i2, [%i1+8]
+; CHECK-NEXT: ret
+; CHECK-NEXT: restore
+ %q = udiv i128 %n, %d
+ %r = urem i128 %n, %d
+ store i128 %q, ptr %q_out
+ store i128 %r, ptr %r_out
+ ret void
+}
diff --git a/llvm/test/CodeGen/WebAssembly/i128-divrem-libcall.ll b/llvm/test/CodeGen/WebAssembly/i128-divrem-libcall.ll
new file mode 100644
index 0000000000000..d67089fcd7865
--- /dev/null
+++ b/llvm/test/CodeGen/WebAssembly/i128-divrem-libcall.ll
@@ -0,0 +1,177 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=wasm32-unknown-unknown | FileCheck %s --check-prefix=WASM32
+; RUN: llc < %s -mtriple=wasm64-unknown-unknown | FileCheck %s --check-prefix=WASM64
+
+define void @sdivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
+; WASM32-LABEL: sdivrem_i128:
+; WASM32: .functype sdivrem_i128 (i32, i32, i64, i64, i64, i64) -> ()
+; WASM32-NEXT: .local i32
+; WASM32-NEXT: # %bb.0:
+; WASM32-NEXT: global.get __stack_pointer
+; WASM32-NEXT: i32.const 32
+; WASM32-NEXT: i32.sub
+; WASM32-NEXT: local.tee 6
+; WASM32-NEXT: global.set __stack_pointer
+; WASM32-NEXT: local.get 6
+; WASM32-NEXT: local.get 2
+; WASM32-NEXT: local.get 3
+; WASM32-NEXT: local.get 4
+; WASM32-NEXT: local.get 5
+; WASM32-NEXT: local.get 6
+; WASM32-NEXT: i32.const 16
+; WASM32-NEXT: i32.add
+; WASM32-NEXT: call __divmodti4
+; WASM32-NEXT: local.get 0
+; WASM32-NEXT: local.get 6
+; WASM32-NEXT: i64.load 0
+; WASM32-NEXT: i64.store 0
+; WASM32-NEXT: local.get 0
+; WASM32-NEXT: local.get 6
+; WASM32-NEXT: i64.load 8
+; WASM32-NEXT: i64.store 8
+; WASM32-NEXT: local.get 1
+; WASM32-NEXT: local.get 6
+; WASM32-NEXT: i64.load 16
+; WASM32-NEXT: i64.store 0
+; WASM32-NEXT: local.get 1
+; WASM32-NEXT: local.get 6
+; WASM32-NEXT: i64.load 24
+; WASM32-NEXT: i64.store 8
+; WASM32-NEXT: local.get 6
+; WASM32-NEXT: i32.const 32
+; WASM32-NEXT: i32.add
+; WASM32-NEXT: global.set __stack_pointer
+; WASM32-NEXT: # fallthrough-return
+;
+; WASM64-LABEL: sdivrem_i128:
+; WASM64: .functype sdivrem_i128 (i64, i64, i64, i64, i64, i64) -> ()
+; WASM64-NEXT: .local i64
+; WASM64-NEXT: # %bb.0:
+; WASM64-NEXT: global.get __stack_pointer
+; WASM64-NEXT: i64.const 32
+; WASM64-NEXT: i64.sub
+; WASM64-NEXT: local.tee 6
+; WASM64-NEXT: global.set __stack_pointer
+; WASM64-NEXT: local.get 6
+; WASM64-NEXT: local.get 2
+; WASM64-NEXT: local.get 3
+; WASM64-NEXT: local.get 4
+; WASM64-NEXT: local.get 5
+; WASM64-NEXT: local.get 6
+; WASM64-NEXT: i64.const 16
+; WASM64-NEXT: i64.add
+; WASM64-NEXT: call __divmodti4
+; WASM64-NEXT: local.get 0
+; WASM64-NEXT: local.get 6
+; WASM64-NEXT: i64.load 0
+; WASM64-NEXT: i64.store 0
+; WASM64-NEXT: local.get 0
+; WASM64-NEXT: local.get 6
+; WASM64-NEXT: i64.load 8
+; WASM64-NEXT: i64.store 8
+; WASM64-NEXT: local.get 1
+; WASM64-NEXT: local.get 6
+; WASM64-NEXT: i64.load 16
+; WASM64-NEXT: i64.store 0
+; WASM64-NEXT: local.get 1
+; WASM64-NEXT: local.get 6
+; WASM64-NEXT: i64.load 24
+; WASM64-NEXT: i64.store 8
+; WASM64-NEXT: local.get 6
+; WASM64-NEXT: i64.const 32
+; WASM64-NEXT: i64.add
+; WASM64-NEXT: global.set __stack_pointer
+; WASM64-NEXT: # fallthrough-return
+ %q = sdiv i128 %n, %d
+ %r = srem i128 %n, %d
+ store i128 %q, ptr %q_out
+ store i128 %r, ptr %r_out
+ ret void
+}
+
+define void @udivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
+; WASM32-LABEL: udivrem_i128:
+; WASM32: .functype udivrem_i128 (i32, i32, i64, i64, i64, i64) -> ()
+; WASM32-NEXT: .local i32
+; WASM32-NEXT: # %bb.0:
+; WASM32-NEXT: global.get __stack_pointer
+; WASM32-NEXT: i32.const 32
+; WASM32-NEXT: i32.sub
+; WASM32-NEXT: local.tee 6
+; WASM32-NEXT: global.set __stack_pointer
+; WASM32-NEXT: local.get 6
+; WASM32-NEXT: local.get 2
+; WASM32-NEXT: local.get 3
+; WASM32-NEXT: local.get 4
+; WASM32-NEXT: local.get 5
+; WASM32-NEXT: local.get 6
+; WASM32-NEXT: i32.const 16
+; WASM32-NEXT: i32.add
+; WASM32-NEXT: call __udivmodti4
+; WASM32-NEXT: local.get 0
+; WASM32-NEXT: local.get 6
+; WASM32-NEXT: i64.load 0
+; WASM32-NEXT: i64.store 0
+; WASM32-NEXT: local.get 0
+; WASM32-NEXT: local.get 6
+; WASM32-NEXT: i64.load 8
+; WASM32-NEXT: i64.store 8
+; WASM32-NEXT: local.get 1
+; WASM32-NEXT: local.get 6
+; WASM32-NEXT: i64.load 16
+; WASM32-NEXT: i64.store 0
+; WASM32-NEXT: local.get 1
+; WASM32-NEXT: local.get 6
+; WASM32-NEXT: i64.load 24
+; WASM32-NEXT: i64.store 8
+; WASM32-NEXT: local.get 6
+; WASM32-NEXT: i32.const 32
+; WASM32-NEXT: i32.add
+; WASM32-NEXT: global.set __stack_pointer
+; WASM32-NEXT: # fallthrough-return
+;
+; WASM64-LABEL: udivrem_i128:
+; WASM64: .functype udivrem_i128 (i64, i64, i64, i64, i64, i64) -> ()
+; WASM64-NEXT: .local i64
+; WASM64-NEXT: # %bb.0:
+; WASM64-NEXT: global.get __stack_pointer
+; WASM64-NEXT: i64.const 32
+; WASM64-NEXT: i64.sub
+; WASM64-NEXT: local.tee 6
+; WASM64-NEXT: global.set __stack_pointer
+; WASM64-NEXT: local.get 6
+; WASM64-NEXT: local.get 2
+; WASM64-NEXT: local.get 3
+; WASM64-NEXT: local.get 4
+; WASM64-NEXT: local.get 5
+; WASM64-NEXT: local.get 6
+; WASM64-NEXT: i64.const 16
+; WASM64-NEXT: i64.add
+; WASM64-NEXT: call __udivmodti4
+; WASM64-NEXT: local.get 0
+; WASM64-NEXT: local.get 6
+; WASM64-NEXT: i64.load 0
+; WASM64-NEXT: i64.store 0
+; WASM64-NEXT: local.get 0
+; WASM64-NEXT: local.get 6
+; WASM64-NEXT: i64.load 8
+; WASM64-NEXT: i64.store 8
+; WASM64-NEXT: local.get 1
+; WASM64-NEXT: local.get 6
+; WASM64-NEXT: i64.load 16
+; WASM64-NEXT: i64.store 0
+; WASM64-NEXT: local.get 1
+; WASM64-NEXT: local.get 6
+; WASM64-NEXT: i64.load 24
+; WASM64-NEXT: i64.store 8
+; WASM64-NEXT: local.get 6
+; WASM64-NEXT: i64.const 32
+; WASM64-NEXT: i64.add
+; WASM64-NEXT: global.set __stack_pointer
+; WASM64-NEXT: # fallthrough-return
+ %q = udiv i128 %n, %d
+ %r = urem i128 %n, %d
+ store i128 %q, ptr %q_out
+ store i128 %r, ptr %r_out
+ ret void
+}
diff --git a/llvm/test/CodeGen/X86/i128-divrem-libcall.ll b/llvm/test/CodeGen/X86/i128-divrem-libcall.ll
new file mode 100644
index 0000000000000..779eac61995e2
--- /dev/null
+++ b/llvm/test/CodeGen/X86/i128-divrem-libcall.ll
@@ -0,0 +1,3393 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefix=LINUX-X64
+; RUN: llc < %s -mtriple=x86_64-linux-gnux32 | FileCheck %s --check-prefix=LINUX-X32
+; RUN: llc < %s -mtriple=x86_64-apple-macosx | FileCheck %s --check-prefix=DARWIN-X64
+; RUN: llc < %s -mtriple=x86_64-w64-mingw32 | FileCheck %s --check-prefix=MINGW-X64
+; RUN: llc < %s -mtriple=x86_64-pc-windows-msvc | FileCheck %s --check-prefix=WIN64
+; RUN: llc < %s -mtriple=i386-linux-gnu | FileCheck %s --check-prefix=LINUX-X86
+; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefix=LINUX-X86
+; RUN: llc < %s -mtriple=i686-pc-windows-msvc | FileCheck %s --check-prefix=WIN32
+
+define void @sdivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
+; LINUX-X64-LABEL: sdivrem_i128:
+; LINUX-X64: # %bb.0:
+; LINUX-X64-NEXT: pushq %r14
+; LINUX-X64-NEXT: .cfi_def_cfa_offset 16
+; LINUX-X64-NEXT: pushq %rbx
+; LINUX-X64-NEXT: .cfi_def_cfa_offset 24
+; LINUX-X64-NEXT: subq $24, %rsp
+; LINUX-X64-NEXT: .cfi_def_cfa_offset 48
+; LINUX-X64-NEXT: .cfi_offset %rbx, -24
+; LINUX-X64-NEXT: .cfi_offset %r14, -16
+; LINUX-X64-NEXT: movq %r8, %rax
+; LINUX-X64-NEXT: movq %rsi, %rbx
+; LINUX-X64-NEXT: movq %rdi, %r14
+; LINUX-X64-NEXT: movq %rsp, %r8
+; LINUX-X64-NEXT: movq %rdx, %rdi
+; LINUX-X64-NEXT: movq %rcx, %rsi
+; LINUX-X64-NEXT: movq %rax, %rdx
+; LINUX-X64-NEXT: movq %r9, %rcx
+; LINUX-X64-NEXT: callq __divmodti4@PLT
+; LINUX-X64-NEXT: movq (%rsp), %rcx
+; LINUX-X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi
+; LINUX-X64-NEXT: movq %rax, (%r14)
+; LINUX-X64-NEXT: movq %rdx, 8(%r14)
+; LINUX-X64-NEXT: movq %rcx, (%rbx)
+; LINUX-X64-NEXT: movq %rsi, 8(%rbx)
+; LINUX-X64-NEXT: addq $24, %rsp
+; LINUX-X64-NEXT: .cfi_def_cfa_offset 24
+; LINUX-X64-NEXT: popq %rbx
+; LINUX-X64-NEXT: .cfi_def_cfa_offset 16
+; LINUX-X64-NEXT: popq %r14
+; LINUX-X64-NEXT: .cfi_def_cfa_offset 8
+; LINUX-X64-NEXT: retq
+;
+; LINUX-X32-LABEL: sdivrem_i128:
+; LINUX-X32: # %bb.0:
+; LINUX-X32-NEXT: pushq %r14
+; LINUX-X32-NEXT: .cfi_def_cfa_offset 16
+; LINUX-X32-NEXT: pushq %rbx
+; LINUX-X32-NEXT: .cfi_def_cfa_offset 24
+; LINUX-X32-NEXT: subl $24, %esp
+; LINUX-X32-NEXT: .cfi_def_cfa_offset 48
+; LINUX-X32-NEXT: .cfi_offset %rbx, -24
+; LINUX-X32-NEXT: .cfi_offset %r14, -16
+; LINUX-X32-NEXT: movq %r8, %rax
+; LINUX-X32-NEXT: movq %rsi, %rbx
+; LINUX-X32-NEXT: movq %rdi, %r14
+; LINUX-X32-NEXT: movl %esp, %r8d
+; LINUX-X32-NEXT: movq %rdx, %rdi
+; LINUX-X32-NEXT: movq %rcx, %rsi
+; LINUX-X32-NEXT: movq %rax, %rdx
+; LINUX-X32-NEXT: movq %r9, %rcx
+; LINUX-X32-NEXT: callq __divmodti4@PLT
+; LINUX-X32-NEXT: movq (%esp), %rcx
+; LINUX-X32-NEXT: movq {{[0-9]+}}(%esp), %rsi
+; LINUX-X32-NEXT: movq %rax, (%r14d)
+; LINUX-X32-NEXT: movq %rdx, 8(%r14d)
+; LINUX-X32-NEXT: movq %rcx, (%ebx)
+; LINUX-X32-NEXT: movq %rsi, 8(%ebx)
+; LINUX-X32-NEXT: addl $24, %esp
+; LINUX-X32-NEXT: .cfi_def_cfa_offset 24
+; LINUX-X32-NEXT: popq %rbx
+; LINUX-X32-NEXT: .cfi_def_cfa_offset 16
+; LINUX-X32-NEXT: popq %r14
+; LINUX-X32-NEXT: .cfi_def_cfa_offset 8
+; LINUX-X32-NEXT: retq
+;
+; DARWIN-X64-LABEL: sdivrem_i128:
+; DARWIN-X64: ## %bb.0:
+; DARWIN-X64-NEXT: pushq %r14
+; DARWIN-X64-NEXT: .cfi_def_cfa_offset 16
+; DARWIN-X64-NEXT: pushq %rbx
+; DARWIN-X64-NEXT: .cfi_def_cfa_offset 24
+; DARWIN-X64-NEXT: subq $24, %rsp
+; DARWIN-X64-NEXT: .cfi_def_cfa_offset 48
+; DARWIN-X64-NEXT: .cfi_offset %rbx, -24
+; DARWIN-X64-NEXT: .cfi_offset %r14, -16
+; DARWIN-X64-NEXT: movq %r8, %rax
+; DARWIN-X64-NEXT: movq %rsi, %rbx
+; DARWIN-X64-NEXT: movq %rdi, %r14
+; DARWIN-X64-NEXT: movq %rsp, %r8
+; DARWIN-X64-NEXT: movq %rdx, %rdi
+; DARWIN-X64-NEXT: movq %rcx, %rsi
+; DARWIN-X64-NEXT: movq %rax, %rdx
+; DARWIN-X64-NEXT: movq %r9, %rcx
+; DARWIN-X64-NEXT: callq ___divmodti4
+; DARWIN-X64-NEXT: movq (%rsp), %rcx
+; DARWIN-X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi
+; DARWIN-X64-NEXT: movq %rax, (%r14)
+; DARWIN-X64-NEXT: movq %rdx, 8(%r14)
+; DARWIN-X64-NEXT: movq %rcx, (%rbx)
+; DARWIN-X64-NEXT: movq %rsi, 8(%rbx)
+; DARWIN-X64-NEXT: addq $24, %rsp
+; DARWIN-X64-NEXT: popq %rbx
+; DARWIN-X64-NEXT: popq %r14
+; DARWIN-X64-NEXT: retq
+;
+; MINGW-X64-LABEL: sdivrem_i128:
+; MINGW-X64: # %bb.0:
+; MINGW-X64-NEXT: pushq %rsi
+; MINGW-X64-NEXT: .seh_pushreg %rsi
+; MINGW-X64-NEXT: pushq %rdi
+; MINGW-X64-NEXT: .seh_pushreg %rdi
+; MINGW-X64-NEXT: subq $88, %rsp
+; MINGW-X64-NEXT: .seh_stackalloc 88
+; MINGW-X64-NEXT: .seh_endprologue
+; MINGW-X64-NEXT: movq %rdx, %rsi
+; MINGW-X64-NEXT: movq %rcx, %rdi
+; MINGW-X64-NEXT: movaps {{[0-9]+}}(%rsp), %xmm0
+; MINGW-X64-NEXT: movq %r9, {{[0-9]+}}(%rsp)
+; MINGW-X64-NEXT: movq %r8, {{[0-9]+}}(%rsp)
+; MINGW-X64-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
+; MINGW-X64-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; MINGW-X64-NEXT: leaq {{[0-9]+}}(%rsp), %rdx
+; MINGW-X64-NEXT: leaq {{[0-9]+}}(%rsp), %r8
+; MINGW-X64-NEXT: callq __divmodti4
+; MINGW-X64-NEXT: movaps {{[0-9]+}}(%rsp), %xmm1
+; MINGW-X64-NEXT: movaps %xmm0, (%rdi)
+; MINGW-X64-NEXT: movaps %xmm1, (%rsi)
+; MINGW-X64-NEXT: .seh_startepilogue
+; MINGW-X64-NEXT: addq $88, %rsp
+; MINGW-X64-NEXT: popq %rdi
+; MINGW-X64-NEXT: popq %rsi
+; MINGW-X64-NEXT: .seh_endepilogue
+; MINGW-X64-NEXT: retq
+; MINGW-X64-NEXT: .seh_endproc
+;
+; WIN64-LABEL: sdivrem_i128:
+; WIN64: # %bb.0:
+; WIN64-NEXT: pushq %rsi
+; WIN64-NEXT: .seh_pushreg %rsi
+; WIN64-NEXT: pushq %rdi
+; WIN64-NEXT: .seh_pushreg %rdi
+; WIN64-NEXT: subq $88, %rsp
+; WIN64-NEXT: .seh_stackalloc 88
+; WIN64-NEXT: .seh_endprologue
+; WIN64-NEXT: movq %rdx, %rsi
+; WIN64-NEXT: movq %rcx, %rdi
+; WIN64-NEXT: movaps {{[0-9]+}}(%rsp), %xmm0
+; WIN64-NEXT: movq %r9, {{[0-9]+}}(%rsp)
+; WIN64-NEXT: movq %r8, {{[0-9]+}}(%rsp)
+; WIN64-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
+; WIN64-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; WIN64-NEXT: leaq {{[0-9]+}}(%rsp), %rdx
+; WIN64-NEXT: leaq {{[0-9]+}}(%rsp), %r8
+; WIN64-NEXT: callq __divmodti4
+; WIN64-NEXT: movaps {{[0-9]+}}(%rsp), %xmm1
+; WIN64-NEXT: movaps %xmm0, (%rdi)
+; WIN64-NEXT: movaps %xmm1, (%rsi)
+; WIN64-NEXT: .seh_startepilogue
+; WIN64-NEXT: addq $88, %rsp
+; WIN64-NEXT: popq %rdi
+; WIN64-NEXT: popq %rsi
+; WIN64-NEXT: .seh_endepilogue
+; WIN64-NEXT: retq
+; WIN64-NEXT: .seh_endproc
+;
+; LINUX-X86-LABEL: sdivrem_i128:
+; LINUX-X86: # %bb.0: # %_udiv-special-cases_udiv-special-cases
+; LINUX-X86-NEXT: pushl %ebp
+; LINUX-X86-NEXT: .cfi_def_cfa_offset 8
+; LINUX-X86-NEXT: pushl %ebx
+; LINUX-X86-NEXT: .cfi_def_cfa_offset 12
+; LINUX-X86-NEXT: pushl %edi
+; LINUX-X86-NEXT: .cfi_def_cfa_offset 16
+; LINUX-X86-NEXT: pushl %esi
+; LINUX-X86-NEXT: .cfi_def_cfa_offset 20
+; LINUX-X86-NEXT: subl $284, %esp # imm = 0x11C
+; LINUX-X86-NEXT: .cfi_def_cfa_offset 304
+; LINUX-X86-NEXT: .cfi_offset %esi, -20
+; LINUX-X86-NEXT: .cfi_offset %edi, -16
+; LINUX-X86-NEXT: .cfi_offset %ebx, -12
+; LINUX-X86-NEXT: .cfi_offset %ebp, -8
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; LINUX-X86-NEXT: movl %ebp, %esi
+; LINUX-X86-NEXT: sarl $31, %esi
+; LINUX-X86-NEXT: xorl %esi, %ebp
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; LINUX-X86-NEXT: xorl %esi, %edx
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; LINUX-X86-NEXT: xorl %esi, %eax
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; LINUX-X86-NEXT: xorl %esi, %ecx
+; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: subl %esi, %ecx
+; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: sbbl %esi, %eax
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: sbbl %esi, %edx
+; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: sbbl %esi, %ebp
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; LINUX-X86-NEXT: movl %ebx, %edx
+; LINUX-X86-NEXT: sarl $31, %edx
+; LINUX-X86-NEXT: xorl %edx, %ebx
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; LINUX-X86-NEXT: xorl %edx, %edi
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; LINUX-X86-NEXT: xorl %edx, %eax
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; LINUX-X86-NEXT: xorl %edx, %ecx
+; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: subl %edx, %ecx
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: sbbl %edx, %eax
+; LINUX-X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: sbbl %edx, %edi
+; LINUX-X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: sbbl %edx, %ebx
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: orl %ebx, %eax
+; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: orl %edi, %ecx
+; LINUX-X86-NEXT: orl %eax, %ecx
+; LINUX-X86-NEXT: sete {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: orl %ebp, %ecx
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; LINUX-X86-NEXT: orl %eax, %edx
+; LINUX-X86-NEXT: orl %ecx, %edx
+; LINUX-X86-NEXT: sete %cl
+; LINUX-X86-NEXT: testl %ebx, %ebx
+; LINUX-X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: jne .LBB0_1
+; LINUX-X86-NEXT: # %bb.2: # %_udiv-special-cases_udiv-special-cases
+; LINUX-X86-NEXT: bsrl %edi, %ebp
+; LINUX-X86-NEXT: xorl $31, %ebp
+; LINUX-X86-NEXT: orl $32, %ebp
+; LINUX-X86-NEXT: jmp .LBB0_3
+; LINUX-X86-NEXT: .LBB0_1:
+; LINUX-X86-NEXT: bsrl %ebx, %ebp
+; LINUX-X86-NEXT: xorl $31, %ebp
+; LINUX-X86-NEXT: .LBB0_3: # %_udiv-special-cases_udiv-special-cases
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; LINUX-X86-NEXT: testl %edx, %edx
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; LINUX-X86-NEXT: jne .LBB0_4
+; LINUX-X86-NEXT: # %bb.5: # %_udiv-special-cases_udiv-special-cases
+; LINUX-X86-NEXT: bsrl %esi, %edx
+; LINUX-X86-NEXT: xorl $31, %edx
+; LINUX-X86-NEXT: orl $32, %edx
+; LINUX-X86-NEXT: jmp .LBB0_6
+; LINUX-X86-NEXT: .LBB0_4:
+; LINUX-X86-NEXT: bsrl %edx, %edx
+; LINUX-X86-NEXT: xorl $31, %edx
+; LINUX-X86-NEXT: .LBB0_6: # %_udiv-special-cases_udiv-special-cases
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; LINUX-X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; LINUX-X86-NEXT: jne .LBB0_8
+; LINUX-X86-NEXT: # %bb.7: # %_udiv-special-cases_udiv-special-cases
+; LINUX-X86-NEXT: orl $64, %edx
+; LINUX-X86-NEXT: movl %edx, %ebp
+; LINUX-X86-NEXT: .LBB0_8: # %_udiv-special-cases_udiv-special-cases
+; LINUX-X86-NEXT: testl %ebx, %ebx
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; LINUX-X86-NEXT: jne .LBB0_9
+; LINUX-X86-NEXT: # %bb.10: # %_udiv-special-cases_udiv-special-cases
+; LINUX-X86-NEXT: bsrl %eax, %edx
+; LINUX-X86-NEXT: xorl $31, %edx
+; LINUX-X86-NEXT: orl $32, %edx
+; LINUX-X86-NEXT: testl %esi, %esi
+; LINUX-X86-NEXT: je .LBB0_13
+; LINUX-X86-NEXT: .LBB0_12:
+; LINUX-X86-NEXT: bsrl %esi, %esi
+; LINUX-X86-NEXT: xorl $31, %esi
+; LINUX-X86-NEXT: jmp .LBB0_14
+; LINUX-X86-NEXT: .LBB0_9:
+; LINUX-X86-NEXT: bsrl %ebx, %edx
+; LINUX-X86-NEXT: xorl $31, %edx
+; LINUX-X86-NEXT: testl %esi, %esi
+; LINUX-X86-NEXT: jne .LBB0_12
+; LINUX-X86-NEXT: .LBB0_13: # %_udiv-special-cases_udiv-special-cases
+; LINUX-X86-NEXT: bsrl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; LINUX-X86-NEXT: xorl $31, %esi
+; LINUX-X86-NEXT: orl $32, %esi
+; LINUX-X86-NEXT: .LBB0_14: # %_udiv-special-cases_udiv-special-cases
+; LINUX-X86-NEXT: orb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; LINUX-X86-NEXT: orl %ebx, %eax
+; LINUX-X86-NEXT: jne .LBB0_16
+; LINUX-X86-NEXT: # %bb.15: # %_udiv-special-cases_udiv-special-cases
+; LINUX-X86-NEXT: orl $64, %esi
+; LINUX-X86-NEXT: movl %esi, %edx
+; LINUX-X86-NEXT: .LBB0_16: # %_udiv-special-cases_udiv-special-cases
+; LINUX-X86-NEXT: xorl %ebx, %ebx
+; LINUX-X86-NEXT: movl %ebp, %ecx
+; LINUX-X86-NEXT: subl %edx, %ecx
+; LINUX-X86-NEXT: movl $0, %edx
+; LINUX-X86-NEXT: sbbl %edx, %edx
+; LINUX-X86-NEXT: movl $0, %esi
+; LINUX-X86-NEXT: sbbl %esi, %esi
+; LINUX-X86-NEXT: movl $0, %edi
+; LINUX-X86-NEXT: sbbl %edi, %edi
+; LINUX-X86-NEXT: cmpb $0, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Reload
+; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: jne .LBB0_17
+; LINUX-X86-NEXT: # %bb.18: # %select.false.sink
+; LINUX-X86-NEXT: movl $127, %eax
+; LINUX-X86-NEXT: cmpl %ecx, %eax
+; LINUX-X86-NEXT: movl $0, %eax
+; LINUX-X86-NEXT: sbbl %edx, %eax
+; LINUX-X86-NEXT: movl $0, %eax
+; LINUX-X86-NEXT: sbbl %esi, %eax
+; LINUX-X86-NEXT: movl $0, %eax
+; LINUX-X86-NEXT: sbbl %edi, %eax
+; LINUX-X86-NEXT: setb %al
+; LINUX-X86-NEXT: .LBB0_19: # %select.end
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; LINUX-X86-NEXT: xorl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: testb %al, %al
+; LINUX-X86-NEXT: movl $0, %edx
+; LINUX-X86-NEXT: movl $0, %ebp
+; LINUX-X86-NEXT: movl $0, %esi
+; LINUX-X86-NEXT: jne .LBB0_21
+; LINUX-X86-NEXT: # %bb.20: # %select.end
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; LINUX-X86-NEXT: movl %edi, %edx
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; LINUX-X86-NEXT: .LBB0_21: # %select.end
+; LINUX-X86-NEXT: movl %edx, %edi
+; LINUX-X86-NEXT: jne .LBB0_22
+; LINUX-X86-NEXT: # %bb.29: # %select.end
+; LINUX-X86-NEXT: movl %ecx, %eax
+; LINUX-X86-NEXT: xorl $127, %eax
+; LINUX-X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; LINUX-X86-NEXT: movl %ecx, %edx
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; LINUX-X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; LINUX-X86-NEXT: orl %eax, %ecx
+; LINUX-X86-NEXT: movl %edi, %eax
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; LINUX-X86-NEXT: je .LBB0_30
+; LINUX-X86-NEXT: # %bb.27: # %udiv-bb15
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl %edx, %ecx
+; LINUX-X86-NEXT: xorb $127, %cl
+; LINUX-X86-NEXT: movl %ecx, %eax
+; LINUX-X86-NEXT: shrb $3, %al
+; LINUX-X86-NEXT: andb $12, %al
+; LINUX-X86-NEXT: negb %al
+; LINUX-X86-NEXT: movsbl %al, %eax
+; LINUX-X86-NEXT: movl 264(%esp,%eax), %edi
+; LINUX-X86-NEXT: movl 268(%esp,%eax), %esi
+; LINUX-X86-NEXT: shldl %cl, %edi, %esi
+; LINUX-X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl 256(%esp,%eax), %ebp
+; LINUX-X86-NEXT: movl 260(%esp,%eax), %ebx
+; LINUX-X86-NEXT: shldl %cl, %ebx, %edi
+; LINUX-X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: shldl %cl, %ebp, %ebx
+; LINUX-X86-NEXT: shll %cl, %ebp
+; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: addl $1, %edx
+; LINUX-X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; LINUX-X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; LINUX-X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; LINUX-X86-NEXT: jb .LBB0_28
+; LINUX-X86-NEXT: # %bb.24: # %udiv-preheader4
+; LINUX-X86-NEXT: movl %edx, %ecx
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl %ecx, %eax
+; LINUX-X86-NEXT: shrb $3, %al
+; LINUX-X86-NEXT: andb $12, %al
+; LINUX-X86-NEXT: movzbl %al, %eax
+; LINUX-X86-NEXT: movl 220(%esp,%eax), %ebp
+; LINUX-X86-NEXT: movl 216(%esp,%eax), %edx
+; LINUX-X86-NEXT: movl %edx, %esi
+; LINUX-X86-NEXT: shrdl %cl, %ebp, %esi
+; LINUX-X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl 208(%esp,%eax), %edi
+; LINUX-X86-NEXT: movl 212(%esp,%eax), %esi
+; LINUX-X86-NEXT: movl %esi, %eax
+; LINUX-X86-NEXT: shrdl %cl, %edx, %eax
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: shrl %cl, %ebp
+; LINUX-X86-NEXT: movl %ebp, %edx
+; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: # kill: def $cl killed $cl killed $ecx
+; LINUX-X86-NEXT: shrdl %cl, %esi, %edi
+; LINUX-X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: addl $-1, %eax
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: adcl $-1, %eax
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; LINUX-X86-NEXT: adcl $-1, %ecx
+; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; LINUX-X86-NEXT: adcl $-1, %ecx
+; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; LINUX-X86-NEXT: .p2align 4
+; LINUX-X86-NEXT: .LBB0_25: # %udiv-do-while3
+; LINUX-X86-NEXT: # =>This Inner Loop Header: Depth=1
+; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; LINUX-X86-NEXT: shldl $1, %ebp, %edx
+; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; LINUX-X86-NEXT: shldl $1, %edi, %ebp
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; LINUX-X86-NEXT: shldl $1, %esi, %edi
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; LINUX-X86-NEXT: shldl $1, %edx, %esi
+; LINUX-X86-NEXT: shldl $1, %ecx, %edx
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: orl %eax, %edx
+; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: shldl $1, %ebx, %ecx
+; LINUX-X86-NEXT: orl %eax, %ecx
+; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; LINUX-X86-NEXT: shldl $1, %ecx, %ebx
+; LINUX-X86-NEXT: orl %eax, %ebx
+; LINUX-X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: addl %ecx, %ecx
+; LINUX-X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: cmpl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; LINUX-X86-NEXT: sbbl %edi, %ecx
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; LINUX-X86-NEXT: sbbl %ebp, %ecx
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; LINUX-X86-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; LINUX-X86-NEXT: sarl $31, %edx
+; LINUX-X86-NEXT: movl %edx, %ecx
+; LINUX-X86-NEXT: andl $1, %ecx
+; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; LINUX-X86-NEXT: movl %edx, %ecx
+; LINUX-X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; LINUX-X86-NEXT: movl %edx, %ebx
+; LINUX-X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; LINUX-X86-NEXT: movl %edx, %eax
+; LINUX-X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; LINUX-X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; LINUX-X86-NEXT: subl %edx, %esi
+; LINUX-X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: sbbl %eax, %edi
+; LINUX-X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; LINUX-X86-NEXT: sbbl %ebx, %ebp
+; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; LINUX-X86-NEXT: sbbl %ecx, %edx
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; LINUX-X86-NEXT: addl $-1, %ecx
+; LINUX-X86-NEXT: adcl $-1, %ebp
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; LINUX-X86-NEXT: adcl $-1, %esi
+; LINUX-X86-NEXT: adcl $-1, %edi
+; LINUX-X86-NEXT: movl %ebp, %eax
+; LINUX-X86-NEXT: orl %edi, %eax
+; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: orl %esi, %ecx
+; LINUX-X86-NEXT: orl %eax, %ecx
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; LINUX-X86-NEXT: jne .LBB0_25
+; LINUX-X86-NEXT: .LBB0_26: # %udiv-loop-exit2
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; LINUX-X86-NEXT: shldl $1, %ecx, %esi
+; LINUX-X86-NEXT: shldl $1, %ebx, %ecx
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: shldl $1, %eax, %ebx
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; LINUX-X86-NEXT: leal (%edx,%eax,2), %eax
+; LINUX-X86-NEXT: movl %ecx, %ebp
+; LINUX-X86-NEXT: .LBB0_23: # %udiv-end1
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; LINUX-X86-NEXT: .LBB0_30: # %udiv-end1
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; LINUX-X86-NEXT: xorl %edx, %esi
+; LINUX-X86-NEXT: xorl %edx, %ebp
+; LINUX-X86-NEXT: xorl %edx, %ebx
+; LINUX-X86-NEXT: xorl %edx, %eax
+; LINUX-X86-NEXT: subl %edx, %eax
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: sbbl %edx, %ebx
+; LINUX-X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: sbbl %edx, %ebp
+; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: sbbl %edx, %esi
+; LINUX-X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: subl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; LINUX-X86-NEXT: sbbl %eax, %edx
+; LINUX-X86-NEXT: sbbl %eax, %ecx
+; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; LINUX-X86-NEXT: sbbl %eax, %ebx
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; LINUX-X86-NEXT: subl %ecx, %edi
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: sbbl %ecx, %eax
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; LINUX-X86-NEXT: sbbl %ecx, %ebp
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; LINUX-X86-NEXT: sbbl %ecx, %esi
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: orl %esi, %eax
+; LINUX-X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: orl %ebp, %edi
+; LINUX-X86-NEXT: orl %eax, %edi
+; LINUX-X86-NEXT: sete %al
+; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl %edx, %ecx
+; LINUX-X86-NEXT: orl %ebx, %ecx
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; LINUX-X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; LINUX-X86-NEXT: orl %ecx, %edx
+; LINUX-X86-NEXT: sete %cl
+; LINUX-X86-NEXT: testl %esi, %esi
+; LINUX-X86-NEXT: movl %ebx, %edi
+; LINUX-X86-NEXT: jne .LBB0_31
+; LINUX-X86-NEXT: # %bb.32: # %udiv-end1
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; LINUX-X86-NEXT: bsrl %ebx, %ebp
+; LINUX-X86-NEXT: xorl $31, %ebp
+; LINUX-X86-NEXT: orl $32, %ebp
+; LINUX-X86-NEXT: jmp .LBB0_33
+; LINUX-X86-NEXT: .LBB0_31:
+; LINUX-X86-NEXT: bsrl %esi, %ebp
+; LINUX-X86-NEXT: xorl $31, %ebp
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; LINUX-X86-NEXT: .LBB0_33: # %udiv-end1
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; LINUX-X86-NEXT: testl %edx, %edx
+; LINUX-X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: jne .LBB0_34
+; LINUX-X86-NEXT: # %bb.35: # %udiv-end1
+; LINUX-X86-NEXT: bsrl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; LINUX-X86-NEXT: xorl $31, %edx
+; LINUX-X86-NEXT: orl $32, %edx
+; LINUX-X86-NEXT: jmp .LBB0_36
+; LINUX-X86-NEXT: .LBB0_34:
+; LINUX-X86-NEXT: bsrl %edx, %edx
+; LINUX-X86-NEXT: xorl $31, %edx
+; LINUX-X86-NEXT: .LBB0_36: # %udiv-end1
+; LINUX-X86-NEXT: movl %ebx, %esi
+; LINUX-X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; LINUX-X86-NEXT: jne .LBB0_38
+; LINUX-X86-NEXT: # %bb.37: # %udiv-end1
+; LINUX-X86-NEXT: orl $64, %edx
+; LINUX-X86-NEXT: movl %edx, %ebp
+; LINUX-X86-NEXT: .LBB0_38: # %udiv-end1
+; LINUX-X86-NEXT: testl %edi, %edi
+; LINUX-X86-NEXT: jne .LBB0_39
+; LINUX-X86-NEXT: # %bb.40: # %udiv-end1
+; LINUX-X86-NEXT: bsrl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; LINUX-X86-NEXT: xorl $31, %edx
+; LINUX-X86-NEXT: orl $32, %edx
+; LINUX-X86-NEXT: jmp .LBB0_41
+; LINUX-X86-NEXT: .LBB0_39:
+; LINUX-X86-NEXT: bsrl %edi, %edx
+; LINUX-X86-NEXT: xorl $31, %edx
+; LINUX-X86-NEXT: .LBB0_41: # %udiv-end1
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; LINUX-X86-NEXT: testl %esi, %esi
+; LINUX-X86-NEXT: jne .LBB0_42
+; LINUX-X86-NEXT: # %bb.43: # %udiv-end1
+; LINUX-X86-NEXT: bsrl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; LINUX-X86-NEXT: xorl $31, %esi
+; LINUX-X86-NEXT: orl $32, %esi
+; LINUX-X86-NEXT: jmp .LBB0_44
+; LINUX-X86-NEXT: .LBB0_42:
+; LINUX-X86-NEXT: bsrl %esi, %esi
+; LINUX-X86-NEXT: xorl $31, %esi
+; LINUX-X86-NEXT: .LBB0_44: # %udiv-end1
+; LINUX-X86-NEXT: orb %cl, %al
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; LINUX-X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: orl %edi, %ecx
+; LINUX-X86-NEXT: jne .LBB0_46
+; LINUX-X86-NEXT: # %bb.45: # %udiv-end1
+; LINUX-X86-NEXT: orl $64, %esi
+; LINUX-X86-NEXT: movl %esi, %edx
+; LINUX-X86-NEXT: .LBB0_46: # %udiv-end1
+; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; LINUX-X86-NEXT: movl %ebp, %ebx
+; LINUX-X86-NEXT: subl %edx, %ebx
+; LINUX-X86-NEXT: movl $0, %esi
+; LINUX-X86-NEXT: sbbl %esi, %esi
+; LINUX-X86-NEXT: movl $0, %ecx
+; LINUX-X86-NEXT: sbbl %ecx, %ecx
+; LINUX-X86-NEXT: movl $0, %edi
+; LINUX-X86-NEXT: sbbl %edi, %edi
+; LINUX-X86-NEXT: testb %al, %al
+; LINUX-X86-NEXT: jne .LBB0_47
+; LINUX-X86-NEXT: # %bb.48: # %select.false.sink8
+; LINUX-X86-NEXT: movl $127, %eax
+; LINUX-X86-NEXT: cmpl %ebx, %eax
+; LINUX-X86-NEXT: movl $0, %eax
+; LINUX-X86-NEXT: sbbl %esi, %eax
+; LINUX-X86-NEXT: movl $0, %eax
+; LINUX-X86-NEXT: sbbl %ecx, %eax
+; LINUX-X86-NEXT: movl $0, %eax
+; LINUX-X86-NEXT: sbbl %edi, %eax
+; LINUX-X86-NEXT: setb %al
+; LINUX-X86-NEXT: .LBB0_49: # %select.end7
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; LINUX-X86-NEXT: testb %al, %al
+; LINUX-X86-NEXT: movl $0, %eax
+; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; LINUX-X86-NEXT: jne .LBB0_51
+; LINUX-X86-NEXT: # %bb.50: # %select.end7
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: .LBB0_51: # %select.end7
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: jne .LBB0_52
+; LINUX-X86-NEXT: # %bb.58: # %select.end7
+; LINUX-X86-NEXT: movl %ebx, %eax
+; LINUX-X86-NEXT: xorl $127, %eax
+; LINUX-X86-NEXT: orl %ecx, %eax
+; LINUX-X86-NEXT: movl %esi, %ebp
+; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl %esi, %ecx
+; LINUX-X86-NEXT: orl %edi, %ecx
+; LINUX-X86-NEXT: orl %eax, %ecx
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; LINUX-X86-NEXT: je .LBB0_59
+; LINUX-X86-NEXT: # %bb.56: # %udiv-bb1
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; LINUX-X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; LINUX-X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; LINUX-X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl %ebx, %ecx
+; LINUX-X86-NEXT: xorb $127, %cl
+; LINUX-X86-NEXT: movl %ecx, %eax
+; LINUX-X86-NEXT: shrb $3, %al
+; LINUX-X86-NEXT: andb $12, %al
+; LINUX-X86-NEXT: negb %al
+; LINUX-X86-NEXT: movsbl %al, %eax
+; LINUX-X86-NEXT: movl 200(%esp,%eax), %edx
+; LINUX-X86-NEXT: movl 204(%esp,%eax), %esi
+; LINUX-X86-NEXT: shldl %cl, %edx, %esi
+; LINUX-X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl 192(%esp,%eax), %esi
+; LINUX-X86-NEXT: movl 196(%esp,%eax), %eax
+; LINUX-X86-NEXT: shldl %cl, %eax, %edx
+; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: shldl %cl, %esi, %eax
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: shll %cl, %esi
+; LINUX-X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: addl $1, %ebx
+; LINUX-X86-NEXT: adcl $0, %ebp
+; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; LINUX-X86-NEXT: adcl $0, %edi
+; LINUX-X86-NEXT: jb .LBB0_57
+; LINUX-X86-NEXT: # %bb.53: # %udiv-preheader
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl %ebx, %eax
+; LINUX-X86-NEXT: shrb $3, %al
+; LINUX-X86-NEXT: andb $12, %al
+; LINUX-X86-NEXT: movzbl %al, %eax
+; LINUX-X86-NEXT: movl 156(%esp,%eax), %edx
+; LINUX-X86-NEXT: movl 152(%esp,%eax), %esi
+; LINUX-X86-NEXT: movl %esi, %ebp
+; LINUX-X86-NEXT: movl %ebx, %ecx
+; LINUX-X86-NEXT: shrdl %cl, %edx, %ebp
+; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl 144(%esp,%eax), %ebp
+; LINUX-X86-NEXT: movl 148(%esp,%eax), %eax
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: shrdl %cl, %esi, %eax
+; LINUX-X86-NEXT: movl %eax, %esi
+; LINUX-X86-NEXT: shrl %cl, %edx
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: shrdl %cl, %eax, %ebp
+; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: addl $-1, %eax
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: adcl $-1, %eax
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: adcl $-1, %eax
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: adcl $-1, %eax
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; LINUX-X86-NEXT: .p2align 4
+; LINUX-X86-NEXT: .LBB0_54: # %udiv-do-while
+; LINUX-X86-NEXT: # =>This Inner Loop Header: Depth=1
+; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; LINUX-X86-NEXT: shldl $1, %ebp, %edx
+; LINUX-X86-NEXT: movl %edx, %ebx
+; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl %esi, %ecx
+; LINUX-X86-NEXT: shldl $1, %esi, %ebp
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; LINUX-X86-NEXT: shldl $1, %esi, %ecx
+; LINUX-X86-NEXT: shldl $1, %eax, %esi
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; LINUX-X86-NEXT: shldl $1, %edx, %eax
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; LINUX-X86-NEXT: orl %edi, %eax
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: shldl $1, %eax, %edx
+; LINUX-X86-NEXT: orl %edi, %edx
+; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; LINUX-X86-NEXT: shldl $1, %edx, %eax
+; LINUX-X86-NEXT: orl %edi, %eax
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: addl %edx, %edx
+; LINUX-X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: cmpl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: sbbl %ecx, %eax
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: sbbl %ebp, %eax
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: sbbl %ebx, %eax
+; LINUX-X86-NEXT: sarl $31, %eax
+; LINUX-X86-NEXT: movl %eax, %edx
+; LINUX-X86-NEXT: andl $1, %edx
+; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; LINUX-X86-NEXT: movl %eax, %ebx
+; LINUX-X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; LINUX-X86-NEXT: movl %eax, %edi
+; LINUX-X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; LINUX-X86-NEXT: movl %eax, %edx
+; LINUX-X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; LINUX-X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; LINUX-X86-NEXT: subl %eax, %esi
+; LINUX-X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: sbbl %edx, %ecx
+; LINUX-X86-NEXT: movl %ecx, %esi
+; LINUX-X86-NEXT: sbbl %edi, %ebp
+; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; LINUX-X86-NEXT: sbbl %ebx, %edx
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; LINUX-X86-NEXT: addl $-1, %ebx
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: adcl $-1, %eax
+; LINUX-X86-NEXT: adcl $-1, %ebp
+; LINUX-X86-NEXT: adcl $-1, %edi
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: orl %edi, %eax
+; LINUX-X86-NEXT: movl %ebx, %ecx
+; LINUX-X86-NEXT: orl %ebp, %ecx
+; LINUX-X86-NEXT: orl %eax, %ecx
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: jne .LBB0_54
+; LINUX-X86-NEXT: .LBB0_55: # %udiv-loop-exit
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; LINUX-X86-NEXT: shldl $1, %edx, %eax
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; LINUX-X86-NEXT: shldl $1, %esi, %edx
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: shldl $1, %eax, %esi
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; LINUX-X86-NEXT: leal (%ecx,%eax,2), %eax
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: .LBB0_59: # %udiv-end
+; LINUX-X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; LINUX-X86-NEXT: movl %eax, (%ecx)
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: movl %eax, 4(%ecx)
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: movl %eax, 8(%ecx)
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: movl %eax, 12(%ecx)
+; LINUX-X86-NEXT: movl %edx, %esi
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: imull %eax, %esi
+; LINUX-X86-NEXT: movl %edx, %eax
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; LINUX-X86-NEXT: mull %ebp
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: addl %esi, %edx
+; LINUX-X86-NEXT: imull %ebp, %ecx
+; LINUX-X86-NEXT: addl %edx, %ecx
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; LINUX-X86-NEXT: movl %edi, %eax
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; LINUX-X86-NEXT: mull %ebx
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; LINUX-X86-NEXT: imull %edi, %esi
+; LINUX-X86-NEXT: addl %edx, %esi
+; LINUX-X86-NEXT: imull {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; LINUX-X86-NEXT: addl %esi, %ebx
+; LINUX-X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: adcl %ecx, %ebx
+; LINUX-X86-NEXT: movl %ebp, %eax
+; LINUX-X86-NEXT: mull %edi
+; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: mull %edi
+; LINUX-X86-NEXT: movl %edx, %ecx
+; LINUX-X86-NEXT: movl %eax, %esi
+; LINUX-X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; LINUX-X86-NEXT: adcl $0, %ecx
+; LINUX-X86-NEXT: movl %ebp, %eax
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; LINUX-X86-NEXT: mull %ebp
+; LINUX-X86-NEXT: movl %edx, %edi
+; LINUX-X86-NEXT: addl %esi, %eax
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: adcl %ecx, %edi
+; LINUX-X86-NEXT: setb %cl
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: mull %ebp
+; LINUX-X86-NEXT: addl %edi, %eax
+; LINUX-X86-NEXT: movzbl %cl, %ecx
+; LINUX-X86-NEXT: adcl %ecx, %edx
+; LINUX-X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; LINUX-X86-NEXT: adcl %ebx, %edx
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; LINUX-X86-NEXT: subl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; LINUX-X86-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; LINUX-X86-NEXT: sbbl %eax, %ecx
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: sbbl %edx, %eax
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; LINUX-X86-NEXT: xorl %edx, %eax
+; LINUX-X86-NEXT: xorl %edx, %ecx
+; LINUX-X86-NEXT: xorl %edx, %esi
+; LINUX-X86-NEXT: xorl %edx, %edi
+; LINUX-X86-NEXT: subl %edx, %edi
+; LINUX-X86-NEXT: sbbl %edx, %esi
+; LINUX-X86-NEXT: sbbl %edx, %ecx
+; LINUX-X86-NEXT: sbbl %edx, %eax
+; LINUX-X86-NEXT: movl %eax, %edx
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; LINUX-X86-NEXT: movl %edi, (%eax)
+; LINUX-X86-NEXT: movl %esi, 4(%eax)
+; LINUX-X86-NEXT: movl %ecx, 8(%eax)
+; LINUX-X86-NEXT: movl %edx, 12(%eax)
+; LINUX-X86-NEXT: addl $284, %esp # imm = 0x11C
+; LINUX-X86-NEXT: .cfi_def_cfa_offset 20
+; LINUX-X86-NEXT: popl %esi
+; LINUX-X86-NEXT: .cfi_def_cfa_offset 16
+; LINUX-X86-NEXT: popl %edi
+; LINUX-X86-NEXT: .cfi_def_cfa_offset 12
+; LINUX-X86-NEXT: popl %ebx
+; LINUX-X86-NEXT: .cfi_def_cfa_offset 8
+; LINUX-X86-NEXT: popl %ebp
+; LINUX-X86-NEXT: .cfi_def_cfa_offset 4
+; LINUX-X86-NEXT: retl
+; LINUX-X86-NEXT: .LBB0_17:
+; LINUX-X86-NEXT: .cfi_def_cfa_offset 304
+; LINUX-X86-NEXT: movb $1, %al
+; LINUX-X86-NEXT: jmp .LBB0_19
+; LINUX-X86-NEXT: .LBB0_28:
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; LINUX-X86-NEXT: jmp .LBB0_26
+; LINUX-X86-NEXT: .LBB0_47:
+; LINUX-X86-NEXT: movb $1, %al
+; LINUX-X86-NEXT: jmp .LBB0_49
+; LINUX-X86-NEXT: .LBB0_57:
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; LINUX-X86-NEXT: jmp .LBB0_55
+; LINUX-X86-NEXT: .LBB0_22:
+; LINUX-X86-NEXT: movl %edi, %eax
+; LINUX-X86-NEXT: jmp .LBB0_23
+; LINUX-X86-NEXT: .LBB0_52:
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; LINUX-X86-NEXT: jmp .LBB0_59
+;
+; WIN32-LABEL: sdivrem_i128:
+; WIN32: # %bb.0: # %_udiv-special-cases_udiv-special-cases
+; WIN32-NEXT: pushl %ebp
+; WIN32-NEXT: movl %esp, %ebp
+; WIN32-NEXT: pushl %ebx
+; WIN32-NEXT: pushl %edi
+; WIN32-NEXT: pushl %esi
+; WIN32-NEXT: andl $-16, %esp
+; WIN32-NEXT: subl $288, %esp # imm = 0x120
+; WIN32-NEXT: movl 36(%ebp), %edi
+; WIN32-NEXT: movl %edi, %esi
+; WIN32-NEXT: sarl $31, %esi
+; WIN32-NEXT: xorl %esi, %edi
+; WIN32-NEXT: movl 32(%ebp), %eax
+; WIN32-NEXT: xorl %esi, %eax
+; WIN32-NEXT: movl 28(%ebp), %ecx
+; WIN32-NEXT: xorl %esi, %ecx
+; WIN32-NEXT: movl 24(%ebp), %edx
+; WIN32-NEXT: xorl %esi, %edx
+; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: subl %esi, %edx
+; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: sbbl %esi, %ecx
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: sbbl %esi, %eax
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: sbbl %esi, %edi
+; WIN32-NEXT: movl 52(%ebp), %ebx
+; WIN32-NEXT: movl %ebx, %edx
+; WIN32-NEXT: sarl $31, %edx
+; WIN32-NEXT: xorl %edx, %ebx
+; WIN32-NEXT: movl 48(%ebp), %esi
+; WIN32-NEXT: xorl %edx, %esi
+; WIN32-NEXT: movl 44(%ebp), %eax
+; WIN32-NEXT: xorl %edx, %eax
+; WIN32-NEXT: movl 40(%ebp), %ecx
+; WIN32-NEXT: xorl %edx, %ecx
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: subl %edx, %ecx
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: sbbl %edx, %eax
+; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: sbbl %edx, %esi
+; WIN32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: sbbl %edx, %ebx
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: orl %ebx, %eax
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: orl %esi, %ecx
+; WIN32-NEXT: orl %eax, %ecx
+; WIN32-NEXT: sete %al
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: orl %edi, %ecx
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; WIN32-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; WIN32-NEXT: orl %ecx, %edx
+; WIN32-NEXT: sete %cl
+; WIN32-NEXT: testl %ebx, %ebx
+; WIN32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: jne LBB0_1
+; WIN32-NEXT: # %bb.2: # %_udiv-special-cases_udiv-special-cases
+; WIN32-NEXT: bsrl %esi, %edi
+; WIN32-NEXT: xorl $31, %edi
+; WIN32-NEXT: orl $32, %edi
+; WIN32-NEXT: jmp LBB0_3
+; WIN32-NEXT: LBB0_1:
+; WIN32-NEXT: bsrl %ebx, %edi
+; WIN32-NEXT: xorl $31, %edi
+; WIN32-NEXT: LBB0_3: # %_udiv-special-cases_udiv-special-cases
+; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; WIN32-NEXT: testl %edx, %edx
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; WIN32-NEXT: jne LBB0_4
+; WIN32-NEXT: # %bb.5: # %_udiv-special-cases_udiv-special-cases
+; WIN32-NEXT: bsrl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; WIN32-NEXT: xorl $31, %edx
+; WIN32-NEXT: orl $32, %edx
+; WIN32-NEXT: jmp LBB0_6
+; WIN32-NEXT: LBB0_4:
+; WIN32-NEXT: bsrl %edx, %edx
+; WIN32-NEXT: xorl $31, %edx
+; WIN32-NEXT: LBB0_6: # %_udiv-special-cases_udiv-special-cases
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; WIN32-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; WIN32-NEXT: jne LBB0_8
+; WIN32-NEXT: # %bb.7: # %_udiv-special-cases_udiv-special-cases
+; WIN32-NEXT: orl $64, %edx
+; WIN32-NEXT: movl %edx, %edi
+; WIN32-NEXT: LBB0_8: # %_udiv-special-cases_udiv-special-cases
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; WIN32-NEXT: testl %edx, %edx
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; WIN32-NEXT: jne LBB0_9
+; WIN32-NEXT: # %bb.10: # %_udiv-special-cases_udiv-special-cases
+; WIN32-NEXT: bsrl %ebx, %edx
+; WIN32-NEXT: xorl $31, %edx
+; WIN32-NEXT: orl $32, %edx
+; WIN32-NEXT: testl %esi, %esi
+; WIN32-NEXT: je LBB0_13
+; WIN32-NEXT: LBB0_12:
+; WIN32-NEXT: bsrl %esi, %esi
+; WIN32-NEXT: xorl $31, %esi
+; WIN32-NEXT: jmp LBB0_14
+; WIN32-NEXT: LBB0_9:
+; WIN32-NEXT: bsrl %edx, %edx
+; WIN32-NEXT: xorl $31, %edx
+; WIN32-NEXT: testl %esi, %esi
+; WIN32-NEXT: jne LBB0_12
+; WIN32-NEXT: LBB0_13: # %_udiv-special-cases_udiv-special-cases
+; WIN32-NEXT: bsrl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; WIN32-NEXT: xorl $31, %esi
+; WIN32-NEXT: orl $32, %esi
+; WIN32-NEXT: LBB0_14: # %_udiv-special-cases_udiv-special-cases
+; WIN32-NEXT: orb %cl, %al
+; WIN32-NEXT: movl %ebx, %ecx
+; WIN32-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; WIN32-NEXT: jne LBB0_16
+; WIN32-NEXT: # %bb.15: # %_udiv-special-cases_udiv-special-cases
+; WIN32-NEXT: orl $64, %esi
+; WIN32-NEXT: movl %esi, %edx
+; WIN32-NEXT: LBB0_16: # %_udiv-special-cases_udiv-special-cases
+; WIN32-NEXT: xorl %ebx, %ebx
+; WIN32-NEXT: subl %edx, %edi
+; WIN32-NEXT: movl $0, %ecx
+; WIN32-NEXT: sbbl %ecx, %ecx
+; WIN32-NEXT: movl $0, %edx
+; WIN32-NEXT: sbbl %edx, %edx
+; WIN32-NEXT: movl $0, %esi
+; WIN32-NEXT: sbbl %esi, %esi
+; WIN32-NEXT: testb %al, %al
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: jne LBB0_17
+; WIN32-NEXT: # %bb.18: # %select.false.sink
+; WIN32-NEXT: movl $127, %eax
+; WIN32-NEXT: cmpl %edi, %eax
+; WIN32-NEXT: movl $0, %eax
+; WIN32-NEXT: sbbl %ecx, %eax
+; WIN32-NEXT: movl $0, %eax
+; WIN32-NEXT: sbbl %edx, %eax
+; WIN32-NEXT: movl $0, %eax
+; WIN32-NEXT: sbbl %esi, %eax
+; WIN32-NEXT: setb %al
+; WIN32-NEXT: LBB0_19: # %select.end
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: xorl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: testb %al, %al
+; WIN32-NEXT: movl $0, %eax
+; WIN32-NEXT: movl $0, %edx
+; WIN32-NEXT: movl $0, %ecx
+; WIN32-NEXT: jne LBB0_21
+; WIN32-NEXT: # %bb.20: # %select.end
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: LBB0_21: # %select.end
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: jne LBB0_22
+; WIN32-NEXT: # %bb.28: # %select.end
+; WIN32-NEXT: movl %edi, %eax
+; WIN32-NEXT: xorl $127, %eax
+; WIN32-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; WIN32-NEXT: movl %edi, %esi
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; WIN32-NEXT: orl %eax, %ecx
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: je LBB0_29
+; WIN32-NEXT: # %bb.26: # %udiv-bb15
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl %esi, %eax
+; WIN32-NEXT: movl %eax, %ecx
+; WIN32-NEXT: movl %esi, %edi
+; WIN32-NEXT: xorb $127, %cl
+; WIN32-NEXT: movl %ecx, %eax
+; WIN32-NEXT: shrb $3, %al
+; WIN32-NEXT: andb $12, %al
+; WIN32-NEXT: negb %al
+; WIN32-NEXT: movsbl %al, %eax
+; WIN32-NEXT: movl 264(%esp,%eax), %esi
+; WIN32-NEXT: movl 268(%esp,%eax), %edx
+; WIN32-NEXT: shldl %cl, %esi, %edx
+; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl 256(%esp,%eax), %edx
+; WIN32-NEXT: movl 260(%esp,%eax), %ebx
+; WIN32-NEXT: shldl %cl, %ebx, %esi
+; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: shldl %cl, %edx, %ebx
+; WIN32-NEXT: shll %cl, %edx
+; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: addl $1, %edi
+; WIN32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: jb LBB0_27
+; WIN32-NEXT: # %bb.23: # %udiv-preheader4
+; WIN32-NEXT: movl %edi, %ecx
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl %ecx, %eax
+; WIN32-NEXT: shrb $3, %al
+; WIN32-NEXT: andb $12, %al
+; WIN32-NEXT: movzbl %al, %eax
+; WIN32-NEXT: movl 220(%esp,%eax), %edi
+; WIN32-NEXT: movl 216(%esp,%eax), %esi
+; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: shrdl %cl, %edi, %esi
+; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl 208(%esp,%eax), %edx
+; WIN32-NEXT: movl 212(%esp,%eax), %eax
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; WIN32-NEXT: shrdl %cl, %esi, %eax
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: shrl %cl, %edi
+; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: # kill: def $cl killed $cl killed $ecx
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: shrdl %cl, %eax, %edx
+; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: addl $-1, %ecx
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: adcl $-1, %ecx
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: adcl $-1, %ecx
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: adcl $-1, %ecx
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: .p2align 4
+; WIN32-NEXT: LBB0_24: # %udiv-do-while3
+; WIN32-NEXT: # =>This Inner Loop Header: Depth=1
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; WIN32-NEXT: shldl $1, %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; WIN32-NEXT: shldl $1, %esi, %edx
+; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: shldl $1, %eax, %esi
+; WIN32-NEXT: shldl $1, %ecx, %eax
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; WIN32-NEXT: shldl $1, %edi, %ecx
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; WIN32-NEXT: orl %edx, %ecx
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: shldl $1, %ebx, %edi
+; WIN32-NEXT: orl %edx, %edi
+; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: shldl $1, %ecx, %ebx
+; WIN32-NEXT: orl %edx, %ebx
+; WIN32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: addl %ecx, %ecx
+; WIN32-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: cmpl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: sbbl %esi, %ecx
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; WIN32-NEXT: sbbl %edx, %ecx
+; WIN32-NEXT: sarl $31, %ecx
+; WIN32-NEXT: movl %ecx, %edi
+; WIN32-NEXT: andl $1, %edi
+; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %ecx, %edi
+; WIN32-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %ecx, %edi
+; WIN32-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; WIN32-NEXT: movl %ecx, %ebx
+; WIN32-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; WIN32-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: subl %ecx, %eax
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: sbbl %ebx, %esi
+; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; WIN32-NEXT: sbbl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: movl %edx, %eax
+; WIN32-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; WIN32-NEXT: addl $-1, %edx
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: adcl $-1, %ecx
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; WIN32-NEXT: adcl $-1, %edi
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: adcl $-1, %eax
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: orl %eax, %ecx
+; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: orl %edi, %edx
+; WIN32-NEXT: orl %ecx, %edx
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: jne LBB0_24
+; WIN32-NEXT: LBB0_25: # %udiv-loop-exit2
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; WIN32-NEXT: shldl $1, %edx, %ecx
+; WIN32-NEXT: shldl $1, %ebx, %edx
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; WIN32-NEXT: shldl $1, %esi, %ebx
+; WIN32-NEXT: movl %ecx, %eax
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: leal (%ecx,%esi,2), %ecx
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; WIN32-NEXT: LBB0_29: # %udiv-end1
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; WIN32-NEXT: xorl %esi, %eax
+; WIN32-NEXT: xorl %esi, %edx
+; WIN32-NEXT: xorl %esi, %ebx
+; WIN32-NEXT: xorl %esi, %ecx
+; WIN32-NEXT: subl %esi, %ecx
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: sbbl %esi, %ebx
+; WIN32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: sbbl %esi, %edx
+; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: sbbl %esi, %eax
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: subl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: sbbl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: sbbl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: sbbl %ecx, %edi
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; WIN32-NEXT: subl %ecx, %esi
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: sbbl %ecx, %eax
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; WIN32-NEXT: sbbl %ecx, %edx
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; WIN32-NEXT: sbbl %ecx, %ebx
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: orl %ebx, %eax
+; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: orl %edx, %esi
+; WIN32-NEXT: orl %eax, %esi
+; WIN32-NEXT: sete {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: movl %edi, %eax
+; WIN32-NEXT: orl %edi, %ecx
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; WIN32-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; WIN32-NEXT: orl %ecx, %edx
+; WIN32-NEXT: sete %cl
+; WIN32-NEXT: testl %ebx, %ebx
+; WIN32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: jne LBB0_30
+; WIN32-NEXT: # %bb.31: # %udiv-end1
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; WIN32-NEXT: bsrl %edi, %ebx
+; WIN32-NEXT: xorl $31, %ebx
+; WIN32-NEXT: orl $32, %ebx
+; WIN32-NEXT: jmp LBB0_32
+; WIN32-NEXT: LBB0_30:
+; WIN32-NEXT: bsrl %ebx, %ebx
+; WIN32-NEXT: xorl $31, %ebx
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; WIN32-NEXT: LBB0_32: # %udiv-end1
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; WIN32-NEXT: testl %edx, %edx
+; WIN32-NEXT: jne LBB0_33
+; WIN32-NEXT: # %bb.34: # %udiv-end1
+; WIN32-NEXT: bsrl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; WIN32-NEXT: xorl $31, %edx
+; WIN32-NEXT: orl $32, %edx
+; WIN32-NEXT: jmp LBB0_35
+; WIN32-NEXT: LBB0_33:
+; WIN32-NEXT: bsrl %edx, %edx
+; WIN32-NEXT: xorl $31, %edx
+; WIN32-NEXT: LBB0_35: # %udiv-end1
+; WIN32-NEXT: movl %edi, %esi
+; WIN32-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; WIN32-NEXT: jne LBB0_37
+; WIN32-NEXT: # %bb.36: # %udiv-end1
+; WIN32-NEXT: orl $64, %edx
+; WIN32-NEXT: movl %edx, %ebx
+; WIN32-NEXT: LBB0_37: # %udiv-end1
+; WIN32-NEXT: testl %eax, %eax
+; WIN32-NEXT: jne LBB0_38
+; WIN32-NEXT: # %bb.39: # %udiv-end1
+; WIN32-NEXT: bsrl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; WIN32-NEXT: xorl $31, %edx
+; WIN32-NEXT: orl $32, %edx
+; WIN32-NEXT: jmp LBB0_40
+; WIN32-NEXT: LBB0_38:
+; WIN32-NEXT: bsrl %eax, %edx
+; WIN32-NEXT: xorl $31, %edx
+; WIN32-NEXT: LBB0_40: # %udiv-end1
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; WIN32-NEXT: testl %esi, %esi
+; WIN32-NEXT: jne LBB0_41
+; WIN32-NEXT: # %bb.42: # %udiv-end1
+; WIN32-NEXT: bsrl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; WIN32-NEXT: xorl $31, %esi
+; WIN32-NEXT: orl $32, %esi
+; WIN32-NEXT: jmp LBB0_43
+; WIN32-NEXT: LBB0_41:
+; WIN32-NEXT: bsrl %esi, %esi
+; WIN32-NEXT: xorl $31, %esi
+; WIN32-NEXT: LBB0_43: # %udiv-end1
+; WIN32-NEXT: orb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: orl %eax, %ecx
+; WIN32-NEXT: jne LBB0_45
+; WIN32-NEXT: # %bb.44: # %udiv-end1
+; WIN32-NEXT: orl $64, %esi
+; WIN32-NEXT: movl %esi, %edx
+; WIN32-NEXT: LBB0_45: # %udiv-end1
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: subl %edx, %ebx
+; WIN32-NEXT: movl $0, %edi
+; WIN32-NEXT: sbbl %edi, %edi
+; WIN32-NEXT: movl $0, %ecx
+; WIN32-NEXT: sbbl %ecx, %ecx
+; WIN32-NEXT: movl $0, %edx
+; WIN32-NEXT: sbbl %edx, %edx
+; WIN32-NEXT: cmpb $0, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Reload
+; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: jne LBB0_46
+; WIN32-NEXT: # %bb.47: # %select.false.sink8
+; WIN32-NEXT: movl $127, %eax
+; WIN32-NEXT: cmpl %ebx, %eax
+; WIN32-NEXT: movl $0, %eax
+; WIN32-NEXT: sbbl %edi, %eax
+; WIN32-NEXT: movl $0, %eax
+; WIN32-NEXT: sbbl %ecx, %eax
+; WIN32-NEXT: movl $0, %eax
+; WIN32-NEXT: sbbl %edx, %eax
+; WIN32-NEXT: setb %al
+; WIN32-NEXT: LBB0_48: # %select.end7
+; WIN32-NEXT: testb %al, %al
+; WIN32-NEXT: movl $0, %eax
+; WIN32-NEXT: movl $0, %edx
+; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: jne LBB0_50
+; WIN32-NEXT: # %bb.49: # %select.end7
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %esi, %edx
+; WIN32-NEXT: LBB0_50: # %select.end7
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: jne LBB0_51
+; WIN32-NEXT: # %bb.57: # %select.end7
+; WIN32-NEXT: movl %ebx, %eax
+; WIN32-NEXT: xorl $127, %eax
+; WIN32-NEXT: orl %ecx, %eax
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %edi, %ecx
+; WIN32-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; WIN32-NEXT: orl %eax, %ecx
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; WIN32-NEXT: je LBB0_58
+; WIN32-NEXT: # %bb.55: # %udiv-bb1
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; WIN32-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; WIN32-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; WIN32-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl %ebx, %ecx
+; WIN32-NEXT: xorb $127, %cl
+; WIN32-NEXT: movl %ecx, %eax
+; WIN32-NEXT: shrb $3, %al
+; WIN32-NEXT: andb $12, %al
+; WIN32-NEXT: negb %al
+; WIN32-NEXT: movsbl %al, %eax
+; WIN32-NEXT: movl 200(%esp,%eax), %esi
+; WIN32-NEXT: movl 204(%esp,%eax), %edx
+; WIN32-NEXT: shldl %cl, %esi, %edx
+; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl 192(%esp,%eax), %edx
+; WIN32-NEXT: movl 196(%esp,%eax), %eax
+; WIN32-NEXT: shldl %cl, %eax, %esi
+; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: shldl %cl, %edx, %eax
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: shll %cl, %edx
+; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: addl $1, %ebx
+; WIN32-NEXT: adcl $0, %edi
+; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: jb LBB0_56
+; WIN32-NEXT: # %bb.52: # %udiv-preheader
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; WIN32-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; WIN32-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; WIN32-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl %ebx, %eax
+; WIN32-NEXT: shrb $3, %al
+; WIN32-NEXT: andb $12, %al
+; WIN32-NEXT: movzbl %al, %eax
+; WIN32-NEXT: movl 156(%esp,%eax), %edi
+; WIN32-NEXT: movl 152(%esp,%eax), %esi
+; WIN32-NEXT: movl %esi, %edx
+; WIN32-NEXT: movl %ebx, %ecx
+; WIN32-NEXT: shrdl %cl, %edi, %edx
+; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl 144(%esp,%eax), %edx
+; WIN32-NEXT: movl 148(%esp,%eax), %eax
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: shrdl %cl, %esi, %eax
+; WIN32-NEXT: shrl %cl, %edi
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; WIN32-NEXT: shrdl %cl, %esi, %edx
+; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: addl $-1, %ecx
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: adcl $-1, %ecx
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: adcl $-1, %ecx
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: adcl $-1, %ecx
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: .p2align 4
+; WIN32-NEXT: LBB0_53: # %udiv-do-while
+; WIN32-NEXT: # =>This Inner Loop Header: Depth=1
+; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; WIN32-NEXT: shldl $1, %edx, %edi
+; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %eax, %ebx
+; WIN32-NEXT: shldl $1, %eax, %edx
+; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: shldl $1, %eax, %ebx
+; WIN32-NEXT: shldl $1, %ecx, %eax
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; WIN32-NEXT: shldl $1, %edi, %ecx
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; WIN32-NEXT: orl %edx, %ecx
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: shldl $1, %esi, %edi
+; WIN32-NEXT: orl %edx, %edi
+; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: shldl $1, %ecx, %esi
+; WIN32-NEXT: orl %edx, %esi
+; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: addl %ecx, %ecx
+; WIN32-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: cmpl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: sbbl %ebx, %ecx
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; WIN32-NEXT: sarl $31, %ecx
+; WIN32-NEXT: movl %ecx, %esi
+; WIN32-NEXT: andl $1, %esi
+; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %ecx, %edx
+; WIN32-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %ecx, %esi
+; WIN32-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; WIN32-NEXT: movl %ecx, %edi
+; WIN32-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; WIN32-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: subl %ecx, %eax
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: sbbl %edi, %ebx
+; WIN32-NEXT: movl %ebx, %eax
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; WIN32-NEXT: sbbl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; WIN32-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; WIN32-NEXT: addl $-1, %ebx
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: adcl $-1, %ecx
+; WIN32-NEXT: adcl $-1, %edx
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; WIN32-NEXT: adcl $-1, %esi
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: orl %esi, %ecx
+; WIN32-NEXT: movl %ebx, %esi
+; WIN32-NEXT: orl %edx, %esi
+; WIN32-NEXT: orl %ecx, %esi
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: jne LBB0_53
+; WIN32-NEXT: LBB0_54: # %udiv-loop-exit
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; WIN32-NEXT: shldl $1, %edx, %ecx
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: shldl $1, %esi, %edx
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: shldl $1, %eax, %esi
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: leal (%ecx,%eax,2), %eax
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: LBB0_58: # %udiv-end
+; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: movl 8(%ebp), %ecx
+; WIN32-NEXT: movl %eax, (%ecx)
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: movl %eax, 4(%ecx)
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: movl %eax, 8(%ecx)
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: movl %eax, 12(%ecx)
+; WIN32-NEXT: movl %edx, %esi
+; WIN32-NEXT: imull {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; WIN32-NEXT: movl %edx, %eax
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; WIN32-NEXT: mull %edi
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: addl %esi, %edx
+; WIN32-NEXT: imull %edi, %ecx
+; WIN32-NEXT: addl %edx, %ecx
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; WIN32-NEXT: movl %ebx, %eax
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; WIN32-NEXT: mull %esi
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; WIN32-NEXT: imull %ebx, %edi
+; WIN32-NEXT: addl %edx, %edi
+; WIN32-NEXT: imull {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; WIN32-NEXT: addl %edi, %esi
+; WIN32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: adcl %ecx, %esi
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; WIN32-NEXT: movl %edi, %eax
+; WIN32-NEXT: mull %ebx
+; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: mull %ebx
+; WIN32-NEXT: movl %eax, %ecx
+; WIN32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; WIN32-NEXT: adcl $0, %edx
+; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %edi, %eax
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; WIN32-NEXT: mull %ebx
+; WIN32-NEXT: movl %edx, %edi
+; WIN32-NEXT: addl %ecx, %eax
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; WIN32-NEXT: setb %cl
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: mull %ebx
+; WIN32-NEXT: addl %edi, %eax
+; WIN32-NEXT: movzbl %cl, %ecx
+; WIN32-NEXT: adcl %ecx, %edx
+; WIN32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; WIN32-NEXT: adcl %esi, %edx
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; WIN32-NEXT: subl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; WIN32-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: sbbl %eax, %ecx
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; WIN32-NEXT: sbbl %edx, %ebx
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: xorl %eax, %ebx
+; WIN32-NEXT: xorl %eax, %ecx
+; WIN32-NEXT: xorl %eax, %esi
+; WIN32-NEXT: xorl %eax, %edi
+; WIN32-NEXT: subl %eax, %edi
+; WIN32-NEXT: sbbl %eax, %esi
+; WIN32-NEXT: sbbl %eax, %ecx
+; WIN32-NEXT: sbbl %eax, %ebx
+; WIN32-NEXT: movl 12(%ebp), %eax
+; WIN32-NEXT: movl %edi, (%eax)
+; WIN32-NEXT: movl %esi, 4(%eax)
+; WIN32-NEXT: movl %ecx, 8(%eax)
+; WIN32-NEXT: movl %ebx, 12(%eax)
+; WIN32-NEXT: leal -12(%ebp), %esp
+; WIN32-NEXT: popl %esi
+; WIN32-NEXT: popl %edi
+; WIN32-NEXT: popl %ebx
+; WIN32-NEXT: popl %ebp
+; WIN32-NEXT: retl
+; WIN32-NEXT: LBB0_17:
+; WIN32-NEXT: movb $1, %al
+; WIN32-NEXT: jmp LBB0_19
+; WIN32-NEXT: LBB0_27:
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: jmp LBB0_25
+; WIN32-NEXT: LBB0_46:
+; WIN32-NEXT: movb $1, %al
+; WIN32-NEXT: jmp LBB0_48
+; WIN32-NEXT: LBB0_56:
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: jmp LBB0_54
+; WIN32-NEXT: LBB0_22:
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: jmp LBB0_29
+; WIN32-NEXT: LBB0_51:
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; WIN32-NEXT: jmp LBB0_58
+ %q = sdiv i128 %n, %d
+ %r = srem i128 %n, %d
+ store i128 %q, ptr %q_out
+ store i128 %r, ptr %r_out
+ ret void
+}
+
+define void @udivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
+; LINUX-X64-LABEL: udivrem_i128:
+; LINUX-X64: # %bb.0:
+; LINUX-X64-NEXT: pushq %r14
+; LINUX-X64-NEXT: .cfi_def_cfa_offset 16
+; LINUX-X64-NEXT: pushq %rbx
+; LINUX-X64-NEXT: .cfi_def_cfa_offset 24
+; LINUX-X64-NEXT: subq $24, %rsp
+; LINUX-X64-NEXT: .cfi_def_cfa_offset 48
+; LINUX-X64-NEXT: .cfi_offset %rbx, -24
+; LINUX-X64-NEXT: .cfi_offset %r14, -16
+; LINUX-X64-NEXT: movq %r8, %rax
+; LINUX-X64-NEXT: movq %rsi, %rbx
+; LINUX-X64-NEXT: movq %rdi, %r14
+; LINUX-X64-NEXT: movq %rsp, %r8
+; LINUX-X64-NEXT: movq %rdx, %rdi
+; LINUX-X64-NEXT: movq %rcx, %rsi
+; LINUX-X64-NEXT: movq %rax, %rdx
+; LINUX-X64-NEXT: movq %r9, %rcx
+; LINUX-X64-NEXT: callq __udivmodti4 at PLT
+; LINUX-X64-NEXT: movq (%rsp), %rcx
+; LINUX-X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi
+; LINUX-X64-NEXT: movq %rax, (%r14)
+; LINUX-X64-NEXT: movq %rdx, 8(%r14)
+; LINUX-X64-NEXT: movq %rcx, (%rbx)
+; LINUX-X64-NEXT: movq %rsi, 8(%rbx)
+; LINUX-X64-NEXT: addq $24, %rsp
+; LINUX-X64-NEXT: .cfi_def_cfa_offset 24
+; LINUX-X64-NEXT: popq %rbx
+; LINUX-X64-NEXT: .cfi_def_cfa_offset 16
+; LINUX-X64-NEXT: popq %r14
+; LINUX-X64-NEXT: .cfi_def_cfa_offset 8
+; LINUX-X64-NEXT: retq
+;
+; LINUX-X32-LABEL: udivrem_i128:
+; LINUX-X32: # %bb.0:
+; LINUX-X32-NEXT: pushq %r14
+; LINUX-X32-NEXT: .cfi_def_cfa_offset 16
+; LINUX-X32-NEXT: pushq %rbx
+; LINUX-X32-NEXT: .cfi_def_cfa_offset 24
+; LINUX-X32-NEXT: subl $24, %esp
+; LINUX-X32-NEXT: .cfi_def_cfa_offset 48
+; LINUX-X32-NEXT: .cfi_offset %rbx, -24
+; LINUX-X32-NEXT: .cfi_offset %r14, -16
+; LINUX-X32-NEXT: movq %r8, %rax
+; LINUX-X32-NEXT: movq %rsi, %rbx
+; LINUX-X32-NEXT: movq %rdi, %r14
+; LINUX-X32-NEXT: movl %esp, %r8d
+; LINUX-X32-NEXT: movq %rdx, %rdi
+; LINUX-X32-NEXT: movq %rcx, %rsi
+; LINUX-X32-NEXT: movq %rax, %rdx
+; LINUX-X32-NEXT: movq %r9, %rcx
+; LINUX-X32-NEXT: callq __udivmodti4 at PLT
+; LINUX-X32-NEXT: movq (%esp), %rcx
+; LINUX-X32-NEXT: movq {{[0-9]+}}(%esp), %rsi
+; LINUX-X32-NEXT: movq %rax, (%r14d)
+; LINUX-X32-NEXT: movq %rdx, 8(%r14d)
+; LINUX-X32-NEXT: movq %rcx, (%ebx)
+; LINUX-X32-NEXT: movq %rsi, 8(%ebx)
+; LINUX-X32-NEXT: addl $24, %esp
+; LINUX-X32-NEXT: .cfi_def_cfa_offset 24
+; LINUX-X32-NEXT: popq %rbx
+; LINUX-X32-NEXT: .cfi_def_cfa_offset 16
+; LINUX-X32-NEXT: popq %r14
+; LINUX-X32-NEXT: .cfi_def_cfa_offset 8
+; LINUX-X32-NEXT: retq
+;
+; DARWIN-X64-LABEL: udivrem_i128:
+; DARWIN-X64: ## %bb.0:
+; DARWIN-X64-NEXT: pushq %r14
+; DARWIN-X64-NEXT: .cfi_def_cfa_offset 16
+; DARWIN-X64-NEXT: pushq %rbx
+; DARWIN-X64-NEXT: .cfi_def_cfa_offset 24
+; DARWIN-X64-NEXT: subq $24, %rsp
+; DARWIN-X64-NEXT: .cfi_def_cfa_offset 48
+; DARWIN-X64-NEXT: .cfi_offset %rbx, -24
+; DARWIN-X64-NEXT: .cfi_offset %r14, -16
+; DARWIN-X64-NEXT: movq %r8, %rax
+; DARWIN-X64-NEXT: movq %rsi, %rbx
+; DARWIN-X64-NEXT: movq %rdi, %r14
+; DARWIN-X64-NEXT: movq %rsp, %r8
+; DARWIN-X64-NEXT: movq %rdx, %rdi
+; DARWIN-X64-NEXT: movq %rcx, %rsi
+; DARWIN-X64-NEXT: movq %rax, %rdx
+; DARWIN-X64-NEXT: movq %r9, %rcx
+; DARWIN-X64-NEXT: callq ___udivmodti4
+; DARWIN-X64-NEXT: movq (%rsp), %rcx
+; DARWIN-X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi
+; DARWIN-X64-NEXT: movq %rax, (%r14)
+; DARWIN-X64-NEXT: movq %rdx, 8(%r14)
+; DARWIN-X64-NEXT: movq %rcx, (%rbx)
+; DARWIN-X64-NEXT: movq %rsi, 8(%rbx)
+; DARWIN-X64-NEXT: addq $24, %rsp
+; DARWIN-X64-NEXT: popq %rbx
+; DARWIN-X64-NEXT: popq %r14
+; DARWIN-X64-NEXT: retq
+;
+; MINGW-X64-LABEL: udivrem_i128:
+; MINGW-X64: # %bb.0:
+; MINGW-X64-NEXT: pushq %rsi
+; MINGW-X64-NEXT: .seh_pushreg %rsi
+; MINGW-X64-NEXT: pushq %rdi
+; MINGW-X64-NEXT: .seh_pushreg %rdi
+; MINGW-X64-NEXT: subq $88, %rsp
+; MINGW-X64-NEXT: .seh_stackalloc 88
+; MINGW-X64-NEXT: .seh_endprologue
+; MINGW-X64-NEXT: movq %rdx, %rsi
+; MINGW-X64-NEXT: movq %rcx, %rdi
+; MINGW-X64-NEXT: movaps {{[0-9]+}}(%rsp), %xmm0
+; MINGW-X64-NEXT: movq %r9, {{[0-9]+}}(%rsp)
+; MINGW-X64-NEXT: movq %r8, {{[0-9]+}}(%rsp)
+; MINGW-X64-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
+; MINGW-X64-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; MINGW-X64-NEXT: leaq {{[0-9]+}}(%rsp), %rdx
+; MINGW-X64-NEXT: leaq {{[0-9]+}}(%rsp), %r8
+; MINGW-X64-NEXT: callq __udivmodti4
+; MINGW-X64-NEXT: movaps {{[0-9]+}}(%rsp), %xmm1
+; MINGW-X64-NEXT: movaps %xmm0, (%rdi)
+; MINGW-X64-NEXT: movaps %xmm1, (%rsi)
+; MINGW-X64-NEXT: .seh_startepilogue
+; MINGW-X64-NEXT: addq $88, %rsp
+; MINGW-X64-NEXT: popq %rdi
+; MINGW-X64-NEXT: popq %rsi
+; MINGW-X64-NEXT: .seh_endepilogue
+; MINGW-X64-NEXT: retq
+; MINGW-X64-NEXT: .seh_endproc
+;
+; WIN64-LABEL: udivrem_i128:
+; WIN64: # %bb.0:
+; WIN64-NEXT: pushq %rsi
+; WIN64-NEXT: .seh_pushreg %rsi
+; WIN64-NEXT: pushq %rdi
+; WIN64-NEXT: .seh_pushreg %rdi
+; WIN64-NEXT: subq $88, %rsp
+; WIN64-NEXT: .seh_stackalloc 88
+; WIN64-NEXT: .seh_endprologue
+; WIN64-NEXT: movq %rdx, %rsi
+; WIN64-NEXT: movq %rcx, %rdi
+; WIN64-NEXT: movaps {{[0-9]+}}(%rsp), %xmm0
+; WIN64-NEXT: movq %r9, {{[0-9]+}}(%rsp)
+; WIN64-NEXT: movq %r8, {{[0-9]+}}(%rsp)
+; WIN64-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
+; WIN64-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; WIN64-NEXT: leaq {{[0-9]+}}(%rsp), %rdx
+; WIN64-NEXT: leaq {{[0-9]+}}(%rsp), %r8
+; WIN64-NEXT: callq __udivmodti4
+; WIN64-NEXT: movaps {{[0-9]+}}(%rsp), %xmm1
+; WIN64-NEXT: movaps %xmm0, (%rdi)
+; WIN64-NEXT: movaps %xmm1, (%rsi)
+; WIN64-NEXT: .seh_startepilogue
+; WIN64-NEXT: addq $88, %rsp
+; WIN64-NEXT: popq %rdi
+; WIN64-NEXT: popq %rsi
+; WIN64-NEXT: .seh_endepilogue
+; WIN64-NEXT: retq
+; WIN64-NEXT: .seh_endproc
+;
+; LINUX-X86-LABEL: udivrem_i128:
+; LINUX-X86: # %bb.0: # %_udiv-special-cases_udiv-special-cases
+; LINUX-X86-NEXT: pushl %ebp
+; LINUX-X86-NEXT: .cfi_def_cfa_offset 8
+; LINUX-X86-NEXT: pushl %ebx
+; LINUX-X86-NEXT: .cfi_def_cfa_offset 12
+; LINUX-X86-NEXT: pushl %edi
+; LINUX-X86-NEXT: .cfi_def_cfa_offset 16
+; LINUX-X86-NEXT: pushl %esi
+; LINUX-X86-NEXT: .cfi_def_cfa_offset 20
+; LINUX-X86-NEXT: subl $236, %esp
+; LINUX-X86-NEXT: .cfi_def_cfa_offset 256
+; LINUX-X86-NEXT: .cfi_offset %esi, -20
+; LINUX-X86-NEXT: .cfi_offset %edi, -16
+; LINUX-X86-NEXT: .cfi_offset %ebx, -12
+; LINUX-X86-NEXT: .cfi_offset %ebp, -8
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; LINUX-X86-NEXT: movl %esi, %eax
+; LINUX-X86-NEXT: orl %ebp, %eax
+; LINUX-X86-NEXT: orl %ebx, %ecx
+; LINUX-X86-NEXT: orl %eax, %ecx
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; LINUX-X86-NEXT: sete %al
+; LINUX-X86-NEXT: movl %edi, %ecx
+; LINUX-X86-NEXT: orl {{[0-9]+}}(%esp), %ecx
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; LINUX-X86-NEXT: orl {{[0-9]+}}(%esp), %edx
+; LINUX-X86-NEXT: orl %ecx, %edx
+; LINUX-X86-NEXT: sete %cl
+; LINUX-X86-NEXT: testl %ebp, %ebp
+; LINUX-X86-NEXT: jne .LBB1_2
+; LINUX-X86-NEXT: # %bb.1: # %_udiv-special-cases_udiv-special-cases
+; LINUX-X86-NEXT: bsrl %ebx, %edx
+; LINUX-X86-NEXT: xorl $31, %edx
+; LINUX-X86-NEXT: orl $32, %edx
+; LINUX-X86-NEXT: jmp .LBB1_3
+; LINUX-X86-NEXT: .LBB1_2:
+; LINUX-X86-NEXT: bsrl %ebp, %edx
+; LINUX-X86-NEXT: xorl $31, %edx
+; LINUX-X86-NEXT: .LBB1_3: # %_udiv-special-cases_udiv-special-cases
+; LINUX-X86-NEXT: movl %ebx, %ebp
+; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: testl %esi, %esi
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; LINUX-X86-NEXT: jne .LBB1_5
+; LINUX-X86-NEXT: # %bb.4: # %_udiv-special-cases_udiv-special-cases
+; LINUX-X86-NEXT: bsrl {{[0-9]+}}(%esp), %edx
+; LINUX-X86-NEXT: xorl $31, %edx
+; LINUX-X86-NEXT: orl $32, %edx
+; LINUX-X86-NEXT: jmp .LBB1_6
+; LINUX-X86-NEXT: .LBB1_5:
+; LINUX-X86-NEXT: bsrl %esi, %edx
+; LINUX-X86-NEXT: xorl $31, %edx
+; LINUX-X86-NEXT: .LBB1_6: # %_udiv-special-cases_udiv-special-cases
+; LINUX-X86-NEXT: movl %ebp, %esi
+; LINUX-X86-NEXT: orl {{[0-9]+}}(%esp), %esi
+; LINUX-X86-NEXT: jne .LBB1_8
+; LINUX-X86-NEXT: # %bb.7: # %_udiv-special-cases_udiv-special-cases
+; LINUX-X86-NEXT: orl $64, %edx
+; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: .LBB1_8: # %_udiv-special-cases_udiv-special-cases
+; LINUX-X86-NEXT: testl %ebx, %ebx
+; LINUX-X86-NEXT: jne .LBB1_11
+; LINUX-X86-NEXT: # %bb.9: # %_udiv-special-cases_udiv-special-cases
+; LINUX-X86-NEXT: bsrl {{[0-9]+}}(%esp), %edx
+; LINUX-X86-NEXT: xorl $31, %edx
+; LINUX-X86-NEXT: orl $32, %edx
+; LINUX-X86-NEXT: testl %edi, %edi
+; LINUX-X86-NEXT: je .LBB1_12
+; LINUX-X86-NEXT: .LBB1_10:
+; LINUX-X86-NEXT: bsrl %edi, %esi
+; LINUX-X86-NEXT: xorl $31, %esi
+; LINUX-X86-NEXT: jmp .LBB1_13
+; LINUX-X86-NEXT: .LBB1_11:
+; LINUX-X86-NEXT: bsrl %ebx, %edx
+; LINUX-X86-NEXT: xorl $31, %edx
+; LINUX-X86-NEXT: testl %edi, %edi
+; LINUX-X86-NEXT: jne .LBB1_10
+; LINUX-X86-NEXT: .LBB1_12: # %_udiv-special-cases_udiv-special-cases
+; LINUX-X86-NEXT: bsrl {{[0-9]+}}(%esp), %esi
+; LINUX-X86-NEXT: xorl $31, %esi
+; LINUX-X86-NEXT: orl $32, %esi
+; LINUX-X86-NEXT: .LBB1_13: # %_udiv-special-cases_udiv-special-cases
+; LINUX-X86-NEXT: orb %cl, %al
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; LINUX-X86-NEXT: orl %ebx, %ecx
+; LINUX-X86-NEXT: jne .LBB1_15
+; LINUX-X86-NEXT: # %bb.14: # %_udiv-special-cases_udiv-special-cases
+; LINUX-X86-NEXT: orl $64, %esi
+; LINUX-X86-NEXT: movl %esi, %edx
+; LINUX-X86-NEXT: .LBB1_15: # %_udiv-special-cases_udiv-special-cases
+; LINUX-X86-NEXT: xorl %ebp, %ebp
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; LINUX-X86-NEXT: subl %edx, %ecx
+; LINUX-X86-NEXT: movl $0, %edx
+; LINUX-X86-NEXT: sbbl %edx, %edx
+; LINUX-X86-NEXT: movl $0, %ebx
+; LINUX-X86-NEXT: sbbl %ebx, %ebx
+; LINUX-X86-NEXT: movl $0, %edi
+; LINUX-X86-NEXT: sbbl %edi, %edi
+; LINUX-X86-NEXT: testb %al, %al
+; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: jne .LBB1_52
+; LINUX-X86-NEXT: # %bb.16: # %select.false.sink
+; LINUX-X86-NEXT: movl $127, %eax
+; LINUX-X86-NEXT: cmpl %ecx, %eax
+; LINUX-X86-NEXT: movl $0, %eax
+; LINUX-X86-NEXT: sbbl %edx, %eax
+; LINUX-X86-NEXT: movl $0, %eax
+; LINUX-X86-NEXT: sbbl %ebx, %eax
+; LINUX-X86-NEXT: movl $0, %eax
+; LINUX-X86-NEXT: sbbl %edi, %eax
+; LINUX-X86-NEXT: setb %al
+; LINUX-X86-NEXT: .LBB1_17: # %select.end
+; LINUX-X86-NEXT: movl %edi, %edx
+; LINUX-X86-NEXT: testb %al, %al
+; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; LINUX-X86-NEXT: movl $0, %eax
+; LINUX-X86-NEXT: movl $0, %ecx
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; LINUX-X86-NEXT: jne .LBB1_19
+; LINUX-X86-NEXT: # %bb.18: # %select.end
+; LINUX-X86-NEXT: movl %esi, %ebp
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl %edi, %eax
+; LINUX-X86-NEXT: movl %ebx, %ecx
+; LINUX-X86-NEXT: .LBB1_19: # %select.end
+; LINUX-X86-NEXT: movl %edx, %esi
+; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; LINUX-X86-NEXT: jne .LBB1_25
+; LINUX-X86-NEXT: # %bb.20: # %select.end
+; LINUX-X86-NEXT: movl %edx, %eax
+; LINUX-X86-NEXT: xorl $127, %eax
+; LINUX-X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; LINUX-X86-NEXT: movl %edi, %ecx
+; LINUX-X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: orl %esi, %ecx
+; LINUX-X86-NEXT: orl %eax, %ecx
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; LINUX-X86-NEXT: je .LBB1_26
+; LINUX-X86-NEXT: # %bb.21: # %udiv-bb15
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; LINUX-X86-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl %edx, %ecx
+; LINUX-X86-NEXT: xorb $127, %cl
+; LINUX-X86-NEXT: movl %ecx, %eax
+; LINUX-X86-NEXT: shrb $3, %al
+; LINUX-X86-NEXT: andb $12, %al
+; LINUX-X86-NEXT: negb %al
+; LINUX-X86-NEXT: movsbl %al, %eax
+; LINUX-X86-NEXT: movl 216(%esp,%eax), %esi
+; LINUX-X86-NEXT: movl 220(%esp,%eax), %ebp
+; LINUX-X86-NEXT: shldl %cl, %esi, %ebp
+; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl 208(%esp,%eax), %ebx
+; LINUX-X86-NEXT: movl 212(%esp,%eax), %ebp
+; LINUX-X86-NEXT: shldl %cl, %ebp, %esi
+; LINUX-X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: shldl %cl, %ebx, %ebp
+; LINUX-X86-NEXT: shll %cl, %ebx
+; LINUX-X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: addl $1, %edx
+; LINUX-X86-NEXT: adcl $0, %edi
+; LINUX-X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; LINUX-X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; LINUX-X86-NEXT: jb .LBB1_53
+; LINUX-X86-NEXT: # %bb.22: # %udiv-preheader4
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl %edx, %eax
+; LINUX-X86-NEXT: shrb $3, %al
+; LINUX-X86-NEXT: andb $12, %al
+; LINUX-X86-NEXT: movzbl %al, %eax
+; LINUX-X86-NEXT: movl 172(%esp,%eax), %esi
+; LINUX-X86-NEXT: movl 168(%esp,%eax), %ebx
+; LINUX-X86-NEXT: movl %ebx, %edi
+; LINUX-X86-NEXT: movl %edx, %ecx
+; LINUX-X86-NEXT: shrdl %cl, %esi, %edi
+; LINUX-X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl %edx, %ecx
+; LINUX-X86-NEXT: movl 160(%esp,%eax), %edx
+; LINUX-X86-NEXT: movl 164(%esp,%eax), %edi
+; LINUX-X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: shrdl %cl, %ebx, %edi
+; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: shrl %cl, %esi
+; LINUX-X86-NEXT: movl %esi, %ebx
+; LINUX-X86-NEXT: # kill: def $cl killed $cl killed $ecx
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: shrdl %cl, %eax, %edx
+; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; LINUX-X86-NEXT: addl $-1, %eax
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; LINUX-X86-NEXT: adcl $-1, %eax
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; LINUX-X86-NEXT: adcl $-1, %eax
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; LINUX-X86-NEXT: adcl $-1, %eax
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; LINUX-X86-NEXT: .p2align 4
+; LINUX-X86-NEXT: .LBB1_23: # %udiv-do-while3
+; LINUX-X86-NEXT: # =>This Inner Loop Header: Depth=1
+; LINUX-X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; LINUX-X86-NEXT: shldl $1, %edx, %ebx
+; LINUX-X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl %edi, %esi
+; LINUX-X86-NEXT: shldl $1, %edi, %edx
+; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; LINUX-X86-NEXT: shldl $1, %edi, %esi
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; LINUX-X86-NEXT: shldl $1, %ebx, %edi
+; LINUX-X86-NEXT: shldl $1, %eax, %ebx
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; LINUX-X86-NEXT: orl %edx, %ebx
+; LINUX-X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: shldl $1, %ebp, %eax
+; LINUX-X86-NEXT: orl %edx, %eax
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: shldl $1, %ecx, %ebp
+; LINUX-X86-NEXT: orl %edx, %ebp
+; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: addl %ecx, %ecx
+; LINUX-X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: cmpl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; LINUX-X86-NEXT: sbbl %esi, %ecx
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; LINUX-X86-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; LINUX-X86-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; LINUX-X86-NEXT: sarl $31, %ebp
+; LINUX-X86-NEXT: movl %ebp, %eax
+; LINUX-X86-NEXT: andl $1, %eax
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; LINUX-X86-NEXT: movl %ebp, %ecx
+; LINUX-X86-NEXT: andl {{[0-9]+}}(%esp), %ecx
+; LINUX-X86-NEXT: movl %ebp, %ebx
+; LINUX-X86-NEXT: andl {{[0-9]+}}(%esp), %ebx
+; LINUX-X86-NEXT: movl %ebp, %eax
+; LINUX-X86-NEXT: andl {{[0-9]+}}(%esp), %eax
+; LINUX-X86-NEXT: andl {{[0-9]+}}(%esp), %ebp
+; LINUX-X86-NEXT: subl %ebp, %edi
+; LINUX-X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; LINUX-X86-NEXT: sbbl %eax, %esi
+; LINUX-X86-NEXT: movl %esi, %edi
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; LINUX-X86-NEXT: sbbl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; LINUX-X86-NEXT: sbbl %ecx, %ebx
+; LINUX-X86-NEXT: addl $-1, %edx
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: adcl $-1, %eax
+; LINUX-X86-NEXT: adcl $-1, %esi
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; LINUX-X86-NEXT: adcl $-1, %ecx
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: orl %ecx, %eax
+; LINUX-X86-NEXT: movl %edx, %ecx
+; LINUX-X86-NEXT: orl %esi, %ecx
+; LINUX-X86-NEXT: orl %eax, %ecx
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: jne .LBB1_23
+; LINUX-X86-NEXT: .LBB1_24: # %udiv-loop-exit2
+; LINUX-X86-NEXT: shldl $1, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; LINUX-X86-NEXT: shldl $1, %ebp, %eax
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: shldl $1, %ecx, %ebp
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: leal (%eax,%ecx,2), %eax
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; LINUX-X86-NEXT: .LBB1_25: # %udiv-end1
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; LINUX-X86-NEXT: .LBB1_26: # %udiv-end1
+; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; LINUX-X86-NEXT: movl %ecx, %edx
+; LINUX-X86-NEXT: movl %ebx, %edi
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; LINUX-X86-NEXT: orl %ebx, %eax
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; LINUX-X86-NEXT: orl %ebp, %ecx
+; LINUX-X86-NEXT: orl %eax, %ecx
+; LINUX-X86-NEXT: sete {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; LINUX-X86-NEXT: orl {{[0-9]+}}(%esp), %ecx
+; LINUX-X86-NEXT: movl %edx, %esi
+; LINUX-X86-NEXT: movl %edi, %eax
+; LINUX-X86-NEXT: orl %edi, %edx
+; LINUX-X86-NEXT: orl %ecx, %edx
+; LINUX-X86-NEXT: sete %cl
+; LINUX-X86-NEXT: testl %ebp, %ebp
+; LINUX-X86-NEXT: jne .LBB1_28
+; LINUX-X86-NEXT: # %bb.27: # %udiv-end1
+; LINUX-X86-NEXT: bsrl %ebx, %edi
+; LINUX-X86-NEXT: xorl $31, %edi
+; LINUX-X86-NEXT: orl $32, %edi
+; LINUX-X86-NEXT: jmp .LBB1_29
+; LINUX-X86-NEXT: .LBB1_28:
+; LINUX-X86-NEXT: bsrl %ebp, %edi
+; LINUX-X86-NEXT: xorl $31, %edi
+; LINUX-X86-NEXT: .LBB1_29: # %udiv-end1
+; LINUX-X86-NEXT: movl %ebx, %ebp
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; LINUX-X86-NEXT: testl %edx, %edx
+; LINUX-X86-NEXT: movl %eax, %ebx
+; LINUX-X86-NEXT: movl %esi, %eax
+; LINUX-X86-NEXT: jne .LBB1_31
+; LINUX-X86-NEXT: # %bb.30: # %udiv-end1
+; LINUX-X86-NEXT: bsrl {{[0-9]+}}(%esp), %edx
+; LINUX-X86-NEXT: xorl $31, %edx
+; LINUX-X86-NEXT: orl $32, %edx
+; LINUX-X86-NEXT: jmp .LBB1_32
+; LINUX-X86-NEXT: .LBB1_31:
+; LINUX-X86-NEXT: bsrl %edx, %edx
+; LINUX-X86-NEXT: xorl $31, %edx
+; LINUX-X86-NEXT: .LBB1_32: # %udiv-end1
+; LINUX-X86-NEXT: movl %ebp, %esi
+; LINUX-X86-NEXT: orl {{[0-9]+}}(%esp), %esi
+; LINUX-X86-NEXT: jne .LBB1_34
+; LINUX-X86-NEXT: # %bb.33: # %udiv-end1
+; LINUX-X86-NEXT: orl $64, %edx
+; LINUX-X86-NEXT: movl %edx, %edi
+; LINUX-X86-NEXT: .LBB1_34: # %udiv-end1
+; LINUX-X86-NEXT: testl %ebx, %ebx
+; LINUX-X86-NEXT: jne .LBB1_37
+; LINUX-X86-NEXT: # %bb.35: # %udiv-end1
+; LINUX-X86-NEXT: bsrl {{[0-9]+}}(%esp), %edx
+; LINUX-X86-NEXT: xorl $31, %edx
+; LINUX-X86-NEXT: orl $32, %edx
+; LINUX-X86-NEXT: testl %eax, %eax
+; LINUX-X86-NEXT: je .LBB1_38
+; LINUX-X86-NEXT: .LBB1_36:
+; LINUX-X86-NEXT: bsrl %eax, %esi
+; LINUX-X86-NEXT: xorl $31, %esi
+; LINUX-X86-NEXT: jmp .LBB1_39
+; LINUX-X86-NEXT: .LBB1_37:
+; LINUX-X86-NEXT: bsrl %ebx, %edx
+; LINUX-X86-NEXT: xorl $31, %edx
+; LINUX-X86-NEXT: testl %eax, %eax
+; LINUX-X86-NEXT: jne .LBB1_36
+; LINUX-X86-NEXT: .LBB1_38: # %udiv-end1
+; LINUX-X86-NEXT: bsrl {{[0-9]+}}(%esp), %esi
+; LINUX-X86-NEXT: xorl $31, %esi
+; LINUX-X86-NEXT: orl $32, %esi
+; LINUX-X86-NEXT: .LBB1_39: # %udiv-end1
+; LINUX-X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
+; LINUX-X86-NEXT: orb %cl, %al
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; LINUX-X86-NEXT: orl %ebx, %ecx
+; LINUX-X86-NEXT: jne .LBB1_41
+; LINUX-X86-NEXT: # %bb.40: # %udiv-end1
+; LINUX-X86-NEXT: orl $64, %esi
+; LINUX-X86-NEXT: movl %esi, %edx
+; LINUX-X86-NEXT: .LBB1_41: # %udiv-end1
+; LINUX-X86-NEXT: xorl %ebx, %ebx
+; LINUX-X86-NEXT: subl %edx, %edi
+; LINUX-X86-NEXT: movl %edi, %edx
+; LINUX-X86-NEXT: movl $0, %edi
+; LINUX-X86-NEXT: sbbl %edi, %edi
+; LINUX-X86-NEXT: movl $0, %ecx
+; LINUX-X86-NEXT: sbbl %ecx, %ecx
+; LINUX-X86-NEXT: movl $0, %ebp
+; LINUX-X86-NEXT: sbbl %ebp, %ebp
+; LINUX-X86-NEXT: testb %al, %al
+; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: jne .LBB1_54
+; LINUX-X86-NEXT: # %bb.42: # %select.false.sink8
+; LINUX-X86-NEXT: movl $127, %eax
+; LINUX-X86-NEXT: cmpl %edx, %eax
+; LINUX-X86-NEXT: movl $0, %eax
+; LINUX-X86-NEXT: sbbl %edi, %eax
+; LINUX-X86-NEXT: movl $0, %eax
+; LINUX-X86-NEXT: sbbl %ecx, %eax
+; LINUX-X86-NEXT: movl $0, %eax
+; LINUX-X86-NEXT: sbbl %ebp, %eax
+; LINUX-X86-NEXT: setb %al
+; LINUX-X86-NEXT: .LBB1_43: # %select.end7
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: testb %al, %al
+; LINUX-X86-NEXT: movl $0, %eax
+; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; LINUX-X86-NEXT: movl $0, %ebp
+; LINUX-X86-NEXT: jne .LBB1_45
+; LINUX-X86-NEXT: # %bb.44: # %select.end7
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; LINUX-X86-NEXT: .LBB1_45: # %select.end7
+; LINUX-X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: jne .LBB1_57
+; LINUX-X86-NEXT: # %bb.46: # %select.end7
+; LINUX-X86-NEXT: movl %edx, %eax
+; LINUX-X86-NEXT: xorl $127, %eax
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; LINUX-X86-NEXT: orl %ebx, %eax
+; LINUX-X86-NEXT: movl %edi, %ecx
+; LINUX-X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; LINUX-X86-NEXT: orl %eax, %ecx
+; LINUX-X86-NEXT: movl %edx, %ecx
+; LINUX-X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; LINUX-X86-NEXT: movl %ebp, %edx
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; LINUX-X86-NEXT: je .LBB1_51
+; LINUX-X86-NEXT: # %bb.47: # %udiv-bb1
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; LINUX-X86-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; LINUX-X86-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; LINUX-X86-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl %eax, %esi
+; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: movl %eax, %ecx
+; LINUX-X86-NEXT: xorb $127, %cl
+; LINUX-X86-NEXT: movl %ecx, %eax
+; LINUX-X86-NEXT: shrb $3, %al
+; LINUX-X86-NEXT: andb $12, %al
+; LINUX-X86-NEXT: negb %al
+; LINUX-X86-NEXT: movsbl %al, %eax
+; LINUX-X86-NEXT: movl 152(%esp,%eax), %edx
+; LINUX-X86-NEXT: movl 156(%esp,%eax), %ebp
+; LINUX-X86-NEXT: shldl %cl, %edx, %ebp
+; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl 144(%esp,%eax), %ebp
+; LINUX-X86-NEXT: movl 148(%esp,%eax), %eax
+; LINUX-X86-NEXT: shldl %cl, %eax, %edx
+; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: shldl %cl, %ebp, %eax
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: shll %cl, %ebp
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: addl $1, %edx
+; LINUX-X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; LINUX-X86-NEXT: adcl $0, %ebx
+; LINUX-X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; LINUX-X86-NEXT: jb .LBB1_55
+; LINUX-X86-NEXT: # %bb.48: # %udiv-preheader
+; LINUX-X86-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; LINUX-X86-NEXT: movl %edx, %eax
+; LINUX-X86-NEXT: shrb $3, %al
+; LINUX-X86-NEXT: andb $12, %al
+; LINUX-X86-NEXT: movzbl %al, %eax
+; LINUX-X86-NEXT: movl 108(%esp,%eax), %ebp
+; LINUX-X86-NEXT: movl 104(%esp,%eax), %ebx
+; LINUX-X86-NEXT: movl %ebx, %esi
+; LINUX-X86-NEXT: movl %edx, %ecx
+; LINUX-X86-NEXT: shrdl %cl, %ebp, %esi
+; LINUX-X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl 96(%esp,%eax), %esi
+; LINUX-X86-NEXT: movl 100(%esp,%eax), %eax
+; LINUX-X86-NEXT: movl %eax, %edi
+; LINUX-X86-NEXT: shrdl %cl, %ebx, %edi
+; LINUX-X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: shrl %cl, %ebp
+; LINUX-X86-NEXT: shrdl %cl, %eax, %esi
+; LINUX-X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; LINUX-X86-NEXT: addl $-1, %eax
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; LINUX-X86-NEXT: adcl $-1, %eax
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; LINUX-X86-NEXT: adcl $-1, %eax
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; LINUX-X86-NEXT: adcl $-1, %eax
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; LINUX-X86-NEXT: .p2align 4
+; LINUX-X86-NEXT: .LBB1_49: # %udiv-do-while
+; LINUX-X86-NEXT: # =>This Inner Loop Header: Depth=1
+; LINUX-X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; LINUX-X86-NEXT: shldl $1, %edi, %ebp
+; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; LINUX-X86-NEXT: shldl $1, %ecx, %edi
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; LINUX-X86-NEXT: shldl $1, %ebx, %ecx
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; LINUX-X86-NEXT: shldl $1, %edx, %ebx
+; LINUX-X86-NEXT: shldl $1, %eax, %edx
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; LINUX-X86-NEXT: orl %ebp, %edx
+; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: shldl $1, %esi, %eax
+; LINUX-X86-NEXT: orl %ebp, %eax
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: shldl $1, %eax, %esi
+; LINUX-X86-NEXT: orl %ebp, %esi
+; LINUX-X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: addl %eax, %eax
+; LINUX-X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: cmpl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: sbbl %ecx, %eax
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: sbbl %edi, %eax
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; LINUX-X86-NEXT: sarl $31, %eax
+; LINUX-X86-NEXT: movl %eax, %edx
+; LINUX-X86-NEXT: andl $1, %edx
+; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; LINUX-X86-NEXT: movl %eax, %esi
+; LINUX-X86-NEXT: andl {{[0-9]+}}(%esp), %esi
+; LINUX-X86-NEXT: movl %eax, %ebp
+; LINUX-X86-NEXT: andl {{[0-9]+}}(%esp), %ebp
+; LINUX-X86-NEXT: movl %eax, %edx
+; LINUX-X86-NEXT: andl {{[0-9]+}}(%esp), %edx
+; LINUX-X86-NEXT: andl {{[0-9]+}}(%esp), %eax
+; LINUX-X86-NEXT: subl %eax, %ebx
+; LINUX-X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: sbbl %edx, %ecx
+; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; LINUX-X86-NEXT: sbbl %ebp, %edi
+; LINUX-X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; LINUX-X86-NEXT: sbbl %esi, %ebp
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; LINUX-X86-NEXT: addl $-1, %edx
+; LINUX-X86-NEXT: adcl $-1, %edi
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; LINUX-X86-NEXT: adcl $-1, %ebx
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; LINUX-X86-NEXT: adcl $-1, %ecx
+; LINUX-X86-NEXT: movl %edi, %eax
+; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: orl %ecx, %eax
+; LINUX-X86-NEXT: movl %edx, %ecx
+; LINUX-X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: orl %ebx, %ecx
+; LINUX-X86-NEXT: orl %eax, %ecx
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: jne .LBB1_49
+; LINUX-X86-NEXT: .LBB1_50: # %udiv-loop-exit
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; LINUX-X86-NEXT: shldl $1, %eax, %edx
+; LINUX-X86-NEXT: shldl $1, %esi, %eax
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; LINUX-X86-NEXT: shldl $1, %edi, %esi
+; LINUX-X86-NEXT: movl %eax, %ecx
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: leal (%eax,%edi,2), %eax
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl %ecx, %edi
+; LINUX-X86-NEXT: .LBB1_51: # %udiv-end
+; LINUX-X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; LINUX-X86-NEXT: movl %eax, (%ecx)
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: movl %eax, 4(%ecx)
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: movl %eax, 8(%ecx)
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: movl %eax, 12(%ecx)
+; LINUX-X86-NEXT: movl %edi, %esi
+; LINUX-X86-NEXT: movl %edi, %eax
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; LINUX-X86-NEXT: imull %ebp, %esi
+; LINUX-X86-NEXT: movl %edx, %ecx
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; LINUX-X86-NEXT: mull %edi
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: addl %esi, %edx
+; LINUX-X86-NEXT: imull %edi, %ecx
+; LINUX-X86-NEXT: addl %edx, %ecx
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; LINUX-X86-NEXT: movl %esi, %eax
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; LINUX-X86-NEXT: mull %ebx
+; LINUX-X86-NEXT: movl %eax, %edi
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; LINUX-X86-NEXT: imull %esi, %eax
+; LINUX-X86-NEXT: addl %edx, %eax
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; LINUX-X86-NEXT: imull %ebp, %ebx
+; LINUX-X86-NEXT: addl %eax, %ebx
+; LINUX-X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; LINUX-X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: adcl %ecx, %ebx
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; LINUX-X86-NEXT: movl %edi, %eax
+; LINUX-X86-NEXT: mull %esi
+; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; LINUX-X86-NEXT: mull %esi
+; LINUX-X86-NEXT: movl %edx, %ecx
+; LINUX-X86-NEXT: movl %eax, %esi
+; LINUX-X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; LINUX-X86-NEXT: adcl $0, %ecx
+; LINUX-X86-NEXT: movl %edi, %eax
+; LINUX-X86-NEXT: mull %ebp
+; LINUX-X86-NEXT: movl %edx, %edi
+; LINUX-X86-NEXT: addl %esi, %eax
+; LINUX-X86-NEXT: movl %eax, %esi
+; LINUX-X86-NEXT: adcl %ecx, %edi
+; LINUX-X86-NEXT: setb %cl
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; LINUX-X86-NEXT: mull %ebp
+; LINUX-X86-NEXT: addl %edi, %eax
+; LINUX-X86-NEXT: movzbl %cl, %ecx
+; LINUX-X86-NEXT: adcl %ecx, %edx
+; LINUX-X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; LINUX-X86-NEXT: adcl %ebx, %edx
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; LINUX-X86-NEXT: subl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; LINUX-X86-NEXT: sbbl %esi, %ecx
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; LINUX-X86-NEXT: sbbl %eax, %esi
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; LINUX-X86-NEXT: sbbl %edx, %ebx
+; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; LINUX-X86-NEXT: movl %edi, (%eax)
+; LINUX-X86-NEXT: movl %ecx, 4(%eax)
+; LINUX-X86-NEXT: movl %esi, 8(%eax)
+; LINUX-X86-NEXT: movl %ebx, 12(%eax)
+; LINUX-X86-NEXT: addl $236, %esp
+; LINUX-X86-NEXT: .cfi_def_cfa_offset 20
+; LINUX-X86-NEXT: popl %esi
+; LINUX-X86-NEXT: .cfi_def_cfa_offset 16
+; LINUX-X86-NEXT: popl %edi
+; LINUX-X86-NEXT: .cfi_def_cfa_offset 12
+; LINUX-X86-NEXT: popl %ebx
+; LINUX-X86-NEXT: .cfi_def_cfa_offset 8
+; LINUX-X86-NEXT: popl %ebp
+; LINUX-X86-NEXT: .cfi_def_cfa_offset 4
+; LINUX-X86-NEXT: retl
+; LINUX-X86-NEXT: .LBB1_52:
+; LINUX-X86-NEXT: .cfi_def_cfa_offset 256
+; LINUX-X86-NEXT: movb $1, %al
+; LINUX-X86-NEXT: jmp .LBB1_17
+; LINUX-X86-NEXT: .LBB1_53:
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; LINUX-X86-NEXT: jmp .LBB1_24
+; LINUX-X86-NEXT: .LBB1_54:
+; LINUX-X86-NEXT: movb $1, %al
+; LINUX-X86-NEXT: jmp .LBB1_43
+; LINUX-X86-NEXT: .LBB1_55:
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; LINUX-X86-NEXT: jmp .LBB1_50
+; LINUX-X86-NEXT: .LBB1_57:
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; LINUX-X86-NEXT: movl %ebp, %edx
+; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; LINUX-X86-NEXT: jmp .LBB1_51
+;
+; WIN32-LABEL: udivrem_i128:
+; WIN32: # %bb.0: # %_udiv-special-cases_udiv-special-cases
+; WIN32-NEXT: pushl %ebp
+; WIN32-NEXT: movl %esp, %ebp
+; WIN32-NEXT: pushl %ebx
+; WIN32-NEXT: pushl %edi
+; WIN32-NEXT: pushl %esi
+; WIN32-NEXT: andl $-16, %esp
+; WIN32-NEXT: subl $240, %esp
+; WIN32-NEXT: movl 48(%ebp), %ebx
+; WIN32-NEXT: movl 40(%ebp), %ecx
+; WIN32-NEXT: movl 52(%ebp), %edi
+; WIN32-NEXT: movl 44(%ebp), %esi
+; WIN32-NEXT: movl %esi, %eax
+; WIN32-NEXT: orl %edi, %eax
+; WIN32-NEXT: orl %ebx, %ecx
+; WIN32-NEXT: orl %eax, %ecx
+; WIN32-NEXT: sete %al
+; WIN32-NEXT: movl 28(%ebp), %ecx
+; WIN32-NEXT: orl 36(%ebp), %ecx
+; WIN32-NEXT: movl 24(%ebp), %edx
+; WIN32-NEXT: orl 32(%ebp), %edx
+; WIN32-NEXT: orl %ecx, %edx
+; WIN32-NEXT: sete %cl
+; WIN32-NEXT: testl %edi, %edi
+; WIN32-NEXT: jne LBB1_1
+; WIN32-NEXT: # %bb.2: # %_udiv-special-cases_udiv-special-cases
+; WIN32-NEXT: bsrl %ebx, %edx
+; WIN32-NEXT: xorl $31, %edx
+; WIN32-NEXT: orl $32, %edx
+; WIN32-NEXT: jmp LBB1_3
+; WIN32-NEXT: LBB1_1:
+; WIN32-NEXT: bsrl %edi, %edx
+; WIN32-NEXT: xorl $31, %edx
+; WIN32-NEXT: LBB1_3: # %_udiv-special-cases_udiv-special-cases
+; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: testl %esi, %esi
+; WIN32-NEXT: jne LBB1_4
+; WIN32-NEXT: # %bb.5: # %_udiv-special-cases_udiv-special-cases
+; WIN32-NEXT: bsrl 40(%ebp), %edx
+; WIN32-NEXT: xorl $31, %edx
+; WIN32-NEXT: orl $32, %edx
+; WIN32-NEXT: jmp LBB1_6
+; WIN32-NEXT: LBB1_4:
+; WIN32-NEXT: bsrl %esi, %edx
+; WIN32-NEXT: xorl $31, %edx
+; WIN32-NEXT: LBB1_6: # %_udiv-special-cases_udiv-special-cases
+; WIN32-NEXT: movl %ebx, %esi
+; WIN32-NEXT: orl %edi, %esi
+; WIN32-NEXT: jne LBB1_8
+; WIN32-NEXT: # %bb.7: # %_udiv-special-cases_udiv-special-cases
+; WIN32-NEXT: orl $64, %edx
+; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: LBB1_8: # %_udiv-special-cases_udiv-special-cases
+; WIN32-NEXT: movl 36(%ebp), %edi
+; WIN32-NEXT: testl %edi, %edi
+; WIN32-NEXT: movl 28(%ebp), %esi
+; WIN32-NEXT: jne LBB1_9
+; WIN32-NEXT: # %bb.10: # %_udiv-special-cases_udiv-special-cases
+; WIN32-NEXT: bsrl 32(%ebp), %edx
+; WIN32-NEXT: xorl $31, %edx
+; WIN32-NEXT: orl $32, %edx
+; WIN32-NEXT: testl %esi, %esi
+; WIN32-NEXT: je LBB1_13
+; WIN32-NEXT: LBB1_12:
+; WIN32-NEXT: bsrl %esi, %esi
+; WIN32-NEXT: xorl $31, %esi
+; WIN32-NEXT: jmp LBB1_14
+; WIN32-NEXT: LBB1_9:
+; WIN32-NEXT: bsrl %edi, %edx
+; WIN32-NEXT: xorl $31, %edx
+; WIN32-NEXT: testl %esi, %esi
+; WIN32-NEXT: jne LBB1_12
+; WIN32-NEXT: LBB1_13: # %_udiv-special-cases_udiv-special-cases
+; WIN32-NEXT: bsrl 24(%ebp), %esi
+; WIN32-NEXT: xorl $31, %esi
+; WIN32-NEXT: orl $32, %esi
+; WIN32-NEXT: LBB1_14: # %_udiv-special-cases_udiv-special-cases
+; WIN32-NEXT: orb %cl, %al
+; WIN32-NEXT: movl 32(%ebp), %ecx
+; WIN32-NEXT: orl %edi, %ecx
+; WIN32-NEXT: jne LBB1_16
+; WIN32-NEXT: # %bb.15: # %_udiv-special-cases_udiv-special-cases
+; WIN32-NEXT: orl $64, %esi
+; WIN32-NEXT: movl %esi, %edx
+; WIN32-NEXT: LBB1_16: # %_udiv-special-cases_udiv-special-cases
+; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: subl %edx, %ecx
+; WIN32-NEXT: movl $0, %esi
+; WIN32-NEXT: sbbl %esi, %esi
+; WIN32-NEXT: movl $0, %edi
+; WIN32-NEXT: sbbl %edi, %edi
+; WIN32-NEXT: movl $0, %ebx
+; WIN32-NEXT: sbbl %ebx, %ebx
+; WIN32-NEXT: testb %al, %al
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: jne LBB1_17
+; WIN32-NEXT: # %bb.18: # %select.false.sink
+; WIN32-NEXT: movl $127, %eax
+; WIN32-NEXT: cmpl %ecx, %eax
+; WIN32-NEXT: movl $0, %eax
+; WIN32-NEXT: sbbl %esi, %eax
+; WIN32-NEXT: movl $0, %eax
+; WIN32-NEXT: movl %edi, %ecx
+; WIN32-NEXT: sbbl %edi, %eax
+; WIN32-NEXT: movl $0, %eax
+; WIN32-NEXT: sbbl %ebx, %eax
+; WIN32-NEXT: setb %al
+; WIN32-NEXT: LBB1_19: # %select.end
+; WIN32-NEXT: movl 28(%ebp), %edx
+; WIN32-NEXT: movl %ebx, %edi
+; WIN32-NEXT: movl %edx, %ebx
+; WIN32-NEXT: movl 32(%ebp), %esi
+; WIN32-NEXT: testb %al, %al
+; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: movl $0, %eax
+; WIN32-NEXT: movl $0, %edx
+; WIN32-NEXT: jne LBB1_21
+; WIN32-NEXT: # %bb.20: # %select.end
+; WIN32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl 24(%ebp), %eax
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %esi, %eax
+; WIN32-NEXT: movl 36(%ebp), %edx
+; WIN32-NEXT: LBB1_21: # %select.end
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: jne LBB1_22
+; WIN32-NEXT: # %bb.28: # %select.end
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; WIN32-NEXT: movl %edx, %eax
+; WIN32-NEXT: xorl $127, %eax
+; WIN32-NEXT: orl %ecx, %eax
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: orl %edi, %ecx
+; WIN32-NEXT: orl %eax, %ecx
+; WIN32-NEXT: movl 36(%ebp), %ecx
+; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: je LBB1_29
+; WIN32-NEXT: # %bb.26: # %udiv-bb15
+; WIN32-NEXT: movl 24(%ebp), %eax
+; WIN32-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl %edx, %ecx
+; WIN32-NEXT: xorb $127, %cl
+; WIN32-NEXT: movl %ecx, %eax
+; WIN32-NEXT: shrb $3, %al
+; WIN32-NEXT: andb $12, %al
+; WIN32-NEXT: negb %al
+; WIN32-NEXT: movsbl %al, %eax
+; WIN32-NEXT: movl 216(%esp,%eax), %edx
+; WIN32-NEXT: movl 220(%esp,%eax), %edi
+; WIN32-NEXT: shldl %cl, %edx, %edi
+; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %esi, %edi
+; WIN32-NEXT: movl 208(%esp,%eax), %esi
+; WIN32-NEXT: movl 212(%esp,%eax), %eax
+; WIN32-NEXT: shldl %cl, %eax, %edx
+; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; WIN32-NEXT: shldl %cl, %esi, %eax
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: shll %cl, %esi
+; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: addl $1, %edx
+; WIN32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: jb LBB1_27
+; WIN32-NEXT: # %bb.23: # %udiv-preheader4
+; WIN32-NEXT: movl 24(%ebp), %eax
+; WIN32-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl %edi, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl 36(%ebp), %eax
+; WIN32-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl %edx, %eax
+; WIN32-NEXT: shrb $3, %al
+; WIN32-NEXT: andb $12, %al
+; WIN32-NEXT: movzbl %al, %eax
+; WIN32-NEXT: movl 172(%esp,%eax), %esi
+; WIN32-NEXT: movl %edx, %ecx
+; WIN32-NEXT: movl 168(%esp,%eax), %edx
+; WIN32-NEXT: movl %edx, %edi
+; WIN32-NEXT: shrdl %cl, %esi, %edi
+; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl 160(%esp,%eax), %ebx
+; WIN32-NEXT: movl 164(%esp,%eax), %edi
+; WIN32-NEXT: movl %edi, %eax
+; WIN32-NEXT: shrdl %cl, %edx, %eax
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: shrl %cl, %esi
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: # kill: def $cl killed $cl killed $ecx
+; WIN32-NEXT: shrdl %cl, %edi, %ebx
+; WIN32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl 40(%ebp), %eax
+; WIN32-NEXT: addl $-1, %eax
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl 44(%ebp), %eax
+; WIN32-NEXT: adcl $-1, %eax
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl 48(%ebp), %ecx
+; WIN32-NEXT: adcl $-1, %ecx
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl 52(%ebp), %ecx
+; WIN32-NEXT: adcl $-1, %ecx
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: .p2align 4
+; WIN32-NEXT: LBB1_24: # %udiv-do-while3
+; WIN32-NEXT: # =>This Inner Loop Header: Depth=1
+; WIN32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: shldl $1, %eax, %esi
+; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; WIN32-NEXT: shldl $1, %ebx, %eax
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; WIN32-NEXT: shldl $1, %edi, %ebx
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; WIN32-NEXT: shldl $1, %esi, %edi
+; WIN32-NEXT: shldl $1, %ecx, %esi
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: orl %eax, %esi
+; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: shldl $1, %edx, %ecx
+; WIN32-NEXT: orl %eax, %ecx
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: shldl $1, %ecx, %edx
+; WIN32-NEXT: orl %eax, %edx
+; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: addl %ecx, %ecx
+; WIN32-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: cmpl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: sbbl %ebx, %ecx
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; WIN32-NEXT: sarl $31, %ecx
+; WIN32-NEXT: movl %ecx, %edx
+; WIN32-NEXT: andl $1, %edx
+; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %ecx, %eax
+; WIN32-NEXT: andl 52(%ebp), %eax
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %ecx, %esi
+; WIN32-NEXT: andl 48(%ebp), %esi
+; WIN32-NEXT: movl %ecx, %edx
+; WIN32-NEXT: andl 44(%ebp), %edx
+; WIN32-NEXT: andl 40(%ebp), %ecx
+; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: subl %ecx, %edi
+; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: sbbl %edx, %ebx
+; WIN32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: sbbl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; WIN32-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; WIN32-NEXT: addl $-1, %edx
+; WIN32-NEXT: adcl $-1, %eax
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; WIN32-NEXT: adcl $-1, %edi
+; WIN32-NEXT: adcl $-1, %ebx
+; WIN32-NEXT: movl %eax, %ecx
+; WIN32-NEXT: orl %ebx, %ecx
+; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: orl %edi, %edx
+; WIN32-NEXT: orl %ecx, %edx
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: jne LBB1_24
+; WIN32-NEXT: LBB1_25: # %udiv-loop-exit2
+; WIN32-NEXT: shldl $1, %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: shldl $1, %edx, %ecx
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: shldl $1, %eax, %edx
+; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: leal (%ecx,%eax,2), %eax
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl 36(%ebp), %ecx
+; WIN32-NEXT: movl 28(%ebp), %ebx
+; WIN32-NEXT: LBB1_29: # %udiv-end1
+; WIN32-NEXT: movl 40(%ebp), %eax
+; WIN32-NEXT: movl 48(%ebp), %esi
+; WIN32-NEXT: orl %esi, %eax
+; WIN32-NEXT: movl %ebx, %edx
+; WIN32-NEXT: movl %ecx, %ebx
+; WIN32-NEXT: movl 44(%ebp), %ecx
+; WIN32-NEXT: movl 52(%ebp), %edi
+; WIN32-NEXT: orl %edi, %ecx
+; WIN32-NEXT: orl %eax, %ecx
+; WIN32-NEXT: sete %al
+; WIN32-NEXT: movl 24(%ebp), %ecx
+; WIN32-NEXT: orl 32(%ebp), %ecx
+; WIN32-NEXT: orl %ebx, %edx
+; WIN32-NEXT: orl %ecx, %edx
+; WIN32-NEXT: sete %cl
+; WIN32-NEXT: testl %edi, %edi
+; WIN32-NEXT: jne LBB1_30
+; WIN32-NEXT: # %bb.31: # %udiv-end1
+; WIN32-NEXT: bsrl %esi, %edx
+; WIN32-NEXT: xorl $31, %edx
+; WIN32-NEXT: orl $32, %edx
+; WIN32-NEXT: jmp LBB1_32
+; WIN32-NEXT: LBB1_30:
+; WIN32-NEXT: bsrl %edi, %edx
+; WIN32-NEXT: xorl $31, %edx
+; WIN32-NEXT: LBB1_32: # %udiv-end1
+; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %esi, %ebx
+; WIN32-NEXT: movl 44(%ebp), %edx
+; WIN32-NEXT: testl %edx, %edx
+; WIN32-NEXT: movl 40(%ebp), %esi
+; WIN32-NEXT: jne LBB1_33
+; WIN32-NEXT: # %bb.34: # %udiv-end1
+; WIN32-NEXT: bsrl %esi, %edx
+; WIN32-NEXT: xorl $31, %edx
+; WIN32-NEXT: orl $32, %edx
+; WIN32-NEXT: jmp LBB1_35
+; WIN32-NEXT: LBB1_33:
+; WIN32-NEXT: bsrl %edx, %edx
+; WIN32-NEXT: xorl $31, %edx
+; WIN32-NEXT: LBB1_35: # %udiv-end1
+; WIN32-NEXT: movl %ebx, %esi
+; WIN32-NEXT: orl %edi, %esi
+; WIN32-NEXT: jne LBB1_37
+; WIN32-NEXT: # %bb.36: # %udiv-end1
+; WIN32-NEXT: orl $64, %edx
+; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: LBB1_37: # %udiv-end1
+; WIN32-NEXT: movl 36(%ebp), %edx
+; WIN32-NEXT: testl %edx, %edx
+; WIN32-NEXT: movl 28(%ebp), %esi
+; WIN32-NEXT: jne LBB1_38
+; WIN32-NEXT: # %bb.39: # %udiv-end1
+; WIN32-NEXT: bsrl 32(%ebp), %edx
+; WIN32-NEXT: xorl $31, %edx
+; WIN32-NEXT: orl $32, %edx
+; WIN32-NEXT: testl %esi, %esi
+; WIN32-NEXT: je LBB1_42
+; WIN32-NEXT: LBB1_41:
+; WIN32-NEXT: bsrl %esi, %esi
+; WIN32-NEXT: xorl $31, %esi
+; WIN32-NEXT: jmp LBB1_43
+; WIN32-NEXT: LBB1_38:
+; WIN32-NEXT: bsrl %edx, %edx
+; WIN32-NEXT: xorl $31, %edx
+; WIN32-NEXT: testl %esi, %esi
+; WIN32-NEXT: jne LBB1_41
+; WIN32-NEXT: LBB1_42: # %udiv-end1
+; WIN32-NEXT: bsrl 24(%ebp), %esi
+; WIN32-NEXT: xorl $31, %esi
+; WIN32-NEXT: orl $32, %esi
+; WIN32-NEXT: LBB1_43: # %udiv-end1
+; WIN32-NEXT: orb %cl, %al
+; WIN32-NEXT: movl 32(%ebp), %ecx
+; WIN32-NEXT: orl 36(%ebp), %ecx
+; WIN32-NEXT: jne LBB1_45
+; WIN32-NEXT: # %bb.44: # %udiv-end1
+; WIN32-NEXT: orl $64, %esi
+; WIN32-NEXT: movl %esi, %edx
+; WIN32-NEXT: LBB1_45: # %udiv-end1
+; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: subl %edx, %ecx
+; WIN32-NEXT: movl $0, %edx
+; WIN32-NEXT: sbbl %edx, %edx
+; WIN32-NEXT: movl $0, %esi
+; WIN32-NEXT: sbbl %esi, %esi
+; WIN32-NEXT: movl $0, %edi
+; WIN32-NEXT: sbbl %edi, %edi
+; WIN32-NEXT: testb %al, %al
+; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: jne LBB1_46
+; WIN32-NEXT: # %bb.47: # %select.false.sink8
+; WIN32-NEXT: movl $127, %eax
+; WIN32-NEXT: cmpl %ecx, %eax
+; WIN32-NEXT: movl $0, %eax
+; WIN32-NEXT: sbbl %edx, %eax
+; WIN32-NEXT: movl $0, %eax
+; WIN32-NEXT: sbbl %esi, %eax
+; WIN32-NEXT: movl $0, %eax
+; WIN32-NEXT: sbbl %edi, %eax
+; WIN32-NEXT: setb %al
+; WIN32-NEXT: LBB1_48: # %select.end7
+; WIN32-NEXT: movl 28(%ebp), %ecx
+; WIN32-NEXT: movl 32(%ebp), %edx
+; WIN32-NEXT: movl %edi, %esi
+; WIN32-NEXT: testb %al, %al
+; WIN32-NEXT: movl $0, %edi
+; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: movl $0, %eax
+; WIN32-NEXT: jne LBB1_50
+; WIN32-NEXT: # %bb.49: # %select.end7
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl 24(%ebp), %edi
+; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl 36(%ebp), %eax
+; WIN32-NEXT: LBB1_50: # %select.end7
+; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: jne LBB1_51
+; WIN32-NEXT: # %bb.57: # %select.end7
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: movl %eax, %edi
+; WIN32-NEXT: xorl $127, %eax
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; WIN32-NEXT: orl %ebx, %eax
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: orl %esi, %ecx
+; WIN32-NEXT: orl %eax, %ecx
+; WIN32-NEXT: movl 36(%ebp), %ecx
+; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: je LBB1_58
+; WIN32-NEXT: # %bb.55: # %udiv-bb1
+; WIN32-NEXT: movl 24(%ebp), %eax
+; WIN32-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl 28(%ebp), %edx
+; WIN32-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl 32(%ebp), %esi
+; WIN32-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl %edi, %eax
+; WIN32-NEXT: movl %eax, %ecx
+; WIN32-NEXT: movl %edi, %esi
+; WIN32-NEXT: xorb $127, %cl
+; WIN32-NEXT: movl %ecx, %eax
+; WIN32-NEXT: shrb $3, %al
+; WIN32-NEXT: andb $12, %al
+; WIN32-NEXT: negb %al
+; WIN32-NEXT: movsbl %al, %eax
+; WIN32-NEXT: movl 152(%esp,%eax), %edx
+; WIN32-NEXT: movl 156(%esp,%eax), %edi
+; WIN32-NEXT: shldl %cl, %edx, %edi
+; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl 144(%esp,%eax), %edi
+; WIN32-NEXT: movl 148(%esp,%eax), %eax
+; WIN32-NEXT: shldl %cl, %eax, %edx
+; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: shldl %cl, %edi, %eax
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: shll %cl, %edi
+; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: addl $1, %esi
+; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: adcl $0, %ebx
+; WIN32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: movl 36(%ebp), %eax
+; WIN32-NEXT: jb LBB1_56
+; WIN32-NEXT: # %bb.52: # %udiv-preheader
+; WIN32-NEXT: movl 24(%ebp), %ecx
+; WIN32-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl 28(%ebp), %ecx
+; WIN32-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl 32(%ebp), %ecx
+; WIN32-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: movl %ecx, %eax
+; WIN32-NEXT: shrb $3, %al
+; WIN32-NEXT: andb $12, %al
+; WIN32-NEXT: movzbl %al, %eax
+; WIN32-NEXT: movl 108(%esp,%eax), %edi
+; WIN32-NEXT: movl 104(%esp,%eax), %ebx
+; WIN32-NEXT: movl %ebx, %edx
+; WIN32-NEXT: shrdl %cl, %edi, %edx
+; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl 96(%esp,%eax), %esi
+; WIN32-NEXT: movl 100(%esp,%eax), %eax
+; WIN32-NEXT: movl %eax, %edx
+; WIN32-NEXT: shrdl %cl, %ebx, %edx
+; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: shrl %cl, %edi
+; WIN32-NEXT: # kill: def $cl killed $cl killed $ecx
+; WIN32-NEXT: shrdl %cl, %eax, %esi
+; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl 40(%ebp), %ecx
+; WIN32-NEXT: addl $-1, %ecx
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl 44(%ebp), %ecx
+; WIN32-NEXT: adcl $-1, %ecx
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl 48(%ebp), %eax
+; WIN32-NEXT: adcl $-1, %eax
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl 52(%ebp), %ecx
+; WIN32-NEXT: adcl $-1, %ecx
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: .p2align 4
+; WIN32-NEXT: LBB1_53: # %udiv-do-while
+; WIN32-NEXT: # =>This Inner Loop Header: Depth=1
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: shldl $1, %eax, %edi
+; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; WIN32-NEXT: shldl $1, %esi, %eax
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: shldl $1, %eax, %esi
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; WIN32-NEXT: shldl $1, %ebx, %eax
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; WIN32-NEXT: shldl $1, %edi, %ebx
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; WIN32-NEXT: orl %edx, %ebx
+; WIN32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: shldl $1, %ecx, %edi
+; WIN32-NEXT: orl %edx, %edi
+; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; WIN32-NEXT: shldl $1, %edi, %ecx
+; WIN32-NEXT: orl %edx, %ecx
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: addl %edi, %edi
+; WIN32-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: cmpl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: sbbl %esi, %ecx
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; WIN32-NEXT: sbbl %edx, %ecx
+; WIN32-NEXT: sarl $31, %ecx
+; WIN32-NEXT: movl %ecx, %edi
+; WIN32-NEXT: andl $1, %edi
+; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %ecx, %edi
+; WIN32-NEXT: andl 52(%ebp), %edi
+; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %ecx, %edi
+; WIN32-NEXT: andl 48(%ebp), %edi
+; WIN32-NEXT: movl %ecx, %ebx
+; WIN32-NEXT: andl 44(%ebp), %ebx
+; WIN32-NEXT: andl 40(%ebp), %ecx
+; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: subl %ecx, %eax
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: sbbl %ebx, %esi
+; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: sbbl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; WIN32-NEXT: movl %edx, %edi
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; WIN32-NEXT: addl $-1, %esi
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: adcl $-1, %ecx
+; WIN32-NEXT: adcl $-1, %eax
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; WIN32-NEXT: adcl $-1, %ebx
+; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: orl %ebx, %ecx
+; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: orl %eax, %esi
+; WIN32-NEXT: orl %ecx, %esi
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: jne LBB1_53
+; WIN32-NEXT: LBB1_54: # %udiv-loop-exit
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: shldl $1, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: shldl $1, %ecx, %eax
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: shldl $1, %eax, %ecx
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; WIN32-NEXT: leal (%edx,%eax,2), %eax
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; WIN32-NEXT: movl %ecx, %eax
+; WIN32-NEXT: LBB1_58: # %udiv-end
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: movl 8(%ebp), %ecx
+; WIN32-NEXT: movl %eax, (%ecx)
+; WIN32-NEXT: movl %edx, 4(%ecx)
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: movl %eax, 8(%ecx)
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: movl %eax, 12(%ecx)
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: movl %ecx, %esi
+; WIN32-NEXT: movl 44(%ebp), %eax
+; WIN32-NEXT: imull %eax, %esi
+; WIN32-NEXT: movl %ecx, %eax
+; WIN32-NEXT: movl 40(%ebp), %edi
+; WIN32-NEXT: mull %edi
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: addl %esi, %edx
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; WIN32-NEXT: imull %edi, %esi
+; WIN32-NEXT: addl %edx, %esi
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: movl %ecx, %eax
+; WIN32-NEXT: movl 48(%ebp), %ebx
+; WIN32-NEXT: mull %ebx
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl 52(%ebp), %eax
+; WIN32-NEXT: imull %ecx, %eax
+; WIN32-NEXT: addl %edx, %eax
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; WIN32-NEXT: imull %edi, %ebx
+; WIN32-NEXT: addl %eax, %ebx
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: adcl %esi, %ebx
+; WIN32-NEXT: movl 40(%ebp), %esi
+; WIN32-NEXT: movl %esi, %eax
+; WIN32-NEXT: mull %ecx
+; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl 44(%ebp), %eax
+; WIN32-NEXT: mull %ecx
+; WIN32-NEXT: movl %eax, %ecx
+; WIN32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; WIN32-NEXT: adcl $0, %edx
+; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: movl %esi, %eax
+; WIN32-NEXT: mull %edi
+; WIN32-NEXT: movl %edx, %esi
+; WIN32-NEXT: addl %ecx, %eax
+; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; WIN32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; WIN32-NEXT: setb %cl
+; WIN32-NEXT: movl 44(%ebp), %eax
+; WIN32-NEXT: mull %edi
+; WIN32-NEXT: addl %esi, %eax
+; WIN32-NEXT: movzbl %cl, %ecx
+; WIN32-NEXT: adcl %ecx, %edx
+; WIN32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; WIN32-NEXT: adcl %ebx, %edx
+; WIN32-NEXT: movl 24(%ebp), %ebx
+; WIN32-NEXT: subl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; WIN32-NEXT: movl 28(%ebp), %ecx
+; WIN32-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; WIN32-NEXT: movl 32(%ebp), %esi
+; WIN32-NEXT: sbbl %eax, %esi
+; WIN32-NEXT: movl 36(%ebp), %edi
+; WIN32-NEXT: sbbl %edx, %edi
+; WIN32-NEXT: movl 12(%ebp), %eax
+; WIN32-NEXT: movl %ebx, (%eax)
+; WIN32-NEXT: movl %ecx, 4(%eax)
+; WIN32-NEXT: movl %esi, 8(%eax)
+; WIN32-NEXT: movl %edi, 12(%eax)
+; WIN32-NEXT: leal -12(%ebp), %esp
+; WIN32-NEXT: popl %esi
+; WIN32-NEXT: popl %edi
+; WIN32-NEXT: popl %ebx
+; WIN32-NEXT: popl %ebp
+; WIN32-NEXT: retl
+; WIN32-NEXT: LBB1_17:
+; WIN32-NEXT: movl %edi, %ecx
+; WIN32-NEXT: movb $1, %al
+; WIN32-NEXT: jmp LBB1_19
+; WIN32-NEXT: LBB1_27:
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: jmp LBB1_25
+; WIN32-NEXT: LBB1_46:
+; WIN32-NEXT: movb $1, %al
+; WIN32-NEXT: jmp LBB1_48
+; WIN32-NEXT: LBB1_56:
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; WIN32-NEXT: jmp LBB1_54
+; WIN32-NEXT: LBB1_22:
+; WIN32-NEXT: movl 36(%ebp), %ecx
+; WIN32-NEXT: jmp LBB1_29
+; WIN32-NEXT: LBB1_51:
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; WIN32-NEXT: jmp LBB1_58
+ %q = udiv i128 %n, %d
+ %r = urem i128 %n, %d
+ store i128 %q, ptr %q_out
+ store i128 %r, ptr %r_out
+ ret void
+}
>From db11c0581abb19b663b34f8a5259b4e71fa7bd15 Mon Sep 17 00:00:00 2001
From: Takashiidobe <idobetakashi at gmail.com>
Date: Sat, 4 Apr 2026 18:45:21 -0400
Subject: [PATCH 14/15] shorten tests by not autogenerating them
---
llvm/test/CodeGen/ARM/i128-divrem-libcall.ll | 2409 +-------
.../test/CodeGen/RISCV/i128-divrem-libcall.ll | 5022 +----------------
llvm/test/CodeGen/X86/i128-divrem-libcall.ll | 3449 +----------
3 files changed, 158 insertions(+), 10722 deletions(-)
diff --git a/llvm/test/CodeGen/ARM/i128-divrem-libcall.ll b/llvm/test/CodeGen/ARM/i128-divrem-libcall.ll
index af4fee06c24f6..0942251e40b54 100644
--- a/llvm/test/CodeGen/ARM/i128-divrem-libcall.ll
+++ b/llvm/test/CodeGen/ARM/i128-divrem-libcall.ll
@@ -1,1280 +1,23 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
; RUN: llc < %s -mtriple=armv6-linux-gnueabihf | FileCheck %s --check-prefix=ARMV6
; RUN: llc < %s -mtriple=armv7-linux-gnueabi | FileCheck %s --check-prefix=ARMV7
; RUN: llc < %s -mtriple=armv7-none-eabi | FileCheck %s --check-prefix=ARMV7
+; These 32-bit ARM triples inline-expand i128 div/rem. Keep the checks minimal:
+; verify that no libcalls are emitted and that the function reaches its
+; epilogue.
+
define void @sdivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
; ARMV6-LABEL: sdivrem_i128:
-; ARMV6: @ %bb.0: @ %_udiv-special-cases_udiv-special-cases
-; ARMV6-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
-; ARMV6-NEXT: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
-; ARMV6-NEXT: .pad #252
-; ARMV6-NEXT: sub sp, sp, #252
-; ARMV6-NEXT: ldr r10, [sp, #292]
-; ARMV6-NEXT: str r1, [sp, #44] @ 4-byte Spill
-; ARMV6-NEXT: str r0, [sp, #40] @ 4-byte Spill
-; ARMV6-NEXT: ldr r0, [sp, #288]
-; ARMV6-NEXT: eor r1, r3, r10, asr #31
-; ARMV6-NEXT: eor r3, r2, r10, asr #31
-; ARMV6-NEXT: eor r2, r10, r10, asr #31
-; ARMV6-NEXT: subs r8, r3, r10, asr #31
-; ARMV6-NEXT: eor r0, r0, r10, asr #31
-; ARMV6-NEXT: sbcs r9, r1, r10, asr #31
-; ARMV6-NEXT: str r0, [sp, #36] @ 4-byte Spill
-; ARMV6-NEXT: sbcs lr, r0, r10, asr #31
-; ARMV6-NEXT: str r1, [sp, #32] @ 4-byte Spill
-; ARMV6-NEXT: sbc r11, r2, r10, asr #31
-; ARMV6-NEXT: clz r0, lr
-; ARMV6-NEXT: clz r1, r8
-; ARMV6-NEXT: add r0, r0, #32
-; ARMV6-NEXT: cmp r11, #0
-; ARMV6-NEXT: add r1, r1, #32
-; ARMV6-NEXT: clzne r0, r11
-; ARMV6-NEXT: cmp r9, #0
-; ARMV6-NEXT: str r2, [sp, #28] @ 4-byte Spill
-; ARMV6-NEXT: clzne r1, r9
-; ARMV6-NEXT: orrs r2, lr, r11
-; ARMV6-NEXT: ldr r6, [sp, #308]
-; ARMV6-NEXT: addeq r0, r1, #64
-; ARMV6-NEXT: ldr r1, [sp, #304]
-; ARMV6-NEXT: str r3, [sp, #24] @ 4-byte Spill
-; ARMV6-NEXT: eor r2, r1, r6, asr #31
-; ARMV6-NEXT: ldr r1, [sp, #300]
-; ARMV6-NEXT: str r2, [sp, #20] @ 4-byte Spill
-; ARMV6-NEXT: eor r3, r1, r6, asr #31
-; ARMV6-NEXT: ldr r1, [sp, #296]
-; ARMV6-NEXT: str r3, [sp, #52] @ 4-byte Spill
-; ARMV6-NEXT: eor r7, r1, r6, asr #31
-; ARMV6-NEXT: str r7, [sp, #48] @ 4-byte Spill
-; ARMV6-NEXT: subs r7, r7, r6, asr #31
-; ARMV6-NEXT: eor r1, r6, r6, asr #31
-; ARMV6-NEXT: sbcs r5, r3, r6, asr #31
-; ARMV6-NEXT: str r1, [sp, #16] @ 4-byte Spill
-; ARMV6-NEXT: sbcs r4, r2, r6, asr #31
-; ARMV6-NEXT: clz r2, r7
-; ARMV6-NEXT: sbc r12, r1, r6, asr #31
-; ARMV6-NEXT: clz r1, r4
-; ARMV6-NEXT: add r1, r1, #32
-; ARMV6-NEXT: cmp r12, #0
-; ARMV6-NEXT: clzne r1, r12
-; ARMV6-NEXT: add r2, r2, #32
-; ARMV6-NEXT: cmp r5, #0
-; ARMV6-NEXT: str r5, [sp, #80] @ 4-byte Spill
-; ARMV6-NEXT: clzne r2, r5
-; ARMV6-NEXT: orrs r3, r4, r12
-; ARMV6-NEXT: addeq r1, r2, #64
-; ARMV6-NEXT: mov r2, #0
-; ARMV6-NEXT: subs r0, r1, r0
-; ARMV6-NEXT: str r0, [sp, #104] @ 4-byte Spill
-; ARMV6-NEXT: sbcs r1, r2, #0
-; ARMV6-NEXT: str r1, [sp, #100] @ 4-byte Spill
-; ARMV6-NEXT: sbcs r3, r2, #0
-; ARMV6-NEXT: str r3, [sp, #112] @ 4-byte Spill
-; ARMV6-NEXT: sbc r2, r2, #0
-; ARMV6-NEXT: rsbs r0, r0, #127
-; ARMV6-NEXT: rscs r0, r1, #0
-; ARMV6-NEXT: str r2, [sp, #108] @ 4-byte Spill
-; ARMV6-NEXT: rscs r0, r3, #0
-; ARMV6-NEXT: orr r1, r9, r11
-; ARMV6-NEXT: rscs r0, r2, #0
-; ARMV6-NEXT: orr r2, r8, lr
-; ARMV6-NEXT: orr r1, r2, r1
-; ARMV6-NEXT: orr r2, r5, r12
-; ARMV6-NEXT: orr r3, r7, r4
-; ARMV6-NEXT: clz r1, r1
-; ARMV6-NEXT: orr r2, r3, r2
-; ARMV6-NEXT: mov r0, #0
-; ARMV6-NEXT: clz r2, r2
-; ARMV6-NEXT: lsr r1, r1, #5
-; ARMV6-NEXT: movlo r0, #1
-; ARMV6-NEXT: str r4, [sp, #76] @ 4-byte Spill
-; ARMV6-NEXT: lsr r2, r2, #5
-; ARMV6-NEXT: orr r1, r2, r1
-; ARMV6-NEXT: orrs r0, r1, r0
-; ARMV6-NEXT: mov r3, r11
-; ARMV6-NEXT: mov r4, lr
-; ARMV6-NEXT: mov r5, r9
-; ARMV6-NEXT: mov r1, r8
-; ARMV6-NEXT: movne r3, #0
-; ARMV6-NEXT: movne r4, #0
-; ARMV6-NEXT: movne r5, #0
-; ARMV6-NEXT: movne r8, #0
-; ARMV6-NEXT: str r7, [sp, #84] @ 4-byte Spill
-; ARMV6-NEXT: asr r7, r6, #31
-; ARMV6-NEXT: eor r0, r7, r10, asr #31
-; ARMV6-NEXT: str r12, [sp, #72] @ 4-byte Spill
-; ARMV6-NEXT: str r0, [sp, #12] @ 4-byte Spill
-; ARMV6-NEXT: bne .LBB0_6
-; ARMV6-NEXT: @ %bb.1: @ %_udiv-special-cases_udiv-special-cases
-; ARMV6-NEXT: ldr r0, [sp, #104] @ 4-byte Reload
-; ARMV6-NEXT: ldr r10, [sp, #100] @ 4-byte Reload
-; ARMV6-NEXT: eor r7, r0, #127
-; ARMV6-NEXT: ldr r0, [sp, #112] @ 4-byte Reload
-; ARMV6-NEXT: orr r7, r7, r0
-; ARMV6-NEXT: ldr r0, [sp, #108] @ 4-byte Reload
-; ARMV6-NEXT: orr r6, r10, r0
-; ARMV6-NEXT: ldr r0, [sp, #52] @ 4-byte Reload
-; ARMV6-NEXT: orrs r7, r7, r6
-; ARMV6-NEXT: ldr r6, [sp, #48] @ 4-byte Reload
-; ARMV6-NEXT: beq .LBB0_7
-; ARMV6-NEXT: @ %bb.2: @ %udiv-bb15
-; ARMV6-NEXT: mov r5, #0
-; ARMV6-NEXT: mov r2, r9
-; ARMV6-NEXT: str r1, [sp, #232]
-; ARMV6-NEXT: add r3, sp, #216
-; ARMV6-NEXT: str r5, [sp, #228]
-; ARMV6-NEXT: mov r9, r1
-; ARMV6-NEXT: str r5, [sp, #224]
-; ARMV6-NEXT: mov r1, #12
-; ARMV6-NEXT: str r5, [sp, #220]
-; ARMV6-NEXT: mov r8, r2
-; ARMV6-NEXT: str r5, [sp, #216]
-; ARMV6-NEXT: add r3, r3, #16
-; ARMV6-NEXT: str r2, [sp, #236]
-; ARMV6-NEXT: str lr, [sp, #240]
-; ARMV6-NEXT: str r11, [sp, #244]
-; ARMV6-NEXT: ldr r4, [sp, #104] @ 4-byte Reload
-; ARMV6-NEXT: rsb r0, r4, #127
-; ARMV6-NEXT: and r2, r1, r0, lsr #3
-; ARMV6-NEXT: and r0, r0, #31
-; ARMV6-NEXT: eor r12, r0, #31
-; ARMV6-NEXT: ldr r2, [r3, -r2]!
-; ARMV6-NEXT: ldr r6, [r3, #8]
-; ARMV6-NEXT: ldr r7, [r3, #4]
-; ARMV6-NEXT: ldr r3, [r3, #12]
-; ARMV6-NEXT: lsr r1, r6, #1
-; ARMV6-NEXT: lsl r3, r3, r0
-; ARMV6-NEXT: orr r1, r3, r1, lsr r12
-; ARMV6-NEXT: str r1, [sp, #116] @ 4-byte Spill
-; ARMV6-NEXT: lsl r1, r6, r0
-; ARMV6-NEXT: lsrs r3, r7, #1
-; ARMV6-NEXT: lsr r6, r2, #1
-; ARMV6-NEXT: orr r1, r1, r3, lsr r12
-; ARMV6-NEXT: str r1, [sp, #92] @ 4-byte Spill
-; ARMV6-NEXT: lsl r1, r7, r0
-; ARMV6-NEXT: lsl r0, r2, r0
-; ARMV6-NEXT: adds r7, r4, #1
-; ARMV6-NEXT: str r0, [sp, #96] @ 4-byte Spill
-; ARMV6-NEXT: adcs r0, r10, #0
-; ARMV6-NEXT: str r0, [sp, #100] @ 4-byte Spill
-; ARMV6-NEXT: ldr r0, [sp, #112] @ 4-byte Reload
-; ARMV6-NEXT: orr r1, r1, r6, lsr r12
-; ARMV6-NEXT: str r1, [sp, #88] @ 4-byte Spill
-; ARMV6-NEXT: adcs r12, r0, #0
-; ARMV6-NEXT: ldr r0, [sp, #108] @ 4-byte Reload
-; ARMV6-NEXT: adcs r0, r0, #0
-; ARMV6-NEXT: str r0, [sp, #104] @ 4-byte Spill
-; ARMV6-NEXT: adcs r0, r5, #0
-; ARMV6-NEXT: bne .LBB0_14
-; ARMV6-NEXT: @ %bb.3: @ %udiv-preheader4
-; ARMV6-NEXT: str r9, [sp, #184]
-; ARMV6-NEXT: mov r0, #12
-; ARMV6-NEXT: and r1, r0, r7, lsr #3
-; ARMV6-NEXT: add r9, sp, #184
-; ARMV6-NEXT: add r0, r9, r1
-; ARMV6-NEXT: str r5, [sp, #212]
-; ARMV6-NEXT: str r5, [sp, #208]
-; ARMV6-NEXT: and r4, r7, #31
-; ARMV6-NEXT: str r11, [sp, #196]
-; ARMV6-NEXT: eor r11, r4, #31
-; ARMV6-NEXT: str r5, [sp, #204]
-; ARMV6-NEXT: mov r6, #0
-; ARMV6-NEXT: str r5, [sp, #200]
-; ARMV6-NEXT: str r8, [sp, #188]
-; ARMV6-NEXT: str lr, [sp, #192]
-; ARMV6-NEXT: ldr r5, [r0, #4]
-; ARMV6-NEXT: ldr r2, [r0, #8]
-; ARMV6-NEXT: ldr r3, [r0, #12]
-; ARMV6-NEXT: str r7, [sp, #112] @ 4-byte Spill
-; ARMV6-NEXT: ldr r1, [r9, r1]
-; ARMV6-NEXT: lsr r0, r2, r4
-; ARMV6-NEXT: lsl r7, r3, #1
-; ARMV6-NEXT: orr r0, r0, r7, lsl r11
-; ARMV6-NEXT: lsr r7, r5, r4
-; ARMV6-NEXT: lsl r5, r5, #1
-; ARMV6-NEXT: lsl r2, r2, #1
-; ARMV6-NEXT: lsr r1, r1, r4
-; ARMV6-NEXT: orr r8, r1, r5, lsl r11
-; ARMV6-NEXT: ldr r1, [sp, #84] @ 4-byte Reload
-; ARMV6-NEXT: orr r2, r7, r2, lsl r11
-; ARMV6-NEXT: ldr r11, [sp, #116] @ 4-byte Reload
-; ARMV6-NEXT: subs r1, r1, #1
-; ARMV6-NEXT: str r1, [sp, #68] @ 4-byte Spill
-; ARMV6-NEXT: ldr r1, [sp, #80] @ 4-byte Reload
-; ARMV6-NEXT: ldr lr, [sp, #100] @ 4-byte Reload
-; ARMV6-NEXT: sbcs r1, r1, #0
-; ARMV6-NEXT: str r1, [sp, #64] @ 4-byte Spill
-; ARMV6-NEXT: ldr r1, [sp, #76] @ 4-byte Reload
-; ARMV6-NEXT: ldr r7, [sp, #96] @ 4-byte Reload
-; ARMV6-NEXT: sbcs r1, r1, #0
-; ARMV6-NEXT: str r1, [sp, #60] @ 4-byte Spill
-; ARMV6-NEXT: ldr r1, [sp, #72] @ 4-byte Reload
-; ARMV6-NEXT: ldr r10, [sp, #92] @ 4-byte Reload
-; ARMV6-NEXT: sbc r1, r1, #0
-; ARMV6-NEXT: str r1, [sp, #56] @ 4-byte Spill
-; ARMV6-NEXT: lsr r1, r3, r4
-; ARMV6-NEXT: ldr r4, [sp, #104] @ 4-byte Reload
-; ARMV6-NEXT: ldr r9, [sp, #88] @ 4-byte Reload
-; ARMV6-NEXT: str r1, [sp, #108] @ 4-byte Spill
-; ARMV6-NEXT: mov r1, #0
-; ARMV6-NEXT: .LBB0_4: @ %udiv-do-while3
-; ARMV6-NEXT: @ =>This Inner Loop Header: Depth=1
-; ARMV6-NEXT: mov r5, r1
-; ARMV6-NEXT: ldr r1, [sp, #108] @ 4-byte Reload
-; ARMV6-NEXT: add r3, sp, #92
-; ARMV6-NEXT: str r11, [sp, #116] @ 4-byte Spill
-; ARMV6-NEXT: stm r3, {r6, r7, r9, r10} @ 16-byte Folded Spill
-; ARMV6-NEXT: lsl r7, r8, #1
-; ARMV6-NEXT: lsl r1, r1, #1
-; ARMV6-NEXT: ldr r3, [sp, #68] @ 4-byte Reload
-; ARMV6-NEXT: orr r7, r7, r11, lsr #31
-; ARMV6-NEXT: orr r1, r1, r0, lsr #31
-; ARMV6-NEXT: lsl r0, r0, #1
-; ARMV6-NEXT: subs r3, r3, r7
-; ARMV6-NEXT: orr r0, r0, r2, lsr #31
-; ARMV6-NEXT: lsl r2, r2, #1
-; ARMV6-NEXT: ldr r3, [sp, #64] @ 4-byte Reload
-; ARMV6-NEXT: orr r2, r2, r8, lsr #31
-; ARMV6-NEXT: ldr r6, [sp, #72] @ 4-byte Reload
-; ARMV6-NEXT: sbcs r3, r3, r2
-; ARMV6-NEXT: ldr r3, [sp, #60] @ 4-byte Reload
-; ARMV6-NEXT: sbcs r3, r3, r0
-; ARMV6-NEXT: ldr r3, [sp, #56] @ 4-byte Reload
-; ARMV6-NEXT: sbc r3, r3, r1
-; ARMV6-NEXT: and r10, r6, r3, asr #31
-; ARMV6-NEXT: ldr r6, [sp, #76] @ 4-byte Reload
-; ARMV6-NEXT: and r8, r6, r3, asr #31
-; ARMV6-NEXT: ldr r6, [sp, #80] @ 4-byte Reload
-; ARMV6-NEXT: and r9, r6, r3, asr #31
-; ARMV6-NEXT: ldr r6, [sp, #84] @ 4-byte Reload
-; ARMV6-NEXT: and r11, r6, r3, asr #31
-; ARMV6-NEXT: subs r6, r7, r11
-; ARMV6-NEXT: str r6, [sp, #88] @ 4-byte Spill
-; ARMV6-NEXT: sbcs r2, r2, r9
-; ARMV6-NEXT: sbcs r11, r0, r8
-; ARMV6-NEXT: ldr r8, [sp, #88] @ 4-byte Reload
-; ARMV6-NEXT: sbc r0, r1, r10
-; ARMV6-NEXT: mov r1, #1
-; ARMV6-NEXT: and r6, r1, r3, asr #31
-; ARMV6-NEXT: ldr r1, [sp, #112] @ 4-byte Reload
-; ARMV6-NEXT: str r0, [sp, #108] @ 4-byte Spill
-; ARMV6-NEXT: subs r1, r1, #1
-; ARMV6-NEXT: str r1, [sp, #112] @ 4-byte Spill
-; ARMV6-NEXT: sbcs lr, lr, #0
-; ARMV6-NEXT: ldr r0, [sp, #96] @ 4-byte Reload
-; ARMV6-NEXT: sbcs r12, r12, #0
-; ARMV6-NEXT: sbc r4, r4, #0
-; ARMV6-NEXT: orr r1, r1, r12
-; ARMV6-NEXT: orr r3, lr, r4
-; ARMV6-NEXT: orrs r1, r1, r3
-; ARMV6-NEXT: ldr r3, [sp, #100] @ 4-byte Reload
-; ARMV6-NEXT: ldr r1, [sp, #92] @ 4-byte Reload
-; ARMV6-NEXT: orr r7, r1, r0, lsl #1
-; ARMV6-NEXT: lsl r1, r3, #1
-; ARMV6-NEXT: orr r1, r1, r0, lsr #31
-; ARMV6-NEXT: ldr r0, [sp, #104] @ 4-byte Reload
-; ARMV6-NEXT: orr r9, r5, r1
-; ARMV6-NEXT: lsl r1, r0, #1
-; ARMV6-NEXT: orr r1, r1, r3, lsr #31
-; ARMV6-NEXT: orr r10, r5, r1
-; ARMV6-NEXT: ldr r1, [sp, #116] @ 4-byte Reload
-; ARMV6-NEXT: lsl r1, r1, #1
-; ARMV6-NEXT: orr r1, r1, r0, lsr #31
-; ARMV6-NEXT: mov r0, r11
-; ARMV6-NEXT: orr r11, r5, r1
-; ARMV6-NEXT: mov r1, #0
-; ARMV6-NEXT: bne .LBB0_4
-; ARMV6-NEXT: .LBB0_5: @ %udiv-loop-exit2
-; ARMV6-NEXT: lsl r1, r9, #1
-; ARMV6-NEXT: orr r5, r1, r7, lsr #31
-; ARMV6-NEXT: lsl r1, r10, #1
-; ARMV6-NEXT: orr r4, r1, r9, lsr #31
-; ARMV6-NEXT: lsl r1, r11, #1
-; ARMV6-NEXT: orr r8, r6, r7, lsl #1
-; ARMV6-NEXT: orr r3, r1, r10, lsr #31
-; ARMV6-NEXT: .LBB0_6:
-; ARMV6-NEXT: ldr r0, [sp, #52] @ 4-byte Reload
-; ARMV6-NEXT: ldr r6, [sp, #48] @ 4-byte Reload
-; ARMV6-NEXT: .LBB0_7: @ %udiv-end1
-; ARMV6-NEXT: ldr r2, [sp, #292]
-; ARMV6-NEXT: ldr r1, [sp, #24] @ 4-byte Reload
-; ARMV6-NEXT: str r5, [sp, #76] @ 4-byte Spill
-; ARMV6-NEXT: subs r10, r1, r2, asr #31
-; ARMV6-NEXT: ldr r1, [sp, #32] @ 4-byte Reload
-; ARMV6-NEXT: str r3, [sp, #96] @ 4-byte Spill
-; ARMV6-NEXT: sbcs r11, r1, r2, asr #31
-; ARMV6-NEXT: ldr r1, [sp, #36] @ 4-byte Reload
-; ARMV6-NEXT: str r8, [sp, #116] @ 4-byte Spill
-; ARMV6-NEXT: sbcs r5, r1, r2, asr #31
-; ARMV6-NEXT: ldr r1, [sp, #28] @ 4-byte Reload
-; ARMV6-NEXT: str r4, [sp, #100] @ 4-byte Spill
-; ARMV6-NEXT: sbc r1, r1, r2, asr #31
-; ARMV6-NEXT: clz r2, r5
-; ARMV6-NEXT: add r7, r2, #32
-; ARMV6-NEXT: clz r2, r10
-; ARMV6-NEXT: cmp r1, #0
-; ARMV6-NEXT: add r2, r2, #32
-; ARMV6-NEXT: clzne r7, r1
-; ARMV6-NEXT: cmp r11, #0
-; ARMV6-NEXT: clzne r2, r11
-; ARMV6-NEXT: orrs r3, r5, r1
-; ARMV6-NEXT: addeq r7, r2, #64
-; ARMV6-NEXT: ldr r2, [sp, #308]
-; ARMV6-NEXT: ldr r3, [sp, #20] @ 4-byte Reload
-; ARMV6-NEXT: str r11, [sp, #60] @ 4-byte Spill
-; ARMV6-NEXT: subs lr, r6, r2, asr #31
-; ARMV6-NEXT: str r5, [sp, #52] @ 4-byte Spill
-; ARMV6-NEXT: sbcs r9, r0, r2, asr #31
-; ARMV6-NEXT: ldr r0, [sp, #16] @ 4-byte Reload
-; ARMV6-NEXT: sbcs r8, r3, r2, asr #31
-; ARMV6-NEXT: clz r3, lr
-; ARMV6-NEXT: sbc r0, r0, r2, asr #31
-; ARMV6-NEXT: clz r2, r8
-; ARMV6-NEXT: add r2, r2, #32
-; ARMV6-NEXT: cmp r0, #0
-; ARMV6-NEXT: clzne r2, r0
-; ARMV6-NEXT: add r3, r3, #32
-; ARMV6-NEXT: cmp r9, #0
-; ARMV6-NEXT: str r0, [sp, #80] @ 4-byte Spill
-; ARMV6-NEXT: clzne r3, r9
-; ARMV6-NEXT: orrs r6, r8, r0
-; ARMV6-NEXT: addeq r2, r3, #64
-; ARMV6-NEXT: mov r6, #0
-; ARMV6-NEXT: subs r2, r2, r7
-; ARMV6-NEXT: str r9, [sp, #88] @ 4-byte Spill
-; ARMV6-NEXT: sbcs r4, r6, #0
-; ARMV6-NEXT: str r4, [sp, #108] @ 4-byte Spill
-; ARMV6-NEXT: sbcs r7, r6, #0
-; ARMV6-NEXT: mov r12, r2
-; ARMV6-NEXT: sbc r3, r6, #0
-; ARMV6-NEXT: rsbs r2, r2, #127
-; ARMV6-NEXT: rscs r2, r4, #0
-; ARMV6-NEXT: str r3, [sp, #112] @ 4-byte Spill
-; ARMV6-NEXT: rscs r2, r7, #0
-; ARMV6-NEXT: orr r4, lr, r8
-; ARMV6-NEXT: rscs r2, r3, #0
-; ARMV6-NEXT: orr r3, r10, r5
-; ARMV6-NEXT: orr r2, r11, r1
-; ARMV6-NEXT: str r7, [sp, #104] @ 4-byte Spill
-; ARMV6-NEXT: orr r2, r3, r2
-; ARMV6-NEXT: orr r3, r9, r0
-; ARMV6-NEXT: orr r3, r4, r3
-; ARMV6-NEXT: clz r2, r2
-; ARMV6-NEXT: clz r3, r3
-; ARMV6-NEXT: ldr r7, [sp, #12] @ 4-byte Reload
-; ARMV6-NEXT: ldr r4, [sp, #116] @ 4-byte Reload
-; ARMV6-NEXT: lsr r2, r2, #5
-; ARMV6-NEXT: lsr r3, r3, #5
-; ARMV6-NEXT: ldr r0, [sp, #96] @ 4-byte Reload
-; ARMV6-NEXT: orr r2, r3, r2
-; ARMV6-NEXT: ldr r3, [sp, #76] @ 4-byte Reload
-; ARMV6-NEXT: movlo r6, #1
-; ARMV6-NEXT: eor r4, r4, r7
-; ARMV6-NEXT: orr r2, r2, r6
-; ARMV6-NEXT: eor r6, r0, r7
-; ARMV6-NEXT: ldr r0, [sp, #100] @ 4-byte Reload
-; ARMV6-NEXT: subs r4, r4, r7
-; ARMV6-NEXT: eor r3, r3, r7
-; ARMV6-NEXT: mov r9, r10
-; ARMV6-NEXT: sbcs r3, r3, r7
-; ARMV6-NEXT: eor r0, r0, r7
-; ARMV6-NEXT: sbcs r0, r0, r7
-; ARMV6-NEXT: str r0, [sp, #28] @ 4-byte Spill
-; ARMV6-NEXT: sbc r0, r6, r7
-; ARMV6-NEXT: str r0, [sp, #24] @ 4-byte Spill
-; ARMV6-NEXT: ldr r0, [sp, #292]
-; ARMV6-NEXT: cmp r2, #0
-; ARMV6-NEXT: str r4, [sp, #36] @ 4-byte Spill
-; ARMV6-NEXT: mov r4, r11
-; ARMV6-NEXT: mov r10, r1
-; ARMV6-NEXT: mov r11, r5
-; ARMV6-NEXT: mov r6, r9
-; ARMV6-NEXT: str r3, [sp, #32] @ 4-byte Spill
-; ARMV6-NEXT: mov r3, r4
-; ARMV6-NEXT: asr r0, r0, #31
-; ARMV6-NEXT: movne r10, #0
-; ARMV6-NEXT: movne r11, #0
-; ARMV6-NEXT: movne r3, #0
-; ARMV6-NEXT: movne r6, #0
-; ARMV6-NEXT: str r1, [sp, #56] @ 4-byte Spill
-; ARMV6-NEXT: str r8, [sp, #84] @ 4-byte Spill
-; ARMV6-NEXT: str r0, [sp, #48] @ 4-byte Spill
-; ARMV6-NEXT: str lr, [sp, #92] @ 4-byte Spill
-; ARMV6-NEXT: bne .LBB0_13
-; ARMV6-NEXT: @ %bb.8: @ %udiv-end1
-; ARMV6-NEXT: ldr r0, [sp, #104] @ 4-byte Reload
-; ARMV6-NEXT: eor r1, r12, #127
-; ARMV6-NEXT: ldr r2, [sp, #108] @ 4-byte Reload
-; ARMV6-NEXT: orr r1, r1, r0
-; ARMV6-NEXT: ldr r0, [sp, #112] @ 4-byte Reload
-; ARMV6-NEXT: orr r5, r2, r0
-; ARMV6-NEXT: orrs r1, r1, r5
-; ARMV6-NEXT: beq .LBB0_13
-; ARMV6-NEXT: @ %bb.9: @ %udiv-bb1
-; ARMV6-NEXT: mov r10, #0
-; ARMV6-NEXT: str r9, [sp, #168]
-; ARMV6-NEXT: str r10, [sp, #164]
-; ARMV6-NEXT: add r3, sp, #152
-; ARMV6-NEXT: str r10, [sp, #160]
-; ARMV6-NEXT: mov r1, #12
-; ARMV6-NEXT: str r10, [sp, #156]
-; ARMV6-NEXT: add r3, r3, #16
-; ARMV6-NEXT: str r10, [sp, #152]
-; ARMV6-NEXT: ldr r0, [sp, #60] @ 4-byte Reload
-; ARMV6-NEXT: str r0, [sp, #172]
-; ARMV6-NEXT: ldr r8, [sp, #52] @ 4-byte Reload
-; ARMV6-NEXT: str r8, [sp, #176]
-; ARMV6-NEXT: ldr r0, [sp, #56] @ 4-byte Reload
-; ARMV6-NEXT: str r0, [sp, #180]
-; ARMV6-NEXT: rsb r0, r12, #127
-; ARMV6-NEXT: and r1, r1, r0, lsr #3
-; ARMV6-NEXT: and r0, r0, #31
-; ARMV6-NEXT: eor r2, r0, #31
-; ARMV6-NEXT: ldr r6, [r3, -r1]!
-; ARMV6-NEXT: ldmib r3, {r1, r5}
-; ARMV6-NEXT: lsr r4, r5, #1
-; ARMV6-NEXT: lsl r7, r6, r0
-; ARMV6-NEXT: ldr r3, [r3, #12]
-; ARMV6-NEXT: str r9, [sp, #8] @ 4-byte Spill
-; ARMV6-NEXT: lsl r3, r3, r0
-; ARMV6-NEXT: orr r3, r3, r4, lsr r2
-; ARMV6-NEXT: str r3, [sp, #116] @ 4-byte Spill
-; ARMV6-NEXT: lsl r3, r5, r0
-; ARMV6-NEXT: lsrs r5, r1, #1
-; ARMV6-NEXT: orr r4, r3, r5, lsr r2
-; ARMV6-NEXT: lsl r1, r1, r0
-; ARMV6-NEXT: lsr r3, r6, #1
-; ARMV6-NEXT: adds r0, r12, #1
-; ARMV6-NEXT: orr r11, r1, r3, lsr r2
-; ARMV6-NEXT: ldr r1, [sp, #108] @ 4-byte Reload
-; ARMV6-NEXT: adcs r1, r1, #0
-; ARMV6-NEXT: str r1, [sp, #100] @ 4-byte Spill
-; ARMV6-NEXT: ldr r1, [sp, #104] @ 4-byte Reload
-; ARMV6-NEXT: adcs lr, r1, #0
-; ARMV6-NEXT: ldr r1, [sp, #112] @ 4-byte Reload
-; ARMV6-NEXT: adcs r1, r1, #0
-; ARMV6-NEXT: str r1, [sp, #108] @ 4-byte Spill
-; ARMV6-NEXT: adcs r2, r10, #0
-; ARMV6-NEXT: bne .LBB0_15
-; ARMV6-NEXT: @ %bb.10: @ %udiv-preheader
-; ARMV6-NEXT: str r9, [sp, #120]
-; ARMV6-NEXT: add r9, sp, #120
-; ARMV6-NEXT: str r10, [sp, #148]
-; ARMV6-NEXT: str r10, [sp, #144]
-; ARMV6-NEXT: str r10, [sp, #140]
-; ARMV6-NEXT: str r10, [sp, #136]
-; ARMV6-NEXT: ldr r1, [sp, #60] @ 4-byte Reload
-; ARMV6-NEXT: str r8, [sp, #128]
-; ARMV6-NEXT: and r8, r0, #31
-; ARMV6-NEXT: str r1, [sp, #124]
-; ARMV6-NEXT: ldr r1, [sp, #56] @ 4-byte Reload
-; ARMV6-NEXT: str r1, [sp, #132]
-; ARMV6-NEXT: mov r1, #12
-; ARMV6-NEXT: and r2, r1, r0, lsr #3
-; ARMV6-NEXT: add r5, r9, r2
-; ARMV6-NEXT: ldmib r5, {r1, r3, r10}
-; ARMV6-NEXT: lsr r5, r3, r8
-; ARMV6-NEXT: lsl r6, r10, #1
-; ARMV6-NEXT: str r0, [sp, #112] @ 4-byte Spill
-; ARMV6-NEXT: eor r0, r8, #31
-; ARMV6-NEXT: ldr r2, [r9, r2]
-; ARMV6-NEXT: lsl r3, r3, #1
-; ARMV6-NEXT: orr r6, r5, r6, lsl r0
-; ARMV6-NEXT: lsr r5, r1, r8
-; ARMV6-NEXT: orr r12, r5, r3, lsl r0
-; ARMV6-NEXT: lsl r3, r1, #1
-; ARMV6-NEXT: lsr r10, r10, r8
-; ARMV6-NEXT: lsr r2, r2, r8
-; ARMV6-NEXT: orr r5, r2, r3, lsl r0
-; ARMV6-NEXT: ldr r0, [sp, #92] @ 4-byte Reload
-; ARMV6-NEXT: mov r9, #0
-; ARMV6-NEXT: ldr r2, [sp, #80] @ 4-byte Reload
-; ARMV6-NEXT: subs r0, r0, #1
-; ARMV6-NEXT: str r0, [sp, #76] @ 4-byte Spill
-; ARMV6-NEXT: ldr r0, [sp, #88] @ 4-byte Reload
-; ARMV6-NEXT: ldr r1, [sp, #116] @ 4-byte Reload
-; ARMV6-NEXT: sbcs r0, r0, #0
-; ARMV6-NEXT: str r0, [sp, #72] @ 4-byte Spill
-; ARMV6-NEXT: ldr r0, [sp, #84] @ 4-byte Reload
-; ARMV6-NEXT: ldr r8, [sp, #100] @ 4-byte Reload
-; ARMV6-NEXT: sbcs r0, r0, #0
-; ARMV6-NEXT: str r0, [sp, #68] @ 4-byte Spill
-; ARMV6-NEXT: sbc r0, r2, #0
-; ARMV6-NEXT: str r0, [sp, #64] @ 4-byte Spill
-; ARMV6-NEXT: ldr r0, [sp, #108] @ 4-byte Reload
-; ARMV6-NEXT: mov r2, #0
-; ARMV6-NEXT: .LBB0_11: @ %udiv-do-while
-; ARMV6-NEXT: @ =>This Inner Loop Header: Depth=1
-; ARMV6-NEXT: add r3, sp, #96
-; ARMV6-NEXT: str r4, [sp, #108] @ 4-byte Spill
-; ARMV6-NEXT: stm r3, {r2, r7, r11} @ 12-byte Folded Spill
-; ARMV6-NEXT: lsl r2, r10, #1
-; ARMV6-NEXT: lsl r3, r6, #1
-; ARMV6-NEXT: orr r2, r2, r6, lsr #31
-; ARMV6-NEXT: lsl r6, r12, #1
-; ARMV6-NEXT: orr r6, r6, r5, lsr #31
-; ARMV6-NEXT: lsl r5, r5, #1
-; ARMV6-NEXT: ldr r4, [sp, #76] @ 4-byte Reload
-; ARMV6-NEXT: orr r5, r5, r1, lsr #31
-; ARMV6-NEXT: orr r3, r3, r12, lsr #31
-; ARMV6-NEXT: ldr r7, [sp, #64] @ 4-byte Reload
-; ARMV6-NEXT: subs r4, r4, r5
-; ARMV6-NEXT: str r1, [sp, #116] @ 4-byte Spill
-; ARMV6-NEXT: ldr r4, [sp, #72] @ 4-byte Reload
-; ARMV6-NEXT: ldr r1, [sp, #84] @ 4-byte Reload
-; ARMV6-NEXT: sbcs r4, r4, r6
-; ARMV6-NEXT: ldr r4, [sp, #68] @ 4-byte Reload
-; ARMV6-NEXT: sbcs r4, r4, r3
-; ARMV6-NEXT: sbc r4, r7, r2
-; ARMV6-NEXT: ldr r7, [sp, #80] @ 4-byte Reload
-; ARMV6-NEXT: and r10, r7, r4, asr #31
-; ARMV6-NEXT: and r7, r1, r4, asr #31
-; ARMV6-NEXT: ldr r1, [sp, #88] @ 4-byte Reload
-; ARMV6-NEXT: and r11, r1, r4, asr #31
-; ARMV6-NEXT: ldr r1, [sp, #92] @ 4-byte Reload
-; ARMV6-NEXT: and r12, r1, r4, asr #31
-; ARMV6-NEXT: mov r1, #1
-; ARMV6-NEXT: subs r5, r5, r12
-; ARMV6-NEXT: sbcs r12, r6, r11
-; ARMV6-NEXT: sbcs r6, r3, r7
-; ARMV6-NEXT: sbc r10, r2, r10
-; ARMV6-NEXT: and r2, r1, r4, asr #31
-; ARMV6-NEXT: ldr r1, [sp, #112] @ 4-byte Reload
-; ARMV6-NEXT: subs r1, r1, #1
-; ARMV6-NEXT: str r1, [sp, #112] @ 4-byte Spill
-; ARMV6-NEXT: sbcs r8, r8, #0
-; ARMV6-NEXT: sbcs lr, lr, #0
-; ARMV6-NEXT: sbc r0, r0, #0
-; ARMV6-NEXT: orr r4, r1, lr
-; ARMV6-NEXT: orr r3, r8, r0
-; ARMV6-NEXT: ldr r1, [sp, #100] @ 4-byte Reload
-; ARMV6-NEXT: orrs r3, r4, r3
-; ARMV6-NEXT: ldr r3, [sp, #96] @ 4-byte Reload
-; ARMV6-NEXT: orr r7, r3, r1, lsl #1
-; ARMV6-NEXT: ldr r3, [sp, #104] @ 4-byte Reload
-; ARMV6-NEXT: lsl r4, r3, #1
-; ARMV6-NEXT: orr r1, r4, r1, lsr #31
-; ARMV6-NEXT: orr r11, r9, r1
-; ARMV6-NEXT: ldr r1, [sp, #108] @ 4-byte Reload
-; ARMV6-NEXT: lsl r4, r1, #1
-; ARMV6-NEXT: orr r4, r4, r3, lsr #31
-; ARMV6-NEXT: ldr r3, [sp, #116] @ 4-byte Reload
-; ARMV6-NEXT: orr r4, r9, r4
-; ARMV6-NEXT: lsl r3, r3, #1
-; ARMV6-NEXT: orr r3, r3, r1, lsr #31
-; ARMV6-NEXT: orr r1, r9, r3
-; ARMV6-NEXT: mov r9, #0
-; ARMV6-NEXT: bne .LBB0_11
-; ARMV6-NEXT: .LBB0_12: @ %udiv-loop-exit
-; ARMV6-NEXT: lsl r0, r11, #1
-; ARMV6-NEXT: orr r3, r0, r7, lsr #31
-; ARMV6-NEXT: lsl r0, r4, #1
-; ARMV6-NEXT: ldr r9, [sp, #8] @ 4-byte Reload
-; ARMV6-NEXT: orr r11, r0, r11, lsr #31
-; ARMV6-NEXT: lsl r0, r1, #1
-; ARMV6-NEXT: orr r6, r2, r7, lsl #1
-; ARMV6-NEXT: orr r10, r0, r4, lsr #31
-; ARMV6-NEXT: .LBB0_13: @ %udiv-end
-; ARMV6-NEXT: ldr r1, [sp, #40] @ 4-byte Reload
-; ARMV6-NEXT: mov r4, #0
-; ARMV6-NEXT: ldr r0, [sp, #36] @ 4-byte Reload
-; ARMV6-NEXT: mov r12, #0
-; ARMV6-NEXT: ldr r2, [sp, #88] @ 4-byte Reload
-; ARMV6-NEXT: str r0, [r1]
-; ARMV6-NEXT: ldr r0, [sp, #32] @ 4-byte Reload
-; ARMV6-NEXT: str r0, [r1, #4]
-; ARMV6-NEXT: ldr r0, [sp, #28] @ 4-byte Reload
-; ARMV6-NEXT: str r0, [r1, #8]
-; ARMV6-NEXT: ldr r0, [sp, #24] @ 4-byte Reload
-; ARMV6-NEXT: str r0, [r1, #12]
-; ARMV6-NEXT: ldr r0, [sp, #92] @ 4-byte Reload
-; ARMV6-NEXT: ldr lr, [sp, #80] @ 4-byte Reload
-; ARMV6-NEXT: umull r1, r7, r0, r6
-; ARMV6-NEXT: umlal r7, r4, r2, r6
-; ARMV6-NEXT: str r1, [sp, #116] @ 4-byte Spill
-; ARMV6-NEXT: umull r5, r1, r0, r3
-; ARMV6-NEXT: adds r8, r5, r7
-; ARMV6-NEXT: adcs r1, r4, r1
-; ARMV6-NEXT: ldr r4, [sp, #84] @ 4-byte Reload
-; ARMV6-NEXT: adc r7, r12, #0
-; ARMV6-NEXT: umlal r1, r7, r2, r3
-; ARMV6-NEXT: umull r12, r5, r6, r4
-; ARMV6-NEXT: mla r6, r6, lr, r5
-; ARMV6-NEXT: mla r3, r3, r4, r6
-; ARMV6-NEXT: umull r6, r5, r11, r0
-; ARMV6-NEXT: mla r2, r11, r2, r5
-; ARMV6-NEXT: mla r0, r10, r0, r2
-; ARMV6-NEXT: adds r2, r6, r12
-; ARMV6-NEXT: adc r0, r0, r3
-; ARMV6-NEXT: adds r1, r1, r2
-; ARMV6-NEXT: ldr r2, [sp, #116] @ 4-byte Reload
-; ARMV6-NEXT: adc r0, r7, r0
-; ARMV6-NEXT: ldr r3, [sp, #60] @ 4-byte Reload
-; ARMV6-NEXT: subs r2, r9, r2
-; ARMV6-NEXT: ldr r7, [sp, #52] @ 4-byte Reload
-; ARMV6-NEXT: sbcs r3, r3, r8
-; ARMV6-NEXT: sbcs r1, r7, r1
-; ARMV6-NEXT: ldr r7, [sp, #56] @ 4-byte Reload
-; ARMV6-NEXT: sbc r0, r7, r0
-; ARMV6-NEXT: ldr r7, [sp, #48] @ 4-byte Reload
-; ARMV6-NEXT: eor r2, r2, r7
-; ARMV6-NEXT: eor r3, r3, r7
-; ARMV6-NEXT: subs r2, r2, r7
-; ARMV6-NEXT: eor r1, r1, r7
-; ARMV6-NEXT: sbcs r3, r3, r7
-; ARMV6-NEXT: eor r0, r0, r7
-; ARMV6-NEXT: sbcs r1, r1, r7
-; ARMV6-NEXT: sbc r0, r0, r7
-; ARMV6-NEXT: ldr r7, [sp, #44] @ 4-byte Reload
-; ARMV6-NEXT: stm r7, {r2, r3}
-; ARMV6-NEXT: str r1, [r7, #8]
-; ARMV6-NEXT: str r0, [r7, #12]
-; ARMV6-NEXT: add sp, sp, #252
-; ARMV6-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
-; ARMV6-NEXT: .LBB0_14:
-; ARMV6-NEXT: ldr r11, [sp, #116] @ 4-byte Reload
-; ARMV6-NEXT: mov r6, #0
-; ARMV6-NEXT: ldr r7, [sp, #96] @ 4-byte Reload
-; ARMV6-NEXT: ldr r10, [sp, #92] @ 4-byte Reload
-; ARMV6-NEXT: ldr r9, [sp, #88] @ 4-byte Reload
-; ARMV6-NEXT: b .LBB0_5
-; ARMV6-NEXT: .LBB0_15:
-; ARMV6-NEXT: ldr r1, [sp, #116] @ 4-byte Reload
-; ARMV6-NEXT: mov r2, #0
-; ARMV6-NEXT: b .LBB0_12
+; ARMV6-NOT: __divmodti4
+; ARMV6-NOT: __divti3
+; ARMV6-NOT: __modti3
+; ARMV6: pop{{.*}}pc}
;
; ARMV7-LABEL: sdivrem_i128:
-; ARMV7: @ %bb.0: @ %_udiv-special-cases_udiv-special-cases
-; ARMV7-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
-; ARMV7-NEXT: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
-; ARMV7-NEXT: .pad #244
-; ARMV7-NEXT: sub sp, sp, #244
-; ARMV7-NEXT: ldr r10, [sp, #284]
-; ARMV7-NEXT: str r0, [sp, #28] @ 4-byte Spill
-; ARMV7-NEXT: ldr r0, [sp, #280]
-; ARMV7-NEXT: eor r2, r2, r10, asr #31
-; ARMV7-NEXT: str r1, [sp, #32] @ 4-byte Spill
-; ARMV7-NEXT: eor r1, r3, r10, asr #31
-; ARMV7-NEXT: str r2, [sp, #44] @ 4-byte Spill
-; ARMV7-NEXT: subs r2, r2, r10, asr #31
-; ARMV7-NEXT: eor r0, r0, r10, asr #31
-; ARMV7-NEXT: sbcs r11, r1, r10, asr #31
-; ARMV7-NEXT: str r1, [sp, #36] @ 4-byte Spill
-; ARMV7-NEXT: eor r1, r10, r10, asr #31
-; ARMV7-NEXT: sbcs r9, r0, r10, asr #31
-; ARMV7-NEXT: sbc r12, r1, r10, asr #31
-; ARMV7-NEXT: str r0, [sp, #40] @ 4-byte Spill
-; ARMV7-NEXT: clz r0, r9
-; ARMV7-NEXT: str r1, [sp, #24] @ 4-byte Spill
-; ARMV7-NEXT: clz r1, r2
-; ARMV7-NEXT: add r0, r0, #32
-; ARMV7-NEXT: cmp r12, #0
-; ARMV7-NEXT: add r1, r1, #32
-; ARMV7-NEXT: clzne r0, r12
-; ARMV7-NEXT: cmp r11, #0
-; ARMV7-NEXT: ldr r6, [sp, #300]
-; ARMV7-NEXT: clzne r1, r11
-; ARMV7-NEXT: ldr r3, [sp, #288]
-; ARMV7-NEXT: str r2, [sp, #88] @ 4-byte Spill
-; ARMV7-NEXT: orrs r2, r9, r12
-; ARMV7-NEXT: ldr r2, [sp, #292]
-; ARMV7-NEXT: addeq r0, r1, #64
-; ARMV7-NEXT: ldr r1, [sp, #296]
-; ARMV7-NEXT: eor r3, r3, r6, asr #31
-; ARMV7-NEXT: eor r2, r2, r6, asr #31
-; ARMV7-NEXT: subs lr, r3, r6, asr #31
-; ARMV7-NEXT: eor r1, r1, r6, asr #31
-; ARMV7-NEXT: sbcs r4, r2, r6, asr #31
-; ARMV7-NEXT: str r2, [sp, #12] @ 4-byte Spill
-; ARMV7-NEXT: eor r2, r6, r6, asr #31
-; ARMV7-NEXT: sbcs r5, r1, r6, asr #31
-; ARMV7-NEXT: str r1, [sp, #48] @ 4-byte Spill
-; ARMV7-NEXT: sbc r7, r2, r6, asr #31
-; ARMV7-NEXT: clz r1, r5
-; ARMV7-NEXT: str r2, [sp, #8] @ 4-byte Spill
-; ARMV7-NEXT: clz r2, lr
-; ARMV7-NEXT: add r1, r1, #32
-; ARMV7-NEXT: cmp r7, #0
-; ARMV7-NEXT: clzne r1, r7
-; ARMV7-NEXT: add r2, r2, #32
-; ARMV7-NEXT: cmp r4, #0
-; ARMV7-NEXT: str r3, [sp, #16] @ 4-byte Spill
-; ARMV7-NEXT: clzne r2, r4
-; ARMV7-NEXT: orrs r3, r5, r7
-; ARMV7-NEXT: addeq r1, r2, #64
-; ARMV7-NEXT: str r7, [sp, #68] @ 4-byte Spill
-; ARMV7-NEXT: subs r8, r1, r0
-; ARMV7-NEXT: mov r0, #0
-; ARMV7-NEXT: sbcs r1, r0, #0
-; ARMV7-NEXT: str r1, [sp, #100] @ 4-byte Spill
-; ARMV7-NEXT: sbcs r2, r0, #0
-; ARMV7-NEXT: str r2, [sp, #96] @ 4-byte Spill
-; ARMV7-NEXT: sbc r3, r0, #0
-; ARMV7-NEXT: rsbs r0, r8, #127
-; ARMV7-NEXT: rscs r0, r1, #0
-; ARMV7-NEXT: orr r1, r4, r7
-; ARMV7-NEXT: ldr r7, [sp, #88] @ 4-byte Reload
-; ARMV7-NEXT: rscs r0, r2, #0
-; ARMV7-NEXT: orr r2, lr, r5
-; ARMV7-NEXT: rscs r0, r3, #0
-; ARMV7-NEXT: orr r1, r2, r1
-; ARMV7-NEXT: str r3, [sp, #92] @ 4-byte Spill
-; ARMV7-NEXT: orr r2, r11, r12
-; ARMV7-NEXT: orr r3, r7, r9
-; ARMV7-NEXT: orr r2, r3, r2
-; ARMV7-NEXT: clz r1, r1
-; ARMV7-NEXT: clz r2, r2
-; ARMV7-NEXT: mov r0, #0
-; ARMV7-NEXT: lsr r1, r1, #5
-; ARMV7-NEXT: movwlo r0, #1
-; ARMV7-NEXT: lsr r2, r2, #5
-; ARMV7-NEXT: orr r1, r1, r2
-; ARMV7-NEXT: orrs r0, r1, r0
-; ARMV7-NEXT: mov r3, r9
-; ARMV7-NEXT: asr r0, r6, #31
-; ARMV7-NEXT: mov r6, r7
-; ARMV7-NEXT: eor r0, r0, r10, asr #31
-; ARMV7-NEXT: mov r1, r12
-; ARMV7-NEXT: str r0, [sp, #20] @ 4-byte Spill
-; ARMV7-NEXT: mov r0, r11
-; ARMV7-NEXT: movwne r0, #0
-; ARMV7-NEXT: movwne r6, #0
-; ARMV7-NEXT: movwne r3, #0
-; ARMV7-NEXT: movwne r1, #0
-; ARMV7-NEXT: str r0, [sp, #108] @ 4-byte Spill
-; ARMV7-NEXT: bne .LBB0_15
-; ARMV7-NEXT: @ %bb.1: @ %_udiv-special-cases_udiv-special-cases
-; ARMV7-NEXT: mov r10, r7
-; ARMV7-NEXT: ldr r7, [sp, #96] @ 4-byte Reload
-; ARMV7-NEXT: eor r0, r8, #127
-; ARMV7-NEXT: mov r2, r11
-; ARMV7-NEXT: str r5, [sp, #72] @ 4-byte Spill
-; ARMV7-NEXT: orr r0, r0, r7
-; ARMV7-NEXT: ldr r7, [sp, #100] @ 4-byte Reload
-; ARMV7-NEXT: ldr r5, [sp, #92] @ 4-byte Reload
-; ARMV7-NEXT: ldr r11, [sp, #48] @ 4-byte Reload
-; ARMV7-NEXT: orr r7, r7, r5
-; ARMV7-NEXT: str r4, [sp, #76] @ 4-byte Spill
-; ARMV7-NEXT: orrs r0, r0, r7
-; ARMV7-NEXT: str lr, [sp, #80] @ 4-byte Spill
-; ARMV7-NEXT: beq .LBB0_6
-; ARMV7-NEXT: @ %bb.2: @ %udiv-bb15
-; ARMV7-NEXT: mov r0, #0
-; ARMV7-NEXT: mov r1, #12
-; ARMV7-NEXT: str r0, [sp, #220]
-; ARMV7-NEXT: mov r11, r2
-; ARMV7-NEXT: str r0, [sp, #216]
-; ARMV7-NEXT: str r0, [sp, #212]
-; ARMV7-NEXT: str r0, [sp, #208]
-; ARMV7-NEXT: add r0, sp, #228
-; ARMV7-NEXT: stm r0, {r2, r9, r12}
-; ARMV7-NEXT: rsb r0, r8, #127
-; ARMV7-NEXT: add r2, sp, #208
-; ARMV7-NEXT: and r1, r1, r0, lsr #3
-; ARMV7-NEXT: add r2, r2, #16
-; ARMV7-NEXT: str r10, [sp, #224]
-; ARMV7-NEXT: and r6, r0, #31
-; ARMV7-NEXT: ldr r1, [r2, -r1]!
-; ARMV7-NEXT: eor r4, r6, #31
-; ARMV7-NEXT: str r9, [sp, #64] @ 4-byte Spill
-; ARMV7-NEXT: ldmib r2, {r3, r7}
-; ARMV7-NEXT: lsr r5, r1, #1
-; ARMV7-NEXT: ldr r2, [r2, #12]
-; ARMV7-NEXT: lsl r0, r3, r6
-; ARMV7-NEXT: orr r9, r0, r5, lsr r4
-; ARMV7-NEXT: lsr r5, r7, #1
-; ARMV7-NEXT: lsrs r3, r3, #1
-; ARMV7-NEXT: lsl r2, r2, r6
-; ARMV7-NEXT: orr r0, r2, r5, lsr r4
-; ARMV7-NEXT: str r0, [sp, #108] @ 4-byte Spill
-; ARMV7-NEXT: lsl r2, r7, r6
-; ARMV7-NEXT: ldr r0, [sp, #100] @ 4-byte Reload
-; ARMV7-NEXT: orr lr, r2, r3, lsr r4
-; ARMV7-NEXT: adds r4, r8, #1
-; ARMV7-NEXT: lsl r2, r1, r6
-; ARMV7-NEXT: adcs r8, r0, #0
-; ARMV7-NEXT: ldr r0, [sp, #96] @ 4-byte Reload
-; ARMV7-NEXT: mov r3, #0
-; ARMV7-NEXT: adcs r0, r0, #0
-; ARMV7-NEXT: str r0, [sp, #84] @ 4-byte Spill
-; ARMV7-NEXT: ldr r0, [sp, #92] @ 4-byte Reload
-; ARMV7-NEXT: adcs r0, r0, #0
-; ARMV7-NEXT: str r0, [sp, #96] @ 4-byte Spill
-; ARMV7-NEXT: mov r0, #0
-; ARMV7-NEXT: adcs r1, r3, #0
-; ARMV7-NEXT: str r0, [sp, #104] @ 4-byte Spill
-; ARMV7-NEXT: bne .LBB0_13
-; ARMV7-NEXT: @ %bb.3: @ %udiv-preheader4
-; ARMV7-NEXT: mov r1, #0
-; ARMV7-NEXT: str r10, [sp, #176]
-; ARMV7-NEXT: str r1, [sp, #204]
-; ARMV7-NEXT: add r10, sp, #176
-; ARMV7-NEXT: str r1, [sp, #200]
-; ARMV7-NEXT: str r1, [sp, #196]
-; ARMV7-NEXT: str r1, [sp, #192]
-; ARMV7-NEXT: ubfx r1, r4, #5, #2
-; ARMV7-NEXT: str r11, [sp, #180]
-; ARMV7-NEXT: ldr r0, [sp, #64] @ 4-byte Reload
-; ARMV7-NEXT: add r3, r10, r1, lsl #2
-; ARMV7-NEXT: str r0, [sp, #184]
-; ARMV7-NEXT: and r0, r4, #31
-; ARMV7-NEXT: str r12, [sp, #188]
-; ARMV7-NEXT: ldmib r3, {r5, r6, r12}
-; ARMV7-NEXT: lsr r3, r6, r0
-; ARMV7-NEXT: lsl r7, r12, #1
-; ARMV7-NEXT: str r4, [sp, #100] @ 4-byte Spill
-; ARMV7-NEXT: eor r4, r0, #31
-; ARMV7-NEXT: ldr r1, [r10, r1, lsl #2]
-; ARMV7-NEXT: lsl r6, r6, #1
-; ARMV7-NEXT: orr r3, r3, r7, lsl r4
-; ARMV7-NEXT: lsr r7, r5, r0
-; ARMV7-NEXT: lsl r5, r5, #1
-; ARMV7-NEXT: orr r6, r7, r6, lsl r4
-; ARMV7-NEXT: lsr r12, r12, r0
-; ARMV7-NEXT: lsr r1, r1, r0
-; ARMV7-NEXT: orr r7, r1, r5, lsl r4
-; ARMV7-NEXT: ldr r1, [sp, #80] @ 4-byte Reload
-; ARMV7-NEXT: mov r10, #0
-; ARMV7-NEXT: ldr r0, [sp, #108] @ 4-byte Reload
-; ARMV7-NEXT: subs r1, r1, #1
-; ARMV7-NEXT: str r1, [sp, #64] @ 4-byte Spill
-; ARMV7-NEXT: ldr r1, [sp, #76] @ 4-byte Reload
-; ARMV7-NEXT: ldr r11, [sp, #84] @ 4-byte Reload
-; ARMV7-NEXT: sbcs r1, r1, #0
-; ARMV7-NEXT: str r1, [sp, #60] @ 4-byte Spill
-; ARMV7-NEXT: ldr r1, [sp, #72] @ 4-byte Reload
-; ARMV7-NEXT: sbcs r1, r1, #0
-; ARMV7-NEXT: str r1, [sp, #56] @ 4-byte Spill
-; ARMV7-NEXT: ldr r1, [sp, #68] @ 4-byte Reload
-; ARMV7-NEXT: sbc r1, r1, #0
-; ARMV7-NEXT: str r1, [sp, #52] @ 4-byte Spill
-; ARMV7-NEXT: ldr r1, [sp, #96] @ 4-byte Reload
-; ARMV7-NEXT: .LBB0_4: @ %udiv-do-while3
-; ARMV7-NEXT: @ =>This Inner Loop Header: Depth=1
-; ARMV7-NEXT: lsl r5, r12, #1
-; ARMV7-NEXT: add r4, sp, #88
-; ARMV7-NEXT: orr r5, r5, r3, lsr #31
-; ARMV7-NEXT: lsl r3, r3, #1
-; ARMV7-NEXT: stm r4, {r2, r9, lr} @ 12-byte Folded Spill
-; ARMV7-NEXT: orr r3, r3, r6, lsr #31
-; ARMV7-NEXT: lsl r6, r6, #1
-; ARMV7-NEXT: ldr r2, [sp, #104] @ 4-byte Reload
-; ARMV7-NEXT: orr r6, r6, r7, lsr #31
-; ARMV7-NEXT: lsl r7, r7, #1
-; ARMV7-NEXT: str r2, [sp, #84] @ 4-byte Spill
-; ARMV7-NEXT: ldr r2, [sp, #64] @ 4-byte Reload
-; ARMV7-NEXT: orr r7, r7, r0, lsr #31
-; ARMV7-NEXT: str r0, [sp, #108] @ 4-byte Spill
-; ARMV7-NEXT: subs r2, r2, r7
-; ARMV7-NEXT: ldr r0, [sp, #72] @ 4-byte Reload
-; ARMV7-NEXT: ldr r2, [sp, #60] @ 4-byte Reload
-; ARMV7-NEXT: ldr r4, [sp, #68] @ 4-byte Reload
-; ARMV7-NEXT: sbcs r2, r2, r6
-; ARMV7-NEXT: ldr r2, [sp, #56] @ 4-byte Reload
-; ARMV7-NEXT: sbcs r2, r2, r3
-; ARMV7-NEXT: ldr r2, [sp, #52] @ 4-byte Reload
-; ARMV7-NEXT: sbc r2, r2, r5
-; ARMV7-NEXT: and r9, r0, r2, asr #31
-; ARMV7-NEXT: ldr r0, [sp, #76] @ 4-byte Reload
-; ARMV7-NEXT: and r4, r4, r2, asr #31
-; ARMV7-NEXT: and lr, r0, r2, asr #31
-; ARMV7-NEXT: ldr r0, [sp, #80] @ 4-byte Reload
-; ARMV7-NEXT: and r12, r0, r2, asr #31
-; ARMV7-NEXT: mov r0, #1
-; ARMV7-NEXT: subs r7, r7, r12
-; ARMV7-NEXT: and r0, r0, r2, asr #31
-; ARMV7-NEXT: sbcs r6, r6, lr
-; ARMV7-NEXT: str r0, [sp, #104] @ 4-byte Spill
-; ARMV7-NEXT: ldr r0, [sp, #100] @ 4-byte Reload
-; ARMV7-NEXT: sbcs r3, r3, r9
-; ARMV7-NEXT: sbc r12, r5, r4
-; ARMV7-NEXT: ldr r4, [sp, #92] @ 4-byte Reload
-; ARMV7-NEXT: subs r0, r0, #1
-; ARMV7-NEXT: str r0, [sp, #100] @ 4-byte Spill
-; ARMV7-NEXT: sbcs r8, r8, #0
-; ARMV7-NEXT: sbcs r11, r11, #0
-; ARMV7-NEXT: sbc r1, r1, #0
-; ARMV7-NEXT: orr r5, r0, r11
-; ARMV7-NEXT: orr r2, r8, r1
-; ARMV7-NEXT: ldr r0, [sp, #88] @ 4-byte Reload
-; ARMV7-NEXT: orrs r2, r5, r2
-; ARMV7-NEXT: lsl r5, r4, #1
-; ARMV7-NEXT: ldr r2, [sp, #84] @ 4-byte Reload
-; ARMV7-NEXT: orr r2, r2, r0, lsl #1
-; ARMV7-NEXT: orr r0, r5, r0, lsr #31
-; ARMV7-NEXT: orr r9, r10, r0
-; ARMV7-NEXT: ldr r0, [sp, #96] @ 4-byte Reload
-; ARMV7-NEXT: lsl r5, r0, #1
-; ARMV7-NEXT: orr r5, r5, r4, lsr #31
-; ARMV7-NEXT: ldr r4, [sp, #108] @ 4-byte Reload
-; ARMV7-NEXT: orr lr, r10, r5
-; ARMV7-NEXT: lsl r5, r4, #1
-; ARMV7-NEXT: orr r5, r5, r0, lsr #31
-; ARMV7-NEXT: orr r0, r10, r5
-; ARMV7-NEXT: mov r10, #0
-; ARMV7-NEXT: bne .LBB0_4
-; ARMV7-NEXT: .LBB0_5: @ %udiv-loop-exit2
-; ARMV7-NEXT: ldr r1, [sp, #104] @ 4-byte Reload
-; ARMV7-NEXT: lsl r0, r0, #1
-; ARMV7-NEXT: ldr r11, [sp, #48] @ 4-byte Reload
-; ARMV7-NEXT: orr r6, r1, r2, lsl #1
-; ARMV7-NEXT: lsl r1, r9, #1
-; ARMV7-NEXT: orr r1, r1, r2, lsr #31
-; ARMV7-NEXT: str r1, [sp, #108] @ 4-byte Spill
-; ARMV7-NEXT: lsl r2, lr, #1
-; ARMV7-NEXT: orr r3, r2, r9, lsr #31
-; ARMV7-NEXT: orr r1, r0, lr, lsr #31
-; ARMV7-NEXT: .LBB0_6: @ %udiv-end1
-; ARMV7-NEXT: ldr r2, [sp, #284]
-; ARMV7-NEXT: ldr r0, [sp, #44] @ 4-byte Reload
-; ARMV7-NEXT: ldr r7, [sp, #24] @ 4-byte Reload
-; ARMV7-NEXT: subs r8, r0, r2, asr #31
-; ARMV7-NEXT: ldr r0, [sp, #36] @ 4-byte Reload
-; ARMV7-NEXT: ldr r4, [sp, #300]
-; ARMV7-NEXT: sbcs r10, r0, r2, asr #31
-; ARMV7-NEXT: ldr r0, [sp, #40] @ 4-byte Reload
-; ARMV7-NEXT: str r3, [sp, #96] @ 4-byte Spill
-; ARMV7-NEXT: sbcs lr, r0, r2, asr #31
-; ARMV7-NEXT: ldr r3, [sp, #16] @ 4-byte Reload
-; ARMV7-NEXT: sbc r9, r7, r2, asr #31
-; ARMV7-NEXT: clz r0, lr
-; ARMV7-NEXT: clz r7, r8
-; ARMV7-NEXT: add r0, r0, #32
-; ARMV7-NEXT: cmp r9, #0
-; ARMV7-NEXT: add r7, r7, #32
-; ARMV7-NEXT: clzne r0, r9
-; ARMV7-NEXT: cmp r10, #0
-; ARMV7-NEXT: clzne r7, r10
-; ARMV7-NEXT: orrs r5, lr, r9
-; ARMV7-NEXT: addeq r0, r7, #64
-; ARMV7-NEXT: subs r12, r3, r4, asr #31
-; ARMV7-NEXT: ldr r3, [sp, #12] @ 4-byte Reload
-; ARMV7-NEXT: clz r5, r12
-; ARMV7-NEXT: str r6, [sp, #92] @ 4-byte Spill
-; ARMV7-NEXT: add r5, r5, #32
-; ARMV7-NEXT: sbcs r6, r3, r4, asr #31
-; ARMV7-NEXT: ldr r3, [sp, #8] @ 4-byte Reload
-; ARMV7-NEXT: sbcs r2, r11, r4, asr #31
-; ARMV7-NEXT: str r1, [sp, #88] @ 4-byte Spill
-; ARMV7-NEXT: sbc r3, r3, r4, asr #31
-; ARMV7-NEXT: clz r7, r2
-; ARMV7-NEXT: add r7, r7, #32
-; ARMV7-NEXT: cmp r3, #0
-; ARMV7-NEXT: clzne r7, r3
-; ARMV7-NEXT: cmp r6, #0
-; ARMV7-NEXT: clzne r5, r6
-; ARMV7-NEXT: orrs r4, r2, r3
-; ARMV7-NEXT: addeq r7, r5, #64
-; ARMV7-NEXT: str r6, [sp, #72] @ 4-byte Spill
-; ARMV7-NEXT: subs r11, r7, r0
-; ARMV7-NEXT: mov r7, #0
-; ARMV7-NEXT: sbcs r4, r7, #0
-; ARMV7-NEXT: str r4, [sp, #84] @ 4-byte Spill
-; ARMV7-NEXT: sbcs r5, r7, #0
-; ARMV7-NEXT: str r5, [sp, #104] @ 4-byte Spill
-; ARMV7-NEXT: sbc r1, r7, #0
-; ARMV7-NEXT: rsbs r0, r11, #127
-; ARMV7-NEXT: rscs r0, r4, #0
-; ARMV7-NEXT: orr r4, r8, lr
-; ARMV7-NEXT: rscs r0, r5, #0
-; ARMV7-NEXT: orr r5, r12, r2
-; ARMV7-NEXT: rscs r0, r1, #0
-; ARMV7-NEXT: str r1, [sp, #100] @ 4-byte Spill
-; ARMV7-NEXT: orr r0, r6, r3
-; ARMV7-NEXT: ldr r6, [sp, #20] @ 4-byte Reload
-; ARMV7-NEXT: orr r0, r5, r0
-; ARMV7-NEXT: orr r5, r10, r9
-; ARMV7-NEXT: clz r0, r0
-; ARMV7-NEXT: orr r4, r4, r5
-; ARMV7-NEXT: clz r4, r4
-; ARMV7-NEXT: ldr r1, [sp, #88] @ 4-byte Reload
-; ARMV7-NEXT: lsr r0, r0, #5
-; ARMV7-NEXT: movwlo r7, #1
-; ARMV7-NEXT: lsr r4, r4, #5
-; ARMV7-NEXT: orr r0, r0, r4
-; ARMV7-NEXT: orr r0, r0, r7
-; ARMV7-NEXT: eor r7, r1, r6
-; ARMV7-NEXT: ldr r1, [sp, #96] @ 4-byte Reload
-; ARMV7-NEXT: str r3, [sp, #64] @ 4-byte Spill
-; ARMV7-NEXT: ldr r3, [sp, #92] @ 4-byte Reload
-; ARMV7-NEXT: str r2, [sp, #68] @ 4-byte Spill
-; ARMV7-NEXT: eor r2, r1, r6
-; ARMV7-NEXT: ldr r1, [sp, #108] @ 4-byte Reload
-; ARMV7-NEXT: eor r4, r3, r6
-; ARMV7-NEXT: subs r3, r4, r6
-; ARMV7-NEXT: str r9, [sp, #36] @ 4-byte Spill
-; ARMV7-NEXT: eor r1, r1, r6
-; ARMV7-NEXT: str lr, [sp, #40] @ 4-byte Spill
-; ARMV7-NEXT: sbcs r1, r1, r6
-; ARMV7-NEXT: str r1, [sp, #12] @ 4-byte Spill
-; ARMV7-NEXT: sbcs r1, r2, r6
-; ARMV7-NEXT: str r1, [sp, #8] @ 4-byte Spill
-; ARMV7-NEXT: sbc r1, r7, r6
-; ARMV7-NEXT: str r1, [sp, #20] @ 4-byte Spill
-; ARMV7-NEXT: ldr r1, [sp, #284]
-; ARMV7-NEXT: cmp r0, #0
-; ARMV7-NEXT: mov r6, r8
-; ARMV7-NEXT: mov r2, r10
-; ARMV7-NEXT: movwne r6, #0
-; ARMV7-NEXT: movwne r2, #0
-; ARMV7-NEXT: asr r1, r1, #31
-; ARMV7-NEXT: movwne lr, #0
-; ARMV7-NEXT: movwne r9, #0
-; ARMV7-NEXT: str r3, [sp, #16] @ 4-byte Spill
-; ARMV7-NEXT: str r1, [sp, #24] @ 4-byte Spill
-; ARMV7-NEXT: str r8, [sp, #44] @ 4-byte Spill
-; ARMV7-NEXT: bne .LBB0_12
-; ARMV7-NEXT: @ %bb.7: @ %udiv-end1
-; ARMV7-NEXT: ldr r3, [sp, #104] @ 4-byte Reload
-; ARMV7-NEXT: eor r0, r11, #127
-; ARMV7-NEXT: mov r1, r8
-; ARMV7-NEXT: ldr r8, [sp, #84] @ 4-byte Reload
-; ARMV7-NEXT: orr r0, r0, r3
-; ARMV7-NEXT: ldr r3, [sp, #100] @ 4-byte Reload
-; ARMV7-NEXT: orr r4, r8, r3
-; ARMV7-NEXT: orrs r0, r0, r4
-; ARMV7-NEXT: beq .LBB0_12
-; ARMV7-NEXT: @ %bb.8: @ %udiv-bb1
-; ARMV7-NEXT: mov r7, #0
-; ARMV7-NEXT: str r12, [sp, #76] @ 4-byte Spill
-; ARMV7-NEXT: str r1, [sp, #160]
-; ARMV7-NEXT: rsb r0, r11, #127
-; ARMV7-NEXT: str r7, [sp, #156]
-; ARMV7-NEXT: mov r1, #12
-; ARMV7-NEXT: str r7, [sp, #152]
-; ARMV7-NEXT: add r2, sp, #144
-; ARMV7-NEXT: str r7, [sp, #148]
-; ARMV7-NEXT: and r1, r1, r0, lsr #3
-; ARMV7-NEXT: str r7, [sp, #144]
-; ARMV7-NEXT: add r2, r2, #16
-; ARMV7-NEXT: str r10, [sp, #164]
-; ARMV7-NEXT: and r0, r0, #31
-; ARMV7-NEXT: ldr r9, [sp, #40] @ 4-byte Reload
-; ARMV7-NEXT: eor lr, r0, #31
-; ARMV7-NEXT: str r9, [sp, #168]
-; ARMV7-NEXT: ldr r12, [sp, #36] @ 4-byte Reload
-; ARMV7-NEXT: str r12, [sp, #172]
-; ARMV7-NEXT: ldr r1, [r2, -r1]!
-; ARMV7-NEXT: ldmib r2, {r3, r6}
-; ARMV7-NEXT: lsr r4, r1, #1
-; ARMV7-NEXT: ldr r2, [r2, #12]
-; ARMV7-NEXT: lsl r5, r3, r0
-; ARMV7-NEXT: orr r4, r5, r4, lsr lr
-; ARMV7-NEXT: str r4, [sp, #96] @ 4-byte Spill
-; ARMV7-NEXT: lsr r4, r6, #1
-; ARMV7-NEXT: lsl r2, r2, r0
-; ARMV7-NEXT: lsrs r3, r3, #1
-; ARMV7-NEXT: orr r2, r2, r4, lsr lr
-; ARMV7-NEXT: str r2, [sp, #108] @ 4-byte Spill
-; ARMV7-NEXT: lsl r2, r6, r0
-; ARMV7-NEXT: orr r2, r2, r3, lsr lr
-; ARMV7-NEXT: adds r3, r11, #1
-; ARMV7-NEXT: str r2, [sp, #92] @ 4-byte Spill
-; ARMV7-NEXT: mov r2, r10
-; ARMV7-NEXT: lsl r10, r1, r0
-; ARMV7-NEXT: ldr r0, [sp, #104] @ 4-byte Reload
-; ARMV7-NEXT: adcs r8, r8, #0
-; ARMV7-NEXT: str r2, [sp, #4] @ 4-byte Spill
-; ARMV7-NEXT: adcs lr, r0, #0
-; ARMV7-NEXT: ldr r0, [sp, #100] @ 4-byte Reload
-; ARMV7-NEXT: adcs r0, r0, #0
-; ARMV7-NEXT: str r0, [sp, #88] @ 4-byte Spill
-; ARMV7-NEXT: adcs r0, r7, #0
-; ARMV7-NEXT: ldr r0, [sp, #44] @ 4-byte Reload
-; ARMV7-NEXT: bne .LBB0_14
-; ARMV7-NEXT: @ %bb.9: @ %udiv-preheader
-; ARMV7-NEXT: add r1, sp, #112
-; ARMV7-NEXT: str r7, [sp, #140]
-; ARMV7-NEXT: stm r1, {r0, r2, r9, r12}
-; ARMV7-NEXT: ubfx r0, r3, #5, #2
-; ARMV7-NEXT: add r12, sp, #112
-; ARMV7-NEXT: add r2, r12, r0, lsl #2
-; ARMV7-NEXT: str r7, [sp, #136]
-; ARMV7-NEXT: str r7, [sp, #132]
-; ARMV7-NEXT: str r7, [sp, #128]
-; ARMV7-NEXT: ldr r9, [r2, #12]
-; ARMV7-NEXT: ldr r1, [r2, #8]
-; ARMV7-NEXT: ldr r7, [r2, #4]
-; ARMV7-NEXT: and r2, r3, #31
-; ARMV7-NEXT: eor r4, r2, #31
-; ARMV7-NEXT: str r3, [sp, #104] @ 4-byte Spill
-; ARMV7-NEXT: lsr r3, r1, r2
-; ARMV7-NEXT: lsl r6, r9, #1
-; ARMV7-NEXT: lsl r1, r1, #1
-; ARMV7-NEXT: orr r6, r3, r6, lsl r4
-; ARMV7-NEXT: lsr r3, r7, r2
-; ARMV7-NEXT: orr r1, r3, r1, lsl r4
-; ARMV7-NEXT: str r1, [sp, #100] @ 4-byte Spill
-; ARMV7-NEXT: lsl r1, r7, #1
-; ARMV7-NEXT: ldr r0, [r12, r0, lsl #2]
-; ARMV7-NEXT: lsr r11, r9, r2
-; ARMV7-NEXT: ldr r3, [sp, #64] @ 4-byte Reload
-; ARMV7-NEXT: ldr r7, [sp, #88] @ 4-byte Reload
-; ARMV7-NEXT: ldr r5, [sp, #92] @ 4-byte Reload
-; ARMV7-NEXT: lsr r0, r0, r2
-; ARMV7-NEXT: mov r2, #0
-; ARMV7-NEXT: orr r0, r0, r1, lsl r4
-; ARMV7-NEXT: ldr r1, [sp, #76] @ 4-byte Reload
-; ARMV7-NEXT: ldr r4, [sp, #96] @ 4-byte Reload
-; ARMV7-NEXT: subs r1, r1, #1
-; ARMV7-NEXT: str r1, [sp, #60] @ 4-byte Spill
-; ARMV7-NEXT: ldr r1, [sp, #72] @ 4-byte Reload
-; ARMV7-NEXT: sbcs r1, r1, #0
-; ARMV7-NEXT: str r1, [sp, #56] @ 4-byte Spill
-; ARMV7-NEXT: ldr r1, [sp, #68] @ 4-byte Reload
-; ARMV7-NEXT: sbcs r1, r1, #0
-; ARMV7-NEXT: str r1, [sp, #52] @ 4-byte Spill
-; ARMV7-NEXT: sbc r1, r3, #0
-; ARMV7-NEXT: ldr r3, [sp, #108] @ 4-byte Reload
-; ARMV7-NEXT: str r1, [sp, #48] @ 4-byte Spill
-; ARMV7-NEXT: mov r1, #0
-; ARMV7-NEXT: .LBB0_10: @ %udiv-do-while
-; ARMV7-NEXT: @ =>This Inner Loop Header: Depth=1
-; ARMV7-NEXT: str r2, [sp, #80] @ 4-byte Spill
-; ARMV7-NEXT: lsl r2, r11, #1
-; ARMV7-NEXT: ldr r11, [sp, #100] @ 4-byte Reload
-; ARMV7-NEXT: orr r2, r2, r6, lsr #31
-; ARMV7-NEXT: str r4, [sp, #88] @ 4-byte Spill
-; ARMV7-NEXT: lsl r4, r6, #1
-; ARMV7-NEXT: str r5, [sp, #96] @ 4-byte Spill
-; ARMV7-NEXT: lsl r6, r11, #1
-; ARMV7-NEXT: ldr r5, [sp, #60] @ 4-byte Reload
-; ARMV7-NEXT: orr r6, r6, r0, lsr #31
-; ARMV7-NEXT: lsl r0, r0, #1
-; ARMV7-NEXT: orr r0, r0, r3, lsr #31
-; ARMV7-NEXT: orr r4, r4, r11, lsr #31
-; ARMV7-NEXT: subs r5, r5, r0
-; ARMV7-NEXT: str r3, [sp, #108] @ 4-byte Spill
-; ARMV7-NEXT: ldr r5, [sp, #56] @ 4-byte Reload
-; ARMV7-NEXT: ldr r3, [sp, #68] @ 4-byte Reload
-; ARMV7-NEXT: sbcs r5, r5, r6
-; ARMV7-NEXT: str r1, [sp, #92] @ 4-byte Spill
-; ARMV7-NEXT: ldr r5, [sp, #52] @ 4-byte Reload
-; ARMV7-NEXT: ldr r1, [sp, #64] @ 4-byte Reload
-; ARMV7-NEXT: sbcs r5, r5, r4
-; ARMV7-NEXT: str r10, [sp, #84] @ 4-byte Spill
-; ARMV7-NEXT: ldr r5, [sp, #48] @ 4-byte Reload
-; ARMV7-NEXT: sbc r5, r5, r2
-; ARMV7-NEXT: and r9, r3, r5, asr #31
-; ARMV7-NEXT: ldr r3, [sp, #72] @ 4-byte Reload
-; ARMV7-NEXT: and r10, r1, r5, asr #31
-; ARMV7-NEXT: and r11, r3, r5, asr #31
-; ARMV7-NEXT: ldr r3, [sp, #76] @ 4-byte Reload
-; ARMV7-NEXT: and r12, r3, r5, asr #31
-; ARMV7-NEXT: ldr r3, [sp, #104] @ 4-byte Reload
-; ARMV7-NEXT: subs r0, r0, r12
-; ARMV7-NEXT: sbcs r1, r6, r11
-; ARMV7-NEXT: str r1, [sp, #100] @ 4-byte Spill
-; ARMV7-NEXT: sbcs r6, r4, r9
-; ARMV7-NEXT: ldr r1, [sp, #84] @ 4-byte Reload
-; ARMV7-NEXT: sbc r11, r2, r10
-; ARMV7-NEXT: subs r3, r3, #1
-; ARMV7-NEXT: sbcs r8, r8, #0
-; ARMV7-NEXT: str r3, [sp, #104] @ 4-byte Spill
-; ARMV7-NEXT: sbcs lr, lr, #0
-; ARMV7-NEXT: mov r2, #1
-; ARMV7-NEXT: orr r4, r3, lr
-; ARMV7-NEXT: ldr r3, [sp, #80] @ 4-byte Reload
-; ARMV7-NEXT: sbc r7, r7, #0
-; ARMV7-NEXT: ldr r9, [sp, #88] @ 4-byte Reload
-; ARMV7-NEXT: and r2, r2, r5, asr #31
-; ARMV7-NEXT: orr r5, r8, r7
-; ARMV7-NEXT: orr r12, r3, r1, lsl #1
-; ARMV7-NEXT: ldr r3, [sp, #96] @ 4-byte Reload
-; ARMV7-NEXT: orrs r5, r4, r5
-; ARMV7-NEXT: ldr r10, [sp, #92] @ 4-byte Reload
-; ARMV7-NEXT: lsl r5, r9, #1
-; ARMV7-NEXT: orr r1, r5, r1, lsr #31
-; ARMV7-NEXT: orr r4, r10, r1
-; ARMV7-NEXT: lsl r1, r3, #1
-; ARMV7-NEXT: orr r1, r1, r9, lsr #31
-; ARMV7-NEXT: orr r5, r10, r1
-; ARMV7-NEXT: ldr r1, [sp, #108] @ 4-byte Reload
-; ARMV7-NEXT: lsl r1, r1, #1
-; ARMV7-NEXT: orr r1, r1, r3, lsr #31
-; ARMV7-NEXT: orr r3, r10, r1
-; ARMV7-NEXT: mov r10, r12
-; ARMV7-NEXT: mov r1, #0
-; ARMV7-NEXT: bne .LBB0_10
-; ARMV7-NEXT: .LBB0_11: @ %udiv-loop-exit
-; ARMV7-NEXT: lsl r1, r4, #1
-; ARMV7-NEXT: orr r6, r2, r10, lsl #1
-; ARMV7-NEXT: orr r2, r1, r10, lsr #31
-; ARMV7-NEXT: lsl r1, r5, #1
-; ARMV7-NEXT: ldr r10, [sp, #4] @ 4-byte Reload
-; ARMV7-NEXT: orr lr, r1, r4, lsr #31
-; ARMV7-NEXT: ldr r12, [sp, #76] @ 4-byte Reload
-; ARMV7-NEXT: lsl r1, r3, #1
-; ARMV7-NEXT: orr r9, r1, r5, lsr #31
-; ARMV7-NEXT: .LBB0_12: @ %udiv-end
-; ARMV7-NEXT: umull r0, r7, r12, r6
-; ARMV7-NEXT: ldr r5, [sp, #16] @ 4-byte Reload
-; ARMV7-NEXT: ldr r11, [sp, #72] @ 4-byte Reload
-; ARMV7-NEXT: mov r4, #0
-; ARMV7-NEXT: mov r1, r12
-; ARMV7-NEXT: ldr r3, [sp, #68] @ 4-byte Reload
-; ARMV7-NEXT: str r0, [sp, #108] @ 4-byte Spill
-; ARMV7-NEXT: umlal r7, r4, r11, r6
-; ARMV7-NEXT: ldr r0, [sp, #28] @ 4-byte Reload
-; ARMV7-NEXT: mov r12, #0
-; ARMV7-NEXT: str r5, [r0]
-; ARMV7-NEXT: ldr r5, [sp, #12] @ 4-byte Reload
-; ARMV7-NEXT: str r5, [r0, #4]
-; ARMV7-NEXT: ldr r5, [sp, #8] @ 4-byte Reload
-; ARMV7-NEXT: str r5, [r0, #8]
-; ARMV7-NEXT: ldr r5, [sp, #20] @ 4-byte Reload
-; ARMV7-NEXT: str r5, [r0, #12]
-; ARMV7-NEXT: umull r5, r0, r1, r2
-; ARMV7-NEXT: adds r8, r5, r7
-; ARMV7-NEXT: adcs r0, r4, r0
-; ARMV7-NEXT: ldr r4, [sp, #64] @ 4-byte Reload
-; ARMV7-NEXT: adc r7, r12, #0
-; ARMV7-NEXT: umull r12, r5, r6, r3
-; ARMV7-NEXT: mla r6, r6, r4, r5
-; ARMV7-NEXT: umlal r0, r7, r11, r2
-; ARMV7-NEXT: mla r2, r2, r3, r6
-; ARMV7-NEXT: umull r6, r5, lr, r1
-; ARMV7-NEXT: mla r3, lr, r11, r5
-; ARMV7-NEXT: mla r1, r9, r1, r3
-; ARMV7-NEXT: adds r3, r6, r12
-; ARMV7-NEXT: adc r1, r1, r2
-; ARMV7-NEXT: adds r0, r0, r3
-; ARMV7-NEXT: ldr r2, [sp, #108] @ 4-byte Reload
-; ARMV7-NEXT: adc r1, r7, r1
-; ARMV7-NEXT: ldr r3, [sp, #44] @ 4-byte Reload
-; ARMV7-NEXT: ldr r7, [sp, #40] @ 4-byte Reload
-; ARMV7-NEXT: subs r2, r3, r2
-; ARMV7-NEXT: sbcs r3, r10, r8
-; ARMV7-NEXT: sbcs r0, r7, r0
-; ARMV7-NEXT: ldr r7, [sp, #36] @ 4-byte Reload
-; ARMV7-NEXT: sbc r1, r7, r1
-; ARMV7-NEXT: ldr r7, [sp, #24] @ 4-byte Reload
-; ARMV7-NEXT: eor r2, r2, r7
-; ARMV7-NEXT: eor r3, r3, r7
-; ARMV7-NEXT: subs r2, r2, r7
-; ARMV7-NEXT: eor r0, r0, r7
-; ARMV7-NEXT: sbcs r3, r3, r7
-; ARMV7-NEXT: eor r1, r1, r7
-; ARMV7-NEXT: sbcs r0, r0, r7
-; ARMV7-NEXT: sbc r1, r1, r7
-; ARMV7-NEXT: ldr r7, [sp, #32] @ 4-byte Reload
-; ARMV7-NEXT: stm r7, {r2, r3}
-; ARMV7-NEXT: str r0, [r7, #8]
-; ARMV7-NEXT: str r1, [r7, #12]
-; ARMV7-NEXT: add sp, sp, #244
-; ARMV7-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
-; ARMV7-NEXT: .LBB0_13:
-; ARMV7-NEXT: ldr r0, [sp, #108] @ 4-byte Reload
-; ARMV7-NEXT: b .LBB0_5
-; ARMV7-NEXT: .LBB0_14:
-; ARMV7-NEXT: mov r2, #0
-; ARMV7-NEXT: ldr r3, [sp, #108] @ 4-byte Reload
-; ARMV7-NEXT: ldr r4, [sp, #96] @ 4-byte Reload
-; ARMV7-NEXT: ldr r5, [sp, #92] @ 4-byte Reload
-; ARMV7-NEXT: b .LBB0_11
-; ARMV7-NEXT: .LBB0_15:
-; ARMV7-NEXT: ldr r11, [sp, #48] @ 4-byte Reload
-; ARMV7-NEXT: b .LBB0_6
+; ARMV7-NOT: __divmodti4
+; ARMV7-NOT: __divti3
+; ARMV7-NOT: __modti3
+; ARMV7: pop{{.*}}pc}
%q = sdiv i128 %n, %d
%r = srem i128 %n, %d
store i128 %q, ptr %q_out
@@ -1284,1128 +27,16 @@ define void @sdivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
define void @udivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
; ARMV6-LABEL: udivrem_i128:
-; ARMV6: @ %bb.0: @ %_udiv-special-cases_udiv-special-cases
-; ARMV6-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
-; ARMV6-NEXT: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
-; ARMV6-NEXT: .pad #228
-; ARMV6-NEXT: sub sp, sp, #228
-; ARMV6-NEXT: ldr r11, [sp, #264]
-; ARMV6-NEXT: mov r8, r2
-; ARMV6-NEXT: ldr r4, [sp, #268]
-; ARMV6-NEXT: clz r2, r3
-; ARMV6-NEXT: str r0, [sp, #28] @ 4-byte Spill
-; ARMV6-NEXT: clz r0, r11
-; ARMV6-NEXT: str r1, [sp, #32] @ 4-byte Spill
-; ARMV6-NEXT: clz r1, r8
-; ARMV6-NEXT: add r0, r0, #32
-; ARMV6-NEXT: cmp r4, #0
-; ARMV6-NEXT: clzne r0, r4
-; ARMV6-NEXT: add r1, r1, #32
-; ARMV6-NEXT: cmp r3, #0
-; ARMV6-NEXT: str r1, [sp, #8] @ 4-byte Spill
-; ARMV6-NEXT: str r2, [sp, #4] @ 4-byte Spill
-; ARMV6-NEXT: movne r1, r2
-; ARMV6-NEXT: orrs r2, r11, r4
-; ARMV6-NEXT: ldr lr, [sp, #280]
-; ARMV6-NEXT: ldr r2, [sp, #272]
-; ARMV6-NEXT: addeq r0, r1, #64
-; ARMV6-NEXT: ldr r7, [sp, #284]
-; ARMV6-NEXT: clz r1, lr
-; ARMV6-NEXT: ldr r12, [sp, #276]
-; ARMV6-NEXT: clz r2, r2
-; ARMV6-NEXT: add r1, r1, #32
-; ARMV6-NEXT: cmp r7, #0
-; ARMV6-NEXT: clzne r1, r7
-; ARMV6-NEXT: add r2, r2, #32
-; ARMV6-NEXT: cmp r12, #0
-; ARMV6-NEXT: mov r10, r3
-; ARMV6-NEXT: clzne r2, r12
-; ARMV6-NEXT: orrs r3, lr, r7
-; ARMV6-NEXT: addeq r1, r2, #64
-; ARMV6-NEXT: mov r2, #0
-; ARMV6-NEXT: subs r9, r1, r0
-; ARMV6-NEXT: ldr r3, [sp, #272]
-; ARMV6-NEXT: sbcs r6, r2, #0
-; ARMV6-NEXT: str r4, [sp, #48] @ 4-byte Spill
-; ARMV6-NEXT: sbcs r5, r2, #0
-; ARMV6-NEXT: orr r3, r3, lr
-; ARMV6-NEXT: sbc r1, r2, #0
-; ARMV6-NEXT: rsbs r0, r9, #127
-; ARMV6-NEXT: rscs r0, r6, #0
-; ARMV6-NEXT: str r1, [sp, #84] @ 4-byte Spill
-; ARMV6-NEXT: rscs r0, r5, #0
-; ARMV6-NEXT: orr r2, r8, r11
-; ARMV6-NEXT: rscs r0, r1, #0
-; ARMV6-NEXT: orr r1, r10, r4
-; ARMV6-NEXT: orr r1, r2, r1
-; ARMV6-NEXT: orr r2, r12, r7
-; ARMV6-NEXT: orr r2, r3, r2
-; ARMV6-NEXT: clz r1, r1
-; ARMV6-NEXT: clz r2, r2
-; ARMV6-NEXT: mov r0, #0
-; ARMV6-NEXT: lsr r1, r1, #5
-; ARMV6-NEXT: movlo r0, #1
-; ARMV6-NEXT: lsr r2, r2, #5
-; ARMV6-NEXT: orr r1, r2, r1
-; ARMV6-NEXT: orrs r0, r1, r0
-; ARMV6-NEXT: mov r3, r10
-; ARMV6-NEXT: mov r7, r8
-; ARMV6-NEXT: str r11, [sp, #44] @ 4-byte Spill
-; ARMV6-NEXT: movne r4, #0
-; ARMV6-NEXT: movne r11, #0
-; ARMV6-NEXT: movne r3, #0
-; ARMV6-NEXT: movne r7, #0
-; ARMV6-NEXT: str r10, [sp, #40] @ 4-byte Spill
-; ARMV6-NEXT: str r8, [sp, #36] @ 4-byte Spill
-; ARMV6-NEXT: bne .LBB1_6
-; ARMV6-NEXT: @ %bb.1: @ %_udiv-special-cases_udiv-special-cases
-; ARMV6-NEXT: ldr r1, [sp, #84] @ 4-byte Reload
-; ARMV6-NEXT: eor r0, r9, #127
-; ARMV6-NEXT: orr r0, r0, r5
-; ARMV6-NEXT: str r5, [sp, #76] @ 4-byte Spill
-; ARMV6-NEXT: orr r1, r6, r1
-; ARMV6-NEXT: str r6, [sp, #80] @ 4-byte Spill
-; ARMV6-NEXT: orrs r0, r0, r1
-; ARMV6-NEXT: beq .LBB1_6
-; ARMV6-NEXT: @ %bb.2: @ %udiv-bb15
-; ARMV6-NEXT: mov r0, #0
-; ARMV6-NEXT: str r8, [sp, #208]
-; ARMV6-NEXT: str r0, [sp, #204]
-; ARMV6-NEXT: add r2, sp, #192
-; ARMV6-NEXT: str r0, [sp, #200]
-; ARMV6-NEXT: mov r11, #12
-; ARMV6-NEXT: str r0, [sp, #196]
-; ARMV6-NEXT: add r2, r2, #16
-; ARMV6-NEXT: str r0, [sp, #192]
-; ARMV6-NEXT: rsb r0, r9, #127
-; ARMV6-NEXT: str r10, [sp, #212]
-; ARMV6-NEXT: mov r6, r8
-; ARMV6-NEXT: ldr lr, [sp, #44] @ 4-byte Reload
-; ARMV6-NEXT: and r1, r11, r0, lsr #3
-; ARMV6-NEXT: str lr, [sp, #216]
-; ARMV6-NEXT: and r0, r0, #31
-; ARMV6-NEXT: ldr r12, [sp, #48] @ 4-byte Reload
-; ARMV6-NEXT: eor r5, r0, #31
-; ARMV6-NEXT: str r12, [sp, #220]
-; ARMV6-NEXT: ldr r1, [r2, -r1]!
-; ARMV6-NEXT: ldr r4, [r2, #8]
-; ARMV6-NEXT: ldr r7, [r2, #4]
-; ARMV6-NEXT: ldr r2, [r2, #12]
-; ARMV6-NEXT: lsr r3, r4, #1
-; ARMV6-NEXT: lsl r2, r2, r0
-; ARMV6-NEXT: orr r2, r2, r3, lsr r5
-; ARMV6-NEXT: str r2, [sp, #92] @ 4-byte Spill
-; ARMV6-NEXT: lsl r2, r4, r0
-; ARMV6-NEXT: lsrs r3, r7, #1
-; ARMV6-NEXT: orr r8, r2, r3, lsr r5
-; ARMV6-NEXT: lsl r2, r7, r0
-; ARMV6-NEXT: lsr r3, r1, #1
-; ARMV6-NEXT: lsl r4, r1, r0
-; ARMV6-NEXT: ldr r0, [sp, #80] @ 4-byte Reload
-; ARMV6-NEXT: orr r2, r2, r3, lsr r5
-; ARMV6-NEXT: adds r5, r9, #1
-; ARMV6-NEXT: str r2, [sp, #72] @ 4-byte Spill
-; ARMV6-NEXT: adcs r0, r0, #0
-; ARMV6-NEXT: str r0, [sp, #80] @ 4-byte Spill
-; ARMV6-NEXT: ldr r0, [sp, #76] @ 4-byte Reload
-; ARMV6-NEXT: mov r2, #0
-; ARMV6-NEXT: adcs r0, r0, #0
-; ARMV6-NEXT: str r0, [sp, #76] @ 4-byte Spill
-; ARMV6-NEXT: ldr r0, [sp, #84] @ 4-byte Reload
-; ARMV6-NEXT: adcs r7, r0, #0
-; ARMV6-NEXT: mov r0, #0
-; ARMV6-NEXT: str r0, [sp, #88] @ 4-byte Spill
-; ARMV6-NEXT: adcs r0, r2, #0
-; ARMV6-NEXT: bne .LBB1_13
-; ARMV6-NEXT: @ %bb.3: @ %udiv-preheader4
-; ARMV6-NEXT: mov r0, #0
-; ARMV6-NEXT: str r12, [sp, #172]
-; ARMV6-NEXT: str r0, [sp, #188]
-; ARMV6-NEXT: str r0, [sp, #184]
-; ARMV6-NEXT: str r0, [sp, #180]
-; ARMV6-NEXT: str r0, [sp, #176]
-; ARMV6-NEXT: add r0, sp, #160
-; ARMV6-NEXT: stm r0, {r6, r10, lr}
-; ARMV6-NEXT: and r0, r11, r5, lsr #3
-; ARMV6-NEXT: add r6, sp, #160
-; ARMV6-NEXT: add r2, r6, r0
-; ARMV6-NEXT: ldr r10, [sp, #80] @ 4-byte Reload
-; ARMV6-NEXT: ldr r11, [sp, #72] @ 4-byte Reload
-; ARMV6-NEXT: ldmib r2, {r1, r3, lr}
-; ARMV6-NEXT: lsl r2, lr, #1
-; ARMV6-NEXT: str r5, [sp, #84] @ 4-byte Spill
-; ARMV6-NEXT: and r5, r5, #31
-; ARMV6-NEXT: ldr r0, [r6, r0]
-; ARMV6-NEXT: eor r12, r5, #31
-; ARMV6-NEXT: lsr r9, r3, r5
-; ARMV6-NEXT: orr r9, r9, r2, lsl r12
-; ARMV6-NEXT: lsr r2, r1, r5
-; ARMV6-NEXT: lsl r1, r1, #1
-; ARMV6-NEXT: lsl r3, r3, #1
-; ARMV6-NEXT: lsr r0, r0, r5
-; ARMV6-NEXT: orr r1, r0, r1, lsl r12
-; ARMV6-NEXT: ldr r0, [sp, #272]
-; ARMV6-NEXT: orr r3, r2, r3, lsl r12
-; ARMV6-NEXT: ldr r6, [sp, #92] @ 4-byte Reload
-; ARMV6-NEXT: lsr lr, lr, r5
-; ARMV6-NEXT: subs r0, r0, #1
-; ARMV6-NEXT: str r0, [sp, #64] @ 4-byte Spill
-; ARMV6-NEXT: ldr r0, [sp, #276]
-; ARMV6-NEXT: mov r5, #0
-; ARMV6-NEXT: ldr r12, [sp, #76] @ 4-byte Reload
-; ARMV6-NEXT: sbcs r0, r0, #0
-; ARMV6-NEXT: str r0, [sp, #60] @ 4-byte Spill
-; ARMV6-NEXT: ldr r0, [sp, #280]
-; ARMV6-NEXT: sbcs r0, r0, #0
-; ARMV6-NEXT: str r0, [sp, #56] @ 4-byte Spill
-; ARMV6-NEXT: ldr r0, [sp, #284]
-; ARMV6-NEXT: sbc r0, r0, #0
-; ARMV6-NEXT: str r0, [sp, #52] @ 4-byte Spill
-; ARMV6-NEXT: .LBB1_4: @ %udiv-do-while3
-; ARMV6-NEXT: @ =>This Inner Loop Header: Depth=1
-; ARMV6-NEXT: ldr r2, [sp, #88] @ 4-byte Reload
-; ARMV6-NEXT: lsl r0, lr, #1
-; ARMV6-NEXT: str r2, [sp, #68] @ 4-byte Spill
-; ARMV6-NEXT: lsl r2, r9, #1
-; ARMV6-NEXT: orr r2, r2, r3, lsr #31
-; ARMV6-NEXT: lsl r3, r3, #1
-; ARMV6-NEXT: orr r3, r3, r1, lsr #31
-; ARMV6-NEXT: lsl r1, r1, #1
-; ARMV6-NEXT: str r6, [sp, #92] @ 4-byte Spill
-; ARMV6-NEXT: orr r1, r1, r6, lsr #31
-; ARMV6-NEXT: ldr r6, [sp, #64] @ 4-byte Reload
-; ARMV6-NEXT: orr r0, r0, r9, lsr #31
-; ARMV6-NEXT: str r4, [sp, #72] @ 4-byte Spill
-; ARMV6-NEXT: subs r4, r6, r1
-; ARMV6-NEXT: ldr r6, [sp, #60] @ 4-byte Reload
-; ARMV6-NEXT: str r11, [sp, #76] @ 4-byte Spill
-; ARMV6-NEXT: sbcs r4, r6, r3
-; ARMV6-NEXT: ldr r6, [sp, #56] @ 4-byte Reload
-; ARMV6-NEXT: str r8, [sp, #80] @ 4-byte Spill
-; ARMV6-NEXT: sbcs r4, r6, r2
-; ARMV6-NEXT: ldr r6, [sp, #52] @ 4-byte Reload
-; ARMV6-NEXT: sbc r4, r6, r0
-; ARMV6-NEXT: ldr r6, [sp, #284]
-; ARMV6-NEXT: and r11, r6, r4, asr #31
-; ARMV6-NEXT: ldr r6, [sp, #280]
-; ARMV6-NEXT: and lr, r6, r4, asr #31
-; ARMV6-NEXT: ldr r6, [sp, #276]
-; ARMV6-NEXT: and r8, r6, r4, asr #31
-; ARMV6-NEXT: ldr r6, [sp, #272]
-; ARMV6-NEXT: and r9, r6, r4, asr #31
-; ARMV6-NEXT: ldr r6, [sp, #72] @ 4-byte Reload
-; ARMV6-NEXT: subs r1, r1, r9
-; ARMV6-NEXT: sbcs r3, r3, r8
-; ARMV6-NEXT: sbcs r9, r2, lr
-; ARMV6-NEXT: sbc lr, r0, r11
-; ARMV6-NEXT: mov r0, #1
-; ARMV6-NEXT: and r0, r0, r4, asr #31
-; ARMV6-NEXT: str r0, [sp, #88] @ 4-byte Spill
-; ARMV6-NEXT: ldr r0, [sp, #84] @ 4-byte Reload
-; ARMV6-NEXT: subs r0, r0, #1
-; ARMV6-NEXT: str r0, [sp, #84] @ 4-byte Spill
-; ARMV6-NEXT: sbcs r10, r10, #0
-; ARMV6-NEXT: sbcs r12, r12, #0
-; ARMV6-NEXT: sbc r7, r7, #0
-; ARMV6-NEXT: orr r0, r0, r12
-; ARMV6-NEXT: orr r4, r10, r7
-; ARMV6-NEXT: orrs r0, r0, r4
-; ARMV6-NEXT: ldr r4, [sp, #76] @ 4-byte Reload
-; ARMV6-NEXT: ldr r0, [sp, #68] @ 4-byte Reload
-; ARMV6-NEXT: orr r2, r0, r6, lsl #1
-; ARMV6-NEXT: lsl r0, r4, #1
-; ARMV6-NEXT: orr r0, r0, r6, lsr #31
-; ARMV6-NEXT: ldr r6, [sp, #80] @ 4-byte Reload
-; ARMV6-NEXT: orr r11, r5, r0
-; ARMV6-NEXT: lsl r0, r6, #1
-; ARMV6-NEXT: orr r0, r0, r4, lsr #31
-; ARMV6-NEXT: mov r4, r2
-; ARMV6-NEXT: orr r8, r5, r0
-; ARMV6-NEXT: ldr r0, [sp, #92] @ 4-byte Reload
-; ARMV6-NEXT: lsl r0, r0, #1
-; ARMV6-NEXT: orr r0, r0, r6, lsr #31
-; ARMV6-NEXT: orr r6, r5, r0
-; ARMV6-NEXT: mov r5, #0
-; ARMV6-NEXT: bne .LBB1_4
-; ARMV6-NEXT: .LBB1_5: @ %udiv-loop-exit2
-; ARMV6-NEXT: ldr r0, [sp, #88] @ 4-byte Reload
-; ARMV6-NEXT: lsl r3, r11, #1
-; ARMV6-NEXT: lsl r2, r8, #1
-; ARMV6-NEXT: orr r3, r3, r4, lsr #31
-; ARMV6-NEXT: orr r7, r0, r4, lsl #1
-; ARMV6-NEXT: lsl r0, r6, #1
-; ARMV6-NEXT: orr r4, r0, r8, lsr #31
-; ARMV6-NEXT: ldr r10, [sp, #40] @ 4-byte Reload
-; ARMV6-NEXT: ldr r8, [sp, #36] @ 4-byte Reload
-; ARMV6-NEXT: orr r11, r2, r11, lsr #31
-; ARMV6-NEXT: .LBB1_6: @ %udiv-end1
-; ARMV6-NEXT: ldr r6, [sp, #44] @ 4-byte Reload
-; ARMV6-NEXT: mov lr, #0
-; ARMV6-NEXT: str r7, [sp, #12] @ 4-byte Spill
-; ARMV6-NEXT: ldr r7, [sp, #48] @ 4-byte Reload
-; ARMV6-NEXT: clz r0, r6
-; ARMV6-NEXT: ldmib sp, {r1, r2} @ 8-byte Folded Reload
-; ARMV6-NEXT: add r0, r0, #32
-; ARMV6-NEXT: cmp r7, #0
-; ARMV6-NEXT: clzne r0, r7
-; ARMV6-NEXT: cmp r10, #0
-; ARMV6-NEXT: movne r2, r1
-; ARMV6-NEXT: orrs r1, r6, r7
-; ARMV6-NEXT: ldr r5, [sp, #280]
-; ARMV6-NEXT: addeq r0, r2, #64
-; ARMV6-NEXT: ldr r2, [sp, #272]
-; ARMV6-NEXT: str r4, [sp, #24] @ 4-byte Spill
-; ARMV6-NEXT: clz r1, r5
-; ARMV6-NEXT: ldr r4, [sp, #284]
-; ARMV6-NEXT: clz r2, r2
-; ARMV6-NEXT: ldr r12, [sp, #276]
-; ARMV6-NEXT: add r1, r1, #32
-; ARMV6-NEXT: cmp r4, #0
-; ARMV6-NEXT: add r2, r2, #32
-; ARMV6-NEXT: clzne r1, r4
-; ARMV6-NEXT: cmp r12, #0
-; ARMV6-NEXT: str r3, [sp, #16] @ 4-byte Spill
-; ARMV6-NEXT: clzne r2, r12
-; ARMV6-NEXT: orrs r3, r5, r4
-; ARMV6-NEXT: str r11, [sp, #20] @ 4-byte Spill
-; ARMV6-NEXT: addeq r1, r2, #64
-; ARMV6-NEXT: subs r9, r1, r0
-; ARMV6-NEXT: sbcs r2, lr, #0
-; ARMV6-NEXT: sbcs r1, lr, #0
-; ARMV6-NEXT: sbc r3, lr, #0
-; ARMV6-NEXT: rsbs r0, r9, #127
-; ARMV6-NEXT: rscs r0, r2, #0
-; ARMV6-NEXT: mov r11, r1
-; ARMV6-NEXT: rscs r0, r1, #0
-; ARMV6-NEXT: orr r1, r10, r7
-; ARMV6-NEXT: rscs r0, r3, #0
-; ARMV6-NEXT: str r3, [sp, #84] @ 4-byte Spill
-; ARMV6-NEXT: orr r0, r8, r6
-; ARMV6-NEXT: orr r3, r12, r4
-; ARMV6-NEXT: orr r0, r0, r1
-; ARMV6-NEXT: ldr r1, [sp, #272]
-; ARMV6-NEXT: clz r0, r0
-; ARMV6-NEXT: movlo lr, #1
-; ARMV6-NEXT: orr r1, r1, r5
-; ARMV6-NEXT: mov r4, r8
-; ARMV6-NEXT: orr r1, r1, r3
-; ARMV6-NEXT: lsr r0, r0, #5
-; ARMV6-NEXT: clz r1, r1
-; ARMV6-NEXT: mov r3, r10
-; ARMV6-NEXT: lsr r1, r1, #5
-; ARMV6-NEXT: orr r0, r1, r0
-; ARMV6-NEXT: orrs r0, r0, lr
-; ARMV6-NEXT: movne r7, #0
-; ARMV6-NEXT: movne r6, #0
-; ARMV6-NEXT: movne r3, #0
-; ARMV6-NEXT: movne r4, #0
-; ARMV6-NEXT: bne .LBB1_12
-; ARMV6-NEXT: @ %bb.7: @ %udiv-end1
-; ARMV6-NEXT: ldr r1, [sp, #84] @ 4-byte Reload
-; ARMV6-NEXT: eor r0, r9, #127
-; ARMV6-NEXT: orr r0, r0, r11
-; ARMV6-NEXT: orr r1, r2, r1
-; ARMV6-NEXT: orrs r0, r0, r1
-; ARMV6-NEXT: beq .LBB1_12
-; ARMV6-NEXT: @ %bb.8: @ %udiv-bb1
-; ARMV6-NEXT: rsb r1, r9, #127
-; ARMV6-NEXT: add r4, sp, #128
-; ARMV6-NEXT: ldr r0, [sp, #264]
-; ARMV6-NEXT: mov lr, r11
-; ARMV6-NEXT: mov r12, r2
-; ARMV6-NEXT: mov r2, #0
-; ARMV6-NEXT: str r0, [sp, #152]
-; ARMV6-NEXT: mov r0, #12
-; ARMV6-NEXT: ldr r11, [sp, #268]
-; ARMV6-NEXT: and r3, r0, r1, lsr #3
-; ARMV6-NEXT: add r4, r4, #16
-; ARMV6-NEXT: str r8, [sp, #144]
-; ARMV6-NEXT: str r2, [sp, #140]
-; ARMV6-NEXT: and r0, r1, #31
-; ARMV6-NEXT: str r2, [sp, #136]
-; ARMV6-NEXT: eor r6, r0, #31
-; ARMV6-NEXT: str r2, [sp, #132]
-; ARMV6-NEXT: str r2, [sp, #128]
-; ARMV6-NEXT: str r10, [sp, #148]
-; ARMV6-NEXT: str r11, [sp, #156]
-; ARMV6-NEXT: ldr r3, [r4, -r3]!
-; ARMV6-NEXT: ldr r7, [r4, #4]
-; ARMV6-NEXT: ldr r5, [r4, #8]
-; ARMV6-NEXT: ldr r4, [r4, #12]
-; ARMV6-NEXT: lsl r1, r4, r0
-; ARMV6-NEXT: lsr r4, r5, #1
-; ARMV6-NEXT: orr r1, r1, r4, lsr r6
-; ARMV6-NEXT: str r1, [sp, #92] @ 4-byte Spill
-; ARMV6-NEXT: lsl r1, r5, r0
-; ARMV6-NEXT: lsrs r4, r7, #1
-; ARMV6-NEXT: orr r1, r1, r4, lsr r6
-; ARMV6-NEXT: str r1, [sp, #88] @ 4-byte Spill
-; ARMV6-NEXT: lsl r1, r7, r0
-; ARMV6-NEXT: lsr r4, r3, #1
-; ARMV6-NEXT: orr r1, r1, r4, lsr r6
-; ARMV6-NEXT: adds r7, r9, #1
-; ARMV6-NEXT: adcs r6, r12, #0
-; ARMV6-NEXT: str r1, [sp, #80] @ 4-byte Spill
-; ARMV6-NEXT: lsl r1, r3, r0
-; ARMV6-NEXT: ldr r0, [sp, #84] @ 4-byte Reload
-; ARMV6-NEXT: adcs r9, lr, #0
-; ARMV6-NEXT: adcs r0, r0, #0
-; ARMV6-NEXT: str r0, [sp, #76] @ 4-byte Spill
-; ARMV6-NEXT: adcs r0, r2, #0
-; ARMV6-NEXT: bne .LBB1_14
-; ARMV6-NEXT: @ %bb.9: @ %udiv-preheader
-; ARMV6-NEXT: ldr r0, [sp, #264]
-; ARMV6-NEXT: add r5, sp, #96
-; ARMV6-NEXT: str r0, [sp, #104]
-; ARMV6-NEXT: mov r0, #12
-; ARMV6-NEXT: str r2, [sp, #124]
-; ARMV6-NEXT: str r2, [sp, #120]
-; ARMV6-NEXT: str r2, [sp, #116]
-; ARMV6-NEXT: str r2, [sp, #112]
-; ARMV6-NEXT: and r2, r0, r7, lsr #3
-; ARMV6-NEXT: add r0, r5, r2
-; ARMV6-NEXT: str r11, [sp, #108]
-; ARMV6-NEXT: str r8, [sp, #96]
-; ARMV6-NEXT: and r11, r7, #31
-; ARMV6-NEXT: str r10, [sp, #100]
-; ARMV6-NEXT: eor r12, r11, #31
-; ARMV6-NEXT: ldr r3, [r0, #8]
-; ARMV6-NEXT: ldr r8, [r0, #12]
-; ARMV6-NEXT: ldr r4, [r0, #4]
-; ARMV6-NEXT: str r7, [sp, #84] @ 4-byte Spill
-; ARMV6-NEXT: lsr r0, r3, r11
-; ARMV6-NEXT: lsl r7, r8, #1
-; ARMV6-NEXT: lsl r3, r3, #1
-; ARMV6-NEXT: orr lr, r0, r7, lsl r12
-; ARMV6-NEXT: lsr r0, r4, r11
-; ARMV6-NEXT: orr r10, r0, r3, lsl r12
-; ARMV6-NEXT: ldr r3, [r5, r2]
-; ARMV6-NEXT: lsl r4, r4, #1
-; ARMV6-NEXT: lsr r8, r8, r11
-; ARMV6-NEXT: ldr r5, [sp, #92] @ 4-byte Reload
-; ARMV6-NEXT: lsr r3, r3, r11
-; ARMV6-NEXT: orr r4, r3, r4, lsl r12
-; ARMV6-NEXT: ldr r3, [sp, #272]
-; ARMV6-NEXT: ldr r12, [sp, #76] @ 4-byte Reload
-; ARMV6-NEXT: subs r0, r3, #1
-; ARMV6-NEXT: ldr r3, [sp, #276]
-; ARMV6-NEXT: str r0, [sp, #64] @ 4-byte Spill
-; ARMV6-NEXT: sbcs r0, r3, #0
-; ARMV6-NEXT: ldr r3, [sp, #280]
-; ARMV6-NEXT: str r0, [sp, #60] @ 4-byte Spill
-; ARMV6-NEXT: sbcs r0, r3, #0
-; ARMV6-NEXT: ldr r3, [sp, #284]
-; ARMV6-NEXT: ldr r11, [sp, #80] @ 4-byte Reload
-; ARMV6-NEXT: str r0, [sp, #56] @ 4-byte Spill
-; ARMV6-NEXT: sbc r0, r3, #0
-; ARMV6-NEXT: str r0, [sp, #52] @ 4-byte Spill
-; ARMV6-NEXT: mov r3, #0
-; ARMV6-NEXT: mov r0, #0
-; ARMV6-NEXT: .LBB1_10: @ %udiv-do-while
-; ARMV6-NEXT: @ =>This Inner Loop Header: Depth=1
-; ARMV6-NEXT: add r7, sp, #72
-; ARMV6-NEXT: ldr r2, [sp, #88] @ 4-byte Reload
-; ARMV6-NEXT: stm r7, {r0, r1, r2} @ 12-byte Folded Spill
-; ARMV6-NEXT: lsl r0, r10, #1
-; ARMV6-NEXT: orr r0, r0, r4, lsr #31
-; ARMV6-NEXT: lsl r4, r4, #1
-; ARMV6-NEXT: ldr r1, [sp, #64] @ 4-byte Reload
-; ARMV6-NEXT: orr r4, r4, r5, lsr #31
-; ARMV6-NEXT: str r5, [sp, #92] @ 4-byte Spill
-; ARMV6-NEXT: subs r5, r1, r4
-; ARMV6-NEXT: ldr r1, [sp, #60] @ 4-byte Reload
-; ARMV6-NEXT: lsl r7, lr, #1
-; ARMV6-NEXT: mov r2, r3
-; ARMV6-NEXT: sbcs r5, r1, r0
-; ARMV6-NEXT: ldr r1, [sp, #56] @ 4-byte Reload
-; ARMV6-NEXT: orr r7, r7, r10, lsr #31
-; ARMV6-NEXT: lsl r3, r8, #1
-; ARMV6-NEXT: sbcs r5, r1, r7
-; ARMV6-NEXT: ldr r1, [sp, #52] @ 4-byte Reload
-; ARMV6-NEXT: orr r3, r3, lr, lsr #31
-; ARMV6-NEXT: str r11, [sp, #88] @ 4-byte Spill
-; ARMV6-NEXT: sbc r5, r1, r3
-; ARMV6-NEXT: ldr r1, [sp, #284]
-; ARMV6-NEXT: and r11, r1, r5, asr #31
-; ARMV6-NEXT: ldr r1, [sp, #280]
-; ARMV6-NEXT: and lr, r1, r5, asr #31
-; ARMV6-NEXT: ldr r1, [sp, #276]
-; ARMV6-NEXT: and r10, r1, r5, asr #31
-; ARMV6-NEXT: ldr r1, [sp, #272]
-; ARMV6-NEXT: and r8, r1, r5, asr #31
-; ARMV6-NEXT: subs r1, r4, r8
-; ARMV6-NEXT: str r1, [sp, #68] @ 4-byte Spill
-; ARMV6-NEXT: sbcs r4, r0, r10
-; ARMV6-NEXT: mov r0, #1
-; ARMV6-NEXT: sbcs r10, r7, lr
-; ARMV6-NEXT: and r0, r0, r5, asr #31
-; ARMV6-NEXT: sbc r8, r3, r11
-; ARMV6-NEXT: ldr r3, [sp, #84] @ 4-byte Reload
-; ARMV6-NEXT: ldr r1, [sp, #76] @ 4-byte Reload
-; ARMV6-NEXT: subs r3, r3, #1
-; ARMV6-NEXT: str r3, [sp, #84] @ 4-byte Spill
-; ARMV6-NEXT: sbcs r6, r6, #0
-; ARMV6-NEXT: ldr lr, [sp, #88] @ 4-byte Reload
-; ARMV6-NEXT: sbcs r9, r9, #0
-; ARMV6-NEXT: sbc r12, r12, #0
-; ARMV6-NEXT: orr r3, r3, r9
-; ARMV6-NEXT: orr r5, r6, r12
-; ARMV6-NEXT: orrs r3, r3, r5
-; ARMV6-NEXT: ldr r5, [sp, #80] @ 4-byte Reload
-; ARMV6-NEXT: ldr r3, [sp, #72] @ 4-byte Reload
-; ARMV6-NEXT: orr r7, r3, r1, lsl #1
-; ARMV6-NEXT: lsl r3, lr, #1
-; ARMV6-NEXT: orr r1, r3, r1, lsr #31
-; ARMV6-NEXT: lsl r3, r5, #1
-; ARMV6-NEXT: orr r3, r3, lr, lsr #31
-; ARMV6-NEXT: orr r11, r2, r1
-; ARMV6-NEXT: orr r1, r2, r3
-; ARMV6-NEXT: str r1, [sp, #88] @ 4-byte Spill
-; ARMV6-NEXT: ldr r1, [sp, #92] @ 4-byte Reload
-; ARMV6-NEXT: mov lr, r10
-; ARMV6-NEXT: mov r10, r4
-; ARMV6-NEXT: ldr r4, [sp, #68] @ 4-byte Reload
-; ARMV6-NEXT: lsl r3, r1, #1
-; ARMV6-NEXT: mov r1, r7
-; ARMV6-NEXT: orr r3, r3, r5, lsr #31
-; ARMV6-NEXT: orr r5, r2, r3
-; ARMV6-NEXT: mov r3, #0
-; ARMV6-NEXT: bne .LBB1_10
-; ARMV6-NEXT: .LBB1_11: @ %udiv-loop-exit
-; ARMV6-NEXT: orr r4, r0, r1, lsl #1
-; ARMV6-NEXT: lsl r0, r11, #1
-; ARMV6-NEXT: orr r3, r0, r1, lsr #31
-; ARMV6-NEXT: ldr r1, [sp, #88] @ 4-byte Reload
-; ARMV6-NEXT: ldr r10, [sp, #40] @ 4-byte Reload
-; ARMV6-NEXT: ldr r8, [sp, #36] @ 4-byte Reload
-; ARMV6-NEXT: lsl r0, r1, #1
-; ARMV6-NEXT: orr r6, r0, r11, lsr #31
-; ARMV6-NEXT: lsl r0, r5, #1
-; ARMV6-NEXT: orr r7, r0, r1, lsr #31
-; ARMV6-NEXT: .LBB1_12: @ %udiv-end
-; ARMV6-NEXT: ldr r0, [sp, #28] @ 4-byte Reload
-; ARMV6-NEXT: mov lr, r7
-; ARMV6-NEXT: ldr r1, [sp, #12] @ 4-byte Reload
-; ARMV6-NEXT: mov r7, #0
-; ARMV6-NEXT: ldr r11, [sp, #272]
-; ARMV6-NEXT: mov r12, r6
-; ARMV6-NEXT: str r1, [r0]
-; ARMV6-NEXT: mov r2, #0
-; ARMV6-NEXT: ldr r1, [sp, #16] @ 4-byte Reload
-; ARMV6-NEXT: str r1, [r0, #4]
-; ARMV6-NEXT: umull r6, r5, r11, r3
-; ARMV6-NEXT: ldr r1, [sp, #20] @ 4-byte Reload
-; ARMV6-NEXT: str r1, [r0, #8]
-; ARMV6-NEXT: ldr r1, [sp, #24] @ 4-byte Reload
-; ARMV6-NEXT: str r1, [r0, #12]
-; ARMV6-NEXT: umull r0, r1, r11, r4
-; ARMV6-NEXT: ldr r9, [sp, #276]
-; ARMV6-NEXT: umlal r1, r7, r9, r4
-; ARMV6-NEXT: str r0, [sp, #92] @ 4-byte Spill
-; ARMV6-NEXT: adds r0, r6, r1
-; ARMV6-NEXT: ldr r1, [sp, #280]
-; ARMV6-NEXT: adcs r7, r7, r5
-; ARMV6-NEXT: str r0, [sp, #88] @ 4-byte Spill
-; ARMV6-NEXT: ldr r0, [sp, #284]
-; ARMV6-NEXT: adc r2, r2, #0
-; ARMV6-NEXT: umull r6, r5, r4, r1
-; ARMV6-NEXT: mla r5, r4, r0, r5
-; ARMV6-NEXT: ldr r0, [sp, #92] @ 4-byte Reload
-; ARMV6-NEXT: umlal r7, r2, r9, r3
-; ARMV6-NEXT: mla r3, r3, r1, r5
-; ARMV6-NEXT: ldr r1, [sp, #88] @ 4-byte Reload
-; ARMV6-NEXT: umull r5, r4, r12, r11
-; ARMV6-NEXT: mla r4, r12, r9, r4
-; ARMV6-NEXT: adds r6, r5, r6
-; ARMV6-NEXT: mla r4, lr, r11, r4
-; ARMV6-NEXT: adc r3, r4, r3
-; ARMV6-NEXT: adds r7, r7, r6
-; ARMV6-NEXT: adc r2, r2, r3
-; ARMV6-NEXT: subs r0, r8, r0
-; ARMV6-NEXT: ldr r3, [sp, #264]
-; ARMV6-NEXT: sbcs r1, r10, r1
-; ARMV6-NEXT: sbcs r3, r3, r7
-; ARMV6-NEXT: ldr r7, [sp, #268]
-; ARMV6-NEXT: sbc r2, r7, r2
-; ARMV6-NEXT: ldr r7, [sp, #32] @ 4-byte Reload
-; ARMV6-NEXT: stm r7, {r0, r1, r3}
-; ARMV6-NEXT: str r2, [r7, #12]
-; ARMV6-NEXT: add sp, sp, #228
-; ARMV6-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
-; ARMV6-NEXT: .LBB1_13:
-; ARMV6-NEXT: ldr r6, [sp, #92] @ 4-byte Reload
-; ARMV6-NEXT: ldr r11, [sp, #72] @ 4-byte Reload
-; ARMV6-NEXT: b .LBB1_5
-; ARMV6-NEXT: .LBB1_14:
-; ARMV6-NEXT: ldr r5, [sp, #92] @ 4-byte Reload
-; ARMV6-NEXT: mov r0, #0
-; ARMV6-NEXT: ldr r11, [sp, #80] @ 4-byte Reload
-; ARMV6-NEXT: b .LBB1_11
+; ARMV6-NOT: __udivmodti4
+; ARMV6-NOT: __udivti3
+; ARMV6-NOT: __umodti3
+; ARMV6: pop{{.*}}pc}
;
; ARMV7-LABEL: udivrem_i128:
-; ARMV7: @ %bb.0: @ %_udiv-special-cases_udiv-special-cases
-; ARMV7-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
-; ARMV7-NEXT: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
-; ARMV7-NEXT: .pad #228
-; ARMV7-NEXT: sub sp, sp, #228
-; ARMV7-NEXT: ldr r4, [sp, #264]
-; ARMV7-NEXT: mov r11, r3
-; ARMV7-NEXT: ldr r5, [sp, #268]
-; ARMV7-NEXT: clz r3, r3
-; ARMV7-NEXT: str r0, [sp, #24] @ 4-byte Spill
-; ARMV7-NEXT: clz r0, r4
-; ARMV7-NEXT: str r1, [sp, #28] @ 4-byte Spill
-; ARMV7-NEXT: clz r1, r2
-; ARMV7-NEXT: add r0, r0, #32
-; ARMV7-NEXT: cmp r5, #0
-; ARMV7-NEXT: clzne r0, r5
-; ARMV7-NEXT: add r1, r1, #32
-; ARMV7-NEXT: ldr r10, [sp, #280]
-; ARMV7-NEXT: cmp r11, #0
-; ARMV7-NEXT: str r1, [sp, #8] @ 4-byte Spill
-; ARMV7-NEXT: movne r1, r3
-; ARMV7-NEXT: str r3, [sp, #4] @ 4-byte Spill
-; ARMV7-NEXT: orrs r3, r4, r5
-; ARMV7-NEXT: ldr r12, [sp, #272]
-; ARMV7-NEXT: addeq r0, r1, #64
-; ARMV7-NEXT: ldr r6, [sp, #284]
-; ARMV7-NEXT: clz r1, r10
-; ARMV7-NEXT: ldr lr, [sp, #276]
-; ARMV7-NEXT: clz r3, r12
-; ARMV7-NEXT: add r1, r1, #32
-; ARMV7-NEXT: cmp r6, #0
-; ARMV7-NEXT: clzne r1, r6
-; ARMV7-NEXT: add r7, r3, #32
-; ARMV7-NEXT: cmp lr, #0
-; ARMV7-NEXT: str r4, [sp, #44] @ 4-byte Spill
-; ARMV7-NEXT: clzne r7, lr
-; ARMV7-NEXT: orrs r3, r10, r6
-; ARMV7-NEXT: addeq r1, r7, #64
-; ARMV7-NEXT: orr r7, r2, r4
-; ARMV7-NEXT: subs r9, r1, r0
-; ARMV7-NEXT: mov r0, #0
-; ARMV7-NEXT: sbcs r3, r0, #0
-; ARMV7-NEXT: str r3, [sp, #80] @ 4-byte Spill
-; ARMV7-NEXT: sbcs r8, r0, #0
-; ARMV7-NEXT: str r5, [sp, #40] @ 4-byte Spill
-; ARMV7-NEXT: sbc r1, r0, #0
-; ARMV7-NEXT: rsbs r0, r9, #127
-; ARMV7-NEXT: rscs r0, r3, #0
-; ARMV7-NEXT: str r1, [sp, #84] @ 4-byte Spill
-; ARMV7-NEXT: rscs r0, r8, #0
-; ARMV7-NEXT: orr r3, r12, r10
-; ARMV7-NEXT: rscs r0, r1, #0
-; ARMV7-NEXT: orr r1, lr, r6
-; ARMV7-NEXT: orr r1, r3, r1
-; ARMV7-NEXT: orr r3, r11, r5
-; ARMV7-NEXT: orr r3, r7, r3
-; ARMV7-NEXT: clz r1, r1
-; ARMV7-NEXT: clz r3, r3
-; ARMV7-NEXT: mov r0, #0
-; ARMV7-NEXT: lsr r1, r1, #5
-; ARMV7-NEXT: movwlo r0, #1
-; ARMV7-NEXT: lsr r3, r3, #5
-; ARMV7-NEXT: orr r1, r1, r3
-; ARMV7-NEXT: orrs r0, r1, r0
-; ARMV7-NEXT: mov r6, r11
-; ARMV7-NEXT: mov r7, r2
-; ARMV7-NEXT: movwne r5, #0
-; ARMV7-NEXT: movwne r4, #0
-; ARMV7-NEXT: movwne r6, #0
-; ARMV7-NEXT: movwne r7, #0
-; ARMV7-NEXT: str r2, [sp, #36] @ 4-byte Spill
-; ARMV7-NEXT: str r11, [sp, #32] @ 4-byte Spill
-; ARMV7-NEXT: bne .LBB1_6
-; ARMV7-NEXT: @ %bb.1: @ %_udiv-special-cases_udiv-special-cases
-; ARMV7-NEXT: ldr r1, [sp, #84] @ 4-byte Reload
-; ARMV7-NEXT: eor r0, r9, #127
-; ARMV7-NEXT: ldr r2, [sp, #80] @ 4-byte Reload
-; ARMV7-NEXT: orr r0, r0, r8
-; ARMV7-NEXT: str r8, [sp, #76] @ 4-byte Spill
-; ARMV7-NEXT: orr r2, r2, r1
-; ARMV7-NEXT: orrs r0, r0, r2
-; ARMV7-NEXT: beq .LBB1_6
-; ARMV7-NEXT: @ %bb.2: @ %udiv-bb15
-; ARMV7-NEXT: ldr r8, [sp, #36] @ 4-byte Reload
-; ARMV7-NEXT: mov r0, #0
-; ARMV7-NEXT: str r8, [sp, #208]
-; ARMV7-NEXT: add r3, sp, #192
-; ARMV7-NEXT: str r0, [sp, #204]
-; ARMV7-NEXT: mov r2, #12
-; ARMV7-NEXT: str r0, [sp, #200]
-; ARMV7-NEXT: add r3, r3, #16
-; ARMV7-NEXT: str r0, [sp, #196]
-; ARMV7-NEXT: str r0, [sp, #192]
-; ARMV7-NEXT: rsb r0, r9, #127
-; ARMV7-NEXT: str r11, [sp, #212]
-; ARMV7-NEXT: ldr r12, [sp, #44] @ 4-byte Reload
-; ARMV7-NEXT: and r2, r2, r0, lsr #3
-; ARMV7-NEXT: str r12, [sp, #216]
-; ARMV7-NEXT: and r0, r0, #31
-; ARMV7-NEXT: ldr r10, [sp, #40] @ 4-byte Reload
-; ARMV7-NEXT: eor r1, r0, #31
-; ARMV7-NEXT: str r10, [sp, #220]
-; ARMV7-NEXT: ldr r7, [r3, -r2]!
-; ARMV7-NEXT: ldmib r3, {r2, r4}
-; ARMV7-NEXT: lsr r6, r7, #1
-; ARMV7-NEXT: ldr r3, [r3, #12]
-; ARMV7-NEXT: lsl r5, r2, r0
-; ARMV7-NEXT: lsrs r2, r2, #1
-; ARMV7-NEXT: orr r5, r5, r6, lsr r1
-; ARMV7-NEXT: str r5, [sp, #72] @ 4-byte Spill
-; ARMV7-NEXT: lsl r3, r3, r0
-; ARMV7-NEXT: lsr r5, r4, #1
-; ARMV7-NEXT: orr r3, r3, r5, lsr r1
-; ARMV7-NEXT: str r3, [sp, #92] @ 4-byte Spill
-; ARMV7-NEXT: lsl r3, r4, r0
-; ARMV7-NEXT: lsl r0, r7, r0
-; ARMV7-NEXT: str r0, [sp, #68] @ 4-byte Spill
-; ARMV7-NEXT: adds r5, r9, #1
-; ARMV7-NEXT: ldr r0, [sp, #80] @ 4-byte Reload
-; ARMV7-NEXT: orr r1, r3, r2, lsr r1
-; ARMV7-NEXT: str r1, [sp, #64] @ 4-byte Spill
-; ARMV7-NEXT: mov r3, #0
-; ARMV7-NEXT: adcs r0, r0, #0
-; ARMV7-NEXT: str r0, [sp, #20] @ 4-byte Spill
-; ARMV7-NEXT: ldr r0, [sp, #76] @ 4-byte Reload
-; ARMV7-NEXT: adcs r6, r0, #0
-; ARMV7-NEXT: ldr r0, [sp, #84] @ 4-byte Reload
-; ARMV7-NEXT: adcs r0, r0, #0
-; ARMV7-NEXT: str r0, [sp, #76] @ 4-byte Spill
-; ARMV7-NEXT: mov r0, #0
-; ARMV7-NEXT: str r0, [sp, #88] @ 4-byte Spill
-; ARMV7-NEXT: adcs r0, r3, #0
-; ARMV7-NEXT: bne .LBB1_13
-; ARMV7-NEXT: @ %bb.3: @ %udiv-preheader4
-; ARMV7-NEXT: mov r0, #0
-; ARMV7-NEXT: ubfx r1, r5, #5, #2
-; ARMV7-NEXT: str r0, [sp, #188]
-; ARMV7-NEXT: and r9, r5, #31
-; ARMV7-NEXT: str r0, [sp, #184]
-; ARMV7-NEXT: eor r3, r9, #31
-; ARMV7-NEXT: str r0, [sp, #180]
-; ARMV7-NEXT: str r0, [sp, #176]
-; ARMV7-NEXT: add r0, sp, #160
-; ARMV7-NEXT: stm r0, {r8, r11, r12}
-; ARMV7-NEXT: add r12, sp, #160
-; ARMV7-NEXT: add r0, r12, r1, lsl #2
-; ARMV7-NEXT: str r10, [sp, #172]
-; ARMV7-NEXT: ldmib r0, {r4, r7, r8}
-; ARMV7-NEXT: lsr r10, r7, r9
-; ARMV7-NEXT: lsl r0, r8, #1
-; ARMV7-NEXT: lsl r7, r7, #1
-; ARMV7-NEXT: str r5, [sp, #84] @ 4-byte Spill
-; ARMV7-NEXT: orr lr, r10, r0, lsl r3
-; ARMV7-NEXT: lsr r10, r4, r9
-; ARMV7-NEXT: orr r0, r10, r7, lsl r3
-; ARMV7-NEXT: str r0, [sp, #80] @ 4-byte Spill
-; ARMV7-NEXT: ldr r1, [r12, r1, lsl #2]
-; ARMV7-NEXT: lsl r7, r4, #1
-; ARMV7-NEXT: ldr r12, [sp, #276]
-; ARMV7-NEXT: lsr r11, r8, r9
-; ARMV7-NEXT: ldr r10, [sp, #76] @ 4-byte Reload
-; ARMV7-NEXT: ldr r5, [sp, #68] @ 4-byte Reload
-; ARMV7-NEXT: mov r8, #0
-; ARMV7-NEXT: lsr r1, r1, r9
-; ARMV7-NEXT: ldr r9, [sp, #20] @ 4-byte Reload
-; ARMV7-NEXT: orr r1, r1, r7, lsl r3
-; ARMV7-NEXT: ldr r7, [sp, #272]
-; ARMV7-NEXT: ldr r2, [sp, #64] @ 4-byte Reload
-; ARMV7-NEXT: subs r3, r7, #1
-; ARMV7-NEXT: ldr r7, [sp, #280]
-; ARMV7-NEXT: str r3, [sp, #60] @ 4-byte Spill
-; ARMV7-NEXT: sbcs r3, r12, #0
-; ARMV7-NEXT: str r3, [sp, #56] @ 4-byte Spill
-; ARMV7-NEXT: sbcs r3, r7, #0
-; ARMV7-NEXT: ldr r7, [sp, #284]
-; ARMV7-NEXT: str r3, [sp, #52] @ 4-byte Spill
-; ARMV7-NEXT: sbc r3, r7, #0
-; ARMV7-NEXT: str r3, [sp, #48] @ 4-byte Spill
-; ARMV7-NEXT: ldr r12, [sp, #92] @ 4-byte Reload
-; ARMV7-NEXT: ldr r3, [sp, #72] @ 4-byte Reload
-; ARMV7-NEXT: .LBB1_4: @ %udiv-do-while3
-; ARMV7-NEXT: @ =>This Inner Loop Header: Depth=1
-; ARMV7-NEXT: str r3, [sp, #72] @ 4-byte Spill
-; ARMV7-NEXT: lsl r3, r11, #1
-; ARMV7-NEXT: ldr r11, [sp, #80] @ 4-byte Reload
-; ARMV7-NEXT: lsl r7, lr, #1
-; ARMV7-NEXT: ldr r0, [sp, #88] @ 4-byte Reload
-; ARMV7-NEXT: orr r3, r3, lr, lsr #31
-; ARMV7-NEXT: str r0, [sp, #64] @ 4-byte Spill
-; ARMV7-NEXT: lsl r0, r11, #1
-; ARMV7-NEXT: str r2, [sp, #76] @ 4-byte Spill
-; ARMV7-NEXT: orr r0, r0, r1, lsr #31
-; ARMV7-NEXT: lsl r1, r1, #1
-; ARMV7-NEXT: ldr r2, [sp, #60] @ 4-byte Reload
-; ARMV7-NEXT: orr r1, r1, r12, lsr #31
-; ARMV7-NEXT: orr r7, r7, r11, lsr #31
-; ARMV7-NEXT: str r5, [sp, #68] @ 4-byte Spill
-; ARMV7-NEXT: subs r4, r2, r1
-; ARMV7-NEXT: ldr r2, [sp, #56] @ 4-byte Reload
-; ARMV7-NEXT: str r12, [sp, #92] @ 4-byte Spill
-; ARMV7-NEXT: sbcs r4, r2, r0
-; ARMV7-NEXT: ldr r2, [sp, #52] @ 4-byte Reload
-; ARMV7-NEXT: sbcs r4, r2, r7
-; ARMV7-NEXT: ldr r2, [sp, #48] @ 4-byte Reload
-; ARMV7-NEXT: sbc r4, r2, r3
-; ARMV7-NEXT: ldr r2, [sp, #284]
-; ARMV7-NEXT: and r5, r2, r4, asr #31
-; ARMV7-NEXT: ldr r2, [sp, #280]
-; ARMV7-NEXT: and lr, r2, r4, asr #31
-; ARMV7-NEXT: ldr r2, [sp, #276]
-; ARMV7-NEXT: and r11, r2, r4, asr #31
-; ARMV7-NEXT: ldr r2, [sp, #272]
-; ARMV7-NEXT: and r12, r2, r4, asr #31
-; ARMV7-NEXT: mov r2, #1
-; ARMV7-NEXT: subs r1, r1, r12
-; ARMV7-NEXT: and r2, r2, r4, asr #31
-; ARMV7-NEXT: sbcs r0, r0, r11
-; ARMV7-NEXT: ldr r4, [sp, #84] @ 4-byte Reload
-; ARMV7-NEXT: sbcs lr, r7, lr
-; ARMV7-NEXT: str r2, [sp, #88] @ 4-byte Spill
-; ARMV7-NEXT: ldr r2, [sp, #68] @ 4-byte Reload
-; ARMV7-NEXT: sbc r11, r3, r5
-; ARMV7-NEXT: subs r4, r4, #1
-; ARMV7-NEXT: str r0, [sp, #80] @ 4-byte Spill
-; ARMV7-NEXT: ldr r0, [sp, #64] @ 4-byte Reload
-; ARMV7-NEXT: sbcs r9, r9, #0
-; ARMV7-NEXT: sbcs r6, r6, #0
-; ARMV7-NEXT: ldr r7, [sp, #76] @ 4-byte Reload
-; ARMV7-NEXT: orr r5, r0, r2, lsl #1
-; ARMV7-NEXT: ldr r0, [sp, #72] @ 4-byte Reload
-; ARMV7-NEXT: sbc r10, r10, #0
-; ARMV7-NEXT: str r4, [sp, #84] @ 4-byte Spill
-; ARMV7-NEXT: orr r3, r9, r10
-; ARMV7-NEXT: orr r4, r4, r6
-; ARMV7-NEXT: orrs r3, r4, r3
-; ARMV7-NEXT: lsl r4, r0, #1
-; ARMV7-NEXT: orr r2, r4, r2, lsr #31
-; ARMV7-NEXT: orr r3, r8, r2
-; ARMV7-NEXT: lsl r2, r7, #1
-; ARMV7-NEXT: orr r2, r2, r0, lsr #31
-; ARMV7-NEXT: ldr r0, [sp, #92] @ 4-byte Reload
-; ARMV7-NEXT: orr r2, r8, r2
-; ARMV7-NEXT: lsl r4, r0, #1
-; ARMV7-NEXT: orr r4, r4, r7, lsr #31
-; ARMV7-NEXT: orr r12, r8, r4
-; ARMV7-NEXT: mov r8, #0
-; ARMV7-NEXT: bne .LBB1_4
-; ARMV7-NEXT: .LBB1_5: @ %udiv-loop-exit2
-; ARMV7-NEXT: ldr r0, [sp, #88] @ 4-byte Reload
-; ARMV7-NEXT: lsl r1, r3, #1
-; ARMV7-NEXT: orr r6, r1, r5, lsr #31
-; ARMV7-NEXT: lsl r1, r2, #1
-; ARMV7-NEXT: ldr r11, [sp, #32] @ 4-byte Reload
-; ARMV7-NEXT: orr r4, r1, r3, lsr #31
-; ARMV7-NEXT: orr r7, r0, r5, lsl #1
-; ARMV7-NEXT: lsl r1, r12, #1
-; ARMV7-NEXT: orr r5, r1, r2, lsr #31
-; ARMV7-NEXT: .LBB1_6: @ %udiv-end1
-; ARMV7-NEXT: str r5, [sp, #48] @ 4-byte Spill
-; ARMV7-NEXT: ldr r5, [sp, #44] @ 4-byte Reload
-; ARMV7-NEXT: str r4, [sp, #20] @ 4-byte Spill
-; ARMV7-NEXT: ldr r4, [sp, #40] @ 4-byte Reload
-; ARMV7-NEXT: clz r0, r5
-; ARMV7-NEXT: ldmib sp, {r1, r2} @ 8-byte Folded Reload
-; ARMV7-NEXT: add r0, r0, #32
-; ARMV7-NEXT: cmp r4, #0
-; ARMV7-NEXT: clzne r0, r4
-; ARMV7-NEXT: cmp r11, #0
-; ARMV7-NEXT: ldr lr, [sp, #280]
-; ARMV7-NEXT: movne r2, r1
-; ARMV7-NEXT: orrs r1, r5, r4
-; ARMV7-NEXT: str r6, [sp, #16] @ 4-byte Spill
-; ARMV7-NEXT: ldr r6, [sp, #284]
-; ARMV7-NEXT: clz r1, lr
-; ARMV7-NEXT: ldr r12, [sp, #272]
-; ARMV7-NEXT: addeq r0, r2, #64
-; ARMV7-NEXT: str r7, [sp, #12] @ 4-byte Spill
-; ARMV7-NEXT: add r1, r1, #32
-; ARMV7-NEXT: ldr r7, [sp, #276]
-; ARMV7-NEXT: clz r2, r12
-; ARMV7-NEXT: cmp r6, #0
-; ARMV7-NEXT: add r2, r2, #32
-; ARMV7-NEXT: clzne r1, r6
-; ARMV7-NEXT: cmp r7, #0
-; ARMV7-NEXT: clzne r2, r7
-; ARMV7-NEXT: orrs r3, lr, r6
-; ARMV7-NEXT: addeq r1, r2, #64
-; ARMV7-NEXT: mov r2, #0
-; ARMV7-NEXT: subs r10, r1, r0
-; ARMV7-NEXT: orr r3, r11, r4
-; ARMV7-NEXT: sbcs r9, r2, #0
-; ARMV7-NEXT: sbcs r1, r2, #0
-; ARMV7-NEXT: str r1, [sp, #80] @ 4-byte Spill
-; ARMV7-NEXT: sbc r8, r2, #0
-; ARMV7-NEXT: rsbs r0, r10, #127
-; ARMV7-NEXT: rscs r0, r9, #0
-; ARMV7-NEXT: rscs r0, r1, #0
-; ARMV7-NEXT: orr r1, r7, r6
-; ARMV7-NEXT: rscs r0, r8, #0
-; ARMV7-NEXT: mov r7, r11
-; ARMV7-NEXT: orr r0, r12, lr
-; ARMV7-NEXT: ldr r12, [sp, #36] @ 4-byte Reload
-; ARMV7-NEXT: orr r0, r0, r1
-; ARMV7-NEXT: movwlo r2, #1
-; ARMV7-NEXT: orr r1, r12, r5
-; ARMV7-NEXT: clz r0, r0
-; ARMV7-NEXT: orr r1, r1, r3
-; ARMV7-NEXT: mov r3, r12
-; ARMV7-NEXT: clz r1, r1
-; ARMV7-NEXT: lsr r0, r0, #5
-; ARMV7-NEXT: lsr r1, r1, #5
-; ARMV7-NEXT: orr r0, r0, r1
-; ARMV7-NEXT: orrs r0, r0, r2
-; ARMV7-NEXT: movwne r5, #0
-; ARMV7-NEXT: movwne r4, #0
-; ARMV7-NEXT: movwne r3, #0
-; ARMV7-NEXT: movwne r7, #0
-; ARMV7-NEXT: str r5, [sp, #44] @ 4-byte Spill
-; ARMV7-NEXT: str r4, [sp, #40] @ 4-byte Spill
-; ARMV7-NEXT: bne .LBB1_15
-; ARMV7-NEXT: @ %bb.7: @ %udiv-end1
-; ARMV7-NEXT: ldr r1, [sp, #80] @ 4-byte Reload
-; ARMV7-NEXT: eor r0, r10, #127
-; ARMV7-NEXT: ldr lr, [sp, #264]
-; ARMV7-NEXT: orr r0, r0, r1
-; ARMV7-NEXT: orr r1, r9, r8
-; ARMV7-NEXT: orrs r0, r0, r1
-; ARMV7-NEXT: beq .LBB1_12
-; ARMV7-NEXT: @ %bb.8: @ %udiv-bb1
-; ARMV7-NEXT: mov r0, #0
-; ARMV7-NEXT: add r3, sp, #128
-; ARMV7-NEXT: str r0, [sp, #140]
-; ARMV7-NEXT: mov r1, #12
-; ARMV7-NEXT: str r0, [sp, #136]
-; ARMV7-NEXT: add r3, r3, #16
-; ARMV7-NEXT: str r0, [sp, #132]
-; ARMV7-NEXT: str r0, [sp, #128]
-; ARMV7-NEXT: ldr r0, [sp, #268]
-; ARMV7-NEXT: str r0, [sp, #156]
-; ARMV7-NEXT: rsb r0, r10, #127
-; ARMV7-NEXT: str r12, [sp, #144]
-; ARMV7-NEXT: and r1, r1, r0, lsr #3
-; ARMV7-NEXT: str r11, [sp, #148]
-; ARMV7-NEXT: str lr, [sp, #152]
-; ARMV7-NEXT: and r0, r0, #31
-; ARMV7-NEXT: ldr r1, [r3, -r1]!
-; ARMV7-NEXT: eor r2, r0, #31
-; ARMV7-NEXT: ldmib r3, {r5, r7}
-; ARMV7-NEXT: lsr r6, r1, #1
-; ARMV7-NEXT: ldr r3, [r3, #12]
-; ARMV7-NEXT: lsl r4, r5, r0
-; ARMV7-NEXT: orr r6, r4, r6, lsr r2
-; ARMV7-NEXT: lsr r4, r7, #1
-; ARMV7-NEXT: str r6, [sp, #84] @ 4-byte Spill
-; ARMV7-NEXT: lsl r3, r3, r0
-; ARMV7-NEXT: orr r3, r3, r4, lsr r2
-; ARMV7-NEXT: lsrs r4, r5, #1
-; ARMV7-NEXT: str r3, [sp, #92] @ 4-byte Spill
-; ARMV7-NEXT: lsl r3, r7, r0
-; ARMV7-NEXT: orr r5, r3, r4, lsr r2
-; ARMV7-NEXT: adds r4, r10, #1
-; ARMV7-NEXT: lsl r7, r1, r0
-; ARMV7-NEXT: adcs r0, r9, #0
-; ARMV7-NEXT: str r0, [sp, #72] @ 4-byte Spill
-; ARMV7-NEXT: mov r1, #0
-; ARMV7-NEXT: ldr r0, [sp, #80] @ 4-byte Reload
-; ARMV7-NEXT: adcs r2, r0, #0
-; ARMV7-NEXT: adcs r0, r8, #0
-; ARMV7-NEXT: str r0, [sp, #76] @ 4-byte Spill
-; ARMV7-NEXT: mov r0, #0
-; ARMV7-NEXT: str r0, [sp, #88] @ 4-byte Spill
-; ARMV7-NEXT: adcs r0, r1, #0
-; ARMV7-NEXT: bne .LBB1_14
-; ARMV7-NEXT: @ %bb.9: @ %udiv-preheader
-; ARMV7-NEXT: mov r0, #0
-; ARMV7-NEXT: add r6, sp, #96
-; ARMV7-NEXT: str r0, [sp, #124]
-; ARMV7-NEXT: and r10, r4, #31
-; ARMV7-NEXT: str r0, [sp, #120]
-; ARMV7-NEXT: str r0, [sp, #116]
-; ARMV7-NEXT: str r0, [sp, #112]
-; ARMV7-NEXT: ldr r0, [sp, #264]
-; ARMV7-NEXT: str r0, [sp, #104]
-; ARMV7-NEXT: ldr r0, [sp, #268]
-; ARMV7-NEXT: str r0, [sp, #108]
-; ARMV7-NEXT: ubfx r0, r4, #5, #2
-; ARMV7-NEXT: str r12, [sp, #96]
-; ARMV7-NEXT: add r3, r6, r0, lsl #2
-; ARMV7-NEXT: str r11, [sp, #100]
-; ARMV7-NEXT: ldr r1, [r3, #8]
-; ARMV7-NEXT: ldr r12, [r3, #4]
-; ARMV7-NEXT: ldr r9, [r3, #12]
-; ARMV7-NEXT: str r4, [sp, #80] @ 4-byte Spill
-; ARMV7-NEXT: eor r4, r10, #31
-; ARMV7-NEXT: ldr r0, [r6, r0, lsl #2]
-; ARMV7-NEXT: lsr r11, r1, r10
-; ARMV7-NEXT: lsl r3, r9, #1
-; ARMV7-NEXT: lsl r1, r1, #1
-; ARMV7-NEXT: orr lr, r11, r3, lsl r4
-; ARMV7-NEXT: lsr r11, r12, r10
-; ARMV7-NEXT: orr r3, r11, r1, lsl r4
-; ARMV7-NEXT: lsr r0, r0, r10
-; ARMV7-NEXT: lsl r1, r12, #1
-; ARMV7-NEXT: orr r8, r0, r1, lsl r4
-; ARMV7-NEXT: ldr r0, [sp, #272]
-; ARMV7-NEXT: lsr r1, r9, r10
-; ARMV7-NEXT: ldr r6, [sp, #92] @ 4-byte Reload
-; ARMV7-NEXT: subs r0, r0, #1
-; ARMV7-NEXT: str r0, [sp, #64] @ 4-byte Spill
-; ARMV7-NEXT: ldr r0, [sp, #276]
-; ARMV7-NEXT: mov r10, #0
-; ARMV7-NEXT: ldr r9, [sp, #72] @ 4-byte Reload
-; ARMV7-NEXT: sbcs r0, r0, #0
-; ARMV7-NEXT: str r0, [sp, #60] @ 4-byte Spill
-; ARMV7-NEXT: ldr r0, [sp, #280]
-; ARMV7-NEXT: ldr r11, [sp, #76] @ 4-byte Reload
-; ARMV7-NEXT: sbcs r0, r0, #0
-; ARMV7-NEXT: str r0, [sp, #56] @ 4-byte Spill
-; ARMV7-NEXT: ldr r0, [sp, #284]
-; ARMV7-NEXT: sbc r0, r0, #0
-; ARMV7-NEXT: str r0, [sp, #52] @ 4-byte Spill
-; ARMV7-NEXT: .LBB1_10: @ %udiv-do-while
-; ARMV7-NEXT: @ =>This Inner Loop Header: Depth=1
-; ARMV7-NEXT: ldr r4, [sp, #88] @ 4-byte Reload
-; ARMV7-NEXT: str r4, [sp, #68] @ 4-byte Spill
-; ARMV7-NEXT: lsl r4, r8, #1
-; ARMV7-NEXT: ldr r0, [sp, #64] @ 4-byte Reload
-; ARMV7-NEXT: orr r4, r4, r6, lsr #31
-; ARMV7-NEXT: str r5, [sp, #76] @ 4-byte Spill
-; ARMV7-NEXT: ldr r5, [sp, #84] @ 4-byte Reload
-; ARMV7-NEXT: str r7, [sp, #84] @ 4-byte Spill
-; ARMV7-NEXT: lsl r7, r1, #1
-; ARMV7-NEXT: lsl r1, lr, #1
-; ARMV7-NEXT: str r5, [sp, #72] @ 4-byte Spill
-; ARMV7-NEXT: orr r1, r1, r3, lsr #31
-; ARMV7-NEXT: lsl r3, r3, #1
-; ARMV7-NEXT: subs r5, r0, r4
-; ARMV7-NEXT: ldr r0, [sp, #60] @ 4-byte Reload
-; ARMV7-NEXT: orr r3, r3, r8, lsr #31
-; ARMV7-NEXT: orr r7, r7, lr, lsr #31
-; ARMV7-NEXT: sbcs r5, r0, r3
-; ARMV7-NEXT: ldr r0, [sp, #56] @ 4-byte Reload
-; ARMV7-NEXT: str r6, [sp, #92] @ 4-byte Spill
-; ARMV7-NEXT: sbcs r5, r0, r1
-; ARMV7-NEXT: ldr r0, [sp, #52] @ 4-byte Reload
-; ARMV7-NEXT: sbc r5, r0, r7
-; ARMV7-NEXT: ldr r0, [sp, #284]
-; ARMV7-NEXT: and r6, r0, r5, asr #31
-; ARMV7-NEXT: ldr r0, [sp, #280]
-; ARMV7-NEXT: and lr, r0, r5, asr #31
-; ARMV7-NEXT: ldr r0, [sp, #276]
-; ARMV7-NEXT: and r12, r0, r5, asr #31
-; ARMV7-NEXT: ldr r0, [sp, #272]
-; ARMV7-NEXT: and r8, r0, r5, asr #31
-; ARMV7-NEXT: mov r0, #1
-; ARMV7-NEXT: subs r8, r4, r8
-; ARMV7-NEXT: and r0, r0, r5, asr #31
-; ARMV7-NEXT: sbcs r3, r3, r12
-; ARMV7-NEXT: ldr r5, [sp, #80] @ 4-byte Reload
-; ARMV7-NEXT: sbcs lr, r1, lr
-; ARMV7-NEXT: str r0, [sp, #88] @ 4-byte Spill
-; ARMV7-NEXT: sbc r12, r7, r6
-; ARMV7-NEXT: subs r5, r5, #1
-; ARMV7-NEXT: sbcs r9, r9, #0
-; ARMV7-NEXT: ldr r0, [sp, #84] @ 4-byte Reload
-; ARMV7-NEXT: sbcs r2, r2, #0
-; ARMV7-NEXT: ldr r4, [sp, #72] @ 4-byte Reload
-; ARMV7-NEXT: sbc r11, r11, #0
-; ARMV7-NEXT: str r5, [sp, #80] @ 4-byte Spill
-; ARMV7-NEXT: orr r7, r9, r11
-; ARMV7-NEXT: orr r5, r5, r2
-; ARMV7-NEXT: ldr r1, [sp, #68] @ 4-byte Reload
-; ARMV7-NEXT: orrs r7, r5, r7
-; ARMV7-NEXT: lsl r5, r4, #1
-; ARMV7-NEXT: ldr r6, [sp, #76] @ 4-byte Reload
-; ARMV7-NEXT: orr r7, r1, r0, lsl #1
-; ARMV7-NEXT: orr r0, r5, r0, lsr #31
-; ARMV7-NEXT: orr r0, r10, r0
-; ARMV7-NEXT: str r0, [sp, #84] @ 4-byte Spill
-; ARMV7-NEXT: ldr r0, [sp, #92] @ 4-byte Reload
-; ARMV7-NEXT: lsl r5, r6, #1
-; ARMV7-NEXT: orr r5, r5, r4, lsr #31
-; ARMV7-NEXT: mov r1, r12
-; ARMV7-NEXT: orr r5, r10, r5
-; ARMV7-NEXT: lsl r0, r0, #1
-; ARMV7-NEXT: orr r0, r0, r6, lsr #31
-; ARMV7-NEXT: orr r6, r10, r0
-; ARMV7-NEXT: mov r10, #0
-; ARMV7-NEXT: bne .LBB1_10
-; ARMV7-NEXT: .LBB1_11: @ %udiv-loop-exit
-; ARMV7-NEXT: ldr r0, [sp, #88] @ 4-byte Reload
-; ARMV7-NEXT: ldr r11, [sp, #32] @ 4-byte Reload
-; ARMV7-NEXT: orr r3, r0, r7, lsl #1
-; ARMV7-NEXT: ldr r0, [sp, #84] @ 4-byte Reload
-; ARMV7-NEXT: ldr r12, [sp, #36] @ 4-byte Reload
-; ARMV7-NEXT: ldr lr, [sp, #264]
-; ARMV7-NEXT: lsl r1, r0, #1
-; ARMV7-NEXT: orr r7, r1, r7, lsr #31
-; ARMV7-NEXT: lsl r1, r5, #1
-; ARMV7-NEXT: orr r0, r1, r0, lsr #31
-; ARMV7-NEXT: str r0, [sp, #44] @ 4-byte Spill
-; ARMV7-NEXT: lsl r0, r6, #1
-; ARMV7-NEXT: orr r0, r0, r5, lsr #31
-; ARMV7-NEXT: str r0, [sp, #40] @ 4-byte Spill
-; ARMV7-NEXT: .LBB1_12: @ %udiv-end
-; ARMV7-NEXT: ldr r10, [sp, #272]
-; ARMV7-NEXT: mov r6, #0
-; ARMV7-NEXT: ldr r4, [sp, #12] @ 4-byte Reload
-; ARMV7-NEXT: mov r2, #0
-; ARMV7-NEXT: ldr r8, [sp, #276]
-; ARMV7-NEXT: umull r0, r1, r10, r3
-; ARMV7-NEXT: umlal r1, r6, r8, r3
-; ARMV7-NEXT: str r0, [sp, #92] @ 4-byte Spill
-; ARMV7-NEXT: ldr r0, [sp, #24] @ 4-byte Reload
-; ARMV7-NEXT: str r4, [r0]
-; ARMV7-NEXT: ldr r4, [sp, #16] @ 4-byte Reload
-; ARMV7-NEXT: str r4, [r0, #4]
-; ARMV7-NEXT: ldr r4, [sp, #20] @ 4-byte Reload
-; ARMV7-NEXT: str r4, [r0, #8]
-; ARMV7-NEXT: ldr r4, [sp, #48] @ 4-byte Reload
-; ARMV7-NEXT: str r4, [r0, #12]
-; ARMV7-NEXT: umull r5, r4, r10, r7
-; ARMV7-NEXT: ldr r0, [sp, #280]
-; ARMV7-NEXT: adds r9, r5, r1
-; ARMV7-NEXT: ldr r1, [sp, #284]
-; ARMV7-NEXT: adcs r6, r6, r4
-; ARMV7-NEXT: umull r5, r4, r3, r0
-; ARMV7-NEXT: adc r2, r2, #0
-; ARMV7-NEXT: mla r3, r3, r1, r4
-; ARMV7-NEXT: umlal r6, r2, r8, r7
-; ARMV7-NEXT: mla r3, r7, r0, r3
-; ARMV7-NEXT: ldr r0, [sp, #44] @ 4-byte Reload
-; ARMV7-NEXT: umull r7, r4, r0, r10
-; ARMV7-NEXT: mla r4, r0, r8, r4
-; ARMV7-NEXT: ldr r0, [sp, #40] @ 4-byte Reload
-; ARMV7-NEXT: adds r7, r7, r5
-; ARMV7-NEXT: mla r4, r0, r10, r4
-; ARMV7-NEXT: ldr r0, [sp, #92] @ 4-byte Reload
-; ARMV7-NEXT: adc r3, r4, r3
-; ARMV7-NEXT: adds r7, r6, r7
-; ARMV7-NEXT: adc r2, r2, r3
-; ARMV7-NEXT: subs r0, r12, r0
-; ARMV7-NEXT: sbcs r1, r11, r9
-; ARMV7-NEXT: sbcs r3, lr, r7
-; ARMV7-NEXT: ldr r7, [sp, #268]
-; ARMV7-NEXT: sbc r2, r7, r2
-; ARMV7-NEXT: ldr r7, [sp, #28] @ 4-byte Reload
-; ARMV7-NEXT: stm r7, {r0, r1, r3}
-; ARMV7-NEXT: str r2, [r7, #12]
-; ARMV7-NEXT: add sp, sp, #228
-; ARMV7-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
-; ARMV7-NEXT: .LBB1_13:
-; ARMV7-NEXT: ldr r12, [sp, #92] @ 4-byte Reload
-; ARMV7-NEXT: ldr r3, [sp, #72] @ 4-byte Reload
-; ARMV7-NEXT: ldr r5, [sp, #68] @ 4-byte Reload
-; ARMV7-NEXT: ldr r2, [sp, #64] @ 4-byte Reload
-; ARMV7-NEXT: b .LBB1_5
-; ARMV7-NEXT: .LBB1_14:
-; ARMV7-NEXT: ldr r6, [sp, #92] @ 4-byte Reload
-; ARMV7-NEXT: b .LBB1_11
-; ARMV7-NEXT: .LBB1_15:
-; ARMV7-NEXT: ldr lr, [sp, #264]
-; ARMV7-NEXT: b .LBB1_12
+; ARMV7-NOT: __udivmodti4
+; ARMV7-NOT: __udivti3
+; ARMV7-NOT: __umodti3
+; ARMV7: pop{{.*}}pc}
%q = udiv i128 %n, %d
%r = urem i128 %n, %d
store i128 %q, ptr %q_out
diff --git a/llvm/test/CodeGen/RISCV/i128-divrem-libcall.ll b/llvm/test/CodeGen/RISCV/i128-divrem-libcall.ll
index 952a989a335d7..f33d8e307bea1 100644
--- a/llvm/test/CodeGen/RISCV/i128-divrem-libcall.ll
+++ b/llvm/test/CodeGen/RISCV/i128-divrem-libcall.ll
@@ -1,2666 +1,34 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
; RUN: llc < %s -mtriple=riscv64-linux-gnu | FileCheck %s --check-prefix=RV64
; RUN: llc < %s -mtriple=riscv64-linux-gnu -mattr=+m | FileCheck %s --check-prefix=RV64
; RUN: llc < %s -mtriple=riscv32-linux-gnu | FileCheck %s --check-prefix=RV32I
; RUN: llc < %s -mtriple=riscv32-linux-gnu -mattr=+m | FileCheck %s --check-prefix=RV32M
+; RV64 lowers sdiv+srem to a single fused __divmodti4 libcall. RV32 expands the
+; i128 division inline rather than calling a library routine, so for RV32 we
+; only assert the absence of the div/mod libcalls.
define void @sdivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
; RV64-LABEL: sdivrem_i128:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -48
-; RV64-NEXT: .cfi_def_cfa_offset 48
-; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: mv a6, a4
-; RV64-NEXT: mv s0, a1
-; RV64-NEXT: mv s1, a0
-; RV64-NEXT: mv a4, sp
-; RV64-NEXT: mv a0, a2
-; RV64-NEXT: mv a1, a3
-; RV64-NEXT: mv a2, a6
-; RV64-NEXT: mv a3, a5
-; RV64-NEXT: call __divmodti4
-; RV64-NEXT: ld a2, 0(sp)
-; RV64-NEXT: ld a3, 8(sp)
-; RV64-NEXT: sd a0, 0(s1)
-; RV64-NEXT: sd a1, 8(s1)
-; RV64-NEXT: sd a2, 0(s0)
-; RV64-NEXT: sd a3, 8(s0)
-; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT: .cfi_restore ra
-; RV64-NEXT: .cfi_restore s0
-; RV64-NEXT: .cfi_restore s1
-; RV64-NEXT: addi sp, sp, 48
-; RV64-NEXT: .cfi_def_cfa_offset 0
-; RV64-NEXT: ret
+; RV64: mv a4, sp
+; RV64: call __divmodti4
+; RV64: ld a2, 0(sp)
+; RV64: ld a3, 8(sp)
+; RV64: sd a0, 0(s1)
+; RV64: sd a1, 8(s1)
+; RV64: sd a2, 0(s0)
+; RV64: sd a3, 8(s0)
+; RV64: ret
;
; RV32I-LABEL: sdivrem_i128:
-; RV32I: # %bb.0: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: addi sp, sp, -288
-; RV32I-NEXT: .cfi_def_cfa_offset 288
-; RV32I-NEXT: sw ra, 284(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s0, 280(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 276(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 272(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 268(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s4, 264(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s5, 260(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s6, 256(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s7, 252(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s8, 248(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s9, 244(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s10, 240(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s11, 236(sp) # 4-byte Folded Spill
-; RV32I-NEXT: .cfi_offset ra, -4
-; RV32I-NEXT: .cfi_offset s0, -8
-; RV32I-NEXT: .cfi_offset s1, -12
-; RV32I-NEXT: .cfi_offset s2, -16
-; RV32I-NEXT: .cfi_offset s3, -20
-; RV32I-NEXT: .cfi_offset s4, -24
-; RV32I-NEXT: .cfi_offset s5, -28
-; RV32I-NEXT: .cfi_offset s6, -32
-; RV32I-NEXT: .cfi_offset s7, -36
-; RV32I-NEXT: .cfi_offset s8, -40
-; RV32I-NEXT: .cfi_offset s9, -44
-; RV32I-NEXT: .cfi_offset s10, -48
-; RV32I-NEXT: .cfi_offset s11, -52
-; RV32I-NEXT: sw a1, 52(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw a0, 48(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lw ra, 0(a2)
-; RV32I-NEXT: lw s3, 4(a2)
-; RV32I-NEXT: lw s11, 8(a2)
-; RV32I-NEXT: lw t4, 0(a3)
-; RV32I-NEXT: lw t3, 4(a3)
-; RV32I-NEXT: lw a7, 8(a3)
-; RV32I-NEXT: lw t0, 12(a3)
-; RV32I-NEXT: lw t2, 12(a2)
-; RV32I-NEXT: or a0, ra, s3
-; RV32I-NEXT: snez a1, s11
-; RV32I-NEXT: snez a4, a0
-; RV32I-NEXT: add a1, t2, a1
-; RV32I-NEXT: snez t1, ra
-; RV32I-NEXT: sw a1, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: bltz t2, .LBB0_2
-; RV32I-NEXT: # %bb.1: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: mv s4, t2
-; RV32I-NEXT: mv t6, s11
-; RV32I-NEXT: mv s5, s3
-; RV32I-NEXT: mv s6, ra
-; RV32I-NEXT: j .LBB0_3
-; RV32I-NEXT: .LBB0_2:
-; RV32I-NEXT: neg a0, s11
-; RV32I-NEXT: neg a1, a1
-; RV32I-NEXT: neg a2, s3
-; RV32I-NEXT: sltu a3, a0, a4
-; RV32I-NEXT: sub t6, a0, a4
-; RV32I-NEXT: sub s5, a2, t1
-; RV32I-NEXT: sub s4, a1, a3
-; RV32I-NEXT: neg s6, ra
-; RV32I-NEXT: .LBB0_3: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: or a0, t4, t3
-; RV32I-NEXT: snez a1, a7
-; RV32I-NEXT: snez s2, a0
-; RV32I-NEXT: add a1, t0, a1
-; RV32I-NEXT: snez a5, t4
-; RV32I-NEXT: sw a5, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw a1, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: bltz t0, .LBB0_5
-; RV32I-NEXT: # %bb.4: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: mv s9, t0
-; RV32I-NEXT: mv s8, a7
-; RV32I-NEXT: mv t5, t3
-; RV32I-NEXT: sw t4, 88(sp) # 4-byte Folded Spill
-; RV32I-NEXT: j .LBB0_6
-; RV32I-NEXT: .LBB0_5:
-; RV32I-NEXT: neg a0, a7
-; RV32I-NEXT: neg a1, a1
-; RV32I-NEXT: neg a2, t3
-; RV32I-NEXT: sltu a3, a0, s2
-; RV32I-NEXT: sub s8, a0, s2
-; RV32I-NEXT: sub t5, a2, a5
-; RV32I-NEXT: sub s9, a1, a3
-; RV32I-NEXT: neg a0, t4
-; RV32I-NEXT: sw a0, 88(sp) # 4-byte Folded Spill
-; RV32I-NEXT: .LBB0_6: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: lui a0, 349525
-; RV32I-NEXT: lui a1, 209715
-; RV32I-NEXT: lui a2, 61681
-; RV32I-NEXT: addi s7, a0, 1365
-; RV32I-NEXT: addi s1, a1, 819
-; RV32I-NEXT: addi a1, a2, -241
-; RV32I-NEXT: bnez t5, .LBB0_8
-; RV32I-NEXT: # %bb.7: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: lw a2, 88(sp) # 4-byte Folded Reload
-; RV32I-NEXT: srli a0, a2, 1
-; RV32I-NEXT: or a0, a2, a0
-; RV32I-NEXT: srli a2, a0, 2
-; RV32I-NEXT: or a0, a0, a2
-; RV32I-NEXT: srli a2, a0, 4
-; RV32I-NEXT: or a0, a0, a2
-; RV32I-NEXT: srli a2, a0, 8
-; RV32I-NEXT: or a0, a0, a2
-; RV32I-NEXT: srli a2, a0, 16
-; RV32I-NEXT: or a0, a0, a2
-; RV32I-NEXT: not a0, a0
-; RV32I-NEXT: srli a2, a0, 1
-; RV32I-NEXT: and a2, a2, s7
-; RV32I-NEXT: sub a0, a0, a2
-; RV32I-NEXT: and a2, a0, s1
-; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s1
-; RV32I-NEXT: add a0, a2, a0
-; RV32I-NEXT: srli a2, a0, 4
-; RV32I-NEXT: add a0, a0, a2
-; RV32I-NEXT: and a0, a0, a1
-; RV32I-NEXT: slli a2, a0, 8
-; RV32I-NEXT: add a0, a0, a2
-; RV32I-NEXT: slli a2, a0, 16
-; RV32I-NEXT: add a0, a0, a2
-; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: addi a5, a0, 32
-; RV32I-NEXT: j .LBB0_9
-; RV32I-NEXT: .LBB0_8:
-; RV32I-NEXT: srli a0, t5, 1
-; RV32I-NEXT: or a0, t5, a0
-; RV32I-NEXT: srli a2, a0, 2
-; RV32I-NEXT: or a0, a0, a2
-; RV32I-NEXT: srli a2, a0, 4
-; RV32I-NEXT: or a0, a0, a2
-; RV32I-NEXT: srli a2, a0, 8
-; RV32I-NEXT: or a0, a0, a2
-; RV32I-NEXT: srli a2, a0, 16
-; RV32I-NEXT: or a0, a0, a2
-; RV32I-NEXT: not a0, a0
-; RV32I-NEXT: srli a2, a0, 1
-; RV32I-NEXT: and a2, a2, s7
-; RV32I-NEXT: sub a0, a0, a2
-; RV32I-NEXT: and a2, a0, s1
-; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s1
-; RV32I-NEXT: add a0, a2, a0
-; RV32I-NEXT: srli a2, a0, 4
-; RV32I-NEXT: add a0, a0, a2
-; RV32I-NEXT: and a0, a0, a1
-; RV32I-NEXT: slli a2, a0, 8
-; RV32I-NEXT: add a0, a0, a2
-; RV32I-NEXT: slli a2, a0, 16
-; RV32I-NEXT: add a0, a0, a2
-; RV32I-NEXT: srli a5, a0, 24
-; RV32I-NEXT: .LBB0_9: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: sw a4, 32(sp) # 4-byte Folded Spill
-; RV32I-NEXT: or a6, s8, s9
-; RV32I-NEXT: bnez s9, .LBB0_11
-; RV32I-NEXT: # %bb.10: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: srli a0, s8, 1
-; RV32I-NEXT: or a0, s8, a0
-; RV32I-NEXT: srli a2, a0, 2
-; RV32I-NEXT: or a0, a0, a2
-; RV32I-NEXT: srli a2, a0, 4
-; RV32I-NEXT: or a0, a0, a2
-; RV32I-NEXT: srli a2, a0, 8
-; RV32I-NEXT: or a0, a0, a2
-; RV32I-NEXT: srli a2, a0, 16
-; RV32I-NEXT: or a0, a0, a2
-; RV32I-NEXT: not a0, a0
-; RV32I-NEXT: srli a2, a0, 1
-; RV32I-NEXT: and a2, a2, s7
-; RV32I-NEXT: sub a0, a0, a2
-; RV32I-NEXT: and a2, a0, s1
-; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s1
-; RV32I-NEXT: add a0, a2, a0
-; RV32I-NEXT: srli a2, a0, 4
-; RV32I-NEXT: add a0, a0, a2
-; RV32I-NEXT: and a0, a0, a1
-; RV32I-NEXT: slli a2, a0, 8
-; RV32I-NEXT: add a0, a0, a2
-; RV32I-NEXT: slli a2, a0, 16
-; RV32I-NEXT: add a0, a0, a2
-; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: addi a4, a0, 32
-; RV32I-NEXT: j .LBB0_12
-; RV32I-NEXT: .LBB0_11:
-; RV32I-NEXT: srli a0, s9, 1
-; RV32I-NEXT: or a0, s9, a0
-; RV32I-NEXT: srli a2, a0, 2
-; RV32I-NEXT: or a0, a0, a2
-; RV32I-NEXT: srli a2, a0, 4
-; RV32I-NEXT: or a0, a0, a2
-; RV32I-NEXT: srli a2, a0, 8
-; RV32I-NEXT: or a0, a0, a2
-; RV32I-NEXT: srli a2, a0, 16
-; RV32I-NEXT: or a0, a0, a2
-; RV32I-NEXT: not a0, a0
-; RV32I-NEXT: srli a2, a0, 1
-; RV32I-NEXT: and a2, a2, s7
-; RV32I-NEXT: sub a0, a0, a2
-; RV32I-NEXT: and a2, a0, s1
-; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, s1
-; RV32I-NEXT: add a0, a2, a0
-; RV32I-NEXT: srli a2, a0, 4
-; RV32I-NEXT: add a0, a0, a2
-; RV32I-NEXT: and a0, a0, a1
-; RV32I-NEXT: slli a2, a0, 8
-; RV32I-NEXT: add a0, a0, a2
-; RV32I-NEXT: slli a2, a0, 16
-; RV32I-NEXT: add a0, a0, a2
-; RV32I-NEXT: srli a4, a0, 24
-; RV32I-NEXT: .LBB0_12: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: sw t0, 100(sp) # 4-byte Folded Spill
-; RV32I-NEXT: addi a0, a5, 64
-; RV32I-NEXT: bnez a6, .LBB0_14
-; RV32I-NEXT: # %bb.13: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: mv a4, a0
-; RV32I-NEXT: .LBB0_14: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: snez s0, a6
-; RV32I-NEXT: sw a7, 44(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw t1, 24(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: bnez s5, .LBB0_16
-; RV32I-NEXT: # %bb.15: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: srli a2, s6, 1
-; RV32I-NEXT: or a2, s6, a2
-; RV32I-NEXT: srli a3, a2, 2
-; RV32I-NEXT: or a2, a2, a3
-; RV32I-NEXT: srli a3, a2, 4
-; RV32I-NEXT: or a2, a2, a3
-; RV32I-NEXT: srli a3, a2, 8
-; RV32I-NEXT: or a2, a2, a3
-; RV32I-NEXT: srli a3, a2, 16
-; RV32I-NEXT: or a2, a2, a3
-; RV32I-NEXT: not a2, a2
-; RV32I-NEXT: srli a3, a2, 1
-; RV32I-NEXT: and a3, a3, s7
-; RV32I-NEXT: sub a2, a2, a3
-; RV32I-NEXT: and a3, a2, s1
-; RV32I-NEXT: srli a2, a2, 2
-; RV32I-NEXT: and a2, a2, s1
-; RV32I-NEXT: add a2, a3, a2
-; RV32I-NEXT: srli a3, a2, 4
-; RV32I-NEXT: add a2, a2, a3
-; RV32I-NEXT: and a2, a2, a1
-; RV32I-NEXT: slli a3, a2, 8
-; RV32I-NEXT: add a2, a2, a3
-; RV32I-NEXT: slli a3, a2, 16
-; RV32I-NEXT: add a2, a2, a3
-; RV32I-NEXT: srli a2, a2, 24
-; RV32I-NEXT: addi a2, a2, 32
-; RV32I-NEXT: j .LBB0_17
-; RV32I-NEXT: .LBB0_16:
-; RV32I-NEXT: srli a2, s5, 1
-; RV32I-NEXT: or a2, s5, a2
-; RV32I-NEXT: srli a3, a2, 2
-; RV32I-NEXT: or a2, a2, a3
-; RV32I-NEXT: srli a3, a2, 4
-; RV32I-NEXT: or a2, a2, a3
-; RV32I-NEXT: srli a3, a2, 8
-; RV32I-NEXT: or a2, a2, a3
-; RV32I-NEXT: srli a3, a2, 16
-; RV32I-NEXT: or a2, a2, a3
-; RV32I-NEXT: not a2, a2
-; RV32I-NEXT: srli a3, a2, 1
-; RV32I-NEXT: and a3, a3, s7
-; RV32I-NEXT: sub a2, a2, a3
-; RV32I-NEXT: and a3, a2, s1
-; RV32I-NEXT: srli a2, a2, 2
-; RV32I-NEXT: and a2, a2, s1
-; RV32I-NEXT: add a2, a3, a2
-; RV32I-NEXT: srli a3, a2, 4
-; RV32I-NEXT: add a2, a2, a3
-; RV32I-NEXT: and a2, a2, a1
-; RV32I-NEXT: slli a3, a2, 8
-; RV32I-NEXT: add a2, a2, a3
-; RV32I-NEXT: slli a3, a2, 16
-; RV32I-NEXT: add a2, a2, a3
-; RV32I-NEXT: srli a2, a2, 24
-; RV32I-NEXT: .LBB0_17: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: sw s8, 84(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lw a3, 88(sp) # 4-byte Folded Reload
-; RV32I-NEXT: or a6, a3, s8
-; RV32I-NEXT: sw s9, 80(sp) # 4-byte Folded Spill
-; RV32I-NEXT: or a7, t5, s9
-; RV32I-NEXT: or t0, s6, t6
-; RV32I-NEXT: or t1, s5, s4
-; RV32I-NEXT: sltu a5, a0, a5
-; RV32I-NEXT: addi s0, s0, -1
-; RV32I-NEXT: addi a0, a2, 64
-; RV32I-NEXT: or s2, t6, s4
-; RV32I-NEXT: sltu s8, a0, a2
-; RV32I-NEXT: snez s9, s2
-; RV32I-NEXT: addi s9, s9, -1
-; RV32I-NEXT: bnez s4, .LBB0_19
-; RV32I-NEXT: # %bb.18: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: srli a2, t6, 1
-; RV32I-NEXT: or a2, t6, a2
-; RV32I-NEXT: srli a3, a2, 2
-; RV32I-NEXT: or a2, a2, a3
-; RV32I-NEXT: srli a3, a2, 4
-; RV32I-NEXT: or a2, a2, a3
-; RV32I-NEXT: srli a3, a2, 8
-; RV32I-NEXT: or a2, a2, a3
-; RV32I-NEXT: srli a3, a2, 16
-; RV32I-NEXT: or a2, a2, a3
-; RV32I-NEXT: not a2, a2
-; RV32I-NEXT: srli a3, a2, 1
-; RV32I-NEXT: and a3, a3, s7
-; RV32I-NEXT: sub a2, a2, a3
-; RV32I-NEXT: and a3, a2, s1
-; RV32I-NEXT: srli a2, a2, 2
-; RV32I-NEXT: and a2, a2, s1
-; RV32I-NEXT: add a2, a3, a2
-; RV32I-NEXT: srli a3, a2, 4
-; RV32I-NEXT: add a2, a2, a3
-; RV32I-NEXT: and a1, a2, a1
-; RV32I-NEXT: slli a2, a1, 8
-; RV32I-NEXT: add a1, a1, a2
-; RV32I-NEXT: slli a2, a1, 16
-; RV32I-NEXT: add a1, a1, a2
-; RV32I-NEXT: srli a1, a1, 24
-; RV32I-NEXT: addi a2, a1, 32
-; RV32I-NEXT: j .LBB0_20
-; RV32I-NEXT: .LBB0_19:
-; RV32I-NEXT: srli a2, s4, 1
-; RV32I-NEXT: or a2, s4, a2
-; RV32I-NEXT: srli a3, a2, 2
-; RV32I-NEXT: or a2, a2, a3
-; RV32I-NEXT: srli a3, a2, 4
-; RV32I-NEXT: or a2, a2, a3
-; RV32I-NEXT: srli a3, a2, 8
-; RV32I-NEXT: or a2, a2, a3
-; RV32I-NEXT: srli a3, a2, 16
-; RV32I-NEXT: or a2, a2, a3
-; RV32I-NEXT: not a2, a2
-; RV32I-NEXT: srli a3, a2, 1
-; RV32I-NEXT: and a3, a3, s7
-; RV32I-NEXT: sub a2, a2, a3
-; RV32I-NEXT: and a3, a2, s1
-; RV32I-NEXT: srli a2, a2, 2
-; RV32I-NEXT: and a2, a2, s1
-; RV32I-NEXT: add a2, a3, a2
-; RV32I-NEXT: srli a3, a2, 4
-; RV32I-NEXT: add a2, a2, a3
-; RV32I-NEXT: and a1, a2, a1
-; RV32I-NEXT: slli a2, a1, 8
-; RV32I-NEXT: add a1, a1, a2
-; RV32I-NEXT: slli a2, a1, 16
-; RV32I-NEXT: add a1, a1, a2
-; RV32I-NEXT: srli a2, a1, 24
-; RV32I-NEXT: .LBB0_20: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: or a3, a6, a7
-; RV32I-NEXT: or a6, t0, t1
-; RV32I-NEXT: and a5, s0, a5
-; RV32I-NEXT: and a1, s9, s8
-; RV32I-NEXT: bnez s2, .LBB0_22
-; RV32I-NEXT: # %bb.21: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: mv a2, a0
-; RV32I-NEXT: .LBB0_22: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: seqz a0, a3
-; RV32I-NEXT: seqz a3, a6
-; RV32I-NEXT: sltu a7, a4, a2
-; RV32I-NEXT: sub t0, a5, a1
-; RV32I-NEXT: mv a6, a7
-; RV32I-NEXT: lw t1, 100(sp) # 4-byte Folded Reload
-; RV32I-NEXT: beq a5, a1, .LBB0_24
-; RV32I-NEXT: # %bb.23: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: sltu a6, a5, a1
-; RV32I-NEXT: .LBB0_24: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: sub a1, t0, a7
-; RV32I-NEXT: or a3, a0, a3
-; RV32I-NEXT: neg a0, a6
-; RV32I-NEXT: seqz s10, a6
-; RV32I-NEXT: addi s10, s10, -1
-; RV32I-NEXT: sub a2, a4, a2
-; RV32I-NEXT: or a5, a0, s10
-; RV32I-NEXT: xor a4, t1, t2
-; RV32I-NEXT: beqz a5, .LBB0_26
-; RV32I-NEXT: # %bb.25: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: snez a5, a5
-; RV32I-NEXT: j .LBB0_27
-; RV32I-NEXT: .LBB0_26:
-; RV32I-NEXT: snez a5, a1
-; RV32I-NEXT: sltiu a6, a2, 128
-; RV32I-NEXT: xori a6, a6, 1
-; RV32I-NEXT: or a5, a6, a5
-; RV32I-NEXT: .LBB0_27: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: srai a4, a4, 31
-; RV32I-NEXT: sw a4, 60(sp) # 4-byte Folded Spill
-; RV32I-NEXT: or a5, a3, a5
-; RV32I-NEXT: addi a4, a5, -1
-; RV32I-NEXT: and s8, a4, s4
-; RV32I-NEXT: and s7, a4, t6
-; RV32I-NEXT: and a3, a4, s5
-; RV32I-NEXT: and a4, a4, s6
-; RV32I-NEXT: sw t2, 56(sp) # 4-byte Folded Spill
-; RV32I-NEXT: bnez a5, .LBB0_38
-; RV32I-NEXT: # %bb.28: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: xori a5, a2, 127
-; RV32I-NEXT: or a5, a5, a0
-; RV32I-NEXT: or a6, a1, s10
-; RV32I-NEXT: or a5, a5, a6
-; RV32I-NEXT: beqz a5, .LBB0_38
-; RV32I-NEXT: # %bb.29: # %udiv-bb15
-; RV32I-NEXT: sw t4, 92(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw t3, 96(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s11, 64(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 68(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw ra, 72(sp) # 4-byte Folded Spill
-; RV32I-NEXT: addi s7, a2, 1
-; RV32I-NEXT: li a4, 127
-; RV32I-NEXT: sub a4, a4, a2
-; RV32I-NEXT: sw zero, 200(sp)
-; RV32I-NEXT: sw zero, 204(sp)
-; RV32I-NEXT: sw zero, 208(sp)
-; RV32I-NEXT: sw zero, 212(sp)
-; RV32I-NEXT: sw s6, 216(sp)
-; RV32I-NEXT: sw s5, 220(sp)
-; RV32I-NEXT: sw t6, 224(sp)
-; RV32I-NEXT: sw s4, 228(sp)
-; RV32I-NEXT: addi a2, sp, 216
-; RV32I-NEXT: seqz s8, s7
-; RV32I-NEXT: srli a3, a4, 3
-; RV32I-NEXT: andi a5, a4, 31
-; RV32I-NEXT: add s8, a1, s8
-; RV32I-NEXT: andi a3, a3, 12
-; RV32I-NEXT: xori a5, a5, 31
-; RV32I-NEXT: or a1, s7, s8
-; RV32I-NEXT: sub a2, a2, a3
-; RV32I-NEXT: seqz a1, a1
-; RV32I-NEXT: lw a3, 0(a2)
-; RV32I-NEXT: lw a6, 4(a2)
-; RV32I-NEXT: lw a7, 8(a2)
-; RV32I-NEXT: lw a2, 12(a2)
-; RV32I-NEXT: add a1, a0, a1
-; RV32I-NEXT: sltu a0, a1, a0
-; RV32I-NEXT: or t0, s7, a1
-; RV32I-NEXT: add s10, s10, a0
-; RV32I-NEXT: or a0, s8, s10
-; RV32I-NEXT: srli t1, a7, 1
-; RV32I-NEXT: srli t2, a6, 1
-; RV32I-NEXT: or a0, t0, a0
-; RV32I-NEXT: srli t0, a3, 1
-; RV32I-NEXT: srl t1, t1, a5
-; RV32I-NEXT: srl t2, t2, a5
-; RV32I-NEXT: srl a5, t0, a5
-; RV32I-NEXT: sll a2, a2, a4
-; RV32I-NEXT: or t0, a2, t1
-; RV32I-NEXT: sll a2, a7, a4
-; RV32I-NEXT: sll a6, a6, a4
-; RV32I-NEXT: or t2, a2, t2
-; RV32I-NEXT: or a6, a6, a5
-; RV32I-NEXT: sll s9, a3, a4
-; RV32I-NEXT: li ra, 0
-; RV32I-NEXT: beqz a0, .LBB0_37
-; RV32I-NEXT: # %bb.30: # %udiv-preheader4
-; RV32I-NEXT: li t1, 0
-; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: li a7, 0
-; RV32I-NEXT: sw zero, 184(sp)
-; RV32I-NEXT: sw zero, 188(sp)
-; RV32I-NEXT: sw zero, 192(sp)
-; RV32I-NEXT: sw zero, 196(sp)
-; RV32I-NEXT: sw s6, 168(sp)
-; RV32I-NEXT: sw s5, 172(sp)
-; RV32I-NEXT: sw t6, 176(sp)
-; RV32I-NEXT: sw s4, 180(sp)
-; RV32I-NEXT: srli a0, s7, 3
-; RV32I-NEXT: addi a2, sp, 168
-; RV32I-NEXT: andi a0, a0, 12
-; RV32I-NEXT: add a0, a2, a0
-; RV32I-NEXT: lw a2, 4(a0)
-; RV32I-NEXT: lw a3, 8(a0)
-; RV32I-NEXT: lw a5, 12(a0)
-; RV32I-NEXT: lw t3, 0(a0)
-; RV32I-NEXT: andi a0, s7, 31
-; RV32I-NEXT: xori a0, a0, 31
-; RV32I-NEXT: slli t4, a5, 1
-; RV32I-NEXT: slli t6, a3, 1
-; RV32I-NEXT: slli s0, a2, 1
-; RV32I-NEXT: sll t4, t4, a0
-; RV32I-NEXT: sll s1, t6, a0
-; RV32I-NEXT: sll s0, s0, a0
-; RV32I-NEXT: lw s2, 88(sp) # 4-byte Folded Reload
-; RV32I-NEXT: seqz a0, s2
-; RV32I-NEXT: srl a3, a3, s7
-; RV32I-NEXT: or s11, a3, t4
-; RV32I-NEXT: or a3, s2, t5
-; RV32I-NEXT: sub t6, t5, a0
-; RV32I-NEXT: seqz a3, a3
-; RV32I-NEXT: srl a0, a2, s7
-; RV32I-NEXT: or a0, a0, s1
-; RV32I-NEXT: lw a2, 84(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sub s4, a2, a3
-; RV32I-NEXT: sltu a2, a2, a3
-; RV32I-NEXT: lw a3, 80(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sub s5, a3, a2
-; RV32I-NEXT: srl a2, t3, s7
-; RV32I-NEXT: srl s1, a5, s7
-; RV32I-NEXT: or s0, a2, s0
-; RV32I-NEXT: addi s2, s2, -1
-; RV32I-NEXT: sw s2, 76(sp) # 4-byte Folded Spill
-; RV32I-NEXT: j .LBB0_32
-; RV32I-NEXT: .LBB0_31: # %udiv-do-while3
-; RV32I-NEXT: # in Loop: Header=BB0_32 Depth=1
-; RV32I-NEXT: srli a5, t2, 31
-; RV32I-NEXT: slli t0, t0, 1
-; RV32I-NEXT: sub a3, s3, a3
-; RV32I-NEXT: srli s3, a6, 31
-; RV32I-NEXT: slli t2, t2, 1
-; RV32I-NEXT: or a5, t0, a5
-; RV32I-NEXT: srli t0, s9, 31
-; RV32I-NEXT: slli a6, a6, 1
-; RV32I-NEXT: slli s9, s9, 1
-; RV32I-NEXT: or t2, t2, s3
-; RV32I-NEXT: lw t3, 80(sp) # 4-byte Folded Reload
-; RV32I-NEXT: and s3, s11, t3
-; RV32I-NEXT: or a6, a6, t0
-; RV32I-NEXT: lw t0, 84(sp) # 4-byte Folded Reload
-; RV32I-NEXT: and t0, s11, t0
-; RV32I-NEXT: or s9, ra, s9
-; RV32I-NEXT: sub t3, a2, t0
-; RV32I-NEXT: sltu a2, a2, t0
-; RV32I-NEXT: or t0, s7, s8
-; RV32I-NEXT: sub s3, a0, s3
-; RV32I-NEXT: seqz t4, s7
-; RV32I-NEXT: addi s7, s7, -1
-; RV32I-NEXT: andi ra, s11, 1
-; RV32I-NEXT: sub a0, a3, s6
-; RV32I-NEXT: seqz a3, t0
-; RV32I-NEXT: sub s8, s8, t4
-; RV32I-NEXT: or a6, t1, a6
-; RV32I-NEXT: or t2, a4, t2
-; RV32I-NEXT: or t0, a7, a5
-; RV32I-NEXT: sub s11, t3, s1
-; RV32I-NEXT: sltu a4, t3, s1
-; RV32I-NEXT: sub s1, s3, a2
-; RV32I-NEXT: sltu a2, a1, a3
-; RV32I-NEXT: sub a1, a1, a3
-; RV32I-NEXT: sub s1, s1, a4
-; RV32I-NEXT: sub s10, s10, a2
-; RV32I-NEXT: or a2, s8, s10
-; RV32I-NEXT: or a3, s7, a1
-; RV32I-NEXT: or a2, a3, a2
-; RV32I-NEXT: sub s0, s0, s2
-; RV32I-NEXT: li t1, 0
-; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: li a7, 0
-; RV32I-NEXT: beqz a2, .LBB0_37
-; RV32I-NEXT: .LBB0_32: # %udiv-do-while3
-; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
-; RV32I-NEXT: srli a2, s0, 31
-; RV32I-NEXT: slli a3, a0, 1
-; RV32I-NEXT: slli s0, s0, 1
-; RV32I-NEXT: or s3, a3, a2
-; RV32I-NEXT: srli a2, t0, 31
-; RV32I-NEXT: or s0, s0, a2
-; RV32I-NEXT: beq t6, s3, .LBB0_34
-; RV32I-NEXT: # %bb.33: # %udiv-do-while3
-; RV32I-NEXT: # in Loop: Header=BB0_32 Depth=1
-; RV32I-NEXT: sltu a3, t6, s3
-; RV32I-NEXT: j .LBB0_35
-; RV32I-NEXT: .LBB0_34: # in Loop: Header=BB0_32 Depth=1
-; RV32I-NEXT: lw a2, 76(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sltu a3, a2, s0
-; RV32I-NEXT: .LBB0_35: # %udiv-do-while3
-; RV32I-NEXT: # in Loop: Header=BB0_32 Depth=1
-; RV32I-NEXT: srli a2, s11, 31
-; RV32I-NEXT: slli s1, s1, 1
-; RV32I-NEXT: srli s2, a0, 31
-; RV32I-NEXT: slli s11, s11, 1
-; RV32I-NEXT: or a0, s1, a2
-; RV32I-NEXT: or a2, s11, s2
-; RV32I-NEXT: sub s1, s4, a2
-; RV32I-NEXT: sltu s2, s4, a2
-; RV32I-NEXT: sub s6, s5, a0
-; RV32I-NEXT: sltu a3, s1, a3
-; RV32I-NEXT: sub s1, s6, s2
-; RV32I-NEXT: sub s1, s1, a3
-; RV32I-NEXT: srai s11, s1, 31
-; RV32I-NEXT: lw a3, 88(sp) # 4-byte Folded Reload
-; RV32I-NEXT: and s2, s11, a3
-; RV32I-NEXT: and a3, s11, t5
-; RV32I-NEXT: sltu s6, s0, s2
-; RV32I-NEXT: mv s1, s6
-; RV32I-NEXT: beq s3, a3, .LBB0_31
-; RV32I-NEXT: # %bb.36: # %udiv-do-while3
-; RV32I-NEXT: # in Loop: Header=BB0_32 Depth=1
-; RV32I-NEXT: sltu s1, s3, a3
-; RV32I-NEXT: j .LBB0_31
-; RV32I-NEXT: .LBB0_37: # %udiv-loop-exit2
-; RV32I-NEXT: srli a0, s9, 31
-; RV32I-NEXT: slli a3, a6, 1
-; RV32I-NEXT: srli a1, a6, 31
-; RV32I-NEXT: or a3, a3, a0
-; RV32I-NEXT: slli a0, t2, 1
-; RV32I-NEXT: srli a5, t2, 31
-; RV32I-NEXT: slli t0, t0, 1
-; RV32I-NEXT: slli s9, s9, 1
-; RV32I-NEXT: or s7, a0, a1
-; RV32I-NEXT: or s8, t0, a5
-; RV32I-NEXT: or a4, ra, s9
-; RV32I-NEXT: lw ra, 72(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 68(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s11, 64(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw t3, 96(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw t4, 92(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw t2, 56(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw t1, 100(sp) # 4-byte Folded Reload
-; RV32I-NEXT: .LBB0_38: # %udiv-end1
-; RV32I-NEXT: lw s2, 60(sp) # 4-byte Folded Reload
-; RV32I-NEXT: xor a0, a4, s2
-; RV32I-NEXT: sw a0, 40(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sltu a0, a0, s2
-; RV32I-NEXT: xor a1, a3, s2
-; RV32I-NEXT: sw a0, 36(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw a0, 84(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lw s9, 44(sp) # 4-byte Folded Reload
-; RV32I-NEXT: beqz a3, .LBB0_40
-; RV32I-NEXT: # %bb.39: # %udiv-end1
-; RV32I-NEXT: sltu a0, a1, s2
-; RV32I-NEXT: sw a0, 84(sp) # 4-byte Folded Spill
-; RV32I-NEXT: .LBB0_40: # %udiv-end1
-; RV32I-NEXT: neg a0, s11
-; RV32I-NEXT: lw a3, 32(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sw a1, 28(sp) # 4-byte Folded Spill
-; RV32I-NEXT: bltz t2, .LBB0_42
-; RV32I-NEXT: # %bb.41: # %udiv-end1
-; RV32I-NEXT: mv s4, t2
-; RV32I-NEXT: bltz t2, .LBB0_43
-; RV32I-NEXT: j .LBB0_44
-; RV32I-NEXT: .LBB0_42:
-; RV32I-NEXT: sltu a1, a0, a3
-; RV32I-NEXT: lw a2, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: neg a2, a2
-; RV32I-NEXT: sub s4, a2, a1
-; RV32I-NEXT: bgez t2, .LBB0_44
-; RV32I-NEXT: .LBB0_43:
-; RV32I-NEXT: sub s11, a0, a3
-; RV32I-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: add a0, s3, a0
-; RV32I-NEXT: neg s3, a0
-; RV32I-NEXT: neg ra, ra
-; RV32I-NEXT: .LBB0_44: # %udiv-end1
-; RV32I-NEXT: bgez t1, .LBB0_46
-; RV32I-NEXT: # %bb.45:
-; RV32I-NEXT: neg a0, s9
-; RV32I-NEXT: lw a1, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT: neg a1, a1
-; RV32I-NEXT: lw a2, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: add a2, t3, a2
-; RV32I-NEXT: lw a4, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sltu a3, a0, a4
-; RV32I-NEXT: sub s9, a0, a4
-; RV32I-NEXT: neg t3, a2
-; RV32I-NEXT: sub t1, a1, a3
-; RV32I-NEXT: neg t4, t4
-; RV32I-NEXT: .LBB0_46: # %udiv-end1
-; RV32I-NEXT: lui a0, 349525
-; RV32I-NEXT: lui a2, 209715
-; RV32I-NEXT: lui a3, 61681
-; RV32I-NEXT: addi a1, a0, 1365
-; RV32I-NEXT: addi a5, a2, 819
-; RV32I-NEXT: addi a4, a3, -241
-; RV32I-NEXT: bnez t3, .LBB0_49
-; RV32I-NEXT: # %bb.47: # %udiv-end1
-; RV32I-NEXT: srli a0, t4, 1
-; RV32I-NEXT: or a0, t4, a0
-; RV32I-NEXT: srli a2, a0, 2
-; RV32I-NEXT: or a0, a0, a2
-; RV32I-NEXT: srli a2, a0, 4
-; RV32I-NEXT: or a0, a0, a2
-; RV32I-NEXT: srli a2, a0, 8
-; RV32I-NEXT: or a0, a0, a2
-; RV32I-NEXT: srli a2, a0, 16
-; RV32I-NEXT: or a0, a0, a2
-; RV32I-NEXT: not a0, a0
-; RV32I-NEXT: srli a2, a0, 1
-; RV32I-NEXT: and a2, a2, a1
-; RV32I-NEXT: sub a0, a0, a2
-; RV32I-NEXT: and a2, a0, a5
-; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, a5
-; RV32I-NEXT: add a0, a2, a0
-; RV32I-NEXT: srli a2, a0, 4
-; RV32I-NEXT: add a0, a0, a2
-; RV32I-NEXT: and a0, a0, a4
-; RV32I-NEXT: slli a2, a0, 8
-; RV32I-NEXT: add a0, a0, a2
-; RV32I-NEXT: slli a2, a0, 16
-; RV32I-NEXT: add a0, a0, a2
-; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: addi a7, a0, 32
-; RV32I-NEXT: or t0, s9, t1
-; RV32I-NEXT: beqz t1, .LBB0_50
-; RV32I-NEXT: .LBB0_48:
-; RV32I-NEXT: srli a0, t1, 1
-; RV32I-NEXT: or a0, t1, a0
-; RV32I-NEXT: srli a2, a0, 2
-; RV32I-NEXT: or a0, a0, a2
-; RV32I-NEXT: srli a2, a0, 4
-; RV32I-NEXT: or a0, a0, a2
-; RV32I-NEXT: srli a2, a0, 8
-; RV32I-NEXT: or a0, a0, a2
-; RV32I-NEXT: srli a2, a0, 16
-; RV32I-NEXT: or a0, a0, a2
-; RV32I-NEXT: not a0, a0
-; RV32I-NEXT: srli a2, a0, 1
-; RV32I-NEXT: and a2, a2, a1
-; RV32I-NEXT: sub a0, a0, a2
-; RV32I-NEXT: and a2, a0, a5
-; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, a5
-; RV32I-NEXT: add a0, a2, a0
-; RV32I-NEXT: srli a2, a0, 4
-; RV32I-NEXT: add a0, a0, a2
-; RV32I-NEXT: and a0, a0, a4
-; RV32I-NEXT: slli a2, a0, 8
-; RV32I-NEXT: add a0, a0, a2
-; RV32I-NEXT: slli a2, a0, 16
-; RV32I-NEXT: add a0, a0, a2
-; RV32I-NEXT: srli a6, a0, 24
-; RV32I-NEXT: addi a0, a7, 64
-; RV32I-NEXT: beqz t0, .LBB0_51
-; RV32I-NEXT: j .LBB0_52
-; RV32I-NEXT: .LBB0_49:
-; RV32I-NEXT: srli a0, t3, 1
-; RV32I-NEXT: or a0, t3, a0
-; RV32I-NEXT: srli a2, a0, 2
-; RV32I-NEXT: or a0, a0, a2
-; RV32I-NEXT: srli a2, a0, 4
-; RV32I-NEXT: or a0, a0, a2
-; RV32I-NEXT: srli a2, a0, 8
-; RV32I-NEXT: or a0, a0, a2
-; RV32I-NEXT: srli a2, a0, 16
-; RV32I-NEXT: or a0, a0, a2
-; RV32I-NEXT: not a0, a0
-; RV32I-NEXT: srli a2, a0, 1
-; RV32I-NEXT: and a2, a2, a1
-; RV32I-NEXT: sub a0, a0, a2
-; RV32I-NEXT: and a2, a0, a5
-; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, a5
-; RV32I-NEXT: add a0, a2, a0
-; RV32I-NEXT: srli a2, a0, 4
-; RV32I-NEXT: add a0, a0, a2
-; RV32I-NEXT: and a0, a0, a4
-; RV32I-NEXT: slli a2, a0, 8
-; RV32I-NEXT: add a0, a0, a2
-; RV32I-NEXT: slli a2, a0, 16
-; RV32I-NEXT: add a0, a0, a2
-; RV32I-NEXT: srli a7, a0, 24
-; RV32I-NEXT: or t0, s9, t1
-; RV32I-NEXT: bnez t1, .LBB0_48
-; RV32I-NEXT: .LBB0_50: # %udiv-end1
-; RV32I-NEXT: srli a0, s9, 1
-; RV32I-NEXT: or a0, s9, a0
-; RV32I-NEXT: srli a2, a0, 2
-; RV32I-NEXT: or a0, a0, a2
-; RV32I-NEXT: srli a2, a0, 4
-; RV32I-NEXT: or a0, a0, a2
-; RV32I-NEXT: srli a2, a0, 8
-; RV32I-NEXT: or a0, a0, a2
-; RV32I-NEXT: srli a2, a0, 16
-; RV32I-NEXT: or a0, a0, a2
-; RV32I-NEXT: not a0, a0
-; RV32I-NEXT: srli a2, a0, 1
-; RV32I-NEXT: and a2, a2, a1
-; RV32I-NEXT: sub a0, a0, a2
-; RV32I-NEXT: and a2, a0, a5
-; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, a5
-; RV32I-NEXT: add a0, a2, a0
-; RV32I-NEXT: srli a2, a0, 4
-; RV32I-NEXT: add a0, a0, a2
-; RV32I-NEXT: and a0, a0, a4
-; RV32I-NEXT: slli a2, a0, 8
-; RV32I-NEXT: add a0, a0, a2
-; RV32I-NEXT: slli a2, a0, 16
-; RV32I-NEXT: add a0, a0, a2
-; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: addi a6, a0, 32
-; RV32I-NEXT: addi a0, a7, 64
-; RV32I-NEXT: bnez t0, .LBB0_52
-; RV32I-NEXT: .LBB0_51: # %udiv-end1
-; RV32I-NEXT: mv a6, a0
-; RV32I-NEXT: .LBB0_52: # %udiv-end1
-; RV32I-NEXT: snez t5, t0
-; RV32I-NEXT: bnez s3, .LBB0_54
-; RV32I-NEXT: # %bb.53: # %udiv-end1
-; RV32I-NEXT: srli a2, ra, 1
-; RV32I-NEXT: or a2, ra, a2
-; RV32I-NEXT: srli a3, a2, 2
-; RV32I-NEXT: or a2, a2, a3
-; RV32I-NEXT: srli a3, a2, 4
-; RV32I-NEXT: or a2, a2, a3
-; RV32I-NEXT: srli a3, a2, 8
-; RV32I-NEXT: or a2, a2, a3
-; RV32I-NEXT: srli a3, a2, 16
-; RV32I-NEXT: or a2, a2, a3
-; RV32I-NEXT: not a2, a2
-; RV32I-NEXT: srli a3, a2, 1
-; RV32I-NEXT: and a3, a3, a1
-; RV32I-NEXT: sub a2, a2, a3
-; RV32I-NEXT: and a3, a2, a5
-; RV32I-NEXT: srli a2, a2, 2
-; RV32I-NEXT: and a2, a2, a5
-; RV32I-NEXT: add a2, a3, a2
-; RV32I-NEXT: srli a3, a2, 4
-; RV32I-NEXT: add a2, a2, a3
-; RV32I-NEXT: and a2, a2, a4
-; RV32I-NEXT: slli a3, a2, 8
-; RV32I-NEXT: add a2, a2, a3
-; RV32I-NEXT: slli a3, a2, 16
-; RV32I-NEXT: add a2, a2, a3
-; RV32I-NEXT: srli a2, a2, 24
-; RV32I-NEXT: addi a2, a2, 32
-; RV32I-NEXT: j .LBB0_55
-; RV32I-NEXT: .LBB0_54:
-; RV32I-NEXT: srli a2, s3, 1
-; RV32I-NEXT: or a2, s3, a2
-; RV32I-NEXT: srli a3, a2, 2
-; RV32I-NEXT: or a2, a2, a3
-; RV32I-NEXT: srli a3, a2, 4
-; RV32I-NEXT: or a2, a2, a3
-; RV32I-NEXT: srli a3, a2, 8
-; RV32I-NEXT: or a2, a2, a3
-; RV32I-NEXT: srli a3, a2, 16
-; RV32I-NEXT: or a2, a2, a3
-; RV32I-NEXT: not a2, a2
-; RV32I-NEXT: srli a3, a2, 1
-; RV32I-NEXT: and a3, a3, a1
-; RV32I-NEXT: sub a2, a2, a3
-; RV32I-NEXT: and a3, a2, a5
-; RV32I-NEXT: srli a2, a2, 2
-; RV32I-NEXT: and a2, a2, a5
-; RV32I-NEXT: add a2, a3, a2
-; RV32I-NEXT: srli a3, a2, 4
-; RV32I-NEXT: add a2, a2, a3
-; RV32I-NEXT: and a2, a2, a4
-; RV32I-NEXT: slli a3, a2, 8
-; RV32I-NEXT: add a2, a2, a3
-; RV32I-NEXT: slli a3, a2, 16
-; RV32I-NEXT: add a2, a2, a3
-; RV32I-NEXT: srli a2, a2, 24
-; RV32I-NEXT: .LBB0_55: # %udiv-end1
-; RV32I-NEXT: sw t4, 92(sp) # 4-byte Folded Spill
-; RV32I-NEXT: or t0, t4, s9
-; RV32I-NEXT: sw t3, 96(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw t1, 100(sp) # 4-byte Folded Spill
-; RV32I-NEXT: or t1, t3, t1
-; RV32I-NEXT: or t3, ra, s11
-; RV32I-NEXT: or t4, s3, s4
-; RV32I-NEXT: sltu a7, a0, a7
-; RV32I-NEXT: addi t5, t5, -1
-; RV32I-NEXT: addi a0, a2, 64
-; RV32I-NEXT: or t6, s11, s4
-; RV32I-NEXT: sltu s0, a0, a2
-; RV32I-NEXT: snez s1, t6
-; RV32I-NEXT: addi s1, s1, -1
-; RV32I-NEXT: bnez s4, .LBB0_57
-; RV32I-NEXT: # %bb.56: # %udiv-end1
-; RV32I-NEXT: srli a2, s11, 1
-; RV32I-NEXT: or a2, s11, a2
-; RV32I-NEXT: srli a3, a2, 2
-; RV32I-NEXT: or a2, a2, a3
-; RV32I-NEXT: srli a3, a2, 4
-; RV32I-NEXT: or a2, a2, a3
-; RV32I-NEXT: srli a3, a2, 8
-; RV32I-NEXT: or a2, a2, a3
-; RV32I-NEXT: srli a3, a2, 16
-; RV32I-NEXT: or a2, a2, a3
-; RV32I-NEXT: not a2, a2
-; RV32I-NEXT: srli a3, a2, 1
-; RV32I-NEXT: and a1, a3, a1
-; RV32I-NEXT: sub a2, a2, a1
-; RV32I-NEXT: and a1, a2, a5
-; RV32I-NEXT: srli a2, a2, 2
-; RV32I-NEXT: and a2, a2, a5
-; RV32I-NEXT: add a1, a1, a2
-; RV32I-NEXT: srli a2, a1, 4
-; RV32I-NEXT: add a1, a1, a2
-; RV32I-NEXT: and a1, a1, a4
-; RV32I-NEXT: slli a2, a1, 8
-; RV32I-NEXT: add a1, a1, a2
-; RV32I-NEXT: slli a2, a1, 16
-; RV32I-NEXT: add a1, a1, a2
-; RV32I-NEXT: srli a1, a1, 24
-; RV32I-NEXT: addi a2, a1, 32
-; RV32I-NEXT: j .LBB0_58
-; RV32I-NEXT: .LBB0_57:
-; RV32I-NEXT: srli a2, s4, 1
-; RV32I-NEXT: or a2, s4, a2
-; RV32I-NEXT: srli a3, a2, 2
-; RV32I-NEXT: or a2, a2, a3
-; RV32I-NEXT: srli a3, a2, 4
-; RV32I-NEXT: or a2, a2, a3
-; RV32I-NEXT: srli a3, a2, 8
-; RV32I-NEXT: or a2, a2, a3
-; RV32I-NEXT: srli a3, a2, 16
-; RV32I-NEXT: or a2, a2, a3
-; RV32I-NEXT: not a2, a2
-; RV32I-NEXT: srli a3, a2, 1
-; RV32I-NEXT: and a1, a3, a1
-; RV32I-NEXT: sub a2, a2, a1
-; RV32I-NEXT: and a1, a2, a5
-; RV32I-NEXT: srli a2, a2, 2
-; RV32I-NEXT: and a2, a2, a5
-; RV32I-NEXT: add a1, a1, a2
-; RV32I-NEXT: srli a2, a1, 4
-; RV32I-NEXT: add a1, a1, a2
-; RV32I-NEXT: and a1, a1, a4
-; RV32I-NEXT: slli a2, a1, 8
-; RV32I-NEXT: add a1, a1, a2
-; RV32I-NEXT: slli a2, a1, 16
-; RV32I-NEXT: add a1, a1, a2
-; RV32I-NEXT: srli a2, a1, 24
-; RV32I-NEXT: .LBB0_58: # %udiv-end1
-; RV32I-NEXT: or a3, t0, t1
-; RV32I-NEXT: or a4, t3, t4
-; RV32I-NEXT: and a1, t5, a7
-; RV32I-NEXT: and s0, s1, s0
-; RV32I-NEXT: bnez t6, .LBB0_60
-; RV32I-NEXT: # %bb.59: # %udiv-end1
-; RV32I-NEXT: mv a2, a0
-; RV32I-NEXT: .LBB0_60: # %udiv-end1
-; RV32I-NEXT: seqz a0, a3
-; RV32I-NEXT: seqz a5, a4
-; RV32I-NEXT: sltu a7, a6, a2
-; RV32I-NEXT: sub t1, a1, s0
-; RV32I-NEXT: mv t0, a7
-; RV32I-NEXT: beq a1, s0, .LBB0_62
-; RV32I-NEXT: # %bb.61: # %udiv-end1
-; RV32I-NEXT: sltu t0, a1, s0
-; RV32I-NEXT: .LBB0_62: # %udiv-end1
-; RV32I-NEXT: xor a3, s8, s2
-; RV32I-NEXT: xor a4, s7, s2
-; RV32I-NEXT: sub a1, t1, a7
-; RV32I-NEXT: or a7, a0, a5
-; RV32I-NEXT: neg a0, t0
-; RV32I-NEXT: seqz t0, t0
-; RV32I-NEXT: addi t0, t0, -1
-; RV32I-NEXT: or a5, a0, t0
-; RV32I-NEXT: sub a2, a6, a2
-; RV32I-NEXT: beqz a5, .LBB0_64
-; RV32I-NEXT: # %bb.63: # %udiv-end1
-; RV32I-NEXT: snez a6, a5
-; RV32I-NEXT: j .LBB0_65
-; RV32I-NEXT: .LBB0_64:
-; RV32I-NEXT: snez a5, a1
-; RV32I-NEXT: sltiu a6, a2, 128
-; RV32I-NEXT: xori a6, a6, 1
-; RV32I-NEXT: or a6, a6, a5
-; RV32I-NEXT: .LBB0_65: # %udiv-end1
-; RV32I-NEXT: sub a5, a4, s2
-; RV32I-NEXT: sw a5, 80(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sltu a4, a4, s2
-; RV32I-NEXT: sub a5, a3, s2
-; RV32I-NEXT: or a3, a7, a6
-; RV32I-NEXT: addi a6, a3, -1
-; RV32I-NEXT: sw s4, 76(sp) # 4-byte Folded Spill
-; RV32I-NEXT: and s5, a6, s4
-; RV32I-NEXT: and s7, a6, s11
-; RV32I-NEXT: and s4, a6, s3
-; RV32I-NEXT: and s6, a6, ra
-; RV32I-NEXT: sw ra, 72(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 68(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s11, 64(sp) # 4-byte Folded Spill
-; RV32I-NEXT: bnez a3, .LBB0_77
-; RV32I-NEXT: # %bb.66: # %udiv-end1
-; RV32I-NEXT: xori a3, a2, 127
-; RV32I-NEXT: or a3, a3, a0
-; RV32I-NEXT: or a6, a1, t0
-; RV32I-NEXT: or a3, a3, a6
-; RV32I-NEXT: beqz a3, .LBB0_77
-; RV32I-NEXT: # %bb.67: # %udiv-bb1
-; RV32I-NEXT: sw a5, 32(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw a4, 44(sp) # 4-byte Folded Spill
-; RV32I-NEXT: addi a6, a2, 1
-; RV32I-NEXT: li a3, 127
-; RV32I-NEXT: sub a7, a3, a2
-; RV32I-NEXT: sw zero, 136(sp)
-; RV32I-NEXT: sw zero, 140(sp)
-; RV32I-NEXT: sw zero, 144(sp)
-; RV32I-NEXT: sw zero, 148(sp)
-; RV32I-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sw s0, 152(sp)
-; RV32I-NEXT: lw s1, 68(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sw s1, 156(sp)
-; RV32I-NEXT: lw s2, 64(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sw s2, 160(sp)
-; RV32I-NEXT: lw s3, 76(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sw s3, 164(sp)
-; RV32I-NEXT: addi a2, sp, 152
-; RV32I-NEXT: seqz a3, a6
-; RV32I-NEXT: srli a4, a7, 3
-; RV32I-NEXT: andi a5, a7, 31
-; RV32I-NEXT: add a1, a1, a3
-; RV32I-NEXT: andi a4, a4, 12
-; RV32I-NEXT: xori a3, a5, 31
-; RV32I-NEXT: or a5, a6, a1
-; RV32I-NEXT: sub a2, a2, a4
-; RV32I-NEXT: seqz t3, a5
-; RV32I-NEXT: lw a4, 0(a2)
-; RV32I-NEXT: lw a5, 4(a2)
-; RV32I-NEXT: lw t1, 8(a2)
-; RV32I-NEXT: lw a2, 12(a2)
-; RV32I-NEXT: add t3, a0, t3
-; RV32I-NEXT: sltu a0, t3, a0
-; RV32I-NEXT: or t2, a6, t3
-; RV32I-NEXT: add t0, t0, a0
-; RV32I-NEXT: or a0, a1, t0
-; RV32I-NEXT: srli t4, t1, 1
-; RV32I-NEXT: srli t5, a5, 1
-; RV32I-NEXT: or a0, t2, a0
-; RV32I-NEXT: srli t2, a4, 1
-; RV32I-NEXT: srl t4, t4, a3
-; RV32I-NEXT: srl t5, t5, a3
-; RV32I-NEXT: srl a3, t2, a3
-; RV32I-NEXT: sll a2, a2, a7
-; RV32I-NEXT: or t6, a2, t4
-; RV32I-NEXT: sll a2, t1, a7
-; RV32I-NEXT: sll a5, a5, a7
-; RV32I-NEXT: or t4, a2, t5
-; RV32I-NEXT: or t5, a5, a3
-; RV32I-NEXT: sll a7, a4, a7
-; RV32I-NEXT: beqz a0, .LBB0_75
-; RV32I-NEXT: # %bb.68: # %udiv-preheader
-; RV32I-NEXT: li t1, 0
-; RV32I-NEXT: li s4, 0
-; RV32I-NEXT: li s5, 0
-; RV32I-NEXT: li s6, 0
-; RV32I-NEXT: sw zero, 120(sp)
-; RV32I-NEXT: sw zero, 124(sp)
-; RV32I-NEXT: sw zero, 128(sp)
-; RV32I-NEXT: sw zero, 132(sp)
-; RV32I-NEXT: sw s0, 104(sp)
-; RV32I-NEXT: sw s1, 108(sp)
-; RV32I-NEXT: sw s2, 112(sp)
-; RV32I-NEXT: sw s3, 116(sp)
-; RV32I-NEXT: srli a0, a6, 3
-; RV32I-NEXT: addi a2, sp, 104
-; RV32I-NEXT: andi a0, a0, 12
-; RV32I-NEXT: add a0, a2, a0
-; RV32I-NEXT: lw a2, 4(a0)
-; RV32I-NEXT: lw a3, 8(a0)
-; RV32I-NEXT: lw a4, 12(a0)
-; RV32I-NEXT: lw a5, 0(a0)
-; RV32I-NEXT: andi a0, a6, 31
-; RV32I-NEXT: xori a0, a0, 31
-; RV32I-NEXT: slli t2, a4, 1
-; RV32I-NEXT: slli s0, a3, 1
-; RV32I-NEXT: slli s1, a2, 1
-; RV32I-NEXT: sll t2, t2, a0
-; RV32I-NEXT: sll s0, s0, a0
-; RV32I-NEXT: sll s2, s1, a0
-; RV32I-NEXT: lw s3, 92(sp) # 4-byte Folded Reload
-; RV32I-NEXT: seqz a0, s3
-; RV32I-NEXT: srl a3, a3, a6
-; RV32I-NEXT: or s11, a3, t2
-; RV32I-NEXT: lw t2, 96(sp) # 4-byte Folded Reload
-; RV32I-NEXT: or a3, s3, t2
-; RV32I-NEXT: sub s7, t2, a0
-; RV32I-NEXT: seqz a3, a3
-; RV32I-NEXT: srl a0, a2, a6
-; RV32I-NEXT: or a0, a0, s0
-; RV32I-NEXT: sub s8, s9, a3
-; RV32I-NEXT: mv ra, s9
-; RV32I-NEXT: sltu a2, s9, a3
-; RV32I-NEXT: lw a3, 100(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sub s9, a3, a2
-; RV32I-NEXT: srl a2, a5, a6
-; RV32I-NEXT: srl s1, a4, a6
-; RV32I-NEXT: or s0, a2, s2
-; RV32I-NEXT: addi s3, s3, -1
-; RV32I-NEXT: sw s3, 88(sp) # 4-byte Folded Spill
-; RV32I-NEXT: j .LBB0_70
-; RV32I-NEXT: .LBB0_69: # %udiv-do-while
-; RV32I-NEXT: # in Loop: Header=BB0_70 Depth=1
-; RV32I-NEXT: srli t2, t4, 31
-; RV32I-NEXT: slli t6, t6, 1
-; RV32I-NEXT: sub s3, s3, s11
-; RV32I-NEXT: srli s11, t5, 31
-; RV32I-NEXT: slli t4, t4, 1
-; RV32I-NEXT: or t2, t6, t2
-; RV32I-NEXT: srli t6, a7, 31
-; RV32I-NEXT: slli t5, t5, 1
-; RV32I-NEXT: slli a7, a7, 1
-; RV32I-NEXT: or t4, t4, s11
-; RV32I-NEXT: lw a4, 100(sp) # 4-byte Folded Reload
-; RV32I-NEXT: and s11, s10, a4
-; RV32I-NEXT: or t5, t5, t6
-; RV32I-NEXT: and t6, s10, ra
-; RV32I-NEXT: or a7, t1, a7
-; RV32I-NEXT: sub a4, a3, t6
-; RV32I-NEXT: sltu a3, a3, t6
-; RV32I-NEXT: or t6, a6, a1
-; RV32I-NEXT: sub a5, a0, s11
-; RV32I-NEXT: seqz s11, a6
-; RV32I-NEXT: addi a6, a6, -1
-; RV32I-NEXT: andi t1, s10, 1
-; RV32I-NEXT: sub a0, s3, a2
-; RV32I-NEXT: seqz a2, t6
-; RV32I-NEXT: sub a1, a1, s11
-; RV32I-NEXT: or t5, s4, t5
-; RV32I-NEXT: or t4, s5, t4
-; RV32I-NEXT: or t6, s6, t2
-; RV32I-NEXT: sub s11, a4, s1
-; RV32I-NEXT: sltu a4, a4, s1
-; RV32I-NEXT: sub a5, a5, a3
-; RV32I-NEXT: sltu a3, t3, a2
-; RV32I-NEXT: sub t3, t3, a2
-; RV32I-NEXT: sub s1, a5, a4
-; RV32I-NEXT: sub t0, t0, a3
-; RV32I-NEXT: or a2, a1, t0
-; RV32I-NEXT: or a3, a6, t3
-; RV32I-NEXT: or a2, a3, a2
-; RV32I-NEXT: sub s0, s0, s2
-; RV32I-NEXT: li s4, 0
-; RV32I-NEXT: li s5, 0
-; RV32I-NEXT: li s6, 0
-; RV32I-NEXT: beqz a2, .LBB0_76
-; RV32I-NEXT: .LBB0_70: # %udiv-do-while
-; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
-; RV32I-NEXT: srli a2, s0, 31
-; RV32I-NEXT: slli a3, a0, 1
-; RV32I-NEXT: slli s0, s0, 1
-; RV32I-NEXT: or s3, a3, a2
-; RV32I-NEXT: srli a2, t6, 31
-; RV32I-NEXT: or s0, s0, a2
-; RV32I-NEXT: beq s7, s3, .LBB0_72
-; RV32I-NEXT: # %bb.71: # %udiv-do-while
-; RV32I-NEXT: # in Loop: Header=BB0_70 Depth=1
-; RV32I-NEXT: sltu a2, s7, s3
-; RV32I-NEXT: j .LBB0_73
-; RV32I-NEXT: .LBB0_72: # in Loop: Header=BB0_70 Depth=1
-; RV32I-NEXT: lw a2, 88(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sltu a2, a2, s0
-; RV32I-NEXT: .LBB0_73: # %udiv-do-while
-; RV32I-NEXT: # in Loop: Header=BB0_70 Depth=1
-; RV32I-NEXT: srli a3, s11, 31
-; RV32I-NEXT: slli s1, s1, 1
-; RV32I-NEXT: srli s2, a0, 31
-; RV32I-NEXT: slli s11, s11, 1
-; RV32I-NEXT: or a0, s1, a3
-; RV32I-NEXT: or a3, s11, s2
-; RV32I-NEXT: sub s1, s8, a3
-; RV32I-NEXT: sltu s2, s8, a3
-; RV32I-NEXT: sub s10, s9, a0
-; RV32I-NEXT: sltu a2, s1, a2
-; RV32I-NEXT: sub s1, s10, s2
-; RV32I-NEXT: sub s1, s1, a2
-; RV32I-NEXT: srai s10, s1, 31
-; RV32I-NEXT: lw a2, 92(sp) # 4-byte Folded Reload
-; RV32I-NEXT: and s2, s10, a2
-; RV32I-NEXT: lw a2, 96(sp) # 4-byte Folded Reload
-; RV32I-NEXT: and s11, s10, a2
-; RV32I-NEXT: sltu a2, s0, s2
-; RV32I-NEXT: mv s1, a2
-; RV32I-NEXT: beq s3, s11, .LBB0_69
-; RV32I-NEXT: # %bb.74: # %udiv-do-while
-; RV32I-NEXT: # in Loop: Header=BB0_70 Depth=1
-; RV32I-NEXT: sltu s1, s3, s11
-; RV32I-NEXT: j .LBB0_69
-; RV32I-NEXT: .LBB0_75:
-; RV32I-NEXT: mv ra, s9
-; RV32I-NEXT: li t1, 0
-; RV32I-NEXT: .LBB0_76: # %udiv-loop-exit
-; RV32I-NEXT: srli a0, a7, 31
-; RV32I-NEXT: slli a1, t5, 1
-; RV32I-NEXT: srli a2, t5, 31
-; RV32I-NEXT: or s4, a1, a0
-; RV32I-NEXT: slli a0, t4, 1
-; RV32I-NEXT: srli a1, t4, 31
-; RV32I-NEXT: slli t6, t6, 1
-; RV32I-NEXT: slli a7, a7, 1
-; RV32I-NEXT: or s7, a0, a2
-; RV32I-NEXT: or s5, t6, a1
-; RV32I-NEXT: or s6, t1, a7
-; RV32I-NEXT: lw t2, 56(sp) # 4-byte Folded Reload
-; RV32I-NEXT: mv s9, ra
-; RV32I-NEXT: lw a4, 44(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw a5, 32(sp) # 4-byte Folded Reload
-; RV32I-NEXT: .LBB0_77: # %udiv-end
-; RV32I-NEXT: lw a0, 84(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw a1, 80(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sltu a0, a1, a0
-; RV32I-NEXT: sw a0, 88(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sub a5, a5, a4
-; RV32I-NEXT: sw a5, 44(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lw a0, 60(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw a1, 28(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sub s10, a1, a0
-; RV32I-NEXT: srai s11, t2, 31
-; RV32I-NEXT: mv a0, s6
-; RV32I-NEXT: mv a1, s4
-; RV32I-NEXT: mv a2, s9
-; RV32I-NEXT: lw a3, 100(sp) # 4-byte Folded Reload
-; RV32I-NEXT: call __muldi3
-; RV32I-NEXT: mv s8, a0
-; RV32I-NEXT: mv s9, a1
-; RV32I-NEXT: mv a0, s7
-; RV32I-NEXT: mv a1, s5
-; RV32I-NEXT: lw s1, 92(sp) # 4-byte Folded Reload
-; RV32I-NEXT: mv a2, s1
-; RV32I-NEXT: lw s0, 96(sp) # 4-byte Folded Reload
-; RV32I-NEXT: mv a3, s0
-; RV32I-NEXT: call __muldi3
-; RV32I-NEXT: add a1, a1, s9
-; RV32I-NEXT: add s8, a0, s8
-; RV32I-NEXT: sltu s2, s8, a0
-; RV32I-NEXT: add s2, a1, s2
-; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: mv a2, s6
-; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __muldi3
-; RV32I-NEXT: mv s5, a0
-; RV32I-NEXT: mv s7, a1
-; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: mv a2, s6
-; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __muldi3
-; RV32I-NEXT: add s3, a0, s7
-; RV32I-NEXT: sltu a0, s3, a0
-; RV32I-NEXT: add s6, a1, a0
-; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: mv a2, s4
-; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __muldi3
-; RV32I-NEXT: add s3, a0, s3
-; RV32I-NEXT: sltu a0, s3, a0
-; RV32I-NEXT: add a0, a1, a0
-; RV32I-NEXT: add s7, s6, a0
-; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: mv a2, s4
-; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __muldi3
-; RV32I-NEXT: add a2, a0, s7
-; RV32I-NEXT: sltu a4, s7, s6
-; RV32I-NEXT: lw t0, 72(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sltu a3, t0, s5
-; RV32I-NEXT: sltu a0, a2, a0
-; RV32I-NEXT: add a1, a1, a4
-; RV32I-NEXT: add s8, a2, s8
-; RV32I-NEXT: add a0, a1, a0
-; RV32I-NEXT: sltu a1, s8, a2
-; RV32I-NEXT: lw a5, 64(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sltu a2, a5, s8
-; RV32I-NEXT: add a0, a0, s2
-; RV32I-NEXT: lw a4, 76(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sub a1, a4, a1
-; RV32I-NEXT: sub a1, a1, a0
-; RV32I-NEXT: sub a4, a1, a2
-; RV32I-NEXT: sub a5, a5, s8
-; RV32I-NEXT: mv a6, a3
-; RV32I-NEXT: lw t1, 68(sp) # 4-byte Folded Reload
-; RV32I-NEXT: beq t1, s3, .LBB0_79
-; RV32I-NEXT: # %bb.78: # %udiv-end
-; RV32I-NEXT: sltu a6, t1, s3
-; RV32I-NEXT: .LBB0_79: # %udiv-end
-; RV32I-NEXT: lw a0, 88(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 44(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sub s1, s1, a0
-; RV32I-NEXT: lw a0, 36(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sub a0, s10, a0
-; RV32I-NEXT: lw a1, 84(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw a2, 80(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sub a1, a2, a1
-; RV32I-NEXT: lw a2, 60(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw a7, 40(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sub a2, a7, a2
-; RV32I-NEXT: sltu a7, a5, a6
-; RV32I-NEXT: sub a5, a5, a6
-; RV32I-NEXT: sub a6, t1, s3
-; RV32I-NEXT: sub t0, t0, s5
-; RV32I-NEXT: sub a4, a4, a7
-; RV32I-NEXT: sub t1, a6, a3
-; RV32I-NEXT: xor a7, a5, s11
-; RV32I-NEXT: xor a3, t0, s11
-; RV32I-NEXT: xor a5, a4, s11
-; RV32I-NEXT: xor a6, t1, s11
-; RV32I-NEXT: sltu t0, a7, s11
-; RV32I-NEXT: sltu a4, a3, s11
-; RV32I-NEXT: add t0, s11, t0
-; RV32I-NEXT: sub a5, a5, t0
-; RV32I-NEXT: sub a7, a7, s11
-; RV32I-NEXT: mv t0, a4
-; RV32I-NEXT: beqz t1, .LBB0_81
-; RV32I-NEXT: # %bb.80: # %udiv-end
-; RV32I-NEXT: sltu t0, a6, s11
-; RV32I-NEXT: .LBB0_81: # %udiv-end
-; RV32I-NEXT: sltu t1, a7, t0
-; RV32I-NEXT: sub a7, a7, t0
-; RV32I-NEXT: sub a6, a6, s11
-; RV32I-NEXT: sub a3, a3, s11
-; RV32I-NEXT: lw t0, 48(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sw a2, 0(t0)
-; RV32I-NEXT: sw a0, 4(t0)
-; RV32I-NEXT: sw a1, 8(t0)
-; RV32I-NEXT: sw s1, 12(t0)
-; RV32I-NEXT: sub a0, a5, t1
-; RV32I-NEXT: sub a1, a6, a4
-; RV32I-NEXT: lw a2, 52(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sw a3, 0(a2)
-; RV32I-NEXT: sw a1, 4(a2)
-; RV32I-NEXT: sw a7, 8(a2)
-; RV32I-NEXT: sw a0, 12(a2)
-; RV32I-NEXT: lw ra, 284(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s0, 280(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 276(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 272(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 268(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s4, 264(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s5, 260(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s6, 256(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s7, 252(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s8, 248(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s9, 244(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s10, 240(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s11, 236(sp) # 4-byte Folded Reload
-; RV32I-NEXT: .cfi_restore ra
-; RV32I-NEXT: .cfi_restore s0
-; RV32I-NEXT: .cfi_restore s1
-; RV32I-NEXT: .cfi_restore s2
-; RV32I-NEXT: .cfi_restore s3
-; RV32I-NEXT: .cfi_restore s4
-; RV32I-NEXT: .cfi_restore s5
-; RV32I-NEXT: .cfi_restore s6
-; RV32I-NEXT: .cfi_restore s7
-; RV32I-NEXT: .cfi_restore s8
-; RV32I-NEXT: .cfi_restore s9
-; RV32I-NEXT: .cfi_restore s10
-; RV32I-NEXT: .cfi_restore s11
-; RV32I-NEXT: addi sp, sp, 288
-; RV32I-NEXT: .cfi_def_cfa_offset 0
-; RV32I-NEXT: ret
+; RV32I-NOT: __divmodti4
+; RV32I-NOT: __divti3
+; RV32I-NOT: __modti3
+; RV32I: ret
;
; RV32M-LABEL: sdivrem_i128:
-; RV32M: # %bb.0: # %_udiv-special-cases_udiv-special-cases
-; RV32M-NEXT: addi sp, sp, -272
-; RV32M-NEXT: .cfi_def_cfa_offset 272
-; RV32M-NEXT: sw ra, 268(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw s0, 264(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw s1, 260(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw s2, 256(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw s3, 252(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw s4, 248(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw s5, 244(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw s6, 240(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw s7, 236(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw s8, 232(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw s9, 228(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw s10, 224(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw s11, 220(sp) # 4-byte Folded Spill
-; RV32M-NEXT: .cfi_offset ra, -4
-; RV32M-NEXT: .cfi_offset s0, -8
-; RV32M-NEXT: .cfi_offset s1, -12
-; RV32M-NEXT: .cfi_offset s2, -16
-; RV32M-NEXT: .cfi_offset s3, -20
-; RV32M-NEXT: .cfi_offset s4, -24
-; RV32M-NEXT: .cfi_offset s5, -28
-; RV32M-NEXT: .cfi_offset s6, -32
-; RV32M-NEXT: .cfi_offset s7, -36
-; RV32M-NEXT: .cfi_offset s8, -40
-; RV32M-NEXT: .cfi_offset s9, -44
-; RV32M-NEXT: .cfi_offset s10, -48
-; RV32M-NEXT: .cfi_offset s11, -52
-; RV32M-NEXT: sw a1, 32(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
-; RV32M-NEXT: lw a4, 0(a2)
-; RV32M-NEXT: lw s9, 4(a2)
-; RV32M-NEXT: lw s7, 8(a2)
-; RV32M-NEXT: lw s6, 0(a3)
-; RV32M-NEXT: lw a6, 4(a3)
-; RV32M-NEXT: lw a5, 8(a3)
-; RV32M-NEXT: lw t3, 12(a3)
-; RV32M-NEXT: lw t4, 12(a2)
-; RV32M-NEXT: or a0, a4, s9
-; RV32M-NEXT: snez a1, s7
-; RV32M-NEXT: snez a7, a0
-; RV32M-NEXT: add a1, t4, a1
-; RV32M-NEXT: snez t0, a4
-; RV32M-NEXT: sw s9, 56(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw s7, 52(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw a1, 12(sp) # 4-byte Folded Spill
-; RV32M-NEXT: bltz t4, .LBB0_2
-; RV32M-NEXT: # %bb.1: # %_udiv-special-cases_udiv-special-cases
-; RV32M-NEXT: mv s8, t4
-; RV32M-NEXT: mv s10, a4
-; RV32M-NEXT: j .LBB0_3
-; RV32M-NEXT: .LBB0_2:
-; RV32M-NEXT: neg a0, s7
-; RV32M-NEXT: neg a1, a1
-; RV32M-NEXT: neg a2, s9
-; RV32M-NEXT: sltu a3, a0, a7
-; RV32M-NEXT: sub s7, a0, a7
-; RV32M-NEXT: sub s9, a2, t0
-; RV32M-NEXT: sub s8, a1, a3
-; RV32M-NEXT: neg s10, a4
-; RV32M-NEXT: .LBB0_3: # %_udiv-special-cases_udiv-special-cases
-; RV32M-NEXT: sw a4, 60(sp) # 4-byte Folded Spill
-; RV32M-NEXT: or a0, s6, a6
-; RV32M-NEXT: snez a1, a5
-; RV32M-NEXT: snez a4, a0
-; RV32M-NEXT: add a1, t3, a1
-; RV32M-NEXT: snez t1, s6
-; RV32M-NEXT: sw s6, 40(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw a7, 24(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw t0, 16(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw a4, 8(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw t1, 4(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw a1, 0(sp) # 4-byte Folded Spill
-; RV32M-NEXT: bltz t3, .LBB0_5
-; RV32M-NEXT: # %bb.4: # %_udiv-special-cases_udiv-special-cases
-; RV32M-NEXT: mv s5, t3
-; RV32M-NEXT: mv t5, a5
-; RV32M-NEXT: mv a7, a6
-; RV32M-NEXT: sw s6, 76(sp) # 4-byte Folded Spill
-; RV32M-NEXT: j .LBB0_6
-; RV32M-NEXT: .LBB0_5:
-; RV32M-NEXT: neg a0, a5
-; RV32M-NEXT: neg a1, a1
-; RV32M-NEXT: neg a2, a6
-; RV32M-NEXT: sltu a3, a0, a4
-; RV32M-NEXT: sub t5, a0, a4
-; RV32M-NEXT: sub a7, a2, t1
-; RV32M-NEXT: sub s5, a1, a3
-; RV32M-NEXT: neg a0, s6
-; RV32M-NEXT: sw a0, 76(sp) # 4-byte Folded Spill
-; RV32M-NEXT: .LBB0_6: # %_udiv-special-cases_udiv-special-cases
-; RV32M-NEXT: lui a0, 349525
-; RV32M-NEXT: lui a1, 209715
-; RV32M-NEXT: lui a2, 61681
-; RV32M-NEXT: lui a3, 4112
-; RV32M-NEXT: addi s1, a0, 1365
-; RV32M-NEXT: addi s0, a1, 819
-; RV32M-NEXT: addi t6, a2, -241
-; RV32M-NEXT: addi t2, a3, 257
-; RV32M-NEXT: bnez a7, .LBB0_9
-; RV32M-NEXT: # %bb.7: # %_udiv-special-cases_udiv-special-cases
-; RV32M-NEXT: lw a1, 76(sp) # 4-byte Folded Reload
-; RV32M-NEXT: srli a0, a1, 1
-; RV32M-NEXT: or a0, a1, a0
-; RV32M-NEXT: srli a1, a0, 2
-; RV32M-NEXT: or a0, a0, a1
-; RV32M-NEXT: srli a1, a0, 4
-; RV32M-NEXT: or a0, a0, a1
-; RV32M-NEXT: srli a1, a0, 8
-; RV32M-NEXT: or a0, a0, a1
-; RV32M-NEXT: srli a1, a0, 16
-; RV32M-NEXT: or a0, a0, a1
-; RV32M-NEXT: not a0, a0
-; RV32M-NEXT: srli a1, a0, 1
-; RV32M-NEXT: and a1, a1, s1
-; RV32M-NEXT: sub a0, a0, a1
-; RV32M-NEXT: and a1, a0, s0
-; RV32M-NEXT: srli a0, a0, 2
-; RV32M-NEXT: and a0, a0, s0
-; RV32M-NEXT: add a0, a1, a0
-; RV32M-NEXT: srli a1, a0, 4
-; RV32M-NEXT: add a0, a0, a1
-; RV32M-NEXT: and a0, a0, t6
-; RV32M-NEXT: mul a0, a0, t2
-; RV32M-NEXT: srli a0, a0, 24
-; RV32M-NEXT: addi a1, a0, 32
-; RV32M-NEXT: or a2, t5, s5
-; RV32M-NEXT: beqz s5, .LBB0_10
-; RV32M-NEXT: .LBB0_8:
-; RV32M-NEXT: srli a0, s5, 1
-; RV32M-NEXT: or a0, s5, a0
-; RV32M-NEXT: srli a3, a0, 2
-; RV32M-NEXT: or a0, a0, a3
-; RV32M-NEXT: srli a3, a0, 4
-; RV32M-NEXT: or a0, a0, a3
-; RV32M-NEXT: srli a3, a0, 8
-; RV32M-NEXT: or a0, a0, a3
-; RV32M-NEXT: srli a3, a0, 16
-; RV32M-NEXT: or a0, a0, a3
-; RV32M-NEXT: not a0, a0
-; RV32M-NEXT: srli a3, a0, 1
-; RV32M-NEXT: and a3, a3, s1
-; RV32M-NEXT: sub a0, a0, a3
-; RV32M-NEXT: and a3, a0, s0
-; RV32M-NEXT: srli a0, a0, 2
-; RV32M-NEXT: and a0, a0, s0
-; RV32M-NEXT: add a0, a3, a0
-; RV32M-NEXT: srli a3, a0, 4
-; RV32M-NEXT: add a0, a0, a3
-; RV32M-NEXT: and a0, a0, t6
-; RV32M-NEXT: mul a0, a0, t2
-; RV32M-NEXT: srli a0, a0, 24
-; RV32M-NEXT: addi a4, a1, 64
-; RV32M-NEXT: beqz a2, .LBB0_11
-; RV32M-NEXT: j .LBB0_12
-; RV32M-NEXT: .LBB0_9:
-; RV32M-NEXT: srli a0, a7, 1
-; RV32M-NEXT: or a0, a7, a0
-; RV32M-NEXT: srli a1, a0, 2
-; RV32M-NEXT: or a0, a0, a1
-; RV32M-NEXT: srli a1, a0, 4
-; RV32M-NEXT: or a0, a0, a1
-; RV32M-NEXT: srli a1, a0, 8
-; RV32M-NEXT: or a0, a0, a1
-; RV32M-NEXT: srli a1, a0, 16
-; RV32M-NEXT: or a0, a0, a1
-; RV32M-NEXT: not a0, a0
-; RV32M-NEXT: srli a1, a0, 1
-; RV32M-NEXT: and a1, a1, s1
-; RV32M-NEXT: sub a0, a0, a1
-; RV32M-NEXT: and a1, a0, s0
-; RV32M-NEXT: srli a0, a0, 2
-; RV32M-NEXT: and a0, a0, s0
-; RV32M-NEXT: add a0, a1, a0
-; RV32M-NEXT: srli a1, a0, 4
-; RV32M-NEXT: add a0, a0, a1
-; RV32M-NEXT: and a0, a0, t6
-; RV32M-NEXT: mul a1, a0, t2
-; RV32M-NEXT: srli a1, a1, 24
-; RV32M-NEXT: or a2, t5, s5
-; RV32M-NEXT: bnez s5, .LBB0_8
-; RV32M-NEXT: .LBB0_10: # %_udiv-special-cases_udiv-special-cases
-; RV32M-NEXT: srli a0, t5, 1
-; RV32M-NEXT: or a0, t5, a0
-; RV32M-NEXT: srli a3, a0, 2
-; RV32M-NEXT: or a0, a0, a3
-; RV32M-NEXT: srli a3, a0, 4
-; RV32M-NEXT: or a0, a0, a3
-; RV32M-NEXT: srli a3, a0, 8
-; RV32M-NEXT: or a0, a0, a3
-; RV32M-NEXT: srli a3, a0, 16
-; RV32M-NEXT: or a0, a0, a3
-; RV32M-NEXT: not a0, a0
-; RV32M-NEXT: srli a3, a0, 1
-; RV32M-NEXT: and a3, a3, s1
-; RV32M-NEXT: sub a0, a0, a3
-; RV32M-NEXT: and a3, a0, s0
-; RV32M-NEXT: srli a0, a0, 2
-; RV32M-NEXT: and a0, a0, s0
-; RV32M-NEXT: add a0, a3, a0
-; RV32M-NEXT: srli a3, a0, 4
-; RV32M-NEXT: add a0, a0, a3
-; RV32M-NEXT: and a0, a0, t6
-; RV32M-NEXT: mul a0, a0, t2
-; RV32M-NEXT: srli a0, a0, 24
-; RV32M-NEXT: addi a0, a0, 32
-; RV32M-NEXT: addi a4, a1, 64
-; RV32M-NEXT: bnez a2, .LBB0_12
-; RV32M-NEXT: .LBB0_11: # %_udiv-special-cases_udiv-special-cases
-; RV32M-NEXT: mv a0, a4
-; RV32M-NEXT: .LBB0_12: # %_udiv-special-cases_udiv-special-cases
-; RV32M-NEXT: snez s2, a2
-; RV32M-NEXT: sw a6, 36(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw a5, 84(sp) # 4-byte Folded Spill
-; RV32M-NEXT: bnez s9, .LBB0_14
-; RV32M-NEXT: # %bb.13: # %_udiv-special-cases_udiv-special-cases
-; RV32M-NEXT: srli a2, s10, 1
-; RV32M-NEXT: or a2, s10, a2
-; RV32M-NEXT: srli a3, a2, 2
-; RV32M-NEXT: or a2, a2, a3
-; RV32M-NEXT: srli a3, a2, 4
-; RV32M-NEXT: or a2, a2, a3
-; RV32M-NEXT: srli a3, a2, 8
-; RV32M-NEXT: or a2, a2, a3
-; RV32M-NEXT: srli a3, a2, 16
-; RV32M-NEXT: or a2, a2, a3
-; RV32M-NEXT: not a2, a2
-; RV32M-NEXT: srli a3, a2, 1
-; RV32M-NEXT: and a3, a3, s1
-; RV32M-NEXT: sub a2, a2, a3
-; RV32M-NEXT: and a3, a2, s0
-; RV32M-NEXT: srli a2, a2, 2
-; RV32M-NEXT: and a2, a2, s0
-; RV32M-NEXT: add a2, a3, a2
-; RV32M-NEXT: srli a3, a2, 4
-; RV32M-NEXT: add a2, a2, a3
-; RV32M-NEXT: and a2, a2, t6
-; RV32M-NEXT: mul a2, a2, t2
-; RV32M-NEXT: srli a2, a2, 24
-; RV32M-NEXT: addi a3, a2, 32
-; RV32M-NEXT: j .LBB0_15
-; RV32M-NEXT: .LBB0_14:
-; RV32M-NEXT: srli a2, s9, 1
-; RV32M-NEXT: or a2, s9, a2
-; RV32M-NEXT: srli a3, a2, 2
-; RV32M-NEXT: or a2, a2, a3
-; RV32M-NEXT: srli a3, a2, 4
-; RV32M-NEXT: or a2, a2, a3
-; RV32M-NEXT: srli a3, a2, 8
-; RV32M-NEXT: or a2, a2, a3
-; RV32M-NEXT: srli a3, a2, 16
-; RV32M-NEXT: or a2, a2, a3
-; RV32M-NEXT: not a2, a2
-; RV32M-NEXT: srli a3, a2, 1
-; RV32M-NEXT: and a3, a3, s1
-; RV32M-NEXT: sub a2, a2, a3
-; RV32M-NEXT: and a3, a2, s0
-; RV32M-NEXT: srli a2, a2, 2
-; RV32M-NEXT: and a2, a2, s0
-; RV32M-NEXT: add a2, a3, a2
-; RV32M-NEXT: srli a3, a2, 4
-; RV32M-NEXT: add a2, a2, a3
-; RV32M-NEXT: and a2, a2, t6
-; RV32M-NEXT: mul a3, a2, t2
-; RV32M-NEXT: srli a3, a3, 24
-; RV32M-NEXT: .LBB0_15: # %_udiv-special-cases_udiv-special-cases
-; RV32M-NEXT: lw a2, 76(sp) # 4-byte Folded Reload
-; RV32M-NEXT: or a2, a2, t5
-; RV32M-NEXT: sw a7, 68(sp) # 4-byte Folded Spill
-; RV32M-NEXT: or a5, a7, s5
-; RV32M-NEXT: or a6, s10, s7
-; RV32M-NEXT: or a7, s9, s8
-; RV32M-NEXT: sltu a4, a4, a1
-; RV32M-NEXT: addi s2, s2, -1
-; RV32M-NEXT: addi a1, a3, 64
-; RV32M-NEXT: or s3, s7, s8
-; RV32M-NEXT: sltu t0, a1, a3
-; RV32M-NEXT: snez a3, s3
-; RV32M-NEXT: addi a3, a3, -1
-; RV32M-NEXT: bnez s8, .LBB0_17
-; RV32M-NEXT: # %bb.16: # %_udiv-special-cases_udiv-special-cases
-; RV32M-NEXT: srli t1, s7, 1
-; RV32M-NEXT: or t1, s7, t1
-; RV32M-NEXT: srli s4, t1, 2
-; RV32M-NEXT: or t1, t1, s4
-; RV32M-NEXT: srli s4, t1, 4
-; RV32M-NEXT: or t1, t1, s4
-; RV32M-NEXT: srli s4, t1, 8
-; RV32M-NEXT: or t1, t1, s4
-; RV32M-NEXT: srli s4, t1, 16
-; RV32M-NEXT: or t1, t1, s4
-; RV32M-NEXT: not t1, t1
-; RV32M-NEXT: srli s4, t1, 1
-; RV32M-NEXT: and s1, s4, s1
-; RV32M-NEXT: sub t1, t1, s1
-; RV32M-NEXT: and s1, t1, s0
-; RV32M-NEXT: srli t1, t1, 2
-; RV32M-NEXT: and t1, t1, s0
-; RV32M-NEXT: add t1, s1, t1
-; RV32M-NEXT: srli s0, t1, 4
-; RV32M-NEXT: add t1, t1, s0
-; RV32M-NEXT: and t1, t1, t6
-; RV32M-NEXT: mul t1, t1, t2
-; RV32M-NEXT: srli t1, t1, 24
-; RV32M-NEXT: addi t1, t1, 32
-; RV32M-NEXT: j .LBB0_18
-; RV32M-NEXT: .LBB0_17:
-; RV32M-NEXT: srli t1, s8, 1
-; RV32M-NEXT: or t1, s8, t1
-; RV32M-NEXT: srli s4, t1, 2
-; RV32M-NEXT: or t1, t1, s4
-; RV32M-NEXT: srli s4, t1, 4
-; RV32M-NEXT: or t1, t1, s4
-; RV32M-NEXT: srli s4, t1, 8
-; RV32M-NEXT: or t1, t1, s4
-; RV32M-NEXT: srli s4, t1, 16
-; RV32M-NEXT: or t1, t1, s4
-; RV32M-NEXT: not t1, t1
-; RV32M-NEXT: srli s4, t1, 1
-; RV32M-NEXT: and s1, s4, s1
-; RV32M-NEXT: sub t1, t1, s1
-; RV32M-NEXT: and s1, t1, s0
-; RV32M-NEXT: srli t1, t1, 2
-; RV32M-NEXT: and t1, t1, s0
-; RV32M-NEXT: add t1, s1, t1
-; RV32M-NEXT: srli s0, t1, 4
-; RV32M-NEXT: add t1, t1, s0
-; RV32M-NEXT: and t1, t1, t6
-; RV32M-NEXT: mul t1, t1, t2
-; RV32M-NEXT: srli t1, t1, 24
-; RV32M-NEXT: .LBB0_18: # %_udiv-special-cases_udiv-special-cases
-; RV32M-NEXT: or a5, a2, a5
-; RV32M-NEXT: or a6, a6, a7
-; RV32M-NEXT: and a2, s2, a4
-; RV32M-NEXT: and a3, a3, t0
-; RV32M-NEXT: lw t6, 40(sp) # 4-byte Folded Reload
-; RV32M-NEXT: bnez s3, .LBB0_20
-; RV32M-NEXT: # %bb.19: # %_udiv-special-cases_udiv-special-cases
-; RV32M-NEXT: mv t1, a1
-; RV32M-NEXT: .LBB0_20: # %_udiv-special-cases_udiv-special-cases
-; RV32M-NEXT: seqz a1, a5
-; RV32M-NEXT: seqz a4, a6
-; RV32M-NEXT: sltu a6, a0, t1
-; RV32M-NEXT: sub a7, a2, a3
-; RV32M-NEXT: mv a5, a6
-; RV32M-NEXT: beq a2, a3, .LBB0_22
-; RV32M-NEXT: # %bb.21: # %_udiv-special-cases_udiv-special-cases
-; RV32M-NEXT: sltu a5, a2, a3
-; RV32M-NEXT: .LBB0_22: # %_udiv-special-cases_udiv-special-cases
-; RV32M-NEXT: sub a2, a7, a6
-; RV32M-NEXT: or a3, a1, a4
-; RV32M-NEXT: neg a1, a5
-; RV32M-NEXT: seqz s2, a5
-; RV32M-NEXT: addi s2, s2, -1
-; RV32M-NEXT: sub a0, a0, t1
-; RV32M-NEXT: or a5, a1, s2
-; RV32M-NEXT: xor a4, t3, t4
-; RV32M-NEXT: lw s3, 36(sp) # 4-byte Folded Reload
-; RV32M-NEXT: beqz a5, .LBB0_24
-; RV32M-NEXT: # %bb.23: # %_udiv-special-cases_udiv-special-cases
-; RV32M-NEXT: snez a5, a5
-; RV32M-NEXT: j .LBB0_25
-; RV32M-NEXT: .LBB0_24:
-; RV32M-NEXT: snez a5, a2
-; RV32M-NEXT: sltiu a6, a0, 128
-; RV32M-NEXT: xori a6, a6, 1
-; RV32M-NEXT: or a5, a6, a5
-; RV32M-NEXT: .LBB0_25: # %_udiv-special-cases_udiv-special-cases
-; RV32M-NEXT: sw t4, 48(sp) # 4-byte Folded Spill
-; RV32M-NEXT: srai a4, a4, 31
-; RV32M-NEXT: sw a4, 44(sp) # 4-byte Folded Spill
-; RV32M-NEXT: or a5, a3, a5
-; RV32M-NEXT: addi a4, a5, -1
-; RV32M-NEXT: and ra, a4, s8
-; RV32M-NEXT: and s11, a4, s7
-; RV32M-NEXT: and a3, a4, s9
-; RV32M-NEXT: and a4, a4, s10
-; RV32M-NEXT: bnez a5, .LBB0_36
-; RV32M-NEXT: # %bb.26: # %_udiv-special-cases_udiv-special-cases
-; RV32M-NEXT: sw s5, 72(sp) # 4-byte Folded Spill
-; RV32M-NEXT: xori a5, a0, 127
-; RV32M-NEXT: or a5, a5, a1
-; RV32M-NEXT: or a6, a2, s2
-; RV32M-NEXT: or a5, a5, a6
-; RV32M-NEXT: beqz a5, .LBB0_36
-; RV32M-NEXT: # %bb.27: # %udiv-bb15
-; RV32M-NEXT: sw t3, 80(sp) # 4-byte Folded Spill
-; RV32M-NEXT: addi s11, a0, 1
-; RV32M-NEXT: li a3, 127
-; RV32M-NEXT: sub a0, a3, a0
-; RV32M-NEXT: sw zero, 184(sp)
-; RV32M-NEXT: sw zero, 188(sp)
-; RV32M-NEXT: sw zero, 192(sp)
-; RV32M-NEXT: sw zero, 196(sp)
-; RV32M-NEXT: sw s10, 200(sp)
-; RV32M-NEXT: sw s9, 204(sp)
-; RV32M-NEXT: sw s7, 208(sp)
-; RV32M-NEXT: sw s8, 212(sp)
-; RV32M-NEXT: addi a3, sp, 200
-; RV32M-NEXT: seqz ra, s11
-; RV32M-NEXT: srli a4, a0, 3
-; RV32M-NEXT: andi a5, a0, 31
-; RV32M-NEXT: add ra, a2, ra
-; RV32M-NEXT: andi a4, a4, 12
-; RV32M-NEXT: xori a2, a5, 31
-; RV32M-NEXT: or a5, s11, ra
-; RV32M-NEXT: sub a3, a3, a4
-; RV32M-NEXT: seqz s4, a5
-; RV32M-NEXT: lw a4, 0(a3)
-; RV32M-NEXT: lw a5, 4(a3)
-; RV32M-NEXT: lw a6, 8(a3)
-; RV32M-NEXT: lw a3, 12(a3)
-; RV32M-NEXT: add s4, a1, s4
-; RV32M-NEXT: sltu a1, s4, a1
-; RV32M-NEXT: or a7, s11, s4
-; RV32M-NEXT: add s2, s2, a1
-; RV32M-NEXT: or a1, ra, s2
-; RV32M-NEXT: srli t0, a6, 1
-; RV32M-NEXT: srli t1, a5, 1
-; RV32M-NEXT: or a7, a7, a1
-; RV32M-NEXT: srli a1, a4, 1
-; RV32M-NEXT: srl t0, t0, a2
-; RV32M-NEXT: srl t1, t1, a2
-; RV32M-NEXT: srl a2, a1, a2
-; RV32M-NEXT: sll a1, a3, a0
-; RV32M-NEXT: or a1, a1, t0
-; RV32M-NEXT: sll a3, a6, a0
-; RV32M-NEXT: sll s1, a5, a0
-; RV32M-NEXT: or s5, a3, t1
-; RV32M-NEXT: or s1, s1, a2
-; RV32M-NEXT: sll s0, a4, a0
-; RV32M-NEXT: li t6, 0
-; RV32M-NEXT: beqz a7, .LBB0_35
-; RV32M-NEXT: # %bb.28: # %udiv-preheader4
-; RV32M-NEXT: li a0, 0
-; RV32M-NEXT: li s3, 0
-; RV32M-NEXT: li a2, 0
-; RV32M-NEXT: sw zero, 168(sp)
-; RV32M-NEXT: sw zero, 172(sp)
-; RV32M-NEXT: sw zero, 176(sp)
-; RV32M-NEXT: sw zero, 180(sp)
-; RV32M-NEXT: sw s10, 152(sp)
-; RV32M-NEXT: sw s9, 156(sp)
-; RV32M-NEXT: sw s7, 160(sp)
-; RV32M-NEXT: sw s8, 164(sp)
-; RV32M-NEXT: srli a3, s11, 3
-; RV32M-NEXT: addi a4, sp, 152
-; RV32M-NEXT: andi a3, a3, 12
-; RV32M-NEXT: add a3, a4, a3
-; RV32M-NEXT: lw a4, 4(a3)
-; RV32M-NEXT: lw a5, 8(a3)
-; RV32M-NEXT: lw a6, 12(a3)
-; RV32M-NEXT: lw a3, 0(a3)
-; RV32M-NEXT: andi a7, s11, 31
-; RV32M-NEXT: xori a7, a7, 31
-; RV32M-NEXT: slli t0, a6, 1
-; RV32M-NEXT: slli t1, a5, 1
-; RV32M-NEXT: slli t2, a4, 1
-; RV32M-NEXT: sll t0, t0, a7
-; RV32M-NEXT: sll t1, t1, a7
-; RV32M-NEXT: sll t3, t2, a7
-; RV32M-NEXT: lw t4, 76(sp) # 4-byte Folded Reload
-; RV32M-NEXT: seqz t2, t4
-; RV32M-NEXT: srl a5, a5, s11
-; RV32M-NEXT: or a7, a5, t0
-; RV32M-NEXT: lw t0, 68(sp) # 4-byte Folded Reload
-; RV32M-NEXT: or a5, t4, t0
-; RV32M-NEXT: sub s7, t0, t2
-; RV32M-NEXT: seqz t0, a5
-; RV32M-NEXT: srl a4, a4, s11
-; RV32M-NEXT: or a5, a4, t1
-; RV32M-NEXT: sub s8, t5, t0
-; RV32M-NEXT: mv s6, t5
-; RV32M-NEXT: sltu a4, t5, t0
-; RV32M-NEXT: lw t0, 72(sp) # 4-byte Folded Reload
-; RV32M-NEXT: sub s9, t0, a4
-; RV32M-NEXT: srl a3, a3, s11
-; RV32M-NEXT: srl t2, a6, s11
-; RV32M-NEXT: or a4, a3, t3
-; RV32M-NEXT: addi t4, t4, -1
-; RV32M-NEXT: sw t4, 64(sp) # 4-byte Folded Spill
-; RV32M-NEXT: j .LBB0_30
-; RV32M-NEXT: .LBB0_29: # %udiv-do-while3
-; RV32M-NEXT: # in Loop: Header=BB0_30 Depth=1
-; RV32M-NEXT: srli t3, s5, 31
-; RV32M-NEXT: slli a1, a1, 1
-; RV32M-NEXT: sub t0, t0, s10
-; RV32M-NEXT: srli s10, s1, 31
-; RV32M-NEXT: slli s5, s5, 1
-; RV32M-NEXT: or a1, a1, t3
-; RV32M-NEXT: srli t3, s0, 31
-; RV32M-NEXT: slli s1, s1, 1
-; RV32M-NEXT: slli s0, s0, 1
-; RV32M-NEXT: or s5, s5, s10
-; RV32M-NEXT: lw t5, 72(sp) # 4-byte Folded Reload
-; RV32M-NEXT: and s10, a7, t5
-; RV32M-NEXT: or s1, s1, t3
-; RV32M-NEXT: and t3, a7, t4
-; RV32M-NEXT: or s0, t6, s0
-; RV32M-NEXT: sub t4, t1, t3
-; RV32M-NEXT: sltu t1, t1, t3
-; RV32M-NEXT: or t3, s11, ra
-; RV32M-NEXT: sub s10, a5, s10
-; RV32M-NEXT: seqz t5, s11
-; RV32M-NEXT: addi s11, s11, -1
-; RV32M-NEXT: andi t6, a7, 1
-; RV32M-NEXT: sub a5, t0, a3
-; RV32M-NEXT: seqz a3, t3
-; RV32M-NEXT: sub ra, ra, t5
-; RV32M-NEXT: or s1, a0, s1
-; RV32M-NEXT: or s5, s3, s5
-; RV32M-NEXT: or a1, a2, a1
-; RV32M-NEXT: sub a7, t4, t2
-; RV32M-NEXT: sltu a0, t4, t2
-; RV32M-NEXT: sub a2, s10, t1
-; RV32M-NEXT: sltu t0, s4, a3
-; RV32M-NEXT: sub s4, s4, a3
-; RV32M-NEXT: sub t2, a2, a0
-; RV32M-NEXT: sub s2, s2, t0
-; RV32M-NEXT: or a0, ra, s2
-; RV32M-NEXT: or a2, s11, s4
-; RV32M-NEXT: or a3, a2, a0
-; RV32M-NEXT: sub a4, a6, a4
-; RV32M-NEXT: li a0, 0
-; RV32M-NEXT: li s3, 0
-; RV32M-NEXT: li a2, 0
-; RV32M-NEXT: beqz a3, .LBB0_35
-; RV32M-NEXT: .LBB0_30: # %udiv-do-while3
-; RV32M-NEXT: # =>This Inner Loop Header: Depth=1
-; RV32M-NEXT: srli a3, a4, 31
-; RV32M-NEXT: slli a6, a5, 1
-; RV32M-NEXT: slli a4, a4, 1
-; RV32M-NEXT: or t0, a6, a3
-; RV32M-NEXT: srli a3, a1, 31
-; RV32M-NEXT: or a6, a4, a3
-; RV32M-NEXT: beq s7, t0, .LBB0_32
-; RV32M-NEXT: # %bb.31: # %udiv-do-while3
-; RV32M-NEXT: # in Loop: Header=BB0_30 Depth=1
-; RV32M-NEXT: sltu a3, s7, t0
-; RV32M-NEXT: j .LBB0_33
-; RV32M-NEXT: .LBB0_32: # in Loop: Header=BB0_30 Depth=1
-; RV32M-NEXT: lw a3, 64(sp) # 4-byte Folded Reload
-; RV32M-NEXT: sltu a3, a3, a6
-; RV32M-NEXT: .LBB0_33: # %udiv-do-while3
-; RV32M-NEXT: # in Loop: Header=BB0_30 Depth=1
-; RV32M-NEXT: mv t4, s6
-; RV32M-NEXT: srli a4, a7, 31
-; RV32M-NEXT: slli t2, t2, 1
-; RV32M-NEXT: srli t1, a5, 31
-; RV32M-NEXT: slli a7, a7, 1
-; RV32M-NEXT: or a5, t2, a4
-; RV32M-NEXT: or t1, a7, t1
-; RV32M-NEXT: sub a4, s8, t1
-; RV32M-NEXT: sltu a7, s8, t1
-; RV32M-NEXT: sub t2, s9, a5
-; RV32M-NEXT: sltu a3, a4, a3
-; RV32M-NEXT: sub a4, t2, a7
-; RV32M-NEXT: sub a4, a4, a3
-; RV32M-NEXT: srai a7, a4, 31
-; RV32M-NEXT: lw a3, 76(sp) # 4-byte Folded Reload
-; RV32M-NEXT: and a4, a7, a3
-; RV32M-NEXT: lw a3, 68(sp) # 4-byte Folded Reload
-; RV32M-NEXT: and s10, a7, a3
-; RV32M-NEXT: sltu a3, a6, a4
-; RV32M-NEXT: mv t2, a3
-; RV32M-NEXT: beq t0, s10, .LBB0_29
-; RV32M-NEXT: # %bb.34: # %udiv-do-while3
-; RV32M-NEXT: # in Loop: Header=BB0_30 Depth=1
-; RV32M-NEXT: sltu t2, t0, s10
-; RV32M-NEXT: j .LBB0_29
-; RV32M-NEXT: .LBB0_35: # %udiv-loop-exit2
-; RV32M-NEXT: srli a0, s0, 31
-; RV32M-NEXT: slli a3, s1, 1
-; RV32M-NEXT: srli s1, s1, 31
-; RV32M-NEXT: or a3, a3, a0
-; RV32M-NEXT: slli a0, s5, 1
-; RV32M-NEXT: srli a2, s5, 31
-; RV32M-NEXT: slli a1, a1, 1
-; RV32M-NEXT: slli s0, s0, 1
-; RV32M-NEXT: or s11, a0, s1
-; RV32M-NEXT: or ra, a1, a2
-; RV32M-NEXT: or a4, t6, s0
-; RV32M-NEXT: lw t6, 40(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw t3, 80(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw s3, 36(sp) # 4-byte Folded Reload
-; RV32M-NEXT: .LBB0_36: # %udiv-end1
-; RV32M-NEXT: lw s4, 44(sp) # 4-byte Folded Reload
-; RV32M-NEXT: xor a0, a4, s4
-; RV32M-NEXT: sw a0, 64(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sltu s5, a0, s4
-; RV32M-NEXT: xor a1, a3, s4
-; RV32M-NEXT: sw s5, 40(sp) # 4-byte Folded Spill
-; RV32M-NEXT: lw a6, 84(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw a4, 24(sp) # 4-byte Folded Reload
-; RV32M-NEXT: beqz a3, .LBB0_38
-; RV32M-NEXT: # %bb.37: # %udiv-end1
-; RV32M-NEXT: sltu s5, a1, s4
-; RV32M-NEXT: .LBB0_38: # %udiv-end1
-; RV32M-NEXT: lw s10, 52(sp) # 4-byte Folded Reload
-; RV32M-NEXT: neg a0, s10
-; RV32M-NEXT: lw s6, 60(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw s8, 56(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw a3, 48(sp) # 4-byte Folded Reload
-; RV32M-NEXT: sw a1, 20(sp) # 4-byte Folded Spill
-; RV32M-NEXT: bltz a3, .LBB0_40
-; RV32M-NEXT: # %bb.39: # %udiv-end1
-; RV32M-NEXT: mv s9, a3
-; RV32M-NEXT: bltz a3, .LBB0_41
-; RV32M-NEXT: j .LBB0_42
-; RV32M-NEXT: .LBB0_40:
-; RV32M-NEXT: sltu a1, a0, a4
-; RV32M-NEXT: lw a2, 12(sp) # 4-byte Folded Reload
-; RV32M-NEXT: neg a2, a2
-; RV32M-NEXT: sub s9, a2, a1
-; RV32M-NEXT: bgez a3, .LBB0_42
-; RV32M-NEXT: .LBB0_41:
-; RV32M-NEXT: sub s10, a0, a4
-; RV32M-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
-; RV32M-NEXT: add a0, s8, a0
-; RV32M-NEXT: neg s8, a0
-; RV32M-NEXT: neg s6, s6
-; RV32M-NEXT: .LBB0_42: # %udiv-end1
-; RV32M-NEXT: bgez t3, .LBB0_44
-; RV32M-NEXT: # %bb.43:
-; RV32M-NEXT: neg a0, a6
-; RV32M-NEXT: lw a1, 0(sp) # 4-byte Folded Reload
-; RV32M-NEXT: neg a1, a1
-; RV32M-NEXT: lw a2, 4(sp) # 4-byte Folded Reload
-; RV32M-NEXT: add a2, s3, a2
-; RV32M-NEXT: lw a4, 8(sp) # 4-byte Folded Reload
-; RV32M-NEXT: sltu a3, a0, a4
-; RV32M-NEXT: sub a6, a0, a4
-; RV32M-NEXT: neg s3, a2
-; RV32M-NEXT: sub t3, a1, a3
-; RV32M-NEXT: neg t6, t6
-; RV32M-NEXT: .LBB0_44: # %udiv-end1
-; RV32M-NEXT: lui a0, 349525
-; RV32M-NEXT: lui a1, 209715
-; RV32M-NEXT: lui a2, 61681
-; RV32M-NEXT: lui a3, 4112
-; RV32M-NEXT: addi s2, a0, 1365
-; RV32M-NEXT: addi s1, a1, 819
-; RV32M-NEXT: addi a1, a2, -241
-; RV32M-NEXT: addi s0, a3, 257
-; RV32M-NEXT: bnez s3, .LBB0_47
-; RV32M-NEXT: # %bb.45: # %udiv-end1
-; RV32M-NEXT: srli a0, t6, 1
-; RV32M-NEXT: or a0, t6, a0
-; RV32M-NEXT: srli a2, a0, 2
-; RV32M-NEXT: or a0, a0, a2
-; RV32M-NEXT: srli a2, a0, 4
-; RV32M-NEXT: or a0, a0, a2
-; RV32M-NEXT: srli a2, a0, 8
-; RV32M-NEXT: or a0, a0, a2
-; RV32M-NEXT: srli a2, a0, 16
-; RV32M-NEXT: or a0, a0, a2
-; RV32M-NEXT: not a0, a0
-; RV32M-NEXT: srli a2, a0, 1
-; RV32M-NEXT: and a2, a2, s2
-; RV32M-NEXT: sub a0, a0, a2
-; RV32M-NEXT: and a2, a0, s1
-; RV32M-NEXT: srli a0, a0, 2
-; RV32M-NEXT: and a0, a0, s1
-; RV32M-NEXT: add a0, a2, a0
-; RV32M-NEXT: srli a2, a0, 4
-; RV32M-NEXT: add a0, a0, a2
-; RV32M-NEXT: and a0, a0, a1
-; RV32M-NEXT: mul a0, a0, s0
-; RV32M-NEXT: srli a0, a0, 24
-; RV32M-NEXT: addi a2, a0, 32
-; RV32M-NEXT: or a5, a6, t3
-; RV32M-NEXT: beqz t3, .LBB0_48
-; RV32M-NEXT: .LBB0_46:
-; RV32M-NEXT: srli a0, t3, 1
-; RV32M-NEXT: or a0, t3, a0
-; RV32M-NEXT: srli a3, a0, 2
-; RV32M-NEXT: or a0, a0, a3
-; RV32M-NEXT: srli a3, a0, 4
-; RV32M-NEXT: or a0, a0, a3
-; RV32M-NEXT: srli a3, a0, 8
-; RV32M-NEXT: or a0, a0, a3
-; RV32M-NEXT: srli a3, a0, 16
-; RV32M-NEXT: or a0, a0, a3
-; RV32M-NEXT: not a0, a0
-; RV32M-NEXT: srli a3, a0, 1
-; RV32M-NEXT: and a3, a3, s2
-; RV32M-NEXT: sub a0, a0, a3
-; RV32M-NEXT: and a3, a0, s1
-; RV32M-NEXT: srli a0, a0, 2
-; RV32M-NEXT: and a0, a0, s1
-; RV32M-NEXT: add a0, a3, a0
-; RV32M-NEXT: srli a3, a0, 4
-; RV32M-NEXT: add a0, a0, a3
-; RV32M-NEXT: and a0, a0, a1
-; RV32M-NEXT: mul a0, a0, s0
-; RV32M-NEXT: srli a0, a0, 24
-; RV32M-NEXT: addi a4, a2, 64
-; RV32M-NEXT: beqz a5, .LBB0_49
-; RV32M-NEXT: j .LBB0_50
-; RV32M-NEXT: .LBB0_47:
-; RV32M-NEXT: srli a0, s3, 1
-; RV32M-NEXT: or a0, s3, a0
-; RV32M-NEXT: srli a2, a0, 2
-; RV32M-NEXT: or a0, a0, a2
-; RV32M-NEXT: srli a2, a0, 4
-; RV32M-NEXT: or a0, a0, a2
-; RV32M-NEXT: srli a2, a0, 8
-; RV32M-NEXT: or a0, a0, a2
-; RV32M-NEXT: srli a2, a0, 16
-; RV32M-NEXT: or a0, a0, a2
-; RV32M-NEXT: not a0, a0
-; RV32M-NEXT: srli a2, a0, 1
-; RV32M-NEXT: and a2, a2, s2
-; RV32M-NEXT: sub a0, a0, a2
-; RV32M-NEXT: and a2, a0, s1
-; RV32M-NEXT: srli a0, a0, 2
-; RV32M-NEXT: and a0, a0, s1
-; RV32M-NEXT: add a0, a2, a0
-; RV32M-NEXT: srli a2, a0, 4
-; RV32M-NEXT: add a0, a0, a2
-; RV32M-NEXT: and a0, a0, a1
-; RV32M-NEXT: mul a2, a0, s0
-; RV32M-NEXT: srli a2, a2, 24
-; RV32M-NEXT: or a5, a6, t3
-; RV32M-NEXT: bnez t3, .LBB0_46
-; RV32M-NEXT: .LBB0_48: # %udiv-end1
-; RV32M-NEXT: srli a0, a6, 1
-; RV32M-NEXT: or a0, a6, a0
-; RV32M-NEXT: srli a3, a0, 2
-; RV32M-NEXT: or a0, a0, a3
-; RV32M-NEXT: srli a3, a0, 4
-; RV32M-NEXT: or a0, a0, a3
-; RV32M-NEXT: srli a3, a0, 8
-; RV32M-NEXT: or a0, a0, a3
-; RV32M-NEXT: srli a3, a0, 16
-; RV32M-NEXT: or a0, a0, a3
-; RV32M-NEXT: not a0, a0
-; RV32M-NEXT: srli a3, a0, 1
-; RV32M-NEXT: and a3, a3, s2
-; RV32M-NEXT: sub a0, a0, a3
-; RV32M-NEXT: and a3, a0, s1
-; RV32M-NEXT: srli a0, a0, 2
-; RV32M-NEXT: and a0, a0, s1
-; RV32M-NEXT: add a0, a3, a0
-; RV32M-NEXT: srli a3, a0, 4
-; RV32M-NEXT: add a0, a0, a3
-; RV32M-NEXT: and a0, a0, a1
-; RV32M-NEXT: mul a0, a0, s0
-; RV32M-NEXT: srli a0, a0, 24
-; RV32M-NEXT: addi a0, a0, 32
-; RV32M-NEXT: addi a4, a2, 64
-; RV32M-NEXT: bnez a5, .LBB0_50
-; RV32M-NEXT: .LBB0_49: # %udiv-end1
-; RV32M-NEXT: mv a0, a4
-; RV32M-NEXT: .LBB0_50: # %udiv-end1
-; RV32M-NEXT: snez t0, a5
-; RV32M-NEXT: bnez s8, .LBB0_52
-; RV32M-NEXT: # %bb.51: # %udiv-end1
-; RV32M-NEXT: srli a3, s6, 1
-; RV32M-NEXT: or a3, s6, a3
-; RV32M-NEXT: srli a5, a3, 2
-; RV32M-NEXT: or a3, a3, a5
-; RV32M-NEXT: srli a5, a3, 4
-; RV32M-NEXT: or a3, a3, a5
-; RV32M-NEXT: srli a5, a3, 8
-; RV32M-NEXT: or a3, a3, a5
-; RV32M-NEXT: srli a5, a3, 16
-; RV32M-NEXT: or a3, a3, a5
-; RV32M-NEXT: not a3, a3
-; RV32M-NEXT: srli a5, a3, 1
-; RV32M-NEXT: and a5, a5, s2
-; RV32M-NEXT: sub a3, a3, a5
-; RV32M-NEXT: and a5, a3, s1
-; RV32M-NEXT: srli a3, a3, 2
-; RV32M-NEXT: and a3, a3, s1
-; RV32M-NEXT: add a3, a5, a3
-; RV32M-NEXT: srli a5, a3, 4
-; RV32M-NEXT: add a3, a3, a5
-; RV32M-NEXT: and a3, a3, a1
-; RV32M-NEXT: mul a3, a3, s0
-; RV32M-NEXT: srli a3, a3, 24
-; RV32M-NEXT: addi a3, a3, 32
-; RV32M-NEXT: j .LBB0_53
-; RV32M-NEXT: .LBB0_52:
-; RV32M-NEXT: srli a3, s8, 1
-; RV32M-NEXT: or a3, s8, a3
-; RV32M-NEXT: srli a5, a3, 2
-; RV32M-NEXT: or a3, a3, a5
-; RV32M-NEXT: srli a5, a3, 4
-; RV32M-NEXT: or a3, a3, a5
-; RV32M-NEXT: srli a5, a3, 8
-; RV32M-NEXT: or a3, a3, a5
-; RV32M-NEXT: srli a5, a3, 16
-; RV32M-NEXT: or a3, a3, a5
-; RV32M-NEXT: not a3, a3
-; RV32M-NEXT: srli a5, a3, 1
-; RV32M-NEXT: and a5, a5, s2
-; RV32M-NEXT: sub a3, a3, a5
-; RV32M-NEXT: and a5, a3, s1
-; RV32M-NEXT: srli a3, a3, 2
-; RV32M-NEXT: and a3, a3, s1
-; RV32M-NEXT: add a3, a5, a3
-; RV32M-NEXT: srli a5, a3, 4
-; RV32M-NEXT: add a3, a3, a5
-; RV32M-NEXT: and a3, a3, a1
-; RV32M-NEXT: mul a3, a3, s0
-; RV32M-NEXT: srli a3, a3, 24
-; RV32M-NEXT: .LBB0_53: # %udiv-end1
-; RV32M-NEXT: sw a6, 84(sp) # 4-byte Folded Spill
-; RV32M-NEXT: or a5, t6, a6
-; RV32M-NEXT: sw t3, 80(sp) # 4-byte Folded Spill
-; RV32M-NEXT: or a6, s3, t3
-; RV32M-NEXT: or a7, s6, s10
-; RV32M-NEXT: or t2, s8, s9
-; RV32M-NEXT: sltu a4, a4, a2
-; RV32M-NEXT: addi t0, t0, -1
-; RV32M-NEXT: addi a2, a3, 64
-; RV32M-NEXT: or t3, s10, s9
-; RV32M-NEXT: sltu t4, a2, a3
-; RV32M-NEXT: snez a3, t3
-; RV32M-NEXT: addi a3, a3, -1
-; RV32M-NEXT: bnez s9, .LBB0_55
-; RV32M-NEXT: # %bb.54: # %udiv-end1
-; RV32M-NEXT: srli t1, s10, 1
-; RV32M-NEXT: or t1, s10, t1
-; RV32M-NEXT: srli t5, t1, 2
-; RV32M-NEXT: or t1, t1, t5
-; RV32M-NEXT: srli t5, t1, 4
-; RV32M-NEXT: or t1, t1, t5
-; RV32M-NEXT: srli t5, t1, 8
-; RV32M-NEXT: or t1, t1, t5
-; RV32M-NEXT: srli t5, t1, 16
-; RV32M-NEXT: or t1, t1, t5
-; RV32M-NEXT: not t1, t1
-; RV32M-NEXT: srli t5, t1, 1
-; RV32M-NEXT: and t5, t5, s2
-; RV32M-NEXT: sub t1, t1, t5
-; RV32M-NEXT: and t5, t1, s1
-; RV32M-NEXT: srli t1, t1, 2
-; RV32M-NEXT: and t1, t1, s1
-; RV32M-NEXT: add t1, t5, t1
-; RV32M-NEXT: srli t5, t1, 4
-; RV32M-NEXT: add t1, t1, t5
-; RV32M-NEXT: and a1, t1, a1
-; RV32M-NEXT: mul a1, a1, s0
-; RV32M-NEXT: srli a1, a1, 24
-; RV32M-NEXT: addi t1, a1, 32
-; RV32M-NEXT: j .LBB0_56
-; RV32M-NEXT: .LBB0_55:
-; RV32M-NEXT: srli t1, s9, 1
-; RV32M-NEXT: or t1, s9, t1
-; RV32M-NEXT: srli t5, t1, 2
-; RV32M-NEXT: or t1, t1, t5
-; RV32M-NEXT: srli t5, t1, 4
-; RV32M-NEXT: or t1, t1, t5
-; RV32M-NEXT: srli t5, t1, 8
-; RV32M-NEXT: or t1, t1, t5
-; RV32M-NEXT: srli t5, t1, 16
-; RV32M-NEXT: or t1, t1, t5
-; RV32M-NEXT: not t1, t1
-; RV32M-NEXT: srli t5, t1, 1
-; RV32M-NEXT: and t5, t5, s2
-; RV32M-NEXT: sub t1, t1, t5
-; RV32M-NEXT: and t5, t1, s1
-; RV32M-NEXT: srli t1, t1, 2
-; RV32M-NEXT: and t1, t1, s1
-; RV32M-NEXT: add t1, t5, t1
-; RV32M-NEXT: srli t5, t1, 4
-; RV32M-NEXT: add t1, t1, t5
-; RV32M-NEXT: and a1, t1, a1
-; RV32M-NEXT: mul a1, a1, s0
-; RV32M-NEXT: srli t1, a1, 24
-; RV32M-NEXT: .LBB0_56: # %udiv-end1
-; RV32M-NEXT: or a5, a5, a6
-; RV32M-NEXT: or a6, a7, t2
-; RV32M-NEXT: and a1, t0, a4
-; RV32M-NEXT: and a3, a3, t4
-; RV32M-NEXT: bnez t3, .LBB0_58
-; RV32M-NEXT: # %bb.57: # %udiv-end1
-; RV32M-NEXT: mv t1, a2
-; RV32M-NEXT: .LBB0_58: # %udiv-end1
-; RV32M-NEXT: seqz a4, a5
-; RV32M-NEXT: seqz a5, a6
-; RV32M-NEXT: sltu a6, a0, t1
-; RV32M-NEXT: sub t0, a1, a3
-; RV32M-NEXT: mv a7, a6
-; RV32M-NEXT: beq a1, a3, .LBB0_60
-; RV32M-NEXT: # %bb.59: # %udiv-end1
-; RV32M-NEXT: sltu a7, a1, a3
-; RV32M-NEXT: .LBB0_60: # %udiv-end1
-; RV32M-NEXT: xor a2, ra, s4
-; RV32M-NEXT: xor a3, s11, s4
-; RV32M-NEXT: sub s0, t0, a6
-; RV32M-NEXT: or a4, a4, a5
-; RV32M-NEXT: neg a1, a7
-; RV32M-NEXT: seqz s7, a7
-; RV32M-NEXT: addi s7, s7, -1
-; RV32M-NEXT: or a5, a1, s7
-; RV32M-NEXT: sub a0, a0, t1
-; RV32M-NEXT: beqz a5, .LBB0_62
-; RV32M-NEXT: # %bb.61: # %udiv-end1
-; RV32M-NEXT: snez a5, a5
-; RV32M-NEXT: j .LBB0_63
-; RV32M-NEXT: .LBB0_62:
-; RV32M-NEXT: snez a5, s0
-; RV32M-NEXT: sltiu a6, a0, 128
-; RV32M-NEXT: xori a6, a6, 1
-; RV32M-NEXT: or a5, a6, a5
-; RV32M-NEXT: .LBB0_63: # %udiv-end1
-; RV32M-NEXT: sub s11, a3, s4
-; RV32M-NEXT: sltu t1, a3, s4
-; RV32M-NEXT: sub t2, a2, s4
-; RV32M-NEXT: or a5, a4, a5
-; RV32M-NEXT: addi a2, a5, -1
-; RV32M-NEXT: and a7, a2, s9
-; RV32M-NEXT: and t0, a2, s10
-; RV32M-NEXT: and a4, a2, s8
-; RV32M-NEXT: and a6, a2, s6
-; RV32M-NEXT: sw s9, 68(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw s5, 36(sp) # 4-byte Folded Spill
-; RV32M-NEXT: bnez a5, .LBB0_75
-; RV32M-NEXT: # %bb.64: # %udiv-end1
-; RV32M-NEXT: xori a2, a0, 127
-; RV32M-NEXT: or a2, a2, a1
-; RV32M-NEXT: or a3, s0, s7
-; RV32M-NEXT: or a2, a2, a3
-; RV32M-NEXT: beqz a2, .LBB0_75
-; RV32M-NEXT: # %bb.65: # %udiv-bb1
-; RV32M-NEXT: sw t2, 12(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw t1, 16(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw s11, 24(sp) # 4-byte Folded Spill
-; RV32M-NEXT: addi s4, a0, 1
-; RV32M-NEXT: li a2, 127
-; RV32M-NEXT: sub a2, a2, a0
-; RV32M-NEXT: sw zero, 120(sp)
-; RV32M-NEXT: sw zero, 124(sp)
-; RV32M-NEXT: sw zero, 128(sp)
-; RV32M-NEXT: sw zero, 132(sp)
-; RV32M-NEXT: sw s6, 136(sp)
-; RV32M-NEXT: sw s8, 140(sp)
-; RV32M-NEXT: sw s10, 144(sp)
-; RV32M-NEXT: lw t3, 68(sp) # 4-byte Folded Reload
-; RV32M-NEXT: sw t3, 148(sp)
-; RV32M-NEXT: addi a0, sp, 136
-; RV32M-NEXT: seqz a3, s4
-; RV32M-NEXT: srli a4, a2, 3
-; RV32M-NEXT: andi a5, a2, 31
-; RV32M-NEXT: add s0, s0, a3
-; RV32M-NEXT: andi a4, a4, 12
-; RV32M-NEXT: xori a3, a5, 31
-; RV32M-NEXT: or a5, s4, s0
-; RV32M-NEXT: sub a0, a0, a4
-; RV32M-NEXT: seqz s9, a5
-; RV32M-NEXT: lw a4, 0(a0)
-; RV32M-NEXT: lw a5, 4(a0)
-; RV32M-NEXT: lw a6, 8(a0)
-; RV32M-NEXT: lw a0, 12(a0)
-; RV32M-NEXT: add s9, a1, s9
-; RV32M-NEXT: sltu a1, s9, a1
-; RV32M-NEXT: or a7, s4, s9
-; RV32M-NEXT: add s7, s7, a1
-; RV32M-NEXT: or a1, s0, s7
-; RV32M-NEXT: srli t0, a6, 1
-; RV32M-NEXT: srli t1, a5, 1
-; RV32M-NEXT: or a7, a7, a1
-; RV32M-NEXT: srli a1, a4, 1
-; RV32M-NEXT: srl t0, t0, a3
-; RV32M-NEXT: srl t1, t1, a3
-; RV32M-NEXT: srl a1, a1, a3
-; RV32M-NEXT: sll a0, a0, a2
-; RV32M-NEXT: or a0, a0, t0
-; RV32M-NEXT: sll a3, a6, a2
-; RV32M-NEXT: sll a5, a5, a2
-; RV32M-NEXT: mv a6, s10
-; RV32M-NEXT: or s10, a3, t1
-; RV32M-NEXT: or a1, a5, a1
-; RV32M-NEXT: sll s5, a4, a2
-; RV32M-NEXT: sw s6, 60(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw s8, 56(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw a6, 52(sp) # 4-byte Folded Spill
-; RV32M-NEXT: beqz a7, .LBB0_73
-; RV32M-NEXT: # %bb.66: # %udiv-preheader
-; RV32M-NEXT: mv t2, s8
-; RV32M-NEXT: li s8, 0
-; RV32M-NEXT: li s11, 0
-; RV32M-NEXT: li ra, 0
-; RV32M-NEXT: li a2, 0
-; RV32M-NEXT: sw zero, 104(sp)
-; RV32M-NEXT: sw zero, 108(sp)
-; RV32M-NEXT: sw zero, 112(sp)
-; RV32M-NEXT: sw zero, 116(sp)
-; RV32M-NEXT: sw s6, 88(sp)
-; RV32M-NEXT: sw t2, 92(sp)
-; RV32M-NEXT: sw a6, 96(sp)
-; RV32M-NEXT: sw t3, 100(sp)
-; RV32M-NEXT: srli a3, s4, 3
-; RV32M-NEXT: addi a4, sp, 88
-; RV32M-NEXT: andi a3, a3, 12
-; RV32M-NEXT: add a3, a4, a3
-; RV32M-NEXT: lw a4, 4(a3)
-; RV32M-NEXT: lw a5, 8(a3)
-; RV32M-NEXT: lw a6, 12(a3)
-; RV32M-NEXT: lw a3, 0(a3)
-; RV32M-NEXT: andi a7, s4, 31
-; RV32M-NEXT: xori a7, a7, 31
-; RV32M-NEXT: slli t0, a6, 1
-; RV32M-NEXT: slli t1, a5, 1
-; RV32M-NEXT: slli t2, a4, 1
-; RV32M-NEXT: sll t0, t0, a7
-; RV32M-NEXT: sll t1, t1, a7
-; RV32M-NEXT: sll a7, t2, a7
-; RV32M-NEXT: seqz t2, t6
-; RV32M-NEXT: srl a5, a5, s4
-; RV32M-NEXT: or t5, a5, t0
-; RV32M-NEXT: or a5, t6, s3
-; RV32M-NEXT: sub s6, s3, t2
-; RV32M-NEXT: seqz t0, a5
-; RV32M-NEXT: srl a4, a4, s4
-; RV32M-NEXT: or a5, a4, t1
-; RV32M-NEXT: lw a4, 84(sp) # 4-byte Folded Reload
-; RV32M-NEXT: sub t3, a4, t0
-; RV32M-NEXT: sltu a4, a4, t0
-; RV32M-NEXT: lw t0, 80(sp) # 4-byte Folded Reload
-; RV32M-NEXT: sub a4, t0, a4
-; RV32M-NEXT: sw a4, 76(sp) # 4-byte Folded Spill
-; RV32M-NEXT: srl a3, a3, s4
-; RV32M-NEXT: srl t2, a6, s4
-; RV32M-NEXT: or a4, a3, a7
-; RV32M-NEXT: addi a3, t6, -1
-; RV32M-NEXT: sw a3, 72(sp) # 4-byte Folded Spill
-; RV32M-NEXT: j .LBB0_68
-; RV32M-NEXT: .LBB0_67: # %udiv-do-while
-; RV32M-NEXT: # in Loop: Header=BB0_68 Depth=1
-; RV32M-NEXT: srli s3, s10, 31
-; RV32M-NEXT: slli a0, a0, 1
-; RV32M-NEXT: sub t0, t0, s2
-; RV32M-NEXT: srli s2, a1, 31
-; RV32M-NEXT: slli s10, s10, 1
-; RV32M-NEXT: or a0, a0, s3
-; RV32M-NEXT: srli s3, s5, 31
-; RV32M-NEXT: slli a1, a1, 1
-; RV32M-NEXT: slli s5, s5, 1
-; RV32M-NEXT: or s2, s10, s2
-; RV32M-NEXT: lw t6, 80(sp) # 4-byte Folded Reload
-; RV32M-NEXT: and s10, t5, t6
-; RV32M-NEXT: or a1, a1, s3
-; RV32M-NEXT: lw t6, 84(sp) # 4-byte Folded Reload
-; RV32M-NEXT: and s3, t5, t6
-; RV32M-NEXT: or s5, s8, s5
-; RV32M-NEXT: sub s1, t1, s3
-; RV32M-NEXT: sltu t1, t1, s3
-; RV32M-NEXT: or s3, s4, s0
-; RV32M-NEXT: sub t6, a5, s10
-; RV32M-NEXT: seqz s10, s4
-; RV32M-NEXT: addi s4, s4, -1
-; RV32M-NEXT: andi s8, t5, 1
-; RV32M-NEXT: sub a5, t0, a3
-; RV32M-NEXT: seqz a3, s3
-; RV32M-NEXT: sub s0, s0, s10
-; RV32M-NEXT: or a1, s11, a1
-; RV32M-NEXT: or s10, ra, s2
-; RV32M-NEXT: or a0, a2, a0
-; RV32M-NEXT: sub t5, s1, t2
-; RV32M-NEXT: sltu a2, s1, t2
-; RV32M-NEXT: sub t0, t6, t1
-; RV32M-NEXT: sltu t1, s9, a3
-; RV32M-NEXT: sub s9, s9, a3
-; RV32M-NEXT: sub t2, t0, a2
-; RV32M-NEXT: sub s7, s7, t1
-; RV32M-NEXT: or a2, s0, s7
-; RV32M-NEXT: or a3, s4, s9
-; RV32M-NEXT: or a3, a3, a2
-; RV32M-NEXT: sub a4, a6, a4
-; RV32M-NEXT: li s11, 0
-; RV32M-NEXT: li ra, 0
-; RV32M-NEXT: li a2, 0
-; RV32M-NEXT: mv t6, a7
-; RV32M-NEXT: mv s3, t4
-; RV32M-NEXT: beqz a3, .LBB0_74
-; RV32M-NEXT: .LBB0_68: # %udiv-do-while
-; RV32M-NEXT: # =>This Inner Loop Header: Depth=1
-; RV32M-NEXT: srli a3, a4, 31
-; RV32M-NEXT: slli a6, a5, 1
-; RV32M-NEXT: slli a4, a4, 1
-; RV32M-NEXT: or t0, a6, a3
-; RV32M-NEXT: srli a3, a0, 31
-; RV32M-NEXT: or a6, a4, a3
-; RV32M-NEXT: beq s6, t0, .LBB0_70
-; RV32M-NEXT: # %bb.69: # %udiv-do-while
-; RV32M-NEXT: # in Loop: Header=BB0_68 Depth=1
-; RV32M-NEXT: sltu a3, s6, t0
-; RV32M-NEXT: j .LBB0_71
-; RV32M-NEXT: .LBB0_70: # in Loop: Header=BB0_68 Depth=1
-; RV32M-NEXT: lw a3, 72(sp) # 4-byte Folded Reload
-; RV32M-NEXT: sltu a3, a3, a6
-; RV32M-NEXT: .LBB0_71: # %udiv-do-while
-; RV32M-NEXT: # in Loop: Header=BB0_68 Depth=1
-; RV32M-NEXT: srli a4, t5, 31
-; RV32M-NEXT: slli t2, t2, 1
-; RV32M-NEXT: srli t1, a5, 31
-; RV32M-NEXT: slli t5, t5, 1
-; RV32M-NEXT: or a5, t2, a4
-; RV32M-NEXT: or t1, t5, t1
-; RV32M-NEXT: sub a4, t3, t1
-; RV32M-NEXT: sltu t2, t3, t1
-; RV32M-NEXT: lw a7, 76(sp) # 4-byte Folded Reload
-; RV32M-NEXT: sub t5, a7, a5
-; RV32M-NEXT: sltu a3, a4, a3
-; RV32M-NEXT: sub a4, t5, t2
-; RV32M-NEXT: sub a4, a4, a3
-; RV32M-NEXT: srai t5, a4, 31
-; RV32M-NEXT: mv a7, t6
-; RV32M-NEXT: and a4, t5, t6
-; RV32M-NEXT: mv t4, s3
-; RV32M-NEXT: and s2, t5, s3
-; RV32M-NEXT: sltu a3, a6, a4
-; RV32M-NEXT: mv t2, a3
-; RV32M-NEXT: beq t0, s2, .LBB0_67
-; RV32M-NEXT: # %bb.72: # %udiv-do-while
-; RV32M-NEXT: # in Loop: Header=BB0_68 Depth=1
-; RV32M-NEXT: sltu t2, t0, s2
-; RV32M-NEXT: j .LBB0_67
-; RV32M-NEXT: .LBB0_73:
-; RV32M-NEXT: li s8, 0
-; RV32M-NEXT: .LBB0_74: # %udiv-loop-exit
-; RV32M-NEXT: srli a2, s5, 31
-; RV32M-NEXT: slli a4, a1, 1
-; RV32M-NEXT: srli a1, a1, 31
-; RV32M-NEXT: or a4, a4, a2
-; RV32M-NEXT: slli a2, s10, 1
-; RV32M-NEXT: srli a3, s10, 31
-; RV32M-NEXT: slli a0, a0, 1
-; RV32M-NEXT: slli s5, s5, 1
-; RV32M-NEXT: or t0, a2, a1
-; RV32M-NEXT: or a7, a0, a3
-; RV32M-NEXT: or a6, s8, s5
-; RV32M-NEXT: lw s6, 60(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw s8, 56(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw s10, 52(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw s11, 24(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw t1, 16(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw t2, 12(sp) # 4-byte Folded Reload
-; RV32M-NEXT: .LBB0_75: # %udiv-end
-; RV32M-NEXT: lw s9, 36(sp) # 4-byte Folded Reload
-; RV32M-NEXT: sltu a1, s11, s9
-; RV32M-NEXT: sub a2, t2, t1
-; RV32M-NEXT: lw s7, 44(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
-; RV32M-NEXT: sub a5, a0, s7
-; RV32M-NEXT: lw a0, 48(sp) # 4-byte Folded Reload
-; RV32M-NEXT: srai a0, a0, 31
-; RV32M-NEXT: mulhu t1, t6, a6
-; RV32M-NEXT: mul t2, s3, a6
-; RV32M-NEXT: mulhu t3, s3, a6
-; RV32M-NEXT: mul t4, t6, a4
-; RV32M-NEXT: mulhu t5, t6, a4
-; RV32M-NEXT: mv a3, t6
-; RV32M-NEXT: mul t6, s3, a4
-; RV32M-NEXT: lw s4, 84(sp) # 4-byte Folded Reload
-; RV32M-NEXT: mul s0, a6, s4
-; RV32M-NEXT: mul s1, t0, a3
-; RV32M-NEXT: mulhu s2, s3, a4
-; RV32M-NEXT: mul s3, t0, s3
-; RV32M-NEXT: mulhu t0, t0, a3
-; RV32M-NEXT: mul a7, a7, a3
-; RV32M-NEXT: mul a4, a4, s4
-; RV32M-NEXT: mulhu s4, a6, s4
-; RV32M-NEXT: lw s5, 80(sp) # 4-byte Folded Reload
-; RV32M-NEXT: mul s5, a6, s5
-; RV32M-NEXT: mul a3, a3, a6
-; RV32M-NEXT: add a6, t2, t1
-; RV32M-NEXT: add s0, s1, s0
-; RV32M-NEXT: add t0, t0, s3
-; RV32M-NEXT: add s4, s4, s5
-; RV32M-NEXT: sltu t1, a6, t2
-; RV32M-NEXT: add a6, t4, a6
-; RV32M-NEXT: add a7, t0, a7
-; RV32M-NEXT: add a4, s4, a4
-; RV32M-NEXT: sltu t0, s0, s1
-; RV32M-NEXT: add t1, t3, t1
-; RV32M-NEXT: sltu t2, a6, t4
-; RV32M-NEXT: add a4, a7, a4
-; RV32M-NEXT: add t2, t5, t2
-; RV32M-NEXT: add a4, a4, t0
-; RV32M-NEXT: add t2, t1, t2
-; RV32M-NEXT: add a7, t6, t2
-; RV32M-NEXT: sltu t1, t2, t1
-; RV32M-NEXT: add t0, a7, s0
-; RV32M-NEXT: sltu t2, a7, t6
-; RV32M-NEXT: add t1, s2, t1
-; RV32M-NEXT: sltu a7, t0, a7
-; RV32M-NEXT: add t1, t1, t2
-; RV32M-NEXT: sltu t2, s10, t0
-; RV32M-NEXT: add a4, t1, a4
-; RV32M-NEXT: lw t1, 68(sp) # 4-byte Folded Reload
-; RV32M-NEXT: sub a7, t1, a7
-; RV32M-NEXT: sub a4, a7, a4
-; RV32M-NEXT: sltu t1, s6, a3
-; RV32M-NEXT: sub a7, a4, t2
-; RV32M-NEXT: mv t2, t1
-; RV32M-NEXT: beq s8, a6, .LBB0_77
-; RV32M-NEXT: # %bb.76: # %udiv-end
-; RV32M-NEXT: sltu t2, s8, a6
-; RV32M-NEXT: .LBB0_77: # %udiv-end
-; RV32M-NEXT: sub a2, a2, a1
-; RV32M-NEXT: lw a1, 40(sp) # 4-byte Folded Reload
-; RV32M-NEXT: sub a1, a5, a1
-; RV32M-NEXT: sub a4, s11, s9
-; RV32M-NEXT: lw a5, 64(sp) # 4-byte Folded Reload
-; RV32M-NEXT: sub a5, a5, s7
-; RV32M-NEXT: sub t0, s10, t0
-; RV32M-NEXT: sub a6, s8, a6
-; RV32M-NEXT: sub a3, s6, a3
-; RV32M-NEXT: sltu t3, t0, t2
-; RV32M-NEXT: sub t0, t0, t2
-; RV32M-NEXT: sub t4, a6, t1
-; RV32M-NEXT: xor a3, a3, a0
-; RV32M-NEXT: sub t1, a7, t3
-; RV32M-NEXT: xor t2, t0, a0
-; RV32M-NEXT: xor a7, t4, a0
-; RV32M-NEXT: sltu a6, a3, a0
-; RV32M-NEXT: xor t0, t1, a0
-; RV32M-NEXT: sltu t1, t2, a0
-; RV32M-NEXT: add t1, a0, t1
-; RV32M-NEXT: sub t0, t0, t1
-; RV32M-NEXT: sub t1, t2, a0
-; RV32M-NEXT: mv t2, a6
-; RV32M-NEXT: beqz t4, .LBB0_79
-; RV32M-NEXT: # %bb.78: # %udiv-end
-; RV32M-NEXT: sltu t2, a7, a0
-; RV32M-NEXT: .LBB0_79: # %udiv-end
-; RV32M-NEXT: sltu t3, t1, t2
-; RV32M-NEXT: sub t1, t1, t2
-; RV32M-NEXT: sub a7, a7, a0
-; RV32M-NEXT: sub a3, a3, a0
-; RV32M-NEXT: lw a0, 28(sp) # 4-byte Folded Reload
-; RV32M-NEXT: sw a5, 0(a0)
-; RV32M-NEXT: sw a1, 4(a0)
-; RV32M-NEXT: sw a4, 8(a0)
-; RV32M-NEXT: sw a2, 12(a0)
-; RV32M-NEXT: sub a0, t0, t3
-; RV32M-NEXT: sub a1, a7, a6
-; RV32M-NEXT: lw a2, 32(sp) # 4-byte Folded Reload
-; RV32M-NEXT: sw a3, 0(a2)
-; RV32M-NEXT: sw a1, 4(a2)
-; RV32M-NEXT: sw t1, 8(a2)
-; RV32M-NEXT: sw a0, 12(a2)
-; RV32M-NEXT: lw ra, 268(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw s0, 264(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw s1, 260(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw s2, 256(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw s3, 252(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw s4, 248(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw s5, 244(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw s6, 240(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw s7, 236(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw s8, 232(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw s9, 228(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw s10, 224(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw s11, 220(sp) # 4-byte Folded Reload
-; RV32M-NEXT: .cfi_restore ra
-; RV32M-NEXT: .cfi_restore s0
-; RV32M-NEXT: .cfi_restore s1
-; RV32M-NEXT: .cfi_restore s2
-; RV32M-NEXT: .cfi_restore s3
-; RV32M-NEXT: .cfi_restore s4
-; RV32M-NEXT: .cfi_restore s5
-; RV32M-NEXT: .cfi_restore s6
-; RV32M-NEXT: .cfi_restore s7
-; RV32M-NEXT: .cfi_restore s8
-; RV32M-NEXT: .cfi_restore s9
-; RV32M-NEXT: .cfi_restore s10
-; RV32M-NEXT: .cfi_restore s11
-; RV32M-NEXT: addi sp, sp, 272
-; RV32M-NEXT: .cfi_def_cfa_offset 0
-; RV32M-NEXT: ret
+; RV32M-NOT: __divmodti4
+; RV32M-NOT: __divti3
+; RV32M-NOT: __modti3
+; RV32M: ret
%q = sdiv i128 %n, %d
%r = srem i128 %n, %d
store i128 %q, ptr %q_out
@@ -2670,2343 +38,27 @@ define void @sdivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
define void @udivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
; RV64-LABEL: udivrem_i128:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -48
-; RV64-NEXT: .cfi_def_cfa_offset 48
-; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: mv a6, a4
-; RV64-NEXT: mv s0, a1
-; RV64-NEXT: mv s1, a0
-; RV64-NEXT: mv a4, sp
-; RV64-NEXT: mv a0, a2
-; RV64-NEXT: mv a1, a3
-; RV64-NEXT: mv a2, a6
-; RV64-NEXT: mv a3, a5
-; RV64-NEXT: call __udivmodti4
-; RV64-NEXT: ld a2, 0(sp)
-; RV64-NEXT: ld a3, 8(sp)
-; RV64-NEXT: sd a0, 0(s1)
-; RV64-NEXT: sd a1, 8(s1)
-; RV64-NEXT: sd a2, 0(s0)
-; RV64-NEXT: sd a3, 8(s0)
-; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT: .cfi_restore ra
-; RV64-NEXT: .cfi_restore s0
-; RV64-NEXT: .cfi_restore s1
-; RV64-NEXT: addi sp, sp, 48
-; RV64-NEXT: .cfi_def_cfa_offset 0
-; RV64-NEXT: ret
+; RV64: mv a4, sp
+; RV64: call __udivmodti4
+; RV64: ld a2, 0(sp)
+; RV64: ld a3, 8(sp)
+; RV64: sd a0, 0(s1)
+; RV64: sd a1, 8(s1)
+; RV64: sd a2, 0(s0)
+; RV64: sd a3, 8(s0)
+; RV64: ret
;
; RV32I-LABEL: udivrem_i128:
-; RV32I: # %bb.0: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: addi sp, sp, -240
-; RV32I-NEXT: .cfi_def_cfa_offset 240
-; RV32I-NEXT: sw ra, 236(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s0, 232(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s1, 228(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s2, 224(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s3, 220(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s4, 216(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s5, 212(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s6, 208(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s7, 204(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s8, 200(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s9, 196(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s10, 192(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s11, 188(sp) # 4-byte Folded Spill
-; RV32I-NEXT: .cfi_offset ra, -4
-; RV32I-NEXT: .cfi_offset s0, -8
-; RV32I-NEXT: .cfi_offset s1, -12
-; RV32I-NEXT: .cfi_offset s2, -16
-; RV32I-NEXT: .cfi_offset s3, -20
-; RV32I-NEXT: .cfi_offset s4, -24
-; RV32I-NEXT: .cfi_offset s5, -28
-; RV32I-NEXT: .cfi_offset s6, -32
-; RV32I-NEXT: .cfi_offset s7, -36
-; RV32I-NEXT: .cfi_offset s8, -40
-; RV32I-NEXT: .cfi_offset s9, -44
-; RV32I-NEXT: .cfi_offset s10, -48
-; RV32I-NEXT: .cfi_offset s11, -52
-; RV32I-NEXT: mv a4, a2
-; RV32I-NEXT: sw a1, 24(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
-; RV32I-NEXT: lw s11, 0(a3)
-; RV32I-NEXT: lw s10, 4(a3)
-; RV32I-NEXT: lw s9, 8(a3)
-; RV32I-NEXT: lw ra, 12(a3)
-; RV32I-NEXT: lui a5, 349525
-; RV32I-NEXT: addi a5, a5, 1365
-; RV32I-NEXT: lui a1, 209715
-; RV32I-NEXT: addi a1, a1, 819
-; RV32I-NEXT: lui a0, 61681
-; RV32I-NEXT: addi a0, a0, -241
-; RV32I-NEXT: bnez s10, .LBB1_2
-; RV32I-NEXT: # %bb.1: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: srli a6, s11, 1
-; RV32I-NEXT: or a6, s11, a6
-; RV32I-NEXT: srli a7, a6, 2
-; RV32I-NEXT: or a6, a6, a7
-; RV32I-NEXT: srli a7, a6, 4
-; RV32I-NEXT: or a6, a6, a7
-; RV32I-NEXT: srli a7, a6, 8
-; RV32I-NEXT: or a6, a6, a7
-; RV32I-NEXT: srli a7, a6, 16
-; RV32I-NEXT: or a6, a6, a7
-; RV32I-NEXT: not a6, a6
-; RV32I-NEXT: srli a7, a6, 1
-; RV32I-NEXT: and a7, a7, a5
-; RV32I-NEXT: sub a6, a6, a7
-; RV32I-NEXT: and a7, a6, a1
-; RV32I-NEXT: srli a6, a6, 2
-; RV32I-NEXT: and a6, a6, a1
-; RV32I-NEXT: add a6, a7, a6
-; RV32I-NEXT: srli a7, a6, 4
-; RV32I-NEXT: add a6, a6, a7
-; RV32I-NEXT: and a6, a6, a0
-; RV32I-NEXT: slli a7, a6, 8
-; RV32I-NEXT: add a6, a6, a7
-; RV32I-NEXT: slli a7, a6, 16
-; RV32I-NEXT: add a6, a6, a7
-; RV32I-NEXT: srli a6, a6, 24
-; RV32I-NEXT: addi a7, a6, 32
-; RV32I-NEXT: j .LBB1_3
-; RV32I-NEXT: .LBB1_2:
-; RV32I-NEXT: srli a6, s10, 1
-; RV32I-NEXT: or a6, s10, a6
-; RV32I-NEXT: srli a7, a6, 2
-; RV32I-NEXT: or a6, a6, a7
-; RV32I-NEXT: srli a7, a6, 4
-; RV32I-NEXT: or a6, a6, a7
-; RV32I-NEXT: srli a7, a6, 8
-; RV32I-NEXT: or a6, a6, a7
-; RV32I-NEXT: srli a7, a6, 16
-; RV32I-NEXT: or a6, a6, a7
-; RV32I-NEXT: not a6, a6
-; RV32I-NEXT: srli a7, a6, 1
-; RV32I-NEXT: and a7, a7, a5
-; RV32I-NEXT: sub a6, a6, a7
-; RV32I-NEXT: and a7, a6, a1
-; RV32I-NEXT: srli a6, a6, 2
-; RV32I-NEXT: and a6, a6, a1
-; RV32I-NEXT: add a6, a7, a6
-; RV32I-NEXT: srli a7, a6, 4
-; RV32I-NEXT: add a6, a6, a7
-; RV32I-NEXT: and a6, a6, a0
-; RV32I-NEXT: slli a7, a6, 8
-; RV32I-NEXT: add a6, a6, a7
-; RV32I-NEXT: slli a7, a6, 16
-; RV32I-NEXT: add a6, a6, a7
-; RV32I-NEXT: srli a7, a6, 24
-; RV32I-NEXT: .LBB1_3: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: lw s3, 4(a4)
-; RV32I-NEXT: or t1, s9, ra
-; RV32I-NEXT: bnez ra, .LBB1_5
-; RV32I-NEXT: # %bb.4: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: srli a6, s9, 1
-; RV32I-NEXT: or a6, s9, a6
-; RV32I-NEXT: srli t0, a6, 2
-; RV32I-NEXT: or a6, a6, t0
-; RV32I-NEXT: srli t0, a6, 4
-; RV32I-NEXT: or a6, a6, t0
-; RV32I-NEXT: srli t0, a6, 8
-; RV32I-NEXT: or a6, a6, t0
-; RV32I-NEXT: srli t0, a6, 16
-; RV32I-NEXT: or a6, a6, t0
-; RV32I-NEXT: not a6, a6
-; RV32I-NEXT: srli t0, a6, 1
-; RV32I-NEXT: and t0, t0, a5
-; RV32I-NEXT: sub a6, a6, t0
-; RV32I-NEXT: and t0, a6, a1
-; RV32I-NEXT: srli a6, a6, 2
-; RV32I-NEXT: and a6, a6, a1
-; RV32I-NEXT: add a6, t0, a6
-; RV32I-NEXT: srli t0, a6, 4
-; RV32I-NEXT: add a6, a6, t0
-; RV32I-NEXT: and a6, a6, a0
-; RV32I-NEXT: slli t0, a6, 8
-; RV32I-NEXT: add a6, a6, t0
-; RV32I-NEXT: slli t0, a6, 16
-; RV32I-NEXT: add a6, a6, t0
-; RV32I-NEXT: srli a6, a6, 24
-; RV32I-NEXT: addi a6, a6, 32
-; RV32I-NEXT: j .LBB1_6
-; RV32I-NEXT: .LBB1_5:
-; RV32I-NEXT: srli a6, ra, 1
-; RV32I-NEXT: or a6, ra, a6
-; RV32I-NEXT: srli t0, a6, 2
-; RV32I-NEXT: or a6, a6, t0
-; RV32I-NEXT: srli t0, a6, 4
-; RV32I-NEXT: or a6, a6, t0
-; RV32I-NEXT: srli t0, a6, 8
-; RV32I-NEXT: or a6, a6, t0
-; RV32I-NEXT: srli t0, a6, 16
-; RV32I-NEXT: or a6, a6, t0
-; RV32I-NEXT: not a6, a6
-; RV32I-NEXT: srli t0, a6, 1
-; RV32I-NEXT: and t0, t0, a5
-; RV32I-NEXT: sub a6, a6, t0
-; RV32I-NEXT: and t0, a6, a1
-; RV32I-NEXT: srli a6, a6, 2
-; RV32I-NEXT: and a6, a6, a1
-; RV32I-NEXT: add a6, t0, a6
-; RV32I-NEXT: srli t0, a6, 4
-; RV32I-NEXT: add a6, a6, t0
-; RV32I-NEXT: and a6, a6, a0
-; RV32I-NEXT: slli t0, a6, 8
-; RV32I-NEXT: add a6, a6, t0
-; RV32I-NEXT: slli t0, a6, 16
-; RV32I-NEXT: add a6, a6, t0
-; RV32I-NEXT: srli a6, a6, 24
-; RV32I-NEXT: .LBB1_6: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: lw s5, 12(a4)
-; RV32I-NEXT: addi t0, a7, 64
-; RV32I-NEXT: bnez t1, .LBB1_8
-; RV32I-NEXT: # %bb.7: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: mv a6, t0
-; RV32I-NEXT: .LBB1_8: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: lw s2, 0(a4)
-; RV32I-NEXT: lw s6, 8(a4)
-; RV32I-NEXT: snez a4, t1
-; RV32I-NEXT: bnez s3, .LBB1_10
-; RV32I-NEXT: # %bb.9: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: srli t1, s2, 1
-; RV32I-NEXT: or t1, s2, t1
-; RV32I-NEXT: srli t2, t1, 2
-; RV32I-NEXT: or t1, t1, t2
-; RV32I-NEXT: srli t2, t1, 4
-; RV32I-NEXT: or t1, t1, t2
-; RV32I-NEXT: srli t2, t1, 8
-; RV32I-NEXT: or t1, t1, t2
-; RV32I-NEXT: srli t2, t1, 16
-; RV32I-NEXT: or t1, t1, t2
-; RV32I-NEXT: not t1, t1
-; RV32I-NEXT: srli t2, t1, 1
-; RV32I-NEXT: and t2, t2, a5
-; RV32I-NEXT: sub t1, t1, t2
-; RV32I-NEXT: and t2, t1, a1
-; RV32I-NEXT: srli t1, t1, 2
-; RV32I-NEXT: and t1, t1, a1
-; RV32I-NEXT: add t1, t2, t1
-; RV32I-NEXT: srli t2, t1, 4
-; RV32I-NEXT: add t1, t1, t2
-; RV32I-NEXT: and t1, t1, a0
-; RV32I-NEXT: slli t2, t1, 8
-; RV32I-NEXT: add t1, t1, t2
-; RV32I-NEXT: slli t2, t1, 16
-; RV32I-NEXT: add t1, t1, t2
-; RV32I-NEXT: srli t1, t1, 24
-; RV32I-NEXT: addi t6, t1, 32
-; RV32I-NEXT: j .LBB1_11
-; RV32I-NEXT: .LBB1_10:
-; RV32I-NEXT: srli t1, s3, 1
-; RV32I-NEXT: or t1, s3, t1
-; RV32I-NEXT: srli t2, t1, 2
-; RV32I-NEXT: or t1, t1, t2
-; RV32I-NEXT: srli t2, t1, 4
-; RV32I-NEXT: or t1, t1, t2
-; RV32I-NEXT: srli t2, t1, 8
-; RV32I-NEXT: or t1, t1, t2
-; RV32I-NEXT: srli t2, t1, 16
-; RV32I-NEXT: or t1, t1, t2
-; RV32I-NEXT: not t1, t1
-; RV32I-NEXT: srli t2, t1, 1
-; RV32I-NEXT: and t2, t2, a5
-; RV32I-NEXT: sub t1, t1, t2
-; RV32I-NEXT: and t2, t1, a1
-; RV32I-NEXT: srli t1, t1, 2
-; RV32I-NEXT: and t1, t1, a1
-; RV32I-NEXT: add t1, t2, t1
-; RV32I-NEXT: srli t2, t1, 4
-; RV32I-NEXT: add t1, t1, t2
-; RV32I-NEXT: and t1, t1, a0
-; RV32I-NEXT: slli t2, t1, 8
-; RV32I-NEXT: add t1, t1, t2
-; RV32I-NEXT: slli t2, t1, 16
-; RV32I-NEXT: add t1, t1, t2
-; RV32I-NEXT: srli t6, t1, 24
-; RV32I-NEXT: .LBB1_11: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: or t1, s10, ra
-; RV32I-NEXT: or t2, s11, s9
-; RV32I-NEXT: or t3, s3, s5
-; RV32I-NEXT: or t4, s2, s6
-; RV32I-NEXT: sltu a7, t0, a7
-; RV32I-NEXT: addi t0, a4, -1
-; RV32I-NEXT: addi a4, t6, 64
-; RV32I-NEXT: or t5, s6, s5
-; RV32I-NEXT: sltu t6, a4, t6
-; RV32I-NEXT: snez s4, t5
-; RV32I-NEXT: addi s4, s4, -1
-; RV32I-NEXT: bnez s5, .LBB1_13
-; RV32I-NEXT: # %bb.12: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: srli s0, s6, 1
-; RV32I-NEXT: or s0, s6, s0
-; RV32I-NEXT: srli s1, s0, 2
-; RV32I-NEXT: or s0, s0, s1
-; RV32I-NEXT: srli s1, s0, 4
-; RV32I-NEXT: or s0, s0, s1
-; RV32I-NEXT: srli s1, s0, 8
-; RV32I-NEXT: or s0, s0, s1
-; RV32I-NEXT: srli s1, s0, 16
-; RV32I-NEXT: or s0, s0, s1
-; RV32I-NEXT: not s0, s0
-; RV32I-NEXT: srli s1, s0, 1
-; RV32I-NEXT: and a5, s1, a5
-; RV32I-NEXT: sub s0, s0, a5
-; RV32I-NEXT: and a5, s0, a1
-; RV32I-NEXT: srli s0, s0, 2
-; RV32I-NEXT: and a1, s0, a1
-; RV32I-NEXT: add a1, a5, a1
-; RV32I-NEXT: srli a5, a1, 4
-; RV32I-NEXT: add a1, a1, a5
-; RV32I-NEXT: and a0, a1, a0
-; RV32I-NEXT: slli a1, a0, 8
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: slli a1, a0, 16
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: addi a0, a0, 32
-; RV32I-NEXT: j .LBB1_14
-; RV32I-NEXT: .LBB1_13:
-; RV32I-NEXT: srli s0, s5, 1
-; RV32I-NEXT: or s0, s5, s0
-; RV32I-NEXT: srli s1, s0, 2
-; RV32I-NEXT: or s0, s0, s1
-; RV32I-NEXT: srli s1, s0, 4
-; RV32I-NEXT: or s0, s0, s1
-; RV32I-NEXT: srli s1, s0, 8
-; RV32I-NEXT: or s0, s0, s1
-; RV32I-NEXT: srli s1, s0, 16
-; RV32I-NEXT: or s0, s0, s1
-; RV32I-NEXT: not s0, s0
-; RV32I-NEXT: srli s1, s0, 1
-; RV32I-NEXT: and a5, s1, a5
-; RV32I-NEXT: sub s0, s0, a5
-; RV32I-NEXT: and a5, s0, a1
-; RV32I-NEXT: srli s0, s0, 2
-; RV32I-NEXT: and a1, s0, a1
-; RV32I-NEXT: add a1, a5, a1
-; RV32I-NEXT: srli a5, a1, 4
-; RV32I-NEXT: add a1, a1, a5
-; RV32I-NEXT: and a0, a1, a0
-; RV32I-NEXT: slli a1, a0, 8
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: slli a1, a0, 16
-; RV32I-NEXT: add a0, a0, a1
-; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: .LBB1_14: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: or t1, t2, t1
-; RV32I-NEXT: or t2, t4, t3
-; RV32I-NEXT: and a1, t0, a7
-; RV32I-NEXT: and a5, s4, t6
-; RV32I-NEXT: bnez t5, .LBB1_16
-; RV32I-NEXT: # %bb.15: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: mv a0, a4
-; RV32I-NEXT: .LBB1_16: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: seqz a7, t1
-; RV32I-NEXT: seqz t0, t2
-; RV32I-NEXT: sltu a4, a6, a0
-; RV32I-NEXT: sub t2, a1, a5
-; RV32I-NEXT: mv t1, a4
-; RV32I-NEXT: beq a1, a5, .LBB1_18
-; RV32I-NEXT: # %bb.17: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: sltu t1, a1, a5
-; RV32I-NEXT: .LBB1_18: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: sub a4, t2, a4
-; RV32I-NEXT: or a1, a7, t0
-; RV32I-NEXT: neg a5, t1
-; RV32I-NEXT: seqz a7, t1
-; RV32I-NEXT: addi a7, a7, -1
-; RV32I-NEXT: or t0, a5, a7
-; RV32I-NEXT: sub a0, a6, a0
-; RV32I-NEXT: beqz t0, .LBB1_20
-; RV32I-NEXT: # %bb.19: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: snez a6, t0
-; RV32I-NEXT: j .LBB1_21
-; RV32I-NEXT: .LBB1_20:
-; RV32I-NEXT: snez a6, a4
-; RV32I-NEXT: sltiu t0, a0, 128
-; RV32I-NEXT: xori t0, t0, 1
-; RV32I-NEXT: or a6, t0, a6
-; RV32I-NEXT: .LBB1_21: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: li s8, 127
-; RV32I-NEXT: or a1, a1, a6
-; RV32I-NEXT: addi a6, a1, -1
-; RV32I-NEXT: and t1, a6, s5
-; RV32I-NEXT: and t2, a6, s6
-; RV32I-NEXT: and t0, a6, s3
-; RV32I-NEXT: and t3, a6, s2
-; RV32I-NEXT: sw s10, 52(sp) # 4-byte Folded Spill
-; RV32I-NEXT: mv a2, s11
-; RV32I-NEXT: sw s3, 28(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw ra, 48(sp) # 4-byte Folded Spill
-; RV32I-NEXT: mv a3, s9
-; RV32I-NEXT: sw s2, 40(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s5, 36(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw s6, 32(sp) # 4-byte Folded Spill
-; RV32I-NEXT: bnez a1, .LBB1_32
-; RV32I-NEXT: # %bb.22: # %_udiv-special-cases_udiv-special-cases
-; RV32I-NEXT: xori a1, a0, 127
-; RV32I-NEXT: or a1, a1, a5
-; RV32I-NEXT: or a6, a4, a7
-; RV32I-NEXT: or a1, a1, a6
-; RV32I-NEXT: beqz a1, .LBB1_32
-; RV32I-NEXT: # %bb.23: # %udiv-bb15
-; RV32I-NEXT: addi a1, a0, 1
-; RV32I-NEXT: sub a0, s8, a0
-; RV32I-NEXT: sw zero, 152(sp)
-; RV32I-NEXT: sw zero, 156(sp)
-; RV32I-NEXT: sw zero, 160(sp)
-; RV32I-NEXT: sw zero, 164(sp)
-; RV32I-NEXT: sw s2, 168(sp)
-; RV32I-NEXT: sw s3, 172(sp)
-; RV32I-NEXT: sw s6, 176(sp)
-; RV32I-NEXT: sw s5, 180(sp)
-; RV32I-NEXT: addi a6, sp, 168
-; RV32I-NEXT: seqz t0, a1
-; RV32I-NEXT: srli t1, a0, 3
-; RV32I-NEXT: andi t2, a0, 31
-; RV32I-NEXT: add a4, a4, t0
-; RV32I-NEXT: andi t0, t1, 12
-; RV32I-NEXT: xori t1, t2, 31
-; RV32I-NEXT: or t2, a1, a4
-; RV32I-NEXT: sub a6, a6, t0
-; RV32I-NEXT: seqz t0, t2
-; RV32I-NEXT: lw t4, 0(a6)
-; RV32I-NEXT: lw t2, 4(a6)
-; RV32I-NEXT: lw t5, 8(a6)
-; RV32I-NEXT: lw t3, 12(a6)
-; RV32I-NEXT: add t0, a5, t0
-; RV32I-NEXT: sltu a6, t0, a5
-; RV32I-NEXT: or a5, a1, t0
-; RV32I-NEXT: add a6, a7, a6
-; RV32I-NEXT: or a7, a4, a6
-; RV32I-NEXT: srli t6, t5, 1
-; RV32I-NEXT: srli s0, t2, 1
-; RV32I-NEXT: or a7, a5, a7
-; RV32I-NEXT: srli a5, t4, 1
-; RV32I-NEXT: srl t6, t6, t1
-; RV32I-NEXT: srl s0, s0, t1
-; RV32I-NEXT: srl a5, a5, t1
-; RV32I-NEXT: sll t1, t3, a0
-; RV32I-NEXT: or t3, t1, t6
-; RV32I-NEXT: sll t1, t5, a0
-; RV32I-NEXT: sll t2, t2, a0
-; RV32I-NEXT: or t1, t1, s0
-; RV32I-NEXT: or t2, t2, a5
-; RV32I-NEXT: sll a5, t4, a0
-; RV32I-NEXT: beqz a7, .LBB1_31
-; RV32I-NEXT: # %bb.24: # %udiv-preheader4
-; RV32I-NEXT: li a7, 0
-; RV32I-NEXT: li t4, 0
-; RV32I-NEXT: li t5, 0
-; RV32I-NEXT: li t6, 0
-; RV32I-NEXT: sw zero, 136(sp)
-; RV32I-NEXT: sw zero, 140(sp)
-; RV32I-NEXT: sw zero, 144(sp)
-; RV32I-NEXT: sw zero, 148(sp)
-; RV32I-NEXT: sw s2, 120(sp)
-; RV32I-NEXT: sw s3, 124(sp)
-; RV32I-NEXT: sw s6, 128(sp)
-; RV32I-NEXT: sw s5, 132(sp)
-; RV32I-NEXT: srli a0, a1, 3
-; RV32I-NEXT: addi s0, sp, 120
-; RV32I-NEXT: andi a0, a0, 12
-; RV32I-NEXT: add a0, s0, a0
-; RV32I-NEXT: lw s0, 4(a0)
-; RV32I-NEXT: lw s1, 8(a0)
-; RV32I-NEXT: lw s2, 12(a0)
-; RV32I-NEXT: lw a0, 0(a0)
-; RV32I-NEXT: andi s3, a1, 31
-; RV32I-NEXT: xori s3, s3, 31
-; RV32I-NEXT: slli s4, s2, 1
-; RV32I-NEXT: slli s5, s1, 1
-; RV32I-NEXT: slli s6, s0, 1
-; RV32I-NEXT: sll s4, s4, s3
-; RV32I-NEXT: sll s5, s5, s3
-; RV32I-NEXT: sll s3, s6, s3
-; RV32I-NEXT: seqz s6, s11
-; RV32I-NEXT: srl s1, s1, a1
-; RV32I-NEXT: or s8, s1, s4
-; RV32I-NEXT: or s1, s11, s10
-; RV32I-NEXT: sub s4, s10, s6
-; RV32I-NEXT: seqz s1, s1
-; RV32I-NEXT: srl s0, s0, a1
-; RV32I-NEXT: or s9, s0, s5
-; RV32I-NEXT: sub s5, a3, s1
-; RV32I-NEXT: sltu s0, a3, s1
-; RV32I-NEXT: sub s6, ra, s0
-; RV32I-NEXT: srl s0, a0, a1
-; RV32I-NEXT: srl a0, s2, a1
-; RV32I-NEXT: mv s1, s11
-; RV32I-NEXT: or s11, s0, s3
-; RV32I-NEXT: addi s1, s1, -1
-; RV32I-NEXT: sw s1, 44(sp) # 4-byte Folded Spill
-; RV32I-NEXT: j .LBB1_26
-; RV32I-NEXT: .LBB1_25: # %udiv-do-while3
-; RV32I-NEXT: # in Loop: Header=BB1_26 Depth=1
-; RV32I-NEXT: srli s2, t1, 31
-; RV32I-NEXT: slli t3, t3, 1
-; RV32I-NEXT: sub s7, s10, s7
-; RV32I-NEXT: srli s10, t2, 31
-; RV32I-NEXT: slli t1, t1, 1
-; RV32I-NEXT: or t3, t3, s2
-; RV32I-NEXT: srli s2, a5, 31
-; RV32I-NEXT: slli t2, t2, 1
-; RV32I-NEXT: slli a5, a5, 1
-; RV32I-NEXT: or t1, t1, s10
-; RV32I-NEXT: lw s3, 48(sp) # 4-byte Folded Reload
-; RV32I-NEXT: and s10, s9, s3
-; RV32I-NEXT: or t2, t2, s2
-; RV32I-NEXT: and s2, s9, a3
-; RV32I-NEXT: or a5, a7, a5
-; RV32I-NEXT: sub s3, s0, s2
-; RV32I-NEXT: sltu s0, s0, s2
-; RV32I-NEXT: or s2, a1, a4
-; RV32I-NEXT: sub s10, s8, s10
-; RV32I-NEXT: seqz s8, a1
-; RV32I-NEXT: addi a1, a1, -1
-; RV32I-NEXT: andi a7, s9, 1
-; RV32I-NEXT: sub s9, s7, s1
-; RV32I-NEXT: seqz s1, s2
-; RV32I-NEXT: sub a4, a4, s8
-; RV32I-NEXT: or t2, t4, t2
-; RV32I-NEXT: or t1, t5, t1
-; RV32I-NEXT: or t3, t6, t3
-; RV32I-NEXT: sub s8, s3, a0
-; RV32I-NEXT: sltu a0, s3, a0
-; RV32I-NEXT: sub t4, s10, s0
-; RV32I-NEXT: sltu t5, t0, s1
-; RV32I-NEXT: sub t0, t0, s1
-; RV32I-NEXT: sub a0, t4, a0
-; RV32I-NEXT: sub a6, a6, t5
-; RV32I-NEXT: or t4, a4, a6
-; RV32I-NEXT: or t5, a1, t0
-; RV32I-NEXT: or s0, t5, t4
-; RV32I-NEXT: sub s11, ra, s11
-; RV32I-NEXT: li t4, 0
-; RV32I-NEXT: li t5, 0
-; RV32I-NEXT: li t6, 0
-; RV32I-NEXT: beqz s0, .LBB1_31
-; RV32I-NEXT: .LBB1_26: # %udiv-do-while3
-; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
-; RV32I-NEXT: srli s0, s11, 31
-; RV32I-NEXT: slli s1, s9, 1
-; RV32I-NEXT: slli s11, s11, 1
-; RV32I-NEXT: or s10, s1, s0
-; RV32I-NEXT: srli s0, t3, 31
-; RV32I-NEXT: or ra, s11, s0
-; RV32I-NEXT: beq s4, s10, .LBB1_28
-; RV32I-NEXT: # %bb.27: # %udiv-do-while3
-; RV32I-NEXT: # in Loop: Header=BB1_26 Depth=1
-; RV32I-NEXT: sltu s1, s4, s10
-; RV32I-NEXT: j .LBB1_29
-; RV32I-NEXT: .LBB1_28: # in Loop: Header=BB1_26 Depth=1
-; RV32I-NEXT: lw s0, 44(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sltu s1, s0, ra
-; RV32I-NEXT: .LBB1_29: # %udiv-do-while3
-; RV32I-NEXT: # in Loop: Header=BB1_26 Depth=1
-; RV32I-NEXT: srli s0, s8, 31
-; RV32I-NEXT: slli a0, a0, 1
-; RV32I-NEXT: srli s7, s9, 31
-; RV32I-NEXT: slli s9, s8, 1
-; RV32I-NEXT: or s8, a0, s0
-; RV32I-NEXT: or s0, s9, s7
-; RV32I-NEXT: sub a0, s5, s0
-; RV32I-NEXT: sltu s7, s5, s0
-; RV32I-NEXT: sub s9, s6, s8
-; RV32I-NEXT: sltu a0, a0, s1
-; RV32I-NEXT: sub s1, s9, s7
-; RV32I-NEXT: sub s1, s1, a0
-; RV32I-NEXT: srai s9, s1, 31
-; RV32I-NEXT: and s11, s9, a2
-; RV32I-NEXT: lw a0, 52(sp) # 4-byte Folded Reload
-; RV32I-NEXT: and s7, s9, a0
-; RV32I-NEXT: sltu s1, ra, s11
-; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: beq s10, s7, .LBB1_25
-; RV32I-NEXT: # %bb.30: # %udiv-do-while3
-; RV32I-NEXT: # in Loop: Header=BB1_26 Depth=1
-; RV32I-NEXT: sltu a0, s10, s7
-; RV32I-NEXT: j .LBB1_25
-; RV32I-NEXT: .LBB1_31: # %udiv-loop-exit2
-; RV32I-NEXT: srli a0, a5, 31
-; RV32I-NEXT: slli a1, t2, 1
-; RV32I-NEXT: srli a4, t2, 31
-; RV32I-NEXT: or t0, a1, a0
-; RV32I-NEXT: slli a0, t1, 1
-; RV32I-NEXT: srli a1, t1, 31
-; RV32I-NEXT: slli t3, t3, 1
-; RV32I-NEXT: slli a5, a5, 1
-; RV32I-NEXT: or t2, a0, a4
-; RV32I-NEXT: or t1, t3, a1
-; RV32I-NEXT: or t3, a7, a5
-; RV32I-NEXT: lw s10, 52(sp) # 4-byte Folded Reload
-; RV32I-NEXT: mv s11, a2
-; RV32I-NEXT: lw s3, 28(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw ra, 48(sp) # 4-byte Folded Reload
-; RV32I-NEXT: mv s9, a3
-; RV32I-NEXT: lw s2, 40(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s5, 36(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s6, 32(sp) # 4-byte Folded Reload
-; RV32I-NEXT: li s8, 127
-; RV32I-NEXT: .LBB1_32: # %udiv-end1
-; RV32I-NEXT: lui a0, 349525
-; RV32I-NEXT: lui a1, 209715
-; RV32I-NEXT: lui a6, 61681
-; RV32I-NEXT: addi a5, a0, 1365
-; RV32I-NEXT: addi a4, a1, 819
-; RV32I-NEXT: addi a1, a6, -241
-; RV32I-NEXT: sw t0, 16(sp) # 4-byte Folded Spill
-; RV32I-NEXT: bnez s10, .LBB1_35
-; RV32I-NEXT: # %bb.33: # %udiv-end1
-; RV32I-NEXT: srli a0, s11, 1
-; RV32I-NEXT: or a0, s11, a0
-; RV32I-NEXT: srli a6, a0, 2
-; RV32I-NEXT: or a0, a0, a6
-; RV32I-NEXT: srli a6, a0, 4
-; RV32I-NEXT: or a0, a0, a6
-; RV32I-NEXT: srli a6, a0, 8
-; RV32I-NEXT: or a0, a0, a6
-; RV32I-NEXT: srli a6, a0, 16
-; RV32I-NEXT: or a0, a0, a6
-; RV32I-NEXT: not a0, a0
-; RV32I-NEXT: srli a6, a0, 1
-; RV32I-NEXT: and a6, a6, a5
-; RV32I-NEXT: sub a0, a0, a6
-; RV32I-NEXT: and a6, a0, a4
-; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, a4
-; RV32I-NEXT: add a0, a6, a0
-; RV32I-NEXT: srli a6, a0, 4
-; RV32I-NEXT: add a0, a0, a6
-; RV32I-NEXT: and a0, a0, a1
-; RV32I-NEXT: slli a6, a0, 8
-; RV32I-NEXT: add a0, a0, a6
-; RV32I-NEXT: slli a6, a0, 16
-; RV32I-NEXT: add a0, a0, a6
-; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: addi a7, a0, 32
-; RV32I-NEXT: or t0, s9, ra
-; RV32I-NEXT: beqz ra, .LBB1_36
-; RV32I-NEXT: .LBB1_34:
-; RV32I-NEXT: srli a0, ra, 1
-; RV32I-NEXT: or a0, ra, a0
-; RV32I-NEXT: srli a6, a0, 2
-; RV32I-NEXT: or a0, a0, a6
-; RV32I-NEXT: srli a6, a0, 4
-; RV32I-NEXT: or a0, a0, a6
-; RV32I-NEXT: srli a6, a0, 8
-; RV32I-NEXT: or a0, a0, a6
-; RV32I-NEXT: srli a6, a0, 16
-; RV32I-NEXT: or a0, a0, a6
-; RV32I-NEXT: not a0, a0
-; RV32I-NEXT: srli a6, a0, 1
-; RV32I-NEXT: and a6, a6, a5
-; RV32I-NEXT: sub a0, a0, a6
-; RV32I-NEXT: and a6, a0, a4
-; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, a4
-; RV32I-NEXT: add a0, a6, a0
-; RV32I-NEXT: srli a6, a0, 4
-; RV32I-NEXT: add a0, a0, a6
-; RV32I-NEXT: and a0, a0, a1
-; RV32I-NEXT: slli a6, a0, 8
-; RV32I-NEXT: add a0, a0, a6
-; RV32I-NEXT: slli a6, a0, 16
-; RV32I-NEXT: add a0, a0, a6
-; RV32I-NEXT: srli a6, a0, 24
-; RV32I-NEXT: addi a0, a7, 64
-; RV32I-NEXT: beqz t0, .LBB1_37
-; RV32I-NEXT: j .LBB1_38
-; RV32I-NEXT: .LBB1_35:
-; RV32I-NEXT: srli a0, s10, 1
-; RV32I-NEXT: or a0, s10, a0
-; RV32I-NEXT: srli a6, a0, 2
-; RV32I-NEXT: or a0, a0, a6
-; RV32I-NEXT: srli a6, a0, 4
-; RV32I-NEXT: or a0, a0, a6
-; RV32I-NEXT: srli a6, a0, 8
-; RV32I-NEXT: or a0, a0, a6
-; RV32I-NEXT: srli a6, a0, 16
-; RV32I-NEXT: or a0, a0, a6
-; RV32I-NEXT: not a0, a0
-; RV32I-NEXT: srli a6, a0, 1
-; RV32I-NEXT: and a6, a6, a5
-; RV32I-NEXT: sub a0, a0, a6
-; RV32I-NEXT: and a6, a0, a4
-; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, a4
-; RV32I-NEXT: add a0, a6, a0
-; RV32I-NEXT: srli a6, a0, 4
-; RV32I-NEXT: add a0, a0, a6
-; RV32I-NEXT: and a0, a0, a1
-; RV32I-NEXT: slli a6, a0, 8
-; RV32I-NEXT: add a0, a0, a6
-; RV32I-NEXT: slli a6, a0, 16
-; RV32I-NEXT: add a0, a0, a6
-; RV32I-NEXT: srli a7, a0, 24
-; RV32I-NEXT: or t0, s9, ra
-; RV32I-NEXT: bnez ra, .LBB1_34
-; RV32I-NEXT: .LBB1_36: # %udiv-end1
-; RV32I-NEXT: srli a0, s9, 1
-; RV32I-NEXT: or a0, s9, a0
-; RV32I-NEXT: srli a6, a0, 2
-; RV32I-NEXT: or a0, a0, a6
-; RV32I-NEXT: srli a6, a0, 4
-; RV32I-NEXT: or a0, a0, a6
-; RV32I-NEXT: srli a6, a0, 8
-; RV32I-NEXT: or a0, a0, a6
-; RV32I-NEXT: srli a6, a0, 16
-; RV32I-NEXT: or a0, a0, a6
-; RV32I-NEXT: not a0, a0
-; RV32I-NEXT: srli a6, a0, 1
-; RV32I-NEXT: and a6, a6, a5
-; RV32I-NEXT: sub a0, a0, a6
-; RV32I-NEXT: and a6, a0, a4
-; RV32I-NEXT: srli a0, a0, 2
-; RV32I-NEXT: and a0, a0, a4
-; RV32I-NEXT: add a0, a6, a0
-; RV32I-NEXT: srli a6, a0, 4
-; RV32I-NEXT: add a0, a0, a6
-; RV32I-NEXT: and a0, a0, a1
-; RV32I-NEXT: slli a6, a0, 8
-; RV32I-NEXT: add a0, a0, a6
-; RV32I-NEXT: slli a6, a0, 16
-; RV32I-NEXT: add a0, a0, a6
-; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: addi a6, a0, 32
-; RV32I-NEXT: addi a0, a7, 64
-; RV32I-NEXT: bnez t0, .LBB1_38
-; RV32I-NEXT: .LBB1_37: # %udiv-end1
-; RV32I-NEXT: mv a6, a0
-; RV32I-NEXT: .LBB1_38: # %udiv-end1
-; RV32I-NEXT: sw t1, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: snez t4, t0
-; RV32I-NEXT: sw t2, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: sw t3, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT: bnez s3, .LBB1_40
-; RV32I-NEXT: # %bb.39: # %udiv-end1
-; RV32I-NEXT: srli t0, s2, 1
-; RV32I-NEXT: or t0, s2, t0
-; RV32I-NEXT: srli t1, t0, 2
-; RV32I-NEXT: or t0, t0, t1
-; RV32I-NEXT: srli t1, t0, 4
-; RV32I-NEXT: or t0, t0, t1
-; RV32I-NEXT: srli t1, t0, 8
-; RV32I-NEXT: or t0, t0, t1
-; RV32I-NEXT: srli t1, t0, 16
-; RV32I-NEXT: or t0, t0, t1
-; RV32I-NEXT: not t0, t0
-; RV32I-NEXT: srli t1, t0, 1
-; RV32I-NEXT: and t1, t1, a5
-; RV32I-NEXT: sub t0, t0, t1
-; RV32I-NEXT: and t1, t0, a4
-; RV32I-NEXT: srli t0, t0, 2
-; RV32I-NEXT: and t0, t0, a4
-; RV32I-NEXT: add t0, t1, t0
-; RV32I-NEXT: srli t1, t0, 4
-; RV32I-NEXT: add t0, t0, t1
-; RV32I-NEXT: and t0, t0, a1
-; RV32I-NEXT: slli t1, t0, 8
-; RV32I-NEXT: add t0, t0, t1
-; RV32I-NEXT: slli t1, t0, 16
-; RV32I-NEXT: add t0, t0, t1
-; RV32I-NEXT: srli t0, t0, 24
-; RV32I-NEXT: addi t6, t0, 32
-; RV32I-NEXT: j .LBB1_41
-; RV32I-NEXT: .LBB1_40:
-; RV32I-NEXT: srli t0, s3, 1
-; RV32I-NEXT: or t0, s3, t0
-; RV32I-NEXT: srli t1, t0, 2
-; RV32I-NEXT: or t0, t0, t1
-; RV32I-NEXT: srli t1, t0, 4
-; RV32I-NEXT: or t0, t0, t1
-; RV32I-NEXT: srli t1, t0, 8
-; RV32I-NEXT: or t0, t0, t1
-; RV32I-NEXT: srli t1, t0, 16
-; RV32I-NEXT: or t0, t0, t1
-; RV32I-NEXT: not t0, t0
-; RV32I-NEXT: srli t1, t0, 1
-; RV32I-NEXT: and t1, t1, a5
-; RV32I-NEXT: sub t0, t0, t1
-; RV32I-NEXT: and t1, t0, a4
-; RV32I-NEXT: srli t0, t0, 2
-; RV32I-NEXT: and t0, t0, a4
-; RV32I-NEXT: add t0, t1, t0
-; RV32I-NEXT: srli t1, t0, 4
-; RV32I-NEXT: add t0, t0, t1
-; RV32I-NEXT: and t0, t0, a1
-; RV32I-NEXT: slli t1, t0, 8
-; RV32I-NEXT: add t0, t0, t1
-; RV32I-NEXT: slli t1, t0, 16
-; RV32I-NEXT: add t0, t0, t1
-; RV32I-NEXT: srli t6, t0, 24
-; RV32I-NEXT: .LBB1_41: # %udiv-end1
-; RV32I-NEXT: or t0, s11, s9
-; RV32I-NEXT: or t1, s10, ra
-; RV32I-NEXT: or t2, s2, s6
-; RV32I-NEXT: or t3, s3, s5
-; RV32I-NEXT: sltu a7, a0, a7
-; RV32I-NEXT: addi t4, t4, -1
-; RV32I-NEXT: addi a0, t6, 64
-; RV32I-NEXT: or t5, s6, s5
-; RV32I-NEXT: sltu t6, a0, t6
-; RV32I-NEXT: snez s4, t5
-; RV32I-NEXT: addi s4, s4, -1
-; RV32I-NEXT: bnez s5, .LBB1_43
-; RV32I-NEXT: # %bb.42: # %udiv-end1
-; RV32I-NEXT: srli s0, s6, 1
-; RV32I-NEXT: or s0, s6, s0
-; RV32I-NEXT: srli s1, s0, 2
-; RV32I-NEXT: or s0, s0, s1
-; RV32I-NEXT: srli s1, s0, 4
-; RV32I-NEXT: or s0, s0, s1
-; RV32I-NEXT: srli s1, s0, 8
-; RV32I-NEXT: or s0, s0, s1
-; RV32I-NEXT: srli s1, s0, 16
-; RV32I-NEXT: or s0, s0, s1
-; RV32I-NEXT: not s0, s0
-; RV32I-NEXT: srli s1, s0, 1
-; RV32I-NEXT: and a5, s1, a5
-; RV32I-NEXT: sub s0, s0, a5
-; RV32I-NEXT: and a5, s0, a4
-; RV32I-NEXT: srli s0, s0, 2
-; RV32I-NEXT: and a4, s0, a4
-; RV32I-NEXT: add a4, a5, a4
-; RV32I-NEXT: srli a5, a4, 4
-; RV32I-NEXT: add a4, a4, a5
-; RV32I-NEXT: and a1, a4, a1
-; RV32I-NEXT: slli a4, a1, 8
-; RV32I-NEXT: add a1, a1, a4
-; RV32I-NEXT: slli a4, a1, 16
-; RV32I-NEXT: add a1, a1, a4
-; RV32I-NEXT: srli a1, a1, 24
-; RV32I-NEXT: addi a1, a1, 32
-; RV32I-NEXT: j .LBB1_44
-; RV32I-NEXT: .LBB1_43:
-; RV32I-NEXT: srli s0, s5, 1
-; RV32I-NEXT: or s0, s5, s0
-; RV32I-NEXT: srli s1, s0, 2
-; RV32I-NEXT: or s0, s0, s1
-; RV32I-NEXT: srli s1, s0, 4
-; RV32I-NEXT: or s0, s0, s1
-; RV32I-NEXT: srli s1, s0, 8
-; RV32I-NEXT: or s0, s0, s1
-; RV32I-NEXT: srli s1, s0, 16
-; RV32I-NEXT: or s0, s0, s1
-; RV32I-NEXT: not s0, s0
-; RV32I-NEXT: srli s1, s0, 1
-; RV32I-NEXT: and a5, s1, a5
-; RV32I-NEXT: sub s0, s0, a5
-; RV32I-NEXT: and a5, s0, a4
-; RV32I-NEXT: srli s0, s0, 2
-; RV32I-NEXT: and a4, s0, a4
-; RV32I-NEXT: add a4, a5, a4
-; RV32I-NEXT: srli a5, a4, 4
-; RV32I-NEXT: add a4, a4, a5
-; RV32I-NEXT: and a1, a4, a1
-; RV32I-NEXT: slli a4, a1, 8
-; RV32I-NEXT: add a1, a1, a4
-; RV32I-NEXT: slli a4, a1, 16
-; RV32I-NEXT: add a1, a1, a4
-; RV32I-NEXT: srli a1, a1, 24
-; RV32I-NEXT: .LBB1_44: # %udiv-end1
-; RV32I-NEXT: or t0, t0, t1
-; RV32I-NEXT: or t1, t2, t3
-; RV32I-NEXT: and a4, t4, a7
-; RV32I-NEXT: and a5, s4, t6
-; RV32I-NEXT: bnez t5, .LBB1_46
-; RV32I-NEXT: # %bb.45: # %udiv-end1
-; RV32I-NEXT: mv a1, a0
-; RV32I-NEXT: .LBB1_46: # %udiv-end1
-; RV32I-NEXT: seqz a0, t0
-; RV32I-NEXT: seqz t0, t1
-; RV32I-NEXT: sltu a7, a6, a1
-; RV32I-NEXT: sub t2, a4, a5
-; RV32I-NEXT: mv t1, a7
-; RV32I-NEXT: beq a4, a5, .LBB1_48
-; RV32I-NEXT: # %bb.47: # %udiv-end1
-; RV32I-NEXT: sltu t1, a4, a5
-; RV32I-NEXT: .LBB1_48: # %udiv-end1
-; RV32I-NEXT: sub a7, t2, a7
-; RV32I-NEXT: or t0, a0, t0
-; RV32I-NEXT: neg a4, t1
-; RV32I-NEXT: seqz a5, t1
-; RV32I-NEXT: addi a5, a5, -1
-; RV32I-NEXT: or t1, a4, a5
-; RV32I-NEXT: sub a0, a6, a1
-; RV32I-NEXT: beqz t1, .LBB1_50
-; RV32I-NEXT: # %bb.49: # %udiv-end1
-; RV32I-NEXT: snez a1, t1
-; RV32I-NEXT: j .LBB1_51
-; RV32I-NEXT: .LBB1_50:
-; RV32I-NEXT: snez a1, a7
-; RV32I-NEXT: sltiu a6, a0, 128
-; RV32I-NEXT: xori a6, a6, 1
-; RV32I-NEXT: or a1, a6, a1
-; RV32I-NEXT: .LBB1_51: # %udiv-end1
-; RV32I-NEXT: or a1, t0, a1
-; RV32I-NEXT: addi a6, a1, -1
-; RV32I-NEXT: and s5, a6, s5
-; RV32I-NEXT: and s7, a6, s6
-; RV32I-NEXT: and s4, a6, s3
-; RV32I-NEXT: and s6, a6, s2
-; RV32I-NEXT: bnez a1, .LBB1_62
-; RV32I-NEXT: # %bb.52: # %udiv-end1
-; RV32I-NEXT: xori a1, a0, 127
-; RV32I-NEXT: or a1, a1, a4
-; RV32I-NEXT: or a6, a7, a5
-; RV32I-NEXT: or a1, a1, a6
-; RV32I-NEXT: beqz a1, .LBB1_62
-; RV32I-NEXT: # %bb.53: # %udiv-bb1
-; RV32I-NEXT: addi a1, a0, 1
-; RV32I-NEXT: sub a6, s8, a0
-; RV32I-NEXT: sw zero, 88(sp)
-; RV32I-NEXT: sw zero, 92(sp)
-; RV32I-NEXT: sw zero, 96(sp)
-; RV32I-NEXT: sw zero, 100(sp)
-; RV32I-NEXT: lw s2, 40(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sw s2, 104(sp)
-; RV32I-NEXT: sw s3, 108(sp)
-; RV32I-NEXT: lw s5, 32(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sw s5, 112(sp)
-; RV32I-NEXT: lw s4, 36(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sw s4, 116(sp)
-; RV32I-NEXT: addi t0, sp, 104
-; RV32I-NEXT: seqz a0, a1
-; RV32I-NEXT: srli t1, a6, 3
-; RV32I-NEXT: andi t2, a6, 31
-; RV32I-NEXT: add a0, a7, a0
-; RV32I-NEXT: andi a7, t1, 12
-; RV32I-NEXT: xori t1, t2, 31
-; RV32I-NEXT: or t2, a1, a0
-; RV32I-NEXT: sub a7, t0, a7
-; RV32I-NEXT: seqz t0, t2
-; RV32I-NEXT: lw t3, 0(a7)
-; RV32I-NEXT: lw t4, 4(a7)
-; RV32I-NEXT: lw t5, 8(a7)
-; RV32I-NEXT: lw t2, 12(a7)
-; RV32I-NEXT: add a7, a4, t0
-; RV32I-NEXT: sltu a4, a7, a4
-; RV32I-NEXT: or t0, a1, a7
-; RV32I-NEXT: add a5, a5, a4
-; RV32I-NEXT: or a4, a0, a5
-; RV32I-NEXT: srli t6, t5, 1
-; RV32I-NEXT: srli s0, t4, 1
-; RV32I-NEXT: or s1, t0, a4
-; RV32I-NEXT: srli a4, t3, 1
-; RV32I-NEXT: srl t0, t6, t1
-; RV32I-NEXT: srl t6, s0, t1
-; RV32I-NEXT: srl a4, a4, t1
-; RV32I-NEXT: sll t1, t2, a6
-; RV32I-NEXT: or t2, t1, t0
-; RV32I-NEXT: sll t0, t5, a6
-; RV32I-NEXT: sll t1, t4, a6
-; RV32I-NEXT: or t0, t0, t6
-; RV32I-NEXT: or t1, t1, a4
-; RV32I-NEXT: sll a4, t3, a6
-; RV32I-NEXT: li a6, 0
-; RV32I-NEXT: beqz s1, .LBB1_61
-; RV32I-NEXT: # %bb.54: # %udiv-preheader
-; RV32I-NEXT: li t3, 0
-; RV32I-NEXT: li t4, 0
-; RV32I-NEXT: li t5, 0
-; RV32I-NEXT: sw zero, 72(sp)
-; RV32I-NEXT: sw zero, 76(sp)
-; RV32I-NEXT: sw zero, 80(sp)
-; RV32I-NEXT: sw zero, 84(sp)
-; RV32I-NEXT: sw s2, 56(sp)
-; RV32I-NEXT: sw s3, 60(sp)
-; RV32I-NEXT: sw s5, 64(sp)
-; RV32I-NEXT: sw s4, 68(sp)
-; RV32I-NEXT: srli t6, a1, 3
-; RV32I-NEXT: addi s0, sp, 56
-; RV32I-NEXT: andi t6, t6, 12
-; RV32I-NEXT: add t6, s0, t6
-; RV32I-NEXT: lw s0, 4(t6)
-; RV32I-NEXT: lw s1, 8(t6)
-; RV32I-NEXT: lw s2, 12(t6)
-; RV32I-NEXT: lw s3, 0(t6)
-; RV32I-NEXT: andi t6, a1, 31
-; RV32I-NEXT: xori t6, t6, 31
-; RV32I-NEXT: slli s4, s2, 1
-; RV32I-NEXT: slli s5, s1, 1
-; RV32I-NEXT: slli s6, s0, 1
-; RV32I-NEXT: sll s4, s4, t6
-; RV32I-NEXT: sll s5, s5, t6
-; RV32I-NEXT: sll s6, s6, t6
-; RV32I-NEXT: seqz t6, s11
-; RV32I-NEXT: srl s1, s1, a1
-; RV32I-NEXT: or s7, s1, s4
-; RV32I-NEXT: or s1, s11, s10
-; RV32I-NEXT: sub t6, s10, t6
-; RV32I-NEXT: seqz s1, s1
-; RV32I-NEXT: srl s0, s0, a1
-; RV32I-NEXT: or s8, s0, s5
-; RV32I-NEXT: sub s4, s9, s1
-; RV32I-NEXT: sltu s0, s9, s1
-; RV32I-NEXT: sub s5, ra, s0
-; RV32I-NEXT: srl s0, s3, a1
-; RV32I-NEXT: srl ra, s2, a1
-; RV32I-NEXT: or s9, s0, s6
-; RV32I-NEXT: addi s0, s11, -1
-; RV32I-NEXT: sw s0, 44(sp) # 4-byte Folded Spill
-; RV32I-NEXT: j .LBB1_56
-; RV32I-NEXT: .LBB1_55: # %udiv-do-while
-; RV32I-NEXT: # in Loop: Header=BB1_56 Depth=1
-; RV32I-NEXT: srli s2, t0, 31
-; RV32I-NEXT: slli t2, t2, 1
-; RV32I-NEXT: sub s3, s10, s6
-; RV32I-NEXT: srli s6, t1, 31
-; RV32I-NEXT: slli t0, t0, 1
-; RV32I-NEXT: or t2, t2, s2
-; RV32I-NEXT: srli s2, a4, 31
-; RV32I-NEXT: slli t1, t1, 1
-; RV32I-NEXT: slli a4, a4, 1
-; RV32I-NEXT: or t0, t0, s6
-; RV32I-NEXT: lw s6, 48(sp) # 4-byte Folded Reload
-; RV32I-NEXT: and s6, s8, s6
-; RV32I-NEXT: or t1, t1, s2
-; RV32I-NEXT: and s2, s8, a3
-; RV32I-NEXT: or a4, a6, a4
-; RV32I-NEXT: sub s10, s0, s2
-; RV32I-NEXT: sltu s0, s0, s2
-; RV32I-NEXT: or s2, a1, a0
-; RV32I-NEXT: sub s6, s7, s6
-; RV32I-NEXT: seqz s7, a1
-; RV32I-NEXT: addi a1, a1, -1
-; RV32I-NEXT: andi a6, s8, 1
-; RV32I-NEXT: sub s8, s3, s1
-; RV32I-NEXT: seqz s1, s2
-; RV32I-NEXT: sub a0, a0, s7
-; RV32I-NEXT: or t1, t3, t1
-; RV32I-NEXT: or t0, t4, t0
-; RV32I-NEXT: or t2, t5, t2
-; RV32I-NEXT: sub s7, s10, ra
-; RV32I-NEXT: sltu t3, s10, ra
-; RV32I-NEXT: sub t4, s6, s0
-; RV32I-NEXT: sltu t5, a7, s1
-; RV32I-NEXT: sub a7, a7, s1
-; RV32I-NEXT: sub ra, t4, t3
-; RV32I-NEXT: sub a5, a5, t5
-; RV32I-NEXT: or t3, a0, a5
-; RV32I-NEXT: or t4, a1, a7
-; RV32I-NEXT: or s0, t4, t3
-; RV32I-NEXT: sub s9, s9, s11
-; RV32I-NEXT: li t3, 0
-; RV32I-NEXT: li t4, 0
-; RV32I-NEXT: li t5, 0
-; RV32I-NEXT: mv s11, a2
-; RV32I-NEXT: beqz s0, .LBB1_61
-; RV32I-NEXT: .LBB1_56: # %udiv-do-while
-; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
-; RV32I-NEXT: srli s0, s9, 31
-; RV32I-NEXT: slli s1, s8, 1
-; RV32I-NEXT: slli s9, s9, 1
-; RV32I-NEXT: or s10, s1, s0
-; RV32I-NEXT: srli s0, t2, 31
-; RV32I-NEXT: or s9, s9, s0
-; RV32I-NEXT: beq t6, s10, .LBB1_58
-; RV32I-NEXT: # %bb.57: # %udiv-do-while
-; RV32I-NEXT: # in Loop: Header=BB1_56 Depth=1
-; RV32I-NEXT: sltu s1, t6, s10
-; RV32I-NEXT: j .LBB1_59
-; RV32I-NEXT: .LBB1_58: # in Loop: Header=BB1_56 Depth=1
-; RV32I-NEXT: lw s0, 44(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sltu s1, s0, s9
-; RV32I-NEXT: .LBB1_59: # %udiv-do-while
-; RV32I-NEXT: # in Loop: Header=BB1_56 Depth=1
-; RV32I-NEXT: srli s0, s7, 31
-; RV32I-NEXT: slli ra, ra, 1
-; RV32I-NEXT: srli s2, s8, 31
-; RV32I-NEXT: slli s3, s7, 1
-; RV32I-NEXT: or s7, ra, s0
-; RV32I-NEXT: or s0, s3, s2
-; RV32I-NEXT: sub s2, s4, s0
-; RV32I-NEXT: sltu s3, s4, s0
-; RV32I-NEXT: sub s6, s5, s7
-; RV32I-NEXT: sltu s1, s2, s1
-; RV32I-NEXT: sub s2, s6, s3
-; RV32I-NEXT: sub s1, s2, s1
-; RV32I-NEXT: srai s8, s1, 31
-; RV32I-NEXT: and s11, s8, s11
-; RV32I-NEXT: lw s1, 52(sp) # 4-byte Folded Reload
-; RV32I-NEXT: and s6, s8, s1
-; RV32I-NEXT: sltu s1, s9, s11
-; RV32I-NEXT: mv ra, s1
-; RV32I-NEXT: beq s10, s6, .LBB1_55
-; RV32I-NEXT: # %bb.60: # %udiv-do-while
-; RV32I-NEXT: # in Loop: Header=BB1_56 Depth=1
-; RV32I-NEXT: sltu ra, s10, s6
-; RV32I-NEXT: j .LBB1_55
-; RV32I-NEXT: .LBB1_61: # %udiv-loop-exit
-; RV32I-NEXT: srli a0, a4, 31
-; RV32I-NEXT: slli a1, t1, 1
-; RV32I-NEXT: srli a5, t1, 31
-; RV32I-NEXT: or s4, a1, a0
-; RV32I-NEXT: slli a0, t0, 1
-; RV32I-NEXT: srli a1, t0, 31
-; RV32I-NEXT: slli t2, t2, 1
-; RV32I-NEXT: slli a4, a4, 1
-; RV32I-NEXT: or s7, a0, a5
-; RV32I-NEXT: or s5, t2, a1
-; RV32I-NEXT: or s6, a6, a4
-; RV32I-NEXT: lw s10, 52(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 28(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw ra, 48(sp) # 4-byte Folded Reload
-; RV32I-NEXT: mv s9, a3
-; RV32I-NEXT: .LBB1_62: # %udiv-end
-; RV32I-NEXT: mv a0, s6
-; RV32I-NEXT: mv a1, s4
-; RV32I-NEXT: mv a2, s9
-; RV32I-NEXT: mv a3, ra
-; RV32I-NEXT: call __muldi3
-; RV32I-NEXT: mv s8, a0
-; RV32I-NEXT: mv s9, a1
-; RV32I-NEXT: mv a0, s7
-; RV32I-NEXT: mv a1, s5
-; RV32I-NEXT: mv a2, s11
-; RV32I-NEXT: mv a3, s10
-; RV32I-NEXT: call __muldi3
-; RV32I-NEXT: add a1, a1, s9
-; RV32I-NEXT: add s9, a0, s8
-; RV32I-NEXT: sltu s8, s9, a0
-; RV32I-NEXT: add s8, a1, s8
-; RV32I-NEXT: mv a0, s11
-; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: mv a2, s6
-; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __muldi3
-; RV32I-NEXT: mv s5, a0
-; RV32I-NEXT: mv s7, a1
-; RV32I-NEXT: mv a0, s10
-; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: mv a2, s6
-; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __muldi3
-; RV32I-NEXT: add s0, a0, s7
-; RV32I-NEXT: sltu a0, s0, a0
-; RV32I-NEXT: add s1, a1, a0
-; RV32I-NEXT: mv a0, s11
-; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: mv a2, s4
-; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __muldi3
-; RV32I-NEXT: add s0, a0, s0
-; RV32I-NEXT: sltu a0, s0, a0
-; RV32I-NEXT: add a0, a1, a0
-; RV32I-NEXT: add s2, s1, a0
-; RV32I-NEXT: mv a0, s10
-; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: mv a2, s4
-; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __muldi3
-; RV32I-NEXT: add a3, a0, s2
-; RV32I-NEXT: sltu a4, s2, s1
-; RV32I-NEXT: lw a5, 40(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sltu a2, a5, s5
-; RV32I-NEXT: sltu a0, a3, a0
-; RV32I-NEXT: add a1, a1, a4
-; RV32I-NEXT: add s9, a3, s9
-; RV32I-NEXT: add a0, a1, a0
-; RV32I-NEXT: sltu a1, s9, a3
-; RV32I-NEXT: lw a6, 32(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sltu a3, a6, s9
-; RV32I-NEXT: add a0, a0, s8
-; RV32I-NEXT: lw a4, 36(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sub a1, a4, a1
-; RV32I-NEXT: sub a0, a1, a0
-; RV32I-NEXT: sub a0, a0, a3
-; RV32I-NEXT: sub a1, a6, s9
-; RV32I-NEXT: mv a3, a2
-; RV32I-NEXT: beq s3, s0, .LBB1_64
-; RV32I-NEXT: # %bb.63: # %udiv-end
-; RV32I-NEXT: sltu a3, s3, s0
-; RV32I-NEXT: .LBB1_64: # %udiv-end
-; RV32I-NEXT: sltu a4, a1, a3
-; RV32I-NEXT: sub a1, a1, a3
-; RV32I-NEXT: sub a3, s3, s0
-; RV32I-NEXT: sub a5, a5, s5
-; RV32I-NEXT: lw a6, 20(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw a7, 4(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sw a7, 0(a6)
-; RV32I-NEXT: lw a7, 16(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sw a7, 4(a6)
-; RV32I-NEXT: lw a7, 8(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sw a7, 8(a6)
-; RV32I-NEXT: lw a7, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sw a7, 12(a6)
-; RV32I-NEXT: sub a0, a0, a4
-; RV32I-NEXT: sub a3, a3, a2
-; RV32I-NEXT: lw a2, 24(sp) # 4-byte Folded Reload
-; RV32I-NEXT: sw a5, 0(a2)
-; RV32I-NEXT: sw a3, 4(a2)
-; RV32I-NEXT: sw a1, 8(a2)
-; RV32I-NEXT: sw a0, 12(a2)
-; RV32I-NEXT: lw ra, 236(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s0, 232(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s1, 228(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s2, 224(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s3, 220(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s4, 216(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s5, 212(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s6, 208(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s7, 204(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s8, 200(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s9, 196(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s10, 192(sp) # 4-byte Folded Reload
-; RV32I-NEXT: lw s11, 188(sp) # 4-byte Folded Reload
-; RV32I-NEXT: .cfi_restore ra
-; RV32I-NEXT: .cfi_restore s0
-; RV32I-NEXT: .cfi_restore s1
-; RV32I-NEXT: .cfi_restore s2
-; RV32I-NEXT: .cfi_restore s3
-; RV32I-NEXT: .cfi_restore s4
-; RV32I-NEXT: .cfi_restore s5
-; RV32I-NEXT: .cfi_restore s6
-; RV32I-NEXT: .cfi_restore s7
-; RV32I-NEXT: .cfi_restore s8
-; RV32I-NEXT: .cfi_restore s9
-; RV32I-NEXT: .cfi_restore s10
-; RV32I-NEXT: .cfi_restore s11
-; RV32I-NEXT: addi sp, sp, 240
-; RV32I-NEXT: .cfi_def_cfa_offset 0
-; RV32I-NEXT: ret
+; RV32I-NOT: __udivmodti4
+; RV32I-NOT: __udivti3
+; RV32I-NOT: __umodti3
+; RV32I: ret
;
; RV32M-LABEL: udivrem_i128:
-; RV32M: # %bb.0: # %_udiv-special-cases_udiv-special-cases
-; RV32M-NEXT: addi sp, sp, -256
-; RV32M-NEXT: .cfi_def_cfa_offset 256
-; RV32M-NEXT: sw ra, 252(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw s0, 248(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw s1, 244(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw s2, 240(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw s3, 236(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw s4, 232(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw s5, 228(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw s6, 224(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw s7, 220(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw s8, 216(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw s9, 212(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw s10, 208(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw s11, 204(sp) # 4-byte Folded Spill
-; RV32M-NEXT: .cfi_offset ra, -4
-; RV32M-NEXT: .cfi_offset s0, -8
-; RV32M-NEXT: .cfi_offset s1, -12
-; RV32M-NEXT: .cfi_offset s2, -16
-; RV32M-NEXT: .cfi_offset s3, -20
-; RV32M-NEXT: .cfi_offset s4, -24
-; RV32M-NEXT: .cfi_offset s5, -28
-; RV32M-NEXT: .cfi_offset s6, -32
-; RV32M-NEXT: .cfi_offset s7, -36
-; RV32M-NEXT: .cfi_offset s8, -40
-; RV32M-NEXT: .cfi_offset s9, -44
-; RV32M-NEXT: .cfi_offset s10, -48
-; RV32M-NEXT: .cfi_offset s11, -52
-; RV32M-NEXT: sw a1, 32(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
-; RV32M-NEXT: lw a6, 0(a3)
-; RV32M-NEXT: lw s11, 4(a3)
-; RV32M-NEXT: lw ra, 8(a3)
-; RV32M-NEXT: lw s10, 12(a3)
-; RV32M-NEXT: lui a0, 349525
-; RV32M-NEXT: lui a1, 209715
-; RV32M-NEXT: addi t5, a0, 1365
-; RV32M-NEXT: addi t4, a1, 819
-; RV32M-NEXT: lui t2, 61681
-; RV32M-NEXT: addi t2, t2, -241
-; RV32M-NEXT: lui t3, 4112
-; RV32M-NEXT: addi t3, t3, 257
-; RV32M-NEXT: bnez s11, .LBB1_2
-; RV32M-NEXT: # %bb.1: # %_udiv-special-cases_udiv-special-cases
-; RV32M-NEXT: srli a0, a6, 1
-; RV32M-NEXT: or a0, a6, a0
-; RV32M-NEXT: srli a1, a0, 2
-; RV32M-NEXT: or a0, a0, a1
-; RV32M-NEXT: srli a1, a0, 4
-; RV32M-NEXT: or a0, a0, a1
-; RV32M-NEXT: srli a1, a0, 8
-; RV32M-NEXT: or a0, a0, a1
-; RV32M-NEXT: srli a1, a0, 16
-; RV32M-NEXT: or a0, a0, a1
-; RV32M-NEXT: not a0, a0
-; RV32M-NEXT: srli a1, a0, 1
-; RV32M-NEXT: and a1, a1, t5
-; RV32M-NEXT: sub a0, a0, a1
-; RV32M-NEXT: and a1, a0, t4
-; RV32M-NEXT: srli a0, a0, 2
-; RV32M-NEXT: and a0, a0, t4
-; RV32M-NEXT: add a0, a1, a0
-; RV32M-NEXT: srli a1, a0, 4
-; RV32M-NEXT: add a0, a0, a1
-; RV32M-NEXT: and a0, a0, t2
-; RV32M-NEXT: mul a0, a0, t3
-; RV32M-NEXT: srli a0, a0, 24
-; RV32M-NEXT: addi a1, a0, 32
-; RV32M-NEXT: j .LBB1_3
-; RV32M-NEXT: .LBB1_2:
-; RV32M-NEXT: srli a0, s11, 1
-; RV32M-NEXT: or a0, s11, a0
-; RV32M-NEXT: srli a1, a0, 2
-; RV32M-NEXT: or a0, a0, a1
-; RV32M-NEXT: srli a1, a0, 4
-; RV32M-NEXT: or a0, a0, a1
-; RV32M-NEXT: srli a1, a0, 8
-; RV32M-NEXT: or a0, a0, a1
-; RV32M-NEXT: srli a1, a0, 16
-; RV32M-NEXT: or a0, a0, a1
-; RV32M-NEXT: not a0, a0
-; RV32M-NEXT: srli a1, a0, 1
-; RV32M-NEXT: and a1, a1, t5
-; RV32M-NEXT: sub a0, a0, a1
-; RV32M-NEXT: and a1, a0, t4
-; RV32M-NEXT: srli a0, a0, 2
-; RV32M-NEXT: and a0, a0, t4
-; RV32M-NEXT: add a0, a1, a0
-; RV32M-NEXT: srli a1, a0, 4
-; RV32M-NEXT: add a0, a0, a1
-; RV32M-NEXT: and a0, a0, t2
-; RV32M-NEXT: mul a1, a0, t3
-; RV32M-NEXT: srli a1, a1, 24
-; RV32M-NEXT: .LBB1_3: # %_udiv-special-cases_udiv-special-cases
-; RV32M-NEXT: lw s8, 4(a2)
-; RV32M-NEXT: or t6, ra, s10
-; RV32M-NEXT: bnez s10, .LBB1_5
-; RV32M-NEXT: # %bb.4: # %_udiv-special-cases_udiv-special-cases
-; RV32M-NEXT: srli a0, ra, 1
-; RV32M-NEXT: or a0, ra, a0
-; RV32M-NEXT: srli a7, a0, 2
-; RV32M-NEXT: or a0, a0, a7
-; RV32M-NEXT: srli a7, a0, 4
-; RV32M-NEXT: or a0, a0, a7
-; RV32M-NEXT: srli a7, a0, 8
-; RV32M-NEXT: or a0, a0, a7
-; RV32M-NEXT: srli a7, a0, 16
-; RV32M-NEXT: or a0, a0, a7
-; RV32M-NEXT: not a0, a0
-; RV32M-NEXT: srli a7, a0, 1
-; RV32M-NEXT: and a7, a7, t5
-; RV32M-NEXT: sub a0, a0, a7
-; RV32M-NEXT: and a7, a0, t4
-; RV32M-NEXT: srli a0, a0, 2
-; RV32M-NEXT: and a0, a0, t4
-; RV32M-NEXT: add a0, a7, a0
-; RV32M-NEXT: srli a7, a0, 4
-; RV32M-NEXT: add a0, a0, a7
-; RV32M-NEXT: and a0, a0, t2
-; RV32M-NEXT: mul a0, a0, t3
-; RV32M-NEXT: srli a0, a0, 24
-; RV32M-NEXT: addi a0, a0, 32
-; RV32M-NEXT: j .LBB1_6
-; RV32M-NEXT: .LBB1_5:
-; RV32M-NEXT: srli a0, s10, 1
-; RV32M-NEXT: or a0, s10, a0
-; RV32M-NEXT: srli a7, a0, 2
-; RV32M-NEXT: or a0, a0, a7
-; RV32M-NEXT: srli a7, a0, 4
-; RV32M-NEXT: or a0, a0, a7
-; RV32M-NEXT: srli a7, a0, 8
-; RV32M-NEXT: or a0, a0, a7
-; RV32M-NEXT: srli a7, a0, 16
-; RV32M-NEXT: or a0, a0, a7
-; RV32M-NEXT: not a0, a0
-; RV32M-NEXT: srli a7, a0, 1
-; RV32M-NEXT: and a7, a7, t5
-; RV32M-NEXT: sub a0, a0, a7
-; RV32M-NEXT: and a7, a0, t4
-; RV32M-NEXT: srli a0, a0, 2
-; RV32M-NEXT: and a0, a0, t4
-; RV32M-NEXT: add a0, a7, a0
-; RV32M-NEXT: srli a7, a0, 4
-; RV32M-NEXT: add a0, a0, a7
-; RV32M-NEXT: and a0, a0, t2
-; RV32M-NEXT: mul a0, a0, t3
-; RV32M-NEXT: srli a0, a0, 24
-; RV32M-NEXT: .LBB1_6: # %_udiv-special-cases_udiv-special-cases
-; RV32M-NEXT: lw s7, 12(a2)
-; RV32M-NEXT: addi t0, a1, 64
-; RV32M-NEXT: bnez t6, .LBB1_8
-; RV32M-NEXT: # %bb.7: # %_udiv-special-cases_udiv-special-cases
-; RV32M-NEXT: mv a0, t0
-; RV32M-NEXT: .LBB1_8: # %_udiv-special-cases_udiv-special-cases
-; RV32M-NEXT: lw s9, 0(a2)
-; RV32M-NEXT: lw s6, 8(a2)
-; RV32M-NEXT: snez s3, t6
-; RV32M-NEXT: bnez s8, .LBB1_10
-; RV32M-NEXT: # %bb.9: # %_udiv-special-cases_udiv-special-cases
-; RV32M-NEXT: srli a2, s9, 1
-; RV32M-NEXT: or a2, s9, a2
-; RV32M-NEXT: srli a7, a2, 2
-; RV32M-NEXT: or a2, a2, a7
-; RV32M-NEXT: srli a7, a2, 4
-; RV32M-NEXT: or a2, a2, a7
-; RV32M-NEXT: srli a7, a2, 8
-; RV32M-NEXT: or a2, a2, a7
-; RV32M-NEXT: srli a7, a2, 16
-; RV32M-NEXT: or a2, a2, a7
-; RV32M-NEXT: not a2, a2
-; RV32M-NEXT: srli a7, a2, 1
-; RV32M-NEXT: and a7, a7, t5
-; RV32M-NEXT: sub a2, a2, a7
-; RV32M-NEXT: and a7, a2, t4
-; RV32M-NEXT: srli a2, a2, 2
-; RV32M-NEXT: and a2, a2, t4
-; RV32M-NEXT: add a2, a7, a2
-; RV32M-NEXT: srli a7, a2, 4
-; RV32M-NEXT: add a2, a2, a7
-; RV32M-NEXT: and a2, a2, t2
-; RV32M-NEXT: mul a2, a2, t3
-; RV32M-NEXT: srli a2, a2, 24
-; RV32M-NEXT: addi a2, a2, 32
-; RV32M-NEXT: j .LBB1_11
-; RV32M-NEXT: .LBB1_10:
-; RV32M-NEXT: srli a2, s8, 1
-; RV32M-NEXT: or a2, s8, a2
-; RV32M-NEXT: srli a7, a2, 2
-; RV32M-NEXT: or a2, a2, a7
-; RV32M-NEXT: srli a7, a2, 4
-; RV32M-NEXT: or a2, a2, a7
-; RV32M-NEXT: srli a7, a2, 8
-; RV32M-NEXT: or a2, a2, a7
-; RV32M-NEXT: srli a7, a2, 16
-; RV32M-NEXT: or a2, a2, a7
-; RV32M-NEXT: not a2, a2
-; RV32M-NEXT: srli a7, a2, 1
-; RV32M-NEXT: and a7, a7, t5
-; RV32M-NEXT: sub a2, a2, a7
-; RV32M-NEXT: and a7, a2, t4
-; RV32M-NEXT: srli a2, a2, 2
-; RV32M-NEXT: and a2, a2, t4
-; RV32M-NEXT: add a2, a7, a2
-; RV32M-NEXT: srli a7, a2, 4
-; RV32M-NEXT: add a2, a2, a7
-; RV32M-NEXT: and a2, a2, t2
-; RV32M-NEXT: mul a2, a2, t3
-; RV32M-NEXT: srli a2, a2, 24
-; RV32M-NEXT: .LBB1_11: # %_udiv-special-cases_udiv-special-cases
-; RV32M-NEXT: or t6, s11, s10
-; RV32M-NEXT: or s0, a6, ra
-; RV32M-NEXT: or s1, s8, s7
-; RV32M-NEXT: or s2, s9, s6
-; RV32M-NEXT: sltu t0, t0, a1
-; RV32M-NEXT: addi s3, s3, -1
-; RV32M-NEXT: addi a1, a2, 64
-; RV32M-NEXT: or s4, s6, s7
-; RV32M-NEXT: sltu a7, a1, a2
-; RV32M-NEXT: snez a2, s4
-; RV32M-NEXT: addi a2, a2, -1
-; RV32M-NEXT: bnez s7, .LBB1_13
-; RV32M-NEXT: # %bb.12: # %_udiv-special-cases_udiv-special-cases
-; RV32M-NEXT: srli t1, s6, 1
-; RV32M-NEXT: or t1, s6, t1
-; RV32M-NEXT: srli s5, t1, 2
-; RV32M-NEXT: or t1, t1, s5
-; RV32M-NEXT: srli s5, t1, 4
-; RV32M-NEXT: or t1, t1, s5
-; RV32M-NEXT: srli s5, t1, 8
-; RV32M-NEXT: or t1, t1, s5
-; RV32M-NEXT: srli s5, t1, 16
-; RV32M-NEXT: or t1, t1, s5
-; RV32M-NEXT: not t1, t1
-; RV32M-NEXT: srli s5, t1, 1
-; RV32M-NEXT: and t5, s5, t5
-; RV32M-NEXT: sub t1, t1, t5
-; RV32M-NEXT: and t5, t1, t4
-; RV32M-NEXT: srli t1, t1, 2
-; RV32M-NEXT: and t1, t1, t4
-; RV32M-NEXT: add t1, t5, t1
-; RV32M-NEXT: srli t4, t1, 4
-; RV32M-NEXT: add t1, t1, t4
-; RV32M-NEXT: and t1, t1, t2
-; RV32M-NEXT: mul t1, t1, t3
-; RV32M-NEXT: srli t1, t1, 24
-; RV32M-NEXT: addi t1, t1, 32
-; RV32M-NEXT: j .LBB1_14
-; RV32M-NEXT: .LBB1_13:
-; RV32M-NEXT: srli t1, s7, 1
-; RV32M-NEXT: or t1, s7, t1
-; RV32M-NEXT: srli s5, t1, 2
-; RV32M-NEXT: or t1, t1, s5
-; RV32M-NEXT: srli s5, t1, 4
-; RV32M-NEXT: or t1, t1, s5
-; RV32M-NEXT: srli s5, t1, 8
-; RV32M-NEXT: or t1, t1, s5
-; RV32M-NEXT: srli s5, t1, 16
-; RV32M-NEXT: or t1, t1, s5
-; RV32M-NEXT: not t1, t1
-; RV32M-NEXT: srli s5, t1, 1
-; RV32M-NEXT: and t5, s5, t5
-; RV32M-NEXT: sub t1, t1, t5
-; RV32M-NEXT: and t5, t1, t4
-; RV32M-NEXT: srli t1, t1, 2
-; RV32M-NEXT: and t1, t1, t4
-; RV32M-NEXT: add t1, t5, t1
-; RV32M-NEXT: srli t4, t1, 4
-; RV32M-NEXT: add t1, t1, t4
-; RV32M-NEXT: and t1, t1, t2
-; RV32M-NEXT: mul t1, t1, t3
-; RV32M-NEXT: srli t1, t1, 24
-; RV32M-NEXT: .LBB1_14: # %_udiv-special-cases_udiv-special-cases
-; RV32M-NEXT: or t2, s0, t6
-; RV32M-NEXT: or t3, s2, s1
-; RV32M-NEXT: and t0, s3, t0
-; RV32M-NEXT: and a2, a2, a7
-; RV32M-NEXT: bnez s4, .LBB1_16
-; RV32M-NEXT: # %bb.15: # %_udiv-special-cases_udiv-special-cases
-; RV32M-NEXT: mv t1, a1
-; RV32M-NEXT: .LBB1_16: # %_udiv-special-cases_udiv-special-cases
-; RV32M-NEXT: seqz a1, t2
-; RV32M-NEXT: seqz a7, t3
-; RV32M-NEXT: sltu t2, a0, t1
-; RV32M-NEXT: sub t3, t0, a2
-; RV32M-NEXT: mv t4, t2
-; RV32M-NEXT: beq t0, a2, .LBB1_18
-; RV32M-NEXT: # %bb.17: # %_udiv-special-cases_udiv-special-cases
-; RV32M-NEXT: sltu t4, t0, a2
-; RV32M-NEXT: .LBB1_18: # %_udiv-special-cases_udiv-special-cases
-; RV32M-NEXT: sub t3, t3, t2
-; RV32M-NEXT: or a2, a1, a7
-; RV32M-NEXT: neg a1, t4
-; RV32M-NEXT: seqz t4, t4
-; RV32M-NEXT: addi t4, t4, -1
-; RV32M-NEXT: or a7, a1, t4
-; RV32M-NEXT: sub a0, a0, t1
-; RV32M-NEXT: beqz a7, .LBB1_20
-; RV32M-NEXT: # %bb.19: # %_udiv-special-cases_udiv-special-cases
-; RV32M-NEXT: snez a7, a7
-; RV32M-NEXT: j .LBB1_21
-; RV32M-NEXT: .LBB1_20:
-; RV32M-NEXT: snez a7, t3
-; RV32M-NEXT: sltiu t0, a0, 128
-; RV32M-NEXT: xori t0, t0, 1
-; RV32M-NEXT: or a7, t0, a7
-; RV32M-NEXT: .LBB1_21: # %_udiv-special-cases_udiv-special-cases
-; RV32M-NEXT: li s5, 127
-; RV32M-NEXT: or a2, a2, a7
-; RV32M-NEXT: addi a7, a2, -1
-; RV32M-NEXT: and a4, a7, s7
-; RV32M-NEXT: and a5, a7, s6
-; RV32M-NEXT: and a3, a7, s8
-; RV32M-NEXT: and t0, a7, s9
-; RV32M-NEXT: sw s10, 68(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw ra, 64(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw s11, 60(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw s8, 48(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw s9, 44(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw s6, 40(sp) # 4-byte Folded Spill
-; RV32M-NEXT: sw s7, 36(sp) # 4-byte Folded Spill
-; RV32M-NEXT: bnez a2, .LBB1_33
-; RV32M-NEXT: # %bb.22: # %_udiv-special-cases_udiv-special-cases
-; RV32M-NEXT: xori a2, a0, 127
-; RV32M-NEXT: or a2, a2, a1
-; RV32M-NEXT: or a7, t3, t4
-; RV32M-NEXT: or a2, a2, a7
-; RV32M-NEXT: beqz a2, .LBB1_33
-; RV32M-NEXT: # %bb.23: # %udiv-bb15
-; RV32M-NEXT: addi t2, a0, 1
-; RV32M-NEXT: sub a0, s5, a0
-; RV32M-NEXT: sw zero, 168(sp)
-; RV32M-NEXT: sw zero, 172(sp)
-; RV32M-NEXT: sw zero, 176(sp)
-; RV32M-NEXT: sw zero, 180(sp)
-; RV32M-NEXT: sw s9, 184(sp)
-; RV32M-NEXT: sw s8, 188(sp)
-; RV32M-NEXT: sw s6, 192(sp)
-; RV32M-NEXT: sw s7, 196(sp)
-; RV32M-NEXT: addi a2, sp, 184
-; RV32M-NEXT: seqz a3, t2
-; RV32M-NEXT: srli a4, a0, 3
-; RV32M-NEXT: andi a5, a0, 31
-; RV32M-NEXT: add t3, t3, a3
-; RV32M-NEXT: andi a4, a4, 12
-; RV32M-NEXT: xori a3, a5, 31
-; RV32M-NEXT: or a5, t2, t3
-; RV32M-NEXT: sub a2, a2, a4
-; RV32M-NEXT: seqz s1, a5
-; RV32M-NEXT: lw a4, 0(a2)
-; RV32M-NEXT: lw a5, 4(a2)
-; RV32M-NEXT: lw a7, 8(a2)
-; RV32M-NEXT: lw a2, 12(a2)
-; RV32M-NEXT: add s1, a1, s1
-; RV32M-NEXT: sltu t5, s1, a1
-; RV32M-NEXT: or a1, t2, s1
-; RV32M-NEXT: add t5, t4, t5
-; RV32M-NEXT: or t0, t3, t5
-; RV32M-NEXT: srli t1, a7, 1
-; RV32M-NEXT: srli t4, a5, 1
-; RV32M-NEXT: or a1, a1, t0
-; RV32M-NEXT: srli t0, a4, 1
-; RV32M-NEXT: srl t1, t1, a3
-; RV32M-NEXT: srl t4, t4, a3
-; RV32M-NEXT: srl a3, t0, a3
-; RV32M-NEXT: sll a2, a2, a0
-; RV32M-NEXT: or s4, a2, t1
-; RV32M-NEXT: sll a2, a7, a0
-; RV32M-NEXT: sll a5, a5, a0
-; RV32M-NEXT: or s2, a2, t4
-; RV32M-NEXT: or s3, a5, a3
-; RV32M-NEXT: sll t4, a4, a0
-; RV32M-NEXT: beqz a1, .LBB1_31
-; RV32M-NEXT: # %bb.24: # %udiv-preheader4
-; RV32M-NEXT: mv a0, s6
-; RV32M-NEXT: mv a2, s7
-; RV32M-NEXT: li s0, 0
-; RV32M-NEXT: li s5, 0
-; RV32M-NEXT: li s6, 0
-; RV32M-NEXT: li s7, 0
-; RV32M-NEXT: sw zero, 152(sp)
-; RV32M-NEXT: sw zero, 156(sp)
-; RV32M-NEXT: sw zero, 160(sp)
-; RV32M-NEXT: sw zero, 164(sp)
-; RV32M-NEXT: sw s9, 136(sp)
-; RV32M-NEXT: sw s8, 140(sp)
-; RV32M-NEXT: sw a0, 144(sp)
-; RV32M-NEXT: sw a2, 148(sp)
-; RV32M-NEXT: srli a0, t2, 3
-; RV32M-NEXT: addi a1, sp, 136
-; RV32M-NEXT: andi a0, a0, 12
-; RV32M-NEXT: add a0, a1, a0
-; RV32M-NEXT: lw a1, 4(a0)
-; RV32M-NEXT: lw a2, 8(a0)
-; RV32M-NEXT: lw a3, 12(a0)
-; RV32M-NEXT: lw a4, 0(a0)
-; RV32M-NEXT: andi a0, t2, 31
-; RV32M-NEXT: xori a0, a0, 31
-; RV32M-NEXT: slli a5, a3, 1
-; RV32M-NEXT: slli a7, a2, 1
-; RV32M-NEXT: slli t0, a1, 1
-; RV32M-NEXT: sll a5, a5, a0
-; RV32M-NEXT: sll a7, a7, a0
-; RV32M-NEXT: sll t0, t0, a0
-; RV32M-NEXT: seqz t1, a6
-; RV32M-NEXT: srl a0, a2, t2
-; RV32M-NEXT: or a0, a0, a5
-; RV32M-NEXT: or a2, a6, s11
-; RV32M-NEXT: sub s8, s11, t1
-; RV32M-NEXT: seqz a2, a2
-; RV32M-NEXT: srl a1, a1, t2
-; RV32M-NEXT: or s11, a1, a7
-; RV32M-NEXT: sub s9, ra, a2
-; RV32M-NEXT: sltu a1, ra, a2
-; RV32M-NEXT: sub s10, s10, a1
-; RV32M-NEXT: srl a1, a4, t2
-; RV32M-NEXT: srl t6, a3, t2
-; RV32M-NEXT: or t0, a1, t0
-; RV32M-NEXT: j .LBB1_26
-; RV32M-NEXT: .LBB1_25: # %udiv-do-while3
-; RV32M-NEXT: # in Loop: Header=BB1_26 Depth=1
-; RV32M-NEXT: srli a4, s2, 31
-; RV32M-NEXT: slli s4, s4, 1
-; RV32M-NEXT: sub a1, a7, a1
-; RV32M-NEXT: srli a7, s3, 31
-; RV32M-NEXT: slli s2, s2, 1
-; RV32M-NEXT: or a4, s4, a4
-; RV32M-NEXT: srli s4, t4, 31
-; RV32M-NEXT: slli s3, s3, 1
-; RV32M-NEXT: slli t4, t4, 1
-; RV32M-NEXT: or a7, s2, a7
-; RV32M-NEXT: lw a3, 68(sp) # 4-byte Folded Reload
-; RV32M-NEXT: and s2, s11, a3
-; RV32M-NEXT: or s3, s3, s4
-; RV32M-NEXT: lw a3, 64(sp) # 4-byte Folded Reload
-; RV32M-NEXT: and s4, s11, a3
-; RV32M-NEXT: or t4, s0, t4
-; RV32M-NEXT: sub a3, t1, s4
-; RV32M-NEXT: sltu t1, t1, s4
-; RV32M-NEXT: or s4, t2, t3
-; RV32M-NEXT: sub a5, a0, s2
-; RV32M-NEXT: seqz a0, t2
-; RV32M-NEXT: addi t2, t2, -1
-; RV32M-NEXT: andi s0, s11, 1
-; RV32M-NEXT: sub s11, a1, a2
-; RV32M-NEXT: seqz a1, s4
-; RV32M-NEXT: sub t3, t3, a0
-; RV32M-NEXT: or s3, s5, s3
-; RV32M-NEXT: or s2, s6, a7
-; RV32M-NEXT: or s4, s7, a4
-; RV32M-NEXT: sub a0, a3, t6
-; RV32M-NEXT: sltu a2, a3, t6
-; RV32M-NEXT: sub a3, a5, t1
-; RV32M-NEXT: sltu a4, s1, a1
-; RV32M-NEXT: sub s1, s1, a1
-; RV32M-NEXT: sub t6, a3, a2
-; RV32M-NEXT: sub t5, t5, a4
-; RV32M-NEXT: or a1, t3, t5
-; RV32M-NEXT: or a2, t2, s1
-; RV32M-NEXT: or a1, a2, a1
-; RV32M-NEXT: sub t0, ra, t0
-; RV32M-NEXT: li s5, 0
-; RV32M-NEXT: li s6, 0
-; RV32M-NEXT: li s7, 0
-; RV32M-NEXT: beqz a1, .LBB1_32
-; RV32M-NEXT: .LBB1_26: # %udiv-do-while3
-; RV32M-NEXT: # =>This Inner Loop Header: Depth=1
-; RV32M-NEXT: srli a1, t0, 31
-; RV32M-NEXT: slli a2, s11, 1
-; RV32M-NEXT: slli t0, t0, 1
-; RV32M-NEXT: or a7, a2, a1
-; RV32M-NEXT: srli a1, s4, 31
-; RV32M-NEXT: or ra, t0, a1
-; RV32M-NEXT: beq s8, a7, .LBB1_28
-; RV32M-NEXT: # %bb.27: # %udiv-do-while3
-; RV32M-NEXT: # in Loop: Header=BB1_26 Depth=1
-; RV32M-NEXT: sltu a1, s8, a7
-; RV32M-NEXT: j .LBB1_29
-; RV32M-NEXT: .LBB1_28: # in Loop: Header=BB1_26 Depth=1
-; RV32M-NEXT: addi a1, a6, -1
-; RV32M-NEXT: sltu a1, a1, ra
-; RV32M-NEXT: .LBB1_29: # %udiv-do-while3
-; RV32M-NEXT: # in Loop: Header=BB1_26 Depth=1
-; RV32M-NEXT: srli a2, a0, 31
-; RV32M-NEXT: slli t6, t6, 1
-; RV32M-NEXT: srli t0, s11, 31
-; RV32M-NEXT: slli t1, a0, 1
-; RV32M-NEXT: or a0, t6, a2
-; RV32M-NEXT: or t1, t1, t0
-; RV32M-NEXT: sub a2, s9, t1
-; RV32M-NEXT: sltu t0, s9, t1
-; RV32M-NEXT: sub t6, s10, a0
-; RV32M-NEXT: sltu a1, a2, a1
-; RV32M-NEXT: sub a2, t6, t0
-; RV32M-NEXT: sub a2, a2, a1
-; RV32M-NEXT: srai s11, a2, 31
-; RV32M-NEXT: and t0, s11, a6
-; RV32M-NEXT: lw a1, 60(sp) # 4-byte Folded Reload
-; RV32M-NEXT: and a1, s11, a1
-; RV32M-NEXT: sltu a2, ra, t0
-; RV32M-NEXT: mv t6, a2
-; RV32M-NEXT: beq a7, a1, .LBB1_25
-; RV32M-NEXT: # %bb.30: # %udiv-do-while3
-; RV32M-NEXT: # in Loop: Header=BB1_26 Depth=1
-; RV32M-NEXT: sltu t6, a7, a1
-; RV32M-NEXT: j .LBB1_25
-; RV32M-NEXT: .LBB1_31:
-; RV32M-NEXT: li s0, 0
-; RV32M-NEXT: .LBB1_32: # %udiv-loop-exit2
-; RV32M-NEXT: srli a0, t4, 31
-; RV32M-NEXT: slli a1, s3, 1
-; RV32M-NEXT: srli a2, s3, 31
-; RV32M-NEXT: or a3, a1, a0
-; RV32M-NEXT: slli a0, s2, 1
-; RV32M-NEXT: srli a1, s2, 31
-; RV32M-NEXT: slli s4, s4, 1
-; RV32M-NEXT: slli t4, t4, 1
-; RV32M-NEXT: or a5, a0, a2
-; RV32M-NEXT: or a4, s4, a1
-; RV32M-NEXT: or t0, s0, t4
-; RV32M-NEXT: lw s10, 68(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw ra, 64(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw s11, 60(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw s8, 48(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw s9, 44(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw s6, 40(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw s7, 36(sp) # 4-byte Folded Reload
-; RV32M-NEXT: li s5, 127
-; RV32M-NEXT: .LBB1_33: # %udiv-end1
-; RV32M-NEXT: sw a3, 24(sp) # 4-byte Folded Spill
-; RV32M-NEXT: lui a0, 349525
-; RV32M-NEXT: lui a1, 209715
-; RV32M-NEXT: lui a2, 61681
-; RV32M-NEXT: lui a3, 4112
-; RV32M-NEXT: addi s3, a0, 1365
-; RV32M-NEXT: addi s2, a1, 819
-; RV32M-NEXT: addi s1, a2, -241
-; RV32M-NEXT: addi s0, a3, 257
-; RV32M-NEXT: bnez s11, .LBB1_36
-; RV32M-NEXT: # %bb.34: # %udiv-end1
-; RV32M-NEXT: srli a0, a6, 1
-; RV32M-NEXT: or a0, a6, a0
-; RV32M-NEXT: srli a1, a0, 2
-; RV32M-NEXT: or a0, a0, a1
-; RV32M-NEXT: srli a1, a0, 4
-; RV32M-NEXT: or a0, a0, a1
-; RV32M-NEXT: srli a1, a0, 8
-; RV32M-NEXT: or a0, a0, a1
-; RV32M-NEXT: srli a1, a0, 16
-; RV32M-NEXT: or a0, a0, a1
-; RV32M-NEXT: not a0, a0
-; RV32M-NEXT: srli a1, a0, 1
-; RV32M-NEXT: and a1, a1, s3
-; RV32M-NEXT: sub a0, a0, a1
-; RV32M-NEXT: and a1, a0, s2
-; RV32M-NEXT: srli a0, a0, 2
-; RV32M-NEXT: and a0, a0, s2
-; RV32M-NEXT: add a0, a1, a0
-; RV32M-NEXT: srli a1, a0, 4
-; RV32M-NEXT: add a0, a0, a1
-; RV32M-NEXT: and a0, a0, s1
-; RV32M-NEXT: mul a0, a0, s0
-; RV32M-NEXT: srli a0, a0, 24
-; RV32M-NEXT: addi t2, a0, 32
-; RV32M-NEXT: or a7, ra, s10
-; RV32M-NEXT: beqz s10, .LBB1_37
-; RV32M-NEXT: .LBB1_35:
-; RV32M-NEXT: srli a0, s10, 1
-; RV32M-NEXT: or a0, s10, a0
-; RV32M-NEXT: srli a1, a0, 2
-; RV32M-NEXT: or a0, a0, a1
-; RV32M-NEXT: srli a1, a0, 4
-; RV32M-NEXT: or a0, a0, a1
-; RV32M-NEXT: srli a1, a0, 8
-; RV32M-NEXT: or a0, a0, a1
-; RV32M-NEXT: srli a1, a0, 16
-; RV32M-NEXT: or a0, a0, a1
-; RV32M-NEXT: not a0, a0
-; RV32M-NEXT: srli a1, a0, 1
-; RV32M-NEXT: and a1, a1, s3
-; RV32M-NEXT: sub a0, a0, a1
-; RV32M-NEXT: and a1, a0, s2
-; RV32M-NEXT: srli a0, a0, 2
-; RV32M-NEXT: and a0, a0, s2
-; RV32M-NEXT: add a0, a1, a0
-; RV32M-NEXT: srli a1, a0, 4
-; RV32M-NEXT: add a0, a0, a1
-; RV32M-NEXT: and a0, a0, s1
-; RV32M-NEXT: mul a1, a0, s0
-; RV32M-NEXT: srli a1, a1, 24
-; RV32M-NEXT: addi a0, t2, 64
-; RV32M-NEXT: beqz a7, .LBB1_38
-; RV32M-NEXT: j .LBB1_39
-; RV32M-NEXT: .LBB1_36:
-; RV32M-NEXT: srli a0, s11, 1
-; RV32M-NEXT: or a0, s11, a0
-; RV32M-NEXT: srli a1, a0, 2
-; RV32M-NEXT: or a0, a0, a1
-; RV32M-NEXT: srli a1, a0, 4
-; RV32M-NEXT: or a0, a0, a1
-; RV32M-NEXT: srli a1, a0, 8
-; RV32M-NEXT: or a0, a0, a1
-; RV32M-NEXT: srli a1, a0, 16
-; RV32M-NEXT: or a0, a0, a1
-; RV32M-NEXT: not a0, a0
-; RV32M-NEXT: srli a1, a0, 1
-; RV32M-NEXT: and a1, a1, s3
-; RV32M-NEXT: sub a0, a0, a1
-; RV32M-NEXT: and a1, a0, s2
-; RV32M-NEXT: srli a0, a0, 2
-; RV32M-NEXT: and a0, a0, s2
-; RV32M-NEXT: add a0, a1, a0
-; RV32M-NEXT: srli a1, a0, 4
-; RV32M-NEXT: add a0, a0, a1
-; RV32M-NEXT: and a0, a0, s1
-; RV32M-NEXT: mul a0, a0, s0
-; RV32M-NEXT: srli t2, a0, 24
-; RV32M-NEXT: or a7, ra, s10
-; RV32M-NEXT: bnez s10, .LBB1_35
-; RV32M-NEXT: .LBB1_37: # %udiv-end1
-; RV32M-NEXT: srli a0, ra, 1
-; RV32M-NEXT: or a0, ra, a0
-; RV32M-NEXT: srli a1, a0, 2
-; RV32M-NEXT: or a0, a0, a1
-; RV32M-NEXT: srli a1, a0, 4
-; RV32M-NEXT: or a0, a0, a1
-; RV32M-NEXT: srli a1, a0, 8
-; RV32M-NEXT: or a0, a0, a1
-; RV32M-NEXT: srli a1, a0, 16
-; RV32M-NEXT: or a0, a0, a1
-; RV32M-NEXT: not a0, a0
-; RV32M-NEXT: srli a1, a0, 1
-; RV32M-NEXT: and a1, a1, s3
-; RV32M-NEXT: sub a0, a0, a1
-; RV32M-NEXT: and a1, a0, s2
-; RV32M-NEXT: srli a0, a0, 2
-; RV32M-NEXT: and a0, a0, s2
-; RV32M-NEXT: add a0, a1, a0
-; RV32M-NEXT: srli a1, a0, 4
-; RV32M-NEXT: add a0, a0, a1
-; RV32M-NEXT: and a0, a0, s1
-; RV32M-NEXT: mul a0, a0, s0
-; RV32M-NEXT: srli a0, a0, 24
-; RV32M-NEXT: addi a1, a0, 32
-; RV32M-NEXT: addi a0, t2, 64
-; RV32M-NEXT: bnez a7, .LBB1_39
-; RV32M-NEXT: .LBB1_38: # %udiv-end1
-; RV32M-NEXT: mv a1, a0
-; RV32M-NEXT: .LBB1_39: # %udiv-end1
-; RV32M-NEXT: snez a7, a7
-; RV32M-NEXT: sw t0, 12(sp) # 4-byte Folded Spill
-; RV32M-NEXT: bnez s8, .LBB1_41
-; RV32M-NEXT: # %bb.40: # %udiv-end1
-; RV32M-NEXT: srli a2, s9, 1
-; RV32M-NEXT: or a2, s9, a2
-; RV32M-NEXT: srli a3, a2, 2
-; RV32M-NEXT: or a2, a2, a3
-; RV32M-NEXT: srli a3, a2, 4
-; RV32M-NEXT: or a2, a2, a3
-; RV32M-NEXT: srli a3, a2, 8
-; RV32M-NEXT: or a2, a2, a3
-; RV32M-NEXT: srli a3, a2, 16
-; RV32M-NEXT: or a2, a2, a3
-; RV32M-NEXT: not a2, a2
-; RV32M-NEXT: srli a3, a2, 1
-; RV32M-NEXT: and a3, a3, s3
-; RV32M-NEXT: sub a2, a2, a3
-; RV32M-NEXT: and a3, a2, s2
-; RV32M-NEXT: srli a2, a2, 2
-; RV32M-NEXT: and a2, a2, s2
-; RV32M-NEXT: add a2, a3, a2
-; RV32M-NEXT: srli a3, a2, 4
-; RV32M-NEXT: add a2, a2, a3
-; RV32M-NEXT: and a2, a2, s1
-; RV32M-NEXT: mul a2, a2, s0
-; RV32M-NEXT: srli a2, a2, 24
-; RV32M-NEXT: addi a2, a2, 32
-; RV32M-NEXT: j .LBB1_42
-; RV32M-NEXT: .LBB1_41:
-; RV32M-NEXT: srli a2, s8, 1
-; RV32M-NEXT: or a2, s8, a2
-; RV32M-NEXT: srli a3, a2, 2
-; RV32M-NEXT: or a2, a2, a3
-; RV32M-NEXT: srli a3, a2, 4
-; RV32M-NEXT: or a2, a2, a3
-; RV32M-NEXT: srli a3, a2, 8
-; RV32M-NEXT: or a2, a2, a3
-; RV32M-NEXT: srli a3, a2, 16
-; RV32M-NEXT: or a2, a2, a3
-; RV32M-NEXT: not a2, a2
-; RV32M-NEXT: srli a3, a2, 1
-; RV32M-NEXT: and a3, a3, s3
-; RV32M-NEXT: sub a2, a2, a3
-; RV32M-NEXT: and a3, a2, s2
-; RV32M-NEXT: srli a2, a2, 2
-; RV32M-NEXT: and a2, a2, s2
-; RV32M-NEXT: add a2, a3, a2
-; RV32M-NEXT: srli a3, a2, 4
-; RV32M-NEXT: add a2, a2, a3
-; RV32M-NEXT: and a2, a2, s1
-; RV32M-NEXT: mul a2, a2, s0
-; RV32M-NEXT: srli a2, a2, 24
-; RV32M-NEXT: .LBB1_42: # %udiv-end1
-; RV32M-NEXT: sw a4, 20(sp) # 4-byte Folded Spill
-; RV32M-NEXT: or t0, a6, ra
-; RV32M-NEXT: or t3, s11, s10
-; RV32M-NEXT: or t4, s9, s6
-; RV32M-NEXT: or t5, s8, s7
-; RV32M-NEXT: sltu t2, a0, t2
-; RV32M-NEXT: addi a7, a7, -1
-; RV32M-NEXT: addi a0, a2, 64
-; RV32M-NEXT: or t6, s6, s7
-; RV32M-NEXT: sltu s4, a0, a2
-; RV32M-NEXT: snez a2, t6
-; RV32M-NEXT: addi a2, a2, -1
-; RV32M-NEXT: bnez s7, .LBB1_44
-; RV32M-NEXT: # %bb.43: # %udiv-end1
-; RV32M-NEXT: srli a3, s6, 1
-; RV32M-NEXT: or a3, s6, a3
-; RV32M-NEXT: srli a4, a3, 2
-; RV32M-NEXT: or a3, a3, a4
-; RV32M-NEXT: srli a4, a3, 4
-; RV32M-NEXT: or a3, a3, a4
-; RV32M-NEXT: srli a4, a3, 8
-; RV32M-NEXT: or a3, a3, a4
-; RV32M-NEXT: srli a4, a3, 16
-; RV32M-NEXT: or a3, a3, a4
-; RV32M-NEXT: not a3, a3
-; RV32M-NEXT: srli a4, a3, 1
-; RV32M-NEXT: and a4, a4, s3
-; RV32M-NEXT: sub a3, a3, a4
-; RV32M-NEXT: and a4, a3, s2
-; RV32M-NEXT: srli a3, a3, 2
-; RV32M-NEXT: and a3, a3, s2
-; RV32M-NEXT: add a3, a4, a3
-; RV32M-NEXT: srli a4, a3, 4
-; RV32M-NEXT: add a3, a3, a4
-; RV32M-NEXT: and a3, a3, s1
-; RV32M-NEXT: mul a3, a3, s0
-; RV32M-NEXT: srli a3, a3, 24
-; RV32M-NEXT: addi t1, a3, 32
-; RV32M-NEXT: j .LBB1_45
-; RV32M-NEXT: .LBB1_44:
-; RV32M-NEXT: srli a3, s7, 1
-; RV32M-NEXT: or a3, s7, a3
-; RV32M-NEXT: srli a4, a3, 2
-; RV32M-NEXT: or a3, a3, a4
-; RV32M-NEXT: srli a4, a3, 4
-; RV32M-NEXT: or a3, a3, a4
-; RV32M-NEXT: srli a4, a3, 8
-; RV32M-NEXT: or a3, a3, a4
-; RV32M-NEXT: srli a4, a3, 16
-; RV32M-NEXT: or a3, a3, a4
-; RV32M-NEXT: not a3, a3
-; RV32M-NEXT: srli a4, a3, 1
-; RV32M-NEXT: and a4, a4, s3
-; RV32M-NEXT: sub a3, a3, a4
-; RV32M-NEXT: and a4, a3, s2
-; RV32M-NEXT: srli a3, a3, 2
-; RV32M-NEXT: and a3, a3, s2
-; RV32M-NEXT: add a3, a4, a3
-; RV32M-NEXT: srli a4, a3, 4
-; RV32M-NEXT: add a3, a3, a4
-; RV32M-NEXT: and a3, a3, s1
-; RV32M-NEXT: mul a3, a3, s0
-; RV32M-NEXT: srli t1, a3, 24
-; RV32M-NEXT: .LBB1_45: # %udiv-end1
-; RV32M-NEXT: or t0, t0, t3
-; RV32M-NEXT: or t3, t4, t5
-; RV32M-NEXT: and a7, a7, t2
-; RV32M-NEXT: and a2, a2, s4
-; RV32M-NEXT: bnez t6, .LBB1_47
-; RV32M-NEXT: # %bb.46: # %udiv-end1
-; RV32M-NEXT: mv t1, a0
-; RV32M-NEXT: .LBB1_47: # %udiv-end1
-; RV32M-NEXT: seqz a0, t0
-; RV32M-NEXT: seqz t0, t3
-; RV32M-NEXT: sltu t3, a1, t1
-; RV32M-NEXT: sub t4, a7, a2
-; RV32M-NEXT: mv t2, t3
-; RV32M-NEXT: beq a7, a2, .LBB1_49
-; RV32M-NEXT: # %bb.48: # %udiv-end1
-; RV32M-NEXT: sltu t2, a7, a2
-; RV32M-NEXT: .LBB1_49: # %udiv-end1
-; RV32M-NEXT: sub t6, t4, t3
-; RV32M-NEXT: or a2, a0, t0
-; RV32M-NEXT: neg a0, t2
-; RV32M-NEXT: seqz s2, t2
-; RV32M-NEXT: addi s2, s2, -1
-; RV32M-NEXT: or a7, a0, s2
-; RV32M-NEXT: sub t1, a1, t1
-; RV32M-NEXT: beqz a7, .LBB1_51
-; RV32M-NEXT: # %bb.50: # %udiv-end1
-; RV32M-NEXT: snez a1, a7
-; RV32M-NEXT: j .LBB1_52
-; RV32M-NEXT: .LBB1_51:
-; RV32M-NEXT: snez a1, t6
-; RV32M-NEXT: sltiu a3, t1, 128
-; RV32M-NEXT: xori a3, a3, 1
-; RV32M-NEXT: or a1, a3, a1
-; RV32M-NEXT: .LBB1_52: # %udiv-end1
-; RV32M-NEXT: or a3, a2, a1
-; RV32M-NEXT: addi a4, a3, -1
-; RV32M-NEXT: and a7, a4, s7
-; RV32M-NEXT: and a2, a4, s6
-; RV32M-NEXT: and a1, a4, s8
-; RV32M-NEXT: and t0, a4, s9
-; RV32M-NEXT: sw a5, 16(sp) # 4-byte Folded Spill
-; RV32M-NEXT: bnez a3, .LBB1_64
-; RV32M-NEXT: # %bb.53: # %udiv-end1
-; RV32M-NEXT: xori a3, t1, 127
-; RV32M-NEXT: or a3, a3, a0
-; RV32M-NEXT: or a4, t6, s2
-; RV32M-NEXT: or a3, a3, a4
-; RV32M-NEXT: beqz a3, .LBB1_64
-; RV32M-NEXT: # %bb.54: # %udiv-bb1
-; RV32M-NEXT: sw a6, 56(sp) # 4-byte Folded Spill
-; RV32M-NEXT: addi s0, t1, 1
-; RV32M-NEXT: sub a1, s5, t1
-; RV32M-NEXT: sw zero, 104(sp)
-; RV32M-NEXT: sw zero, 108(sp)
-; RV32M-NEXT: sw zero, 112(sp)
-; RV32M-NEXT: sw zero, 116(sp)
-; RV32M-NEXT: sw s9, 120(sp)
-; RV32M-NEXT: sw s8, 124(sp)
-; RV32M-NEXT: sw s6, 128(sp)
-; RV32M-NEXT: sw s7, 132(sp)
-; RV32M-NEXT: addi a2, sp, 120
-; RV32M-NEXT: seqz a3, s0
-; RV32M-NEXT: srli a4, a1, 3
-; RV32M-NEXT: andi a5, a1, 31
-; RV32M-NEXT: add t6, t6, a3
-; RV32M-NEXT: andi a4, a4, 12
-; RV32M-NEXT: xori a3, a5, 31
-; RV32M-NEXT: or a5, s0, t6
-; RV32M-NEXT: sub a2, a2, a4
-; RV32M-NEXT: seqz s4, a5
-; RV32M-NEXT: lw a4, 0(a2)
-; RV32M-NEXT: lw a5, 4(a2)
-; RV32M-NEXT: lw a6, 8(a2)
-; RV32M-NEXT: lw a2, 12(a2)
-; RV32M-NEXT: add s4, a0, s4
-; RV32M-NEXT: sltu a0, s4, a0
-; RV32M-NEXT: or a7, s0, s4
-; RV32M-NEXT: add s2, s2, a0
-; RV32M-NEXT: or a0, t6, s2
-; RV32M-NEXT: srli t0, a6, 1
-; RV32M-NEXT: srli t1, a5, 1
-; RV32M-NEXT: or a0, a7, a0
-; RV32M-NEXT: srli a7, a4, 1
-; RV32M-NEXT: srl t0, t0, a3
-; RV32M-NEXT: srl t1, t1, a3
-; RV32M-NEXT: srl a3, a7, a3
-; RV32M-NEXT: sll a2, a2, a1
-; RV32M-NEXT: mv a7, s7
-; RV32M-NEXT: or s7, a2, t0
-; RV32M-NEXT: sll a2, a6, a1
-; RV32M-NEXT: sll a5, a5, a1
-; RV32M-NEXT: or s5, a2, t1
-; RV32M-NEXT: mv a6, s6
-; RV32M-NEXT: or s6, a5, a3
-; RV32M-NEXT: sll s1, a4, a1
-; RV32M-NEXT: beqz a0, .LBB1_62
-; RV32M-NEXT: # %bb.55: # %udiv-preheader
-; RV32M-NEXT: mv a1, s8
-; RV32M-NEXT: mv a2, s9
-; RV32M-NEXT: li s3, 0
-; RV32M-NEXT: li s8, 0
-; RV32M-NEXT: li s9, 0
-; RV32M-NEXT: li s10, 0
-; RV32M-NEXT: sw zero, 88(sp)
-; RV32M-NEXT: sw zero, 92(sp)
-; RV32M-NEXT: sw zero, 96(sp)
-; RV32M-NEXT: sw zero, 100(sp)
-; RV32M-NEXT: sw a2, 72(sp)
-; RV32M-NEXT: sw a1, 76(sp)
-; RV32M-NEXT: sw a6, 80(sp)
-; RV32M-NEXT: sw a7, 84(sp)
-; RV32M-NEXT: srli a0, s0, 3
-; RV32M-NEXT: addi a1, sp, 72
-; RV32M-NEXT: andi a0, a0, 12
-; RV32M-NEXT: add a0, a1, a0
-; RV32M-NEXT: lw a1, 4(a0)
-; RV32M-NEXT: lw a2, 8(a0)
-; RV32M-NEXT: lw a3, 12(a0)
-; RV32M-NEXT: lw a4, 0(a0)
-; RV32M-NEXT: andi a0, s0, 31
-; RV32M-NEXT: xori a0, a0, 31
-; RV32M-NEXT: slli a5, a3, 1
-; RV32M-NEXT: slli a6, a2, 1
-; RV32M-NEXT: slli a7, a1, 1
-; RV32M-NEXT: sll a5, a5, a0
-; RV32M-NEXT: sll a6, a6, a0
-; RV32M-NEXT: sll a7, a7, a0
-; RV32M-NEXT: lw t1, 56(sp) # 4-byte Folded Reload
-; RV32M-NEXT: seqz t0, t1
-; RV32M-NEXT: srl a0, a2, s0
-; RV32M-NEXT: or a0, a0, a5
-; RV32M-NEXT: or a2, t1, s11
-; RV32M-NEXT: sub s11, s11, t0
-; RV32M-NEXT: seqz a2, a2
-; RV32M-NEXT: srl a1, a1, s0
-; RV32M-NEXT: or t3, a1, a6
-; RV32M-NEXT: mv a6, t1
-; RV32M-NEXT: mv a1, ra
-; RV32M-NEXT: sub ra, ra, a2
-; RV32M-NEXT: sltu a1, a1, a2
-; RV32M-NEXT: lw a2, 68(sp) # 4-byte Folded Reload
-; RV32M-NEXT: sub t2, a2, a1
-; RV32M-NEXT: srl a1, a4, s0
-; RV32M-NEXT: srl t5, a3, s0
-; RV32M-NEXT: or t0, a1, a7
-; RV32M-NEXT: addi a1, t1, -1
-; RV32M-NEXT: sw a1, 52(sp) # 4-byte Folded Spill
-; RV32M-NEXT: j .LBB1_57
-; RV32M-NEXT: .LBB1_56: # %udiv-do-while
-; RV32M-NEXT: # in Loop: Header=BB1_57 Depth=1
-; RV32M-NEXT: srli a3, s5, 31
-; RV32M-NEXT: slli s7, s7, 1
-; RV32M-NEXT: sub a1, a7, a1
-; RV32M-NEXT: srli a4, s6, 31
-; RV32M-NEXT: slli s5, s5, 1
-; RV32M-NEXT: or a3, s7, a3
-; RV32M-NEXT: srli a5, s1, 31
-; RV32M-NEXT: slli s6, s6, 1
-; RV32M-NEXT: slli s1, s1, 1
-; RV32M-NEXT: or a4, s5, a4
-; RV32M-NEXT: lw a6, 68(sp) # 4-byte Folded Reload
-; RV32M-NEXT: and a7, t3, a6
-; RV32M-NEXT: or a5, s6, a5
-; RV32M-NEXT: lw a6, 64(sp) # 4-byte Folded Reload
-; RV32M-NEXT: and s5, t3, a6
-; RV32M-NEXT: or s1, s3, s1
-; RV32M-NEXT: sub a6, t1, s5
-; RV32M-NEXT: sltu t1, t1, s5
-; RV32M-NEXT: or s5, s0, t6
-; RV32M-NEXT: sub a7, a0, a7
-; RV32M-NEXT: seqz a0, s0
-; RV32M-NEXT: addi s0, s0, -1
-; RV32M-NEXT: andi s3, t3, 1
-; RV32M-NEXT: sub t3, a1, a2
-; RV32M-NEXT: seqz a1, s5
-; RV32M-NEXT: sub t6, t6, a0
-; RV32M-NEXT: or s6, s8, a5
-; RV32M-NEXT: or s5, s9, a4
-; RV32M-NEXT: or s7, s10, a3
-; RV32M-NEXT: sub a0, a6, t5
-; RV32M-NEXT: sltu a2, a6, t5
-; RV32M-NEXT: sub a3, a7, t1
-; RV32M-NEXT: sltu a4, s4, a1
-; RV32M-NEXT: sub s4, s4, a1
-; RV32M-NEXT: sub t5, a3, a2
-; RV32M-NEXT: sub s2, s2, a4
-; RV32M-NEXT: or a1, t6, s2
-; RV32M-NEXT: or a2, s0, s4
-; RV32M-NEXT: or a1, a2, a1
-; RV32M-NEXT: sub t0, t4, t0
-; RV32M-NEXT: li s8, 0
-; RV32M-NEXT: li s9, 0
-; RV32M-NEXT: li s10, 0
-; RV32M-NEXT: lw a6, 56(sp) # 4-byte Folded Reload
-; RV32M-NEXT: beqz a1, .LBB1_63
-; RV32M-NEXT: .LBB1_57: # %udiv-do-while
-; RV32M-NEXT: # =>This Inner Loop Header: Depth=1
-; RV32M-NEXT: srli a1, t0, 31
-; RV32M-NEXT: slli a2, t3, 1
-; RV32M-NEXT: slli t0, t0, 1
-; RV32M-NEXT: or a7, a2, a1
-; RV32M-NEXT: srli a1, s7, 31
-; RV32M-NEXT: or t4, t0, a1
-; RV32M-NEXT: beq s11, a7, .LBB1_59
-; RV32M-NEXT: # %bb.58: # %udiv-do-while
-; RV32M-NEXT: # in Loop: Header=BB1_57 Depth=1
-; RV32M-NEXT: sltu a1, s11, a7
-; RV32M-NEXT: j .LBB1_60
-; RV32M-NEXT: .LBB1_59: # in Loop: Header=BB1_57 Depth=1
-; RV32M-NEXT: lw a1, 52(sp) # 4-byte Folded Reload
-; RV32M-NEXT: sltu a1, a1, t4
-; RV32M-NEXT: .LBB1_60: # %udiv-do-while
-; RV32M-NEXT: # in Loop: Header=BB1_57 Depth=1
-; RV32M-NEXT: srli a2, a0, 31
-; RV32M-NEXT: slli t5, t5, 1
-; RV32M-NEXT: srli a3, t3, 31
-; RV32M-NEXT: slli a4, a0, 1
-; RV32M-NEXT: or a0, t5, a2
-; RV32M-NEXT: or t1, a4, a3
-; RV32M-NEXT: sub a2, ra, t1
-; RV32M-NEXT: sltu a3, ra, t1
-; RV32M-NEXT: sub a4, t2, a0
-; RV32M-NEXT: sltu a1, a2, a1
-; RV32M-NEXT: sub a4, a4, a3
-; RV32M-NEXT: sub a4, a4, a1
-; RV32M-NEXT: srai t3, a4, 31
-; RV32M-NEXT: and t0, t3, a6
-; RV32M-NEXT: lw a1, 60(sp) # 4-byte Folded Reload
-; RV32M-NEXT: and a1, t3, a1
-; RV32M-NEXT: sltu a2, t4, t0
-; RV32M-NEXT: mv t5, a2
-; RV32M-NEXT: beq a7, a1, .LBB1_56
-; RV32M-NEXT: # %bb.61: # %udiv-do-while
-; RV32M-NEXT: # in Loop: Header=BB1_57 Depth=1
-; RV32M-NEXT: sltu t5, a7, a1
-; RV32M-NEXT: j .LBB1_56
-; RV32M-NEXT: .LBB1_62:
-; RV32M-NEXT: li s3, 0
-; RV32M-NEXT: lw a6, 56(sp) # 4-byte Folded Reload
-; RV32M-NEXT: .LBB1_63: # %udiv-loop-exit
-; RV32M-NEXT: srli a0, s1, 31
-; RV32M-NEXT: slli a1, s6, 1
-; RV32M-NEXT: srli a2, s6, 31
-; RV32M-NEXT: or a1, a1, a0
-; RV32M-NEXT: slli a0, s5, 1
-; RV32M-NEXT: srli a3, s5, 31
-; RV32M-NEXT: slli s7, s7, 1
-; RV32M-NEXT: slli s1, s1, 1
-; RV32M-NEXT: or a2, a0, a2
-; RV32M-NEXT: or a7, s7, a3
-; RV32M-NEXT: or t0, s3, s1
-; RV32M-NEXT: lw ra, 64(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw s11, 60(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw s8, 48(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw s9, 44(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw s6, 40(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw s7, 36(sp) # 4-byte Folded Reload
-; RV32M-NEXT: .LBB1_64: # %udiv-end
-; RV32M-NEXT: mulhu a3, a6, t0
-; RV32M-NEXT: mul a4, s11, t0
-; RV32M-NEXT: mulhu a5, s11, t0
-; RV32M-NEXT: mv a0, a6
-; RV32M-NEXT: mul a6, a6, a1
-; RV32M-NEXT: mulhu t1, a0, a1
-; RV32M-NEXT: mul t2, s11, a1
-; RV32M-NEXT: mul t3, t0, ra
-; RV32M-NEXT: mul t4, a2, a0
-; RV32M-NEXT: mulhu t5, s11, a1
-; RV32M-NEXT: mul t6, a2, s11
-; RV32M-NEXT: mulhu a2, a2, a0
-; RV32M-NEXT: mul a7, a7, a0
-; RV32M-NEXT: mul s0, a1, ra
-; RV32M-NEXT: mulhu a1, t0, ra
-; RV32M-NEXT: lw s1, 68(sp) # 4-byte Folded Reload
-; RV32M-NEXT: mul s1, t0, s1
-; RV32M-NEXT: mul a0, a0, t0
-; RV32M-NEXT: add a3, a4, a3
-; RV32M-NEXT: add t3, t4, t3
-; RV32M-NEXT: add a2, a2, t6
-; RV32M-NEXT: add s1, a1, s1
-; RV32M-NEXT: sltu a4, a3, a4
-; RV32M-NEXT: add a1, a6, a3
-; RV32M-NEXT: add a2, a2, a7
-; RV32M-NEXT: add s0, s1, s0
-; RV32M-NEXT: sltu a3, t3, t4
-; RV32M-NEXT: add a4, a5, a4
-; RV32M-NEXT: sltu a5, a1, a6
-; RV32M-NEXT: add a2, a2, s0
-; RV32M-NEXT: add a5, t1, a5
-; RV32M-NEXT: add a2, a2, a3
-; RV32M-NEXT: add a5, a4, a5
-; RV32M-NEXT: add a3, t2, a5
-; RV32M-NEXT: sltu a5, a5, a4
-; RV32M-NEXT: add a4, a3, t3
-; RV32M-NEXT: sltu a6, a3, t2
-; RV32M-NEXT: add a5, t5, a5
-; RV32M-NEXT: sltu a3, a4, a3
-; RV32M-NEXT: add a5, a5, a6
-; RV32M-NEXT: sltu a6, s6, a4
-; RV32M-NEXT: add a2, a5, a2
-; RV32M-NEXT: sub a3, s7, a3
-; RV32M-NEXT: sub a2, a3, a2
-; RV32M-NEXT: sltu a3, s9, a0
-; RV32M-NEXT: sub a2, a2, a6
-; RV32M-NEXT: mv a5, a3
-; RV32M-NEXT: beq s8, a1, .LBB1_66
-; RV32M-NEXT: # %bb.65: # %udiv-end
-; RV32M-NEXT: sltu a5, s8, a1
-; RV32M-NEXT: .LBB1_66: # %udiv-end
-; RV32M-NEXT: sub a4, s6, a4
-; RV32M-NEXT: sub a1, s8, a1
-; RV32M-NEXT: sub a0, s9, a0
-; RV32M-NEXT: lw a6, 28(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw a7, 12(sp) # 4-byte Folded Reload
-; RV32M-NEXT: sw a7, 0(a6)
-; RV32M-NEXT: lw a7, 24(sp) # 4-byte Folded Reload
-; RV32M-NEXT: sw a7, 4(a6)
-; RV32M-NEXT: lw a7, 16(sp) # 4-byte Folded Reload
-; RV32M-NEXT: sw a7, 8(a6)
-; RV32M-NEXT: lw a7, 20(sp) # 4-byte Folded Reload
-; RV32M-NEXT: sw a7, 12(a6)
-; RV32M-NEXT: sltu a6, a4, a5
-; RV32M-NEXT: sub a4, a4, a5
-; RV32M-NEXT: sub a1, a1, a3
-; RV32M-NEXT: sub a2, a2, a6
-; RV32M-NEXT: lw a3, 32(sp) # 4-byte Folded Reload
-; RV32M-NEXT: sw a0, 0(a3)
-; RV32M-NEXT: sw a1, 4(a3)
-; RV32M-NEXT: sw a4, 8(a3)
-; RV32M-NEXT: sw a2, 12(a3)
-; RV32M-NEXT: lw ra, 252(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw s0, 248(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw s1, 244(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw s2, 240(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw s3, 236(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw s4, 232(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw s5, 228(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw s6, 224(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw s7, 220(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw s8, 216(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw s9, 212(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw s10, 208(sp) # 4-byte Folded Reload
-; RV32M-NEXT: lw s11, 204(sp) # 4-byte Folded Reload
-; RV32M-NEXT: .cfi_restore ra
-; RV32M-NEXT: .cfi_restore s0
-; RV32M-NEXT: .cfi_restore s1
-; RV32M-NEXT: .cfi_restore s2
-; RV32M-NEXT: .cfi_restore s3
-; RV32M-NEXT: .cfi_restore s4
-; RV32M-NEXT: .cfi_restore s5
-; RV32M-NEXT: .cfi_restore s6
-; RV32M-NEXT: .cfi_restore s7
-; RV32M-NEXT: .cfi_restore s8
-; RV32M-NEXT: .cfi_restore s9
-; RV32M-NEXT: .cfi_restore s10
-; RV32M-NEXT: .cfi_restore s11
-; RV32M-NEXT: addi sp, sp, 256
-; RV32M-NEXT: .cfi_def_cfa_offset 0
-; RV32M-NEXT: ret
+; RV32M-NOT: __udivmodti4
+; RV32M-NOT: __udivti3
+; RV32M-NOT: __umodti3
+; RV32M: ret
%q = udiv i128 %n, %d
%r = urem i128 %n, %d
store i128 %q, ptr %q_out
diff --git a/llvm/test/CodeGen/X86/i128-divrem-libcall.ll b/llvm/test/CodeGen/X86/i128-divrem-libcall.ll
index 779eac61995e2..c2127a13c4c07 100644
--- a/llvm/test/CodeGen/X86/i128-divrem-libcall.ll
+++ b/llvm/test/CodeGen/X86/i128-divrem-libcall.ll
@@ -1,1763 +1,76 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefix=LINUX-X64
; RUN: llc < %s -mtriple=x86_64-linux-gnux32 | FileCheck %s --check-prefix=LINUX-X32
; RUN: llc < %s -mtriple=x86_64-apple-macosx | FileCheck %s --check-prefix=DARWIN-X64
; RUN: llc < %s -mtriple=x86_64-w64-mingw32 | FileCheck %s --check-prefix=MINGW-X64
; RUN: llc < %s -mtriple=x86_64-pc-windows-msvc | FileCheck %s --check-prefix=WIN64
-; RUN: llc < %s -mtriple=i386-linux-gnu | FileCheck %s --check-prefix=LINUX-X86
-; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefix=LINUX-X86
-; RUN: llc < %s -mtriple=i686-pc-windows-msvc | FileCheck %s --check-prefix=WIN32
+; RUN: llc < %s -mtriple=i386-linux-gnu | FileCheck %s --check-prefix=LINUX-X86
+; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefix=LINUX-X86
+; RUN: llc < %s -mtriple=i686-pc-windows-msvc | FileCheck %s --check-prefix=WIN32
+
+; The 64-bit triples use fused libcalls with ABI-specific calling conventions.
+; The 32-bit triples inline-expand i128 div/rem, so only assert the absence of
+; libcalls there.
define void @sdivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
; LINUX-X64-LABEL: sdivrem_i128:
-; LINUX-X64: # %bb.0:
-; LINUX-X64-NEXT: pushq %r14
-; LINUX-X64-NEXT: .cfi_def_cfa_offset 16
-; LINUX-X64-NEXT: pushq %rbx
-; LINUX-X64-NEXT: .cfi_def_cfa_offset 24
-; LINUX-X64-NEXT: subq $24, %rsp
-; LINUX-X64-NEXT: .cfi_def_cfa_offset 48
-; LINUX-X64-NEXT: .cfi_offset %rbx, -24
-; LINUX-X64-NEXT: .cfi_offset %r14, -16
-; LINUX-X64-NEXT: movq %r8, %rax
-; LINUX-X64-NEXT: movq %rsi, %rbx
-; LINUX-X64-NEXT: movq %rdi, %r14
-; LINUX-X64-NEXT: movq %rsp, %r8
-; LINUX-X64-NEXT: movq %rdx, %rdi
-; LINUX-X64-NEXT: movq %rcx, %rsi
-; LINUX-X64-NEXT: movq %rax, %rdx
-; LINUX-X64-NEXT: movq %r9, %rcx
-; LINUX-X64-NEXT: callq __divmodti4 at PLT
-; LINUX-X64-NEXT: movq (%rsp), %rcx
-; LINUX-X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi
-; LINUX-X64-NEXT: movq %rax, (%r14)
-; LINUX-X64-NEXT: movq %rdx, 8(%r14)
-; LINUX-X64-NEXT: movq %rcx, (%rbx)
-; LINUX-X64-NEXT: movq %rsi, 8(%rbx)
-; LINUX-X64-NEXT: addq $24, %rsp
-; LINUX-X64-NEXT: .cfi_def_cfa_offset 24
-; LINUX-X64-NEXT: popq %rbx
-; LINUX-X64-NEXT: .cfi_def_cfa_offset 16
-; LINUX-X64-NEXT: popq %r14
-; LINUX-X64-NEXT: .cfi_def_cfa_offset 8
-; LINUX-X64-NEXT: retq
+; LINUX-X64: movq %rsp, %r8
+; LINUX-X64: callq __divmodti4 at PLT
+; LINUX-X64: movq (%rsp), %rcx
+; LINUX-X64: movq {{[0-9]+}}(%rsp), %rsi
+; LINUX-X64: movq %rax, (%r14)
+; LINUX-X64: movq %rdx, 8(%r14)
+; LINUX-X64: movq %rcx, (%rbx)
+; LINUX-X64: movq %rsi, 8(%rbx)
;
; LINUX-X32-LABEL: sdivrem_i128:
-; LINUX-X32: # %bb.0:
-; LINUX-X32-NEXT: pushq %r14
-; LINUX-X32-NEXT: .cfi_def_cfa_offset 16
-; LINUX-X32-NEXT: pushq %rbx
-; LINUX-X32-NEXT: .cfi_def_cfa_offset 24
-; LINUX-X32-NEXT: subl $24, %esp
-; LINUX-X32-NEXT: .cfi_def_cfa_offset 48
-; LINUX-X32-NEXT: .cfi_offset %rbx, -24
-; LINUX-X32-NEXT: .cfi_offset %r14, -16
-; LINUX-X32-NEXT: movq %r8, %rax
-; LINUX-X32-NEXT: movq %rsi, %rbx
-; LINUX-X32-NEXT: movq %rdi, %r14
-; LINUX-X32-NEXT: movl %esp, %r8d
-; LINUX-X32-NEXT: movq %rdx, %rdi
-; LINUX-X32-NEXT: movq %rcx, %rsi
-; LINUX-X32-NEXT: movq %rax, %rdx
-; LINUX-X32-NEXT: movq %r9, %rcx
-; LINUX-X32-NEXT: callq __divmodti4 at PLT
-; LINUX-X32-NEXT: movq (%esp), %rcx
-; LINUX-X32-NEXT: movq {{[0-9]+}}(%esp), %rsi
-; LINUX-X32-NEXT: movq %rax, (%r14d)
-; LINUX-X32-NEXT: movq %rdx, 8(%r14d)
-; LINUX-X32-NEXT: movq %rcx, (%ebx)
-; LINUX-X32-NEXT: movq %rsi, 8(%ebx)
-; LINUX-X32-NEXT: addl $24, %esp
-; LINUX-X32-NEXT: .cfi_def_cfa_offset 24
-; LINUX-X32-NEXT: popq %rbx
-; LINUX-X32-NEXT: .cfi_def_cfa_offset 16
-; LINUX-X32-NEXT: popq %r14
-; LINUX-X32-NEXT: .cfi_def_cfa_offset 8
-; LINUX-X32-NEXT: retq
+; LINUX-X32: movl %esp, %r8d
+; LINUX-X32: callq __divmodti4 at PLT
+; LINUX-X32: movq (%esp), %rcx
+; LINUX-X32: movq {{[0-9]+}}(%esp), %rsi
+; LINUX-X32: movq %rax, (%r14d)
+; LINUX-X32: movq %rdx, 8(%r14d)
+; LINUX-X32: movq %rcx, (%ebx)
+; LINUX-X32: movq %rsi, 8(%ebx)
;
; DARWIN-X64-LABEL: sdivrem_i128:
-; DARWIN-X64: ## %bb.0:
-; DARWIN-X64-NEXT: pushq %r14
-; DARWIN-X64-NEXT: .cfi_def_cfa_offset 16
-; DARWIN-X64-NEXT: pushq %rbx
-; DARWIN-X64-NEXT: .cfi_def_cfa_offset 24
-; DARWIN-X64-NEXT: subq $24, %rsp
-; DARWIN-X64-NEXT: .cfi_def_cfa_offset 48
-; DARWIN-X64-NEXT: .cfi_offset %rbx, -24
-; DARWIN-X64-NEXT: .cfi_offset %r14, -16
-; DARWIN-X64-NEXT: movq %r8, %rax
-; DARWIN-X64-NEXT: movq %rsi, %rbx
-; DARWIN-X64-NEXT: movq %rdi, %r14
-; DARWIN-X64-NEXT: movq %rsp, %r8
-; DARWIN-X64-NEXT: movq %rdx, %rdi
-; DARWIN-X64-NEXT: movq %rcx, %rsi
-; DARWIN-X64-NEXT: movq %rax, %rdx
-; DARWIN-X64-NEXT: movq %r9, %rcx
-; DARWIN-X64-NEXT: callq ___divmodti4
-; DARWIN-X64-NEXT: movq (%rsp), %rcx
-; DARWIN-X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi
-; DARWIN-X64-NEXT: movq %rax, (%r14)
-; DARWIN-X64-NEXT: movq %rdx, 8(%r14)
-; DARWIN-X64-NEXT: movq %rcx, (%rbx)
-; DARWIN-X64-NEXT: movq %rsi, 8(%rbx)
-; DARWIN-X64-NEXT: addq $24, %rsp
-; DARWIN-X64-NEXT: popq %rbx
-; DARWIN-X64-NEXT: popq %r14
-; DARWIN-X64-NEXT: retq
+; DARWIN-X64: movq %rsp, %r8
+; DARWIN-X64: callq ___divmodti4
+; DARWIN-X64: movq (%rsp), %rcx
+; DARWIN-X64: movq {{[0-9]+}}(%rsp), %rsi
+; DARWIN-X64: movq %rax, (%r14)
+; DARWIN-X64: movq %rdx, 8(%r14)
+; DARWIN-X64: movq %rcx, (%rbx)
+; DARWIN-X64: movq %rsi, 8(%rbx)
;
; MINGW-X64-LABEL: sdivrem_i128:
-; MINGW-X64: # %bb.0:
-; MINGW-X64-NEXT: pushq %rsi
-; MINGW-X64-NEXT: .seh_pushreg %rsi
-; MINGW-X64-NEXT: pushq %rdi
-; MINGW-X64-NEXT: .seh_pushreg %rdi
-; MINGW-X64-NEXT: subq $88, %rsp
-; MINGW-X64-NEXT: .seh_stackalloc 88
-; MINGW-X64-NEXT: .seh_endprologue
-; MINGW-X64-NEXT: movq %rdx, %rsi
-; MINGW-X64-NEXT: movq %rcx, %rdi
-; MINGW-X64-NEXT: movaps {{[0-9]+}}(%rsp), %xmm0
-; MINGW-X64-NEXT: movq %r9, {{[0-9]+}}(%rsp)
-; MINGW-X64-NEXT: movq %r8, {{[0-9]+}}(%rsp)
-; MINGW-X64-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; MINGW-X64-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
-; MINGW-X64-NEXT: leaq {{[0-9]+}}(%rsp), %rdx
-; MINGW-X64-NEXT: leaq {{[0-9]+}}(%rsp), %r8
-; MINGW-X64-NEXT: callq __divmodti4
-; MINGW-X64-NEXT: movaps {{[0-9]+}}(%rsp), %xmm1
-; MINGW-X64-NEXT: movaps %xmm0, (%rdi)
-; MINGW-X64-NEXT: movaps %xmm1, (%rsi)
-; MINGW-X64-NEXT: .seh_startepilogue
-; MINGW-X64-NEXT: addq $88, %rsp
-; MINGW-X64-NEXT: popq %rdi
-; MINGW-X64-NEXT: popq %rsi
-; MINGW-X64-NEXT: .seh_endepilogue
-; MINGW-X64-NEXT: retq
-; MINGW-X64-NEXT: .seh_endproc
+; MINGW-X64: leaq {{[0-9]+}}(%rsp), %rcx
+; MINGW-X64: leaq {{[0-9]+}}(%rsp), %rdx
+; MINGW-X64: leaq {{[0-9]+}}(%rsp), %r8
+; MINGW-X64: callq __divmodti4
+; MINGW-X64: movaps {{[0-9]+}}(%rsp), %xmm1
+; MINGW-X64: movaps %xmm0, (%rdi)
+; MINGW-X64: movaps %xmm1, (%rsi)
;
; WIN64-LABEL: sdivrem_i128:
-; WIN64: # %bb.0:
-; WIN64-NEXT: pushq %rsi
-; WIN64-NEXT: .seh_pushreg %rsi
-; WIN64-NEXT: pushq %rdi
-; WIN64-NEXT: .seh_pushreg %rdi
-; WIN64-NEXT: subq $88, %rsp
-; WIN64-NEXT: .seh_stackalloc 88
-; WIN64-NEXT: .seh_endprologue
-; WIN64-NEXT: movq %rdx, %rsi
-; WIN64-NEXT: movq %rcx, %rdi
-; WIN64-NEXT: movaps {{[0-9]+}}(%rsp), %xmm0
-; WIN64-NEXT: movq %r9, {{[0-9]+}}(%rsp)
-; WIN64-NEXT: movq %r8, {{[0-9]+}}(%rsp)
-; WIN64-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; WIN64-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
-; WIN64-NEXT: leaq {{[0-9]+}}(%rsp), %rdx
-; WIN64-NEXT: leaq {{[0-9]+}}(%rsp), %r8
-; WIN64-NEXT: callq __divmodti4
-; WIN64-NEXT: movaps {{[0-9]+}}(%rsp), %xmm1
-; WIN64-NEXT: movaps %xmm0, (%rdi)
-; WIN64-NEXT: movaps %xmm1, (%rsi)
-; WIN64-NEXT: .seh_startepilogue
-; WIN64-NEXT: addq $88, %rsp
-; WIN64-NEXT: popq %rdi
-; WIN64-NEXT: popq %rsi
-; WIN64-NEXT: .seh_endepilogue
-; WIN64-NEXT: retq
-; WIN64-NEXT: .seh_endproc
+; WIN64: leaq {{[0-9]+}}(%rsp), %rcx
+; WIN64: leaq {{[0-9]+}}(%rsp), %rdx
+; WIN64: leaq {{[0-9]+}}(%rsp), %r8
+; WIN64: callq __divmodti4
+; WIN64: movaps {{[0-9]+}}(%rsp), %xmm1
+; WIN64: movaps %xmm0, (%rdi)
+; WIN64: movaps %xmm1, (%rsi)
;
; LINUX-X86-LABEL: sdivrem_i128:
-; LINUX-X86: # %bb.0: # %_udiv-special-cases_udiv-special-cases
-; LINUX-X86-NEXT: pushl %ebp
-; LINUX-X86-NEXT: .cfi_def_cfa_offset 8
-; LINUX-X86-NEXT: pushl %ebx
-; LINUX-X86-NEXT: .cfi_def_cfa_offset 12
-; LINUX-X86-NEXT: pushl %edi
-; LINUX-X86-NEXT: .cfi_def_cfa_offset 16
-; LINUX-X86-NEXT: pushl %esi
-; LINUX-X86-NEXT: .cfi_def_cfa_offset 20
-; LINUX-X86-NEXT: subl $284, %esp # imm = 0x11C
-; LINUX-X86-NEXT: .cfi_def_cfa_offset 304
-; LINUX-X86-NEXT: .cfi_offset %esi, -20
-; LINUX-X86-NEXT: .cfi_offset %edi, -16
-; LINUX-X86-NEXT: .cfi_offset %ebx, -12
-; LINUX-X86-NEXT: .cfi_offset %ebp, -8
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
-; LINUX-X86-NEXT: movl %ebp, %esi
-; LINUX-X86-NEXT: sarl $31, %esi
-; LINUX-X86-NEXT: xorl %esi, %ebp
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; LINUX-X86-NEXT: xorl %esi, %edx
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; LINUX-X86-NEXT: xorl %esi, %eax
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; LINUX-X86-NEXT: xorl %esi, %ecx
-; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: subl %esi, %ecx
-; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: sbbl %esi, %eax
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: sbbl %esi, %edx
-; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: sbbl %esi, %ebp
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; LINUX-X86-NEXT: movl %ebx, %edx
-; LINUX-X86-NEXT: sarl $31, %edx
-; LINUX-X86-NEXT: xorl %edx, %ebx
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; LINUX-X86-NEXT: xorl %edx, %edi
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; LINUX-X86-NEXT: xorl %edx, %eax
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; LINUX-X86-NEXT: xorl %edx, %ecx
-; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: subl %edx, %ecx
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: sbbl %edx, %eax
-; LINUX-X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: sbbl %edx, %edi
-; LINUX-X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: sbbl %edx, %ebx
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: orl %ebx, %eax
-; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: orl %edi, %ecx
-; LINUX-X86-NEXT: orl %eax, %ecx
-; LINUX-X86-NEXT: sete {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: orl %ebp, %ecx
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; LINUX-X86-NEXT: orl %eax, %edx
-; LINUX-X86-NEXT: orl %ecx, %edx
-; LINUX-X86-NEXT: sete %cl
-; LINUX-X86-NEXT: testl %ebx, %ebx
-; LINUX-X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: jne .LBB0_1
-; LINUX-X86-NEXT: # %bb.2: # %_udiv-special-cases_udiv-special-cases
-; LINUX-X86-NEXT: bsrl %edi, %ebp
-; LINUX-X86-NEXT: xorl $31, %ebp
-; LINUX-X86-NEXT: orl $32, %ebp
-; LINUX-X86-NEXT: jmp .LBB0_3
-; LINUX-X86-NEXT: .LBB0_1:
-; LINUX-X86-NEXT: bsrl %ebx, %ebp
-; LINUX-X86-NEXT: xorl $31, %ebp
-; LINUX-X86-NEXT: .LBB0_3: # %_udiv-special-cases_udiv-special-cases
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; LINUX-X86-NEXT: testl %edx, %edx
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; LINUX-X86-NEXT: jne .LBB0_4
-; LINUX-X86-NEXT: # %bb.5: # %_udiv-special-cases_udiv-special-cases
-; LINUX-X86-NEXT: bsrl %esi, %edx
-; LINUX-X86-NEXT: xorl $31, %edx
-; LINUX-X86-NEXT: orl $32, %edx
-; LINUX-X86-NEXT: jmp .LBB0_6
-; LINUX-X86-NEXT: .LBB0_4:
-; LINUX-X86-NEXT: bsrl %edx, %edx
-; LINUX-X86-NEXT: xorl $31, %edx
-; LINUX-X86-NEXT: .LBB0_6: # %_udiv-special-cases_udiv-special-cases
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; LINUX-X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; LINUX-X86-NEXT: jne .LBB0_8
-; LINUX-X86-NEXT: # %bb.7: # %_udiv-special-cases_udiv-special-cases
-; LINUX-X86-NEXT: orl $64, %edx
-; LINUX-X86-NEXT: movl %edx, %ebp
-; LINUX-X86-NEXT: .LBB0_8: # %_udiv-special-cases_udiv-special-cases
-; LINUX-X86-NEXT: testl %ebx, %ebx
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; LINUX-X86-NEXT: jne .LBB0_9
-; LINUX-X86-NEXT: # %bb.10: # %_udiv-special-cases_udiv-special-cases
-; LINUX-X86-NEXT: bsrl %eax, %edx
-; LINUX-X86-NEXT: xorl $31, %edx
-; LINUX-X86-NEXT: orl $32, %edx
-; LINUX-X86-NEXT: testl %esi, %esi
-; LINUX-X86-NEXT: je .LBB0_13
-; LINUX-X86-NEXT: .LBB0_12:
-; LINUX-X86-NEXT: bsrl %esi, %esi
-; LINUX-X86-NEXT: xorl $31, %esi
-; LINUX-X86-NEXT: jmp .LBB0_14
-; LINUX-X86-NEXT: .LBB0_9:
-; LINUX-X86-NEXT: bsrl %ebx, %edx
-; LINUX-X86-NEXT: xorl $31, %edx
-; LINUX-X86-NEXT: testl %esi, %esi
-; LINUX-X86-NEXT: jne .LBB0_12
-; LINUX-X86-NEXT: .LBB0_13: # %_udiv-special-cases_udiv-special-cases
-; LINUX-X86-NEXT: bsrl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; LINUX-X86-NEXT: xorl $31, %esi
-; LINUX-X86-NEXT: orl $32, %esi
-; LINUX-X86-NEXT: .LBB0_14: # %_udiv-special-cases_udiv-special-cases
-; LINUX-X86-NEXT: orb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
-; LINUX-X86-NEXT: orl %ebx, %eax
-; LINUX-X86-NEXT: jne .LBB0_16
-; LINUX-X86-NEXT: # %bb.15: # %_udiv-special-cases_udiv-special-cases
-; LINUX-X86-NEXT: orl $64, %esi
-; LINUX-X86-NEXT: movl %esi, %edx
-; LINUX-X86-NEXT: .LBB0_16: # %_udiv-special-cases_udiv-special-cases
-; LINUX-X86-NEXT: xorl %ebx, %ebx
-; LINUX-X86-NEXT: movl %ebp, %ecx
-; LINUX-X86-NEXT: subl %edx, %ecx
-; LINUX-X86-NEXT: movl $0, %edx
-; LINUX-X86-NEXT: sbbl %edx, %edx
-; LINUX-X86-NEXT: movl $0, %esi
-; LINUX-X86-NEXT: sbbl %esi, %esi
-; LINUX-X86-NEXT: movl $0, %edi
-; LINUX-X86-NEXT: sbbl %edi, %edi
-; LINUX-X86-NEXT: cmpb $0, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Reload
-; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: jne .LBB0_17
-; LINUX-X86-NEXT: # %bb.18: # %select.false.sink
-; LINUX-X86-NEXT: movl $127, %eax
-; LINUX-X86-NEXT: cmpl %ecx, %eax
-; LINUX-X86-NEXT: movl $0, %eax
-; LINUX-X86-NEXT: sbbl %edx, %eax
-; LINUX-X86-NEXT: movl $0, %eax
-; LINUX-X86-NEXT: sbbl %esi, %eax
-; LINUX-X86-NEXT: movl $0, %eax
-; LINUX-X86-NEXT: sbbl %edi, %eax
-; LINUX-X86-NEXT: setb %al
-; LINUX-X86-NEXT: .LBB0_19: # %select.end
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; LINUX-X86-NEXT: xorl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: testb %al, %al
-; LINUX-X86-NEXT: movl $0, %edx
-; LINUX-X86-NEXT: movl $0, %ebp
-; LINUX-X86-NEXT: movl $0, %esi
-; LINUX-X86-NEXT: jne .LBB0_21
-; LINUX-X86-NEXT: # %bb.20: # %select.end
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; LINUX-X86-NEXT: movl %edi, %edx
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; LINUX-X86-NEXT: .LBB0_21: # %select.end
-; LINUX-X86-NEXT: movl %edx, %edi
-; LINUX-X86-NEXT: jne .LBB0_22
-; LINUX-X86-NEXT: # %bb.29: # %select.end
-; LINUX-X86-NEXT: movl %ecx, %eax
-; LINUX-X86-NEXT: xorl $127, %eax
-; LINUX-X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; LINUX-X86-NEXT: movl %ecx, %edx
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; LINUX-X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; LINUX-X86-NEXT: orl %eax, %ecx
-; LINUX-X86-NEXT: movl %edi, %eax
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; LINUX-X86-NEXT: je .LBB0_30
-; LINUX-X86-NEXT: # %bb.27: # %udiv-bb15
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl %edx, %ecx
-; LINUX-X86-NEXT: xorb $127, %cl
-; LINUX-X86-NEXT: movl %ecx, %eax
-; LINUX-X86-NEXT: shrb $3, %al
-; LINUX-X86-NEXT: andb $12, %al
-; LINUX-X86-NEXT: negb %al
-; LINUX-X86-NEXT: movsbl %al, %eax
-; LINUX-X86-NEXT: movl 264(%esp,%eax), %edi
-; LINUX-X86-NEXT: movl 268(%esp,%eax), %esi
-; LINUX-X86-NEXT: shldl %cl, %edi, %esi
-; LINUX-X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl 256(%esp,%eax), %ebp
-; LINUX-X86-NEXT: movl 260(%esp,%eax), %ebx
-; LINUX-X86-NEXT: shldl %cl, %ebx, %edi
-; LINUX-X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: shldl %cl, %ebp, %ebx
-; LINUX-X86-NEXT: shll %cl, %ebp
-; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: addl $1, %edx
-; LINUX-X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; LINUX-X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; LINUX-X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; LINUX-X86-NEXT: jb .LBB0_28
-; LINUX-X86-NEXT: # %bb.24: # %udiv-preheader4
-; LINUX-X86-NEXT: movl %edx, %ecx
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl %ecx, %eax
-; LINUX-X86-NEXT: shrb $3, %al
-; LINUX-X86-NEXT: andb $12, %al
-; LINUX-X86-NEXT: movzbl %al, %eax
-; LINUX-X86-NEXT: movl 220(%esp,%eax), %ebp
-; LINUX-X86-NEXT: movl 216(%esp,%eax), %edx
-; LINUX-X86-NEXT: movl %edx, %esi
-; LINUX-X86-NEXT: shrdl %cl, %ebp, %esi
-; LINUX-X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl 208(%esp,%eax), %edi
-; LINUX-X86-NEXT: movl 212(%esp,%eax), %esi
-; LINUX-X86-NEXT: movl %esi, %eax
-; LINUX-X86-NEXT: shrdl %cl, %edx, %eax
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: shrl %cl, %ebp
-; LINUX-X86-NEXT: movl %ebp, %edx
-; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: # kill: def $cl killed $cl killed $ecx
-; LINUX-X86-NEXT: shrdl %cl, %esi, %edi
-; LINUX-X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: addl $-1, %eax
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: adcl $-1, %eax
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; LINUX-X86-NEXT: adcl $-1, %ecx
-; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; LINUX-X86-NEXT: adcl $-1, %ecx
-; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; LINUX-X86-NEXT: .p2align 4
-; LINUX-X86-NEXT: .LBB0_25: # %udiv-do-while3
-; LINUX-X86-NEXT: # =>This Inner Loop Header: Depth=1
-; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; LINUX-X86-NEXT: shldl $1, %ebp, %edx
-; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; LINUX-X86-NEXT: shldl $1, %edi, %ebp
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; LINUX-X86-NEXT: shldl $1, %esi, %edi
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; LINUX-X86-NEXT: shldl $1, %edx, %esi
-; LINUX-X86-NEXT: shldl $1, %ecx, %edx
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: orl %eax, %edx
-; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: shldl $1, %ebx, %ecx
-; LINUX-X86-NEXT: orl %eax, %ecx
-; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; LINUX-X86-NEXT: shldl $1, %ecx, %ebx
-; LINUX-X86-NEXT: orl %eax, %ebx
-; LINUX-X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: addl %ecx, %ecx
-; LINUX-X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: cmpl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; LINUX-X86-NEXT: sbbl %edi, %ecx
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; LINUX-X86-NEXT: sbbl %ebp, %ecx
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; LINUX-X86-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; LINUX-X86-NEXT: sarl $31, %edx
-; LINUX-X86-NEXT: movl %edx, %ecx
-; LINUX-X86-NEXT: andl $1, %ecx
-; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; LINUX-X86-NEXT: movl %edx, %ecx
-; LINUX-X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; LINUX-X86-NEXT: movl %edx, %ebx
-; LINUX-X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
-; LINUX-X86-NEXT: movl %edx, %eax
-; LINUX-X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; LINUX-X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; LINUX-X86-NEXT: subl %edx, %esi
-; LINUX-X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: sbbl %eax, %edi
-; LINUX-X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; LINUX-X86-NEXT: sbbl %ebx, %ebp
-; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; LINUX-X86-NEXT: sbbl %ecx, %edx
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; LINUX-X86-NEXT: addl $-1, %ecx
-; LINUX-X86-NEXT: adcl $-1, %ebp
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; LINUX-X86-NEXT: adcl $-1, %esi
-; LINUX-X86-NEXT: adcl $-1, %edi
-; LINUX-X86-NEXT: movl %ebp, %eax
-; LINUX-X86-NEXT: orl %edi, %eax
-; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: orl %esi, %ecx
-; LINUX-X86-NEXT: orl %eax, %ecx
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; LINUX-X86-NEXT: jne .LBB0_25
-; LINUX-X86-NEXT: .LBB0_26: # %udiv-loop-exit2
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; LINUX-X86-NEXT: shldl $1, %ecx, %esi
-; LINUX-X86-NEXT: shldl $1, %ebx, %ecx
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: shldl $1, %eax, %ebx
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; LINUX-X86-NEXT: leal (%edx,%eax,2), %eax
-; LINUX-X86-NEXT: movl %ecx, %ebp
-; LINUX-X86-NEXT: .LBB0_23: # %udiv-end1
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; LINUX-X86-NEXT: .LBB0_30: # %udiv-end1
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; LINUX-X86-NEXT: xorl %edx, %esi
-; LINUX-X86-NEXT: xorl %edx, %ebp
-; LINUX-X86-NEXT: xorl %edx, %ebx
-; LINUX-X86-NEXT: xorl %edx, %eax
-; LINUX-X86-NEXT: subl %edx, %eax
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: sbbl %edx, %ebx
-; LINUX-X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: sbbl %edx, %ebp
-; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: sbbl %edx, %esi
-; LINUX-X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: subl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; LINUX-X86-NEXT: sbbl %eax, %edx
-; LINUX-X86-NEXT: sbbl %eax, %ecx
-; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; LINUX-X86-NEXT: sbbl %eax, %ebx
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; LINUX-X86-NEXT: subl %ecx, %edi
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: sbbl %ecx, %eax
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; LINUX-X86-NEXT: sbbl %ecx, %ebp
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; LINUX-X86-NEXT: sbbl %ecx, %esi
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: orl %esi, %eax
-; LINUX-X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: orl %ebp, %edi
-; LINUX-X86-NEXT: orl %eax, %edi
-; LINUX-X86-NEXT: sete %al
-; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl %edx, %ecx
-; LINUX-X86-NEXT: orl %ebx, %ecx
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; LINUX-X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; LINUX-X86-NEXT: orl %ecx, %edx
-; LINUX-X86-NEXT: sete %cl
-; LINUX-X86-NEXT: testl %esi, %esi
-; LINUX-X86-NEXT: movl %ebx, %edi
-; LINUX-X86-NEXT: jne .LBB0_31
-; LINUX-X86-NEXT: # %bb.32: # %udiv-end1
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; LINUX-X86-NEXT: bsrl %ebx, %ebp
-; LINUX-X86-NEXT: xorl $31, %ebp
-; LINUX-X86-NEXT: orl $32, %ebp
-; LINUX-X86-NEXT: jmp .LBB0_33
-; LINUX-X86-NEXT: .LBB0_31:
-; LINUX-X86-NEXT: bsrl %esi, %ebp
-; LINUX-X86-NEXT: xorl $31, %ebp
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; LINUX-X86-NEXT: .LBB0_33: # %udiv-end1
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; LINUX-X86-NEXT: testl %edx, %edx
-; LINUX-X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: jne .LBB0_34
-; LINUX-X86-NEXT: # %bb.35: # %udiv-end1
-; LINUX-X86-NEXT: bsrl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; LINUX-X86-NEXT: xorl $31, %edx
-; LINUX-X86-NEXT: orl $32, %edx
-; LINUX-X86-NEXT: jmp .LBB0_36
-; LINUX-X86-NEXT: .LBB0_34:
-; LINUX-X86-NEXT: bsrl %edx, %edx
-; LINUX-X86-NEXT: xorl $31, %edx
-; LINUX-X86-NEXT: .LBB0_36: # %udiv-end1
-; LINUX-X86-NEXT: movl %ebx, %esi
-; LINUX-X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; LINUX-X86-NEXT: jne .LBB0_38
-; LINUX-X86-NEXT: # %bb.37: # %udiv-end1
-; LINUX-X86-NEXT: orl $64, %edx
-; LINUX-X86-NEXT: movl %edx, %ebp
-; LINUX-X86-NEXT: .LBB0_38: # %udiv-end1
-; LINUX-X86-NEXT: testl %edi, %edi
-; LINUX-X86-NEXT: jne .LBB0_39
-; LINUX-X86-NEXT: # %bb.40: # %udiv-end1
-; LINUX-X86-NEXT: bsrl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; LINUX-X86-NEXT: xorl $31, %edx
-; LINUX-X86-NEXT: orl $32, %edx
-; LINUX-X86-NEXT: jmp .LBB0_41
-; LINUX-X86-NEXT: .LBB0_39:
-; LINUX-X86-NEXT: bsrl %edi, %edx
-; LINUX-X86-NEXT: xorl $31, %edx
-; LINUX-X86-NEXT: .LBB0_41: # %udiv-end1
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; LINUX-X86-NEXT: testl %esi, %esi
-; LINUX-X86-NEXT: jne .LBB0_42
-; LINUX-X86-NEXT: # %bb.43: # %udiv-end1
-; LINUX-X86-NEXT: bsrl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; LINUX-X86-NEXT: xorl $31, %esi
-; LINUX-X86-NEXT: orl $32, %esi
-; LINUX-X86-NEXT: jmp .LBB0_44
-; LINUX-X86-NEXT: .LBB0_42:
-; LINUX-X86-NEXT: bsrl %esi, %esi
-; LINUX-X86-NEXT: xorl $31, %esi
-; LINUX-X86-NEXT: .LBB0_44: # %udiv-end1
-; LINUX-X86-NEXT: orb %cl, %al
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; LINUX-X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: orl %edi, %ecx
-; LINUX-X86-NEXT: jne .LBB0_46
-; LINUX-X86-NEXT: # %bb.45: # %udiv-end1
-; LINUX-X86-NEXT: orl $64, %esi
-; LINUX-X86-NEXT: movl %esi, %edx
-; LINUX-X86-NEXT: .LBB0_46: # %udiv-end1
-; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; LINUX-X86-NEXT: movl %ebp, %ebx
-; LINUX-X86-NEXT: subl %edx, %ebx
-; LINUX-X86-NEXT: movl $0, %esi
-; LINUX-X86-NEXT: sbbl %esi, %esi
-; LINUX-X86-NEXT: movl $0, %ecx
-; LINUX-X86-NEXT: sbbl %ecx, %ecx
-; LINUX-X86-NEXT: movl $0, %edi
-; LINUX-X86-NEXT: sbbl %edi, %edi
-; LINUX-X86-NEXT: testb %al, %al
-; LINUX-X86-NEXT: jne .LBB0_47
-; LINUX-X86-NEXT: # %bb.48: # %select.false.sink8
-; LINUX-X86-NEXT: movl $127, %eax
-; LINUX-X86-NEXT: cmpl %ebx, %eax
-; LINUX-X86-NEXT: movl $0, %eax
-; LINUX-X86-NEXT: sbbl %esi, %eax
-; LINUX-X86-NEXT: movl $0, %eax
-; LINUX-X86-NEXT: sbbl %ecx, %eax
-; LINUX-X86-NEXT: movl $0, %eax
-; LINUX-X86-NEXT: sbbl %edi, %eax
-; LINUX-X86-NEXT: setb %al
-; LINUX-X86-NEXT: .LBB0_49: # %select.end7
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; LINUX-X86-NEXT: testb %al, %al
-; LINUX-X86-NEXT: movl $0, %eax
-; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; LINUX-X86-NEXT: jne .LBB0_51
-; LINUX-X86-NEXT: # %bb.50: # %select.end7
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: .LBB0_51: # %select.end7
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: jne .LBB0_52
-; LINUX-X86-NEXT: # %bb.58: # %select.end7
-; LINUX-X86-NEXT: movl %ebx, %eax
-; LINUX-X86-NEXT: xorl $127, %eax
-; LINUX-X86-NEXT: orl %ecx, %eax
-; LINUX-X86-NEXT: movl %esi, %ebp
-; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl %esi, %ecx
-; LINUX-X86-NEXT: orl %edi, %ecx
-; LINUX-X86-NEXT: orl %eax, %ecx
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; LINUX-X86-NEXT: je .LBB0_59
-; LINUX-X86-NEXT: # %bb.56: # %udiv-bb1
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; LINUX-X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; LINUX-X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; LINUX-X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl %ebx, %ecx
-; LINUX-X86-NEXT: xorb $127, %cl
-; LINUX-X86-NEXT: movl %ecx, %eax
-; LINUX-X86-NEXT: shrb $3, %al
-; LINUX-X86-NEXT: andb $12, %al
-; LINUX-X86-NEXT: negb %al
-; LINUX-X86-NEXT: movsbl %al, %eax
-; LINUX-X86-NEXT: movl 200(%esp,%eax), %edx
-; LINUX-X86-NEXT: movl 204(%esp,%eax), %esi
-; LINUX-X86-NEXT: shldl %cl, %edx, %esi
-; LINUX-X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl 192(%esp,%eax), %esi
-; LINUX-X86-NEXT: movl 196(%esp,%eax), %eax
-; LINUX-X86-NEXT: shldl %cl, %eax, %edx
-; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: shldl %cl, %esi, %eax
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: shll %cl, %esi
-; LINUX-X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: addl $1, %ebx
-; LINUX-X86-NEXT: adcl $0, %ebp
-; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; LINUX-X86-NEXT: adcl $0, %edi
-; LINUX-X86-NEXT: jb .LBB0_57
-; LINUX-X86-NEXT: # %bb.53: # %udiv-preheader
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl %ebx, %eax
-; LINUX-X86-NEXT: shrb $3, %al
-; LINUX-X86-NEXT: andb $12, %al
-; LINUX-X86-NEXT: movzbl %al, %eax
-; LINUX-X86-NEXT: movl 156(%esp,%eax), %edx
-; LINUX-X86-NEXT: movl 152(%esp,%eax), %esi
-; LINUX-X86-NEXT: movl %esi, %ebp
-; LINUX-X86-NEXT: movl %ebx, %ecx
-; LINUX-X86-NEXT: shrdl %cl, %edx, %ebp
-; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl 144(%esp,%eax), %ebp
-; LINUX-X86-NEXT: movl 148(%esp,%eax), %eax
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: shrdl %cl, %esi, %eax
-; LINUX-X86-NEXT: movl %eax, %esi
-; LINUX-X86-NEXT: shrl %cl, %edx
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: shrdl %cl, %eax, %ebp
-; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: addl $-1, %eax
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: adcl $-1, %eax
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: adcl $-1, %eax
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: adcl $-1, %eax
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; LINUX-X86-NEXT: .p2align 4
-; LINUX-X86-NEXT: .LBB0_54: # %udiv-do-while
-; LINUX-X86-NEXT: # =>This Inner Loop Header: Depth=1
-; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; LINUX-X86-NEXT: shldl $1, %ebp, %edx
-; LINUX-X86-NEXT: movl %edx, %ebx
-; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl %esi, %ecx
-; LINUX-X86-NEXT: shldl $1, %esi, %ebp
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; LINUX-X86-NEXT: shldl $1, %esi, %ecx
-; LINUX-X86-NEXT: shldl $1, %eax, %esi
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; LINUX-X86-NEXT: shldl $1, %edx, %eax
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; LINUX-X86-NEXT: orl %edi, %eax
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: shldl $1, %eax, %edx
-; LINUX-X86-NEXT: orl %edi, %edx
-; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; LINUX-X86-NEXT: shldl $1, %edx, %eax
-; LINUX-X86-NEXT: orl %edi, %eax
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: addl %edx, %edx
-; LINUX-X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: cmpl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: sbbl %ecx, %eax
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: sbbl %ebp, %eax
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: sbbl %ebx, %eax
-; LINUX-X86-NEXT: sarl $31, %eax
-; LINUX-X86-NEXT: movl %eax, %edx
-; LINUX-X86-NEXT: andl $1, %edx
-; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; LINUX-X86-NEXT: movl %eax, %ebx
-; LINUX-X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
-; LINUX-X86-NEXT: movl %eax, %edi
-; LINUX-X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; LINUX-X86-NEXT: movl %eax, %edx
-; LINUX-X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; LINUX-X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; LINUX-X86-NEXT: subl %eax, %esi
-; LINUX-X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: sbbl %edx, %ecx
-; LINUX-X86-NEXT: movl %ecx, %esi
-; LINUX-X86-NEXT: sbbl %edi, %ebp
-; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; LINUX-X86-NEXT: sbbl %ebx, %edx
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; LINUX-X86-NEXT: addl $-1, %ebx
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: adcl $-1, %eax
-; LINUX-X86-NEXT: adcl $-1, %ebp
-; LINUX-X86-NEXT: adcl $-1, %edi
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: orl %edi, %eax
-; LINUX-X86-NEXT: movl %ebx, %ecx
-; LINUX-X86-NEXT: orl %ebp, %ecx
-; LINUX-X86-NEXT: orl %eax, %ecx
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: jne .LBB0_54
-; LINUX-X86-NEXT: .LBB0_55: # %udiv-loop-exit
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; LINUX-X86-NEXT: shldl $1, %edx, %eax
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; LINUX-X86-NEXT: shldl $1, %esi, %edx
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: shldl $1, %eax, %esi
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; LINUX-X86-NEXT: leal (%ecx,%eax,2), %eax
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: .LBB0_59: # %udiv-end
-; LINUX-X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; LINUX-X86-NEXT: movl %eax, (%ecx)
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: movl %eax, 4(%ecx)
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: movl %eax, 8(%ecx)
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: movl %eax, 12(%ecx)
-; LINUX-X86-NEXT: movl %edx, %esi
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: imull %eax, %esi
-; LINUX-X86-NEXT: movl %edx, %eax
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; LINUX-X86-NEXT: mull %ebp
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: addl %esi, %edx
-; LINUX-X86-NEXT: imull %ebp, %ecx
-; LINUX-X86-NEXT: addl %edx, %ecx
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; LINUX-X86-NEXT: movl %edi, %eax
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; LINUX-X86-NEXT: mull %ebx
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; LINUX-X86-NEXT: imull %edi, %esi
-; LINUX-X86-NEXT: addl %edx, %esi
-; LINUX-X86-NEXT: imull {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
-; LINUX-X86-NEXT: addl %esi, %ebx
-; LINUX-X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: adcl %ecx, %ebx
-; LINUX-X86-NEXT: movl %ebp, %eax
-; LINUX-X86-NEXT: mull %edi
-; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: mull %edi
-; LINUX-X86-NEXT: movl %edx, %ecx
-; LINUX-X86-NEXT: movl %eax, %esi
-; LINUX-X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; LINUX-X86-NEXT: adcl $0, %ecx
-; LINUX-X86-NEXT: movl %ebp, %eax
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; LINUX-X86-NEXT: mull %ebp
-; LINUX-X86-NEXT: movl %edx, %edi
-; LINUX-X86-NEXT: addl %esi, %eax
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: adcl %ecx, %edi
-; LINUX-X86-NEXT: setb %cl
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: mull %ebp
-; LINUX-X86-NEXT: addl %edi, %eax
-; LINUX-X86-NEXT: movzbl %cl, %ecx
-; LINUX-X86-NEXT: adcl %ecx, %edx
-; LINUX-X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; LINUX-X86-NEXT: adcl %ebx, %edx
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; LINUX-X86-NEXT: subl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; LINUX-X86-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; LINUX-X86-NEXT: sbbl %eax, %ecx
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: sbbl %edx, %eax
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; LINUX-X86-NEXT: xorl %edx, %eax
-; LINUX-X86-NEXT: xorl %edx, %ecx
-; LINUX-X86-NEXT: xorl %edx, %esi
-; LINUX-X86-NEXT: xorl %edx, %edi
-; LINUX-X86-NEXT: subl %edx, %edi
-; LINUX-X86-NEXT: sbbl %edx, %esi
-; LINUX-X86-NEXT: sbbl %edx, %ecx
-; LINUX-X86-NEXT: sbbl %edx, %eax
-; LINUX-X86-NEXT: movl %eax, %edx
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; LINUX-X86-NEXT: movl %edi, (%eax)
-; LINUX-X86-NEXT: movl %esi, 4(%eax)
-; LINUX-X86-NEXT: movl %ecx, 8(%eax)
-; LINUX-X86-NEXT: movl %edx, 12(%eax)
-; LINUX-X86-NEXT: addl $284, %esp # imm = 0x11C
-; LINUX-X86-NEXT: .cfi_def_cfa_offset 20
-; LINUX-X86-NEXT: popl %esi
-; LINUX-X86-NEXT: .cfi_def_cfa_offset 16
-; LINUX-X86-NEXT: popl %edi
-; LINUX-X86-NEXT: .cfi_def_cfa_offset 12
-; LINUX-X86-NEXT: popl %ebx
-; LINUX-X86-NEXT: .cfi_def_cfa_offset 8
-; LINUX-X86-NEXT: popl %ebp
-; LINUX-X86-NEXT: .cfi_def_cfa_offset 4
-; LINUX-X86-NEXT: retl
-; LINUX-X86-NEXT: .LBB0_17:
-; LINUX-X86-NEXT: .cfi_def_cfa_offset 304
-; LINUX-X86-NEXT: movb $1, %al
-; LINUX-X86-NEXT: jmp .LBB0_19
-; LINUX-X86-NEXT: .LBB0_28:
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; LINUX-X86-NEXT: jmp .LBB0_26
-; LINUX-X86-NEXT: .LBB0_47:
-; LINUX-X86-NEXT: movb $1, %al
-; LINUX-X86-NEXT: jmp .LBB0_49
-; LINUX-X86-NEXT: .LBB0_57:
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; LINUX-X86-NEXT: jmp .LBB0_55
-; LINUX-X86-NEXT: .LBB0_22:
-; LINUX-X86-NEXT: movl %edi, %eax
-; LINUX-X86-NEXT: jmp .LBB0_23
-; LINUX-X86-NEXT: .LBB0_52:
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; LINUX-X86-NEXT: jmp .LBB0_59
+; LINUX-X86-NOT: __divmodti4
+; LINUX-X86-NOT: __divti3
+; LINUX-X86-NOT: __modti3
+; LINUX-X86: retl
;
-; WIN32-LABEL: sdivrem_i128:
-; WIN32: # %bb.0: # %_udiv-special-cases_udiv-special-cases
-; WIN32-NEXT: pushl %ebp
-; WIN32-NEXT: movl %esp, %ebp
-; WIN32-NEXT: pushl %ebx
-; WIN32-NEXT: pushl %edi
-; WIN32-NEXT: pushl %esi
-; WIN32-NEXT: andl $-16, %esp
-; WIN32-NEXT: subl $288, %esp # imm = 0x120
-; WIN32-NEXT: movl 36(%ebp), %edi
-; WIN32-NEXT: movl %edi, %esi
-; WIN32-NEXT: sarl $31, %esi
-; WIN32-NEXT: xorl %esi, %edi
-; WIN32-NEXT: movl 32(%ebp), %eax
-; WIN32-NEXT: xorl %esi, %eax
-; WIN32-NEXT: movl 28(%ebp), %ecx
-; WIN32-NEXT: xorl %esi, %ecx
-; WIN32-NEXT: movl 24(%ebp), %edx
-; WIN32-NEXT: xorl %esi, %edx
-; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: subl %esi, %edx
-; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: sbbl %esi, %ecx
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: sbbl %esi, %eax
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: sbbl %esi, %edi
-; WIN32-NEXT: movl 52(%ebp), %ebx
-; WIN32-NEXT: movl %ebx, %edx
-; WIN32-NEXT: sarl $31, %edx
-; WIN32-NEXT: xorl %edx, %ebx
-; WIN32-NEXT: movl 48(%ebp), %esi
-; WIN32-NEXT: xorl %edx, %esi
-; WIN32-NEXT: movl 44(%ebp), %eax
-; WIN32-NEXT: xorl %edx, %eax
-; WIN32-NEXT: movl 40(%ebp), %ecx
-; WIN32-NEXT: xorl %edx, %ecx
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: subl %edx, %ecx
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: sbbl %edx, %eax
-; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: sbbl %edx, %esi
-; WIN32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: sbbl %edx, %ebx
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: orl %ebx, %eax
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: orl %esi, %ecx
-; WIN32-NEXT: orl %eax, %ecx
-; WIN32-NEXT: sete %al
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: orl %edi, %ecx
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; WIN32-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; WIN32-NEXT: orl %ecx, %edx
-; WIN32-NEXT: sete %cl
-; WIN32-NEXT: testl %ebx, %ebx
-; WIN32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: jne LBB0_1
-; WIN32-NEXT: # %bb.2: # %_udiv-special-cases_udiv-special-cases
-; WIN32-NEXT: bsrl %esi, %edi
-; WIN32-NEXT: xorl $31, %edi
-; WIN32-NEXT: orl $32, %edi
-; WIN32-NEXT: jmp LBB0_3
-; WIN32-NEXT: LBB0_1:
-; WIN32-NEXT: bsrl %ebx, %edi
-; WIN32-NEXT: xorl $31, %edi
-; WIN32-NEXT: LBB0_3: # %_udiv-special-cases_udiv-special-cases
-; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; WIN32-NEXT: testl %edx, %edx
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; WIN32-NEXT: jne LBB0_4
-; WIN32-NEXT: # %bb.5: # %_udiv-special-cases_udiv-special-cases
-; WIN32-NEXT: bsrl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; WIN32-NEXT: xorl $31, %edx
-; WIN32-NEXT: orl $32, %edx
-; WIN32-NEXT: jmp LBB0_6
-; WIN32-NEXT: LBB0_4:
-; WIN32-NEXT: bsrl %edx, %edx
-; WIN32-NEXT: xorl $31, %edx
-; WIN32-NEXT: LBB0_6: # %_udiv-special-cases_udiv-special-cases
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; WIN32-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; WIN32-NEXT: jne LBB0_8
-; WIN32-NEXT: # %bb.7: # %_udiv-special-cases_udiv-special-cases
-; WIN32-NEXT: orl $64, %edx
-; WIN32-NEXT: movl %edx, %edi
-; WIN32-NEXT: LBB0_8: # %_udiv-special-cases_udiv-special-cases
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; WIN32-NEXT: testl %edx, %edx
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; WIN32-NEXT: jne LBB0_9
-; WIN32-NEXT: # %bb.10: # %_udiv-special-cases_udiv-special-cases
-; WIN32-NEXT: bsrl %ebx, %edx
-; WIN32-NEXT: xorl $31, %edx
-; WIN32-NEXT: orl $32, %edx
-; WIN32-NEXT: testl %esi, %esi
-; WIN32-NEXT: je LBB0_13
-; WIN32-NEXT: LBB0_12:
-; WIN32-NEXT: bsrl %esi, %esi
-; WIN32-NEXT: xorl $31, %esi
-; WIN32-NEXT: jmp LBB0_14
-; WIN32-NEXT: LBB0_9:
-; WIN32-NEXT: bsrl %edx, %edx
-; WIN32-NEXT: xorl $31, %edx
-; WIN32-NEXT: testl %esi, %esi
-; WIN32-NEXT: jne LBB0_12
-; WIN32-NEXT: LBB0_13: # %_udiv-special-cases_udiv-special-cases
-; WIN32-NEXT: bsrl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; WIN32-NEXT: xorl $31, %esi
-; WIN32-NEXT: orl $32, %esi
-; WIN32-NEXT: LBB0_14: # %_udiv-special-cases_udiv-special-cases
-; WIN32-NEXT: orb %cl, %al
-; WIN32-NEXT: movl %ebx, %ecx
-; WIN32-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; WIN32-NEXT: jne LBB0_16
-; WIN32-NEXT: # %bb.15: # %_udiv-special-cases_udiv-special-cases
-; WIN32-NEXT: orl $64, %esi
-; WIN32-NEXT: movl %esi, %edx
-; WIN32-NEXT: LBB0_16: # %_udiv-special-cases_udiv-special-cases
-; WIN32-NEXT: xorl %ebx, %ebx
-; WIN32-NEXT: subl %edx, %edi
-; WIN32-NEXT: movl $0, %ecx
-; WIN32-NEXT: sbbl %ecx, %ecx
-; WIN32-NEXT: movl $0, %edx
-; WIN32-NEXT: sbbl %edx, %edx
-; WIN32-NEXT: movl $0, %esi
-; WIN32-NEXT: sbbl %esi, %esi
-; WIN32-NEXT: testb %al, %al
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: jne LBB0_17
-; WIN32-NEXT: # %bb.18: # %select.false.sink
-; WIN32-NEXT: movl $127, %eax
-; WIN32-NEXT: cmpl %edi, %eax
-; WIN32-NEXT: movl $0, %eax
-; WIN32-NEXT: sbbl %ecx, %eax
-; WIN32-NEXT: movl $0, %eax
-; WIN32-NEXT: sbbl %edx, %eax
-; WIN32-NEXT: movl $0, %eax
-; WIN32-NEXT: sbbl %esi, %eax
-; WIN32-NEXT: setb %al
-; WIN32-NEXT: LBB0_19: # %select.end
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: xorl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: testb %al, %al
-; WIN32-NEXT: movl $0, %eax
-; WIN32-NEXT: movl $0, %edx
-; WIN32-NEXT: movl $0, %ecx
-; WIN32-NEXT: jne LBB0_21
-; WIN32-NEXT: # %bb.20: # %select.end
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: LBB0_21: # %select.end
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: jne LBB0_22
-; WIN32-NEXT: # %bb.28: # %select.end
-; WIN32-NEXT: movl %edi, %eax
-; WIN32-NEXT: xorl $127, %eax
-; WIN32-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; WIN32-NEXT: movl %edi, %esi
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; WIN32-NEXT: orl %eax, %ecx
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: je LBB0_29
-; WIN32-NEXT: # %bb.26: # %udiv-bb15
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl %esi, %eax
-; WIN32-NEXT: movl %eax, %ecx
-; WIN32-NEXT: movl %esi, %edi
-; WIN32-NEXT: xorb $127, %cl
-; WIN32-NEXT: movl %ecx, %eax
-; WIN32-NEXT: shrb $3, %al
-; WIN32-NEXT: andb $12, %al
-; WIN32-NEXT: negb %al
-; WIN32-NEXT: movsbl %al, %eax
-; WIN32-NEXT: movl 264(%esp,%eax), %esi
-; WIN32-NEXT: movl 268(%esp,%eax), %edx
-; WIN32-NEXT: shldl %cl, %esi, %edx
-; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl 256(%esp,%eax), %edx
-; WIN32-NEXT: movl 260(%esp,%eax), %ebx
-; WIN32-NEXT: shldl %cl, %ebx, %esi
-; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: shldl %cl, %edx, %ebx
-; WIN32-NEXT: shll %cl, %edx
-; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: addl $1, %edi
-; WIN32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: jb LBB0_27
-; WIN32-NEXT: # %bb.23: # %udiv-preheader4
-; WIN32-NEXT: movl %edi, %ecx
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl %ecx, %eax
-; WIN32-NEXT: shrb $3, %al
-; WIN32-NEXT: andb $12, %al
-; WIN32-NEXT: movzbl %al, %eax
-; WIN32-NEXT: movl 220(%esp,%eax), %edi
-; WIN32-NEXT: movl 216(%esp,%eax), %esi
-; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: shrdl %cl, %edi, %esi
-; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl 208(%esp,%eax), %edx
-; WIN32-NEXT: movl 212(%esp,%eax), %eax
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; WIN32-NEXT: shrdl %cl, %esi, %eax
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: shrl %cl, %edi
-; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: # kill: def $cl killed $cl killed $ecx
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: shrdl %cl, %eax, %edx
-; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: addl $-1, %ecx
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: adcl $-1, %ecx
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: adcl $-1, %ecx
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: adcl $-1, %ecx
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: .p2align 4
-; WIN32-NEXT: LBB0_24: # %udiv-do-while3
-; WIN32-NEXT: # =>This Inner Loop Header: Depth=1
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; WIN32-NEXT: shldl $1, %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; WIN32-NEXT: shldl $1, %esi, %edx
-; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: shldl $1, %eax, %esi
-; WIN32-NEXT: shldl $1, %ecx, %eax
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; WIN32-NEXT: shldl $1, %edi, %ecx
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; WIN32-NEXT: orl %edx, %ecx
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: shldl $1, %ebx, %edi
-; WIN32-NEXT: orl %edx, %edi
-; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: shldl $1, %ecx, %ebx
-; WIN32-NEXT: orl %edx, %ebx
-; WIN32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: addl %ecx, %ecx
-; WIN32-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: cmpl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: sbbl %esi, %ecx
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; WIN32-NEXT: sbbl %edx, %ecx
-; WIN32-NEXT: sarl $31, %ecx
-; WIN32-NEXT: movl %ecx, %edi
-; WIN32-NEXT: andl $1, %edi
-; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %ecx, %edi
-; WIN32-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %ecx, %edi
-; WIN32-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; WIN32-NEXT: movl %ecx, %ebx
-; WIN32-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
-; WIN32-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: subl %ecx, %eax
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: sbbl %ebx, %esi
-; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; WIN32-NEXT: sbbl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: movl %edx, %eax
-; WIN32-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; WIN32-NEXT: addl $-1, %edx
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: adcl $-1, %ecx
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; WIN32-NEXT: adcl $-1, %edi
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: adcl $-1, %eax
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: orl %eax, %ecx
-; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: orl %edi, %edx
-; WIN32-NEXT: orl %ecx, %edx
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: jne LBB0_24
-; WIN32-NEXT: LBB0_25: # %udiv-loop-exit2
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; WIN32-NEXT: shldl $1, %edx, %ecx
-; WIN32-NEXT: shldl $1, %ebx, %edx
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; WIN32-NEXT: shldl $1, %esi, %ebx
-; WIN32-NEXT: movl %ecx, %eax
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: leal (%ecx,%esi,2), %ecx
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; WIN32-NEXT: LBB0_29: # %udiv-end1
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; WIN32-NEXT: xorl %esi, %eax
-; WIN32-NEXT: xorl %esi, %edx
-; WIN32-NEXT: xorl %esi, %ebx
-; WIN32-NEXT: xorl %esi, %ecx
-; WIN32-NEXT: subl %esi, %ecx
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: sbbl %esi, %ebx
-; WIN32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: sbbl %esi, %edx
-; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: sbbl %esi, %eax
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: subl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: sbbl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: sbbl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: sbbl %ecx, %edi
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; WIN32-NEXT: subl %ecx, %esi
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: sbbl %ecx, %eax
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; WIN32-NEXT: sbbl %ecx, %edx
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; WIN32-NEXT: sbbl %ecx, %ebx
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: orl %ebx, %eax
-; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: orl %edx, %esi
-; WIN32-NEXT: orl %eax, %esi
-; WIN32-NEXT: sete {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: movl %edi, %eax
-; WIN32-NEXT: orl %edi, %ecx
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; WIN32-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; WIN32-NEXT: orl %ecx, %edx
-; WIN32-NEXT: sete %cl
-; WIN32-NEXT: testl %ebx, %ebx
-; WIN32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: jne LBB0_30
-; WIN32-NEXT: # %bb.31: # %udiv-end1
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; WIN32-NEXT: bsrl %edi, %ebx
-; WIN32-NEXT: xorl $31, %ebx
-; WIN32-NEXT: orl $32, %ebx
-; WIN32-NEXT: jmp LBB0_32
-; WIN32-NEXT: LBB0_30:
-; WIN32-NEXT: bsrl %ebx, %ebx
-; WIN32-NEXT: xorl $31, %ebx
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; WIN32-NEXT: LBB0_32: # %udiv-end1
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; WIN32-NEXT: testl %edx, %edx
-; WIN32-NEXT: jne LBB0_33
-; WIN32-NEXT: # %bb.34: # %udiv-end1
-; WIN32-NEXT: bsrl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; WIN32-NEXT: xorl $31, %edx
-; WIN32-NEXT: orl $32, %edx
-; WIN32-NEXT: jmp LBB0_35
-; WIN32-NEXT: LBB0_33:
-; WIN32-NEXT: bsrl %edx, %edx
-; WIN32-NEXT: xorl $31, %edx
-; WIN32-NEXT: LBB0_35: # %udiv-end1
-; WIN32-NEXT: movl %edi, %esi
-; WIN32-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; WIN32-NEXT: jne LBB0_37
-; WIN32-NEXT: # %bb.36: # %udiv-end1
-; WIN32-NEXT: orl $64, %edx
-; WIN32-NEXT: movl %edx, %ebx
-; WIN32-NEXT: LBB0_37: # %udiv-end1
-; WIN32-NEXT: testl %eax, %eax
-; WIN32-NEXT: jne LBB0_38
-; WIN32-NEXT: # %bb.39: # %udiv-end1
-; WIN32-NEXT: bsrl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; WIN32-NEXT: xorl $31, %edx
-; WIN32-NEXT: orl $32, %edx
-; WIN32-NEXT: jmp LBB0_40
-; WIN32-NEXT: LBB0_38:
-; WIN32-NEXT: bsrl %eax, %edx
-; WIN32-NEXT: xorl $31, %edx
-; WIN32-NEXT: LBB0_40: # %udiv-end1
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; WIN32-NEXT: testl %esi, %esi
-; WIN32-NEXT: jne LBB0_41
-; WIN32-NEXT: # %bb.42: # %udiv-end1
-; WIN32-NEXT: bsrl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; WIN32-NEXT: xorl $31, %esi
-; WIN32-NEXT: orl $32, %esi
-; WIN32-NEXT: jmp LBB0_43
-; WIN32-NEXT: LBB0_41:
-; WIN32-NEXT: bsrl %esi, %esi
-; WIN32-NEXT: xorl $31, %esi
-; WIN32-NEXT: LBB0_43: # %udiv-end1
-; WIN32-NEXT: orb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: orl %eax, %ecx
-; WIN32-NEXT: jne LBB0_45
-; WIN32-NEXT: # %bb.44: # %udiv-end1
-; WIN32-NEXT: orl $64, %esi
-; WIN32-NEXT: movl %esi, %edx
-; WIN32-NEXT: LBB0_45: # %udiv-end1
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: subl %edx, %ebx
-; WIN32-NEXT: movl $0, %edi
-; WIN32-NEXT: sbbl %edi, %edi
-; WIN32-NEXT: movl $0, %ecx
-; WIN32-NEXT: sbbl %ecx, %ecx
-; WIN32-NEXT: movl $0, %edx
-; WIN32-NEXT: sbbl %edx, %edx
-; WIN32-NEXT: cmpb $0, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Reload
-; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: jne LBB0_46
-; WIN32-NEXT: # %bb.47: # %select.false.sink8
-; WIN32-NEXT: movl $127, %eax
-; WIN32-NEXT: cmpl %ebx, %eax
-; WIN32-NEXT: movl $0, %eax
-; WIN32-NEXT: sbbl %edi, %eax
-; WIN32-NEXT: movl $0, %eax
-; WIN32-NEXT: sbbl %ecx, %eax
-; WIN32-NEXT: movl $0, %eax
-; WIN32-NEXT: sbbl %edx, %eax
-; WIN32-NEXT: setb %al
-; WIN32-NEXT: LBB0_48: # %select.end7
-; WIN32-NEXT: testb %al, %al
-; WIN32-NEXT: movl $0, %eax
-; WIN32-NEXT: movl $0, %edx
-; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: jne LBB0_50
-; WIN32-NEXT: # %bb.49: # %select.end7
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %esi, %edx
-; WIN32-NEXT: LBB0_50: # %select.end7
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: jne LBB0_51
-; WIN32-NEXT: # %bb.57: # %select.end7
-; WIN32-NEXT: movl %ebx, %eax
-; WIN32-NEXT: xorl $127, %eax
-; WIN32-NEXT: orl %ecx, %eax
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %edi, %ecx
-; WIN32-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; WIN32-NEXT: orl %eax, %ecx
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; WIN32-NEXT: je LBB0_58
-; WIN32-NEXT: # %bb.55: # %udiv-bb1
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; WIN32-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; WIN32-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; WIN32-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl %ebx, %ecx
-; WIN32-NEXT: xorb $127, %cl
-; WIN32-NEXT: movl %ecx, %eax
-; WIN32-NEXT: shrb $3, %al
-; WIN32-NEXT: andb $12, %al
-; WIN32-NEXT: negb %al
-; WIN32-NEXT: movsbl %al, %eax
-; WIN32-NEXT: movl 200(%esp,%eax), %esi
-; WIN32-NEXT: movl 204(%esp,%eax), %edx
-; WIN32-NEXT: shldl %cl, %esi, %edx
-; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl 192(%esp,%eax), %edx
-; WIN32-NEXT: movl 196(%esp,%eax), %eax
-; WIN32-NEXT: shldl %cl, %eax, %esi
-; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: shldl %cl, %edx, %eax
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: shll %cl, %edx
-; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: addl $1, %ebx
-; WIN32-NEXT: adcl $0, %edi
-; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: jb LBB0_56
-; WIN32-NEXT: # %bb.52: # %udiv-preheader
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; WIN32-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; WIN32-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; WIN32-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl %ebx, %eax
-; WIN32-NEXT: shrb $3, %al
-; WIN32-NEXT: andb $12, %al
-; WIN32-NEXT: movzbl %al, %eax
-; WIN32-NEXT: movl 156(%esp,%eax), %edi
-; WIN32-NEXT: movl 152(%esp,%eax), %esi
-; WIN32-NEXT: movl %esi, %edx
-; WIN32-NEXT: movl %ebx, %ecx
-; WIN32-NEXT: shrdl %cl, %edi, %edx
-; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl 144(%esp,%eax), %edx
-; WIN32-NEXT: movl 148(%esp,%eax), %eax
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: shrdl %cl, %esi, %eax
-; WIN32-NEXT: shrl %cl, %edi
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; WIN32-NEXT: shrdl %cl, %esi, %edx
-; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: addl $-1, %ecx
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: adcl $-1, %ecx
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: adcl $-1, %ecx
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: adcl $-1, %ecx
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: .p2align 4
-; WIN32-NEXT: LBB0_53: # %udiv-do-while
-; WIN32-NEXT: # =>This Inner Loop Header: Depth=1
-; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; WIN32-NEXT: shldl $1, %edx, %edi
-; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %eax, %ebx
-; WIN32-NEXT: shldl $1, %eax, %edx
-; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: shldl $1, %eax, %ebx
-; WIN32-NEXT: shldl $1, %ecx, %eax
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; WIN32-NEXT: shldl $1, %edi, %ecx
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; WIN32-NEXT: orl %edx, %ecx
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: shldl $1, %esi, %edi
-; WIN32-NEXT: orl %edx, %edi
-; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: shldl $1, %ecx, %esi
-; WIN32-NEXT: orl %edx, %esi
-; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: addl %ecx, %ecx
-; WIN32-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: cmpl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: sbbl %ebx, %ecx
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; WIN32-NEXT: sarl $31, %ecx
-; WIN32-NEXT: movl %ecx, %esi
-; WIN32-NEXT: andl $1, %esi
-; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %ecx, %edx
-; WIN32-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %ecx, %esi
-; WIN32-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; WIN32-NEXT: movl %ecx, %edi
-; WIN32-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; WIN32-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: subl %ecx, %eax
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: sbbl %edi, %ebx
-; WIN32-NEXT: movl %ebx, %eax
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; WIN32-NEXT: sbbl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; WIN32-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; WIN32-NEXT: addl $-1, %ebx
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: adcl $-1, %ecx
-; WIN32-NEXT: adcl $-1, %edx
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; WIN32-NEXT: adcl $-1, %esi
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: orl %esi, %ecx
-; WIN32-NEXT: movl %ebx, %esi
-; WIN32-NEXT: orl %edx, %esi
-; WIN32-NEXT: orl %ecx, %esi
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: jne LBB0_53
-; WIN32-NEXT: LBB0_54: # %udiv-loop-exit
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; WIN32-NEXT: shldl $1, %edx, %ecx
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: shldl $1, %esi, %edx
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: shldl $1, %eax, %esi
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: leal (%ecx,%eax,2), %eax
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: LBB0_58: # %udiv-end
-; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: movl 8(%ebp), %ecx
-; WIN32-NEXT: movl %eax, (%ecx)
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: movl %eax, 4(%ecx)
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: movl %eax, 8(%ecx)
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: movl %eax, 12(%ecx)
-; WIN32-NEXT: movl %edx, %esi
-; WIN32-NEXT: imull {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; WIN32-NEXT: movl %edx, %eax
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; WIN32-NEXT: mull %edi
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: addl %esi, %edx
-; WIN32-NEXT: imull %edi, %ecx
-; WIN32-NEXT: addl %edx, %ecx
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; WIN32-NEXT: movl %ebx, %eax
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; WIN32-NEXT: mull %esi
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; WIN32-NEXT: imull %ebx, %edi
-; WIN32-NEXT: addl %edx, %edi
-; WIN32-NEXT: imull {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; WIN32-NEXT: addl %edi, %esi
-; WIN32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: adcl %ecx, %esi
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; WIN32-NEXT: movl %edi, %eax
-; WIN32-NEXT: mull %ebx
-; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: mull %ebx
-; WIN32-NEXT: movl %eax, %ecx
-; WIN32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; WIN32-NEXT: adcl $0, %edx
-; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %edi, %eax
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; WIN32-NEXT: mull %ebx
-; WIN32-NEXT: movl %edx, %edi
-; WIN32-NEXT: addl %ecx, %eax
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; WIN32-NEXT: setb %cl
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: mull %ebx
-; WIN32-NEXT: addl %edi, %eax
-; WIN32-NEXT: movzbl %cl, %ecx
-; WIN32-NEXT: adcl %ecx, %edx
-; WIN32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; WIN32-NEXT: adcl %esi, %edx
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; WIN32-NEXT: subl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; WIN32-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: sbbl %eax, %ecx
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; WIN32-NEXT: sbbl %edx, %ebx
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: xorl %eax, %ebx
-; WIN32-NEXT: xorl %eax, %ecx
-; WIN32-NEXT: xorl %eax, %esi
-; WIN32-NEXT: xorl %eax, %edi
-; WIN32-NEXT: subl %eax, %edi
-; WIN32-NEXT: sbbl %eax, %esi
-; WIN32-NEXT: sbbl %eax, %ecx
-; WIN32-NEXT: sbbl %eax, %ebx
-; WIN32-NEXT: movl 12(%ebp), %eax
-; WIN32-NEXT: movl %edi, (%eax)
-; WIN32-NEXT: movl %esi, 4(%eax)
-; WIN32-NEXT: movl %ecx, 8(%eax)
-; WIN32-NEXT: movl %ebx, 12(%eax)
-; WIN32-NEXT: leal -12(%ebp), %esp
-; WIN32-NEXT: popl %esi
-; WIN32-NEXT: popl %edi
-; WIN32-NEXT: popl %ebx
-; WIN32-NEXT: popl %ebp
-; WIN32-NEXT: retl
-; WIN32-NEXT: LBB0_17:
-; WIN32-NEXT: movb $1, %al
-; WIN32-NEXT: jmp LBB0_19
-; WIN32-NEXT: LBB0_27:
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: jmp LBB0_25
-; WIN32-NEXT: LBB0_46:
-; WIN32-NEXT: movb $1, %al
-; WIN32-NEXT: jmp LBB0_48
-; WIN32-NEXT: LBB0_56:
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: jmp LBB0_54
-; WIN32-NEXT: LBB0_22:
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: jmp LBB0_29
-; WIN32-NEXT: LBB0_51:
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; WIN32-NEXT: jmp LBB0_58
+; WIN32-LABEL: {{_?}}sdivrem_i128:
+; WIN32-NOT: __divmodti4
+; WIN32-NOT: __divti3
+; WIN32-NOT: __modti3
+; WIN32: retl
%q = sdiv i128 %n, %d
%r = srem i128 %n, %d
store i128 %q, ptr %q_out
@@ -1767,1624 +80,64 @@ define void @sdivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
define void @udivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
; LINUX-X64-LABEL: udivrem_i128:
-; LINUX-X64: # %bb.0:
-; LINUX-X64-NEXT: pushq %r14
-; LINUX-X64-NEXT: .cfi_def_cfa_offset 16
-; LINUX-X64-NEXT: pushq %rbx
-; LINUX-X64-NEXT: .cfi_def_cfa_offset 24
-; LINUX-X64-NEXT: subq $24, %rsp
-; LINUX-X64-NEXT: .cfi_def_cfa_offset 48
-; LINUX-X64-NEXT: .cfi_offset %rbx, -24
-; LINUX-X64-NEXT: .cfi_offset %r14, -16
-; LINUX-X64-NEXT: movq %r8, %rax
-; LINUX-X64-NEXT: movq %rsi, %rbx
-; LINUX-X64-NEXT: movq %rdi, %r14
-; LINUX-X64-NEXT: movq %rsp, %r8
-; LINUX-X64-NEXT: movq %rdx, %rdi
-; LINUX-X64-NEXT: movq %rcx, %rsi
-; LINUX-X64-NEXT: movq %rax, %rdx
-; LINUX-X64-NEXT: movq %r9, %rcx
-; LINUX-X64-NEXT: callq __udivmodti4 at PLT
-; LINUX-X64-NEXT: movq (%rsp), %rcx
-; LINUX-X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi
-; LINUX-X64-NEXT: movq %rax, (%r14)
-; LINUX-X64-NEXT: movq %rdx, 8(%r14)
-; LINUX-X64-NEXT: movq %rcx, (%rbx)
-; LINUX-X64-NEXT: movq %rsi, 8(%rbx)
-; LINUX-X64-NEXT: addq $24, %rsp
-; LINUX-X64-NEXT: .cfi_def_cfa_offset 24
-; LINUX-X64-NEXT: popq %rbx
-; LINUX-X64-NEXT: .cfi_def_cfa_offset 16
-; LINUX-X64-NEXT: popq %r14
-; LINUX-X64-NEXT: .cfi_def_cfa_offset 8
-; LINUX-X64-NEXT: retq
+; LINUX-X64: movq %rsp, %r8
+; LINUX-X64: callq __udivmodti4 at PLT
+; LINUX-X64: movq (%rsp), %rcx
+; LINUX-X64: movq {{[0-9]+}}(%rsp), %rsi
+; LINUX-X64: movq %rax, (%r14)
+; LINUX-X64: movq %rdx, 8(%r14)
+; LINUX-X64: movq %rcx, (%rbx)
+; LINUX-X64: movq %rsi, 8(%rbx)
;
; LINUX-X32-LABEL: udivrem_i128:
-; LINUX-X32: # %bb.0:
-; LINUX-X32-NEXT: pushq %r14
-; LINUX-X32-NEXT: .cfi_def_cfa_offset 16
-; LINUX-X32-NEXT: pushq %rbx
-; LINUX-X32-NEXT: .cfi_def_cfa_offset 24
-; LINUX-X32-NEXT: subl $24, %esp
-; LINUX-X32-NEXT: .cfi_def_cfa_offset 48
-; LINUX-X32-NEXT: .cfi_offset %rbx, -24
-; LINUX-X32-NEXT: .cfi_offset %r14, -16
-; LINUX-X32-NEXT: movq %r8, %rax
-; LINUX-X32-NEXT: movq %rsi, %rbx
-; LINUX-X32-NEXT: movq %rdi, %r14
-; LINUX-X32-NEXT: movl %esp, %r8d
-; LINUX-X32-NEXT: movq %rdx, %rdi
-; LINUX-X32-NEXT: movq %rcx, %rsi
-; LINUX-X32-NEXT: movq %rax, %rdx
-; LINUX-X32-NEXT: movq %r9, %rcx
-; LINUX-X32-NEXT: callq __udivmodti4 at PLT
-; LINUX-X32-NEXT: movq (%esp), %rcx
-; LINUX-X32-NEXT: movq {{[0-9]+}}(%esp), %rsi
-; LINUX-X32-NEXT: movq %rax, (%r14d)
-; LINUX-X32-NEXT: movq %rdx, 8(%r14d)
-; LINUX-X32-NEXT: movq %rcx, (%ebx)
-; LINUX-X32-NEXT: movq %rsi, 8(%ebx)
-; LINUX-X32-NEXT: addl $24, %esp
-; LINUX-X32-NEXT: .cfi_def_cfa_offset 24
-; LINUX-X32-NEXT: popq %rbx
-; LINUX-X32-NEXT: .cfi_def_cfa_offset 16
-; LINUX-X32-NEXT: popq %r14
-; LINUX-X32-NEXT: .cfi_def_cfa_offset 8
-; LINUX-X32-NEXT: retq
+; LINUX-X32: movl %esp, %r8d
+; LINUX-X32: callq __udivmodti4 at PLT
+; LINUX-X32: movq (%esp), %rcx
+; LINUX-X32: movq {{[0-9]+}}(%esp), %rsi
+; LINUX-X32: movq %rax, (%r14d)
+; LINUX-X32: movq %rdx, 8(%r14d)
+; LINUX-X32: movq %rcx, (%ebx)
+; LINUX-X32: movq %rsi, 8(%ebx)
;
; DARWIN-X64-LABEL: udivrem_i128:
-; DARWIN-X64: ## %bb.0:
-; DARWIN-X64-NEXT: pushq %r14
-; DARWIN-X64-NEXT: .cfi_def_cfa_offset 16
-; DARWIN-X64-NEXT: pushq %rbx
-; DARWIN-X64-NEXT: .cfi_def_cfa_offset 24
-; DARWIN-X64-NEXT: subq $24, %rsp
-; DARWIN-X64-NEXT: .cfi_def_cfa_offset 48
-; DARWIN-X64-NEXT: .cfi_offset %rbx, -24
-; DARWIN-X64-NEXT: .cfi_offset %r14, -16
-; DARWIN-X64-NEXT: movq %r8, %rax
-; DARWIN-X64-NEXT: movq %rsi, %rbx
-; DARWIN-X64-NEXT: movq %rdi, %r14
-; DARWIN-X64-NEXT: movq %rsp, %r8
-; DARWIN-X64-NEXT: movq %rdx, %rdi
-; DARWIN-X64-NEXT: movq %rcx, %rsi
-; DARWIN-X64-NEXT: movq %rax, %rdx
-; DARWIN-X64-NEXT: movq %r9, %rcx
-; DARWIN-X64-NEXT: callq ___udivmodti4
-; DARWIN-X64-NEXT: movq (%rsp), %rcx
-; DARWIN-X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi
-; DARWIN-X64-NEXT: movq %rax, (%r14)
-; DARWIN-X64-NEXT: movq %rdx, 8(%r14)
-; DARWIN-X64-NEXT: movq %rcx, (%rbx)
-; DARWIN-X64-NEXT: movq %rsi, 8(%rbx)
-; DARWIN-X64-NEXT: addq $24, %rsp
-; DARWIN-X64-NEXT: popq %rbx
-; DARWIN-X64-NEXT: popq %r14
-; DARWIN-X64-NEXT: retq
+; DARWIN-X64: movq %rsp, %r8
+; DARWIN-X64: callq ___udivmodti4
+; DARWIN-X64: movq (%rsp), %rcx
+; DARWIN-X64: movq {{[0-9]+}}(%rsp), %rsi
+; DARWIN-X64: movq %rax, (%r14)
+; DARWIN-X64: movq %rdx, 8(%r14)
+; DARWIN-X64: movq %rcx, (%rbx)
+; DARWIN-X64: movq %rsi, 8(%rbx)
;
; MINGW-X64-LABEL: udivrem_i128:
-; MINGW-X64: # %bb.0:
-; MINGW-X64-NEXT: pushq %rsi
-; MINGW-X64-NEXT: .seh_pushreg %rsi
-; MINGW-X64-NEXT: pushq %rdi
-; MINGW-X64-NEXT: .seh_pushreg %rdi
-; MINGW-X64-NEXT: subq $88, %rsp
-; MINGW-X64-NEXT: .seh_stackalloc 88
-; MINGW-X64-NEXT: .seh_endprologue
-; MINGW-X64-NEXT: movq %rdx, %rsi
-; MINGW-X64-NEXT: movq %rcx, %rdi
-; MINGW-X64-NEXT: movaps {{[0-9]+}}(%rsp), %xmm0
-; MINGW-X64-NEXT: movq %r9, {{[0-9]+}}(%rsp)
-; MINGW-X64-NEXT: movq %r8, {{[0-9]+}}(%rsp)
-; MINGW-X64-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; MINGW-X64-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
-; MINGW-X64-NEXT: leaq {{[0-9]+}}(%rsp), %rdx
-; MINGW-X64-NEXT: leaq {{[0-9]+}}(%rsp), %r8
-; MINGW-X64-NEXT: callq __udivmodti4
-; MINGW-X64-NEXT: movaps {{[0-9]+}}(%rsp), %xmm1
-; MINGW-X64-NEXT: movaps %xmm0, (%rdi)
-; MINGW-X64-NEXT: movaps %xmm1, (%rsi)
-; MINGW-X64-NEXT: .seh_startepilogue
-; MINGW-X64-NEXT: addq $88, %rsp
-; MINGW-X64-NEXT: popq %rdi
-; MINGW-X64-NEXT: popq %rsi
-; MINGW-X64-NEXT: .seh_endepilogue
-; MINGW-X64-NEXT: retq
-; MINGW-X64-NEXT: .seh_endproc
+; MINGW-X64: leaq {{[0-9]+}}(%rsp), %rcx
+; MINGW-X64: leaq {{[0-9]+}}(%rsp), %rdx
+; MINGW-X64: leaq {{[0-9]+}}(%rsp), %r8
+; MINGW-X64: callq __udivmodti4
+; MINGW-X64: movaps {{[0-9]+}}(%rsp), %xmm1
+; MINGW-X64: movaps %xmm0, (%rdi)
+; MINGW-X64: movaps %xmm1, (%rsi)
;
; WIN64-LABEL: udivrem_i128:
-; WIN64: # %bb.0:
-; WIN64-NEXT: pushq %rsi
-; WIN64-NEXT: .seh_pushreg %rsi
-; WIN64-NEXT: pushq %rdi
-; WIN64-NEXT: .seh_pushreg %rdi
-; WIN64-NEXT: subq $88, %rsp
-; WIN64-NEXT: .seh_stackalloc 88
-; WIN64-NEXT: .seh_endprologue
-; WIN64-NEXT: movq %rdx, %rsi
-; WIN64-NEXT: movq %rcx, %rdi
-; WIN64-NEXT: movaps {{[0-9]+}}(%rsp), %xmm0
-; WIN64-NEXT: movq %r9, {{[0-9]+}}(%rsp)
-; WIN64-NEXT: movq %r8, {{[0-9]+}}(%rsp)
-; WIN64-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
-; WIN64-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
-; WIN64-NEXT: leaq {{[0-9]+}}(%rsp), %rdx
-; WIN64-NEXT: leaq {{[0-9]+}}(%rsp), %r8
-; WIN64-NEXT: callq __udivmodti4
-; WIN64-NEXT: movaps {{[0-9]+}}(%rsp), %xmm1
-; WIN64-NEXT: movaps %xmm0, (%rdi)
-; WIN64-NEXT: movaps %xmm1, (%rsi)
-; WIN64-NEXT: .seh_startepilogue
-; WIN64-NEXT: addq $88, %rsp
-; WIN64-NEXT: popq %rdi
-; WIN64-NEXT: popq %rsi
-; WIN64-NEXT: .seh_endepilogue
-; WIN64-NEXT: retq
-; WIN64-NEXT: .seh_endproc
+; WIN64: leaq {{[0-9]+}}(%rsp), %rcx
+; WIN64: leaq {{[0-9]+}}(%rsp), %rdx
+; WIN64: leaq {{[0-9]+}}(%rsp), %r8
+; WIN64: callq __udivmodti4
+; WIN64: movaps {{[0-9]+}}(%rsp), %xmm1
+; WIN64: movaps %xmm0, (%rdi)
+; WIN64: movaps %xmm1, (%rsi)
;
; LINUX-X86-LABEL: udivrem_i128:
-; LINUX-X86: # %bb.0: # %_udiv-special-cases_udiv-special-cases
-; LINUX-X86-NEXT: pushl %ebp
-; LINUX-X86-NEXT: .cfi_def_cfa_offset 8
-; LINUX-X86-NEXT: pushl %ebx
-; LINUX-X86-NEXT: .cfi_def_cfa_offset 12
-; LINUX-X86-NEXT: pushl %edi
-; LINUX-X86-NEXT: .cfi_def_cfa_offset 16
-; LINUX-X86-NEXT: pushl %esi
-; LINUX-X86-NEXT: .cfi_def_cfa_offset 20
-; LINUX-X86-NEXT: subl $236, %esp
-; LINUX-X86-NEXT: .cfi_def_cfa_offset 256
-; LINUX-X86-NEXT: .cfi_offset %esi, -20
-; LINUX-X86-NEXT: .cfi_offset %edi, -16
-; LINUX-X86-NEXT: .cfi_offset %ebx, -12
-; LINUX-X86-NEXT: .cfi_offset %ebp, -8
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
-; LINUX-X86-NEXT: movl %esi, %eax
-; LINUX-X86-NEXT: orl %ebp, %eax
-; LINUX-X86-NEXT: orl %ebx, %ecx
-; LINUX-X86-NEXT: orl %eax, %ecx
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; LINUX-X86-NEXT: sete %al
-; LINUX-X86-NEXT: movl %edi, %ecx
-; LINUX-X86-NEXT: orl {{[0-9]+}}(%esp), %ecx
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; LINUX-X86-NEXT: orl {{[0-9]+}}(%esp), %edx
-; LINUX-X86-NEXT: orl %ecx, %edx
-; LINUX-X86-NEXT: sete %cl
-; LINUX-X86-NEXT: testl %ebp, %ebp
-; LINUX-X86-NEXT: jne .LBB1_2
-; LINUX-X86-NEXT: # %bb.1: # %_udiv-special-cases_udiv-special-cases
-; LINUX-X86-NEXT: bsrl %ebx, %edx
-; LINUX-X86-NEXT: xorl $31, %edx
-; LINUX-X86-NEXT: orl $32, %edx
-; LINUX-X86-NEXT: jmp .LBB1_3
-; LINUX-X86-NEXT: .LBB1_2:
-; LINUX-X86-NEXT: bsrl %ebp, %edx
-; LINUX-X86-NEXT: xorl $31, %edx
-; LINUX-X86-NEXT: .LBB1_3: # %_udiv-special-cases_udiv-special-cases
-; LINUX-X86-NEXT: movl %ebx, %ebp
-; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: testl %esi, %esi
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; LINUX-X86-NEXT: jne .LBB1_5
-; LINUX-X86-NEXT: # %bb.4: # %_udiv-special-cases_udiv-special-cases
-; LINUX-X86-NEXT: bsrl {{[0-9]+}}(%esp), %edx
-; LINUX-X86-NEXT: xorl $31, %edx
-; LINUX-X86-NEXT: orl $32, %edx
-; LINUX-X86-NEXT: jmp .LBB1_6
-; LINUX-X86-NEXT: .LBB1_5:
-; LINUX-X86-NEXT: bsrl %esi, %edx
-; LINUX-X86-NEXT: xorl $31, %edx
-; LINUX-X86-NEXT: .LBB1_6: # %_udiv-special-cases_udiv-special-cases
-; LINUX-X86-NEXT: movl %ebp, %esi
-; LINUX-X86-NEXT: orl {{[0-9]+}}(%esp), %esi
-; LINUX-X86-NEXT: jne .LBB1_8
-; LINUX-X86-NEXT: # %bb.7: # %_udiv-special-cases_udiv-special-cases
-; LINUX-X86-NEXT: orl $64, %edx
-; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: .LBB1_8: # %_udiv-special-cases_udiv-special-cases
-; LINUX-X86-NEXT: testl %ebx, %ebx
-; LINUX-X86-NEXT: jne .LBB1_11
-; LINUX-X86-NEXT: # %bb.9: # %_udiv-special-cases_udiv-special-cases
-; LINUX-X86-NEXT: bsrl {{[0-9]+}}(%esp), %edx
-; LINUX-X86-NEXT: xorl $31, %edx
-; LINUX-X86-NEXT: orl $32, %edx
-; LINUX-X86-NEXT: testl %edi, %edi
-; LINUX-X86-NEXT: je .LBB1_12
-; LINUX-X86-NEXT: .LBB1_10:
-; LINUX-X86-NEXT: bsrl %edi, %esi
-; LINUX-X86-NEXT: xorl $31, %esi
-; LINUX-X86-NEXT: jmp .LBB1_13
-; LINUX-X86-NEXT: .LBB1_11:
-; LINUX-X86-NEXT: bsrl %ebx, %edx
-; LINUX-X86-NEXT: xorl $31, %edx
-; LINUX-X86-NEXT: testl %edi, %edi
-; LINUX-X86-NEXT: jne .LBB1_10
-; LINUX-X86-NEXT: .LBB1_12: # %_udiv-special-cases_udiv-special-cases
-; LINUX-X86-NEXT: bsrl {{[0-9]+}}(%esp), %esi
-; LINUX-X86-NEXT: xorl $31, %esi
-; LINUX-X86-NEXT: orl $32, %esi
-; LINUX-X86-NEXT: .LBB1_13: # %_udiv-special-cases_udiv-special-cases
-; LINUX-X86-NEXT: orb %cl, %al
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; LINUX-X86-NEXT: orl %ebx, %ecx
-; LINUX-X86-NEXT: jne .LBB1_15
-; LINUX-X86-NEXT: # %bb.14: # %_udiv-special-cases_udiv-special-cases
-; LINUX-X86-NEXT: orl $64, %esi
-; LINUX-X86-NEXT: movl %esi, %edx
-; LINUX-X86-NEXT: .LBB1_15: # %_udiv-special-cases_udiv-special-cases
-; LINUX-X86-NEXT: xorl %ebp, %ebp
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; LINUX-X86-NEXT: subl %edx, %ecx
-; LINUX-X86-NEXT: movl $0, %edx
-; LINUX-X86-NEXT: sbbl %edx, %edx
-; LINUX-X86-NEXT: movl $0, %ebx
-; LINUX-X86-NEXT: sbbl %ebx, %ebx
-; LINUX-X86-NEXT: movl $0, %edi
-; LINUX-X86-NEXT: sbbl %edi, %edi
-; LINUX-X86-NEXT: testb %al, %al
-; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: jne .LBB1_52
-; LINUX-X86-NEXT: # %bb.16: # %select.false.sink
-; LINUX-X86-NEXT: movl $127, %eax
-; LINUX-X86-NEXT: cmpl %ecx, %eax
-; LINUX-X86-NEXT: movl $0, %eax
-; LINUX-X86-NEXT: sbbl %edx, %eax
-; LINUX-X86-NEXT: movl $0, %eax
-; LINUX-X86-NEXT: sbbl %ebx, %eax
-; LINUX-X86-NEXT: movl $0, %eax
-; LINUX-X86-NEXT: sbbl %edi, %eax
-; LINUX-X86-NEXT: setb %al
-; LINUX-X86-NEXT: .LBB1_17: # %select.end
-; LINUX-X86-NEXT: movl %edi, %edx
-; LINUX-X86-NEXT: testb %al, %al
-; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; LINUX-X86-NEXT: movl $0, %eax
-; LINUX-X86-NEXT: movl $0, %ecx
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; LINUX-X86-NEXT: jne .LBB1_19
-; LINUX-X86-NEXT: # %bb.18: # %select.end
-; LINUX-X86-NEXT: movl %esi, %ebp
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl %edi, %eax
-; LINUX-X86-NEXT: movl %ebx, %ecx
-; LINUX-X86-NEXT: .LBB1_19: # %select.end
-; LINUX-X86-NEXT: movl %edx, %esi
-; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; LINUX-X86-NEXT: jne .LBB1_25
-; LINUX-X86-NEXT: # %bb.20: # %select.end
-; LINUX-X86-NEXT: movl %edx, %eax
-; LINUX-X86-NEXT: xorl $127, %eax
-; LINUX-X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; LINUX-X86-NEXT: movl %edi, %ecx
-; LINUX-X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: orl %esi, %ecx
-; LINUX-X86-NEXT: orl %eax, %ecx
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; LINUX-X86-NEXT: je .LBB1_26
-; LINUX-X86-NEXT: # %bb.21: # %udiv-bb15
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
-; LINUX-X86-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl %edx, %ecx
-; LINUX-X86-NEXT: xorb $127, %cl
-; LINUX-X86-NEXT: movl %ecx, %eax
-; LINUX-X86-NEXT: shrb $3, %al
-; LINUX-X86-NEXT: andb $12, %al
-; LINUX-X86-NEXT: negb %al
-; LINUX-X86-NEXT: movsbl %al, %eax
-; LINUX-X86-NEXT: movl 216(%esp,%eax), %esi
-; LINUX-X86-NEXT: movl 220(%esp,%eax), %ebp
-; LINUX-X86-NEXT: shldl %cl, %esi, %ebp
-; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl 208(%esp,%eax), %ebx
-; LINUX-X86-NEXT: movl 212(%esp,%eax), %ebp
-; LINUX-X86-NEXT: shldl %cl, %ebp, %esi
-; LINUX-X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: shldl %cl, %ebx, %ebp
-; LINUX-X86-NEXT: shll %cl, %ebx
-; LINUX-X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: addl $1, %edx
-; LINUX-X86-NEXT: adcl $0, %edi
-; LINUX-X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; LINUX-X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; LINUX-X86-NEXT: jb .LBB1_53
-; LINUX-X86-NEXT: # %bb.22: # %udiv-preheader4
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl %edx, %eax
-; LINUX-X86-NEXT: shrb $3, %al
-; LINUX-X86-NEXT: andb $12, %al
-; LINUX-X86-NEXT: movzbl %al, %eax
-; LINUX-X86-NEXT: movl 172(%esp,%eax), %esi
-; LINUX-X86-NEXT: movl 168(%esp,%eax), %ebx
-; LINUX-X86-NEXT: movl %ebx, %edi
-; LINUX-X86-NEXT: movl %edx, %ecx
-; LINUX-X86-NEXT: shrdl %cl, %esi, %edi
-; LINUX-X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl %edx, %ecx
-; LINUX-X86-NEXT: movl 160(%esp,%eax), %edx
-; LINUX-X86-NEXT: movl 164(%esp,%eax), %edi
-; LINUX-X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: shrdl %cl, %ebx, %edi
-; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: shrl %cl, %esi
-; LINUX-X86-NEXT: movl %esi, %ebx
-; LINUX-X86-NEXT: # kill: def $cl killed $cl killed $ecx
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: shrdl %cl, %eax, %edx
-; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; LINUX-X86-NEXT: addl $-1, %eax
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; LINUX-X86-NEXT: adcl $-1, %eax
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; LINUX-X86-NEXT: adcl $-1, %eax
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; LINUX-X86-NEXT: adcl $-1, %eax
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; LINUX-X86-NEXT: .p2align 4
-; LINUX-X86-NEXT: .LBB1_23: # %udiv-do-while3
-; LINUX-X86-NEXT: # =>This Inner Loop Header: Depth=1
-; LINUX-X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; LINUX-X86-NEXT: shldl $1, %edx, %ebx
-; LINUX-X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl %edi, %esi
-; LINUX-X86-NEXT: shldl $1, %edi, %edx
-; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; LINUX-X86-NEXT: shldl $1, %edi, %esi
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; LINUX-X86-NEXT: shldl $1, %ebx, %edi
-; LINUX-X86-NEXT: shldl $1, %eax, %ebx
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; LINUX-X86-NEXT: orl %edx, %ebx
-; LINUX-X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: shldl $1, %ebp, %eax
-; LINUX-X86-NEXT: orl %edx, %eax
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: shldl $1, %ecx, %ebp
-; LINUX-X86-NEXT: orl %edx, %ebp
-; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: addl %ecx, %ecx
-; LINUX-X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: cmpl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; LINUX-X86-NEXT: sbbl %esi, %ecx
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; LINUX-X86-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; LINUX-X86-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
-; LINUX-X86-NEXT: sarl $31, %ebp
-; LINUX-X86-NEXT: movl %ebp, %eax
-; LINUX-X86-NEXT: andl $1, %eax
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; LINUX-X86-NEXT: movl %ebp, %ecx
-; LINUX-X86-NEXT: andl {{[0-9]+}}(%esp), %ecx
-; LINUX-X86-NEXT: movl %ebp, %ebx
-; LINUX-X86-NEXT: andl {{[0-9]+}}(%esp), %ebx
-; LINUX-X86-NEXT: movl %ebp, %eax
-; LINUX-X86-NEXT: andl {{[0-9]+}}(%esp), %eax
-; LINUX-X86-NEXT: andl {{[0-9]+}}(%esp), %ebp
-; LINUX-X86-NEXT: subl %ebp, %edi
-; LINUX-X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; LINUX-X86-NEXT: sbbl %eax, %esi
-; LINUX-X86-NEXT: movl %esi, %edi
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; LINUX-X86-NEXT: sbbl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; LINUX-X86-NEXT: sbbl %ecx, %ebx
-; LINUX-X86-NEXT: addl $-1, %edx
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: adcl $-1, %eax
-; LINUX-X86-NEXT: adcl $-1, %esi
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; LINUX-X86-NEXT: adcl $-1, %ecx
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: orl %ecx, %eax
-; LINUX-X86-NEXT: movl %edx, %ecx
-; LINUX-X86-NEXT: orl %esi, %ecx
-; LINUX-X86-NEXT: orl %eax, %ecx
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: jne .LBB1_23
-; LINUX-X86-NEXT: .LBB1_24: # %udiv-loop-exit2
-; LINUX-X86-NEXT: shldl $1, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; LINUX-X86-NEXT: shldl $1, %ebp, %eax
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: shldl $1, %ecx, %ebp
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: leal (%eax,%ecx,2), %eax
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; LINUX-X86-NEXT: .LBB1_25: # %udiv-end1
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; LINUX-X86-NEXT: .LBB1_26: # %udiv-end1
-; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; LINUX-X86-NEXT: movl %ecx, %edx
-; LINUX-X86-NEXT: movl %ebx, %edi
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; LINUX-X86-NEXT: orl %ebx, %eax
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
-; LINUX-X86-NEXT: orl %ebp, %ecx
-; LINUX-X86-NEXT: orl %eax, %ecx
-; LINUX-X86-NEXT: sete {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; LINUX-X86-NEXT: orl {{[0-9]+}}(%esp), %ecx
-; LINUX-X86-NEXT: movl %edx, %esi
-; LINUX-X86-NEXT: movl %edi, %eax
-; LINUX-X86-NEXT: orl %edi, %edx
-; LINUX-X86-NEXT: orl %ecx, %edx
-; LINUX-X86-NEXT: sete %cl
-; LINUX-X86-NEXT: testl %ebp, %ebp
-; LINUX-X86-NEXT: jne .LBB1_28
-; LINUX-X86-NEXT: # %bb.27: # %udiv-end1
-; LINUX-X86-NEXT: bsrl %ebx, %edi
-; LINUX-X86-NEXT: xorl $31, %edi
-; LINUX-X86-NEXT: orl $32, %edi
-; LINUX-X86-NEXT: jmp .LBB1_29
-; LINUX-X86-NEXT: .LBB1_28:
-; LINUX-X86-NEXT: bsrl %ebp, %edi
-; LINUX-X86-NEXT: xorl $31, %edi
-; LINUX-X86-NEXT: .LBB1_29: # %udiv-end1
-; LINUX-X86-NEXT: movl %ebx, %ebp
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; LINUX-X86-NEXT: testl %edx, %edx
-; LINUX-X86-NEXT: movl %eax, %ebx
-; LINUX-X86-NEXT: movl %esi, %eax
-; LINUX-X86-NEXT: jne .LBB1_31
-; LINUX-X86-NEXT: # %bb.30: # %udiv-end1
-; LINUX-X86-NEXT: bsrl {{[0-9]+}}(%esp), %edx
-; LINUX-X86-NEXT: xorl $31, %edx
-; LINUX-X86-NEXT: orl $32, %edx
-; LINUX-X86-NEXT: jmp .LBB1_32
-; LINUX-X86-NEXT: .LBB1_31:
-; LINUX-X86-NEXT: bsrl %edx, %edx
-; LINUX-X86-NEXT: xorl $31, %edx
-; LINUX-X86-NEXT: .LBB1_32: # %udiv-end1
-; LINUX-X86-NEXT: movl %ebp, %esi
-; LINUX-X86-NEXT: orl {{[0-9]+}}(%esp), %esi
-; LINUX-X86-NEXT: jne .LBB1_34
-; LINUX-X86-NEXT: # %bb.33: # %udiv-end1
-; LINUX-X86-NEXT: orl $64, %edx
-; LINUX-X86-NEXT: movl %edx, %edi
-; LINUX-X86-NEXT: .LBB1_34: # %udiv-end1
-; LINUX-X86-NEXT: testl %ebx, %ebx
-; LINUX-X86-NEXT: jne .LBB1_37
-; LINUX-X86-NEXT: # %bb.35: # %udiv-end1
-; LINUX-X86-NEXT: bsrl {{[0-9]+}}(%esp), %edx
-; LINUX-X86-NEXT: xorl $31, %edx
-; LINUX-X86-NEXT: orl $32, %edx
-; LINUX-X86-NEXT: testl %eax, %eax
-; LINUX-X86-NEXT: je .LBB1_38
-; LINUX-X86-NEXT: .LBB1_36:
-; LINUX-X86-NEXT: bsrl %eax, %esi
-; LINUX-X86-NEXT: xorl $31, %esi
-; LINUX-X86-NEXT: jmp .LBB1_39
-; LINUX-X86-NEXT: .LBB1_37:
-; LINUX-X86-NEXT: bsrl %ebx, %edx
-; LINUX-X86-NEXT: xorl $31, %edx
-; LINUX-X86-NEXT: testl %eax, %eax
-; LINUX-X86-NEXT: jne .LBB1_36
-; LINUX-X86-NEXT: .LBB1_38: # %udiv-end1
-; LINUX-X86-NEXT: bsrl {{[0-9]+}}(%esp), %esi
-; LINUX-X86-NEXT: xorl $31, %esi
-; LINUX-X86-NEXT: orl $32, %esi
-; LINUX-X86-NEXT: .LBB1_39: # %udiv-end1
-; LINUX-X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
-; LINUX-X86-NEXT: orb %cl, %al
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; LINUX-X86-NEXT: orl %ebx, %ecx
-; LINUX-X86-NEXT: jne .LBB1_41
-; LINUX-X86-NEXT: # %bb.40: # %udiv-end1
-; LINUX-X86-NEXT: orl $64, %esi
-; LINUX-X86-NEXT: movl %esi, %edx
-; LINUX-X86-NEXT: .LBB1_41: # %udiv-end1
-; LINUX-X86-NEXT: xorl %ebx, %ebx
-; LINUX-X86-NEXT: subl %edx, %edi
-; LINUX-X86-NEXT: movl %edi, %edx
-; LINUX-X86-NEXT: movl $0, %edi
-; LINUX-X86-NEXT: sbbl %edi, %edi
-; LINUX-X86-NEXT: movl $0, %ecx
-; LINUX-X86-NEXT: sbbl %ecx, %ecx
-; LINUX-X86-NEXT: movl $0, %ebp
-; LINUX-X86-NEXT: sbbl %ebp, %ebp
-; LINUX-X86-NEXT: testb %al, %al
-; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: jne .LBB1_54
-; LINUX-X86-NEXT: # %bb.42: # %select.false.sink8
-; LINUX-X86-NEXT: movl $127, %eax
-; LINUX-X86-NEXT: cmpl %edx, %eax
-; LINUX-X86-NEXT: movl $0, %eax
-; LINUX-X86-NEXT: sbbl %edi, %eax
-; LINUX-X86-NEXT: movl $0, %eax
-; LINUX-X86-NEXT: sbbl %ecx, %eax
-; LINUX-X86-NEXT: movl $0, %eax
-; LINUX-X86-NEXT: sbbl %ebp, %eax
-; LINUX-X86-NEXT: setb %al
-; LINUX-X86-NEXT: .LBB1_43: # %select.end7
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: testb %al, %al
-; LINUX-X86-NEXT: movl $0, %eax
-; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; LINUX-X86-NEXT: movl $0, %ebp
-; LINUX-X86-NEXT: jne .LBB1_45
-; LINUX-X86-NEXT: # %bb.44: # %select.end7
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
-; LINUX-X86-NEXT: .LBB1_45: # %select.end7
-; LINUX-X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: jne .LBB1_57
-; LINUX-X86-NEXT: # %bb.46: # %select.end7
-; LINUX-X86-NEXT: movl %edx, %eax
-; LINUX-X86-NEXT: xorl $127, %eax
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; LINUX-X86-NEXT: orl %ebx, %eax
-; LINUX-X86-NEXT: movl %edi, %ecx
-; LINUX-X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; LINUX-X86-NEXT: orl %eax, %ecx
-; LINUX-X86-NEXT: movl %edx, %ecx
-; LINUX-X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; LINUX-X86-NEXT: movl %ebp, %edx
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; LINUX-X86-NEXT: je .LBB1_51
-; LINUX-X86-NEXT: # %bb.47: # %udiv-bb1
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; LINUX-X86-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; LINUX-X86-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
-; LINUX-X86-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl %eax, %esi
-; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: movl %eax, %ecx
-; LINUX-X86-NEXT: xorb $127, %cl
-; LINUX-X86-NEXT: movl %ecx, %eax
-; LINUX-X86-NEXT: shrb $3, %al
-; LINUX-X86-NEXT: andb $12, %al
-; LINUX-X86-NEXT: negb %al
-; LINUX-X86-NEXT: movsbl %al, %eax
-; LINUX-X86-NEXT: movl 152(%esp,%eax), %edx
-; LINUX-X86-NEXT: movl 156(%esp,%eax), %ebp
-; LINUX-X86-NEXT: shldl %cl, %edx, %ebp
-; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl 144(%esp,%eax), %ebp
-; LINUX-X86-NEXT: movl 148(%esp,%eax), %eax
-; LINUX-X86-NEXT: shldl %cl, %eax, %edx
-; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: shldl %cl, %ebp, %eax
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: shll %cl, %ebp
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: addl $1, %edx
-; LINUX-X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; LINUX-X86-NEXT: adcl $0, %ebx
-; LINUX-X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; LINUX-X86-NEXT: jb .LBB1_55
-; LINUX-X86-NEXT: # %bb.48: # %udiv-preheader
-; LINUX-X86-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; LINUX-X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl $0, {{[0-9]+}}(%esp)
-; LINUX-X86-NEXT: movl %edx, %eax
-; LINUX-X86-NEXT: shrb $3, %al
-; LINUX-X86-NEXT: andb $12, %al
-; LINUX-X86-NEXT: movzbl %al, %eax
-; LINUX-X86-NEXT: movl 108(%esp,%eax), %ebp
-; LINUX-X86-NEXT: movl 104(%esp,%eax), %ebx
-; LINUX-X86-NEXT: movl %ebx, %esi
-; LINUX-X86-NEXT: movl %edx, %ecx
-; LINUX-X86-NEXT: shrdl %cl, %ebp, %esi
-; LINUX-X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl 96(%esp,%eax), %esi
-; LINUX-X86-NEXT: movl 100(%esp,%eax), %eax
-; LINUX-X86-NEXT: movl %eax, %edi
-; LINUX-X86-NEXT: shrdl %cl, %ebx, %edi
-; LINUX-X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: shrl %cl, %ebp
-; LINUX-X86-NEXT: shrdl %cl, %eax, %esi
-; LINUX-X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; LINUX-X86-NEXT: addl $-1, %eax
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; LINUX-X86-NEXT: adcl $-1, %eax
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; LINUX-X86-NEXT: adcl $-1, %eax
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; LINUX-X86-NEXT: adcl $-1, %eax
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; LINUX-X86-NEXT: .p2align 4
-; LINUX-X86-NEXT: .LBB1_49: # %udiv-do-while
-; LINUX-X86-NEXT: # =>This Inner Loop Header: Depth=1
-; LINUX-X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; LINUX-X86-NEXT: shldl $1, %edi, %ebp
-; LINUX-X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; LINUX-X86-NEXT: shldl $1, %ecx, %edi
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; LINUX-X86-NEXT: shldl $1, %ebx, %ecx
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; LINUX-X86-NEXT: shldl $1, %edx, %ebx
-; LINUX-X86-NEXT: shldl $1, %eax, %edx
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; LINUX-X86-NEXT: orl %ebp, %edx
-; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: shldl $1, %esi, %eax
-; LINUX-X86-NEXT: orl %ebp, %eax
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: shldl $1, %eax, %esi
-; LINUX-X86-NEXT: orl %ebp, %esi
-; LINUX-X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: addl %eax, %eax
-; LINUX-X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: cmpl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: sbbl %ecx, %eax
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: sbbl %edi, %eax
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; LINUX-X86-NEXT: sarl $31, %eax
-; LINUX-X86-NEXT: movl %eax, %edx
-; LINUX-X86-NEXT: andl $1, %edx
-; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; LINUX-X86-NEXT: movl %eax, %esi
-; LINUX-X86-NEXT: andl {{[0-9]+}}(%esp), %esi
-; LINUX-X86-NEXT: movl %eax, %ebp
-; LINUX-X86-NEXT: andl {{[0-9]+}}(%esp), %ebp
-; LINUX-X86-NEXT: movl %eax, %edx
-; LINUX-X86-NEXT: andl {{[0-9]+}}(%esp), %edx
-; LINUX-X86-NEXT: andl {{[0-9]+}}(%esp), %eax
-; LINUX-X86-NEXT: subl %eax, %ebx
-; LINUX-X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: sbbl %edx, %ecx
-; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; LINUX-X86-NEXT: sbbl %ebp, %edi
-; LINUX-X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; LINUX-X86-NEXT: sbbl %esi, %ebp
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; LINUX-X86-NEXT: addl $-1, %edx
-; LINUX-X86-NEXT: adcl $-1, %edi
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; LINUX-X86-NEXT: adcl $-1, %ebx
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; LINUX-X86-NEXT: adcl $-1, %ecx
-; LINUX-X86-NEXT: movl %edi, %eax
-; LINUX-X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: orl %ecx, %eax
-; LINUX-X86-NEXT: movl %edx, %ecx
-; LINUX-X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: orl %ebx, %ecx
-; LINUX-X86-NEXT: orl %eax, %ecx
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: jne .LBB1_49
-; LINUX-X86-NEXT: .LBB1_50: # %udiv-loop-exit
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; LINUX-X86-NEXT: shldl $1, %eax, %edx
-; LINUX-X86-NEXT: shldl $1, %esi, %eax
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; LINUX-X86-NEXT: shldl $1, %edi, %esi
-; LINUX-X86-NEXT: movl %eax, %ecx
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: leal (%eax,%edi,2), %eax
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl %ecx, %edi
-; LINUX-X86-NEXT: .LBB1_51: # %udiv-end
-; LINUX-X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; LINUX-X86-NEXT: movl %eax, (%ecx)
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: movl %eax, 4(%ecx)
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: movl %eax, 8(%ecx)
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: movl %eax, 12(%ecx)
-; LINUX-X86-NEXT: movl %edi, %esi
-; LINUX-X86-NEXT: movl %edi, %eax
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
-; LINUX-X86-NEXT: imull %ebp, %esi
-; LINUX-X86-NEXT: movl %edx, %ecx
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; LINUX-X86-NEXT: mull %edi
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: addl %esi, %edx
-; LINUX-X86-NEXT: imull %edi, %ecx
-; LINUX-X86-NEXT: addl %edx, %ecx
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; LINUX-X86-NEXT: movl %esi, %eax
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; LINUX-X86-NEXT: mull %ebx
-; LINUX-X86-NEXT: movl %eax, %edi
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; LINUX-X86-NEXT: imull %esi, %eax
-; LINUX-X86-NEXT: addl %edx, %eax
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; LINUX-X86-NEXT: imull %ebp, %ebx
-; LINUX-X86-NEXT: addl %eax, %ebx
-; LINUX-X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; LINUX-X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: adcl %ecx, %ebx
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; LINUX-X86-NEXT: movl %edi, %eax
-; LINUX-X86-NEXT: mull %esi
-; LINUX-X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; LINUX-X86-NEXT: mull %esi
-; LINUX-X86-NEXT: movl %edx, %ecx
-; LINUX-X86-NEXT: movl %eax, %esi
-; LINUX-X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; LINUX-X86-NEXT: adcl $0, %ecx
-; LINUX-X86-NEXT: movl %edi, %eax
-; LINUX-X86-NEXT: mull %ebp
-; LINUX-X86-NEXT: movl %edx, %edi
-; LINUX-X86-NEXT: addl %esi, %eax
-; LINUX-X86-NEXT: movl %eax, %esi
-; LINUX-X86-NEXT: adcl %ecx, %edi
-; LINUX-X86-NEXT: setb %cl
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; LINUX-X86-NEXT: mull %ebp
-; LINUX-X86-NEXT: addl %edi, %eax
-; LINUX-X86-NEXT: movzbl %cl, %ecx
-; LINUX-X86-NEXT: adcl %ecx, %edx
-; LINUX-X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; LINUX-X86-NEXT: adcl %ebx, %edx
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; LINUX-X86-NEXT: subl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; LINUX-X86-NEXT: sbbl %esi, %ecx
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %esi
-; LINUX-X86-NEXT: sbbl %eax, %esi
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; LINUX-X86-NEXT: sbbl %edx, %ebx
-; LINUX-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; LINUX-X86-NEXT: movl %edi, (%eax)
-; LINUX-X86-NEXT: movl %ecx, 4(%eax)
-; LINUX-X86-NEXT: movl %esi, 8(%eax)
-; LINUX-X86-NEXT: movl %ebx, 12(%eax)
-; LINUX-X86-NEXT: addl $236, %esp
-; LINUX-X86-NEXT: .cfi_def_cfa_offset 20
-; LINUX-X86-NEXT: popl %esi
-; LINUX-X86-NEXT: .cfi_def_cfa_offset 16
-; LINUX-X86-NEXT: popl %edi
-; LINUX-X86-NEXT: .cfi_def_cfa_offset 12
-; LINUX-X86-NEXT: popl %ebx
-; LINUX-X86-NEXT: .cfi_def_cfa_offset 8
-; LINUX-X86-NEXT: popl %ebp
-; LINUX-X86-NEXT: .cfi_def_cfa_offset 4
-; LINUX-X86-NEXT: retl
-; LINUX-X86-NEXT: .LBB1_52:
-; LINUX-X86-NEXT: .cfi_def_cfa_offset 256
-; LINUX-X86-NEXT: movb $1, %al
-; LINUX-X86-NEXT: jmp .LBB1_17
-; LINUX-X86-NEXT: .LBB1_53:
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; LINUX-X86-NEXT: jmp .LBB1_24
-; LINUX-X86-NEXT: .LBB1_54:
-; LINUX-X86-NEXT: movb $1, %al
-; LINUX-X86-NEXT: jmp .LBB1_43
-; LINUX-X86-NEXT: .LBB1_55:
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; LINUX-X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; LINUX-X86-NEXT: jmp .LBB1_50
-; LINUX-X86-NEXT: .LBB1_57:
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; LINUX-X86-NEXT: movl %ebp, %edx
-; LINUX-X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; LINUX-X86-NEXT: jmp .LBB1_51
+; LINUX-X86-NOT: __udivmodti4
+; LINUX-X86-NOT: __udivti3
+; LINUX-X86-NOT: __umodti3
+; LINUX-X86: retl
;
-; WIN32-LABEL: udivrem_i128:
-; WIN32: # %bb.0: # %_udiv-special-cases_udiv-special-cases
-; WIN32-NEXT: pushl %ebp
-; WIN32-NEXT: movl %esp, %ebp
-; WIN32-NEXT: pushl %ebx
-; WIN32-NEXT: pushl %edi
-; WIN32-NEXT: pushl %esi
-; WIN32-NEXT: andl $-16, %esp
-; WIN32-NEXT: subl $240, %esp
-; WIN32-NEXT: movl 48(%ebp), %ebx
-; WIN32-NEXT: movl 40(%ebp), %ecx
-; WIN32-NEXT: movl 52(%ebp), %edi
-; WIN32-NEXT: movl 44(%ebp), %esi
-; WIN32-NEXT: movl %esi, %eax
-; WIN32-NEXT: orl %edi, %eax
-; WIN32-NEXT: orl %ebx, %ecx
-; WIN32-NEXT: orl %eax, %ecx
-; WIN32-NEXT: sete %al
-; WIN32-NEXT: movl 28(%ebp), %ecx
-; WIN32-NEXT: orl 36(%ebp), %ecx
-; WIN32-NEXT: movl 24(%ebp), %edx
-; WIN32-NEXT: orl 32(%ebp), %edx
-; WIN32-NEXT: orl %ecx, %edx
-; WIN32-NEXT: sete %cl
-; WIN32-NEXT: testl %edi, %edi
-; WIN32-NEXT: jne LBB1_1
-; WIN32-NEXT: # %bb.2: # %_udiv-special-cases_udiv-special-cases
-; WIN32-NEXT: bsrl %ebx, %edx
-; WIN32-NEXT: xorl $31, %edx
-; WIN32-NEXT: orl $32, %edx
-; WIN32-NEXT: jmp LBB1_3
-; WIN32-NEXT: LBB1_1:
-; WIN32-NEXT: bsrl %edi, %edx
-; WIN32-NEXT: xorl $31, %edx
-; WIN32-NEXT: LBB1_3: # %_udiv-special-cases_udiv-special-cases
-; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: testl %esi, %esi
-; WIN32-NEXT: jne LBB1_4
-; WIN32-NEXT: # %bb.5: # %_udiv-special-cases_udiv-special-cases
-; WIN32-NEXT: bsrl 40(%ebp), %edx
-; WIN32-NEXT: xorl $31, %edx
-; WIN32-NEXT: orl $32, %edx
-; WIN32-NEXT: jmp LBB1_6
-; WIN32-NEXT: LBB1_4:
-; WIN32-NEXT: bsrl %esi, %edx
-; WIN32-NEXT: xorl $31, %edx
-; WIN32-NEXT: LBB1_6: # %_udiv-special-cases_udiv-special-cases
-; WIN32-NEXT: movl %ebx, %esi
-; WIN32-NEXT: orl %edi, %esi
-; WIN32-NEXT: jne LBB1_8
-; WIN32-NEXT: # %bb.7: # %_udiv-special-cases_udiv-special-cases
-; WIN32-NEXT: orl $64, %edx
-; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: LBB1_8: # %_udiv-special-cases_udiv-special-cases
-; WIN32-NEXT: movl 36(%ebp), %edi
-; WIN32-NEXT: testl %edi, %edi
-; WIN32-NEXT: movl 28(%ebp), %esi
-; WIN32-NEXT: jne LBB1_9
-; WIN32-NEXT: # %bb.10: # %_udiv-special-cases_udiv-special-cases
-; WIN32-NEXT: bsrl 32(%ebp), %edx
-; WIN32-NEXT: xorl $31, %edx
-; WIN32-NEXT: orl $32, %edx
-; WIN32-NEXT: testl %esi, %esi
-; WIN32-NEXT: je LBB1_13
-; WIN32-NEXT: LBB1_12:
-; WIN32-NEXT: bsrl %esi, %esi
-; WIN32-NEXT: xorl $31, %esi
-; WIN32-NEXT: jmp LBB1_14
-; WIN32-NEXT: LBB1_9:
-; WIN32-NEXT: bsrl %edi, %edx
-; WIN32-NEXT: xorl $31, %edx
-; WIN32-NEXT: testl %esi, %esi
-; WIN32-NEXT: jne LBB1_12
-; WIN32-NEXT: LBB1_13: # %_udiv-special-cases_udiv-special-cases
-; WIN32-NEXT: bsrl 24(%ebp), %esi
-; WIN32-NEXT: xorl $31, %esi
-; WIN32-NEXT: orl $32, %esi
-; WIN32-NEXT: LBB1_14: # %_udiv-special-cases_udiv-special-cases
-; WIN32-NEXT: orb %cl, %al
-; WIN32-NEXT: movl 32(%ebp), %ecx
-; WIN32-NEXT: orl %edi, %ecx
-; WIN32-NEXT: jne LBB1_16
-; WIN32-NEXT: # %bb.15: # %_udiv-special-cases_udiv-special-cases
-; WIN32-NEXT: orl $64, %esi
-; WIN32-NEXT: movl %esi, %edx
-; WIN32-NEXT: LBB1_16: # %_udiv-special-cases_udiv-special-cases
-; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: subl %edx, %ecx
-; WIN32-NEXT: movl $0, %esi
-; WIN32-NEXT: sbbl %esi, %esi
-; WIN32-NEXT: movl $0, %edi
-; WIN32-NEXT: sbbl %edi, %edi
-; WIN32-NEXT: movl $0, %ebx
-; WIN32-NEXT: sbbl %ebx, %ebx
-; WIN32-NEXT: testb %al, %al
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: jne LBB1_17
-; WIN32-NEXT: # %bb.18: # %select.false.sink
-; WIN32-NEXT: movl $127, %eax
-; WIN32-NEXT: cmpl %ecx, %eax
-; WIN32-NEXT: movl $0, %eax
-; WIN32-NEXT: sbbl %esi, %eax
-; WIN32-NEXT: movl $0, %eax
-; WIN32-NEXT: movl %edi, %ecx
-; WIN32-NEXT: sbbl %edi, %eax
-; WIN32-NEXT: movl $0, %eax
-; WIN32-NEXT: sbbl %ebx, %eax
-; WIN32-NEXT: setb %al
-; WIN32-NEXT: LBB1_19: # %select.end
-; WIN32-NEXT: movl 28(%ebp), %edx
-; WIN32-NEXT: movl %ebx, %edi
-; WIN32-NEXT: movl %edx, %ebx
-; WIN32-NEXT: movl 32(%ebp), %esi
-; WIN32-NEXT: testb %al, %al
-; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: movl $0, %eax
-; WIN32-NEXT: movl $0, %edx
-; WIN32-NEXT: jne LBB1_21
-; WIN32-NEXT: # %bb.20: # %select.end
-; WIN32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl 24(%ebp), %eax
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %esi, %eax
-; WIN32-NEXT: movl 36(%ebp), %edx
-; WIN32-NEXT: LBB1_21: # %select.end
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: jne LBB1_22
-; WIN32-NEXT: # %bb.28: # %select.end
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; WIN32-NEXT: movl %edx, %eax
-; WIN32-NEXT: xorl $127, %eax
-; WIN32-NEXT: orl %ecx, %eax
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: orl %edi, %ecx
-; WIN32-NEXT: orl %eax, %ecx
-; WIN32-NEXT: movl 36(%ebp), %ecx
-; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: je LBB1_29
-; WIN32-NEXT: # %bb.26: # %udiv-bb15
-; WIN32-NEXT: movl 24(%ebp), %eax
-; WIN32-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl %edx, %ecx
-; WIN32-NEXT: xorb $127, %cl
-; WIN32-NEXT: movl %ecx, %eax
-; WIN32-NEXT: shrb $3, %al
-; WIN32-NEXT: andb $12, %al
-; WIN32-NEXT: negb %al
-; WIN32-NEXT: movsbl %al, %eax
-; WIN32-NEXT: movl 216(%esp,%eax), %edx
-; WIN32-NEXT: movl 220(%esp,%eax), %edi
-; WIN32-NEXT: shldl %cl, %edx, %edi
-; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %esi, %edi
-; WIN32-NEXT: movl 208(%esp,%eax), %esi
-; WIN32-NEXT: movl 212(%esp,%eax), %eax
-; WIN32-NEXT: shldl %cl, %eax, %edx
-; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; WIN32-NEXT: shldl %cl, %esi, %eax
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: shll %cl, %esi
-; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: addl $1, %edx
-; WIN32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: jb LBB1_27
-; WIN32-NEXT: # %bb.23: # %udiv-preheader4
-; WIN32-NEXT: movl 24(%ebp), %eax
-; WIN32-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl 36(%ebp), %eax
-; WIN32-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl %edx, %eax
-; WIN32-NEXT: shrb $3, %al
-; WIN32-NEXT: andb $12, %al
-; WIN32-NEXT: movzbl %al, %eax
-; WIN32-NEXT: movl 172(%esp,%eax), %esi
-; WIN32-NEXT: movl %edx, %ecx
-; WIN32-NEXT: movl 168(%esp,%eax), %edx
-; WIN32-NEXT: movl %edx, %edi
-; WIN32-NEXT: shrdl %cl, %esi, %edi
-; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl 160(%esp,%eax), %ebx
-; WIN32-NEXT: movl 164(%esp,%eax), %edi
-; WIN32-NEXT: movl %edi, %eax
-; WIN32-NEXT: shrdl %cl, %edx, %eax
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: shrl %cl, %esi
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: # kill: def $cl killed $cl killed $ecx
-; WIN32-NEXT: shrdl %cl, %edi, %ebx
-; WIN32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl 40(%ebp), %eax
-; WIN32-NEXT: addl $-1, %eax
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl 44(%ebp), %eax
-; WIN32-NEXT: adcl $-1, %eax
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl 48(%ebp), %ecx
-; WIN32-NEXT: adcl $-1, %ecx
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl 52(%ebp), %ecx
-; WIN32-NEXT: adcl $-1, %ecx
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: .p2align 4
-; WIN32-NEXT: LBB1_24: # %udiv-do-while3
-; WIN32-NEXT: # =>This Inner Loop Header: Depth=1
-; WIN32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: shldl $1, %eax, %esi
-; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; WIN32-NEXT: shldl $1, %ebx, %eax
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; WIN32-NEXT: shldl $1, %edi, %ebx
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; WIN32-NEXT: shldl $1, %esi, %edi
-; WIN32-NEXT: shldl $1, %ecx, %esi
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: orl %eax, %esi
-; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: shldl $1, %edx, %ecx
-; WIN32-NEXT: orl %eax, %ecx
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: shldl $1, %ecx, %edx
-; WIN32-NEXT: orl %eax, %edx
-; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: addl %ecx, %ecx
-; WIN32-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: cmpl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: sbbl %ebx, %ecx
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; WIN32-NEXT: sarl $31, %ecx
-; WIN32-NEXT: movl %ecx, %edx
-; WIN32-NEXT: andl $1, %edx
-; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %ecx, %eax
-; WIN32-NEXT: andl 52(%ebp), %eax
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %ecx, %esi
-; WIN32-NEXT: andl 48(%ebp), %esi
-; WIN32-NEXT: movl %ecx, %edx
-; WIN32-NEXT: andl 44(%ebp), %edx
-; WIN32-NEXT: andl 40(%ebp), %ecx
-; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: subl %ecx, %edi
-; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: sbbl %edx, %ebx
-; WIN32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: sbbl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; WIN32-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; WIN32-NEXT: addl $-1, %edx
-; WIN32-NEXT: adcl $-1, %eax
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; WIN32-NEXT: adcl $-1, %edi
-; WIN32-NEXT: adcl $-1, %ebx
-; WIN32-NEXT: movl %eax, %ecx
-; WIN32-NEXT: orl %ebx, %ecx
-; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: orl %edi, %edx
-; WIN32-NEXT: orl %ecx, %edx
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: jne LBB1_24
-; WIN32-NEXT: LBB1_25: # %udiv-loop-exit2
-; WIN32-NEXT: shldl $1, %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: shldl $1, %edx, %ecx
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: shldl $1, %eax, %edx
-; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: leal (%ecx,%eax,2), %eax
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl 36(%ebp), %ecx
-; WIN32-NEXT: movl 28(%ebp), %ebx
-; WIN32-NEXT: LBB1_29: # %udiv-end1
-; WIN32-NEXT: movl 40(%ebp), %eax
-; WIN32-NEXT: movl 48(%ebp), %esi
-; WIN32-NEXT: orl %esi, %eax
-; WIN32-NEXT: movl %ebx, %edx
-; WIN32-NEXT: movl %ecx, %ebx
-; WIN32-NEXT: movl 44(%ebp), %ecx
-; WIN32-NEXT: movl 52(%ebp), %edi
-; WIN32-NEXT: orl %edi, %ecx
-; WIN32-NEXT: orl %eax, %ecx
-; WIN32-NEXT: sete %al
-; WIN32-NEXT: movl 24(%ebp), %ecx
-; WIN32-NEXT: orl 32(%ebp), %ecx
-; WIN32-NEXT: orl %ebx, %edx
-; WIN32-NEXT: orl %ecx, %edx
-; WIN32-NEXT: sete %cl
-; WIN32-NEXT: testl %edi, %edi
-; WIN32-NEXT: jne LBB1_30
-; WIN32-NEXT: # %bb.31: # %udiv-end1
-; WIN32-NEXT: bsrl %esi, %edx
-; WIN32-NEXT: xorl $31, %edx
-; WIN32-NEXT: orl $32, %edx
-; WIN32-NEXT: jmp LBB1_32
-; WIN32-NEXT: LBB1_30:
-; WIN32-NEXT: bsrl %edi, %edx
-; WIN32-NEXT: xorl $31, %edx
-; WIN32-NEXT: LBB1_32: # %udiv-end1
-; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %esi, %ebx
-; WIN32-NEXT: movl 44(%ebp), %edx
-; WIN32-NEXT: testl %edx, %edx
-; WIN32-NEXT: movl 40(%ebp), %esi
-; WIN32-NEXT: jne LBB1_33
-; WIN32-NEXT: # %bb.34: # %udiv-end1
-; WIN32-NEXT: bsrl %esi, %edx
-; WIN32-NEXT: xorl $31, %edx
-; WIN32-NEXT: orl $32, %edx
-; WIN32-NEXT: jmp LBB1_35
-; WIN32-NEXT: LBB1_33:
-; WIN32-NEXT: bsrl %edx, %edx
-; WIN32-NEXT: xorl $31, %edx
-; WIN32-NEXT: LBB1_35: # %udiv-end1
-; WIN32-NEXT: movl %ebx, %esi
-; WIN32-NEXT: orl %edi, %esi
-; WIN32-NEXT: jne LBB1_37
-; WIN32-NEXT: # %bb.36: # %udiv-end1
-; WIN32-NEXT: orl $64, %edx
-; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: LBB1_37: # %udiv-end1
-; WIN32-NEXT: movl 36(%ebp), %edx
-; WIN32-NEXT: testl %edx, %edx
-; WIN32-NEXT: movl 28(%ebp), %esi
-; WIN32-NEXT: jne LBB1_38
-; WIN32-NEXT: # %bb.39: # %udiv-end1
-; WIN32-NEXT: bsrl 32(%ebp), %edx
-; WIN32-NEXT: xorl $31, %edx
-; WIN32-NEXT: orl $32, %edx
-; WIN32-NEXT: testl %esi, %esi
-; WIN32-NEXT: je LBB1_42
-; WIN32-NEXT: LBB1_41:
-; WIN32-NEXT: bsrl %esi, %esi
-; WIN32-NEXT: xorl $31, %esi
-; WIN32-NEXT: jmp LBB1_43
-; WIN32-NEXT: LBB1_38:
-; WIN32-NEXT: bsrl %edx, %edx
-; WIN32-NEXT: xorl $31, %edx
-; WIN32-NEXT: testl %esi, %esi
-; WIN32-NEXT: jne LBB1_41
-; WIN32-NEXT: LBB1_42: # %udiv-end1
-; WIN32-NEXT: bsrl 24(%ebp), %esi
-; WIN32-NEXT: xorl $31, %esi
-; WIN32-NEXT: orl $32, %esi
-; WIN32-NEXT: LBB1_43: # %udiv-end1
-; WIN32-NEXT: orb %cl, %al
-; WIN32-NEXT: movl 32(%ebp), %ecx
-; WIN32-NEXT: orl 36(%ebp), %ecx
-; WIN32-NEXT: jne LBB1_45
-; WIN32-NEXT: # %bb.44: # %udiv-end1
-; WIN32-NEXT: orl $64, %esi
-; WIN32-NEXT: movl %esi, %edx
-; WIN32-NEXT: LBB1_45: # %udiv-end1
-; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: subl %edx, %ecx
-; WIN32-NEXT: movl $0, %edx
-; WIN32-NEXT: sbbl %edx, %edx
-; WIN32-NEXT: movl $0, %esi
-; WIN32-NEXT: sbbl %esi, %esi
-; WIN32-NEXT: movl $0, %edi
-; WIN32-NEXT: sbbl %edi, %edi
-; WIN32-NEXT: testb %al, %al
-; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: jne LBB1_46
-; WIN32-NEXT: # %bb.47: # %select.false.sink8
-; WIN32-NEXT: movl $127, %eax
-; WIN32-NEXT: cmpl %ecx, %eax
-; WIN32-NEXT: movl $0, %eax
-; WIN32-NEXT: sbbl %edx, %eax
-; WIN32-NEXT: movl $0, %eax
-; WIN32-NEXT: sbbl %esi, %eax
-; WIN32-NEXT: movl $0, %eax
-; WIN32-NEXT: sbbl %edi, %eax
-; WIN32-NEXT: setb %al
-; WIN32-NEXT: LBB1_48: # %select.end7
-; WIN32-NEXT: movl 28(%ebp), %ecx
-; WIN32-NEXT: movl 32(%ebp), %edx
-; WIN32-NEXT: movl %edi, %esi
-; WIN32-NEXT: testb %al, %al
-; WIN32-NEXT: movl $0, %edi
-; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: movl $0, %eax
-; WIN32-NEXT: jne LBB1_50
-; WIN32-NEXT: # %bb.49: # %select.end7
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl 24(%ebp), %edi
-; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl 36(%ebp), %eax
-; WIN32-NEXT: LBB1_50: # %select.end7
-; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: jne LBB1_51
-; WIN32-NEXT: # %bb.57: # %select.end7
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: movl %eax, %edi
-; WIN32-NEXT: xorl $127, %eax
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; WIN32-NEXT: orl %ebx, %eax
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: orl %esi, %ecx
-; WIN32-NEXT: orl %eax, %ecx
-; WIN32-NEXT: movl 36(%ebp), %ecx
-; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: je LBB1_58
-; WIN32-NEXT: # %bb.55: # %udiv-bb1
-; WIN32-NEXT: movl 24(%ebp), %eax
-; WIN32-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl 28(%ebp), %edx
-; WIN32-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl 32(%ebp), %esi
-; WIN32-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl %edi, %eax
-; WIN32-NEXT: movl %eax, %ecx
-; WIN32-NEXT: movl %edi, %esi
-; WIN32-NEXT: xorb $127, %cl
-; WIN32-NEXT: movl %ecx, %eax
-; WIN32-NEXT: shrb $3, %al
-; WIN32-NEXT: andb $12, %al
-; WIN32-NEXT: negb %al
-; WIN32-NEXT: movsbl %al, %eax
-; WIN32-NEXT: movl 152(%esp,%eax), %edx
-; WIN32-NEXT: movl 156(%esp,%eax), %edi
-; WIN32-NEXT: shldl %cl, %edx, %edi
-; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl 144(%esp,%eax), %edi
-; WIN32-NEXT: movl 148(%esp,%eax), %eax
-; WIN32-NEXT: shldl %cl, %eax, %edx
-; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: shldl %cl, %edi, %eax
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: shll %cl, %edi
-; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: addl $1, %esi
-; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: adcl $0, %ebx
-; WIN32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: movl 36(%ebp), %eax
-; WIN32-NEXT: jb LBB1_56
-; WIN32-NEXT: # %bb.52: # %udiv-preheader
-; WIN32-NEXT: movl 24(%ebp), %ecx
-; WIN32-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl 28(%ebp), %ecx
-; WIN32-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl 32(%ebp), %ecx
-; WIN32-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl $0, {{[0-9]+}}(%esp)
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: movl %ecx, %eax
-; WIN32-NEXT: shrb $3, %al
-; WIN32-NEXT: andb $12, %al
-; WIN32-NEXT: movzbl %al, %eax
-; WIN32-NEXT: movl 108(%esp,%eax), %edi
-; WIN32-NEXT: movl 104(%esp,%eax), %ebx
-; WIN32-NEXT: movl %ebx, %edx
-; WIN32-NEXT: shrdl %cl, %edi, %edx
-; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl 96(%esp,%eax), %esi
-; WIN32-NEXT: movl 100(%esp,%eax), %eax
-; WIN32-NEXT: movl %eax, %edx
-; WIN32-NEXT: shrdl %cl, %ebx, %edx
-; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: shrl %cl, %edi
-; WIN32-NEXT: # kill: def $cl killed $cl killed $ecx
-; WIN32-NEXT: shrdl %cl, %eax, %esi
-; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl 40(%ebp), %ecx
-; WIN32-NEXT: addl $-1, %ecx
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl 44(%ebp), %ecx
-; WIN32-NEXT: adcl $-1, %ecx
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl 48(%ebp), %eax
-; WIN32-NEXT: adcl $-1, %eax
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl 52(%ebp), %ecx
-; WIN32-NEXT: adcl $-1, %ecx
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: .p2align 4
-; WIN32-NEXT: LBB1_53: # %udiv-do-while
-; WIN32-NEXT: # =>This Inner Loop Header: Depth=1
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: shldl $1, %eax, %edi
-; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; WIN32-NEXT: shldl $1, %esi, %eax
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: shldl $1, %eax, %esi
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; WIN32-NEXT: shldl $1, %ebx, %eax
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; WIN32-NEXT: shldl $1, %edi, %ebx
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; WIN32-NEXT: orl %edx, %ebx
-; WIN32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: shldl $1, %ecx, %edi
-; WIN32-NEXT: orl %edx, %edi
-; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; WIN32-NEXT: shldl $1, %edi, %ecx
-; WIN32-NEXT: orl %edx, %ecx
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: addl %edi, %edi
-; WIN32-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: cmpl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: sbbl %esi, %ecx
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; WIN32-NEXT: sbbl %edx, %ecx
-; WIN32-NEXT: sarl $31, %ecx
-; WIN32-NEXT: movl %ecx, %edi
-; WIN32-NEXT: andl $1, %edi
-; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %ecx, %edi
-; WIN32-NEXT: andl 52(%ebp), %edi
-; WIN32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %ecx, %edi
-; WIN32-NEXT: andl 48(%ebp), %edi
-; WIN32-NEXT: movl %ecx, %ebx
-; WIN32-NEXT: andl 44(%ebp), %ebx
-; WIN32-NEXT: andl 40(%ebp), %ecx
-; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: subl %ecx, %eax
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: sbbl %ebx, %esi
-; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: sbbl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; WIN32-NEXT: movl %edx, %edi
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; WIN32-NEXT: addl $-1, %esi
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: adcl $-1, %ecx
-; WIN32-NEXT: adcl $-1, %eax
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
-; WIN32-NEXT: adcl $-1, %ebx
-; WIN32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: orl %ebx, %ecx
-; WIN32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: orl %eax, %esi
-; WIN32-NEXT: orl %ecx, %esi
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: jne LBB1_53
-; WIN32-NEXT: LBB1_54: # %udiv-loop-exit
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: shldl $1, %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: shldl $1, %ecx, %eax
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: shldl $1, %eax, %ecx
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; WIN32-NEXT: leal (%edx,%eax,2), %eax
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; WIN32-NEXT: movl %ecx, %eax
-; WIN32-NEXT: LBB1_58: # %udiv-end
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: movl 8(%ebp), %ecx
-; WIN32-NEXT: movl %eax, (%ecx)
-; WIN32-NEXT: movl %edx, 4(%ecx)
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: movl %eax, 8(%ecx)
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: movl %eax, 12(%ecx)
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: movl %ecx, %esi
-; WIN32-NEXT: movl 44(%ebp), %eax
-; WIN32-NEXT: imull %eax, %esi
-; WIN32-NEXT: movl %ecx, %eax
-; WIN32-NEXT: movl 40(%ebp), %edi
-; WIN32-NEXT: mull %edi
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: addl %esi, %edx
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; WIN32-NEXT: imull %edi, %esi
-; WIN32-NEXT: addl %edx, %esi
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: movl %ecx, %eax
-; WIN32-NEXT: movl 48(%ebp), %ebx
-; WIN32-NEXT: mull %ebx
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl 52(%ebp), %eax
-; WIN32-NEXT: imull %ecx, %eax
-; WIN32-NEXT: addl %edx, %eax
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; WIN32-NEXT: imull %edi, %ebx
-; WIN32-NEXT: addl %eax, %ebx
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: adcl %esi, %ebx
-; WIN32-NEXT: movl 40(%ebp), %esi
-; WIN32-NEXT: movl %esi, %eax
-; WIN32-NEXT: mull %ecx
-; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl 44(%ebp), %eax
-; WIN32-NEXT: mull %ecx
-; WIN32-NEXT: movl %eax, %ecx
-; WIN32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; WIN32-NEXT: adcl $0, %edx
-; WIN32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: movl %esi, %eax
-; WIN32-NEXT: mull %edi
-; WIN32-NEXT: movl %edx, %esi
-; WIN32-NEXT: addl %ecx, %eax
-; WIN32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; WIN32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; WIN32-NEXT: setb %cl
-; WIN32-NEXT: movl 44(%ebp), %eax
-; WIN32-NEXT: mull %edi
-; WIN32-NEXT: addl %esi, %eax
-; WIN32-NEXT: movzbl %cl, %ecx
-; WIN32-NEXT: adcl %ecx, %edx
-; WIN32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; WIN32-NEXT: adcl %ebx, %edx
-; WIN32-NEXT: movl 24(%ebp), %ebx
-; WIN32-NEXT: subl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
-; WIN32-NEXT: movl 28(%ebp), %ecx
-; WIN32-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; WIN32-NEXT: movl 32(%ebp), %esi
-; WIN32-NEXT: sbbl %eax, %esi
-; WIN32-NEXT: movl 36(%ebp), %edi
-; WIN32-NEXT: sbbl %edx, %edi
-; WIN32-NEXT: movl 12(%ebp), %eax
-; WIN32-NEXT: movl %ebx, (%eax)
-; WIN32-NEXT: movl %ecx, 4(%eax)
-; WIN32-NEXT: movl %esi, 8(%eax)
-; WIN32-NEXT: movl %edi, 12(%eax)
-; WIN32-NEXT: leal -12(%ebp), %esp
-; WIN32-NEXT: popl %esi
-; WIN32-NEXT: popl %edi
-; WIN32-NEXT: popl %ebx
-; WIN32-NEXT: popl %ebp
-; WIN32-NEXT: retl
-; WIN32-NEXT: LBB1_17:
-; WIN32-NEXT: movl %edi, %ecx
-; WIN32-NEXT: movb $1, %al
-; WIN32-NEXT: jmp LBB1_19
-; WIN32-NEXT: LBB1_27:
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: jmp LBB1_25
-; WIN32-NEXT: LBB1_46:
-; WIN32-NEXT: movb $1, %al
-; WIN32-NEXT: jmp LBB1_48
-; WIN32-NEXT: LBB1_56:
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; WIN32-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; WIN32-NEXT: jmp LBB1_54
-; WIN32-NEXT: LBB1_22:
-; WIN32-NEXT: movl 36(%ebp), %ecx
-; WIN32-NEXT: jmp LBB1_29
-; WIN32-NEXT: LBB1_51:
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; WIN32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; WIN32-NEXT: jmp LBB1_58
+; WIN32-LABEL: {{_?}}udivrem_i128:
+; WIN32-NOT: __udivmodti4
+; WIN32-NOT: __udivti3
+; WIN32-NOT: __umodti3
+; WIN32: retl
%q = udiv i128 %n, %d
%r = urem i128 %n, %d
store i128 %q, ptr %q_out
>From bf48b647ec72475c58e2abc8942e2cdd5ce77608 Mon Sep 17 00:00:00 2001
From: Takashiidobe <idobetakashi at gmail.com>
Date: Sat, 4 Apr 2026 19:05:16 -0400
Subject: [PATCH 15/15] split riscv, x86 files
---
...libcall.ll => i128-divrem-libcall-rv32.ll} | 28 +-
.../CodeGen/RISCV/i128-divrem-libcall-rv64.ll | 87 +++++
.../CodeGen/SystemZ/i128-divrem-libcall.ll | 88 +++++
.../CodeGen/X86/i128-divrem-libcall-x86-32.ll | 44 +++
.../CodeGen/X86/i128-divrem-libcall-x86-64.ll | 332 ++++++++++++++++++
llvm/test/CodeGen/X86/i128-divrem-libcall.ll | 146 --------
6 files changed, 553 insertions(+), 172 deletions(-)
rename llvm/test/CodeGen/RISCV/{i128-divrem-libcall.ll => i128-divrem-libcall-rv32.ll} (56%)
create mode 100644 llvm/test/CodeGen/RISCV/i128-divrem-libcall-rv64.ll
create mode 100644 llvm/test/CodeGen/SystemZ/i128-divrem-libcall.ll
create mode 100644 llvm/test/CodeGen/X86/i128-divrem-libcall-x86-32.ll
create mode 100644 llvm/test/CodeGen/X86/i128-divrem-libcall-x86-64.ll
delete mode 100644 llvm/test/CodeGen/X86/i128-divrem-libcall.ll
diff --git a/llvm/test/CodeGen/RISCV/i128-divrem-libcall.ll b/llvm/test/CodeGen/RISCV/i128-divrem-libcall-rv32.ll
similarity index 56%
rename from llvm/test/CodeGen/RISCV/i128-divrem-libcall.ll
rename to llvm/test/CodeGen/RISCV/i128-divrem-libcall-rv32.ll
index f33d8e307bea1..6df5e0e5a46a8 100644
--- a/llvm/test/CodeGen/RISCV/i128-divrem-libcall.ll
+++ b/llvm/test/CodeGen/RISCV/i128-divrem-libcall-rv32.ll
@@ -1,23 +1,10 @@
-; RUN: llc < %s -mtriple=riscv64-linux-gnu | FileCheck %s --check-prefix=RV64
-; RUN: llc < %s -mtriple=riscv64-linux-gnu -mattr=+m | FileCheck %s --check-prefix=RV64
; RUN: llc < %s -mtriple=riscv32-linux-gnu | FileCheck %s --check-prefix=RV32I
; RUN: llc < %s -mtriple=riscv32-linux-gnu -mattr=+m | FileCheck %s --check-prefix=RV32M
-; RV64 uses fused libcalls. RV32 inline-expands, so only assert the absence of
-; libcalls there.
+; RV32 inline-expands i128 div/rem, so keep the checks focused on the absence
+; of libcalls instead of the whole expanded algorithm.
define void @sdivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
-; RV64-LABEL: sdivrem_i128:
-; RV64: mv a4, sp
-; RV64: call __divmodti4
-; RV64: ld a2, 0(sp)
-; RV64: ld a3, 8(sp)
-; RV64: sd a0, 0(s1)
-; RV64: sd a1, 8(s1)
-; RV64: sd a2, 0(s0)
-; RV64: sd a3, 8(s0)
-; RV64: ret
-;
; RV32I-LABEL: sdivrem_i128:
; RV32I-NOT: __divmodti4
; RV32I-NOT: __divti3
@@ -37,17 +24,6 @@ define void @sdivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
}
define void @udivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
-; RV64-LABEL: udivrem_i128:
-; RV64: mv a4, sp
-; RV64: call __udivmodti4
-; RV64: ld a2, 0(sp)
-; RV64: ld a3, 8(sp)
-; RV64: sd a0, 0(s1)
-; RV64: sd a1, 8(s1)
-; RV64: sd a2, 0(s0)
-; RV64: sd a3, 8(s0)
-; RV64: ret
-;
; RV32I-LABEL: udivrem_i128:
; RV32I-NOT: __udivmodti4
; RV32I-NOT: __udivti3
diff --git a/llvm/test/CodeGen/RISCV/i128-divrem-libcall-rv64.ll b/llvm/test/CodeGen/RISCV/i128-divrem-libcall-rv64.ll
new file mode 100644
index 0000000000000..c8201d869b9db
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/i128-divrem-libcall-rv64.ll
@@ -0,0 +1,87 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=riscv64-linux-gnu | FileCheck %s --check-prefix=RV64
+; RUN: llc < %s -mtriple=riscv64-linux-gnu -mattr=+m | FileCheck %s --check-prefix=RV64
+
+define void @sdivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
+; RV64-LABEL: sdivrem_i128:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -48
+; RV64-NEXT: .cfi_def_cfa_offset 48
+; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: mv a6, a4
+; RV64-NEXT: mv s0, a1
+; RV64-NEXT: mv s1, a0
+; RV64-NEXT: mv a4, sp
+; RV64-NEXT: mv a0, a2
+; RV64-NEXT: mv a1, a3
+; RV64-NEXT: mv a2, a6
+; RV64-NEXT: mv a3, a5
+; RV64-NEXT: call __divmodti4
+; RV64-NEXT: ld a2, 0(sp)
+; RV64-NEXT: ld a3, 8(sp)
+; RV64-NEXT: sd a0, 0(s1)
+; RV64-NEXT: sd a1, 8(s1)
+; RV64-NEXT: sd a2, 0(s0)
+; RV64-NEXT: sd a3, 8(s0)
+; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT: .cfi_restore ra
+; RV64-NEXT: .cfi_restore s0
+; RV64-NEXT: .cfi_restore s1
+; RV64-NEXT: addi sp, sp, 48
+; RV64-NEXT: .cfi_def_cfa_offset 0
+; RV64-NEXT: ret
+ %q = sdiv i128 %n, %d
+ %r = srem i128 %n, %d
+ store i128 %q, ptr %q_out
+ store i128 %r, ptr %r_out
+ ret void
+}
+
+define void @udivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
+; RV64-LABEL: udivrem_i128:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -48
+; RV64-NEXT: .cfi_def_cfa_offset 48
+; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: mv a6, a4
+; RV64-NEXT: mv s0, a1
+; RV64-NEXT: mv s1, a0
+; RV64-NEXT: mv a4, sp
+; RV64-NEXT: mv a0, a2
+; RV64-NEXT: mv a1, a3
+; RV64-NEXT: mv a2, a6
+; RV64-NEXT: mv a3, a5
+; RV64-NEXT: call __udivmodti4
+; RV64-NEXT: ld a2, 0(sp)
+; RV64-NEXT: ld a3, 8(sp)
+; RV64-NEXT: sd a0, 0(s1)
+; RV64-NEXT: sd a1, 8(s1)
+; RV64-NEXT: sd a2, 0(s0)
+; RV64-NEXT: sd a3, 8(s0)
+; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT: .cfi_restore ra
+; RV64-NEXT: .cfi_restore s0
+; RV64-NEXT: .cfi_restore s1
+; RV64-NEXT: addi sp, sp, 48
+; RV64-NEXT: .cfi_def_cfa_offset 0
+; RV64-NEXT: ret
+ %q = udiv i128 %n, %d
+ %r = urem i128 %n, %d
+ store i128 %q, ptr %q_out
+ store i128 %r, ptr %r_out
+ ret void
+}
diff --git a/llvm/test/CodeGen/SystemZ/i128-divrem-libcall.ll b/llvm/test/CodeGen/SystemZ/i128-divrem-libcall.ll
new file mode 100644
index 0000000000000..c1d88c679626c
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/i128-divrem-libcall.ll
@@ -0,0 +1,88 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=s390x-unknown-linux | FileCheck %s
+
+; SystemZ (s390x) lowers i128 div/rem pairs through fused libcalls.
+
+define void @sdivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
+; CHECK-LABEL: sdivrem_i128:
+; CHECK: # %bb.0:
+; CHECK-NEXT: stmg %r12, %r15, 96(%r15)
+; CHECK-NEXT: .cfi_offset %r12, -64
+; CHECK-NEXT: .cfi_offset %r13, -56
+; CHECK-NEXT: .cfi_offset %r14, -48
+; CHECK-NEXT: .cfi_offset %r15, -40
+; CHECK-NEXT: aghi %r15, -224
+; CHECK-NEXT: .cfi_def_cfa_offset 384
+; CHECK-NEXT: lgr %r13, %r3
+; CHECK-NEXT: lgr %r12, %r2
+; CHECK-NEXT: lg %r0, 8(%r5)
+; CHECK-NEXT: lg %r1, 0(%r5)
+; CHECK-NEXT: lg %r2, 8(%r4)
+; CHECK-NEXT: lg %r14, 0(%r4)
+; CHECK-NEXT: stg %r0, 168(%r15)
+; CHECK-NEXT: stg %r1, 160(%r15)
+; CHECK-NEXT: stg %r2, 184(%r15)
+; CHECK-NEXT: la %r2, 192(%r15)
+; CHECK-NEXT: la %r3, 176(%r15)
+; CHECK-NEXT: la %r4, 160(%r15)
+; CHECK-NEXT: la %r5, 208(%r15)
+; CHECK-NEXT: stg %r14, 176(%r15)
+; CHECK-NEXT: brasl %r14, __divmodti4@PLT
+; CHECK-NEXT: lg %r0, 200(%r15)
+; CHECK-NEXT: lg %r1, 192(%r15)
+; CHECK-NEXT: lg %r2, 216(%r15)
+; CHECK-NEXT: lg %r3, 208(%r15)
+; CHECK-NEXT: stg %r0, 8(%r12)
+; CHECK-NEXT: stg %r1, 0(%r12)
+; CHECK-NEXT: stg %r2, 8(%r13)
+; CHECK-NEXT: stg %r3, 0(%r13)
+; CHECK-NEXT: lmg %r12, %r15, 320(%r15)
+; CHECK-NEXT: br %r14
+ %q = sdiv i128 %n, %d
+ %r = srem i128 %n, %d
+ store i128 %q, ptr %q_out
+ store i128 %r, ptr %r_out
+ ret void
+}
+
+define void @udivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
+; CHECK-LABEL: udivrem_i128:
+; CHECK: # %bb.0:
+; CHECK-NEXT: stmg %r12, %r15, 96(%r15)
+; CHECK-NEXT: .cfi_offset %r12, -64
+; CHECK-NEXT: .cfi_offset %r13, -56
+; CHECK-NEXT: .cfi_offset %r14, -48
+; CHECK-NEXT: .cfi_offset %r15, -40
+; CHECK-NEXT: aghi %r15, -224
+; CHECK-NEXT: .cfi_def_cfa_offset 384
+; CHECK-NEXT: lgr %r13, %r3
+; CHECK-NEXT: lgr %r12, %r2
+; CHECK-NEXT: lg %r0, 8(%r5)
+; CHECK-NEXT: lg %r1, 0(%r5)
+; CHECK-NEXT: lg %r2, 8(%r4)
+; CHECK-NEXT: lg %r14, 0(%r4)
+; CHECK-NEXT: stg %r0, 168(%r15)
+; CHECK-NEXT: stg %r1, 160(%r15)
+; CHECK-NEXT: stg %r2, 184(%r15)
+; CHECK-NEXT: la %r2, 192(%r15)
+; CHECK-NEXT: la %r3, 176(%r15)
+; CHECK-NEXT: la %r4, 160(%r15)
+; CHECK-NEXT: la %r5, 208(%r15)
+; CHECK-NEXT: stg %r14, 176(%r15)
+; CHECK-NEXT: brasl %r14, __udivmodti4@PLT
+; CHECK-NEXT: lg %r0, 200(%r15)
+; CHECK-NEXT: lg %r1, 192(%r15)
+; CHECK-NEXT: lg %r2, 216(%r15)
+; CHECK-NEXT: lg %r3, 208(%r15)
+; CHECK-NEXT: stg %r0, 8(%r12)
+; CHECK-NEXT: stg %r1, 0(%r12)
+; CHECK-NEXT: stg %r2, 8(%r13)
+; CHECK-NEXT: stg %r3, 0(%r13)
+; CHECK-NEXT: lmg %r12, %r15, 320(%r15)
+; CHECK-NEXT: br %r14
+ %q = udiv i128 %n, %d
+ %r = urem i128 %n, %d
+ store i128 %q, ptr %q_out
+ store i128 %r, ptr %r_out
+ ret void
+}
diff --git a/llvm/test/CodeGen/X86/i128-divrem-libcall-x86-32.ll b/llvm/test/CodeGen/X86/i128-divrem-libcall-x86-32.ll
new file mode 100644
index 0000000000000..6863a45213e97
--- /dev/null
+++ b/llvm/test/CodeGen/X86/i128-divrem-libcall-x86-32.ll
@@ -0,0 +1,44 @@
+; RUN: llc < %s -mtriple=i386-linux-gnu | FileCheck %s --check-prefix=LINUX-X86
+; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefix=LINUX-X86
+; RUN: llc < %s -mtriple=i686-pc-windows-msvc | FileCheck %s --check-prefix=WIN32
+
+; The 32-bit X86 triples inline-expand i128 div/rem, so keep the checks
+; focused on the absence of libcalls.
+
+define void @sdivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
+; LINUX-X86-LABEL: sdivrem_i128:
+; LINUX-X86-NOT: __divmodti4
+; LINUX-X86-NOT: __divti3
+; LINUX-X86-NOT: __modti3
+; LINUX-X86: retl
+;
+; WIN32-LABEL: {{_?}}sdivrem_i128:
+; WIN32-NOT: __divmodti4
+; WIN32-NOT: __divti3
+; WIN32-NOT: __modti3
+; WIN32: retl
+ %q = sdiv i128 %n, %d
+ %r = srem i128 %n, %d
+ store i128 %q, ptr %q_out
+ store i128 %r, ptr %r_out
+ ret void
+}
+
+define void @udivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
+; LINUX-X86-LABEL: udivrem_i128:
+; LINUX-X86-NOT: __udivmodti4
+; LINUX-X86-NOT: __udivti3
+; LINUX-X86-NOT: __umodti3
+; LINUX-X86: retl
+;
+; WIN32-LABEL: {{_?}}udivrem_i128:
+; WIN32-NOT: __udivmodti4
+; WIN32-NOT: __udivti3
+; WIN32-NOT: __umodti3
+; WIN32: retl
+ %q = udiv i128 %n, %d
+ %r = urem i128 %n, %d
+ store i128 %q, ptr %q_out
+ store i128 %r, ptr %r_out
+ ret void
+}
diff --git a/llvm/test/CodeGen/X86/i128-divrem-libcall-x86-64.ll b/llvm/test/CodeGen/X86/i128-divrem-libcall-x86-64.ll
new file mode 100644
index 0000000000000..8851ad952a995
--- /dev/null
+++ b/llvm/test/CodeGen/X86/i128-divrem-libcall-x86-64.ll
@@ -0,0 +1,332 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefix=LINUX-X64
+; RUN: llc < %s -mtriple=x86_64-linux-gnux32 | FileCheck %s --check-prefix=LINUX-X32
+; RUN: llc < %s -mtriple=x86_64-apple-macosx | FileCheck %s --check-prefix=DARWIN-X64
+; RUN: llc < %s -mtriple=x86_64-w64-mingw32 | FileCheck %s --check-prefix=MINGW-X64
+; RUN: llc < %s -mtriple=x86_64-pc-windows-msvc | FileCheck %s --check-prefix=WIN64
+
+define void @sdivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
+; LINUX-X64-LABEL: sdivrem_i128:
+; LINUX-X64: # %bb.0:
+; LINUX-X64-NEXT: pushq %r14
+; LINUX-X64-NEXT: .cfi_def_cfa_offset 16
+; LINUX-X64-NEXT: pushq %rbx
+; LINUX-X64-NEXT: .cfi_def_cfa_offset 24
+; LINUX-X64-NEXT: subq $24, %rsp
+; LINUX-X64-NEXT: .cfi_def_cfa_offset 48
+; LINUX-X64-NEXT: .cfi_offset %rbx, -24
+; LINUX-X64-NEXT: .cfi_offset %r14, -16
+; LINUX-X64-NEXT: movq %r8, %rax
+; LINUX-X64-NEXT: movq %rsi, %rbx
+; LINUX-X64-NEXT: movq %rdi, %r14
+; LINUX-X64-NEXT: movq %rsp, %r8
+; LINUX-X64-NEXT: movq %rdx, %rdi
+; LINUX-X64-NEXT: movq %rcx, %rsi
+; LINUX-X64-NEXT: movq %rax, %rdx
+; LINUX-X64-NEXT: movq %r9, %rcx
+; LINUX-X64-NEXT: callq __divmodti4@PLT
+; LINUX-X64-NEXT: movq (%rsp), %rcx
+; LINUX-X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi
+; LINUX-X64-NEXT: movq %rax, (%r14)
+; LINUX-X64-NEXT: movq %rdx, 8(%r14)
+; LINUX-X64-NEXT: movq %rcx, (%rbx)
+; LINUX-X64-NEXT: movq %rsi, 8(%rbx)
+; LINUX-X64-NEXT: addq $24, %rsp
+; LINUX-X64-NEXT: .cfi_def_cfa_offset 24
+; LINUX-X64-NEXT: popq %rbx
+; LINUX-X64-NEXT: .cfi_def_cfa_offset 16
+; LINUX-X64-NEXT: popq %r14
+; LINUX-X64-NEXT: .cfi_def_cfa_offset 8
+; LINUX-X64-NEXT: retq
+;
+; LINUX-X32-LABEL: sdivrem_i128:
+; LINUX-X32: # %bb.0:
+; LINUX-X32-NEXT: pushq %r14
+; LINUX-X32-NEXT: .cfi_def_cfa_offset 16
+; LINUX-X32-NEXT: pushq %rbx
+; LINUX-X32-NEXT: .cfi_def_cfa_offset 24
+; LINUX-X32-NEXT: subl $24, %esp
+; LINUX-X32-NEXT: .cfi_def_cfa_offset 48
+; LINUX-X32-NEXT: .cfi_offset %rbx, -24
+; LINUX-X32-NEXT: .cfi_offset %r14, -16
+; LINUX-X32-NEXT: movq %r8, %rax
+; LINUX-X32-NEXT: movq %rsi, %rbx
+; LINUX-X32-NEXT: movq %rdi, %r14
+; LINUX-X32-NEXT: movl %esp, %r8d
+; LINUX-X32-NEXT: movq %rdx, %rdi
+; LINUX-X32-NEXT: movq %rcx, %rsi
+; LINUX-X32-NEXT: movq %rax, %rdx
+; LINUX-X32-NEXT: movq %r9, %rcx
+; LINUX-X32-NEXT: callq __divmodti4@PLT
+; LINUX-X32-NEXT: movq (%esp), %rcx
+; LINUX-X32-NEXT: movq {{[0-9]+}}(%esp), %rsi
+; LINUX-X32-NEXT: movq %rax, (%r14d)
+; LINUX-X32-NEXT: movq %rdx, 8(%r14d)
+; LINUX-X32-NEXT: movq %rcx, (%ebx)
+; LINUX-X32-NEXT: movq %rsi, 8(%ebx)
+; LINUX-X32-NEXT: addl $24, %esp
+; LINUX-X32-NEXT: .cfi_def_cfa_offset 24
+; LINUX-X32-NEXT: popq %rbx
+; LINUX-X32-NEXT: .cfi_def_cfa_offset 16
+; LINUX-X32-NEXT: popq %r14
+; LINUX-X32-NEXT: .cfi_def_cfa_offset 8
+; LINUX-X32-NEXT: retq
+;
+; DARWIN-X64-LABEL: sdivrem_i128:
+; DARWIN-X64: ## %bb.0:
+; DARWIN-X64-NEXT: pushq %r14
+; DARWIN-X64-NEXT: .cfi_def_cfa_offset 16
+; DARWIN-X64-NEXT: pushq %rbx
+; DARWIN-X64-NEXT: .cfi_def_cfa_offset 24
+; DARWIN-X64-NEXT: subq $24, %rsp
+; DARWIN-X64-NEXT: .cfi_def_cfa_offset 48
+; DARWIN-X64-NEXT: .cfi_offset %rbx, -24
+; DARWIN-X64-NEXT: .cfi_offset %r14, -16
+; DARWIN-X64-NEXT: movq %r8, %rax
+; DARWIN-X64-NEXT: movq %rsi, %rbx
+; DARWIN-X64-NEXT: movq %rdi, %r14
+; DARWIN-X64-NEXT: movq %rsp, %r8
+; DARWIN-X64-NEXT: movq %rdx, %rdi
+; DARWIN-X64-NEXT: movq %rcx, %rsi
+; DARWIN-X64-NEXT: movq %rax, %rdx
+; DARWIN-X64-NEXT: movq %r9, %rcx
+; DARWIN-X64-NEXT: callq ___divmodti4
+; DARWIN-X64-NEXT: movq (%rsp), %rcx
+; DARWIN-X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi
+; DARWIN-X64-NEXT: movq %rax, (%r14)
+; DARWIN-X64-NEXT: movq %rdx, 8(%r14)
+; DARWIN-X64-NEXT: movq %rcx, (%rbx)
+; DARWIN-X64-NEXT: movq %rsi, 8(%rbx)
+; DARWIN-X64-NEXT: addq $24, %rsp
+; DARWIN-X64-NEXT: popq %rbx
+; DARWIN-X64-NEXT: popq %r14
+; DARWIN-X64-NEXT: retq
+;
+; MINGW-X64-LABEL: sdivrem_i128:
+; MINGW-X64: # %bb.0:
+; MINGW-X64-NEXT: pushq %rsi
+; MINGW-X64-NEXT: .seh_pushreg %rsi
+; MINGW-X64-NEXT: pushq %rdi
+; MINGW-X64-NEXT: .seh_pushreg %rdi
+; MINGW-X64-NEXT: subq $88, %rsp
+; MINGW-X64-NEXT: .seh_stackalloc 88
+; MINGW-X64-NEXT: .seh_endprologue
+; MINGW-X64-NEXT: movq %rdx, %rsi
+; MINGW-X64-NEXT: movq %rcx, %rdi
+; MINGW-X64-NEXT: movaps {{[0-9]+}}(%rsp), %xmm0
+; MINGW-X64-NEXT: movq %r9, {{[0-9]+}}(%rsp)
+; MINGW-X64-NEXT: movq %r8, {{[0-9]+}}(%rsp)
+; MINGW-X64-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
+; MINGW-X64-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; MINGW-X64-NEXT: leaq {{[0-9]+}}(%rsp), %rdx
+; MINGW-X64-NEXT: leaq {{[0-9]+}}(%rsp), %r8
+; MINGW-X64-NEXT: callq __divmodti4
+; MINGW-X64-NEXT: movaps {{[0-9]+}}(%rsp), %xmm1
+; MINGW-X64-NEXT: movaps %xmm0, (%rdi)
+; MINGW-X64-NEXT: movaps %xmm1, (%rsi)
+; MINGW-X64-NEXT: .seh_startepilogue
+; MINGW-X64-NEXT: addq $88, %rsp
+; MINGW-X64-NEXT: popq %rdi
+; MINGW-X64-NEXT: popq %rsi
+; MINGW-X64-NEXT: .seh_endepilogue
+; MINGW-X64-NEXT: retq
+; MINGW-X64-NEXT: .seh_endproc
+;
+; WIN64-LABEL: sdivrem_i128:
+; WIN64: # %bb.0:
+; WIN64-NEXT: pushq %rsi
+; WIN64-NEXT: .seh_pushreg %rsi
+; WIN64-NEXT: pushq %rdi
+; WIN64-NEXT: .seh_pushreg %rdi
+; WIN64-NEXT: subq $88, %rsp
+; WIN64-NEXT: .seh_stackalloc 88
+; WIN64-NEXT: .seh_endprologue
+; WIN64-NEXT: movq %rdx, %rsi
+; WIN64-NEXT: movq %rcx, %rdi
+; WIN64-NEXT: movaps {{[0-9]+}}(%rsp), %xmm0
+; WIN64-NEXT: movq %r9, {{[0-9]+}}(%rsp)
+; WIN64-NEXT: movq %r8, {{[0-9]+}}(%rsp)
+; WIN64-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
+; WIN64-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; WIN64-NEXT: leaq {{[0-9]+}}(%rsp), %rdx
+; WIN64-NEXT: leaq {{[0-9]+}}(%rsp), %r8
+; WIN64-NEXT: callq __divmodti4
+; WIN64-NEXT: movaps {{[0-9]+}}(%rsp), %xmm1
+; WIN64-NEXT: movaps %xmm0, (%rdi)
+; WIN64-NEXT: movaps %xmm1, (%rsi)
+; WIN64-NEXT: .seh_startepilogue
+; WIN64-NEXT: addq $88, %rsp
+; WIN64-NEXT: popq %rdi
+; WIN64-NEXT: popq %rsi
+; WIN64-NEXT: .seh_endepilogue
+; WIN64-NEXT: retq
+; WIN64-NEXT: .seh_endproc
+ %q = sdiv i128 %n, %d
+ %r = srem i128 %n, %d
+ store i128 %q, ptr %q_out
+ store i128 %r, ptr %r_out
+ ret void
+}
+
+define void @udivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
+; LINUX-X64-LABEL: udivrem_i128:
+; LINUX-X64: # %bb.0:
+; LINUX-X64-NEXT: pushq %r14
+; LINUX-X64-NEXT: .cfi_def_cfa_offset 16
+; LINUX-X64-NEXT: pushq %rbx
+; LINUX-X64-NEXT: .cfi_def_cfa_offset 24
+; LINUX-X64-NEXT: subq $24, %rsp
+; LINUX-X64-NEXT: .cfi_def_cfa_offset 48
+; LINUX-X64-NEXT: .cfi_offset %rbx, -24
+; LINUX-X64-NEXT: .cfi_offset %r14, -16
+; LINUX-X64-NEXT: movq %r8, %rax
+; LINUX-X64-NEXT: movq %rsi, %rbx
+; LINUX-X64-NEXT: movq %rdi, %r14
+; LINUX-X64-NEXT: movq %rsp, %r8
+; LINUX-X64-NEXT: movq %rdx, %rdi
+; LINUX-X64-NEXT: movq %rcx, %rsi
+; LINUX-X64-NEXT: movq %rax, %rdx
+; LINUX-X64-NEXT: movq %r9, %rcx
+; LINUX-X64-NEXT: callq __udivmodti4@PLT
+; LINUX-X64-NEXT: movq (%rsp), %rcx
+; LINUX-X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi
+; LINUX-X64-NEXT: movq %rax, (%r14)
+; LINUX-X64-NEXT: movq %rdx, 8(%r14)
+; LINUX-X64-NEXT: movq %rcx, (%rbx)
+; LINUX-X64-NEXT: movq %rsi, 8(%rbx)
+; LINUX-X64-NEXT: addq $24, %rsp
+; LINUX-X64-NEXT: .cfi_def_cfa_offset 24
+; LINUX-X64-NEXT: popq %rbx
+; LINUX-X64-NEXT: .cfi_def_cfa_offset 16
+; LINUX-X64-NEXT: popq %r14
+; LINUX-X64-NEXT: .cfi_def_cfa_offset 8
+; LINUX-X64-NEXT: retq
+;
+; LINUX-X32-LABEL: udivrem_i128:
+; LINUX-X32: # %bb.0:
+; LINUX-X32-NEXT: pushq %r14
+; LINUX-X32-NEXT: .cfi_def_cfa_offset 16
+; LINUX-X32-NEXT: pushq %rbx
+; LINUX-X32-NEXT: .cfi_def_cfa_offset 24
+; LINUX-X32-NEXT: subl $24, %esp
+; LINUX-X32-NEXT: .cfi_def_cfa_offset 48
+; LINUX-X32-NEXT: .cfi_offset %rbx, -24
+; LINUX-X32-NEXT: .cfi_offset %r14, -16
+; LINUX-X32-NEXT: movq %r8, %rax
+; LINUX-X32-NEXT: movq %rsi, %rbx
+; LINUX-X32-NEXT: movq %rdi, %r14
+; LINUX-X32-NEXT: movl %esp, %r8d
+; LINUX-X32-NEXT: movq %rdx, %rdi
+; LINUX-X32-NEXT: movq %rcx, %rsi
+; LINUX-X32-NEXT: movq %rax, %rdx
+; LINUX-X32-NEXT: movq %r9, %rcx
+; LINUX-X32-NEXT: callq __udivmodti4@PLT
+; LINUX-X32-NEXT: movq (%esp), %rcx
+; LINUX-X32-NEXT: movq {{[0-9]+}}(%esp), %rsi
+; LINUX-X32-NEXT: movq %rax, (%r14d)
+; LINUX-X32-NEXT: movq %rdx, 8(%r14d)
+; LINUX-X32-NEXT: movq %rcx, (%ebx)
+; LINUX-X32-NEXT: movq %rsi, 8(%ebx)
+; LINUX-X32-NEXT: addl $24, %esp
+; LINUX-X32-NEXT: .cfi_def_cfa_offset 24
+; LINUX-X32-NEXT: popq %rbx
+; LINUX-X32-NEXT: .cfi_def_cfa_offset 16
+; LINUX-X32-NEXT: popq %r14
+; LINUX-X32-NEXT: .cfi_def_cfa_offset 8
+; LINUX-X32-NEXT: retq
+;
+; DARWIN-X64-LABEL: udivrem_i128:
+; DARWIN-X64: ## %bb.0:
+; DARWIN-X64-NEXT: pushq %r14
+; DARWIN-X64-NEXT: .cfi_def_cfa_offset 16
+; DARWIN-X64-NEXT: pushq %rbx
+; DARWIN-X64-NEXT: .cfi_def_cfa_offset 24
+; DARWIN-X64-NEXT: subq $24, %rsp
+; DARWIN-X64-NEXT: .cfi_def_cfa_offset 48
+; DARWIN-X64-NEXT: .cfi_offset %rbx, -24
+; DARWIN-X64-NEXT: .cfi_offset %r14, -16
+; DARWIN-X64-NEXT: movq %r8, %rax
+; DARWIN-X64-NEXT: movq %rsi, %rbx
+; DARWIN-X64-NEXT: movq %rdi, %r14
+; DARWIN-X64-NEXT: movq %rsp, %r8
+; DARWIN-X64-NEXT: movq %rdx, %rdi
+; DARWIN-X64-NEXT: movq %rcx, %rsi
+; DARWIN-X64-NEXT: movq %rax, %rdx
+; DARWIN-X64-NEXT: movq %r9, %rcx
+; DARWIN-X64-NEXT: callq ___udivmodti4
+; DARWIN-X64-NEXT: movq (%rsp), %rcx
+; DARWIN-X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi
+; DARWIN-X64-NEXT: movq %rax, (%r14)
+; DARWIN-X64-NEXT: movq %rdx, 8(%r14)
+; DARWIN-X64-NEXT: movq %rcx, (%rbx)
+; DARWIN-X64-NEXT: movq %rsi, 8(%rbx)
+; DARWIN-X64-NEXT: addq $24, %rsp
+; DARWIN-X64-NEXT: popq %rbx
+; DARWIN-X64-NEXT: popq %r14
+; DARWIN-X64-NEXT: retq
+;
+; MINGW-X64-LABEL: udivrem_i128:
+; MINGW-X64: # %bb.0:
+; MINGW-X64-NEXT: pushq %rsi
+; MINGW-X64-NEXT: .seh_pushreg %rsi
+; MINGW-X64-NEXT: pushq %rdi
+; MINGW-X64-NEXT: .seh_pushreg %rdi
+; MINGW-X64-NEXT: subq $88, %rsp
+; MINGW-X64-NEXT: .seh_stackalloc 88
+; MINGW-X64-NEXT: .seh_endprologue
+; MINGW-X64-NEXT: movq %rdx, %rsi
+; MINGW-X64-NEXT: movq %rcx, %rdi
+; MINGW-X64-NEXT: movaps {{[0-9]+}}(%rsp), %xmm0
+; MINGW-X64-NEXT: movq %r9, {{[0-9]+}}(%rsp)
+; MINGW-X64-NEXT: movq %r8, {{[0-9]+}}(%rsp)
+; MINGW-X64-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
+; MINGW-X64-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; MINGW-X64-NEXT: leaq {{[0-9]+}}(%rsp), %rdx
+; MINGW-X64-NEXT: leaq {{[0-9]+}}(%rsp), %r8
+; MINGW-X64-NEXT: callq __udivmodti4
+; MINGW-X64-NEXT: movaps {{[0-9]+}}(%rsp), %xmm1
+; MINGW-X64-NEXT: movaps %xmm0, (%rdi)
+; MINGW-X64-NEXT: movaps %xmm1, (%rsi)
+; MINGW-X64-NEXT: .seh_startepilogue
+; MINGW-X64-NEXT: addq $88, %rsp
+; MINGW-X64-NEXT: popq %rdi
+; MINGW-X64-NEXT: popq %rsi
+; MINGW-X64-NEXT: .seh_endepilogue
+; MINGW-X64-NEXT: retq
+; MINGW-X64-NEXT: .seh_endproc
+;
+; WIN64-LABEL: udivrem_i128:
+; WIN64: # %bb.0:
+; WIN64-NEXT: pushq %rsi
+; WIN64-NEXT: .seh_pushreg %rsi
+; WIN64-NEXT: pushq %rdi
+; WIN64-NEXT: .seh_pushreg %rdi
+; WIN64-NEXT: subq $88, %rsp
+; WIN64-NEXT: .seh_stackalloc 88
+; WIN64-NEXT: .seh_endprologue
+; WIN64-NEXT: movq %rdx, %rsi
+; WIN64-NEXT: movq %rcx, %rdi
+; WIN64-NEXT: movaps {{[0-9]+}}(%rsp), %xmm0
+; WIN64-NEXT: movq %r9, {{[0-9]+}}(%rsp)
+; WIN64-NEXT: movq %r8, {{[0-9]+}}(%rsp)
+; WIN64-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
+; WIN64-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; WIN64-NEXT: leaq {{[0-9]+}}(%rsp), %rdx
+; WIN64-NEXT: leaq {{[0-9]+}}(%rsp), %r8
+; WIN64-NEXT: callq __udivmodti4
+; WIN64-NEXT: movaps {{[0-9]+}}(%rsp), %xmm1
+; WIN64-NEXT: movaps %xmm0, (%rdi)
+; WIN64-NEXT: movaps %xmm1, (%rsi)
+; WIN64-NEXT: .seh_startepilogue
+; WIN64-NEXT: addq $88, %rsp
+; WIN64-NEXT: popq %rdi
+; WIN64-NEXT: popq %rsi
+; WIN64-NEXT: .seh_endepilogue
+; WIN64-NEXT: retq
+; WIN64-NEXT: .seh_endproc
+ %q = udiv i128 %n, %d
+ %r = urem i128 %n, %d
+ store i128 %q, ptr %q_out
+ store i128 %r, ptr %r_out
+ ret void
+}
diff --git a/llvm/test/CodeGen/X86/i128-divrem-libcall.ll b/llvm/test/CodeGen/X86/i128-divrem-libcall.ll
deleted file mode 100644
index c2127a13c4c07..0000000000000
--- a/llvm/test/CodeGen/X86/i128-divrem-libcall.ll
+++ /dev/null
@@ -1,146 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefix=LINUX-X64
-; RUN: llc < %s -mtriple=x86_64-linux-gnux32 | FileCheck %s --check-prefix=LINUX-X32
-; RUN: llc < %s -mtriple=x86_64-apple-macosx | FileCheck %s --check-prefix=DARWIN-X64
-; RUN: llc < %s -mtriple=x86_64-w64-mingw32 | FileCheck %s --check-prefix=MINGW-X64
-; RUN: llc < %s -mtriple=x86_64-pc-windows-msvc | FileCheck %s --check-prefix=WIN64
-; RUN: llc < %s -mtriple=i386-linux-gnu | FileCheck %s --check-prefix=LINUX-X86
-; RUN: llc < %s -mtriple=i686-linux-gnu | FileCheck %s --check-prefix=LINUX-X86
-; RUN: llc < %s -mtriple=i686-pc-windows-msvc | FileCheck %s --check-prefix=WIN32
-
-; The 64-bit triples use fused libcalls with ABI-specific calling conventions.
-; The 32-bit triples inline-expand i128 div/rem, so only assert the absence of
-; libcalls there.
-
-define void @sdivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
-; LINUX-X64-LABEL: sdivrem_i128:
-; LINUX-X64: movq %rsp, %r8
-; LINUX-X64: callq __divmodti4@PLT
-; LINUX-X64: movq (%rsp), %rcx
-; LINUX-X64: movq {{[0-9]+}}(%rsp), %rsi
-; LINUX-X64: movq %rax, (%r14)
-; LINUX-X64: movq %rdx, 8(%r14)
-; LINUX-X64: movq %rcx, (%rbx)
-; LINUX-X64: movq %rsi, 8(%rbx)
-;
-; LINUX-X32-LABEL: sdivrem_i128:
-; LINUX-X32: movl %esp, %r8d
-; LINUX-X32: callq __divmodti4@PLT
-; LINUX-X32: movq (%esp), %rcx
-; LINUX-X32: movq {{[0-9]+}}(%esp), %rsi
-; LINUX-X32: movq %rax, (%r14d)
-; LINUX-X32: movq %rdx, 8(%r14d)
-; LINUX-X32: movq %rcx, (%ebx)
-; LINUX-X32: movq %rsi, 8(%ebx)
-;
-; DARWIN-X64-LABEL: sdivrem_i128:
-; DARWIN-X64: movq %rsp, %r8
-; DARWIN-X64: callq ___divmodti4
-; DARWIN-X64: movq (%rsp), %rcx
-; DARWIN-X64: movq {{[0-9]+}}(%rsp), %rsi
-; DARWIN-X64: movq %rax, (%r14)
-; DARWIN-X64: movq %rdx, 8(%r14)
-; DARWIN-X64: movq %rcx, (%rbx)
-; DARWIN-X64: movq %rsi, 8(%rbx)
-;
-; MINGW-X64-LABEL: sdivrem_i128:
-; MINGW-X64: leaq {{[0-9]+}}(%rsp), %rcx
-; MINGW-X64: leaq {{[0-9]+}}(%rsp), %rdx
-; MINGW-X64: leaq {{[0-9]+}}(%rsp), %r8
-; MINGW-X64: callq __divmodti4
-; MINGW-X64: movaps {{[0-9]+}}(%rsp), %xmm1
-; MINGW-X64: movaps %xmm0, (%rdi)
-; MINGW-X64: movaps %xmm1, (%rsi)
-;
-; WIN64-LABEL: sdivrem_i128:
-; WIN64: leaq {{[0-9]+}}(%rsp), %rcx
-; WIN64: leaq {{[0-9]+}}(%rsp), %rdx
-; WIN64: leaq {{[0-9]+}}(%rsp), %r8
-; WIN64: callq __divmodti4
-; WIN64: movaps {{[0-9]+}}(%rsp), %xmm1
-; WIN64: movaps %xmm0, (%rdi)
-; WIN64: movaps %xmm1, (%rsi)
-;
-; LINUX-X86-LABEL: sdivrem_i128:
-; LINUX-X86-NOT: __divmodti4
-; LINUX-X86-NOT: __divti3
-; LINUX-X86-NOT: __modti3
-; LINUX-X86: retl
-;
-; WIN32-LABEL: {{_?}}sdivrem_i128:
-; WIN32-NOT: __divmodti4
-; WIN32-NOT: __divti3
-; WIN32-NOT: __modti3
-; WIN32: retl
- %q = sdiv i128 %n, %d
- %r = srem i128 %n, %d
- store i128 %q, ptr %q_out
- store i128 %r, ptr %r_out
- ret void
-}
-
-define void @udivrem_i128(ptr %q_out, ptr %r_out, i128 %n, i128 %d) {
-; LINUX-X64-LABEL: udivrem_i128:
-; LINUX-X64: movq %rsp, %r8
-; LINUX-X64: callq __udivmodti4@PLT
-; LINUX-X64: movq (%rsp), %rcx
-; LINUX-X64: movq {{[0-9]+}}(%rsp), %rsi
-; LINUX-X64: movq %rax, (%r14)
-; LINUX-X64: movq %rdx, 8(%r14)
-; LINUX-X64: movq %rcx, (%rbx)
-; LINUX-X64: movq %rsi, 8(%rbx)
-;
-; LINUX-X32-LABEL: udivrem_i128:
-; LINUX-X32: movl %esp, %r8d
-; LINUX-X32: callq __udivmodti4@PLT
-; LINUX-X32: movq (%esp), %rcx
-; LINUX-X32: movq {{[0-9]+}}(%esp), %rsi
-; LINUX-X32: movq %rax, (%r14d)
-; LINUX-X32: movq %rdx, 8(%r14d)
-; LINUX-X32: movq %rcx, (%ebx)
-; LINUX-X32: movq %rsi, 8(%ebx)
-;
-; DARWIN-X64-LABEL: udivrem_i128:
-; DARWIN-X64: movq %rsp, %r8
-; DARWIN-X64: callq ___udivmodti4
-; DARWIN-X64: movq (%rsp), %rcx
-; DARWIN-X64: movq {{[0-9]+}}(%rsp), %rsi
-; DARWIN-X64: movq %rax, (%r14)
-; DARWIN-X64: movq %rdx, 8(%r14)
-; DARWIN-X64: movq %rcx, (%rbx)
-; DARWIN-X64: movq %rsi, 8(%rbx)
-;
-; MINGW-X64-LABEL: udivrem_i128:
-; MINGW-X64: leaq {{[0-9]+}}(%rsp), %rcx
-; MINGW-X64: leaq {{[0-9]+}}(%rsp), %rdx
-; MINGW-X64: leaq {{[0-9]+}}(%rsp), %r8
-; MINGW-X64: callq __udivmodti4
-; MINGW-X64: movaps {{[0-9]+}}(%rsp), %xmm1
-; MINGW-X64: movaps %xmm0, (%rdi)
-; MINGW-X64: movaps %xmm1, (%rsi)
-;
-; WIN64-LABEL: udivrem_i128:
-; WIN64: leaq {{[0-9]+}}(%rsp), %rcx
-; WIN64: leaq {{[0-9]+}}(%rsp), %rdx
-; WIN64: leaq {{[0-9]+}}(%rsp), %r8
-; WIN64: callq __udivmodti4
-; WIN64: movaps {{[0-9]+}}(%rsp), %xmm1
-; WIN64: movaps %xmm0, (%rdi)
-; WIN64: movaps %xmm1, (%rsi)
-;
-; LINUX-X86-LABEL: udivrem_i128:
-; LINUX-X86-NOT: __udivmodti4
-; LINUX-X86-NOT: __udivti3
-; LINUX-X86-NOT: __umodti3
-; LINUX-X86: retl
-;
-; WIN32-LABEL: {{_?}}udivrem_i128:
-; WIN32-NOT: __udivmodti4
-; WIN32-NOT: __udivti3
-; WIN32-NOT: __umodti3
-; WIN32: retl
- %q = udiv i128 %n, %d
- %r = urem i128 %n, %d
- store i128 %q, ptr %q_out
- store i128 %r, ptr %r_out
- ret void
-}
More information about the llvm-commits
mailing list