[llvm] 1499880 - [RISCV] Expand divisions larger than 64 bits on RV32. (#163688)

via llvm-commits llvm-commits at lists.llvm.org
Thu Oct 16 08:42:48 PDT 2025


Author: Craig Topper
Date: 2025-10-16T08:42:43-07:00
New Revision: 1499880a5a0295bccf8c6dc47950cb0c02a0e54f

URL: https://github.com/llvm/llvm-project/commit/1499880a5a0295bccf8c6dc47950cb0c02a0e54f
DIFF: https://github.com/llvm/llvm-project/commit/1499880a5a0295bccf8c6dc47950cb0c02a0e54f.diff

LOG: [RISCV] Expand divisions larger than 64 bits on RV32. (#163688)

The __(u)divti3 and __(u)modti3 functions don't exist in libgcc for RV32.
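
For illustration (mirroring the udiv_i128 test updated below, not part of the commit itself), an i128 division like the following used to lower to a __udivti3 libcall on both riscv32 and riscv64; after this change riscv32 expands it inline, since the libcall is unavailable there, while riscv64 still calls __udivti3:

    ; illustrative IR only; see llvm/test/CodeGen/RISCV/idiv_large.ll in the diff
    define i128 @udiv_i128(i128 %x, i128 %y) nounwind {
      %res = udiv i128 %x, %y
      ret i128 %res
    }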

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/idiv_large.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 7123a2d706787..eb875583ffca4 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1672,6 +1672,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
   if (Subtarget.useRVVForFixedLengthVectors())
     setTargetDAGCombine(ISD::BITCAST);
 
+  setMaxDivRemBitWidthSupported(Subtarget.is64Bit() ? 128 : 64);
+
   // Disable strict node mutation.
   IsStrictFPEnabled = true;
   EnableExtLdPromotion = true;

diff --git a/llvm/test/CodeGen/RISCV/idiv_large.ll b/llvm/test/CodeGen/RISCV/idiv_large.ll
index 9937627962208..d7b00f61a50b9 100644
--- a/llvm/test/CodeGen/RISCV/idiv_large.ll
+++ b/llvm/test/CodeGen/RISCV/idiv_large.ll
@@ -1,16 +1,2315 @@
-; RUN: llc -mtriple=riscv32 < %s | FileCheck %s
-; RUN: llc -mtriple=riscv64 < %s | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mtriple=riscv32 < %s | FileCheck %s --check-prefix=RV32
+; RUN: llc -mtriple=riscv64 < %s | FileCheck %s --check-prefix=RV64
+
+define i64 @udiv_i64(i64 %x, i64 %y) nounwind {
+; RV32-LABEL: udiv_i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    call __udivdi3
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: udiv_i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    tail __udivdi3
+  %res = udiv i64 %x, %y
+  ret i64 %res
+}
+
+define i65 @udiv_i65(i65 %x, i65 %y) nounwind {
+; RV32-LABEL: udiv_i65:
+; RV32:       # %bb.0: # %_udiv-special-cases
+; RV32-NEXT:    lw a3, 0(a2)
+; RV32-NEXT:    lw a4, 4(a2)
+; RV32-NEXT:    lw t1, 8(a2)
+; RV32-NEXT:    lui a2, 349525
+; RV32-NEXT:    lui a5, 209715
+; RV32-NEXT:    lui a6, 61681
+; RV32-NEXT:    addi t0, a2, 1365
+; RV32-NEXT:    addi a7, a5, 819
+; RV32-NEXT:    addi a6, a6, -241
+; RV32-NEXT:    srli a2, a4, 1
+; RV32-NEXT:    slli a5, t1, 31
+; RV32-NEXT:    slli t3, a4, 31
+; RV32-NEXT:    or t2, a5, a2
+; RV32-NEXT:    srli a2, a3, 1
+; RV32-NEXT:    or t4, a2, t3
+; RV32-NEXT:    bnez t2, .LBB1_2
+; RV32-NEXT:  # %bb.1: # %_udiv-special-cases
+; RV32-NEXT:    srli a2, t4, 1
+; RV32-NEXT:    or a2, t4, a2
+; RV32-NEXT:    srli a5, a2, 2
+; RV32-NEXT:    or a2, a2, a5
+; RV32-NEXT:    srli a5, a2, 4
+; RV32-NEXT:    or a2, a2, a5
+; RV32-NEXT:    srli a5, a2, 8
+; RV32-NEXT:    or a2, a2, a5
+; RV32-NEXT:    srli a5, a2, 16
+; RV32-NEXT:    or a2, a2, a5
+; RV32-NEXT:    not a2, a2
+; RV32-NEXT:    srli a5, a2, 1
+; RV32-NEXT:    and a5, a5, t0
+; RV32-NEXT:    sub a2, a2, a5
+; RV32-NEXT:    and a5, a2, a7
+; RV32-NEXT:    srli a2, a2, 2
+; RV32-NEXT:    and a2, a2, a7
+; RV32-NEXT:    add a2, a5, a2
+; RV32-NEXT:    srli a5, a2, 4
+; RV32-NEXT:    add a2, a2, a5
+; RV32-NEXT:    and a2, a2, a6
+; RV32-NEXT:    slli a5, a2, 8
+; RV32-NEXT:    add a2, a2, a5
+; RV32-NEXT:    slli a5, a2, 16
+; RV32-NEXT:    add a2, a2, a5
+; RV32-NEXT:    srli a2, a2, 24
+; RV32-NEXT:    addi t3, a2, 32
+; RV32-NEXT:    j .LBB1_3
+; RV32-NEXT:  .LBB1_2:
+; RV32-NEXT:    srli a2, t2, 1
+; RV32-NEXT:    or a2, t2, a2
+; RV32-NEXT:    srli a5, a2, 2
+; RV32-NEXT:    or a2, a2, a5
+; RV32-NEXT:    srli a5, a2, 4
+; RV32-NEXT:    or a2, a2, a5
+; RV32-NEXT:    srli a5, a2, 8
+; RV32-NEXT:    or a2, a2, a5
+; RV32-NEXT:    srli a5, a2, 16
+; RV32-NEXT:    or a2, a2, a5
+; RV32-NEXT:    not a2, a2
+; RV32-NEXT:    srli a5, a2, 1
+; RV32-NEXT:    and a5, a5, t0
+; RV32-NEXT:    sub a2, a2, a5
+; RV32-NEXT:    and a5, a2, a7
+; RV32-NEXT:    srli a2, a2, 2
+; RV32-NEXT:    and a2, a2, a7
+; RV32-NEXT:    add a2, a5, a2
+; RV32-NEXT:    srli a5, a2, 4
+; RV32-NEXT:    add a2, a2, a5
+; RV32-NEXT:    and a2, a2, a6
+; RV32-NEXT:    slli a5, a2, 8
+; RV32-NEXT:    add a2, a2, a5
+; RV32-NEXT:    slli a5, a2, 16
+; RV32-NEXT:    add a2, a2, a5
+; RV32-NEXT:    srli t3, a2, 24
+; RV32-NEXT:  .LBB1_3: # %_udiv-special-cases
+; RV32-NEXT:    addi sp, sp, -96
+; RV32-NEXT:    sw s0, 92(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s1, 88(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s2, 84(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s3, 80(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s4, 76(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s5, 72(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s6, 68(sp) # 4-byte Folded Spill
+; RV32-NEXT:    slli a2, a3, 31
+; RV32-NEXT:    li t5, 64
+; RV32-NEXT:    bnez a2, .LBB1_5
+; RV32-NEXT:  # %bb.4: # %_udiv-special-cases
+; RV32-NEXT:    li s0, 64
+; RV32-NEXT:    j .LBB1_6
+; RV32-NEXT:  .LBB1_5:
+; RV32-NEXT:    srli a5, a2, 1
+; RV32-NEXT:    or a2, a2, a5
+; RV32-NEXT:    srli a5, a2, 2
+; RV32-NEXT:    or a2, a2, a5
+; RV32-NEXT:    srli a5, a2, 4
+; RV32-NEXT:    or a2, a2, a5
+; RV32-NEXT:    srli a5, a2, 8
+; RV32-NEXT:    or a2, a2, a5
+; RV32-NEXT:    srli a5, a2, 16
+; RV32-NEXT:    or a2, a2, a5
+; RV32-NEXT:    not a2, a2
+; RV32-NEXT:    srli a5, a2, 1
+; RV32-NEXT:    and a5, a5, t0
+; RV32-NEXT:    sub a2, a2, a5
+; RV32-NEXT:    and a5, a2, a7
+; RV32-NEXT:    srli a2, a2, 2
+; RV32-NEXT:    and a2, a2, a7
+; RV32-NEXT:    add a2, a5, a2
+; RV32-NEXT:    srli a5, a2, 4
+; RV32-NEXT:    add a2, a2, a5
+; RV32-NEXT:    and a2, a2, a6
+; RV32-NEXT:    slli a5, a2, 8
+; RV32-NEXT:    add a2, a2, a5
+; RV32-NEXT:    slli a5, a2, 16
+; RV32-NEXT:    add a2, a2, a5
+; RV32-NEXT:    srli s0, a2, 24
+; RV32-NEXT:  .LBB1_6: # %_udiv-special-cases
+; RV32-NEXT:    lw a5, 0(a1)
+; RV32-NEXT:    lw a2, 4(a1)
+; RV32-NEXT:    lw s2, 8(a1)
+; RV32-NEXT:    or a1, t4, t2
+; RV32-NEXT:    addi s1, s0, 64
+; RV32-NEXT:    bnez a1, .LBB1_8
+; RV32-NEXT:  # %bb.7: # %_udiv-special-cases
+; RV32-NEXT:    mv t3, s1
+; RV32-NEXT:  .LBB1_8: # %_udiv-special-cases
+; RV32-NEXT:    snez s4, a1
+; RV32-NEXT:    srli a1, a2, 1
+; RV32-NEXT:    slli t2, s2, 31
+; RV32-NEXT:    slli t4, a2, 31
+; RV32-NEXT:    or a1, t2, a1
+; RV32-NEXT:    srli t2, a5, 1
+; RV32-NEXT:    or t6, t2, t4
+; RV32-NEXT:    bnez a1, .LBB1_10
+; RV32-NEXT:  # %bb.9: # %_udiv-special-cases
+; RV32-NEXT:    srli t2, t6, 1
+; RV32-NEXT:    or t2, t6, t2
+; RV32-NEXT:    srli t4, t2, 2
+; RV32-NEXT:    or t2, t2, t4
+; RV32-NEXT:    srli t4, t2, 4
+; RV32-NEXT:    or t2, t2, t4
+; RV32-NEXT:    srli t4, t2, 8
+; RV32-NEXT:    or t2, t2, t4
+; RV32-NEXT:    srli t4, t2, 16
+; RV32-NEXT:    or t2, t2, t4
+; RV32-NEXT:    not t2, t2
+; RV32-NEXT:    srli t4, t2, 1
+; RV32-NEXT:    and t4, t4, t0
+; RV32-NEXT:    sub t2, t2, t4
+; RV32-NEXT:    and t4, t2, a7
+; RV32-NEXT:    srli t2, t2, 2
+; RV32-NEXT:    and t2, t2, a7
+; RV32-NEXT:    add t2, t4, t2
+; RV32-NEXT:    srli t4, t2, 4
+; RV32-NEXT:    add t2, t2, t4
+; RV32-NEXT:    and t2, t2, a6
+; RV32-NEXT:    slli t4, t2, 8
+; RV32-NEXT:    add t2, t2, t4
+; RV32-NEXT:    slli t4, t2, 16
+; RV32-NEXT:    add t2, t2, t4
+; RV32-NEXT:    srli t2, t2, 24
+; RV32-NEXT:    addi s3, t2, 32
+; RV32-NEXT:    j .LBB1_11
+; RV32-NEXT:  .LBB1_10:
+; RV32-NEXT:    srli t2, a1, 1
+; RV32-NEXT:    or t2, a1, t2
+; RV32-NEXT:    srli t4, t2, 2
+; RV32-NEXT:    or t2, t2, t4
+; RV32-NEXT:    srli t4, t2, 4
+; RV32-NEXT:    or t2, t2, t4
+; RV32-NEXT:    srli t4, t2, 8
+; RV32-NEXT:    or t2, t2, t4
+; RV32-NEXT:    srli t4, t2, 16
+; RV32-NEXT:    or t2, t2, t4
+; RV32-NEXT:    not t2, t2
+; RV32-NEXT:    srli t4, t2, 1
+; RV32-NEXT:    and t4, t4, t0
+; RV32-NEXT:    sub t2, t2, t4
+; RV32-NEXT:    and t4, t2, a7
+; RV32-NEXT:    srli t2, t2, 2
+; RV32-NEXT:    and t2, t2, a7
+; RV32-NEXT:    add t2, t4, t2
+; RV32-NEXT:    srli t4, t2, 4
+; RV32-NEXT:    add t2, t2, t4
+; RV32-NEXT:    and t2, t2, a6
+; RV32-NEXT:    slli t4, t2, 8
+; RV32-NEXT:    add t2, t2, t4
+; RV32-NEXT:    slli t4, t2, 16
+; RV32-NEXT:    add t2, t2, t4
+; RV32-NEXT:    srli s3, t2, 24
+; RV32-NEXT:  .LBB1_11: # %_udiv-special-cases
+; RV32-NEXT:    andi t4, s2, 1
+; RV32-NEXT:    andi t1, t1, 1
+; RV32-NEXT:    or t2, a3, a4
+; RV32-NEXT:    or s2, a5, a2
+; RV32-NEXT:    sltu s0, s1, s0
+; RV32-NEXT:    slli s1, a5, 31
+; RV32-NEXT:    addi s4, s4, -1
+; RV32-NEXT:    beqz s1, .LBB1_13
+; RV32-NEXT:  # %bb.12:
+; RV32-NEXT:    srli t5, s1, 1
+; RV32-NEXT:    or t5, s1, t5
+; RV32-NEXT:    srli s1, t5, 2
+; RV32-NEXT:    or t5, t5, s1
+; RV32-NEXT:    srli s1, t5, 4
+; RV32-NEXT:    or t5, t5, s1
+; RV32-NEXT:    srli s1, t5, 8
+; RV32-NEXT:    or t5, t5, s1
+; RV32-NEXT:    srli s1, t5, 16
+; RV32-NEXT:    or t5, t5, s1
+; RV32-NEXT:    not t5, t5
+; RV32-NEXT:    srli s1, t5, 1
+; RV32-NEXT:    and t0, s1, t0
+; RV32-NEXT:    sub t0, t5, t0
+; RV32-NEXT:    and t5, t0, a7
+; RV32-NEXT:    srli t0, t0, 2
+; RV32-NEXT:    and a7, t0, a7
+; RV32-NEXT:    add a7, t5, a7
+; RV32-NEXT:    srli t0, a7, 4
+; RV32-NEXT:    add a7, a7, t0
+; RV32-NEXT:    and a6, a7, a6
+; RV32-NEXT:    slli a7, a6, 8
+; RV32-NEXT:    add a6, a6, a7
+; RV32-NEXT:    slli a7, a6, 16
+; RV32-NEXT:    add a6, a6, a7
+; RV32-NEXT:    srli t5, a6, 24
+; RV32-NEXT:  .LBB1_13: # %_udiv-special-cases
+; RV32-NEXT:    or t0, t2, t1
+; RV32-NEXT:    or a6, s2, t4
+; RV32-NEXT:    and a7, s4, s0
+; RV32-NEXT:    or t6, t6, a1
+; RV32-NEXT:    addi s0, t5, 64
+; RV32-NEXT:    bnez t6, .LBB1_15
+; RV32-NEXT:  # %bb.14: # %_udiv-special-cases
+; RV32-NEXT:    mv s3, s0
+; RV32-NEXT:  .LBB1_15: # %_udiv-special-cases
+; RV32-NEXT:    seqz a1, t0
+; RV32-NEXT:    sltu t0, s0, t5
+; RV32-NEXT:    snez t5, t6
+; RV32-NEXT:    addi t5, t5, -1
+; RV32-NEXT:    and t0, t5, t0
+; RV32-NEXT:    sltu t5, t3, s3
+; RV32-NEXT:    seqz a6, a6
+; RV32-NEXT:    mv t6, t5
+; RV32-NEXT:    beq a7, t0, .LBB1_17
+; RV32-NEXT:  # %bb.16: # %_udiv-special-cases
+; RV32-NEXT:    sltu t6, a7, t0
+; RV32-NEXT:  .LBB1_17: # %_udiv-special-cases
+; RV32-NEXT:    or a1, a1, a6
+; RV32-NEXT:    andi a6, t6, 1
+; RV32-NEXT:    sub a7, a7, t0
+; RV32-NEXT:    sub t5, a7, t5
+; RV32-NEXT:    sub a7, t3, s3
+; RV32-NEXT:    beqz a6, .LBB1_19
+; RV32-NEXT:  # %bb.18: # %_udiv-special-cases
+; RV32-NEXT:    mv t0, a6
+; RV32-NEXT:    j .LBB1_20
+; RV32-NEXT:  .LBB1_19:
+; RV32-NEXT:    sltiu t0, a7, 65
+; RV32-NEXT:    xori t0, t0, 1
+; RV32-NEXT:    snez t3, t5
+; RV32-NEXT:    or t0, t0, t3
+; RV32-NEXT:  .LBB1_20: # %_udiv-special-cases
+; RV32-NEXT:    or t6, a1, t0
+; RV32-NEXT:    addi a1, t6, -1
+; RV32-NEXT:    and t3, t4, a1
+; RV32-NEXT:    and t0, a1, a2
+; RV32-NEXT:    and a1, a1, a5
+; RV32-NEXT:    bnez t6, .LBB1_30
+; RV32-NEXT:  # %bb.21: # %_udiv-special-cases
+; RV32-NEXT:    xori t6, a7, 64
+; RV32-NEXT:    or t6, t6, a6
+; RV32-NEXT:    or t6, t6, t5
+; RV32-NEXT:    beqz t6, .LBB1_30
+; RV32-NEXT:  # %bb.22: # %udiv-bb1
+; RV32-NEXT:    addi a1, a7, 1
+; RV32-NEXT:    sw zero, 32(sp)
+; RV32-NEXT:    sw zero, 36(sp)
+; RV32-NEXT:    sw zero, 40(sp)
+; RV32-NEXT:    sw zero, 44(sp)
+; RV32-NEXT:    sw a5, 48(sp)
+; RV32-NEXT:    sw a2, 52(sp)
+; RV32-NEXT:    sw t4, 56(sp)
+; RV32-NEXT:    li t0, 64
+; RV32-NEXT:    addi t3, sp, 48
+; RV32-NEXT:    neg s1, a7
+; RV32-NEXT:    seqz t6, a1
+; RV32-NEXT:    sub a7, t0, a7
+; RV32-NEXT:    add t5, t5, t6
+; RV32-NEXT:    andi t0, a7, 31
+; RV32-NEXT:    srli a7, a7, 3
+; RV32-NEXT:    or t6, a1, t5
+; RV32-NEXT:    xori s2, t0, 31
+; RV32-NEXT:    andi a7, a7, 12
+; RV32-NEXT:    seqz t0, t6
+; RV32-NEXT:    sub s3, t3, a7
+; RV32-NEXT:    add a6, a6, t0
+; RV32-NEXT:    lw t3, 0(s3)
+; RV32-NEXT:    lw s4, 4(s3)
+; RV32-NEXT:    andi a7, a6, 1
+; RV32-NEXT:    or t6, t6, a7
+; RV32-NEXT:    srli a6, t3, 1
+; RV32-NEXT:    sll t0, s4, s1
+; RV32-NEXT:    srl a6, a6, s2
+; RV32-NEXT:    or t0, t0, a6
+; RV32-NEXT:    sll a6, t3, s1
+; RV32-NEXT:    li t3, 0
+; RV32-NEXT:    beqz t6, .LBB1_28
+; RV32-NEXT:  # %bb.23: # %udiv-preheader
+; RV32-NEXT:    li t6, 0
+; RV32-NEXT:    li s0, 0
+; RV32-NEXT:    srli s4, s4, 1
+; RV32-NEXT:    lw s3, 8(s3)
+; RV32-NEXT:    sw zero, 16(sp)
+; RV32-NEXT:    sw zero, 20(sp)
+; RV32-NEXT:    sw zero, 24(sp)
+; RV32-NEXT:    sw zero, 28(sp)
+; RV32-NEXT:    sw a5, 0(sp)
+; RV32-NEXT:    sw a2, 4(sp)
+; RV32-NEXT:    sw t4, 8(sp)
+; RV32-NEXT:    sw zero, 12(sp)
+; RV32-NEXT:    srli a2, a1, 3
+; RV32-NEXT:    srl a5, s4, s2
+; RV32-NEXT:    mv t4, sp
+; RV32-NEXT:    snez t2, t2
+; RV32-NEXT:    andi a2, a2, 12
+; RV32-NEXT:    add t1, t1, t2
+; RV32-NEXT:    add a2, t4, a2
+; RV32-NEXT:    lw t2, 0(a2)
+; RV32-NEXT:    lw t4, 4(a2)
+; RV32-NEXT:    lw a2, 8(a2)
+; RV32-NEXT:    sll s1, s3, s1
+; RV32-NEXT:    andi s2, a1, 31
+; RV32-NEXT:    xori s2, s2, 31
+; RV32-NEXT:    or s3, s1, a5
+; RV32-NEXT:    slli a2, a2, 1
+; RV32-NEXT:    slli a5, t4, 1
+; RV32-NEXT:    sll a2, a2, s2
+; RV32-NEXT:    sll s2, a5, s2
+; RV32-NEXT:    srl s1, t4, a1
+; RV32-NEXT:    or s1, s1, a2
+; RV32-NEXT:    seqz a2, a3
+; RV32-NEXT:    sub a2, a4, a2
+; RV32-NEXT:    addi a5, t1, 1
+; RV32-NEXT:    andi a5, a5, 1
+; RV32-NEXT:    andi s3, s3, 1
+; RV32-NEXT:    srl t1, t2, a1
+; RV32-NEXT:    or s2, t1, s2
+; RV32-NEXT:    addi t1, a3, -1
+; RV32-NEXT:    j .LBB1_26
+; RV32-NEXT:  .LBB1_24: # %udiv-do-while
+; RV32-NEXT:    # in Loop: Header=BB1_26 Depth=1
+; RV32-NEXT:    sltu t2, a2, s4
+; RV32-NEXT:  .LBB1_25: # %udiv-do-while
+; RV32-NEXT:    # in Loop: Header=BB1_26 Depth=1
+; RV32-NEXT:    srli s1, s1, 31
+; RV32-NEXT:    sub t4, a5, s1
+; RV32-NEXT:    sub t2, t4, t2
+; RV32-NEXT:    slli t2, t2, 31
+; RV32-NEXT:    srai s1, t2, 31
+; RV32-NEXT:    and s3, s1, a4
+; RV32-NEXT:    li t2, 0
+; RV32-NEXT:    li t4, 0
+; RV32-NEXT:    srli s5, a6, 31
+; RV32-NEXT:    sub s4, s4, s3
+; RV32-NEXT:    slli s3, t0, 1
+; RV32-NEXT:    or s3, s3, s5
+; RV32-NEXT:    srli t0, t0, 31
+; RV32-NEXT:    slli a6, a6, 1
+; RV32-NEXT:    or a6, t3, a6
+; RV32-NEXT:    seqz t3, a1
+; RV32-NEXT:    or s0, s0, t0
+; RV32-NEXT:    or s5, a1, t5
+; RV32-NEXT:    sub t5, t5, t3
+; RV32-NEXT:    and s6, s1, a3
+; RV32-NEXT:    addi a1, a1, -1
+; RV32-NEXT:    andi t3, s1, 1
+; RV32-NEXT:    or t0, t6, s3
+; RV32-NEXT:    sltu t6, s2, s6
+; RV32-NEXT:    snez s5, s5
+; RV32-NEXT:    andi s3, s0, 1
+; RV32-NEXT:    sub s1, s4, t6
+; RV32-NEXT:    add a7, a7, s5
+; RV32-NEXT:    addi a7, a7, 1
+; RV32-NEXT:    andi a7, a7, 1
+; RV32-NEXT:    or t6, a1, t5
+; RV32-NEXT:    or s4, t6, a7
+; RV32-NEXT:    sub s2, s2, s6
+; RV32-NEXT:    li t6, 0
+; RV32-NEXT:    li s0, 0
+; RV32-NEXT:    beqz s4, .LBB1_29
+; RV32-NEXT:  .LBB1_26: # %udiv-do-while
+; RV32-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32-NEXT:    srli t2, s2, 31
+; RV32-NEXT:    slli t4, s1, 1
+; RV32-NEXT:    slli s2, s2, 1
+; RV32-NEXT:    or s4, t4, t2
+; RV32-NEXT:    andi t2, s3, 1
+; RV32-NEXT:    or s2, s2, t2
+; RV32-NEXT:    bne a2, s4, .LBB1_24
+; RV32-NEXT:  # %bb.27: # in Loop: Header=BB1_26 Depth=1
+; RV32-NEXT:    sltu t2, t1, s2
+; RV32-NEXT:    j .LBB1_25
+; RV32-NEXT:  .LBB1_28:
+; RV32-NEXT:    li t2, 0
+; RV32-NEXT:    li t4, 0
+; RV32-NEXT:  .LBB1_29: # %udiv-loop-exit
+; RV32-NEXT:    srli a2, a6, 31
+; RV32-NEXT:    slli a3, t0, 1
+; RV32-NEXT:    srli a4, t0, 31
+; RV32-NEXT:    slli a6, a6, 1
+; RV32-NEXT:    or a1, t3, a6
+; RV32-NEXT:    or a2, t2, a2
+; RV32-NEXT:    or a4, t4, a4
+; RV32-NEXT:    or t0, a2, a3
+; RV32-NEXT:    andi t3, a4, 1
+; RV32-NEXT:  .LBB1_30: # %udiv-end
+; RV32-NEXT:    andi a2, t3, 1
+; RV32-NEXT:    sw a1, 0(a0)
+; RV32-NEXT:    sw t0, 4(a0)
+; RV32-NEXT:    sb a2, 8(a0)
+; RV32-NEXT:    lw s0, 92(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s1, 88(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s2, 84(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s3, 80(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s4, 76(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s5, 72(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s6, 68(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 96
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: udiv_i65:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    andi a1, a1, 1
+; RV64-NEXT:    andi a3, a3, 1
+; RV64-NEXT:    call __udivti3
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %res = udiv i65 %x, %y
+  ret i65 %res
+}
 
 define i128 @udiv_i128(i128 %x, i128 %y) nounwind {
-; CHECK-LABEL: udiv_i128:
-; CHECK:    call __udivti3
+; RV32-LABEL: udiv_i128:
+; RV32:       # %bb.0: # %_udiv-special-cases
+; RV32-NEXT:    addi sp, sp, -160
+; RV32-NEXT:    sw ra, 156(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 152(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s1, 148(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s2, 144(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s3, 140(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s4, 136(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s5, 132(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s6, 128(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s7, 124(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s8, 120(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s9, 116(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s10, 112(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s11, 108(sp) # 4-byte Folded Spill
+; RV32-NEXT:    mv s7, a0
+; RV32-NEXT:    lw s8, 0(a2)
+; RV32-NEXT:    lw s9, 4(a2)
+; RV32-NEXT:    lw s11, 8(a2)
+; RV32-NEXT:    lw ra, 12(a2)
+; RV32-NEXT:    lui t4, 349525
+; RV32-NEXT:    addi t4, t4, 1365
+; RV32-NEXT:    lui t3, 209715
+; RV32-NEXT:    addi t3, t3, 819
+; RV32-NEXT:    lui t2, 61681
+; RV32-NEXT:    addi t2, t2, -241
+; RV32-NEXT:    bnez s9, .LBB2_2
+; RV32-NEXT:  # %bb.1: # %_udiv-special-cases
+; RV32-NEXT:    srli a0, s8, 1
+; RV32-NEXT:    or a0, s8, a0
+; RV32-NEXT:    srli a3, a0, 2
+; RV32-NEXT:    or a0, a0, a3
+; RV32-NEXT:    srli a3, a0, 4
+; RV32-NEXT:    or a0, a0, a3
+; RV32-NEXT:    srli a3, a0, 8
+; RV32-NEXT:    or a0, a0, a3
+; RV32-NEXT:    srli a3, a0, 16
+; RV32-NEXT:    or a0, a0, a3
+; RV32-NEXT:    not a0, a0
+; RV32-NEXT:    srli a3, a0, 1
+; RV32-NEXT:    and a3, a3, t4
+; RV32-NEXT:    sub a0, a0, a3
+; RV32-NEXT:    and a3, a0, t3
+; RV32-NEXT:    srli a0, a0, 2
+; RV32-NEXT:    and a0, a0, t3
+; RV32-NEXT:    add a0, a3, a0
+; RV32-NEXT:    srli a3, a0, 4
+; RV32-NEXT:    add a0, a0, a3
+; RV32-NEXT:    and a0, a0, t2
+; RV32-NEXT:    slli a3, a0, 8
+; RV32-NEXT:    add a0, a0, a3
+; RV32-NEXT:    slli a3, a0, 16
+; RV32-NEXT:    add a0, a0, a3
+; RV32-NEXT:    srli a0, a0, 24
+; RV32-NEXT:    addi t6, a0, 32
+; RV32-NEXT:    j .LBB2_3
+; RV32-NEXT:  .LBB2_2:
+; RV32-NEXT:    srli a0, s9, 1
+; RV32-NEXT:    or a0, s9, a0
+; RV32-NEXT:    srli a3, a0, 2
+; RV32-NEXT:    or a0, a0, a3
+; RV32-NEXT:    srli a3, a0, 4
+; RV32-NEXT:    or a0, a0, a3
+; RV32-NEXT:    srli a3, a0, 8
+; RV32-NEXT:    or a0, a0, a3
+; RV32-NEXT:    srli a3, a0, 16
+; RV32-NEXT:    or a0, a0, a3
+; RV32-NEXT:    not a0, a0
+; RV32-NEXT:    srli a3, a0, 1
+; RV32-NEXT:    and a3, a3, t4
+; RV32-NEXT:    sub a0, a0, a3
+; RV32-NEXT:    and a3, a0, t3
+; RV32-NEXT:    srli a0, a0, 2
+; RV32-NEXT:    and a0, a0, t3
+; RV32-NEXT:    add a0, a3, a0
+; RV32-NEXT:    srli a3, a0, 4
+; RV32-NEXT:    add a0, a0, a3
+; RV32-NEXT:    and a0, a0, t2
+; RV32-NEXT:    slli a3, a0, 8
+; RV32-NEXT:    add a0, a0, a3
+; RV32-NEXT:    slli a3, a0, 16
+; RV32-NEXT:    add a0, a0, a3
+; RV32-NEXT:    srli t6, a0, 24
+; RV32-NEXT:  .LBB2_3: # %_udiv-special-cases
+; RV32-NEXT:    lw a6, 4(a1)
+; RV32-NEXT:    or s0, s11, ra
+; RV32-NEXT:    bnez ra, .LBB2_5
+; RV32-NEXT:  # %bb.4: # %_udiv-special-cases
+; RV32-NEXT:    srli a0, s11, 1
+; RV32-NEXT:    or a0, s11, a0
+; RV32-NEXT:    srli a3, a0, 2
+; RV32-NEXT:    or a0, a0, a3
+; RV32-NEXT:    srli a3, a0, 4
+; RV32-NEXT:    or a0, a0, a3
+; RV32-NEXT:    srli a3, a0, 8
+; RV32-NEXT:    or a0, a0, a3
+; RV32-NEXT:    srli a3, a0, 16
+; RV32-NEXT:    or a0, a0, a3
+; RV32-NEXT:    not a0, a0
+; RV32-NEXT:    srli a3, a0, 1
+; RV32-NEXT:    and a3, a3, t4
+; RV32-NEXT:    sub a0, a0, a3
+; RV32-NEXT:    and a3, a0, t3
+; RV32-NEXT:    srli a0, a0, 2
+; RV32-NEXT:    and a0, a0, t3
+; RV32-NEXT:    add a0, a3, a0
+; RV32-NEXT:    srli a3, a0, 4
+; RV32-NEXT:    add a0, a0, a3
+; RV32-NEXT:    and a0, a0, t2
+; RV32-NEXT:    slli a3, a0, 8
+; RV32-NEXT:    add a0, a0, a3
+; RV32-NEXT:    slli a3, a0, 16
+; RV32-NEXT:    add a0, a0, a3
+; RV32-NEXT:    srli a0, a0, 24
+; RV32-NEXT:    addi t5, a0, 32
+; RV32-NEXT:    j .LBB2_6
+; RV32-NEXT:  .LBB2_5:
+; RV32-NEXT:    srli a0, ra, 1
+; RV32-NEXT:    or a0, ra, a0
+; RV32-NEXT:    srli a3, a0, 2
+; RV32-NEXT:    or a0, a0, a3
+; RV32-NEXT:    srli a3, a0, 4
+; RV32-NEXT:    or a0, a0, a3
+; RV32-NEXT:    srli a3, a0, 8
+; RV32-NEXT:    or a0, a0, a3
+; RV32-NEXT:    srli a3, a0, 16
+; RV32-NEXT:    or a0, a0, a3
+; RV32-NEXT:    not a0, a0
+; RV32-NEXT:    srli a3, a0, 1
+; RV32-NEXT:    and a3, a3, t4
+; RV32-NEXT:    sub a0, a0, a3
+; RV32-NEXT:    and a3, a0, t3
+; RV32-NEXT:    srli a0, a0, 2
+; RV32-NEXT:    and a0, a0, t3
+; RV32-NEXT:    add a0, a3, a0
+; RV32-NEXT:    srli a3, a0, 4
+; RV32-NEXT:    add a0, a0, a3
+; RV32-NEXT:    and a0, a0, t2
+; RV32-NEXT:    slli a3, a0, 8
+; RV32-NEXT:    add a0, a0, a3
+; RV32-NEXT:    slli a3, a0, 16
+; RV32-NEXT:    add a0, a0, a3
+; RV32-NEXT:    srli t5, a0, 24
+; RV32-NEXT:  .LBB2_6: # %_udiv-special-cases
+; RV32-NEXT:    lw a7, 12(a1)
+; RV32-NEXT:    addi a0, t6, 64
+; RV32-NEXT:    bnez s0, .LBB2_8
+; RV32-NEXT:  # %bb.7: # %_udiv-special-cases
+; RV32-NEXT:    mv t5, a0
+; RV32-NEXT:  .LBB2_8: # %_udiv-special-cases
+; RV32-NEXT:    lw t1, 0(a1)
+; RV32-NEXT:    lw t0, 8(a1)
+; RV32-NEXT:    snez s3, s0
+; RV32-NEXT:    bnez a6, .LBB2_10
+; RV32-NEXT:  # %bb.9: # %_udiv-special-cases
+; RV32-NEXT:    srli a1, t1, 1
+; RV32-NEXT:    or a1, t1, a1
+; RV32-NEXT:    srli a3, a1, 2
+; RV32-NEXT:    or a1, a1, a3
+; RV32-NEXT:    srli a3, a1, 4
+; RV32-NEXT:    or a1, a1, a3
+; RV32-NEXT:    srli a3, a1, 8
+; RV32-NEXT:    or a1, a1, a3
+; RV32-NEXT:    srli a3, a1, 16
+; RV32-NEXT:    or a1, a1, a3
+; RV32-NEXT:    not a1, a1
+; RV32-NEXT:    srli a3, a1, 1
+; RV32-NEXT:    and a3, a3, t4
+; RV32-NEXT:    sub a1, a1, a3
+; RV32-NEXT:    and a3, a1, t3
+; RV32-NEXT:    srli a1, a1, 2
+; RV32-NEXT:    and a1, a1, t3
+; RV32-NEXT:    add a1, a3, a1
+; RV32-NEXT:    srli a3, a1, 4
+; RV32-NEXT:    add a1, a1, a3
+; RV32-NEXT:    and a1, a1, t2
+; RV32-NEXT:    slli a3, a1, 8
+; RV32-NEXT:    add a1, a1, a3
+; RV32-NEXT:    slli a3, a1, 16
+; RV32-NEXT:    add a1, a1, a3
+; RV32-NEXT:    srli a1, a1, 24
+; RV32-NEXT:    addi a3, a1, 32
+; RV32-NEXT:    j .LBB2_11
+; RV32-NEXT:  .LBB2_10:
+; RV32-NEXT:    srli a1, a6, 1
+; RV32-NEXT:    or a1, a6, a1
+; RV32-NEXT:    srli a3, a1, 2
+; RV32-NEXT:    or a1, a1, a3
+; RV32-NEXT:    srli a3, a1, 4
+; RV32-NEXT:    or a1, a1, a3
+; RV32-NEXT:    srli a3, a1, 8
+; RV32-NEXT:    or a1, a1, a3
+; RV32-NEXT:    srli a3, a1, 16
+; RV32-NEXT:    or a1, a1, a3
+; RV32-NEXT:    not a1, a1
+; RV32-NEXT:    srli a3, a1, 1
+; RV32-NEXT:    and a3, a3, t4
+; RV32-NEXT:    sub a1, a1, a3
+; RV32-NEXT:    and a3, a1, t3
+; RV32-NEXT:    srli a1, a1, 2
+; RV32-NEXT:    and a1, a1, t3
+; RV32-NEXT:    add a1, a3, a1
+; RV32-NEXT:    srli a3, a1, 4
+; RV32-NEXT:    add a1, a1, a3
+; RV32-NEXT:    and a1, a1, t2
+; RV32-NEXT:    slli a3, a1, 8
+; RV32-NEXT:    add a1, a1, a3
+; RV32-NEXT:    slli a3, a1, 16
+; RV32-NEXT:    add a1, a1, a3
+; RV32-NEXT:    srli a3, a1, 24
+; RV32-NEXT:  .LBB2_11: # %_udiv-special-cases
+; RV32-NEXT:    or a1, s9, ra
+; RV32-NEXT:    or s0, s8, s11
+; RV32-NEXT:    or s1, a6, a7
+; RV32-NEXT:    or s2, t1, t0
+; RV32-NEXT:    sltu t6, a0, t6
+; RV32-NEXT:    addi s3, s3, -1
+; RV32-NEXT:    addi a0, a3, 64
+; RV32-NEXT:    or s4, t0, a7
+; RV32-NEXT:    sltu s5, a0, a3
+; RV32-NEXT:    snez s6, s4
+; RV32-NEXT:    addi s6, s6, -1
+; RV32-NEXT:    bnez a7, .LBB2_13
+; RV32-NEXT:  # %bb.12: # %_udiv-special-cases
+; RV32-NEXT:    srli a3, t0, 1
+; RV32-NEXT:    or a3, t0, a3
+; RV32-NEXT:    srli a4, a3, 2
+; RV32-NEXT:    or a3, a3, a4
+; RV32-NEXT:    srli a4, a3, 4
+; RV32-NEXT:    or a3, a3, a4
+; RV32-NEXT:    srli a4, a3, 8
+; RV32-NEXT:    or a3, a3, a4
+; RV32-NEXT:    srli a4, a3, 16
+; RV32-NEXT:    or a3, a3, a4
+; RV32-NEXT:    not a3, a3
+; RV32-NEXT:    srli a4, a3, 1
+; RV32-NEXT:    and a4, a4, t4
+; RV32-NEXT:    sub a3, a3, a4
+; RV32-NEXT:    and a4, a3, t3
+; RV32-NEXT:    srli a3, a3, 2
+; RV32-NEXT:    and a3, a3, t3
+; RV32-NEXT:    add a3, a4, a3
+; RV32-NEXT:    srli a4, a3, 4
+; RV32-NEXT:    add a3, a3, a4
+; RV32-NEXT:    and a3, a3, t2
+; RV32-NEXT:    slli a4, a3, 8
+; RV32-NEXT:    add a3, a3, a4
+; RV32-NEXT:    slli a4, a3, 16
+; RV32-NEXT:    add a3, a3, a4
+; RV32-NEXT:    srli a3, a3, 24
+; RV32-NEXT:    addi a3, a3, 32
+; RV32-NEXT:    j .LBB2_14
+; RV32-NEXT:  .LBB2_13:
+; RV32-NEXT:    srli a3, a7, 1
+; RV32-NEXT:    or a3, a7, a3
+; RV32-NEXT:    srli a4, a3, 2
+; RV32-NEXT:    or a3, a3, a4
+; RV32-NEXT:    srli a4, a3, 4
+; RV32-NEXT:    or a3, a3, a4
+; RV32-NEXT:    srli a4, a3, 8
+; RV32-NEXT:    or a3, a3, a4
+; RV32-NEXT:    srli a4, a3, 16
+; RV32-NEXT:    or a3, a3, a4
+; RV32-NEXT:    not a3, a3
+; RV32-NEXT:    srli a4, a3, 1
+; RV32-NEXT:    and a4, a4, t4
+; RV32-NEXT:    sub a3, a3, a4
+; RV32-NEXT:    and a4, a3, t3
+; RV32-NEXT:    srli a3, a3, 2
+; RV32-NEXT:    and a3, a3, t3
+; RV32-NEXT:    add a3, a4, a3
+; RV32-NEXT:    srli a4, a3, 4
+; RV32-NEXT:    add a3, a3, a4
+; RV32-NEXT:    and a3, a3, t2
+; RV32-NEXT:    slli a4, a3, 8
+; RV32-NEXT:    add a3, a3, a4
+; RV32-NEXT:    slli a4, a3, 16
+; RV32-NEXT:    add a3, a3, a4
+; RV32-NEXT:    srli a3, a3, 24
+; RV32-NEXT:  .LBB2_14: # %_udiv-special-cases
+; RV32-NEXT:    or s0, s0, a1
+; RV32-NEXT:    or a5, s2, s1
+; RV32-NEXT:    and a1, s3, t6
+; RV32-NEXT:    and a4, s6, s5
+; RV32-NEXT:    bnez s4, .LBB2_16
+; RV32-NEXT:  # %bb.15: # %_udiv-special-cases
+; RV32-NEXT:    mv a3, a0
+; RV32-NEXT:  .LBB2_16: # %_udiv-special-cases
+; RV32-NEXT:    seqz a0, s0
+; RV32-NEXT:    seqz a5, a5
+; RV32-NEXT:    sltu t2, t5, a3
+; RV32-NEXT:    sub t4, a1, a4
+; RV32-NEXT:    mv t3, t2
+; RV32-NEXT:    beq a1, a4, .LBB2_18
+; RV32-NEXT:  # %bb.17: # %_udiv-special-cases
+; RV32-NEXT:    sltu t3, a1, a4
+; RV32-NEXT:  .LBB2_18: # %_udiv-special-cases
+; RV32-NEXT:    sub t2, t4, t2
+; RV32-NEXT:    or a0, a0, a5
+; RV32-NEXT:    neg t4, t3
+; RV32-NEXT:    seqz t6, t3
+; RV32-NEXT:    addi t6, t6, -1
+; RV32-NEXT:    or a1, t4, t6
+; RV32-NEXT:    sub t3, t5, a3
+; RV32-NEXT:    beqz a1, .LBB2_20
+; RV32-NEXT:  # %bb.19: # %_udiv-special-cases
+; RV32-NEXT:    snez a1, a1
+; RV32-NEXT:    j .LBB2_21
+; RV32-NEXT:  .LBB2_20:
+; RV32-NEXT:    snez a1, t2
+; RV32-NEXT:    sltiu a3, t3, 128
+; RV32-NEXT:    xori a3, a3, 1
+; RV32-NEXT:    or a1, a3, a1
+; RV32-NEXT:  .LBB2_21: # %_udiv-special-cases
+; RV32-NEXT:    or a5, a0, a1
+; RV32-NEXT:    addi a3, a5, -1
+; RV32-NEXT:    and a0, a3, a7
+; RV32-NEXT:    and a1, a3, t0
+; RV32-NEXT:    and a4, a3, a6
+; RV32-NEXT:    and a3, a3, t1
+; RV32-NEXT:    bnez a5, .LBB2_26
+; RV32-NEXT:  # %bb.22: # %_udiv-special-cases
+; RV32-NEXT:    xori a5, t3, 127
+; RV32-NEXT:    or a5, a5, t4
+; RV32-NEXT:    or t5, t2, t6
+; RV32-NEXT:    or a5, a5, t5
+; RV32-NEXT:    beqz a5, .LBB2_26
+; RV32-NEXT:  # %bb.23: # %udiv-bb1
+; RV32-NEXT:    sw s7, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    addi a1, t3, 1
+; RV32-NEXT:    sw zero, 72(sp)
+; RV32-NEXT:    sw zero, 76(sp)
+; RV32-NEXT:    sw zero, 80(sp)
+; RV32-NEXT:    sw zero, 84(sp)
+; RV32-NEXT:    sw t1, 88(sp)
+; RV32-NEXT:    sw a6, 92(sp)
+; RV32-NEXT:    sw t0, 96(sp)
+; RV32-NEXT:    sw a7, 100(sp)
+; RV32-NEXT:    li a0, 127
+; RV32-NEXT:    addi a2, sp, 88
+; RV32-NEXT:    seqz a3, a1
+; RV32-NEXT:    sub a0, a0, t3
+; RV32-NEXT:    add t2, t2, a3
+; RV32-NEXT:    andi a3, a0, 31
+; RV32-NEXT:    srli a0, a0, 3
+; RV32-NEXT:    or a4, a1, t2
+; RV32-NEXT:    xori a3, a3, 31
+; RV32-NEXT:    andi a0, a0, 12
+; RV32-NEXT:    seqz t5, a4
+; RV32-NEXT:    sub a2, a2, a0
+; RV32-NEXT:    add t5, t4, t5
+; RV32-NEXT:    lw a0, 0(a2)
+; RV32-NEXT:    lw a4, 4(a2)
+; RV32-NEXT:    lw a5, 8(a2)
+; RV32-NEXT:    lw a2, 12(a2)
+; RV32-NEXT:    sltu t4, t5, t4
+; RV32-NEXT:    or s0, a1, t5
+; RV32-NEXT:    add t4, t6, t4
+; RV32-NEXT:    or t6, t2, t4
+; RV32-NEXT:    or s0, s0, t6
+; RV32-NEXT:    srli t6, a5, 1
+; RV32-NEXT:    srli s1, a4, 1
+; RV32-NEXT:    srli s2, a0, 1
+; RV32-NEXT:    srl t6, t6, a3
+; RV32-NEXT:    srl s1, s1, a3
+; RV32-NEXT:    srl a3, s2, a3
+; RV32-NEXT:    not t3, t3
+; RV32-NEXT:    sll a2, a2, t3
+; RV32-NEXT:    or s2, a2, t6
+; RV32-NEXT:    sll a2, a5, t3
+; RV32-NEXT:    sll a4, a4, t3
+; RV32-NEXT:    or s1, a2, s1
+; RV32-NEXT:    or t6, a4, a3
+; RV32-NEXT:    sll t3, a0, t3
+; RV32-NEXT:    bnez s0, .LBB2_27
+; RV32-NEXT:  # %bb.24:
+; RV32-NEXT:    li s6, 0
+; RV32-NEXT:    li s7, 0
+; RV32-NEXT:    li s8, 0
+; RV32-NEXT:  .LBB2_25: # %udiv-loop-exit
+; RV32-NEXT:    srli a0, s1, 31
+; RV32-NEXT:    slli s2, s2, 1
+; RV32-NEXT:    or a0, s2, a0
+; RV32-NEXT:    srli a1, t6, 31
+; RV32-NEXT:    slli s1, s1, 1
+; RV32-NEXT:    or a1, s1, a1
+; RV32-NEXT:    srli a2, t3, 31
+; RV32-NEXT:    slli t6, t6, 1
+; RV32-NEXT:    slli a3, t3, 1
+; RV32-NEXT:    or a3, s0, a3
+; RV32-NEXT:    or a2, s6, a2
+; RV32-NEXT:    or a4, a2, t6
+; RV32-NEXT:    or a1, s7, a1
+; RV32-NEXT:    or a0, s8, a0
+; RV32-NEXT:    lw s7, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:  .LBB2_26: # %udiv-end
+; RV32-NEXT:    sw a3, 0(s7)
+; RV32-NEXT:    sw a4, 4(s7)
+; RV32-NEXT:    sw a1, 8(s7)
+; RV32-NEXT:    sw a0, 12(s7)
+; RV32-NEXT:    lw ra, 156(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 152(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s1, 148(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s2, 144(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s3, 140(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s4, 136(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s5, 132(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s6, 128(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s7, 124(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s8, 120(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s9, 116(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s10, 112(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s11, 108(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 160
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB2_27: # %udiv-preheader
+; RV32-NEXT:    li s0, 0
+; RV32-NEXT:    li s5, 0
+; RV32-NEXT:    li s3, 0
+; RV32-NEXT:    li s4, 0
+; RV32-NEXT:    sw zero, 56(sp)
+; RV32-NEXT:    sw zero, 60(sp)
+; RV32-NEXT:    sw zero, 64(sp)
+; RV32-NEXT:    sw zero, 68(sp)
+; RV32-NEXT:    sw t1, 40(sp)
+; RV32-NEXT:    sw a6, 44(sp)
+; RV32-NEXT:    sw t0, 48(sp)
+; RV32-NEXT:    sw a7, 52(sp)
+; RV32-NEXT:    srli a0, a1, 3
+; RV32-NEXT:    addi a2, sp, 40
+; RV32-NEXT:    andi a0, a0, 12
+; RV32-NEXT:    add a0, a2, a0
+; RV32-NEXT:    lw a2, 4(a0)
+; RV32-NEXT:    lw a3, 8(a0)
+; RV32-NEXT:    lw a4, 12(a0)
+; RV32-NEXT:    lw a0, 0(a0)
+; RV32-NEXT:    andi a5, a1, 31
+; RV32-NEXT:    xori a5, a5, 31
+; RV32-NEXT:    slli a6, a4, 1
+; RV32-NEXT:    slli a7, a3, 1
+; RV32-NEXT:    slli t0, a2, 1
+; RV32-NEXT:    sll a6, a6, a5
+; RV32-NEXT:    sll a7, a7, a5
+; RV32-NEXT:    sll a5, t0, a5
+; RV32-NEXT:    seqz t0, s8
+; RV32-NEXT:    srl a3, a3, a1
+; RV32-NEXT:    or s10, a3, a6
+; RV32-NEXT:    or a3, s8, s9
+; RV32-NEXT:    sw s9, 32(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sub a6, s9, t0
+; RV32-NEXT:    seqz a3, a3
+; RV32-NEXT:    srl a2, a2, a1
+; RV32-NEXT:    or s9, a2, a7
+; RV32-NEXT:    sub a7, s11, a3
+; RV32-NEXT:    sw s11, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sltu a2, s11, a3
+; RV32-NEXT:    sw ra, 24(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sub a2, ra, a2
+; RV32-NEXT:    sw a2, 20(sp) # 4-byte Folded Spill
+; RV32-NEXT:    srl a0, a0, a1
+; RV32-NEXT:    srl ra, a4, a1
+; RV32-NEXT:    or t1, a0, a5
+; RV32-NEXT:    sw s8, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT:    addi s8, s8, -1
+; RV32-NEXT:    sw s8, 16(sp) # 4-byte Folded Spill
+; RV32-NEXT:    li s7, 0
+; RV32-NEXT:    li s8, 0
+; RV32-NEXT:    j .LBB2_29
+; RV32-NEXT:  .LBB2_28: # %udiv-do-while
+; RV32-NEXT:    # in Loop: Header=BB2_29 Depth=1
+; RV32-NEXT:    li s6, 0
+; RV32-NEXT:    sub a0, a0, a5
+; RV32-NEXT:    srli a5, s1, 31
+; RV32-NEXT:    slli s2, s2, 1
+; RV32-NEXT:    or a5, s2, a5
+; RV32-NEXT:    srli s2, t6, 31
+; RV32-NEXT:    slli s1, s1, 1
+; RV32-NEXT:    or s1, s1, s2
+; RV32-NEXT:    srli s2, t3, 31
+; RV32-NEXT:    slli t6, t6, 1
+; RV32-NEXT:    slli t3, t3, 1
+; RV32-NEXT:    or t6, t6, s2
+; RV32-NEXT:    lw a2, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    and s2, s10, a2
+; RV32-NEXT:    or t3, s0, t3
+; RV32-NEXT:    sub a2, a3, s2
+; RV32-NEXT:    sltu a3, a3, s2
+; RV32-NEXT:    lw t0, 24(sp) # 4-byte Folded Reload
+; RV32-NEXT:    and s0, s10, t0
+; RV32-NEXT:    sub t0, s9, s0
+; RV32-NEXT:    or s2, a1, t2
+; RV32-NEXT:    sub s9, a0, a4
+; RV32-NEXT:    seqz a0, a1
+; RV32-NEXT:    sub t2, t2, a0
+; RV32-NEXT:    or t6, s5, t6
+; RV32-NEXT:    addi a1, a1, -1
+; RV32-NEXT:    andi s0, s10, 1
+; RV32-NEXT:    seqz a0, s2
+; RV32-NEXT:    or s1, s3, s1
+; RV32-NEXT:    or s2, s4, a5
+; RV32-NEXT:    sub s10, a2, ra
+; RV32-NEXT:    sltu a2, a2, ra
+; RV32-NEXT:    sub a3, t0, a3
+; RV32-NEXT:    sltu a4, t5, a0
+; RV32-NEXT:    sub t5, t5, a0
+; RV32-NEXT:    sub ra, a3, a2
+; RV32-NEXT:    sub t4, t4, a4
+; RV32-NEXT:    or a0, t2, t4
+; RV32-NEXT:    or a2, a1, t5
+; RV32-NEXT:    or a0, a2, a0
+; RV32-NEXT:    sub t1, s11, t1
+; RV32-NEXT:    li s5, 0
+; RV32-NEXT:    li s3, 0
+; RV32-NEXT:    li s4, 0
+; RV32-NEXT:    beqz a0, .LBB2_25
+; RV32-NEXT:  .LBB2_29: # %udiv-do-while
+; RV32-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32-NEXT:    srli a0, t1, 31
+; RV32-NEXT:    slli a3, s9, 1
+; RV32-NEXT:    slli t1, t1, 1
+; RV32-NEXT:    or a0, a3, a0
+; RV32-NEXT:    srli a3, s2, 31
+; RV32-NEXT:    or s11, t1, a3
+; RV32-NEXT:    beq a6, a0, .LBB2_31
+; RV32-NEXT:  # %bb.30: # %udiv-do-while
+; RV32-NEXT:    # in Loop: Header=BB2_29 Depth=1
+; RV32-NEXT:    sltu a4, a6, a0
+; RV32-NEXT:    j .LBB2_32
+; RV32-NEXT:  .LBB2_31: # in Loop: Header=BB2_29 Depth=1
+; RV32-NEXT:    lw a2, 16(sp) # 4-byte Folded Reload
+; RV32-NEXT:    sltu a4, a2, s11
+; RV32-NEXT:  .LBB2_32: # %udiv-do-while
+; RV32-NEXT:    # in Loop: Header=BB2_29 Depth=1
+; RV32-NEXT:    lw a2, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT:    srli a3, s10, 31
+; RV32-NEXT:    slli ra, ra, 1
+; RV32-NEXT:    srli a5, s9, 31
+; RV32-NEXT:    slli s10, s10, 1
+; RV32-NEXT:    or s9, ra, a3
+; RV32-NEXT:    or a3, s10, a5
+; RV32-NEXT:    sub a5, a7, a3
+; RV32-NEXT:    sltu t1, a7, a3
+; RV32-NEXT:    lw t0, 20(sp) # 4-byte Folded Reload
+; RV32-NEXT:    sub s6, t0, s9
+; RV32-NEXT:    sltu a4, a5, a4
+; RV32-NEXT:    sub a5, s6, t1
+; RV32-NEXT:    sub a5, a5, a4
+; RV32-NEXT:    srai s10, a5, 31
+; RV32-NEXT:    and t1, s10, a2
+; RV32-NEXT:    lw a2, 32(sp) # 4-byte Folded Reload
+; RV32-NEXT:    and a5, s10, a2
+; RV32-NEXT:    sltu a4, s11, t1
+; RV32-NEXT:    mv ra, a4
+; RV32-NEXT:    beq a0, a5, .LBB2_28
+; RV32-NEXT:  # %bb.33: # %udiv-do-while
+; RV32-NEXT:    # in Loop: Header=BB2_29 Depth=1
+; RV32-NEXT:    sltu ra, a0, a5
+; RV32-NEXT:    j .LBB2_28
+;
+; RV64-LABEL: udiv_i128:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    call __udivti3
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
   %res = udiv i128 %x, %y
   ret i128 %res
 }
 
 define i129 @udiv_i129(i129 %x, i129 %y) nounwind {
-; CHECK-LABEL: udiv_i129:
-; CHECK-NOT: call{{.*}}div
+; RV32-LABEL: udiv_i129:
+; RV32:       # %bb.0: # %_udiv-special-cases
+; RV32-NEXT:    addi sp, sp, -240
+; RV32-NEXT:    sw ra, 236(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 232(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s1, 228(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s2, 224(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s3, 220(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s4, 216(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s5, 212(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s6, 208(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s7, 204(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s8, 200(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s9, 196(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s10, 192(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s11, 188(sp) # 4-byte Folded Spill
+; RV32-NEXT:    mv ra, a0
+; RV32-NEXT:    lw t2, 16(a2)
+; RV32-NEXT:    lw a4, 0(a2)
+; RV32-NEXT:    lw a5, 4(a2)
+; RV32-NEXT:    lw a6, 8(a2)
+; RV32-NEXT:    lw a0, 12(a2)
+; RV32-NEXT:    sw a0, 24(sp) # 4-byte Folded Spill
+; RV32-NEXT:    lui a0, 349525
+; RV32-NEXT:    lui a2, 209715
+; RV32-NEXT:    lui a3, 61681
+; RV32-NEXT:    addi t5, a0, 1365
+; RV32-NEXT:    addi t4, a2, 819
+; RV32-NEXT:    addi t3, a3, -241
+; RV32-NEXT:    sw a6, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT:    slli a0, a6, 31
+; RV32-NEXT:    srli a2, a5, 1
+; RV32-NEXT:    sw a5, 20(sp) # 4-byte Folded Spill
+; RV32-NEXT:    slli a3, a5, 31
+; RV32-NEXT:    or a0, a2, a0
+; RV32-NEXT:    sw a4, 32(sp) # 4-byte Folded Spill
+; RV32-NEXT:    srli a2, a4, 1
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    bnez a0, .LBB3_2
+; RV32-NEXT:  # %bb.1: # %_udiv-special-cases
+; RV32-NEXT:    srli a3, a2, 1
+; RV32-NEXT:    or a3, a2, a3
+; RV32-NEXT:    srli a4, a3, 2
+; RV32-NEXT:    or a3, a3, a4
+; RV32-NEXT:    srli a4, a3, 4
+; RV32-NEXT:    or a3, a3, a4
+; RV32-NEXT:    srli a4, a3, 8
+; RV32-NEXT:    or a3, a3, a4
+; RV32-NEXT:    srli a4, a3, 16
+; RV32-NEXT:    or a3, a3, a4
+; RV32-NEXT:    not a3, a3
+; RV32-NEXT:    srli a4, a3, 1
+; RV32-NEXT:    and a4, a4, t5
+; RV32-NEXT:    sub a3, a3, a4
+; RV32-NEXT:    and a4, a3, t4
+; RV32-NEXT:    srli a3, a3, 2
+; RV32-NEXT:    and a3, a3, t4
+; RV32-NEXT:    add a3, a4, a3
+; RV32-NEXT:    srli a4, a3, 4
+; RV32-NEXT:    add a3, a3, a4
+; RV32-NEXT:    and a3, a3, t3
+; RV32-NEXT:    slli a4, a3, 8
+; RV32-NEXT:    add a3, a3, a4
+; RV32-NEXT:    slli a4, a3, 16
+; RV32-NEXT:    add a3, a3, a4
+; RV32-NEXT:    srli a3, a3, 24
+; RV32-NEXT:    addi a6, a3, 32
+; RV32-NEXT:    j .LBB3_3
+; RV32-NEXT:  .LBB3_2:
+; RV32-NEXT:    srli a3, a0, 1
+; RV32-NEXT:    or a3, a0, a3
+; RV32-NEXT:    srli a4, a3, 2
+; RV32-NEXT:    or a3, a3, a4
+; RV32-NEXT:    srli a4, a3, 4
+; RV32-NEXT:    or a3, a3, a4
+; RV32-NEXT:    srli a4, a3, 8
+; RV32-NEXT:    or a3, a3, a4
+; RV32-NEXT:    srli a4, a3, 16
+; RV32-NEXT:    or a3, a3, a4
+; RV32-NEXT:    not a3, a3
+; RV32-NEXT:    srli a4, a3, 1
+; RV32-NEXT:    and a4, a4, t5
+; RV32-NEXT:    sub a3, a3, a4
+; RV32-NEXT:    and a4, a3, t4
+; RV32-NEXT:    srli a3, a3, 2
+; RV32-NEXT:    and a3, a3, t4
+; RV32-NEXT:    add a3, a4, a3
+; RV32-NEXT:    srli a4, a3, 4
+; RV32-NEXT:    add a3, a3, a4
+; RV32-NEXT:    and a3, a3, t3
+; RV32-NEXT:    slli a4, a3, 8
+; RV32-NEXT:    add a3, a3, a4
+; RV32-NEXT:    slli a4, a3, 16
+; RV32-NEXT:    add a3, a3, a4
+; RV32-NEXT:    srli a6, a3, 24
+; RV32-NEXT:  .LBB3_3: # %_udiv-special-cases
+; RV32-NEXT:    lw a7, 24(sp) # 4-byte Folded Reload
+; RV32-NEXT:    srli a3, a7, 1
+; RV32-NEXT:    slli a5, t2, 31
+; RV32-NEXT:    slli a7, a7, 31
+; RV32-NEXT:    lw a4, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    srli t0, a4, 1
+; RV32-NEXT:    lw a4, 32(sp) # 4-byte Folded Reload
+; RV32-NEXT:    slli a4, a4, 31
+; RV32-NEXT:    li s2, 64
+; RV32-NEXT:    bnez a4, .LBB3_5
+; RV32-NEXT:  # %bb.4: # %_udiv-special-cases
+; RV32-NEXT:    li t6, 64
+; RV32-NEXT:    j .LBB3_6
+; RV32-NEXT:  .LBB3_5:
+; RV32-NEXT:    srli t1, a4, 1
+; RV32-NEXT:    or t1, a4, t1
+; RV32-NEXT:    srli t6, t1, 2
+; RV32-NEXT:    or t1, t1, t6
+; RV32-NEXT:    srli t6, t1, 4
+; RV32-NEXT:    or t1, t1, t6
+; RV32-NEXT:    srli t6, t1, 8
+; RV32-NEXT:    or t1, t1, t6
+; RV32-NEXT:    srli t6, t1, 16
+; RV32-NEXT:    or t1, t1, t6
+; RV32-NEXT:    not t1, t1
+; RV32-NEXT:    srli t6, t1, 1
+; RV32-NEXT:    and t6, t6, t5
+; RV32-NEXT:    sub t1, t1, t6
+; RV32-NEXT:    and t6, t1, t4
+; RV32-NEXT:    srli t1, t1, 2
+; RV32-NEXT:    and t1, t1, t4
+; RV32-NEXT:    add t1, t6, t1
+; RV32-NEXT:    srli t6, t1, 4
+; RV32-NEXT:    add t1, t1, t6
+; RV32-NEXT:    and t1, t1, t3
+; RV32-NEXT:    slli t6, t1, 8
+; RV32-NEXT:    add t1, t1, t6
+; RV32-NEXT:    slli t6, t1, 16
+; RV32-NEXT:    add t1, t1, t6
+; RV32-NEXT:    srli t6, t1, 24
+; RV32-NEXT:  .LBB3_6: # %_udiv-special-cases
+; RV32-NEXT:    or t1, a5, a3
+; RV32-NEXT:    or a7, t0, a7
+; RV32-NEXT:    bnez a4, .LBB3_8
+; RV32-NEXT:  # %bb.7: # %_udiv-special-cases
+; RV32-NEXT:    li t6, 128
+; RV32-NEXT:  .LBB3_8: # %_udiv-special-cases
+; RV32-NEXT:    or a5, a7, t1
+; RV32-NEXT:    addi a4, a6, 64
+; RV32-NEXT:    addi a3, t6, 128
+; RV32-NEXT:    or a0, a0, t1
+; RV32-NEXT:    or a2, a2, a7
+; RV32-NEXT:    or s3, a2, a0
+; RV32-NEXT:    sltu s0, a3, t6
+; RV32-NEXT:    bnez s3, .LBB3_11
+; RV32-NEXT:  # %bb.9: # %_udiv-special-cases
+; RV32-NEXT:    mv t6, s0
+; RV32-NEXT:    beqz t1, .LBB3_12
+; RV32-NEXT:  .LBB3_10:
+; RV32-NEXT:    srli a0, t1, 1
+; RV32-NEXT:    or a0, t1, a0
+; RV32-NEXT:    srli a2, a0, 2
+; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    srli a2, a0, 4
+; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    srli a2, a0, 8
+; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    srli a2, a0, 16
+; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    not a0, a0
+; RV32-NEXT:    srli a2, a0, 1
+; RV32-NEXT:    and a2, a2, t5
+; RV32-NEXT:    sub a0, a0, a2
+; RV32-NEXT:    and a2, a0, t4
+; RV32-NEXT:    srli a0, a0, 2
+; RV32-NEXT:    and a0, a0, t4
+; RV32-NEXT:    add a0, a2, a0
+; RV32-NEXT:    srli a2, a0, 4
+; RV32-NEXT:    add a0, a0, a2
+; RV32-NEXT:    and a0, a0, t3
+; RV32-NEXT:    slli a2, a0, 8
+; RV32-NEXT:    add a0, a0, a2
+; RV32-NEXT:    slli a2, a0, 16
+; RV32-NEXT:    add a0, a0, a2
+; RV32-NEXT:    srli s1, a0, 24
+; RV32-NEXT:    beqz a5, .LBB3_13
+; RV32-NEXT:    j .LBB3_14
+; RV32-NEXT:  .LBB3_11:
+; RV32-NEXT:    snez a0, a5
+; RV32-NEXT:    sltu a2, a4, a6
+; RV32-NEXT:    addi a0, a0, -1
+; RV32-NEXT:    and t6, a0, a2
+; RV32-NEXT:    bnez t1, .LBB3_10
+; RV32-NEXT:  .LBB3_12: # %_udiv-special-cases
+; RV32-NEXT:    srli a0, a7, 1
+; RV32-NEXT:    or a0, a7, a0
+; RV32-NEXT:    srli a2, a0, 2
+; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    srli a2, a0, 4
+; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    srli a2, a0, 8
+; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    srli a2, a0, 16
+; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    not a0, a0
+; RV32-NEXT:    srli a2, a0, 1
+; RV32-NEXT:    and a2, a2, t5
+; RV32-NEXT:    sub a0, a0, a2
+; RV32-NEXT:    and a2, a0, t4
+; RV32-NEXT:    srli a0, a0, 2
+; RV32-NEXT:    and a0, a0, t4
+; RV32-NEXT:    add a0, a2, a0
+; RV32-NEXT:    srli a2, a0, 4
+; RV32-NEXT:    add a0, a0, a2
+; RV32-NEXT:    and a0, a0, t3
+; RV32-NEXT:    slli a2, a0, 8
+; RV32-NEXT:    add a0, a0, a2
+; RV32-NEXT:    slli a2, a0, 16
+; RV32-NEXT:    add a0, a0, a2
+; RV32-NEXT:    srli a0, a0, 24
+; RV32-NEXT:    addi s1, a0, 32
+; RV32-NEXT:    bnez a5, .LBB3_14
+; RV32-NEXT:  .LBB3_13: # %_udiv-special-cases
+; RV32-NEXT:    mv s1, a4
+; RV32-NEXT:  .LBB3_14: # %_udiv-special-cases
+; RV32-NEXT:    lw a7, 0(a1)
+; RV32-NEXT:    lw t0, 4(a1)
+; RV32-NEXT:    lw a6, 8(a1)
+; RV32-NEXT:    bnez s3, .LBB3_16
+; RV32-NEXT:  # %bb.15: # %_udiv-special-cases
+; RV32-NEXT:    mv s1, a3
+; RV32-NEXT:  .LBB3_16: # %_udiv-special-cases
+; RV32-NEXT:    lw t1, 12(a1)
+; RV32-NEXT:    lw a1, 16(a1)
+; RV32-NEXT:    slli a0, a6, 31
+; RV32-NEXT:    srli a2, t0, 1
+; RV32-NEXT:    or a0, a2, a0
+; RV32-NEXT:    slli a2, t0, 31
+; RV32-NEXT:    srli a3, a7, 1
+; RV32-NEXT:    or a2, a3, a2
+; RV32-NEXT:    bnez a0, .LBB3_18
+; RV32-NEXT:  # %bb.17: # %_udiv-special-cases
+; RV32-NEXT:    srli a3, a2, 1
+; RV32-NEXT:    or a3, a2, a3
+; RV32-NEXT:    srli a4, a3, 2
+; RV32-NEXT:    or a3, a3, a4
+; RV32-NEXT:    srli a4, a3, 4
+; RV32-NEXT:    or a3, a3, a4
+; RV32-NEXT:    srli a4, a3, 8
+; RV32-NEXT:    or a3, a3, a4
+; RV32-NEXT:    srli a4, a3, 16
+; RV32-NEXT:    or a3, a3, a4
+; RV32-NEXT:    not a3, a3
+; RV32-NEXT:    srli a4, a3, 1
+; RV32-NEXT:    and a4, a4, t5
+; RV32-NEXT:    sub a3, a3, a4
+; RV32-NEXT:    and a4, a3, t4
+; RV32-NEXT:    srli a3, a3, 2
+; RV32-NEXT:    and a3, a3, t4
+; RV32-NEXT:    add a3, a4, a3
+; RV32-NEXT:    srli a4, a3, 4
+; RV32-NEXT:    add a3, a3, a4
+; RV32-NEXT:    and a3, a3, t3
+; RV32-NEXT:    slli a4, a3, 8
+; RV32-NEXT:    add a3, a3, a4
+; RV32-NEXT:    slli a4, a3, 16
+; RV32-NEXT:    add a3, a3, a4
+; RV32-NEXT:    srli a3, a3, 24
+; RV32-NEXT:    addi s5, a3, 32
+; RV32-NEXT:    j .LBB3_19
+; RV32-NEXT:  .LBB3_18:
+; RV32-NEXT:    srli a3, a0, 1
+; RV32-NEXT:    or a3, a0, a3
+; RV32-NEXT:    srli a4, a3, 2
+; RV32-NEXT:    or a3, a3, a4
+; RV32-NEXT:    srli a4, a3, 4
+; RV32-NEXT:    or a3, a3, a4
+; RV32-NEXT:    srli a4, a3, 8
+; RV32-NEXT:    or a3, a3, a4
+; RV32-NEXT:    srli a4, a3, 16
+; RV32-NEXT:    or a3, a3, a4
+; RV32-NEXT:    not a3, a3
+; RV32-NEXT:    srli a4, a3, 1
+; RV32-NEXT:    and a4, a4, t5
+; RV32-NEXT:    sub a3, a3, a4
+; RV32-NEXT:    and a4, a3, t4
+; RV32-NEXT:    srli a3, a3, 2
+; RV32-NEXT:    and a3, a3, t4
+; RV32-NEXT:    add a3, a4, a3
+; RV32-NEXT:    srli a4, a3, 4
+; RV32-NEXT:    add a3, a3, a4
+; RV32-NEXT:    and a3, a3, t3
+; RV32-NEXT:    slli a4, a3, 8
+; RV32-NEXT:    add a3, a3, a4
+; RV32-NEXT:    slli a4, a3, 16
+; RV32-NEXT:    add a3, a3, a4
+; RV32-NEXT:    srli s5, a3, 24
+; RV32-NEXT:  .LBB3_19: # %_udiv-special-cases
+; RV32-NEXT:    srli a3, t1, 1
+; RV32-NEXT:    slli a4, a1, 31
+; RV32-NEXT:    slli a5, t1, 31
+; RV32-NEXT:    slli s4, a7, 31
+; RV32-NEXT:    srli s6, a6, 1
+; RV32-NEXT:    beqz s4, .LBB3_21
+; RV32-NEXT:  # %bb.20:
+; RV32-NEXT:    srli s2, s4, 1
+; RV32-NEXT:    or s2, s4, s2
+; RV32-NEXT:    srli s7, s2, 2
+; RV32-NEXT:    or s2, s2, s7
+; RV32-NEXT:    srli s7, s2, 4
+; RV32-NEXT:    or s2, s2, s7
+; RV32-NEXT:    srli s7, s2, 8
+; RV32-NEXT:    or s2, s2, s7
+; RV32-NEXT:    srli s7, s2, 16
+; RV32-NEXT:    or s2, s2, s7
+; RV32-NEXT:    not s2, s2
+; RV32-NEXT:    srli s7, s2, 1
+; RV32-NEXT:    and s7, s7, t5
+; RV32-NEXT:    sub s2, s2, s7
+; RV32-NEXT:    and s7, s2, t4
+; RV32-NEXT:    srli s2, s2, 2
+; RV32-NEXT:    and s2, s2, t4
+; RV32-NEXT:    add s2, s7, s2
+; RV32-NEXT:    srli s7, s2, 4
+; RV32-NEXT:    add s2, s2, s7
+; RV32-NEXT:    and s2, s2, t3
+; RV32-NEXT:    slli s7, s2, 8
+; RV32-NEXT:    add s2, s2, s7
+; RV32-NEXT:    slli s7, s2, 16
+; RV32-NEXT:    add s2, s2, s7
+; RV32-NEXT:    srli s2, s2, 24
+; RV32-NEXT:  .LBB3_21: # %_udiv-special-cases
+; RV32-NEXT:    or s7, a4, a3
+; RV32-NEXT:    or s6, s6, a5
+; RV32-NEXT:    bnez s4, .LBB3_23
+; RV32-NEXT:  # %bb.22: # %_udiv-special-cases
+; RV32-NEXT:    li s2, 128
+; RV32-NEXT:  .LBB3_23: # %_udiv-special-cases
+; RV32-NEXT:    or s4, s6, s7
+; RV32-NEXT:    addi a5, s5, 64
+; RV32-NEXT:    addi a3, s2, 128
+; RV32-NEXT:    or a0, a0, s7
+; RV32-NEXT:    or a4, a2, s6
+; RV32-NEXT:    or a4, a4, a0
+; RV32-NEXT:    sltu a0, a3, s2
+; RV32-NEXT:    bnez a4, .LBB3_26
+; RV32-NEXT:  # %bb.24: # %_udiv-special-cases
+; RV32-NEXT:    mv a2, a0
+; RV32-NEXT:    snez s2, s3
+; RV32-NEXT:    beqz s7, .LBB3_27
+; RV32-NEXT:  .LBB3_25:
+; RV32-NEXT:    srli s3, s7, 1
+; RV32-NEXT:    or s3, s7, s3
+; RV32-NEXT:    srli s5, s3, 2
+; RV32-NEXT:    or s3, s3, s5
+; RV32-NEXT:    srli s5, s3, 4
+; RV32-NEXT:    or s3, s3, s5
+; RV32-NEXT:    srli s5, s3, 8
+; RV32-NEXT:    or s3, s3, s5
+; RV32-NEXT:    srli s5, s3, 16
+; RV32-NEXT:    or s3, s3, s5
+; RV32-NEXT:    not s3, s3
+; RV32-NEXT:    srli s5, s3, 1
+; RV32-NEXT:    and t5, s5, t5
+; RV32-NEXT:    sub t5, s3, t5
+; RV32-NEXT:    and s3, t5, t4
+; RV32-NEXT:    srli t5, t5, 2
+; RV32-NEXT:    and t4, t5, t4
+; RV32-NEXT:    add t4, s3, t4
+; RV32-NEXT:    srli t5, t4, 4
+; RV32-NEXT:    add t4, t4, t5
+; RV32-NEXT:    and t3, t4, t3
+; RV32-NEXT:    slli t4, t3, 8
+; RV32-NEXT:    add t3, t3, t4
+; RV32-NEXT:    slli t4, t3, 16
+; RV32-NEXT:    add t3, t3, t4
+; RV32-NEXT:    srli t3, t3, 24
+; RV32-NEXT:    j .LBB3_28
+; RV32-NEXT:  .LBB3_26:
+; RV32-NEXT:    snez a2, s4
+; RV32-NEXT:    sltu s2, a5, s5
+; RV32-NEXT:    addi a2, a2, -1
+; RV32-NEXT:    and a2, a2, s2
+; RV32-NEXT:    snez s2, s3
+; RV32-NEXT:    bnez s7, .LBB3_25
+; RV32-NEXT:  .LBB3_27: # %_udiv-special-cases
+; RV32-NEXT:    srli s3, s6, 1
+; RV32-NEXT:    or s3, s6, s3
+; RV32-NEXT:    srli s5, s3, 2
+; RV32-NEXT:    or s3, s3, s5
+; RV32-NEXT:    srli s5, s3, 4
+; RV32-NEXT:    or s3, s3, s5
+; RV32-NEXT:    srli s5, s3, 8
+; RV32-NEXT:    or s3, s3, s5
+; RV32-NEXT:    srli s5, s3, 16
+; RV32-NEXT:    or s3, s3, s5
+; RV32-NEXT:    not s3, s3
+; RV32-NEXT:    srli s5, s3, 1
+; RV32-NEXT:    and t5, s5, t5
+; RV32-NEXT:    sub t5, s3, t5
+; RV32-NEXT:    and s3, t5, t4
+; RV32-NEXT:    srli t5, t5, 2
+; RV32-NEXT:    and t4, t5, t4
+; RV32-NEXT:    add t4, s3, t4
+; RV32-NEXT:    srli t5, t4, 4
+; RV32-NEXT:    add t4, t4, t5
+; RV32-NEXT:    and t3, t4, t3
+; RV32-NEXT:    slli t4, t3, 8
+; RV32-NEXT:    add t3, t3, t4
+; RV32-NEXT:    slli t4, t3, 16
+; RV32-NEXT:    add t3, t3, t4
+; RV32-NEXT:    srli t3, t3, 24
+; RV32-NEXT:    addi t3, t3, 32
+; RV32-NEXT:  .LBB3_28: # %_udiv-special-cases
+; RV32-NEXT:    xori t4, s0, 1
+; RV32-NEXT:    addi s2, s2, -1
+; RV32-NEXT:    bnez s4, .LBB3_30
+; RV32-NEXT:  # %bb.29: # %_udiv-special-cases
+; RV32-NEXT:    mv t3, a5
+; RV32-NEXT:  .LBB3_30: # %_udiv-special-cases
+; RV32-NEXT:    andi s11, a1, 1
+; RV32-NEXT:    andi s8, t2, 1
+; RV32-NEXT:    lw a1, 32(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw a5, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    or s9, a1, a5
+; RV32-NEXT:    or t2, a7, a6
+; RV32-NEXT:    neg a1, t4
+; RV32-NEXT:    and s0, s2, s0
+; RV32-NEXT:    bnez a4, .LBB3_32
+; RV32-NEXT:  # %bb.31: # %_udiv-special-cases
+; RV32-NEXT:    mv t3, a3
+; RV32-NEXT:  .LBB3_32: # %_udiv-special-cases
+; RV32-NEXT:    lw a3, 20(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw a5, 24(sp) # 4-byte Folded Reload
+; RV32-NEXT:    or s10, a3, a5
+; RV32-NEXT:    or a5, s9, s8
+; RV32-NEXT:    or t4, t0, t1
+; RV32-NEXT:    or t5, t2, s11
+; RV32-NEXT:    and a1, s0, a1
+; RV32-NEXT:    xori a3, a0, 1
+; RV32-NEXT:    snez a4, a4
+; RV32-NEXT:    neg a3, a3
+; RV32-NEXT:    addi a4, a4, -1
+; RV32-NEXT:    and a0, a4, a0
+; RV32-NEXT:    sltu a4, s1, t3
+; RV32-NEXT:    and t2, a0, a3
+; RV32-NEXT:    mv a3, a4
+; RV32-NEXT:    beq t6, a2, .LBB3_34
+; RV32-NEXT:  # %bb.33: # %_udiv-special-cases
+; RV32-NEXT:    sltu a3, t6, a2
+; RV32-NEXT:  .LBB3_34: # %_udiv-special-cases
+; RV32-NEXT:    or a0, a5, s10
+; RV32-NEXT:    or t5, t5, t4
+; RV32-NEXT:    sltu t4, a1, t2
+; RV32-NEXT:    mv s0, a3
+; RV32-NEXT:    beq a1, t2, .LBB3_36
+; RV32-NEXT:  # %bb.35: # %_udiv-special-cases
+; RV32-NEXT:    mv s0, t4
+; RV32-NEXT:  .LBB3_36: # %_udiv-special-cases
+; RV32-NEXT:    seqz a5, a0
+; RV32-NEXT:    seqz t5, t5
+; RV32-NEXT:    andi a0, s0, 1
+; RV32-NEXT:    sub a2, t6, a2
+; RV32-NEXT:    sub a1, a1, t2
+; RV32-NEXT:    sub t2, a2, a4
+; RV32-NEXT:    sltu a2, a1, a3
+; RV32-NEXT:    add a2, t4, a2
+; RV32-NEXT:    neg t4, a2
+; RV32-NEXT:    sub a4, a1, a3
+; RV32-NEXT:    or a1, a4, t4
+; RV32-NEXT:    sub a3, s1, t3
+; RV32-NEXT:    beqz a1, .LBB3_38
+; RV32-NEXT:  # %bb.37: # %_udiv-special-cases
+; RV32-NEXT:    snez a1, a1
+; RV32-NEXT:    or a2, a5, t5
+; RV32-NEXT:    bnez a0, .LBB3_39
+; RV32-NEXT:    j .LBB3_40
+; RV32-NEXT:  .LBB3_38:
+; RV32-NEXT:    snez a1, t2
+; RV32-NEXT:    sltiu a2, a3, 129
+; RV32-NEXT:    xori a2, a2, 1
+; RV32-NEXT:    or a1, a2, a1
+; RV32-NEXT:    or a2, a5, t5
+; RV32-NEXT:    beqz a0, .LBB3_40
+; RV32-NEXT:  .LBB3_39: # %_udiv-special-cases
+; RV32-NEXT:    mv a1, a0
+; RV32-NEXT:  .LBB3_40: # %_udiv-special-cases
+; RV32-NEXT:    or t6, a2, a1
+; RV32-NEXT:    addi a1, t6, -1
+; RV32-NEXT:    and a2, s11, a1
+; RV32-NEXT:    and a5, a1, t1
+; RV32-NEXT:    and t3, a1, a6
+; RV32-NEXT:    and t5, a1, t0
+; RV32-NEXT:    and a1, a1, a7
+; RV32-NEXT:    bnez t6, .LBB3_57
+; RV32-NEXT:  # %bb.41: # %_udiv-special-cases
+; RV32-NEXT:    or t6, t2, t4
+; RV32-NEXT:    xori s0, a3, 128
+; RV32-NEXT:    or s0, s0, a0
+; RV32-NEXT:    or s0, s0, a4
+; RV32-NEXT:    or t6, s0, t6
+; RV32-NEXT:    beqz t6, .LBB3_57
+; RV32-NEXT:  # %bb.42: # %udiv-bb1
+; RV32-NEXT:    sw ra, 8(sp) # 4-byte Folded Spill
+; RV32-NEXT:    addi a1, a3, 1
+; RV32-NEXT:    sw zero, 136(sp)
+; RV32-NEXT:    sw zero, 140(sp)
+; RV32-NEXT:    sw zero, 144(sp)
+; RV32-NEXT:    sw zero, 148(sp)
+; RV32-NEXT:    sw zero, 120(sp)
+; RV32-NEXT:    sw zero, 124(sp)
+; RV32-NEXT:    sw zero, 128(sp)
+; RV32-NEXT:    sw zero, 132(sp)
+; RV32-NEXT:    sw a7, 152(sp)
+; RV32-NEXT:    sw t0, 156(sp)
+; RV32-NEXT:    sw a6, 160(sp)
+; RV32-NEXT:    sw t1, 164(sp)
+; RV32-NEXT:    sw s11, 168(sp)
+; RV32-NEXT:    li a5, 128
+; RV32-NEXT:    addi t3, sp, 152
+; RV32-NEXT:    neg a2, a3
+; RV32-NEXT:    seqz t5, a1
+; RV32-NEXT:    sub a5, a5, a3
+; RV32-NEXT:    add t2, t2, t5
+; RV32-NEXT:    andi a3, a5, 31
+; RV32-NEXT:    srli t5, a5, 3
+; RV32-NEXT:    or t6, a1, t2
+; RV32-NEXT:    xori a5, a3, 31
+; RV32-NEXT:    andi a3, t5, 28
+; RV32-NEXT:    seqz t6, t6
+; RV32-NEXT:    sub ra, t3, a3
+; RV32-NEXT:    add t6, a4, t6
+; RV32-NEXT:    lw t3, 0(ra)
+; RV32-NEXT:    lw s0, 4(ra)
+; RV32-NEXT:    lw s1, 8(ra)
+; RV32-NEXT:    lw a3, 12(ra)
+; RV32-NEXT:    sltu a4, t6, a4
+; RV32-NEXT:    or t5, a1, t6
+; RV32-NEXT:    add t4, t4, a4
+; RV32-NEXT:    or a4, t2, t4
+; RV32-NEXT:    or a4, t5, a4
+; RV32-NEXT:    srli t5, s1, 1
+; RV32-NEXT:    seqz s2, a4
+; RV32-NEXT:    add a0, a0, s2
+; RV32-NEXT:    sll s2, a3, a2
+; RV32-NEXT:    srl t5, t5, a5
+; RV32-NEXT:    or t5, s2, t5
+; RV32-NEXT:    srli s2, s0, 1
+; RV32-NEXT:    sll s1, s1, a2
+; RV32-NEXT:    srl s2, s2, a5
+; RV32-NEXT:    or s2, s1, s2
+; RV32-NEXT:    srli s1, t3, 1
+; RV32-NEXT:    sll s0, s0, a2
+; RV32-NEXT:    srl s1, s1, a5
+; RV32-NEXT:    andi s3, a0, 1
+; RV32-NEXT:    or s1, s0, s1
+; RV32-NEXT:    or a0, a4, s3
+; RV32-NEXT:    sll t3, t3, a2
+; RV32-NEXT:    beqz a0, .LBB3_55
+; RV32-NEXT:  # %bb.43: # %udiv-preheader
+; RV32-NEXT:    sw zero, 52(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw zero, 48(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw zero, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw zero, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT:    li s7, 0
+; RV32-NEXT:    srli a3, a3, 1
+; RV32-NEXT:    lw a0, 16(ra)
+; RV32-NEXT:    sw zero, 104(sp)
+; RV32-NEXT:    sw zero, 108(sp)
+; RV32-NEXT:    sw zero, 112(sp)
+; RV32-NEXT:    sw zero, 116(sp)
+; RV32-NEXT:    sw zero, 88(sp)
+; RV32-NEXT:    sw zero, 92(sp)
+; RV32-NEXT:    sw zero, 96(sp)
+; RV32-NEXT:    sw zero, 100(sp)
+; RV32-NEXT:    sw s11, 72(sp)
+; RV32-NEXT:    sw zero, 76(sp)
+; RV32-NEXT:    sw zero, 80(sp)
+; RV32-NEXT:    sw zero, 84(sp)
+; RV32-NEXT:    sw a7, 56(sp)
+; RV32-NEXT:    sw t0, 60(sp)
+; RV32-NEXT:    sw a6, 64(sp)
+; RV32-NEXT:    sw t1, 68(sp)
+; RV32-NEXT:    srli a4, a1, 3
+; RV32-NEXT:    addi a6, sp, 56
+; RV32-NEXT:    andi a7, a1, 31
+; RV32-NEXT:    or t0, s9, s10
+; RV32-NEXT:    srl a3, a3, a5
+; RV32-NEXT:    andi a4, a4, 28
+; RV32-NEXT:    xori a5, a7, 31
+; RV32-NEXT:    snez a7, t0
+; RV32-NEXT:    add a4, a6, a4
+; RV32-NEXT:    add a7, s8, a7
+; RV32-NEXT:    lw a6, 16(a4)
+; RV32-NEXT:    lw t0, 0(a4)
+; RV32-NEXT:    lw t1, 4(a4)
+; RV32-NEXT:    lw s0, 8(a4)
+; RV32-NEXT:    lw a4, 12(a4)
+; RV32-NEXT:    sll a0, a0, a2
+; RV32-NEXT:    or a3, a0, a3
+; RV32-NEXT:    slli a6, a6, 1
+; RV32-NEXT:    slli a0, a4, 1
+; RV32-NEXT:    slli a2, s0, 1
+; RV32-NEXT:    slli s4, t1, 1
+; RV32-NEXT:    sll a6, a6, a5
+; RV32-NEXT:    sll a0, a0, a5
+; RV32-NEXT:    sll s8, a2, a5
+; RV32-NEXT:    sll s4, s4, a5
+; RV32-NEXT:    srl a2, a4, a1
+; RV32-NEXT:    or ra, a2, a6
+; RV32-NEXT:    lw a6, 32(sp) # 4-byte Folded Reload
+; RV32-NEXT:    seqz a4, a6
+; RV32-NEXT:    srl a2, s0, a1
+; RV32-NEXT:    or a2, a2, a0
+; RV32-NEXT:    lw a5, 20(sp) # 4-byte Folded Reload
+; RV32-NEXT:    or a0, a6, a5
+; RV32-NEXT:    sub s5, a5, a4
+; RV32-NEXT:    seqz a4, a0
+; RV32-NEXT:    srl a0, t1, a1
+; RV32-NEXT:    or a0, a0, s8
+; RV32-NEXT:    lw a5, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    sub t1, a5, a4
+; RV32-NEXT:    sw t1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sltu a4, a5, a4
+; RV32-NEXT:    addi a7, a7, 1
+; RV32-NEXT:    lw a5, 24(sp) # 4-byte Folded Reload
+; RV32-NEXT:    sub s6, a5, a4
+; RV32-NEXT:    andi a4, a7, 1
+; RV32-NEXT:    sw a4, 16(sp) # 4-byte Folded Spill
+; RV32-NEXT:    andi a5, a3, 1
+; RV32-NEXT:    srl a3, t0, a1
+; RV32-NEXT:    or a4, a3, s4
+; RV32-NEXT:    addi a6, a6, -1
+; RV32-NEXT:    sw a6, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    li s11, 0
+; RV32-NEXT:    li s10, 0
+; RV32-NEXT:    j .LBB3_45
+; RV32-NEXT:  .LBB3_44: # %udiv-do-while
+; RV32-NEXT:    # in Loop: Header=BB3_45 Depth=1
+; RV32-NEXT:    lw s0, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    and s0, a5, s0
+; RV32-NEXT:    xor s8, t1, a7
+; RV32-NEXT:    xor s9, a2, s0
+; RV32-NEXT:    or s8, s9, s8
+; RV32-NEXT:    li s9, 0
+; RV32-NEXT:    li s8, 0
+; RV32-NEXT:    sltu s4, a2, s0
+; RV32-NEXT:    sub s0, a2, s0
+; RV32-NEXT:    sub a7, t1, a7
+; RV32-NEXT:    srli a2, s2, 31
+; RV32-NEXT:    sub a0, a0, t0
+; RV32-NEXT:    slli t0, t5, 1
+; RV32-NEXT:    or t0, t0, a2
+; RV32-NEXT:    srli a2, s1, 31
+; RV32-NEXT:    slli s2, s2, 1
+; RV32-NEXT:    or t1, s2, a2
+; RV32-NEXT:    srli a2, t3, 31
+; RV32-NEXT:    slli s1, s1, 1
+; RV32-NEXT:    or s1, s1, a2
+; RV32-NEXT:    slli t3, t3, 1
+; RV32-NEXT:    lw a2, 52(sp) # 4-byte Folded Reload
+; RV32-NEXT:    or t3, a2, t3
+; RV32-NEXT:    srli a2, t5, 31
+; RV32-NEXT:    or s7, s7, a2
+; RV32-NEXT:    sub a2, s0, ra
+; RV32-NEXT:    sltu s0, s0, ra
+; RV32-NEXT:    or t5, a1, t6
+; RV32-NEXT:    sub a7, a7, s4
+; RV32-NEXT:    or s2, t2, t4
+; RV32-NEXT:    sub a0, a0, a6
+; RV32-NEXT:    or a6, a1, t2
+; RV32-NEXT:    or s4, t5, s2
+; RV32-NEXT:    seqz t5, a1
+; RV32-NEXT:    addi a1, a1, -1
+; RV32-NEXT:    andi a5, a5, 1
+; RV32-NEXT:    sw a5, 52(sp) # 4-byte Folded Spill
+; RV32-NEXT:    seqz a6, a6
+; RV32-NEXT:    sub t2, t2, t5
+; RV32-NEXT:    lw a5, 48(sp) # 4-byte Folded Reload
+; RV32-NEXT:    or s1, a5, s1
+; RV32-NEXT:    lw a5, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT:    or s2, a5, t1
+; RV32-NEXT:    lw a5, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT:    or t5, a5, t0
+; RV32-NEXT:    andi a5, s7, 1
+; RV32-NEXT:    sub ra, a7, s0
+; RV32-NEXT:    snez a7, s4
+; RV32-NEXT:    sltu t0, t6, a6
+; RV32-NEXT:    sub t6, t6, a6
+; RV32-NEXT:    add a7, s3, a7
+; RV32-NEXT:    sub t4, t4, t0
+; RV32-NEXT:    or a6, a1, t6
+; RV32-NEXT:    addi a7, a7, 1
+; RV32-NEXT:    or t0, t2, t4
+; RV32-NEXT:    andi s3, a7, 1
+; RV32-NEXT:    or a6, a6, t0
+; RV32-NEXT:    or a6, a6, s3
+; RV32-NEXT:    sub a4, a4, a3
+; RV32-NEXT:    sw zero, 48(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw zero, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw zero, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT:    li s7, 0
+; RV32-NEXT:    beqz a6, .LBB3_56
+; RV32-NEXT:  .LBB3_45: # %udiv-do-while
+; RV32-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32-NEXT:    srli a3, a2, 31
+; RV32-NEXT:    slli a6, ra, 1
+; RV32-NEXT:    or t1, a6, a3
+; RV32-NEXT:    srli a3, a0, 31
+; RV32-NEXT:    slli a2, a2, 1
+; RV32-NEXT:    or a2, a2, a3
+; RV32-NEXT:    beq s6, t1, .LBB3_47
+; RV32-NEXT:  # %bb.46: # %udiv-do-while
+; RV32-NEXT:    # in Loop: Header=BB3_45 Depth=1
+; RV32-NEXT:    sltu a3, s6, t1
+; RV32-NEXT:    j .LBB3_48
+; RV32-NEXT:  .LBB3_47: # in Loop: Header=BB3_45 Depth=1
+; RV32-NEXT:    lw a3, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT:    sltu a3, a3, a2
+; RV32-NEXT:  .LBB3_48: # %udiv-do-while
+; RV32-NEXT:    # in Loop: Header=BB3_45 Depth=1
+; RV32-NEXT:    srli a6, a4, 31
+; RV32-NEXT:    slli a0, a0, 1
+; RV32-NEXT:    slli a4, a4, 1
+; RV32-NEXT:    or a0, a0, a6
+; RV32-NEXT:    andi a5, a5, 1
+; RV32-NEXT:    or a4, a4, a5
+; RV32-NEXT:    beq s5, a0, .LBB3_50
+; RV32-NEXT:  # %bb.49: # %udiv-do-while
+; RV32-NEXT:    # in Loop: Header=BB3_45 Depth=1
+; RV32-NEXT:    sltu a5, s5, a0
+; RV32-NEXT:    j .LBB3_51
+; RV32-NEXT:  .LBB3_50: # in Loop: Header=BB3_45 Depth=1
+; RV32-NEXT:    lw a5, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    sltu a5, a5, a4
+; RV32-NEXT:  .LBB3_51: # %udiv-do-while
+; RV32-NEXT:    # in Loop: Header=BB3_45 Depth=1
+; RV32-NEXT:    lw a6, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT:    xor a6, a6, a2
+; RV32-NEXT:    xor a7, s6, t1
+; RV32-NEXT:    or a6, a6, a7
+; RV32-NEXT:    beqz a6, .LBB3_53
+; RV32-NEXT:  # %bb.52: # %udiv-do-while
+; RV32-NEXT:    # in Loop: Header=BB3_45 Depth=1
+; RV32-NEXT:    mv a5, a3
+; RV32-NEXT:  .LBB3_53: # %udiv-do-while
+; RV32-NEXT:    # in Loop: Header=BB3_45 Depth=1
+; RV32-NEXT:    srli a3, ra, 31
+; RV32-NEXT:    lw a6, 16(sp) # 4-byte Folded Reload
+; RV32-NEXT:    sub a3, a6, a3
+; RV32-NEXT:    sub a3, a3, a5
+; RV32-NEXT:    slli a3, a3, 31
+; RV32-NEXT:    srai a5, a3, 31
+; RV32-NEXT:    lw a3, 24(sp) # 4-byte Folded Reload
+; RV32-NEXT:    and a7, a5, a3
+; RV32-NEXT:    lw a3, 32(sp) # 4-byte Folded Reload
+; RV32-NEXT:    and a3, a5, a3
+; RV32-NEXT:    lw a6, 20(sp) # 4-byte Folded Reload
+; RV32-NEXT:    and t0, a5, a6
+; RV32-NEXT:    sltu a6, a4, a3
+; RV32-NEXT:    mv ra, a6
+; RV32-NEXT:    beq a0, t0, .LBB3_44
+; RV32-NEXT:  # %bb.54: # %udiv-do-while
+; RV32-NEXT:    # in Loop: Header=BB3_45 Depth=1
+; RV32-NEXT:    sltu ra, a0, t0
+; RV32-NEXT:    j .LBB3_44
+; RV32-NEXT:  .LBB3_55:
+; RV32-NEXT:    sw zero, 52(sp) # 4-byte Folded Spill
+; RV32-NEXT:    li s11, 0
+; RV32-NEXT:    li s9, 0
+; RV32-NEXT:    li s10, 0
+; RV32-NEXT:    li s8, 0
+; RV32-NEXT:  .LBB3_56: # %udiv-loop-exit
+; RV32-NEXT:    srli a0, s2, 31
+; RV32-NEXT:    slli a1, t5, 1
+; RV32-NEXT:    or a0, a1, a0
+; RV32-NEXT:    srli a1, s1, 31
+; RV32-NEXT:    slli s2, s2, 1
+; RV32-NEXT:    or a2, s2, a1
+; RV32-NEXT:    srli a3, t3, 31
+; RV32-NEXT:    slli s1, s1, 1
+; RV32-NEXT:    srli a4, t5, 31
+; RV32-NEXT:    slli t3, t3, 1
+; RV32-NEXT:    lw a1, 52(sp) # 4-byte Folded Reload
+; RV32-NEXT:    or a1, a1, t3
+; RV32-NEXT:    or a3, s11, a3
+; RV32-NEXT:    or a4, s8, a4
+; RV32-NEXT:    or t5, a3, s1
+; RV32-NEXT:    or t3, s9, a2
+; RV32-NEXT:    or a5, s10, a0
+; RV32-NEXT:    andi a2, a4, 1
+; RV32-NEXT:    lw ra, 8(sp) # 4-byte Folded Reload
+; RV32-NEXT:  .LBB3_57: # %udiv-end
+; RV32-NEXT:    sw a1, 0(ra)
+; RV32-NEXT:    sw t5, 4(ra)
+; RV32-NEXT:    sw t3, 8(ra)
+; RV32-NEXT:    sw a5, 12(ra)
+; RV32-NEXT:    andi a2, a2, 1
+; RV32-NEXT:    sb a2, 16(ra)
+; RV32-NEXT:    lw ra, 236(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 232(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s1, 228(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s2, 224(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s3, 220(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s4, 216(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s5, 212(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s6, 208(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s7, 204(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s8, 200(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s9, 196(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s10, 192(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s11, 188(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 240
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: udiv_i129:
+; RV64:       # %bb.0: # %_udiv-special-cases
+; RV64-NEXT:    ld a3, 0(a2)
+; RV64-NEXT:    ld a4, 8(a2)
+; RV64-NEXT:    ld t1, 16(a2)
+; RV64-NEXT:    lui a2, 349525
+; RV64-NEXT:    lui a5, 209715
+; RV64-NEXT:    lui a6, 61681
+; RV64-NEXT:    addi t0, a2, 1365
+; RV64-NEXT:    addi a7, a5, 819
+; RV64-NEXT:    addi a6, a6, -241
+; RV64-NEXT:    slli a2, t0, 32
+; RV64-NEXT:    slli a5, a7, 32
+; RV64-NEXT:    slli t2, a6, 32
+; RV64-NEXT:    add t0, t0, a2
+; RV64-NEXT:    add a7, a7, a5
+; RV64-NEXT:    add a6, a6, t2
+; RV64-NEXT:    srli a2, a4, 1
+; RV64-NEXT:    slli a5, t1, 63
+; RV64-NEXT:    slli t2, a4, 63
+; RV64-NEXT:    or t3, a5, a2
+; RV64-NEXT:    srli a2, a3, 1
+; RV64-NEXT:    or t4, a2, t2
+; RV64-NEXT:    bnez t3, .LBB3_2
+; RV64-NEXT:  # %bb.1: # %_udiv-special-cases
+; RV64-NEXT:    srli a2, t4, 1
+; RV64-NEXT:    or a2, t4, a2
+; RV64-NEXT:    srli a5, a2, 2
+; RV64-NEXT:    or a2, a2, a5
+; RV64-NEXT:    srli a5, a2, 4
+; RV64-NEXT:    or a2, a2, a5
+; RV64-NEXT:    srli a5, a2, 8
+; RV64-NEXT:    or a2, a2, a5
+; RV64-NEXT:    srli a5, a2, 16
+; RV64-NEXT:    or a2, a2, a5
+; RV64-NEXT:    srli a5, a2, 32
+; RV64-NEXT:    or a2, a2, a5
+; RV64-NEXT:    not a2, a2
+; RV64-NEXT:    srli a5, a2, 1
+; RV64-NEXT:    and a5, a5, t0
+; RV64-NEXT:    sub a2, a2, a5
+; RV64-NEXT:    and a5, a2, a7
+; RV64-NEXT:    srli a2, a2, 2
+; RV64-NEXT:    and a2, a2, a7
+; RV64-NEXT:    add a2, a5, a2
+; RV64-NEXT:    srli a5, a2, 4
+; RV64-NEXT:    add a2, a2, a5
+; RV64-NEXT:    and a2, a2, a6
+; RV64-NEXT:    slli a5, a2, 8
+; RV64-NEXT:    add a2, a2, a5
+; RV64-NEXT:    slli a5, a2, 16
+; RV64-NEXT:    add a2, a2, a5
+; RV64-NEXT:    slli a5, a2, 32
+; RV64-NEXT:    add a2, a2, a5
+; RV64-NEXT:    srli a2, a2, 56
+; RV64-NEXT:    addi t2, a2, 64
+; RV64-NEXT:    j .LBB3_3
+; RV64-NEXT:  .LBB3_2:
+; RV64-NEXT:    srli a2, t3, 1
+; RV64-NEXT:    or a2, t3, a2
+; RV64-NEXT:    srli a5, a2, 2
+; RV64-NEXT:    or a2, a2, a5
+; RV64-NEXT:    srli a5, a2, 4
+; RV64-NEXT:    or a2, a2, a5
+; RV64-NEXT:    srli a5, a2, 8
+; RV64-NEXT:    or a2, a2, a5
+; RV64-NEXT:    srli a5, a2, 16
+; RV64-NEXT:    or a2, a2, a5
+; RV64-NEXT:    srli a5, a2, 32
+; RV64-NEXT:    or a2, a2, a5
+; RV64-NEXT:    not a2, a2
+; RV64-NEXT:    srli a5, a2, 1
+; RV64-NEXT:    and a5, a5, t0
+; RV64-NEXT:    sub a2, a2, a5
+; RV64-NEXT:    and a5, a2, a7
+; RV64-NEXT:    srli a2, a2, 2
+; RV64-NEXT:    and a2, a2, a7
+; RV64-NEXT:    add a2, a5, a2
+; RV64-NEXT:    srli a5, a2, 4
+; RV64-NEXT:    add a2, a2, a5
+; RV64-NEXT:    and a2, a2, a6
+; RV64-NEXT:    slli a5, a2, 8
+; RV64-NEXT:    add a2, a2, a5
+; RV64-NEXT:    slli a5, a2, 16
+; RV64-NEXT:    add a2, a2, a5
+; RV64-NEXT:    slli a5, a2, 32
+; RV64-NEXT:    add a2, a2, a5
+; RV64-NEXT:    srli t2, a2, 56
+; RV64-NEXT:  .LBB3_3: # %_udiv-special-cases
+; RV64-NEXT:    addi sp, sp, -192
+; RV64-NEXT:    sd s0, 184(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s1, 176(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s2, 168(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s3, 160(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s4, 152(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s5, 144(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s6, 136(sp) # 8-byte Folded Spill
+; RV64-NEXT:    slli a2, a3, 63
+; RV64-NEXT:    li t5, 128
+; RV64-NEXT:    bnez a2, .LBB3_5
+; RV64-NEXT:  # %bb.4: # %_udiv-special-cases
+; RV64-NEXT:    li s0, 128
+; RV64-NEXT:    j .LBB3_6
+; RV64-NEXT:  .LBB3_5:
+; RV64-NEXT:    srli a5, a2, 1
+; RV64-NEXT:    or a2, a2, a5
+; RV64-NEXT:    srli a5, a2, 2
+; RV64-NEXT:    or a2, a2, a5
+; RV64-NEXT:    srli a5, a2, 4
+; RV64-NEXT:    or a2, a2, a5
+; RV64-NEXT:    srli a5, a2, 8
+; RV64-NEXT:    or a2, a2, a5
+; RV64-NEXT:    srli a5, a2, 16
+; RV64-NEXT:    or a2, a2, a5
+; RV64-NEXT:    srli a5, a2, 32
+; RV64-NEXT:    or a2, a2, a5
+; RV64-NEXT:    not a2, a2
+; RV64-NEXT:    srli a5, a2, 1
+; RV64-NEXT:    and a5, a5, t0
+; RV64-NEXT:    sub a2, a2, a5
+; RV64-NEXT:    and a5, a2, a7
+; RV64-NEXT:    srli a2, a2, 2
+; RV64-NEXT:    and a2, a2, a7
+; RV64-NEXT:    add a2, a5, a2
+; RV64-NEXT:    srli a5, a2, 4
+; RV64-NEXT:    add a2, a2, a5
+; RV64-NEXT:    and a2, a2, a6
+; RV64-NEXT:    slli a5, a2, 8
+; RV64-NEXT:    add a2, a2, a5
+; RV64-NEXT:    slli a5, a2, 16
+; RV64-NEXT:    add a2, a2, a5
+; RV64-NEXT:    slli a5, a2, 32
+; RV64-NEXT:    add a2, a2, a5
+; RV64-NEXT:    srli s0, a2, 56
+; RV64-NEXT:  .LBB3_6: # %_udiv-special-cases
+; RV64-NEXT:    ld a5, 0(a1)
+; RV64-NEXT:    ld a2, 8(a1)
+; RV64-NEXT:    ld s2, 16(a1)
+; RV64-NEXT:    or a1, t4, t3
+; RV64-NEXT:    addi s1, s0, 128
+; RV64-NEXT:    bnez a1, .LBB3_8
+; RV64-NEXT:  # %bb.7: # %_udiv-special-cases
+; RV64-NEXT:    mv t2, s1
+; RV64-NEXT:  .LBB3_8: # %_udiv-special-cases
+; RV64-NEXT:    snez s3, a1
+; RV64-NEXT:    srli a1, a2, 1
+; RV64-NEXT:    slli t3, s2, 63
+; RV64-NEXT:    slli t4, a2, 63
+; RV64-NEXT:    or a1, t3, a1
+; RV64-NEXT:    srli t3, a5, 1
+; RV64-NEXT:    or t6, t3, t4
+; RV64-NEXT:    bnez a1, .LBB3_10
+; RV64-NEXT:  # %bb.9: # %_udiv-special-cases
+; RV64-NEXT:    srli t3, t6, 1
+; RV64-NEXT:    or t3, t6, t3
+; RV64-NEXT:    srli t4, t3, 2
+; RV64-NEXT:    or t3, t3, t4
+; RV64-NEXT:    srli t4, t3, 4
+; RV64-NEXT:    or t3, t3, t4
+; RV64-NEXT:    srli t4, t3, 8
+; RV64-NEXT:    or t3, t3, t4
+; RV64-NEXT:    srli t4, t3, 16
+; RV64-NEXT:    or t3, t3, t4
+; RV64-NEXT:    srli t4, t3, 32
+; RV64-NEXT:    or t3, t3, t4
+; RV64-NEXT:    not t3, t3
+; RV64-NEXT:    srli t4, t3, 1
+; RV64-NEXT:    and t4, t4, t0
+; RV64-NEXT:    sub t3, t3, t4
+; RV64-NEXT:    and t4, t3, a7
+; RV64-NEXT:    srli t3, t3, 2
+; RV64-NEXT:    and t3, t3, a7
+; RV64-NEXT:    add t3, t4, t3
+; RV64-NEXT:    srli t4, t3, 4
+; RV64-NEXT:    add t3, t3, t4
+; RV64-NEXT:    and t3, t3, a6
+; RV64-NEXT:    slli t4, t3, 8
+; RV64-NEXT:    add t3, t3, t4
+; RV64-NEXT:    slli t4, t3, 16
+; RV64-NEXT:    add t3, t3, t4
+; RV64-NEXT:    slli t4, t3, 32
+; RV64-NEXT:    add t3, t3, t4
+; RV64-NEXT:    srli t3, t3, 56
+; RV64-NEXT:    addi s4, t3, 64
+; RV64-NEXT:    j .LBB3_11
+; RV64-NEXT:  .LBB3_10:
+; RV64-NEXT:    srli t3, a1, 1
+; RV64-NEXT:    or t3, a1, t3
+; RV64-NEXT:    srli t4, t3, 2
+; RV64-NEXT:    or t3, t3, t4
+; RV64-NEXT:    srli t4, t3, 4
+; RV64-NEXT:    or t3, t3, t4
+; RV64-NEXT:    srli t4, t3, 8
+; RV64-NEXT:    or t3, t3, t4
+; RV64-NEXT:    srli t4, t3, 16
+; RV64-NEXT:    or t3, t3, t4
+; RV64-NEXT:    srli t4, t3, 32
+; RV64-NEXT:    or t3, t3, t4
+; RV64-NEXT:    not t3, t3
+; RV64-NEXT:    srli t4, t3, 1
+; RV64-NEXT:    and t4, t4, t0
+; RV64-NEXT:    sub t3, t3, t4
+; RV64-NEXT:    and t4, t3, a7
+; RV64-NEXT:    srli t3, t3, 2
+; RV64-NEXT:    and t3, t3, a7
+; RV64-NEXT:    add t3, t4, t3
+; RV64-NEXT:    srli t4, t3, 4
+; RV64-NEXT:    add t3, t3, t4
+; RV64-NEXT:    and t3, t3, a6
+; RV64-NEXT:    slli t4, t3, 8
+; RV64-NEXT:    add t3, t3, t4
+; RV64-NEXT:    slli t4, t3, 16
+; RV64-NEXT:    add t3, t3, t4
+; RV64-NEXT:    slli t4, t3, 32
+; RV64-NEXT:    add t3, t3, t4
+; RV64-NEXT:    srli s4, t3, 56
+; RV64-NEXT:  .LBB3_11: # %_udiv-special-cases
+; RV64-NEXT:    andi t4, s2, 1
+; RV64-NEXT:    andi t1, t1, 1
+; RV64-NEXT:    or t3, a3, a4
+; RV64-NEXT:    or s2, a5, a2
+; RV64-NEXT:    sltu s0, s1, s0
+; RV64-NEXT:    slli s1, a5, 63
+; RV64-NEXT:    addi s3, s3, -1
+; RV64-NEXT:    beqz s1, .LBB3_13
+; RV64-NEXT:  # %bb.12:
+; RV64-NEXT:    srli t5, s1, 1
+; RV64-NEXT:    or t5, s1, t5
+; RV64-NEXT:    srli s1, t5, 2
+; RV64-NEXT:    or t5, t5, s1
+; RV64-NEXT:    srli s1, t5, 4
+; RV64-NEXT:    or t5, t5, s1
+; RV64-NEXT:    srli s1, t5, 8
+; RV64-NEXT:    or t5, t5, s1
+; RV64-NEXT:    srli s1, t5, 16
+; RV64-NEXT:    or t5, t5, s1
+; RV64-NEXT:    srli s1, t5, 32
+; RV64-NEXT:    or t5, t5, s1
+; RV64-NEXT:    not t5, t5
+; RV64-NEXT:    srli s1, t5, 1
+; RV64-NEXT:    and t0, s1, t0
+; RV64-NEXT:    sub t0, t5, t0
+; RV64-NEXT:    and t5, t0, a7
+; RV64-NEXT:    srli t0, t0, 2
+; RV64-NEXT:    and a7, t0, a7
+; RV64-NEXT:    add a7, t5, a7
+; RV64-NEXT:    srli t0, a7, 4
+; RV64-NEXT:    add a7, a7, t0
+; RV64-NEXT:    and a6, a7, a6
+; RV64-NEXT:    slli a7, a6, 8
+; RV64-NEXT:    add a6, a6, a7
+; RV64-NEXT:    slli a7, a6, 16
+; RV64-NEXT:    add a6, a6, a7
+; RV64-NEXT:    slli a7, a6, 32
+; RV64-NEXT:    add a6, a6, a7
+; RV64-NEXT:    srli t5, a6, 56
+; RV64-NEXT:  .LBB3_13: # %_udiv-special-cases
+; RV64-NEXT:    or t0, t3, t1
+; RV64-NEXT:    or a6, s2, t4
+; RV64-NEXT:    and a7, s3, s0
+; RV64-NEXT:    or t6, t6, a1
+; RV64-NEXT:    addi s0, t5, 128
+; RV64-NEXT:    bnez t6, .LBB3_15
+; RV64-NEXT:  # %bb.14: # %_udiv-special-cases
+; RV64-NEXT:    mv s4, s0
+; RV64-NEXT:  .LBB3_15: # %_udiv-special-cases
+; RV64-NEXT:    seqz a1, t0
+; RV64-NEXT:    sltu t0, s0, t5
+; RV64-NEXT:    snez t5, t6
+; RV64-NEXT:    addi t5, t5, -1
+; RV64-NEXT:    and t0, t5, t0
+; RV64-NEXT:    sltu t5, t2, s4
+; RV64-NEXT:    seqz a6, a6
+; RV64-NEXT:    mv t6, t5
+; RV64-NEXT:    beq a7, t0, .LBB3_17
+; RV64-NEXT:  # %bb.16: # %_udiv-special-cases
+; RV64-NEXT:    sltu t6, a7, t0
+; RV64-NEXT:  .LBB3_17: # %_udiv-special-cases
+; RV64-NEXT:    or a1, a1, a6
+; RV64-NEXT:    andi a6, t6, 1
+; RV64-NEXT:    sub a7, a7, t0
+; RV64-NEXT:    sub t5, a7, t5
+; RV64-NEXT:    sub a7, t2, s4
+; RV64-NEXT:    beqz a6, .LBB3_19
+; RV64-NEXT:  # %bb.18: # %_udiv-special-cases
+; RV64-NEXT:    mv t0, a6
+; RV64-NEXT:    j .LBB3_20
+; RV64-NEXT:  .LBB3_19:
+; RV64-NEXT:    sltiu t0, a7, 129
+; RV64-NEXT:    xori t0, t0, 1
+; RV64-NEXT:    snez t2, t5
+; RV64-NEXT:    or t0, t0, t2
+; RV64-NEXT:  .LBB3_20: # %_udiv-special-cases
+; RV64-NEXT:    or t6, a1, t0
+; RV64-NEXT:    addi a1, t6, -1
+; RV64-NEXT:    and t2, t4, a1
+; RV64-NEXT:    and t0, a1, a2
+; RV64-NEXT:    and a1, a1, a5
+; RV64-NEXT:    bnez t6, .LBB3_30
+; RV64-NEXT:  # %bb.21: # %_udiv-special-cases
+; RV64-NEXT:    xori t6, a7, 128
+; RV64-NEXT:    or t6, t6, a6
+; RV64-NEXT:    or t6, t6, t5
+; RV64-NEXT:    beqz t6, .LBB3_30
+; RV64-NEXT:  # %bb.22: # %udiv-bb1
+; RV64-NEXT:    addi a1, a7, 1
+; RV64-NEXT:    sd zero, 64(sp)
+; RV64-NEXT:    sd zero, 72(sp)
+; RV64-NEXT:    sd zero, 80(sp)
+; RV64-NEXT:    sd zero, 88(sp)
+; RV64-NEXT:    sd a5, 96(sp)
+; RV64-NEXT:    sd a2, 104(sp)
+; RV64-NEXT:    sd t4, 112(sp)
+; RV64-NEXT:    li t0, 128
+; RV64-NEXT:    addi t2, sp, 96
+; RV64-NEXT:    neg s1, a7
+; RV64-NEXT:    seqz t6, a1
+; RV64-NEXT:    sub a7, t0, a7
+; RV64-NEXT:    add t5, t5, t6
+; RV64-NEXT:    andi t0, a7, 63
+; RV64-NEXT:    srli a7, a7, 3
+; RV64-NEXT:    or t6, a1, t5
+; RV64-NEXT:    xori s2, t0, 63
+; RV64-NEXT:    andi a7, a7, 24
+; RV64-NEXT:    seqz t0, t6
+; RV64-NEXT:    sub s3, t2, a7
+; RV64-NEXT:    add a6, a6, t0
+; RV64-NEXT:    ld t2, 0(s3)
+; RV64-NEXT:    ld s4, 8(s3)
+; RV64-NEXT:    andi a7, a6, 1
+; RV64-NEXT:    or t6, t6, a7
+; RV64-NEXT:    srli a6, t2, 1
+; RV64-NEXT:    sll t0, s4, s1
+; RV64-NEXT:    srl a6, a6, s2
+; RV64-NEXT:    or t0, t0, a6
+; RV64-NEXT:    sll a6, t2, s1
+; RV64-NEXT:    li t2, 0
+; RV64-NEXT:    beqz t6, .LBB3_28
+; RV64-NEXT:  # %bb.23: # %udiv-preheader
+; RV64-NEXT:    li t6, 0
+; RV64-NEXT:    li s0, 0
+; RV64-NEXT:    srli s4, s4, 1
+; RV64-NEXT:    ld s3, 16(s3)
+; RV64-NEXT:    sd zero, 32(sp)
+; RV64-NEXT:    sd zero, 40(sp)
+; RV64-NEXT:    sd zero, 48(sp)
+; RV64-NEXT:    sd zero, 56(sp)
+; RV64-NEXT:    sd a5, 0(sp)
+; RV64-NEXT:    sd a2, 8(sp)
+; RV64-NEXT:    sd t4, 16(sp)
+; RV64-NEXT:    sd zero, 24(sp)
+; RV64-NEXT:    srli a2, a1, 3
+; RV64-NEXT:    srl a5, s4, s2
+; RV64-NEXT:    mv t4, sp
+; RV64-NEXT:    snez t3, t3
+; RV64-NEXT:    andi a2, a2, 24
+; RV64-NEXT:    add t1, t1, t3
+; RV64-NEXT:    add a2, t4, a2
+; RV64-NEXT:    ld t3, 0(a2)
+; RV64-NEXT:    ld t4, 8(a2)
+; RV64-NEXT:    ld a2, 16(a2)
+; RV64-NEXT:    sll s1, s3, s1
+; RV64-NEXT:    andi s2, a1, 63
+; RV64-NEXT:    xori s2, s2, 63
+; RV64-NEXT:    or s3, s1, a5
+; RV64-NEXT:    slli a2, a2, 1
+; RV64-NEXT:    slli a5, t4, 1
+; RV64-NEXT:    sll a2, a2, s2
+; RV64-NEXT:    sll s2, a5, s2
+; RV64-NEXT:    srl s1, t4, a1
+; RV64-NEXT:    or s1, s1, a2
+; RV64-NEXT:    seqz a2, a3
+; RV64-NEXT:    sub a2, a4, a2
+; RV64-NEXT:    addi a5, t1, 1
+; RV64-NEXT:    andi a5, a5, 1
+; RV64-NEXT:    andi s3, s3, 1
+; RV64-NEXT:    srl t1, t3, a1
+; RV64-NEXT:    or s2, t1, s2
+; RV64-NEXT:    addi t1, a3, -1
+; RV64-NEXT:    j .LBB3_26
+; RV64-NEXT:  .LBB3_24: # %udiv-do-while
+; RV64-NEXT:    # in Loop: Header=BB3_26 Depth=1
+; RV64-NEXT:    sltu t3, a2, s4
+; RV64-NEXT:  .LBB3_25: # %udiv-do-while
+; RV64-NEXT:    # in Loop: Header=BB3_26 Depth=1
+; RV64-NEXT:    srli s1, s1, 63
+; RV64-NEXT:    sub t4, a5, s1
+; RV64-NEXT:    sub t3, t4, t3
+; RV64-NEXT:    slli t3, t3, 63
+; RV64-NEXT:    srai s1, t3, 63
+; RV64-NEXT:    and s3, s1, a4
+; RV64-NEXT:    li t3, 0
+; RV64-NEXT:    li t4, 0
+; RV64-NEXT:    srli s5, a6, 63
+; RV64-NEXT:    sub s4, s4, s3
+; RV64-NEXT:    slli s3, t0, 1
+; RV64-NEXT:    or s3, s3, s5
+; RV64-NEXT:    srli t0, t0, 63
+; RV64-NEXT:    slli a6, a6, 1
+; RV64-NEXT:    or a6, t2, a6
+; RV64-NEXT:    seqz t2, a1
+; RV64-NEXT:    or s0, s0, t0
+; RV64-NEXT:    or s5, a1, t5
+; RV64-NEXT:    sub t5, t5, t2
+; RV64-NEXT:    and s6, s1, a3
+; RV64-NEXT:    addi a1, a1, -1
+; RV64-NEXT:    andi t2, s1, 1
+; RV64-NEXT:    or t0, t6, s3
+; RV64-NEXT:    sltu t6, s2, s6
+; RV64-NEXT:    snez s5, s5
+; RV64-NEXT:    andi s3, s0, 1
+; RV64-NEXT:    sub s1, s4, t6
+; RV64-NEXT:    add a7, a7, s5
+; RV64-NEXT:    addi a7, a7, 1
+; RV64-NEXT:    andi a7, a7, 1
+; RV64-NEXT:    or t6, a1, t5
+; RV64-NEXT:    or s4, t6, a7
+; RV64-NEXT:    sub s2, s2, s6
+; RV64-NEXT:    li t6, 0
+; RV64-NEXT:    li s0, 0
+; RV64-NEXT:    beqz s4, .LBB3_29
+; RV64-NEXT:  .LBB3_26: # %udiv-do-while
+; RV64-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64-NEXT:    srli t3, s2, 63
+; RV64-NEXT:    slli t4, s1, 1
+; RV64-NEXT:    slli s2, s2, 1
+; RV64-NEXT:    or s4, t4, t3
+; RV64-NEXT:    andi t3, s3, 1
+; RV64-NEXT:    or s2, s2, t3
+; RV64-NEXT:    bne a2, s4, .LBB3_24
+; RV64-NEXT:  # %bb.27: # in Loop: Header=BB3_26 Depth=1
+; RV64-NEXT:    sltu t3, t1, s2
+; RV64-NEXT:    j .LBB3_25
+; RV64-NEXT:  .LBB3_28:
+; RV64-NEXT:    li t3, 0
+; RV64-NEXT:    li t4, 0
+; RV64-NEXT:  .LBB3_29: # %udiv-loop-exit
+; RV64-NEXT:    srli a2, a6, 63
+; RV64-NEXT:    slli a3, t0, 1
+; RV64-NEXT:    srli a4, t0, 63
+; RV64-NEXT:    slli a6, a6, 1
+; RV64-NEXT:    or a1, t2, a6
+; RV64-NEXT:    or a2, t3, a2
+; RV64-NEXT:    or a4, t4, a4
+; RV64-NEXT:    or t0, a2, a3
+; RV64-NEXT:    andi t2, a4, 1
+; RV64-NEXT:  .LBB3_30: # %udiv-end
+; RV64-NEXT:    andi a2, t2, 1
+; RV64-NEXT:    sd a1, 0(a0)
+; RV64-NEXT:    sd t0, 8(a0)
+; RV64-NEXT:    sb a2, 16(a0)
+; RV64-NEXT:    ld s0, 184(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s1, 176(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s2, 168(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s3, 160(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s4, 152(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s5, 144(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s6, 136(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 192
+; RV64-NEXT:    ret
   %res = udiv i129 %x, %y
   ret i129 %res
 }

More information about the llvm-commits mailing list