[llvm] b8c5420 - [X86][RISCV] Pre-commit tests for D130862. NFC

Craig Topper via llvm-commits llvm-commits@lists.llvm.org
Sun Aug 14 16:31:24 PDT 2022


Author: Craig Topper
Date: 2022-08-14T16:31:15-07:00
New Revision: b8c5420d74cbeffb6b323f193213dfb9857c53d8

URL: https://github.com/llvm/llvm-project/commit/b8c5420d74cbeffb6b323f193213dfb9857c53d8
DIFF: https://github.com/llvm/llvm-project/commit/b8c5420d74cbeffb6b323f193213dfb9857c53d8.diff

LOG: [X86][RISCV] Pre-commit tests for D130862. NFC

Reviewed By: RKSimon

Differential Revision: https://reviews.llvm.org/D131442

Added: 
    llvm/test/CodeGen/RISCV/split-udiv-by-constant.ll
    llvm/test/CodeGen/RISCV/split-urem-by-constant.ll

Modified: 
    llvm/test/CodeGen/X86/divide-by-constant.ll
    llvm/test/CodeGen/X86/divmod128.ll

Removed: 
    

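A note on the choice of divisors, for readers of the diff below: on riscv32 and 32-bit x86 these tests exercise i64, on riscv64 and 64-bit x86 they exercise i128, and in every case the current lowering is a libcall (__udivdi3/__umoddi3 or __udivti3/__umodti3), as the CHECK lines record. The constants 3, 5, 15, 17, 255, 257, 65535 and 65537 all divide 2^32 - 1 = 3 * 5 * 17 * 257 * 65537, and therefore 2^64 - 1 as well, which permits computing the remainder of a double-wide value from its two halves:

\[(\mathit{hi}\cdot 2^N + \mathit{lo}) \bmod d \;=\; (\mathit{hi}+\mathit{lo}) \bmod d \qquad \text{whenever } 2^N \equiv 1 \pmod{d}.\]

7 and 9 satisfy no such relation at these widths (2^32 mod 7 = 4, 2^32 mod 9 = 4), and 12 is even (12 = 4 * 3), so those presumably serve as negative and composite cases for D130862. The identity above is a standard fact; whether D130862 uses exactly this expansion is not shown in this patch.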

################################################################################
diff --git a/llvm/test/CodeGen/RISCV/split-udiv-by-constant.ll b/llvm/test/CodeGen/RISCV/split-udiv-by-constant.ll
new file mode 100644
index 0000000000000..9b551680041cb
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/split-udiv-by-constant.ll
@@ -0,0 +1,295 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen2/i64/g' %s | llc -mtriple=riscv32 -mattr=+m | \
+; RUN:   FileCheck %s --check-prefix=RV32
+; RUN: sed 's/iXLen2/i128/g' %s | llc -mtriple=riscv64 -mattr=+m | \
+; RUN:   FileCheck %s --check-prefix=RV64
+
+define iXLen2 @test_udiv_3(iXLen2 %x) nounwind {
+; RV32-LABEL: test_udiv_3:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    li a2, 3
+; RV32-NEXT:    li a3, 0
+; RV32-NEXT:    call __udivdi3@plt
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: test_udiv_3:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    li a2, 3
+; RV64-NEXT:    li a3, 0
+; RV64-NEXT:    call __udivti3@plt
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %a = udiv iXLen2 %x, 3
+  ret iXLen2 %a
+}
+
+define iXLen2 @test_udiv_5(iXLen2 %x) nounwind {
+; RV32-LABEL: test_udiv_5:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    li a2, 5
+; RV32-NEXT:    li a3, 0
+; RV32-NEXT:    call __udivdi3@plt
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: test_udiv_5:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    li a2, 5
+; RV64-NEXT:    li a3, 0
+; RV64-NEXT:    call __udivti3@plt
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %a = udiv iXLen2 %x, 5
+  ret iXLen2 %a
+}
+
+define iXLen2 @test_udiv_7(iXLen2 %x) nounwind {
+; RV32-LABEL: test_udiv_7:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    li a2, 7
+; RV32-NEXT:    li a3, 0
+; RV32-NEXT:    call __udivdi3@plt
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: test_udiv_7:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    li a2, 7
+; RV64-NEXT:    li a3, 0
+; RV64-NEXT:    call __udivti3@plt
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %a = udiv iXLen2 %x, 7
+  ret iXLen2 %a
+}
+
+define iXLen2 @test_udiv_9(iXLen2 %x) nounwind {
+; RV32-LABEL: test_udiv_9:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    li a2, 9
+; RV32-NEXT:    li a3, 0
+; RV32-NEXT:    call __udivdi3@plt
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: test_udiv_9:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    li a2, 9
+; RV64-NEXT:    li a3, 0
+; RV64-NEXT:    call __udivti3@plt
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %a = udiv iXLen2 %x, 9
+  ret iXLen2 %a
+}
+
+define iXLen2 @test_udiv_15(iXLen2 %x) nounwind {
+; RV32-LABEL: test_udiv_15:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    li a2, 15
+; RV32-NEXT:    li a3, 0
+; RV32-NEXT:    call __udivdi3@plt
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: test_udiv_15:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    li a2, 15
+; RV64-NEXT:    li a3, 0
+; RV64-NEXT:    call __udivti3@plt
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %a = udiv iXLen2 %x, 15
+  ret iXLen2 %a
+}
+
+define iXLen2 @test_udiv_17(iXLen2 %x) nounwind {
+; RV32-LABEL: test_udiv_17:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    li a2, 17
+; RV32-NEXT:    li a3, 0
+; RV32-NEXT:    call __udivdi3@plt
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: test_udiv_17:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    li a2, 17
+; RV64-NEXT:    li a3, 0
+; RV64-NEXT:    call __udivti3@plt
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %a = udiv iXLen2 %x, 17
+  ret iXLen2 %a
+}
+
+define iXLen2 @test_udiv_255(iXLen2 %x) nounwind {
+; RV32-LABEL: test_udiv_255:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    li a2, 255
+; RV32-NEXT:    li a3, 0
+; RV32-NEXT:    call __udivdi3@plt
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: test_udiv_255:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    li a2, 255
+; RV64-NEXT:    li a3, 0
+; RV64-NEXT:    call __udivti3@plt
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %a = udiv iXLen2 %x, 255
+  ret iXLen2 %a
+}
+
+define iXLen2 @test_udiv_257(iXLen2 %x) nounwind {
+; RV32-LABEL: test_udiv_257:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    li a2, 257
+; RV32-NEXT:    li a3, 0
+; RV32-NEXT:    call __udivdi3@plt
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: test_udiv_257:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    li a2, 257
+; RV64-NEXT:    li a3, 0
+; RV64-NEXT:    call __udivti3@plt
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %a = udiv iXLen2 %x, 257
+  ret iXLen2 %a
+}
+
+define iXLen2 @test_udiv_65535(iXLen2 %x) nounwind {
+; RV32-LABEL: test_udiv_65535:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    lui a2, 16
+; RV32-NEXT:    addi a2, a2, -1
+; RV32-NEXT:    li a3, 0
+; RV32-NEXT:    call __udivdi3@plt
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: test_udiv_65535:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    lui a2, 16
+; RV64-NEXT:    addiw a2, a2, -1
+; RV64-NEXT:    li a3, 0
+; RV64-NEXT:    call __udivti3@plt
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %a = udiv iXLen2 %x, 65535
+  ret iXLen2 %a
+}
+
+define iXLen2 @test_udiv_65537(iXLen2 %x) nounwind {
+; RV32-LABEL: test_udiv_65537:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    lui a2, 16
+; RV32-NEXT:    addi a2, a2, 1
+; RV32-NEXT:    li a3, 0
+; RV32-NEXT:    call __udivdi3@plt
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: test_udiv_65537:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    lui a2, 16
+; RV64-NEXT:    addiw a2, a2, 1
+; RV64-NEXT:    li a3, 0
+; RV64-NEXT:    call __udivti3@plt
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %a = udiv iXLen2 %x, 65537
+  ret iXLen2 %a
+}
+
+define iXLen2 @test_udiv_12(iXLen2 %x) nounwind {
+; RV32-LABEL: test_udiv_12:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    li a2, 12
+; RV32-NEXT:    li a3, 0
+; RV32-NEXT:    call __udivdi3@plt
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: test_udiv_12:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    li a2, 12
+; RV64-NEXT:    li a3, 0
+; RV64-NEXT:    call __udivti3@plt
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %a = udiv iXLen2 %x, 12
+  ret iXLen2 %a
+}

diff --git a/llvm/test/CodeGen/RISCV/split-urem-by-constant.ll b/llvm/test/CodeGen/RISCV/split-urem-by-constant.ll
new file mode 100644
index 0000000000000..b742d9c1e2485
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/split-urem-by-constant.ll
@@ -0,0 +1,296 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen2/i64/g' %s | llc -mtriple=riscv32 -mattr=+m | \
+; RUN:   FileCheck %s --check-prefix=RV32
+; RUN: sed 's/iXLen2/i128/g' %s | llc -mtriple=riscv64 -mattr=+m | \
+; RUN:   FileCheck %s --check-prefix=RV64
+
+define iXLen2 @test_urem_3(iXLen2 %x) nounwind {
+; RV32-LABEL: test_urem_3:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    li a2, 3
+; RV32-NEXT:    li a3, 0
+; RV32-NEXT:    call __umoddi3@plt
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: test_urem_3:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    li a2, 3
+; RV64-NEXT:    li a3, 0
+; RV64-NEXT:    call __umodti3@plt
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %a = urem iXLen2 %x, 3
+  ret iXLen2 %a
+}
+
+define iXLen2 @test_urem_5(iXLen2 %x) nounwind {
+; RV32-LABEL: test_urem_5:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    li a2, 5
+; RV32-NEXT:    li a3, 0
+; RV32-NEXT:    call __umoddi3@plt
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: test_urem_5:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    li a2, 5
+; RV64-NEXT:    li a3, 0
+; RV64-NEXT:    call __umodti3@plt
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %a = urem iXLen2 %x, 5
+  ret iXLen2 %a
+}
+
+define iXLen2 @test_urem_7(iXLen2 %x) nounwind {
+; RV32-LABEL: test_urem_7:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    li a2, 7
+; RV32-NEXT:    li a3, 0
+; RV32-NEXT:    call __umoddi3@plt
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: test_urem_7:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    li a2, 7
+; RV64-NEXT:    li a3, 0
+; RV64-NEXT:    call __umodti3@plt
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %a = urem iXLen2 %x, 7
+  ret iXLen2 %a
+}
+
+define iXLen2 @test_urem_9(iXLen2 %x) nounwind {
+; RV32-LABEL: test_urem_9:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    li a2, 9
+; RV32-NEXT:    li a3, 0
+; RV32-NEXT:    call __umoddi3@plt
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: test_urem_9:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    li a2, 9
+; RV64-NEXT:    li a3, 0
+; RV64-NEXT:    call __umodti3@plt
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %a = urem iXLen2 %x, 9
+  ret iXLen2 %a
+}
+
+define iXLen2 @test_urem_15(iXLen2 %x) nounwind {
+; RV32-LABEL: test_urem_15:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    li a2, 15
+; RV32-NEXT:    li a3, 0
+; RV32-NEXT:    call __umoddi3@plt
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: test_urem_15:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    li a2, 15
+; RV64-NEXT:    li a3, 0
+; RV64-NEXT:    call __umodti3@plt
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %a = urem iXLen2 %x, 15
+  ret iXLen2 %a
+}
+
+define iXLen2 @test_urem_17(iXLen2 %x) nounwind {
+; RV32-LABEL: test_urem_17:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    li a2, 17
+; RV32-NEXT:    li a3, 0
+; RV32-NEXT:    call __umoddi3@plt
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: test_urem_17:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    li a2, 17
+; RV64-NEXT:    li a3, 0
+; RV64-NEXT:    call __umodti3@plt
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %a = urem iXLen2 %x, 17
+  ret iXLen2 %a
+}
+
+define iXLen2 @test_urem_255(iXLen2 %x) nounwind {
+; RV32-LABEL: test_urem_255:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    li a2, 255
+; RV32-NEXT:    li a3, 0
+; RV32-NEXT:    call __umoddi3@plt
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: test_urem_255:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    li a2, 255
+; RV64-NEXT:    li a3, 0
+; RV64-NEXT:    call __umodti3@plt
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %a = urem iXLen2 %x, 255
+  ret iXLen2 %a
+}
+
+define iXLen2 @test_urem_257(iXLen2 %x) nounwind {
+; RV32-LABEL: test_urem_257:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    li a2, 257
+; RV32-NEXT:    li a3, 0
+; RV32-NEXT:    call __umoddi3@plt
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: test_urem_257:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    li a2, 257
+; RV64-NEXT:    li a3, 0
+; RV64-NEXT:    call __umodti3@plt
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %a = urem iXLen2 %x, 257
+  ret iXLen2 %a
+}
+
+define iXLen2 @test_urem_65535(iXLen2 %x) nounwind {
+; RV32-LABEL: test_urem_65535:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    lui a2, 16
+; RV32-NEXT:    addi a2, a2, -1
+; RV32-NEXT:    li a3, 0
+; RV32-NEXT:    call __umoddi3@plt
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: test_urem_65535:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    lui a2, 16
+; RV64-NEXT:    addiw a2, a2, -1
+; RV64-NEXT:    li a3, 0
+; RV64-NEXT:    call __umodti3@plt
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %a = urem iXLen2 %x, 65535
+  ret iXLen2 %a
+}
+
+define iXLen2 @test_urem_65537(iXLen2 %x) nounwind {
+; RV32-LABEL: test_urem_65537:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    lui a2, 16
+; RV32-NEXT:    addi a2, a2, 1
+; RV32-NEXT:    li a3, 0
+; RV32-NEXT:    call __umoddi3@plt
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: test_urem_65537:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    lui a2, 16
+; RV64-NEXT:    addiw a2, a2, 1
+; RV64-NEXT:    li a3, 0
+; RV64-NEXT:    call __umodti3@plt
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %a = urem iXLen2 %x, 65537
+  ret iXLen2 %a
+}
+
+define iXLen2 @test_urem_12(iXLen2 %x) nounwind {
+; RV32-LABEL: test_urem_12:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    li a2, 12
+; RV32-NEXT:    li a3, 0
+; RV32-NEXT:    call __umoddi3@plt
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: test_urem_12:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    li a2, 12
+; RV64-NEXT:    li a3, 0
+; RV64-NEXT:    call __umodti3@plt
+; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %a = urem iXLen2 %x, 12
+  ret iXLen2 %a
+}
+

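Before the x86 changes, a minimal C sketch of the identity above for d = 3 with 32-bit halves. The helper name urem3_split is invented for this illustration, and a real lowering would also strength-reduce the remaining % 3; this only demonstrates why divisors with 2^N ≡ 1 (mod d) are the interesting ones:

#include <assert.h>
#include <stdint.h>

/* 2^32 % 3 == 1, so (hi * 2^32 + lo) % 3 == (hi + lo) % 3.
   The sum is formed in 64 bits, which absorbs the carry. */
static uint32_t urem3_split(uint64_t x) {
    uint64_t hi = x >> 32;
    uint64_t lo = x & 0xffffffffu;
    return (uint32_t)((hi + lo) % 3);
}

int main(void) {
    /* Spot-check against the plain 64-bit remainder. */
    for (uint64_t i = 0; i < 1000000; ++i) {
        uint64_t x = i * 0x9e3779b97f4a7c15ULL; /* cheap pseudo-random probes */
        assert(urem3_split(x) == x % 3);
    }
    return 0;
}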
diff --git a/llvm/test/CodeGen/X86/divide-by-constant.ll b/llvm/test/CodeGen/X86/divide-by-constant.ll
index 3958d5f85e20b..f094cbbd3f79a 100644
--- a/llvm/test/CodeGen/X86/divide-by-constant.ll
+++ b/llvm/test/CodeGen/X86/divide-by-constant.ll
@@ -456,3 +456,481 @@ define { i64, i32 } @PR38622_signed(i64) nounwind {
   %6 = insertvalue { i64, i32 } %5, i32 %4, 1
   ret { i64, i32 } %6
 }
+
+define i64 @urem_i64_3(i64 %x) nounwind {
+; X32-LABEL: urem_i64_3:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    subl $12, %esp
+; X32-NEXT:    pushl $0
+; X32-NEXT:    pushl $3
+; X32-NEXT:    pushl {{[0-9]+}}(%esp)
+; X32-NEXT:    pushl {{[0-9]+}}(%esp)
+; X32-NEXT:    calll __umoddi3
+; X32-NEXT:    addl $28, %esp
+; X32-NEXT:    retl
+;
+; X64-LABEL: urem_i64_3:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movabsq $-6148914691236517205, %rcx # imm = 0xAAAAAAAAAAAAAAAB
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    mulq %rcx
+; X64-NEXT:    shrq %rdx
+; X64-NEXT:    leaq (%rdx,%rdx,2), %rax
+; X64-NEXT:    subq %rax, %rdi
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    retq
+entry:
+  %rem = urem i64 %x, 3
+  ret i64 %rem
+}
+
+define i64 @urem_i64_5(i64 %x) nounwind {
+; X32-LABEL: urem_i64_5:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    subl $12, %esp
+; X32-NEXT:    pushl $0
+; X32-NEXT:    pushl $5
+; X32-NEXT:    pushl {{[0-9]+}}(%esp)
+; X32-NEXT:    pushl {{[0-9]+}}(%esp)
+; X32-NEXT:    calll __umoddi3
+; X32-NEXT:    addl $28, %esp
+; X32-NEXT:    retl
+;
+; X64-LABEL: urem_i64_5:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movabsq $-3689348814741910323, %rcx # imm = 0xCCCCCCCCCCCCCCCD
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    mulq %rcx
+; X64-NEXT:    shrq $2, %rdx
+; X64-NEXT:    leaq (%rdx,%rdx,4), %rax
+; X64-NEXT:    subq %rax, %rdi
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    retq
+entry:
+  %rem = urem i64 %x, 5
+  ret i64 %rem
+}
+
+define i64 @urem_i64_15(i64 %x) nounwind {
+; X32-LABEL: urem_i64_15:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    subl $12, %esp
+; X32-NEXT:    pushl $0
+; X32-NEXT:    pushl $15
+; X32-NEXT:    pushl {{[0-9]+}}(%esp)
+; X32-NEXT:    pushl {{[0-9]+}}(%esp)
+; X32-NEXT:    calll __umoddi3
+; X32-NEXT:    addl $28, %esp
+; X32-NEXT:    retl
+;
+; X64-LABEL: urem_i64_15:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movabsq $-8608480567731124087, %rcx # imm = 0x8888888888888889
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    mulq %rcx
+; X64-NEXT:    shrq $3, %rdx
+; X64-NEXT:    leaq (%rdx,%rdx,4), %rax
+; X64-NEXT:    leaq (%rax,%rax,2), %rax
+; X64-NEXT:    subq %rax, %rdi
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    retq
+entry:
+  %rem = urem i64 %x, 15
+  ret i64 %rem
+}
+
+define i64 @urem_i64_17(i64 %x) nounwind {
+; X32-LABEL: urem_i64_17:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    subl $12, %esp
+; X32-NEXT:    pushl $0
+; X32-NEXT:    pushl $17
+; X32-NEXT:    pushl {{[0-9]+}}(%esp)
+; X32-NEXT:    pushl {{[0-9]+}}(%esp)
+; X32-NEXT:    calll __umoddi3
+; X32-NEXT:    addl $28, %esp
+; X32-NEXT:    retl
+;
+; X64-LABEL: urem_i64_17:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movabsq $-1085102592571150095, %rcx # imm = 0xF0F0F0F0F0F0F0F1
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    mulq %rcx
+; X64-NEXT:    movq %rdx, %rax
+; X64-NEXT:    andq $-16, %rax
+; X64-NEXT:    shrq $4, %rdx
+; X64-NEXT:    addq %rax, %rdx
+; X64-NEXT:    subq %rdx, %rdi
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    retq
+entry:
+  %rem = urem i64 %x, 17
+  ret i64 %rem
+}
+
+define i64 @urem_i64_255(i64 %x) nounwind {
+; X32-LABEL: urem_i64_255:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    subl $12, %esp
+; X32-NEXT:    pushl $0
+; X32-NEXT:    pushl $255
+; X32-NEXT:    pushl {{[0-9]+}}(%esp)
+; X32-NEXT:    pushl {{[0-9]+}}(%esp)
+; X32-NEXT:    calll __umoddi3
+; X32-NEXT:    addl $28, %esp
+; X32-NEXT:    retl
+;
+; X64-LABEL: urem_i64_255:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movabsq $-9187201950435737471, %rcx # imm = 0x8080808080808081
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    mulq %rcx
+; X64-NEXT:    shrq $7, %rdx
+; X64-NEXT:    movq %rdx, %rax
+; X64-NEXT:    shlq $8, %rax
+; X64-NEXT:    subq %rax, %rdx
+; X64-NEXT:    leaq (%rdx,%rdi), %rax
+; X64-NEXT:    retq
+entry:
+  %rem = urem i64 %x, 255
+  ret i64 %rem
+}
+
+define i64 @urem_i64_257(i64 %x) nounwind {
+; X32-LABEL: urem_i64_257:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    subl $12, %esp
+; X32-NEXT:    pushl $0
+; X32-NEXT:    pushl $257 # imm = 0x101
+; X32-NEXT:    pushl {{[0-9]+}}(%esp)
+; X32-NEXT:    pushl {{[0-9]+}}(%esp)
+; X32-NEXT:    calll __umoddi3
+; X32-NEXT:    addl $28, %esp
+; X32-NEXT:    retl
+;
+; X64-LABEL: urem_i64_257:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movabsq $-71777214294589695, %rcx # imm = 0xFF00FF00FF00FF01
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    mulq %rcx
+; X64-NEXT:    movq %rdx, %rax
+; X64-NEXT:    andq $-256, %rax
+; X64-NEXT:    shrq $8, %rdx
+; X64-NEXT:    addq %rax, %rdx
+; X64-NEXT:    subq %rdx, %rdi
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    retq
+entry:
+  %rem = urem i64 %x, 257
+  ret i64 %rem
+}
+
+define i64 @urem_i64_65535(i64 %x) nounwind {
+; X32-LABEL: urem_i64_65535:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    subl $12, %esp
+; X32-NEXT:    pushl $0
+; X32-NEXT:    pushl $65535 # imm = 0xFFFF
+; X32-NEXT:    pushl {{[0-9]+}}(%esp)
+; X32-NEXT:    pushl {{[0-9]+}}(%esp)
+; X32-NEXT:    calll __umoddi3
+; X32-NEXT:    addl $28, %esp
+; X32-NEXT:    retl
+;
+; X64-LABEL: urem_i64_65535:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movabsq $-9223231297218904063, %rcx # imm = 0x8000800080008001
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    mulq %rcx
+; X64-NEXT:    shrq $15, %rdx
+; X64-NEXT:    movq %rdx, %rax
+; X64-NEXT:    shlq $16, %rax
+; X64-NEXT:    subq %rax, %rdx
+; X64-NEXT:    leaq (%rdx,%rdi), %rax
+; X64-NEXT:    retq
+entry:
+  %rem = urem i64 %x, 65535
+  ret i64 %rem
+}
+
+define i64 @urem_i64_65537(i64 %x) nounwind {
+; X32-LABEL: urem_i64_65537:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    subl $12, %esp
+; X32-NEXT:    pushl $0
+; X32-NEXT:    pushl $65537 # imm = 0x10001
+; X32-NEXT:    pushl {{[0-9]+}}(%esp)
+; X32-NEXT:    pushl {{[0-9]+}}(%esp)
+; X32-NEXT:    calll __umoddi3
+; X32-NEXT:    addl $28, %esp
+; X32-NEXT:    retl
+;
+; X64-LABEL: urem_i64_65537:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movabsq $-281470681808895, %rcx # imm = 0xFFFF0000FFFF0001
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    mulq %rcx
+; X64-NEXT:    movq %rdx, %rax
+; X64-NEXT:    andq $-65536, %rax # imm = 0xFFFF0000
+; X64-NEXT:    shrq $16, %rdx
+; X64-NEXT:    addq %rax, %rdx
+; X64-NEXT:    subq %rdx, %rdi
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    retq
+entry:
+  %rem = urem i64 %x, 65537
+  ret i64 %rem
+}
+
+define i64 @urem_i64_12(i64 %x) nounwind {
+; X32-LABEL: urem_i64_12:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    subl $12, %esp
+; X32-NEXT:    pushl $0
+; X32-NEXT:    pushl $12
+; X32-NEXT:    pushl {{[0-9]+}}(%esp)
+; X32-NEXT:    pushl {{[0-9]+}}(%esp)
+; X32-NEXT:    calll __umoddi3
+; X32-NEXT:    addl $28, %esp
+; X32-NEXT:    retl
+;
+; X64-LABEL: urem_i64_12:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movabsq $-6148914691236517205, %rcx # imm = 0xAAAAAAAAAAAAAAAB
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    mulq %rcx
+; X64-NEXT:    shrq %rdx
+; X64-NEXT:    andq $-4, %rdx
+; X64-NEXT:    leaq (%rdx,%rdx,2), %rax
+; X64-NEXT:    subq %rax, %rdi
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    retq
+entry:
+  %rem = urem i64 %x, 12
+  ret i64 %rem
+}
+
+define i64 @udiv_i64_3(i64 %x) nounwind {
+; X32-LABEL: udiv_i64_3:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    subl $12, %esp
+; X32-NEXT:    pushl $0
+; X32-NEXT:    pushl $3
+; X32-NEXT:    pushl {{[0-9]+}}(%esp)
+; X32-NEXT:    pushl {{[0-9]+}}(%esp)
+; X32-NEXT:    calll __udivdi3
+; X32-NEXT:    addl $28, %esp
+; X32-NEXT:    retl
+;
+; X64-LABEL: udiv_i64_3:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    movabsq $-6148914691236517205, %rcx # imm = 0xAAAAAAAAAAAAAAAB
+; X64-NEXT:    mulq %rcx
+; X64-NEXT:    movq %rdx, %rax
+; X64-NEXT:    shrq %rax
+; X64-NEXT:    retq
+entry:
+  %rem = udiv i64 %x, 3
+  ret i64 %rem
+}
+
+define i64 @udiv_i64_5(i64 %x) nounwind {
+; X32-LABEL: udiv_i64_5:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    subl $12, %esp
+; X32-NEXT:    pushl $0
+; X32-NEXT:    pushl $5
+; X32-NEXT:    pushl {{[0-9]+}}(%esp)
+; X32-NEXT:    pushl {{[0-9]+}}(%esp)
+; X32-NEXT:    calll __udivdi3
+; X32-NEXT:    addl $28, %esp
+; X32-NEXT:    retl
+;
+; X64-LABEL: udiv_i64_5:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    movabsq $-3689348814741910323, %rcx # imm = 0xCCCCCCCCCCCCCCCD
+; X64-NEXT:    mulq %rcx
+; X64-NEXT:    movq %rdx, %rax
+; X64-NEXT:    shrq $2, %rax
+; X64-NEXT:    retq
+entry:
+  %rem = udiv i64 %x, 5
+  ret i64 %rem
+}
+
+define i64 @udiv_i64_15(i64 %x) nounwind {
+; X32-LABEL: udiv_i64_15:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    subl $12, %esp
+; X32-NEXT:    pushl $0
+; X32-NEXT:    pushl $15
+; X32-NEXT:    pushl {{[0-9]+}}(%esp)
+; X32-NEXT:    pushl {{[0-9]+}}(%esp)
+; X32-NEXT:    calll __udivdi3
+; X32-NEXT:    addl $28, %esp
+; X32-NEXT:    retl
+;
+; X64-LABEL: udiv_i64_15:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    movabsq $-8608480567731124087, %rcx # imm = 0x8888888888888889
+; X64-NEXT:    mulq %rcx
+; X64-NEXT:    movq %rdx, %rax
+; X64-NEXT:    shrq $3, %rax
+; X64-NEXT:    retq
+entry:
+  %rem = udiv i64 %x, 15
+  ret i64 %rem
+}
+
+define i64 @udiv_i64_17(i64 %x) nounwind {
+; X32-LABEL: udiv_i64_17:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    subl $12, %esp
+; X32-NEXT:    pushl $0
+; X32-NEXT:    pushl $17
+; X32-NEXT:    pushl {{[0-9]+}}(%esp)
+; X32-NEXT:    pushl {{[0-9]+}}(%esp)
+; X32-NEXT:    calll __udivdi3
+; X32-NEXT:    addl $28, %esp
+; X32-NEXT:    retl
+;
+; X64-LABEL: udiv_i64_17:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    movabsq $-1085102592571150095, %rcx # imm = 0xF0F0F0F0F0F0F0F1
+; X64-NEXT:    mulq %rcx
+; X64-NEXT:    movq %rdx, %rax
+; X64-NEXT:    shrq $4, %rax
+; X64-NEXT:    retq
+entry:
+  %rem = udiv i64 %x, 17
+  ret i64 %rem
+}
+
+define i64 @udiv_i64_255(i64 %x) nounwind {
+; X32-LABEL: udiv_i64_255:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    subl $12, %esp
+; X32-NEXT:    pushl $0
+; X32-NEXT:    pushl $255
+; X32-NEXT:    pushl {{[0-9]+}}(%esp)
+; X32-NEXT:    pushl {{[0-9]+}}(%esp)
+; X32-NEXT:    calll __udivdi3
+; X32-NEXT:    addl $28, %esp
+; X32-NEXT:    retl
+;
+; X64-LABEL: udiv_i64_255:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    movabsq $-9187201950435737471, %rcx # imm = 0x8080808080808081
+; X64-NEXT:    mulq %rcx
+; X64-NEXT:    movq %rdx, %rax
+; X64-NEXT:    shrq $7, %rax
+; X64-NEXT:    retq
+entry:
+  %rem = udiv i64 %x, 255
+  ret i64 %rem
+}
+
+define i64 @udiv_i64_257(i64 %x) nounwind {
+; X32-LABEL: udiv_i64_257:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    subl $12, %esp
+; X32-NEXT:    pushl $0
+; X32-NEXT:    pushl $257 # imm = 0x101
+; X32-NEXT:    pushl {{[0-9]+}}(%esp)
+; X32-NEXT:    pushl {{[0-9]+}}(%esp)
+; X32-NEXT:    calll __udivdi3
+; X32-NEXT:    addl $28, %esp
+; X32-NEXT:    retl
+;
+; X64-LABEL: udiv_i64_257:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    movabsq $-71777214294589695, %rcx # imm = 0xFF00FF00FF00FF01
+; X64-NEXT:    mulq %rcx
+; X64-NEXT:    movq %rdx, %rax
+; X64-NEXT:    shrq $8, %rax
+; X64-NEXT:    retq
+entry:
+  %rem = udiv i64 %x, 257
+  ret i64 %rem
+}
+
+define i64 @udiv_i64_65535(i64 %x) nounwind {
+; X32-LABEL: udiv_i64_65535:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    subl $12, %esp
+; X32-NEXT:    pushl $0
+; X32-NEXT:    pushl $65535 # imm = 0xFFFF
+; X32-NEXT:    pushl {{[0-9]+}}(%esp)
+; X32-NEXT:    pushl {{[0-9]+}}(%esp)
+; X32-NEXT:    calll __udivdi3
+; X32-NEXT:    addl $28, %esp
+; X32-NEXT:    retl
+;
+; X64-LABEL: udiv_i64_65535:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    movabsq $-9223231297218904063, %rcx # imm = 0x8000800080008001
+; X64-NEXT:    mulq %rcx
+; X64-NEXT:    movq %rdx, %rax
+; X64-NEXT:    shrq $15, %rax
+; X64-NEXT:    retq
+entry:
+  %rem = udiv i64 %x, 65535
+  ret i64 %rem
+}
+
+define i64 @udiv_i64_65537(i64 %x) nounwind {
+; X32-LABEL: udiv_i64_65537:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    subl $12, %esp
+; X32-NEXT:    pushl $0
+; X32-NEXT:    pushl $65537 # imm = 0x10001
+; X32-NEXT:    pushl {{[0-9]+}}(%esp)
+; X32-NEXT:    pushl {{[0-9]+}}(%esp)
+; X32-NEXT:    calll __udivdi3
+; X32-NEXT:    addl $28, %esp
+; X32-NEXT:    retl
+;
+; X64-LABEL: udiv_i64_65537:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    movabsq $-281470681808895, %rcx # imm = 0xFFFF0000FFFF0001
+; X64-NEXT:    mulq %rcx
+; X64-NEXT:    movq %rdx, %rax
+; X64-NEXT:    shrq $16, %rax
+; X64-NEXT:    retq
+entry:
+  %rem = udiv i64 %x, 65537
+  ret i64 %rem
+}
+
+define i64 @udiv_i64_12(i64 %x) nounwind {
+; X32-LABEL: udiv_i64_12:
+; X32:       # %bb.0: # %entry
+; X32-NEXT:    subl $12, %esp
+; X32-NEXT:    pushl $0
+; X32-NEXT:    pushl $12
+; X32-NEXT:    pushl {{[0-9]+}}(%esp)
+; X32-NEXT:    pushl {{[0-9]+}}(%esp)
+; X32-NEXT:    calll __udivdi3
+; X32-NEXT:    addl $28, %esp
+; X32-NEXT:    retl
+;
+; X64-LABEL: udiv_i64_12:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    movabsq $-6148914691236517205, %rcx # imm = 0xAAAAAAAAAAAAAAAB
+; X64-NEXT:    mulq %rcx
+; X64-NEXT:    movq %rdx, %rax
+; X64-NEXT:    shrq $3, %rax
+; X64-NEXT:    retq
+entry:
+  %rem = udiv i64 %x, 12
+  ret i64 %rem
+}

diff --git a/llvm/test/CodeGen/X86/divmod128.ll b/llvm/test/CodeGen/X86/divmod128.ll
index 00373be01ad2d..c9673edbc09b6 100644
--- a/llvm/test/CodeGen/X86/divmod128.ll
+++ b/llvm/test/CodeGen/X86/divmod128.ll
@@ -123,3 +123,543 @@ define i64 @udiv128(i128 %x) nounwind {
   %2 = trunc i128 %1 to i64
   ret i64 %2
 }
+
+define i128 @urem_i128_3(i128 %x) nounwind {
+; X86-64-LABEL: urem_i128_3:
+; X86-64:       # %bb.0: # %entry
+; X86-64-NEXT:    pushq %rax
+; X86-64-NEXT:    movl $3, %edx
+; X86-64-NEXT:    xorl %ecx, %ecx
+; X86-64-NEXT:    callq __umodti3@PLT
+; X86-64-NEXT:    popq %rcx
+; X86-64-NEXT:    retq
+;
+; WIN64-LABEL: urem_i128_3:
+; WIN64:       # %bb.0: # %entry
+; WIN64-NEXT:    subq $72, %rsp
+; WIN64-NEXT:    movq %rdx, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq %rcx, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq $3, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq $0, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
+; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rdx
+; WIN64-NEXT:    callq __umodti3
+; WIN64-NEXT:    movq %xmm0, %rax
+; WIN64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; WIN64-NEXT:    movq %xmm0, %rdx
+; WIN64-NEXT:    addq $72, %rsp
+; WIN64-NEXT:    retq
+entry:
+  %rem = urem i128 %x, 3
+  ret i128 %rem
+}
+
+define i128 @urem_i128_5(i128 %x) nounwind {
+; X86-64-LABEL: urem_i128_5:
+; X86-64:       # %bb.0: # %entry
+; X86-64-NEXT:    pushq %rax
+; X86-64-NEXT:    movl $5, %edx
+; X86-64-NEXT:    xorl %ecx, %ecx
+; X86-64-NEXT:    callq __umodti3@PLT
+; X86-64-NEXT:    popq %rcx
+; X86-64-NEXT:    retq
+;
+; WIN64-LABEL: urem_i128_5:
+; WIN64:       # %bb.0: # %entry
+; WIN64-NEXT:    subq $72, %rsp
+; WIN64-NEXT:    movq %rdx, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq %rcx, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq $5, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq $0, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
+; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rdx
+; WIN64-NEXT:    callq __umodti3
+; WIN64-NEXT:    movq %xmm0, %rax
+; WIN64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; WIN64-NEXT:    movq %xmm0, %rdx
+; WIN64-NEXT:    addq $72, %rsp
+; WIN64-NEXT:    retq
+entry:
+  %rem = urem i128 %x, 5
+  ret i128 %rem
+}
+
+define i128 @urem_i128_15(i128 %x) nounwind {
+; X86-64-LABEL: urem_i128_15:
+; X86-64:       # %bb.0: # %entry
+; X86-64-NEXT:    pushq %rax
+; X86-64-NEXT:    movl $15, %edx
+; X86-64-NEXT:    xorl %ecx, %ecx
+; X86-64-NEXT:    callq __umodti3@PLT
+; X86-64-NEXT:    popq %rcx
+; X86-64-NEXT:    retq
+;
+; WIN64-LABEL: urem_i128_15:
+; WIN64:       # %bb.0: # %entry
+; WIN64-NEXT:    subq $72, %rsp
+; WIN64-NEXT:    movq %rdx, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq %rcx, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq $15, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq $0, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
+; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rdx
+; WIN64-NEXT:    callq __umodti3
+; WIN64-NEXT:    movq %xmm0, %rax
+; WIN64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; WIN64-NEXT:    movq %xmm0, %rdx
+; WIN64-NEXT:    addq $72, %rsp
+; WIN64-NEXT:    retq
+entry:
+  %rem = urem i128 %x, 15
+  ret i128 %rem
+}
+
+define i128 @urem_i128_17(i128 %x) nounwind {
+; X86-64-LABEL: urem_i128_17:
+; X86-64:       # %bb.0: # %entry
+; X86-64-NEXT:    pushq %rax
+; X86-64-NEXT:    movl $17, %edx
+; X86-64-NEXT:    xorl %ecx, %ecx
+; X86-64-NEXT:    callq __umodti3@PLT
+; X86-64-NEXT:    popq %rcx
+; X86-64-NEXT:    retq
+;
+; WIN64-LABEL: urem_i128_17:
+; WIN64:       # %bb.0: # %entry
+; WIN64-NEXT:    subq $72, %rsp
+; WIN64-NEXT:    movq %rdx, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq %rcx, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq $17, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq $0, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
+; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rdx
+; WIN64-NEXT:    callq __umodti3
+; WIN64-NEXT:    movq %xmm0, %rax
+; WIN64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; WIN64-NEXT:    movq %xmm0, %rdx
+; WIN64-NEXT:    addq $72, %rsp
+; WIN64-NEXT:    retq
+entry:
+  %rem = urem i128 %x, 17
+  ret i128 %rem
+}
+
+define i128 @urem_i128_255(i128 %x) nounwind {
+; X86-64-LABEL: urem_i128_255:
+; X86-64:       # %bb.0: # %entry
+; X86-64-NEXT:    pushq %rax
+; X86-64-NEXT:    movl $255, %edx
+; X86-64-NEXT:    xorl %ecx, %ecx
+; X86-64-NEXT:    callq __umodti3@PLT
+; X86-64-NEXT:    popq %rcx
+; X86-64-NEXT:    retq
+;
+; WIN64-LABEL: urem_i128_255:
+; WIN64:       # %bb.0: # %entry
+; WIN64-NEXT:    subq $72, %rsp
+; WIN64-NEXT:    movq %rdx, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq %rcx, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq $255, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq $0, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
+; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rdx
+; WIN64-NEXT:    callq __umodti3
+; WIN64-NEXT:    movq %xmm0, %rax
+; WIN64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; WIN64-NEXT:    movq %xmm0, %rdx
+; WIN64-NEXT:    addq $72, %rsp
+; WIN64-NEXT:    retq
+entry:
+  %rem = urem i128 %x, 255
+  ret i128 %rem
+}
+
+define i128 @urem_i128_257(i128 %x) nounwind {
+; X86-64-LABEL: urem_i128_257:
+; X86-64:       # %bb.0: # %entry
+; X86-64-NEXT:    pushq %rax
+; X86-64-NEXT:    movl $257, %edx # imm = 0x101
+; X86-64-NEXT:    xorl %ecx, %ecx
+; X86-64-NEXT:    callq __umodti3@PLT
+; X86-64-NEXT:    popq %rcx
+; X86-64-NEXT:    retq
+;
+; WIN64-LABEL: urem_i128_257:
+; WIN64:       # %bb.0: # %entry
+; WIN64-NEXT:    subq $72, %rsp
+; WIN64-NEXT:    movq %rdx, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq %rcx, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq $257, {{[0-9]+}}(%rsp) # imm = 0x101
+; WIN64-NEXT:    movq $0, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
+; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rdx
+; WIN64-NEXT:    callq __umodti3
+; WIN64-NEXT:    movq %xmm0, %rax
+; WIN64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; WIN64-NEXT:    movq %xmm0, %rdx
+; WIN64-NEXT:    addq $72, %rsp
+; WIN64-NEXT:    retq
+entry:
+  %rem = urem i128 %x, 257
+  ret i128 %rem
+}
+
+define i128 @urem_i128_65535(i128 %x) nounwind {
+; X86-64-LABEL: urem_i128_65535:
+; X86-64:       # %bb.0: # %entry
+; X86-64-NEXT:    pushq %rax
+; X86-64-NEXT:    movl $65535, %edx # imm = 0xFFFF
+; X86-64-NEXT:    xorl %ecx, %ecx
+; X86-64-NEXT:    callq __umodti3@PLT
+; X86-64-NEXT:    popq %rcx
+; X86-64-NEXT:    retq
+;
+; WIN64-LABEL: urem_i128_65535:
+; WIN64:       # %bb.0: # %entry
+; WIN64-NEXT:    subq $72, %rsp
+; WIN64-NEXT:    movq %rdx, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq %rcx, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq $65535, {{[0-9]+}}(%rsp) # imm = 0xFFFF
+; WIN64-NEXT:    movq $0, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
+; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rdx
+; WIN64-NEXT:    callq __umodti3
+; WIN64-NEXT:    movq %xmm0, %rax
+; WIN64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; WIN64-NEXT:    movq %xmm0, %rdx
+; WIN64-NEXT:    addq $72, %rsp
+; WIN64-NEXT:    retq
+entry:
+  %rem = urem i128 %x, 65535
+  ret i128 %rem
+}
+
+define i128 @urem_i128_65537(i128 %x) nounwind {
+; X86-64-LABEL: urem_i128_65537:
+; X86-64:       # %bb.0: # %entry
+; X86-64-NEXT:    pushq %rax
+; X86-64-NEXT:    movl $65537, %edx # imm = 0x10001
+; X86-64-NEXT:    xorl %ecx, %ecx
+; X86-64-NEXT:    callq __umodti3@PLT
+; X86-64-NEXT:    popq %rcx
+; X86-64-NEXT:    retq
+;
+; WIN64-LABEL: urem_i128_65537:
+; WIN64:       # %bb.0: # %entry
+; WIN64-NEXT:    subq $72, %rsp
+; WIN64-NEXT:    movq %rdx, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq %rcx, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq $65537, {{[0-9]+}}(%rsp) # imm = 0x10001
+; WIN64-NEXT:    movq $0, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
+; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rdx
+; WIN64-NEXT:    callq __umodti3
+; WIN64-NEXT:    movq %xmm0, %rax
+; WIN64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; WIN64-NEXT:    movq %xmm0, %rdx
+; WIN64-NEXT:    addq $72, %rsp
+; WIN64-NEXT:    retq
+entry:
+  %rem = urem i128 %x, 65537
+  ret i128 %rem
+}
+
+define i128 @urem_i128_12(i128 %x) nounwind {
+; X86-64-LABEL: urem_i128_12:
+; X86-64:       # %bb.0: # %entry
+; X86-64-NEXT:    pushq %rax
+; X86-64-NEXT:    movl $12, %edx
+; X86-64-NEXT:    xorl %ecx, %ecx
+; X86-64-NEXT:    callq __umodti3@PLT
+; X86-64-NEXT:    popq %rcx
+; X86-64-NEXT:    retq
+;
+; WIN64-LABEL: urem_i128_12:
+; WIN64:       # %bb.0: # %entry
+; WIN64-NEXT:    subq $72, %rsp
+; WIN64-NEXT:    movq %rdx, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq %rcx, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq $12, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq $0, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
+; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rdx
+; WIN64-NEXT:    callq __umodti3
+; WIN64-NEXT:    movq %xmm0, %rax
+; WIN64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; WIN64-NEXT:    movq %xmm0, %rdx
+; WIN64-NEXT:    addq $72, %rsp
+; WIN64-NEXT:    retq
+entry:
+  %rem = urem i128 %x, 12
+  ret i128 %rem
+}
+
+define i128 @udiv_i128_3(i128 %x) nounwind {
+; X86-64-LABEL: udiv_i128_3:
+; X86-64:       # %bb.0: # %entry
+; X86-64-NEXT:    pushq %rax
+; X86-64-NEXT:    movl $3, %edx
+; X86-64-NEXT:    xorl %ecx, %ecx
+; X86-64-NEXT:    callq __udivti3@PLT
+; X86-64-NEXT:    popq %rcx
+; X86-64-NEXT:    retq
+;
+; WIN64-LABEL: udiv_i128_3:
+; WIN64:       # %bb.0: # %entry
+; WIN64-NEXT:    subq $72, %rsp
+; WIN64-NEXT:    movq %rdx, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq %rcx, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq $3, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq $0, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
+; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rdx
+; WIN64-NEXT:    callq __udivti3
+; WIN64-NEXT:    movq %xmm0, %rax
+; WIN64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; WIN64-NEXT:    movq %xmm0, %rdx
+; WIN64-NEXT:    addq $72, %rsp
+; WIN64-NEXT:    retq
+entry:
+  %rem = udiv i128 %x, 3
+  ret i128 %rem
+}
+
+define i128 @udiv_i128_5(i128 %x) nounwind {
+; X86-64-LABEL: udiv_i128_5:
+; X86-64:       # %bb.0: # %entry
+; X86-64-NEXT:    pushq %rax
+; X86-64-NEXT:    movl $5, %edx
+; X86-64-NEXT:    xorl %ecx, %ecx
+; X86-64-NEXT:    callq __udivti3@PLT
+; X86-64-NEXT:    popq %rcx
+; X86-64-NEXT:    retq
+;
+; WIN64-LABEL: udiv_i128_5:
+; WIN64:       # %bb.0: # %entry
+; WIN64-NEXT:    subq $72, %rsp
+; WIN64-NEXT:    movq %rdx, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq %rcx, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq $5, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq $0, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
+; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rdx
+; WIN64-NEXT:    callq __udivti3
+; WIN64-NEXT:    movq %xmm0, %rax
+; WIN64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; WIN64-NEXT:    movq %xmm0, %rdx
+; WIN64-NEXT:    addq $72, %rsp
+; WIN64-NEXT:    retq
+entry:
+  %rem = udiv i128 %x, 5
+  ret i128 %rem
+}
+
+define i128 @udiv_i128_15(i128 %x) nounwind {
+; X86-64-LABEL: udiv_i128_15:
+; X86-64:       # %bb.0: # %entry
+; X86-64-NEXT:    pushq %rax
+; X86-64-NEXT:    movl $15, %edx
+; X86-64-NEXT:    xorl %ecx, %ecx
+; X86-64-NEXT:    callq __udivti3@PLT
+; X86-64-NEXT:    popq %rcx
+; X86-64-NEXT:    retq
+;
+; WIN64-LABEL: udiv_i128_15:
+; WIN64:       # %bb.0: # %entry
+; WIN64-NEXT:    subq $72, %rsp
+; WIN64-NEXT:    movq %rdx, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq %rcx, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq $15, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq $0, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
+; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rdx
+; WIN64-NEXT:    callq __udivti3
+; WIN64-NEXT:    movq %xmm0, %rax
+; WIN64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; WIN64-NEXT:    movq %xmm0, %rdx
+; WIN64-NEXT:    addq $72, %rsp
+; WIN64-NEXT:    retq
+entry:
+  %rem = udiv i128 %x, 15
+  ret i128 %rem
+}
+
+define i128 @udiv_i128_17(i128 %x) nounwind {
+; X86-64-LABEL: udiv_i128_17:
+; X86-64:       # %bb.0: # %entry
+; X86-64-NEXT:    pushq %rax
+; X86-64-NEXT:    movl $17, %edx
+; X86-64-NEXT:    xorl %ecx, %ecx
+; X86-64-NEXT:    callq __udivti3@PLT
+; X86-64-NEXT:    popq %rcx
+; X86-64-NEXT:    retq
+;
+; WIN64-LABEL: udiv_i128_17:
+; WIN64:       # %bb.0: # %entry
+; WIN64-NEXT:    subq $72, %rsp
+; WIN64-NEXT:    movq %rdx, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq %rcx, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq $17, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq $0, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
+; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rdx
+; WIN64-NEXT:    callq __udivti3
+; WIN64-NEXT:    movq %xmm0, %rax
+; WIN64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; WIN64-NEXT:    movq %xmm0, %rdx
+; WIN64-NEXT:    addq $72, %rsp
+; WIN64-NEXT:    retq
+entry:
+  %rem = udiv i128 %x, 17
+  ret i128 %rem
+}
+
+define i128 @udiv_i128_255(i128 %x) nounwind {
+; X86-64-LABEL: udiv_i128_255:
+; X86-64:       # %bb.0: # %entry
+; X86-64-NEXT:    pushq %rax
+; X86-64-NEXT:    movl $255, %edx
+; X86-64-NEXT:    xorl %ecx, %ecx
+; X86-64-NEXT:    callq __udivti3@PLT
+; X86-64-NEXT:    popq %rcx
+; X86-64-NEXT:    retq
+;
+; WIN64-LABEL: udiv_i128_255:
+; WIN64:       # %bb.0: # %entry
+; WIN64-NEXT:    subq $72, %rsp
+; WIN64-NEXT:    movq %rdx, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq %rcx, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq $255, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq $0, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
+; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rdx
+; WIN64-NEXT:    callq __udivti3
+; WIN64-NEXT:    movq %xmm0, %rax
+; WIN64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; WIN64-NEXT:    movq %xmm0, %rdx
+; WIN64-NEXT:    addq $72, %rsp
+; WIN64-NEXT:    retq
+entry:
+  %rem = udiv i128 %x, 255
+  ret i128 %rem
+}
+
+define i128 @udiv_i128_257(i128 %x) nounwind {
+; X86-64-LABEL: udiv_i128_257:
+; X86-64:       # %bb.0: # %entry
+; X86-64-NEXT:    pushq %rax
+; X86-64-NEXT:    movl $257, %edx # imm = 0x101
+; X86-64-NEXT:    xorl %ecx, %ecx
+; X86-64-NEXT:    callq __udivti3@PLT
+; X86-64-NEXT:    popq %rcx
+; X86-64-NEXT:    retq
+;
+; WIN64-LABEL: udiv_i128_257:
+; WIN64:       # %bb.0: # %entry
+; WIN64-NEXT:    subq $72, %rsp
+; WIN64-NEXT:    movq %rdx, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq %rcx, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq $257, {{[0-9]+}}(%rsp) # imm = 0x101
+; WIN64-NEXT:    movq $0, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
+; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rdx
+; WIN64-NEXT:    callq __udivti3
+; WIN64-NEXT:    movq %xmm0, %rax
+; WIN64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; WIN64-NEXT:    movq %xmm0, %rdx
+; WIN64-NEXT:    addq $72, %rsp
+; WIN64-NEXT:    retq
+entry:
+  %rem = udiv i128 %x, 257
+  ret i128 %rem
+}
+
+define i128 @udiv_i128_65535(i128 %x) nounwind {
+; X86-64-LABEL: udiv_i128_65535:
+; X86-64:       # %bb.0: # %entry
+; X86-64-NEXT:    pushq %rax
+; X86-64-NEXT:    movl $65535, %edx # imm = 0xFFFF
+; X86-64-NEXT:    xorl %ecx, %ecx
+; X86-64-NEXT:    callq __udivti3@PLT
+; X86-64-NEXT:    popq %rcx
+; X86-64-NEXT:    retq
+;
+; WIN64-LABEL: udiv_i128_65535:
+; WIN64:       # %bb.0: # %entry
+; WIN64-NEXT:    subq $72, %rsp
+; WIN64-NEXT:    movq %rdx, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq %rcx, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq $65535, {{[0-9]+}}(%rsp) # imm = 0xFFFF
+; WIN64-NEXT:    movq $0, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
+; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rdx
+; WIN64-NEXT:    callq __udivti3
+; WIN64-NEXT:    movq %xmm0, %rax
+; WIN64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; WIN64-NEXT:    movq %xmm0, %rdx
+; WIN64-NEXT:    addq $72, %rsp
+; WIN64-NEXT:    retq
+entry:
+  %rem = udiv i128 %x, 65535
+  ret i128 %rem
+}
+
+define i128 @udiv_i128_65537(i128 %x) nounwind {
+; X86-64-LABEL: udiv_i128_65537:
+; X86-64:       # %bb.0: # %entry
+; X86-64-NEXT:    pushq %rax
+; X86-64-NEXT:    movl $65537, %edx # imm = 0x10001
+; X86-64-NEXT:    xorl %ecx, %ecx
+; X86-64-NEXT:    callq __udivti3@PLT
+; X86-64-NEXT:    popq %rcx
+; X86-64-NEXT:    retq
+;
+; WIN64-LABEL: udiv_i128_65537:
+; WIN64:       # %bb.0: # %entry
+; WIN64-NEXT:    subq $72, %rsp
+; WIN64-NEXT:    movq %rdx, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq %rcx, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq $65537, {{[0-9]+}}(%rsp) # imm = 0x10001
+; WIN64-NEXT:    movq $0, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
+; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rdx
+; WIN64-NEXT:    callq __udivti3
+; WIN64-NEXT:    movq %xmm0, %rax
+; WIN64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; WIN64-NEXT:    movq %xmm0, %rdx
+; WIN64-NEXT:    addq $72, %rsp
+; WIN64-NEXT:    retq
+entry:
+  %rem = udiv i128 %x, 65537
+  ret i128 %rem
+}
+
+define i128 @udiv_i128_12(i128 %x) nounwind {
+; X86-64-LABEL: udiv_i128_12:
+; X86-64:       # %bb.0: # %entry
+; X86-64-NEXT:    pushq %rax
+; X86-64-NEXT:    movl $12, %edx
+; X86-64-NEXT:    xorl %ecx, %ecx
+; X86-64-NEXT:    callq __udivti3@PLT
+; X86-64-NEXT:    popq %rcx
+; X86-64-NEXT:    retq
+;
+; WIN64-LABEL: udiv_i128_12:
+; WIN64:       # %bb.0: # %entry
+; WIN64-NEXT:    subq $72, %rsp
+; WIN64-NEXT:    movq %rdx, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq %rcx, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq $12, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    movq $0, {{[0-9]+}}(%rsp)
+; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
+; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rdx
+; WIN64-NEXT:    callq __udivti3
+; WIN64-NEXT:    movq %xmm0, %rax
+; WIN64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
+; WIN64-NEXT:    movq %xmm0, %rdx
+; WIN64-NEXT:    addq $72, %rsp
+; WIN64-NEXT:    retq
+entry:
+  %rem = udiv i128 %x, 12
+  ret i128 %rem
+}

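For comparison with the libcall-based i128 output above, the new i64 tests in divide-by-constant.ll already show the single-word magic-reciprocal form. A direct C transcription of the udiv_i64_3/urem_i64_3 CHECK sequences, assuming a compiler with unsigned __int128 (GCC or Clang):

#include <stdint.h>
#include <stdio.h>

/* 0xAAAAAAAAAAAAAAAB == ceil(2^65 / 3), so floor(x / 3) == (x * M) >> 65
   for every 64-bit x. */
static uint64_t mulhi64(uint64_t a, uint64_t b) {
    return (uint64_t)(((unsigned __int128)a * b) >> 64); /* mulq: high half in %rdx */
}

static uint64_t udiv3(uint64_t x) {
    return mulhi64(x, 0xAAAAAAAAAAAAAAABULL) >> 1;       /* shrq %rdx */
}

static uint64_t urem3(uint64_t x) {
    uint64_t q = udiv3(x);
    return x - (q + 2 * q);  /* leaq (%rdx,%rdx,2),%rax; subq %rax,%rdi */
}

int main(void) {
    const uint64_t probes[] = {0, 1, 2, 3, 4, 0x123456789abcdefULL,
                               0xffffffffffffffffULL};
    for (unsigned i = 0; i < sizeof probes / sizeof probes[0]; ++i) {
        uint64_t x = probes[i];
        printf("x=%llu q=%llu r=%llu\n", (unsigned long long)x,
               (unsigned long long)udiv3(x), (unsigned long long)urem3(x));
    }
    return 0;
}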

        

