[llvm] d9207d3 - [RISCV] Add test cases for add/sub/mul overflow intrinsics. NFC

Craig Topper via llvm-commits <llvm-commits@lists.llvm.org>
Sun Feb 21 00:22:30 PST 2021


Author: Craig Topper
Date: 2021-02-21T00:21:20-08:00
New Revision: d9207d3f0bad15caefbb44f307e42862a755333b

URL: https://github.com/llvm/llvm-project/commit/d9207d3f0bad15caefbb44f307e42862a755333b
DIFF: https://github.com/llvm/llvm-project/commit/d9207d3f0bad15caefbb44f307e42862a755333b.diff

LOG: [RISCV] Add test cases for add/sub/mul overflow intrinsics. NFC

Largely copied from AArch64/arm64-xaluo.ll
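For reference, every function in the new file follows the same shape: call one of the llvm.*.with.overflow intrinsics, store the result value, and either return, select on, or branch on the overflow bit. A minimal sketch of that pattern (the function name @example is illustrative; the actual tests are in the diff below):

    define zeroext i1 @example(i32 %a, i32 %b, i32* %res) {
    entry:
      %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
      %val = extractvalue {i32, i1} %t, 0   ; the wrapped result
      %obit = extractvalue {i32, i1} %t, 1  ; the overflow flag
      store i32 %val, i32* %res
      ret i1 %obit
    }
    declare {i32, i1} @llvm.sadd.with.overflow.i32(i32, i32)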

Added: 
    llvm/test/CodeGen/RISCV/xaluo.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/xaluo.ll b/llvm/test/CodeGen/RISCV/xaluo.ll
new file mode 100644
index 000000000000..b088f28d46cf
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/xaluo.ll
@@ -0,0 +1,2278 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=riscv32 -mattr=+m -verify-machineinstrs | FileCheck %s --check-prefix=RV32
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m -verify-machineinstrs | FileCheck %s --check-prefix=RV64
+
+;
+; Get the actual value of the overflow bit.
+;
+define zeroext i1 @saddo1.i32(i32 %v1, i32 %v2, i32* %res) {
+; RV32-LABEL: saddo1.i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    add a3, a0, a1
+; RV32-NEXT:    slt a0, a3, a0
+; RV32-NEXT:    slti a1, a1, 0
+; RV32-NEXT:    xor a0, a1, a0
+; RV32-NEXT:    sw a3, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: saddo1.i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    sext.w a1, a1
+; RV64-NEXT:    sext.w a0, a0
+; RV64-NEXT:    add a3, a0, a1
+; RV64-NEXT:    addw a0, a0, a1
+; RV64-NEXT:    xor a0, a0, a3
+; RV64-NEXT:    snez a0, a0
+; RV64-NEXT:    sw a3, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32* %res
+  ret i1 %obit
+}
+
+; Test the immediate version.
+define zeroext i1 @saddo2.i32(i32 %v1, i32* %res) {
+; RV32-LABEL: saddo2.i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi a2, a0, 4
+; RV32-NEXT:    slt a0, a2, a0
+; RV32-NEXT:    sw a2, 0(a1)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: saddo2.i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    sext.w a0, a0
+; RV64-NEXT:    addi a2, a0, 4
+; RV64-NEXT:    addiw a0, a0, 4
+; RV64-NEXT:    xor a0, a0, a2
+; RV64-NEXT:    snez a0, a0
+; RV64-NEXT:    sw a2, 0(a1)
+; RV64-NEXT:    ret
+entry:
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 4)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32* %res
+  ret i1 %obit
+}
+
+; Test negative immediates.
+define zeroext i1 @saddo3.i32(i32 %v1, i32* %res) {
+; RV32-LABEL: saddo3.i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi a2, a0, -4
+; RV32-NEXT:    slt a0, a2, a0
+; RV32-NEXT:    xori a0, a0, 1
+; RV32-NEXT:    sw a2, 0(a1)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: saddo3.i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    sext.w a0, a0
+; RV64-NEXT:    addi a2, a0, -4
+; RV64-NEXT:    addiw a0, a0, -4
+; RV64-NEXT:    xor a0, a0, a2
+; RV64-NEXT:    snez a0, a0
+; RV64-NEXT:    sw a2, 0(a1)
+; RV64-NEXT:    ret
+entry:
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 -4)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32* %res
+  ret i1 %obit
+}
+
+; Test immediates that are too large to be encoded.
+define zeroext i1 @saddo4.i32(i32 %v1, i32* %res) {
+; RV32-LABEL: saddo4.i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    lui a2, 4096
+; RV32-NEXT:    addi a2, a2, -1
+; RV32-NEXT:    add a2, a0, a2
+; RV32-NEXT:    slt a0, a2, a0
+; RV32-NEXT:    sw a2, 0(a1)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: saddo4.i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    sext.w a0, a0
+; RV64-NEXT:    lui a2, 4096
+; RV64-NEXT:    addiw a2, a2, -1
+; RV64-NEXT:    add a3, a0, a2
+; RV64-NEXT:    addw a0, a0, a2
+; RV64-NEXT:    xor a0, a0, a3
+; RV64-NEXT:    snez a0, a0
+; RV64-NEXT:    sw a3, 0(a1)
+; RV64-NEXT:    ret
+entry:
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 16777215)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32* %res
+  ret i1 %obit
+}
+
+define zeroext i1 @saddo1.i64(i64 %v1, i64 %v2, i64* %res) {
+; RV32-LABEL: saddo1.i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    add a5, a1, a3
+; RV32-NEXT:    add t0, a0, a2
+; RV32-NEXT:    sltu a0, t0, a0
+; RV32-NEXT:    add a5, a5, a0
+; RV32-NEXT:    addi a6, zero, -1
+; RV32-NEXT:    addi a7, zero, 1
+; RV32-NEXT:    addi a2, zero, 1
+; RV32-NEXT:    beqz a5, .LBB4_2
+; RV32-NEXT:  # %bb.1: # %entry
+; RV32-NEXT:    slt a2, a6, a5
+; RV32-NEXT:  .LBB4_2: # %entry
+; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    beqz a1, .LBB4_4
+; RV32-NEXT:  # %bb.3: # %entry
+; RV32-NEXT:    slt a0, a6, a1
+; RV32-NEXT:  .LBB4_4: # %entry
+; RV32-NEXT:    xor a1, a0, a2
+; RV32-NEXT:    snez a1, a1
+; RV32-NEXT:    beqz a3, .LBB4_6
+; RV32-NEXT:  # %bb.5: # %entry
+; RV32-NEXT:    slt a7, a6, a3
+; RV32-NEXT:  .LBB4_6: # %entry
+; RV32-NEXT:    xor a0, a0, a7
+; RV32-NEXT:    seqz a0, a0
+; RV32-NEXT:    and a0, a0, a1
+; RV32-NEXT:    sw t0, 0(a4)
+; RV32-NEXT:    sw a5, 4(a4)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: saddo1.i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    add a3, a0, a1
+; RV64-NEXT:    slt a0, a3, a0
+; RV64-NEXT:    slti a1, a1, 0
+; RV64-NEXT:    xor a0, a1, a0
+; RV64-NEXT:    sd a3, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64* %res
+  ret i1 %obit
+}
+
+define zeroext i1 @saddo2.i64(i64 %v1, i64* %res) {
+; RV32-LABEL: saddo2.i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi a3, a0, 4
+; RV32-NEXT:    sltu a0, a3, a0
+; RV32-NEXT:    add a4, a1, a0
+; RV32-NEXT:    addi a6, zero, -1
+; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    addi a5, zero, 1
+; RV32-NEXT:    beqz a4, .LBB5_2
+; RV32-NEXT:  # %bb.1: # %entry
+; RV32-NEXT:    slt a5, a6, a4
+; RV32-NEXT:  .LBB5_2: # %entry
+; RV32-NEXT:    beqz a1, .LBB5_4
+; RV32-NEXT:  # %bb.3: # %entry
+; RV32-NEXT:    slt a0, a6, a1
+; RV32-NEXT:  .LBB5_4: # %entry
+; RV32-NEXT:    xor a1, a0, a5
+; RV32-NEXT:    snez a1, a1
+; RV32-NEXT:    snez a0, a0
+; RV32-NEXT:    and a0, a0, a1
+; RV32-NEXT:    sw a3, 0(a2)
+; RV32-NEXT:    sw a4, 4(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: saddo2.i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    addi a2, a0, 4
+; RV64-NEXT:    slt a0, a2, a0
+; RV64-NEXT:    sd a2, 0(a1)
+; RV64-NEXT:    ret
+entry:
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 4)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64* %res
+  ret i1 %obit
+}
+
+define zeroext i1 @saddo3.i64(i64 %v1, i64* %res) {
+; RV32-LABEL: saddo3.i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi a3, a0, -4
+; RV32-NEXT:    sltu a0, a3, a0
+; RV32-NEXT:    add a0, a1, a0
+; RV32-NEXT:    addi a4, a0, -1
+; RV32-NEXT:    addi a6, zero, -1
+; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    addi a5, zero, 1
+; RV32-NEXT:    beqz a4, .LBB6_2
+; RV32-NEXT:  # %bb.1: # %entry
+; RV32-NEXT:    slt a5, a6, a4
+; RV32-NEXT:  .LBB6_2: # %entry
+; RV32-NEXT:    beqz a1, .LBB6_4
+; RV32-NEXT:  # %bb.3: # %entry
+; RV32-NEXT:    slt a0, a6, a1
+; RV32-NEXT:  .LBB6_4: # %entry
+; RV32-NEXT:    xor a1, a0, a5
+; RV32-NEXT:    snez a1, a1
+; RV32-NEXT:    seqz a0, a0
+; RV32-NEXT:    and a0, a0, a1
+; RV32-NEXT:    sw a3, 0(a2)
+; RV32-NEXT:    sw a4, 4(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: saddo3.i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    addi a2, a0, -4
+; RV64-NEXT:    slt a0, a2, a0
+; RV64-NEXT:    xori a0, a0, 1
+; RV64-NEXT:    sd a2, 0(a1)
+; RV64-NEXT:    ret
+entry:
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 -4)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64* %res
+  ret i1 %obit
+}
+
+define zeroext i1 @uaddo.i32(i32 %v1, i32 %v2, i32* %res) {
+; RV32-LABEL: uaddo.i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    add a1, a0, a1
+; RV32-NEXT:    sltu a0, a1, a0
+; RV32-NEXT:    sw a1, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: uaddo.i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    slli a1, a1, 32
+; RV64-NEXT:    srli a1, a1, 32
+; RV64-NEXT:    slli a0, a0, 32
+; RV64-NEXT:    srli a0, a0, 32
+; RV64-NEXT:    add a1, a0, a1
+; RV64-NEXT:    slli a0, a1, 32
+; RV64-NEXT:    srli a0, a0, 32
+; RV64-NEXT:    xor a0, a0, a1
+; RV64-NEXT:    snez a0, a0
+; RV64-NEXT:    sw a1, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32* %res
+  ret i1 %obit
+}
+
+define zeroext i1 @uaddo.i64(i64 %v1, i64 %v2, i64* %res) {
+; RV32-LABEL: uaddo.i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    add a3, a1, a3
+; RV32-NEXT:    add a2, a0, a2
+; RV32-NEXT:    sltu a0, a2, a0
+; RV32-NEXT:    add a3, a3, a0
+; RV32-NEXT:    beq a3, a1, .LBB8_2
+; RV32-NEXT:  # %bb.1: # %entry
+; RV32-NEXT:    sltu a0, a3, a1
+; RV32-NEXT:  .LBB8_2: # %entry
+; RV32-NEXT:    sw a2, 0(a4)
+; RV32-NEXT:    sw a3, 4(a4)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: uaddo.i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    add a1, a0, a1
+; RV64-NEXT:    sltu a0, a1, a0
+; RV64-NEXT:    sd a1, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64* %res
+  ret i1 %obit
+}
+
+define zeroext i1 @ssubo1.i32(i32 %v1, i32 %v2, i32* %res) {
+; RV32-LABEL: ssubo1.i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    sgtz a3, a1
+; RV32-NEXT:    sub a1, a0, a1
+; RV32-NEXT:    slt a0, a1, a0
+; RV32-NEXT:    xor a0, a3, a0
+; RV32-NEXT:    sw a1, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: ssubo1.i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    sext.w a1, a1
+; RV64-NEXT:    sext.w a0, a0
+; RV64-NEXT:    sub a3, a0, a1
+; RV64-NEXT:    subw a0, a0, a1
+; RV64-NEXT:    xor a0, a0, a3
+; RV64-NEXT:    snez a0, a0
+; RV64-NEXT:    sw a3, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32* %res
+  ret i1 %obit
+}
+
+define zeroext i1 @ssubo2.i32(i32 %v1, i32* %res) {
+; RV32-LABEL: ssubo2.i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi a2, a0, 4
+; RV32-NEXT:    slt a0, a2, a0
+; RV32-NEXT:    sw a2, 0(a1)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: ssubo2.i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    sext.w a0, a0
+; RV64-NEXT:    addi a2, a0, 4
+; RV64-NEXT:    addiw a0, a0, 4
+; RV64-NEXT:    xor a0, a0, a2
+; RV64-NEXT:    snez a0, a0
+; RV64-NEXT:    sw a2, 0(a1)
+; RV64-NEXT:    ret
+entry:
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 -4)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32* %res
+  ret i1 %obit
+}
+
+define zeroext i1 @ssubo.i64(i64 %v1, i64 %v2, i64* %res) {
+; RV32-LABEL: ssubo.i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    sltu a6, a0, a2
+; RV32-NEXT:    sub a5, a1, a3
+; RV32-NEXT:    sub t0, a5, a6
+; RV32-NEXT:    addi a7, zero, -1
+; RV32-NEXT:    addi a6, zero, 1
+; RV32-NEXT:    addi t1, zero, 1
+; RV32-NEXT:    beqz t0, .LBB11_2
+; RV32-NEXT:  # %bb.1: # %entry
+; RV32-NEXT:    slt t1, a7, t0
+; RV32-NEXT:  .LBB11_2: # %entry
+; RV32-NEXT:    addi a5, zero, 1
+; RV32-NEXT:    beqz a1, .LBB11_4
+; RV32-NEXT:  # %bb.3: # %entry
+; RV32-NEXT:    slt a5, a7, a1
+; RV32-NEXT:  .LBB11_4: # %entry
+; RV32-NEXT:    xor a1, a5, t1
+; RV32-NEXT:    snez a1, a1
+; RV32-NEXT:    beqz a3, .LBB11_6
+; RV32-NEXT:  # %bb.5: # %entry
+; RV32-NEXT:    slt a6, a7, a3
+; RV32-NEXT:  .LBB11_6: # %entry
+; RV32-NEXT:    xor a3, a5, a6
+; RV32-NEXT:    snez a3, a3
+; RV32-NEXT:    and a1, a3, a1
+; RV32-NEXT:    sub a0, a0, a2
+; RV32-NEXT:    sw a0, 0(a4)
+; RV32-NEXT:    sw t0, 4(a4)
+; RV32-NEXT:    mv a0, a1
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: ssubo.i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    sgtz a3, a1
+; RV64-NEXT:    sub a1, a0, a1
+; RV64-NEXT:    slt a0, a1, a0
+; RV64-NEXT:    xor a0, a3, a0
+; RV64-NEXT:    sd a1, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64* %res
+  ret i1 %obit
+}
+
+define zeroext i1 @usubo.i32(i32 %v1, i32 %v2, i32* %res) {
+; RV32-LABEL: usubo.i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    sub a1, a0, a1
+; RV32-NEXT:    sltu a0, a0, a1
+; RV32-NEXT:    sw a1, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: usubo.i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    slli a1, a1, 32
+; RV64-NEXT:    srli a1, a1, 32
+; RV64-NEXT:    slli a0, a0, 32
+; RV64-NEXT:    srli a0, a0, 32
+; RV64-NEXT:    sub a1, a0, a1
+; RV64-NEXT:    slli a0, a1, 32
+; RV64-NEXT:    srli a0, a0, 32
+; RV64-NEXT:    xor a0, a0, a1
+; RV64-NEXT:    snez a0, a0
+; RV64-NEXT:    sw a1, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32* %res
+  ret i1 %obit
+}
+
+define zeroext i1 @usubo.i64(i64 %v1, i64 %v2, i64* %res) {
+; RV32-LABEL: usubo.i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    sltu a5, a0, a2
+; RV32-NEXT:    sub a3, a1, a3
+; RV32-NEXT:    sub a3, a3, a5
+; RV32-NEXT:    sub a2, a0, a2
+; RV32-NEXT:    beq a3, a1, .LBB13_2
+; RV32-NEXT:  # %bb.1: # %entry
+; RV32-NEXT:    sltu a0, a1, a3
+; RV32-NEXT:    j .LBB13_3
+; RV32-NEXT:  .LBB13_2:
+; RV32-NEXT:    sltu a0, a0, a2
+; RV32-NEXT:  .LBB13_3: # %entry
+; RV32-NEXT:    sw a2, 0(a4)
+; RV32-NEXT:    sw a3, 4(a4)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: usubo.i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    sub a1, a0, a1
+; RV64-NEXT:    sltu a0, a0, a1
+; RV64-NEXT:    sd a1, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64* %res
+  ret i1 %obit
+}
+
+define zeroext i1 @smulo.i32(i32 %v1, i32 %v2, i32* %res) {
+; RV32-LABEL: smulo.i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    mulh a3, a0, a1
+; RV32-NEXT:    mul a1, a0, a1
+; RV32-NEXT:    srai a0, a1, 31
+; RV32-NEXT:    xor a0, a3, a0
+; RV32-NEXT:    snez a0, a0
+; RV32-NEXT:    sw a1, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: smulo.i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    sext.w a1, a1
+; RV64-NEXT:    sext.w a0, a0
+; RV64-NEXT:    mul a3, a0, a1
+; RV64-NEXT:    mulw a4, a0, a1
+; RV64-NEXT:    xor a4, a4, a3
+; RV64-NEXT:    mulh a0, a0, a1
+; RV64-NEXT:    srai a1, a3, 63
+; RV64-NEXT:    xor a0, a0, a1
+; RV64-NEXT:    or a0, a4, a0
+; RV64-NEXT:    snez a0, a0
+; RV64-NEXT:    sw a3, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32* %res
+  ret i1 %obit
+}
+
+define zeroext i1 @smulo2.i32(i32 %v1, i32* %res) {
+; RV32-LABEL: smulo2.i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi a2, zero, 13
+; RV32-NEXT:    mulh a3, a0, a2
+; RV32-NEXT:    mul a2, a0, a2
+; RV32-NEXT:    srai a0, a2, 31
+; RV32-NEXT:    xor a0, a3, a0
+; RV32-NEXT:    snez a0, a0
+; RV32-NEXT:    sw a2, 0(a1)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: smulo2.i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    sext.w a0, a0
+; RV64-NEXT:    addi a2, zero, 13
+; RV64-NEXT:    mul a3, a0, a2
+; RV64-NEXT:    mulw a4, a0, a2
+; RV64-NEXT:    xor a4, a4, a3
+; RV64-NEXT:    mulh a0, a0, a2
+; RV64-NEXT:    srai a2, a3, 63
+; RV64-NEXT:    xor a0, a0, a2
+; RV64-NEXT:    or a0, a4, a0
+; RV64-NEXT:    snez a0, a0
+; RV64-NEXT:    sw a3, 0(a1)
+; RV64-NEXT:    ret
+entry:
+  %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 13)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32* %res
+  ret i1 %obit
+}
+
+define zeroext i1 @smulo.i64(i64 %v1, i64 %v2, i64* %res) {
+; RV32-LABEL: smulo.i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    .cfi_offset s0, -8
+; RV32-NEXT:    mv s0, a4
+; RV32-NEXT:    sw zero, 4(sp)
+; RV32-NEXT:    addi a4, sp, 4
+; RV32-NEXT:    call __mulodi4@plt
+; RV32-NEXT:    lw a2, 4(sp)
+; RV32-NEXT:    snez a2, a2
+; RV32-NEXT:    sw a1, 4(s0)
+; RV32-NEXT:    sw a0, 0(s0)
+; RV32-NEXT:    mv a0, a2
+; RV32-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: smulo.i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    mulh a3, a0, a1
+; RV64-NEXT:    mul a1, a0, a1
+; RV64-NEXT:    srai a0, a1, 63
+; RV64-NEXT:    xor a0, a3, a0
+; RV64-NEXT:    snez a0, a0
+; RV64-NEXT:    sd a1, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64* %res
+  ret i1 %obit
+}
+
+define zeroext i1 @smulo2.i64(i64 %v1, i64* %res) {
+; RV32-LABEL: smulo2.i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    .cfi_offset s0, -8
+; RV32-NEXT:    mv s0, a2
+; RV32-NEXT:    sw zero, 4(sp)
+; RV32-NEXT:    addi a2, zero, 13
+; RV32-NEXT:    addi a4, sp, 4
+; RV32-NEXT:    mv a3, zero
+; RV32-NEXT:    call __mulodi4@plt
+; RV32-NEXT:    lw a2, 4(sp)
+; RV32-NEXT:    snez a2, a2
+; RV32-NEXT:    sw a1, 4(s0)
+; RV32-NEXT:    sw a0, 0(s0)
+; RV32-NEXT:    mv a0, a2
+; RV32-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: smulo2.i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    addi a2, zero, 13
+; RV64-NEXT:    mulh a3, a0, a2
+; RV64-NEXT:    mul a2, a0, a2
+; RV64-NEXT:    srai a0, a2, 63
+; RV64-NEXT:    xor a0, a3, a0
+; RV64-NEXT:    snez a0, a0
+; RV64-NEXT:    sd a2, 0(a1)
+; RV64-NEXT:    ret
+entry:
+  %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 13)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64* %res
+  ret i1 %obit
+}
+
+define zeroext i1 @umulo.i32(i32 %v1, i32 %v2, i32* %res) {
+; RV32-LABEL: umulo.i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    mulhu a3, a0, a1
+; RV32-NEXT:    snez a3, a3
+; RV32-NEXT:    mul a0, a0, a1
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    mv a0, a3
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: umulo.i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    slli a1, a1, 32
+; RV64-NEXT:    srli a1, a1, 32
+; RV64-NEXT:    slli a0, a0, 32
+; RV64-NEXT:    srli a0, a0, 32
+; RV64-NEXT:    mulhu a3, a0, a1
+; RV64-NEXT:    mul a1, a0, a1
+; RV64-NEXT:    srli a0, a1, 32
+; RV64-NEXT:    or a0, a0, a3
+; RV64-NEXT:    snez a0, a0
+; RV64-NEXT:    sw a1, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32* %res
+  ret i1 %obit
+}
+
+define zeroext i1 @umulo2.i32(i32 %v1, i32* %res) {
+; RV32-LABEL: umulo2.i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi a3, zero, 13
+; RV32-NEXT:    mulhu a2, a0, a3
+; RV32-NEXT:    snez a2, a2
+; RV32-NEXT:    mul a0, a0, a3
+; RV32-NEXT:    sw a0, 0(a1)
+; RV32-NEXT:    mv a0, a2
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: umulo2.i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    slli a0, a0, 32
+; RV64-NEXT:    srli a0, a0, 32
+; RV64-NEXT:    addi a2, zero, 13
+; RV64-NEXT:    mulhu a3, a0, a2
+; RV64-NEXT:    mul a2, a0, a2
+; RV64-NEXT:    srli a0, a2, 32
+; RV64-NEXT:    or a0, a0, a3
+; RV64-NEXT:    snez a0, a0
+; RV64-NEXT:    sw a2, 0(a1)
+; RV64-NEXT:    ret
+entry:
+  %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 13)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  store i32 %val, i32* %res
+  ret i1 %obit
+}
+
+define zeroext i1 @umulo.i64(i64 %v1, i64 %v2, i64* %res) {
+; RV32-LABEL: umulo.i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    mul a6, a3, a0
+; RV32-NEXT:    mul a5, a1, a2
+; RV32-NEXT:    add a6, a5, a6
+; RV32-NEXT:    mulhu a5, a0, a2
+; RV32-NEXT:    add a6, a5, a6
+; RV32-NEXT:    sltu a7, a6, a5
+; RV32-NEXT:    snez t0, a3
+; RV32-NEXT:    snez a5, a1
+; RV32-NEXT:    and a5, a5, t0
+; RV32-NEXT:    mulhu a1, a1, a2
+; RV32-NEXT:    snez a1, a1
+; RV32-NEXT:    or a1, a5, a1
+; RV32-NEXT:    mulhu a3, a3, a0
+; RV32-NEXT:    snez a3, a3
+; RV32-NEXT:    or a1, a1, a3
+; RV32-NEXT:    or a1, a1, a7
+; RV32-NEXT:    mul a0, a0, a2
+; RV32-NEXT:    sw a0, 0(a4)
+; RV32-NEXT:    sw a6, 4(a4)
+; RV32-NEXT:    mv a0, a1
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: umulo.i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    mulhu a3, a0, a1
+; RV64-NEXT:    snez a3, a3
+; RV64-NEXT:    mul a0, a0, a1
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    mv a0, a3
+; RV64-NEXT:    ret
+entry:
+  %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64* %res
+  ret i1 %obit
+}
+
+define zeroext i1 @umulo2.i64(i64 %v1, i64* %res) {
+; RV32-LABEL: umulo2.i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi a3, zero, 13
+; RV32-NEXT:    mul a4, a1, a3
+; RV32-NEXT:    mulhu a5, a0, a3
+; RV32-NEXT:    add a4, a5, a4
+; RV32-NEXT:    sltu a5, a4, a5
+; RV32-NEXT:    mulhu a1, a1, a3
+; RV32-NEXT:    snez a1, a1
+; RV32-NEXT:    or a1, a1, a5
+; RV32-NEXT:    mul a0, a0, a3
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    sw a4, 4(a2)
+; RV32-NEXT:    mv a0, a1
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: umulo2.i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    addi a3, zero, 13
+; RV64-NEXT:    mulhu a2, a0, a3
+; RV64-NEXT:    snez a2, a2
+; RV64-NEXT:    mul a0, a0, a3
+; RV64-NEXT:    sd a0, 0(a1)
+; RV64-NEXT:    mv a0, a2
+; RV64-NEXT:    ret
+entry:
+  %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 13)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  store i64 %val, i64* %res
+  ret i1 %obit
+}
+
+
+;
+; Check the use of the overflow bit in combination with a select instruction.
+;
+define i32 @saddo.select.i32(i32 %v1, i32 %v2) {
+; RV32-LABEL: saddo.select.i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    add a2, a0, a1
+; RV32-NEXT:    slt a2, a2, a0
+; RV32-NEXT:    slti a3, a1, 0
+; RV32-NEXT:    xor a2, a3, a2
+; RV32-NEXT:    bnez a2, .LBB22_2
+; RV32-NEXT:  # %bb.1: # %entry
+; RV32-NEXT:    mv a0, a1
+; RV32-NEXT:  .LBB22_2: # %entry
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: saddo.select.i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    sext.w a2, a1
+; RV64-NEXT:    sext.w a3, a0
+; RV64-NEXT:    add a4, a3, a2
+; RV64-NEXT:    addw a2, a3, a2
+; RV64-NEXT:    bne a2, a4, .LBB22_2
+; RV64-NEXT:  # %bb.1: # %entry
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:  .LBB22_2: # %entry
+; RV64-NEXT:    ret
+entry:
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
+  %obit = extractvalue {i32, i1} %t, 1
+  %ret = select i1 %obit, i32 %v1, i32 %v2
+  ret i32 %ret
+}
+
+define i1 @saddo.not.i32(i32 %v1, i32 %v2) {
+; RV32-LABEL: saddo.not.i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    add a2, a0, a1
+; RV32-NEXT:    slt a0, a2, a0
+; RV32-NEXT:    slti a1, a1, 0
+; RV32-NEXT:    xor a0, a1, a0
+; RV32-NEXT:    xori a0, a0, 1
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: saddo.not.i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    sext.w a1, a1
+; RV64-NEXT:    sext.w a0, a0
+; RV64-NEXT:    add a2, a0, a1
+; RV64-NEXT:    addw a0, a0, a1
+; RV64-NEXT:    xor a0, a0, a2
+; RV64-NEXT:    seqz a0, a0
+; RV64-NEXT:    ret
+entry:
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
+  %obit = extractvalue {i32, i1} %t, 1
+  %ret = xor i1 %obit, true
+  ret i1 %ret
+}
+
+define i64 @saddo.select.i64(i64 %v1, i64 %v2) {
+; RV32-LABEL: saddo.select.i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    add a4, a1, a3
+; RV32-NEXT:    add a5, a0, a2
+; RV32-NEXT:    sltu a5, a5, a0
+; RV32-NEXT:    add a5, a4, a5
+; RV32-NEXT:    addi a6, zero, -1
+; RV32-NEXT:    addi a7, zero, 1
+; RV32-NEXT:    addi a4, zero, 1
+; RV32-NEXT:    beqz a5, .LBB24_2
+; RV32-NEXT:  # %bb.1: # %entry
+; RV32-NEXT:    slt a4, a6, a5
+; RV32-NEXT:  .LBB24_2: # %entry
+; RV32-NEXT:    addi a5, zero, 1
+; RV32-NEXT:    beqz a1, .LBB24_4
+; RV32-NEXT:  # %bb.3: # %entry
+; RV32-NEXT:    slt a5, a6, a1
+; RV32-NEXT:  .LBB24_4: # %entry
+; RV32-NEXT:    xor a4, a5, a4
+; RV32-NEXT:    snez a4, a4
+; RV32-NEXT:    beqz a3, .LBB24_6
+; RV32-NEXT:  # %bb.5: # %entry
+; RV32-NEXT:    slt a7, a6, a3
+; RV32-NEXT:  .LBB24_6: # %entry
+; RV32-NEXT:    xor a5, a5, a7
+; RV32-NEXT:    seqz a5, a5
+; RV32-NEXT:    and a4, a5, a4
+; RV32-NEXT:    bnez a4, .LBB24_8
+; RV32-NEXT:  # %bb.7: # %entry
+; RV32-NEXT:    mv a0, a2
+; RV32-NEXT:    mv a1, a3
+; RV32-NEXT:  .LBB24_8: # %entry
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: saddo.select.i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    add a2, a0, a1
+; RV64-NEXT:    slt a2, a2, a0
+; RV64-NEXT:    slti a3, a1, 0
+; RV64-NEXT:    xor a2, a3, a2
+; RV64-NEXT:    bnez a2, .LBB24_2
+; RV64-NEXT:  # %bb.1: # %entry
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:  .LBB24_2: # %entry
+; RV64-NEXT:    ret
+entry:
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
+  %obit = extractvalue {i64, i1} %t, 1
+  %ret = select i1 %obit, i64 %v1, i64 %v2
+  ret i64 %ret
+}
+
+define i1 @saddo.not.i64(i64 %v1, i64 %v2) {
+; RV32-LABEL: saddo.not.i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    add a4, a1, a3
+; RV32-NEXT:    add a2, a0, a2
+; RV32-NEXT:    sltu a0, a2, a0
+; RV32-NEXT:    add a4, a4, a0
+; RV32-NEXT:    addi a2, zero, -1
+; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    addi a5, zero, 1
+; RV32-NEXT:    beqz a4, .LBB25_2
+; RV32-NEXT:  # %bb.1: # %entry
+; RV32-NEXT:    slt a5, a2, a4
+; RV32-NEXT:  .LBB25_2: # %entry
+; RV32-NEXT:    addi a4, zero, 1
+; RV32-NEXT:    beqz a1, .LBB25_4
+; RV32-NEXT:  # %bb.3: # %entry
+; RV32-NEXT:    slt a4, a2, a1
+; RV32-NEXT:  .LBB25_4: # %entry
+; RV32-NEXT:    xor a1, a4, a5
+; RV32-NEXT:    snez a1, a1
+; RV32-NEXT:    beqz a3, .LBB25_6
+; RV32-NEXT:  # %bb.5: # %entry
+; RV32-NEXT:    slt a0, a2, a3
+; RV32-NEXT:  .LBB25_6: # %entry
+; RV32-NEXT:    xor a0, a4, a0
+; RV32-NEXT:    seqz a0, a0
+; RV32-NEXT:    and a0, a0, a1
+; RV32-NEXT:    xori a0, a0, 1
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: saddo.not.i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    add a2, a0, a1
+; RV64-NEXT:    slt a0, a2, a0
+; RV64-NEXT:    slti a1, a1, 0
+; RV64-NEXT:    xor a0, a1, a0
+; RV64-NEXT:    xori a0, a0, 1
+; RV64-NEXT:    ret
+entry:
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
+  %obit = extractvalue {i64, i1} %t, 1
+  %ret = xor i1 %obit, true
+  ret i1 %ret
+}
+
+define i32 @uaddo.select.i32(i32 %v1, i32 %v2) {
+; RV32-LABEL: uaddo.select.i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    add a2, a0, a1
+; RV32-NEXT:    sltu a2, a2, a0
+; RV32-NEXT:    bnez a2, .LBB26_2
+; RV32-NEXT:  # %bb.1: # %entry
+; RV32-NEXT:    mv a0, a1
+; RV32-NEXT:  .LBB26_2: # %entry
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: uaddo.select.i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    slli a2, a1, 32
+; RV64-NEXT:    srli a2, a2, 32
+; RV64-NEXT:    slli a3, a0, 32
+; RV64-NEXT:    srli a3, a3, 32
+; RV64-NEXT:    add a2, a3, a2
+; RV64-NEXT:    slli a3, a2, 32
+; RV64-NEXT:    srli a3, a3, 32
+; RV64-NEXT:    bne a3, a2, .LBB26_2
+; RV64-NEXT:  # %bb.1: # %entry
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:  .LBB26_2: # %entry
+; RV64-NEXT:    ret
+entry:
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
+  %obit = extractvalue {i32, i1} %t, 1
+  %ret = select i1 %obit, i32 %v1, i32 %v2
+  ret i32 %ret
+}
+
+define i1 @uaddo.not.i32(i32 %v1, i32 %v2) {
+; RV32-LABEL: uaddo.not.i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    add a1, a0, a1
+; RV32-NEXT:    sltu a0, a1, a0
+; RV32-NEXT:    xori a0, a0, 1
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: uaddo.not.i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    slli a1, a1, 32
+; RV64-NEXT:    srli a1, a1, 32
+; RV64-NEXT:    slli a0, a0, 32
+; RV64-NEXT:    srli a0, a0, 32
+; RV64-NEXT:    add a0, a0, a1
+; RV64-NEXT:    slli a1, a0, 32
+; RV64-NEXT:    srli a1, a1, 32
+; RV64-NEXT:    xor a0, a1, a0
+; RV64-NEXT:    seqz a0, a0
+; RV64-NEXT:    ret
+entry:
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
+  %obit = extractvalue {i32, i1} %t, 1
+  %ret = xor i1 %obit, true
+  ret i1 %ret
+}
+
+define i64 @uaddo.select.i64(i64 %v1, i64 %v2) {
+; RV32-LABEL: uaddo.select.i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    add a5, a1, a3
+; RV32-NEXT:    add a4, a0, a2
+; RV32-NEXT:    sltu a4, a4, a0
+; RV32-NEXT:    add a5, a5, a4
+; RV32-NEXT:    bne a5, a1, .LBB28_3
+; RV32-NEXT:  # %bb.1: # %entry
+; RV32-NEXT:    beqz a4, .LBB28_4
+; RV32-NEXT:  .LBB28_2: # %entry
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB28_3: # %entry
+; RV32-NEXT:    sltu a4, a5, a1
+; RV32-NEXT:    bnez a4, .LBB28_2
+; RV32-NEXT:  .LBB28_4: # %entry
+; RV32-NEXT:    mv a0, a2
+; RV32-NEXT:    mv a1, a3
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: uaddo.select.i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    add a2, a0, a1
+; RV64-NEXT:    sltu a2, a2, a0
+; RV64-NEXT:    bnez a2, .LBB28_2
+; RV64-NEXT:  # %bb.1: # %entry
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:  .LBB28_2: # %entry
+; RV64-NEXT:    ret
+entry:
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
+  %obit = extractvalue {i64, i1} %t, 1
+  %ret = select i1 %obit, i64 %v1, i64 %v2
+  ret i64 %ret
+}
+
+define i1 @uaddo.not.i64(i64 %v1, i64 %v2) {
+; RV32-LABEL: uaddo.not.i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    add a3, a1, a3
+; RV32-NEXT:    add a2, a0, a2
+; RV32-NEXT:    sltu a0, a2, a0
+; RV32-NEXT:    add a2, a3, a0
+; RV32-NEXT:    beq a2, a1, .LBB29_2
+; RV32-NEXT:  # %bb.1: # %entry
+; RV32-NEXT:    sltu a0, a2, a1
+; RV32-NEXT:  .LBB29_2: # %entry
+; RV32-NEXT:    xori a0, a0, 1
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: uaddo.not.i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    add a1, a0, a1
+; RV64-NEXT:    sltu a0, a1, a0
+; RV64-NEXT:    xori a0, a0, 1
+; RV64-NEXT:    ret
+entry:
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
+  %obit = extractvalue {i64, i1} %t, 1
+  %ret = xor i1 %obit, true
+  ret i1 %ret
+}
+
+define i32 @ssubo.select.i32(i32 %v1, i32 %v2) {
+; RV32-LABEL: ssubo.select.i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    sgtz a2, a1
+; RV32-NEXT:    sub a3, a0, a1
+; RV32-NEXT:    slt a3, a3, a0
+; RV32-NEXT:    xor a2, a2, a3
+; RV32-NEXT:    bnez a2, .LBB30_2
+; RV32-NEXT:  # %bb.1: # %entry
+; RV32-NEXT:    mv a0, a1
+; RV32-NEXT:  .LBB30_2: # %entry
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: ssubo.select.i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    sext.w a2, a1
+; RV64-NEXT:    sext.w a3, a0
+; RV64-NEXT:    sub a4, a3, a2
+; RV64-NEXT:    subw a2, a3, a2
+; RV64-NEXT:    bne a2, a4, .LBB30_2
+; RV64-NEXT:  # %bb.1: # %entry
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:  .LBB30_2: # %entry
+; RV64-NEXT:    ret
+entry:
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
+  %obit = extractvalue {i32, i1} %t, 1
+  %ret = select i1 %obit, i32 %v1, i32 %v2
+  ret i32 %ret
+}
+
+define i1 @ssubo.not.i32(i32 %v1, i32 %v2) {
+; RV32-LABEL: ssubo.not.i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    sgtz a2, a1
+; RV32-NEXT:    sub a1, a0, a1
+; RV32-NEXT:    slt a0, a1, a0
+; RV32-NEXT:    xor a0, a2, a0
+; RV32-NEXT:    xori a0, a0, 1
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: ssubo.not.i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    sext.w a1, a1
+; RV64-NEXT:    sext.w a0, a0
+; RV64-NEXT:    sub a2, a0, a1
+; RV64-NEXT:    subw a0, a0, a1
+; RV64-NEXT:    xor a0, a0, a2
+; RV64-NEXT:    seqz a0, a0
+; RV64-NEXT:    ret
+entry:
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
+  %obit = extractvalue {i32, i1} %t, 1
+  %ret = xor i1 %obit, true
+  ret i1 %ret
+}
+
+define i64 @ssubo.select.i64(i64 %v1, i64 %v2) {
+; RV32-LABEL: ssubo.select.i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    sltu a4, a0, a2
+; RV32-NEXT:    sub a5, a1, a3
+; RV32-NEXT:    sub a5, a5, a4
+; RV32-NEXT:    addi a6, zero, -1
+; RV32-NEXT:    addi a7, zero, 1
+; RV32-NEXT:    addi a4, zero, 1
+; RV32-NEXT:    beqz a5, .LBB32_2
+; RV32-NEXT:  # %bb.1: # %entry
+; RV32-NEXT:    slt a4, a6, a5
+; RV32-NEXT:  .LBB32_2: # %entry
+; RV32-NEXT:    addi a5, zero, 1
+; RV32-NEXT:    beqz a1, .LBB32_4
+; RV32-NEXT:  # %bb.3: # %entry
+; RV32-NEXT:    slt a5, a6, a1
+; RV32-NEXT:  .LBB32_4: # %entry
+; RV32-NEXT:    xor a4, a5, a4
+; RV32-NEXT:    snez a4, a4
+; RV32-NEXT:    beqz a3, .LBB32_6
+; RV32-NEXT:  # %bb.5: # %entry
+; RV32-NEXT:    slt a7, a6, a3
+; RV32-NEXT:  .LBB32_6: # %entry
+; RV32-NEXT:    xor a5, a5, a7
+; RV32-NEXT:    snez a5, a5
+; RV32-NEXT:    and a4, a5, a4
+; RV32-NEXT:    bnez a4, .LBB32_8
+; RV32-NEXT:  # %bb.7: # %entry
+; RV32-NEXT:    mv a0, a2
+; RV32-NEXT:    mv a1, a3
+; RV32-NEXT:  .LBB32_8: # %entry
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: ssubo.select.i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    sgtz a2, a1
+; RV64-NEXT:    sub a3, a0, a1
+; RV64-NEXT:    slt a3, a3, a0
+; RV64-NEXT:    xor a2, a2, a3
+; RV64-NEXT:    bnez a2, .LBB32_2
+; RV64-NEXT:  # %bb.1: # %entry
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:  .LBB32_2: # %entry
+; RV64-NEXT:    ret
+entry:
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
+  %obit = extractvalue {i64, i1} %t, 1
+  %ret = select i1 %obit, i64 %v1, i64 %v2
+  ret i64 %ret
+}
+
+define i1 @ssub.not.i64(i64 %v1, i64 %v2) {
+; RV32-LABEL: ssub.not.i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    sltu a0, a0, a2
+; RV32-NEXT:    sub a2, a1, a3
+; RV32-NEXT:    sub a4, a2, a0
+; RV32-NEXT:    addi a2, zero, -1
+; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    addi a5, zero, 1
+; RV32-NEXT:    beqz a4, .LBB33_2
+; RV32-NEXT:  # %bb.1: # %entry
+; RV32-NEXT:    slt a5, a2, a4
+; RV32-NEXT:  .LBB33_2: # %entry
+; RV32-NEXT:    addi a4, zero, 1
+; RV32-NEXT:    beqz a1, .LBB33_4
+; RV32-NEXT:  # %bb.3: # %entry
+; RV32-NEXT:    slt a4, a2, a1
+; RV32-NEXT:  .LBB33_4: # %entry
+; RV32-NEXT:    xor a1, a4, a5
+; RV32-NEXT:    snez a1, a1
+; RV32-NEXT:    beqz a3, .LBB33_6
+; RV32-NEXT:  # %bb.5: # %entry
+; RV32-NEXT:    slt a0, a2, a3
+; RV32-NEXT:  .LBB33_6: # %entry
+; RV32-NEXT:    xor a0, a4, a0
+; RV32-NEXT:    snez a0, a0
+; RV32-NEXT:    and a0, a0, a1
+; RV32-NEXT:    xori a0, a0, 1
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: ssub.not.i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    sgtz a2, a1
+; RV64-NEXT:    sub a1, a0, a1
+; RV64-NEXT:    slt a0, a1, a0
+; RV64-NEXT:    xor a0, a2, a0
+; RV64-NEXT:    xori a0, a0, 1
+; RV64-NEXT:    ret
+entry:
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
+  %obit = extractvalue {i64, i1} %t, 1
+  %ret = xor i1 %obit, true
+  ret i1 %ret
+}
+
+define i32 @usubo.select.i32(i32 %v1, i32 %v2) {
+; RV32-LABEL: usubo.select.i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    sub a2, a0, a1
+; RV32-NEXT:    sltu a2, a0, a2
+; RV32-NEXT:    bnez a2, .LBB34_2
+; RV32-NEXT:  # %bb.1: # %entry
+; RV32-NEXT:    mv a0, a1
+; RV32-NEXT:  .LBB34_2: # %entry
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: usubo.select.i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    slli a2, a1, 32
+; RV64-NEXT:    srli a2, a2, 32
+; RV64-NEXT:    slli a3, a0, 32
+; RV64-NEXT:    srli a3, a3, 32
+; RV64-NEXT:    sub a2, a3, a2
+; RV64-NEXT:    slli a3, a2, 32
+; RV64-NEXT:    srli a3, a3, 32
+; RV64-NEXT:    bne a3, a2, .LBB34_2
+; RV64-NEXT:  # %bb.1: # %entry
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:  .LBB34_2: # %entry
+; RV64-NEXT:    ret
+entry:
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
+  %obit = extractvalue {i32, i1} %t, 1
+  %ret = select i1 %obit, i32 %v1, i32 %v2
+  ret i32 %ret
+}
+
+define i1 @usubo.not.i32(i32 %v1, i32 %v2) {
+; RV32-LABEL: usubo.not.i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    sub a1, a0, a1
+; RV32-NEXT:    sltu a0, a0, a1
+; RV32-NEXT:    xori a0, a0, 1
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: usubo.not.i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    slli a1, a1, 32
+; RV64-NEXT:    srli a1, a1, 32
+; RV64-NEXT:    slli a0, a0, 32
+; RV64-NEXT:    srli a0, a0, 32
+; RV64-NEXT:    sub a0, a0, a1
+; RV64-NEXT:    slli a1, a0, 32
+; RV64-NEXT:    srli a1, a1, 32
+; RV64-NEXT:    xor a0, a1, a0
+; RV64-NEXT:    seqz a0, a0
+; RV64-NEXT:    ret
+entry:
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
+  %obit = extractvalue {i32, i1} %t, 1
+  %ret = xor i1 %obit, true
+  ret i1 %ret
+}
+
+define i64 @usubo.select.i64(i64 %v1, i64 %v2) {
+; RV32-LABEL: usubo.select.i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    sltu a4, a0, a2
+; RV32-NEXT:    sub a5, a1, a3
+; RV32-NEXT:    sub a4, a5, a4
+; RV32-NEXT:    beq a4, a1, .LBB36_2
+; RV32-NEXT:  # %bb.1: # %entry
+; RV32-NEXT:    sltu a4, a1, a4
+; RV32-NEXT:    beqz a4, .LBB36_3
+; RV32-NEXT:    j .LBB36_4
+; RV32-NEXT:  .LBB36_2:
+; RV32-NEXT:    sub a4, a0, a2
+; RV32-NEXT:    sltu a4, a0, a4
+; RV32-NEXT:    bnez a4, .LBB36_4
+; RV32-NEXT:  .LBB36_3: # %entry
+; RV32-NEXT:    mv a0, a2
+; RV32-NEXT:    mv a1, a3
+; RV32-NEXT:  .LBB36_4: # %entry
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: usubo.select.i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    sub a2, a0, a1
+; RV64-NEXT:    sltu a2, a0, a2
+; RV64-NEXT:    bnez a2, .LBB36_2
+; RV64-NEXT:  # %bb.1: # %entry
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:  .LBB36_2: # %entry
+; RV64-NEXT:    ret
+entry:
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
+  %obit = extractvalue {i64, i1} %t, 1
+  %ret = select i1 %obit, i64 %v1, i64 %v2
+  ret i64 %ret
+}
+
+define i1 @usubo.not.i64(i64 %v1, i64 %v2) {
+; RV32-LABEL: usubo.not.i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    sltu a4, a0, a2
+; RV32-NEXT:    sub a3, a1, a3
+; RV32-NEXT:    sub a3, a3, a4
+; RV32-NEXT:    beq a3, a1, .LBB37_2
+; RV32-NEXT:  # %bb.1: # %entry
+; RV32-NEXT:    sltu a0, a1, a3
+; RV32-NEXT:    xori a0, a0, 1
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB37_2:
+; RV32-NEXT:    sub a1, a0, a2
+; RV32-NEXT:    sltu a0, a0, a1
+; RV32-NEXT:    xori a0, a0, 1
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: usubo.not.i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    sub a1, a0, a1
+; RV64-NEXT:    sltu a0, a0, a1
+; RV64-NEXT:    xori a0, a0, 1
+; RV64-NEXT:    ret
+entry:
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
+  %obit = extractvalue {i64, i1} %t, 1
+  %ret = xor i1 %obit, true
+  ret i1 %ret
+}
+
+define i32 @smulo.select.i32(i32 %v1, i32 %v2) {
+; RV32-LABEL: smulo.select.i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    mulh a2, a0, a1
+; RV32-NEXT:    mul a3, a0, a1
+; RV32-NEXT:    srai a3, a3, 31
+; RV32-NEXT:    xor a2, a2, a3
+; RV32-NEXT:    snez a2, a2
+; RV32-NEXT:    bnez a2, .LBB38_2
+; RV32-NEXT:  # %bb.1: # %entry
+; RV32-NEXT:    mv a0, a1
+; RV32-NEXT:  .LBB38_2: # %entry
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: smulo.select.i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    sext.w a2, a1
+; RV64-NEXT:    sext.w a3, a0
+; RV64-NEXT:    mul a4, a3, a2
+; RV64-NEXT:    mulw a5, a3, a2
+; RV64-NEXT:    xor a5, a5, a4
+; RV64-NEXT:    mulh a2, a3, a2
+; RV64-NEXT:    srai a3, a4, 63
+; RV64-NEXT:    xor a2, a2, a3
+; RV64-NEXT:    or a2, a5, a2
+; RV64-NEXT:    snez a2, a2
+; RV64-NEXT:    bnez a2, .LBB38_2
+; RV64-NEXT:  # %bb.1: # %entry
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:  .LBB38_2: # %entry
+; RV64-NEXT:    ret
+entry:
+  %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
+  %obit = extractvalue {i32, i1} %t, 1
+  %ret = select i1 %obit, i32 %v1, i32 %v2
+  ret i32 %ret
+}
+
+define i1 @smulo.not.i32(i32 %v1, i32 %v2) {
+; RV32-LABEL: smulo.not.i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    mulh a2, a0, a1
+; RV32-NEXT:    mul a0, a0, a1
+; RV32-NEXT:    srai a0, a0, 31
+; RV32-NEXT:    xor a0, a2, a0
+; RV32-NEXT:    seqz a0, a0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: smulo.not.i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    sext.w a1, a1
+; RV64-NEXT:    sext.w a0, a0
+; RV64-NEXT:    mul a2, a0, a1
+; RV64-NEXT:    mulw a3, a0, a1
+; RV64-NEXT:    xor a3, a3, a2
+; RV64-NEXT:    mulh a0, a0, a1
+; RV64-NEXT:    srai a1, a2, 63
+; RV64-NEXT:    xor a0, a0, a1
+; RV64-NEXT:    or a0, a3, a0
+; RV64-NEXT:    seqz a0, a0
+; RV64-NEXT:    ret
+entry:
+  %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
+  %obit = extractvalue {i32, i1} %t, 1
+  %ret = xor i1 %obit, true
+  ret i1 %ret
+}
+
+define i64 @smulo.select.i64(i64 %v1, i64 %v2) {
+; RV32-LABEL: smulo.select.i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -32
+; RV32-NEXT:    .cfi_def_cfa_offset 32
+; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    .cfi_offset s0, -8
+; RV32-NEXT:    .cfi_offset s1, -12
+; RV32-NEXT:    .cfi_offset s2, -16
+; RV32-NEXT:    .cfi_offset s3, -20
+; RV32-NEXT:    mv s2, a3
+; RV32-NEXT:    mv s3, a2
+; RV32-NEXT:    mv s0, a1
+; RV32-NEXT:    mv s1, a0
+; RV32-NEXT:    sw zero, 8(sp)
+; RV32-NEXT:    addi a4, sp, 8
+; RV32-NEXT:    call __mulodi4@plt
+; RV32-NEXT:    lw a0, 8(sp)
+; RV32-NEXT:    bnez a0, .LBB40_2
+; RV32-NEXT:  # %bb.1: # %entry
+; RV32-NEXT:    mv s1, s3
+; RV32-NEXT:    mv s0, s2
+; RV32-NEXT:  .LBB40_2: # %entry
+; RV32-NEXT:    mv a0, s1
+; RV32-NEXT:    mv a1, s0
+; RV32-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: smulo.select.i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    mulh a2, a0, a1
+; RV64-NEXT:    mul a3, a0, a1
+; RV64-NEXT:    srai a3, a3, 63
+; RV64-NEXT:    xor a2, a2, a3
+; RV64-NEXT:    snez a2, a2
+; RV64-NEXT:    bnez a2, .LBB40_2
+; RV64-NEXT:  # %bb.1: # %entry
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:  .LBB40_2: # %entry
+; RV64-NEXT:    ret
+entry:
+  %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
+  %obit = extractvalue {i64, i1} %t, 1
+  %ret = select i1 %obit, i64 %v1, i64 %v2
+  ret i64 %ret
+}
+
+define i1 @smulo.not.i64(i64 %v1, i64 %v2) {
+; RV32-LABEL: smulo.not.i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    sw zero, 8(sp)
+; RV32-NEXT:    addi a4, sp, 8
+; RV32-NEXT:    call __mulodi4@plt
+; RV32-NEXT:    lw a0, 8(sp)
+; RV32-NEXT:    seqz a0, a0
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: smulo.not.i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    mulh a2, a0, a1
+; RV64-NEXT:    mul a0, a0, a1
+; RV64-NEXT:    srai a0, a0, 63
+; RV64-NEXT:    xor a0, a2, a0
+; RV64-NEXT:    seqz a0, a0
+; RV64-NEXT:    ret
+entry:
+  %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
+  %obit = extractvalue {i64, i1} %t, 1
+  %ret = xor i1 %obit, true
+  ret i1 %ret
+}
+
+define i32 @umulo.select.i32(i32 %v1, i32 %v2) {
+; RV32-LABEL: umulo.select.i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    mulhu a2, a0, a1
+; RV32-NEXT:    snez a2, a2
+; RV32-NEXT:    bnez a2, .LBB42_2
+; RV32-NEXT:  # %bb.1: # %entry
+; RV32-NEXT:    mv a0, a1
+; RV32-NEXT:  .LBB42_2: # %entry
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: umulo.select.i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    slli a2, a1, 32
+; RV64-NEXT:    srli a2, a2, 32
+; RV64-NEXT:    slli a3, a0, 32
+; RV64-NEXT:    srli a3, a3, 32
+; RV64-NEXT:    mulhu a4, a3, a2
+; RV64-NEXT:    mul a2, a3, a2
+; RV64-NEXT:    srli a2, a2, 32
+; RV64-NEXT:    or a2, a2, a4
+; RV64-NEXT:    snez a2, a2
+; RV64-NEXT:    bnez a2, .LBB42_2
+; RV64-NEXT:  # %bb.1: # %entry
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:  .LBB42_2: # %entry
+; RV64-NEXT:    ret
+entry:
+  %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
+  %obit = extractvalue {i32, i1} %t, 1
+  %ret = select i1 %obit, i32 %v1, i32 %v2
+  ret i32 %ret
+}
+
+define i1 @umulo.not.i32(i32 %v1, i32 %v2) {
+; RV32-LABEL: umulo.not.i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    mulhu a0, a0, a1
+; RV32-NEXT:    seqz a0, a0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: umulo.not.i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    slli a1, a1, 32
+; RV64-NEXT:    srli a1, a1, 32
+; RV64-NEXT:    slli a0, a0, 32
+; RV64-NEXT:    srli a0, a0, 32
+; RV64-NEXT:    mulhu a2, a0, a1
+; RV64-NEXT:    mul a0, a0, a1
+; RV64-NEXT:    srli a0, a0, 32
+; RV64-NEXT:    or a0, a0, a2
+; RV64-NEXT:    seqz a0, a0
+; RV64-NEXT:    ret
+entry:
+  %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
+  %obit = extractvalue {i32, i1} %t, 1
+  %ret = xor i1 %obit, true
+  ret i1 %ret
+}
+
+define i64 @umulo.select.i64(i64 %v1, i64 %v2) {
+; RV32-LABEL: umulo.select.i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    mul a4, a3, a0
+; RV32-NEXT:    mul a5, a1, a2
+; RV32-NEXT:    add a4, a5, a4
+; RV32-NEXT:    mulhu a5, a0, a2
+; RV32-NEXT:    add a4, a5, a4
+; RV32-NEXT:    sltu a6, a4, a5
+; RV32-NEXT:    snez a5, a3
+; RV32-NEXT:    snez a4, a1
+; RV32-NEXT:    and a4, a4, a5
+; RV32-NEXT:    mulhu a5, a1, a2
+; RV32-NEXT:    snez a5, a5
+; RV32-NEXT:    or a4, a4, a5
+; RV32-NEXT:    mulhu a5, a3, a0
+; RV32-NEXT:    snez a5, a5
+; RV32-NEXT:    or a4, a4, a5
+; RV32-NEXT:    or a4, a4, a6
+; RV32-NEXT:    bnez a4, .LBB44_2
+; RV32-NEXT:  # %bb.1: # %entry
+; RV32-NEXT:    mv a0, a2
+; RV32-NEXT:    mv a1, a3
+; RV32-NEXT:  .LBB44_2: # %entry
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: umulo.select.i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    mulhu a2, a0, a1
+; RV64-NEXT:    snez a2, a2
+; RV64-NEXT:    bnez a2, .LBB44_2
+; RV64-NEXT:  # %bb.1: # %entry
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:  .LBB44_2: # %entry
+; RV64-NEXT:    ret
+entry:
+  %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
+  %obit = extractvalue {i64, i1} %t, 1
+  %ret = select i1 %obit, i64 %v1, i64 %v2
+  ret i64 %ret
+}
+
+define i1 @umulo.not.i64(i64 %v1, i64 %v2) {
+; RV32-LABEL: umulo.not.i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    mul a4, a3, a0
+; RV32-NEXT:    mul a5, a1, a2
+; RV32-NEXT:    add a4, a5, a4
+; RV32-NEXT:    mulhu a5, a0, a2
+; RV32-NEXT:    add a4, a5, a4
+; RV32-NEXT:    sltu a6, a4, a5
+; RV32-NEXT:    snez a5, a3
+; RV32-NEXT:    snez a4, a1
+; RV32-NEXT:    and a4, a4, a5
+; RV32-NEXT:    mulhu a1, a1, a2
+; RV32-NEXT:    snez a1, a1
+; RV32-NEXT:    or a1, a4, a1
+; RV32-NEXT:    mulhu a0, a3, a0
+; RV32-NEXT:    snez a0, a0
+; RV32-NEXT:    or a0, a1, a0
+; RV32-NEXT:    or a0, a0, a6
+; RV32-NEXT:    xori a0, a0, 1
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: umulo.not.i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    mulhu a0, a0, a1
+; RV64-NEXT:    seqz a0, a0
+; RV64-NEXT:    ret
+entry:
+  %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
+  %obit = extractvalue {i64, i1} %t, 1
+  %ret = xor i1 %obit, true
+  ret i1 %ret
+}
+
+
+;
+; Check the use of the overflow bit in combination with a branch instruction.
+;
+define zeroext i1 @saddo.br.i32(i32 %v1, i32 %v2) {
+; RV32-LABEL: saddo.br.i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    add a2, a0, a1
+; RV32-NEXT:    slt a0, a2, a0
+; RV32-NEXT:    slti a1, a1, 0
+; RV32-NEXT:    xor a0, a1, a0
+; RV32-NEXT:    beqz a0, .LBB46_2
+; RV32-NEXT:  # %bb.1: # %overflow
+; RV32-NEXT:    mv a0, zero
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB46_2: # %continue
+; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: saddo.br.i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    sext.w a1, a1
+; RV64-NEXT:    sext.w a0, a0
+; RV64-NEXT:    add a2, a0, a1
+; RV64-NEXT:    addw a0, a0, a1
+; RV64-NEXT:    beq a0, a2, .LBB46_2
+; RV64-NEXT:  # %bb.1: # %overflow
+; RV64-NEXT:    mv a0, zero
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB46_2: # %continue
+; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    ret
+entry:
+  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  br i1 %obit, label %overflow, label %continue
+
+overflow:
+  ret i1 false
+
+continue:
+  ret i1 true
+}
+
+define zeroext i1 @saddo.br.i64(i64 %v1, i64 %v2) {
+; RV32-LABEL: saddo.br.i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    add a4, a1, a3
+; RV32-NEXT:    add a2, a0, a2
+; RV32-NEXT:    sltu a0, a2, a0
+; RV32-NEXT:    add a4, a4, a0
+; RV32-NEXT:    addi a2, zero, -1
+; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    addi a5, zero, 1
+; RV32-NEXT:    beqz a4, .LBB47_2
+; RV32-NEXT:  # %bb.1: # %entry
+; RV32-NEXT:    slt a5, a2, a4
+; RV32-NEXT:  .LBB47_2: # %entry
+; RV32-NEXT:    addi a4, zero, 1
+; RV32-NEXT:    beqz a1, .LBB47_4
+; RV32-NEXT:  # %bb.3: # %entry
+; RV32-NEXT:    slt a4, a2, a1
+; RV32-NEXT:  .LBB47_4: # %entry
+; RV32-NEXT:    xor a1, a4, a5
+; RV32-NEXT:    snez a1, a1
+; RV32-NEXT:    beqz a3, .LBB47_6
+; RV32-NEXT:  # %bb.5: # %entry
+; RV32-NEXT:    slt a0, a2, a3
+; RV32-NEXT:  .LBB47_6: # %entry
+; RV32-NEXT:    xor a0, a4, a0
+; RV32-NEXT:    seqz a0, a0
+; RV32-NEXT:    and a0, a0, a1
+; RV32-NEXT:    beqz a0, .LBB47_8
+; RV32-NEXT:  # %bb.7: # %overflow
+; RV32-NEXT:    mv a0, zero
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB47_8: # %continue
+; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: saddo.br.i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    add a2, a0, a1
+; RV64-NEXT:    slt a0, a2, a0
+; RV64-NEXT:    slti a1, a1, 0
+; RV64-NEXT:    xor a0, a1, a0
+; RV64-NEXT:    beqz a0, .LBB47_2
+; RV64-NEXT:  # %bb.1: # %overflow
+; RV64-NEXT:    mv a0, zero
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB47_2: # %continue
+; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    ret
+entry:
+  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  br i1 %obit, label %overflow, label %continue
+
+overflow:
+  ret i1 false
+
+continue:
+  ret i1 true
+}
+
+define zeroext i1 @uaddo.br.i32(i32 %v1, i32 %v2) {
+; RV32-LABEL: uaddo.br.i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    add a1, a0, a1
+; RV32-NEXT:    bgeu a1, a0, .LBB48_2
+; RV32-NEXT:  # %bb.1: # %overflow
+; RV32-NEXT:    mv a0, zero
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB48_2: # %continue
+; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: uaddo.br.i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    slli a1, a1, 32
+; RV64-NEXT:    srli a1, a1, 32
+; RV64-NEXT:    slli a0, a0, 32
+; RV64-NEXT:    srli a0, a0, 32
+; RV64-NEXT:    add a0, a0, a1
+; RV64-NEXT:    slli a1, a0, 32
+; RV64-NEXT:    srli a1, a1, 32
+; RV64-NEXT:    beq a1, a0, .LBB48_2
+; RV64-NEXT:  # %bb.1: # %overflow
+; RV64-NEXT:    mv a0, zero
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB48_2: # %continue
+; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    ret
+entry:
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  br i1 %obit, label %overflow, label %continue
+
+overflow:
+  ret i1 false
+
+continue:
+  ret i1 true
+}
+
+define zeroext i1 @uaddo.br.i64(i64 %v1, i64 %v2) {
+; RV32-LABEL: uaddo.br.i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    add a3, a1, a3
+; RV32-NEXT:    add a2, a0, a2
+; RV32-NEXT:    sltu a0, a2, a0
+; RV32-NEXT:    add a2, a3, a0
+; RV32-NEXT:    beq a2, a1, .LBB49_2
+; RV32-NEXT:  # %bb.1: # %entry
+; RV32-NEXT:    sltu a0, a2, a1
+; RV32-NEXT:  .LBB49_2: # %entry
+; RV32-NEXT:    beqz a0, .LBB49_4
+; RV32-NEXT:  # %bb.3: # %overflow
+; RV32-NEXT:    mv a0, zero
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB49_4: # %continue
+; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: uaddo.br.i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    add a1, a0, a1
+; RV64-NEXT:    bgeu a1, a0, .LBB49_2
+; RV64-NEXT:  # %bb.1: # %overflow
+; RV64-NEXT:    mv a0, zero
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB49_2: # %continue
+; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    ret
+entry:
+  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  br i1 %obit, label %overflow, label %continue
+
+overflow:
+  ret i1 false
+
+continue:
+  ret i1 true
+}
+
+define zeroext i1 @ssubo.br.i32(i32 %v1, i32 %v2) {
+; RV32-LABEL: ssubo.br.i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    sgtz a2, a1
+; RV32-NEXT:    sub a1, a0, a1
+; RV32-NEXT:    slt a0, a1, a0
+; RV32-NEXT:    xor a0, a2, a0
+; RV32-NEXT:    beqz a0, .LBB50_2
+; RV32-NEXT:  # %bb.1: # %overflow
+; RV32-NEXT:    mv a0, zero
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB50_2: # %continue
+; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: ssubo.br.i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    sext.w a1, a1
+; RV64-NEXT:    sext.w a0, a0
+; RV64-NEXT:    sub a2, a0, a1
+; RV64-NEXT:    subw a0, a0, a1
+; RV64-NEXT:    beq a0, a2, .LBB50_2
+; RV64-NEXT:  # %bb.1: # %overflow
+; RV64-NEXT:    mv a0, zero
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB50_2: # %continue
+; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    ret
+entry:
+  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  br i1 %obit, label %overflow, label %continue
+
+overflow:
+  ret i1 false
+
+continue:
+  ret i1 true
+}
+
+define zeroext i1 @ssubo.br.i64(i64 %v1, i64 %v2) {
+; RV32-LABEL: ssubo.br.i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    sltu a0, a0, a2
+; RV32-NEXT:    sub a2, a1, a3
+; RV32-NEXT:    sub a4, a2, a0
+; RV32-NEXT:    addi a2, zero, -1
+; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    addi a5, zero, 1
+; RV32-NEXT:    beqz a4, .LBB51_2
+; RV32-NEXT:  # %bb.1: # %entry
+; RV32-NEXT:    slt a5, a2, a4
+; RV32-NEXT:  .LBB51_2: # %entry
+; RV32-NEXT:    addi a4, zero, 1
+; RV32-NEXT:    beqz a1, .LBB51_4
+; RV32-NEXT:  # %bb.3: # %entry
+; RV32-NEXT:    slt a4, a2, a1
+; RV32-NEXT:  .LBB51_4: # %entry
+; RV32-NEXT:    xor a1, a4, a5
+; RV32-NEXT:    snez a1, a1
+; RV32-NEXT:    beqz a3, .LBB51_6
+; RV32-NEXT:  # %bb.5: # %entry
+; RV32-NEXT:    slt a0, a2, a3
+; RV32-NEXT:  .LBB51_6: # %entry
+; RV32-NEXT:    xor a0, a4, a0
+; RV32-NEXT:    snez a0, a0
+; RV32-NEXT:    and a0, a0, a1
+; RV32-NEXT:    beqz a0, .LBB51_8
+; RV32-NEXT:  # %bb.7: # %overflow
+; RV32-NEXT:    mv a0, zero
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB51_8: # %continue
+; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: ssubo.br.i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    sgtz a2, a1
+; RV64-NEXT:    sub a1, a0, a1
+; RV64-NEXT:    slt a0, a1, a0
+; RV64-NEXT:    xor a0, a2, a0
+; RV64-NEXT:    beqz a0, .LBB51_2
+; RV64-NEXT:  # %bb.1: # %overflow
+; RV64-NEXT:    mv a0, zero
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB51_2: # %continue
+; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    ret
+entry:
+  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  br i1 %obit, label %overflow, label %continue
+
+overflow:
+  ret i1 false
+
+continue:
+  ret i1 true
+}
+
+define zeroext i1 @usubo.br.i32(i32 %v1, i32 %v2) {
+; RV32-LABEL: usubo.br.i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    sub a1, a0, a1
+; RV32-NEXT:    bgeu a0, a1, .LBB52_2
+; RV32-NEXT:  # %bb.1: # %overflow
+; RV32-NEXT:    mv a0, zero
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB52_2: # %continue
+; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: usubo.br.i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    slli a1, a1, 32
+; RV64-NEXT:    srli a1, a1, 32
+; RV64-NEXT:    slli a0, a0, 32
+; RV64-NEXT:    srli a0, a0, 32
+; RV64-NEXT:    sub a0, a0, a1
+; RV64-NEXT:    slli a1, a0, 32
+; RV64-NEXT:    srli a1, a1, 32
+; RV64-NEXT:    beq a1, a0, .LBB52_2
+; RV64-NEXT:  # %bb.1: # %overflow
+; RV64-NEXT:    mv a0, zero
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB52_2: # %continue
+; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    ret
+entry:
+  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  br i1 %obit, label %overflow, label %continue
+
+overflow:
+  ret i1 false
+
+continue:
+  ret i1 true
+}
+
+define zeroext i1 @usubo.br.i64(i64 %v1, i64 %v2) {
+; RV32-LABEL: usubo.br.i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    sltu a4, a0, a2
+; RV32-NEXT:    sub a3, a1, a3
+; RV32-NEXT:    sub a3, a3, a4
+; RV32-NEXT:    beq a3, a1, .LBB53_3
+; RV32-NEXT:  # %bb.1: # %entry
+; RV32-NEXT:    sltu a0, a1, a3
+; RV32-NEXT:    bnez a0, .LBB53_4
+; RV32-NEXT:  .LBB53_2: # %continue
+; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB53_3:
+; RV32-NEXT:    sub a1, a0, a2
+; RV32-NEXT:    sltu a0, a0, a1
+; RV32-NEXT:    beqz a0, .LBB53_2
+; RV32-NEXT:  .LBB53_4: # %overflow
+; RV32-NEXT:    mv a0, zero
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: usubo.br.i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    sub a1, a0, a1
+; RV64-NEXT:    bgeu a0, a1, .LBB53_2
+; RV64-NEXT:  # %bb.1: # %overflow
+; RV64-NEXT:    mv a0, zero
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB53_2: # %continue
+; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    ret
+entry:
+  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  br i1 %obit, label %overflow, label %continue
+
+overflow:
+  ret i1 false
+
+continue:
+  ret i1 true
+}
+
+define zeroext i1 @smulo.br.i32(i32 %v1, i32 %v2) {
+; RV32-LABEL: smulo.br.i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    mulh a2, a0, a1
+; RV32-NEXT:    mul a0, a0, a1
+; RV32-NEXT:    srai a0, a0, 31
+; RV32-NEXT:    beq a2, a0, .LBB54_2
+; RV32-NEXT:  # %bb.1: # %overflow
+; RV32-NEXT:    mv a0, zero
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB54_2: # %continue
+; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: smulo.br.i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    sext.w a1, a1
+; RV64-NEXT:    sext.w a0, a0
+; RV64-NEXT:    mul a2, a0, a1
+; RV64-NEXT:    mulw a3, a0, a1
+; RV64-NEXT:    xor a3, a3, a2
+; RV64-NEXT:    mulh a0, a0, a1
+; RV64-NEXT:    srai a1, a2, 63
+; RV64-NEXT:    xor a0, a0, a1
+; RV64-NEXT:    or a0, a3, a0
+; RV64-NEXT:    beqz a0, .LBB54_2
+; RV64-NEXT:  # %bb.1: # %overflow
+; RV64-NEXT:    mv a0, zero
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB54_2: # %continue
+; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    ret
+entry:
+  %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  br i1 %obit, label %overflow, label %continue
+
+overflow:
+  ret i1 false
+
+continue:
+  ret i1 true
+}
+
+define zeroext i1 @smulo.br.i64(i64 %v1, i64 %v2) {
+; RV32-LABEL: smulo.br.i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    sw zero, 8(sp)
+; RV32-NEXT:    addi a4, sp, 8
+; RV32-NEXT:    call __mulodi4@plt
+; RV32-NEXT:    lw a0, 8(sp)
+; RV32-NEXT:    beqz a0, .LBB55_2
+; RV32-NEXT:  # %bb.1: # %overflow
+; RV32-NEXT:    mv a0, zero
+; RV32-NEXT:    j .LBB55_3
+; RV32-NEXT:  .LBB55_2: # %continue
+; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:  .LBB55_3: # %overflow
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: smulo.br.i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    mulh a2, a0, a1
+; RV64-NEXT:    mul a0, a0, a1
+; RV64-NEXT:    srai a0, a0, 63
+; RV64-NEXT:    beq a2, a0, .LBB55_2
+; RV64-NEXT:  # %bb.1: # %overflow
+; RV64-NEXT:    mv a0, zero
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB55_2: # %continue
+; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    ret
+entry:
+  %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  br i1 %obit, label %overflow, label %continue
+
+overflow:
+  ret i1 false
+
+continue:
+  ret i1 true
+}
+
+define zeroext i1 @smulo2.br.i64(i64 %v1) {
+; RV32-LABEL: smulo2.br.i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    add a2, a0, a0
+; RV32-NEXT:    sltu a0, a2, a0
+; RV32-NEXT:    add a2, a1, a1
+; RV32-NEXT:    add a4, a2, a0
+; RV32-NEXT:    addi a2, zero, -1
+; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    addi a3, zero, 1
+; RV32-NEXT:    bnez a4, .LBB56_4
+; RV32-NEXT:  # %bb.1: # %entry
+; RV32-NEXT:    bnez a1, .LBB56_5
+; RV32-NEXT:  .LBB56_2: # %entry
+; RV32-NEXT:    beq a0, a3, .LBB56_6
+; RV32-NEXT:  .LBB56_3: # %overflow
+; RV32-NEXT:    mv a0, zero
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB56_4: # %entry
+; RV32-NEXT:    slt a3, a2, a4
+; RV32-NEXT:    beqz a1, .LBB56_2
+; RV32-NEXT:  .LBB56_5: # %entry
+; RV32-NEXT:    slt a0, a2, a1
+; RV32-NEXT:    bne a0, a3, .LBB56_3
+; RV32-NEXT:  .LBB56_6: # %continue
+; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: smulo2.br.i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    add a1, a0, a0
+; RV64-NEXT:    slt a1, a1, a0
+; RV64-NEXT:    slti a0, a0, 0
+; RV64-NEXT:    xor a0, a0, a1
+; RV64-NEXT:    beqz a0, .LBB56_2
+; RV64-NEXT:  # %bb.1: # %overflow
+; RV64-NEXT:    mv a0, zero
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB56_2: # %continue
+; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    ret
+entry:
+  %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 2)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  br i1 %obit, label %overflow, label %continue
+
+overflow:
+  ret i1 false
+
+continue:
+  ret i1 true
+}
+
+define zeroext i1 @umulo.br.i32(i32 %v1, i32 %v2) {
+; RV32-LABEL: umulo.br.i32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    mulhu a0, a0, a1
+; RV32-NEXT:    beqz a0, .LBB57_2
+; RV32-NEXT:  # %bb.1: # %overflow
+; RV32-NEXT:    mv a0, zero
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB57_2: # %continue
+; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: umulo.br.i32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    slli a1, a1, 32
+; RV64-NEXT:    srli a1, a1, 32
+; RV64-NEXT:    slli a0, a0, 32
+; RV64-NEXT:    srli a0, a0, 32
+; RV64-NEXT:    mulhu a2, a0, a1
+; RV64-NEXT:    mul a0, a0, a1
+; RV64-NEXT:    srli a0, a0, 32
+; RV64-NEXT:    or a0, a0, a2
+; RV64-NEXT:    beqz a0, .LBB57_2
+; RV64-NEXT:  # %bb.1: # %overflow
+; RV64-NEXT:    mv a0, zero
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB57_2: # %continue
+; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    ret
+entry:
+  %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  br i1 %obit, label %overflow, label %continue
+
+overflow:
+  ret i1 false
+
+continue:
+  ret i1 true
+}
+
+define zeroext i1 @umulo.br.i64(i64 %v1, i64 %v2) {
+; RV32-LABEL: umulo.br.i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    mul a4, a3, a0
+; RV32-NEXT:    mul a5, a1, a2
+; RV32-NEXT:    add a4, a5, a4
+; RV32-NEXT:    mulhu a5, a0, a2
+; RV32-NEXT:    add a4, a5, a4
+; RV32-NEXT:    sltu a6, a4, a5
+; RV32-NEXT:    snez a5, a3
+; RV32-NEXT:    snez a4, a1
+; RV32-NEXT:    and a4, a4, a5
+; RV32-NEXT:    mulhu a1, a1, a2
+; RV32-NEXT:    snez a1, a1
+; RV32-NEXT:    or a1, a4, a1
+; RV32-NEXT:    mulhu a0, a3, a0
+; RV32-NEXT:    snez a0, a0
+; RV32-NEXT:    or a0, a1, a0
+; RV32-NEXT:    or a0, a0, a6
+; RV32-NEXT:    beqz a0, .LBB58_2
+; RV32-NEXT:  # %bb.1: # %overflow
+; RV32-NEXT:    mv a0, zero
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB58_2: # %continue
+; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: umulo.br.i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    mulhu a0, a0, a1
+; RV64-NEXT:    beqz a0, .LBB58_2
+; RV64-NEXT:  # %bb.1: # %overflow
+; RV64-NEXT:    mv a0, zero
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB58_2: # %continue
+; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    ret
+entry:
+  %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  br i1 %obit, label %overflow, label %continue
+
+overflow:
+  ret i1 false
+
+continue:
+  ret i1 true
+}
+
+define zeroext i1 @umulo2.br.i64(i64 %v1) {
+; RV32-LABEL: umulo2.br.i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    add a2, a0, a0
+; RV32-NEXT:    sltu a0, a2, a0
+; RV32-NEXT:    add a2, a1, a1
+; RV32-NEXT:    add a2, a2, a0
+; RV32-NEXT:    beq a2, a1, .LBB59_2
+; RV32-NEXT:  # %bb.1: # %entry
+; RV32-NEXT:    sltu a0, a2, a1
+; RV32-NEXT:  .LBB59_2: # %entry
+; RV32-NEXT:    beqz a0, .LBB59_4
+; RV32-NEXT:  # %bb.3: # %overflow
+; RV32-NEXT:    mv a0, zero
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB59_4: # %continue
+; RV32-NEXT:    addi a0, zero, 1
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: umulo2.br.i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    add a1, a0, a0
+; RV64-NEXT:    bgeu a1, a0, .LBB59_2
+; RV64-NEXT:  # %bb.1: # %overflow
+; RV64-NEXT:    mv a0, zero
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB59_2: # %continue
+; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    ret
+entry:
+  %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 2)
+  %val = extractvalue {i64, i1} %t, 0
+  %obit = extractvalue {i64, i1} %t, 1
+  br i1 %obit, label %overflow, label %continue
+
+overflow:
+  ret i1 false
+
+continue:
+  ret i1 true
+}
+
+declare {i32, i1} @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone
+declare {i64, i1} @llvm.sadd.with.overflow.i64(i64, i64) nounwind readnone
+declare {i32, i1} @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone
+declare {i64, i1} @llvm.uadd.with.overflow.i64(i64, i64) nounwind readnone
+declare {i32, i1} @llvm.ssub.with.overflow.i32(i32, i32) nounwind readnone
+declare {i64, i1} @llvm.ssub.with.overflow.i64(i64, i64) nounwind readnone
+declare {i32, i1} @llvm.usub.with.overflow.i32(i32, i32) nounwind readnone
+declare {i64, i1} @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone
+declare {i32, i1} @llvm.smul.with.overflow.i32(i32, i32) nounwind readnone
+declare {i64, i1} @llvm.smul.with.overflow.i64(i64, i64) nounwind readnone
+declare {i32, i1} @llvm.umul.with.overflow.i32(i32, i32) nounwind readnone
+declare {i64, i1} @llvm.umul.with.overflow.i64(i64, i64) nounwind readnone
+