[llvm] acfab44 - [RISCV] Add add/sub saturation tests that exist on ARM/AArch64/X86

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Tue Feb 16 11:20:05 PST 2021


Author: Craig Topper
Date: 2021-02-16T11:19:57-08:00
New Revision: acfab44eebbeccb41f9fd3c2c363ff61f02dbf76

URL: https://github.com/llvm/llvm-project/commit/acfab44eebbeccb41f9fd3c2c363ff61f02dbf76
DIFF: https://github.com/llvm/llvm-project/commit/acfab44eebbeccb41f9fd3c2c363ff61f02dbf76.diff

LOG: [RISCV] Add add/sub saturation tests that exist on ARM/AArch64/X86

There have been some recent changes to the type legalization for
some of these intrinsics, so I thought it would be good to have
coverage.
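
For context, the llvm.{s,u}{add,sub}.sat intrinsics clamp the result to
the range of the type instead of wrapping on overflow. A minimal LLVM IR
sketch of the reference semantics for the signed i32 add case (the
function name is made up for illustration, and this expansion is not how
the backend actually legalizes the intrinsic):

    define i32 @sadd_sat_i32_ref(i32 %x, i32 %y) {
      ; Widen to i64 so the add itself cannot overflow.
      %xw = sext i32 %x to i64
      %yw = sext i32 %y to i64
      %s  = add i64 %xw, %yw
      ; Clamp the wide sum to [INT32_MIN, INT32_MAX].
      %hi = icmp sgt i64 %s, 2147483647
      %c1 = select i1 %hi, i64 2147483647, i64 %s
      %lo = icmp slt i64 %s, -2147483648
      %c2 = select i1 %lo, i64 -2147483648, i64 %c1
      %r  = trunc i64 %c2 to i32
      ret i32 %r
    }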

Added: 
    llvm/test/CodeGen/RISCV/sadd_sat.ll
    llvm/test/CodeGen/RISCV/sadd_sat_plus.ll
    llvm/test/CodeGen/RISCV/ssub_sat.ll
    llvm/test/CodeGen/RISCV/ssub_sat_plus.ll
    llvm/test/CodeGen/RISCV/uadd_sat.ll
    llvm/test/CodeGen/RISCV/uadd_sat_plus.ll
    llvm/test/CodeGen/RISCV/usub_sat.ll
    llvm/test/CodeGen/RISCV/usub_sat_plus.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/sadd_sat.ll b/llvm/test/CodeGen/RISCV/sadd_sat.ll
new file mode 100644
index 000000000000..9d14d100903b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/sadd_sat.ll
@@ -0,0 +1,240 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=riscv32 -mattr=+m | FileCheck %s --check-prefix=RV32I
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m | FileCheck %s --check-prefix=RV64I
+
+declare i4 @llvm.sadd.sat.i4(i4, i4)
+declare i8 @llvm.sadd.sat.i8(i8, i8)
+declare i16 @llvm.sadd.sat.i16(i16, i16)
+declare i32 @llvm.sadd.sat.i32(i32, i32)
+declare i64 @llvm.sadd.sat.i64(i64, i64)
+
+define signext i32 @func(i32 signext %x, i32 signext %y) nounwind {
+; RV32I-LABEL: func:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    add a3, a0, a1
+; RV32I-NEXT:    lui a0, 524288
+; RV32I-NEXT:    bgez a3, .LBB0_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    addi a0, a0, -1
+; RV32I-NEXT:  .LBB0_2:
+; RV32I-NEXT:    slt a2, a3, a2
+; RV32I-NEXT:    slti a1, a1, 0
+; RV32I-NEXT:    xor a1, a1, a2
+; RV32I-NEXT:    bnez a1, .LBB0_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    mv a0, a3
+; RV32I-NEXT:  .LBB0_4:
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    lui a1, 524288
+; RV64I-NEXT:    addiw a2, a1, -1
+; RV64I-NEXT:    bge a0, a2, .LBB0_3
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    bge a1, a0, .LBB0_4
+; RV64I-NEXT:  .LBB0_2:
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB0_3:
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    blt a1, a0, .LBB0_2
+; RV64I-NEXT:  .LBB0_4:
+; RV64I-NEXT:    lui a0, 524288
+; RV64I-NEXT:    ret
+  %tmp = call i32 @llvm.sadd.sat.i32(i32 %x, i32 %y);
+  ret i32 %tmp;
+}
+
+define i64 @func2(i64 %x, i64 %y) nounwind {
+; RV32I-LABEL: func2:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    mv a4, a0
+; RV32I-NEXT:    add a5, a1, a3
+; RV32I-NEXT:    add a0, a0, a2
+; RV32I-NEXT:    sltu a2, a0, a4
+; RV32I-NEXT:    add a2, a5, a2
+; RV32I-NEXT:    addi a6, zero, -1
+; RV32I-NEXT:    addi a7, zero, 1
+; RV32I-NEXT:    addi a4, zero, 1
+; RV32I-NEXT:    beqz a2, .LBB1_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    slt a4, a6, a2
+; RV32I-NEXT:  .LBB1_2:
+; RV32I-NEXT:    addi a5, zero, 1
+; RV32I-NEXT:    beqz a1, .LBB1_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    slt a5, a6, a1
+; RV32I-NEXT:  .LBB1_4:
+; RV32I-NEXT:    xor a1, a5, a4
+; RV32I-NEXT:    snez a1, a1
+; RV32I-NEXT:    beqz a3, .LBB1_6
+; RV32I-NEXT:  # %bb.5:
+; RV32I-NEXT:    slt a7, a6, a3
+; RV32I-NEXT:  .LBB1_6:
+; RV32I-NEXT:    xor a3, a5, a7
+; RV32I-NEXT:    seqz a3, a3
+; RV32I-NEXT:    and a3, a3, a1
+; RV32I-NEXT:    bnez a3, .LBB1_10
+; RV32I-NEXT:  # %bb.7:
+; RV32I-NEXT:    lui a1, 524288
+; RV32I-NEXT:    bltz a2, .LBB1_11
+; RV32I-NEXT:  .LBB1_8:
+; RV32I-NEXT:    beqz a3, .LBB1_12
+; RV32I-NEXT:  .LBB1_9:
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB1_10:
+; RV32I-NEXT:    srai a0, a2, 31
+; RV32I-NEXT:    lui a1, 524288
+; RV32I-NEXT:    bgez a2, .LBB1_8
+; RV32I-NEXT:  .LBB1_11:
+; RV32I-NEXT:    addi a1, a1, -1
+; RV32I-NEXT:    bnez a3, .LBB1_9
+; RV32I-NEXT:  .LBB1_12:
+; RV32I-NEXT:    mv a1, a2
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func2:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    add a3, a0, a1
+; RV64I-NEXT:    addi a0, zero, -1
+; RV64I-NEXT:    slli a0, a0, 63
+; RV64I-NEXT:    bgez a3, .LBB1_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:  .LBB1_2:
+; RV64I-NEXT:    slt a2, a3, a2
+; RV64I-NEXT:    slti a1, a1, 0
+; RV64I-NEXT:    xor a1, a1, a2
+; RV64I-NEXT:    bnez a1, .LBB1_4
+; RV64I-NEXT:  # %bb.3:
+; RV64I-NEXT:    mv a0, a3
+; RV64I-NEXT:  .LBB1_4:
+; RV64I-NEXT:    ret
+  %tmp = call i64 @llvm.sadd.sat.i64(i64 %x, i64 %y);
+  ret i64 %tmp;
+}
+
+define signext i16 @func16(i16 signext %x, i16 signext %y) nounwind {
+; RV32I-LABEL: func16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    lui a1, 8
+; RV32I-NEXT:    addi a1, a1, -1
+; RV32I-NEXT:    bge a0, a1, .LBB2_3
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    lui a1, 1048568
+; RV32I-NEXT:    bge a1, a0, .LBB2_4
+; RV32I-NEXT:  .LBB2_2:
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB2_3:
+; RV32I-NEXT:    mv a0, a1
+; RV32I-NEXT:    lui a1, 1048568
+; RV32I-NEXT:    blt a1, a0, .LBB2_2
+; RV32I-NEXT:  .LBB2_4:
+; RV32I-NEXT:    lui a0, 1048568
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    lui a1, 8
+; RV64I-NEXT:    addiw a1, a1, -1
+; RV64I-NEXT:    bge a0, a1, .LBB2_3
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    lui a1, 1048568
+; RV64I-NEXT:    bge a1, a0, .LBB2_4
+; RV64I-NEXT:  .LBB2_2:
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB2_3:
+; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:    lui a1, 1048568
+; RV64I-NEXT:    blt a1, a0, .LBB2_2
+; RV64I-NEXT:  .LBB2_4:
+; RV64I-NEXT:    lui a0, 1048568
+; RV64I-NEXT:    ret
+  %tmp = call i16 @llvm.sadd.sat.i16(i16 %x, i16 %y);
+  ret i16 %tmp;
+}
+
+define signext i8 @func8(i8 signext %x, i8 signext %y) nounwind {
+; RV32I-LABEL: func8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    addi a1, zero, 127
+; RV32I-NEXT:    bge a0, a1, .LBB3_3
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    addi a1, zero, -128
+; RV32I-NEXT:    bge a1, a0, .LBB3_4
+; RV32I-NEXT:  .LBB3_2:
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB3_3:
+; RV32I-NEXT:    addi a0, zero, 127
+; RV32I-NEXT:    addi a1, zero, -128
+; RV32I-NEXT:    blt a1, a0, .LBB3_2
+; RV32I-NEXT:  .LBB3_4:
+; RV32I-NEXT:    addi a0, zero, -128
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    addi a1, zero, 127
+; RV64I-NEXT:    bge a0, a1, .LBB3_3
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    addi a1, zero, -128
+; RV64I-NEXT:    bge a1, a0, .LBB3_4
+; RV64I-NEXT:  .LBB3_2:
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB3_3:
+; RV64I-NEXT:    addi a0, zero, 127
+; RV64I-NEXT:    addi a1, zero, -128
+; RV64I-NEXT:    blt a1, a0, .LBB3_2
+; RV64I-NEXT:  .LBB3_4:
+; RV64I-NEXT:    addi a0, zero, -128
+; RV64I-NEXT:    ret
+  %tmp = call i8 @llvm.sadd.sat.i8(i8 %x, i8 %y);
+  ret i8 %tmp;
+}
+
+define signext i4 @func3(i4 signext %x, i4 signext %y) nounwind {
+; RV32I-LABEL: func3:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    addi a1, zero, 7
+; RV32I-NEXT:    bge a0, a1, .LBB4_3
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    addi a1, zero, -8
+; RV32I-NEXT:    bge a1, a0, .LBB4_4
+; RV32I-NEXT:  .LBB4_2:
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB4_3:
+; RV32I-NEXT:    addi a0, zero, 7
+; RV32I-NEXT:    addi a1, zero, -8
+; RV32I-NEXT:    blt a1, a0, .LBB4_2
+; RV32I-NEXT:  .LBB4_4:
+; RV32I-NEXT:    addi a0, zero, -8
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func3:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    addi a1, zero, 7
+; RV64I-NEXT:    bge a0, a1, .LBB4_3
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    addi a1, zero, -8
+; RV64I-NEXT:    bge a1, a0, .LBB4_4
+; RV64I-NEXT:  .LBB4_2:
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB4_3:
+; RV64I-NEXT:    addi a0, zero, 7
+; RV64I-NEXT:    addi a1, zero, -8
+; RV64I-NEXT:    blt a1, a0, .LBB4_2
+; RV64I-NEXT:  .LBB4_4:
+; RV64I-NEXT:    addi a0, zero, -8
+; RV64I-NEXT:    ret
+  %tmp = call i4 @llvm.sadd.sat.i4(i4 %x, i4 %y);
+  ret i4 %tmp;
+}

diff --git a/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll b/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll
new file mode 100644
index 000000000000..67b174b05cab
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll
@@ -0,0 +1,279 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=riscv32 -mattr=+m | FileCheck %s --check-prefix=RV32I
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m | FileCheck %s --check-prefix=RV64I
+
+declare i4 @llvm.sadd.sat.i4(i4, i4)
+declare i8 @llvm.sadd.sat.i8(i8, i8)
+declare i16 @llvm.sadd.sat.i16(i16, i16)
+declare i32 @llvm.sadd.sat.i32(i32, i32)
+declare i64 @llvm.sadd.sat.i64(i64, i64)
+
+define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
+; RV32I-LABEL: func32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    mul a2, a1, a2
+; RV32I-NEXT:    add a1, a0, a2
+; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    slti a2, a2, 0
+; RV32I-NEXT:    xor a2, a2, a0
+; RV32I-NEXT:    lui a0, 524288
+; RV32I-NEXT:    bltz a1, .LBB0_3
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    beqz a2, .LBB0_4
+; RV32I-NEXT:  .LBB0_2:
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB0_3:
+; RV32I-NEXT:    addi a0, a0, -1
+; RV32I-NEXT:    bnez a2, .LBB0_2
+; RV32I-NEXT:  .LBB0_4:
+; RV32I-NEXT:    mv a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    mulw a1, a1, a2
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    lui a1, 524288
+; RV64I-NEXT:    addiw a2, a1, -1
+; RV64I-NEXT:    bge a0, a2, .LBB0_3
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    bge a1, a0, .LBB0_4
+; RV64I-NEXT:  .LBB0_2:
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB0_3:
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    blt a1, a0, .LBB0_2
+; RV64I-NEXT:  .LBB0_4:
+; RV64I-NEXT:    lui a0, 524288
+; RV64I-NEXT:    ret
+  %a = mul i32 %y, %z
+  %tmp = call i32 @llvm.sadd.sat.i32(i32 %x, i32 %a)
+  ret i32 %tmp
+}
+
+define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
+; RV32I-LABEL: func64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    add a3, a1, a5
+; RV32I-NEXT:    add a0, a0, a4
+; RV32I-NEXT:    sltu a2, a0, a2
+; RV32I-NEXT:    add a2, a3, a2
+; RV32I-NEXT:    addi a6, zero, -1
+; RV32I-NEXT:    addi a7, zero, 1
+; RV32I-NEXT:    addi a3, zero, 1
+; RV32I-NEXT:    beqz a2, .LBB1_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    slt a3, a6, a2
+; RV32I-NEXT:  .LBB1_2:
+; RV32I-NEXT:    addi a4, zero, 1
+; RV32I-NEXT:    beqz a1, .LBB1_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    slt a4, a6, a1
+; RV32I-NEXT:  .LBB1_4:
+; RV32I-NEXT:    xor a1, a4, a3
+; RV32I-NEXT:    snez a1, a1
+; RV32I-NEXT:    beqz a5, .LBB1_6
+; RV32I-NEXT:  # %bb.5:
+; RV32I-NEXT:    slt a7, a6, a5
+; RV32I-NEXT:  .LBB1_6:
+; RV32I-NEXT:    xor a3, a4, a7
+; RV32I-NEXT:    seqz a3, a3
+; RV32I-NEXT:    and a3, a3, a1
+; RV32I-NEXT:    bnez a3, .LBB1_10
+; RV32I-NEXT:  # %bb.7:
+; RV32I-NEXT:    lui a1, 524288
+; RV32I-NEXT:    bltz a2, .LBB1_11
+; RV32I-NEXT:  .LBB1_8:
+; RV32I-NEXT:    beqz a3, .LBB1_12
+; RV32I-NEXT:  .LBB1_9:
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB1_10:
+; RV32I-NEXT:    srai a0, a2, 31
+; RV32I-NEXT:    lui a1, 524288
+; RV32I-NEXT:    bgez a2, .LBB1_8
+; RV32I-NEXT:  .LBB1_11:
+; RV32I-NEXT:    addi a1, a1, -1
+; RV32I-NEXT:    bnez a3, .LBB1_9
+; RV32I-NEXT:  .LBB1_12:
+; RV32I-NEXT:    mv a1, a2
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    add a3, a0, a2
+; RV64I-NEXT:    addi a0, zero, -1
+; RV64I-NEXT:    slli a0, a0, 63
+; RV64I-NEXT:    bgez a3, .LBB1_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:  .LBB1_2:
+; RV64I-NEXT:    slt a1, a3, a1
+; RV64I-NEXT:    slti a2, a2, 0
+; RV64I-NEXT:    xor a1, a2, a1
+; RV64I-NEXT:    bnez a1, .LBB1_4
+; RV64I-NEXT:  # %bb.3:
+; RV64I-NEXT:    mv a0, a3
+; RV64I-NEXT:  .LBB1_4:
+; RV64I-NEXT:    ret
+  %a = mul i64 %y, %z
+  %tmp = call i64 @llvm.sadd.sat.i64(i64 %x, i64 %z)
+  ret i64 %tmp
+}
+
+define i16 @func16(i16 %x, i16 %y, i16 %z) nounwind {
+; RV32I-LABEL: func16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srai a0, a0, 16
+; RV32I-NEXT:    mul a1, a1, a2
+; RV32I-NEXT:    slli a1, a1, 16
+; RV32I-NEXT:    srai a1, a1, 16
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    lui a1, 8
+; RV32I-NEXT:    addi a1, a1, -1
+; RV32I-NEXT:    bge a0, a1, .LBB2_3
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    lui a1, 1048568
+; RV32I-NEXT:    bge a1, a0, .LBB2_4
+; RV32I-NEXT:  .LBB2_2:
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB2_3:
+; RV32I-NEXT:    mv a0, a1
+; RV32I-NEXT:    lui a1, 1048568
+; RV32I-NEXT:    blt a1, a0, .LBB2_2
+; RV32I-NEXT:  .LBB2_4:
+; RV32I-NEXT:    lui a0, 1048568
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srai a0, a0, 48
+; RV64I-NEXT:    mul a1, a1, a2
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srai a1, a1, 48
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    lui a1, 8
+; RV64I-NEXT:    addiw a1, a1, -1
+; RV64I-NEXT:    bge a0, a1, .LBB2_3
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    lui a1, 1048568
+; RV64I-NEXT:    bge a1, a0, .LBB2_4
+; RV64I-NEXT:  .LBB2_2:
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB2_3:
+; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:    lui a1, 1048568
+; RV64I-NEXT:    blt a1, a0, .LBB2_2
+; RV64I-NEXT:  .LBB2_4:
+; RV64I-NEXT:    lui a0, 1048568
+; RV64I-NEXT:    ret
+  %a = mul i16 %y, %z
+  %tmp = call i16 @llvm.sadd.sat.i16(i16 %x, i16 %a)
+  ret i16 %tmp
+}
+
+define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
+; RV32I-LABEL: func8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    srai a0, a0, 24
+; RV32I-NEXT:    mul a1, a1, a2
+; RV32I-NEXT:    slli a1, a1, 24
+; RV32I-NEXT:    srai a1, a1, 24
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    addi a1, zero, 127
+; RV32I-NEXT:    bge a0, a1, .LBB3_3
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    addi a1, zero, -128
+; RV32I-NEXT:    bge a1, a0, .LBB3_4
+; RV32I-NEXT:  .LBB3_2:
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB3_3:
+; RV32I-NEXT:    addi a0, zero, 127
+; RV32I-NEXT:    addi a1, zero, -128
+; RV32I-NEXT:    blt a1, a0, .LBB3_2
+; RV32I-NEXT:  .LBB3_4:
+; RV32I-NEXT:    addi a0, zero, -128
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 56
+; RV64I-NEXT:    srai a0, a0, 56
+; RV64I-NEXT:    mul a1, a1, a2
+; RV64I-NEXT:    slli a1, a1, 56
+; RV64I-NEXT:    srai a1, a1, 56
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    addi a1, zero, 127
+; RV64I-NEXT:    bge a0, a1, .LBB3_3
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    addi a1, zero, -128
+; RV64I-NEXT:    bge a1, a0, .LBB3_4
+; RV64I-NEXT:  .LBB3_2:
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB3_3:
+; RV64I-NEXT:    addi a0, zero, 127
+; RV64I-NEXT:    addi a1, zero, -128
+; RV64I-NEXT:    blt a1, a0, .LBB3_2
+; RV64I-NEXT:  .LBB3_4:
+; RV64I-NEXT:    addi a0, zero, -128
+; RV64I-NEXT:    ret
+  %a = mul i8 %y, %z
+  %tmp = call i8 @llvm.sadd.sat.i8(i8 %x, i8 %a)
+  ret i8 %tmp
+}
+
+define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
+; RV32I-LABEL: func4:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 28
+; RV32I-NEXT:    srai a0, a0, 28
+; RV32I-NEXT:    mul a1, a1, a2
+; RV32I-NEXT:    slli a1, a1, 28
+; RV32I-NEXT:    srai a1, a1, 28
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    addi a1, zero, 7
+; RV32I-NEXT:    bge a0, a1, .LBB4_3
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    addi a1, zero, -8
+; RV32I-NEXT:    bge a1, a0, .LBB4_4
+; RV32I-NEXT:  .LBB4_2:
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB4_3:
+; RV32I-NEXT:    addi a0, zero, 7
+; RV32I-NEXT:    addi a1, zero, -8
+; RV32I-NEXT:    blt a1, a0, .LBB4_2
+; RV32I-NEXT:  .LBB4_4:
+; RV32I-NEXT:    addi a0, zero, -8
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func4:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 60
+; RV64I-NEXT:    srai a0, a0, 60
+; RV64I-NEXT:    mul a1, a1, a2
+; RV64I-NEXT:    slli a1, a1, 60
+; RV64I-NEXT:    srai a1, a1, 60
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    addi a1, zero, 7
+; RV64I-NEXT:    bge a0, a1, .LBB4_3
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    addi a1, zero, -8
+; RV64I-NEXT:    bge a1, a0, .LBB4_4
+; RV64I-NEXT:  .LBB4_2:
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB4_3:
+; RV64I-NEXT:    addi a0, zero, 7
+; RV64I-NEXT:    addi a1, zero, -8
+; RV64I-NEXT:    blt a1, a0, .LBB4_2
+; RV64I-NEXT:  .LBB4_4:
+; RV64I-NEXT:    addi a0, zero, -8
+; RV64I-NEXT:    ret
+  %a = mul i4 %y, %z
+  %tmp = call i4 @llvm.sadd.sat.i4(i4 %x, i4 %a)
+  ret i4 %tmp
+}

diff --git a/llvm/test/CodeGen/RISCV/ssub_sat.ll b/llvm/test/CodeGen/RISCV/ssub_sat.ll
new file mode 100644
index 000000000000..036fd592e55a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/ssub_sat.ll
@@ -0,0 +1,241 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=riscv32 -mattr=+m | FileCheck %s --check-prefix=RV32I
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m | FileCheck %s --check-prefix=RV64I
+
+declare i4 @llvm.ssub.sat.i4(i4, i4)
+declare i8 @llvm.ssub.sat.i8(i8, i8)
+declare i16 @llvm.ssub.sat.i16(i16, i16)
+declare i32 @llvm.ssub.sat.i32(i32, i32)
+declare i64 @llvm.ssub.sat.i64(i64, i64)
+
+define signext i32 @func(i32 signext %x, i32 signext %y) nounwind {
+; RV32I-LABEL: func:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sgtz a2, a1
+; RV32I-NEXT:    sub a1, a0, a1
+; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    xor a2, a2, a0
+; RV32I-NEXT:    lui a0, 524288
+; RV32I-NEXT:    bltz a1, .LBB0_3
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    beqz a2, .LBB0_4
+; RV32I-NEXT:  .LBB0_2:
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB0_3:
+; RV32I-NEXT:    addi a0, a0, -1
+; RV32I-NEXT:    bnez a2, .LBB0_2
+; RV32I-NEXT:  .LBB0_4:
+; RV32I-NEXT:    mv a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    lui a1, 524288
+; RV64I-NEXT:    addiw a2, a1, -1
+; RV64I-NEXT:    bge a0, a2, .LBB0_3
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    bge a1, a0, .LBB0_4
+; RV64I-NEXT:  .LBB0_2:
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB0_3:
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    blt a1, a0, .LBB0_2
+; RV64I-NEXT:  .LBB0_4:
+; RV64I-NEXT:    lui a0, 524288
+; RV64I-NEXT:    ret
+  %tmp = call i32 @llvm.ssub.sat.i32(i32 %x, i32 %y);
+  ret i32 %tmp;
+}
+
+define i64 @func2(i64 %x, i64 %y) nounwind {
+; RV32I-LABEL: func2:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sltu a4, a0, a2
+; RV32I-NEXT:    sub a5, a1, a3
+; RV32I-NEXT:    sub t0, a5, a4
+; RV32I-NEXT:    addi a6, zero, -1
+; RV32I-NEXT:    addi a7, zero, 1
+; RV32I-NEXT:    addi a4, zero, 1
+; RV32I-NEXT:    beqz t0, .LBB1_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    slt a4, a6, t0
+; RV32I-NEXT:  .LBB1_2:
+; RV32I-NEXT:    addi a5, zero, 1
+; RV32I-NEXT:    beqz a1, .LBB1_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    slt a5, a6, a1
+; RV32I-NEXT:  .LBB1_4:
+; RV32I-NEXT:    xor a1, a5, a4
+; RV32I-NEXT:    snez a1, a1
+; RV32I-NEXT:    beqz a3, .LBB1_6
+; RV32I-NEXT:  # %bb.5:
+; RV32I-NEXT:    slt a7, a6, a3
+; RV32I-NEXT:  .LBB1_6:
+; RV32I-NEXT:    xor a3, a5, a7
+; RV32I-NEXT:    snez a3, a3
+; RV32I-NEXT:    and a3, a3, a1
+; RV32I-NEXT:    bnez a3, .LBB1_8
+; RV32I-NEXT:  # %bb.7:
+; RV32I-NEXT:    sub a0, a0, a2
+; RV32I-NEXT:    lui a1, 524288
+; RV32I-NEXT:    bltz t0, .LBB1_9
+; RV32I-NEXT:    j .LBB1_10
+; RV32I-NEXT:  .LBB1_8:
+; RV32I-NEXT:    srai a0, t0, 31
+; RV32I-NEXT:    lui a1, 524288
+; RV32I-NEXT:    bgez t0, .LBB1_10
+; RV32I-NEXT:  .LBB1_9:
+; RV32I-NEXT:    addi a1, a1, -1
+; RV32I-NEXT:  .LBB1_10:
+; RV32I-NEXT:    beqz a3, .LBB1_12
+; RV32I-NEXT:  # %bb.11:
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB1_12:
+; RV32I-NEXT:    mv a1, t0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func2:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sgtz a2, a1
+; RV64I-NEXT:    sub a1, a0, a1
+; RV64I-NEXT:    slt a0, a1, a0
+; RV64I-NEXT:    xor a2, a2, a0
+; RV64I-NEXT:    addi a0, zero, -1
+; RV64I-NEXT:    slli a0, a0, 63
+; RV64I-NEXT:    bltz a1, .LBB1_3
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    beqz a2, .LBB1_4
+; RV64I-NEXT:  .LBB1_2:
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB1_3:
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    bnez a2, .LBB1_2
+; RV64I-NEXT:  .LBB1_4:
+; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:    ret
+  %tmp = call i64 @llvm.ssub.sat.i64(i64 %x, i64 %y);
+  ret i64 %tmp;
+}
+
+define signext i16 @func16(i16 signext %x, i16 signext %y) nounwind {
+; RV32I-LABEL: func16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    lui a1, 8
+; RV32I-NEXT:    addi a1, a1, -1
+; RV32I-NEXT:    bge a0, a1, .LBB2_3
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    lui a1, 1048568
+; RV32I-NEXT:    bge a1, a0, .LBB2_4
+; RV32I-NEXT:  .LBB2_2:
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB2_3:
+; RV32I-NEXT:    mv a0, a1
+; RV32I-NEXT:    lui a1, 1048568
+; RV32I-NEXT:    blt a1, a0, .LBB2_2
+; RV32I-NEXT:  .LBB2_4:
+; RV32I-NEXT:    lui a0, 1048568
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    lui a1, 8
+; RV64I-NEXT:    addiw a1, a1, -1
+; RV64I-NEXT:    bge a0, a1, .LBB2_3
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    lui a1, 1048568
+; RV64I-NEXT:    bge a1, a0, .LBB2_4
+; RV64I-NEXT:  .LBB2_2:
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB2_3:
+; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:    lui a1, 1048568
+; RV64I-NEXT:    blt a1, a0, .LBB2_2
+; RV64I-NEXT:  .LBB2_4:
+; RV64I-NEXT:    lui a0, 1048568
+; RV64I-NEXT:    ret
+  %tmp = call i16 @llvm.ssub.sat.i16(i16 %x, i16 %y);
+  ret i16 %tmp;
+}
+
+define signext i8 @func8(i8 signext %x, i8 signext %y) nounwind {
+; RV32I-LABEL: func8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    addi a1, zero, 127
+; RV32I-NEXT:    bge a0, a1, .LBB3_3
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    addi a1, zero, -128
+; RV32I-NEXT:    bge a1, a0, .LBB3_4
+; RV32I-NEXT:  .LBB3_2:
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB3_3:
+; RV32I-NEXT:    addi a0, zero, 127
+; RV32I-NEXT:    addi a1, zero, -128
+; RV32I-NEXT:    blt a1, a0, .LBB3_2
+; RV32I-NEXT:  .LBB3_4:
+; RV32I-NEXT:    addi a0, zero, -128
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    addi a1, zero, 127
+; RV64I-NEXT:    bge a0, a1, .LBB3_3
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    addi a1, zero, -128
+; RV64I-NEXT:    bge a1, a0, .LBB3_4
+; RV64I-NEXT:  .LBB3_2:
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB3_3:
+; RV64I-NEXT:    addi a0, zero, 127
+; RV64I-NEXT:    addi a1, zero, -128
+; RV64I-NEXT:    blt a1, a0, .LBB3_2
+; RV64I-NEXT:  .LBB3_4:
+; RV64I-NEXT:    addi a0, zero, -128
+; RV64I-NEXT:    ret
+  %tmp = call i8 @llvm.ssub.sat.i8(i8 %x, i8 %y);
+  ret i8 %tmp;
+}
+
+define signext i4 @func3(i4 signext %x, i4 signext %y) nounwind {
+; RV32I-LABEL: func3:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    addi a1, zero, 7
+; RV32I-NEXT:    bge a0, a1, .LBB4_3
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    addi a1, zero, -8
+; RV32I-NEXT:    bge a1, a0, .LBB4_4
+; RV32I-NEXT:  .LBB4_2:
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB4_3:
+; RV32I-NEXT:    addi a0, zero, 7
+; RV32I-NEXT:    addi a1, zero, -8
+; RV32I-NEXT:    blt a1, a0, .LBB4_2
+; RV32I-NEXT:  .LBB4_4:
+; RV32I-NEXT:    addi a0, zero, -8
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func3:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    addi a1, zero, 7
+; RV64I-NEXT:    bge a0, a1, .LBB4_3
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    addi a1, zero, -8
+; RV64I-NEXT:    bge a1, a0, .LBB4_4
+; RV64I-NEXT:  .LBB4_2:
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB4_3:
+; RV64I-NEXT:    addi a0, zero, 7
+; RV64I-NEXT:    addi a1, zero, -8
+; RV64I-NEXT:    blt a1, a0, .LBB4_2
+; RV64I-NEXT:  .LBB4_4:
+; RV64I-NEXT:    addi a0, zero, -8
+; RV64I-NEXT:    ret
+  %tmp = call i4 @llvm.ssub.sat.i4(i4 %x, i4 %y);
+  ret i4 %tmp;
+}

diff --git a/llvm/test/CodeGen/RISCV/ssub_sat_plus.ll b/llvm/test/CodeGen/RISCV/ssub_sat_plus.ll
new file mode 100644
index 000000000000..a365dae7287d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/ssub_sat_plus.ll
@@ -0,0 +1,279 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=riscv32 -mattr=+m | FileCheck %s --check-prefix=RV32I
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m | FileCheck %s --check-prefix=RV64I
+
+declare i4 @llvm.ssub.sat.i4(i4, i4)
+declare i8 @llvm.ssub.sat.i8(i8, i8)
+declare i16 @llvm.ssub.sat.i16(i16, i16)
+declare i32 @llvm.ssub.sat.i32(i32, i32)
+declare i64 @llvm.ssub.sat.i64(i64, i64)
+
+define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
+; RV32I-LABEL: func32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    mul a1, a1, a2
+; RV32I-NEXT:    sgtz a2, a1
+; RV32I-NEXT:    sub a1, a0, a1
+; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    xor a2, a2, a0
+; RV32I-NEXT:    lui a0, 524288
+; RV32I-NEXT:    bltz a1, .LBB0_3
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    beqz a2, .LBB0_4
+; RV32I-NEXT:  .LBB0_2:
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB0_3:
+; RV32I-NEXT:    addi a0, a0, -1
+; RV32I-NEXT:    bnez a2, .LBB0_2
+; RV32I-NEXT:  .LBB0_4:
+; RV32I-NEXT:    mv a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    mulw a1, a1, a2
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    lui a1, 524288
+; RV64I-NEXT:    addiw a2, a1, -1
+; RV64I-NEXT:    bge a0, a2, .LBB0_3
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    bge a1, a0, .LBB0_4
+; RV64I-NEXT:  .LBB0_2:
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB0_3:
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    blt a1, a0, .LBB0_2
+; RV64I-NEXT:  .LBB0_4:
+; RV64I-NEXT:    lui a0, 524288
+; RV64I-NEXT:    ret
+  %a = mul i32 %y, %z
+  %tmp = call i32 @llvm.ssub.sat.i32(i32 %x, i32 %a)
+  ret i32 %tmp
+}
+
+define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
+; RV32I-LABEL: func64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sltu a2, a0, a4
+; RV32I-NEXT:    sub a3, a1, a5
+; RV32I-NEXT:    sub t0, a3, a2
+; RV32I-NEXT:    addi a6, zero, -1
+; RV32I-NEXT:    addi a7, zero, 1
+; RV32I-NEXT:    addi a2, zero, 1
+; RV32I-NEXT:    beqz t0, .LBB1_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    slt a2, a6, t0
+; RV32I-NEXT:  .LBB1_2:
+; RV32I-NEXT:    addi a3, zero, 1
+; RV32I-NEXT:    beqz a1, .LBB1_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    slt a3, a6, a1
+; RV32I-NEXT:  .LBB1_4:
+; RV32I-NEXT:    xor a1, a3, a2
+; RV32I-NEXT:    snez a1, a1
+; RV32I-NEXT:    beqz a5, .LBB1_6
+; RV32I-NEXT:  # %bb.5:
+; RV32I-NEXT:    slt a7, a6, a5
+; RV32I-NEXT:  .LBB1_6:
+; RV32I-NEXT:    xor a2, a3, a7
+; RV32I-NEXT:    snez a2, a2
+; RV32I-NEXT:    and a3, a2, a1
+; RV32I-NEXT:    bnez a3, .LBB1_8
+; RV32I-NEXT:  # %bb.7:
+; RV32I-NEXT:    sub a0, a0, a4
+; RV32I-NEXT:    lui a1, 524288
+; RV32I-NEXT:    bltz t0, .LBB1_9
+; RV32I-NEXT:    j .LBB1_10
+; RV32I-NEXT:  .LBB1_8:
+; RV32I-NEXT:    srai a0, t0, 31
+; RV32I-NEXT:    lui a1, 524288
+; RV32I-NEXT:    bgez t0, .LBB1_10
+; RV32I-NEXT:  .LBB1_9:
+; RV32I-NEXT:    addi a1, a1, -1
+; RV32I-NEXT:  .LBB1_10:
+; RV32I-NEXT:    beqz a3, .LBB1_12
+; RV32I-NEXT:  # %bb.11:
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB1_12:
+; RV32I-NEXT:    mv a1, t0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sgtz a3, a2
+; RV64I-NEXT:    sub a1, a0, a2
+; RV64I-NEXT:    slt a0, a1, a0
+; RV64I-NEXT:    xor a2, a3, a0
+; RV64I-NEXT:    addi a0, zero, -1
+; RV64I-NEXT:    slli a0, a0, 63
+; RV64I-NEXT:    bltz a1, .LBB1_3
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    beqz a2, .LBB1_4
+; RV64I-NEXT:  .LBB1_2:
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB1_3:
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    bnez a2, .LBB1_2
+; RV64I-NEXT:  .LBB1_4:
+; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:    ret
+  %a = mul i64 %y, %z
+  %tmp = call i64 @llvm.ssub.sat.i64(i64 %x, i64 %z)
+  ret i64 %tmp
+}
+
+define i16 @func16(i16 %x, i16 %y, i16 %z) nounwind {
+; RV32I-LABEL: func16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srai a0, a0, 16
+; RV32I-NEXT:    mul a1, a1, a2
+; RV32I-NEXT:    slli a1, a1, 16
+; RV32I-NEXT:    srai a1, a1, 16
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    lui a1, 8
+; RV32I-NEXT:    addi a1, a1, -1
+; RV32I-NEXT:    bge a0, a1, .LBB2_3
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    lui a1, 1048568
+; RV32I-NEXT:    bge a1, a0, .LBB2_4
+; RV32I-NEXT:  .LBB2_2:
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB2_3:
+; RV32I-NEXT:    mv a0, a1
+; RV32I-NEXT:    lui a1, 1048568
+; RV32I-NEXT:    blt a1, a0, .LBB2_2
+; RV32I-NEXT:  .LBB2_4:
+; RV32I-NEXT:    lui a0, 1048568
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srai a0, a0, 48
+; RV64I-NEXT:    mul a1, a1, a2
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srai a1, a1, 48
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    lui a1, 8
+; RV64I-NEXT:    addiw a1, a1, -1
+; RV64I-NEXT:    bge a0, a1, .LBB2_3
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    lui a1, 1048568
+; RV64I-NEXT:    bge a1, a0, .LBB2_4
+; RV64I-NEXT:  .LBB2_2:
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB2_3:
+; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:    lui a1, 1048568
+; RV64I-NEXT:    blt a1, a0, .LBB2_2
+; RV64I-NEXT:  .LBB2_4:
+; RV64I-NEXT:    lui a0, 1048568
+; RV64I-NEXT:    ret
+  %a = mul i16 %y, %z
+  %tmp = call i16 @llvm.ssub.sat.i16(i16 %x, i16 %a)
+  ret i16 %tmp
+}
+
+define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
+; RV32I-LABEL: func8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 24
+; RV32I-NEXT:    srai a0, a0, 24
+; RV32I-NEXT:    mul a1, a1, a2
+; RV32I-NEXT:    slli a1, a1, 24
+; RV32I-NEXT:    srai a1, a1, 24
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    addi a1, zero, 127
+; RV32I-NEXT:    bge a0, a1, .LBB3_3
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    addi a1, zero, -128
+; RV32I-NEXT:    bge a1, a0, .LBB3_4
+; RV32I-NEXT:  .LBB3_2:
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB3_3:
+; RV32I-NEXT:    addi a0, zero, 127
+; RV32I-NEXT:    addi a1, zero, -128
+; RV32I-NEXT:    blt a1, a0, .LBB3_2
+; RV32I-NEXT:  .LBB3_4:
+; RV32I-NEXT:    addi a0, zero, -128
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 56
+; RV64I-NEXT:    srai a0, a0, 56
+; RV64I-NEXT:    mul a1, a1, a2
+; RV64I-NEXT:    slli a1, a1, 56
+; RV64I-NEXT:    srai a1, a1, 56
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    addi a1, zero, 127
+; RV64I-NEXT:    bge a0, a1, .LBB3_3
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    addi a1, zero, -128
+; RV64I-NEXT:    bge a1, a0, .LBB3_4
+; RV64I-NEXT:  .LBB3_2:
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB3_3:
+; RV64I-NEXT:    addi a0, zero, 127
+; RV64I-NEXT:    addi a1, zero, -128
+; RV64I-NEXT:    blt a1, a0, .LBB3_2
+; RV64I-NEXT:  .LBB3_4:
+; RV64I-NEXT:    addi a0, zero, -128
+; RV64I-NEXT:    ret
+  %a = mul i8 %y, %z
+  %tmp = call i8 @llvm.ssub.sat.i8(i8 %x, i8 %a)
+  ret i8 %tmp
+}
+
+define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
+; RV32I-LABEL: func4:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 28
+; RV32I-NEXT:    srai a0, a0, 28
+; RV32I-NEXT:    mul a1, a1, a2
+; RV32I-NEXT:    slli a1, a1, 28
+; RV32I-NEXT:    srai a1, a1, 28
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    addi a1, zero, 7
+; RV32I-NEXT:    bge a0, a1, .LBB4_3
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    addi a1, zero, -8
+; RV32I-NEXT:    bge a1, a0, .LBB4_4
+; RV32I-NEXT:  .LBB4_2:
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB4_3:
+; RV32I-NEXT:    addi a0, zero, 7
+; RV32I-NEXT:    addi a1, zero, -8
+; RV32I-NEXT:    blt a1, a0, .LBB4_2
+; RV32I-NEXT:  .LBB4_4:
+; RV32I-NEXT:    addi a0, zero, -8
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func4:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 60
+; RV64I-NEXT:    srai a0, a0, 60
+; RV64I-NEXT:    mul a1, a1, a2
+; RV64I-NEXT:    slli a1, a1, 60
+; RV64I-NEXT:    srai a1, a1, 60
+; RV64I-NEXT:    sub a0, a0, a1
+; RV64I-NEXT:    addi a1, zero, 7
+; RV64I-NEXT:    bge a0, a1, .LBB4_3
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    addi a1, zero, -8
+; RV64I-NEXT:    bge a1, a0, .LBB4_4
+; RV64I-NEXT:  .LBB4_2:
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB4_3:
+; RV64I-NEXT:    addi a0, zero, 7
+; RV64I-NEXT:    addi a1, zero, -8
+; RV64I-NEXT:    blt a1, a0, .LBB4_2
+; RV64I-NEXT:  .LBB4_4:
+; RV64I-NEXT:    addi a0, zero, -8
+; RV64I-NEXT:    ret
+  %a = mul i4 %y, %z
+  %tmp = call i4 @llvm.ssub.sat.i4(i4 %x, i4 %a)
+  ret i4 %tmp
+}

diff --git a/llvm/test/CodeGen/RISCV/uadd_sat.ll b/llvm/test/CodeGen/RISCV/uadd_sat.ll
new file mode 100644
index 000000000000..615d04fd81c3
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/uadd_sat.ll
@@ -0,0 +1,149 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=riscv32 -mattr=+m | FileCheck %s --check-prefix=RV32I
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m | FileCheck %s --check-prefix=RV64I
+
+declare i4 @llvm.uadd.sat.i4(i4, i4)
+declare i8 @llvm.uadd.sat.i8(i8, i8)
+declare i16 @llvm.uadd.sat.i16(i16, i16)
+declare i32 @llvm.uadd.sat.i32(i32, i32)
+declare i64 @llvm.uadd.sat.i64(i64, i64)
+
+define signext i32 @func(i32 signext %x, i32 signext %y) nounwind {
+; RV32I-LABEL: func:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    add a1, a0, a1
+; RV32I-NEXT:    sltu a2, a1, a0
+; RV32I-NEXT:    addi a0, zero, -1
+; RV32I-NEXT:    bnez a2, .LBB0_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    mv a0, a1
+; RV32I-NEXT:  .LBB0_2:
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a1, 32
+; RV64I-NEXT:    srli a1, a1, 32
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    addi a1, zero, 1
+; RV64I-NEXT:    slli a1, a1, 32
+; RV64I-NEXT:    addi a1, a1, -1
+; RV64I-NEXT:    bltu a0, a1, .LBB0_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:  .LBB0_2:
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    ret
+  %tmp = call i32 @llvm.uadd.sat.i32(i32 %x, i32 %y);
+  ret i32 %tmp;
+}
+
+define i64 @func2(i64 %x, i64 %y) nounwind {
+; RV32I-LABEL: func2:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    add a3, a1, a3
+; RV32I-NEXT:    add a2, a0, a2
+; RV32I-NEXT:    sltu a4, a2, a0
+; RV32I-NEXT:    add a3, a3, a4
+; RV32I-NEXT:    beq a3, a1, .LBB1_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sltu a4, a3, a1
+; RV32I-NEXT:  .LBB1_2:
+; RV32I-NEXT:    addi a0, zero, -1
+; RV32I-NEXT:    addi a1, zero, -1
+; RV32I-NEXT:    bnez a4, .LBB1_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    mv a1, a3
+; RV32I-NEXT:  .LBB1_4:
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func2:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    add a1, a0, a1
+; RV64I-NEXT:    sltu a2, a1, a0
+; RV64I-NEXT:    addi a0, zero, -1
+; RV64I-NEXT:    bnez a2, .LBB1_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:  .LBB1_2:
+; RV64I-NEXT:    ret
+  %tmp = call i64 @llvm.uadd.sat.i64(i64 %x, i64 %y);
+  ret i64 %tmp;
+}
+
+define zeroext i16 @func16(i16 zeroext %x, i16 zeroext %y) nounwind {
+; RV32I-LABEL: func16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    lui a1, 16
+; RV32I-NEXT:    addi a1, a1, -1
+; RV32I-NEXT:    bltu a0, a1, .LBB2_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    mv a0, a1
+; RV32I-NEXT:  .LBB2_2:
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    addiw a1, a1, -1
+; RV64I-NEXT:    bltu a0, a1, .LBB2_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:  .LBB2_2:
+; RV64I-NEXT:    ret
+  %tmp = call i16 @llvm.uadd.sat.i16(i16 %x, i16 %y);
+  ret i16 %tmp;
+}
+
+define zeroext i8 @func8(i8 zeroext %x, i8 zeroext %y) nounwind {
+; RV32I-LABEL: func8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    addi a1, zero, 255
+; RV32I-NEXT:    bltu a0, a1, .LBB3_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    addi a0, zero, 255
+; RV32I-NEXT:  .LBB3_2:
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    addi a1, zero, 255
+; RV64I-NEXT:    bltu a0, a1, .LBB3_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    addi a0, zero, 255
+; RV64I-NEXT:  .LBB3_2:
+; RV64I-NEXT:    ret
+  %tmp = call i8 @llvm.uadd.sat.i8(i8 %x, i8 %y);
+  ret i8 %tmp;
+}
+
+define zeroext i4 @func3(i4 zeroext %x, i4 zeroext %y) nounwind {
+; RV32I-LABEL: func3:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    addi a1, zero, 15
+; RV32I-NEXT:    bltu a0, a1, .LBB4_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    addi a0, zero, 15
+; RV32I-NEXT:  .LBB4_2:
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func3:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    addi a1, zero, 15
+; RV64I-NEXT:    bltu a0, a1, .LBB4_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    addi a0, zero, 15
+; RV64I-NEXT:  .LBB4_2:
+; RV64I-NEXT:    ret
+  %tmp = call i4 @llvm.uadd.sat.i4(i4 %x, i4 %y);
+  ret i4 %tmp;
+}

diff --git a/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll b/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll
new file mode 100644
index 000000000000..bccae8273d22
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll
@@ -0,0 +1,173 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=riscv32 -mattr=+m | FileCheck %s --check-prefix=RV32I
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m | FileCheck %s --check-prefix=RV64I
+
+declare i4 @llvm.uadd.sat.i4(i4, i4)
+declare i8 @llvm.uadd.sat.i8(i8, i8)
+declare i16 @llvm.uadd.sat.i16(i16, i16)
+declare i32 @llvm.uadd.sat.i32(i32, i32)
+declare i64 @llvm.uadd.sat.i64(i64, i64)
+
+define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
+; RV32I-LABEL: func32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    mul a1, a1, a2
+; RV32I-NEXT:    add a1, a0, a1
+; RV32I-NEXT:    sltu a2, a1, a0
+; RV32I-NEXT:    addi a0, zero, -1
+; RV32I-NEXT:    bnez a2, .LBB0_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    mv a0, a1
+; RV32I-NEXT:  .LBB0_2:
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    mul a1, a1, a2
+; RV64I-NEXT:    slli a1, a1, 32
+; RV64I-NEXT:    srli a1, a1, 32
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    addi a1, zero, 1
+; RV64I-NEXT:    slli a1, a1, 32
+; RV64I-NEXT:    addi a1, a1, -1
+; RV64I-NEXT:    bltu a0, a1, .LBB0_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:  .LBB0_2:
+; RV64I-NEXT:    ret
+  %a = mul i32 %y, %z
+  %tmp = call i32 @llvm.uadd.sat.i32(i32 %x, i32 %a)
+  ret i32 %tmp
+}
+
+define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
+; RV32I-LABEL: func64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    add a3, a1, a5
+; RV32I-NEXT:    add a2, a0, a4
+; RV32I-NEXT:    sltu a4, a2, a0
+; RV32I-NEXT:    add a3, a3, a4
+; RV32I-NEXT:    beq a3, a1, .LBB1_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sltu a4, a3, a1
+; RV32I-NEXT:  .LBB1_2:
+; RV32I-NEXT:    addi a0, zero, -1
+; RV32I-NEXT:    addi a1, zero, -1
+; RV32I-NEXT:    bnez a4, .LBB1_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    mv a1, a3
+; RV32I-NEXT:  .LBB1_4:
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    add a1, a0, a2
+; RV64I-NEXT:    sltu a2, a1, a0
+; RV64I-NEXT:    addi a0, zero, -1
+; RV64I-NEXT:    bnez a2, .LBB1_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:  .LBB1_2:
+; RV64I-NEXT:    ret
+  %a = mul i64 %y, %z
+  %tmp = call i64 @llvm.uadd.sat.i64(i64 %x, i64 %z)
+  ret i64 %tmp
+}
+
+define i16 @func16(i16 %x, i16 %y, i16 %z) nounwind {
+; RV32I-LABEL: func16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lui a3, 16
+; RV32I-NEXT:    addi a3, a3, -1
+; RV32I-NEXT:    and a0, a0, a3
+; RV32I-NEXT:    mul a1, a1, a2
+; RV32I-NEXT:    and a1, a1, a3
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    bltu a0, a3, .LBB2_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    mv a0, a3
+; RV32I-NEXT:  .LBB2_2:
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a3, 16
+; RV64I-NEXT:    addiw a3, a3, -1
+; RV64I-NEXT:    and a0, a0, a3
+; RV64I-NEXT:    mul a1, a1, a2
+; RV64I-NEXT:    and a1, a1, a3
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    bltu a0, a3, .LBB2_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    mv a0, a3
+; RV64I-NEXT:  .LBB2_2:
+; RV64I-NEXT:    ret
+  %a = mul i16 %y, %z
+  %tmp = call i16 @llvm.uadd.sat.i16(i16 %x, i16 %a)
+  ret i16 %tmp
+}
+
+define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
+; RV32I-LABEL: func8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a0, a0, 255
+; RV32I-NEXT:    mul a1, a1, a2
+; RV32I-NEXT:    andi a1, a1, 255
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    addi a1, zero, 255
+; RV32I-NEXT:    bltu a0, a1, .LBB3_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    addi a0, zero, 255
+; RV32I-NEXT:  .LBB3_2:
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 255
+; RV64I-NEXT:    mul a1, a1, a2
+; RV64I-NEXT:    andi a1, a1, 255
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    addi a1, zero, 255
+; RV64I-NEXT:    bltu a0, a1, .LBB3_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    addi a0, zero, 255
+; RV64I-NEXT:  .LBB3_2:
+; RV64I-NEXT:    ret
+  %a = mul i8 %y, %z
+  %tmp = call i8 @llvm.uadd.sat.i8(i8 %x, i8 %a)
+  ret i8 %tmp
+}
+
+define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
+; RV32I-LABEL: func4:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a0, a0, 15
+; RV32I-NEXT:    mul a1, a1, a2
+; RV32I-NEXT:    andi a1, a1, 15
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    addi a1, zero, 15
+; RV32I-NEXT:    bltu a0, a1, .LBB4_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    addi a0, zero, 15
+; RV32I-NEXT:  .LBB4_2:
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func4:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 15
+; RV64I-NEXT:    mul a1, a1, a2
+; RV64I-NEXT:    andi a1, a1, 15
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    addi a1, zero, 15
+; RV64I-NEXT:    bltu a0, a1, .LBB4_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    addi a0, zero, 15
+; RV64I-NEXT:  .LBB4_2:
+; RV64I-NEXT:    ret
+  %a = mul i4 %y, %z
+  %tmp = call i4 @llvm.uadd.sat.i4(i4 %x, i4 %a)
+  ret i4 %tmp
+}

diff --git a/llvm/test/CodeGen/RISCV/usub_sat.ll b/llvm/test/CodeGen/RISCV/usub_sat.ll
new file mode 100644
index 000000000000..7cefc8c6380f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/usub_sat.ll
@@ -0,0 +1,165 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=riscv32 -mattr=+m | FileCheck %s --check-prefix=RV32I
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m | FileCheck %s --check-prefix=RV64I
+
+declare i4 @llvm.usub.sat.i4(i4, i4)
+declare i8 @llvm.usub.sat.i8(i8, i8)
+declare i16 @llvm.usub.sat.i16(i16, i16)
+declare i32 @llvm.usub.sat.i32(i32, i32)
+declare i64 @llvm.usub.sat.i64(i64, i64)
+
+define signext i32 @func(i32 signext %x, i32 signext %y) nounwind {
+; RV32I-LABEL: func:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sub a1, a0, a1
+; RV32I-NEXT:    sltu a2, a0, a1
+; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    bnez a2, .LBB0_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    mv a0, a1
+; RV32I-NEXT:  .LBB0_2:
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a1, 32
+; RV64I-NEXT:    srli a1, a1, 32
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a2, a0, 32
+; RV64I-NEXT:    sub a0, a2, a1
+; RV64I-NEXT:    sltu a2, a2, a0
+; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    bnez a2, .LBB0_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:  .LBB0_2:
+; RV64I-NEXT:    sext.w a0, a1
+; RV64I-NEXT:    ret
+  %tmp = call i32 @llvm.usub.sat.i32(i32 %x, i32 %y);
+  ret i32 %tmp;
+}
+
+define i64 @func2(i64 %x, i64 %y) nounwind {
+; RV32I-LABEL: func2:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sltu a4, a0, a2
+; RV32I-NEXT:    sub a3, a1, a3
+; RV32I-NEXT:    sub a3, a3, a4
+; RV32I-NEXT:    sub a2, a0, a2
+; RV32I-NEXT:    beq a3, a1, .LBB1_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sltu a4, a1, a3
+; RV32I-NEXT:    j .LBB1_3
+; RV32I-NEXT:  .LBB1_2:
+; RV32I-NEXT:    sltu a4, a0, a2
+; RV32I-NEXT:  .LBB1_3:
+; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    bnez a4, .LBB1_5
+; RV32I-NEXT:  # %bb.4:
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    mv a1, a3
+; RV32I-NEXT:  .LBB1_5:
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func2:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a1, a0, a1
+; RV64I-NEXT:    sltu a2, a0, a1
+; RV64I-NEXT:    mv a0, zero
+; RV64I-NEXT:    bnez a2, .LBB1_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:  .LBB1_2:
+; RV64I-NEXT:    ret
+  %tmp = call i64 @llvm.usub.sat.i64(i64 %x, i64 %y);
+  ret i64 %tmp;
+}
+
+define zeroext i16 @func16(i16 zeroext %x, i16 zeroext %y) nounwind {
+; RV32I-LABEL: func16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sub a1, a0, a1
+; RV32I-NEXT:    sltu a2, a0, a1
+; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    bnez a2, .LBB2_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    mv a0, a1
+; RV32I-NEXT:  .LBB2_2:
+; RV32I-NEXT:    lui a1, 16
+; RV32I-NEXT:    addi a1, a1, -1
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a1, a0, a1
+; RV64I-NEXT:    sltu a2, a0, a1
+; RV64I-NEXT:    mv a0, zero
+; RV64I-NEXT:    bnez a2, .LBB2_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:  .LBB2_2:
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    addiw a1, a1, -1
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    ret
+  %tmp = call i16 @llvm.usub.sat.i16(i16 %x, i16 %y);
+  ret i16 %tmp;
+}
+
+define zeroext i8 @func8(i8 zeroext %x, i8 zeroext %y) nounwind {
+; RV32I-LABEL: func8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sub a1, a0, a1
+; RV32I-NEXT:    sltu a2, a0, a1
+; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    bnez a2, .LBB3_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    mv a0, a1
+; RV32I-NEXT:  .LBB3_2:
+; RV32I-NEXT:    andi a0, a0, 255
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a1, a0, a1
+; RV64I-NEXT:    sltu a2, a0, a1
+; RV64I-NEXT:    mv a0, zero
+; RV64I-NEXT:    bnez a2, .LBB3_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:  .LBB3_2:
+; RV64I-NEXT:    andi a0, a0, 255
+; RV64I-NEXT:    ret
+  %tmp = call i8 @llvm.usub.sat.i8(i8 %x, i8 %y);
+  ret i8 %tmp;
+}
+
+define zeroext i4 @func3(i4 zeroext %x, i4 zeroext %y) nounwind {
+; RV32I-LABEL: func3:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sub a1, a0, a1
+; RV32I-NEXT:    sltu a2, a0, a1
+; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    bnez a2, .LBB4_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    mv a0, a1
+; RV32I-NEXT:  .LBB4_2:
+; RV32I-NEXT:    andi a0, a0, 15
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func3:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a1, a0, a1
+; RV64I-NEXT:    sltu a2, a0, a1
+; RV64I-NEXT:    mv a0, zero
+; RV64I-NEXT:    bnez a2, .LBB4_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:  .LBB4_2:
+; RV64I-NEXT:    andi a0, a0, 15
+; RV64I-NEXT:    ret
+  %tmp = call i4 @llvm.usub.sat.i4(i4 %x, i4 %y);
+  ret i4 %tmp;
+}

diff --git a/llvm/test/CodeGen/RISCV/usub_sat_plus.ll b/llvm/test/CodeGen/RISCV/usub_sat_plus.ll
new file mode 100644
index 000000000000..57e8df469213
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/usub_sat_plus.ll
@@ -0,0 +1,183 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=riscv32 -mattr=+m | FileCheck %s --check-prefix=RV32I
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m | FileCheck %s --check-prefix=RV64I
+
+declare i4 @llvm.usub.sat.i4(i4, i4)
+declare i8 @llvm.usub.sat.i8(i8, i8)
+declare i16 @llvm.usub.sat.i16(i16, i16)
+declare i32 @llvm.usub.sat.i32(i32, i32)
+declare i64 @llvm.usub.sat.i64(i64, i64)
+
+define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
+; RV32I-LABEL: func32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    mul a1, a1, a2
+; RV32I-NEXT:    sub a1, a0, a1
+; RV32I-NEXT:    sltu a2, a0, a1
+; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    bnez a2, .LBB0_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    mv a0, a1
+; RV32I-NEXT:  .LBB0_2:
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    mul a1, a1, a2
+; RV64I-NEXT:    slli a1, a1, 32
+; RV64I-NEXT:    srli a1, a1, 32
+; RV64I-NEXT:    sub a1, a0, a1
+; RV64I-NEXT:    sltu a2, a0, a1
+; RV64I-NEXT:    mv a0, zero
+; RV64I-NEXT:    bnez a2, .LBB0_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:  .LBB0_2:
+; RV64I-NEXT:    ret
+  %a = mul i32 %y, %z
+  %tmp = call i32 @llvm.usub.sat.i32(i32 %x, i32 %a)
+  ret i32 %tmp
+}
+
+define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
+; RV32I-LABEL: func64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sltu a2, a0, a4
+; RV32I-NEXT:    sub a3, a1, a5
+; RV32I-NEXT:    sub a2, a3, a2
+; RV32I-NEXT:    sub a3, a0, a4
+; RV32I-NEXT:    beq a2, a1, .LBB1_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    sltu a4, a1, a2
+; RV32I-NEXT:    j .LBB1_3
+; RV32I-NEXT:  .LBB1_2:
+; RV32I-NEXT:    sltu a4, a0, a3
+; RV32I-NEXT:  .LBB1_3:
+; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    bnez a4, .LBB1_5
+; RV32I-NEXT:  # %bb.4:
+; RV32I-NEXT:    mv a0, a3
+; RV32I-NEXT:    mv a1, a2
+; RV32I-NEXT:  .LBB1_5:
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sub a1, a0, a2
+; RV64I-NEXT:    sltu a2, a0, a1
+; RV64I-NEXT:    mv a0, zero
+; RV64I-NEXT:    bnez a2, .LBB1_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:  .LBB1_2:
+; RV64I-NEXT:    ret
+  %a = mul i64 %y, %z
+  %tmp = call i64 @llvm.usub.sat.i64(i64 %x, i64 %z)
+  ret i64 %tmp
+}
+
+define i16 @func16(i16 %x, i16 %y, i16 %z) nounwind {
+; RV32I-LABEL: func16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lui a3, 16
+; RV32I-NEXT:    addi a3, a3, -1
+; RV32I-NEXT:    and a0, a0, a3
+; RV32I-NEXT:    mul a1, a1, a2
+; RV32I-NEXT:    and a1, a1, a3
+; RV32I-NEXT:    sub a1, a0, a1
+; RV32I-NEXT:    sltu a2, a0, a1
+; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    bnez a2, .LBB2_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    mv a0, a1
+; RV32I-NEXT:  .LBB2_2:
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a3, 16
+; RV64I-NEXT:    addiw a3, a3, -1
+; RV64I-NEXT:    and a0, a0, a3
+; RV64I-NEXT:    mul a1, a1, a2
+; RV64I-NEXT:    and a1, a1, a3
+; RV64I-NEXT:    sub a1, a0, a1
+; RV64I-NEXT:    sltu a2, a0, a1
+; RV64I-NEXT:    mv a0, zero
+; RV64I-NEXT:    bnez a2, .LBB2_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:  .LBB2_2:
+; RV64I-NEXT:    ret
+  %a = mul i16 %y, %z
+  %tmp = call i16 @llvm.usub.sat.i16(i16 %x, i16 %a)
+  ret i16 %tmp
+}
+
+define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
+; RV32I-LABEL: func8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a0, a0, 255
+; RV32I-NEXT:    mul a1, a1, a2
+; RV32I-NEXT:    andi a1, a1, 255
+; RV32I-NEXT:    sub a1, a0, a1
+; RV32I-NEXT:    sltu a2, a0, a1
+; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    bnez a2, .LBB3_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    mv a0, a1
+; RV32I-NEXT:  .LBB3_2:
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 255
+; RV64I-NEXT:    mul a1, a1, a2
+; RV64I-NEXT:    andi a1, a1, 255
+; RV64I-NEXT:    sub a1, a0, a1
+; RV64I-NEXT:    sltu a2, a0, a1
+; RV64I-NEXT:    mv a0, zero
+; RV64I-NEXT:    bnez a2, .LBB3_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:  .LBB3_2:
+; RV64I-NEXT:    ret
+  %a = mul i8 %y, %z
+  %tmp = call i8 @llvm.usub.sat.i8(i8 %x, i8 %a)
+  ret i8 %tmp
+}
+
+define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
+; RV32I-LABEL: func4:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a0, a0, 15
+; RV32I-NEXT:    mul a1, a1, a2
+; RV32I-NEXT:    andi a1, a1, 15
+; RV32I-NEXT:    sub a1, a0, a1
+; RV32I-NEXT:    sltu a2, a0, a1
+; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    bnez a2, .LBB4_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    mv a0, a1
+; RV32I-NEXT:  .LBB4_2:
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: func4:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 15
+; RV64I-NEXT:    mul a1, a1, a2
+; RV64I-NEXT:    andi a1, a1, 15
+; RV64I-NEXT:    sub a1, a0, a1
+; RV64I-NEXT:    sltu a2, a0, a1
+; RV64I-NEXT:    mv a0, zero
+; RV64I-NEXT:    bnez a2, .LBB4_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:  .LBB4_2:
+; RV64I-NEXT:    ret
+  %a = mul i4 %y, %z
+  %tmp = call i4 @llvm.usub.sat.i4(i4 %x, i4 %a)
+  ret i4 %tmp
+}
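
As the NOTE lines indicate, the FileCheck assertions in these files were
autogenerated. Assuming a build directory named build/, they can be
regenerated with something like:

    llvm/utils/update_llc_test_checks.py --llc-binary build/bin/llc \
        llvm/test/CodeGen/RISCV/{s,u}{add,sub}_sat*.ll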
