[llvm] [RISCV] Improve instruction selection for most significant bit extraction (PR #151687)

Piotr Fusik via llvm-commits llvm-commits at lists.llvm.org
Fri Aug 1 03:56:10 PDT 2025


https://github.com/pfusik created https://github.com/llvm/llvm-project/pull/151687

    (seteq (and X, 1<<(XLEN-1)), 0) -> (xori (srli X, XLEN-1), 1)
    (seteq (and X, 1<<31), 0) -> (xori (srliw X, 31), 1) // RV64
    (setlt X, 0) -> (srli X, XLEN-1) // SRLI is compressible
    (setlt (sext X), 0) -> (srliw X, 31) // RV64

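For illustration, here is one of the new bittest.ll tests (added in the first
patch below) that exercises the (seteq (and X, 1<<31), 0) pattern:

    define i32 @bittest_31_andeq0_i64(i64 %x) {
      %and = and i64 %x, 2147483648   ; 1 << 31
      %cmp = icmp eq i64 %and, 0
      %conv = zext i1 %cmp to i32
      ret i32 %conv
    }

On RV64 this previously lowered to srliw+slli+seqz (or not+bexti with Zbs);
with the new patterns the updated checks expect:

    srliw a0, a0, 31
    xori a0, a0, 1
    ret
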
From 9d7dd77d8c39be3b34124b0ab66ecf6ec05e60cf Mon Sep 17 00:00:00 2001
From: Piotr Fusik <p.fusik at samsung.com>
Date: Fri, 1 Aug 2025 12:00:56 +0200
Subject: [PATCH 1/2] [RISCV][test] Add tests for most significant bit
 extraction

---
 llvm/test/CodeGen/RISCV/bittest.ll | 116 +++++++++++++++++++++++++++++
 1 file changed, 116 insertions(+)

diff --git a/llvm/test/CodeGen/RISCV/bittest.ll b/llvm/test/CodeGen/RISCV/bittest.ll
index fa6892be44d97..e578d89669324 100644
--- a/llvm/test/CodeGen/RISCV/bittest.ll
+++ b/llvm/test/CodeGen/RISCV/bittest.ll
@@ -3507,3 +3507,119 @@ define void @bit_64_1_nz_branch_i64(i64 %0) {
 5:
   ret void
 }
+
+define i32 @bittest_31_andeq0_i64(i64 %x) {
+; RV32I-LABEL: bittest_31_andeq0_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lui a1, 524288
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    seqz a0, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: bittest_31_andeq0_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srliw a0, a0, 31
+; RV64I-NEXT:    slli a0, a0, 31
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    ret
+;
+; ZBS-LABEL: bittest_31_andeq0_i64:
+; ZBS:       # %bb.0:
+; ZBS-NEXT:    not a0, a0
+; ZBS-NEXT:    bexti a0, a0, 31
+; ZBS-NEXT:    ret
+;
+; XTHEADBS-LABEL: bittest_31_andeq0_i64:
+; XTHEADBS:       # %bb.0:
+; XTHEADBS-NEXT:    not a0, a0
+; XTHEADBS-NEXT:    th.tst a0, a0, 31
+; XTHEADBS-NEXT:    ret
+  %and = and i64 %x, 2147483648
+  %cmp = icmp eq i64 %and, 0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @bittest_63_andeq0_i64(i64 %x) {
+; RV32I-LABEL: bittest_63_andeq0_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lui a0, 524288
+; RV32I-NEXT:    and a0, a1, a0
+; RV32I-NEXT:    seqz a0, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: bittest_63_andeq0_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srli a0, a0, 63
+; RV64I-NEXT:    slli a0, a0, 63
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    ret
+;
+; RV32ZBS-LABEL: bittest_63_andeq0_i64:
+; RV32ZBS:       # %bb.0:
+; RV32ZBS-NEXT:    not a0, a1
+; RV32ZBS-NEXT:    bexti a0, a0, 31
+; RV32ZBS-NEXT:    ret
+;
+; RV64ZBS-LABEL: bittest_63_andeq0_i64:
+; RV64ZBS:       # %bb.0:
+; RV64ZBS-NEXT:    not a0, a0
+; RV64ZBS-NEXT:    bexti a0, a0, 63
+; RV64ZBS-NEXT:    ret
+;
+; RV32XTHEADBS-LABEL: bittest_63_andeq0_i64:
+; RV32XTHEADBS:       # %bb.0:
+; RV32XTHEADBS-NEXT:    not a0, a1
+; RV32XTHEADBS-NEXT:    th.tst a0, a0, 31
+; RV32XTHEADBS-NEXT:    ret
+;
+; RV64XTHEADBS-LABEL: bittest_63_andeq0_i64:
+; RV64XTHEADBS:       # %bb.0:
+; RV64XTHEADBS-NEXT:    not a0, a0
+; RV64XTHEADBS-NEXT:    th.tst a0, a0, 63
+; RV64XTHEADBS-NEXT:    ret
+  %and = and i64 %x, 9223372036854775808
+  %cmp = icmp eq i64 %and, 0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+define i32 @bittest_31_slt0_i32(i32 %x, i1 %y) {
+; RV32-LABEL: bittest_31_slt0_i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    slti a0, a0, 0
+; RV32-NEXT:    and a0, a0, a1
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: bittest_31_slt0_i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    sext.w a0, a0
+; RV64-NEXT:    slti a0, a0, 0
+; RV64-NEXT:    and a0, a0, a1
+; RV64-NEXT:    ret
+  %cmp = icmp slt i32 %x, 0
+  %and = and i1 %cmp, %y
+  %ext = zext i1 %and to i32
+  ret i32 %ext
+}
+
+define i32 @bittest_63_slt0_i64(i32 %x, i1 %y) {
+; RV32-LABEL: bittest_63_slt0_i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    srai a0, a0, 31
+; RV32-NEXT:    slti a0, a0, 0
+; RV32-NEXT:    and a0, a0, a1
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: bittest_63_slt0_i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    sext.w a0, a0
+; RV64-NEXT:    slti a0, a0, 0
+; RV64-NEXT:    and a0, a0, a1
+; RV64-NEXT:    ret
+  %ext = sext i32 %x to i64
+  %cmp = icmp slt i64 %ext, 0
+  %and = and i1 %cmp, %y
+  %cond = zext i1 %and to i32
+  ret i32 %cond
+}

From f719f1d2c163b4fd6f814eef7319d2f324a53c61 Mon Sep 17 00:00:00 2001
From: Piotr Fusik <p.fusik at samsung.com>
Date: Fri, 1 Aug 2025 12:05:31 +0200
Subject: [PATCH 2/2] [RISCV] Improve instruction selection for most
 significant bit extraction

    (seteq (and X, 1<<(XLEN-1)), 0) -> (xori (srli X, XLEN-1), 1)
    (seteq (and X, 1<<31), 0) -> (xori (srliw X, 31), 1) // RV64
    (setlt X, 0) -> (srli X, XLEN-1) // SRLI is compressible
    (setlt (sext X), 0) -> (srliw X, 31) // RV64
---
 llvm/lib/Target/RISCV/RISCVInstrInfo.td       |  14 ++
 .../CodeGen/RISCV/GlobalISel/double-fcmp.ll   |  10 +-
 .../CodeGen/RISCV/GlobalISel/float-fcmp.ll    |  10 +-
 llvm/test/CodeGen/RISCV/alu64.ll              |   2 +-
 .../test/CodeGen/RISCV/arith-with-overflow.ll |   2 +-
 llvm/test/CodeGen/RISCV/bittest.ll            |  98 ++++--------
 llvm/test/CodeGen/RISCV/condbinops.ll         |   6 +-
 llvm/test/CodeGen/RISCV/double-convert.ll     |  38 ++---
 llvm/test/CodeGen/RISCV/double-fcmp-strict.ll |  32 ++--
 llvm/test/CodeGen/RISCV/double-fcmp.ll        |  16 +-
 llvm/test/CodeGen/RISCV/float-convert.ll      |  34 ++--
 llvm/test/CodeGen/RISCV/float-fcmp-strict.ll  |  32 ++--
 llvm/test/CodeGen/RISCV/float-fcmp.ll         |  16 +-
 llvm/test/CodeGen/RISCV/float-intrinsics.ll   |  18 +--
 .../test/CodeGen/RISCV/fold-addi-loadstore.ll |   4 +-
 llvm/test/CodeGen/RISCV/forced-atomics.ll     |   2 +-
 llvm/test/CodeGen/RISCV/fpclamptosat.ll       | 126 +++++++--------
 llvm/test/CodeGen/RISCV/half-convert.ll       |  46 +++---
 llvm/test/CodeGen/RISCV/memcmp-optsize.ll     |  20 +--
 llvm/test/CodeGen/RISCV/memcmp.ll             |  60 +++----
 llvm/test/CodeGen/RISCV/min-max.ll            |   4 +-
 llvm/test/CodeGen/RISCV/pr84653_pr85190.ll    |   2 +-
 llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll       |   2 +-
 llvm/test/CodeGen/RISCV/rv32zbs.ll            |  26 +--
 .../test/CodeGen/RISCV/rv64-double-convert.ll |   4 +-
 llvm/test/CodeGen/RISCV/rv64-float-convert.ll |   4 +-
 llvm/test/CodeGen/RISCV/rv64-half-convert.ll  |  12 +-
 .../CodeGen/RISCV/rvv/fpclamptosat_vec.ll     | 150 +++++++++---------
 .../RISCV/rvv/vxrm-insert-out-of-loop.ll      |  16 +-
 llvm/test/CodeGen/RISCV/sadd_sat.ll           |   4 +-
 llvm/test/CodeGen/RISCV/sadd_sat_plus.ll      |   4 +-
 .../CodeGen/RISCV/select-binop-identity.ll    |  12 +-
 llvm/test/CodeGen/RISCV/select-cc.ll          |   8 +-
 .../test/CodeGen/RISCV/select-constant-xor.ll |   4 +-
 .../CodeGen/RISCV/selectcc-to-shiftand.ll     |  25 +--
 llvm/test/CodeGen/RISCV/stack-folding.ll      |   4 +-
 llvm/test/CodeGen/RISCV/xaluo.ll              | 112 ++++++-------
 llvm/test/CodeGen/RISCV/xqcia.ll              |   2 +-
 llvm/test/CodeGen/RISCV/zbb-cmp-combine.ll    |   4 +-
 39 files changed, 479 insertions(+), 506 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index 653607827282e..d0c9d32af2786 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -1691,6 +1691,20 @@ multiclass SelectCC_GPR_riirr<DAGOperand valty, DAGOperand imm> {
                 valty:$truev, valty:$falsev), []>;
 }
 
+let Predicates = [IsRV32] in {
+def : Pat<(i32 (seteq (i32 (and GPR:$rs1, 0xffffffff80000000)), 0)),
+          (XORI (i32 (SRLI GPR:$rs1, 31)), 1)>;
+def : Pat<(i32 (setlt (i32 GPR:$rs1), 0)), (SRLI GPR:$rs1, 31)>; // compressible
+}
+let Predicates = [IsRV64] in {
+def : Pat<(i64 (seteq (i64 (and GPR:$rs1, 0x8000000000000000)), 0)),
+          (XORI (i64 (SRLI GPR:$rs1, 63)), 1)>;
+def : Pat<(i64 (seteq (i64 (and GPR:$rs1, 0x0000000080000000)), 0)),
+          (XORI (i64 (SRLIW GPR:$rs1, 31)), 1)>;
+def : Pat<(i64 (setlt (i64 GPR:$rs1), 0)), (SRLI GPR:$rs1, 63)>; // compressible
+def : Pat<(i64 (setlt (sext_inreg GPR:$rs1, i32), 0)), (SRLIW GPR:$rs1, 31)>;
+}
+
 /// Branches and jumps
 
 // Match `riscv_brcc` and lower to the appropriate RISC-V branch instruction.
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/double-fcmp.ll b/llvm/test/CodeGen/RISCV/GlobalISel/double-fcmp.ll
index dfa76a2e1531b..9ec8c32e989b0 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/double-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/double-fcmp.ll
@@ -138,7 +138,7 @@ define i32 @fcmp_olt(double %a, double %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __ltdf2
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -148,8 +148,7 @@ define i32 @fcmp_olt(double %a, double %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __ltdf2
-; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    srliw a0, a0, 31
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -446,7 +445,7 @@ define i32 @fcmp_ult(double %a, double %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __gedf2
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -456,8 +455,7 @@ define i32 @fcmp_ult(double %a, double %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __gedf2
-; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    srliw a0, a0, 31
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/float-fcmp.ll b/llvm/test/CodeGen/RISCV/GlobalISel/float-fcmp.ll
index 475b67bda9ae9..380751c907c0d 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/float-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/float-fcmp.ll
@@ -138,7 +138,7 @@ define i32 @fcmp_olt(float %a, float %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __ltsf2
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -148,8 +148,7 @@ define i32 @fcmp_olt(float %a, float %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __ltsf2
-; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    srliw a0, a0, 31
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -431,7 +430,7 @@ define i32 @fcmp_ult(float %a, float %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __gesf2
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -441,8 +440,7 @@ define i32 @fcmp_ult(float %a, float %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __gesf2
-; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    srliw a0, a0, 31
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/alu64.ll b/llvm/test/CodeGen/RISCV/alu64.ll
index f032756e007b6..c7938a718de70 100644
--- a/llvm/test/CodeGen/RISCV/alu64.ll
+++ b/llvm/test/CodeGen/RISCV/alu64.ll
@@ -37,7 +37,7 @@ define i64 @slti(i64 %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    beqz a1, .LBB1_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    slti a0, a1, 0
+; RV32I-NEXT:    srli a0, a1, 31
 ; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ; RV32I-NEXT:  .LBB1_2:
diff --git a/llvm/test/CodeGen/RISCV/arith-with-overflow.ll b/llvm/test/CodeGen/RISCV/arith-with-overflow.ll
index 4efc224ab1ca7..551d8864033f3 100644
--- a/llvm/test/CodeGen/RISCV/arith-with-overflow.ll
+++ b/llvm/test/CodeGen/RISCV/arith-with-overflow.ll
@@ -12,7 +12,7 @@ define i1 @sadd(i32 %a, i32 %b, ptr %c) nounwind {
 ; RV32I:       # %bb.0: # %entry
 ; RV32I-NEXT:    add a3, a0, a1
 ; RV32I-NEXT:    slt a0, a3, a0
-; RV32I-NEXT:    slti a1, a1, 0
+; RV32I-NEXT:    srli a1, a1, 31
 ; RV32I-NEXT:    xor a0, a1, a0
 ; RV32I-NEXT:    sw a3, 0(a2)
 ; RV32I-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/bittest.ll b/llvm/test/CodeGen/RISCV/bittest.ll
index e578d89669324..95c577f833a37 100644
--- a/llvm/test/CodeGen/RISCV/bittest.ll
+++ b/llvm/test/CodeGen/RISCV/bittest.ll
@@ -187,14 +187,14 @@ define i64 @bittest_31_i64(i64 %a) nounwind {
 ;
 ; RV64ZBS-LABEL: bittest_31_i64:
 ; RV64ZBS:       # %bb.0:
-; RV64ZBS-NEXT:    not a0, a0
-; RV64ZBS-NEXT:    bexti a0, a0, 31
+; RV64ZBS-NEXT:    srliw a0, a0, 31
+; RV64ZBS-NEXT:    xori a0, a0, 1
 ; RV64ZBS-NEXT:    ret
 ;
 ; RV64XTHEADBS-LABEL: bittest_31_i64:
 ; RV64XTHEADBS:       # %bb.0:
-; RV64XTHEADBS-NEXT:    not a0, a0
-; RV64XTHEADBS-NEXT:    th.tst a0, a0, 31
+; RV64XTHEADBS-NEXT:    srliw a0, a0, 31
+; RV64XTHEADBS-NEXT:    xori a0, a0, 1
 ; RV64XTHEADBS-NEXT:    ret
   %shr = lshr i64 %a, 31
   %not = xor i64 %shr, -1
@@ -3509,31 +3509,17 @@ define void @bit_64_1_nz_branch_i64(i64 %0) {
 }
 
 define i32 @bittest_31_andeq0_i64(i64 %x) {
-; RV32I-LABEL: bittest_31_andeq0_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 524288
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    seqz a0, a0
-; RV32I-NEXT:    ret
-;
-; RV64I-LABEL: bittest_31_andeq0_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    srliw a0, a0, 31
-; RV64I-NEXT:    slli a0, a0, 31
-; RV64I-NEXT:    seqz a0, a0
-; RV64I-NEXT:    ret
-;
-; ZBS-LABEL: bittest_31_andeq0_i64:
-; ZBS:       # %bb.0:
-; ZBS-NEXT:    not a0, a0
-; ZBS-NEXT:    bexti a0, a0, 31
-; ZBS-NEXT:    ret
+; RV32-LABEL: bittest_31_andeq0_i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    srli a0, a0, 31
+; RV32-NEXT:    xori a0, a0, 1
+; RV32-NEXT:    ret
 ;
-; XTHEADBS-LABEL: bittest_31_andeq0_i64:
-; XTHEADBS:       # %bb.0:
-; XTHEADBS-NEXT:    not a0, a0
-; XTHEADBS-NEXT:    th.tst a0, a0, 31
-; XTHEADBS-NEXT:    ret
+; RV64-LABEL: bittest_31_andeq0_i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    srliw a0, a0, 31
+; RV64-NEXT:    xori a0, a0, 1
+; RV64-NEXT:    ret
   %and = and i64 %x, 2147483648
   %cmp = icmp eq i64 %and, 0
   %conv = zext i1 %cmp to i32
@@ -3541,43 +3527,17 @@ define i32 @bittest_31_andeq0_i64(i64 %x) {
 }
 
 define i32 @bittest_63_andeq0_i64(i64 %x) {
-; RV32I-LABEL: bittest_63_andeq0_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a0, 524288
-; RV32I-NEXT:    and a0, a1, a0
-; RV32I-NEXT:    seqz a0, a0
-; RV32I-NEXT:    ret
-;
-; RV64I-LABEL: bittest_63_andeq0_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    srli a0, a0, 63
-; RV64I-NEXT:    slli a0, a0, 63
-; RV64I-NEXT:    seqz a0, a0
-; RV64I-NEXT:    ret
-;
-; RV32ZBS-LABEL: bittest_63_andeq0_i64:
-; RV32ZBS:       # %bb.0:
-; RV32ZBS-NEXT:    not a0, a1
-; RV32ZBS-NEXT:    bexti a0, a0, 31
-; RV32ZBS-NEXT:    ret
-;
-; RV64ZBS-LABEL: bittest_63_andeq0_i64:
-; RV64ZBS:       # %bb.0:
-; RV64ZBS-NEXT:    not a0, a0
-; RV64ZBS-NEXT:    bexti a0, a0, 63
-; RV64ZBS-NEXT:    ret
-;
-; RV32XTHEADBS-LABEL: bittest_63_andeq0_i64:
-; RV32XTHEADBS:       # %bb.0:
-; RV32XTHEADBS-NEXT:    not a0, a1
-; RV32XTHEADBS-NEXT:    th.tst a0, a0, 31
-; RV32XTHEADBS-NEXT:    ret
+; RV32-LABEL: bittest_63_andeq0_i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    srli a1, a1, 31
+; RV32-NEXT:    xori a0, a1, 1
+; RV32-NEXT:    ret
 ;
-; RV64XTHEADBS-LABEL: bittest_63_andeq0_i64:
-; RV64XTHEADBS:       # %bb.0:
-; RV64XTHEADBS-NEXT:    not a0, a0
-; RV64XTHEADBS-NEXT:    th.tst a0, a0, 63
-; RV64XTHEADBS-NEXT:    ret
+; RV64-LABEL: bittest_63_andeq0_i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    srli a0, a0, 63
+; RV64-NEXT:    xori a0, a0, 1
+; RV64-NEXT:    ret
   %and = and i64 %x, 9223372036854775808
   %cmp = icmp eq i64 %and, 0
   %conv = zext i1 %cmp to i32
@@ -3587,14 +3547,13 @@ define i32 @bittest_63_andeq0_i64(i64 %x) {
 define i32 @bittest_31_slt0_i32(i32 %x, i1 %y) {
 ; RV32-LABEL: bittest_31_slt0_i32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    slti a0, a0, 0
+; RV32-NEXT:    srli a0, a0, 31
 ; RV32-NEXT:    and a0, a0, a1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: bittest_31_slt0_i32:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    sext.w a0, a0
-; RV64-NEXT:    slti a0, a0, 0
+; RV64-NEXT:    srliw a0, a0, 31
 ; RV64-NEXT:    and a0, a0, a1
 ; RV64-NEXT:    ret
   %cmp = icmp slt i32 %x, 0
@@ -3607,14 +3566,13 @@ define i32 @bittest_63_slt0_i64(i32 %x, i1 %y) {
 ; RV32-LABEL: bittest_63_slt0_i64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    srai a0, a0, 31
-; RV32-NEXT:    slti a0, a0, 0
+; RV32-NEXT:    srli a0, a0, 31
 ; RV32-NEXT:    and a0, a0, a1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: bittest_63_slt0_i64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    sext.w a0, a0
-; RV64-NEXT:    slti a0, a0, 0
+; RV64-NEXT:    srliw a0, a0, 31
 ; RV64-NEXT:    and a0, a0, a1
 ; RV64-NEXT:    ret
   %ext = sext i32 %x to i64
diff --git a/llvm/test/CodeGen/RISCV/condbinops.ll b/llvm/test/CodeGen/RISCV/condbinops.ll
index dc81c13bfb6a3..91052bce9704c 100644
--- a/llvm/test/CodeGen/RISCV/condbinops.ll
+++ b/llvm/test/CodeGen/RISCV/condbinops.ll
@@ -459,7 +459,7 @@ define i64 @shl64(i64 %x, i64 %y, i1 %c) {
 ; RV32ZICOND-NEXT:    addi a4, a2, -32
 ; RV32ZICOND-NEXT:    sll a1, a1, a2
 ; RV32ZICOND-NEXT:    not a2, a2
-; RV32ZICOND-NEXT:    slti a4, a4, 0
+; RV32ZICOND-NEXT:    srli a4, a4, 31
 ; RV32ZICOND-NEXT:    srl a2, a3, a2
 ; RV32ZICOND-NEXT:    czero.nez a3, a0, a4
 ; RV32ZICOND-NEXT:    or a1, a1, a2
@@ -534,7 +534,7 @@ define i64 @ashr64(i64 %x, i64 %y, i1 %c) {
 ; RV32ZICOND-NEXT:    addi a4, a2, -32
 ; RV32ZICOND-NEXT:    srl a0, a0, a2
 ; RV32ZICOND-NEXT:    not a2, a2
-; RV32ZICOND-NEXT:    slti a4, a4, 0
+; RV32ZICOND-NEXT:    srli a4, a4, 31
 ; RV32ZICOND-NEXT:    sll a2, a3, a2
 ; RV32ZICOND-NEXT:    czero.nez a3, a1, a4
 ; RV32ZICOND-NEXT:    or a0, a0, a2
@@ -610,7 +610,7 @@ define i64 @lshr64(i64 %x, i64 %y, i1 %c) {
 ; RV32ZICOND-NEXT:    addi a4, a2, -32
 ; RV32ZICOND-NEXT:    srl a0, a0, a2
 ; RV32ZICOND-NEXT:    not a2, a2
-; RV32ZICOND-NEXT:    slti a4, a4, 0
+; RV32ZICOND-NEXT:    srli a4, a4, 31
 ; RV32ZICOND-NEXT:    sll a2, a3, a2
 ; RV32ZICOND-NEXT:    czero.nez a3, a1, a4
 ; RV32ZICOND-NEXT:    or a0, a0, a2
diff --git a/llvm/test/CodeGen/RISCV/double-convert.ll b/llvm/test/CodeGen/RISCV/double-convert.ll
index a2e6186e051bf..9c81bc2851347 100644
--- a/llvm/test/CodeGen/RISCV/double-convert.ll
+++ b/llvm/test/CodeGen/RISCV/double-convert.ll
@@ -405,7 +405,7 @@ define i32 @fcvt_wu_d_sat(double %a) nounwind {
 ; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __gedf2
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    addi s3, a0, -1
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
@@ -446,8 +446,8 @@ define i32 @fcvt_wu_d_sat(double %a) nounwind {
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    j .LBB6_3
 ; RV64I-NEXT:  .LBB6_2:
-; RV64I-NEXT:    slti a0, s0, 0
-; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    srli s0, s0, 63
+; RV64I-NEXT:    addi a0, s0, -1
 ; RV64I-NEXT:    and a0, a0, s1
 ; RV64I-NEXT:  .LBB6_3: # %start
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -819,7 +819,7 @@ define i64 @fcvt_l_d_sat(double %a) nounwind {
 ; RV32I-NEXT:    mv a3, s0
 ; RV32I-NEXT:    call __unorddf2
 ; RV32I-NEXT:    snez a0, a0
-; RV32I-NEXT:    slti a1, s4, 0
+; RV32I-NEXT:    srli a1, s4, 31
 ; RV32I-NEXT:    sgtz a2, s2
 ; RV32I-NEXT:    addi a0, a0, -1
 ; RV32I-NEXT:    addi a3, a1, -1
@@ -1029,7 +1029,7 @@ define i64 @fcvt_lu_d_sat(double %a) nounwind {
 ; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __gedf2
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    addi s3, a0, -1
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
@@ -1055,7 +1055,7 @@ define i64 @fcvt_lu_d_sat(double %a) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __gedf2
-; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    srli a0, a0, 63
 ; RV64I-NEXT:    addi s1, a0, -1
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __fixunsdfdi
@@ -1898,9 +1898,9 @@ define zeroext i16 @fcvt_wu_s_sat_i16(double %a) nounwind {
 ; RV32I-NEXT:    mv a0, a1
 ; RV32I-NEXT:    j .LBB28_3
 ; RV32I-NEXT:  .LBB28_2:
-; RV32I-NEXT:    slti a2, s0, 0
-; RV32I-NEXT:    addi a2, a2, -1
-; RV32I-NEXT:    and a0, a2, a0
+; RV32I-NEXT:    srli s0, s0, 31
+; RV32I-NEXT:    addi s0, s0, -1
+; RV32I-NEXT:    and a0, s0, a0
 ; RV32I-NEXT:  .LBB28_3: # %start
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -1937,8 +1937,8 @@ define zeroext i16 @fcvt_wu_s_sat_i16(double %a) nounwind {
 ; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:    j .LBB28_3
 ; RV64I-NEXT:  .LBB28_2:
-; RV64I-NEXT:    slti a0, s0, 0
-; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    srli s0, s0, 63
+; RV64I-NEXT:    addi a0, s0, -1
 ; RV64I-NEXT:    and a0, a0, s1
 ; RV64I-NEXT:  .LBB28_3: # %start
 ; RV64I-NEXT:    and a0, a0, a1
@@ -2271,9 +2271,9 @@ define zeroext i8 @fcvt_wu_s_sat_i8(double %a) nounwind {
 ; RV32I-NEXT:    li a0, 255
 ; RV32I-NEXT:    j .LBB32_3
 ; RV32I-NEXT:  .LBB32_2:
-; RV32I-NEXT:    slti a1, s0, 0
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a1, a0
+; RV32I-NEXT:    srli s0, s0, 31
+; RV32I-NEXT:    addi s0, s0, -1
+; RV32I-NEXT:    and a0, s0, a0
 ; RV32I-NEXT:  .LBB32_3: # %start
 ; RV32I-NEXT:    zext.b a0, a0
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
@@ -2307,8 +2307,8 @@ define zeroext i8 @fcvt_wu_s_sat_i8(double %a) nounwind {
 ; RV64I-NEXT:    li a0, 255
 ; RV64I-NEXT:    j .LBB32_3
 ; RV64I-NEXT:  .LBB32_2:
-; RV64I-NEXT:    slti a0, s0, 0
-; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    srli s0, s0, 63
+; RV64I-NEXT:    addi a0, s0, -1
 ; RV64I-NEXT:    and a0, a0, s1
 ; RV64I-NEXT:  .LBB32_3: # %start
 ; RV64I-NEXT:    zext.b a0, a0
@@ -2386,7 +2386,7 @@ define zeroext i32 @fcvt_wu_d_sat_zext(double %a) nounwind {
 ; RV32I-NEXT:    li a2, 0
 ; RV32I-NEXT:    li a3, 0
 ; RV32I-NEXT:    call __gedf2
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    addi s3, a0, -1
 ; RV32I-NEXT:    mv a0, s1
 ; RV32I-NEXT:    mv a1, s0
@@ -2427,8 +2427,8 @@ define zeroext i32 @fcvt_wu_d_sat_zext(double %a) nounwind {
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    j .LBB33_3
 ; RV64I-NEXT:  .LBB33_2:
-; RV64I-NEXT:    slti a0, s0, 0
-; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    srli s0, s0, 63
+; RV64I-NEXT:    addi a0, s0, -1
 ; RV64I-NEXT:    and a0, a0, s1
 ; RV64I-NEXT:  .LBB33_3: # %start
 ; RV64I-NEXT:    slli a0, a0, 32
diff --git a/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll b/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll
index 7c5332f719867..b1c63af3e7e07 100644
--- a/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll
+++ b/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll
@@ -140,7 +140,7 @@ define i32 @fcmp_oge(double %a, double %b) nounwind strictfp {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __gedf2
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -151,7 +151,7 @@ define i32 @fcmp_oge(double %a, double %b) nounwind strictfp {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __gedf2
-; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    srli a0, a0, 63
 ; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -193,7 +193,7 @@ define i32 @fcmp_olt(double %a, double %b) nounwind strictfp {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __ltdf2
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -203,7 +203,7 @@ define i32 @fcmp_olt(double %a, double %b) nounwind strictfp {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __ltdf2
-; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    srli a0, a0, 63
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -605,7 +605,7 @@ define i32 @fcmp_uge(double %a, double %b) nounwind strictfp {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __ltdf2
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -616,7 +616,7 @@ define i32 @fcmp_uge(double %a, double %b) nounwind strictfp {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __ltdf2
-; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    srli a0, a0, 63
 ; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -661,7 +661,7 @@ define i32 @fcmp_ult(double %a, double %b) nounwind strictfp {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __gedf2
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -671,7 +671,7 @@ define i32 @fcmp_ult(double %a, double %b) nounwind strictfp {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __gedf2
-; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    srli a0, a0, 63
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -934,7 +934,7 @@ define i32 @fcmps_oge(double %a, double %b) nounwind strictfp {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __gedf2
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -945,7 +945,7 @@ define i32 @fcmps_oge(double %a, double %b) nounwind strictfp {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __gedf2
-; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    srli a0, a0, 63
 ; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -976,7 +976,7 @@ define i32 @fcmps_olt(double %a, double %b) nounwind strictfp {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __ltdf2
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -986,7 +986,7 @@ define i32 @fcmps_olt(double %a, double %b) nounwind strictfp {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __ltdf2
-; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    srli a0, a0, 63
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1311,7 +1311,7 @@ define i32 @fcmps_uge(double %a, double %b) nounwind strictfp {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __ltdf2
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1322,7 +1322,7 @@ define i32 @fcmps_uge(double %a, double %b) nounwind strictfp {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __ltdf2
-; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    srli a0, a0, 63
 ; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1356,7 +1356,7 @@ define i32 @fcmps_ult(double %a, double %b) nounwind strictfp {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __gedf2
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1366,7 +1366,7 @@ define i32 @fcmps_ult(double %a, double %b) nounwind strictfp {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __gedf2
-; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    srli a0, a0, 63
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/double-fcmp.ll b/llvm/test/CodeGen/RISCV/double-fcmp.ll
index f73e6865cf47d..31c8589177439 100644
--- a/llvm/test/CodeGen/RISCV/double-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/double-fcmp.ll
@@ -138,7 +138,7 @@ define i32 @fcmp_oge(double %a, double %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __gedf2
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -149,7 +149,7 @@ define i32 @fcmp_oge(double %a, double %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __gedf2
-; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    srli a0, a0, 63
 ; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -180,7 +180,7 @@ define i32 @fcmp_olt(double %a, double %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __ltdf2
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -190,7 +190,7 @@ define i32 @fcmp_olt(double %a, double %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __ltdf2
-; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    srli a0, a0, 63
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -515,7 +515,7 @@ define i32 @fcmp_uge(double %a, double %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __ltdf2
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -526,7 +526,7 @@ define i32 @fcmp_uge(double %a, double %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __ltdf2
-; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    srli a0, a0, 63
 ; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -560,7 +560,7 @@ define i32 @fcmp_ult(double %a, double %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __gedf2
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -570,7 +570,7 @@ define i32 @fcmp_ult(double %a, double %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __gedf2
-; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    srli a0, a0, 63
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/float-convert.ll b/llvm/test/CodeGen/RISCV/float-convert.ll
index 60349a0e39953..6e49d479cf0b9 100644
--- a/llvm/test/CodeGen/RISCV/float-convert.ll
+++ b/llvm/test/CodeGen/RISCV/float-convert.ll
@@ -278,7 +278,7 @@ define i32 @fcvt_wu_s_sat(float %a) nounwind {
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __gesf2
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    addi s1, a0, -1
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __fixunssfsi
@@ -320,8 +320,8 @@ define i32 @fcvt_wu_s_sat(float %a) nounwind {
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    j .LBB4_3
 ; RV64I-NEXT:  .LBB4_2:
-; RV64I-NEXT:    slti a0, s0, 0
-; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    srli s0, s0, 63
+; RV64I-NEXT:    addi a0, s0, -1
 ; RV64I-NEXT:    and a0, a0, s1
 ; RV64I-NEXT:  .LBB4_3: # %start
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -736,7 +736,7 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
 ; RV32I-NEXT:    mv a1, s1
 ; RV32I-NEXT:    call __unordsf2
 ; RV32I-NEXT:    snez a0, a0
-; RV32I-NEXT:    slti a1, s2, 0
+; RV32I-NEXT:    srli a1, s2, 31
 ; RV32I-NEXT:    sgtz a2, s4
 ; RV32I-NEXT:    addi a0, a0, -1
 ; RV32I-NEXT:    addi a3, a1, -1
@@ -932,7 +932,7 @@ define i64 @fcvt_lu_s_sat(float %a) nounwind {
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __gesf2
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    addi s2, a0, -1
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __fixunssfdi
@@ -971,7 +971,7 @@ define i64 @fcvt_lu_s_sat(float %a) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __gesf2
-; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    srli a0, a0, 63
 ; RV64I-NEXT:    addi s2, a0, -1
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __fixunssfdi
@@ -1651,8 +1651,8 @@ define zeroext i16 @fcvt_wu_s_sat_i16(float %a) nounwind {
 ; RV32I-NEXT:    mv a0, a1
 ; RV32I-NEXT:    j .LBB26_3
 ; RV32I-NEXT:  .LBB26_2:
-; RV32I-NEXT:    slti a0, s0, 0
-; RV32I-NEXT:    addi a0, a0, -1
+; RV32I-NEXT:    srli s0, s0, 31
+; RV32I-NEXT:    addi a0, s0, -1
 ; RV32I-NEXT:    and a0, a0, s1
 ; RV32I-NEXT:  .LBB26_3: # %start
 ; RV32I-NEXT:    and a0, a0, a1
@@ -1688,8 +1688,8 @@ define zeroext i16 @fcvt_wu_s_sat_i16(float %a) nounwind {
 ; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:    j .LBB26_3
 ; RV64I-NEXT:  .LBB26_2:
-; RV64I-NEXT:    slti a0, s0, 0
-; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    srli s0, s0, 63
+; RV64I-NEXT:    addi a0, s0, -1
 ; RV64I-NEXT:    and a0, a0, s1
 ; RV64I-NEXT:  .LBB26_3: # %start
 ; RV64I-NEXT:    and a0, a0, a1
@@ -1986,8 +1986,8 @@ define zeroext i8 @fcvt_wu_s_sat_i8(float %a) nounwind {
 ; RV32I-NEXT:    li a0, 255
 ; RV32I-NEXT:    j .LBB30_3
 ; RV32I-NEXT:  .LBB30_2:
-; RV32I-NEXT:    slti a0, s0, 0
-; RV32I-NEXT:    addi a0, a0, -1
+; RV32I-NEXT:    srli s0, s0, 31
+; RV32I-NEXT:    addi a0, s0, -1
 ; RV32I-NEXT:    and a0, a0, s1
 ; RV32I-NEXT:  .LBB30_3: # %start
 ; RV32I-NEXT:    zext.b a0, a0
@@ -2020,8 +2020,8 @@ define zeroext i8 @fcvt_wu_s_sat_i8(float %a) nounwind {
 ; RV64I-NEXT:    li a0, 255
 ; RV64I-NEXT:    j .LBB30_3
 ; RV64I-NEXT:  .LBB30_2:
-; RV64I-NEXT:    slti a0, s0, 0
-; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    srli s0, s0, 63
+; RV64I-NEXT:    addi a0, s0, -1
 ; RV64I-NEXT:    and a0, a0, s1
 ; RV64I-NEXT:  .LBB30_3: # %start
 ; RV64I-NEXT:    zext.b a0, a0
@@ -2087,7 +2087,7 @@ define zeroext i32 @fcvt_wu_s_sat_zext(float %a) nounwind {
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __gesf2
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    addi s1, a0, -1
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __fixunssfsi
@@ -2129,8 +2129,8 @@ define zeroext i32 @fcvt_wu_s_sat_zext(float %a) nounwind {
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    j .LBB31_3
 ; RV64I-NEXT:  .LBB31_2:
-; RV64I-NEXT:    slti a0, s0, 0
-; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    srli s0, s0, 63
+; RV64I-NEXT:    addi a0, s0, -1
 ; RV64I-NEXT:    and a0, a0, s1
 ; RV64I-NEXT:  .LBB31_3: # %start
 ; RV64I-NEXT:    slli a0, a0, 32
diff --git a/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll b/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll
index fd3baa0575250..7cdd1826b4522 100644
--- a/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll
+++ b/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll
@@ -117,7 +117,7 @@ define i32 @fcmp_oge(float %a, float %b) nounwind strictfp {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __gesf2
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -128,7 +128,7 @@ define i32 @fcmp_oge(float %a, float %b) nounwind strictfp {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __gesf2
-; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    srli a0, a0, 63
 ; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -161,7 +161,7 @@ define i32 @fcmp_olt(float %a, float %b) nounwind strictfp {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __ltsf2
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -171,7 +171,7 @@ define i32 @fcmp_olt(float %a, float %b) nounwind strictfp {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __ltsf2
-; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    srli a0, a0, 63
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -492,7 +492,7 @@ define i32 @fcmp_uge(float %a, float %b) nounwind strictfp {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __ltsf2
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -503,7 +503,7 @@ define i32 @fcmp_uge(float %a, float %b) nounwind strictfp {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __ltsf2
-; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    srli a0, a0, 63
 ; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -538,7 +538,7 @@ define i32 @fcmp_ult(float %a, float %b) nounwind strictfp {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __gesf2
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -548,7 +548,7 @@ define i32 @fcmp_ult(float %a, float %b) nounwind strictfp {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __gesf2
-; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    srli a0, a0, 63
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -770,7 +770,7 @@ define i32 @fcmps_oge(float %a, float %b) nounwind strictfp {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __gesf2
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -781,7 +781,7 @@ define i32 @fcmps_oge(float %a, float %b) nounwind strictfp {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __gesf2
-; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    srli a0, a0, 63
 ; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -807,7 +807,7 @@ define i32 @fcmps_olt(float %a, float %b) nounwind strictfp {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __ltsf2
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -817,7 +817,7 @@ define i32 @fcmps_olt(float %a, float %b) nounwind strictfp {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __ltsf2
-; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    srli a0, a0, 63
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -1087,7 +1087,7 @@ define i32 @fcmps_uge(float %a, float %b) nounwind strictfp {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __ltsf2
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -1098,7 +1098,7 @@ define i32 @fcmps_uge(float %a, float %b) nounwind strictfp {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __ltsf2
-; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    srli a0, a0, 63
 ; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -1126,7 +1126,7 @@ define i32 @fcmps_ult(float %a, float %b) nounwind strictfp {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __gesf2
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1136,7 +1136,7 @@ define i32 @fcmps_ult(float %a, float %b) nounwind strictfp {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __gesf2
-; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    srli a0, a0, 63
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/float-fcmp.ll b/llvm/test/CodeGen/RISCV/float-fcmp.ll
index 2e9c39f331bbc..cec6580247627 100644
--- a/llvm/test/CodeGen/RISCV/float-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/float-fcmp.ll
@@ -123,7 +123,7 @@ define i32 @fcmp_oge(float %a, float %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __gesf2
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -134,7 +134,7 @@ define i32 @fcmp_oge(float %a, float %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __gesf2
-; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    srli a0, a0, 63
 ; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -160,7 +160,7 @@ define i32 @fcmp_olt(float %a, float %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __ltsf2
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -170,7 +170,7 @@ define i32 @fcmp_olt(float %a, float %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __ltsf2
-; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    srli a0, a0, 63
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
@@ -440,7 +440,7 @@ define i32 @fcmp_uge(float %a, float %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __ltsf2
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -451,7 +451,7 @@ define i32 @fcmp_uge(float %a, float %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __ltsf2
-; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    srli a0, a0, 63
 ; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -479,7 +479,7 @@ define i32 @fcmp_ult(float %a, float %b) nounwind {
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __gesf2
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -489,7 +489,7 @@ define i32 @fcmp_ult(float %a, float %b) nounwind {
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __gesf2
-; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    srli a0, a0, 63
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/float-intrinsics.ll b/llvm/test/CodeGen/RISCV/float-intrinsics.ll
index ed50042f54ab5..8b8a3257a0027 100644
--- a/llvm/test/CodeGen/RISCV/float-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/float-intrinsics.ll
@@ -1634,7 +1634,7 @@ define i1 @fpclass(float %x) {
 ; RV32I-NEXT:    add a4, a5, a4
 ; RV32I-NEXT:    addi a5, a5, -1
 ; RV32I-NEXT:    sltu a2, a5, a2
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    seqz a1, a1
 ; RV32I-NEXT:    seqz a5, a6
 ; RV32I-NEXT:    srli a4, a4, 24
@@ -1660,8 +1660,7 @@ define i1 @fpclass(float %x) {
 ; RV64I-NEXT:    add a4, a5, a4
 ; RV64I-NEXT:    addi a5, a5, -1
 ; RV64I-NEXT:    sltu a2, a5, a2
-; RV64I-NEXT:    sext.w a0, a0
-; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    srliw a0, a0, 31
 ; RV64I-NEXT:    seqz a1, a1
 ; RV64I-NEXT:    seqz a5, a6
 ; RV64I-NEXT:    srliw a4, a4, 24
@@ -2092,19 +2091,18 @@ define i1 @isnegfinite_fpclass(float %x) {
 ; RV32I-NEXT:    lui a2, 522240
 ; RV32I-NEXT:    srli a1, a1, 1
 ; RV32I-NEXT:    slt a1, a1, a2
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    and a0, a1, a0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: isnegfinite_fpclass:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sext.w a1, a0
-; RV64I-NEXT:    slli a0, a0, 33
+; RV64I-NEXT:    slli a1, a0, 33
 ; RV64I-NEXT:    lui a2, 522240
-; RV64I-NEXT:    srli a0, a0, 33
-; RV64I-NEXT:    slt a0, a0, a2
-; RV64I-NEXT:    slti a1, a1, 0
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    srli a1, a1, 33
+; RV64I-NEXT:    slt a1, a1, a2
+; RV64I-NEXT:    srliw a0, a0, 31
+; RV64I-NEXT:    and a0, a1, a0
 ; RV64I-NEXT:    ret
   %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 56)  ; 0x38 = "-finite"
   ret i1 %1
diff --git a/llvm/test/CodeGen/RISCV/fold-addi-loadstore.ll b/llvm/test/CodeGen/RISCV/fold-addi-loadstore.ll
index 477a7d1ce7b6b..aa65ebecbe56a 100644
--- a/llvm/test/CodeGen/RISCV/fold-addi-loadstore.ll
+++ b/llvm/test/CodeGen/RISCV/fold-addi-loadstore.ll
@@ -909,7 +909,7 @@ define i64 @fold_addi_from_different_bb(i64 %k, i64 %n, ptr %a) nounwind {
 ; RV32I-NEXT:    mv s2, a2
 ; RV32I-NEXT:    beqz a3, .LBB20_3
 ; RV32I-NEXT:  # %bb.1: # %entry
-; RV32I-NEXT:    slti a1, s1, 0
+; RV32I-NEXT:    srli a1, s1, 31
 ; RV32I-NEXT:    beqz a1, .LBB20_4
 ; RV32I-NEXT:  .LBB20_2:
 ; RV32I-NEXT:    li s3, 0
@@ -974,7 +974,7 @@ define i64 @fold_addi_from_different_bb(i64 %k, i64 %n, ptr %a) nounwind {
 ; RV32I-MEDIUM-NEXT:    mv s2, a2
 ; RV32I-MEDIUM-NEXT:    beqz a3, .LBB20_3
 ; RV32I-MEDIUM-NEXT:  # %bb.1: # %entry
-; RV32I-MEDIUM-NEXT:    slti a1, s1, 0
+; RV32I-MEDIUM-NEXT:    srli a1, s1, 31
 ; RV32I-MEDIUM-NEXT:    beqz a1, .LBB20_4
 ; RV32I-MEDIUM-NEXT:  .LBB20_2:
 ; RV32I-MEDIUM-NEXT:    li s3, 0
diff --git a/llvm/test/CodeGen/RISCV/forced-atomics.ll b/llvm/test/CodeGen/RISCV/forced-atomics.ll
index e7719dc70660b..1a69106a485eb 100644
--- a/llvm/test/CodeGen/RISCV/forced-atomics.ll
+++ b/llvm/test/CodeGen/RISCV/forced-atomics.ll
@@ -3475,7 +3475,7 @@ define i64 @rmw64_min_seq_cst(ptr %p) nounwind {
 ; RV32-NEXT:    beqz a1, .LBB50_4
 ; RV32-NEXT:  # %bb.3: # %atomicrmw.start
 ; RV32-NEXT:    # in Loop: Header=BB50_2 Depth=1
-; RV32-NEXT:    slti a0, a1, 0
+; RV32-NEXT:    srli a0, a1, 31
 ; RV32-NEXT:    mv a2, a4
 ; RV32-NEXT:    bnez a0, .LBB50_1
 ; RV32-NEXT:    j .LBB50_5
diff --git a/llvm/test/CodeGen/RISCV/fpclamptosat.ll b/llvm/test/CodeGen/RISCV/fpclamptosat.ll
index 117e3e4aac45d..69566b1725548 100644
--- a/llvm/test/CodeGen/RISCV/fpclamptosat.ll
+++ b/llvm/test/CodeGen/RISCV/fpclamptosat.ll
@@ -22,7 +22,7 @@ define i32 @stest_f64i32(double %x) {
 ; RV32IF-NEXT:    addi a3, a2, -1
 ; RV32IF-NEXT:    beqz a1, .LBB0_2
 ; RV32IF-NEXT:  # %bb.1: # %entry
-; RV32IF-NEXT:    slti a4, a1, 0
+; RV32IF-NEXT:    srli a4, a1, 31
 ; RV32IF-NEXT:    j .LBB0_3
 ; RV32IF-NEXT:  .LBB0_2:
 ; RV32IF-NEXT:    sltu a4, a0, a3
@@ -36,7 +36,7 @@ define i32 @stest_f64i32(double %x) {
 ; RV32IF-NEXT:    li a3, -1
 ; RV32IF-NEXT:    beq a1, a3, .LBB0_7
 ; RV32IF-NEXT:  # %bb.6: # %entry
-; RV32IF-NEXT:    slti a1, a1, 0
+; RV32IF-NEXT:    srli a1, a1, 31
 ; RV32IF-NEXT:    xori a1, a1, 1
 ; RV32IF-NEXT:    beqz a1, .LBB0_8
 ; RV32IF-NEXT:    j .LBB0_9
@@ -185,7 +185,7 @@ define i32 @ustest_f64i32(double %x) {
 ; RV32IF-NEXT:    call __fixdfdi
 ; RV32IF-NEXT:    beqz a1, .LBB2_2
 ; RV32IF-NEXT:  # %bb.1: # %entry
-; RV32IF-NEXT:    slti a2, a1, 0
+; RV32IF-NEXT:    srli a2, a1, 31
 ; RV32IF-NEXT:    j .LBB2_3
 ; RV32IF-NEXT:  .LBB2_2:
 ; RV32IF-NEXT:    sltiu a2, a0, -1
@@ -373,7 +373,7 @@ define i32 @stest_f16i32(half %x) {
 ; RV32-NEXT:    addi a3, a2, -1
 ; RV32-NEXT:    beqz a1, .LBB6_2
 ; RV32-NEXT:  # %bb.1: # %entry
-; RV32-NEXT:    slti a4, a1, 0
+; RV32-NEXT:    srli a4, a1, 31
 ; RV32-NEXT:    j .LBB6_3
 ; RV32-NEXT:  .LBB6_2:
 ; RV32-NEXT:    sltu a4, a0, a3
@@ -387,7 +387,7 @@ define i32 @stest_f16i32(half %x) {
 ; RV32-NEXT:    li a3, -1
 ; RV32-NEXT:    beq a1, a3, .LBB6_7
 ; RV32-NEXT:  # %bb.6: # %entry
-; RV32-NEXT:    slti a1, a1, 0
+; RV32-NEXT:    srli a1, a1, 31
 ; RV32-NEXT:    xori a1, a1, 1
 ; RV32-NEXT:    beqz a1, .LBB6_8
 ; RV32-NEXT:    j .LBB6_9
@@ -494,7 +494,7 @@ define i32 @ustest_f16i32(half %x) {
 ; RV32-NEXT:    call __fixsfdi
 ; RV32-NEXT:    beqz a1, .LBB8_2
 ; RV32-NEXT:  # %bb.1: # %entry
-; RV32-NEXT:    slti a2, a1, 0
+; RV32-NEXT:    srli a2, a1, 31
 ; RV32-NEXT:    j .LBB8_3
 ; RV32-NEXT:  .LBB8_2:
 ; RV32-NEXT:    sltiu a2, a0, -1
@@ -1108,7 +1108,7 @@ define i64 @stest_f64i64(double %x) {
 ; RV32IF-NEXT:    or a7, a2, a4
 ; RV32IF-NEXT:    beqz a7, .LBB18_4
 ; RV32IF-NEXT:  .LBB18_3: # %entry
-; RV32IF-NEXT:    slti a6, a4, 0
+; RV32IF-NEXT:    srli a6, a4, 31
 ; RV32IF-NEXT:  .LBB18_4: # %entry
 ; RV32IF-NEXT:    addi a7, a6, -1
 ; RV32IF-NEXT:    neg t0, a6
@@ -1130,8 +1130,8 @@ define i64 @stest_f64i64(double %x) {
 ; RV32IF-NEXT:    li a5, -1
 ; RV32IF-NEXT:    beq a2, a5, .LBB18_11
 ; RV32IF-NEXT:  # %bb.10: # %entry
-; RV32IF-NEXT:    slti a0, a4, 0
-; RV32IF-NEXT:    xori a0, a0, 1
+; RV32IF-NEXT:    srli a4, a4, 31
+; RV32IF-NEXT:    xori a0, a4, 1
 ; RV32IF-NEXT:  .LBB18_11: # %entry
 ; RV32IF-NEXT:    bnez a0, .LBB18_13
 ; RV32IF-NEXT:  # %bb.12: # %entry
@@ -1156,7 +1156,7 @@ define i64 @stest_f64i64(double %x) {
 ; RV64IF-NEXT:    srli a3, a2, 1
 ; RV64IF-NEXT:    beqz a1, .LBB18_2
 ; RV64IF-NEXT:  # %bb.1: # %entry
-; RV64IF-NEXT:    slti a4, a1, 0
+; RV64IF-NEXT:    srli a4, a1, 63
 ; RV64IF-NEXT:    j .LBB18_3
 ; RV64IF-NEXT:  .LBB18_2:
 ; RV64IF-NEXT:    sltu a4, a0, a3
@@ -1170,8 +1170,8 @@ define i64 @stest_f64i64(double %x) {
 ; RV64IF-NEXT:    slli a1, a2, 63
 ; RV64IF-NEXT:    beq a5, a2, .LBB18_7
 ; RV64IF-NEXT:  # %bb.6: # %entry
-; RV64IF-NEXT:    slti a2, a5, 0
-; RV64IF-NEXT:    xori a2, a2, 1
+; RV64IF-NEXT:    srli a5, a5, 63
+; RV64IF-NEXT:    xori a2, a5, 1
 ; RV64IF-NEXT:    beqz a2, .LBB18_8
 ; RV64IF-NEXT:    j .LBB18_9
 ; RV64IF-NEXT:  .LBB18_7:
@@ -1211,7 +1211,7 @@ define i64 @stest_f64i64(double %x) {
 ; RV32IFD-NEXT:    or a7, a2, a4
 ; RV32IFD-NEXT:    beqz a7, .LBB18_4
 ; RV32IFD-NEXT:  .LBB18_3: # %entry
-; RV32IFD-NEXT:    slti a6, a4, 0
+; RV32IFD-NEXT:    srli a6, a4, 31
 ; RV32IFD-NEXT:  .LBB18_4: # %entry
 ; RV32IFD-NEXT:    addi a7, a6, -1
 ; RV32IFD-NEXT:    neg t0, a6
@@ -1233,8 +1233,8 @@ define i64 @stest_f64i64(double %x) {
 ; RV32IFD-NEXT:    li a5, -1
 ; RV32IFD-NEXT:    beq a2, a5, .LBB18_11
 ; RV32IFD-NEXT:  # %bb.10: # %entry
-; RV32IFD-NEXT:    slti a0, a4, 0
-; RV32IFD-NEXT:    xori a0, a0, 1
+; RV32IFD-NEXT:    srli a4, a4, 31
+; RV32IFD-NEXT:    xori a0, a4, 1
 ; RV32IFD-NEXT:  .LBB18_11: # %entry
 ; RV32IFD-NEXT:    bnez a0, .LBB18_13
 ; RV32IFD-NEXT:  # %bb.12: # %entry
@@ -1363,7 +1363,7 @@ define i64 @ustest_f64i64(double %x) {
 ; RV32IF-NEXT:    lw a0, 16(sp)
 ; RV32IF-NEXT:    beqz a1, .LBB20_2
 ; RV32IF-NEXT:  # %bb.1: # %entry
-; RV32IF-NEXT:    slti a2, a1, 0
+; RV32IF-NEXT:    srli a2, a1, 31
 ; RV32IF-NEXT:    j .LBB20_3
 ; RV32IF-NEXT:  .LBB20_2:
 ; RV32IF-NEXT:    seqz a2, a0
@@ -1446,7 +1446,7 @@ define i64 @ustest_f64i64(double %x) {
 ; RV32IFD-NEXT:    lw a0, 16(sp)
 ; RV32IFD-NEXT:    beqz a1, .LBB20_2
 ; RV32IFD-NEXT:  # %bb.1: # %entry
-; RV32IFD-NEXT:    slti a2, a1, 0
+; RV32IFD-NEXT:    srli a2, a1, 31
 ; RV32IFD-NEXT:    j .LBB20_3
 ; RV32IFD-NEXT:  .LBB20_2:
 ; RV32IFD-NEXT:    seqz a2, a0
@@ -1523,7 +1523,7 @@ define i64 @stest_f32i64(float %x) {
 ; RV32-NEXT:    or a7, a2, a4
 ; RV32-NEXT:    beqz a7, .LBB21_4
 ; RV32-NEXT:  .LBB21_3: # %entry
-; RV32-NEXT:    slti a6, a4, 0
+; RV32-NEXT:    srli a6, a4, 31
 ; RV32-NEXT:  .LBB21_4: # %entry
 ; RV32-NEXT:    addi a7, a6, -1
 ; RV32-NEXT:    neg t0, a6
@@ -1545,8 +1545,8 @@ define i64 @stest_f32i64(float %x) {
 ; RV32-NEXT:    li a5, -1
 ; RV32-NEXT:    beq a2, a5, .LBB21_11
 ; RV32-NEXT:  # %bb.10: # %entry
-; RV32-NEXT:    slti a0, a4, 0
-; RV32-NEXT:    xori a0, a0, 1
+; RV32-NEXT:    srli a4, a4, 31
+; RV32-NEXT:    xori a0, a4, 1
 ; RV32-NEXT:  .LBB21_11: # %entry
 ; RV32-NEXT:    bnez a0, .LBB21_13
 ; RV32-NEXT:  # %bb.12: # %entry
@@ -1643,7 +1643,7 @@ define i64 @ustest_f32i64(float %x) {
 ; RV32-NEXT:    lw a0, 16(sp)
 ; RV32-NEXT:    beqz a1, .LBB23_2
 ; RV32-NEXT:  # %bb.1: # %entry
-; RV32-NEXT:    slti a2, a1, 0
+; RV32-NEXT:    srli a2, a1, 31
 ; RV32-NEXT:    j .LBB23_3
 ; RV32-NEXT:  .LBB23_2:
 ; RV32-NEXT:    seqz a2, a0
@@ -1750,7 +1750,7 @@ define i64 @stest_f16i64(half %x) {
 ; RV32-NEXT:    or a7, a2, a4
 ; RV32-NEXT:    beqz a7, .LBB24_4
 ; RV32-NEXT:  .LBB24_3: # %entry
-; RV32-NEXT:    slti a6, a4, 0
+; RV32-NEXT:    srli a6, a4, 31
 ; RV32-NEXT:  .LBB24_4: # %entry
 ; RV32-NEXT:    addi a7, a6, -1
 ; RV32-NEXT:    neg t0, a6
@@ -1772,8 +1772,8 @@ define i64 @stest_f16i64(half %x) {
 ; RV32-NEXT:    li a5, -1
 ; RV32-NEXT:    beq a2, a5, .LBB24_11
 ; RV32-NEXT:  # %bb.10: # %entry
-; RV32-NEXT:    slti a0, a4, 0
-; RV32-NEXT:    xori a0, a0, 1
+; RV32-NEXT:    srli a4, a4, 31
+; RV32-NEXT:    xori a0, a4, 1
 ; RV32-NEXT:  .LBB24_11: # %entry
 ; RV32-NEXT:    bnez a0, .LBB24_13
 ; RV32-NEXT:  # %bb.12: # %entry
@@ -1799,7 +1799,7 @@ define i64 @stest_f16i64(half %x) {
 ; RV64-NEXT:    srli a3, a2, 1
 ; RV64-NEXT:    beqz a1, .LBB24_2
 ; RV64-NEXT:  # %bb.1: # %entry
-; RV64-NEXT:    slti a4, a1, 0
+; RV64-NEXT:    srli a4, a1, 63
 ; RV64-NEXT:    j .LBB24_3
 ; RV64-NEXT:  .LBB24_2:
 ; RV64-NEXT:    sltu a4, a0, a3
@@ -1813,8 +1813,8 @@ define i64 @stest_f16i64(half %x) {
 ; RV64-NEXT:    slli a1, a2, 63
 ; RV64-NEXT:    beq a5, a2, .LBB24_7
 ; RV64-NEXT:  # %bb.6: # %entry
-; RV64-NEXT:    slti a2, a5, 0
-; RV64-NEXT:    xori a2, a2, 1
+; RV64-NEXT:    srli a5, a5, 63
+; RV64-NEXT:    xori a2, a5, 1
 ; RV64-NEXT:    beqz a2, .LBB24_8
 ; RV64-NEXT:    j .LBB24_9
 ; RV64-NEXT:  .LBB24_7:
@@ -1906,7 +1906,7 @@ define i64 @ustest_f16i64(half %x) {
 ; RV32-NEXT:    lw a0, 16(sp)
 ; RV32-NEXT:    beqz a1, .LBB26_2
 ; RV32-NEXT:  # %bb.1: # %entry
-; RV32-NEXT:    slti a2, a1, 0
+; RV32-NEXT:    srli a2, a1, 31
 ; RV32-NEXT:    j .LBB26_3
 ; RV32-NEXT:  .LBB26_2:
 ; RV32-NEXT:    seqz a2, a0
@@ -2004,7 +2004,7 @@ define i32 @stest_f64i32_mm(double %x) {
 ; RV32IF-NEXT:    addi a3, a2, -1
 ; RV32IF-NEXT:    beqz a1, .LBB27_2
 ; RV32IF-NEXT:  # %bb.1: # %entry
-; RV32IF-NEXT:    slti a4, a1, 0
+; RV32IF-NEXT:    srli a4, a1, 31
 ; RV32IF-NEXT:    j .LBB27_3
 ; RV32IF-NEXT:  .LBB27_2:
 ; RV32IF-NEXT:    sltu a4, a0, a3
@@ -2018,7 +2018,7 @@ define i32 @stest_f64i32_mm(double %x) {
 ; RV32IF-NEXT:    li a3, -1
 ; RV32IF-NEXT:    beq a1, a3, .LBB27_7
 ; RV32IF-NEXT:  # %bb.6: # %entry
-; RV32IF-NEXT:    slti a1, a1, 0
+; RV32IF-NEXT:    srli a1, a1, 31
 ; RV32IF-NEXT:    xori a1, a1, 1
 ; RV32IF-NEXT:    beqz a1, .LBB27_8
 ; RV32IF-NEXT:    j .LBB27_9
@@ -2171,7 +2171,7 @@ define i32 @ustest_f64i32_mm(double %x) {
 ; RV32IF-NEXT:    neg a2, a2
 ; RV32IF-NEXT:    or a0, a3, a0
 ; RV32IF-NEXT:    and a1, a2, a1
-; RV32IF-NEXT:    slti a1, a1, 0
+; RV32IF-NEXT:    srli a1, a1, 31
 ; RV32IF-NEXT:    addi a1, a1, -1
 ; RV32IF-NEXT:    and a0, a1, a0
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -2337,7 +2337,7 @@ define i32 @stest_f16i32_mm(half %x) {
 ; RV32-NEXT:    addi a3, a2, -1
 ; RV32-NEXT:    beqz a1, .LBB33_2
 ; RV32-NEXT:  # %bb.1: # %entry
-; RV32-NEXT:    slti a4, a1, 0
+; RV32-NEXT:    srli a4, a1, 31
 ; RV32-NEXT:    j .LBB33_3
 ; RV32-NEXT:  .LBB33_2:
 ; RV32-NEXT:    sltu a4, a0, a3
@@ -2351,7 +2351,7 @@ define i32 @stest_f16i32_mm(half %x) {
 ; RV32-NEXT:    li a3, -1
 ; RV32-NEXT:    beq a1, a3, .LBB33_7
 ; RV32-NEXT:  # %bb.6: # %entry
-; RV32-NEXT:    slti a1, a1, 0
+; RV32-NEXT:    srli a1, a1, 31
 ; RV32-NEXT:    xori a1, a1, 1
 ; RV32-NEXT:    beqz a1, .LBB33_8
 ; RV32-NEXT:    j .LBB33_9
@@ -2462,7 +2462,7 @@ define i32 @ustest_f16i32_mm(half %x) {
 ; RV32-NEXT:    neg a2, a2
 ; RV32-NEXT:    or a0, a3, a0
 ; RV32-NEXT:    and a1, a2, a1
-; RV32-NEXT:    slti a1, a1, 0
+; RV32-NEXT:    srli a1, a1, 31
 ; RV32-NEXT:    addi a1, a1, -1
 ; RV32-NEXT:    and a0, a1, a0
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -3044,7 +3044,7 @@ define i64 @stest_f64i64_mm(double %x) {
 ; RV32IF-NEXT:    or a7, a2, a4
 ; RV32IF-NEXT:    beqz a7, .LBB45_4
 ; RV32IF-NEXT:  .LBB45_3: # %entry
-; RV32IF-NEXT:    slti a6, a4, 0
+; RV32IF-NEXT:    srli a6, a4, 31
 ; RV32IF-NEXT:  .LBB45_4: # %entry
 ; RV32IF-NEXT:    addi a7, a6, -1
 ; RV32IF-NEXT:    neg t0, a6
@@ -3066,8 +3066,8 @@ define i64 @stest_f64i64_mm(double %x) {
 ; RV32IF-NEXT:    li a5, -1
 ; RV32IF-NEXT:    beq a2, a5, .LBB45_11
 ; RV32IF-NEXT:  # %bb.10: # %entry
-; RV32IF-NEXT:    slti a0, a4, 0
-; RV32IF-NEXT:    xori a0, a0, 1
+; RV32IF-NEXT:    srli a4, a4, 31
+; RV32IF-NEXT:    xori a0, a4, 1
 ; RV32IF-NEXT:  .LBB45_11: # %entry
 ; RV32IF-NEXT:    bnez a0, .LBB45_13
 ; RV32IF-NEXT:  # %bb.12: # %entry
@@ -3092,7 +3092,7 @@ define i64 @stest_f64i64_mm(double %x) {
 ; RV64IF-NEXT:    srli a3, a2, 1
 ; RV64IF-NEXT:    beqz a1, .LBB45_2
 ; RV64IF-NEXT:  # %bb.1: # %entry
-; RV64IF-NEXT:    slti a4, a1, 0
+; RV64IF-NEXT:    srli a4, a1, 63
 ; RV64IF-NEXT:    j .LBB45_3
 ; RV64IF-NEXT:  .LBB45_2:
 ; RV64IF-NEXT:    sltu a4, a0, a3
@@ -3106,8 +3106,8 @@ define i64 @stest_f64i64_mm(double %x) {
 ; RV64IF-NEXT:    slli a1, a2, 63
 ; RV64IF-NEXT:    beq a5, a2, .LBB45_7
 ; RV64IF-NEXT:  # %bb.6: # %entry
-; RV64IF-NEXT:    slti a2, a5, 0
-; RV64IF-NEXT:    xori a2, a2, 1
+; RV64IF-NEXT:    srli a5, a5, 63
+; RV64IF-NEXT:    xori a2, a5, 1
 ; RV64IF-NEXT:    beqz a2, .LBB45_8
 ; RV64IF-NEXT:    j .LBB45_9
 ; RV64IF-NEXT:  .LBB45_7:
@@ -3147,7 +3147,7 @@ define i64 @stest_f64i64_mm(double %x) {
 ; RV32IFD-NEXT:    or a7, a2, a4
 ; RV32IFD-NEXT:    beqz a7, .LBB45_4
 ; RV32IFD-NEXT:  .LBB45_3: # %entry
-; RV32IFD-NEXT:    slti a6, a4, 0
+; RV32IFD-NEXT:    srli a6, a4, 31
 ; RV32IFD-NEXT:  .LBB45_4: # %entry
 ; RV32IFD-NEXT:    addi a7, a6, -1
 ; RV32IFD-NEXT:    neg t0, a6
@@ -3169,8 +3169,8 @@ define i64 @stest_f64i64_mm(double %x) {
 ; RV32IFD-NEXT:    li a5, -1
 ; RV32IFD-NEXT:    beq a2, a5, .LBB45_11
 ; RV32IFD-NEXT:  # %bb.10: # %entry
-; RV32IFD-NEXT:    slti a0, a4, 0
-; RV32IFD-NEXT:    xori a0, a0, 1
+; RV32IFD-NEXT:    srli a4, a4, 31
+; RV32IFD-NEXT:    xori a0, a4, 1
 ; RV32IFD-NEXT:  .LBB45_11: # %entry
 ; RV32IFD-NEXT:    bnez a0, .LBB45_13
 ; RV32IFD-NEXT:  # %bb.12: # %entry
@@ -3298,7 +3298,7 @@ define i64 @ustest_f64i64_mm(double %x) {
 ; RV32IF-NEXT:    lw a3, 16(sp)
 ; RV32IF-NEXT:    beqz a0, .LBB47_2
 ; RV32IF-NEXT:  # %bb.1: # %entry
-; RV32IF-NEXT:    slti a4, a0, 0
+; RV32IF-NEXT:    srli a4, a0, 31
 ; RV32IF-NEXT:    j .LBB47_3
 ; RV32IF-NEXT:  .LBB47_2:
 ; RV32IF-NEXT:    seqz a4, a3
@@ -3312,7 +3312,7 @@ define i64 @ustest_f64i64_mm(double %x) {
 ; RV32IF-NEXT:    and a2, a3, a2
 ; RV32IF-NEXT:    and a1, a3, a1
 ; RV32IF-NEXT:    and a0, a3, a0
-; RV32IF-NEXT:    slti a0, a0, 0
+; RV32IF-NEXT:    srli a0, a0, 31
 ; RV32IF-NEXT:    addi a3, a0, -1
 ; RV32IF-NEXT:    and a0, a3, a1
 ; RV32IF-NEXT:    and a1, a3, a2
@@ -3335,7 +3335,7 @@ define i64 @ustest_f64i64_mm(double %x) {
 ; RV64-NEXT:    li a2, 1
 ; RV64-NEXT:  .LBB47_2: # %entry
 ; RV64-NEXT:    slti a1, a1, 1
-; RV64-NEXT:    slti a2, a2, 0
+; RV64-NEXT:    srli a2, a2, 63
 ; RV64-NEXT:    neg a1, a1
 ; RV64-NEXT:    and a0, a1, a0
 ; RV64-NEXT:    addi a2, a2, -1
@@ -3360,7 +3360,7 @@ define i64 @ustest_f64i64_mm(double %x) {
 ; RV32IFD-NEXT:    lw a3, 16(sp)
 ; RV32IFD-NEXT:    beqz a0, .LBB47_2
 ; RV32IFD-NEXT:  # %bb.1: # %entry
-; RV32IFD-NEXT:    slti a4, a0, 0
+; RV32IFD-NEXT:    srli a4, a0, 31
 ; RV32IFD-NEXT:    j .LBB47_3
 ; RV32IFD-NEXT:  .LBB47_2:
 ; RV32IFD-NEXT:    seqz a4, a3
@@ -3374,7 +3374,7 @@ define i64 @ustest_f64i64_mm(double %x) {
 ; RV32IFD-NEXT:    and a2, a3, a2
 ; RV32IFD-NEXT:    and a1, a3, a1
 ; RV32IFD-NEXT:    and a0, a3, a0
-; RV32IFD-NEXT:    slti a0, a0, 0
+; RV32IFD-NEXT:    srli a0, a0, 31
 ; RV32IFD-NEXT:    addi a3, a0, -1
 ; RV32IFD-NEXT:    and a0, a3, a1
 ; RV32IFD-NEXT:    and a1, a3, a2
@@ -3417,7 +3417,7 @@ define i64 @stest_f32i64_mm(float %x) {
 ; RV32-NEXT:    or a7, a2, a4
 ; RV32-NEXT:    beqz a7, .LBB48_4
 ; RV32-NEXT:  .LBB48_3: # %entry
-; RV32-NEXT:    slti a6, a4, 0
+; RV32-NEXT:    srli a6, a4, 31
 ; RV32-NEXT:  .LBB48_4: # %entry
 ; RV32-NEXT:    addi a7, a6, -1
 ; RV32-NEXT:    neg t0, a6
@@ -3439,8 +3439,8 @@ define i64 @stest_f32i64_mm(float %x) {
 ; RV32-NEXT:    li a5, -1
 ; RV32-NEXT:    beq a2, a5, .LBB48_11
 ; RV32-NEXT:  # %bb.10: # %entry
-; RV32-NEXT:    slti a0, a4, 0
-; RV32-NEXT:    xori a0, a0, 1
+; RV32-NEXT:    srli a4, a4, 31
+; RV32-NEXT:    xori a0, a4, 1
 ; RV32-NEXT:  .LBB48_11: # %entry
 ; RV32-NEXT:    bnez a0, .LBB48_13
 ; RV32-NEXT:  # %bb.12: # %entry
@@ -3536,7 +3536,7 @@ define i64 @ustest_f32i64_mm(float %x) {
 ; RV32-NEXT:    lw a3, 16(sp)
 ; RV32-NEXT:    beqz a0, .LBB50_2
 ; RV32-NEXT:  # %bb.1: # %entry
-; RV32-NEXT:    slti a4, a0, 0
+; RV32-NEXT:    srli a4, a0, 31
 ; RV32-NEXT:    j .LBB50_3
 ; RV32-NEXT:  .LBB50_2:
 ; RV32-NEXT:    seqz a4, a3
@@ -3550,7 +3550,7 @@ define i64 @ustest_f32i64_mm(float %x) {
 ; RV32-NEXT:    and a2, a3, a2
 ; RV32-NEXT:    and a1, a3, a1
 ; RV32-NEXT:    and a0, a3, a0
-; RV32-NEXT:    slti a0, a0, 0
+; RV32-NEXT:    srli a0, a0, 31
 ; RV32-NEXT:    addi a3, a0, -1
 ; RV32-NEXT:    and a0, a3, a1
 ; RV32-NEXT:    and a1, a3, a2
@@ -3573,7 +3573,7 @@ define i64 @ustest_f32i64_mm(float %x) {
 ; RV64-NEXT:    li a2, 1
 ; RV64-NEXT:  .LBB50_2: # %entry
 ; RV64-NEXT:    slti a1, a1, 1
-; RV64-NEXT:    slti a2, a2, 0
+; RV64-NEXT:    srli a2, a2, 63
 ; RV64-NEXT:    neg a1, a1
 ; RV64-NEXT:    and a0, a1, a0
 ; RV64-NEXT:    addi a2, a2, -1
@@ -3618,7 +3618,7 @@ define i64 @stest_f16i64_mm(half %x) {
 ; RV32-NEXT:    or a7, a2, a4
 ; RV32-NEXT:    beqz a7, .LBB51_4
 ; RV32-NEXT:  .LBB51_3: # %entry
-; RV32-NEXT:    slti a6, a4, 0
+; RV32-NEXT:    srli a6, a4, 31
 ; RV32-NEXT:  .LBB51_4: # %entry
 ; RV32-NEXT:    addi a7, a6, -1
 ; RV32-NEXT:    neg t0, a6
@@ -3640,8 +3640,8 @@ define i64 @stest_f16i64_mm(half %x) {
 ; RV32-NEXT:    li a5, -1
 ; RV32-NEXT:    beq a2, a5, .LBB51_11
 ; RV32-NEXT:  # %bb.10: # %entry
-; RV32-NEXT:    slti a0, a4, 0
-; RV32-NEXT:    xori a0, a0, 1
+; RV32-NEXT:    srli a4, a4, 31
+; RV32-NEXT:    xori a0, a4, 1
 ; RV32-NEXT:  .LBB51_11: # %entry
 ; RV32-NEXT:    bnez a0, .LBB51_13
 ; RV32-NEXT:  # %bb.12: # %entry
@@ -3667,7 +3667,7 @@ define i64 @stest_f16i64_mm(half %x) {
 ; RV64-NEXT:    srli a3, a2, 1
 ; RV64-NEXT:    beqz a1, .LBB51_2
 ; RV64-NEXT:  # %bb.1: # %entry
-; RV64-NEXT:    slti a4, a1, 0
+; RV64-NEXT:    srli a4, a1, 63
 ; RV64-NEXT:    j .LBB51_3
 ; RV64-NEXT:  .LBB51_2:
 ; RV64-NEXT:    sltu a4, a0, a3
@@ -3681,8 +3681,8 @@ define i64 @stest_f16i64_mm(half %x) {
 ; RV64-NEXT:    slli a1, a2, 63
 ; RV64-NEXT:    beq a5, a2, .LBB51_7
 ; RV64-NEXT:  # %bb.6: # %entry
-; RV64-NEXT:    slti a2, a5, 0
-; RV64-NEXT:    xori a2, a2, 1
+; RV64-NEXT:    srli a5, a5, 63
+; RV64-NEXT:    xori a2, a5, 1
 ; RV64-NEXT:    beqz a2, .LBB51_8
 ; RV64-NEXT:    j .LBB51_9
 ; RV64-NEXT:  .LBB51_7:
@@ -3773,7 +3773,7 @@ define i64 @ustest_f16i64_mm(half %x) {
 ; RV32-NEXT:    lw a3, 16(sp)
 ; RV32-NEXT:    beqz a0, .LBB53_2
 ; RV32-NEXT:  # %bb.1: # %entry
-; RV32-NEXT:    slti a4, a0, 0
+; RV32-NEXT:    srli a4, a0, 31
 ; RV32-NEXT:    j .LBB53_3
 ; RV32-NEXT:  .LBB53_2:
 ; RV32-NEXT:    seqz a4, a3
@@ -3787,7 +3787,7 @@ define i64 @ustest_f16i64_mm(half %x) {
 ; RV32-NEXT:    and a2, a3, a2
 ; RV32-NEXT:    and a1, a3, a1
 ; RV32-NEXT:    and a0, a3, a0
-; RV32-NEXT:    slti a0, a0, 0
+; RV32-NEXT:    srli a0, a0, 31
 ; RV32-NEXT:    addi a3, a0, -1
 ; RV32-NEXT:    and a0, a3, a1
 ; RV32-NEXT:    and a1, a3, a2
@@ -3811,7 +3811,7 @@ define i64 @ustest_f16i64_mm(half %x) {
 ; RV64-NEXT:    li a2, 1
 ; RV64-NEXT:  .LBB53_2: # %entry
 ; RV64-NEXT:    slti a1, a1, 1
-; RV64-NEXT:    slti a2, a2, 0
+; RV64-NEXT:    srli a2, a2, 63
 ; RV64-NEXT:    neg a1, a1
 ; RV64-NEXT:    and a0, a1, a0
 ; RV64-NEXT:    addi a2, a2, -1
diff --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll
index facb544fb52b6..615d0bd307a66 100644
--- a/llvm/test/CodeGen/RISCV/half-convert.ll
+++ b/llvm/test/CodeGen/RISCV/half-convert.ll
@@ -818,7 +818,7 @@ define i16 @fcvt_ui_h_sat(half %a) nounwind {
 ; RV32I-NEXT:    call __gtsf2
 ; RV32I-NEXT:    bgtz a0, .LBB3_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    slti a0, s2, 0
+; RV32I-NEXT:    srli a0, s2, 31
 ; RV32I-NEXT:    addi a0, a0, -1
 ; RV32I-NEXT:    and s0, a0, s1
 ; RV32I-NEXT:  .LBB3_2: # %start
@@ -856,7 +856,7 @@ define i16 @fcvt_ui_h_sat(half %a) nounwind {
 ; RV64I-NEXT:    call __gtsf2
 ; RV64I-NEXT:    bgtz a0, .LBB3_2
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    slti a0, s2, 0
+; RV64I-NEXT:    srli a0, s2, 63
 ; RV64I-NEXT:    addi a0, a0, -1
 ; RV64I-NEXT:    and s0, a0, s1
 ; RV64I-NEXT:  .LBB3_2: # %start
@@ -1788,7 +1788,7 @@ define i32 @fcvt_wu_h_sat(half %a) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __gesf2
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    addi s2, a0, -1
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __fixunssfsi
@@ -1828,8 +1828,8 @@ define i32 @fcvt_wu_h_sat(half %a) nounwind {
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    j .LBB8_3
 ; RV64I-NEXT:  .LBB8_2:
-; RV64I-NEXT:    slti a0, s0, 0
-; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    srli s0, s0, 63
+; RV64I-NEXT:    addi a0, s0, -1
 ; RV64I-NEXT:    and a0, a0, s1
 ; RV64I-NEXT:  .LBB8_3: # %start
 ; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
@@ -2369,13 +2369,13 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
 ; RV32I-NEXT:    call __unordsf2
 ; RV32I-NEXT:    snez a0, a0
 ; RV32I-NEXT:    sgtz a1, s4
-; RV32I-NEXT:    slti a2, s0, 0
+; RV32I-NEXT:    srli s0, s0, 31
 ; RV32I-NEXT:    addi a0, a0, -1
-; RV32I-NEXT:    neg a3, a1
-; RV32I-NEXT:    addi a2, a2, -1
+; RV32I-NEXT:    neg a2, a1
+; RV32I-NEXT:    addi s0, s0, -1
 ; RV32I-NEXT:    and a1, a0, s3
-; RV32I-NEXT:    and a2, a2, s1
-; RV32I-NEXT:    or a2, a3, a2
+; RV32I-NEXT:    and s0, s0, s1
+; RV32I-NEXT:    or a2, a2, s0
 ; RV32I-NEXT:    and a0, a0, a2
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -3049,7 +3049,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __gesf2
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    addi s2, a0, -1
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __fixunssfdi
@@ -3083,7 +3083,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __gesf2
-; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    srli a0, a0, 63
 ; RV64I-NEXT:    addi s2, a0, -1
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __fixunssfdi
@@ -6908,8 +6908,8 @@ define zeroext i16 @fcvt_wu_s_sat_i16(half %a) nounwind {
 ; RV32I-NEXT:    mv a0, s3
 ; RV32I-NEXT:    j .LBB34_3
 ; RV32I-NEXT:  .LBB34_2:
-; RV32I-NEXT:    slti a0, s1, 0
-; RV32I-NEXT:    addi a0, a0, -1
+; RV32I-NEXT:    srli s1, s1, 31
+; RV32I-NEXT:    addi a0, s1, -1
 ; RV32I-NEXT:    and a0, a0, s0
 ; RV32I-NEXT:  .LBB34_3: # %start
 ; RV32I-NEXT:    and a0, a0, s3
@@ -6949,8 +6949,8 @@ define zeroext i16 @fcvt_wu_s_sat_i16(half %a) nounwind {
 ; RV64I-NEXT:    mv a0, s3
 ; RV64I-NEXT:    j .LBB34_3
 ; RV64I-NEXT:  .LBB34_2:
-; RV64I-NEXT:    slti a0, s1, 0
-; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    srli s1, s1, 63
+; RV64I-NEXT:    addi a0, s1, -1
 ; RV64I-NEXT:    and a0, a0, s0
 ; RV64I-NEXT:  .LBB34_3: # %start
 ; RV64I-NEXT:    and a0, a0, s3
@@ -7852,8 +7852,8 @@ define zeroext i8 @fcvt_wu_s_sat_i8(half %a) nounwind {
 ; RV32I-NEXT:    li a0, 255
 ; RV32I-NEXT:    j .LBB38_3
 ; RV32I-NEXT:  .LBB38_2:
-; RV32I-NEXT:    slti a0, s0, 0
-; RV32I-NEXT:    addi a0, a0, -1
+; RV32I-NEXT:    srli s0, s0, 31
+; RV32I-NEXT:    addi a0, s0, -1
 ; RV32I-NEXT:    and a0, a0, s1
 ; RV32I-NEXT:  .LBB38_3: # %start
 ; RV32I-NEXT:    zext.b a0, a0
@@ -7889,8 +7889,8 @@ define zeroext i8 @fcvt_wu_s_sat_i8(half %a) nounwind {
 ; RV64I-NEXT:    li a0, 255
 ; RV64I-NEXT:    j .LBB38_3
 ; RV64I-NEXT:  .LBB38_2:
-; RV64I-NEXT:    slti a0, s0, 0
-; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    srli s0, s0, 63
+; RV64I-NEXT:    addi a0, s0, -1
 ; RV64I-NEXT:    and a0, a0, s1
 ; RV64I-NEXT:  .LBB38_3: # %start
 ; RV64I-NEXT:    zext.b a0, a0
@@ -8126,7 +8126,7 @@ define zeroext i32 @fcvt_wu_h_sat_zext(half %a) nounwind {
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    call __gesf2
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    addi s2, a0, -1
 ; RV32I-NEXT:    mv a0, s0
 ; RV32I-NEXT:    call __fixunssfsi
@@ -8166,8 +8166,8 @@ define zeroext i32 @fcvt_wu_h_sat_zext(half %a) nounwind {
 ; RV64I-NEXT:    srli a0, a0, 32
 ; RV64I-NEXT:    j .LBB39_3
 ; RV64I-NEXT:  .LBB39_2:
-; RV64I-NEXT:    slti a0, s0, 0
-; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    srli s0, s0, 63
+; RV64I-NEXT:    addi a0, s0, -1
 ; RV64I-NEXT:    and a0, a0, s1
 ; RV64I-NEXT:  .LBB39_3: # %start
 ; RV64I-NEXT:    slli a0, a0, 32
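
The half-convert.ll changes are all instances of one saturating-conversion idiom: the
sign bit of a libcall result is turned into an all-ones or all-zero mask that clamps
negative inputs to zero. Annotated, the updated RV32I sequence from fcvt_ui_h_sat reads:

    srli a0, s2, 31      # a0 = sign bit of s2 (1 if the input compared below zero)
    addi a0, a0, -1      # a0 = 0 when negative, all-ones otherwise
    and  s0, a0, s1      # keep the converted value only for non-negative input

The srli produces the same 0-or-1 value the old slti did, so the mask arithmetic that
follows is untouched.
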
diff --git a/llvm/test/CodeGen/RISCV/memcmp-optsize.ll b/llvm/test/CodeGen/RISCV/memcmp-optsize.ll
index cd9357994742b..afc8e3553f8b1 100644
--- a/llvm/test/CodeGen/RISCV/memcmp-optsize.ll
+++ b/llvm/test/CodeGen/RISCV/memcmp-optsize.ll
@@ -2741,7 +2741,7 @@ define i1 @bcmp_lt_zero(ptr %s1, ptr %s2) nounwind optsize {
 ; CHECK-ALIGNED-RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; CHECK-ALIGNED-RV64-NEXT:    li a2, 4
 ; CHECK-ALIGNED-RV64-NEXT:    call bcmp
-; CHECK-ALIGNED-RV64-NEXT:    slti a0, a0, 0
+; CHECK-ALIGNED-RV64-NEXT:    srli a0, a0, 63
 ; CHECK-ALIGNED-RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK-ALIGNED-RV64-NEXT:    addi sp, sp, 16
 ; CHECK-ALIGNED-RV64-NEXT:    ret
@@ -2763,7 +2763,7 @@ define i1 @bcmp_lt_zero(ptr %s1, ptr %s2) nounwind optsize {
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    li a2, 4
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    call bcmp
-; CHECK-ALIGNED-RV64-ZBB-NEXT:    slti a0, a0, 0
+; CHECK-ALIGNED-RV64-ZBB-NEXT:    srli a0, a0, 63
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    addi sp, sp, 16
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    ret
@@ -2785,7 +2785,7 @@ define i1 @bcmp_lt_zero(ptr %s1, ptr %s2) nounwind optsize {
 ; CHECK-ALIGNED-RV64-ZBKB-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; CHECK-ALIGNED-RV64-ZBKB-NEXT:    li a2, 4
 ; CHECK-ALIGNED-RV64-ZBKB-NEXT:    call bcmp
-; CHECK-ALIGNED-RV64-ZBKB-NEXT:    slti a0, a0, 0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT:    srli a0, a0, 63
 ; CHECK-ALIGNED-RV64-ZBKB-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK-ALIGNED-RV64-ZBKB-NEXT:    addi sp, sp, 16
 ; CHECK-ALIGNED-RV64-ZBKB-NEXT:    ret
@@ -2807,7 +2807,7 @@ define i1 @bcmp_lt_zero(ptr %s1, ptr %s2) nounwind optsize {
 ; CHECK-ALIGNED-RV64-V-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; CHECK-ALIGNED-RV64-V-NEXT:    li a2, 4
 ; CHECK-ALIGNED-RV64-V-NEXT:    call bcmp
-; CHECK-ALIGNED-RV64-V-NEXT:    slti a0, a0, 0
+; CHECK-ALIGNED-RV64-V-NEXT:    srli a0, a0, 63
 ; CHECK-ALIGNED-RV64-V-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK-ALIGNED-RV64-V-NEXT:    addi sp, sp, 16
 ; CHECK-ALIGNED-RV64-V-NEXT:    ret
@@ -5549,7 +5549,7 @@ define i1 @memcmp_lt_zero(ptr %s1, ptr %s2) nounwind optsize {
 ; CHECK-ALIGNED-RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; CHECK-ALIGNED-RV64-NEXT:    li a2, 4
 ; CHECK-ALIGNED-RV64-NEXT:    call memcmp
-; CHECK-ALIGNED-RV64-NEXT:    slti a0, a0, 0
+; CHECK-ALIGNED-RV64-NEXT:    srli a0, a0, 63
 ; CHECK-ALIGNED-RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK-ALIGNED-RV64-NEXT:    addi sp, sp, 16
 ; CHECK-ALIGNED-RV64-NEXT:    ret
@@ -5571,7 +5571,7 @@ define i1 @memcmp_lt_zero(ptr %s1, ptr %s2) nounwind optsize {
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    li a2, 4
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    call memcmp
-; CHECK-ALIGNED-RV64-ZBB-NEXT:    slti a0, a0, 0
+; CHECK-ALIGNED-RV64-ZBB-NEXT:    srli a0, a0, 63
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    addi sp, sp, 16
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    ret
@@ -5593,7 +5593,7 @@ define i1 @memcmp_lt_zero(ptr %s1, ptr %s2) nounwind optsize {
 ; CHECK-ALIGNED-RV64-ZBKB-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; CHECK-ALIGNED-RV64-ZBKB-NEXT:    li a2, 4
 ; CHECK-ALIGNED-RV64-ZBKB-NEXT:    call memcmp
-; CHECK-ALIGNED-RV64-ZBKB-NEXT:    slti a0, a0, 0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT:    srli a0, a0, 63
 ; CHECK-ALIGNED-RV64-ZBKB-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK-ALIGNED-RV64-ZBKB-NEXT:    addi sp, sp, 16
 ; CHECK-ALIGNED-RV64-ZBKB-NEXT:    ret
@@ -5615,7 +5615,7 @@ define i1 @memcmp_lt_zero(ptr %s1, ptr %s2) nounwind optsize {
 ; CHECK-ALIGNED-RV64-V-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; CHECK-ALIGNED-RV64-V-NEXT:    li a2, 4
 ; CHECK-ALIGNED-RV64-V-NEXT:    call memcmp
-; CHECK-ALIGNED-RV64-V-NEXT:    slti a0, a0, 0
+; CHECK-ALIGNED-RV64-V-NEXT:    srli a0, a0, 63
 ; CHECK-ALIGNED-RV64-V-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK-ALIGNED-RV64-V-NEXT:    addi sp, sp, 16
 ; CHECK-ALIGNED-RV64-V-NEXT:    ret
@@ -5637,7 +5637,7 @@ define i1 @memcmp_lt_zero(ptr %s1, ptr %s2) nounwind optsize {
 ; CHECK-UNALIGNED-RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; CHECK-UNALIGNED-RV64-NEXT:    li a2, 4
 ; CHECK-UNALIGNED-RV64-NEXT:    call memcmp
-; CHECK-UNALIGNED-RV64-NEXT:    slti a0, a0, 0
+; CHECK-UNALIGNED-RV64-NEXT:    srli a0, a0, 63
 ; CHECK-UNALIGNED-RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK-UNALIGNED-RV64-NEXT:    addi sp, sp, 16
 ; CHECK-UNALIGNED-RV64-NEXT:    ret
@@ -5699,7 +5699,7 @@ define i1 @memcmp_lt_zero(ptr %s1, ptr %s2) nounwind optsize {
 ; CHECK-UNALIGNED-RV64-V-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; CHECK-UNALIGNED-RV64-V-NEXT:    li a2, 4
 ; CHECK-UNALIGNED-RV64-V-NEXT:    call memcmp
-; CHECK-UNALIGNED-RV64-V-NEXT:    slti a0, a0, 0
+; CHECK-UNALIGNED-RV64-V-NEXT:    srli a0, a0, 63
 ; CHECK-UNALIGNED-RV64-V-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK-UNALIGNED-RV64-V-NEXT:    addi sp, sp, 16
 ; CHECK-UNALIGNED-RV64-V-NEXT:    ret
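
Every hunk in memcmp-optsize.ll has the same shape: the i1 result of comparing the
sign-extended libcall return value against zero becomes a shift of the sign bit into
bit 0. A reduced reproducer (hypothetical, written here for illustration; it is not one
of the updated tests) would be:

    define i1 @cmp_lt_zero(ptr %s1, ptr %s2) nounwind optsize {
      %r = call signext i32 @memcmp(ptr %s1, ptr %s2, i64 4)
      %c = icmp slt i32 %r, 0    ; true iff the sign bit of %r is set
      ret i1 %c
    }
    declare signext i32 @memcmp(ptr, ptr, i64)

Because the calling convention leaves the result sign-extended in a0, RV64 can extract
the sign bit with srli a0, a0, 63 where it previously needed slti a0, a0, 0, which is
exactly the substitution in the CHECK lines above.
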
diff --git a/llvm/test/CodeGen/RISCV/memcmp.ll b/llvm/test/CodeGen/RISCV/memcmp.ll
index a5bdb13d37fb8..c737edb9acce9 100644
--- a/llvm/test/CodeGen/RISCV/memcmp.ll
+++ b/llvm/test/CodeGen/RISCV/memcmp.ll
@@ -3161,7 +3161,7 @@ define i1 @bcmp_lt_zero(ptr %s1, ptr %s2) nounwind {
 ; CHECK-ALIGNED-RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; CHECK-ALIGNED-RV64-NEXT:    li a2, 4
 ; CHECK-ALIGNED-RV64-NEXT:    call bcmp
-; CHECK-ALIGNED-RV64-NEXT:    slti a0, a0, 0
+; CHECK-ALIGNED-RV64-NEXT:    srli a0, a0, 63
 ; CHECK-ALIGNED-RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK-ALIGNED-RV64-NEXT:    addi sp, sp, 16
 ; CHECK-ALIGNED-RV64-NEXT:    ret
@@ -3183,7 +3183,7 @@ define i1 @bcmp_lt_zero(ptr %s1, ptr %s2) nounwind {
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    li a2, 4
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    call bcmp
-; CHECK-ALIGNED-RV64-ZBB-NEXT:    slti a0, a0, 0
+; CHECK-ALIGNED-RV64-ZBB-NEXT:    srli a0, a0, 63
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    addi sp, sp, 16
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    ret
@@ -3205,7 +3205,7 @@ define i1 @bcmp_lt_zero(ptr %s1, ptr %s2) nounwind {
 ; CHECK-ALIGNED-RV64-ZBKB-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; CHECK-ALIGNED-RV64-ZBKB-NEXT:    li a2, 4
 ; CHECK-ALIGNED-RV64-ZBKB-NEXT:    call bcmp
-; CHECK-ALIGNED-RV64-ZBKB-NEXT:    slti a0, a0, 0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT:    srli a0, a0, 63
 ; CHECK-ALIGNED-RV64-ZBKB-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK-ALIGNED-RV64-ZBKB-NEXT:    addi sp, sp, 16
 ; CHECK-ALIGNED-RV64-ZBKB-NEXT:    ret
@@ -3227,7 +3227,7 @@ define i1 @bcmp_lt_zero(ptr %s1, ptr %s2) nounwind {
 ; CHECK-ALIGNED-RV64-V-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; CHECK-ALIGNED-RV64-V-NEXT:    li a2, 4
 ; CHECK-ALIGNED-RV64-V-NEXT:    call bcmp
-; CHECK-ALIGNED-RV64-V-NEXT:    slti a0, a0, 0
+; CHECK-ALIGNED-RV64-V-NEXT:    srli a0, a0, 63
 ; CHECK-ALIGNED-RV64-V-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK-ALIGNED-RV64-V-NEXT:    addi sp, sp, 16
 ; CHECK-ALIGNED-RV64-V-NEXT:    ret
@@ -3454,7 +3454,7 @@ define i1 @bcmp_ge_zero(ptr %s1, ptr %s2) nounwind {
 ; CHECK-ALIGNED-RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; CHECK-ALIGNED-RV32-NEXT:    li a2, 4
 ; CHECK-ALIGNED-RV32-NEXT:    call bcmp
-; CHECK-ALIGNED-RV32-NEXT:    slti a0, a0, 0
+; CHECK-ALIGNED-RV32-NEXT:    srli a0, a0, 31
 ; CHECK-ALIGNED-RV32-NEXT:    xori a0, a0, 1
 ; CHECK-ALIGNED-RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK-ALIGNED-RV32-NEXT:    addi sp, sp, 16
@@ -3466,7 +3466,7 @@ define i1 @bcmp_ge_zero(ptr %s1, ptr %s2) nounwind {
 ; CHECK-ALIGNED-RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; CHECK-ALIGNED-RV64-NEXT:    li a2, 4
 ; CHECK-ALIGNED-RV64-NEXT:    call bcmp
-; CHECK-ALIGNED-RV64-NEXT:    slti a0, a0, 0
+; CHECK-ALIGNED-RV64-NEXT:    srli a0, a0, 63
 ; CHECK-ALIGNED-RV64-NEXT:    xori a0, a0, 1
 ; CHECK-ALIGNED-RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK-ALIGNED-RV64-NEXT:    addi sp, sp, 16
@@ -3478,7 +3478,7 @@ define i1 @bcmp_ge_zero(ptr %s1, ptr %s2) nounwind {
 ; CHECK-ALIGNED-RV32-ZBB-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; CHECK-ALIGNED-RV32-ZBB-NEXT:    li a2, 4
 ; CHECK-ALIGNED-RV32-ZBB-NEXT:    call bcmp
-; CHECK-ALIGNED-RV32-ZBB-NEXT:    slti a0, a0, 0
+; CHECK-ALIGNED-RV32-ZBB-NEXT:    srli a0, a0, 31
 ; CHECK-ALIGNED-RV32-ZBB-NEXT:    xori a0, a0, 1
 ; CHECK-ALIGNED-RV32-ZBB-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK-ALIGNED-RV32-ZBB-NEXT:    addi sp, sp, 16
@@ -3490,7 +3490,7 @@ define i1 @bcmp_ge_zero(ptr %s1, ptr %s2) nounwind {
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    li a2, 4
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    call bcmp
-; CHECK-ALIGNED-RV64-ZBB-NEXT:    slti a0, a0, 0
+; CHECK-ALIGNED-RV64-ZBB-NEXT:    srli a0, a0, 63
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    xori a0, a0, 1
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    addi sp, sp, 16
@@ -3502,7 +3502,7 @@ define i1 @bcmp_ge_zero(ptr %s1, ptr %s2) nounwind {
 ; CHECK-ALIGNED-RV32-ZBKB-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; CHECK-ALIGNED-RV32-ZBKB-NEXT:    li a2, 4
 ; CHECK-ALIGNED-RV32-ZBKB-NEXT:    call bcmp
-; CHECK-ALIGNED-RV32-ZBKB-NEXT:    slti a0, a0, 0
+; CHECK-ALIGNED-RV32-ZBKB-NEXT:    srli a0, a0, 31
 ; CHECK-ALIGNED-RV32-ZBKB-NEXT:    xori a0, a0, 1
 ; CHECK-ALIGNED-RV32-ZBKB-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK-ALIGNED-RV32-ZBKB-NEXT:    addi sp, sp, 16
@@ -3514,7 +3514,7 @@ define i1 @bcmp_ge_zero(ptr %s1, ptr %s2) nounwind {
 ; CHECK-ALIGNED-RV64-ZBKB-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; CHECK-ALIGNED-RV64-ZBKB-NEXT:    li a2, 4
 ; CHECK-ALIGNED-RV64-ZBKB-NEXT:    call bcmp
-; CHECK-ALIGNED-RV64-ZBKB-NEXT:    slti a0, a0, 0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT:    srli a0, a0, 63
 ; CHECK-ALIGNED-RV64-ZBKB-NEXT:    xori a0, a0, 1
 ; CHECK-ALIGNED-RV64-ZBKB-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK-ALIGNED-RV64-ZBKB-NEXT:    addi sp, sp, 16
@@ -3526,7 +3526,7 @@ define i1 @bcmp_ge_zero(ptr %s1, ptr %s2) nounwind {
 ; CHECK-ALIGNED-RV32-V-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; CHECK-ALIGNED-RV32-V-NEXT:    li a2, 4
 ; CHECK-ALIGNED-RV32-V-NEXT:    call bcmp
-; CHECK-ALIGNED-RV32-V-NEXT:    slti a0, a0, 0
+; CHECK-ALIGNED-RV32-V-NEXT:    srli a0, a0, 31
 ; CHECK-ALIGNED-RV32-V-NEXT:    xori a0, a0, 1
 ; CHECK-ALIGNED-RV32-V-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK-ALIGNED-RV32-V-NEXT:    addi sp, sp, 16
@@ -3538,7 +3538,7 @@ define i1 @bcmp_ge_zero(ptr %s1, ptr %s2) nounwind {
 ; CHECK-ALIGNED-RV64-V-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; CHECK-ALIGNED-RV64-V-NEXT:    li a2, 4
 ; CHECK-ALIGNED-RV64-V-NEXT:    call bcmp
-; CHECK-ALIGNED-RV64-V-NEXT:    slti a0, a0, 0
+; CHECK-ALIGNED-RV64-V-NEXT:    srli a0, a0, 63
 ; CHECK-ALIGNED-RV64-V-NEXT:    xori a0, a0, 1
 ; CHECK-ALIGNED-RV64-V-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK-ALIGNED-RV64-V-NEXT:    addi sp, sp, 16
@@ -6839,7 +6839,7 @@ define i1 @memcmp_lt_zero(ptr %s1, ptr %s2) nounwind {
 ; CHECK-ALIGNED-RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; CHECK-ALIGNED-RV64-NEXT:    li a2, 4
 ; CHECK-ALIGNED-RV64-NEXT:    call memcmp
-; CHECK-ALIGNED-RV64-NEXT:    slti a0, a0, 0
+; CHECK-ALIGNED-RV64-NEXT:    srli a0, a0, 63
 ; CHECK-ALIGNED-RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK-ALIGNED-RV64-NEXT:    addi sp, sp, 16
 ; CHECK-ALIGNED-RV64-NEXT:    ret
@@ -6861,7 +6861,7 @@ define i1 @memcmp_lt_zero(ptr %s1, ptr %s2) nounwind {
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    li a2, 4
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    call memcmp
-; CHECK-ALIGNED-RV64-ZBB-NEXT:    slti a0, a0, 0
+; CHECK-ALIGNED-RV64-ZBB-NEXT:    srli a0, a0, 63
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    addi sp, sp, 16
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    ret
@@ -6883,7 +6883,7 @@ define i1 @memcmp_lt_zero(ptr %s1, ptr %s2) nounwind {
 ; CHECK-ALIGNED-RV64-ZBKB-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; CHECK-ALIGNED-RV64-ZBKB-NEXT:    li a2, 4
 ; CHECK-ALIGNED-RV64-ZBKB-NEXT:    call memcmp
-; CHECK-ALIGNED-RV64-ZBKB-NEXT:    slti a0, a0, 0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT:    srli a0, a0, 63
 ; CHECK-ALIGNED-RV64-ZBKB-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK-ALIGNED-RV64-ZBKB-NEXT:    addi sp, sp, 16
 ; CHECK-ALIGNED-RV64-ZBKB-NEXT:    ret
@@ -6905,7 +6905,7 @@ define i1 @memcmp_lt_zero(ptr %s1, ptr %s2) nounwind {
 ; CHECK-ALIGNED-RV64-V-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; CHECK-ALIGNED-RV64-V-NEXT:    li a2, 4
 ; CHECK-ALIGNED-RV64-V-NEXT:    call memcmp
-; CHECK-ALIGNED-RV64-V-NEXT:    slti a0, a0, 0
+; CHECK-ALIGNED-RV64-V-NEXT:    srli a0, a0, 63
 ; CHECK-ALIGNED-RV64-V-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK-ALIGNED-RV64-V-NEXT:    addi sp, sp, 16
 ; CHECK-ALIGNED-RV64-V-NEXT:    ret
@@ -6927,7 +6927,7 @@ define i1 @memcmp_lt_zero(ptr %s1, ptr %s2) nounwind {
 ; CHECK-UNALIGNED-RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; CHECK-UNALIGNED-RV64-NEXT:    li a2, 4
 ; CHECK-UNALIGNED-RV64-NEXT:    call memcmp
-; CHECK-UNALIGNED-RV64-NEXT:    slti a0, a0, 0
+; CHECK-UNALIGNED-RV64-NEXT:    srli a0, a0, 63
 ; CHECK-UNALIGNED-RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK-UNALIGNED-RV64-NEXT:    addi sp, sp, 16
 ; CHECK-UNALIGNED-RV64-NEXT:    ret
@@ -6989,7 +6989,7 @@ define i1 @memcmp_lt_zero(ptr %s1, ptr %s2) nounwind {
 ; CHECK-UNALIGNED-RV64-V-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; CHECK-UNALIGNED-RV64-V-NEXT:    li a2, 4
 ; CHECK-UNALIGNED-RV64-V-NEXT:    call memcmp
-; CHECK-UNALIGNED-RV64-V-NEXT:    slti a0, a0, 0
+; CHECK-UNALIGNED-RV64-V-NEXT:    srli a0, a0, 63
 ; CHECK-UNALIGNED-RV64-V-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK-UNALIGNED-RV64-V-NEXT:    addi sp, sp, 16
 ; CHECK-UNALIGNED-RV64-V-NEXT:    ret
@@ -7366,7 +7366,7 @@ define i1 @memcmp_ge_zero(ptr %s1, ptr %s2) nounwind {
 ; CHECK-ALIGNED-RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; CHECK-ALIGNED-RV32-NEXT:    li a2, 4
 ; CHECK-ALIGNED-RV32-NEXT:    call memcmp
-; CHECK-ALIGNED-RV32-NEXT:    slti a0, a0, 0
+; CHECK-ALIGNED-RV32-NEXT:    srli a0, a0, 31
 ; CHECK-ALIGNED-RV32-NEXT:    xori a0, a0, 1
 ; CHECK-ALIGNED-RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK-ALIGNED-RV32-NEXT:    addi sp, sp, 16
@@ -7378,7 +7378,7 @@ define i1 @memcmp_ge_zero(ptr %s1, ptr %s2) nounwind {
 ; CHECK-ALIGNED-RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; CHECK-ALIGNED-RV64-NEXT:    li a2, 4
 ; CHECK-ALIGNED-RV64-NEXT:    call memcmp
-; CHECK-ALIGNED-RV64-NEXT:    slti a0, a0, 0
+; CHECK-ALIGNED-RV64-NEXT:    srli a0, a0, 63
 ; CHECK-ALIGNED-RV64-NEXT:    xori a0, a0, 1
 ; CHECK-ALIGNED-RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK-ALIGNED-RV64-NEXT:    addi sp, sp, 16
@@ -7390,7 +7390,7 @@ define i1 @memcmp_ge_zero(ptr %s1, ptr %s2) nounwind {
 ; CHECK-ALIGNED-RV32-ZBB-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; CHECK-ALIGNED-RV32-ZBB-NEXT:    li a2, 4
 ; CHECK-ALIGNED-RV32-ZBB-NEXT:    call memcmp
-; CHECK-ALIGNED-RV32-ZBB-NEXT:    slti a0, a0, 0
+; CHECK-ALIGNED-RV32-ZBB-NEXT:    srli a0, a0, 31
 ; CHECK-ALIGNED-RV32-ZBB-NEXT:    xori a0, a0, 1
 ; CHECK-ALIGNED-RV32-ZBB-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK-ALIGNED-RV32-ZBB-NEXT:    addi sp, sp, 16
@@ -7402,7 +7402,7 @@ define i1 @memcmp_ge_zero(ptr %s1, ptr %s2) nounwind {
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    li a2, 4
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    call memcmp
-; CHECK-ALIGNED-RV64-ZBB-NEXT:    slti a0, a0, 0
+; CHECK-ALIGNED-RV64-ZBB-NEXT:    srli a0, a0, 63
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    xori a0, a0, 1
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK-ALIGNED-RV64-ZBB-NEXT:    addi sp, sp, 16
@@ -7414,7 +7414,7 @@ define i1 @memcmp_ge_zero(ptr %s1, ptr %s2) nounwind {
 ; CHECK-ALIGNED-RV32-ZBKB-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; CHECK-ALIGNED-RV32-ZBKB-NEXT:    li a2, 4
 ; CHECK-ALIGNED-RV32-ZBKB-NEXT:    call memcmp
-; CHECK-ALIGNED-RV32-ZBKB-NEXT:    slti a0, a0, 0
+; CHECK-ALIGNED-RV32-ZBKB-NEXT:    srli a0, a0, 31
 ; CHECK-ALIGNED-RV32-ZBKB-NEXT:    xori a0, a0, 1
 ; CHECK-ALIGNED-RV32-ZBKB-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK-ALIGNED-RV32-ZBKB-NEXT:    addi sp, sp, 16
@@ -7426,7 +7426,7 @@ define i1 @memcmp_ge_zero(ptr %s1, ptr %s2) nounwind {
 ; CHECK-ALIGNED-RV64-ZBKB-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; CHECK-ALIGNED-RV64-ZBKB-NEXT:    li a2, 4
 ; CHECK-ALIGNED-RV64-ZBKB-NEXT:    call memcmp
-; CHECK-ALIGNED-RV64-ZBKB-NEXT:    slti a0, a0, 0
+; CHECK-ALIGNED-RV64-ZBKB-NEXT:    srli a0, a0, 63
 ; CHECK-ALIGNED-RV64-ZBKB-NEXT:    xori a0, a0, 1
 ; CHECK-ALIGNED-RV64-ZBKB-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK-ALIGNED-RV64-ZBKB-NEXT:    addi sp, sp, 16
@@ -7438,7 +7438,7 @@ define i1 @memcmp_ge_zero(ptr %s1, ptr %s2) nounwind {
 ; CHECK-ALIGNED-RV32-V-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; CHECK-ALIGNED-RV32-V-NEXT:    li a2, 4
 ; CHECK-ALIGNED-RV32-V-NEXT:    call memcmp
-; CHECK-ALIGNED-RV32-V-NEXT:    slti a0, a0, 0
+; CHECK-ALIGNED-RV32-V-NEXT:    srli a0, a0, 31
 ; CHECK-ALIGNED-RV32-V-NEXT:    xori a0, a0, 1
 ; CHECK-ALIGNED-RV32-V-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK-ALIGNED-RV32-V-NEXT:    addi sp, sp, 16
@@ -7450,7 +7450,7 @@ define i1 @memcmp_ge_zero(ptr %s1, ptr %s2) nounwind {
 ; CHECK-ALIGNED-RV64-V-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; CHECK-ALIGNED-RV64-V-NEXT:    li a2, 4
 ; CHECK-ALIGNED-RV64-V-NEXT:    call memcmp
-; CHECK-ALIGNED-RV64-V-NEXT:    slti a0, a0, 0
+; CHECK-ALIGNED-RV64-V-NEXT:    srli a0, a0, 63
 ; CHECK-ALIGNED-RV64-V-NEXT:    xori a0, a0, 1
 ; CHECK-ALIGNED-RV64-V-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK-ALIGNED-RV64-V-NEXT:    addi sp, sp, 16
@@ -7462,7 +7462,7 @@ define i1 @memcmp_ge_zero(ptr %s1, ptr %s2) nounwind {
 ; CHECK-UNALIGNED-RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; CHECK-UNALIGNED-RV32-NEXT:    li a2, 4
 ; CHECK-UNALIGNED-RV32-NEXT:    call memcmp
-; CHECK-UNALIGNED-RV32-NEXT:    slti a0, a0, 0
+; CHECK-UNALIGNED-RV32-NEXT:    srli a0, a0, 31
 ; CHECK-UNALIGNED-RV32-NEXT:    xori a0, a0, 1
 ; CHECK-UNALIGNED-RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK-UNALIGNED-RV32-NEXT:    addi sp, sp, 16
@@ -7474,7 +7474,7 @@ define i1 @memcmp_ge_zero(ptr %s1, ptr %s2) nounwind {
 ; CHECK-UNALIGNED-RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; CHECK-UNALIGNED-RV64-NEXT:    li a2, 4
 ; CHECK-UNALIGNED-RV64-NEXT:    call memcmp
-; CHECK-UNALIGNED-RV64-NEXT:    slti a0, a0, 0
+; CHECK-UNALIGNED-RV64-NEXT:    srli a0, a0, 63
 ; CHECK-UNALIGNED-RV64-NEXT:    xori a0, a0, 1
 ; CHECK-UNALIGNED-RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK-UNALIGNED-RV64-NEXT:    addi sp, sp, 16
@@ -7530,7 +7530,7 @@ define i1 @memcmp_ge_zero(ptr %s1, ptr %s2) nounwind {
 ; CHECK-UNALIGNED-RV32-V-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; CHECK-UNALIGNED-RV32-V-NEXT:    li a2, 4
 ; CHECK-UNALIGNED-RV32-V-NEXT:    call memcmp
-; CHECK-UNALIGNED-RV32-V-NEXT:    slti a0, a0, 0
+; CHECK-UNALIGNED-RV32-V-NEXT:    srli a0, a0, 31
 ; CHECK-UNALIGNED-RV32-V-NEXT:    xori a0, a0, 1
 ; CHECK-UNALIGNED-RV32-V-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; CHECK-UNALIGNED-RV32-V-NEXT:    addi sp, sp, 16
@@ -7542,7 +7542,7 @@ define i1 @memcmp_ge_zero(ptr %s1, ptr %s2) nounwind {
 ; CHECK-UNALIGNED-RV64-V-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; CHECK-UNALIGNED-RV64-V-NEXT:    li a2, 4
 ; CHECK-UNALIGNED-RV64-V-NEXT:    call memcmp
-; CHECK-UNALIGNED-RV64-V-NEXT:    slti a0, a0, 0
+; CHECK-UNALIGNED-RV64-V-NEXT:    srli a0, a0, 63
 ; CHECK-UNALIGNED-RV64-V-NEXT:    xori a0, a0, 1
 ; CHECK-UNALIGNED-RV64-V-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; CHECK-UNALIGNED-RV64-V-NEXT:    addi sp, sp, 16
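
The *_ge_zero functions in memcmp.ll are the complementary test: result >= 0 is the
inverted sign bit, so only the sign-bit extraction changes and the trailing
xori a0, a0, 1 stays as it was. A hypothetical reduction (again, not taken from the
test file):

    define i1 @cmp_ge_zero(ptr %s1, ptr %s2) nounwind {
      %r = call signext i32 @memcmp(ptr %s1, ptr %s2, i64 4)
      %c = icmp sgt i32 %r, -1   ; r >= 0, i.e. the sign bit is clear
      ret i1 %c
    }
    declare signext i32 @memcmp(ptr, ptr, i64)
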
diff --git a/llvm/test/CodeGen/RISCV/min-max.ll b/llvm/test/CodeGen/RISCV/min-max.ll
index 0115b48b7124c..acde8adf5d08b 100644
--- a/llvm/test/CodeGen/RISCV/min-max.ll
+++ b/llvm/test/CodeGen/RISCV/min-max.ll
@@ -642,7 +642,7 @@ define signext i32 @smin_i32_negone(i32 signext %a) {
 define i64 @smin_i64_negone(i64 %a) {
 ; RV32I-LABEL: smin_i64_negone:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    slti a2, a1, 0
+; RV32I-NEXT:    srli a2, a1, 31
 ; RV32I-NEXT:    addi a2, a2, -1
 ; RV32I-NEXT:    or a0, a2, a0
 ; RV32I-NEXT:    slti a2, a1, -1
@@ -661,7 +661,7 @@ define i64 @smin_i64_negone(i64 %a) {
 ; RV32ZBB:       # %bb.0:
 ; RV32ZBB-NEXT:    li a2, -1
 ; RV32ZBB-NEXT:    min a2, a1, a2
-; RV32ZBB-NEXT:    slti a1, a1, 0
+; RV32ZBB-NEXT:    srli a1, a1, 31
 ; RV32ZBB-NEXT:    addi a1, a1, -1
 ; RV32ZBB-NEXT:    or a0, a1, a0
 ; RV32ZBB-NEXT:    mv a1, a2
diff --git a/llvm/test/CodeGen/RISCV/pr84653_pr85190.ll b/llvm/test/CodeGen/RISCV/pr84653_pr85190.ll
index 30a9355734772..f84673635fbbd 100644
--- a/llvm/test/CodeGen/RISCV/pr84653_pr85190.ll
+++ b/llvm/test/CodeGen/RISCV/pr84653_pr85190.ll
@@ -39,7 +39,7 @@ define i1 @pr85190(i64 %a) {
 ; CHECK-NOZBB-LABEL: pr85190:
 ; CHECK-NOZBB:       # %bb.0:
 ; CHECK-NOZBB-NEXT:    ori a1, a0, 7
-; CHECK-NOZBB-NEXT:    slti a2, a0, 0
+; CHECK-NOZBB-NEXT:    srli a2, a0, 63
 ; CHECK-NOZBB-NEXT:    li a3, -1
 ; CHECK-NOZBB-NEXT:    slli a3, a3, 63
 ; CHECK-NOZBB-NEXT:    sub a3, a3, a1
diff --git a/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll
index 1736074ab1868..7ab3d7c694568 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll
@@ -431,7 +431,7 @@ define i64 @not_shl_one_i64(i64 %x) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi a1, a0, -32
 ; CHECK-NEXT:    li a2, 1
-; CHECK-NEXT:    slti a1, a1, 0
+; CHECK-NEXT:    srli a1, a1, 31
 ; CHECK-NEXT:    sll a0, a2, a0
 ; CHECK-NEXT:    neg a2, a1
 ; CHECK-NEXT:    addi a1, a1, -1
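
The not_shl_one_i64 change is representative of why the rewrite pays off even though
slti and srli are both single ALU instructions: srli can take a 16-bit encoding and
slti cannot. With the C (or Zca) extension, a shift that overwrites its own source
register in x8-x15 compresses:

    slti a1, a1, 0     # 4 bytes: slti has no compressed form
    srli a1, a1, 31    # 2 bytes: encodes as c.srli a1, 31

(The byte counts are an observation about the encodings, not something the test itself
checks.)
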
diff --git a/llvm/test/CodeGen/RISCV/rv32zbs.ll b/llvm/test/CodeGen/RISCV/rv32zbs.ll
index 1a3beeb79b85b..397a1c3ca42bc 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbs.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbs.ll
@@ -53,11 +53,11 @@ define i64 @bclr_i64(i64 %a, i64 %b) nounwind {
 ; RV32I-NEXT:    addi a5, a3, -32
 ; RV32I-NEXT:    sll a2, a4, a2
 ; RV32I-NEXT:    sll a3, a4, a3
-; RV32I-NEXT:    slti a4, a5, 0
-; RV32I-NEXT:    neg a5, a4
-; RV32I-NEXT:    addi a4, a4, -1
-; RV32I-NEXT:    and a2, a5, a2
-; RV32I-NEXT:    and a3, a4, a3
+; RV32I-NEXT:    srli a5, a5, 31
+; RV32I-NEXT:    neg a4, a5
+; RV32I-NEXT:    addi a5, a5, -1
+; RV32I-NEXT:    and a2, a4, a2
+; RV32I-NEXT:    and a3, a5, a3
 ; RV32I-NEXT:    not a2, a2
 ; RV32I-NEXT:    not a3, a3
 ; RV32I-NEXT:    and a0, a2, a0
@@ -70,7 +70,7 @@ define i64 @bclr_i64(i64 %a, i64 %b) nounwind {
 ; RV32ZBSNOZBB-NEXT:    bset a2, zero, a2
 ; RV32ZBSNOZBB-NEXT:    addi a4, a3, -32
 ; RV32ZBSNOZBB-NEXT:    bset a3, zero, a3
-; RV32ZBSNOZBB-NEXT:    slti a4, a4, 0
+; RV32ZBSNOZBB-NEXT:    srli a4, a4, 31
 ; RV32ZBSNOZBB-NEXT:    neg a5, a4
 ; RV32ZBSNOZBB-NEXT:    addi a4, a4, -1
 ; RV32ZBSNOZBB-NEXT:    and a2, a5, a2
@@ -87,7 +87,7 @@ define i64 @bclr_i64(i64 %a, i64 %b) nounwind {
 ; RV32ZBSZBB-NEXT:    bset a2, zero, a2
 ; RV32ZBSZBB-NEXT:    bset a4, zero, a3
 ; RV32ZBSZBB-NEXT:    addi a3, a3, -32
-; RV32ZBSZBB-NEXT:    slti a3, a3, 0
+; RV32ZBSZBB-NEXT:    srli a3, a3, 31
 ; RV32ZBSZBB-NEXT:    addi a5, a3, -1
 ; RV32ZBSZBB-NEXT:    neg a3, a3
 ; RV32ZBSZBB-NEXT:    and a4, a5, a4
@@ -188,7 +188,7 @@ define signext i64 @bset_i64_zero(i64 signext %a) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi a1, a0, -32
 ; RV32I-NEXT:    li a2, 1
-; RV32I-NEXT:    slti a1, a1, 0
+; RV32I-NEXT:    srli a1, a1, 31
 ; RV32I-NEXT:    sll a2, a2, a0
 ; RV32I-NEXT:    neg a0, a1
 ; RV32I-NEXT:    addi a1, a1, -1
@@ -200,11 +200,11 @@ define signext i64 @bset_i64_zero(i64 signext %a) nounwind {
 ; RV32ZBS:       # %bb.0:
 ; RV32ZBS-NEXT:    addi a1, a0, -32
 ; RV32ZBS-NEXT:    bset a2, zero, a0
-; RV32ZBS-NEXT:    slti a0, a1, 0
-; RV32ZBS-NEXT:    neg a1, a0
-; RV32ZBS-NEXT:    addi a3, a0, -1
-; RV32ZBS-NEXT:    and a0, a1, a2
-; RV32ZBS-NEXT:    and a1, a3, a2
+; RV32ZBS-NEXT:    srli a1, a1, 31
+; RV32ZBS-NEXT:    neg a0, a1
+; RV32ZBS-NEXT:    addi a1, a1, -1
+; RV32ZBS-NEXT:    and a0, a0, a2
+; RV32ZBS-NEXT:    and a1, a1, a2
 ; RV32ZBS-NEXT:    ret
   %shl = shl i64 1, %a
   ret i64 %shl
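
In bclr_i64 the shifted-out sign bit drives two masks at once, selecting which half of
the i64 receives the cleared bit. With the registers renumbered by this patch, the
RV32I sequence is:

    srli a5, a5, 31    # a5 = 1 if the bit index is < 32, else 0
    neg  a4, a5        # a4 = all-ones when the index is < 32
    addi a5, a5, -1    # a5 = all-ones when the index is >= 32
    and  a2, a4, a2    # low-word bit mask survives only for index < 32
    and  a3, a5, a3    # high-word bit mask survives only for index >= 32

As in the other hunks, the existing mask arithmetic is unchanged; only the producer of
the 0/1 value and the register assignment differ.
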
diff --git a/llvm/test/CodeGen/RISCV/rv64-double-convert.ll b/llvm/test/CodeGen/RISCV/rv64-double-convert.ll
index dd49d9e3e2dce..caa6c2f8ff96f 100644
--- a/llvm/test/CodeGen/RISCV/rv64-double-convert.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-double-convert.ll
@@ -97,7 +97,7 @@ define i128 @fptosi_sat_f64_to_i128(double %a) nounwind {
 ; RV64I-NEXT:    mv a1, s0
 ; RV64I-NEXT:    call __unorddf2
 ; RV64I-NEXT:    snez a0, a0
-; RV64I-NEXT:    slti a1, s2, 0
+; RV64I-NEXT:    srli a1, s2, 63
 ; RV64I-NEXT:    sgtz a2, s4
 ; RV64I-NEXT:    addi a0, a0, -1
 ; RV64I-NEXT:    addi a3, a1, -1
@@ -207,7 +207,7 @@ define i128 @fptoui_sat_f64_to_i128(double %a) nounwind {
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __gedf2
-; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    srli a0, a0, 63
 ; RV64I-NEXT:    addi s2, a0, -1
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __fixunsdfti
diff --git a/llvm/test/CodeGen/RISCV/rv64-float-convert.ll b/llvm/test/CodeGen/RISCV/rv64-float-convert.ll
index 896e371452db8..ebda78528810f 100644
--- a/llvm/test/CodeGen/RISCV/rv64-float-convert.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-float-convert.ll
@@ -95,7 +95,7 @@ define i128 @fptosi_sat_f32_to_i128(float %a) nounwind {
 ; RV64I-NEXT:    mv a1, s1
 ; RV64I-NEXT:    call __unordsf2
 ; RV64I-NEXT:    snez a0, a0
-; RV64I-NEXT:    slti a1, s2, 0
+; RV64I-NEXT:    srli a1, s2, 63
 ; RV64I-NEXT:    sgtz a2, s4
 ; RV64I-NEXT:    addi a0, a0, -1
 ; RV64I-NEXT:    addi a3, a1, -1
@@ -209,7 +209,7 @@ define i128 @fptoui_sat_f32_to_i128(float %a) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __gesf2
-; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    srli a0, a0, 63
 ; RV64I-NEXT:    addi s2, a0, -1
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __fixunssfti
diff --git a/llvm/test/CodeGen/RISCV/rv64-half-convert.ll b/llvm/test/CodeGen/RISCV/rv64-half-convert.ll
index 57061e1bde83a..b210f17932d1e 100644
--- a/llvm/test/CodeGen/RISCV/rv64-half-convert.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-half-convert.ll
@@ -173,13 +173,13 @@ define i128 @fptosi_sat_f16_to_i128(half %a) nounwind {
 ; RV64I-NEXT:    call __unordsf2
 ; RV64I-NEXT:    snez a0, a0
 ; RV64I-NEXT:    sgtz a1, s4
-; RV64I-NEXT:    slti a2, s0, 0
+; RV64I-NEXT:    srli s0, s0, 63
 ; RV64I-NEXT:    addi a0, a0, -1
-; RV64I-NEXT:    neg a3, a1
-; RV64I-NEXT:    addi a2, a2, -1
+; RV64I-NEXT:    neg a2, a1
+; RV64I-NEXT:    addi s0, s0, -1
 ; RV64I-NEXT:    and a1, a0, s3
-; RV64I-NEXT:    and a2, a2, s1
-; RV64I-NEXT:    or a2, a3, a2
+; RV64I-NEXT:    and s0, s0, s1
+; RV64I-NEXT:    or a2, a2, s0
 ; RV64I-NEXT:    and a0, a0, a2
 ; RV64I-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
@@ -288,7 +288,7 @@ define i128 @fptoui_sat_f16_to_i128(half %a) nounwind {
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    li a1, 0
 ; RV64I-NEXT:    call __gesf2
-; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    srli a0, a0, 63
 ; RV64I-NEXT:    addi s2, a0, -1
 ; RV64I-NEXT:    mv a0, s0
 ; RV64I-NEXT:    call __fixunssfti
diff --git a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
index f481f9cff5de1..40e0662711518 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
@@ -2327,7 +2327,7 @@ define <2 x i64> @stest_f64i64(<2 x double> %x) {
 ; CHECK-NOV-NEXT:    srli a3, a0, 1
 ; CHECK-NOV-NEXT:    beqz a1, .LBB18_3
 ; CHECK-NOV-NEXT:  # %bb.1: # %entry
-; CHECK-NOV-NEXT:    slti a4, a1, 0
+; CHECK-NOV-NEXT:    srli a4, a1, 63
 ; CHECK-NOV-NEXT:    bnez s1, .LBB18_4
 ; CHECK-NOV-NEXT:  .LBB18_2:
 ; CHECK-NOV-NEXT:    sltu a5, s0, a3
@@ -2337,7 +2337,7 @@ define <2 x i64> @stest_f64i64(<2 x double> %x) {
 ; CHECK-NOV-NEXT:    sltu a4, a2, a3
 ; CHECK-NOV-NEXT:    beqz s1, .LBB18_2
 ; CHECK-NOV-NEXT:  .LBB18_4: # %entry
-; CHECK-NOV-NEXT:    slti a5, s1, 0
+; CHECK-NOV-NEXT:    srli a5, s1, 63
 ; CHECK-NOV-NEXT:    bnez a5, .LBB18_6
 ; CHECK-NOV-NEXT:  .LBB18_5: # %entry
 ; CHECK-NOV-NEXT:    mv s0, a3
@@ -2353,8 +2353,8 @@ define <2 x i64> @stest_f64i64(<2 x double> %x) {
 ; CHECK-NOV-NEXT:    slli a1, a0, 63
 ; CHECK-NOV-NEXT:    beq a5, a0, .LBB18_11
 ; CHECK-NOV-NEXT:  # %bb.9: # %entry
-; CHECK-NOV-NEXT:    slti a3, a5, 0
-; CHECK-NOV-NEXT:    xori a3, a3, 1
+; CHECK-NOV-NEXT:    srli a5, a5, 63
+; CHECK-NOV-NEXT:    xori a3, a5, 1
 ; CHECK-NOV-NEXT:    bne a4, a0, .LBB18_12
 ; CHECK-NOV-NEXT:  .LBB18_10:
 ; CHECK-NOV-NEXT:    sltu a0, a1, s0
@@ -2364,8 +2364,8 @@ define <2 x i64> @stest_f64i64(<2 x double> %x) {
 ; CHECK-NOV-NEXT:    sltu a3, a1, a2
 ; CHECK-NOV-NEXT:    beq a4, a0, .LBB18_10
 ; CHECK-NOV-NEXT:  .LBB18_12: # %entry
-; CHECK-NOV-NEXT:    slti a0, a4, 0
-; CHECK-NOV-NEXT:    xori a0, a0, 1
+; CHECK-NOV-NEXT:    srli a4, a4, 63
+; CHECK-NOV-NEXT:    xori a0, a4, 1
 ; CHECK-NOV-NEXT:    bnez a0, .LBB18_14
 ; CHECK-NOV-NEXT:  .LBB18_13: # %entry
 ; CHECK-NOV-NEXT:    mv s0, a1
@@ -2415,7 +2415,7 @@ define <2 x i64> @stest_f64i64(<2 x double> %x) {
 ; CHECK-V-NEXT:    srli a3, a2, 1
 ; CHECK-V-NEXT:    beqz a1, .LBB18_3
 ; CHECK-V-NEXT:  # %bb.1: # %entry
-; CHECK-V-NEXT:    slti a4, a1, 0
+; CHECK-V-NEXT:    srli a4, a1, 63
 ; CHECK-V-NEXT:    bnez s1, .LBB18_4
 ; CHECK-V-NEXT:  .LBB18_2:
 ; CHECK-V-NEXT:    sltu a5, s0, a3
@@ -2425,7 +2425,7 @@ define <2 x i64> @stest_f64i64(<2 x double> %x) {
 ; CHECK-V-NEXT:    sltu a4, a0, a3
 ; CHECK-V-NEXT:    beqz s1, .LBB18_2
 ; CHECK-V-NEXT:  .LBB18_4: # %entry
-; CHECK-V-NEXT:    slti a5, s1, 0
+; CHECK-V-NEXT:    srli a5, s1, 63
 ; CHECK-V-NEXT:    bnez a5, .LBB18_6
 ; CHECK-V-NEXT:  .LBB18_5: # %entry
 ; CHECK-V-NEXT:    mv s0, a3
@@ -2441,8 +2441,8 @@ define <2 x i64> @stest_f64i64(<2 x double> %x) {
 ; CHECK-V-NEXT:    slli a1, a2, 63
 ; CHECK-V-NEXT:    beq a5, a2, .LBB18_11
 ; CHECK-V-NEXT:  # %bb.9: # %entry
-; CHECK-V-NEXT:    slti a3, a5, 0
-; CHECK-V-NEXT:    xori a3, a3, 1
+; CHECK-V-NEXT:    srli a5, a5, 63
+; CHECK-V-NEXT:    xori a3, a5, 1
 ; CHECK-V-NEXT:    bne a4, a2, .LBB18_12
 ; CHECK-V-NEXT:  .LBB18_10:
 ; CHECK-V-NEXT:    sltu a2, a1, s0
@@ -2452,8 +2452,8 @@ define <2 x i64> @stest_f64i64(<2 x double> %x) {
 ; CHECK-V-NEXT:    sltu a3, a1, a0
 ; CHECK-V-NEXT:    beq a4, a2, .LBB18_10
 ; CHECK-V-NEXT:  .LBB18_12: # %entry
-; CHECK-V-NEXT:    slti a2, a4, 0
-; CHECK-V-NEXT:    xori a2, a2, 1
+; CHECK-V-NEXT:    srli a4, a4, 63
+; CHECK-V-NEXT:    xori a2, a4, 1
 ; CHECK-V-NEXT:    bnez a2, .LBB18_14
 ; CHECK-V-NEXT:  .LBB18_13: # %entry
 ; CHECK-V-NEXT:    mv s0, a1
@@ -2749,7 +2749,7 @@ define <2 x i64> @stest_f32i64(<2 x float> %x) {
 ; CHECK-NOV-NEXT:    srli a3, a0, 1
 ; CHECK-NOV-NEXT:    beqz a1, .LBB21_3
 ; CHECK-NOV-NEXT:  # %bb.1: # %entry
-; CHECK-NOV-NEXT:    slti a4, a1, 0
+; CHECK-NOV-NEXT:    srli a4, a1, 63
 ; CHECK-NOV-NEXT:    bnez s1, .LBB21_4
 ; CHECK-NOV-NEXT:  .LBB21_2:
 ; CHECK-NOV-NEXT:    sltu a5, s0, a3
@@ -2759,7 +2759,7 @@ define <2 x i64> @stest_f32i64(<2 x float> %x) {
 ; CHECK-NOV-NEXT:    sltu a4, a2, a3
 ; CHECK-NOV-NEXT:    beqz s1, .LBB21_2
 ; CHECK-NOV-NEXT:  .LBB21_4: # %entry
-; CHECK-NOV-NEXT:    slti a5, s1, 0
+; CHECK-NOV-NEXT:    srli a5, s1, 63
 ; CHECK-NOV-NEXT:    bnez a5, .LBB21_6
 ; CHECK-NOV-NEXT:  .LBB21_5: # %entry
 ; CHECK-NOV-NEXT:    mv s0, a3
@@ -2775,8 +2775,8 @@ define <2 x i64> @stest_f32i64(<2 x float> %x) {
 ; CHECK-NOV-NEXT:    slli a1, a0, 63
 ; CHECK-NOV-NEXT:    beq a5, a0, .LBB21_11
 ; CHECK-NOV-NEXT:  # %bb.9: # %entry
-; CHECK-NOV-NEXT:    slti a3, a5, 0
-; CHECK-NOV-NEXT:    xori a3, a3, 1
+; CHECK-NOV-NEXT:    srli a5, a5, 63
+; CHECK-NOV-NEXT:    xori a3, a5, 1
 ; CHECK-NOV-NEXT:    bne a4, a0, .LBB21_12
 ; CHECK-NOV-NEXT:  .LBB21_10:
 ; CHECK-NOV-NEXT:    sltu a0, a1, s0
@@ -2786,8 +2786,8 @@ define <2 x i64> @stest_f32i64(<2 x float> %x) {
 ; CHECK-NOV-NEXT:    sltu a3, a1, a2
 ; CHECK-NOV-NEXT:    beq a4, a0, .LBB21_10
 ; CHECK-NOV-NEXT:  .LBB21_12: # %entry
-; CHECK-NOV-NEXT:    slti a0, a4, 0
-; CHECK-NOV-NEXT:    xori a0, a0, 1
+; CHECK-NOV-NEXT:    srli a4, a4, 63
+; CHECK-NOV-NEXT:    xori a0, a4, 1
 ; CHECK-NOV-NEXT:    bnez a0, .LBB21_14
 ; CHECK-NOV-NEXT:  .LBB21_13: # %entry
 ; CHECK-NOV-NEXT:    mv s0, a1
@@ -2837,7 +2837,7 @@ define <2 x i64> @stest_f32i64(<2 x float> %x) {
 ; CHECK-V-NEXT:    srli a3, a2, 1
 ; CHECK-V-NEXT:    beqz a1, .LBB21_3
 ; CHECK-V-NEXT:  # %bb.1: # %entry
-; CHECK-V-NEXT:    slti a4, a1, 0
+; CHECK-V-NEXT:    srli a4, a1, 63
 ; CHECK-V-NEXT:    bnez s1, .LBB21_4
 ; CHECK-V-NEXT:  .LBB21_2:
 ; CHECK-V-NEXT:    sltu a5, s0, a3
@@ -2847,7 +2847,7 @@ define <2 x i64> @stest_f32i64(<2 x float> %x) {
 ; CHECK-V-NEXT:    sltu a4, a0, a3
 ; CHECK-V-NEXT:    beqz s1, .LBB21_2
 ; CHECK-V-NEXT:  .LBB21_4: # %entry
-; CHECK-V-NEXT:    slti a5, s1, 0
+; CHECK-V-NEXT:    srli a5, s1, 63
 ; CHECK-V-NEXT:    bnez a5, .LBB21_6
 ; CHECK-V-NEXT:  .LBB21_5: # %entry
 ; CHECK-V-NEXT:    mv s0, a3
@@ -2863,8 +2863,8 @@ define <2 x i64> @stest_f32i64(<2 x float> %x) {
 ; CHECK-V-NEXT:    slli a1, a2, 63
 ; CHECK-V-NEXT:    beq a5, a2, .LBB21_11
 ; CHECK-V-NEXT:  # %bb.9: # %entry
-; CHECK-V-NEXT:    slti a3, a5, 0
-; CHECK-V-NEXT:    xori a3, a3, 1
+; CHECK-V-NEXT:    srli a5, a5, 63
+; CHECK-V-NEXT:    xori a3, a5, 1
 ; CHECK-V-NEXT:    bne a4, a2, .LBB21_12
 ; CHECK-V-NEXT:  .LBB21_10:
 ; CHECK-V-NEXT:    sltu a2, a1, s0
@@ -2874,8 +2874,8 @@ define <2 x i64> @stest_f32i64(<2 x float> %x) {
 ; CHECK-V-NEXT:    sltu a3, a1, a0
 ; CHECK-V-NEXT:    beq a4, a2, .LBB21_10
 ; CHECK-V-NEXT:  .LBB21_12: # %entry
-; CHECK-V-NEXT:    slti a2, a4, 0
-; CHECK-V-NEXT:    xori a2, a2, 1
+; CHECK-V-NEXT:    srli a4, a4, 63
+; CHECK-V-NEXT:    xori a2, a4, 1
 ; CHECK-V-NEXT:    bnez a2, .LBB21_14
 ; CHECK-V-NEXT:  .LBB21_13: # %entry
 ; CHECK-V-NEXT:    mv s0, a1
@@ -3174,7 +3174,7 @@ define <2 x i64> @stest_f16i64(<2 x half> %x) {
 ; CHECK-NOV-NEXT:    srli a3, a0, 1
 ; CHECK-NOV-NEXT:    beqz a1, .LBB24_3
 ; CHECK-NOV-NEXT:  # %bb.1: # %entry
-; CHECK-NOV-NEXT:    slti a4, a1, 0
+; CHECK-NOV-NEXT:    srli a4, a1, 63
 ; CHECK-NOV-NEXT:    bnez s1, .LBB24_4
 ; CHECK-NOV-NEXT:  .LBB24_2:
 ; CHECK-NOV-NEXT:    sltu a5, s0, a3
@@ -3184,7 +3184,7 @@ define <2 x i64> @stest_f16i64(<2 x half> %x) {
 ; CHECK-NOV-NEXT:    sltu a4, a2, a3
 ; CHECK-NOV-NEXT:    beqz s1, .LBB24_2
 ; CHECK-NOV-NEXT:  .LBB24_4: # %entry
-; CHECK-NOV-NEXT:    slti a5, s1, 0
+; CHECK-NOV-NEXT:    srli a5, s1, 63
 ; CHECK-NOV-NEXT:    bnez a5, .LBB24_6
 ; CHECK-NOV-NEXT:  .LBB24_5: # %entry
 ; CHECK-NOV-NEXT:    mv s0, a3
@@ -3200,8 +3200,8 @@ define <2 x i64> @stest_f16i64(<2 x half> %x) {
 ; CHECK-NOV-NEXT:    slli a1, a0, 63
 ; CHECK-NOV-NEXT:    beq a5, a0, .LBB24_11
 ; CHECK-NOV-NEXT:  # %bb.9: # %entry
-; CHECK-NOV-NEXT:    slti a3, a5, 0
-; CHECK-NOV-NEXT:    xori a3, a3, 1
+; CHECK-NOV-NEXT:    srli a5, a5, 63
+; CHECK-NOV-NEXT:    xori a3, a5, 1
 ; CHECK-NOV-NEXT:    bne a4, a0, .LBB24_12
 ; CHECK-NOV-NEXT:  .LBB24_10:
 ; CHECK-NOV-NEXT:    sltu a0, a1, s0
@@ -3211,8 +3211,8 @@ define <2 x i64> @stest_f16i64(<2 x half> %x) {
 ; CHECK-NOV-NEXT:    sltu a3, a1, a2
 ; CHECK-NOV-NEXT:    beq a4, a0, .LBB24_10
 ; CHECK-NOV-NEXT:  .LBB24_12: # %entry
-; CHECK-NOV-NEXT:    slti a0, a4, 0
-; CHECK-NOV-NEXT:    xori a0, a0, 1
+; CHECK-NOV-NEXT:    srli a4, a4, 63
+; CHECK-NOV-NEXT:    xori a0, a4, 1
 ; CHECK-NOV-NEXT:    bnez a0, .LBB24_14
 ; CHECK-NOV-NEXT:  .LBB24_13: # %entry
 ; CHECK-NOV-NEXT:    mv s0, a1
@@ -3260,7 +3260,7 @@ define <2 x i64> @stest_f16i64(<2 x half> %x) {
 ; CHECK-V-NEXT:    srli a3, a2, 1
 ; CHECK-V-NEXT:    beqz a1, .LBB24_3
 ; CHECK-V-NEXT:  # %bb.1: # %entry
-; CHECK-V-NEXT:    slti a4, a1, 0
+; CHECK-V-NEXT:    srli a4, a1, 63
 ; CHECK-V-NEXT:    bnez s1, .LBB24_4
 ; CHECK-V-NEXT:  .LBB24_2:
 ; CHECK-V-NEXT:    sltu a5, s0, a3
@@ -3270,7 +3270,7 @@ define <2 x i64> @stest_f16i64(<2 x half> %x) {
 ; CHECK-V-NEXT:    sltu a4, a0, a3
 ; CHECK-V-NEXT:    beqz s1, .LBB24_2
 ; CHECK-V-NEXT:  .LBB24_4: # %entry
-; CHECK-V-NEXT:    slti a5, s1, 0
+; CHECK-V-NEXT:    srli a5, s1, 63
 ; CHECK-V-NEXT:    bnez a5, .LBB24_6
 ; CHECK-V-NEXT:  .LBB24_5: # %entry
 ; CHECK-V-NEXT:    mv s0, a3
@@ -3286,8 +3286,8 @@ define <2 x i64> @stest_f16i64(<2 x half> %x) {
 ; CHECK-V-NEXT:    slli a1, a2, 63
 ; CHECK-V-NEXT:    beq a5, a2, .LBB24_11
 ; CHECK-V-NEXT:  # %bb.9: # %entry
-; CHECK-V-NEXT:    slti a3, a5, 0
-; CHECK-V-NEXT:    xori a3, a3, 1
+; CHECK-V-NEXT:    srli a5, a5, 63
+; CHECK-V-NEXT:    xori a3, a5, 1
 ; CHECK-V-NEXT:    bne a4, a2, .LBB24_12
 ; CHECK-V-NEXT:  .LBB24_10:
 ; CHECK-V-NEXT:    sltu a2, a1, s0
@@ -3297,8 +3297,8 @@ define <2 x i64> @stest_f16i64(<2 x half> %x) {
 ; CHECK-V-NEXT:    sltu a3, a1, a0
 ; CHECK-V-NEXT:    beq a4, a2, .LBB24_10
 ; CHECK-V-NEXT:  .LBB24_12: # %entry
-; CHECK-V-NEXT:    slti a2, a4, 0
-; CHECK-V-NEXT:    xori a2, a2, 1
+; CHECK-V-NEXT:    srli a4, a4, 63
+; CHECK-V-NEXT:    xori a2, a4, 1
 ; CHECK-V-NEXT:    bnez a2, .LBB24_14
 ; CHECK-V-NEXT:  .LBB24_13: # %entry
 ; CHECK-V-NEXT:    mv s0, a1
@@ -5864,7 +5864,7 @@ define <2 x i64> @stest_f64i64_mm(<2 x double> %x) {
 ; CHECK-NOV-NEXT:    srli a3, a0, 1
 ; CHECK-NOV-NEXT:    beqz a1, .LBB45_2
 ; CHECK-NOV-NEXT:  # %bb.1: # %entry
-; CHECK-NOV-NEXT:    slti a4, a1, 0
+; CHECK-NOV-NEXT:    srli a4, a1, 63
 ; CHECK-NOV-NEXT:    beqz a4, .LBB45_3
 ; CHECK-NOV-NEXT:    j .LBB45_4
 ; CHECK-NOV-NEXT:  .LBB45_2:
@@ -5875,7 +5875,7 @@ define <2 x i64> @stest_f64i64_mm(<2 x double> %x) {
 ; CHECK-NOV-NEXT:  .LBB45_4: # %entry
 ; CHECK-NOV-NEXT:    beqz s1, .LBB45_6
 ; CHECK-NOV-NEXT:  # %bb.5: # %entry
-; CHECK-NOV-NEXT:    slti a6, s1, 0
+; CHECK-NOV-NEXT:    srli a6, s1, 63
 ; CHECK-NOV-NEXT:    j .LBB45_7
 ; CHECK-NOV-NEXT:  .LBB45_6:
 ; CHECK-NOV-NEXT:    sltu a6, s0, a3
@@ -5890,7 +5890,7 @@ define <2 x i64> @stest_f64i64_mm(<2 x double> %x) {
 ; CHECK-NOV-NEXT:    slli a3, a0, 63
 ; CHECK-NOV-NEXT:    beq a5, a0, .LBB45_11
 ; CHECK-NOV-NEXT:  # %bb.10: # %entry
-; CHECK-NOV-NEXT:    slti a5, a5, 0
+; CHECK-NOV-NEXT:    srli a5, a5, 63
 ; CHECK-NOV-NEXT:    xori a5, a5, 1
 ; CHECK-NOV-NEXT:    and a1, a4, a1
 ; CHECK-NOV-NEXT:    beqz a5, .LBB45_12
@@ -5904,8 +5904,8 @@ define <2 x i64> @stest_f64i64_mm(<2 x double> %x) {
 ; CHECK-NOV-NEXT:  .LBB45_13: # %entry
 ; CHECK-NOV-NEXT:    beq a1, a0, .LBB45_15
 ; CHECK-NOV-NEXT:  # %bb.14: # %entry
-; CHECK-NOV-NEXT:    slti a0, a1, 0
-; CHECK-NOV-NEXT:    xori a0, a0, 1
+; CHECK-NOV-NEXT:    srli a1, a1, 63
+; CHECK-NOV-NEXT:    xori a0, a1, 1
 ; CHECK-NOV-NEXT:    beqz a0, .LBB45_16
 ; CHECK-NOV-NEXT:    j .LBB45_17
 ; CHECK-NOV-NEXT:  .LBB45_15:
@@ -5955,7 +5955,7 @@ define <2 x i64> @stest_f64i64_mm(<2 x double> %x) {
 ; CHECK-V-NEXT:    srli a3, a2, 1
 ; CHECK-V-NEXT:    beqz a1, .LBB45_2
 ; CHECK-V-NEXT:  # %bb.1: # %entry
-; CHECK-V-NEXT:    slti a4, a1, 0
+; CHECK-V-NEXT:    srli a4, a1, 63
 ; CHECK-V-NEXT:    beqz a4, .LBB45_3
 ; CHECK-V-NEXT:    j .LBB45_4
 ; CHECK-V-NEXT:  .LBB45_2:
@@ -5966,7 +5966,7 @@ define <2 x i64> @stest_f64i64_mm(<2 x double> %x) {
 ; CHECK-V-NEXT:  .LBB45_4: # %entry
 ; CHECK-V-NEXT:    beqz s1, .LBB45_6
 ; CHECK-V-NEXT:  # %bb.5: # %entry
-; CHECK-V-NEXT:    slti a6, s1, 0
+; CHECK-V-NEXT:    srli a6, s1, 63
 ; CHECK-V-NEXT:    j .LBB45_7
 ; CHECK-V-NEXT:  .LBB45_6:
 ; CHECK-V-NEXT:    sltu a6, s0, a3
@@ -5981,7 +5981,7 @@ define <2 x i64> @stest_f64i64_mm(<2 x double> %x) {
 ; CHECK-V-NEXT:    slli a3, a2, 63
 ; CHECK-V-NEXT:    beq a5, a2, .LBB45_11
 ; CHECK-V-NEXT:  # %bb.10: # %entry
-; CHECK-V-NEXT:    slti a5, a5, 0
+; CHECK-V-NEXT:    srli a5, a5, 63
 ; CHECK-V-NEXT:    xori a5, a5, 1
 ; CHECK-V-NEXT:    and a1, a4, a1
 ; CHECK-V-NEXT:    beqz a5, .LBB45_12
@@ -5995,7 +5995,7 @@ define <2 x i64> @stest_f64i64_mm(<2 x double> %x) {
 ; CHECK-V-NEXT:  .LBB45_13: # %entry
 ; CHECK-V-NEXT:    beq a1, a2, .LBB45_15
 ; CHECK-V-NEXT:  # %bb.14: # %entry
-; CHECK-V-NEXT:    slti a1, a1, 0
+; CHECK-V-NEXT:    srli a1, a1, 63
 ; CHECK-V-NEXT:    xori a1, a1, 1
 ; CHECK-V-NEXT:    beqz a1, .LBB45_16
 ; CHECK-V-NEXT:    j .LBB45_17
@@ -6153,8 +6153,8 @@ define <2 x i64> @ustest_f64i64_mm(<2 x double> %x) {
 ; CHECK-NOV-NEXT:  .LBB47_4: # %entry
 ; CHECK-NOV-NEXT:    slti a1, a1, 1
 ; CHECK-NOV-NEXT:    slti a4, s1, 1
-; CHECK-NOV-NEXT:    slti a3, a3, 0
-; CHECK-NOV-NEXT:    slti a2, a2, 0
+; CHECK-NOV-NEXT:    srli a3, a3, 63
+; CHECK-NOV-NEXT:    srli a2, a2, 63
 ; CHECK-NOV-NEXT:    neg a1, a1
 ; CHECK-NOV-NEXT:    neg a4, a4
 ; CHECK-NOV-NEXT:    addi a3, a3, -1
@@ -6210,8 +6210,8 @@ define <2 x i64> @ustest_f64i64_mm(<2 x double> %x) {
 ; CHECK-V-NEXT:  .LBB47_4: # %entry
 ; CHECK-V-NEXT:    slti a1, a1, 1
 ; CHECK-V-NEXT:    slti a4, s1, 1
-; CHECK-V-NEXT:    slti a3, a3, 0
-; CHECK-V-NEXT:    slti a2, a2, 0
+; CHECK-V-NEXT:    srli a3, a3, 63
+; CHECK-V-NEXT:    srli a2, a2, 63
 ; CHECK-V-NEXT:    neg a1, a1
 ; CHECK-V-NEXT:    neg a4, a4
 ; CHECK-V-NEXT:    addi a3, a3, -1
@@ -6268,7 +6268,7 @@ define <2 x i64> @stest_f32i64_mm(<2 x float> %x) {
 ; CHECK-NOV-NEXT:    srli a3, a0, 1
 ; CHECK-NOV-NEXT:    beqz a1, .LBB48_2
 ; CHECK-NOV-NEXT:  # %bb.1: # %entry
-; CHECK-NOV-NEXT:    slti a4, a1, 0
+; CHECK-NOV-NEXT:    srli a4, a1, 63
 ; CHECK-NOV-NEXT:    beqz a4, .LBB48_3
 ; CHECK-NOV-NEXT:    j .LBB48_4
 ; CHECK-NOV-NEXT:  .LBB48_2:
@@ -6279,7 +6279,7 @@ define <2 x i64> @stest_f32i64_mm(<2 x float> %x) {
 ; CHECK-NOV-NEXT:  .LBB48_4: # %entry
 ; CHECK-NOV-NEXT:    beqz s1, .LBB48_6
 ; CHECK-NOV-NEXT:  # %bb.5: # %entry
-; CHECK-NOV-NEXT:    slti a6, s1, 0
+; CHECK-NOV-NEXT:    srli a6, s1, 63
 ; CHECK-NOV-NEXT:    j .LBB48_7
 ; CHECK-NOV-NEXT:  .LBB48_6:
 ; CHECK-NOV-NEXT:    sltu a6, s0, a3
@@ -6294,7 +6294,7 @@ define <2 x i64> @stest_f32i64_mm(<2 x float> %x) {
 ; CHECK-NOV-NEXT:    slli a3, a0, 63
 ; CHECK-NOV-NEXT:    beq a5, a0, .LBB48_11
 ; CHECK-NOV-NEXT:  # %bb.10: # %entry
-; CHECK-NOV-NEXT:    slti a5, a5, 0
+; CHECK-NOV-NEXT:    srli a5, a5, 63
 ; CHECK-NOV-NEXT:    xori a5, a5, 1
 ; CHECK-NOV-NEXT:    and a1, a4, a1
 ; CHECK-NOV-NEXT:    beqz a5, .LBB48_12
@@ -6308,8 +6308,8 @@ define <2 x i64> @stest_f32i64_mm(<2 x float> %x) {
 ; CHECK-NOV-NEXT:  .LBB48_13: # %entry
 ; CHECK-NOV-NEXT:    beq a1, a0, .LBB48_15
 ; CHECK-NOV-NEXT:  # %bb.14: # %entry
-; CHECK-NOV-NEXT:    slti a0, a1, 0
-; CHECK-NOV-NEXT:    xori a0, a0, 1
+; CHECK-NOV-NEXT:    srli a1, a1, 63
+; CHECK-NOV-NEXT:    xori a0, a1, 1
 ; CHECK-NOV-NEXT:    beqz a0, .LBB48_16
 ; CHECK-NOV-NEXT:    j .LBB48_17
 ; CHECK-NOV-NEXT:  .LBB48_15:
@@ -6359,7 +6359,7 @@ define <2 x i64> @stest_f32i64_mm(<2 x float> %x) {
 ; CHECK-V-NEXT:    srli a3, a2, 1
 ; CHECK-V-NEXT:    beqz a1, .LBB48_2
 ; CHECK-V-NEXT:  # %bb.1: # %entry
-; CHECK-V-NEXT:    slti a4, a1, 0
+; CHECK-V-NEXT:    srli a4, a1, 63
 ; CHECK-V-NEXT:    beqz a4, .LBB48_3
 ; CHECK-V-NEXT:    j .LBB48_4
 ; CHECK-V-NEXT:  .LBB48_2:
@@ -6370,7 +6370,7 @@ define <2 x i64> @stest_f32i64_mm(<2 x float> %x) {
 ; CHECK-V-NEXT:  .LBB48_4: # %entry
 ; CHECK-V-NEXT:    beqz s1, .LBB48_6
 ; CHECK-V-NEXT:  # %bb.5: # %entry
-; CHECK-V-NEXT:    slti a6, s1, 0
+; CHECK-V-NEXT:    srli a6, s1, 63
 ; CHECK-V-NEXT:    j .LBB48_7
 ; CHECK-V-NEXT:  .LBB48_6:
 ; CHECK-V-NEXT:    sltu a6, s0, a3
@@ -6385,7 +6385,7 @@ define <2 x i64> @stest_f32i64_mm(<2 x float> %x) {
 ; CHECK-V-NEXT:    slli a3, a2, 63
 ; CHECK-V-NEXT:    beq a5, a2, .LBB48_11
 ; CHECK-V-NEXT:  # %bb.10: # %entry
-; CHECK-V-NEXT:    slti a5, a5, 0
+; CHECK-V-NEXT:    srli a5, a5, 63
 ; CHECK-V-NEXT:    xori a5, a5, 1
 ; CHECK-V-NEXT:    and a1, a4, a1
 ; CHECK-V-NEXT:    beqz a5, .LBB48_12
@@ -6399,7 +6399,7 @@ define <2 x i64> @stest_f32i64_mm(<2 x float> %x) {
 ; CHECK-V-NEXT:  .LBB48_13: # %entry
 ; CHECK-V-NEXT:    beq a1, a2, .LBB48_15
 ; CHECK-V-NEXT:  # %bb.14: # %entry
-; CHECK-V-NEXT:    slti a1, a1, 0
+; CHECK-V-NEXT:    srli a1, a1, 63
 ; CHECK-V-NEXT:    xori a1, a1, 1
 ; CHECK-V-NEXT:    beqz a1, .LBB48_16
 ; CHECK-V-NEXT:    j .LBB48_17
@@ -6557,8 +6557,8 @@ define <2 x i64> @ustest_f32i64_mm(<2 x float> %x) {
 ; CHECK-NOV-NEXT:  .LBB50_4: # %entry
 ; CHECK-NOV-NEXT:    slti a1, a1, 1
 ; CHECK-NOV-NEXT:    slti a4, s1, 1
-; CHECK-NOV-NEXT:    slti a3, a3, 0
-; CHECK-NOV-NEXT:    slti a2, a2, 0
+; CHECK-NOV-NEXT:    srli a3, a3, 63
+; CHECK-NOV-NEXT:    srli a2, a2, 63
 ; CHECK-NOV-NEXT:    neg a1, a1
 ; CHECK-NOV-NEXT:    neg a4, a4
 ; CHECK-NOV-NEXT:    addi a3, a3, -1
@@ -6614,8 +6614,8 @@ define <2 x i64> @ustest_f32i64_mm(<2 x float> %x) {
 ; CHECK-V-NEXT:  .LBB50_4: # %entry
 ; CHECK-V-NEXT:    slti a1, a1, 1
 ; CHECK-V-NEXT:    slti a4, s1, 1
-; CHECK-V-NEXT:    slti a3, a3, 0
-; CHECK-V-NEXT:    slti a2, a2, 0
+; CHECK-V-NEXT:    srli a3, a3, 63
+; CHECK-V-NEXT:    srli a2, a2, 63
 ; CHECK-V-NEXT:    neg a1, a1
 ; CHECK-V-NEXT:    neg a4, a4
 ; CHECK-V-NEXT:    addi a3, a3, -1
@@ -6675,7 +6675,7 @@ define <2 x i64> @stest_f16i64_mm(<2 x half> %x) {
 ; CHECK-NOV-NEXT:    srli a3, a0, 1
 ; CHECK-NOV-NEXT:    beqz a1, .LBB51_2
 ; CHECK-NOV-NEXT:  # %bb.1: # %entry
-; CHECK-NOV-NEXT:    slti a4, a1, 0
+; CHECK-NOV-NEXT:    srli a4, a1, 63
 ; CHECK-NOV-NEXT:    beqz a4, .LBB51_3
 ; CHECK-NOV-NEXT:    j .LBB51_4
 ; CHECK-NOV-NEXT:  .LBB51_2:
@@ -6686,7 +6686,7 @@ define <2 x i64> @stest_f16i64_mm(<2 x half> %x) {
 ; CHECK-NOV-NEXT:  .LBB51_4: # %entry
 ; CHECK-NOV-NEXT:    beqz s1, .LBB51_6
 ; CHECK-NOV-NEXT:  # %bb.5: # %entry
-; CHECK-NOV-NEXT:    slti a6, s1, 0
+; CHECK-NOV-NEXT:    srli a6, s1, 63
 ; CHECK-NOV-NEXT:    j .LBB51_7
 ; CHECK-NOV-NEXT:  .LBB51_6:
 ; CHECK-NOV-NEXT:    sltu a6, s0, a3
@@ -6701,7 +6701,7 @@ define <2 x i64> @stest_f16i64_mm(<2 x half> %x) {
 ; CHECK-NOV-NEXT:    slli a3, a0, 63
 ; CHECK-NOV-NEXT:    beq a5, a0, .LBB51_11
 ; CHECK-NOV-NEXT:  # %bb.10: # %entry
-; CHECK-NOV-NEXT:    slti a5, a5, 0
+; CHECK-NOV-NEXT:    srli a5, a5, 63
 ; CHECK-NOV-NEXT:    xori a5, a5, 1
 ; CHECK-NOV-NEXT:    and a1, a4, a1
 ; CHECK-NOV-NEXT:    beqz a5, .LBB51_12
@@ -6715,8 +6715,8 @@ define <2 x i64> @stest_f16i64_mm(<2 x half> %x) {
 ; CHECK-NOV-NEXT:  .LBB51_13: # %entry
 ; CHECK-NOV-NEXT:    beq a1, a0, .LBB51_15
 ; CHECK-NOV-NEXT:  # %bb.14: # %entry
-; CHECK-NOV-NEXT:    slti a0, a1, 0
-; CHECK-NOV-NEXT:    xori a0, a0, 1
+; CHECK-NOV-NEXT:    srli a1, a1, 63
+; CHECK-NOV-NEXT:    xori a0, a1, 1
 ; CHECK-NOV-NEXT:    beqz a0, .LBB51_16
 ; CHECK-NOV-NEXT:    j .LBB51_17
 ; CHECK-NOV-NEXT:  .LBB51_15:
@@ -6764,7 +6764,7 @@ define <2 x i64> @stest_f16i64_mm(<2 x half> %x) {
 ; CHECK-V-NEXT:    srli a3, a2, 1
 ; CHECK-V-NEXT:    beqz a1, .LBB51_2
 ; CHECK-V-NEXT:  # %bb.1: # %entry
-; CHECK-V-NEXT:    slti a4, a1, 0
+; CHECK-V-NEXT:    srli a4, a1, 63
 ; CHECK-V-NEXT:    beqz a4, .LBB51_3
 ; CHECK-V-NEXT:    j .LBB51_4
 ; CHECK-V-NEXT:  .LBB51_2:
@@ -6775,7 +6775,7 @@ define <2 x i64> @stest_f16i64_mm(<2 x half> %x) {
 ; CHECK-V-NEXT:  .LBB51_4: # %entry
 ; CHECK-V-NEXT:    beqz s1, .LBB51_6
 ; CHECK-V-NEXT:  # %bb.5: # %entry
-; CHECK-V-NEXT:    slti a6, s1, 0
+; CHECK-V-NEXT:    srli a6, s1, 63
 ; CHECK-V-NEXT:    j .LBB51_7
 ; CHECK-V-NEXT:  .LBB51_6:
 ; CHECK-V-NEXT:    sltu a6, s0, a3
@@ -6790,7 +6790,7 @@ define <2 x i64> @stest_f16i64_mm(<2 x half> %x) {
 ; CHECK-V-NEXT:    slli a3, a2, 63
 ; CHECK-V-NEXT:    beq a5, a2, .LBB51_11
 ; CHECK-V-NEXT:  # %bb.10: # %entry
-; CHECK-V-NEXT:    slti a5, a5, 0
+; CHECK-V-NEXT:    srli a5, a5, 63
 ; CHECK-V-NEXT:    xori a5, a5, 1
 ; CHECK-V-NEXT:    and a1, a4, a1
 ; CHECK-V-NEXT:    beqz a5, .LBB51_12
@@ -6804,7 +6804,7 @@ define <2 x i64> @stest_f16i64_mm(<2 x half> %x) {
 ; CHECK-V-NEXT:  .LBB51_13: # %entry
 ; CHECK-V-NEXT:    beq a1, a2, .LBB51_15
 ; CHECK-V-NEXT:  # %bb.14: # %entry
-; CHECK-V-NEXT:    slti a1, a1, 0
+; CHECK-V-NEXT:    srli a1, a1, 63
 ; CHECK-V-NEXT:    xori a1, a1, 1
 ; CHECK-V-NEXT:    beqz a1, .LBB51_16
 ; CHECK-V-NEXT:    j .LBB51_17
@@ -6960,8 +6960,8 @@ define <2 x i64> @ustest_f16i64_mm(<2 x half> %x) {
 ; CHECK-NOV-NEXT:  .LBB53_4: # %entry
 ; CHECK-NOV-NEXT:    slti a1, a1, 1
 ; CHECK-NOV-NEXT:    slti a4, s1, 1
-; CHECK-NOV-NEXT:    slti a3, a3, 0
-; CHECK-NOV-NEXT:    slti a2, a2, 0
+; CHECK-NOV-NEXT:    srli a3, a3, 63
+; CHECK-NOV-NEXT:    srli a2, a2, 63
 ; CHECK-NOV-NEXT:    neg a1, a1
 ; CHECK-NOV-NEXT:    neg a4, a4
 ; CHECK-NOV-NEXT:    addi a3, a3, -1
@@ -7015,8 +7015,8 @@ define <2 x i64> @ustest_f16i64_mm(<2 x half> %x) {
 ; CHECK-V-NEXT:  .LBB53_4: # %entry
 ; CHECK-V-NEXT:    slti a1, a1, 1
 ; CHECK-V-NEXT:    slti a4, s1, 1
-; CHECK-V-NEXT:    slti a3, a3, 0
-; CHECK-V-NEXT:    slti a2, a2, 0
+; CHECK-V-NEXT:    srli a3, a3, 63
+; CHECK-V-NEXT:    srli a2, a2, 63
 ; CHECK-V-NEXT:    neg a1, a1
 ; CHECK-V-NEXT:    neg a4, a4
 ; CHECK-V-NEXT:    addi a3, a3, -1
diff --git a/llvm/test/CodeGen/RISCV/rvv/vxrm-insert-out-of-loop.ll b/llvm/test/CodeGen/RISCV/rvv/vxrm-insert-out-of-loop.ll
index 7990dfc0880a5..76ec34ccadf0b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vxrm-insert-out-of-loop.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxrm-insert-out-of-loop.ll
@@ -61,11 +61,11 @@ define void @test1(ptr nocapture noundef writeonly %dst, i32 noundef signext %i_
 ; RV32-NEXT:    sltu t3, a4, t3
 ; RV32-NEXT:    and t3, t4, t3
 ; RV32-NEXT:    or t4, a1, a3
-; RV32-NEXT:    slti t4, t4, 0
+; RV32-NEXT:    srli t4, t4, 31
 ; RV32-NEXT:    or t4, t5, t4
 ; RV32-NEXT:    or t5, a1, a5
 ; RV32-NEXT:    sltu t1, a6, t1
-; RV32-NEXT:    slti t5, t5, 0
+; RV32-NEXT:    srli t5, t5, 31
 ; RV32-NEXT:    or t3, t3, t5
 ; RV32-NEXT:    or t3, t4, t3
 ; RV32-NEXT:    or t1, t1, t3
@@ -186,14 +186,14 @@ define void @test1(ptr nocapture noundef writeonly %dst, i32 noundef signext %i_
 ; RV64P670-NEXT:    slli t3, t2, 1
 ; RV64P670-NEXT:    and s0, s0, s1
 ; RV64P670-NEXT:    or s1, a1, a3
-; RV64P670-NEXT:    slti s1, s1, 0
+; RV64P670-NEXT:    srli s1, s1, 63
 ; RV64P670-NEXT:    or t6, s0, s1
 ; RV64P670-NEXT:    sltu s1, a0, t5
 ; RV64P670-NEXT:    sltu s0, a4, t4
 ; RV64P670-NEXT:    mv t5, a0
 ; RV64P670-NEXT:    and s0, s0, s1
 ; RV64P670-NEXT:    or s1, a1, a5
-; RV64P670-NEXT:    slti s1, s1, 0
+; RV64P670-NEXT:    srli s1, s1, 63
 ; RV64P670-NEXT:    or s0, s0, s1
 ; RV64P670-NEXT:    li s1, 32
 ; RV64P670-NEXT:    maxu s1, t3, s1
@@ -321,12 +321,12 @@ define void @test1(ptr nocapture noundef writeonly %dst, i32 noundef signext %i_
 ; RV64X60-NEXT:    or s2, a1, a3
 ; RV64X60-NEXT:    sltu s0, a0, t5
 ; RV64X60-NEXT:    sltu s1, a4, t3
-; RV64X60-NEXT:    slti t3, s2, 0
+; RV64X60-NEXT:    srli t3, s2, 63
 ; RV64X60-NEXT:    and s0, s0, s1
 ; RV64X60-NEXT:    or s1, a1, a5
 ; RV64X60-NEXT:    or t4, t4, t3
 ; RV64X60-NEXT:    slli t3, t2, 1
-; RV64X60-NEXT:    slti s1, s1, 0
+; RV64X60-NEXT:    srli s1, s1, 63
 ; RV64X60-NEXT:    or s0, s0, s1
 ; RV64X60-NEXT:    maxu s1, t3, t6
 ; RV64X60-NEXT:    or s0, t4, s0
@@ -461,10 +461,10 @@ define void @test1(ptr nocapture noundef writeonly %dst, i32 noundef signext %i_
 ; RV64-NEXT:    sltu t5, a4, t5
 ; RV64-NEXT:    and t5, t6, t5
 ; RV64-NEXT:    or t6, a1, a3
-; RV64-NEXT:    slti t6, t6, 0
+; RV64-NEXT:    srli t6, t6, 63
 ; RV64-NEXT:    or t6, s0, t6
 ; RV64-NEXT:    or s0, a1, a5
-; RV64-NEXT:    slti s0, s0, 0
+; RV64-NEXT:    srli s0, s0, 63
 ; RV64-NEXT:    or t5, t5, s0
 ; RV64-NEXT:    or t5, t6, t5
 ; RV64-NEXT:    sltu t4, a6, t4
diff --git a/llvm/test/CodeGen/RISCV/sadd_sat.ll b/llvm/test/CodeGen/RISCV/sadd_sat.ll
index 04f2436201e9d..1d6d07aa67337 100644
--- a/llvm/test/CodeGen/RISCV/sadd_sat.ll
+++ b/llvm/test/CodeGen/RISCV/sadd_sat.ll
@@ -16,7 +16,7 @@ define signext i32 @func(i32 signext %x, i32 signext %y) nounwind {
 ; RV32-NEXT:    mv a2, a0
 ; RV32-NEXT:    add a0, a0, a1
 ; RV32-NEXT:    slt a2, a0, a2
-; RV32-NEXT:    slti a1, a1, 0
+; RV32-NEXT:    srli a1, a1, 31
 ; RV32-NEXT:    beq a1, a2, .LBB0_2
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    srai a0, a0, 31
@@ -77,7 +77,7 @@ define i64 @func2(i64 %x, i64 %y) nounwind {
 ; RV64-NEXT:    mv a2, a0
 ; RV64-NEXT:    add a0, a0, a1
 ; RV64-NEXT:    slt a2, a0, a2
-; RV64-NEXT:    slti a1, a1, 0
+; RV64-NEXT:    srli a1, a1, 63
 ; RV64-NEXT:    beq a1, a2, .LBB1_2
 ; RV64-NEXT:  # %bb.1:
 ; RV64-NEXT:    srai a0, a0, 63
diff --git a/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll b/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll
index 857026cce0d43..9200a77915c56 100644
--- a/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll
+++ b/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll
@@ -17,7 +17,7 @@ define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
 ; RV32-NEXT:    mul a1, a1, a2
 ; RV32-NEXT:    add a0, a0, a1
 ; RV32-NEXT:    slt a2, a0, a3
-; RV32-NEXT:    slti a1, a1, 0
+; RV32-NEXT:    srli a1, a1, 31
 ; RV32-NEXT:    beq a1, a2, .LBB0_2
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    srai a0, a0, 31
@@ -81,7 +81,7 @@ define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
 ; RV64-NEXT:    mv a1, a0
 ; RV64-NEXT:    add a0, a0, a2
 ; RV64-NEXT:    slt a1, a0, a1
-; RV64-NEXT:    slti a2, a2, 0
+; RV64-NEXT:    srli a2, a2, 63
 ; RV64-NEXT:    beq a2, a1, .LBB1_2
 ; RV64-NEXT:  # %bb.1:
 ; RV64-NEXT:    srai a0, a0, 63
diff --git a/llvm/test/CodeGen/RISCV/select-binop-identity.ll b/llvm/test/CodeGen/RISCV/select-binop-identity.ll
index 325e4b54c1d6d..8ab66ba3f25f5 100644
--- a/llvm/test/CodeGen/RISCV/select-binop-identity.ll
+++ b/llvm/test/CodeGen/RISCV/select-binop-identity.ll
@@ -260,14 +260,14 @@ define i64 @and_select_all_ones_i64_cmp2(i64 %x, i64 %y, i64 %z) {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    beqz a5, .LBB5_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    slti a4, a5, 0
+; RV32I-NEXT:    srli a5, a5, 31
 ; RV32I-NEXT:    j .LBB5_3
 ; RV32I-NEXT:  .LBB5_2:
-; RV32I-NEXT:    sltiu a4, a4, 4
+; RV32I-NEXT:    sltiu a5, a4, 4
 ; RV32I-NEXT:  .LBB5_3:
-; RV32I-NEXT:    addi a4, a4, -1
-; RV32I-NEXT:    or a1, a4, a1
-; RV32I-NEXT:    or a0, a4, a0
+; RV32I-NEXT:    addi a5, a5, -1
+; RV32I-NEXT:    or a1, a5, a1
+; RV32I-NEXT:    or a0, a5, a0
 ; RV32I-NEXT:    and a0, a0, a2
 ; RV32I-NEXT:    and a1, a1, a3
 ; RV32I-NEXT:    ret
@@ -300,7 +300,7 @@ define i64 @and_select_all_ones_i64_cmp2(i64 %x, i64 %y, i64 %z) {
 ;
 ; ZICOND32-LABEL: and_select_all_ones_i64_cmp2:
 ; ZICOND32:       # %bb.0:
-; ZICOND32-NEXT:    slti a6, a5, 0
+; ZICOND32-NEXT:    srli a6, a5, 31
 ; ZICOND32-NEXT:    sltiu a4, a4, 4
 ; ZICOND32-NEXT:    czero.eqz a6, a6, a5
 ; ZICOND32-NEXT:    czero.nez a4, a4, a5
diff --git a/llvm/test/CodeGen/RISCV/select-cc.ll b/llvm/test/CodeGen/RISCV/select-cc.ll
index ec1f8aeddcaaf..3df07073e0ed3 100644
--- a/llvm/test/CodeGen/RISCV/select-cc.ll
+++ b/llvm/test/CodeGen/RISCV/select-cc.ll
@@ -200,7 +200,7 @@ define signext i32 @foo(i32 signext %a, ptr %b) nounwind {
 ; RV64I-CCMOV-NEXT:    lw a4, 0(a1)
 ; RV64I-CCMOV-NEXT:    slti a5, a2, 1
 ; RV64I-CCMOV-NEXT:    mips.ccmov a0, a5, a0, a2
-; RV64I-CCMOV-NEXT:    slti a5, a2, 0
+; RV64I-CCMOV-NEXT:    srli a5, a2, 63
 ; RV64I-CCMOV-NEXT:    mips.ccmov a0, a5, a3, a0
 ; RV64I-CCMOV-NEXT:    lw a1, 0(a1)
 ; RV64I-CCMOV-NEXT:    slti a3, a4, 1025
@@ -384,11 +384,11 @@ define i64 @select_sge_int32min(i64 %x, i64 %y, i64 %z) {
 ; RV32I-NEXT:    li a6, -1
 ; RV32I-NEXT:    bne a1, a6, .LBB3_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    j .LBB3_3
 ; RV32I-NEXT:  .LBB3_2:
-; RV32I-NEXT:    slti a0, a1, 0
-; RV32I-NEXT:    xori a0, a0, 1
+; RV32I-NEXT:    srli a1, a1, 31
+; RV32I-NEXT:    xori a0, a1, 1
 ; RV32I-NEXT:  .LBB3_3:
 ; RV32I-NEXT:    bnez a0, .LBB3_5
 ; RV32I-NEXT:  # %bb.4:
diff --git a/llvm/test/CodeGen/RISCV/select-constant-xor.ll b/llvm/test/CodeGen/RISCV/select-constant-xor.ll
index 72313a82b3d39..f11fb617c3b12 100644
--- a/llvm/test/CodeGen/RISCV/select-constant-xor.ll
+++ b/llvm/test/CodeGen/RISCV/select-constant-xor.ll
@@ -48,8 +48,8 @@ define i64 @selecti64i64(i64 %a) {
 define i32 @selecti64i32(i64 %a) {
 ; RV32-LABEL: selecti64i32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    slti a0, a1, 0
-; RV32-NEXT:    xori a0, a0, 1
+; RV32-NEXT:    srli a1, a1, 31
+; RV32-NEXT:    xori a0, a1, 1
 ; RV32-NEXT:    lui a1, 524288
 ; RV32-NEXT:    sub a0, a1, a0
 ; RV32-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/selectcc-to-shiftand.ll b/llvm/test/CodeGen/RISCV/selectcc-to-shiftand.ll
index 3fbaefffac2ef..fa1807cd7d911 100644
--- a/llvm/test/CodeGen/RISCV/selectcc-to-shiftand.ll
+++ b/llvm/test/CodeGen/RISCV/selectcc-to-shiftand.ll
@@ -76,12 +76,19 @@ define i32 @not_pos_sel_same_variable(i32 signext %a) {
 
 ; Compare if positive and select of constants where one constant is zero.
 define i32 @pos_sel_constants(i32 signext %a) {
-; CHECK-LABEL: pos_sel_constants:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    slti a0, a0, 0
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    andi a0, a0, 5
-; CHECK-NEXT:    ret
+; RV32-LABEL: pos_sel_constants:
+; RV32:       # %bb.0:
+; RV32-NEXT:    srli a0, a0, 31
+; RV32-NEXT:    addi a0, a0, -1
+; RV32-NEXT:    andi a0, a0, 5
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: pos_sel_constants:
+; RV64:       # %bb.0:
+; RV64-NEXT:    srli a0, a0, 63
+; RV64-NEXT:    addi a0, a0, -1
+; RV64-NEXT:    andi a0, a0, 5
+; RV64-NEXT:    ret
   %tmp.1 = icmp sgt i32 %a, -1
   %retval = select i1 %tmp.1, i32 5, i32 0
   ret i32 %retval
@@ -101,7 +108,7 @@ define i32 @pos_sel_special_constant(i32 signext %a) {
 ;
 ; RV64-LABEL: pos_sel_special_constant:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    slti a0, a0, 0
+; RV64-NEXT:    srli a0, a0, 63
 ; RV64-NEXT:    xori a0, a0, 1
 ; RV64-NEXT:    slli a0, a0, 9
 ; RV64-NEXT:    ret
@@ -114,14 +121,14 @@ define i32 @pos_sel_special_constant(i32 signext %a) {
 define i32 @pos_sel_variable_and_zero(i32 signext %a, i32 signext %b) {
 ; RV32I-LABEL: pos_sel_variable_and_zero:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    srli a0, a0, 31
 ; RV32I-NEXT:    addi a0, a0, -1
 ; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: pos_sel_variable_and_zero:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slti a0, a0, 0
+; RV64I-NEXT:    srli a0, a0, 63
 ; RV64I-NEXT:    addi a0, a0, -1
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/stack-folding.ll b/llvm/test/CodeGen/RISCV/stack-folding.ll
index 8373a745e45cb..0e3291167d0ec 100644
--- a/llvm/test/CodeGen/RISCV/stack-folding.ll
+++ b/llvm/test/CodeGen/RISCV/stack-folding.ll
@@ -31,8 +31,8 @@ define i1 @test_sext_w(i64 %x, i32 %y) nounwind {
 ; CHECK-NEXT:    li a0, 0
 ; CHECK-NEXT:    j .LBB0_3
 ; CHECK-NEXT:  .LBB0_2: # %truebb
-; CHECK-NEXT:    lw a0, 8(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    slti a0, a0, 0
+; CHECK-NEXT:    ld a0, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    srliw a0, a0, 31
 ; CHECK-NEXT:  .LBB0_3: # %falsebb
 ; CHECK-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/xaluo.ll b/llvm/test/CodeGen/RISCV/xaluo.ll
index a30593d7d7afb..2751332c9e3ae 100644
--- a/llvm/test/CodeGen/RISCV/xaluo.ll
+++ b/llvm/test/CodeGen/RISCV/xaluo.ll
@@ -14,7 +14,7 @@ define zeroext i1 @saddo1.i32(i32 signext %v1, i32 signext %v2, ptr %res) {
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    add a3, a0, a1
 ; RV32-NEXT:    slt a0, a3, a0
-; RV32-NEXT:    slti a1, a1, 0
+; RV32-NEXT:    srli a1, a1, 31
 ; RV32-NEXT:    xor a0, a1, a0
 ; RV32-NEXT:    sw a3, 0(a2)
 ; RV32-NEXT:    ret
@@ -32,7 +32,7 @@ define zeroext i1 @saddo1.i32(i32 signext %v1, i32 signext %v2, ptr %res) {
 ; RV32ZBA:       # %bb.0: # %entry
 ; RV32ZBA-NEXT:    add a3, a0, a1
 ; RV32ZBA-NEXT:    slt a0, a3, a0
-; RV32ZBA-NEXT:    slti a1, a1, 0
+; RV32ZBA-NEXT:    srli a1, a1, 31
 ; RV32ZBA-NEXT:    xor a0, a1, a0
 ; RV32ZBA-NEXT:    sw a3, 0(a2)
 ; RV32ZBA-NEXT:    ret
@@ -50,7 +50,7 @@ define zeroext i1 @saddo1.i32(i32 signext %v1, i32 signext %v2, ptr %res) {
 ; RV32ZICOND:       # %bb.0: # %entry
 ; RV32ZICOND-NEXT:    add a3, a0, a1
 ; RV32ZICOND-NEXT:    slt a0, a3, a0
-; RV32ZICOND-NEXT:    slti a1, a1, 0
+; RV32ZICOND-NEXT:    srli a1, a1, 31
 ; RV32ZICOND-NEXT:    xor a0, a1, a0
 ; RV32ZICOND-NEXT:    sw a3, 0(a2)
 ; RV32ZICOND-NEXT:    ret
@@ -252,8 +252,8 @@ define zeroext i1 @saddo1.i64(i64 %v1, i64 %v2, ptr %res) {
 ; RV32-NEXT:    not a3, a3
 ; RV32-NEXT:    add a5, a5, a0
 ; RV32-NEXT:    xor a1, a1, a5
-; RV32-NEXT:    and a1, a3, a1
-; RV32-NEXT:    slti a0, a1, 0
+; RV32-NEXT:    and a0, a3, a1
+; RV32-NEXT:    srli a0, a0, 31
 ; RV32-NEXT:    sw a2, 0(a4)
 ; RV32-NEXT:    sw a5, 4(a4)
 ; RV32-NEXT:    ret
@@ -262,7 +262,7 @@ define zeroext i1 @saddo1.i64(i64 %v1, i64 %v2, ptr %res) {
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    add a3, a0, a1
 ; RV64-NEXT:    slt a0, a3, a0
-; RV64-NEXT:    slti a1, a1, 0
+; RV64-NEXT:    srli a1, a1, 63
 ; RV64-NEXT:    xor a0, a1, a0
 ; RV64-NEXT:    sd a3, 0(a2)
 ; RV64-NEXT:    ret
@@ -276,8 +276,8 @@ define zeroext i1 @saddo1.i64(i64 %v1, i64 %v2, ptr %res) {
 ; RV32ZBA-NEXT:    not a3, a3
 ; RV32ZBA-NEXT:    add a5, a5, a0
 ; RV32ZBA-NEXT:    xor a1, a1, a5
-; RV32ZBA-NEXT:    and a1, a3, a1
-; RV32ZBA-NEXT:    slti a0, a1, 0
+; RV32ZBA-NEXT:    and a0, a3, a1
+; RV32ZBA-NEXT:    srli a0, a0, 31
 ; RV32ZBA-NEXT:    sw a2, 0(a4)
 ; RV32ZBA-NEXT:    sw a5, 4(a4)
 ; RV32ZBA-NEXT:    ret
@@ -286,7 +286,7 @@ define zeroext i1 @saddo1.i64(i64 %v1, i64 %v2, ptr %res) {
 ; RV64ZBA:       # %bb.0: # %entry
 ; RV64ZBA-NEXT:    add a3, a0, a1
 ; RV64ZBA-NEXT:    slt a0, a3, a0
-; RV64ZBA-NEXT:    slti a1, a1, 0
+; RV64ZBA-NEXT:    srli a1, a1, 63
 ; RV64ZBA-NEXT:    xor a0, a1, a0
 ; RV64ZBA-NEXT:    sd a3, 0(a2)
 ; RV64ZBA-NEXT:    ret
@@ -300,8 +300,8 @@ define zeroext i1 @saddo1.i64(i64 %v1, i64 %v2, ptr %res) {
 ; RV32ZICOND-NEXT:    not a3, a3
 ; RV32ZICOND-NEXT:    add a5, a5, a0
 ; RV32ZICOND-NEXT:    xor a1, a1, a5
-; RV32ZICOND-NEXT:    and a1, a3, a1
-; RV32ZICOND-NEXT:    slti a0, a1, 0
+; RV32ZICOND-NEXT:    and a0, a3, a1
+; RV32ZICOND-NEXT:    srli a0, a0, 31
 ; RV32ZICOND-NEXT:    sw a2, 0(a4)
 ; RV32ZICOND-NEXT:    sw a5, 4(a4)
 ; RV32ZICOND-NEXT:    ret
@@ -310,7 +310,7 @@ define zeroext i1 @saddo1.i64(i64 %v1, i64 %v2, ptr %res) {
 ; RV64ZICOND:       # %bb.0: # %entry
 ; RV64ZICOND-NEXT:    add a3, a0, a1
 ; RV64ZICOND-NEXT:    slt a0, a3, a0
-; RV64ZICOND-NEXT:    slti a1, a1, 0
+; RV64ZICOND-NEXT:    srli a1, a1, 63
 ; RV64ZICOND-NEXT:    xor a0, a1, a0
 ; RV64ZICOND-NEXT:    sd a3, 0(a2)
 ; RV64ZICOND-NEXT:    ret
@@ -330,8 +330,8 @@ define zeroext i1 @saddo2.i64(i64 %v1, ptr %res) {
 ; RV32-NEXT:    sltu a0, a3, a0
 ; RV32-NEXT:    add a5, a1, a0
 ; RV32-NEXT:    xor a1, a1, a5
-; RV32-NEXT:    and a1, a4, a1
-; RV32-NEXT:    slti a0, a1, 0
+; RV32-NEXT:    and a0, a4, a1
+; RV32-NEXT:    srli a0, a0, 31
 ; RV32-NEXT:    sw a3, 0(a2)
 ; RV32-NEXT:    sw a5, 4(a2)
 ; RV32-NEXT:    ret
@@ -350,8 +350,8 @@ define zeroext i1 @saddo2.i64(i64 %v1, ptr %res) {
 ; RV32ZBA-NEXT:    sltu a0, a3, a0
 ; RV32ZBA-NEXT:    add a5, a1, a0
 ; RV32ZBA-NEXT:    xor a1, a1, a5
-; RV32ZBA-NEXT:    and a1, a4, a1
-; RV32ZBA-NEXT:    slti a0, a1, 0
+; RV32ZBA-NEXT:    and a0, a4, a1
+; RV32ZBA-NEXT:    srli a0, a0, 31
 ; RV32ZBA-NEXT:    sw a3, 0(a2)
 ; RV32ZBA-NEXT:    sw a5, 4(a2)
 ; RV32ZBA-NEXT:    ret
@@ -370,8 +370,8 @@ define zeroext i1 @saddo2.i64(i64 %v1, ptr %res) {
 ; RV32ZICOND-NEXT:    sltu a0, a3, a0
 ; RV32ZICOND-NEXT:    add a5, a1, a0
 ; RV32ZICOND-NEXT:    xor a1, a1, a5
-; RV32ZICOND-NEXT:    and a1, a4, a1
-; RV32ZICOND-NEXT:    slti a0, a1, 0
+; RV32ZICOND-NEXT:    and a0, a4, a1
+; RV32ZICOND-NEXT:    srli a0, a0, 31
 ; RV32ZICOND-NEXT:    sw a3, 0(a2)
 ; RV32ZICOND-NEXT:    sw a5, 4(a2)
 ; RV32ZICOND-NEXT:    ret
@@ -399,7 +399,7 @@ define zeroext i1 @saddo3.i64(i64 %v1, ptr %res) {
 ; RV32-NEXT:    addi a4, a0, -1
 ; RV32-NEXT:    xor a0, a1, a4
 ; RV32-NEXT:    and a0, a1, a0
-; RV32-NEXT:    slti a0, a0, 0
+; RV32-NEXT:    srli a0, a0, 31
 ; RV32-NEXT:    sw a3, 0(a2)
 ; RV32-NEXT:    sw a4, 4(a2)
 ; RV32-NEXT:    ret
@@ -420,7 +420,7 @@ define zeroext i1 @saddo3.i64(i64 %v1, ptr %res) {
 ; RV32ZBA-NEXT:    addi a4, a0, -1
 ; RV32ZBA-NEXT:    xor a0, a1, a4
 ; RV32ZBA-NEXT:    and a0, a1, a0
-; RV32ZBA-NEXT:    slti a0, a0, 0
+; RV32ZBA-NEXT:    srli a0, a0, 31
 ; RV32ZBA-NEXT:    sw a3, 0(a2)
 ; RV32ZBA-NEXT:    sw a4, 4(a2)
 ; RV32ZBA-NEXT:    ret
@@ -441,7 +441,7 @@ define zeroext i1 @saddo3.i64(i64 %v1, ptr %res) {
 ; RV32ZICOND-NEXT:    addi a4, a0, -1
 ; RV32ZICOND-NEXT:    xor a0, a1, a4
 ; RV32ZICOND-NEXT:    and a0, a1, a0
-; RV32ZICOND-NEXT:    slti a0, a0, 0
+; RV32ZICOND-NEXT:    srli a0, a0, 31
 ; RV32ZICOND-NEXT:    sw a3, 0(a2)
 ; RV32ZICOND-NEXT:    sw a4, 4(a2)
 ; RV32ZICOND-NEXT:    ret
@@ -866,8 +866,8 @@ define zeroext i1 @ssubo.i64(i64 %v1, i64 %v2, ptr %res) {
 ; RV32-NEXT:    sub a2, a0, a2
 ; RV32-NEXT:    sub a5, a6, a5
 ; RV32-NEXT:    xor a1, a1, a5
-; RV32-NEXT:    and a1, a3, a1
-; RV32-NEXT:    slti a0, a1, 0
+; RV32-NEXT:    and a0, a3, a1
+; RV32-NEXT:    srli a0, a0, 31
 ; RV32-NEXT:    sw a2, 0(a4)
 ; RV32-NEXT:    sw a5, 4(a4)
 ; RV32-NEXT:    ret
@@ -889,8 +889,8 @@ define zeroext i1 @ssubo.i64(i64 %v1, i64 %v2, ptr %res) {
 ; RV32ZBA-NEXT:    sub a2, a0, a2
 ; RV32ZBA-NEXT:    sub a5, a6, a5
 ; RV32ZBA-NEXT:    xor a1, a1, a5
-; RV32ZBA-NEXT:    and a1, a3, a1
-; RV32ZBA-NEXT:    slti a0, a1, 0
+; RV32ZBA-NEXT:    and a0, a3, a1
+; RV32ZBA-NEXT:    srli a0, a0, 31
 ; RV32ZBA-NEXT:    sw a2, 0(a4)
 ; RV32ZBA-NEXT:    sw a5, 4(a4)
 ; RV32ZBA-NEXT:    ret
@@ -912,8 +912,8 @@ define zeroext i1 @ssubo.i64(i64 %v1, i64 %v2, ptr %res) {
 ; RV32ZICOND-NEXT:    sub a2, a0, a2
 ; RV32ZICOND-NEXT:    sub a5, a6, a5
 ; RV32ZICOND-NEXT:    xor a1, a1, a5
-; RV32ZICOND-NEXT:    and a1, a3, a1
-; RV32ZICOND-NEXT:    slti a0, a1, 0
+; RV32ZICOND-NEXT:    and a0, a3, a1
+; RV32ZICOND-NEXT:    srli a0, a0, 31
 ; RV32ZICOND-NEXT:    sw a2, 0(a4)
 ; RV32ZICOND-NEXT:    sw a5, 4(a4)
 ; RV32ZICOND-NEXT:    ret
@@ -1963,7 +1963,7 @@ define i32 @saddo.select.i32(i32 signext %v1, i32 signext %v2) {
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    add a2, a0, a1
 ; RV32-NEXT:    slt a2, a2, a0
-; RV32-NEXT:    slti a3, a1, 0
+; RV32-NEXT:    srli a3, a1, 31
 ; RV32-NEXT:    bne a3, a2, .LBB28_2
 ; RV32-NEXT:  # %bb.1: # %entry
 ; RV32-NEXT:    mv a0, a1
@@ -1984,7 +1984,7 @@ define i32 @saddo.select.i32(i32 signext %v1, i32 signext %v2) {
 ; RV32ZBA:       # %bb.0: # %entry
 ; RV32ZBA-NEXT:    add a2, a0, a1
 ; RV32ZBA-NEXT:    slt a2, a2, a0
-; RV32ZBA-NEXT:    slti a3, a1, 0
+; RV32ZBA-NEXT:    srli a3, a1, 31
 ; RV32ZBA-NEXT:    bne a3, a2, .LBB28_2
 ; RV32ZBA-NEXT:  # %bb.1: # %entry
 ; RV32ZBA-NEXT:    mv a0, a1
@@ -2004,7 +2004,7 @@ define i32 @saddo.select.i32(i32 signext %v1, i32 signext %v2) {
 ; RV32ZICOND-LABEL: saddo.select.i32:
 ; RV32ZICOND:       # %bb.0: # %entry
 ; RV32ZICOND-NEXT:    add a2, a0, a1
-; RV32ZICOND-NEXT:    slti a3, a1, 0
+; RV32ZICOND-NEXT:    srli a3, a1, 31
 ; RV32ZICOND-NEXT:    slt a2, a2, a0
 ; RV32ZICOND-NEXT:    xor a2, a3, a2
 ; RV32ZICOND-NEXT:    czero.nez a1, a1, a2
@@ -2033,7 +2033,7 @@ define i1 @saddo.not.i32(i32 signext %v1, i32 signext %v2) {
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    add a2, a0, a1
 ; RV32-NEXT:    slt a0, a2, a0
-; RV32-NEXT:    slti a1, a1, 0
+; RV32-NEXT:    srli a1, a1, 31
 ; RV32-NEXT:    xor a0, a1, a0
 ; RV32-NEXT:    xori a0, a0, 1
 ; RV32-NEXT:    ret
@@ -2050,7 +2050,7 @@ define i1 @saddo.not.i32(i32 signext %v1, i32 signext %v2) {
 ; RV32ZBA:       # %bb.0: # %entry
 ; RV32ZBA-NEXT:    add a2, a0, a1
 ; RV32ZBA-NEXT:    slt a0, a2, a0
-; RV32ZBA-NEXT:    slti a1, a1, 0
+; RV32ZBA-NEXT:    srli a1, a1, 31
 ; RV32ZBA-NEXT:    xor a0, a1, a0
 ; RV32ZBA-NEXT:    xori a0, a0, 1
 ; RV32ZBA-NEXT:    ret
@@ -2067,7 +2067,7 @@ define i1 @saddo.not.i32(i32 signext %v1, i32 signext %v2) {
 ; RV32ZICOND:       # %bb.0: # %entry
 ; RV32ZICOND-NEXT:    add a2, a0, a1
 ; RV32ZICOND-NEXT:    slt a0, a2, a0
-; RV32ZICOND-NEXT:    slti a1, a1, 0
+; RV32ZICOND-NEXT:    srli a1, a1, 31
 ; RV32ZICOND-NEXT:    xor a0, a1, a0
 ; RV32ZICOND-NEXT:    xori a0, a0, 1
 ; RV32ZICOND-NEXT:    ret
@@ -2108,7 +2108,7 @@ define i64 @saddo.select.i64(i64 %v1, i64 %v2) {
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    add a2, a0, a1
 ; RV64-NEXT:    slt a2, a2, a0
-; RV64-NEXT:    slti a3, a1, 0
+; RV64-NEXT:    srli a3, a1, 63
 ; RV64-NEXT:    bne a3, a2, .LBB30_2
 ; RV64-NEXT:  # %bb.1: # %entry
 ; RV64-NEXT:    mv a0, a1
@@ -2136,7 +2136,7 @@ define i64 @saddo.select.i64(i64 %v1, i64 %v2) {
 ; RV64ZBA:       # %bb.0: # %entry
 ; RV64ZBA-NEXT:    add a2, a0, a1
 ; RV64ZBA-NEXT:    slt a2, a2, a0
-; RV64ZBA-NEXT:    slti a3, a1, 0
+; RV64ZBA-NEXT:    srli a3, a1, 63
 ; RV64ZBA-NEXT:    bne a3, a2, .LBB30_2
 ; RV64ZBA-NEXT:  # %bb.1: # %entry
 ; RV64ZBA-NEXT:    mv a0, a1
@@ -2153,7 +2153,7 @@ define i64 @saddo.select.i64(i64 %v1, i64 %v2) {
 ; RV32ZICOND-NEXT:    not a5, a5
 ; RV32ZICOND-NEXT:    xor a4, a1, a4
 ; RV32ZICOND-NEXT:    and a4, a5, a4
-; RV32ZICOND-NEXT:    slti a4, a4, 0
+; RV32ZICOND-NEXT:    srli a4, a4, 31
 ; RV32ZICOND-NEXT:    czero.nez a2, a2, a4
 ; RV32ZICOND-NEXT:    czero.eqz a0, a0, a4
 ; RV32ZICOND-NEXT:    czero.nez a3, a3, a4
@@ -2165,7 +2165,7 @@ define i64 @saddo.select.i64(i64 %v1, i64 %v2) {
 ; RV64ZICOND-LABEL: saddo.select.i64:
 ; RV64ZICOND:       # %bb.0: # %entry
 ; RV64ZICOND-NEXT:    add a2, a0, a1
-; RV64ZICOND-NEXT:    slti a3, a1, 0
+; RV64ZICOND-NEXT:    srli a3, a1, 63
 ; RV64ZICOND-NEXT:    slt a2, a2, a0
 ; RV64ZICOND-NEXT:    xor a2, a3, a2
 ; RV64ZICOND-NEXT:    czero.nez a1, a1, a2
@@ -2190,7 +2190,7 @@ define i1 @saddo.not.i64(i64 %v1, i64 %v2) {
 ; RV32-NEXT:    xor a0, a1, a0
 ; RV32-NEXT:    not a1, a3
 ; RV32-NEXT:    and a0, a1, a0
-; RV32-NEXT:    slti a0, a0, 0
+; RV32-NEXT:    srli a0, a0, 31
 ; RV32-NEXT:    xori a0, a0, 1
 ; RV32-NEXT:    ret
 ;
@@ -2198,7 +2198,7 @@ define i1 @saddo.not.i64(i64 %v1, i64 %v2) {
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    add a2, a0, a1
 ; RV64-NEXT:    slt a0, a2, a0
-; RV64-NEXT:    slti a1, a1, 0
+; RV64-NEXT:    srli a1, a1, 63
 ; RV64-NEXT:    xor a0, a1, a0
 ; RV64-NEXT:    xori a0, a0, 1
 ; RV64-NEXT:    ret
@@ -2213,7 +2213,7 @@ define i1 @saddo.not.i64(i64 %v1, i64 %v2) {
 ; RV32ZBA-NEXT:    xor a0, a1, a0
 ; RV32ZBA-NEXT:    not a1, a3
 ; RV32ZBA-NEXT:    and a0, a1, a0
-; RV32ZBA-NEXT:    slti a0, a0, 0
+; RV32ZBA-NEXT:    srli a0, a0, 31
 ; RV32ZBA-NEXT:    xori a0, a0, 1
 ; RV32ZBA-NEXT:    ret
 ;
@@ -2221,7 +2221,7 @@ define i1 @saddo.not.i64(i64 %v1, i64 %v2) {
 ; RV64ZBA:       # %bb.0: # %entry
 ; RV64ZBA-NEXT:    add a2, a0, a1
 ; RV64ZBA-NEXT:    slt a0, a2, a0
-; RV64ZBA-NEXT:    slti a1, a1, 0
+; RV64ZBA-NEXT:    srli a1, a1, 63
 ; RV64ZBA-NEXT:    xor a0, a1, a0
 ; RV64ZBA-NEXT:    xori a0, a0, 1
 ; RV64ZBA-NEXT:    ret
@@ -2236,7 +2236,7 @@ define i1 @saddo.not.i64(i64 %v1, i64 %v2) {
 ; RV32ZICOND-NEXT:    xor a0, a1, a0
 ; RV32ZICOND-NEXT:    not a1, a3
 ; RV32ZICOND-NEXT:    and a0, a1, a0
-; RV32ZICOND-NEXT:    slti a0, a0, 0
+; RV32ZICOND-NEXT:    srli a0, a0, 31
 ; RV32ZICOND-NEXT:    xori a0, a0, 1
 ; RV32ZICOND-NEXT:    ret
 ;
@@ -2244,7 +2244,7 @@ define i1 @saddo.not.i64(i64 %v1, i64 %v2) {
 ; RV64ZICOND:       # %bb.0: # %entry
 ; RV64ZICOND-NEXT:    add a2, a0, a1
 ; RV64ZICOND-NEXT:    slt a0, a2, a0
-; RV64ZICOND-NEXT:    slti a1, a1, 0
+; RV64ZICOND-NEXT:    srli a1, a1, 63
 ; RV64ZICOND-NEXT:    xor a0, a1, a0
 ; RV64ZICOND-NEXT:    xori a0, a0, 1
 ; RV64ZICOND-NEXT:    ret
@@ -2713,7 +2713,7 @@ define i64 @ssubo.select.i64(i64 %v1, i64 %v2) {
 ; RV32ZICOND-NEXT:    xor a4, a1, a3
 ; RV32ZICOND-NEXT:    xor a5, a1, a5
 ; RV32ZICOND-NEXT:    and a4, a4, a5
-; RV32ZICOND-NEXT:    slti a4, a4, 0
+; RV32ZICOND-NEXT:    srli a4, a4, 31
 ; RV32ZICOND-NEXT:    czero.nez a2, a2, a4
 ; RV32ZICOND-NEXT:    czero.eqz a0, a0, a4
 ; RV32ZICOND-NEXT:    czero.nez a3, a3, a4
@@ -2748,8 +2748,8 @@ define i1 @ssub.not.i64(i64 %v1, i64 %v2) {
 ; RV32-NEXT:    xor a2, a1, a2
 ; RV32-NEXT:    xor a1, a1, a3
 ; RV32-NEXT:    and a1, a1, a2
-; RV32-NEXT:    slti a0, a1, 0
-; RV32-NEXT:    xori a0, a0, 1
+; RV32-NEXT:    srli a1, a1, 31
+; RV32-NEXT:    xori a0, a1, 1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: ssub.not.i64:
@@ -2769,8 +2769,8 @@ define i1 @ssub.not.i64(i64 %v1, i64 %v2) {
 ; RV32ZBA-NEXT:    xor a2, a1, a2
 ; RV32ZBA-NEXT:    xor a1, a1, a3
 ; RV32ZBA-NEXT:    and a1, a1, a2
-; RV32ZBA-NEXT:    slti a0, a1, 0
-; RV32ZBA-NEXT:    xori a0, a0, 1
+; RV32ZBA-NEXT:    srli a1, a1, 31
+; RV32ZBA-NEXT:    xori a0, a1, 1
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: ssub.not.i64:
@@ -2790,8 +2790,8 @@ define i1 @ssub.not.i64(i64 %v1, i64 %v2) {
 ; RV32ZICOND-NEXT:    xor a2, a1, a2
 ; RV32ZICOND-NEXT:    xor a1, a1, a3
 ; RV32ZICOND-NEXT:    and a1, a1, a2
-; RV32ZICOND-NEXT:    slti a0, a1, 0
-; RV32ZICOND-NEXT:    xori a0, a0, 1
+; RV32ZICOND-NEXT:    srli a1, a1, 31
+; RV32ZICOND-NEXT:    xori a0, a1, 1
 ; RV32ZICOND-NEXT:    ret
 ;
 ; RV64ZICOND-LABEL: ssub.not.i64:
@@ -3821,7 +3821,7 @@ define zeroext i1 @saddo.br.i32(i32 signext %v1, i32 signext %v2) {
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    add a2, a0, a1
 ; RV32-NEXT:    slt a0, a2, a0
-; RV32-NEXT:    slti a1, a1, 0
+; RV32-NEXT:    srli a1, a1, 31
 ; RV32-NEXT:    beq a1, a0, .LBB52_2
 ; RV32-NEXT:  # %bb.1: # %overflow
 ; RV32-NEXT:    li a0, 0
@@ -3846,7 +3846,7 @@ define zeroext i1 @saddo.br.i32(i32 signext %v1, i32 signext %v2) {
 ; RV32ZBA:       # %bb.0: # %entry
 ; RV32ZBA-NEXT:    add a2, a0, a1
 ; RV32ZBA-NEXT:    slt a0, a2, a0
-; RV32ZBA-NEXT:    slti a1, a1, 0
+; RV32ZBA-NEXT:    srli a1, a1, 31
 ; RV32ZBA-NEXT:    beq a1, a0, .LBB52_2
 ; RV32ZBA-NEXT:  # %bb.1: # %overflow
 ; RV32ZBA-NEXT:    li a0, 0
@@ -3871,7 +3871,7 @@ define zeroext i1 @saddo.br.i32(i32 signext %v1, i32 signext %v2) {
 ; RV32ZICOND:       # %bb.0: # %entry
 ; RV32ZICOND-NEXT:    add a2, a0, a1
 ; RV32ZICOND-NEXT:    slt a0, a2, a0
-; RV32ZICOND-NEXT:    slti a1, a1, 0
+; RV32ZICOND-NEXT:    srli a1, a1, 31
 ; RV32ZICOND-NEXT:    beq a1, a0, .LBB52_2
 ; RV32ZICOND-NEXT:  # %bb.1: # %overflow
 ; RV32ZICOND-NEXT:    li a0, 0
@@ -3927,7 +3927,7 @@ define zeroext i1 @saddo.br.i64(i64 %v1, i64 %v2) {
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    add a2, a0, a1
 ; RV64-NEXT:    slt a0, a2, a0
-; RV64-NEXT:    slti a1, a1, 0
+; RV64-NEXT:    srli a1, a1, 63
 ; RV64-NEXT:    beq a1, a0, .LBB53_2
 ; RV64-NEXT:  # %bb.1: # %overflow
 ; RV64-NEXT:    li a0, 0
@@ -3958,7 +3958,7 @@ define zeroext i1 @saddo.br.i64(i64 %v1, i64 %v2) {
 ; RV64ZBA:       # %bb.0: # %entry
 ; RV64ZBA-NEXT:    add a2, a0, a1
 ; RV64ZBA-NEXT:    slt a0, a2, a0
-; RV64ZBA-NEXT:    slti a1, a1, 0
+; RV64ZBA-NEXT:    srli a1, a1, 63
 ; RV64ZBA-NEXT:    beq a1, a0, .LBB53_2
 ; RV64ZBA-NEXT:  # %bb.1: # %overflow
 ; RV64ZBA-NEXT:    li a0, 0
@@ -3989,7 +3989,7 @@ define zeroext i1 @saddo.br.i64(i64 %v1, i64 %v2) {
 ; RV64ZICOND:       # %bb.0: # %entry
 ; RV64ZICOND-NEXT:    add a2, a0, a1
 ; RV64ZICOND-NEXT:    slt a0, a2, a0
-; RV64ZICOND-NEXT:    slti a1, a1, 0
+; RV64ZICOND-NEXT:    srli a1, a1, 63
 ; RV64ZICOND-NEXT:    beq a1, a0, .LBB53_2
 ; RV64ZICOND-NEXT:  # %bb.1: # %overflow
 ; RV64ZICOND-NEXT:    li a0, 0
diff --git a/llvm/test/CodeGen/RISCV/xqcia.ll b/llvm/test/CodeGen/RISCV/xqcia.ll
index c75bb9daefcf2..3bbf33328f529 100644
--- a/llvm/test/CodeGen/RISCV/xqcia.ll
+++ b/llvm/test/CodeGen/RISCV/xqcia.ll
@@ -11,7 +11,7 @@ define i32 @addsat(i32 %a, i32 %b) {
 ; RV32I-NEXT:    mv a2, a0
 ; RV32I-NEXT:    add a0, a0, a1
 ; RV32I-NEXT:    slt a2, a0, a2
-; RV32I-NEXT:    slti a1, a1, 0
+; RV32I-NEXT:    srli a1, a1, 31
 ; RV32I-NEXT:    beq a1, a2, .LBB0_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    srai a0, a0, 31
diff --git a/llvm/test/CodeGen/RISCV/zbb-cmp-combine.ll b/llvm/test/CodeGen/RISCV/zbb-cmp-combine.ll
index 2d48f2b49822b..17b35343d0791 100644
--- a/llvm/test/CodeGen/RISCV/zbb-cmp-combine.ll
+++ b/llvm/test/CodeGen/RISCV/zbb-cmp-combine.ll
@@ -224,7 +224,7 @@ define i1 @flo(float %c, float %a, float %b) {
 ; CHECK-RV64I-NEXT:    mv a1, s1
 ; CHECK-RV64I-NEXT:    call __gesf2
 ; CHECK-RV64I-NEXT:    or a0, s2, a0
-; CHECK-RV64I-NEXT:    slti a0, a0, 0
+; CHECK-RV64I-NEXT:    srli a0, a0, 63
 ; CHECK-RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; CHECK-RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; CHECK-RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
@@ -275,7 +275,7 @@ define i1 @dlo(double %c, double %a, double %b) {
 ; CHECK-NEXT:    mv a1, s1
 ; CHECK-NEXT:    call __gedf2
 ; CHECK-NEXT:    or a0, s2, a0
-; CHECK-NEXT:    slti a0, a0, 0
+; CHECK-NEXT:    srli a0, a0, 63
 ; CHECK-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
 ; CHECK-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
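
The recurring change in the hunks above replaces a signed compare against zero
with a logical shift of the sign bit: slti rd, rs, 0 becomes srli rd, rs, 31 on
RV32 and srli rd, rs, 63 on RV64, which is equivalent because (x < 0) is exactly
the most significant bit of x. A minimal standalone reproducer, illustrative
only and not part of the patch (the function name is made up; assumes
llc -mtriple=riscv64):

    ; Extract the most significant bit of %x as 0 or 1.
    ; Old selection: slti a0, a0, 0
    ; New selection: srli a0, a0, 63   (srli has a compressed encoding, c.srli)
    define i64 @sign_bit(i64 %x) {
      %cmp = icmp slt i64 %x, 0        ; true iff the sign bit of %x is set
      %r = zext i1 %cmp to i64
      ret i64 %r
    }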