[llvm] 6227b7a - [RISCV] Move xori creation for scalar setccs to lowering.

Craig Topper via llvm-commits <llvm-commits@lists.llvm.org>
Fri Aug 19 13:52:06 PDT 2022


Author: Craig Topper
Date: 2022-08-19T13:51:53-07:00
New Revision: 6227b7ae313d8bb45cd271208b5e6da389795690

URL: https://github.com/llvm/llvm-project/commit/6227b7ae313d8bb45cd271208b5e6da389795690
DIFF: https://github.com/llvm/llvm-project/commit/6227b7ae313d8bb45cd271208b5e6da389795690.diff

LOG: [RISCV] Move xori creation for scalar setccs to lowering.

This patch enables expansion or custom lowering for some integer
condition codes so that any xori that is needed is created before
the last DAG combine runs, giving that combine a chance to optimize it.

I've seen cases where we end up with
(or (xori (setcc), 1), (xori (setcc), 1)), which we would ideally
convert to (xori (and (setcc), (setcc)), 1). This patch doesn't
accomplish that yet, but it should allow us to add DAG combines as
follow-ups. Example: https://godbolt.org/z/Y4qnvsq1b
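
For illustration, here is a minimal sketch of such a follow-up combine
(my sketch, not part of this commit; the function name and placement are
hypothetical). It uses De Morgan's law, which is valid here because
scalar setcc on RISC-V produces a 0/1 value:

// Hypothetical follow-up combine (illustrative only): fold
// (or (xor A, 1), (xor B, 1)), where A and B are setccs producing
// 0/1 values, into (xor (and A, B), 1) so only one XORI remains.
static SDValue combineOrOfNotSetCCs(SDNode *N, SelectionDAG &DAG) {
  if (N->getOpcode() != ISD::OR)
    return SDValue();
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  // Match (xor (setcc), 1) with a single use on both sides.
  auto IsNotOfSetCC = [](SDValue V) {
    return V.getOpcode() == ISD::XOR && isOneConstant(V.getOperand(1)) &&
           V.getOperand(0).getOpcode() == ISD::SETCC && V.hasOneUse();
  };
  if (!IsNotOfSetCC(N0) || !IsNotOfSetCC(N1))
    return SDValue();
  SDLoc DL(N);
  EVT VT = N->getValueType(0);
  // De Morgan: (!A | !B) == !(A & B).
  SDValue And =
      DAG.getNode(ISD::AND, DL, VT, N0.getOperand(0), N1.getOperand(0));
  return DAG.getLogicalNOT(DL, And, VT);
}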

Reviewed By: reames

Differential Revision: https://reviews.llvm.org/D131729

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVInstrInfo.td
    llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
    llvm/test/CodeGen/RISCV/forced-atomics.ll
    llvm/test/CodeGen/RISCV/fpclamptosat.ll
    llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll
    llvm/test/CodeGen/RISCV/rv32zbt.ll
    llvm/test/CodeGen/RISCV/select-cc.ll
    llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
    llvm/test/CodeGen/RISCV/xaluo.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index a92bbb1fd6720..d0d48c07dabff 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -188,6 +188,13 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
   setOperationAction(ISD::BRCOND, MVT::Other, Custom);
   setOperationAction(ISD::SELECT_CC, XLenVT, Expand);
 
+  setCondCodeAction(ISD::SETLE, XLenVT, Expand);
+  setCondCodeAction(ISD::SETGT, XLenVT, Custom);
+  setCondCodeAction(ISD::SETGE, XLenVT, Expand);
+  setCondCodeAction(ISD::SETULE, XLenVT, Expand);
+  setCondCodeAction(ISD::SETUGT, XLenVT, Custom);
+  setCondCodeAction(ISD::SETUGE, XLenVT, Expand);
+
   setOperationAction({ISD::STACKSAVE, ISD::STACKRESTORE}, MVT::Other, Expand);
 
   setOperationAction(ISD::VASTART, MVT::Other, Custom);
@@ -3592,8 +3599,63 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
   case ISD::MSTORE:
   case ISD::VP_STORE:
     return lowerMaskedStore(Op, DAG);
-  case ISD::SETCC:
+  case ISD::SELECT_CC: {
+    // This occurs because we custom legalize SETGT and SETUGT for setcc. That
+    // causes LegalizeDAG to think we need to custom legalize select_cc. Expand
+    // into separate SETCC+SELECT_CC just like LegalizeDAG.
+    SDValue Tmp1 = Op.getOperand(0);
+    SDValue Tmp2 = Op.getOperand(1);
+    SDValue True = Op.getOperand(2);
+    SDValue False = Op.getOperand(3);
+    EVT VT = Op.getValueType();
+    SDValue CC = Op.getOperand(4);
+    EVT CmpVT = Tmp1.getValueType();
+    EVT CCVT =
+        getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), CmpVT);
+    SDLoc DL(Op);
+    SDValue Cond =
+        DAG.getNode(ISD::SETCC, DL, CCVT, Tmp1, Tmp2, CC, Op->getFlags());
+    return DAG.getSelect(DL, VT, Cond, True, False);
+  }
+  case ISD::SETCC: {
+    MVT OpVT = Op.getOperand(0).getSimpleValueType();
+    if (OpVT.isScalarInteger()) {
+      MVT VT = Op.getSimpleValueType();
+      SDValue LHS = Op.getOperand(0);
+      SDValue RHS = Op.getOperand(1);
+      ISD::CondCode CCVal = cast<CondCodeSDNode>(Op.getOperand(2))->get();
+      assert((CCVal == ISD::SETGT || CCVal == ISD::SETUGT) &&
+             "Unexpected CondCode");
+
+      SDLoc DL(Op);
+
+      // If the RHS is a constant in the range [-2049, 0) or (0, 2046], we can
+      // convert this to the equivalent of (set(u)ge X, C+1) by using
+      // (xori (slti(u) X, C+1), 1). This avoids materializing a small constant
+      // in a register.
+      if (isa<ConstantSDNode>(RHS)) {
+        int64_t Imm = cast<ConstantSDNode>(RHS)->getSExtValue();
+        if (Imm != 0 && isInt<12>((uint64_t)Imm + 1)) {
+          // If this is an unsigned compare and the constant is -1, incrementing
+          // the constant would change behavior. The result should be false.
+          if (CCVal == ISD::SETUGT && Imm == -1)
+            return DAG.getConstant(0, DL, VT);
+          // Using getSetCCSwappedOperands will convert SET(U)GT->SET(U)LT.
+          CCVal = ISD::getSetCCSwappedOperands(CCVal);
+          SDValue SetCC = DAG.getSetCC(
+              DL, VT, LHS, DAG.getConstant(Imm + 1, DL, OpVT), CCVal);
+          return DAG.getLogicalNOT(DL, SetCC, VT);
+        }
+      }
+
+      // Not a constant we could handle, swap the operands and condition code to
+      // SETLT/SETULT.
+      CCVal = ISD::getSetCCSwappedOperands(CCVal);
+      return DAG.getSetCC(DL, VT, RHS, LHS, CCVal);
+    }
+
     return lowerFixedLengthVectorSetccToRVV(Op, DAG);
+  }
   case ISD::ADD:
     return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL, /*HasMergeOp*/ true);
   case ISD::SUB:
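
As a standalone sanity check of the immediate rewrite in the hunk above
(an illustrative program I added, not part of the commit): for integers,
X > C is equivalent to !(X < C + 1) whenever C + 1 stays in range, which
is the identity behind the (xori (slti(u) X, C+1), 1) form; the lowering
separately folds the unsigned compare against -1 to constant false,
since incrementing that constant would wrap.

// Illustrative check (not part of the patch): X > C == !(X < C+1) for
// signed integers, mirroring the SLTI+XORI lowering of setgt.
#include <cassert>
#include <cstdint>

int main() {
  // C values near the simm12 bounds the lowering accepts for C+1.
  for (int64_t C : {int64_t(-2049), int64_t(-5), int64_t(5), int64_t(2046)}) {
    for (int64_t X : {C - 1, C, C + 1, int64_t(-3000), int64_t(3000)}) {
      bool Gt = X > C;              // setgt X, C
      bool SltiXori = !(X < C + 1); // xori (slti X, C+1), 1
      assert(Gt == SltiXori);
    }
  }
  return 0;
}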

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index fdbad27ba951c..224974e40e3c2 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -1272,51 +1272,6 @@ def : Pat<(setne GPR:$rs1, simm12_plus1:$imm12),
 def : Pat<(setne GPR:$rs1, -2048),
           (SLTU X0, (XORI GPR:$rs1, -2048))>;
 def : Pat<(setne GPR:$rs1, -1), (SLTIU GPR:$rs1, -1)>;
-def : Pat<(setugt GPR:$rs1, GPR:$rs2), (SLTU GPR:$rs2, GPR:$rs1)>;
-def : Pat<(setuge GPR:$rs1, GPR:$rs2), (XORI (SLTU GPR:$rs1, GPR:$rs2), 1)>;
-def : Pat<(setule GPR:$rs1, GPR:$rs2), (XORI (SLTU GPR:$rs2, GPR:$rs1), 1)>;
-def : Pat<(setgt GPR:$rs1, GPR:$rs2), (SLT GPR:$rs2, GPR:$rs1)>;
-def : Pat<(setge GPR:$rs1, GPR:$rs2), (XORI (SLT GPR:$rs1, GPR:$rs2), 1)>;
-def : Pat<(setle GPR:$rs1, GPR:$rs2), (XORI (SLT GPR:$rs2, GPR:$rs1), 1)>;
-def : Pat<(setgt GPR:$rs1, simm12_minus1_nonzero:$imm),
-          (XORI (SLTI GPR:$rs1, (ImmPlus1 simm12_minus1_nonzero:$imm)), 1)>;
-def : Pat<(setugt GPR:$rs1, simm12_minus1_nonzero:$imm),
-          (XORI (SLTIU GPR:$rs1, (ImmPlus1 simm12_minus1_nonzero:$imm)), 1)>;
-
-// If negating a pattern that requires an XORI above, we can fold the XORI with
-// the NEG. The XORI is equivalent to 1-X and negating gives X-1.
-def : Pat<(ineg (setuge GPR:$rs1, GPR:$rs2)),
-          (ADDI (SLTU GPR:$rs1, GPR:$rs2), -1)>;
-def : Pat<(ineg (setule GPR:$rs1, GPR:$rs2)),
-          (ADDI (SLTU GPR:$rs2, GPR:$rs1), -1)>;
-def : Pat<(ineg (setge GPR:$rs1, GPR:$rs2)),
-          (ADDI (SLT GPR:$rs1, GPR:$rs2), -1)>;
-def : Pat<(ineg (setle GPR:$rs1, GPR:$rs2)),
-          (ADDI (SLT GPR:$rs2, GPR:$rs1), -1)>;
-def : Pat<(ineg (setgt GPR:$rs1, simm12_minus1_nonzero:$imm)),
-          (ADDI (SLTI GPR:$rs1, (ImmPlus1 simm12_minus1_nonzero:$imm)), -1)>;
-def : Pat<(ineg (setugt GPR:$rs1, simm12_minus1_nonzero:$imm)),
-          (ADDI (SLTIU GPR:$rs1, (ImmPlus1 simm12_minus1_nonzero:$imm)), -1)>;
-
-def ineg_allwusers : PatFrag<(ops node:$src),
-                             (ineg node:$src), [{
-  return hasAllWUsers(Node);
-}]>;
-
-let Predicates = [IsRV64] in {
-def : Pat<(ineg_allwusers (setuge GPR:$rs1, GPR:$rs2)),
-          (ADDIW (SLTU GPR:$rs1, GPR:$rs2), -1)>;
-def : Pat<(ineg_allwusers (setule GPR:$rs1, GPR:$rs2)),
-          (ADDIW (SLTU GPR:$rs2, GPR:$rs1), -1)>;
-def : Pat<(ineg_allwusers (setge GPR:$rs1, GPR:$rs2)),
-          (ADDIW (SLT GPR:$rs1, GPR:$rs2), -1)>;
-def : Pat<(ineg_allwusers (setle GPR:$rs1, GPR:$rs2)),
-          (ADDIW (SLT GPR:$rs2, GPR:$rs1), -1)>;
-def : Pat<(ineg_allwusers (setgt GPR:$rs1, simm12_minus1_nonzero:$imm)),
-          (ADDIW (SLTI GPR:$rs1, (ImmPlus1 simm12_minus1_nonzero:$imm)), -1)>;
-def : Pat<(ineg_allwusers (setugt GPR:$rs1, simm12_minus1_nonzero:$imm)),
-          (ADDIW (SLTIU GPR:$rs1, (ImmPlus1 simm12_minus1_nonzero:$imm)), -1)>;
-}
 
 def IntCCtoRISCVCC : SDNodeXForm<riscv_selectcc, [{
   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
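
A quick arithmetic check of the folded-negation identity from the
removed comment above (illustrative, not part of the commit): for a
boolean X in {0, 1}, xori X, 1 computes 1 - X, so negating it yields
X - 1, which a single addi X, -1 also computes.

// Illustrative check: -(X ^ 1) == X - 1 for X in {0, 1}, the identity
// behind folding XORI+NEG of a setcc result into one ADDI.
#include <cassert>

int main() {
  for (int X : {0, 1})
    assert(-(X ^ 1) == X - 1);
  return 0;
}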

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
index 1a7539f40d474..5bf6148d462c5 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
@@ -997,20 +997,6 @@ def : Pat<(select (XLenVT (setne GPR:$x, GPR:$y)), GPR:$rs1, GPR:$rs3),
           (CMOV GPR:$rs1, (XOR GPR:$x, GPR:$y), GPR:$rs3)>;
 def : Pat<(select (XLenVT (seteq GPR:$x, GPR:$y)), GPR:$rs3, GPR:$rs1),
           (CMOV GPR:$rs1, (XOR GPR:$x, GPR:$y), GPR:$rs3)>;
-def : Pat<(select (XLenVT (setuge GPR:$x, GPR:$y)), GPR:$rs3, GPR:$rs1),
-          (CMOV GPR:$rs1, (SLTU GPR:$x, GPR:$y), GPR:$rs3)>;
-def : Pat<(select (XLenVT (setule GPR:$y, GPR:$x)), GPR:$rs3, GPR:$rs1),
-          (CMOV GPR:$rs1, (SLTU GPR:$x, GPR:$y), GPR:$rs3)>;
-def : Pat<(select (XLenVT (setge GPR:$x, GPR:$y)), GPR:$rs3, GPR:$rs1),
-          (CMOV GPR:$rs1, (SLT GPR:$x, GPR:$y), GPR:$rs3)>;
-def : Pat<(select (XLenVT (setle GPR:$y, GPR:$x)), GPR:$rs3, GPR:$rs1),
-          (CMOV GPR:$rs1, (SLT GPR:$x, GPR:$y), GPR:$rs3)>;
-
-// setge X, Imm is canonicalized to setgt X, (Imm - 1).
-def : Pat<(select (XLenVT (setgt GPR:$x, simm12_minus1_nonzero:$imm)), GPR:$rs3, GPR:$rs1),
-          (CMOV GPR:$rs1, (SLTI GPR:$x, (ImmPlus1 simm12_minus1_nonzero:$imm)), GPR:$rs3)>;
-def : Pat<(select (XLenVT (setugt GPR:$x, simm12_minus1_nonzero:$imm)), GPR:$rs3, GPR:$rs1),
-          (CMOV GPR:$rs1, (SLTIU GPR:$x, (ImmPlus1 simm12_minus1_nonzero:$imm)), GPR:$rs3)>;
 
 def : Pat<(select GPR:$rs2, GPR:$rs1, GPR:$rs3),
           (CMOV GPR:$rs1, GPR:$rs2, GPR:$rs3)>;

diff --git a/llvm/test/CodeGen/RISCV/forced-atomics.ll b/llvm/test/CodeGen/RISCV/forced-atomics.ll
index a6c735bdfa950..fc03091cba604 100644
--- a/llvm/test/CodeGen/RISCV/forced-atomics.ll
+++ b/llvm/test/CodeGen/RISCV/forced-atomics.ll
@@ -2518,14 +2518,14 @@ define i64 @rmw64_max_seq_cst(ptr %p) nounwind {
 ; RV32-NEXT:    bnez a0, .LBB49_7
 ; RV32-NEXT:  .LBB49_2: # %atomicrmw.start
 ; RV32-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    beqz a1, .LBB49_4
 ; RV32-NEXT:  # %bb.3: # %atomicrmw.start
 ; RV32-NEXT:    # in Loop: Header=BB49_2 Depth=1
 ; RV32-NEXT:    sgtz a0, a1
 ; RV32-NEXT:    j .LBB49_5
 ; RV32-NEXT:  .LBB49_4: # in Loop: Header=BB49_2 Depth=1
-; RV32-NEXT:    sltu a0, a0, a4
+; RV32-NEXT:    sltiu a0, a4, 2
+; RV32-NEXT:    xori a0, a0, 1
 ; RV32-NEXT:  .LBB49_5: # %atomicrmw.start
 ; RV32-NEXT:    # in Loop: Header=BB49_2 Depth=1
 ; RV32-NEXT:    mv a2, a4
@@ -2711,14 +2711,14 @@ define i64 @rmw64_umax_seq_cst(ptr %p) nounwind {
 ; RV32-NEXT:    bnez a0, .LBB51_7
 ; RV32-NEXT:  .LBB51_2: # %atomicrmw.start
 ; RV32-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:    beqz a1, .LBB51_4
 ; RV32-NEXT:  # %bb.3: # %atomicrmw.start
 ; RV32-NEXT:    # in Loop: Header=BB51_2 Depth=1
 ; RV32-NEXT:    snez a0, a1
 ; RV32-NEXT:    j .LBB51_5
 ; RV32-NEXT:  .LBB51_4: # in Loop: Header=BB51_2 Depth=1
-; RV32-NEXT:    sltu a0, a0, a4
+; RV32-NEXT:    sltiu a0, a4, 2
+; RV32-NEXT:    xori a0, a0, 1
 ; RV32-NEXT:  .LBB51_5: # %atomicrmw.start
 ; RV32-NEXT:    # in Loop: Header=BB51_2 Depth=1
 ; RV32-NEXT:    mv a2, a4

diff --git a/llvm/test/CodeGen/RISCV/fpclamptosat.ll b/llvm/test/CodeGen/RISCV/fpclamptosat.ll
index 56c7a36a55991..7fbf441c0a5b7 100644
--- a/llvm/test/CodeGen/RISCV/fpclamptosat.ll
+++ b/llvm/test/CodeGen/RISCV/fpclamptosat.ll
@@ -35,7 +35,8 @@ define i32 @stest_f64i32(double %x) {
 ; RV32IF-NEXT:    li a3, -1
 ; RV32IF-NEXT:    beq a1, a3, .LBB0_6
 ; RV32IF-NEXT:  # %bb.5: # %entry
-; RV32IF-NEXT:    slt a1, a3, a1
+; RV32IF-NEXT:    slti a1, a1, 0
+; RV32IF-NEXT:    xori a1, a1, 1
 ; RV32IF-NEXT:    beqz a1, .LBB0_7
 ; RV32IF-NEXT:    j .LBB0_8
 ; RV32IF-NEXT:  .LBB0_6:
@@ -389,7 +390,8 @@ define i32 @stest_f16i32(half %x) {
 ; RV32-NEXT:    li a3, -1
 ; RV32-NEXT:    beq a1, a3, .LBB6_6
 ; RV32-NEXT:  # %bb.5: # %entry
-; RV32-NEXT:    slt a1, a3, a1
+; RV32-NEXT:    slti a1, a1, 0
+; RV32-NEXT:    xori a1, a1, 1
 ; RV32-NEXT:    beqz a1, .LBB6_7
 ; RV32-NEXT:    j .LBB6_8
 ; RV32-NEXT:  .LBB6_6:
@@ -1132,7 +1134,8 @@ define i64 @stest_f64i64(double %x) {
 ; RV32IF-NEXT:    and a3, a3, a2
 ; RV32IF-NEXT:    beq a3, a6, .LBB18_10
 ; RV32IF-NEXT:  .LBB18_9: # %entry
-; RV32IF-NEXT:    slt a4, a6, a2
+; RV32IF-NEXT:    slti a2, a2, 0
+; RV32IF-NEXT:    xori a4, a2, 1
 ; RV32IF-NEXT:  .LBB18_10: # %entry
 ; RV32IF-NEXT:    bnez a4, .LBB18_12
 ; RV32IF-NEXT:  # %bb.11: # %entry
@@ -1167,7 +1170,8 @@ define i64 @stest_f64i64(double %x) {
 ; RV64IF-NEXT:    slli a3, a2, 63
 ; RV64IF-NEXT:    beq a1, a2, .LBB18_6
 ; RV64IF-NEXT:  # %bb.5: # %entry
-; RV64IF-NEXT:    slt a1, a2, a1
+; RV64IF-NEXT:    slti a1, a1, 0
+; RV64IF-NEXT:    xori a1, a1, 1
 ; RV64IF-NEXT:    beqz a1, .LBB18_7
 ; RV64IF-NEXT:    j .LBB18_8
 ; RV64IF-NEXT:  .LBB18_6:
@@ -1227,7 +1231,8 @@ define i64 @stest_f64i64(double %x) {
 ; RV32IFD-NEXT:    and a3, a3, a2
 ; RV32IFD-NEXT:    beq a3, a6, .LBB18_10
 ; RV32IFD-NEXT:  .LBB18_9: # %entry
-; RV32IFD-NEXT:    slt a4, a6, a2
+; RV32IFD-NEXT:    slti a2, a2, 0
+; RV32IFD-NEXT:    xori a4, a2, 1
 ; RV32IFD-NEXT:  .LBB18_10: # %entry
 ; RV32IFD-NEXT:    bnez a4, .LBB18_12
 ; RV32IFD-NEXT:  # %bb.11: # %entry
@@ -1562,7 +1567,8 @@ define i64 @stest_f32i64(float %x) {
 ; RV32-NEXT:    and a3, a3, a2
 ; RV32-NEXT:    beq a3, a6, .LBB21_10
 ; RV32-NEXT:  .LBB21_9: # %entry
-; RV32-NEXT:    slt a4, a6, a2
+; RV32-NEXT:    slti a2, a2, 0
+; RV32-NEXT:    xori a4, a2, 1
 ; RV32-NEXT:  .LBB21_10: # %entry
 ; RV32-NEXT:    bnez a4, .LBB21_12
 ; RV32-NEXT:  # %bb.11: # %entry
@@ -1798,7 +1804,8 @@ define i64 @stest_f16i64(half %x) {
 ; RV32-NEXT:    and a3, a3, a2
 ; RV32-NEXT:    beq a3, a6, .LBB24_10
 ; RV32-NEXT:  .LBB24_9: # %entry
-; RV32-NEXT:    slt a4, a6, a2
+; RV32-NEXT:    slti a2, a2, 0
+; RV32-NEXT:    xori a4, a2, 1
 ; RV32-NEXT:  .LBB24_10: # %entry
 ; RV32-NEXT:    bnez a4, .LBB24_12
 ; RV32-NEXT:  # %bb.11: # %entry
@@ -1835,7 +1842,8 @@ define i64 @stest_f16i64(half %x) {
 ; RV64-NEXT:    slli a3, a2, 63
 ; RV64-NEXT:    beq a1, a2, .LBB24_6
 ; RV64-NEXT:  # %bb.5: # %entry
-; RV64-NEXT:    slt a1, a2, a1
+; RV64-NEXT:    slti a1, a1, 0
+; RV64-NEXT:    xori a1, a1, 1
 ; RV64-NEXT:    beqz a1, .LBB24_7
 ; RV64-NEXT:    j .LBB24_8
 ; RV64-NEXT:  .LBB24_6:

diff --git a/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll b/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll
index 325443b9d7399..dc65902b46859 100644
--- a/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll
@@ -2155,7 +2155,8 @@ define <2 x i64> @stest_f64i64(<2 x double> %x) {
 ; CHECK-NOV-NEXT:    slli a3, a0, 63
 ; CHECK-NOV-NEXT:    beq a1, a0, .LBB18_11
 ; CHECK-NOV-NEXT:  .LBB18_8: # %entry
-; CHECK-NOV-NEXT:    slt a1, a0, a1
+; CHECK-NOV-NEXT:    slti a1, a1, 0
+; CHECK-NOV-NEXT:    xori a1, a1, 1
 ; CHECK-NOV-NEXT:    bne s1, a0, .LBB18_12
 ; CHECK-NOV-NEXT:  .LBB18_9:
 ; CHECK-NOV-NEXT:    sltu a0, a3, s0
@@ -2170,7 +2171,8 @@ define <2 x i64> @stest_f64i64(<2 x double> %x) {
 ; CHECK-NOV-NEXT:    sltu a1, a3, a2
 ; CHECK-NOV-NEXT:    beq s1, a0, .LBB18_9
 ; CHECK-NOV-NEXT:  .LBB18_12: # %entry
-; CHECK-NOV-NEXT:    slt a0, a0, s1
+; CHECK-NOV-NEXT:    slti a0, s1, 0
+; CHECK-NOV-NEXT:    xori a0, a0, 1
 ; CHECK-NOV-NEXT:    bnez a0, .LBB18_14
 ; CHECK-NOV-NEXT:  .LBB18_13: # %entry
 ; CHECK-NOV-NEXT:    mv s0, a3
@@ -2239,7 +2241,8 @@ define <2 x i64> @stest_f64i64(<2 x double> %x) {
 ; CHECK-V-NEXT:    slli a3, a2, 63
 ; CHECK-V-NEXT:    beq s1, a2, .LBB18_11
 ; CHECK-V-NEXT:  .LBB18_8: # %entry
-; CHECK-V-NEXT:    slt a4, a2, s1
+; CHECK-V-NEXT:    slti a4, s1, 0
+; CHECK-V-NEXT:    xori a4, a4, 1
 ; CHECK-V-NEXT:    bne a1, a2, .LBB18_12
 ; CHECK-V-NEXT:  .LBB18_9:
 ; CHECK-V-NEXT:    sltu a1, a3, a0
@@ -2254,7 +2257,8 @@ define <2 x i64> @stest_f64i64(<2 x double> %x) {
 ; CHECK-V-NEXT:    sltu a4, a3, s0
 ; CHECK-V-NEXT:    beq a1, a2, .LBB18_9
 ; CHECK-V-NEXT:  .LBB18_12: # %entry
-; CHECK-V-NEXT:    slt a1, a2, a1
+; CHECK-V-NEXT:    slti a1, a1, 0
+; CHECK-V-NEXT:    xori a1, a1, 1
 ; CHECK-V-NEXT:    bnez a4, .LBB18_14
 ; CHECK-V-NEXT:  .LBB18_13: # %entry
 ; CHECK-V-NEXT:    mv s0, a3
@@ -2593,7 +2597,8 @@ define <2 x i64> @stest_f32i64(<2 x float> %x) {
 ; CHECK-NOV-NEXT:    slli a3, a0, 63
 ; CHECK-NOV-NEXT:    beq a1, a0, .LBB21_11
 ; CHECK-NOV-NEXT:  .LBB21_8: # %entry
-; CHECK-NOV-NEXT:    slt a1, a0, a1
+; CHECK-NOV-NEXT:    slti a1, a1, 0
+; CHECK-NOV-NEXT:    xori a1, a1, 1
 ; CHECK-NOV-NEXT:    bne s1, a0, .LBB21_12
 ; CHECK-NOV-NEXT:  .LBB21_9:
 ; CHECK-NOV-NEXT:    sltu a0, a3, s0
@@ -2608,7 +2613,8 @@ define <2 x i64> @stest_f32i64(<2 x float> %x) {
 ; CHECK-NOV-NEXT:    sltu a1, a3, a2
 ; CHECK-NOV-NEXT:    beq s1, a0, .LBB21_9
 ; CHECK-NOV-NEXT:  .LBB21_12: # %entry
-; CHECK-NOV-NEXT:    slt a0, a0, s1
+; CHECK-NOV-NEXT:    slti a0, s1, 0
+; CHECK-NOV-NEXT:    xori a0, a0, 1
 ; CHECK-NOV-NEXT:    bnez a0, .LBB21_14
 ; CHECK-NOV-NEXT:  .LBB21_13: # %entry
 ; CHECK-NOV-NEXT:    mv s0, a3
@@ -2677,7 +2683,8 @@ define <2 x i64> @stest_f32i64(<2 x float> %x) {
 ; CHECK-V-NEXT:    slli a3, a2, 63
 ; CHECK-V-NEXT:    beq s1, a2, .LBB21_11
 ; CHECK-V-NEXT:  .LBB21_8: # %entry
-; CHECK-V-NEXT:    slt a4, a2, s1
+; CHECK-V-NEXT:    slti a4, s1, 0
+; CHECK-V-NEXT:    xori a4, a4, 1
 ; CHECK-V-NEXT:    bne a1, a2, .LBB21_12
 ; CHECK-V-NEXT:  .LBB21_9:
 ; CHECK-V-NEXT:    sltu a1, a3, a0
@@ -2692,7 +2699,8 @@ define <2 x i64> @stest_f32i64(<2 x float> %x) {
 ; CHECK-V-NEXT:    sltu a4, a3, s0
 ; CHECK-V-NEXT:    beq a1, a2, .LBB21_9
 ; CHECK-V-NEXT:  .LBB21_12: # %entry
-; CHECK-V-NEXT:    slt a1, a2, a1
+; CHECK-V-NEXT:    slti a1, a1, 0
+; CHECK-V-NEXT:    xori a1, a1, 1
 ; CHECK-V-NEXT:    bnez a4, .LBB21_14
 ; CHECK-V-NEXT:  .LBB21_13: # %entry
 ; CHECK-V-NEXT:    mv s0, a3
@@ -3033,7 +3041,8 @@ define <2 x i64> @stest_f16i64(<2 x half> %x) {
 ; CHECK-NOV-NEXT:    slli a3, a0, 63
 ; CHECK-NOV-NEXT:    beq a1, a0, .LBB24_11
 ; CHECK-NOV-NEXT:  .LBB24_8: # %entry
-; CHECK-NOV-NEXT:    slt a1, a0, a1
+; CHECK-NOV-NEXT:    slti a1, a1, 0
+; CHECK-NOV-NEXT:    xori a1, a1, 1
 ; CHECK-NOV-NEXT:    bne s1, a0, .LBB24_12
 ; CHECK-NOV-NEXT:  .LBB24_9:
 ; CHECK-NOV-NEXT:    sltu a0, a3, s0
@@ -3048,7 +3057,8 @@ define <2 x i64> @stest_f16i64(<2 x half> %x) {
 ; CHECK-NOV-NEXT:    sltu a1, a3, a2
 ; CHECK-NOV-NEXT:    beq s1, a0, .LBB24_9
 ; CHECK-NOV-NEXT:  .LBB24_12: # %entry
-; CHECK-NOV-NEXT:    slt a0, a0, s1
+; CHECK-NOV-NEXT:    slti a0, s1, 0
+; CHECK-NOV-NEXT:    xori a0, a0, 1
 ; CHECK-NOV-NEXT:    bnez a0, .LBB24_14
 ; CHECK-NOV-NEXT:  .LBB24_13: # %entry
 ; CHECK-NOV-NEXT:    mv s0, a3
@@ -3111,7 +3121,8 @@ define <2 x i64> @stest_f16i64(<2 x half> %x) {
 ; CHECK-V-NEXT:    slli a3, a2, 63
 ; CHECK-V-NEXT:    beq a1, a2, .LBB24_11
 ; CHECK-V-NEXT:  .LBB24_8: # %entry
-; CHECK-V-NEXT:    slt a1, a2, a1
+; CHECK-V-NEXT:    slti a1, a1, 0
+; CHECK-V-NEXT:    xori a1, a1, 1
 ; CHECK-V-NEXT:    bne s1, a2, .LBB24_12
 ; CHECK-V-NEXT:  .LBB24_9:
 ; CHECK-V-NEXT:    sltu a2, a3, s0
@@ -3126,7 +3137,8 @@ define <2 x i64> @stest_f16i64(<2 x half> %x) {
 ; CHECK-V-NEXT:    sltu a1, a3, a0
 ; CHECK-V-NEXT:    beq s1, a2, .LBB24_9
 ; CHECK-V-NEXT:  .LBB24_12: # %entry
-; CHECK-V-NEXT:    slt a2, a2, s1
+; CHECK-V-NEXT:    slti a2, s1, 0
+; CHECK-V-NEXT:    xori a2, a2, 1
 ; CHECK-V-NEXT:    bnez a2, .LBB24_14
 ; CHECK-V-NEXT:  .LBB24_13: # %entry
 ; CHECK-V-NEXT:    mv s0, a3

diff --git a/llvm/test/CodeGen/RISCV/rv32zbt.ll b/llvm/test/CodeGen/RISCV/rv32zbt.ll
index 334430744e788..e63a68eac4ce1 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbt.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbt.ll
@@ -648,14 +648,12 @@ define i64 @cmov_sle_i64(i64 %a, i64 %b, i64 %c, i64 %d) nounwind {
 ;
 ; RV32ZBT-LABEL: cmov_sle_i64:
 ; RV32ZBT:       # %bb.0:
-; RV32ZBT-NEXT:    xor t0, a3, a5
 ; RV32ZBT-NEXT:    sltu a2, a4, a2
-; RV32ZBT-NEXT:    xori a2, a2, 1
+; RV32ZBT-NEXT:    xor a4, a3, a5
 ; RV32ZBT-NEXT:    slt a3, a5, a3
-; RV32ZBT-NEXT:    xori a3, a3, 1
-; RV32ZBT-NEXT:    cmov a2, t0, a3, a2
-; RV32ZBT-NEXT:    cmov a0, a2, a0, a6
-; RV32ZBT-NEXT:    cmov a1, a2, a1, a7
+; RV32ZBT-NEXT:    cmov a2, a4, a3, a2
+; RV32ZBT-NEXT:    cmov a0, a2, a6, a0
+; RV32ZBT-NEXT:    cmov a1, a2, a7, a1
 ; RV32ZBT-NEXT:    ret
   %tobool = icmp sle i64 %b, %c
   %cond = select i1 %tobool, i64 %a, i64 %d
@@ -683,14 +681,12 @@ define i64 @cmov_sge_i64(i64 %a, i64 %b, i64 %c, i64 %d) nounwind {
 ;
 ; RV32ZBT-LABEL: cmov_sge_i64:
 ; RV32ZBT:       # %bb.0:
-; RV32ZBT-NEXT:    xor t0, a3, a5
 ; RV32ZBT-NEXT:    sltu a2, a2, a4
-; RV32ZBT-NEXT:    xori a2, a2, 1
+; RV32ZBT-NEXT:    xor a4, a3, a5
 ; RV32ZBT-NEXT:    slt a3, a3, a5
-; RV32ZBT-NEXT:    xori a3, a3, 1
-; RV32ZBT-NEXT:    cmov a2, t0, a3, a2
-; RV32ZBT-NEXT:    cmov a0, a2, a0, a6
-; RV32ZBT-NEXT:    cmov a1, a2, a1, a7
+; RV32ZBT-NEXT:    cmov a2, a4, a3, a2
+; RV32ZBT-NEXT:    cmov a0, a2, a6, a0
+; RV32ZBT-NEXT:    cmov a1, a2, a7, a1
 ; RV32ZBT-NEXT:    ret
   %tobool = icmp sge i64 %b, %c
   %cond = select i1 %tobool, i64 %a, i64 %d
@@ -718,14 +714,12 @@ define i64 @cmov_ule_i64(i64 %a, i64 %b, i64 %c, i64 %d) nounwind {
 ;
 ; RV32ZBT-LABEL: cmov_ule_i64:
 ; RV32ZBT:       # %bb.0:
-; RV32ZBT-NEXT:    xor t0, a3, a5
 ; RV32ZBT-NEXT:    sltu a2, a4, a2
-; RV32ZBT-NEXT:    xori a2, a2, 1
+; RV32ZBT-NEXT:    xor a4, a3, a5
 ; RV32ZBT-NEXT:    sltu a3, a5, a3
-; RV32ZBT-NEXT:    xori a3, a3, 1
-; RV32ZBT-NEXT:    cmov a2, t0, a3, a2
-; RV32ZBT-NEXT:    cmov a0, a2, a0, a6
-; RV32ZBT-NEXT:    cmov a1, a2, a1, a7
+; RV32ZBT-NEXT:    cmov a2, a4, a3, a2
+; RV32ZBT-NEXT:    cmov a0, a2, a6, a0
+; RV32ZBT-NEXT:    cmov a1, a2, a7, a1
 ; RV32ZBT-NEXT:    ret
   %tobool = icmp ule i64 %b, %c
   %cond = select i1 %tobool, i64 %a, i64 %d
@@ -753,14 +747,12 @@ define i64 @cmov_uge_i64(i64 %a, i64 %b, i64 %c, i64 %d) nounwind {
 ;
 ; RV32ZBT-LABEL: cmov_uge_i64:
 ; RV32ZBT:       # %bb.0:
-; RV32ZBT-NEXT:    xor t0, a3, a5
 ; RV32ZBT-NEXT:    sltu a2, a2, a4
-; RV32ZBT-NEXT:    xori a2, a2, 1
+; RV32ZBT-NEXT:    xor a4, a3, a5
 ; RV32ZBT-NEXT:    sltu a3, a3, a5
-; RV32ZBT-NEXT:    xori a3, a3, 1
-; RV32ZBT-NEXT:    cmov a2, t0, a3, a2
-; RV32ZBT-NEXT:    cmov a0, a2, a0, a6
-; RV32ZBT-NEXT:    cmov a1, a2, a1, a7
+; RV32ZBT-NEXT:    cmov a2, a4, a3, a2
+; RV32ZBT-NEXT:    cmov a0, a2, a6, a0
+; RV32ZBT-NEXT:    cmov a1, a2, a7, a1
 ; RV32ZBT-NEXT:    ret
   %tobool = icmp uge i64 %b, %c
   %cond = select i1 %tobool, i64 %a, i64 %d

diff --git a/llvm/test/CodeGen/RISCV/select-cc.ll b/llvm/test/CodeGen/RISCV/select-cc.ll
index 34656428aeff6..017a66bcad3cd 100644
--- a/llvm/test/CodeGen/RISCV/select-cc.ll
+++ b/llvm/test/CodeGen/RISCV/select-cc.ll
@@ -465,7 +465,8 @@ define i64 @select_sge_int32min(i64 %x, i64 %y, i64 %z) {
 ; RV32I-NEXT:    slti a0, a0, 0
 ; RV32I-NEXT:    j .LBB3_3
 ; RV32I-NEXT:  .LBB3_2:
-; RV32I-NEXT:    slt a0, a6, a1
+; RV32I-NEXT:    slti a0, a1, 0
+; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:  .LBB3_3:
 ; RV32I-NEXT:    bnez a0, .LBB3_5
 ; RV32I-NEXT:  # %bb.4:

diff --git a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
index fac578b72508a..e23302dce9cb3 100644
--- a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
@@ -343,33 +343,32 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
 ; RV32-NEXT:    slli a0, a0, 21
 ; RV32-NEXT:    srli a0, a0, 22
 ; RV32-NEXT:    or a0, a0, a1
-; RV32-NEXT:    andi s3, a0, 2047
+; RV32-NEXT:    andi a0, a0, 2047
+; RV32-NEXT:    sltiu s3, a0, 342
 ; RV32-NEXT:    li a1, 819
 ; RV32-NEXT:    mv a0, s1
 ; RV32-NEXT:    call __mulsi3@plt
 ; RV32-NEXT:    addi a0, a0, -1638
 ; RV32-NEXT:    andi a0, a0, 2047
-; RV32-NEXT:    li a1, 1
-; RV32-NEXT:    sltu s1, a1, a0
+; RV32-NEXT:    sltiu s1, a0, 2
 ; RV32-NEXT:    li a1, 1463
 ; RV32-NEXT:    mv a0, s2
 ; RV32-NEXT:    call __mulsi3@plt
 ; RV32-NEXT:    addi a0, a0, -1463
 ; RV32-NEXT:    andi a0, a0, 2047
-; RV32-NEXT:    sltiu a1, s3, 342
-; RV32-NEXT:    addi a1, a1, -1
 ; RV32-NEXT:    sltiu a0, a0, 293
+; RV32-NEXT:    addi a1, s3, -1
 ; RV32-NEXT:    addi a0, a0, -1
-; RV32-NEXT:    neg a2, s1
-; RV32-NEXT:    slli a2, a2, 21
-; RV32-NEXT:    srli a2, a2, 31
-; RV32-NEXT:    sb a2, 4(s0)
+; RV32-NEXT:    addi a2, s1, -1
+; RV32-NEXT:    slli a3, a2, 21
+; RV32-NEXT:    srli a3, a3, 31
+; RV32-NEXT:    sb a3, 4(s0)
 ; RV32-NEXT:    andi a1, a1, 2047
 ; RV32-NEXT:    andi a0, a0, 2047
 ; RV32-NEXT:    slli a0, a0, 11
 ; RV32-NEXT:    or a0, a1, a0
-; RV32-NEXT:    slli a1, s1, 22
-; RV32-NEXT:    sub a0, a0, a1
+; RV32-NEXT:    slli a1, a2, 22
+; RV32-NEXT:    or a0, a0, a1
 ; RV32-NEXT:    sw a0, 0(s0)
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -392,8 +391,8 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
 ; RV64-NEXT:    lwu a1, 0(s0)
 ; RV64-NEXT:    slli a0, a0, 32
 ; RV64-NEXT:    or a0, a1, a0
-; RV64-NEXT:    srli s1, a0, 11
-; RV64-NEXT:    srli s2, a0, 22
+; RV64-NEXT:    srli s1, a0, 22
+; RV64-NEXT:    srli s2, a0, 11
 ; RV64-NEXT:    andi a0, a0, 2047
 ; RV64-NEXT:    li a1, 683
 ; RV64-NEXT:    call __muldi3@plt
@@ -401,29 +400,29 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
 ; RV64-NEXT:    slli a0, a0, 53
 ; RV64-NEXT:    srli a0, a0, 54
 ; RV64-NEXT:    or a0, a0, a1
-; RV64-NEXT:    andi s3, a0, 2047
-; RV64-NEXT:    li a1, 819
+; RV64-NEXT:    andi a0, a0, 2047
+; RV64-NEXT:    sltiu s3, a0, 342
+; RV64-NEXT:    li a1, 1463
 ; RV64-NEXT:    mv a0, s2
 ; RV64-NEXT:    call __muldi3@plt
-; RV64-NEXT:    addiw a0, a0, -1638
+; RV64-NEXT:    addiw a0, a0, -1463
 ; RV64-NEXT:    andi a0, a0, 2047
-; RV64-NEXT:    li a1, 1
-; RV64-NEXT:    sltu s2, a1, a0
-; RV64-NEXT:    li a1, 1463
+; RV64-NEXT:    sltiu s2, a0, 293
+; RV64-NEXT:    li a1, 819
 ; RV64-NEXT:    mv a0, s1
 ; RV64-NEXT:    call __muldi3@plt
-; RV64-NEXT:    addiw a0, a0, -1463
+; RV64-NEXT:    addiw a0, a0, -1638
 ; RV64-NEXT:    andi a0, a0, 2047
-; RV64-NEXT:    sltiu a1, s3, 342
-; RV64-NEXT:    addiw a1, a1, -1
-; RV64-NEXT:    sltiu a0, a0, 293
-; RV64-NEXT:    addiw a0, a0, -1
+; RV64-NEXT:    sltiu a0, a0, 2
+; RV64-NEXT:    addiw a1, s3, -1
+; RV64-NEXT:    addi a0, a0, -1
+; RV64-NEXT:    addiw a2, s2, -1
 ; RV64-NEXT:    andi a1, a1, 2047
-; RV64-NEXT:    andi a0, a0, 2047
-; RV64-NEXT:    slli a0, a0, 11
+; RV64-NEXT:    andi a2, a2, 2047
+; RV64-NEXT:    slli a2, a2, 11
+; RV64-NEXT:    or a1, a1, a2
+; RV64-NEXT:    slli a0, a0, 22
 ; RV64-NEXT:    or a0, a1, a0
-; RV64-NEXT:    slli a1, s2, 22
-; RV64-NEXT:    sub a0, a0, a1
 ; RV64-NEXT:    sw a0, 0(s0)
 ; RV64-NEXT:    slli a0, a0, 31
 ; RV64-NEXT:    srli a0, a0, 63
@@ -452,22 +451,21 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
 ; RV32M-NEXT:    srli a2, a2, 22
 ; RV32M-NEXT:    or a2, a2, a4
 ; RV32M-NEXT:    andi a2, a2, 2047
+; RV32M-NEXT:    sltiu a2, a2, 342
 ; RV32M-NEXT:    li a4, 819
 ; RV32M-NEXT:    mul a1, a1, a4
 ; RV32M-NEXT:    addi a1, a1, -1638
 ; RV32M-NEXT:    andi a1, a1, 2047
-; RV32M-NEXT:    li a4, 1
-; RV32M-NEXT:    sltu a1, a4, a1
+; RV32M-NEXT:    sltiu a1, a1, 2
 ; RV32M-NEXT:    li a4, 1463
 ; RV32M-NEXT:    mul a3, a3, a4
 ; RV32M-NEXT:    addi a3, a3, -1463
 ; RV32M-NEXT:    andi a3, a3, 2047
-; RV32M-NEXT:    sltiu a2, a2, 342
-; RV32M-NEXT:    addi a2, a2, -1
 ; RV32M-NEXT:    sltiu a3, a3, 293
+; RV32M-NEXT:    addi a2, a2, -1
 ; RV32M-NEXT:    addi a3, a3, -1
-; RV32M-NEXT:    neg a4, a1
-; RV32M-NEXT:    slli a4, a4, 21
+; RV32M-NEXT:    addi a1, a1, -1
+; RV32M-NEXT:    slli a4, a1, 21
 ; RV32M-NEXT:    srli a4, a4, 31
 ; RV32M-NEXT:    sb a4, 4(a0)
 ; RV32M-NEXT:    andi a2, a2, 2047
@@ -475,7 +473,7 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
 ; RV32M-NEXT:    slli a3, a3, 11
 ; RV32M-NEXT:    or a2, a2, a3
 ; RV32M-NEXT:    slli a1, a1, 22
-; RV32M-NEXT:    sub a1, a2, a1
+; RV32M-NEXT:    or a1, a2, a1
 ; RV32M-NEXT:    sw a1, 0(a0)
 ; RV32M-NEXT:    ret
 ;
@@ -485,8 +483,8 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
 ; RV64M-NEXT:    lwu a2, 0(a0)
 ; RV64M-NEXT:    slli a1, a1, 32
 ; RV64M-NEXT:    or a1, a2, a1
-; RV64M-NEXT:    srli a2, a1, 11
-; RV64M-NEXT:    srli a3, a1, 22
+; RV64M-NEXT:    srli a2, a1, 22
+; RV64M-NEXT:    srli a3, a1, 11
 ; RV64M-NEXT:    andi a1, a1, 2047
 ; RV64M-NEXT:    li a4, 683
 ; RV64M-NEXT:    mul a1, a1, a4
@@ -495,26 +493,26 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
 ; RV64M-NEXT:    srli a1, a1, 54
 ; RV64M-NEXT:    or a1, a1, a4
 ; RV64M-NEXT:    andi a1, a1, 2047
-; RV64M-NEXT:    li a4, 819
+; RV64M-NEXT:    sltiu a1, a1, 342
+; RV64M-NEXT:    li a4, 1463
 ; RV64M-NEXT:    mulw a3, a3, a4
-; RV64M-NEXT:    addiw a3, a3, -1638
+; RV64M-NEXT:    addiw a3, a3, -1463
 ; RV64M-NEXT:    andi a3, a3, 2047
-; RV64M-NEXT:    li a4, 1
-; RV64M-NEXT:    sltu a3, a4, a3
-; RV64M-NEXT:    li a4, 1463
+; RV64M-NEXT:    sltiu a3, a3, 293
+; RV64M-NEXT:    li a4, 819
 ; RV64M-NEXT:    mulw a2, a2, a4
-; RV64M-NEXT:    addiw a2, a2, -1463
+; RV64M-NEXT:    addiw a2, a2, -1638
 ; RV64M-NEXT:    andi a2, a2, 2047
-; RV64M-NEXT:    sltiu a1, a1, 342
+; RV64M-NEXT:    sltiu a2, a2, 2
 ; RV64M-NEXT:    addiw a1, a1, -1
-; RV64M-NEXT:    sltiu a2, a2, 293
-; RV64M-NEXT:    addiw a2, a2, -1
+; RV64M-NEXT:    addi a2, a2, -1
+; RV64M-NEXT:    addiw a3, a3, -1
 ; RV64M-NEXT:    andi a1, a1, 2047
-; RV64M-NEXT:    andi a2, a2, 2047
-; RV64M-NEXT:    slli a2, a2, 11
+; RV64M-NEXT:    andi a3, a3, 2047
+; RV64M-NEXT:    slli a3, a3, 11
+; RV64M-NEXT:    or a1, a1, a3
+; RV64M-NEXT:    slli a2, a2, 22
 ; RV64M-NEXT:    or a1, a1, a2
-; RV64M-NEXT:    slli a2, a3, 22
-; RV64M-NEXT:    sub a1, a1, a2
 ; RV64M-NEXT:    sw a1, 0(a0)
 ; RV64M-NEXT:    slli a1, a1, 31
 ; RV64M-NEXT:    srli a1, a1, 63

diff --git a/llvm/test/CodeGen/RISCV/xaluo.ll b/llvm/test/CodeGen/RISCV/xaluo.ll
index 05301a1362684..922368aedc343 100644
--- a/llvm/test/CodeGen/RISCV/xaluo.ll
+++ b/llvm/test/CodeGen/RISCV/xaluo.ll
@@ -1670,8 +1670,8 @@ define i1 @saddo.not.i64(i64 %v1, i64 %v2) {
 ; RV32-NEXT:    xor a1, a1, a3
 ; RV32-NEXT:    not a1, a1
 ; RV32-NEXT:    and a0, a1, a0
-; RV32-NEXT:    li a1, -1
-; RV32-NEXT:    slt a0, a1, a0
+; RV32-NEXT:    slti a0, a0, 0
+; RV32-NEXT:    xori a0, a0, 1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: saddo.not.i64:
@@ -1693,8 +1693,8 @@ define i1 @saddo.not.i64(i64 %v1, i64 %v2) {
 ; RV32ZBA-NEXT:    xor a1, a1, a3
 ; RV32ZBA-NEXT:    not a1, a1
 ; RV32ZBA-NEXT:    and a0, a1, a0
-; RV32ZBA-NEXT:    li a1, -1
-; RV32ZBA-NEXT:    slt a0, a1, a0
+; RV32ZBA-NEXT:    slti a0, a0, 0
+; RV32ZBA-NEXT:    xori a0, a0, 1
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: saddo.not.i64:


        

