[llvm] b894a9f - [RISCV] Optimize select_cc after fp compare expansion

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Thu Jan 14 13:48:38 PST 2021


Author: Craig Topper
Date: 2021-01-14T13:41:40-08:00
New Revision: b894a9fb237345db64d14ce3881d3195e124df0d

URL: https://github.com/llvm/llvm-project/commit/b894a9fb237345db64d14ce3881d3195e124df0d
DIFF: https://github.com/llvm/llvm-project/commit/b894a9fb237345db64d14ce3881d3195e124df0d.diff

LOG: [RISCV] Optimize select_cc after fp compare expansion

Some FP compares expand to a sequence ending with (xor X, 1) to invert the
result. If the consumer is a select_cc, we can often get rid of this xor by
fixing up the select_cc condition instead.

This patch combines (select_cc (xor X, 1), 0, setne, trueV, falseV) ->
(select_cc X, 0, seteq, trueV, falseV) when we can prove X is 0/1.
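
For illustration, a minimal IR pattern of the kind covered by the updated
tests (a sketch in the spirit of the modified test files; the exact test
bodies are not shown in this diff) is:

    define double @select_fcmp_ugt(double %a, double %b) nounwind {
      %cmp = fcmp ugt double %a, %b
      %sel = select i1 %cmp, double %a, double %b
      ret double %sel
    }

Legalizing the unordered compare produces an ordered compare followed by
(xor X, 1), and the resulting select_cc previously tested the xored value
with setne (xori + bnez). With this combine the xor is dropped and the
branch becomes a beqz on the raw compare result, as the test diffs below
show.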

Reviewed By: lenary

Differential Revision: https://reviews.llvm.org/D94546

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.h
    llvm/test/CodeGen/RISCV/double-select-fcmp.ll
    llvm/test/CodeGen/RISCV/float-select-fcmp.ll
    llvm/test/CodeGen/RISCV/half-select-fcmp.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 74f936b13c23..9b034e594f63 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1828,6 +1828,27 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
     if (auto GORC = combineORToGORC(SDValue(N, 0), DCI.DAG, Subtarget))
       return GORC;
     break;
+  case RISCVISD::SELECT_CC: {
+    // Transform
+    // (select_cc (xor X, 1), 0, setne, trueV, falseV) ->
+    // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1.
+    // This can occur when legalizing some floating point comparisons.
+    SDValue LHS = N->getOperand(0);
+    SDValue RHS = N->getOperand(1);
+    auto CCVal = static_cast<ISD::CondCode>(N->getConstantOperandVal(2));
+    APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
+    if ((CCVal == ISD::SETNE || CCVal == ISD::SETEQ) && isNullConstant(RHS) &&
+        LHS.getOpcode() == ISD::XOR && isOneConstant(LHS.getOperand(1)) &&
+        DAG.MaskedValueIsZero(LHS.getOperand(0), Mask)) {
+      SDLoc DL(N);
+      CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
+      SDValue TargetCC = DAG.getConstant(CCVal, DL, Subtarget.getXLenVT());
+      return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
+                         {LHS.getOperand(0), RHS, TargetCC, N->getOperand(3),
+                          N->getOperand(4)});
+    }
+    break;
+  }
   }
 
   return SDValue();

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index d7de7e4016b3..081d62a33ef0 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -28,6 +28,12 @@ enum NodeType : unsigned {
   SRET_FLAG,
   MRET_FLAG,
   CALL,
+  /// Select with condition operator - This selects between a true value and
+  /// a false value (ops #3 and #4) based on the boolean result of comparing
+  /// the lhs and rhs (ops #0 and #1) of a conditional expression with the
+  /// condition code in op #2, a XLenVT constant from the ISD::CondCode enum.
+  /// The lhs and rhs are XLenVT integers. The true and false values can be
+  /// integer or floating point.
   SELECT_CC,
   BuildPairF64,
   SplitF64,

diff --git a/llvm/test/CodeGen/RISCV/double-select-fcmp.ll b/llvm/test/CodeGen/RISCV/double-select-fcmp.ll
index 0481ca9ba90e..aa59ef06c587 100644
--- a/llvm/test/CodeGen/RISCV/double-select-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/double-select-fcmp.ll
@@ -300,8 +300,7 @@ define double @select_fcmp_ueq(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    flt.d a0, ft1, ft0
 ; RV32IFD-NEXT:    flt.d a1, ft0, ft1
 ; RV32IFD-NEXT:    or a0, a1, a0
-; RV32IFD-NEXT:    xori a0, a0, 1
-; RV32IFD-NEXT:    bnez a0, .LBB8_2
+; RV32IFD-NEXT:    beqz a0, .LBB8_2
 ; RV32IFD-NEXT:  # %bb.1:
 ; RV32IFD-NEXT:    fmv.d ft1, ft0
 ; RV32IFD-NEXT:  .LBB8_2:
@@ -318,8 +317,7 @@ define double @select_fcmp_ueq(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:    flt.d a0, ft0, ft1
 ; RV64IFD-NEXT:    flt.d a1, ft1, ft0
 ; RV64IFD-NEXT:    or a0, a1, a0
-; RV64IFD-NEXT:    xori a0, a0, 1
-; RV64IFD-NEXT:    bnez a0, .LBB8_2
+; RV64IFD-NEXT:    beqz a0, .LBB8_2
 ; RV64IFD-NEXT:  # %bb.1:
 ; RV64IFD-NEXT:    fmv.d ft0, ft1
 ; RV64IFD-NEXT:  .LBB8_2:
@@ -341,8 +339,7 @@ define double @select_fcmp_ugt(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    sw a1, 12(sp)
 ; RV32IFD-NEXT:    fld ft1, 8(sp)
 ; RV32IFD-NEXT:    fle.d a0, ft1, ft0
-; RV32IFD-NEXT:    xori a0, a0, 1
-; RV32IFD-NEXT:    bnez a0, .LBB9_2
+; RV32IFD-NEXT:    beqz a0, .LBB9_2
 ; RV32IFD-NEXT:  # %bb.1:
 ; RV32IFD-NEXT:    fmv.d ft1, ft0
 ; RV32IFD-NEXT:  .LBB9_2:
@@ -357,8 +354,7 @@ define double @select_fcmp_ugt(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:    fmv.d.x ft1, a1
 ; RV64IFD-NEXT:    fmv.d.x ft0, a0
 ; RV64IFD-NEXT:    fle.d a0, ft0, ft1
-; RV64IFD-NEXT:    xori a0, a0, 1
-; RV64IFD-NEXT:    bnez a0, .LBB9_2
+; RV64IFD-NEXT:    beqz a0, .LBB9_2
 ; RV64IFD-NEXT:  # %bb.1:
 ; RV64IFD-NEXT:    fmv.d ft0, ft1
 ; RV64IFD-NEXT:  .LBB9_2:
@@ -380,8 +376,7 @@ define double @select_fcmp_uge(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    sw a1, 12(sp)
 ; RV32IFD-NEXT:    fld ft1, 8(sp)
 ; RV32IFD-NEXT:    flt.d a0, ft1, ft0
-; RV32IFD-NEXT:    xori a0, a0, 1
-; RV32IFD-NEXT:    bnez a0, .LBB10_2
+; RV32IFD-NEXT:    beqz a0, .LBB10_2
 ; RV32IFD-NEXT:  # %bb.1:
 ; RV32IFD-NEXT:    fmv.d ft1, ft0
 ; RV32IFD-NEXT:  .LBB10_2:
@@ -396,8 +391,7 @@ define double @select_fcmp_uge(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:    fmv.d.x ft1, a1
 ; RV64IFD-NEXT:    fmv.d.x ft0, a0
 ; RV64IFD-NEXT:    flt.d a0, ft0, ft1
-; RV64IFD-NEXT:    xori a0, a0, 1
-; RV64IFD-NEXT:    bnez a0, .LBB10_2
+; RV64IFD-NEXT:    beqz a0, .LBB10_2
 ; RV64IFD-NEXT:  # %bb.1:
 ; RV64IFD-NEXT:    fmv.d ft0, ft1
 ; RV64IFD-NEXT:  .LBB10_2:
@@ -419,8 +413,7 @@ define double @select_fcmp_ult(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    sw a3, 12(sp)
 ; RV32IFD-NEXT:    fld ft1, 8(sp)
 ; RV32IFD-NEXT:    fle.d a0, ft1, ft0
-; RV32IFD-NEXT:    xori a0, a0, 1
-; RV32IFD-NEXT:    bnez a0, .LBB11_2
+; RV32IFD-NEXT:    beqz a0, .LBB11_2
 ; RV32IFD-NEXT:  # %bb.1:
 ; RV32IFD-NEXT:    fmv.d ft0, ft1
 ; RV32IFD-NEXT:  .LBB11_2:
@@ -435,8 +428,7 @@ define double @select_fcmp_ult(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:    fmv.d.x ft0, a0
 ; RV64IFD-NEXT:    fmv.d.x ft1, a1
 ; RV64IFD-NEXT:    fle.d a0, ft1, ft0
-; RV64IFD-NEXT:    xori a0, a0, 1
-; RV64IFD-NEXT:    bnez a0, .LBB11_2
+; RV64IFD-NEXT:    beqz a0, .LBB11_2
 ; RV64IFD-NEXT:  # %bb.1:
 ; RV64IFD-NEXT:    fmv.d ft0, ft1
 ; RV64IFD-NEXT:  .LBB11_2:
@@ -458,8 +450,7 @@ define double @select_fcmp_ule(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    sw a3, 12(sp)
 ; RV32IFD-NEXT:    fld ft1, 8(sp)
 ; RV32IFD-NEXT:    flt.d a0, ft1, ft0
-; RV32IFD-NEXT:    xori a0, a0, 1
-; RV32IFD-NEXT:    bnez a0, .LBB12_2
+; RV32IFD-NEXT:    beqz a0, .LBB12_2
 ; RV32IFD-NEXT:  # %bb.1:
 ; RV32IFD-NEXT:    fmv.d ft0, ft1
 ; RV32IFD-NEXT:  .LBB12_2:
@@ -474,8 +465,7 @@ define double @select_fcmp_ule(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:    fmv.d.x ft0, a0
 ; RV64IFD-NEXT:    fmv.d.x ft1, a1
 ; RV64IFD-NEXT:    flt.d a0, ft1, ft0
-; RV64IFD-NEXT:    xori a0, a0, 1
-; RV64IFD-NEXT:    bnez a0, .LBB12_2
+; RV64IFD-NEXT:    beqz a0, .LBB12_2
 ; RV64IFD-NEXT:  # %bb.1:
 ; RV64IFD-NEXT:    fmv.d ft0, ft1
 ; RV64IFD-NEXT:  .LBB12_2:
@@ -497,8 +487,7 @@ define double @select_fcmp_une(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    sw a1, 12(sp)
 ; RV32IFD-NEXT:    fld ft1, 8(sp)
 ; RV32IFD-NEXT:    feq.d a0, ft1, ft0
-; RV32IFD-NEXT:    xori a0, a0, 1
-; RV32IFD-NEXT:    bnez a0, .LBB13_2
+; RV32IFD-NEXT:    beqz a0, .LBB13_2
 ; RV32IFD-NEXT:  # %bb.1:
 ; RV32IFD-NEXT:    fmv.d ft1, ft0
 ; RV32IFD-NEXT:  .LBB13_2:
@@ -513,8 +502,7 @@ define double @select_fcmp_une(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:    fmv.d.x ft1, a1
 ; RV64IFD-NEXT:    fmv.d.x ft0, a0
 ; RV64IFD-NEXT:    feq.d a0, ft0, ft1
-; RV64IFD-NEXT:    xori a0, a0, 1
-; RV64IFD-NEXT:    bnez a0, .LBB13_2
+; RV64IFD-NEXT:    beqz a0, .LBB13_2
 ; RV64IFD-NEXT:  # %bb.1:
 ; RV64IFD-NEXT:    fmv.d ft0, ft1
 ; RV64IFD-NEXT:  .LBB13_2:
@@ -526,7 +514,6 @@ define double @select_fcmp_une(double %a, double %b) nounwind {
 }
 
 define double @select_fcmp_uno(double %a, double %b) nounwind {
-; TODO: sltiu+bne could be optimized
 ; RV32IFD-LABEL: select_fcmp_uno:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
@@ -539,8 +526,7 @@ define double @select_fcmp_uno(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    feq.d a0, ft1, ft1
 ; RV32IFD-NEXT:    feq.d a1, ft0, ft0
 ; RV32IFD-NEXT:    and a0, a1, a0
-; RV32IFD-NEXT:    xori a0, a0, 1
-; RV32IFD-NEXT:    bnez a0, .LBB14_2
+; RV32IFD-NEXT:    beqz a0, .LBB14_2
 ; RV32IFD-NEXT:  # %bb.1:
 ; RV32IFD-NEXT:    fmv.d ft0, ft1
 ; RV32IFD-NEXT:  .LBB14_2:
@@ -557,8 +543,7 @@ define double @select_fcmp_uno(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:    feq.d a0, ft1, ft1
 ; RV64IFD-NEXT:    feq.d a1, ft0, ft0
 ; RV64IFD-NEXT:    and a0, a1, a0
-; RV64IFD-NEXT:    xori a0, a0, 1
-; RV64IFD-NEXT:    bnez a0, .LBB14_2
+; RV64IFD-NEXT:    beqz a0, .LBB14_2
 ; RV64IFD-NEXT:  # %bb.1:
 ; RV64IFD-NEXT:    fmv.d ft0, ft1
 ; RV64IFD-NEXT:  .LBB14_2:

diff --git a/llvm/test/CodeGen/RISCV/float-select-fcmp.ll b/llvm/test/CodeGen/RISCV/float-select-fcmp.ll
index 73b7fd05dc19..17ef878bc264 100644
--- a/llvm/test/CodeGen/RISCV/float-select-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/float-select-fcmp.ll
@@ -238,8 +238,7 @@ define float @select_fcmp_ueq(float %a, float %b) nounwind {
 ; RV32IF-NEXT:    flt.s a0, ft0, ft1
 ; RV32IF-NEXT:    flt.s a1, ft1, ft0
 ; RV32IF-NEXT:    or a0, a1, a0
-; RV32IF-NEXT:    xori a0, a0, 1
-; RV32IF-NEXT:    bnez a0, .LBB8_2
+; RV32IF-NEXT:    beqz a0, .LBB8_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    fmv.s ft0, ft1
 ; RV32IF-NEXT:  .LBB8_2:
@@ -253,8 +252,7 @@ define float @select_fcmp_ueq(float %a, float %b) nounwind {
 ; RV64IF-NEXT:    flt.s a0, ft0, ft1
 ; RV64IF-NEXT:    flt.s a1, ft1, ft0
 ; RV64IF-NEXT:    or a0, a1, a0
-; RV64IF-NEXT:    xori a0, a0, 1
-; RV64IF-NEXT:    bnez a0, .LBB8_2
+; RV64IF-NEXT:    beqz a0, .LBB8_2
 ; RV64IF-NEXT:  # %bb.1:
 ; RV64IF-NEXT:    fmv.s ft0, ft1
 ; RV64IF-NEXT:  .LBB8_2:
@@ -271,8 +269,7 @@ define float @select_fcmp_ugt(float %a, float %b) nounwind {
 ; RV32IF-NEXT:    fmv.w.x ft1, a1
 ; RV32IF-NEXT:    fmv.w.x ft0, a0
 ; RV32IF-NEXT:    fle.s a0, ft0, ft1
-; RV32IF-NEXT:    xori a0, a0, 1
-; RV32IF-NEXT:    bnez a0, .LBB9_2
+; RV32IF-NEXT:    beqz a0, .LBB9_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    fmv.s ft0, ft1
 ; RV32IF-NEXT:  .LBB9_2:
@@ -284,8 +281,7 @@ define float @select_fcmp_ugt(float %a, float %b) nounwind {
 ; RV64IF-NEXT:    fmv.w.x ft1, a1
 ; RV64IF-NEXT:    fmv.w.x ft0, a0
 ; RV64IF-NEXT:    fle.s a0, ft0, ft1
-; RV64IF-NEXT:    xori a0, a0, 1
-; RV64IF-NEXT:    bnez a0, .LBB9_2
+; RV64IF-NEXT:    beqz a0, .LBB9_2
 ; RV64IF-NEXT:  # %bb.1:
 ; RV64IF-NEXT:    fmv.s ft0, ft1
 ; RV64IF-NEXT:  .LBB9_2:
@@ -302,8 +298,7 @@ define float @select_fcmp_uge(float %a, float %b) nounwind {
 ; RV32IF-NEXT:    fmv.w.x ft1, a1
 ; RV32IF-NEXT:    fmv.w.x ft0, a0
 ; RV32IF-NEXT:    flt.s a0, ft0, ft1
-; RV32IF-NEXT:    xori a0, a0, 1
-; RV32IF-NEXT:    bnez a0, .LBB10_2
+; RV32IF-NEXT:    beqz a0, .LBB10_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    fmv.s ft0, ft1
 ; RV32IF-NEXT:  .LBB10_2:
@@ -315,8 +310,7 @@ define float @select_fcmp_uge(float %a, float %b) nounwind {
 ; RV64IF-NEXT:    fmv.w.x ft1, a1
 ; RV64IF-NEXT:    fmv.w.x ft0, a0
 ; RV64IF-NEXT:    flt.s a0, ft0, ft1
-; RV64IF-NEXT:    xori a0, a0, 1
-; RV64IF-NEXT:    bnez a0, .LBB10_2
+; RV64IF-NEXT:    beqz a0, .LBB10_2
 ; RV64IF-NEXT:  # %bb.1:
 ; RV64IF-NEXT:    fmv.s ft0, ft1
 ; RV64IF-NEXT:  .LBB10_2:
@@ -333,8 +327,7 @@ define float @select_fcmp_ult(float %a, float %b) nounwind {
 ; RV32IF-NEXT:    fmv.w.x ft0, a0
 ; RV32IF-NEXT:    fmv.w.x ft1, a1
 ; RV32IF-NEXT:    fle.s a0, ft1, ft0
-; RV32IF-NEXT:    xori a0, a0, 1
-; RV32IF-NEXT:    bnez a0, .LBB11_2
+; RV32IF-NEXT:    beqz a0, .LBB11_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    fmv.s ft0, ft1
 ; RV32IF-NEXT:  .LBB11_2:
@@ -346,8 +339,7 @@ define float @select_fcmp_ult(float %a, float %b) nounwind {
 ; RV64IF-NEXT:    fmv.w.x ft0, a0
 ; RV64IF-NEXT:    fmv.w.x ft1, a1
 ; RV64IF-NEXT:    fle.s a0, ft1, ft0
-; RV64IF-NEXT:    xori a0, a0, 1
-; RV64IF-NEXT:    bnez a0, .LBB11_2
+; RV64IF-NEXT:    beqz a0, .LBB11_2
 ; RV64IF-NEXT:  # %bb.1:
 ; RV64IF-NEXT:    fmv.s ft0, ft1
 ; RV64IF-NEXT:  .LBB11_2:
@@ -364,8 +356,7 @@ define float @select_fcmp_ule(float %a, float %b) nounwind {
 ; RV32IF-NEXT:    fmv.w.x ft0, a0
 ; RV32IF-NEXT:    fmv.w.x ft1, a1
 ; RV32IF-NEXT:    flt.s a0, ft1, ft0
-; RV32IF-NEXT:    xori a0, a0, 1
-; RV32IF-NEXT:    bnez a0, .LBB12_2
+; RV32IF-NEXT:    beqz a0, .LBB12_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    fmv.s ft0, ft1
 ; RV32IF-NEXT:  .LBB12_2:
@@ -377,8 +368,7 @@ define float @select_fcmp_ule(float %a, float %b) nounwind {
 ; RV64IF-NEXT:    fmv.w.x ft0, a0
 ; RV64IF-NEXT:    fmv.w.x ft1, a1
 ; RV64IF-NEXT:    flt.s a0, ft1, ft0
-; RV64IF-NEXT:    xori a0, a0, 1
-; RV64IF-NEXT:    bnez a0, .LBB12_2
+; RV64IF-NEXT:    beqz a0, .LBB12_2
 ; RV64IF-NEXT:  # %bb.1:
 ; RV64IF-NEXT:    fmv.s ft0, ft1
 ; RV64IF-NEXT:  .LBB12_2:
@@ -395,8 +385,7 @@ define float @select_fcmp_une(float %a, float %b) nounwind {
 ; RV32IF-NEXT:    fmv.w.x ft1, a1
 ; RV32IF-NEXT:    fmv.w.x ft0, a0
 ; RV32IF-NEXT:    feq.s a0, ft0, ft1
-; RV32IF-NEXT:    xori a0, a0, 1
-; RV32IF-NEXT:    bnez a0, .LBB13_2
+; RV32IF-NEXT:    beqz a0, .LBB13_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    fmv.s ft0, ft1
 ; RV32IF-NEXT:  .LBB13_2:
@@ -408,8 +397,7 @@ define float @select_fcmp_une(float %a, float %b) nounwind {
 ; RV64IF-NEXT:    fmv.w.x ft1, a1
 ; RV64IF-NEXT:    fmv.w.x ft0, a0
 ; RV64IF-NEXT:    feq.s a0, ft0, ft1
-; RV64IF-NEXT:    xori a0, a0, 1
-; RV64IF-NEXT:    bnez a0, .LBB13_2
+; RV64IF-NEXT:    beqz a0, .LBB13_2
 ; RV64IF-NEXT:  # %bb.1:
 ; RV64IF-NEXT:    fmv.s ft0, ft1
 ; RV64IF-NEXT:  .LBB13_2:
@@ -421,7 +409,6 @@ define float @select_fcmp_une(float %a, float %b) nounwind {
 }
 
 define float @select_fcmp_uno(float %a, float %b) nounwind {
-; TODO: sltiu+bne could be optimized
 ; RV32IF-LABEL: select_fcmp_uno:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    fmv.w.x ft0, a0
@@ -429,8 +416,7 @@ define float @select_fcmp_uno(float %a, float %b) nounwind {
 ; RV32IF-NEXT:    feq.s a0, ft1, ft1
 ; RV32IF-NEXT:    feq.s a1, ft0, ft0
 ; RV32IF-NEXT:    and a0, a1, a0
-; RV32IF-NEXT:    xori a0, a0, 1
-; RV32IF-NEXT:    bnez a0, .LBB14_2
+; RV32IF-NEXT:    beqz a0, .LBB14_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    fmv.s ft0, ft1
 ; RV32IF-NEXT:  .LBB14_2:
@@ -444,8 +430,7 @@ define float @select_fcmp_uno(float %a, float %b) nounwind {
 ; RV64IF-NEXT:    feq.s a0, ft1, ft1
 ; RV64IF-NEXT:    feq.s a1, ft0, ft0
 ; RV64IF-NEXT:    and a0, a1, a0
-; RV64IF-NEXT:    xori a0, a0, 1
-; RV64IF-NEXT:    bnez a0, .LBB14_2
+; RV64IF-NEXT:    beqz a0, .LBB14_2
 ; RV64IF-NEXT:  # %bb.1:
 ; RV64IF-NEXT:    fmv.s ft0, ft1
 ; RV64IF-NEXT:  .LBB14_2:

diff --git a/llvm/test/CodeGen/RISCV/half-select-fcmp.ll b/llvm/test/CodeGen/RISCV/half-select-fcmp.ll
index 56a319fc156c..365b718d82b2 100644
--- a/llvm/test/CodeGen/RISCV/half-select-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/half-select-fcmp.ll
@@ -194,8 +194,7 @@ define half @select_fcmp_ueq(half %a, half %b) nounwind {
 ; RV32IZFH-NEXT:    flt.h a0, fa0, fa1
 ; RV32IZFH-NEXT:    flt.h a1, fa1, fa0
 ; RV32IZFH-NEXT:    or a0, a1, a0
-; RV32IZFH-NEXT:    xori a0, a0, 1
-; RV32IZFH-NEXT:    bnez a0, .LBB8_2
+; RV32IZFH-NEXT:    beqz a0, .LBB8_2
 ; RV32IZFH-NEXT:  # %bb.1:
 ; RV32IZFH-NEXT:    fmv.h fa0, fa1
 ; RV32IZFH-NEXT:  .LBB8_2:
@@ -206,8 +205,7 @@ define half @select_fcmp_ueq(half %a, half %b) nounwind {
 ; RV64IZFH-NEXT:    flt.h a0, fa0, fa1
 ; RV64IZFH-NEXT:    flt.h a1, fa1, fa0
 ; RV64IZFH-NEXT:    or a0, a1, a0
-; RV64IZFH-NEXT:    xori a0, a0, 1
-; RV64IZFH-NEXT:    bnez a0, .LBB8_2
+; RV64IZFH-NEXT:    beqz a0, .LBB8_2
 ; RV64IZFH-NEXT:  # %bb.1:
 ; RV64IZFH-NEXT:    fmv.h fa0, fa1
 ; RV64IZFH-NEXT:  .LBB8_2:
@@ -221,8 +219,7 @@ define half @select_fcmp_ugt(half %a, half %b) nounwind {
 ; RV32IZFH-LABEL: select_fcmp_ugt:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    fle.h a0, fa0, fa1
-; RV32IZFH-NEXT:    xori a0, a0, 1
-; RV32IZFH-NEXT:    bnez a0, .LBB9_2
+; RV32IZFH-NEXT:    beqz a0, .LBB9_2
 ; RV32IZFH-NEXT:  # %bb.1:
 ; RV32IZFH-NEXT:    fmv.h fa0, fa1
 ; RV32IZFH-NEXT:  .LBB9_2:
@@ -231,8 +228,7 @@ define half @select_fcmp_ugt(half %a, half %b) nounwind {
 ; RV64IZFH-LABEL: select_fcmp_ugt:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    fle.h a0, fa0, fa1
-; RV64IZFH-NEXT:    xori a0, a0, 1
-; RV64IZFH-NEXT:    bnez a0, .LBB9_2
+; RV64IZFH-NEXT:    beqz a0, .LBB9_2
 ; RV64IZFH-NEXT:  # %bb.1:
 ; RV64IZFH-NEXT:    fmv.h fa0, fa1
 ; RV64IZFH-NEXT:  .LBB9_2:
@@ -246,8 +242,7 @@ define half @select_fcmp_uge(half %a, half %b) nounwind {
 ; RV32IZFH-LABEL: select_fcmp_uge:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    flt.h a0, fa0, fa1
-; RV32IZFH-NEXT:    xori a0, a0, 1
-; RV32IZFH-NEXT:    bnez a0, .LBB10_2
+; RV32IZFH-NEXT:    beqz a0, .LBB10_2
 ; RV32IZFH-NEXT:  # %bb.1:
 ; RV32IZFH-NEXT:    fmv.h fa0, fa1
 ; RV32IZFH-NEXT:  .LBB10_2:
@@ -256,8 +251,7 @@ define half @select_fcmp_uge(half %a, half %b) nounwind {
 ; RV64IZFH-LABEL: select_fcmp_uge:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    flt.h a0, fa0, fa1
-; RV64IZFH-NEXT:    xori a0, a0, 1
-; RV64IZFH-NEXT:    bnez a0, .LBB10_2
+; RV64IZFH-NEXT:    beqz a0, .LBB10_2
 ; RV64IZFH-NEXT:  # %bb.1:
 ; RV64IZFH-NEXT:    fmv.h fa0, fa1
 ; RV64IZFH-NEXT:  .LBB10_2:
@@ -271,8 +265,7 @@ define half @select_fcmp_ult(half %a, half %b) nounwind {
 ; RV32IZFH-LABEL: select_fcmp_ult:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    fle.h a0, fa1, fa0
-; RV32IZFH-NEXT:    xori a0, a0, 1
-; RV32IZFH-NEXT:    bnez a0, .LBB11_2
+; RV32IZFH-NEXT:    beqz a0, .LBB11_2
 ; RV32IZFH-NEXT:  # %bb.1:
 ; RV32IZFH-NEXT:    fmv.h fa0, fa1
 ; RV32IZFH-NEXT:  .LBB11_2:
@@ -281,8 +274,7 @@ define half @select_fcmp_ult(half %a, half %b) nounwind {
 ; RV64IZFH-LABEL: select_fcmp_ult:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    fle.h a0, fa1, fa0
-; RV64IZFH-NEXT:    xori a0, a0, 1
-; RV64IZFH-NEXT:    bnez a0, .LBB11_2
+; RV64IZFH-NEXT:    beqz a0, .LBB11_2
 ; RV64IZFH-NEXT:  # %bb.1:
 ; RV64IZFH-NEXT:    fmv.h fa0, fa1
 ; RV64IZFH-NEXT:  .LBB11_2:
@@ -296,8 +288,7 @@ define half @select_fcmp_ule(half %a, half %b) nounwind {
 ; RV32IZFH-LABEL: select_fcmp_ule:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    flt.h a0, fa1, fa0
-; RV32IZFH-NEXT:    xori a0, a0, 1
-; RV32IZFH-NEXT:    bnez a0, .LBB12_2
+; RV32IZFH-NEXT:    beqz a0, .LBB12_2
 ; RV32IZFH-NEXT:  # %bb.1:
 ; RV32IZFH-NEXT:    fmv.h fa0, fa1
 ; RV32IZFH-NEXT:  .LBB12_2:
@@ -306,8 +297,7 @@ define half @select_fcmp_ule(half %a, half %b) nounwind {
 ; RV64IZFH-LABEL: select_fcmp_ule:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    flt.h a0, fa1, fa0
-; RV64IZFH-NEXT:    xori a0, a0, 1
-; RV64IZFH-NEXT:    bnez a0, .LBB12_2
+; RV64IZFH-NEXT:    beqz a0, .LBB12_2
 ; RV64IZFH-NEXT:  # %bb.1:
 ; RV64IZFH-NEXT:    fmv.h fa0, fa1
 ; RV64IZFH-NEXT:  .LBB12_2:
@@ -321,8 +311,7 @@ define half @select_fcmp_une(half %a, half %b) nounwind {
 ; RV32IZFH-LABEL: select_fcmp_une:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    feq.h a0, fa0, fa1
-; RV32IZFH-NEXT:    xori a0, a0, 1
-; RV32IZFH-NEXT:    bnez a0, .LBB13_2
+; RV32IZFH-NEXT:    beqz a0, .LBB13_2
 ; RV32IZFH-NEXT:  # %bb.1:
 ; RV32IZFH-NEXT:    fmv.h fa0, fa1
 ; RV32IZFH-NEXT:  .LBB13_2:
@@ -331,8 +320,7 @@ define half @select_fcmp_une(half %a, half %b) nounwind {
 ; RV64IZFH-LABEL: select_fcmp_une:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    feq.h a0, fa0, fa1
-; RV64IZFH-NEXT:    xori a0, a0, 1
-; RV64IZFH-NEXT:    bnez a0, .LBB13_2
+; RV64IZFH-NEXT:    beqz a0, .LBB13_2
 ; RV64IZFH-NEXT:  # %bb.1:
 ; RV64IZFH-NEXT:    fmv.h fa0, fa1
 ; RV64IZFH-NEXT:  .LBB13_2:
@@ -343,14 +331,12 @@ define half @select_fcmp_une(half %a, half %b) nounwind {
 }
 
 define half @select_fcmp_uno(half %a, half %b) nounwind {
-; TODO: sltiu+bne could be optimized
 ; RV32IZFH-LABEL: select_fcmp_uno:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    feq.h a0, fa1, fa1
 ; RV32IZFH-NEXT:    feq.h a1, fa0, fa0
 ; RV32IZFH-NEXT:    and a0, a1, a0
-; RV32IZFH-NEXT:    xori a0, a0, 1
-; RV32IZFH-NEXT:    bnez a0, .LBB14_2
+; RV32IZFH-NEXT:    beqz a0, .LBB14_2
 ; RV32IZFH-NEXT:  # %bb.1:
 ; RV32IZFH-NEXT:    fmv.h fa0, fa1
 ; RV32IZFH-NEXT:  .LBB14_2:
@@ -361,8 +347,7 @@ define half @select_fcmp_uno(half %a, half %b) nounwind {
 ; RV64IZFH-NEXT:    feq.h a0, fa1, fa1
 ; RV64IZFH-NEXT:    feq.h a1, fa0, fa0
 ; RV64IZFH-NEXT:    and a0, a1, a0
-; RV64IZFH-NEXT:    xori a0, a0, 1
-; RV64IZFH-NEXT:    bnez a0, .LBB14_2
+; RV64IZFH-NEXT:    beqz a0, .LBB14_2
 ; RV64IZFH-NEXT:  # %bb.1:
 ; RV64IZFH-NEXT:    fmv.h fa0, fa1
 ; RV64IZFH-NEXT:  .LBB14_2:

More information about the llvm-commits mailing list