[llvm] fdbd5d3 - [RISCV] Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) -> (select_cc X, Y, eq/ne, trueV, falseV)

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Sun Mar 7 09:36:34 PST 2021


Author: Craig Topper
Date: 2021-03-07T09:29:55-08:00
New Revision: fdbd5d32060c50313493fbdbbea74dc9ea73b591

URL: https://github.com/llvm/llvm-project/commit/fdbd5d32060c50313493fbdbbea74dc9ea73b591
DIFF: https://github.com/llvm/llvm-project/commit/fdbd5d32060c50313493fbdbbea74dc9ea73b591.diff

LOG: [RISCV] Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) -> (select_cc X, Y, eq/ne, trueV, falseV)

This pattern occurs when the lowering of overflow operations
introduces an xor after the select_cc has already been formed.
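
For illustration (this snippet is not part of the commit), a C helper
like the following is the kind of source that produces the shape in
question: the expansion of the overflow intrinsic xors two sign tests,
and that xor then feeds the SELECT_CC that picks the saturated result.

    #include <limits.h>

    /* Hypothetical example: clang lowers __builtin_add_overflow via
       llvm.sadd.with.overflow; its expansion xors two sign comparisons,
       and the xor result feeds a SELECT_CC compared against zero. */
    int sadd_sat(int x, int y) {
      int sum;
      if (__builtin_add_overflow(x, y, &sum))
        return x < 0 ? INT_MIN : INT_MAX; /* saturate on overflow */
      return sum;
    }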

I had to rework another combine that looked for a select_cc of an xor
with 1. That xor will now get combined away first, so we just need to
look for the RHS of the select_cc being the constant 1.
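
As a hedged sketch of why that rework is sound: for a value known to be
0 or 1, testing (x ^ 1) != 0 is the same predicate as x == 0. Once the
new fold rewrites (select_cc (xor X, 1), 0, setne, ...) into
(select_cc X, 1, setne, ...), the old combine only needs to recognize
the constant-1 RHS.

    /* Hypothetical example: flag is known 0/1 (e.g. the result of a
       legalized FP compare). (flag ^ 1) != 0 and flag == 0 select the
       same arm, which is the equivalence the reworked combine uses. */
    int pick(int flag, int a, int b) {
      return ((flag ^ 1) != 0) ? a : b; /* same as: flag == 0 ? a : b */
    }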

Reviewed By: luismarques

Differential Revision: https://reviews.llvm.org/D98130

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/sadd_sat.ll
    llvm/test/CodeGen/RISCV/sadd_sat_plus.ll
    llvm/test/CodeGen/RISCV/ssub_sat.ll
    llvm/test/CodeGen/RISCV/ssub_sat_plus.ll
    llvm/test/CodeGen/RISCV/xaluo.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 81c118a2e40d..e0a42859f1ed 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -3789,23 +3789,32 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
     break;
   case RISCVISD::SELECT_CC: {
     // Transform
-    // (select_cc (xor X, 1), 0, setne, trueV, falseV) ->
-    // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1.
-    // This can occur when legalizing some floating point comparisons.
     SDValue LHS = N->getOperand(0);
     SDValue RHS = N->getOperand(1);
     auto CCVal = static_cast<ISD::CondCode>(N->getConstantOperandVal(2));
+    if (!ISD::isIntEqualitySetCC(CCVal))
+      break;
+    // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) ->
+    //      (select_cc X, Y, eq/ne, trueV, falseV)
+    if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
+      return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0),
+                         {LHS.getOperand(0), LHS.getOperand(1),
+                          N->getOperand(2), N->getOperand(3),
+                          N->getOperand(4)});
+    // (select_cc X, 1, setne, trueV, falseV) ->
+    // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1.
+    // This can occur when legalizing some floating point comparisons.
     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
-    if (ISD::isIntEqualitySetCC(CCVal) && isNullConstant(RHS) &&
-        LHS.getOpcode() == ISD::XOR && isOneConstant(LHS.getOperand(1)) &&
-        DAG.MaskedValueIsZero(LHS.getOperand(0), Mask)) {
+    if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
       SDLoc DL(N);
       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
       SDValue TargetCC = DAG.getConstant(CCVal, DL, Subtarget.getXLenVT());
-      return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
-                         {LHS.getOperand(0), RHS, TargetCC, N->getOperand(3),
-                          N->getOperand(4)});
+      RHS = DAG.getConstant(0, DL, LHS.getValueType());
+      return DAG.getNode(
+          RISCVISD::SELECT_CC, DL, N->getValueType(0),
+          {LHS, RHS, TargetCC, N->getOperand(3), N->getOperand(4)});
     }
+
     break;
   }
   case ISD::SETCC: {

diff --git a/llvm/test/CodeGen/RISCV/sadd_sat.ll b/llvm/test/CodeGen/RISCV/sadd_sat.ll
index 440890fcb068..d35e23c0a1cc 100644
--- a/llvm/test/CodeGen/RISCV/sadd_sat.ll
+++ b/llvm/test/CodeGen/RISCV/sadd_sat.ll
@@ -22,8 +22,7 @@ define signext i32 @func(i32 signext %x, i32 signext %y) nounwind {
 ; RV32I-NEXT:  .LBB0_2:
 ; RV32I-NEXT:    slt a2, a3, a2
 ; RV32I-NEXT:    slti a1, a1, 0
-; RV32I-NEXT:    xor a1, a1, a2
-; RV32I-NEXT:    bnez a1, .LBB0_4
+; RV32I-NEXT:    bne a1, a2, .LBB0_4
 ; RV32I-NEXT:  # %bb.3:
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:  .LBB0_4:
@@ -57,8 +56,7 @@ define signext i32 @func(i32 signext %x, i32 signext %y) nounwind {
 ; RV32IZbb-NEXT:  .LBB0_2:
 ; RV32IZbb-NEXT:    slt a2, a3, a2
 ; RV32IZbb-NEXT:    slti a1, a1, 0
-; RV32IZbb-NEXT:    xor a1, a1, a2
-; RV32IZbb-NEXT:    bnez a1, .LBB0_4
+; RV32IZbb-NEXT:    bne a1, a2, .LBB0_4
 ; RV32IZbb-NEXT:  # %bb.3:
 ; RV32IZbb-NEXT:    mv a0, a3
 ; RV32IZbb-NEXT:  .LBB0_4:
@@ -114,8 +112,7 @@ define i64 @func2(i64 %x, i64 %y) nounwind {
 ; RV64I-NEXT:  .LBB1_2:
 ; RV64I-NEXT:    slt a2, a3, a2
 ; RV64I-NEXT:    slti a1, a1, 0
-; RV64I-NEXT:    xor a1, a1, a2
-; RV64I-NEXT:    bnez a1, .LBB1_4
+; RV64I-NEXT:    bne a1, a2, .LBB1_4
 ; RV64I-NEXT:  # %bb.3:
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:  .LBB1_4:
@@ -157,8 +154,7 @@ define i64 @func2(i64 %x, i64 %y) nounwind {
 ; RV64IZbb-NEXT:  .LBB1_2:
 ; RV64IZbb-NEXT:    slt a2, a3, a2
 ; RV64IZbb-NEXT:    slti a1, a1, 0
-; RV64IZbb-NEXT:    xor a1, a1, a2
-; RV64IZbb-NEXT:    bnez a1, .LBB1_4
+; RV64IZbb-NEXT:    bne a1, a2, .LBB1_4
 ; RV64IZbb-NEXT:  # %bb.3:
 ; RV64IZbb-NEXT:    mv a0, a3
 ; RV64IZbb-NEXT:  .LBB1_4:

diff --git a/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll b/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll
index d38298aa86d8..1767701e6eaf 100644
--- a/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll
+++ b/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll
@@ -13,22 +13,20 @@ declare i64 @llvm.sadd.sat.i64(i64, i64)
 define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
 ; RV32I-LABEL: func32:
 ; RV32I:       # %bb.0:
+; RV32I-NEXT:    mv a3, a0
 ; RV32I-NEXT:    mul a2, a1, a2
 ; RV32I-NEXT:    add a1, a0, a2
-; RV32I-NEXT:    slt a0, a1, a0
-; RV32I-NEXT:    slti a2, a2, 0
-; RV32I-NEXT:    xor a2, a2, a0
 ; RV32I-NEXT:    lui a0, 524288
-; RV32I-NEXT:    bltz a1, .LBB0_3
+; RV32I-NEXT:    bgez a1, .LBB0_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    beqz a2, .LBB0_4
-; RV32I-NEXT:  .LBB0_2:
-; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB0_3:
 ; RV32I-NEXT:    addi a0, a0, -1
-; RV32I-NEXT:    bnez a2, .LBB0_2
-; RV32I-NEXT:  .LBB0_4:
+; RV32I-NEXT:  .LBB0_2:
+; RV32I-NEXT:    slt a3, a1, a3
+; RV32I-NEXT:    slti a2, a2, 0
+; RV32I-NEXT:    bne a2, a3, .LBB0_4
+; RV32I-NEXT:  # %bb.3:
 ; RV32I-NEXT:    mv a0, a1
+; RV32I-NEXT:  .LBB0_4:
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: func32:
@@ -52,22 +50,20 @@ define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
 ;
 ; RV32IZbb-LABEL: func32:
 ; RV32IZbb:       # %bb.0:
+; RV32IZbb-NEXT:    mv a3, a0
 ; RV32IZbb-NEXT:    mul a2, a1, a2
 ; RV32IZbb-NEXT:    add a1, a0, a2
-; RV32IZbb-NEXT:    slt a0, a1, a0
-; RV32IZbb-NEXT:    slti a2, a2, 0
-; RV32IZbb-NEXT:    xor a2, a2, a0
 ; RV32IZbb-NEXT:    lui a0, 524288
-; RV32IZbb-NEXT:    bltz a1, .LBB0_3
+; RV32IZbb-NEXT:    bgez a1, .LBB0_2
 ; RV32IZbb-NEXT:  # %bb.1:
-; RV32IZbb-NEXT:    beqz a2, .LBB0_4
-; RV32IZbb-NEXT:  .LBB0_2:
-; RV32IZbb-NEXT:    ret
-; RV32IZbb-NEXT:  .LBB0_3:
 ; RV32IZbb-NEXT:    addi a0, a0, -1
-; RV32IZbb-NEXT:    bnez a2, .LBB0_2
-; RV32IZbb-NEXT:  .LBB0_4:
+; RV32IZbb-NEXT:  .LBB0_2:
+; RV32IZbb-NEXT:    slt a3, a1, a3
+; RV32IZbb-NEXT:    slti a2, a2, 0
+; RV32IZbb-NEXT:    bne a2, a3, .LBB0_4
+; RV32IZbb-NEXT:  # %bb.3:
 ; RV32IZbb-NEXT:    mv a0, a1
+; RV32IZbb-NEXT:  .LBB0_4:
 ; RV32IZbb-NEXT:    ret
 ;
 ; RV64IZbb-LABEL: func32:
@@ -123,8 +119,7 @@ define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
 ; RV64I-NEXT:  .LBB1_2:
 ; RV64I-NEXT:    slt a1, a3, a1
 ; RV64I-NEXT:    slti a2, a2, 0
-; RV64I-NEXT:    xor a1, a2, a1
-; RV64I-NEXT:    bnez a1, .LBB1_4
+; RV64I-NEXT:    bne a2, a1, .LBB1_4
 ; RV64I-NEXT:  # %bb.3:
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:  .LBB1_4:
@@ -166,8 +161,7 @@ define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
 ; RV64IZbb-NEXT:  .LBB1_2:
 ; RV64IZbb-NEXT:    slt a1, a3, a1
 ; RV64IZbb-NEXT:    slti a2, a2, 0
-; RV64IZbb-NEXT:    xor a1, a2, a1
-; RV64IZbb-NEXT:    bnez a1, .LBB1_4
+; RV64IZbb-NEXT:    bne a2, a1, .LBB1_4
 ; RV64IZbb-NEXT:  # %bb.3:
 ; RV64IZbb-NEXT:    mv a0, a3
 ; RV64IZbb-NEXT:  .LBB1_4:

diff --git a/llvm/test/CodeGen/RISCV/ssub_sat.ll b/llvm/test/CodeGen/RISCV/ssub_sat.ll
index 47565d4c8d7c..61d5a4833ba9 100644
--- a/llvm/test/CodeGen/RISCV/ssub_sat.ll
+++ b/llvm/test/CodeGen/RISCV/ssub_sat.ll
@@ -13,21 +13,19 @@ declare i64 @llvm.ssub.sat.i64(i64, i64)
 define signext i32 @func(i32 signext %x, i32 signext %y) nounwind {
 ; RV32I-LABEL: func:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    sgtz a2, a1
-; RV32I-NEXT:    sub a1, a0, a1
-; RV32I-NEXT:    slt a0, a1, a0
-; RV32I-NEXT:    xor a2, a2, a0
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    sub a3, a0, a1
 ; RV32I-NEXT:    lui a0, 524288
-; RV32I-NEXT:    bltz a1, .LBB0_3
+; RV32I-NEXT:    bgez a3, .LBB0_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    beqz a2, .LBB0_4
-; RV32I-NEXT:  .LBB0_2:
-; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB0_3:
 ; RV32I-NEXT:    addi a0, a0, -1
-; RV32I-NEXT:    bnez a2, .LBB0_2
+; RV32I-NEXT:  .LBB0_2:
+; RV32I-NEXT:    sgtz a1, a1
+; RV32I-NEXT:    slt a2, a3, a2
+; RV32I-NEXT:    bne a1, a2, .LBB0_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:  .LBB0_4:
-; RV32I-NEXT:    mv a0, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: func:
@@ -49,21 +47,19 @@ define signext i32 @func(i32 signext %x, i32 signext %y) nounwind {
 ;
 ; RV32IZbb-LABEL: func:
 ; RV32IZbb:       # %bb.0:
-; RV32IZbb-NEXT:    sgtz a2, a1
-; RV32IZbb-NEXT:    sub a1, a0, a1
-; RV32IZbb-NEXT:    slt a0, a1, a0
-; RV32IZbb-NEXT:    xor a2, a2, a0
+; RV32IZbb-NEXT:    mv a2, a0
+; RV32IZbb-NEXT:    sub a3, a0, a1
 ; RV32IZbb-NEXT:    lui a0, 524288
-; RV32IZbb-NEXT:    bltz a1, .LBB0_3
+; RV32IZbb-NEXT:    bgez a3, .LBB0_2
 ; RV32IZbb-NEXT:  # %bb.1:
-; RV32IZbb-NEXT:    beqz a2, .LBB0_4
-; RV32IZbb-NEXT:  .LBB0_2:
-; RV32IZbb-NEXT:    ret
-; RV32IZbb-NEXT:  .LBB0_3:
 ; RV32IZbb-NEXT:    addi a0, a0, -1
-; RV32IZbb-NEXT:    bnez a2, .LBB0_2
+; RV32IZbb-NEXT:  .LBB0_2:
+; RV32IZbb-NEXT:    sgtz a1, a1
+; RV32IZbb-NEXT:    slt a2, a3, a2
+; RV32IZbb-NEXT:    bne a1, a2, .LBB0_4
+; RV32IZbb-NEXT:  # %bb.3:
+; RV32IZbb-NEXT:    mv a0, a3
 ; RV32IZbb-NEXT:  .LBB0_4:
-; RV32IZbb-NEXT:    mv a0, a1
 ; RV32IZbb-NEXT:    ret
 ;
 ; RV64IZbb-LABEL: func:
@@ -104,22 +100,20 @@ define i64 @func2(i64 %x, i64 %y) nounwind {
 ;
 ; RV64I-LABEL: func2:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sgtz a2, a1
-; RV64I-NEXT:    sub a1, a0, a1
-; RV64I-NEXT:    slt a0, a1, a0
-; RV64I-NEXT:    xor a2, a2, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    sub a3, a0, a1
 ; RV64I-NEXT:    addi a0, zero, -1
 ; RV64I-NEXT:    slli a0, a0, 63
-; RV64I-NEXT:    bltz a1, .LBB1_3
+; RV64I-NEXT:    bgez a3, .LBB1_2
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    beqz a2, .LBB1_4
-; RV64I-NEXT:  .LBB1_2:
-; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB1_3:
 ; RV64I-NEXT:    addi a0, a0, -1
-; RV64I-NEXT:    bnez a2, .LBB1_2
+; RV64I-NEXT:  .LBB1_2:
+; RV64I-NEXT:    sgtz a1, a1
+; RV64I-NEXT:    slt a2, a3, a2
+; RV64I-NEXT:    bne a1, a2, .LBB1_4
+; RV64I-NEXT:  # %bb.3:
+; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:  .LBB1_4:
-; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV32IZbb-LABEL: func2:
@@ -147,22 +141,20 @@ define i64 @func2(i64 %x, i64 %y) nounwind {
 ;
 ; RV64IZbb-LABEL: func2:
 ; RV64IZbb:       # %bb.0:
-; RV64IZbb-NEXT:    sgtz a2, a1
-; RV64IZbb-NEXT:    sub a1, a0, a1
-; RV64IZbb-NEXT:    slt a0, a1, a0
-; RV64IZbb-NEXT:    xor a2, a2, a0
+; RV64IZbb-NEXT:    mv a2, a0
+; RV64IZbb-NEXT:    sub a3, a0, a1
 ; RV64IZbb-NEXT:    addi a0, zero, -1
 ; RV64IZbb-NEXT:    slli a0, a0, 63
-; RV64IZbb-NEXT:    bltz a1, .LBB1_3
+; RV64IZbb-NEXT:    bgez a3, .LBB1_2
 ; RV64IZbb-NEXT:  # %bb.1:
-; RV64IZbb-NEXT:    beqz a2, .LBB1_4
-; RV64IZbb-NEXT:  .LBB1_2:
-; RV64IZbb-NEXT:    ret
-; RV64IZbb-NEXT:  .LBB1_3:
 ; RV64IZbb-NEXT:    addi a0, a0, -1
-; RV64IZbb-NEXT:    bnez a2, .LBB1_2
+; RV64IZbb-NEXT:  .LBB1_2:
+; RV64IZbb-NEXT:    sgtz a1, a1
+; RV64IZbb-NEXT:    slt a2, a3, a2
+; RV64IZbb-NEXT:    bne a1, a2, .LBB1_4
+; RV64IZbb-NEXT:  # %bb.3:
+; RV64IZbb-NEXT:    mv a0, a3
 ; RV64IZbb-NEXT:  .LBB1_4:
-; RV64IZbb-NEXT:    mv a0, a1
 ; RV64IZbb-NEXT:    ret
   %tmp = call i64 @llvm.ssub.sat.i64(i64 %x, i64 %y);
   ret i64 %tmp;

diff --git a/llvm/test/CodeGen/RISCV/ssub_sat_plus.ll b/llvm/test/CodeGen/RISCV/ssub_sat_plus.ll
index f94f5f63af01..7f2e1595a98d 100644
--- a/llvm/test/CodeGen/RISCV/ssub_sat_plus.ll
+++ b/llvm/test/CodeGen/RISCV/ssub_sat_plus.ll
@@ -13,22 +13,20 @@ declare i64 @llvm.ssub.sat.i64(i64, i64)
 define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
 ; RV32I-LABEL: func32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    mul a1, a1, a2
-; RV32I-NEXT:    sgtz a2, a1
-; RV32I-NEXT:    sub a1, a0, a1
-; RV32I-NEXT:    slt a0, a1, a0
-; RV32I-NEXT:    xor a2, a2, a0
+; RV32I-NEXT:    mv a3, a0
+; RV32I-NEXT:    mul a2, a1, a2
+; RV32I-NEXT:    sub a1, a0, a2
 ; RV32I-NEXT:    lui a0, 524288
-; RV32I-NEXT:    bltz a1, .LBB0_3
+; RV32I-NEXT:    bgez a1, .LBB0_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    beqz a2, .LBB0_4
-; RV32I-NEXT:  .LBB0_2:
-; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB0_3:
 ; RV32I-NEXT:    addi a0, a0, -1
-; RV32I-NEXT:    bnez a2, .LBB0_2
-; RV32I-NEXT:  .LBB0_4:
+; RV32I-NEXT:  .LBB0_2:
+; RV32I-NEXT:    sgtz a2, a2
+; RV32I-NEXT:    slt a3, a1, a3
+; RV32I-NEXT:    bne a2, a3, .LBB0_4
+; RV32I-NEXT:  # %bb.3:
 ; RV32I-NEXT:    mv a0, a1
+; RV32I-NEXT:  .LBB0_4:
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: func32:
@@ -52,22 +50,20 @@ define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
 ;
 ; RV32IZbb-LABEL: func32:
 ; RV32IZbb:       # %bb.0:
-; RV32IZbb-NEXT:    mul a1, a1, a2
-; RV32IZbb-NEXT:    sgtz a2, a1
-; RV32IZbb-NEXT:    sub a1, a0, a1
-; RV32IZbb-NEXT:    slt a0, a1, a0
-; RV32IZbb-NEXT:    xor a2, a2, a0
+; RV32IZbb-NEXT:    mv a3, a0
+; RV32IZbb-NEXT:    mul a2, a1, a2
+; RV32IZbb-NEXT:    sub a1, a0, a2
 ; RV32IZbb-NEXT:    lui a0, 524288
-; RV32IZbb-NEXT:    bltz a1, .LBB0_3
+; RV32IZbb-NEXT:    bgez a1, .LBB0_2
 ; RV32IZbb-NEXT:  # %bb.1:
-; RV32IZbb-NEXT:    beqz a2, .LBB0_4
-; RV32IZbb-NEXT:  .LBB0_2:
-; RV32IZbb-NEXT:    ret
-; RV32IZbb-NEXT:  .LBB0_3:
 ; RV32IZbb-NEXT:    addi a0, a0, -1
-; RV32IZbb-NEXT:    bnez a2, .LBB0_2
-; RV32IZbb-NEXT:  .LBB0_4:
+; RV32IZbb-NEXT:  .LBB0_2:
+; RV32IZbb-NEXT:    sgtz a2, a2
+; RV32IZbb-NEXT:    slt a3, a1, a3
+; RV32IZbb-NEXT:    bne a2, a3, .LBB0_4
+; RV32IZbb-NEXT:  # %bb.3:
 ; RV32IZbb-NEXT:    mv a0, a1
+; RV32IZbb-NEXT:  .LBB0_4:
 ; RV32IZbb-NEXT:    ret
 ;
 ; RV64IZbb-LABEL: func32:
@@ -111,22 +107,20 @@ define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
 ;
 ; RV64I-LABEL: func64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sgtz a3, a2
-; RV64I-NEXT:    sub a1, a0, a2
-; RV64I-NEXT:    slt a0, a1, a0
-; RV64I-NEXT:    xor a2, a3, a0
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    sub a3, a0, a2
 ; RV64I-NEXT:    addi a0, zero, -1
 ; RV64I-NEXT:    slli a0, a0, 63
-; RV64I-NEXT:    bltz a1, .LBB1_3
+; RV64I-NEXT:    bgez a3, .LBB1_2
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    beqz a2, .LBB1_4
-; RV64I-NEXT:  .LBB1_2:
-; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB1_3:
 ; RV64I-NEXT:    addi a0, a0, -1
-; RV64I-NEXT:    bnez a2, .LBB1_2
+; RV64I-NEXT:  .LBB1_2:
+; RV64I-NEXT:    sgtz a2, a2
+; RV64I-NEXT:    slt a1, a3, a1
+; RV64I-NEXT:    bne a2, a1, .LBB1_4
+; RV64I-NEXT:  # %bb.3:
+; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:  .LBB1_4:
-; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV32IZbb-LABEL: func64:
@@ -154,22 +148,20 @@ define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
 ;
 ; RV64IZbb-LABEL: func64:
 ; RV64IZbb:       # %bb.0:
-; RV64IZbb-NEXT:    sgtz a3, a2
-; RV64IZbb-NEXT:    sub a1, a0, a2
-; RV64IZbb-NEXT:    slt a0, a1, a0
-; RV64IZbb-NEXT:    xor a2, a3, a0
+; RV64IZbb-NEXT:    mv a1, a0
+; RV64IZbb-NEXT:    sub a3, a0, a2
 ; RV64IZbb-NEXT:    addi a0, zero, -1
 ; RV64IZbb-NEXT:    slli a0, a0, 63
-; RV64IZbb-NEXT:    bltz a1, .LBB1_3
+; RV64IZbb-NEXT:    bgez a3, .LBB1_2
 ; RV64IZbb-NEXT:  # %bb.1:
-; RV64IZbb-NEXT:    beqz a2, .LBB1_4
-; RV64IZbb-NEXT:  .LBB1_2:
-; RV64IZbb-NEXT:    ret
-; RV64IZbb-NEXT:  .LBB1_3:
 ; RV64IZbb-NEXT:    addi a0, a0, -1
-; RV64IZbb-NEXT:    bnez a2, .LBB1_2
+; RV64IZbb-NEXT:  .LBB1_2:
+; RV64IZbb-NEXT:    sgtz a2, a2
+; RV64IZbb-NEXT:    slt a1, a3, a1
+; RV64IZbb-NEXT:    bne a2, a1, .LBB1_4
+; RV64IZbb-NEXT:  # %bb.3:
+; RV64IZbb-NEXT:    mv a0, a3
 ; RV64IZbb-NEXT:  .LBB1_4:
-; RV64IZbb-NEXT:    mv a0, a1
 ; RV64IZbb-NEXT:    ret
   %a = mul i64 %y, %z
   %tmp = call i64 @llvm.ssub.sat.i64(i64 %x, i64 %z)

diff --git a/llvm/test/CodeGen/RISCV/xaluo.ll b/llvm/test/CodeGen/RISCV/xaluo.ll
index 7d8b3854a51e..2f6655f9bbb6 100644
--- a/llvm/test/CodeGen/RISCV/xaluo.ll
+++ b/llvm/test/CodeGen/RISCV/xaluo.ll
@@ -694,8 +694,7 @@ define i32 @saddo.select.i32(i32 %v1, i32 %v2) {
 ; RV32-NEXT:    add a2, a0, a1
 ; RV32-NEXT:    slt a2, a2, a0
 ; RV32-NEXT:    slti a3, a1, 0
-; RV32-NEXT:    xor a2, a3, a2
-; RV32-NEXT:    bnez a2, .LBB22_2
+; RV32-NEXT:    bne a3, a2, .LBB22_2
 ; RV32-NEXT:  # %bb.1: # %entry
 ; RV32-NEXT:    mv a0, a1
 ; RV32-NEXT:  .LBB22_2: # %entry
@@ -768,8 +767,7 @@ define i64 @saddo.select.i64(i64 %v1, i64 %v2) {
 ; RV64-NEXT:    add a2, a0, a1
 ; RV64-NEXT:    slt a2, a2, a0
 ; RV64-NEXT:    slti a3, a1, 0
-; RV64-NEXT:    xor a2, a3, a2
-; RV64-NEXT:    bnez a2, .LBB24_2
+; RV64-NEXT:    bne a3, a2, .LBB24_2
 ; RV64-NEXT:  # %bb.1: # %entry
 ; RV64-NEXT:    mv a0, a1
 ; RV64-NEXT:  .LBB24_2: # %entry
@@ -939,8 +937,7 @@ define i32 @ssubo.select.i32(i32 %v1, i32 %v2) {
 ; RV32-NEXT:    sgtz a2, a1
 ; RV32-NEXT:    sub a3, a0, a1
 ; RV32-NEXT:    slt a3, a3, a0
-; RV32-NEXT:    xor a2, a2, a3
-; RV32-NEXT:    bnez a2, .LBB30_2
+; RV32-NEXT:    bne a2, a3, .LBB30_2
 ; RV32-NEXT:  # %bb.1: # %entry
 ; RV32-NEXT:    mv a0, a1
 ; RV32-NEXT:  .LBB30_2: # %entry
@@ -1011,8 +1008,7 @@ define i64 @ssubo.select.i64(i64 %v1, i64 %v2) {
 ; RV64-NEXT:    sgtz a2, a1
 ; RV64-NEXT:    sub a3, a0, a1
 ; RV64-NEXT:    slt a3, a3, a0
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    bnez a2, .LBB32_2
+; RV64-NEXT:    bne a2, a3, .LBB32_2
 ; RV64-NEXT:  # %bb.1: # %entry
 ; RV64-NEXT:    mv a0, a1
 ; RV64-NEXT:  .LBB32_2: # %entry


More information about the llvm-commits mailing list