[llvm] 85834d8 - [RISCV] Keep (select c, 0/-1, X) during PerformDAGCombine
via llvm-commits
llvm-commits at lists.llvm.org
Mon Dec 5 17:26:34 PST 2022
Author: ChunyuLiao
Date: 2022-12-06T09:26:29+08:00
New Revision: 85834d8685c12dea2cf054a5cf138395cb1f8d9f
URL: https://github.com/llvm/llvm-project/commit/85834d8685c12dea2cf054a5cf138395cb1f8d9f
DIFF: https://github.com/llvm/llvm-project/commit/85834d8685c12dea2cf054a5cf138395cb1f8d9f.diff
LOG: [RISCV] Keep (select c, 0/-1, X) during PerformDAGCombine
D135833 taught lowerSelect to lower (select C, -1/0, X) to or/and.
Gate combineSelectAndUse on hasShortForwardBranchOpt() so that, on subtargets without short forward branches, (select c, 0/-1, X) is kept intact during PerformDAGCombine and lowerSelect's or/and lowering can eliminate the branch instructions.
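For context, here is a minimal standalone sketch (plain C++, illustrative only; the function names are hypothetical and this is not LLVM source) of the branchless identities the or/and lowering exploits when the condition c is 0 or 1. It mirrors the addi/or/and and neg/and/or sequences in the updated RV32I/RV64I checks below.

#include <cassert>
#include <cstdint>

// (c ? x : -1) & y == (x | (c - 1)) & y, since c - 1 is 0 or all-ones.
uint32_t and_select_all_ones(bool c, uint32_t x, uint32_t y) {
  uint32_t mask = static_cast<uint32_t>(c) - 1; // c==1 -> 0, c==0 -> ~0u
  return (x | mask) & y;                        // addi/or/and, no branch
}

// (c ? x : 0) | y == (x & -c) | y, since -c is 0 or all-ones.
uint32_t or_select_all_zeros(bool c, uint32_t x, uint32_t y) {
  uint32_t mask = -static_cast<uint32_t>(c);    // c==1 -> ~0u, c==0 -> 0
  return (x & mask) | y;                        // neg/and/or, no branch
}

int main() {
  for (unsigned c = 0; c <= 1; ++c) {
    assert(and_select_all_ones(c, 7, 12) == ((c ? 7u : ~0u) & 12u));
    assert(or_select_all_zeros(c, 7, 12) == ((c ? 7u : 0u) | 12u));
  }
  return 0;
}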
Reviewed By: craig.topper
Differential Revision: https://reviews.llvm.org/D139272
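Conversely, when the subtarget does have short forward branches, combineSelectAndUse still performs folds such as (and (select cond, -1, y), x) -> (select cond, x, (and x, y)), which the short-forward-branch machinery turns into a compute-then-conditional-move sequence (see the SFB64 checks below). A rough C++ analogue of that shape (hypothetical function name, not the actual lowering):

uint32_t and_select_sfb(bool c, uint32_t x, uint32_t y) {
  uint32_t r = x & y; // the binop executes unconditionally
  if (!c)             // lowers to one short forward branch over a mv
    r = y;            // identity operand: (-1 & y) == y
  return r;
}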
Added:
Modified:
llvm/lib/Target/RISCV/RISCVISelLowering.cpp
llvm/test/CodeGen/RISCV/select-binop-identity.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 03f4a564035dc..7c8e9f5ce0b49 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -8063,14 +8063,16 @@ static SDValue transformAddShlImm(SDNode *N, SelectionDAG &DAG,
// (sub x, (select cond, 0, c))
// -> (select cond, x, (sub x, c)) [AllOnes=0]
static SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
- SelectionDAG &DAG, bool AllOnes) {
+ SelectionDAG &DAG, bool AllOnes,
+ const RISCVSubtarget &Subtarget) {
EVT VT = N->getValueType(0);
// Skip vectors.
if (VT.isVector())
return SDValue();
- if ((Slct.getOpcode() != ISD::SELECT &&
+ if (!Subtarget.hasShortForwardBranchOpt() ||
+ (Slct.getOpcode() != ISD::SELECT &&
Slct.getOpcode() != RISCVISD::SELECT_CC) ||
!Slct.hasOneUse())
return SDValue();
@@ -8111,12 +8113,13 @@ static SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
// Attempt combineSelectAndUse on each operand of a commutative operator N.
static SDValue combineSelectAndUseCommutative(SDNode *N, SelectionDAG &DAG,
- bool AllOnes) {
+ bool AllOnes,
+ const RISCVSubtarget &Subtarget) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
- if (SDValue Result = combineSelectAndUse(N, N0, N1, DAG, AllOnes))
+ if (SDValue Result = combineSelectAndUse(N, N0, N1, DAG, AllOnes, Subtarget))
return Result;
- if (SDValue Result = combineSelectAndUse(N, N1, N0, DAG, AllOnes))
+ if (SDValue Result = combineSelectAndUse(N, N1, N0, DAG, AllOnes, Subtarget))
return Result;
return SDValue();
}
@@ -8198,7 +8201,7 @@ static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG,
return V;
// fold (add (select lhs, rhs, cc, 0, y), x) ->
// (select lhs, rhs, cc, x, (add x, y))
- return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
+ return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false, Subtarget);
}
// Try to turn a sub boolean RHS and constant LHS into an addi.
@@ -8242,7 +8245,8 @@ static SDValue combineSubOfBoolean(SDNode *N, SelectionDAG &DAG) {
return DAG.getNode(ISD::ADD, DL, VT, NewLHS, NewRHS);
}
-static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG) {
+static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG,
+ const RISCVSubtarget &Subtarget) {
if (SDValue V = combineSubOfBoolean(N, DAG))
return V;
@@ -8250,7 +8254,7 @@ static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG) {
// (select lhs, rhs, cc, x, (sub x, y))
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
- return combineSelectAndUse(N, N1, N0, DAG, /*AllOnes*/ false);
+ return combineSelectAndUse(N, N1, N0, DAG, /*AllOnes*/ false, Subtarget);
}
// Apply DeMorgan's law to (and/or (xor X, 1), (xor Y, 1)) if X and Y are 0/1.
@@ -8356,7 +8360,7 @@ static SDValue performANDCombine(SDNode *N,
// fold (and (select lhs, rhs, cc, -1, y), x) ->
// (select lhs, rhs, cc, x, (and x, y))
- return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ true);
+ return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ true, Subtarget);
}
static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
@@ -8372,10 +8376,11 @@ static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
// fold (or (select cond, 0, y), x) ->
// (select cond, x, (or x, y))
- return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
+ return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false, Subtarget);
}
-static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG) {
+static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG,
+ const RISCVSubtarget &Subtarget) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
@@ -8394,7 +8399,7 @@ static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG) {
return V;
// fold (xor (select cond, 0, y), x) ->
// (select cond, x, (xor x, y))
- return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
+ return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false, Subtarget);
}
// Replace (seteq (i64 (and X, 0xffffffff)), C1) with
@@ -9596,13 +9601,13 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
case ISD::ADD:
return performADDCombine(N, DAG, Subtarget);
case ISD::SUB:
- return performSUBCombine(N, DAG);
+ return performSUBCombine(N, DAG, Subtarget);
case ISD::AND:
return performANDCombine(N, DCI, Subtarget);
case ISD::OR:
return performORCombine(N, DCI, Subtarget);
case ISD::XOR:
- return performXORCombine(N, DAG);
+ return performXORCombine(N, DAG, Subtarget);
case ISD::FADD:
case ISD::UMAX:
case ISD::UMIN:
diff --git a/llvm/test/CodeGen/RISCV/select-binop-identity.ll b/llvm/test/CodeGen/RISCV/select-binop-identity.ll
index 74fee7aed8b55..c4a6b40fe63db 100644
--- a/llvm/test/CodeGen/RISCV/select-binop-identity.ll
+++ b/llvm/test/CodeGen/RISCV/select-binop-identity.ll
@@ -3,6 +3,8 @@
; RUN: | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64I %s
+; RUN: llc -mtriple=riscv64 -mcpu=sifive-u74 -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=SFB64 %s
; InstCombine canonicalizes (c ? x | y : x) to (x | (c ? y : 0)) similar for
; other binary operations using their identity value as the constant.
@@ -13,21 +15,28 @@
define signext i32 @and_select_all_ones_i32(i1 zeroext %c, i32 signext %x, i32 %y) {
; RV32I-LABEL: and_select_all_ones_i32:
; RV32I: # %bb.0:
-; RV32I-NEXT: beqz a0, .LBB0_2
-; RV32I-NEXT: # %bb.1:
-; RV32I-NEXT: and a2, a2, a1
-; RV32I-NEXT: .LBB0_2:
-; RV32I-NEXT: mv a0, a2
+; RV32I-NEXT: addi a0, a0, -1
+; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: and a0, a0, a2
; RV32I-NEXT: ret
;
; RV64I-LABEL: and_select_all_ones_i32:
; RV64I: # %bb.0:
-; RV64I-NEXT: beqz a0, .LBB0_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: and a2, a2, a1
-; RV64I-NEXT: .LBB0_2:
-; RV64I-NEXT: sext.w a0, a2
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: and a0, a0, a2
+; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: ret
+;
+; SFB64-LABEL: and_select_all_ones_i32:
+; SFB64: # %bb.0:
+; SFB64-NEXT: and a1, a1, a2
+; SFB64-NEXT: bnez a0, .LBB0_2
+; SFB64-NEXT: # %bb.1:
+; SFB64-NEXT: mv a1, a2
+; SFB64-NEXT: .LBB0_2:
+; SFB64-NEXT: sext.w a0, a1
+; SFB64-NEXT: ret
%a = select i1 %c, i32 %x, i32 -1
%b = and i32 %a, %y
ret i32 %b
@@ -36,23 +45,29 @@ define signext i32 @and_select_all_ones_i32(i1 zeroext %c, i32 signext %x, i32 %
define i64 @and_select_all_ones_i64(i1 zeroext %c, i64 %x, i64 %y) {
; RV32I-LABEL: and_select_all_ones_i64:
; RV32I: # %bb.0:
-; RV32I-NEXT: bnez a0, .LBB1_2
-; RV32I-NEXT: # %bb.1:
-; RV32I-NEXT: and a3, a3, a1
-; RV32I-NEXT: and a4, a4, a2
-; RV32I-NEXT: .LBB1_2:
-; RV32I-NEXT: mv a0, a3
-; RV32I-NEXT: mv a1, a4
+; RV32I-NEXT: neg a0, a0
+; RV32I-NEXT: or a2, a0, a2
+; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: and a0, a3, a0
+; RV32I-NEXT: and a1, a4, a2
; RV32I-NEXT: ret
;
; RV64I-LABEL: and_select_all_ones_i64:
; RV64I: # %bb.0:
-; RV64I-NEXT: bnez a0, .LBB1_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: and a2, a2, a1
-; RV64I-NEXT: .LBB1_2:
-; RV64I-NEXT: mv a0, a2
+; RV64I-NEXT: neg a0, a0
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: and a0, a2, a0
; RV64I-NEXT: ret
+;
+; SFB64-LABEL: and_select_all_ones_i64:
+; SFB64: # %bb.0:
+; SFB64-NEXT: and a1, a1, a2
+; SFB64-NEXT: beqz a0, .LBB1_2
+; SFB64-NEXT: # %bb.1:
+; SFB64-NEXT: mv a1, a2
+; SFB64-NEXT: .LBB1_2:
+; SFB64-NEXT: mv a0, a1
+; SFB64-NEXT: ret
%a = select i1 %c, i64 -1, i64 %x
%b = and i64 %y, %a
ret i64 %b
@@ -63,15 +78,25 @@ define signext i32 @or_select_all_zeros_i32(i1 zeroext %c, i32 signext %x, i32 s
; RV32I: # %bb.0:
; RV32I-NEXT: neg a0, a0
; RV32I-NEXT: and a0, a0, a1
-; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: or a0, a2, a0
; RV32I-NEXT: ret
;
; RV64I-LABEL: or_select_all_zeros_i32:
; RV64I: # %bb.0:
; RV64I-NEXT: neg a0, a0
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: or a0, a0, a2
+; RV64I-NEXT: or a0, a2, a0
; RV64I-NEXT: ret
+;
+; SFB64-LABEL: or_select_all_zeros_i32:
+; SFB64: # %bb.0:
+; SFB64-NEXT: or a1, a1, a2
+; SFB64-NEXT: bnez a0, .LBB2_2
+; SFB64-NEXT: # %bb.1:
+; SFB64-NEXT: mv a1, a2
+; SFB64-NEXT: .LBB2_2:
+; SFB64-NEXT: mv a0, a1
+; SFB64-NEXT: ret
%a = select i1 %c, i32 %x, i32 0
%b = or i32 %y, %a
ret i32 %b
@@ -80,23 +105,29 @@ define signext i32 @or_select_all_zeros_i32(i1 zeroext %c, i32 signext %x, i32 s
define i64 @or_select_all_zeros_i64(i1 zeroext %c, i64 %x, i64 %y) {
; RV32I-LABEL: or_select_all_zeros_i64:
; RV32I: # %bb.0:
-; RV32I-NEXT: bnez a0, .LBB3_2
-; RV32I-NEXT: # %bb.1:
-; RV32I-NEXT: or a3, a3, a1
-; RV32I-NEXT: or a4, a4, a2
-; RV32I-NEXT: .LBB3_2:
-; RV32I-NEXT: mv a0, a3
-; RV32I-NEXT: mv a1, a4
+; RV32I-NEXT: addi a0, a0, -1
+; RV32I-NEXT: and a2, a0, a2
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: or a0, a0, a3
+; RV32I-NEXT: or a1, a2, a4
; RV32I-NEXT: ret
;
; RV64I-LABEL: or_select_all_zeros_i64:
; RV64I: # %bb.0:
-; RV64I-NEXT: bnez a0, .LBB3_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: or a2, a2, a1
-; RV64I-NEXT: .LBB3_2:
-; RV64I-NEXT: mv a0, a2
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: or a0, a0, a2
; RV64I-NEXT: ret
+;
+; SFB64-LABEL: or_select_all_zeros_i64:
+; SFB64: # %bb.0:
+; SFB64-NEXT: or a1, a1, a2
+; SFB64-NEXT: beqz a0, .LBB3_2
+; SFB64-NEXT: # %bb.1:
+; SFB64-NEXT: mv a1, a2
+; SFB64-NEXT: .LBB3_2:
+; SFB64-NEXT: mv a0, a1
+; SFB64-NEXT: ret
%a = select i1 %c, i64 0, i64 %x
%b = or i64 %a, %y
ret i64 %b
@@ -105,21 +136,27 @@ define i64 @or_select_all_zeros_i64(i1 zeroext %c, i64 %x, i64 %y) {
define signext i32 @xor_select_all_zeros_i32(i1 zeroext %c, i32 signext %x, i32 signext %y) {
; RV32I-LABEL: xor_select_all_zeros_i32:
; RV32I: # %bb.0:
-; RV32I-NEXT: bnez a0, .LBB4_2
-; RV32I-NEXT: # %bb.1:
-; RV32I-NEXT: xor a2, a2, a1
-; RV32I-NEXT: .LBB4_2:
-; RV32I-NEXT: mv a0, a2
+; RV32I-NEXT: addi a0, a0, -1
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: xor a0, a2, a0
; RV32I-NEXT: ret
;
; RV64I-LABEL: xor_select_all_zeros_i32:
; RV64I: # %bb.0:
-; RV64I-NEXT: bnez a0, .LBB4_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: xor a2, a2, a1
-; RV64I-NEXT: .LBB4_2:
-; RV64I-NEXT: mv a0, a2
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: xor a0, a2, a0
; RV64I-NEXT: ret
+;
+; SFB64-LABEL: xor_select_all_zeros_i32:
+; SFB64: # %bb.0:
+; SFB64-NEXT: xor a1, a1, a2
+; SFB64-NEXT: beqz a0, .LBB4_2
+; SFB64-NEXT: # %bb.1:
+; SFB64-NEXT: mv a1, a2
+; SFB64-NEXT: .LBB4_2:
+; SFB64-NEXT: mv a0, a1
+; SFB64-NEXT: ret
%a = select i1 %c, i32 0, i32 %x
%b = xor i32 %y, %a
ret i32 %b
@@ -128,11 +165,11 @@ define signext i32 @xor_select_all_zeros_i32(i1 zeroext %c, i32 signext %x, i32
define i64 @xor_select_all_zeros_i64(i1 zeroext %c, i64 %x, i64 %y) {
; RV32I-LABEL: xor_select_all_zeros_i64:
; RV32I: # %bb.0:
-; RV32I-NEXT: neg a5, a0
-; RV32I-NEXT: and a0, a5, a1
+; RV32I-NEXT: neg a0, a0
+; RV32I-NEXT: and a2, a0, a2
+; RV32I-NEXT: and a0, a0, a1
; RV32I-NEXT: xor a0, a0, a3
-; RV32I-NEXT: and a1, a5, a2
-; RV32I-NEXT: xor a1, a1, a4
+; RV32I-NEXT: xor a1, a2, a4
; RV32I-NEXT: ret
;
; RV64I-LABEL: xor_select_all_zeros_i64:
@@ -141,6 +178,16 @@ define i64 @xor_select_all_zeros_i64(i1 zeroext %c, i64 %x, i64 %y) {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: xor a0, a0, a2
; RV64I-NEXT: ret
+;
+; SFB64-LABEL: xor_select_all_zeros_i64:
+; SFB64: # %bb.0:
+; SFB64-NEXT: xor a1, a1, a2
+; SFB64-NEXT: bnez a0, .LBB5_2
+; SFB64-NEXT: # %bb.1:
+; SFB64-NEXT: mv a1, a2
+; SFB64-NEXT: .LBB5_2:
+; SFB64-NEXT: mv a0, a1
+; SFB64-NEXT: ret
%a = select i1 %c, i64 %x, i64 0
%b = xor i64 %a, %y
ret i64 %b
@@ -149,21 +196,27 @@ define i64 @xor_select_all_zeros_i64(i1 zeroext %c, i64 %x, i64 %y) {
define signext i32 @add_select_all_zeros_i32(i1 zeroext %c, i32 signext %x, i32 signext %y) {
; RV32I-LABEL: add_select_all_zeros_i32:
; RV32I: # %bb.0:
-; RV32I-NEXT: bnez a0, .LBB6_2
-; RV32I-NEXT: # %bb.1:
-; RV32I-NEXT: add a2, a2, a1
-; RV32I-NEXT: .LBB6_2:
-; RV32I-NEXT: mv a0, a2
+; RV32I-NEXT: addi a0, a0, -1
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: add a0, a2, a0
; RV32I-NEXT: ret
;
; RV64I-LABEL: add_select_all_zeros_i32:
; RV64I: # %bb.0:
-; RV64I-NEXT: bnez a0, .LBB6_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: addw a2, a2, a1
-; RV64I-NEXT: .LBB6_2:
-; RV64I-NEXT: mv a0, a2
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: addw a0, a2, a0
; RV64I-NEXT: ret
+;
+; SFB64-LABEL: add_select_all_zeros_i32:
+; SFB64: # %bb.0:
+; SFB64-NEXT: addw a1, a1, a2
+; SFB64-NEXT: beqz a0, .LBB6_2
+; SFB64-NEXT: # %bb.1:
+; SFB64-NEXT: mv a1, a2
+; SFB64-NEXT: .LBB6_2:
+; SFB64-NEXT: mv a0, a1
+; SFB64-NEXT: ret
%a = select i1 %c, i32 0, i32 %x
%b = add i32 %y, %a
ret i32 %b
@@ -172,26 +225,31 @@ define signext i32 @add_select_all_zeros_i32(i1 zeroext %c, i32 signext %x, i32
define i64 @add_select_all_zeros_i64(i1 zeroext %c, i64 %x, i64 %y) {
; RV32I-LABEL: add_select_all_zeros_i64:
; RV32I: # %bb.0:
-; RV32I-NEXT: beqz a0, .LBB7_2
-; RV32I-NEXT: # %bb.1:
-; RV32I-NEXT: add a2, a4, a2
-; RV32I-NEXT: add a1, a3, a1
-; RV32I-NEXT: sltu a4, a1, a3
-; RV32I-NEXT: add a4, a2, a4
-; RV32I-NEXT: mv a3, a1
-; RV32I-NEXT: .LBB7_2:
-; RV32I-NEXT: mv a0, a3
-; RV32I-NEXT: mv a1, a4
+; RV32I-NEXT: neg a0, a0
+; RV32I-NEXT: and a2, a0, a2
+; RV32I-NEXT: and a1, a0, a1
+; RV32I-NEXT: add a0, a1, a3
+; RV32I-NEXT: sltu a1, a0, a1
+; RV32I-NEXT: add a2, a2, a4
+; RV32I-NEXT: add a1, a2, a1
; RV32I-NEXT: ret
;
; RV64I-LABEL: add_select_all_zeros_i64:
; RV64I: # %bb.0:
-; RV64I-NEXT: beqz a0, .LBB7_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: add a2, a2, a1
-; RV64I-NEXT: .LBB7_2:
-; RV64I-NEXT: mv a0, a2
+; RV64I-NEXT: neg a0, a0
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: add a0, a0, a2
; RV64I-NEXT: ret
+;
+; SFB64-LABEL: add_select_all_zeros_i64:
+; SFB64: # %bb.0:
+; SFB64-NEXT: add a1, a1, a2
+; SFB64-NEXT: bnez a0, .LBB7_2
+; SFB64-NEXT: # %bb.1:
+; SFB64-NEXT: mv a1, a2
+; SFB64-NEXT: .LBB7_2:
+; SFB64-NEXT: mv a0, a1
+; SFB64-NEXT: ret
%a = select i1 %c, i64 %x, i64 0
%b = add i64 %a, %y
ret i64 %b
@@ -200,21 +258,27 @@ define i64 @add_select_all_zeros_i64(i1 zeroext %c, i64 %x, i64 %y) {
define signext i32 @sub_select_all_zeros_i32(i1 zeroext %c, i32 signext %x, i32 signext %y) {
; RV32I-LABEL: sub_select_all_zeros_i32:
; RV32I: # %bb.0:
-; RV32I-NEXT: bnez a0, .LBB8_2
-; RV32I-NEXT: # %bb.1:
-; RV32I-NEXT: sub a2, a2, a1
-; RV32I-NEXT: .LBB8_2:
-; RV32I-NEXT: mv a0, a2
+; RV32I-NEXT: addi a0, a0, -1
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: sub a0, a2, a0
; RV32I-NEXT: ret
;
; RV64I-LABEL: sub_select_all_zeros_i32:
; RV64I: # %bb.0:
-; RV64I-NEXT: bnez a0, .LBB8_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: subw a2, a2, a1
-; RV64I-NEXT: .LBB8_2:
-; RV64I-NEXT: mv a0, a2
+; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: subw a0, a2, a0
; RV64I-NEXT: ret
+;
+; SFB64-LABEL: sub_select_all_zeros_i32:
+; SFB64: # %bb.0:
+; SFB64-NEXT: subw a1, a2, a1
+; SFB64-NEXT: beqz a0, .LBB8_2
+; SFB64-NEXT: # %bb.1:
+; SFB64-NEXT: mv a1, a2
+; SFB64-NEXT: .LBB8_2:
+; SFB64-NEXT: mv a0, a1
+; SFB64-NEXT: ret
%a = select i1 %c, i32 0, i32 %x
%b = sub i32 %y, %a
ret i32 %b
@@ -223,25 +287,31 @@ define signext i32 @sub_select_all_zeros_i32(i1 zeroext %c, i32 signext %x, i32
define i64 @sub_select_all_zeros_i64(i1 zeroext %c, i64 %x, i64 %y) {
; RV32I-LABEL: sub_select_all_zeros_i64:
; RV32I: # %bb.0:
-; RV32I-NEXT: beqz a0, .LBB9_2
-; RV32I-NEXT: # %bb.1:
-; RV32I-NEXT: sltu a0, a3, a1
+; RV32I-NEXT: neg a0, a0
+; RV32I-NEXT: and a2, a0, a2
+; RV32I-NEXT: and a0, a0, a1
+; RV32I-NEXT: sltu a1, a3, a0
; RV32I-NEXT: sub a4, a4, a2
-; RV32I-NEXT: sub a4, a4, a0
-; RV32I-NEXT: sub a3, a3, a1
-; RV32I-NEXT: .LBB9_2:
-; RV32I-NEXT: mv a0, a3
-; RV32I-NEXT: mv a1, a4
+; RV32I-NEXT: sub a1, a4, a1
+; RV32I-NEXT: sub a0, a3, a0
; RV32I-NEXT: ret
;
; RV64I-LABEL: sub_select_all_zeros_i64:
; RV64I: # %bb.0:
-; RV64I-NEXT: beqz a0, .LBB9_2
-; RV64I-NEXT: # %bb.1:
-; RV64I-NEXT: sub a2, a2, a1
-; RV64I-NEXT: .LBB9_2:
-; RV64I-NEXT: mv a0, a2
+; RV64I-NEXT: neg a0, a0
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: sub a0, a2, a0
; RV64I-NEXT: ret
+;
+; SFB64-LABEL: sub_select_all_zeros_i64:
+; SFB64: # %bb.0:
+; SFB64-NEXT: sub a1, a2, a1
+; SFB64-NEXT: bnez a0, .LBB9_2
+; SFB64-NEXT: # %bb.1:
+; SFB64-NEXT: mv a1, a2
+; SFB64-NEXT: .LBB9_2:
+; SFB64-NEXT: mv a0, a1
+; SFB64-NEXT: ret
%a = select i1 %c, i64 %x, i64 0
%b = sub i64 %y, %a
ret i64 %b