[llvm] 02fad05 - [RISCV][SDAG] Fold `select c, ~x, x` into `xor -c, x` (#82462)

via llvm-commits llvm-commits at lists.llvm.org
Wed Feb 21 00:27:47 PST 2024


Author: Yingwei Zheng
Date: 2024-02-21T16:27:43+08:00
New Revision: 02fad0565fe7f061bdaa79ff33b29f64b2c290eb

URL: https://github.com/llvm/llvm-project/commit/02fad0565fe7f061bdaa79ff33b29f64b2c290eb
DIFF: https://github.com/llvm/llvm-project/commit/02fad0565fe7f061bdaa79ff33b29f64b2c290eb.diff

LOG: [RISCV][SDAG] Fold `select c, ~x, x` into `xor -c, x` (#82462)

This patch lowers a select between two constants into an `xor` when
`TrueV == ~FalseV`. It addresses the comment in
https://github.com/llvm/llvm-project/pull/82456#discussion_r1496881603.
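
As a quick sanity check of the identity behind the fold (an illustrative
standalone snippet, not part of the patch), when `FalseV == ~TrueV` the
select `c ? TrueV : FalseV` is equivalent to `(-c) ^ FalseV`, since -c is
an all-ones mask when c is true and zero otherwise:

  #include <cassert>
  #include <cstdint>

  // Select form: pick between two constants based on a boolean condition.
  static int32_t selectForm(bool c, int32_t trueV, int32_t falseV) {
    return c ? trueV : falseV;
  }

  // Xor form: -c is all-ones when c is true and 0 when c is false, so
  // xor'ing it with falseV yields ~falseV (== trueV) or falseV.
  static int32_t xorForm(bool c, int32_t falseV) {
    int32_t mask = -static_cast<int32_t>(c);
    return mask ^ falseV;
  }

  int main() {
    // Mirrors select_cst_not1 in the test below: TrueV = 5, FalseV = -6 = ~5.
    for (bool c : {false, true})
      assert(selectForm(c, 5, -6) == xorForm(c, -6));
    return 0;
  }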

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/select.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 25a27a91a16358..c2fef4993f6ec8 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -7239,6 +7239,16 @@ static SDValue combineSelectToBinOp(SDNode *N, SelectionDAG &DAG,
     }
   }
 
+  // select c, ~x, x --> xor -c, x
+  if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV)) {
+    const APInt &TrueVal = TrueV->getAsAPIntVal();
+    const APInt &FalseVal = FalseV->getAsAPIntVal();
+    if (~TrueVal == FalseVal) {
+      SDValue Neg = DAG.getNegative(CondV, DL, VT);
+      return DAG.getNode(ISD::XOR, DL, VT, Neg, FalseV);
+    }
+  }
+
   // Try to fold (select (setcc lhs, rhs, cc), truev, falsev) into bitwise ops
   // when both truev and falsev are also setcc.
   if (CondV.getOpcode() == ISD::SETCC && TrueV.getOpcode() == ISD::SETCC &&

diff --git a/llvm/test/CodeGen/RISCV/select.ll b/llvm/test/CodeGen/RISCV/select.ll
index 7dd223df5e557e..e01984b7c5843a 100644
--- a/llvm/test/CodeGen/RISCV/select.ll
+++ b/llvm/test/CodeGen/RISCV/select.ll
@@ -1449,3 +1449,180 @@ entry:
   %res = select i1 %cond, i32 %a, i32 %c
   ret i32 %res
 }
+
+define i32 @select_cst_not1(i32 signext %a, i32 signext %b) {
+; CHECK-LABEL: select_cst_not1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    slt a0, a0, a1
+; CHECK-NEXT:    neg a0, a0
+; CHECK-NEXT:    xori a0, a0, -6
+; CHECK-NEXT:    ret
+  %cond = icmp slt i32 %a, %b
+  %ret = select i1 %cond, i32 5, i32 -6
+  ret i32 %ret
+}
+
+define i32 @select_cst_not2(i32 signext %a) {
+; CHECK-LABEL: select_cst_not2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    srai a0, a0, 31
+; CHECK-NEXT:    xori a0, a0, -6
+; CHECK-NEXT:    ret
+  %cond = icmp slt i32 %a, 0
+  %ret = select i1 %cond, i32 5, i32 -6
+  ret i32 %ret
+}
+
+define i32 @select_cst_not3(i32 signext %a) {
+; CHECK-LABEL: select_cst_not3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    srai a0, a0, 31
+; CHECK-NEXT:    xori a0, a0, 5
+; CHECK-NEXT:    ret
+  %cond = icmp sgt i32 %a, -1
+  %ret = select i1 %cond, i32 5, i32 -6
+  ret i32 %ret
+}
+
+define i32 @select_cst_not4(i32 signext %a, i32 signext %b) {
+; RV32IM-LABEL: select_cst_not4:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    slt a0, a0, a1
+; RV32IM-NEXT:    lui a1, 524288
+; RV32IM-NEXT:    addi a1, a1, -1
+; RV32IM-NEXT:    add a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: select_cst_not4:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    slt a0, a0, a1
+; RV64IM-NEXT:    neg a0, a0
+; RV64IM-NEXT:    lui a1, 524288
+; RV64IM-NEXT:    addiw a1, a1, -1
+; RV64IM-NEXT:    xor a0, a0, a1
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_cst_not4:
+; RV64IMXVTCONDOPS:       # %bb.0:
+; RV64IMXVTCONDOPS-NEXT:    slt a0, a0, a1
+; RV64IMXVTCONDOPS-NEXT:    neg a0, a0
+; RV64IMXVTCONDOPS-NEXT:    lui a1, 524288
+; RV64IMXVTCONDOPS-NEXT:    addiw a1, a1, -1
+; RV64IMXVTCONDOPS-NEXT:    xor a0, a0, a1
+; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_cst_not4:
+; RV32IMZICOND:       # %bb.0:
+; RV32IMZICOND-NEXT:    slt a0, a0, a1
+; RV32IMZICOND-NEXT:    lui a1, 524288
+; RV32IMZICOND-NEXT:    addi a1, a1, -1
+; RV32IMZICOND-NEXT:    add a0, a0, a1
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_cst_not4:
+; RV64IMZICOND:       # %bb.0:
+; RV64IMZICOND-NEXT:    slt a0, a0, a1
+; RV64IMZICOND-NEXT:    neg a0, a0
+; RV64IMZICOND-NEXT:    lui a1, 524288
+; RV64IMZICOND-NEXT:    addiw a1, a1, -1
+; RV64IMZICOND-NEXT:    xor a0, a0, a1
+; RV64IMZICOND-NEXT:    ret
+  %cond = icmp slt i32 %a, %b
+  %ret = select i1 %cond, i32 -2147483648, i32 2147483647
+  ret i32 %ret
+}
+
+define i32 @select_cst_not5(i32 signext %a, i32 signext %b) {
+; RV32IM-LABEL: select_cst_not5:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    slt a0, a0, a1
+; RV32IM-NEXT:    neg a0, a0
+; RV32IM-NEXT:    lui a1, 16
+; RV32IM-NEXT:    addi a1, a1, -5
+; RV32IM-NEXT:    xor a0, a0, a1
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: select_cst_not5:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    slt a0, a0, a1
+; RV64IM-NEXT:    neg a0, a0
+; RV64IM-NEXT:    lui a1, 16
+; RV64IM-NEXT:    addiw a1, a1, -5
+; RV64IM-NEXT:    xor a0, a0, a1
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_cst_not5:
+; RV64IMXVTCONDOPS:       # %bb.0:
+; RV64IMXVTCONDOPS-NEXT:    slt a0, a0, a1
+; RV64IMXVTCONDOPS-NEXT:    neg a0, a0
+; RV64IMXVTCONDOPS-NEXT:    lui a1, 16
+; RV64IMXVTCONDOPS-NEXT:    addiw a1, a1, -5
+; RV64IMXVTCONDOPS-NEXT:    xor a0, a0, a1
+; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_cst_not5:
+; RV32IMZICOND:       # %bb.0:
+; RV32IMZICOND-NEXT:    slt a0, a0, a1
+; RV32IMZICOND-NEXT:    neg a0, a0
+; RV32IMZICOND-NEXT:    lui a1, 16
+; RV32IMZICOND-NEXT:    addi a1, a1, -5
+; RV32IMZICOND-NEXT:    xor a0, a0, a1
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_cst_not5:
+; RV64IMZICOND:       # %bb.0:
+; RV64IMZICOND-NEXT:    slt a0, a0, a1
+; RV64IMZICOND-NEXT:    neg a0, a0
+; RV64IMZICOND-NEXT:    lui a1, 16
+; RV64IMZICOND-NEXT:    addiw a1, a1, -5
+; RV64IMZICOND-NEXT:    xor a0, a0, a1
+; RV64IMZICOND-NEXT:    ret
+  %cond = icmp slt i32 %a, %b
+  %ret = select i1 %cond, i32 -65532, i32 65531
+  ret i32 %ret
+}
+
+define i32 @select_cst_unknown(i32 signext %a, i32 signext %b) {
+; RV32IM-LABEL: select_cst_unknown:
+; RV32IM:       # %bb.0:
+; RV32IM-NEXT:    mv a2, a0
+; RV32IM-NEXT:    li a0, 5
+; RV32IM-NEXT:    blt a2, a1, .LBB42_2
+; RV32IM-NEXT:  # %bb.1:
+; RV32IM-NEXT:    li a0, -7
+; RV32IM-NEXT:  .LBB42_2:
+; RV32IM-NEXT:    ret
+;
+; RV64IM-LABEL: select_cst_unknown:
+; RV64IM:       # %bb.0:
+; RV64IM-NEXT:    mv a2, a0
+; RV64IM-NEXT:    li a0, 5
+; RV64IM-NEXT:    blt a2, a1, .LBB42_2
+; RV64IM-NEXT:  # %bb.1:
+; RV64IM-NEXT:    li a0, -7
+; RV64IM-NEXT:  .LBB42_2:
+; RV64IM-NEXT:    ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_cst_unknown:
+; RV64IMXVTCONDOPS:       # %bb.0:
+; RV64IMXVTCONDOPS-NEXT:    slt a0, a0, a1
+; RV64IMXVTCONDOPS-NEXT:    li a1, -7
+; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a1, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    li a2, 5
+; RV64IMXVTCONDOPS-NEXT:    vt.maskc a0, a2, a0
+; RV64IMXVTCONDOPS-NEXT:    or a0, a0, a1
+; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; CHECKZICOND-LABEL: select_cst_unknown:
+; CHECKZICOND:       # %bb.0:
+; CHECKZICOND-NEXT:    slt a0, a0, a1
+; CHECKZICOND-NEXT:    li a1, -7
+; CHECKZICOND-NEXT:    czero.nez a1, a1, a0
+; CHECKZICOND-NEXT:    li a2, 5
+; CHECKZICOND-NEXT:    czero.eqz a0, a2, a0
+; CHECKZICOND-NEXT:    or a0, a0, a1
+; CHECKZICOND-NEXT:    ret
+  %cond = icmp slt i32 %a, %b
+  %ret = select i1 %cond, i32 5, i32 -7
+  ret i32 %ret
+}

More information about the llvm-commits mailing list