[llvm] 0c05528 - [RISCV] Use RISCVISD::CZERO_EQZ/CZERO_NEZ for XVentanaCondOps.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Tue Jul 18 10:18:20 PDT 2023


Author: Craig Topper
Date: 2023-07-18T10:18:02-07:00
New Revision: 0c055286b2178c28f61142d6f1c40a05e3e45dd8

URL: https://github.com/llvm/llvm-project/commit/0c055286b2178c28f61142d6f1c40a05e3e45dd8
DIFF: https://github.com/llvm/llvm-project/commit/0c055286b2178c28f61142d6f1c40a05e3e45dd8.diff

LOG: [RISCV] Use RISCVISD::CZERO_EQZ/CZERO_NEZ for XVentanaCondOps.

This makes Zicond and XVentanaCondOps use the same SELECT lowering code path.
The instructions have identical semantics.
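
For reference, both extensions provide the same conditional-zero primitive:
czero.eqz rd, rs1, rs2 (Zicond) and vt.maskc rd, rs1, rs2 (XVentanaCondOps)
write rs1 to rd when rs2 is nonzero and 0 otherwise, while czero.nez and
vt.maskcn do the reverse. A minimal sketch of what the shared lowering
produces for a select with a zero operand (the function name and exact
register assignment are illustrative, not taken from this patch):

    ; select c != 0 ? t : 0
    define i64 @sel_zero(i64 %c, i64 %t) {
      %cond = icmp ne i64 %c, 0
      %r = select i1 %cond, i64 %t, i64 0
      ret i64 %r
    }

    ; Expected lowering, a single instruction on either extension:
    ;   Zicond:           czero.eqz a0, a1, a0
    ;   XVentanaCondOps:  vt.maskc  a0, a1, a0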

Reviewed By: wangpc

Differential Revision: https://reviews.llvm.org/D155391

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.h
    llvm/lib/Target/RISCV/RISCVInstrInfoXVentana.td
    llvm/test/CodeGen/RISCV/condops.ll
    llvm/test/CodeGen/RISCV/select.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index e01b261c2f43d6..8376b9add9e9ed 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -333,8 +333,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
   if (Subtarget.is64Bit())
     setOperationAction(ISD::ABS, MVT::i32, Custom);
 
-  if (!Subtarget.hasVendorXVentanaCondOps() &&
-      !Subtarget.hasVendorXTHeadCondMov())
+  if (!Subtarget.hasVendorXTHeadCondMov())
     setOperationAction(ISD::SELECT, XLenVT, Custom);
 
   static const unsigned FPLegalNodeTypes[] = {
@@ -5949,11 +5948,12 @@ SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
     return DAG.getNode(ISD::VSELECT, DL, VT, CondSplat, TrueV, FalseV);
   }
 
-  // When Zicond is present, emit CZERO_EQZ and CZERO_NEZ nodes to implement
-  // the SELECT. Performing the lowering here allows for greater control over
-  // when CZERO_{EQZ/NEZ} are used vs another branchless sequence or
-  // RISCVISD::SELECT_CC node (branch-based select).
-  if (Subtarget.hasStdExtZicond() && VT.isScalarInteger()) {
+  // When Zicond or XVentanaCondOps is present, emit CZERO_EQZ and CZERO_NEZ
+  // nodes to implement the SELECT. Performing the lowering here allows for
+  // greater control over when CZERO_{EQZ/NEZ} are used vs another branchless
+  // sequence or RISCVISD::SELECT_CC node (branch-based select).
+  if ((Subtarget.hasStdExtZicond() || Subtarget.hasVendorXVentanaCondOps()) &&
+      VT.isScalarInteger()) {
     if (SDValue NewCondV = selectSETCC(CondV, ISD::SETNE, DAG)) {
       // (select (riscv_setne c), t, 0) -> (czero_eqz t, c)
       if (isNullConstant(FalseV))

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index 8ab84a13cf2422..a6c7100ddf42b7 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -370,9 +370,9 @@ enum NodeType : unsigned {
   SWAP_CSR,
 
   // Branchless select operations, matching the semantics of the instructions
-  // defined in zicond.
-  CZERO_EQZ,
-  CZERO_NEZ,
+  // defined in Zicond or XVentanaCondOps.
+  CZERO_EQZ, // vt.maskc for XVentanaCondOps.
+  CZERO_NEZ, // vt.maskcn for XVentanaCondOps.
 
   // FP to 32 bit int conversions for RV64. These are used to keep track of the
   // result being sign extended to 64 bit. These saturate out of range inputs.

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXVentana.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXVentana.td
index 07cf086979f22f..f6b0feaf76284f 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXVentana.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXVentana.td
@@ -29,34 +29,8 @@ def VT_MASKCN : VTMaskedMove<0b111, "vt.maskcn">,
            Sched<[WriteIALU, ReadIALU, ReadIALU]>;
 
 let Predicates = [IsRV64, HasVendorXVentanaCondOps] in {
-// Directly use MASKC/MASKCN in case of any of the operands being 0.
-def : Pat<(select (i64 GPR:$rc), GPR:$rs1, (i64 0)),
+def : Pat<(XLenVT (riscv_czero_eqz GPR:$rs1, GPR:$rc)),
           (VT_MASKC GPR:$rs1, GPR:$rc)>;
-def : Pat<(select (i64 GPR:$rc), (i64 0), GPR:$rs1),
+def : Pat<(XLenVT (riscv_czero_nez GPR:$rs1, GPR:$rc)),
           (VT_MASKCN GPR:$rs1, GPR:$rc)>;
-
-def : Pat<(select (riscv_setne (i64 GPR:$rc)), GPR:$rs1, (i64 0)),
-          (VT_MASKC GPR:$rs1, GPR:$rc)>;
-def : Pat<(select (riscv_seteq (i64 GPR:$rc)), GPR:$rs1, (i64 0)),
-          (VT_MASKCN GPR:$rs1, GPR:$rc)>;
-def : Pat<(select (riscv_setne (i64 GPR:$rc)), (i64 0), GPR:$rs1),
-          (VT_MASKCN GPR:$rs1, GPR:$rc)>;
-def : Pat<(select (riscv_seteq (i64 GPR:$rc)), (i64 0), GPR:$rs1),
-          (VT_MASKC GPR:$rs1, GPR:$rc)>;
-
-// Conditional AND operation patterns.
-def : Pat<(i64 (select (i64 GPR:$rc), (and GPR:$rs1, GPR:$rs2), GPR:$rs1)),
-          (OR (AND $rs1, $rs2), (VT_MASKCN $rs1, $rc))>;
-def : Pat<(i64 (select (i64 GPR:$rc), GPR:$rs1, (and GPR:$rs1, GPR:$rs2))),
-          (OR (AND $rs1, $rs2), (VT_MASKC $rs1, $rc))>;
-
-// Basic select pattern that selects between 2 registers.
-def : Pat<(i64 (select (i64 GPR:$rc), GPR:$rs1, GPR:$rs2)),
-          (OR (VT_MASKC $rs1, $rc), (VT_MASKCN $rs2, $rc))>;
-
-def : Pat<(i64 (select (riscv_setne (i64 GPR:$rc)), GPR:$rs1, GPR:$rs2)),
-          (OR (VT_MASKC GPR:$rs1, GPR:$rc), (VT_MASKCN GPR:$rs2, GPR:$rc))>;
-def : Pat<(i64 (select (riscv_seteq (i64 GPR:$rc)), GPR:$rs2, GPR:$rs1)),
-          (OR (VT_MASKC GPR:$rs1, GPR:$rc), (VT_MASKCN GPR:$rs2, GPR:$rc))>;
-
 } // Predicates = [IsRV64, HasVendorXVentanaCondOps]
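
The removed OR-based select patterns are no longer needed: after the change in
RISCVISelLowering.cpp, a full two-register select is lowered to a
czero_eqz/czero_nez pair combined with an OR, and the two remaining patterns
map those nodes onto vt.maskc/vt.maskcn. A rough sketch of the resulting
sequence under that assumption (register assignment is illustrative; see the
updated condops.ll and select.ll checks below for the exact output):

    ; select c != 0 ? t : f
    define i64 @sel_rr(i64 %c, i64 %t, i64 %f) {
      %cond = icmp ne i64 %c, 0
      %r = select i1 %cond, i64 %t, i64 %f
      ret i64 %r
    }

    ; Expected XVentanaCondOps lowering, same shape as the Zicond output:
    ;   vt.maskc  a1, a1, a0    # a1 = (c != 0) ? t : 0
    ;   vt.maskcn a0, a2, a0    # a0 = (c == 0) ? f : 0
    ;   or        a0, a1, a0
    ;   ret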

diff --git a/llvm/test/CodeGen/RISCV/condops.ll b/llvm/test/CodeGen/RISCV/condops.ll
index a36077955b7f27..d16b111c90e531 100644
--- a/llvm/test/CodeGen/RISCV/condops.ll
+++ b/llvm/test/CodeGen/RISCV/condops.ll
@@ -799,9 +799,9 @@ define i64 @and1(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
 ;
 ; RV64XVENTANACONDOPS-LABEL: and1:
 ; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    and a2, a1, a2
 ; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT:    and a1, a1, a2
-; RV64XVENTANACONDOPS-NEXT:    or a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    or a0, a2, a0
 ; RV64XVENTANACONDOPS-NEXT:    ret
 ;
 ; RV64XTHEADCONDMOV-LABEL: and1:
@@ -856,8 +856,8 @@ define i64 @and2(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
 ;
 ; RV64XVENTANACONDOPS-LABEL: and2:
 ; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    and a1, a1, a2
 ; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a2, a0
-; RV64XVENTANACONDOPS-NEXT:    and a1, a2, a1
 ; RV64XVENTANACONDOPS-NEXT:    or a0, a1, a0
 ; RV64XVENTANACONDOPS-NEXT:    ret
 ;
@@ -913,9 +913,9 @@ define i64 @and3(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
 ;
 ; RV64XVENTANACONDOPS-LABEL: and3:
 ; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    and a2, a1, a2
 ; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT:    and a1, a1, a2
-; RV64XVENTANACONDOPS-NEXT:    or a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    or a0, a2, a0
 ; RV64XVENTANACONDOPS-NEXT:    ret
 ;
 ; RV64XTHEADCONDMOV-LABEL: and3:
@@ -970,8 +970,8 @@ define i64 @and4(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
 ;
 ; RV64XVENTANACONDOPS-LABEL: and4:
 ; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    and a1, a1, a2
 ; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a2, a0
-; RV64XVENTANACONDOPS-NEXT:    and a1, a2, a1
 ; RV64XVENTANACONDOPS-NEXT:    or a0, a1, a0
 ; RV64XVENTANACONDOPS-NEXT:    ret
 ;
@@ -1291,8 +1291,9 @@ define i64 @setge(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
 ; RV64XVENTANACONDOPS-LABEL: setge:
 ; RV64XVENTANACONDOPS:       # %bb.0:
 ; RV64XVENTANACONDOPS-NEXT:    slt a0, a0, a1
-; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a1, a2, a0
-; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a3, a0
+; RV64XVENTANACONDOPS-NEXT:    xori a0, a0, 1
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a1, a3, a0
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a2, a0
 ; RV64XVENTANACONDOPS-NEXT:    or a0, a0, a1
 ; RV64XVENTANACONDOPS-NEXT:    ret
 ;
@@ -1438,8 +1439,9 @@ define i64 @setle(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
 ; RV64XVENTANACONDOPS-LABEL: setle:
 ; RV64XVENTANACONDOPS:       # %bb.0:
 ; RV64XVENTANACONDOPS-NEXT:    slt a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a1, a2, a0
-; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a3, a0
+; RV64XVENTANACONDOPS-NEXT:    xori a0, a0, 1
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a1, a3, a0
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a2, a0
 ; RV64XVENTANACONDOPS-NEXT:    or a0, a0, a1
 ; RV64XVENTANACONDOPS-NEXT:    ret
 ;
@@ -1585,8 +1587,9 @@ define i64 @setuge(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
 ; RV64XVENTANACONDOPS-LABEL: setuge:
 ; RV64XVENTANACONDOPS:       # %bb.0:
 ; RV64XVENTANACONDOPS-NEXT:    sltu a0, a0, a1
-; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a1, a2, a0
-; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a3, a0
+; RV64XVENTANACONDOPS-NEXT:    xori a0, a0, 1
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a1, a3, a0
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a2, a0
 ; RV64XVENTANACONDOPS-NEXT:    or a0, a0, a1
 ; RV64XVENTANACONDOPS-NEXT:    ret
 ;
@@ -1732,8 +1735,9 @@ define i64 @setule(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
 ; RV64XVENTANACONDOPS-LABEL: setule:
 ; RV64XVENTANACONDOPS:       # %bb.0:
 ; RV64XVENTANACONDOPS-NEXT:    sltu a0, a1, a0
-; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a1, a2, a0
-; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a3, a0
+; RV64XVENTANACONDOPS-NEXT:    xori a0, a0, 1
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a1, a3, a0
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a2, a0
 ; RV64XVENTANACONDOPS-NEXT:    or a0, a0, a1
 ; RV64XVENTANACONDOPS-NEXT:    ret
 ;
@@ -3332,9 +3336,10 @@ define i32 @setune_32(float %a, float %b, i32 %rs1, i32 %rs2) {
 ; RV64XVENTANACONDOPS-LABEL: setune_32:
 ; RV64XVENTANACONDOPS:       # %bb.0:
 ; RV64XVENTANACONDOPS-NEXT:    feq.s a2, fa0, fa1
-; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a0, a2
-; RV64XVENTANACONDOPS-NEXT:    vt.maskc a1, a1, a2
-; RV64XVENTANACONDOPS-NEXT:    or a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    xori a2, a2, 1
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a1, a1, a2
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a0, a2
+; RV64XVENTANACONDOPS-NEXT:    or a0, a0, a1
 ; RV64XVENTANACONDOPS-NEXT:    ret
 ;
 ; RV64XTHEADCONDMOV-LABEL: setune_32:
@@ -3388,9 +3393,10 @@ define i64 @setune_64(float %a, float %b, i64 %rs1, i64 %rs2) {
 ; RV64XVENTANACONDOPS-LABEL: setune_64:
 ; RV64XVENTANACONDOPS:       # %bb.0:
 ; RV64XVENTANACONDOPS-NEXT:    feq.s a2, fa0, fa1
-; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a0, a2
-; RV64XVENTANACONDOPS-NEXT:    vt.maskc a1, a1, a2
-; RV64XVENTANACONDOPS-NEXT:    or a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    xori a2, a2, 1
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a1, a1, a2
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a0, a2
+; RV64XVENTANACONDOPS-NEXT:    or a0, a0, a1
 ; RV64XVENTANACONDOPS-NEXT:    ret
 ;
 ; RV64XTHEADCONDMOV-LABEL: setune_64:

diff --git a/llvm/test/CodeGen/RISCV/select.ll b/llvm/test/CodeGen/RISCV/select.ll
index 7c49010a85c659..4336aebf81b5c3 100644
--- a/llvm/test/CodeGen/RISCV/select.ll
+++ b/llvm/test/CodeGen/RISCV/select.ll
@@ -844,8 +844,8 @@ define i32 @select_and_1(i1 zeroext %cond, i32 %a, i32 %b) {
 ;
 ; RV64IMXVTCONDOPS-LABEL: select_and_1:
 ; RV64IMXVTCONDOPS:       # %bb.0: # %entry
+; RV64IMXVTCONDOPS-NEXT:    and a1, a1, a2
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a0, a2, a0
-; RV64IMXVTCONDOPS-NEXT:    and a1, a2, a1
 ; RV64IMXVTCONDOPS-NEXT:    or a0, a1, a0
 ; RV64IMXVTCONDOPS-NEXT:    ret
 ;
@@ -882,9 +882,9 @@ define i32 @select_and_2(i1 zeroext %cond, i32 %a, i32 %b) {
 ;
 ; RV64IMXVTCONDOPS-LABEL: select_and_2:
 ; RV64IMXVTCONDOPS:       # %bb.0: # %entry
+; RV64IMXVTCONDOPS-NEXT:    and a2, a1, a2
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskc a0, a1, a0
-; RV64IMXVTCONDOPS-NEXT:    and a1, a1, a2
-; RV64IMXVTCONDOPS-NEXT:    or a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    or a0, a2, a0
 ; RV64IMXVTCONDOPS-NEXT:    ret
 ;
 ; CHECKZICOND-LABEL: select_and_2:
@@ -920,10 +920,9 @@ define i32 @select_and_3(i1 zeroext %cond, i32 %a) {
 ;
 ; RV64IMXVTCONDOPS-LABEL: select_and_3:
 ; RV64IMXVTCONDOPS:       # %bb.0: # %entry
+; RV64IMXVTCONDOPS-NEXT:    andi a2, a1, 42
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskc a0, a1, a0
-; RV64IMXVTCONDOPS-NEXT:    li a2, 42
-; RV64IMXVTCONDOPS-NEXT:    and a1, a1, a2
-; RV64IMXVTCONDOPS-NEXT:    or a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    or a0, a2, a0
 ; RV64IMXVTCONDOPS-NEXT:    ret
 ;
 ; CHECKZICOND-LABEL: select_and_3:


        

