[llvm] a755e80 - [RISCV] Add codegen for the experimental zicond extension

Alex Bradbury via llvm-commits llvm-commits at lists.llvm.org
Thu Mar 30 13:06:08 PDT 2023


Author: Alex Bradbury
Date: 2023-03-30T21:05:22+01:00
New Revision: a755e80ed1d2fe439d7c8c0fa38c399e398aa4f0

URL: https://github.com/llvm/llvm-project/commit/a755e80ed1d2fe439d7c8c0fa38c399e398aa4f0
DIFF: https://github.com/llvm/llvm-project/commit/a755e80ed1d2fe439d7c8c0fa38c399e398aa4f0.diff

LOG: [RISCV] Add codegen for the experimental zicond extension

This directly matches the codegen for xventanacondops with vt.maskcn =>
czero.nez and vt.maskc => czero.eqz. An additional difference is that
zicond is available on RV32 in addition to RV64 (xventanacondops is RV64
only).

Differential Revision: https://reviews.llvm.org/D147147

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVInstrInfoZicond.td
    llvm/test/CodeGen/RISCV/condops.ll
    llvm/test/CodeGen/RISCV/select-binop-identity.ll
    llvm/test/CodeGen/RISCV/select.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 5c6d47dc429a..36ca6836bc48 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -324,7 +324,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
   if (Subtarget.is64Bit())
     setOperationAction(ISD::ABS, MVT::i32, Custom);
 
-  if (!Subtarget.hasVendorXVentanaCondOps() &&
+  if (!Subtarget.hasStdExtZicond() && !Subtarget.hasVendorXVentanaCondOps() &&
       !Subtarget.hasVendorXTHeadCondMov())
     setOperationAction(ISD::SELECT, XLenVT, Custom);
 

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZicond.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZicond.td
index 04547d649c78..1dfde82bac11 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZicond.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZicond.td
@@ -23,3 +23,39 @@ def CZERO_EQZ : ALU_rr<0b0000111, 0b101, "czero.eqz">,
 def CZERO_NEZ : ALU_rr<0b0000111, 0b111, "czero.nez">,
                 Sched<[WriteIALU, ReadIALU, ReadIALU]>;
 } // Predicates = [HasStdExtZicond]
+
+//===----------------------------------------------------------------------===//
+// Pseudo-instructions and codegen patterns
+//===----------------------------------------------------------------------===//
+
+let Predicates = [HasStdExtZicond] in {
+// Directly use CZERO_EQZ/CZERO_NEZ in case of any of the operands being 0.
+def : Pat<(select GPR:$rc, GPR:$rs1, 0),
+          (CZERO_EQZ GPR:$rs1, GPR:$rc)>;
+def : Pat<(select GPR:$rc, 0, GPR:$rs1),
+          (CZERO_NEZ GPR:$rs1, GPR:$rc)>;
+
+def : Pat<(select (riscv_setne GPR:$rc), GPR:$rs1, 0),
+          (CZERO_EQZ GPR:$rs1, GPR:$rc)>;
+def : Pat<(select (riscv_seteq GPR:$rc), GPR:$rs1, 0),
+          (CZERO_NEZ GPR:$rs1, GPR:$rc)>;
+def : Pat<(select (riscv_setne GPR:$rc), 0, GPR:$rs1),
+          (CZERO_NEZ GPR:$rs1, GPR:$rc)>;
+def : Pat<(select (riscv_seteq GPR:$rc), 0, GPR:$rs1),
+          (CZERO_EQZ GPR:$rs1, GPR:$rc)>;
+
+// Conditional AND operation patterns.
+def : Pat<(select GPR:$rc, (and GPR:$rs1, GPR:$rs2), GPR:$rs1),
+          (OR (AND $rs1, $rs2), (CZERO_NEZ $rs1, $rc))>;
+def : Pat<(select GPR:$rc, GPR:$rs1, (and GPR:$rs1, GPR:$rs2)),
+          (OR (AND $rs1, $rs2), (CZERO_EQZ $rs1, $rc))>;
+
+// Basic select pattern that selects between 2 registers.
+def : Pat<(select GPR:$rc, GPR:$rs1, GPR:$rs2),
+          (OR (CZERO_EQZ $rs1, $rc), (CZERO_NEZ $rs2, $rc))>;
+
+def : Pat<(select (riscv_setne GPR:$rc), GPR:$rs1, GPR:$rs2),
+          (OR (CZERO_EQZ GPR:$rs1, GPR:$rc), (CZERO_NEZ GPR:$rs2, GPR:$rc))>;
+def : Pat<(select (riscv_seteq GPR:$rc), GPR:$rs2, GPR:$rs1),
+          (OR (CZERO_EQZ GPR:$rs1, GPR:$rc), (CZERO_NEZ GPR:$rs2, GPR:$rc))>;
+} // Predicates = [HasStdExtZicond]

diff --git a/llvm/test/CodeGen/RISCV/condops.ll b/llvm/test/CodeGen/RISCV/condops.ll
index 2857786694d8..a88d3f70ddbe 100644
--- a/llvm/test/CodeGen/RISCV/condops.ll
+++ b/llvm/test/CodeGen/RISCV/condops.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+xventanacondops < %s | FileCheck %s -check-prefix=RV64XVENTANACONDOPS
 ; RUN: llc -mtriple=riscv64 -mattr=+xtheadcondmov < %s | FileCheck %s -check-prefix=RV64XTHEADCONDMOV
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-zicond < %s | FileCheck %s -check-prefixes=RV32ZICOND
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-zicond < %s | FileCheck %s -check-prefixes=RV64ZICOND
 
 define i64 @zero1(i64 %rs1, i1 zeroext %rc) {
 ; RV64XVENTANACONDOPS-LABEL: zero1:
@@ -12,6 +14,17 @@ define i64 @zero1(i64 %rs1, i1 zeroext %rc) {
 ; RV64XTHEADCONDMOV:       # %bb.0:
 ; RV64XTHEADCONDMOV-NEXT:    th.mveqz a0, zero, a1
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: zero1:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    czero.eqz a0, a0, a2
+; RV32ZICOND-NEXT:    czero.eqz a1, a1, a2
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: zero1:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.eqz a0, a0, a1
+; RV64ZICOND-NEXT:    ret
   %sel = select i1 %rc, i64 %rs1, i64 0
   ret i64 %sel
 }
@@ -26,6 +39,17 @@ define i64 @zero2(i64 %rs1, i1 zeroext %rc) {
 ; RV64XTHEADCONDMOV:       # %bb.0:
 ; RV64XTHEADCONDMOV-NEXT:    th.mvnez a0, zero, a1
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: zero2:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    czero.nez a0, a0, a2
+; RV32ZICOND-NEXT:    czero.nez a1, a1, a2
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: zero2:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.nez a0, a0, a1
+; RV64ZICOND-NEXT:    ret
   %sel = select i1 %rc, i64 0, i64 %rs1
   ret i64 %sel
 }
@@ -42,6 +66,27 @@ define i64 @add1(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mveqz a2, zero, a0
 ; RV64XTHEADCONDMOV-NEXT:    add a0, a1, a2
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: add1:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    add a4, a2, a4
+; RV32ZICOND-NEXT:    add a3, a1, a3
+; RV32ZICOND-NEXT:    sltu a5, a3, a1
+; RV32ZICOND-NEXT:    add a4, a4, a5
+; RV32ZICOND-NEXT:    czero.nez a1, a1, a0
+; RV32ZICOND-NEXT:    czero.eqz a3, a3, a0
+; RV32ZICOND-NEXT:    or a3, a3, a1
+; RV32ZICOND-NEXT:    czero.eqz a1, a4, a0
+; RV32ZICOND-NEXT:    czero.nez a0, a2, a0
+; RV32ZICOND-NEXT:    or a1, a1, a0
+; RV32ZICOND-NEXT:    mv a0, a3
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: add1:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.eqz a0, a2, a0
+; RV64ZICOND-NEXT:    add a0, a1, a0
+; RV64ZICOND-NEXT:    ret
   %add = add i64 %rs1, %rs2
   %sel = select i1 %rc, i64 %add, i64 %rs1
   ret i64 %sel
@@ -59,6 +104,27 @@ define i64 @add2(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mveqz a1, zero, a0
 ; RV64XTHEADCONDMOV-NEXT:    add a0, a2, a1
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: add2:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    add a2, a2, a4
+; RV32ZICOND-NEXT:    add a5, a1, a3
+; RV32ZICOND-NEXT:    sltu a1, a5, a1
+; RV32ZICOND-NEXT:    add a1, a2, a1
+; RV32ZICOND-NEXT:    czero.nez a2, a3, a0
+; RV32ZICOND-NEXT:    czero.eqz a3, a5, a0
+; RV32ZICOND-NEXT:    or a2, a3, a2
+; RV32ZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV32ZICOND-NEXT:    czero.nez a0, a4, a0
+; RV32ZICOND-NEXT:    or a1, a1, a0
+; RV32ZICOND-NEXT:    mv a0, a2
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: add2:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV64ZICOND-NEXT:    add a0, a2, a0
+; RV64ZICOND-NEXT:    ret
   %add = add i64 %rs1, %rs2
   %sel = select i1 %rc, i64 %add, i64 %rs2
   ret i64 %sel
@@ -76,6 +142,27 @@ define i64 @add3(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mvnez a2, zero, a0
 ; RV64XTHEADCONDMOV-NEXT:    add a0, a1, a2
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: add3:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    add a4, a2, a4
+; RV32ZICOND-NEXT:    add a3, a1, a3
+; RV32ZICOND-NEXT:    sltu a5, a3, a1
+; RV32ZICOND-NEXT:    add a4, a4, a5
+; RV32ZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV32ZICOND-NEXT:    czero.nez a3, a3, a0
+; RV32ZICOND-NEXT:    or a3, a1, a3
+; RV32ZICOND-NEXT:    czero.nez a1, a4, a0
+; RV32ZICOND-NEXT:    czero.eqz a0, a2, a0
+; RV32ZICOND-NEXT:    or a1, a0, a1
+; RV32ZICOND-NEXT:    mv a0, a3
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: add3:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64ZICOND-NEXT:    add a0, a1, a0
+; RV64ZICOND-NEXT:    ret
   %add = add i64 %rs1, %rs2
   %sel = select i1 %rc, i64 %rs1, i64 %add
   ret i64 %sel
@@ -93,6 +180,27 @@ define i64 @add4(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mvnez a1, zero, a0
 ; RV64XTHEADCONDMOV-NEXT:    add a0, a2, a1
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: add4:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    add a2, a2, a4
+; RV32ZICOND-NEXT:    add a5, a1, a3
+; RV32ZICOND-NEXT:    sltu a1, a5, a1
+; RV32ZICOND-NEXT:    add a1, a2, a1
+; RV32ZICOND-NEXT:    czero.eqz a2, a3, a0
+; RV32ZICOND-NEXT:    czero.nez a3, a5, a0
+; RV32ZICOND-NEXT:    or a2, a2, a3
+; RV32ZICOND-NEXT:    czero.nez a1, a1, a0
+; RV32ZICOND-NEXT:    czero.eqz a0, a4, a0
+; RV32ZICOND-NEXT:    or a1, a0, a1
+; RV32ZICOND-NEXT:    mv a0, a2
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: add4:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.nez a0, a1, a0
+; RV64ZICOND-NEXT:    add a0, a2, a0
+; RV64ZICOND-NEXT:    ret
   %add = add i64 %rs1, %rs2
   %sel = select i1 %rc, i64 %rs2, i64 %add
   ret i64 %sel
@@ -110,6 +218,25 @@ define i64 @sub1(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mveqz a2, zero, a0
 ; RV64XTHEADCONDMOV-NEXT:    sub a0, a1, a2
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: sub1:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    sltu a5, a1, a3
+; RV32ZICOND-NEXT:    sub a4, a2, a4
+; RV32ZICOND-NEXT:    sub a4, a4, a5
+; RV32ZICOND-NEXT:    czero.eqz a4, a4, a0
+; RV32ZICOND-NEXT:    czero.nez a2, a2, a0
+; RV32ZICOND-NEXT:    or a2, a4, a2
+; RV32ZICOND-NEXT:    czero.eqz a0, a3, a0
+; RV32ZICOND-NEXT:    sub a0, a1, a0
+; RV32ZICOND-NEXT:    mv a1, a2
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: sub1:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.eqz a0, a2, a0
+; RV64ZICOND-NEXT:    sub a0, a1, a0
+; RV64ZICOND-NEXT:    ret
   %sub = sub i64 %rs1, %rs2
   %sel = select i1 %rc, i64 %sub, i64 %rs1
   ret i64 %sel
@@ -127,6 +254,25 @@ define i64 @sub2(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mvnez a2, zero, a0
 ; RV64XTHEADCONDMOV-NEXT:    sub a0, a1, a2
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: sub2:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    sltu a5, a1, a3
+; RV32ZICOND-NEXT:    sub a4, a2, a4
+; RV32ZICOND-NEXT:    sub a4, a4, a5
+; RV32ZICOND-NEXT:    czero.nez a4, a4, a0
+; RV32ZICOND-NEXT:    czero.eqz a2, a2, a0
+; RV32ZICOND-NEXT:    or a2, a2, a4
+; RV32ZICOND-NEXT:    czero.nez a0, a3, a0
+; RV32ZICOND-NEXT:    sub a0, a1, a0
+; RV32ZICOND-NEXT:    mv a1, a2
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: sub2:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64ZICOND-NEXT:    sub a0, a1, a0
+; RV64ZICOND-NEXT:    ret
   %sub = sub i64 %rs1, %rs2
   %sel = select i1 %rc, i64 %rs1, i64 %sub
   ret i64 %sel
@@ -144,6 +290,21 @@ define i64 @or1(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mveqz a2, zero, a0
 ; RV64XTHEADCONDMOV-NEXT:    or a0, a1, a2
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: or1:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    czero.eqz a3, a3, a0
+; RV32ZICOND-NEXT:    or a3, a1, a3
+; RV32ZICOND-NEXT:    czero.eqz a1, a4, a0
+; RV32ZICOND-NEXT:    or a1, a2, a1
+; RV32ZICOND-NEXT:    mv a0, a3
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: or1:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.eqz a0, a2, a0
+; RV64ZICOND-NEXT:    or a0, a1, a0
+; RV64ZICOND-NEXT:    ret
   %or = or i64 %rs1, %rs2
   %sel = select i1 %rc, i64 %or, i64 %rs1
   ret i64 %sel
@@ -161,6 +322,21 @@ define i64 @or2(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mveqz a1, zero, a0
 ; RV64XTHEADCONDMOV-NEXT:    or a0, a2, a1
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: or2:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV32ZICOND-NEXT:    or a3, a3, a1
+; RV32ZICOND-NEXT:    czero.eqz a1, a2, a0
+; RV32ZICOND-NEXT:    or a1, a4, a1
+; RV32ZICOND-NEXT:    mv a0, a3
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: or2:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV64ZICOND-NEXT:    or a0, a2, a0
+; RV64ZICOND-NEXT:    ret
   %or = or i64 %rs1, %rs2
   %sel = select i1 %rc, i64 %or, i64 %rs2
   ret i64 %sel
@@ -178,6 +354,21 @@ define i64 @or3(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mvnez a2, zero, a0
 ; RV64XTHEADCONDMOV-NEXT:    or a0, a1, a2
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: or3:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    czero.nez a3, a3, a0
+; RV32ZICOND-NEXT:    or a3, a1, a3
+; RV32ZICOND-NEXT:    czero.nez a1, a4, a0
+; RV32ZICOND-NEXT:    or a1, a2, a1
+; RV32ZICOND-NEXT:    mv a0, a3
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: or3:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64ZICOND-NEXT:    or a0, a1, a0
+; RV64ZICOND-NEXT:    ret
   %or = or i64 %rs1, %rs2
   %sel = select i1 %rc, i64 %rs1, i64 %or
   ret i64 %sel
@@ -195,6 +386,21 @@ define i64 @or4(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mvnez a1, zero, a0
 ; RV64XTHEADCONDMOV-NEXT:    or a0, a2, a1
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: or4:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    czero.nez a1, a1, a0
+; RV32ZICOND-NEXT:    or a3, a3, a1
+; RV32ZICOND-NEXT:    czero.nez a1, a2, a0
+; RV32ZICOND-NEXT:    or a1, a4, a1
+; RV32ZICOND-NEXT:    mv a0, a3
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: or4:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.nez a0, a1, a0
+; RV64ZICOND-NEXT:    or a0, a2, a0
+; RV64ZICOND-NEXT:    ret
   %or = or i64 %rs1, %rs2
   %sel = select i1 %rc, i64 %rs2, i64 %or
   ret i64 %sel
@@ -212,6 +418,21 @@ define i64 @xor1(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mveqz a2, zero, a0
 ; RV64XTHEADCONDMOV-NEXT:    xor a0, a1, a2
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: xor1:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    czero.eqz a3, a3, a0
+; RV32ZICOND-NEXT:    xor a3, a1, a3
+; RV32ZICOND-NEXT:    czero.eqz a1, a4, a0
+; RV32ZICOND-NEXT:    xor a1, a2, a1
+; RV32ZICOND-NEXT:    mv a0, a3
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: xor1:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.eqz a0, a2, a0
+; RV64ZICOND-NEXT:    xor a0, a1, a0
+; RV64ZICOND-NEXT:    ret
   %xor = xor i64 %rs1, %rs2
   %sel = select i1 %rc, i64 %xor, i64 %rs1
   ret i64 %sel
@@ -229,6 +450,21 @@ define i64 @xor2(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mveqz a1, zero, a0
 ; RV64XTHEADCONDMOV-NEXT:    xor a0, a2, a1
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: xor2:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV32ZICOND-NEXT:    xor a3, a3, a1
+; RV32ZICOND-NEXT:    czero.eqz a1, a2, a0
+; RV32ZICOND-NEXT:    xor a1, a4, a1
+; RV32ZICOND-NEXT:    mv a0, a3
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: xor2:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV64ZICOND-NEXT:    xor a0, a2, a0
+; RV64ZICOND-NEXT:    ret
   %xor = xor i64 %rs1, %rs2
   %sel = select i1 %rc, i64 %xor, i64 %rs2
   ret i64 %sel
@@ -246,6 +482,21 @@ define i64 @xor3(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mvnez a2, zero, a0
 ; RV64XTHEADCONDMOV-NEXT:    xor a0, a1, a2
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: xor3:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    czero.nez a3, a3, a0
+; RV32ZICOND-NEXT:    xor a3, a1, a3
+; RV32ZICOND-NEXT:    czero.nez a1, a4, a0
+; RV32ZICOND-NEXT:    xor a1, a2, a1
+; RV32ZICOND-NEXT:    mv a0, a3
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: xor3:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64ZICOND-NEXT:    xor a0, a1, a0
+; RV64ZICOND-NEXT:    ret
   %xor = xor i64 %rs1, %rs2
   %sel = select i1 %rc, i64 %rs1, i64 %xor
   ret i64 %sel
@@ -263,6 +514,21 @@ define i64 @xor4(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mvnez a1, zero, a0
 ; RV64XTHEADCONDMOV-NEXT:    xor a0, a2, a1
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: xor4:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    czero.nez a1, a1, a0
+; RV32ZICOND-NEXT:    xor a3, a3, a1
+; RV32ZICOND-NEXT:    czero.nez a1, a2, a0
+; RV32ZICOND-NEXT:    xor a1, a4, a1
+; RV32ZICOND-NEXT:    mv a0, a3
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: xor4:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.nez a0, a1, a0
+; RV64ZICOND-NEXT:    xor a0, a2, a0
+; RV64ZICOND-NEXT:    ret
   %xor = xor i64 %rs1, %rs2
   %sel = select i1 %rc, i64 %rs2, i64 %xor
   ret i64 %sel
@@ -282,6 +548,24 @@ define i64 @and1(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mveqz a2, a1, a0
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, a2
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: and1:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    czero.nez a5, a1, a0
+; RV32ZICOND-NEXT:    and a1, a1, a3
+; RV32ZICOND-NEXT:    or a3, a1, a5
+; RV32ZICOND-NEXT:    czero.nez a0, a2, a0
+; RV32ZICOND-NEXT:    and a1, a2, a4
+; RV32ZICOND-NEXT:    or a1, a1, a0
+; RV32ZICOND-NEXT:    mv a0, a3
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: and1:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.nez a0, a1, a0
+; RV64ZICOND-NEXT:    and a1, a1, a2
+; RV64ZICOND-NEXT:    or a0, a1, a0
+; RV64ZICOND-NEXT:    ret
   %and = and i64 %rs1, %rs2
   %sel = select i1 %rc, i64 %and, i64 %rs1
   ret i64 %sel
@@ -301,6 +585,24 @@ define i64 @and2(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mveqz a1, a2, a0
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: and2:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    czero.nez a5, a3, a0
+; RV32ZICOND-NEXT:    and a1, a3, a1
+; RV32ZICOND-NEXT:    or a3, a1, a5
+; RV32ZICOND-NEXT:    czero.nez a0, a4, a0
+; RV32ZICOND-NEXT:    and a1, a4, a2
+; RV32ZICOND-NEXT:    or a1, a1, a0
+; RV32ZICOND-NEXT:    mv a0, a3
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: and2:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64ZICOND-NEXT:    and a1, a2, a1
+; RV64ZICOND-NEXT:    or a0, a1, a0
+; RV64ZICOND-NEXT:    ret
   %and = and i64 %rs1, %rs2
   %sel = select i1 %rc, i64 %and, i64 %rs2
   ret i64 %sel
@@ -320,6 +622,24 @@ define i64 @and3(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mvnez a2, a1, a0
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, a2
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: and3:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    czero.eqz a5, a1, a0
+; RV32ZICOND-NEXT:    and a1, a1, a3
+; RV32ZICOND-NEXT:    or a3, a1, a5
+; RV32ZICOND-NEXT:    czero.eqz a0, a2, a0
+; RV32ZICOND-NEXT:    and a1, a2, a4
+; RV32ZICOND-NEXT:    or a1, a1, a0
+; RV32ZICOND-NEXT:    mv a0, a3
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: and3:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV64ZICOND-NEXT:    and a1, a1, a2
+; RV64ZICOND-NEXT:    or a0, a1, a0
+; RV64ZICOND-NEXT:    ret
   %and = and i64 %rs1, %rs2
   %sel = select i1 %rc, i64 %rs1, i64 %and
   ret i64 %sel
@@ -339,6 +659,24 @@ define i64 @and4(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mvnez a1, a2, a0
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: and4:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    czero.eqz a5, a3, a0
+; RV32ZICOND-NEXT:    and a1, a3, a1
+; RV32ZICOND-NEXT:    or a3, a1, a5
+; RV32ZICOND-NEXT:    czero.eqz a0, a4, a0
+; RV32ZICOND-NEXT:    and a1, a4, a2
+; RV32ZICOND-NEXT:    or a1, a1, a0
+; RV32ZICOND-NEXT:    mv a0, a3
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: and4:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.eqz a0, a2, a0
+; RV64ZICOND-NEXT:    and a1, a2, a1
+; RV64ZICOND-NEXT:    or a0, a1, a0
+; RV64ZICOND-NEXT:    ret
   %and = and i64 %rs1, %rs2
   %sel = select i1 %rc, i64 %rs2, i64 %and
   ret i64 %sel
@@ -357,6 +695,24 @@ define i64 @basic(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mveqz a1, a2, a0
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: basic:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    czero.nez a3, a3, a0
+; RV32ZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV32ZICOND-NEXT:    or a3, a1, a3
+; RV32ZICOND-NEXT:    czero.nez a1, a4, a0
+; RV32ZICOND-NEXT:    czero.eqz a0, a2, a0
+; RV32ZICOND-NEXT:    or a1, a0, a1
+; RV32ZICOND-NEXT:    mv a0, a3
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: basic:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.nez a2, a2, a0
+; RV64ZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV64ZICOND-NEXT:    or a0, a0, a2
+; RV64ZICOND-NEXT:    ret
   %sel = select i1 %rc, i64 %rs1, i64 %rs2
   ret i64 %sel
 }
@@ -376,6 +732,27 @@ define i64 @seteq(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mvnez a2, a3, a0
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, a2
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: seteq:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    xor a1, a1, a3
+; RV32ZICOND-NEXT:    xor a0, a0, a2
+; RV32ZICOND-NEXT:    or a1, a0, a1
+; RV32ZICOND-NEXT:    czero.nez a0, a4, a1
+; RV32ZICOND-NEXT:    czero.eqz a2, a6, a1
+; RV32ZICOND-NEXT:    or a0, a2, a0
+; RV32ZICOND-NEXT:    czero.nez a2, a5, a1
+; RV32ZICOND-NEXT:    czero.eqz a1, a7, a1
+; RV32ZICOND-NEXT:    or a1, a1, a2
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: seteq:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    xor a0, a0, a1
+; RV64ZICOND-NEXT:    czero.nez a1, a2, a0
+; RV64ZICOND-NEXT:    czero.eqz a0, a3, a0
+; RV64ZICOND-NEXT:    or a0, a0, a1
+; RV64ZICOND-NEXT:    ret
   %rc = icmp eq i64 %a, %b
   %sel = select i1 %rc, i64 %rs1, i64 %rs2
   ret i64 %sel
@@ -396,6 +773,27 @@ define i64 @setne(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mveqz a2, a3, a0
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, a2
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: setne:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    xor a1, a1, a3
+; RV32ZICOND-NEXT:    xor a0, a0, a2
+; RV32ZICOND-NEXT:    or a1, a0, a1
+; RV32ZICOND-NEXT:    czero.nez a0, a6, a1
+; RV32ZICOND-NEXT:    czero.eqz a2, a4, a1
+; RV32ZICOND-NEXT:    or a0, a2, a0
+; RV32ZICOND-NEXT:    czero.nez a2, a7, a1
+; RV32ZICOND-NEXT:    czero.eqz a1, a5, a1
+; RV32ZICOND-NEXT:    or a1, a1, a2
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: setne:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    xor a0, a0, a1
+; RV64ZICOND-NEXT:    czero.nez a1, a3, a0
+; RV64ZICOND-NEXT:    czero.eqz a0, a2, a0
+; RV64ZICOND-NEXT:    or a0, a0, a1
+; RV64ZICOND-NEXT:    ret
   %rc = icmp ne i64 %a, %b
   %sel = select i1 %rc, i64 %rs1, i64 %rs2
   ret i64 %sel
@@ -416,6 +814,30 @@ define i64 @setgt(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mveqz a2, a3, a0
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, a2
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: setgt:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    xor t0, a1, a3
+; RV32ZICOND-NEXT:    sltu a0, a2, a0
+; RV32ZICOND-NEXT:    czero.nez a0, a0, t0
+; RV32ZICOND-NEXT:    slt a1, a3, a1
+; RV32ZICOND-NEXT:    czero.eqz a1, a1, t0
+; RV32ZICOND-NEXT:    or a1, a1, a0
+; RV32ZICOND-NEXT:    czero.nez a0, a6, a1
+; RV32ZICOND-NEXT:    czero.eqz a2, a4, a1
+; RV32ZICOND-NEXT:    or a0, a2, a0
+; RV32ZICOND-NEXT:    czero.nez a2, a7, a1
+; RV32ZICOND-NEXT:    czero.eqz a1, a5, a1
+; RV32ZICOND-NEXT:    or a1, a1, a2
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: setgt:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    slt a0, a1, a0
+; RV64ZICOND-NEXT:    czero.nez a1, a3, a0
+; RV64ZICOND-NEXT:    czero.eqz a0, a2, a0
+; RV64ZICOND-NEXT:    or a0, a0, a1
+; RV64ZICOND-NEXT:    ret
   %rc = icmp sgt i64 %a, %b
   %sel = select i1 %rc, i64 %rs1, i64 %rs2
   ret i64 %sel
@@ -436,6 +858,30 @@ define i64 @setge(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mvnez a2, a3, a0
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, a2
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: setge:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    xor t0, a1, a3
+; RV32ZICOND-NEXT:    sltu a0, a0, a2
+; RV32ZICOND-NEXT:    czero.nez a0, a0, t0
+; RV32ZICOND-NEXT:    slt a1, a1, a3
+; RV32ZICOND-NEXT:    czero.eqz a1, a1, t0
+; RV32ZICOND-NEXT:    or a1, a1, a0
+; RV32ZICOND-NEXT:    czero.nez a0, a4, a1
+; RV32ZICOND-NEXT:    czero.eqz a2, a6, a1
+; RV32ZICOND-NEXT:    or a0, a2, a0
+; RV32ZICOND-NEXT:    czero.nez a2, a5, a1
+; RV32ZICOND-NEXT:    czero.eqz a1, a7, a1
+; RV32ZICOND-NEXT:    or a1, a1, a2
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: setge:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    slt a0, a0, a1
+; RV64ZICOND-NEXT:    czero.nez a1, a2, a0
+; RV64ZICOND-NEXT:    czero.eqz a0, a3, a0
+; RV64ZICOND-NEXT:    or a0, a0, a1
+; RV64ZICOND-NEXT:    ret
   %rc = icmp sge i64 %a, %b
   %sel = select i1 %rc, i64 %rs1, i64 %rs2
   ret i64 %sel
@@ -456,6 +902,30 @@ define i64 @setlt(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mveqz a2, a3, a0
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, a2
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: setlt:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    xor t0, a1, a3
+; RV32ZICOND-NEXT:    sltu a0, a0, a2
+; RV32ZICOND-NEXT:    czero.nez a0, a0, t0
+; RV32ZICOND-NEXT:    slt a1, a1, a3
+; RV32ZICOND-NEXT:    czero.eqz a1, a1, t0
+; RV32ZICOND-NEXT:    or a1, a1, a0
+; RV32ZICOND-NEXT:    czero.nez a0, a6, a1
+; RV32ZICOND-NEXT:    czero.eqz a2, a4, a1
+; RV32ZICOND-NEXT:    or a0, a2, a0
+; RV32ZICOND-NEXT:    czero.nez a2, a7, a1
+; RV32ZICOND-NEXT:    czero.eqz a1, a5, a1
+; RV32ZICOND-NEXT:    or a1, a1, a2
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: setlt:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    slt a0, a0, a1
+; RV64ZICOND-NEXT:    czero.nez a1, a3, a0
+; RV64ZICOND-NEXT:    czero.eqz a0, a2, a0
+; RV64ZICOND-NEXT:    or a0, a0, a1
+; RV64ZICOND-NEXT:    ret
   %rc = icmp slt i64 %a, %b
   %sel = select i1 %rc, i64 %rs1, i64 %rs2
   ret i64 %sel
@@ -476,6 +946,30 @@ define i64 @setle(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mvnez a2, a3, a0
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, a2
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: setle:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    xor t0, a1, a3
+; RV32ZICOND-NEXT:    sltu a0, a2, a0
+; RV32ZICOND-NEXT:    czero.nez a0, a0, t0
+; RV32ZICOND-NEXT:    slt a1, a3, a1
+; RV32ZICOND-NEXT:    czero.eqz a1, a1, t0
+; RV32ZICOND-NEXT:    or a1, a1, a0
+; RV32ZICOND-NEXT:    czero.nez a0, a4, a1
+; RV32ZICOND-NEXT:    czero.eqz a2, a6, a1
+; RV32ZICOND-NEXT:    or a0, a2, a0
+; RV32ZICOND-NEXT:    czero.nez a2, a5, a1
+; RV32ZICOND-NEXT:    czero.eqz a1, a7, a1
+; RV32ZICOND-NEXT:    or a1, a1, a2
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: setle:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    slt a0, a1, a0
+; RV64ZICOND-NEXT:    czero.nez a1, a2, a0
+; RV64ZICOND-NEXT:    czero.eqz a0, a3, a0
+; RV64ZICOND-NEXT:    or a0, a0, a1
+; RV64ZICOND-NEXT:    ret
   %rc = icmp sle i64 %a, %b
   %sel = select i1 %rc, i64 %rs1, i64 %rs2
   ret i64 %sel
@@ -496,6 +990,30 @@ define i64 @setugt(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mveqz a2, a3, a0
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, a2
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: setugt:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    xor t0, a1, a3
+; RV32ZICOND-NEXT:    sltu a0, a2, a0
+; RV32ZICOND-NEXT:    czero.nez a0, a0, t0
+; RV32ZICOND-NEXT:    sltu a1, a3, a1
+; RV32ZICOND-NEXT:    czero.eqz a1, a1, t0
+; RV32ZICOND-NEXT:    or a1, a1, a0
+; RV32ZICOND-NEXT:    czero.nez a0, a6, a1
+; RV32ZICOND-NEXT:    czero.eqz a2, a4, a1
+; RV32ZICOND-NEXT:    or a0, a2, a0
+; RV32ZICOND-NEXT:    czero.nez a2, a7, a1
+; RV32ZICOND-NEXT:    czero.eqz a1, a5, a1
+; RV32ZICOND-NEXT:    or a1, a1, a2
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: setugt:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    sltu a0, a1, a0
+; RV64ZICOND-NEXT:    czero.nez a1, a3, a0
+; RV64ZICOND-NEXT:    czero.eqz a0, a2, a0
+; RV64ZICOND-NEXT:    or a0, a0, a1
+; RV64ZICOND-NEXT:    ret
   %rc = icmp ugt i64 %a, %b
   %sel = select i1 %rc, i64 %rs1, i64 %rs2
   ret i64 %sel
@@ -516,6 +1034,30 @@ define i64 @setuge(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mvnez a2, a3, a0
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, a2
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: setuge:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    xor t0, a1, a3
+; RV32ZICOND-NEXT:    sltu a0, a0, a2
+; RV32ZICOND-NEXT:    czero.nez a0, a0, t0
+; RV32ZICOND-NEXT:    sltu a1, a1, a3
+; RV32ZICOND-NEXT:    czero.eqz a1, a1, t0
+; RV32ZICOND-NEXT:    or a1, a1, a0
+; RV32ZICOND-NEXT:    czero.nez a0, a4, a1
+; RV32ZICOND-NEXT:    czero.eqz a2, a6, a1
+; RV32ZICOND-NEXT:    or a0, a2, a0
+; RV32ZICOND-NEXT:    czero.nez a2, a5, a1
+; RV32ZICOND-NEXT:    czero.eqz a1, a7, a1
+; RV32ZICOND-NEXT:    or a1, a1, a2
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: setuge:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    sltu a0, a0, a1
+; RV64ZICOND-NEXT:    czero.nez a1, a2, a0
+; RV64ZICOND-NEXT:    czero.eqz a0, a3, a0
+; RV64ZICOND-NEXT:    or a0, a0, a1
+; RV64ZICOND-NEXT:    ret
   %rc = icmp uge i64 %a, %b
   %sel = select i1 %rc, i64 %rs1, i64 %rs2
   ret i64 %sel
@@ -536,6 +1078,30 @@ define i64 @setult(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mveqz a2, a3, a0
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, a2
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: setult:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    xor t0, a1, a3
+; RV32ZICOND-NEXT:    sltu a0, a0, a2
+; RV32ZICOND-NEXT:    czero.nez a0, a0, t0
+; RV32ZICOND-NEXT:    sltu a1, a1, a3
+; RV32ZICOND-NEXT:    czero.eqz a1, a1, t0
+; RV32ZICOND-NEXT:    or a1, a1, a0
+; RV32ZICOND-NEXT:    czero.nez a0, a6, a1
+; RV32ZICOND-NEXT:    czero.eqz a2, a4, a1
+; RV32ZICOND-NEXT:    or a0, a2, a0
+; RV32ZICOND-NEXT:    czero.nez a2, a7, a1
+; RV32ZICOND-NEXT:    czero.eqz a1, a5, a1
+; RV32ZICOND-NEXT:    or a1, a1, a2
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: setult:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    sltu a0, a0, a1
+; RV64ZICOND-NEXT:    czero.nez a1, a3, a0
+; RV64ZICOND-NEXT:    czero.eqz a0, a2, a0
+; RV64ZICOND-NEXT:    or a0, a0, a1
+; RV64ZICOND-NEXT:    ret
   %rc = icmp ult i64 %a, %b
   %sel = select i1 %rc, i64 %rs1, i64 %rs2
   ret i64 %sel
@@ -556,6 +1122,30 @@ define i64 @setule(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mvnez a2, a3, a0
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, a2
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: setule:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    xor t0, a1, a3
+; RV32ZICOND-NEXT:    sltu a0, a2, a0
+; RV32ZICOND-NEXT:    czero.nez a0, a0, t0
+; RV32ZICOND-NEXT:    sltu a1, a3, a1
+; RV32ZICOND-NEXT:    czero.eqz a1, a1, t0
+; RV32ZICOND-NEXT:    or a1, a1, a0
+; RV32ZICOND-NEXT:    czero.nez a0, a4, a1
+; RV32ZICOND-NEXT:    czero.eqz a2, a6, a1
+; RV32ZICOND-NEXT:    or a0, a2, a0
+; RV32ZICOND-NEXT:    czero.nez a2, a5, a1
+; RV32ZICOND-NEXT:    czero.eqz a1, a7, a1
+; RV32ZICOND-NEXT:    or a1, a1, a2
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: setule:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    sltu a0, a1, a0
+; RV64ZICOND-NEXT:    czero.nez a1, a2, a0
+; RV64ZICOND-NEXT:    czero.eqz a0, a3, a0
+; RV64ZICOND-NEXT:    or a0, a0, a1
+; RV64ZICOND-NEXT:    ret
   %rc = icmp ule i64 %a, %b
   %sel = select i1 %rc, i64 %rs1, i64 %rs2
   ret i64 %sel
@@ -574,6 +1164,24 @@ define i64 @seteq_zero(i64 %a, i64 %rs1, i64 %rs2) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mvnez a1, a2, a0
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: seteq_zero:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    or a1, a0, a1
+; RV32ZICOND-NEXT:    czero.nez a0, a2, a1
+; RV32ZICOND-NEXT:    czero.eqz a2, a4, a1
+; RV32ZICOND-NEXT:    or a0, a2, a0
+; RV32ZICOND-NEXT:    czero.nez a2, a3, a1
+; RV32ZICOND-NEXT:    czero.eqz a1, a5, a1
+; RV32ZICOND-NEXT:    or a1, a1, a2
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: seteq_zero:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.nez a1, a1, a0
+; RV64ZICOND-NEXT:    czero.eqz a0, a2, a0
+; RV64ZICOND-NEXT:    or a0, a0, a1
+; RV64ZICOND-NEXT:    ret
   %rc = icmp eq i64 %a, 0
   %sel = select i1 %rc, i64 %rs1, i64 %rs2
   ret i64 %sel
@@ -592,6 +1200,24 @@ define i64 @setne_zero(i64 %a, i64 %rs1, i64 %rs2) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mveqz a1, a2, a0
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: setne_zero:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    or a1, a0, a1
+; RV32ZICOND-NEXT:    czero.nez a0, a4, a1
+; RV32ZICOND-NEXT:    czero.eqz a2, a2, a1
+; RV32ZICOND-NEXT:    or a0, a2, a0
+; RV32ZICOND-NEXT:    czero.nez a2, a5, a1
+; RV32ZICOND-NEXT:    czero.eqz a1, a3, a1
+; RV32ZICOND-NEXT:    or a1, a1, a2
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: setne_zero:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.nez a2, a2, a0
+; RV64ZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV64ZICOND-NEXT:    or a0, a0, a2
+; RV64ZICOND-NEXT:    ret
   %rc = icmp ne i64 %a, 0
   %sel = select i1 %rc, i64 %rs1, i64 %rs2
   ret i64 %sel
@@ -612,6 +1238,26 @@ define i64 @seteq_constant(i64 %a, i64 %rs1, i64 %rs2) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mvnez a1, a2, a0
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: seteq_constant:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    xori a0, a0, 123
+; RV32ZICOND-NEXT:    or a1, a0, a1
+; RV32ZICOND-NEXT:    czero.nez a0, a2, a1
+; RV32ZICOND-NEXT:    czero.eqz a2, a4, a1
+; RV32ZICOND-NEXT:    or a0, a2, a0
+; RV32ZICOND-NEXT:    czero.nez a2, a3, a1
+; RV32ZICOND-NEXT:    czero.eqz a1, a5, a1
+; RV32ZICOND-NEXT:    or a1, a1, a2
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: seteq_constant:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    addi a0, a0, -123
+; RV64ZICOND-NEXT:    czero.nez a1, a1, a0
+; RV64ZICOND-NEXT:    czero.eqz a0, a2, a0
+; RV64ZICOND-NEXT:    or a0, a0, a1
+; RV64ZICOND-NEXT:    ret
   %rc = icmp eq i64 %a, 123
   %sel = select i1 %rc, i64 %rs1, i64 %rs2
   ret i64 %sel
@@ -632,6 +1278,26 @@ define i64 @setne_constant(i64 %a, i64 %rs1, i64 %rs2) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mveqz a1, a2, a0
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: setne_constant:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    xori a0, a0, 456
+; RV32ZICOND-NEXT:    or a1, a0, a1
+; RV32ZICOND-NEXT:    czero.nez a0, a4, a1
+; RV32ZICOND-NEXT:    czero.eqz a2, a2, a1
+; RV32ZICOND-NEXT:    or a0, a2, a0
+; RV32ZICOND-NEXT:    czero.nez a2, a5, a1
+; RV32ZICOND-NEXT:    czero.eqz a1, a3, a1
+; RV32ZICOND-NEXT:    or a1, a1, a2
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: setne_constant:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    addi a0, a0, -456
+; RV64ZICOND-NEXT:    czero.nez a2, a2, a0
+; RV64ZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV64ZICOND-NEXT:    or a0, a0, a2
+; RV64ZICOND-NEXT:    ret
   %rc = icmp ne i64 %a, 456
   %sel = select i1 %rc, i64 %rs1, i64 %rs2
   ret i64 %sel
@@ -652,6 +1318,28 @@ define i64 @seteq_2048(i64 %a, i64 %rs1, i64 %rs2) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mvnez a1, a2, a0
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: seteq_2048:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    li a6, 1
+; RV32ZICOND-NEXT:    slli a6, a6, 11
+; RV32ZICOND-NEXT:    xor a0, a0, a6
+; RV32ZICOND-NEXT:    or a1, a0, a1
+; RV32ZICOND-NEXT:    czero.nez a0, a2, a1
+; RV32ZICOND-NEXT:    czero.eqz a2, a4, a1
+; RV32ZICOND-NEXT:    or a0, a2, a0
+; RV32ZICOND-NEXT:    czero.nez a2, a3, a1
+; RV32ZICOND-NEXT:    czero.eqz a1, a5, a1
+; RV32ZICOND-NEXT:    or a1, a1, a2
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: seteq_2048:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    addi a0, a0, -2048
+; RV64ZICOND-NEXT:    czero.nez a1, a1, a0
+; RV64ZICOND-NEXT:    czero.eqz a0, a2, a0
+; RV64ZICOND-NEXT:    or a0, a0, a1
+; RV64ZICOND-NEXT:    ret
   %rc = icmp eq i64 %a, 2048
   %sel = select i1 %rc, i64 %rs1, i64 %rs2
   ret i64 %sel
@@ -672,6 +1360,27 @@ define i64 @seteq_neg2048(i64 %a, i64 %rs1, i64 %rs2) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mvnez a1, a2, a0
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: seteq_neg2048:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    not a1, a1
+; RV32ZICOND-NEXT:    xori a0, a0, -2048
+; RV32ZICOND-NEXT:    or a1, a0, a1
+; RV32ZICOND-NEXT:    czero.nez a0, a2, a1
+; RV32ZICOND-NEXT:    czero.eqz a2, a4, a1
+; RV32ZICOND-NEXT:    or a0, a2, a0
+; RV32ZICOND-NEXT:    czero.nez a2, a3, a1
+; RV32ZICOND-NEXT:    czero.eqz a1, a5, a1
+; RV32ZICOND-NEXT:    or a1, a1, a2
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: seteq_neg2048:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    xori a0, a0, -2048
+; RV64ZICOND-NEXT:    czero.nez a1, a1, a0
+; RV64ZICOND-NEXT:    czero.eqz a0, a2, a0
+; RV64ZICOND-NEXT:    or a0, a0, a1
+; RV64ZICOND-NEXT:    ret
   %rc = icmp eq i64 %a, -2048
   %sel = select i1 %rc, i64 %rs1, i64 %rs2
   ret i64 %sel
@@ -692,6 +1401,27 @@ define i64 @setne_neg2048(i64 %a, i64 %rs1, i64 %rs2) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mveqz a1, a2, a0
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: setne_neg2048:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    not a1, a1
+; RV32ZICOND-NEXT:    xori a0, a0, -2048
+; RV32ZICOND-NEXT:    or a1, a0, a1
+; RV32ZICOND-NEXT:    czero.nez a0, a4, a1
+; RV32ZICOND-NEXT:    czero.eqz a2, a2, a1
+; RV32ZICOND-NEXT:    or a0, a2, a0
+; RV32ZICOND-NEXT:    czero.nez a2, a5, a1
+; RV32ZICOND-NEXT:    czero.eqz a1, a3, a1
+; RV32ZICOND-NEXT:    or a1, a1, a2
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: setne_neg2048:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    xori a0, a0, -2048
+; RV64ZICOND-NEXT:    czero.nez a2, a2, a0
+; RV64ZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV64ZICOND-NEXT:    or a0, a0, a2
+; RV64ZICOND-NEXT:    ret
   %rc = icmp ne i64 %a, -2048
   %sel = select i1 %rc, i64 %rs1, i64 %rs2
   ret i64 %sel
@@ -710,6 +1440,21 @@ define i64 @zero1_seteq(i64 %a, i64 %b, i64 %rs1) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mvnez a2, zero, a0
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, a2
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: zero1_seteq:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    xor a1, a1, a3
+; RV32ZICOND-NEXT:    xor a0, a0, a2
+; RV32ZICOND-NEXT:    or a1, a0, a1
+; RV32ZICOND-NEXT:    czero.nez a0, a4, a1
+; RV32ZICOND-NEXT:    czero.nez a1, a5, a1
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: zero1_seteq:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    xor a0, a0, a1
+; RV64ZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64ZICOND-NEXT:    ret
   %rc = icmp eq i64 %a, %b
   %sel = select i1 %rc, i64 %rs1, i64 0
   ret i64 %sel
@@ -728,6 +1473,21 @@ define i64 @zero2_seteq(i64 %a, i64 %b, i64 %rs1) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mveqz a2, zero, a0
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, a2
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: zero2_seteq:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    xor a1, a1, a3
+; RV32ZICOND-NEXT:    xor a0, a0, a2
+; RV32ZICOND-NEXT:    or a1, a0, a1
+; RV32ZICOND-NEXT:    czero.eqz a0, a4, a1
+; RV32ZICOND-NEXT:    czero.eqz a1, a5, a1
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: zero2_seteq:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    xor a0, a0, a1
+; RV64ZICOND-NEXT:    czero.eqz a0, a2, a0
+; RV64ZICOND-NEXT:    ret
   %rc = icmp eq i64 %a, %b
   %sel = select i1 %rc, i64 0, i64 %rs1
   ret i64 %sel
@@ -746,6 +1506,21 @@ define i64 @zero1_setne(i64 %a, i64 %b, i64 %rs1) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mveqz a2, zero, a0
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, a2
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: zero1_setne:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    xor a1, a1, a3
+; RV32ZICOND-NEXT:    xor a0, a0, a2
+; RV32ZICOND-NEXT:    or a1, a0, a1
+; RV32ZICOND-NEXT:    czero.eqz a0, a4, a1
+; RV32ZICOND-NEXT:    czero.eqz a1, a5, a1
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: zero1_setne:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    xor a0, a0, a1
+; RV64ZICOND-NEXT:    czero.eqz a0, a2, a0
+; RV64ZICOND-NEXT:    ret
   %rc = icmp ne i64 %a, %b
   %sel = select i1 %rc, i64 %rs1, i64 0
   ret i64 %sel
@@ -764,6 +1539,21 @@ define i64 @zero2_setne(i64 %a, i64 %b, i64 %rs1) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mvnez a2, zero, a0
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, a2
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: zero2_setne:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    xor a1, a1, a3
+; RV32ZICOND-NEXT:    xor a0, a0, a2
+; RV32ZICOND-NEXT:    or a1, a0, a1
+; RV32ZICOND-NEXT:    czero.nez a0, a4, a1
+; RV32ZICOND-NEXT:    czero.nez a1, a5, a1
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: zero2_setne:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    xor a0, a0, a1
+; RV64ZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64ZICOND-NEXT:    ret
   %rc = icmp ne i64 %a, %b
   %sel = select i1 %rc, i64 0, i64 %rs1
   ret i64 %sel
@@ -780,6 +1570,18 @@ define i64 @zero1_seteq_zero(i64 %a, i64 %rs1) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mvnez a1, zero, a0
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: zero1_seteq_zero:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    or a1, a0, a1
+; RV32ZICOND-NEXT:    czero.nez a0, a2, a1
+; RV32ZICOND-NEXT:    czero.nez a1, a3, a1
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: zero1_seteq_zero:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.nez a0, a1, a0
+; RV64ZICOND-NEXT:    ret
   %rc = icmp eq i64 %a, 0
   %sel = select i1 %rc, i64 %rs1, i64 0
   ret i64 %sel
@@ -796,6 +1598,18 @@ define i64 @zero2_seteq_zero(i64 %a, i64 %rs1) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mveqz a1, zero, a0
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: zero2_seteq_zero:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    or a1, a0, a1
+; RV32ZICOND-NEXT:    czero.eqz a0, a2, a1
+; RV32ZICOND-NEXT:    czero.eqz a1, a3, a1
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: zero2_seteq_zero:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV64ZICOND-NEXT:    ret
   %rc = icmp eq i64 %a, 0
   %sel = select i1 %rc, i64 0, i64 %rs1
   ret i64 %sel
@@ -812,6 +1626,18 @@ define i64 @zero1_setne_zero(i64 %a, i64 %rs1) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mveqz a1, zero, a0
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: zero1_setne_zero:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    or a1, a0, a1
+; RV32ZICOND-NEXT:    czero.eqz a0, a2, a1
+; RV32ZICOND-NEXT:    czero.eqz a1, a3, a1
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: zero1_setne_zero:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV64ZICOND-NEXT:    ret
   %rc = icmp ne i64 %a, 0
   %sel = select i1 %rc, i64 %rs1, i64 0
   ret i64 %sel
@@ -828,6 +1654,18 @@ define i64 @zero2_setne_zero(i64 %a, i64 %rs1) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mvnez a1, zero, a0
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: zero2_setne_zero:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    or a1, a0, a1
+; RV32ZICOND-NEXT:    czero.nez a0, a2, a1
+; RV32ZICOND-NEXT:    czero.nez a1, a3, a1
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: zero2_setne_zero:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.nez a0, a1, a0
+; RV64ZICOND-NEXT:    ret
   %rc = icmp ne i64 %a, 0
   %sel = select i1 %rc, i64 0, i64 %rs1
   ret i64 %sel
@@ -846,6 +1684,21 @@ define i64 @zero1_seteq_constant(i64 %a, i64 %rs1) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mvnez a1, zero, a0
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: zero1_seteq_constant:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    not a1, a1
+; RV32ZICOND-NEXT:    xori a0, a0, -231
+; RV32ZICOND-NEXT:    or a1, a0, a1
+; RV32ZICOND-NEXT:    czero.nez a0, a2, a1
+; RV32ZICOND-NEXT:    czero.nez a1, a3, a1
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: zero1_seteq_constant:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    addi a0, a0, 231
+; RV64ZICOND-NEXT:    czero.nez a0, a1, a0
+; RV64ZICOND-NEXT:    ret
   %rc = icmp eq i64 %a, -231
   %sel = select i1 %rc, i64 %rs1, i64 0
   ret i64 %sel
@@ -864,6 +1717,20 @@ define i64 @zero2_seteq_constant(i64 %a, i64 %rs1) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mveqz a1, zero, a0
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: zero2_seteq_constant:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    xori a0, a0, 546
+; RV32ZICOND-NEXT:    or a1, a0, a1
+; RV32ZICOND-NEXT:    czero.eqz a0, a2, a1
+; RV32ZICOND-NEXT:    czero.eqz a1, a3, a1
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: zero2_seteq_constant:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    addi a0, a0, -546
+; RV64ZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV64ZICOND-NEXT:    ret
   %rc = icmp eq i64 %a, 546
   %sel = select i1 %rc, i64 0, i64 %rs1
   ret i64 %sel
@@ -882,6 +1749,20 @@ define i64 @zero1_setne_constant(i64 %a, i64 %rs1) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mveqz a1, zero, a0
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: zero1_setne_constant:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    xori a0, a0, 321
+; RV32ZICOND-NEXT:    or a1, a0, a1
+; RV32ZICOND-NEXT:    czero.eqz a0, a2, a1
+; RV32ZICOND-NEXT:    czero.eqz a1, a3, a1
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: zero1_setne_constant:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    addi a0, a0, -321
+; RV64ZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV64ZICOND-NEXT:    ret
   %rc = icmp ne i64 %a, 321
   %sel = select i1 %rc, i64 %rs1, i64 0
   ret i64 %sel
@@ -900,6 +1781,21 @@ define i64 @zero2_setne_constant(i64 %a, i64 %rs1) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mvnez a1, zero, a0
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: zero2_setne_constant:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    not a1, a1
+; RV32ZICOND-NEXT:    xori a0, a0, -654
+; RV32ZICOND-NEXT:    or a1, a0, a1
+; RV32ZICOND-NEXT:    czero.nez a0, a2, a1
+; RV32ZICOND-NEXT:    czero.nez a1, a3, a1
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: zero2_setne_constant:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    addi a0, a0, 654
+; RV64ZICOND-NEXT:    czero.nez a0, a1, a0
+; RV64ZICOND-NEXT:    ret
   %rc = icmp ne i64 %a, -654
   %sel = select i1 %rc, i64 0, i64 %rs1
   ret i64 %sel
@@ -918,6 +1814,21 @@ define i64 @zero1_seteq_neg2048(i64 %a, i64 %rs1) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mvnez a1, zero, a0
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: zero1_seteq_neg2048:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    not a1, a1
+; RV32ZICOND-NEXT:    xori a0, a0, -2048
+; RV32ZICOND-NEXT:    or a1, a0, a1
+; RV32ZICOND-NEXT:    czero.nez a0, a2, a1
+; RV32ZICOND-NEXT:    czero.nez a1, a3, a1
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: zero1_seteq_neg2048:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    xori a0, a0, -2048
+; RV64ZICOND-NEXT:    czero.nez a0, a1, a0
+; RV64ZICOND-NEXT:    ret
   %rc = icmp eq i64 %a, -2048
   %sel = select i1 %rc, i64 %rs1, i64 0
   ret i64 %sel
@@ -936,6 +1847,21 @@ define i64 @zero2_seteq_neg2048(i64 %a, i64 %rs1) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mveqz a1, zero, a0
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: zero2_seteq_neg2048:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    not a1, a1
+; RV32ZICOND-NEXT:    xori a0, a0, -2048
+; RV32ZICOND-NEXT:    or a1, a0, a1
+; RV32ZICOND-NEXT:    czero.eqz a0, a2, a1
+; RV32ZICOND-NEXT:    czero.eqz a1, a3, a1
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: zero2_seteq_neg2048:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    xori a0, a0, -2048
+; RV64ZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV64ZICOND-NEXT:    ret
   %rc = icmp eq i64 %a, -2048
   %sel = select i1 %rc, i64 0, i64 %rs1
   ret i64 %sel
@@ -954,6 +1880,21 @@ define i64 @zero1_setne_neg2048(i64 %a, i64 %rs1) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mveqz a1, zero, a0
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: zero1_setne_neg2048:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    not a1, a1
+; RV32ZICOND-NEXT:    xori a0, a0, -2048
+; RV32ZICOND-NEXT:    or a1, a0, a1
+; RV32ZICOND-NEXT:    czero.eqz a0, a2, a1
+; RV32ZICOND-NEXT:    czero.eqz a1, a3, a1
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: zero1_setne_neg2048:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    xori a0, a0, -2048
+; RV64ZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV64ZICOND-NEXT:    ret
   %rc = icmp ne i64 %a, -2048
   %sel = select i1 %rc, i64 %rs1, i64 0
   ret i64 %sel
@@ -972,6 +1913,21 @@ define i64 @zero2_setne_neg2048(i64 %a, i64 %rs1) {
 ; RV64XTHEADCONDMOV-NEXT:    th.mvnez a1, zero, a0
 ; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: zero2_setne_neg2048:
+; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    not a1, a1
+; RV32ZICOND-NEXT:    xori a0, a0, -2048
+; RV32ZICOND-NEXT:    or a1, a0, a1
+; RV32ZICOND-NEXT:    czero.nez a0, a2, a1
+; RV32ZICOND-NEXT:    czero.nez a1, a3, a1
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: zero2_setne_neg2048:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    xori a0, a0, -2048
+; RV64ZICOND-NEXT:    czero.nez a0, a1, a0
+; RV64ZICOND-NEXT:    ret
   %rc = icmp ne i64 %a, -2048
   %sel = select i1 %rc, i64 0, i64 %rs1
   ret i64 %sel
@@ -1023,6 +1979,50 @@ define void @sextw_removal_maskc(i1 %c, i32 signext %arg, i32 signext %arg1) nou
 ; RV64XTHEADCONDMOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
 ; RV64XTHEADCONDMOV-NEXT:    addi sp, sp, 32
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: sextw_removal_maskc:
+; RV32ZICOND:       # %bb.0: # %bb
+; RV32ZICOND-NEXT:    addi sp, sp, -16
+; RV32ZICOND-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32ZICOND-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32ZICOND-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
+; RV32ZICOND-NEXT:    mv s0, a2
+; RV32ZICOND-NEXT:    andi a0, a0, 1
+; RV32ZICOND-NEXT:    czero.eqz s1, a1, a0
+; RV32ZICOND-NEXT:  .LBB54_1: # %bb2
+; RV32ZICOND-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32ZICOND-NEXT:    mv a0, s1
+; RV32ZICOND-NEXT:    call bar@plt
+; RV32ZICOND-NEXT:    sll s1, s1, s0
+; RV32ZICOND-NEXT:    bnez a0, .LBB54_1
+; RV32ZICOND-NEXT:  # %bb.2: # %bb7
+; RV32ZICOND-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32ZICOND-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32ZICOND-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32ZICOND-NEXT:    addi sp, sp, 16
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: sextw_removal_maskc:
+; RV64ZICOND:       # %bb.0: # %bb
+; RV64ZICOND-NEXT:    addi sp, sp, -32
+; RV64ZICOND-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64ZICOND-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64ZICOND-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64ZICOND-NEXT:    mv s0, a2
+; RV64ZICOND-NEXT:    andi a0, a0, 1
+; RV64ZICOND-NEXT:    czero.eqz s1, a1, a0
+; RV64ZICOND-NEXT:  .LBB54_1: # %bb2
+; RV64ZICOND-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64ZICOND-NEXT:    sext.w a0, s1
+; RV64ZICOND-NEXT:    call bar@plt
+; RV64ZICOND-NEXT:    sllw s1, s1, s0
+; RV64ZICOND-NEXT:    bnez a0, .LBB54_1
+; RV64ZICOND-NEXT:  # %bb.2: # %bb7
+; RV64ZICOND-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64ZICOND-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64ZICOND-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64ZICOND-NEXT:    addi sp, sp, 32
+; RV64ZICOND-NEXT:    ret
 bb:
   %i = select i1 %c, i32 %arg, i32 0
   br label %bb2
@@ -1084,6 +2084,50 @@ define void @sextw_removal_maskcn(i1 %c, i32 signext %arg, i32 signext %arg1) no
 ; RV64XTHEADCONDMOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
 ; RV64XTHEADCONDMOV-NEXT:    addi sp, sp, 32
 ; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV32ZICOND-LABEL: sextw_removal_maskcn:
+; RV32ZICOND:       # %bb.0: # %bb
+; RV32ZICOND-NEXT:    addi sp, sp, -16
+; RV32ZICOND-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32ZICOND-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32ZICOND-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
+; RV32ZICOND-NEXT:    mv s0, a2
+; RV32ZICOND-NEXT:    andi a0, a0, 1
+; RV32ZICOND-NEXT:    czero.nez s1, a1, a0
+; RV32ZICOND-NEXT:  .LBB55_1: # %bb2
+; RV32ZICOND-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32ZICOND-NEXT:    mv a0, s1
+; RV32ZICOND-NEXT:    call bar@plt
+; RV32ZICOND-NEXT:    sll s1, s1, s0
+; RV32ZICOND-NEXT:    bnez a0, .LBB55_1
+; RV32ZICOND-NEXT:  # %bb.2: # %bb7
+; RV32ZICOND-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32ZICOND-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32ZICOND-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
+; RV32ZICOND-NEXT:    addi sp, sp, 16
+; RV32ZICOND-NEXT:    ret
+;
+; RV64ZICOND-LABEL: sextw_removal_maskcn:
+; RV64ZICOND:       # %bb.0: # %bb
+; RV64ZICOND-NEXT:    addi sp, sp, -32
+; RV64ZICOND-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64ZICOND-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64ZICOND-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64ZICOND-NEXT:    mv s0, a2
+; RV64ZICOND-NEXT:    andi a0, a0, 1
+; RV64ZICOND-NEXT:    czero.nez s1, a1, a0
+; RV64ZICOND-NEXT:  .LBB55_1: # %bb2
+; RV64ZICOND-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64ZICOND-NEXT:    sext.w a0, s1
+; RV64ZICOND-NEXT:    call bar@plt
+; RV64ZICOND-NEXT:    sllw s1, s1, s0
+; RV64ZICOND-NEXT:    bnez a0, .LBB55_1
+; RV64ZICOND-NEXT:  # %bb.2: # %bb7
+; RV64ZICOND-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64ZICOND-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64ZICOND-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64ZICOND-NEXT:    addi sp, sp, 32
+; RV64ZICOND-NEXT:    ret
 bb:
   %i = select i1 %c, i32 0, i32 %arg
   br label %bb2

diff  --git a/llvm/test/CodeGen/RISCV/select-binop-identity.ll b/llvm/test/CodeGen/RISCV/select-binop-identity.ll
index 78b2cd977644..ef2f866e0ede 100644
--- a/llvm/test/CodeGen/RISCV/select-binop-identity.ll
+++ b/llvm/test/CodeGen/RISCV/select-binop-identity.ll
@@ -7,6 +7,10 @@
 ; RUN:   | FileCheck -check-prefix=SFB64 %s
 ; RUN: llc -mtriple=riscv64 -mattr=+xventanacondops -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=VTCONDOPS64 %s
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-zicond -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=ZICOND,ZICOND32 %s
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-zicond -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=ZICOND,ZICOND64 %s
 
 ; InstCombine canonicalizes (c ? x | y : x) to (x | (c ? y : 0)) similar for
 ; other binary operations using their identity value as the constant.
@@ -46,6 +50,15 @@ define signext i32 @and_select_all_ones_i32(i1 zeroext %c, i32 signext %x, i32 s
 ; VTCONDOPS64-NEXT:    or a0, a0, a3
 ; VTCONDOPS64-NEXT:    and a0, a0, a2
 ; VTCONDOPS64-NEXT:    ret
+;
+; ZICOND-LABEL: and_select_all_ones_i32:
+; ZICOND:       # %bb.0:
+; ZICOND-NEXT:    li a3, -1
+; ZICOND-NEXT:    czero.nez a3, a3, a0
+; ZICOND-NEXT:    czero.eqz a0, a1, a0
+; ZICOND-NEXT:    or a0, a0, a3
+; ZICOND-NEXT:    and a0, a0, a2
+; ZICOND-NEXT:    ret
   %a = select i1 %c, i32 %x, i32 -1
   %b = and i32 %a, %y
   ret i32 %b
@@ -85,6 +98,27 @@ define i64 @and_select_all_ones_i64(i1 zeroext %c, i64 %x, i64 %y) {
 ; VTCONDOPS64-NEXT:    or a0, a0, a1
 ; VTCONDOPS64-NEXT:    and a0, a2, a0
 ; VTCONDOPS64-NEXT:    ret
+;
+; ZICOND32-LABEL: and_select_all_ones_i64:
+; ZICOND32:       # %bb.0:
+; ZICOND32-NEXT:    czero.nez a2, a2, a0
+; ZICOND32-NEXT:    li a5, -1
+; ZICOND32-NEXT:    czero.eqz a5, a5, a0
+; ZICOND32-NEXT:    or a2, a5, a2
+; ZICOND32-NEXT:    czero.nez a0, a1, a0
+; ZICOND32-NEXT:    or a0, a5, a0
+; ZICOND32-NEXT:    and a0, a3, a0
+; ZICOND32-NEXT:    and a1, a4, a2
+; ZICOND32-NEXT:    ret
+;
+; ZICOND64-LABEL: and_select_all_ones_i64:
+; ZICOND64:       # %bb.0:
+; ZICOND64-NEXT:    czero.nez a1, a1, a0
+; ZICOND64-NEXT:    li a3, -1
+; ZICOND64-NEXT:    czero.eqz a0, a3, a0
+; ZICOND64-NEXT:    or a0, a0, a1
+; ZICOND64-NEXT:    and a0, a2, a0
+; ZICOND64-NEXT:    ret
   %a = select i1 %c, i64 -1, i64 %x
   %b = and i64 %y, %a
   ret i64 %b
@@ -119,6 +153,12 @@ define signext i32 @or_select_all_zeros_i32(i1 zeroext %c, i32 signext %x, i32 s
 ; VTCONDOPS64-NEXT:    vt.maskc a0, a1, a0
 ; VTCONDOPS64-NEXT:    or a0, a2, a0
 ; VTCONDOPS64-NEXT:    ret
+;
+; ZICOND-LABEL: or_select_all_zeros_i32:
+; ZICOND:       # %bb.0:
+; ZICOND-NEXT:    czero.eqz a0, a1, a0
+; ZICOND-NEXT:    or a0, a2, a0
+; ZICOND-NEXT:    ret
   %a = select i1 %c, i32 %x, i32 0
   %b = or i32 %y, %a
   ret i32 %b
@@ -155,6 +195,20 @@ define i64 @or_select_all_zeros_i64(i1 zeroext %c, i64 %x, i64 %y) {
 ; VTCONDOPS64-NEXT:    vt.maskcn a0, a1, a0
 ; VTCONDOPS64-NEXT:    or a0, a0, a2
 ; VTCONDOPS64-NEXT:    ret
+;
+; ZICOND32-LABEL: or_select_all_zeros_i64:
+; ZICOND32:       # %bb.0:
+; ZICOND32-NEXT:    czero.nez a2, a2, a0
+; ZICOND32-NEXT:    czero.nez a0, a1, a0
+; ZICOND32-NEXT:    or a0, a0, a3
+; ZICOND32-NEXT:    or a1, a2, a4
+; ZICOND32-NEXT:    ret
+;
+; ZICOND64-LABEL: or_select_all_zeros_i64:
+; ZICOND64:       # %bb.0:
+; ZICOND64-NEXT:    czero.nez a0, a1, a0
+; ZICOND64-NEXT:    or a0, a0, a2
+; ZICOND64-NEXT:    ret
   %a = select i1 %c, i64 0, i64 %x
   %b = or i64 %a, %y
   ret i64 %b
@@ -189,6 +243,12 @@ define signext i32 @xor_select_all_zeros_i32(i1 zeroext %c, i32 signext %x, i32
 ; VTCONDOPS64-NEXT:    vt.maskcn a0, a1, a0
 ; VTCONDOPS64-NEXT:    xor a0, a2, a0
 ; VTCONDOPS64-NEXT:    ret
+;
+; ZICOND-LABEL: xor_select_all_zeros_i32:
+; ZICOND:       # %bb.0:
+; ZICOND-NEXT:    czero.nez a0, a1, a0
+; ZICOND-NEXT:    xor a0, a2, a0
+; ZICOND-NEXT:    ret
   %a = select i1 %c, i32 0, i32 %x
   %b = xor i32 %y, %a
   ret i32 %b
@@ -225,6 +285,20 @@ define i64 @xor_select_all_zeros_i64(i1 zeroext %c, i64 %x, i64 %y) {
 ; VTCONDOPS64-NEXT:    vt.maskc a0, a1, a0
 ; VTCONDOPS64-NEXT:    xor a0, a0, a2
 ; VTCONDOPS64-NEXT:    ret
+;
+; ZICOND32-LABEL: xor_select_all_zeros_i64:
+; ZICOND32:       # %bb.0:
+; ZICOND32-NEXT:    czero.eqz a2, a2, a0
+; ZICOND32-NEXT:    czero.eqz a0, a1, a0
+; ZICOND32-NEXT:    xor a0, a0, a3
+; ZICOND32-NEXT:    xor a1, a2, a4
+; ZICOND32-NEXT:    ret
+;
+; ZICOND64-LABEL: xor_select_all_zeros_i64:
+; ZICOND64:       # %bb.0:
+; ZICOND64-NEXT:    czero.eqz a0, a1, a0
+; ZICOND64-NEXT:    xor a0, a0, a2
+; ZICOND64-NEXT:    ret
   %a = select i1 %c, i64 %x, i64 0
   %b = xor i64 %a, %y
   ret i64 %b
@@ -259,6 +333,18 @@ define signext i32 @add_select_all_zeros_i32(i1 zeroext %c, i32 signext %x, i32
 ; VTCONDOPS64-NEXT:    vt.maskcn a0, a1, a0
 ; VTCONDOPS64-NEXT:    addw a0, a2, a0
 ; VTCONDOPS64-NEXT:    ret
+;
+; ZICOND32-LABEL: add_select_all_zeros_i32:
+; ZICOND32:       # %bb.0:
+; ZICOND32-NEXT:    czero.nez a0, a1, a0
+; ZICOND32-NEXT:    add a0, a2, a0
+; ZICOND32-NEXT:    ret
+;
+; ZICOND64-LABEL: add_select_all_zeros_i32:
+; ZICOND64:       # %bb.0:
+; ZICOND64-NEXT:    czero.nez a0, a1, a0
+; ZICOND64-NEXT:    addw a0, a2, a0
+; ZICOND64-NEXT:    ret
   %a = select i1 %c, i32 0, i32 %x
   %b = add i32 %y, %a
   ret i32 %b
@@ -297,6 +383,22 @@ define i64 @add_select_all_zeros_i64(i1 zeroext %c, i64 %x, i64 %y) {
 ; VTCONDOPS64-NEXT:    vt.maskc a0, a1, a0
 ; VTCONDOPS64-NEXT:    add a0, a0, a2
 ; VTCONDOPS64-NEXT:    ret
+;
+; ZICOND32-LABEL: add_select_all_zeros_i64:
+; ZICOND32:       # %bb.0:
+; ZICOND32-NEXT:    czero.eqz a2, a2, a0
+; ZICOND32-NEXT:    czero.eqz a1, a1, a0
+; ZICOND32-NEXT:    add a0, a1, a3
+; ZICOND32-NEXT:    sltu a1, a0, a1
+; ZICOND32-NEXT:    add a2, a2, a4
+; ZICOND32-NEXT:    add a1, a2, a1
+; ZICOND32-NEXT:    ret
+;
+; ZICOND64-LABEL: add_select_all_zeros_i64:
+; ZICOND64:       # %bb.0:
+; ZICOND64-NEXT:    czero.eqz a0, a1, a0
+; ZICOND64-NEXT:    add a0, a0, a2
+; ZICOND64-NEXT:    ret
   %a = select i1 %c, i64 %x, i64 0
   %b = add i64 %a, %y
   ret i64 %b
@@ -331,6 +433,18 @@ define signext i32 @sub_select_all_zeros_i32(i1 zeroext %c, i32 signext %x, i32
 ; VTCONDOPS64-NEXT:    vt.maskcn a0, a1, a0
 ; VTCONDOPS64-NEXT:    subw a0, a2, a0
 ; VTCONDOPS64-NEXT:    ret
+;
+; ZICOND32-LABEL: sub_select_all_zeros_i32:
+; ZICOND32:       # %bb.0:
+; ZICOND32-NEXT:    czero.nez a0, a1, a0
+; ZICOND32-NEXT:    sub a0, a2, a0
+; ZICOND32-NEXT:    ret
+;
+; ZICOND64-LABEL: sub_select_all_zeros_i32:
+; ZICOND64:       # %bb.0:
+; ZICOND64-NEXT:    czero.nez a0, a1, a0
+; ZICOND64-NEXT:    subw a0, a2, a0
+; ZICOND64-NEXT:    ret
   %a = select i1 %c, i32 0, i32 %x
   %b = sub i32 %y, %a
   ret i32 %b
@@ -369,6 +483,22 @@ define i64 @sub_select_all_zeros_i64(i1 zeroext %c, i64 %x, i64 %y) {
 ; VTCONDOPS64-NEXT:    vt.maskc a0, a1, a0
 ; VTCONDOPS64-NEXT:    sub a0, a2, a0
 ; VTCONDOPS64-NEXT:    ret
+;
+; ZICOND32-LABEL: sub_select_all_zeros_i64:
+; ZICOND32:       # %bb.0:
+; ZICOND32-NEXT:    czero.eqz a2, a2, a0
+; ZICOND32-NEXT:    czero.eqz a0, a1, a0
+; ZICOND32-NEXT:    sltu a1, a3, a0
+; ZICOND32-NEXT:    sub a4, a4, a2
+; ZICOND32-NEXT:    sub a1, a4, a1
+; ZICOND32-NEXT:    sub a0, a3, a0
+; ZICOND32-NEXT:    ret
+;
+; ZICOND64-LABEL: sub_select_all_zeros_i64:
+; ZICOND64:       # %bb.0:
+; ZICOND64-NEXT:    czero.eqz a0, a1, a0
+; ZICOND64-NEXT:    sub a0, a2, a0
+; ZICOND64-NEXT:    ret
   %a = select i1 %c, i64 %x, i64 0
   %b = sub i64 %y, %a
   ret i64 %b

diff  --git a/llvm/test/CodeGen/RISCV/select.ll b/llvm/test/CodeGen/RISCV/select.ll
index d1c83c60a92f..1bf96ee8edb5 100644
--- a/llvm/test/CodeGen/RISCV/select.ll
+++ b/llvm/test/CodeGen/RISCV/select.ll
@@ -2,6 +2,8 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+m -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,CHECK32,RV32IM %s
 ; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,CHECK64,RV64IM %s
 ; RUN: llc -mtriple=riscv64 -mattr=+m,+xventanacondops -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,CHECK64,RV64IMXVTCONDOPS %s
+; RUN: llc -mtriple=riscv32 -mattr=+m,+experimental-zicond -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,CHECK32,CHECKZICOND,RV32IMZICOND %s
+; RUN: llc -mtriple=riscv64 -mattr=+m,+experimental-zicond -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,CHECK64,CHECKZICOND,RV64IMZICOND %s
 
 define i16 @select_xor_1(i16 %A, i8 %cond) {
 ; CHECK32-LABEL: select_xor_1:
@@ -30,13 +32,13 @@ entry:
 ; Equivalent to above, but with icmp ne (and %cond, 1), 1 instead of
 ; icmp eq (and %cond, 1), 0
 define i16 @select_xor_1b(i16 %A, i8 %cond) {
-; CHECK32-LABEL: select_xor_1b:
-; CHECK32:       # %bb.0: # %entry
-; CHECK32-NEXT:    slli a1, a1, 31
-; CHECK32-NEXT:    srai a1, a1, 31
-; CHECK32-NEXT:    andi a1, a1, 43
-; CHECK32-NEXT:    xor a0, a0, a1
-; CHECK32-NEXT:    ret
+; RV32IM-LABEL: select_xor_1b:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    slli a1, a1, 31
+; RV32IM-NEXT:    srai a1, a1, 31
+; RV32IM-NEXT:    andi a1, a1, 43
+; RV32IM-NEXT:    xor a0, a0, a1
+; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_xor_1b:
 ; RV64IM:       # %bb.0: # %entry
@@ -53,6 +55,14 @@ define i16 @select_xor_1b(i16 %A, i8 %cond) {
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskc a1, a2, a1
 ; RV64IMXVTCONDOPS-NEXT:    xor a0, a0, a1
 ; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; CHECKZICOND-LABEL: select_xor_1b:
+; CHECKZICOND:       # %bb.0: # %entry
+; CHECKZICOND-NEXT:    andi a1, a1, 1
+; CHECKZICOND-NEXT:    li a2, 43
+; CHECKZICOND-NEXT:    czero.eqz a1, a2, a1
+; CHECKZICOND-NEXT:    xor a0, a0, a1
+; CHECKZICOND-NEXT:    ret
 entry:
  %and = and i8 %cond, 1
  %cmp10 = icmp ne i8 %and, 1
@@ -88,13 +98,13 @@ entry:
 ; Equivalent to above, but with icmp ne (and %cond, 1), 1 instead of
 ; icmp eq (and %cond, 1), 0
 define i32 @select_xor_2b(i32 %A, i32 %B, i8 %cond) {
-; CHECK32-LABEL: select_xor_2b:
-; CHECK32:       # %bb.0: # %entry
-; CHECK32-NEXT:    slli a2, a2, 31
-; CHECK32-NEXT:    srai a2, a2, 31
-; CHECK32-NEXT:    and a1, a2, a1
-; CHECK32-NEXT:    xor a0, a0, a1
-; CHECK32-NEXT:    ret
+; RV32IM-LABEL: select_xor_2b:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    slli a2, a2, 31
+; RV32IM-NEXT:    srai a2, a2, 31
+; RV32IM-NEXT:    and a1, a2, a1
+; RV32IM-NEXT:    xor a0, a0, a1
+; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_xor_2b:
 ; RV64IM:       # %bb.0: # %entry
@@ -110,6 +120,13 @@ define i32 @select_xor_2b(i32 %A, i32 %B, i8 %cond) {
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskc a1, a1, a2
 ; RV64IMXVTCONDOPS-NEXT:    xor a0, a0, a1
 ; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; CHECKZICOND-LABEL: select_xor_2b:
+; CHECKZICOND:       # %bb.0: # %entry
+; CHECKZICOND-NEXT:    andi a2, a2, 1
+; CHECKZICOND-NEXT:    czero.eqz a1, a1, a2
+; CHECKZICOND-NEXT:    xor a0, a0, a1
+; CHECKZICOND-NEXT:    ret
 entry:
  %and = and i8 %cond, 1
  %cmp10 = icmp ne i8 %and, 1
@@ -119,13 +136,13 @@ entry:
 }
 
 define i16 @select_xor_3(i16 %A, i8 %cond) {
-; CHECK32-LABEL: select_xor_3:
-; CHECK32:       # %bb.0: # %entry
-; CHECK32-NEXT:    andi a1, a1, 1
-; CHECK32-NEXT:    addi a1, a1, -1
-; CHECK32-NEXT:    andi a1, a1, 43
-; CHECK32-NEXT:    xor a0, a0, a1
-; CHECK32-NEXT:    ret
+; RV32IM-LABEL: select_xor_3:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    andi a1, a1, 1
+; RV32IM-NEXT:    addi a1, a1, -1
+; RV32IM-NEXT:    andi a1, a1, 43
+; RV32IM-NEXT:    xor a0, a0, a1
+; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_xor_3:
 ; RV64IM:       # %bb.0: # %entry
@@ -142,6 +159,14 @@ define i16 @select_xor_3(i16 %A, i8 %cond) {
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a1, a2, a1
 ; RV64IMXVTCONDOPS-NEXT:    xor a0, a0, a1
 ; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; CHECKZICOND-LABEL: select_xor_3:
+; CHECKZICOND:       # %bb.0: # %entry
+; CHECKZICOND-NEXT:    andi a1, a1, 1
+; CHECKZICOND-NEXT:    li a2, 43
+; CHECKZICOND-NEXT:    czero.nez a1, a2, a1
+; CHECKZICOND-NEXT:    xor a0, a0, a1
+; CHECKZICOND-NEXT:    ret
 entry:
  %and = and i8 %cond, 1
  %cmp10 = icmp eq i8 %and, 0
@@ -153,13 +178,13 @@ entry:
 ; Equivalent to above, but with icmp ne (and %cond, 1), 1 instead of
 ; icmp eq (and %cond, 1), 0
 define i16 @select_xor_3b(i16 %A, i8 %cond) {
-; CHECK32-LABEL: select_xor_3b:
-; CHECK32:       # %bb.0: # %entry
-; CHECK32-NEXT:    andi a1, a1, 1
-; CHECK32-NEXT:    addi a1, a1, -1
-; CHECK32-NEXT:    andi a1, a1, 43
-; CHECK32-NEXT:    xor a0, a0, a1
-; CHECK32-NEXT:    ret
+; RV32IM-LABEL: select_xor_3b:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    andi a1, a1, 1
+; RV32IM-NEXT:    addi a1, a1, -1
+; RV32IM-NEXT:    andi a1, a1, 43
+; RV32IM-NEXT:    xor a0, a0, a1
+; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_xor_3b:
 ; RV64IM:       # %bb.0: # %entry
@@ -176,6 +201,14 @@ define i16 @select_xor_3b(i16 %A, i8 %cond) {
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a1, a2, a1
 ; RV64IMXVTCONDOPS-NEXT:    xor a0, a0, a1
 ; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; CHECKZICOND-LABEL: select_xor_3b:
+; CHECKZICOND:       # %bb.0: # %entry
+; CHECKZICOND-NEXT:    andi a1, a1, 1
+; CHECKZICOND-NEXT:    li a2, 43
+; CHECKZICOND-NEXT:    czero.nez a1, a2, a1
+; CHECKZICOND-NEXT:    xor a0, a0, a1
+; CHECKZICOND-NEXT:    ret
 entry:
  %and = and i8 %cond, 1
  %cmp10 = icmp ne i8 %and, 1
@@ -185,13 +218,13 @@ entry:
 }
 
 define i32 @select_xor_4(i32 %A, i32 %B, i8 %cond) {
-; CHECK32-LABEL: select_xor_4:
-; CHECK32:       # %bb.0: # %entry
-; CHECK32-NEXT:    andi a2, a2, 1
-; CHECK32-NEXT:    addi a2, a2, -1
-; CHECK32-NEXT:    and a1, a2, a1
-; CHECK32-NEXT:    xor a0, a0, a1
-; CHECK32-NEXT:    ret
+; RV32IM-LABEL: select_xor_4:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    andi a2, a2, 1
+; RV32IM-NEXT:    addi a2, a2, -1
+; RV32IM-NEXT:    and a1, a2, a1
+; RV32IM-NEXT:    xor a0, a0, a1
+; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_xor_4:
 ; RV64IM:       # %bb.0: # %entry
@@ -207,6 +240,13 @@ define i32 @select_xor_4(i32 %A, i32 %B, i8 %cond) {
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a1, a1, a2
 ; RV64IMXVTCONDOPS-NEXT:    xor a0, a0, a1
 ; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; CHECKZICOND-LABEL: select_xor_4:
+; CHECKZICOND:       # %bb.0: # %entry
+; CHECKZICOND-NEXT:    andi a2, a2, 1
+; CHECKZICOND-NEXT:    czero.nez a1, a1, a2
+; CHECKZICOND-NEXT:    xor a0, a0, a1
+; CHECKZICOND-NEXT:    ret
 entry:
  %and = and i8 %cond, 1
  %cmp10 = icmp eq i8 %and, 0
@@ -218,13 +258,13 @@ entry:
 ; Equivalent to above, but with icmp ne (and %cond, 1), 1 instead of
 ; icmp eq (and %cond, 1), 0
 define i32 @select_xor_4b(i32 %A, i32 %B, i8 %cond) {
-; CHECK32-LABEL: select_xor_4b:
-; CHECK32:       # %bb.0: # %entry
-; CHECK32-NEXT:    andi a2, a2, 1
-; CHECK32-NEXT:    addi a2, a2, -1
-; CHECK32-NEXT:    and a1, a2, a1
-; CHECK32-NEXT:    xor a0, a0, a1
-; CHECK32-NEXT:    ret
+; RV32IM-LABEL: select_xor_4b:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    andi a2, a2, 1
+; RV32IM-NEXT:    addi a2, a2, -1
+; RV32IM-NEXT:    and a1, a2, a1
+; RV32IM-NEXT:    xor a0, a0, a1
+; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_xor_4b:
 ; RV64IM:       # %bb.0: # %entry
@@ -240,6 +280,13 @@ define i32 @select_xor_4b(i32 %A, i32 %B, i8 %cond) {
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a1, a1, a2
 ; RV64IMXVTCONDOPS-NEXT:    xor a0, a0, a1
 ; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; CHECKZICOND-LABEL: select_xor_4b:
+; CHECKZICOND:       # %bb.0: # %entry
+; CHECKZICOND-NEXT:    andi a2, a2, 1
+; CHECKZICOND-NEXT:    czero.nez a1, a1, a2
+; CHECKZICOND-NEXT:    xor a0, a0, a1
+; CHECKZICOND-NEXT:    ret
 entry:
  %and = and i8 %cond, 1
  %cmp10 = icmp ne i8 %and, 1
@@ -275,13 +322,13 @@ entry:
 ; Equivalent to above, but with icmp ne (and %cond, 1), 1 instead of
 ; icmp eq (and %cond, 1), 0
 define i32 @select_or_b(i32 %A, i32 %B, i8 %cond) {
-; CHECK32-LABEL: select_or_b:
-; CHECK32:       # %bb.0: # %entry
-; CHECK32-NEXT:    slli a2, a2, 31
-; CHECK32-NEXT:    srai a2, a2, 31
-; CHECK32-NEXT:    and a1, a2, a1
-; CHECK32-NEXT:    or a0, a0, a1
-; CHECK32-NEXT:    ret
+; RV32IM-LABEL: select_or_b:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    slli a2, a2, 31
+; RV32IM-NEXT:    srai a2, a2, 31
+; RV32IM-NEXT:    and a1, a2, a1
+; RV32IM-NEXT:    or a0, a0, a1
+; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_or_b:
 ; RV64IM:       # %bb.0: # %entry
@@ -297,6 +344,13 @@ define i32 @select_or_b(i32 %A, i32 %B, i8 %cond) {
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskc a1, a1, a2
 ; RV64IMXVTCONDOPS-NEXT:    or a0, a0, a1
 ; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; CHECKZICOND-LABEL: select_or_b:
+; CHECKZICOND:       # %bb.0: # %entry
+; CHECKZICOND-NEXT:    andi a2, a2, 1
+; CHECKZICOND-NEXT:    czero.eqz a1, a1, a2
+; CHECKZICOND-NEXT:    or a0, a0, a1
+; CHECKZICOND-NEXT:    ret
 entry:
  %and = and i8 %cond, 1
  %cmp10 = icmp ne i8 %and, 1
@@ -332,13 +386,13 @@ entry:
 ; Equivalent to above, but with icmp ne (and %cond, 1), 1 instead of
 ; icmp eq (and %cond, 1), 0
 define i32 @select_or_1b(i32 %A, i32 %B, i32 %cond) {
-; CHECK32-LABEL: select_or_1b:
-; CHECK32:       # %bb.0: # %entry
-; CHECK32-NEXT:    slli a2, a2, 31
-; CHECK32-NEXT:    srai a2, a2, 31
-; CHECK32-NEXT:    and a1, a2, a1
-; CHECK32-NEXT:    or a0, a0, a1
-; CHECK32-NEXT:    ret
+; RV32IM-LABEL: select_or_1b:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    slli a2, a2, 31
+; RV32IM-NEXT:    srai a2, a2, 31
+; RV32IM-NEXT:    and a1, a2, a1
+; RV32IM-NEXT:    or a0, a0, a1
+; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_or_1b:
 ; RV64IM:       # %bb.0: # %entry
@@ -354,6 +408,13 @@ define i32 @select_or_1b(i32 %A, i32 %B, i32 %cond) {
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskc a1, a1, a2
 ; RV64IMXVTCONDOPS-NEXT:    or a0, a0, a1
 ; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; CHECKZICOND-LABEL: select_or_1b:
+; CHECKZICOND:       # %bb.0: # %entry
+; CHECKZICOND-NEXT:    andi a2, a2, 1
+; CHECKZICOND-NEXT:    czero.eqz a1, a1, a2
+; CHECKZICOND-NEXT:    or a0, a0, a1
+; CHECKZICOND-NEXT:    ret
 entry:
  %and = and i32 %cond, 1
  %cmp10 = icmp ne i32 %and, 1
@@ -363,13 +424,13 @@ entry:
 }
 
 define i32 @select_or_2(i32 %A, i32 %B, i8 %cond) {
-; CHECK32-LABEL: select_or_2:
-; CHECK32:       # %bb.0: # %entry
-; CHECK32-NEXT:    andi a2, a2, 1
-; CHECK32-NEXT:    addi a2, a2, -1
-; CHECK32-NEXT:    and a1, a2, a1
-; CHECK32-NEXT:    or a0, a0, a1
-; CHECK32-NEXT:    ret
+; RV32IM-LABEL: select_or_2:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    andi a2, a2, 1
+; RV32IM-NEXT:    addi a2, a2, -1
+; RV32IM-NEXT:    and a1, a2, a1
+; RV32IM-NEXT:    or a0, a0, a1
+; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_or_2:
 ; RV64IM:       # %bb.0: # %entry
@@ -385,6 +446,13 @@ define i32 @select_or_2(i32 %A, i32 %B, i8 %cond) {
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a1, a1, a2
 ; RV64IMXVTCONDOPS-NEXT:    or a0, a0, a1
 ; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; CHECKZICOND-LABEL: select_or_2:
+; CHECKZICOND:       # %bb.0: # %entry
+; CHECKZICOND-NEXT:    andi a2, a2, 1
+; CHECKZICOND-NEXT:    czero.nez a1, a1, a2
+; CHECKZICOND-NEXT:    or a0, a0, a1
+; CHECKZICOND-NEXT:    ret
 entry:
  %and = and i8 %cond, 1
  %cmp10 = icmp eq i8 %and, 0
@@ -396,13 +464,13 @@ entry:
 ; Equivalent to above, but with icmp ne (and %cond, 1), 1 instead of
 ; icmp eq (and %cond, 1), 0
 define i32 @select_or_2b(i32 %A, i32 %B, i8 %cond) {
-; CHECK32-LABEL: select_or_2b:
-; CHECK32:       # %bb.0: # %entry
-; CHECK32-NEXT:    andi a2, a2, 1
-; CHECK32-NEXT:    addi a2, a2, -1
-; CHECK32-NEXT:    and a1, a2, a1
-; CHECK32-NEXT:    or a0, a0, a1
-; CHECK32-NEXT:    ret
+; RV32IM-LABEL: select_or_2b:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    andi a2, a2, 1
+; RV32IM-NEXT:    addi a2, a2, -1
+; RV32IM-NEXT:    and a1, a2, a1
+; RV32IM-NEXT:    or a0, a0, a1
+; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_or_2b:
 ; RV64IM:       # %bb.0: # %entry
@@ -418,6 +486,13 @@ define i32 @select_or_2b(i32 %A, i32 %B, i8 %cond) {
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a1, a1, a2
 ; RV64IMXVTCONDOPS-NEXT:    or a0, a0, a1
 ; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; CHECKZICOND-LABEL: select_or_2b:
+; CHECKZICOND:       # %bb.0: # %entry
+; CHECKZICOND-NEXT:    andi a2, a2, 1
+; CHECKZICOND-NEXT:    czero.nez a1, a1, a2
+; CHECKZICOND-NEXT:    or a0, a0, a1
+; CHECKZICOND-NEXT:    ret
 entry:
  %and = and i8 %cond, 1
  %cmp10 = icmp ne i8 %and, 1
@@ -427,13 +502,13 @@ entry:
 }
 
 define i32 @select_or_3(i32 %A, i32 %B, i32 %cond) {
-; CHECK32-LABEL: select_or_3:
-; CHECK32:       # %bb.0: # %entry
-; CHECK32-NEXT:    andi a2, a2, 1
-; CHECK32-NEXT:    addi a2, a2, -1
-; CHECK32-NEXT:    and a1, a2, a1
-; CHECK32-NEXT:    or a0, a0, a1
-; CHECK32-NEXT:    ret
+; RV32IM-LABEL: select_or_3:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    andi a2, a2, 1
+; RV32IM-NEXT:    addi a2, a2, -1
+; RV32IM-NEXT:    and a1, a2, a1
+; RV32IM-NEXT:    or a0, a0, a1
+; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_or_3:
 ; RV64IM:       # %bb.0: # %entry
@@ -449,6 +524,13 @@ define i32 @select_or_3(i32 %A, i32 %B, i32 %cond) {
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a1, a1, a2
 ; RV64IMXVTCONDOPS-NEXT:    or a0, a0, a1
 ; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; CHECKZICOND-LABEL: select_or_3:
+; CHECKZICOND:       # %bb.0: # %entry
+; CHECKZICOND-NEXT:    andi a2, a2, 1
+; CHECKZICOND-NEXT:    czero.nez a1, a1, a2
+; CHECKZICOND-NEXT:    or a0, a0, a1
+; CHECKZICOND-NEXT:    ret
 entry:
  %and = and i32 %cond, 1
  %cmp10 = icmp eq i32 %and, 0
@@ -460,13 +542,13 @@ entry:
 ; Equivalent to above, but with icmp ne (and %cond, 1), 1 instead of
 ; icmp eq (and %cond, 1), 0
 define i32 @select_or_3b(i32 %A, i32 %B, i32 %cond) {
-; CHECK32-LABEL: select_or_3b:
-; CHECK32:       # %bb.0: # %entry
-; CHECK32-NEXT:    andi a2, a2, 1
-; CHECK32-NEXT:    addi a2, a2, -1
-; CHECK32-NEXT:    and a1, a2, a1
-; CHECK32-NEXT:    or a0, a0, a1
-; CHECK32-NEXT:    ret
+; RV32IM-LABEL: select_or_3b:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    andi a2, a2, 1
+; RV32IM-NEXT:    addi a2, a2, -1
+; RV32IM-NEXT:    and a1, a2, a1
+; RV32IM-NEXT:    or a0, a0, a1
+; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_or_3b:
 ; RV64IM:       # %bb.0: # %entry
@@ -482,6 +564,13 @@ define i32 @select_or_3b(i32 %A, i32 %B, i32 %cond) {
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a1, a1, a2
 ; RV64IMXVTCONDOPS-NEXT:    or a0, a0, a1
 ; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; CHECKZICOND-LABEL: select_or_3b:
+; CHECKZICOND:       # %bb.0: # %entry
+; CHECKZICOND-NEXT:    andi a2, a2, 1
+; CHECKZICOND-NEXT:    czero.nez a1, a1, a2
+; CHECKZICOND-NEXT:    or a0, a0, a1
+; CHECKZICOND-NEXT:    ret
 entry:
  %and = and i32 %cond, 1
  %cmp10 = icmp ne i32 %and, 1
@@ -491,12 +580,12 @@ entry:
 }
 
 define i32 @select_add_1(i1 zeroext %cond, i32 %a, i32 %b) {
-; CHECK32-LABEL: select_add_1:
-; CHECK32:       # %bb.0: # %entry
-; CHECK32-NEXT:    neg a0, a0
-; CHECK32-NEXT:    and a0, a0, a1
-; CHECK32-NEXT:    add a0, a2, a0
-; CHECK32-NEXT:    ret
+; RV32IM-LABEL: select_add_1:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    neg a0, a0
+; RV32IM-NEXT:    and a0, a0, a1
+; RV32IM-NEXT:    add a0, a2, a0
+; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_add_1:
 ; RV64IM:       # %bb.0: # %entry
@@ -514,6 +603,20 @@ define i32 @select_add_1(i1 zeroext %cond, i32 %a, i32 %b) {
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskc a0, a1, a0
 ; RV64IMXVTCONDOPS-NEXT:    or a0, a0, a2
 ; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_add_1:
+; RV32IMZICOND:       # %bb.0: # %entry
+; RV32IMZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV32IMZICOND-NEXT:    add a0, a2, a0
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_add_1:
+; RV64IMZICOND:       # %bb.0: # %entry
+; RV64IMZICOND-NEXT:    addw a1, a1, a2
+; RV64IMZICOND-NEXT:    czero.nez a2, a2, a0
+; RV64IMZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV64IMZICOND-NEXT:    or a0, a0, a2
+; RV64IMZICOND-NEXT:    ret
 entry:
   %c = add i32 %a, %b
   %res = select i1 %cond, i32 %c, i32 %b
@@ -521,12 +624,12 @@ entry:
 }
 
 define i32 @select_add_2(i1 zeroext %cond, i32 %a, i32 %b) {
-; CHECK32-LABEL: select_add_2:
-; CHECK32:       # %bb.0: # %entry
-; CHECK32-NEXT:    addi a0, a0, -1
-; CHECK32-NEXT:    and a0, a0, a2
-; CHECK32-NEXT:    add a0, a1, a0
-; CHECK32-NEXT:    ret
+; RV32IM-LABEL: select_add_2:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    addi a0, a0, -1
+; RV32IM-NEXT:    and a0, a0, a2
+; RV32IM-NEXT:    add a0, a1, a0
+; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_add_2:
 ; RV64IM:       # %bb.0: # %entry
@@ -544,6 +647,20 @@ define i32 @select_add_2(i1 zeroext %cond, i32 %a, i32 %b) {
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a0, a2, a0
 ; RV64IMXVTCONDOPS-NEXT:    or a0, a1, a0
 ; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_add_2:
+; RV32IMZICOND:       # %bb.0: # %entry
+; RV32IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT:    add a0, a1, a0
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_add_2:
+; RV64IMZICOND:       # %bb.0: # %entry
+; RV64IMZICOND-NEXT:    addw a2, a1, a2
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT:    or a0, a1, a0
+; RV64IMZICOND-NEXT:    ret
 entry:
   %c = add i32 %a, %b
   %res = select i1 %cond, i32 %a, i32 %c
@@ -551,12 +668,12 @@ entry:
 }
 
 define i32 @select_add_3(i1 zeroext %cond, i32 %a) {
-; CHECK32-LABEL: select_add_3:
-; CHECK32:       # %bb.0: # %entry
-; CHECK32-NEXT:    addi a0, a0, -1
-; CHECK32-NEXT:    andi a0, a0, 42
-; CHECK32-NEXT:    add a0, a1, a0
-; CHECK32-NEXT:    ret
+; RV32IM-LABEL: select_add_3:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    addi a0, a0, -1
+; RV32IM-NEXT:    andi a0, a0, 42
+; RV32IM-NEXT:    add a0, a1, a0
+; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_add_3:
 ; RV64IM:       # %bb.0: # %entry
@@ -574,6 +691,21 @@ define i32 @select_add_3(i1 zeroext %cond, i32 %a) {
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a0, a2, a0
 ; RV64IMXVTCONDOPS-NEXT:    or a0, a1, a0
 ; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_add_3:
+; RV32IMZICOND:       # %bb.0: # %entry
+; RV32IMZICOND-NEXT:    li a2, 42
+; RV32IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT:    add a0, a1, a0
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_add_3:
+; RV64IMZICOND:       # %bb.0: # %entry
+; RV64IMZICOND-NEXT:    addiw a2, a1, 42
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT:    or a0, a1, a0
+; RV64IMZICOND-NEXT:    ret
 entry:
   %c = add i32 %a, 42
   %res = select i1 %cond, i32 %a, i32 %c
@@ -581,14 +713,14 @@ entry:
 }
 
 define i32 @select_sub_1(i1 zeroext %cond, i32 %a, i32 %b) {
-; CHECK32-LABEL: select_sub_1:
-; CHECK32:       # %bb.0: # %entry
-; CHECK32-NEXT:    beqz a0, .LBB19_2
-; CHECK32-NEXT:  # %bb.1:
-; CHECK32-NEXT:    sub a2, a1, a2
-; CHECK32-NEXT:  .LBB19_2: # %entry
-; CHECK32-NEXT:    mv a0, a2
-; CHECK32-NEXT:    ret
+; RV32IM-LABEL: select_sub_1:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    beqz a0, .LBB19_2
+; RV32IM-NEXT:  # %bb.1:
+; RV32IM-NEXT:    sub a2, a1, a2
+; RV32IM-NEXT:  .LBB19_2: # %entry
+; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_sub_1:
 ; RV64IM:       # %bb.0: # %entry
@@ -606,6 +738,22 @@ define i32 @select_sub_1(i1 zeroext %cond, i32 %a, i32 %b) {
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskc a0, a1, a0
 ; RV64IMXVTCONDOPS-NEXT:    or a0, a0, a2
 ; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_sub_1:
+; RV32IMZICOND:       # %bb.0: # %entry
+; RV32IMZICOND-NEXT:    sub a1, a1, a2
+; RV32IMZICOND-NEXT:    czero.nez a2, a2, a0
+; RV32IMZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV32IMZICOND-NEXT:    or a0, a0, a2
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_sub_1:
+; RV64IMZICOND:       # %bb.0: # %entry
+; RV64IMZICOND-NEXT:    subw a1, a1, a2
+; RV64IMZICOND-NEXT:    czero.nez a2, a2, a0
+; RV64IMZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV64IMZICOND-NEXT:    or a0, a0, a2
+; RV64IMZICOND-NEXT:    ret
 entry:
   %c = sub i32 %a, %b
   %res = select i1 %cond, i32 %c, i32 %b
@@ -613,12 +761,12 @@ entry:
 }
 
 define i32 @select_sub_2(i1 zeroext %cond, i32 %a, i32 %b) {
-; CHECK32-LABEL: select_sub_2:
-; CHECK32:       # %bb.0: # %entry
-; CHECK32-NEXT:    addi a0, a0, -1
-; CHECK32-NEXT:    and a0, a0, a2
-; CHECK32-NEXT:    sub a0, a1, a0
-; CHECK32-NEXT:    ret
+; RV32IM-LABEL: select_sub_2:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    addi a0, a0, -1
+; RV32IM-NEXT:    and a0, a0, a2
+; RV32IM-NEXT:    sub a0, a1, a0
+; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_sub_2:
 ; RV64IM:       # %bb.0: # %entry
@@ -636,6 +784,20 @@ define i32 @select_sub_2(i1 zeroext %cond, i32 %a, i32 %b) {
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a0, a2, a0
 ; RV64IMXVTCONDOPS-NEXT:    or a0, a1, a0
 ; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_sub_2:
+; RV32IMZICOND:       # %bb.0: # %entry
+; RV32IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT:    sub a0, a1, a0
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_sub_2:
+; RV64IMZICOND:       # %bb.0: # %entry
+; RV64IMZICOND-NEXT:    subw a2, a1, a2
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT:    or a0, a1, a0
+; RV64IMZICOND-NEXT:    ret
 entry:
   %c = sub i32 %a, %b
   %res = select i1 %cond, i32 %a, i32 %c
@@ -643,12 +805,12 @@ entry:
 }
 
 define i32 @select_sub_3(i1 zeroext %cond, i32 %a) {
-; CHECK32-LABEL: select_sub_3:
-; CHECK32:       # %bb.0: # %entry
-; CHECK32-NEXT:    addi a0, a0, -1
-; CHECK32-NEXT:    andi a0, a0, 42
-; CHECK32-NEXT:    sub a0, a1, a0
-; CHECK32-NEXT:    ret
+; RV32IM-LABEL: select_sub_3:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    addi a0, a0, -1
+; RV32IM-NEXT:    andi a0, a0, 42
+; RV32IM-NEXT:    sub a0, a1, a0
+; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_sub_3:
 ; RV64IM:       # %bb.0: # %entry
@@ -666,6 +828,21 @@ define i32 @select_sub_3(i1 zeroext %cond, i32 %a) {
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a0, a2, a0
 ; RV64IMXVTCONDOPS-NEXT:    or a0, a1, a0
 ; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_sub_3:
+; RV32IMZICOND:       # %bb.0: # %entry
+; RV32IMZICOND-NEXT:    li a2, 42
+; RV32IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT:    sub a0, a1, a0
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_sub_3:
+; RV64IMZICOND:       # %bb.0: # %entry
+; RV64IMZICOND-NEXT:    addiw a2, a1, -42
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT:    or a0, a1, a0
+; RV64IMZICOND-NEXT:    ret
 entry:
   %c = sub i32 %a, 42
   %res = select i1 %cond, i32 %a, i32 %c
@@ -673,14 +850,14 @@ entry:
 }
 
 define i32 @select_and_1(i1 zeroext %cond, i32 %a, i32 %b) {
-; CHECK32-LABEL: select_and_1:
-; CHECK32:       # %bb.0: # %entry
-; CHECK32-NEXT:    beqz a0, .LBB22_2
-; CHECK32-NEXT:  # %bb.1:
-; CHECK32-NEXT:    and a2, a1, a2
-; CHECK32-NEXT:  .LBB22_2: # %entry
-; CHECK32-NEXT:    mv a0, a2
-; CHECK32-NEXT:    ret
+; RV32IM-LABEL: select_and_1:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    beqz a0, .LBB22_2
+; RV32IM-NEXT:  # %bb.1:
+; RV32IM-NEXT:    and a2, a1, a2
+; RV32IM-NEXT:  .LBB22_2: # %entry
+; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_and_1:
 ; RV64IM:       # %bb.0: # %entry
@@ -697,6 +874,13 @@ define i32 @select_and_1(i1 zeroext %cond, i32 %a, i32 %b) {
 ; RV64IMXVTCONDOPS-NEXT:    and a1, a2, a1
 ; RV64IMXVTCONDOPS-NEXT:    or a0, a1, a0
 ; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; CHECKZICOND-LABEL: select_and_1:
+; CHECKZICOND:       # %bb.0: # %entry
+; CHECKZICOND-NEXT:    czero.nez a0, a2, a0
+; CHECKZICOND-NEXT:    and a1, a2, a1
+; CHECKZICOND-NEXT:    or a0, a1, a0
+; CHECKZICOND-NEXT:    ret
 entry:
   %c = and i32 %a, %b
   %res = select i1 %cond, i32 %c, i32 %b
@@ -704,14 +888,14 @@ entry:
 }
 
 define i32 @select_and_2(i1 zeroext %cond, i32 %a, i32 %b) {
-; CHECK32-LABEL: select_and_2:
-; CHECK32:       # %bb.0: # %entry
-; CHECK32-NEXT:    bnez a0, .LBB23_2
-; CHECK32-NEXT:  # %bb.1: # %entry
-; CHECK32-NEXT:    and a1, a1, a2
-; CHECK32-NEXT:  .LBB23_2: # %entry
-; CHECK32-NEXT:    mv a0, a1
-; CHECK32-NEXT:    ret
+; RV32IM-LABEL: select_and_2:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    bnez a0, .LBB23_2
+; RV32IM-NEXT:  # %bb.1: # %entry
+; RV32IM-NEXT:    and a1, a1, a2
+; RV32IM-NEXT:  .LBB23_2: # %entry
+; RV32IM-NEXT:    mv a0, a1
+; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_and_2:
 ; RV64IM:       # %bb.0: # %entry
@@ -728,6 +912,13 @@ define i32 @select_and_2(i1 zeroext %cond, i32 %a, i32 %b) {
 ; RV64IMXVTCONDOPS-NEXT:    and a1, a1, a2
 ; RV64IMXVTCONDOPS-NEXT:    or a0, a1, a0
 ; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; CHECKZICOND-LABEL: select_and_2:
+; CHECKZICOND:       # %bb.0: # %entry
+; CHECKZICOND-NEXT:    czero.eqz a0, a1, a0
+; CHECKZICOND-NEXT:    and a1, a1, a2
+; CHECKZICOND-NEXT:    or a0, a1, a0
+; CHECKZICOND-NEXT:    ret
 entry:
   %c = and i32 %a, %b
   %res = select i1 %cond, i32 %a, i32 %c
@@ -735,14 +926,14 @@ entry:
 }
 
 define i32 @select_and_3(i1 zeroext %cond, i32 %a) {
-; CHECK32-LABEL: select_and_3:
-; CHECK32:       # %bb.0: # %entry
-; CHECK32-NEXT:    bnez a0, .LBB24_2
-; CHECK32-NEXT:  # %bb.1: # %entry
-; CHECK32-NEXT:    andi a1, a1, 42
-; CHECK32-NEXT:  .LBB24_2: # %entry
-; CHECK32-NEXT:    mv a0, a1
-; CHECK32-NEXT:    ret
+; RV32IM-LABEL: select_and_3:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    bnez a0, .LBB24_2
+; RV32IM-NEXT:  # %bb.1: # %entry
+; RV32IM-NEXT:    andi a1, a1, 42
+; RV32IM-NEXT:  .LBB24_2: # %entry
+; RV32IM-NEXT:    mv a0, a1
+; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_and_3:
 ; RV64IM:       # %bb.0: # %entry
@@ -760,6 +951,14 @@ define i32 @select_and_3(i1 zeroext %cond, i32 %a) {
 ; RV64IMXVTCONDOPS-NEXT:    and a1, a1, a2
 ; RV64IMXVTCONDOPS-NEXT:    or a0, a1, a0
 ; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; CHECKZICOND-LABEL: select_and_3:
+; CHECKZICOND:       # %bb.0: # %entry
+; CHECKZICOND-NEXT:    czero.eqz a0, a1, a0
+; CHECKZICOND-NEXT:    li a2, 42
+; CHECKZICOND-NEXT:    and a1, a1, a2
+; CHECKZICOND-NEXT:    or a0, a1, a0
+; CHECKZICOND-NEXT:    ret
 entry:
   %c = and i32 %a, 42
   %res = select i1 %cond, i32 %a, i32 %c
@@ -767,14 +966,14 @@ entry:
 }
 
 define i32 @select_udiv_1(i1 zeroext %cond, i32 %a, i32 %b) {
-; CHECK32-LABEL: select_udiv_1:
-; CHECK32:       # %bb.0: # %entry
-; CHECK32-NEXT:    beqz a0, .LBB25_2
-; CHECK32-NEXT:  # %bb.1:
-; CHECK32-NEXT:    divu a2, a1, a2
-; CHECK32-NEXT:  .LBB25_2: # %entry
-; CHECK32-NEXT:    mv a0, a2
-; CHECK32-NEXT:    ret
+; RV32IM-LABEL: select_udiv_1:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    beqz a0, .LBB25_2
+; RV32IM-NEXT:  # %bb.1:
+; RV32IM-NEXT:    divu a2, a1, a2
+; RV32IM-NEXT:  .LBB25_2: # %entry
+; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_udiv_1:
 ; RV64IM:       # %bb.0: # %entry
@@ -792,6 +991,22 @@ define i32 @select_udiv_1(i1 zeroext %cond, i32 %a, i32 %b) {
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskc a0, a1, a0
 ; RV64IMXVTCONDOPS-NEXT:    or a0, a0, a2
 ; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_udiv_1:
+; RV32IMZICOND:       # %bb.0: # %entry
+; RV32IMZICOND-NEXT:    divu a1, a1, a2
+; RV32IMZICOND-NEXT:    czero.nez a2, a2, a0
+; RV32IMZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV32IMZICOND-NEXT:    or a0, a0, a2
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_udiv_1:
+; RV64IMZICOND:       # %bb.0: # %entry
+; RV64IMZICOND-NEXT:    divuw a1, a1, a2
+; RV64IMZICOND-NEXT:    czero.nez a2, a2, a0
+; RV64IMZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV64IMZICOND-NEXT:    or a0, a0, a2
+; RV64IMZICOND-NEXT:    ret
 entry:
   %c = udiv i32 %a, %b
   %res = select i1 %cond, i32 %c, i32 %b
@@ -799,14 +1014,14 @@ entry:
 }
 
 define i32 @select_udiv_2(i1 zeroext %cond, i32 %a, i32 %b) {
-; CHECK32-LABEL: select_udiv_2:
-; CHECK32:       # %bb.0: # %entry
-; CHECK32-NEXT:    bnez a0, .LBB26_2
-; CHECK32-NEXT:  # %bb.1: # %entry
-; CHECK32-NEXT:    divu a1, a1, a2
-; CHECK32-NEXT:  .LBB26_2: # %entry
-; CHECK32-NEXT:    mv a0, a1
-; CHECK32-NEXT:    ret
+; RV32IM-LABEL: select_udiv_2:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    bnez a0, .LBB26_2
+; RV32IM-NEXT:  # %bb.1: # %entry
+; RV32IM-NEXT:    divu a1, a1, a2
+; RV32IM-NEXT:  .LBB26_2: # %entry
+; RV32IM-NEXT:    mv a0, a1
+; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_udiv_2:
 ; RV64IM:       # %bb.0: # %entry
@@ -824,6 +1039,22 @@ define i32 @select_udiv_2(i1 zeroext %cond, i32 %a, i32 %b) {
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a0, a2, a0
 ; RV64IMXVTCONDOPS-NEXT:    or a0, a1, a0
 ; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_udiv_2:
+; RV32IMZICOND:       # %bb.0: # %entry
+; RV32IMZICOND-NEXT:    divu a2, a1, a2
+; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV32IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT:    or a0, a1, a0
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_udiv_2:
+; RV64IMZICOND:       # %bb.0: # %entry
+; RV64IMZICOND-NEXT:    divuw a2, a1, a2
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT:    or a0, a1, a0
+; RV64IMZICOND-NEXT:    ret
 entry:
   %c = udiv i32 %a, %b
   %res = select i1 %cond, i32 %a, i32 %c
@@ -831,18 +1062,18 @@ entry:
 }
 
 define i32 @select_udiv_3(i1 zeroext %cond, i32 %a) {
-; CHECK32-LABEL: select_udiv_3:
-; CHECK32:       # %bb.0: # %entry
-; CHECK32-NEXT:    bnez a0, .LBB27_2
-; CHECK32-NEXT:  # %bb.1: # %entry
-; CHECK32-NEXT:    srli a1, a1, 1
-; CHECK32-NEXT:    lui a0, 199729
-; CHECK32-NEXT:    addi a0, a0, -975
-; CHECK32-NEXT:    mulhu a1, a1, a0
-; CHECK32-NEXT:    srli a1, a1, 2
-; CHECK32-NEXT:  .LBB27_2: # %entry
-; CHECK32-NEXT:    mv a0, a1
-; CHECK32-NEXT:    ret
+; RV32IM-LABEL: select_udiv_3:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    bnez a0, .LBB27_2
+; RV32IM-NEXT:  # %bb.1: # %entry
+; RV32IM-NEXT:    srli a1, a1, 1
+; RV32IM-NEXT:    lui a0, 199729
+; RV32IM-NEXT:    addi a0, a0, -975
+; RV32IM-NEXT:    mulhu a1, a1, a0
+; RV32IM-NEXT:    srli a1, a1, 2
+; RV32IM-NEXT:  .LBB27_2: # %entry
+; RV32IM-NEXT:    mv a0, a1
+; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_udiv_3:
 ; RV64IM:       # %bb.0: # %entry
@@ -868,6 +1099,30 @@ define i32 @select_udiv_3(i1 zeroext %cond, i32 %a) {
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a0, a2, a0
 ; RV64IMXVTCONDOPS-NEXT:    or a0, a1, a0
 ; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_udiv_3:
+; RV32IMZICOND:       # %bb.0: # %entry
+; RV32IMZICOND-NEXT:    srli a2, a1, 1
+; RV32IMZICOND-NEXT:    lui a3, 199729
+; RV32IMZICOND-NEXT:    addi a3, a3, -975
+; RV32IMZICOND-NEXT:    mulhu a2, a2, a3
+; RV32IMZICOND-NEXT:    srli a2, a2, 2
+; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV32IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT:    or a0, a1, a0
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_udiv_3:
+; RV64IMZICOND:       # %bb.0: # %entry
+; RV64IMZICOND-NEXT:    srliw a2, a1, 1
+; RV64IMZICOND-NEXT:    lui a3, 199729
+; RV64IMZICOND-NEXT:    addiw a3, a3, -975
+; RV64IMZICOND-NEXT:    mul a2, a2, a3
+; RV64IMZICOND-NEXT:    srli a2, a2, 34
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT:    or a0, a1, a0
+; RV64IMZICOND-NEXT:    ret
 entry:
   %c = udiv i32 %a, 42
   %res = select i1 %cond, i32 %a, i32 %c
@@ -875,14 +1130,14 @@ entry:
 }
 
 define i32 @select_shl_1(i1 zeroext %cond, i32 %a, i32 %b) {
-; CHECK32-LABEL: select_shl_1:
-; CHECK32:       # %bb.0: # %entry
-; CHECK32-NEXT:    beqz a0, .LBB28_2
-; CHECK32-NEXT:  # %bb.1:
-; CHECK32-NEXT:    sll a2, a1, a2
-; CHECK32-NEXT:  .LBB28_2: # %entry
-; CHECK32-NEXT:    mv a0, a2
-; CHECK32-NEXT:    ret
+; RV32IM-LABEL: select_shl_1:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    beqz a0, .LBB28_2
+; RV32IM-NEXT:  # %bb.1:
+; RV32IM-NEXT:    sll a2, a1, a2
+; RV32IM-NEXT:  .LBB28_2: # %entry
+; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_shl_1:
 ; RV64IM:       # %bb.0: # %entry
@@ -900,6 +1155,22 @@ define i32 @select_shl_1(i1 zeroext %cond, i32 %a, i32 %b) {
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskc a0, a1, a0
 ; RV64IMXVTCONDOPS-NEXT:    or a0, a0, a2
 ; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_shl_1:
+; RV32IMZICOND:       # %bb.0: # %entry
+; RV32IMZICOND-NEXT:    sll a1, a1, a2
+; RV32IMZICOND-NEXT:    czero.nez a2, a2, a0
+; RV32IMZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV32IMZICOND-NEXT:    or a0, a0, a2
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_shl_1:
+; RV64IMZICOND:       # %bb.0: # %entry
+; RV64IMZICOND-NEXT:    sllw a1, a1, a2
+; RV64IMZICOND-NEXT:    czero.nez a2, a2, a0
+; RV64IMZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV64IMZICOND-NEXT:    or a0, a0, a2
+; RV64IMZICOND-NEXT:    ret
 entry:
   %c = shl i32 %a, %b
   %res = select i1 %cond, i32 %c, i32 %b
@@ -907,14 +1178,14 @@ entry:
 }
 
 define i32 @select_shl_2(i1 zeroext %cond, i32 %a, i32 %b) {
-; CHECK32-LABEL: select_shl_2:
-; CHECK32:       # %bb.0: # %entry
-; CHECK32-NEXT:    bnez a0, .LBB29_2
-; CHECK32-NEXT:  # %bb.1: # %entry
-; CHECK32-NEXT:    sll a1, a1, a2
-; CHECK32-NEXT:  .LBB29_2: # %entry
-; CHECK32-NEXT:    mv a0, a1
-; CHECK32-NEXT:    ret
+; RV32IM-LABEL: select_shl_2:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    bnez a0, .LBB29_2
+; RV32IM-NEXT:  # %bb.1: # %entry
+; RV32IM-NEXT:    sll a1, a1, a2
+; RV32IM-NEXT:  .LBB29_2: # %entry
+; RV32IM-NEXT:    mv a0, a1
+; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_shl_2:
 ; RV64IM:       # %bb.0: # %entry
@@ -932,6 +1203,22 @@ define i32 @select_shl_2(i1 zeroext %cond, i32 %a, i32 %b) {
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a0, a2, a0
 ; RV64IMXVTCONDOPS-NEXT:    or a0, a1, a0
 ; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_shl_2:
+; RV32IMZICOND:       # %bb.0: # %entry
+; RV32IMZICOND-NEXT:    sll a2, a1, a2
+; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV32IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT:    or a0, a1, a0
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_shl_2:
+; RV64IMZICOND:       # %bb.0: # %entry
+; RV64IMZICOND-NEXT:    sllw a2, a1, a2
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT:    or a0, a1, a0
+; RV64IMZICOND-NEXT:    ret
 entry:
   %c = shl i32 %a, %b
   %res = select i1 %cond, i32 %a, i32 %c
@@ -950,14 +1237,14 @@ entry:
 }
 
 define i32 @select_ashr_1(i1 zeroext %cond, i32 %a, i32 %b) {
-; CHECK32-LABEL: select_ashr_1:
-; CHECK32:       # %bb.0: # %entry
-; CHECK32-NEXT:    beqz a0, .LBB31_2
-; CHECK32-NEXT:  # %bb.1:
-; CHECK32-NEXT:    sra a2, a1, a2
-; CHECK32-NEXT:  .LBB31_2: # %entry
-; CHECK32-NEXT:    mv a0, a2
-; CHECK32-NEXT:    ret
+; RV32IM-LABEL: select_ashr_1:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    beqz a0, .LBB31_2
+; RV32IM-NEXT:  # %bb.1:
+; RV32IM-NEXT:    sra a2, a1, a2
+; RV32IM-NEXT:  .LBB31_2: # %entry
+; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_ashr_1:
 ; RV64IM:       # %bb.0: # %entry
@@ -975,6 +1262,22 @@ define i32 @select_ashr_1(i1 zeroext %cond, i32 %a, i32 %b) {
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskc a0, a1, a0
 ; RV64IMXVTCONDOPS-NEXT:    or a0, a0, a2
 ; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_ashr_1:
+; RV32IMZICOND:       # %bb.0: # %entry
+; RV32IMZICOND-NEXT:    sra a1, a1, a2
+; RV32IMZICOND-NEXT:    czero.nez a2, a2, a0
+; RV32IMZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV32IMZICOND-NEXT:    or a0, a0, a2
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_ashr_1:
+; RV64IMZICOND:       # %bb.0: # %entry
+; RV64IMZICOND-NEXT:    sraw a1, a1, a2
+; RV64IMZICOND-NEXT:    czero.nez a2, a2, a0
+; RV64IMZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV64IMZICOND-NEXT:    or a0, a0, a2
+; RV64IMZICOND-NEXT:    ret
 entry:
   %c = ashr i32 %a, %b
   %res = select i1 %cond, i32 %c, i32 %b
@@ -982,14 +1285,14 @@ entry:
 }
 
 define i32 @select_ashr_2(i1 zeroext %cond, i32 %a, i32 %b) {
-; CHECK32-LABEL: select_ashr_2:
-; CHECK32:       # %bb.0: # %entry
-; CHECK32-NEXT:    bnez a0, .LBB32_2
-; CHECK32-NEXT:  # %bb.1: # %entry
-; CHECK32-NEXT:    sra a1, a1, a2
-; CHECK32-NEXT:  .LBB32_2: # %entry
-; CHECK32-NEXT:    mv a0, a1
-; CHECK32-NEXT:    ret
+; RV32IM-LABEL: select_ashr_2:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    bnez a0, .LBB32_2
+; RV32IM-NEXT:  # %bb.1: # %entry
+; RV32IM-NEXT:    sra a1, a1, a2
+; RV32IM-NEXT:  .LBB32_2: # %entry
+; RV32IM-NEXT:    mv a0, a1
+; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_ashr_2:
 ; RV64IM:       # %bb.0: # %entry
@@ -1007,6 +1310,22 @@ define i32 @select_ashr_2(i1 zeroext %cond, i32 %a, i32 %b) {
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a0, a2, a0
 ; RV64IMXVTCONDOPS-NEXT:    or a0, a1, a0
 ; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_ashr_2:
+; RV32IMZICOND:       # %bb.0: # %entry
+; RV32IMZICOND-NEXT:    sra a2, a1, a2
+; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV32IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT:    or a0, a1, a0
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_ashr_2:
+; RV64IMZICOND:       # %bb.0: # %entry
+; RV64IMZICOND-NEXT:    sraw a2, a1, a2
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT:    or a0, a1, a0
+; RV64IMZICOND-NEXT:    ret
 entry:
   %c = ashr i32 %a, %b
   %res = select i1 %cond, i32 %a, i32 %c
@@ -1025,14 +1344,14 @@ entry:
 }
 
 define i32 @select_lshr_1(i1 zeroext %cond, i32 %a, i32 %b) {
-; CHECK32-LABEL: select_lshr_1:
-; CHECK32:       # %bb.0: # %entry
-; CHECK32-NEXT:    beqz a0, .LBB34_2
-; CHECK32-NEXT:  # %bb.1:
-; CHECK32-NEXT:    srl a2, a1, a2
-; CHECK32-NEXT:  .LBB34_2: # %entry
-; CHECK32-NEXT:    mv a0, a2
-; CHECK32-NEXT:    ret
+; RV32IM-LABEL: select_lshr_1:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    beqz a0, .LBB34_2
+; RV32IM-NEXT:  # %bb.1:
+; RV32IM-NEXT:    srl a2, a1, a2
+; RV32IM-NEXT:  .LBB34_2: # %entry
+; RV32IM-NEXT:    mv a0, a2
+; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_lshr_1:
 ; RV64IM:       # %bb.0: # %entry
@@ -1050,6 +1369,22 @@ define i32 @select_lshr_1(i1 zeroext %cond, i32 %a, i32 %b) {
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskc a0, a1, a0
 ; RV64IMXVTCONDOPS-NEXT:    or a0, a0, a2
 ; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_lshr_1:
+; RV32IMZICOND:       # %bb.0: # %entry
+; RV32IMZICOND-NEXT:    srl a1, a1, a2
+; RV32IMZICOND-NEXT:    czero.nez a2, a2, a0
+; RV32IMZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV32IMZICOND-NEXT:    or a0, a0, a2
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_lshr_1:
+; RV64IMZICOND:       # %bb.0: # %entry
+; RV64IMZICOND-NEXT:    srlw a1, a1, a2
+; RV64IMZICOND-NEXT:    czero.nez a2, a2, a0
+; RV64IMZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV64IMZICOND-NEXT:    or a0, a0, a2
+; RV64IMZICOND-NEXT:    ret
 entry:
   %c = lshr i32 %a, %b
   %res = select i1 %cond, i32 %c, i32 %b
@@ -1057,14 +1392,14 @@ entry:
 }
 
 define i32 @select_lshr_2(i1 zeroext %cond, i32 %a, i32 %b) {
-; CHECK32-LABEL: select_lshr_2:
-; CHECK32:       # %bb.0: # %entry
-; CHECK32-NEXT:    bnez a0, .LBB35_2
-; CHECK32-NEXT:  # %bb.1: # %entry
-; CHECK32-NEXT:    srl a1, a1, a2
-; CHECK32-NEXT:  .LBB35_2: # %entry
-; CHECK32-NEXT:    mv a0, a1
-; CHECK32-NEXT:    ret
+; RV32IM-LABEL: select_lshr_2:
+; RV32IM:       # %bb.0: # %entry
+; RV32IM-NEXT:    bnez a0, .LBB35_2
+; RV32IM-NEXT:  # %bb.1: # %entry
+; RV32IM-NEXT:    srl a1, a1, a2
+; RV32IM-NEXT:  .LBB35_2: # %entry
+; RV32IM-NEXT:    mv a0, a1
+; RV32IM-NEXT:    ret
 ;
 ; RV64IM-LABEL: select_lshr_2:
 ; RV64IM:       # %bb.0: # %entry
@@ -1082,6 +1417,22 @@ define i32 @select_lshr_2(i1 zeroext %cond, i32 %a, i32 %b) {
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a0, a2, a0
 ; RV64IMXVTCONDOPS-NEXT:    or a0, a1, a0
 ; RV64IMXVTCONDOPS-NEXT:    ret
+;
+; RV32IMZICOND-LABEL: select_lshr_2:
+; RV32IMZICOND:       # %bb.0: # %entry
+; RV32IMZICOND-NEXT:    srl a2, a1, a2
+; RV32IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV32IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT:    or a0, a1, a0
+; RV32IMZICOND-NEXT:    ret
+;
+; RV64IMZICOND-LABEL: select_lshr_2:
+; RV64IMZICOND:       # %bb.0: # %entry
+; RV64IMZICOND-NEXT:    srlw a2, a1, a2
+; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT:    or a0, a1, a0
+; RV64IMZICOND-NEXT:    ret
 entry:
   %c = lshr i32 %a, %b
   %res = select i1 %cond, i32 %a, i32 %c
@@ -1098,5 +1449,3 @@ entry:
   %res = select i1 %cond, i32 %a, i32 %c
   ret i32 %res
 }
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; RV32IM: {{.*}}


        


More information about the llvm-commits mailing list