[llvm] e12be9c - [RISCV] Don't promote ISD::SELECT with rv64-legal-i32 when XTHeadCondMov is enabled.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Fri Feb 2 11:54:26 PST 2024


Author: Craig Topper
Date: 2024-02-02T11:53:47-08:00
New Revision: e12be9cde44f92bc5a788930508a7fd13db78f11

URL: https://github.com/llvm/llvm-project/commit/e12be9cde44f92bc5a788930508a7fd13db78f11
DIFF: https://github.com/llvm/llvm-project/commit/e12be9cde44f92bc5a788930508a7fd13db78f11.diff

LOG: [RISCV] Don't promote ISD::SELECT with rv64-legal-i32 when XTHeadCondMov is enabled.

Fixes an infinite loop.

With XTHeadCondMov enabled, XLenVT ISD::SELECT stays Legal rather than
being custom lowered, so the i32 Promote action is now only set in the
non-XTHeadCondMov case, and XTHead isel patterns for i32 select are
added instead.

Test copied from the non-rv64-legal-i32 condops.ll test.
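For context, a minimal i32 select of the shape affected by this change might
look like the sketch below (a hypothetical reproducer, not taken from the
commit; the function name and the exact -mattr combination are illustrative,
reusing the flags from the new test's RUN lines). With this patch such a
select should be matched by the new XTHead i32 patterns instead of being
promoted.

  ; Hypothetical i32 select under rv64-legal-i32 with XTHeadCondMov.
  ; RUN: llc -mtriple=riscv64 -mattr=+f,+zbs,+xtheadcondmov \
  ; RUN:   -riscv-experimental-rv64-legal-i32 < %s
  define i32 @select_i32(i1 zeroext %rc, i32 %rs1, i32 %rs2) {
    %sel = select i1 %rc, i32 %rs1, i32 %rs2
    ret i32 %sel
  }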

Added: 
    llvm/test/CodeGen/RISCV/rv64-legal-i32/condops.ll

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVInstrInfoXTHead.td

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index b5db41197a35a..bb8204d711d13 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -411,11 +411,11 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
   if (Subtarget.hasShortForwardBranchOpt())
     setOperationAction(ISD::ABS, XLenVT, Legal);
 
-  if (!Subtarget.hasVendorXTHeadCondMov())
+  if (!Subtarget.hasVendorXTHeadCondMov()) {
     setOperationAction(ISD::SELECT, XLenVT, Custom);
-
-  if (RV64LegalI32 && Subtarget.is64Bit())
-    setOperationAction(ISD::SELECT, MVT::i32, Promote);
+    if (RV64LegalI32 && Subtarget.is64Bit())
+      setOperationAction(ISD::SELECT, MVT::i32, Promote);
+  }
 
   static const unsigned FPLegalNodeTypes[] = {
       ISD::FMINNUM,        ISD::FMAXNUM,       ISD::LRINT,

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXTHead.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXTHead.td
index 1d44b1ad26364..9fb431b26b1f8 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXTHead.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXTHead.td
@@ -909,3 +909,24 @@ defm : StoreUpdatePat<post_truncsti16, TH_SHIA, i32>;
 defm : StoreUpdatePat<pre_truncsti16, TH_SHIB, i32>;
 }
 
+let Predicates = [HasVendorXTHeadCondMov, IsRV64] in {
+def : Pat<(select (XLenVT GPR:$cond), (i32 GPR:$a), (i32 GPR:$b)),
+          (TH_MVEQZ GPR:$a, GPR:$b, GPR:$cond)>;
+def : Pat<(select (XLenVT GPR:$cond), (i32 GPR:$a), (i32 0)),
+          (TH_MVEQZ GPR:$a, (XLenVT X0), GPR:$cond)>;
+def : Pat<(select (XLenVT GPR:$cond), (i32 0), (i32 GPR:$b)),
+          (TH_MVNEZ GPR:$b, (XLenVT X0), GPR:$cond)>;
+
+def : Pat<(select (riscv_seteq (XLenVT GPR:$cond)), (i32 GPR:$a), (i32 GPR:$b)),
+          (TH_MVNEZ GPR:$a, GPR:$b, GPR:$cond)>;
+def : Pat<(select (riscv_setne (XLenVT GPR:$cond)), (i32 GPR:$a), (i32 GPR:$b)),
+          (TH_MVEQZ GPR:$a, GPR:$b, GPR:$cond)>;
+def : Pat<(select (riscv_seteq (XLenVT GPR:$cond)), (i32 GPR:$a), (i32 0)),
+          (TH_MVNEZ GPR:$a, (XLenVT X0), GPR:$cond)>;
+def : Pat<(select (riscv_setne (XLenVT GPR:$cond)), (i32 GPR:$a), (i32 0)),
+          (TH_MVEQZ GPR:$a, (XLenVT X0), GPR:$cond)>;
+def : Pat<(select (riscv_seteq (XLenVT GPR:$cond)), (i32 0), (i32 GPR:$b)),
+          (TH_MVEQZ GPR:$b, (XLenVT X0), GPR:$cond)>;
+def : Pat<(select (riscv_setne (XLenVT GPR:$cond)),  (i32 0), (i32 GPR:$b)),
+          (TH_MVNEZ GPR:$b, (XLenVT X0), GPR:$cond)>;
+} // Predicates = [HasVendorXTHeadCondMov, IsRV64]

diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/condops.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/condops.ll
new file mode 100644
index 0000000000000..42e12056d7d37
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64-legal-i32/condops.ll
@@ -0,0 +1,2284 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -mtriple=riscv64 -target-abi=lp64f -mattr=+f,+zbs \
+; RUN:   -riscv-experimental-rv64-legal-i32 < %s | FileCheck %s -check-prefix=RV64I
+; RUN: llc -mtriple=riscv64 -target-abi=lp64f -mattr=+f,+zbs,+xventanacondops \
+; RUN:   -riscv-experimental-rv64-legal-i32 < %s | FileCheck %s -check-prefix=RV64XVENTANACONDOPS
+; RUN: llc -mtriple=riscv64 -target-abi=lp64f -mattr=+f,+zbs,+xtheadcondmov \
+; RUN:   -riscv-experimental-rv64-legal-i32 < %s | FileCheck %s -check-prefix=RV64XTHEADCONDMOV
+; RUN: llc -mtriple=riscv64 -target-abi=lp64f -mattr=+f,+zbs,+zicond \
+; RUN:   -riscv-experimental-rv64-legal-i32 < %s | FileCheck %s -check-prefix=RV64ZICOND
+
+define i64 @zero1(i64 %rs1, i1 zeroext %rc) {
+; RV64I-LABEL: zero1:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    neg a1, a1
+; RV64I-NEXT:    and a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: zero1:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: zero1:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    th.mveqz a0, zero, a1
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: zero1:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.eqz a0, a0, a1
+; RV64ZICOND-NEXT:    ret
+  %sel = select i1 %rc, i64 %rs1, i64 0
+  ret i64 %sel
+}
+
+define i64 @zero2(i64 %rs1, i1 zeroext %rc) {
+; RV64I-LABEL: zero2:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a1, a1, -1
+; RV64I-NEXT:    and a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: zero2:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: zero2:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    th.mvnez a0, zero, a1
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: zero2:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.nez a0, a0, a1
+; RV64ZICOND-NEXT:    ret
+  %sel = select i1 %rc, i64 0, i64 %rs1
+  ret i64 %sel
+}
+
+define i64 @zero_singlebit1(i64 %rs1, i64 %rs2) {
+; RV64I-LABEL: zero_singlebit1:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    bexti a1, a1, 12
+; RV64I-NEXT:    addi a1, a1, -1
+; RV64I-NEXT:    and a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: zero_singlebit1:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    bexti a1, a1, 12
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: zero_singlebit1:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    lui a2, 1
+; RV64XTHEADCONDMOV-NEXT:    and a1, a1, a2
+; RV64XTHEADCONDMOV-NEXT:    th.mvnez a0, zero, a1
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: zero_singlebit1:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    bexti a1, a1, 12
+; RV64ZICOND-NEXT:    czero.nez a0, a0, a1
+; RV64ZICOND-NEXT:    ret
+  %and = and i64 %rs2, 4096
+  %rc = icmp eq i64 %and, 0
+  %sel = select i1 %rc, i64 %rs1, i64 0
+  ret i64 %sel
+}
+
+define i64 @zero_singlebit2(i64 %rs1, i64 %rs2) {
+; RV64I-LABEL: zero_singlebit2:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a1, a1, 51
+; RV64I-NEXT:    srai a1, a1, 63
+; RV64I-NEXT:    and a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: zero_singlebit2:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    bexti a1, a1, 12
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: zero_singlebit2:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    slli a1, a1, 51
+; RV64XTHEADCONDMOV-NEXT:    srai a1, a1, 63
+; RV64XTHEADCONDMOV-NEXT:    and a0, a1, a0
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: zero_singlebit2:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    bexti a1, a1, 12
+; RV64ZICOND-NEXT:    czero.eqz a0, a0, a1
+; RV64ZICOND-NEXT:    ret
+  %and = and i64 %rs2, 4096
+  %rc = icmp eq i64 %and, 0
+  %sel = select i1 %rc, i64 0, i64 %rs1
+  ret i64 %sel
+}
+
+define i64 @add1(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
+; RV64I-LABEL: add1:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    add a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: add1:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    add a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: add1:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    th.mveqz a2, zero, a0
+; RV64XTHEADCONDMOV-NEXT:    add a0, a1, a2
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: add1:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.eqz a0, a2, a0
+; RV64ZICOND-NEXT:    add a0, a1, a0
+; RV64ZICOND-NEXT:    ret
+  %add = add i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %add, i64 %rs1
+  ret i64 %sel
+}
+
+define i64 @add2(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
+; RV64I-LABEL: add2:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    add a0, a2, a0
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: add2:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    add a0, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: add2:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    th.mveqz a1, zero, a0
+; RV64XTHEADCONDMOV-NEXT:    add a0, a2, a1
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: add2:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV64ZICOND-NEXT:    add a0, a2, a0
+; RV64ZICOND-NEXT:    ret
+  %add = add i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %add, i64 %rs2
+  ret i64 %sel
+}
+
+define i64 @add3(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
+; RV64I-LABEL: add3:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    add a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: add3:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    add a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: add3:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    th.mvnez a2, zero, a0
+; RV64XTHEADCONDMOV-NEXT:    add a0, a1, a2
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: add3:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64ZICOND-NEXT:    add a0, a1, a0
+; RV64ZICOND-NEXT:    ret
+  %add = add i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %rs1, i64 %add
+  ret i64 %sel
+}
+
+define i64 @add4(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
+; RV64I-LABEL: add4:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    add a0, a2, a0
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: add4:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    add a0, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: add4:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    th.mvnez a1, zero, a0
+; RV64XTHEADCONDMOV-NEXT:    add a0, a2, a1
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: add4:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.nez a0, a1, a0
+; RV64ZICOND-NEXT:    add a0, a2, a0
+; RV64ZICOND-NEXT:    ret
+  %add = add i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %rs2, i64 %add
+  ret i64 %sel
+}
+
+define i64 @sub1(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
+; RV64I-LABEL: sub1:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: sub1:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    sub a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: sub1:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    th.mveqz a2, zero, a0
+; RV64XTHEADCONDMOV-NEXT:    sub a0, a1, a2
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: sub1:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.eqz a0, a2, a0
+; RV64ZICOND-NEXT:    sub a0, a1, a0
+; RV64ZICOND-NEXT:    ret
+  %sub = sub i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %sub, i64 %rs1
+  ret i64 %sel
+}
+
+define i64 @sub2(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
+; RV64I-LABEL: sub2:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    sub a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: sub2:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    sub a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: sub2:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    th.mvnez a2, zero, a0
+; RV64XTHEADCONDMOV-NEXT:    sub a0, a1, a2
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: sub2:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64ZICOND-NEXT:    sub a0, a1, a0
+; RV64ZICOND-NEXT:    ret
+  %sub = sub i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %rs1, i64 %sub
+  ret i64 %sel
+}
+
+define i64 @or1(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
+; RV64I-LABEL: or1:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: or1:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    or a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: or1:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    th.mveqz a2, zero, a0
+; RV64XTHEADCONDMOV-NEXT:    or a0, a1, a2
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: or1:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.eqz a0, a2, a0
+; RV64ZICOND-NEXT:    or a0, a1, a0
+; RV64ZICOND-NEXT:    ret
+  %or = or i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %or, i64 %rs1
+  ret i64 %sel
+}
+
+define i64 @or2(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
+; RV64I-LABEL: or2:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: or2:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    or a0, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: or2:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    th.mveqz a1, zero, a0
+; RV64XTHEADCONDMOV-NEXT:    or a0, a2, a1
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: or2:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV64ZICOND-NEXT:    or a0, a2, a0
+; RV64ZICOND-NEXT:    ret
+  %or = or i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %or, i64 %rs2
+  ret i64 %sel
+}
+
+define i64 @or3(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
+; RV64I-LABEL: or3:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    or a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: or3:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    or a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: or3:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    th.mvnez a2, zero, a0
+; RV64XTHEADCONDMOV-NEXT:    or a0, a1, a2
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: or3:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64ZICOND-NEXT:    or a0, a1, a0
+; RV64ZICOND-NEXT:    ret
+  %or = or i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %rs1, i64 %or
+  ret i64 %sel
+}
+
+define i64 @or4(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
+; RV64I-LABEL: or4:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: or4:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    or a0, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: or4:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    th.mvnez a1, zero, a0
+; RV64XTHEADCONDMOV-NEXT:    or a0, a2, a1
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: or4:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.nez a0, a1, a0
+; RV64ZICOND-NEXT:    or a0, a2, a0
+; RV64ZICOND-NEXT:    ret
+  %or = or i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %rs2, i64 %or
+  ret i64 %sel
+}
+
+define i64 @xor1(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
+; RV64I-LABEL: xor1:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    xor a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: xor1:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    xor a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: xor1:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    th.mveqz a2, zero, a0
+; RV64XTHEADCONDMOV-NEXT:    xor a0, a1, a2
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: xor1:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.eqz a0, a2, a0
+; RV64ZICOND-NEXT:    xor a0, a1, a0
+; RV64ZICOND-NEXT:    ret
+  %xor = xor i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %xor, i64 %rs1
+  ret i64 %sel
+}
+
+define i64 @xor2(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
+; RV64I-LABEL: xor2:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    neg a0, a0
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    xor a0, a2, a0
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: xor2:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    xor a0, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: xor2:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    th.mveqz a1, zero, a0
+; RV64XTHEADCONDMOV-NEXT:    xor a0, a2, a1
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: xor2:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV64ZICOND-NEXT:    xor a0, a2, a0
+; RV64ZICOND-NEXT:    ret
+  %xor = xor i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %xor, i64 %rs2
+  ret i64 %sel
+}
+
+define i64 @xor3(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
+; RV64I-LABEL: xor3:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    xor a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: xor3:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    xor a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: xor3:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    th.mvnez a2, zero, a0
+; RV64XTHEADCONDMOV-NEXT:    xor a0, a1, a2
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: xor3:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64ZICOND-NEXT:    xor a0, a1, a0
+; RV64ZICOND-NEXT:    ret
+  %xor = xor i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %rs1, i64 %xor
+  ret i64 %sel
+}
+
+define i64 @xor4(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
+; RV64I-LABEL: xor4:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    xor a0, a2, a0
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: xor4:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    xor a0, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: xor4:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    th.mvnez a1, zero, a0
+; RV64XTHEADCONDMOV-NEXT:    xor a0, a2, a1
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: xor4:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.nez a0, a1, a0
+; RV64ZICOND-NEXT:    xor a0, a2, a0
+; RV64ZICOND-NEXT:    ret
+  %xor = xor i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %rs2, i64 %xor
+  ret i64 %sel
+}
+
+define i64 @and1(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
+; RV64I-LABEL: and1:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    beqz a0, .LBB18_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:  .LBB18_2:
+; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: and1:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    and a2, a1, a2
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    or a0, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: and1:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    and a2, a1, a2
+; RV64XTHEADCONDMOV-NEXT:    th.mveqz a2, a1, a0
+; RV64XTHEADCONDMOV-NEXT:    mv a0, a2
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: and1:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    and a2, a1, a2
+; RV64ZICOND-NEXT:    czero.nez a0, a1, a0
+; RV64ZICOND-NEXT:    or a0, a2, a0
+; RV64ZICOND-NEXT:    ret
+  %and = and i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %and, i64 %rs1
+  ret i64 %sel
+}
+
+define i64 @and2(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
+; RV64I-LABEL: and2:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    beqz a0, .LBB19_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    and a2, a1, a2
+; RV64I-NEXT:  .LBB19_2:
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: and2:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    and a1, a1, a2
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    or a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: and2:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    and a1, a1, a2
+; RV64XTHEADCONDMOV-NEXT:    th.mveqz a1, a2, a0
+; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: and2:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    and a1, a1, a2
+; RV64ZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64ZICOND-NEXT:    or a0, a1, a0
+; RV64ZICOND-NEXT:    ret
+  %and = and i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %and, i64 %rs2
+  ret i64 %sel
+}
+
+define i64 @and3(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
+; RV64I-LABEL: and3:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    bnez a0, .LBB20_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:  .LBB20_2:
+; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: and3:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    and a2, a1, a2
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    or a0, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: and3:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    and a2, a1, a2
+; RV64XTHEADCONDMOV-NEXT:    th.mvnez a2, a1, a0
+; RV64XTHEADCONDMOV-NEXT:    mv a0, a2
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: and3:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    and a2, a1, a2
+; RV64ZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV64ZICOND-NEXT:    or a0, a2, a0
+; RV64ZICOND-NEXT:    ret
+  %and = and i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %rs1, i64 %and
+  ret i64 %sel
+}
+
+define i64 @and4(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
+; RV64I-LABEL: and4:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    bnez a0, .LBB21_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    and a2, a1, a2
+; RV64I-NEXT:  .LBB21_2:
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: and4:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    and a1, a1, a2
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    or a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: and4:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    and a1, a1, a2
+; RV64XTHEADCONDMOV-NEXT:    th.mvnez a1, a2, a0
+; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: and4:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    and a1, a1, a2
+; RV64ZICOND-NEXT:    czero.eqz a0, a2, a0
+; RV64ZICOND-NEXT:    or a0, a1, a0
+; RV64ZICOND-NEXT:    ret
+  %and = and i64 %rs1, %rs2
+  %sel = select i1 %rc, i64 %rs2, i64 %and
+  ret i64 %sel
+}
+
+define i64 @basic(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
+; RV64I-LABEL: basic:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    bnez a0, .LBB22_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    mv a1, a2
+; RV64I-NEXT:  .LBB22_2:
+; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: basic:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a2, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    or a0, a0, a2
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: basic:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    th.mveqz a1, a2, a0
+; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: basic:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.nez a2, a2, a0
+; RV64ZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV64ZICOND-NEXT:    or a0, a0, a2
+; RV64ZICOND-NEXT:    ret
+  %sel = select i1 %rc, i64 %rs1, i64 %rs2
+  ret i64 %sel
+}
+
+define i64 @seteq(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
+; RV64I-LABEL: seteq:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    beq a0, a1, .LBB23_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    mv a2, a3
+; RV64I-NEXT:  .LBB23_2:
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: seteq:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    xor a0, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a1, a3, a0
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    or a0, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: seteq:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    xor a0, a0, a1
+; RV64XTHEADCONDMOV-NEXT:    th.mvnez a2, a3, a0
+; RV64XTHEADCONDMOV-NEXT:    mv a0, a2
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: seteq:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    xor a0, a0, a1
+; RV64ZICOND-NEXT:    czero.eqz a1, a3, a0
+; RV64ZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64ZICOND-NEXT:    or a0, a0, a1
+; RV64ZICOND-NEXT:    ret
+  %rc = icmp eq i64 %a, %b
+  %sel = select i1 %rc, i64 %rs1, i64 %rs2
+  ret i64 %sel
+}
+
+define i64 @setne(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
+; RV64I-LABEL: setne:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    bne a0, a1, .LBB24_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    mv a2, a3
+; RV64I-NEXT:  .LBB24_2:
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: setne:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    xor a0, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a1, a3, a0
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    or a0, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: setne:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    xor a0, a0, a1
+; RV64XTHEADCONDMOV-NEXT:    th.mveqz a2, a3, a0
+; RV64XTHEADCONDMOV-NEXT:    mv a0, a2
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: setne:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    xor a0, a0, a1
+; RV64ZICOND-NEXT:    czero.nez a1, a3, a0
+; RV64ZICOND-NEXT:    czero.eqz a0, a2, a0
+; RV64ZICOND-NEXT:    or a0, a0, a1
+; RV64ZICOND-NEXT:    ret
+  %rc = icmp ne i64 %a, %b
+  %sel = select i1 %rc, i64 %rs1, i64 %rs2
+  ret i64 %sel
+}
+
+define i64 @setgt(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
+; RV64I-LABEL: setgt:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    blt a1, a0, .LBB25_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    mv a2, a3
+; RV64I-NEXT:  .LBB25_2:
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: setgt:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    slt a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a1, a3, a0
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    or a0, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: setgt:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    slt a0, a1, a0
+; RV64XTHEADCONDMOV-NEXT:    th.mveqz a2, a3, a0
+; RV64XTHEADCONDMOV-NEXT:    mv a0, a2
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: setgt:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    slt a0, a1, a0
+; RV64ZICOND-NEXT:    czero.nez a1, a3, a0
+; RV64ZICOND-NEXT:    czero.eqz a0, a2, a0
+; RV64ZICOND-NEXT:    or a0, a0, a1
+; RV64ZICOND-NEXT:    ret
+  %rc = icmp sgt i64 %a, %b
+  %sel = select i1 %rc, i64 %rs1, i64 %rs2
+  ret i64 %sel
+}
+
+define i64 @setge(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
+; RV64I-LABEL: setge:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    bge a0, a1, .LBB26_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    mv a2, a3
+; RV64I-NEXT:  .LBB26_2:
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: setge:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    slt a0, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a1, a3, a0
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    or a0, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: setge:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    slt a0, a0, a1
+; RV64XTHEADCONDMOV-NEXT:    th.mvnez a2, a3, a0
+; RV64XTHEADCONDMOV-NEXT:    mv a0, a2
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: setge:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    slt a0, a0, a1
+; RV64ZICOND-NEXT:    czero.eqz a1, a3, a0
+; RV64ZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64ZICOND-NEXT:    or a0, a0, a1
+; RV64ZICOND-NEXT:    ret
+  %rc = icmp sge i64 %a, %b
+  %sel = select i1 %rc, i64 %rs1, i64 %rs2
+  ret i64 %sel
+}
+
+define i64 @setlt(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
+; RV64I-LABEL: setlt:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    blt a0, a1, .LBB27_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    mv a2, a3
+; RV64I-NEXT:  .LBB27_2:
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: setlt:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    slt a0, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a1, a3, a0
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    or a0, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: setlt:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    slt a0, a0, a1
+; RV64XTHEADCONDMOV-NEXT:    th.mveqz a2, a3, a0
+; RV64XTHEADCONDMOV-NEXT:    mv a0, a2
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: setlt:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    slt a0, a0, a1
+; RV64ZICOND-NEXT:    czero.nez a1, a3, a0
+; RV64ZICOND-NEXT:    czero.eqz a0, a2, a0
+; RV64ZICOND-NEXT:    or a0, a0, a1
+; RV64ZICOND-NEXT:    ret
+  %rc = icmp slt i64 %a, %b
+  %sel = select i1 %rc, i64 %rs1, i64 %rs2
+  ret i64 %sel
+}
+
+define i64 @setle(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
+; RV64I-LABEL: setle:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    bge a1, a0, .LBB28_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    mv a2, a3
+; RV64I-NEXT:  .LBB28_2:
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: setle:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    slt a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a1, a3, a0
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    or a0, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: setle:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    slt a0, a1, a0
+; RV64XTHEADCONDMOV-NEXT:    th.mvnez a2, a3, a0
+; RV64XTHEADCONDMOV-NEXT:    mv a0, a2
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: setle:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    slt a0, a1, a0
+; RV64ZICOND-NEXT:    czero.eqz a1, a3, a0
+; RV64ZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64ZICOND-NEXT:    or a0, a0, a1
+; RV64ZICOND-NEXT:    ret
+  %rc = icmp sle i64 %a, %b
+  %sel = select i1 %rc, i64 %rs1, i64 %rs2
+  ret i64 %sel
+}
+
+define i64 @setugt(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
+; RV64I-LABEL: setugt:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    bltu a1, a0, .LBB29_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    mv a2, a3
+; RV64I-NEXT:  .LBB29_2:
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: setugt:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    sltu a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a1, a3, a0
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    or a0, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: setugt:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    sltu a0, a1, a0
+; RV64XTHEADCONDMOV-NEXT:    th.mveqz a2, a3, a0
+; RV64XTHEADCONDMOV-NEXT:    mv a0, a2
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: setugt:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    sltu a0, a1, a0
+; RV64ZICOND-NEXT:    czero.nez a1, a3, a0
+; RV64ZICOND-NEXT:    czero.eqz a0, a2, a0
+; RV64ZICOND-NEXT:    or a0, a0, a1
+; RV64ZICOND-NEXT:    ret
+  %rc = icmp ugt i64 %a, %b
+  %sel = select i1 %rc, i64 %rs1, i64 %rs2
+  ret i64 %sel
+}
+
+define i64 @setuge(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
+; RV64I-LABEL: setuge:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    bgeu a0, a1, .LBB30_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    mv a2, a3
+; RV64I-NEXT:  .LBB30_2:
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: setuge:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    sltu a0, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a1, a3, a0
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    or a0, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: setuge:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    sltu a0, a0, a1
+; RV64XTHEADCONDMOV-NEXT:    th.mvnez a2, a3, a0
+; RV64XTHEADCONDMOV-NEXT:    mv a0, a2
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: setuge:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    sltu a0, a0, a1
+; RV64ZICOND-NEXT:    czero.eqz a1, a3, a0
+; RV64ZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64ZICOND-NEXT:    or a0, a0, a1
+; RV64ZICOND-NEXT:    ret
+  %rc = icmp uge i64 %a, %b
+  %sel = select i1 %rc, i64 %rs1, i64 %rs2
+  ret i64 %sel
+}
+
+define i64 @setult(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
+; RV64I-LABEL: setult:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    bltu a0, a1, .LBB31_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    mv a2, a3
+; RV64I-NEXT:  .LBB31_2:
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: setult:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    sltu a0, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a1, a3, a0
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    or a0, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: setult:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    sltu a0, a0, a1
+; RV64XTHEADCONDMOV-NEXT:    th.mveqz a2, a3, a0
+; RV64XTHEADCONDMOV-NEXT:    mv a0, a2
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: setult:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    sltu a0, a0, a1
+; RV64ZICOND-NEXT:    czero.nez a1, a3, a0
+; RV64ZICOND-NEXT:    czero.eqz a0, a2, a0
+; RV64ZICOND-NEXT:    or a0, a0, a1
+; RV64ZICOND-NEXT:    ret
+  %rc = icmp ult i64 %a, %b
+  %sel = select i1 %rc, i64 %rs1, i64 %rs2
+  ret i64 %sel
+}
+
+define i64 @setule(i64 %a, i64 %b, i64 %rs1, i64 %rs2) {
+; RV64I-LABEL: setule:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    bgeu a1, a0, .LBB32_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    mv a2, a3
+; RV64I-NEXT:  .LBB32_2:
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: setule:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    sltu a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a1, a3, a0
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    or a0, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: setule:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    sltu a0, a1, a0
+; RV64XTHEADCONDMOV-NEXT:    th.mvnez a2, a3, a0
+; RV64XTHEADCONDMOV-NEXT:    mv a0, a2
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: setule:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    sltu a0, a1, a0
+; RV64ZICOND-NEXT:    czero.eqz a1, a3, a0
+; RV64ZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64ZICOND-NEXT:    or a0, a0, a1
+; RV64ZICOND-NEXT:    ret
+  %rc = icmp ule i64 %a, %b
+  %sel = select i1 %rc, i64 %rs1, i64 %rs2
+  ret i64 %sel
+}
+
+define i64 @seteq_zero(i64 %a, i64 %rs1, i64 %rs2) {
+; RV64I-LABEL: seteq_zero:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    beqz a0, .LBB33_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    mv a1, a2
+; RV64I-NEXT:  .LBB33_2:
+; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: seteq_zero:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a2, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    or a0, a0, a2
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: seteq_zero:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    th.mvnez a1, a2, a0
+; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: seteq_zero:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.eqz a2, a2, a0
+; RV64ZICOND-NEXT:    czero.nez a0, a1, a0
+; RV64ZICOND-NEXT:    or a0, a0, a2
+; RV64ZICOND-NEXT:    ret
+  %rc = icmp eq i64 %a, 0
+  %sel = select i1 %rc, i64 %rs1, i64 %rs2
+  ret i64 %sel
+}
+
+define i64 @setne_zero(i64 %a, i64 %rs1, i64 %rs2) {
+; RV64I-LABEL: setne_zero:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    bnez a0, .LBB34_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    mv a1, a2
+; RV64I-NEXT:  .LBB34_2:
+; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: setne_zero:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a2, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    or a0, a0, a2
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: setne_zero:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    th.mveqz a1, a2, a0
+; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: setne_zero:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.nez a2, a2, a0
+; RV64ZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV64ZICOND-NEXT:    or a0, a0, a2
+; RV64ZICOND-NEXT:    ret
+  %rc = icmp ne i64 %a, 0
+  %sel = select i1 %rc, i64 %rs1, i64 %rs2
+  ret i64 %sel
+}
+
+define i64 @seteq_constant(i64 %a, i64 %rs1, i64 %rs2) {
+; RV64I-LABEL: seteq_constant:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a3, 123
+; RV64I-NEXT:    beq a0, a3, .LBB35_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    mv a1, a2
+; RV64I-NEXT:  .LBB35_2:
+; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: seteq_constant:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    addi a0, a0, -123
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a2, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    or a0, a0, a2
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: seteq_constant:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    addi a0, a0, -123
+; RV64XTHEADCONDMOV-NEXT:    th.mvnez a1, a2, a0
+; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: seteq_constant:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    addi a0, a0, -123
+; RV64ZICOND-NEXT:    czero.eqz a2, a2, a0
+; RV64ZICOND-NEXT:    czero.nez a0, a1, a0
+; RV64ZICOND-NEXT:    or a0, a0, a2
+; RV64ZICOND-NEXT:    ret
+  %rc = icmp eq i64 %a, 123
+  %sel = select i1 %rc, i64 %rs1, i64 %rs2
+  ret i64 %sel
+}
+
+define i64 @setne_constant(i64 %a, i64 %rs1, i64 %rs2) {
+; RV64I-LABEL: setne_constant:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a3, 456
+; RV64I-NEXT:    bne a0, a3, .LBB36_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    mv a1, a2
+; RV64I-NEXT:  .LBB36_2:
+; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: setne_constant:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    addi a0, a0, -456
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a2, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    or a0, a0, a2
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: setne_constant:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    addi a0, a0, -456
+; RV64XTHEADCONDMOV-NEXT:    th.mveqz a1, a2, a0
+; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: setne_constant:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    addi a0, a0, -456
+; RV64ZICOND-NEXT:    czero.nez a2, a2, a0
+; RV64ZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV64ZICOND-NEXT:    or a0, a0, a2
+; RV64ZICOND-NEXT:    ret
+  %rc = icmp ne i64 %a, 456
+  %sel = select i1 %rc, i64 %rs1, i64 %rs2
+  ret i64 %sel
+}
+
+define i64 @seteq_2048(i64 %a, i64 %rs1, i64 %rs2) {
+; RV64I-LABEL: seteq_2048:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    bseti a3, zero, 11
+; RV64I-NEXT:    beq a0, a3, .LBB37_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    mv a1, a2
+; RV64I-NEXT:  .LBB37_2:
+; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: seteq_2048:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    addi a0, a0, -2048
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a2, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    or a0, a0, a2
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: seteq_2048:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    addi a0, a0, -2048
+; RV64XTHEADCONDMOV-NEXT:    th.mvnez a1, a2, a0
+; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: seteq_2048:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    addi a0, a0, -2048
+; RV64ZICOND-NEXT:    czero.eqz a2, a2, a0
+; RV64ZICOND-NEXT:    czero.nez a0, a1, a0
+; RV64ZICOND-NEXT:    or a0, a0, a2
+; RV64ZICOND-NEXT:    ret
+  %rc = icmp eq i64 %a, 2048
+  %sel = select i1 %rc, i64 %rs1, i64 %rs2
+  ret i64 %sel
+}
+
+define i64 @seteq_neg2048(i64 %a, i64 %rs1, i64 %rs2) {
+; RV64I-LABEL: seteq_neg2048:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a3, -2048
+; RV64I-NEXT:    beq a0, a3, .LBB38_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    mv a1, a2
+; RV64I-NEXT:  .LBB38_2:
+; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: seteq_neg2048:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    xori a0, a0, -2048
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a2, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    or a0, a0, a2
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: seteq_neg2048:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    xori a0, a0, -2048
+; RV64XTHEADCONDMOV-NEXT:    th.mvnez a1, a2, a0
+; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: seteq_neg2048:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    xori a0, a0, -2048
+; RV64ZICOND-NEXT:    czero.eqz a2, a2, a0
+; RV64ZICOND-NEXT:    czero.nez a0, a1, a0
+; RV64ZICOND-NEXT:    or a0, a0, a2
+; RV64ZICOND-NEXT:    ret
+  %rc = icmp eq i64 %a, -2048
+  %sel = select i1 %rc, i64 %rs1, i64 %rs2
+  ret i64 %sel
+}
+
+define i64 @setne_neg2048(i64 %a, i64 %rs1, i64 %rs2) {
+; RV64I-LABEL: setne_neg2048:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    li a3, -2048
+; RV64I-NEXT:    bne a0, a3, .LBB39_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    mv a1, a2
+; RV64I-NEXT:  .LBB39_2:
+; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: setne_neg2048:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    xori a0, a0, -2048
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a2, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    or a0, a0, a2
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: setne_neg2048:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    xori a0, a0, -2048
+; RV64XTHEADCONDMOV-NEXT:    th.mveqz a1, a2, a0
+; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: setne_neg2048:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    xori a0, a0, -2048
+; RV64ZICOND-NEXT:    czero.nez a2, a2, a0
+; RV64ZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV64ZICOND-NEXT:    or a0, a0, a2
+; RV64ZICOND-NEXT:    ret
+  %rc = icmp ne i64 %a, -2048
+  %sel = select i1 %rc, i64 %rs1, i64 %rs2
+  ret i64 %sel
+}
+
+define i64 @zero1_seteq(i64 %a, i64 %b, i64 %rs1) {
+; RV64I-LABEL: zero1_seteq:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    snez a0, a0
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: zero1_seteq:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    xor a0, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: zero1_seteq:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    xor a0, a0, a1
+; RV64XTHEADCONDMOV-NEXT:    th.mvnez a2, zero, a0
+; RV64XTHEADCONDMOV-NEXT:    mv a0, a2
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: zero1_seteq:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    xor a0, a0, a1
+; RV64ZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64ZICOND-NEXT:    ret
+  %rc = icmp eq i64 %a, %b
+  %sel = select i1 %rc, i64 %rs1, i64 0
+  ret i64 %sel
+}
+
+define i64 @zero2_seteq(i64 %a, i64 %b, i64 %rs1) {
+; RV64I-LABEL: zero2_seteq:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: zero2_seteq:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    xor a0, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: zero2_seteq:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    xor a0, a0, a1
+; RV64XTHEADCONDMOV-NEXT:    th.mveqz a2, zero, a0
+; RV64XTHEADCONDMOV-NEXT:    mv a0, a2
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: zero2_seteq:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    xor a0, a0, a1
+; RV64ZICOND-NEXT:    czero.eqz a0, a2, a0
+; RV64ZICOND-NEXT:    ret
+  %rc = icmp eq i64 %a, %b
+  %sel = select i1 %rc, i64 0, i64 %rs1
+  ret i64 %sel
+}
+
+define i64 @zero1_setne(i64 %a, i64 %b, i64 %rs1) {
+; RV64I-LABEL: zero1_setne:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: zero1_setne:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    xor a0, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: zero1_setne:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    xor a0, a0, a1
+; RV64XTHEADCONDMOV-NEXT:    th.mveqz a2, zero, a0
+; RV64XTHEADCONDMOV-NEXT:    mv a0, a2
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: zero1_setne:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    xor a0, a0, a1
+; RV64ZICOND-NEXT:    czero.eqz a0, a2, a0
+; RV64ZICOND-NEXT:    ret
+  %rc = icmp ne i64 %a, %b
+  %sel = select i1 %rc, i64 %rs1, i64 0
+  ret i64 %sel
+}
+
+define i64 @zero2_setne(i64 %a, i64 %b, i64 %rs1) {
+; RV64I-LABEL: zero2_setne:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    snez a0, a0
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: zero2_setne:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    xor a0, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: zero2_setne:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    xor a0, a0, a1
+; RV64XTHEADCONDMOV-NEXT:    th.mvnez a2, zero, a0
+; RV64XTHEADCONDMOV-NEXT:    mv a0, a2
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: zero2_setne:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    xor a0, a0, a1
+; RV64ZICOND-NEXT:    czero.nez a0, a2, a0
+; RV64ZICOND-NEXT:    ret
+  %rc = icmp ne i64 %a, %b
+  %sel = select i1 %rc, i64 0, i64 %rs1
+  ret i64 %sel
+}
+
+define i64 @zero1_seteq_zero(i64 %a, i64 %rs1) {
+; RV64I-LABEL: zero1_seteq_zero:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    snez a0, a0
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: zero1_seteq_zero:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: zero1_seteq_zero:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    th.mvnez a1, zero, a0
+; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: zero1_seteq_zero:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.nez a0, a1, a0
+; RV64ZICOND-NEXT:    ret
+  %rc = icmp eq i64 %a, 0
+  %sel = select i1 %rc, i64 %rs1, i64 0
+  ret i64 %sel
+}
+
+define i64 @zero2_seteq_zero(i64 %a, i64 %rs1) {
+; RV64I-LABEL: zero2_seteq_zero:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: zero2_seteq_zero:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: zero2_seteq_zero:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    th.mveqz a1, zero, a0
+; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: zero2_seteq_zero:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV64ZICOND-NEXT:    ret
+  %rc = icmp eq i64 %a, 0
+  %sel = select i1 %rc, i64 0, i64 %rs1
+  ret i64 %sel
+}
+
+define i64 @zero1_setne_zero(i64 %a, i64 %rs1) {
+; RV64I-LABEL: zero1_setne_zero:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: zero1_setne_zero:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: zero1_setne_zero:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    th.mveqz a1, zero, a0
+; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: zero1_setne_zero:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV64ZICOND-NEXT:    ret
+  %rc = icmp ne i64 %a, 0
+  %sel = select i1 %rc, i64 %rs1, i64 0
+  ret i64 %sel
+}
+
+define i64 @zero2_setne_zero(i64 %a, i64 %rs1) {
+; RV64I-LABEL: zero2_setne_zero:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    snez a0, a0
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: zero2_setne_zero:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: zero2_setne_zero:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    th.mvnez a1, zero, a0
+; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: zero2_setne_zero:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    czero.nez a0, a1, a0
+; RV64ZICOND-NEXT:    ret
+  %rc = icmp ne i64 %a, 0
+  %sel = select i1 %rc, i64 0, i64 %rs1
+  ret i64 %sel
+}
+
+define i64 @zero1_seteq_constant(i64 %a, i64 %rs1) {
+; RV64I-LABEL: zero1_seteq_constant:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a0, a0, 231
+; RV64I-NEXT:    snez a0, a0
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: zero1_seteq_constant:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    addi a0, a0, 231
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: zero1_seteq_constant:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    addi a0, a0, 231
+; RV64XTHEADCONDMOV-NEXT:    th.mvnez a1, zero, a0
+; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: zero1_seteq_constant:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    addi a0, a0, 231
+; RV64ZICOND-NEXT:    czero.nez a0, a1, a0
+; RV64ZICOND-NEXT:    ret
+  %rc = icmp eq i64 %a, -231
+  %sel = select i1 %rc, i64 %rs1, i64 0
+  ret i64 %sel
+}
+
+define i64 @zero2_seteq_constant(i64 %a, i64 %rs1) {
+; RV64I-LABEL: zero2_seteq_constant:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a0, a0, -546
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: zero2_seteq_constant:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    addi a0, a0, -546
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: zero2_seteq_constant:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    addi a0, a0, -546
+; RV64XTHEADCONDMOV-NEXT:    th.mveqz a1, zero, a0
+; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: zero2_seteq_constant:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    addi a0, a0, -546
+; RV64ZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV64ZICOND-NEXT:    ret
+  %rc = icmp eq i64 %a, 546
+  %sel = select i1 %rc, i64 0, i64 %rs1
+  ret i64 %sel
+}
+
+define i64 @zero1_setne_constant(i64 %a, i64 %rs1) {
+; RV64I-LABEL: zero1_setne_constant:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a0, a0, -321
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: zero1_setne_constant:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    addi a0, a0, -321
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: zero1_setne_constant:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    addi a0, a0, -321
+; RV64XTHEADCONDMOV-NEXT:    th.mveqz a1, zero, a0
+; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: zero1_setne_constant:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    addi a0, a0, -321
+; RV64ZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV64ZICOND-NEXT:    ret
+  %rc = icmp ne i64 %a, 321
+  %sel = select i1 %rc, i64 %rs1, i64 0
+  ret i64 %sel
+}
+
+define i64 @zero2_setne_constant(i64 %a, i64 %rs1) {
+; RV64I-LABEL: zero2_setne_constant:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi a0, a0, 654
+; RV64I-NEXT:    snez a0, a0
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: zero2_setne_constant:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    addi a0, a0, 654
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: zero2_setne_constant:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    addi a0, a0, 654
+; RV64XTHEADCONDMOV-NEXT:    th.mvnez a1, zero, a0
+; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: zero2_setne_constant:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    addi a0, a0, 654
+; RV64ZICOND-NEXT:    czero.nez a0, a1, a0
+; RV64ZICOND-NEXT:    ret
+  %rc = icmp ne i64 %a, -654
+  %sel = select i1 %rc, i64 0, i64 %rs1
+  ret i64 %sel
+}
+
+define i64 @zero1_seteq_neg2048(i64 %a, i64 %rs1) {
+; RV64I-LABEL: zero1_seteq_neg2048:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    xori a0, a0, -2048
+; RV64I-NEXT:    snez a0, a0
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: zero1_seteq_neg2048:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    xori a0, a0, -2048
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: zero1_seteq_neg2048:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    xori a0, a0, -2048
+; RV64XTHEADCONDMOV-NEXT:    th.mvnez a1, zero, a0
+; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: zero1_seteq_neg2048:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    xori a0, a0, -2048
+; RV64ZICOND-NEXT:    czero.nez a0, a1, a0
+; RV64ZICOND-NEXT:    ret
+  %rc = icmp eq i64 %a, -2048
+  %sel = select i1 %rc, i64 %rs1, i64 0
+  ret i64 %sel
+}
+
+define i64 @zero2_seteq_neg2048(i64 %a, i64 %rs1) {
+; RV64I-LABEL: zero2_seteq_neg2048:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    xori a0, a0, -2048
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: zero2_seteq_neg2048:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    xori a0, a0, -2048
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: zero2_seteq_neg2048:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    xori a0, a0, -2048
+; RV64XTHEADCONDMOV-NEXT:    th.mveqz a1, zero, a0
+; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: zero2_seteq_neg2048:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    xori a0, a0, -2048
+; RV64ZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV64ZICOND-NEXT:    ret
+  %rc = icmp eq i64 %a, -2048
+  %sel = select i1 %rc, i64 0, i64 %rs1
+  ret i64 %sel
+}
+
+define i64 @zero1_setne_neg2048(i64 %a, i64 %rs1) {
+; RV64I-LABEL: zero1_setne_neg2048:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    xori a0, a0, -2048
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: zero1_setne_neg2048:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    xori a0, a0, -2048
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: zero1_setne_neg2048:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    xori a0, a0, -2048
+; RV64XTHEADCONDMOV-NEXT:    th.mveqz a1, zero, a0
+; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: zero1_setne_neg2048:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    xori a0, a0, -2048
+; RV64ZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV64ZICOND-NEXT:    ret
+  %rc = icmp ne i64 %a, -2048
+  %sel = select i1 %rc, i64 %rs1, i64 0
+  ret i64 %sel
+}
+
+define i64 @zero2_setne_neg2048(i64 %a, i64 %rs1) {
+; RV64I-LABEL: zero2_setne_neg2048:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    xori a0, a0, -2048
+; RV64I-NEXT:    snez a0, a0
+; RV64I-NEXT:    addi a0, a0, -1
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: zero2_setne_neg2048:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    xori a0, a0, -2048
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a1, a0
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: zero2_setne_neg2048:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    xori a0, a0, -2048
+; RV64XTHEADCONDMOV-NEXT:    th.mvnez a1, zero, a0
+; RV64XTHEADCONDMOV-NEXT:    mv a0, a1
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: zero2_setne_neg2048:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    xori a0, a0, -2048
+; RV64ZICOND-NEXT:    czero.nez a0, a1, a0
+; RV64ZICOND-NEXT:    ret
+  %rc = icmp ne i64 %a, -2048
+  %sel = select i1 %rc, i64 0, i64 %rs1
+  ret i64 %sel
+}
+
+define void @sextw_removal_maskc(i1 %c, i32 signext %arg, i32 signext %arg1) nounwind {
+; RV64I-LABEL: sextw_removal_maskc:
+; RV64I:       # %bb.0: # %bb
+; RV64I-NEXT:    addi sp, sp, -32
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    mv s0, a2
+; RV64I-NEXT:    slli a0, a0, 63
+; RV64I-NEXT:    srai a0, a0, 63
+; RV64I-NEXT:    and s1, a0, a1
+; RV64I-NEXT:  .LBB56_1: # %bb2
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    mv a0, s1
+; RV64I-NEXT:    call bar
+; RV64I-NEXT:    sllw s1, s1, s0
+; RV64I-NEXT:    bnez a0, .LBB56_1
+; RV64I-NEXT:  # %bb.2: # %bb7
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 32
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: sextw_removal_maskc:
+; RV64XVENTANACONDOPS:       # %bb.0: # %bb
+; RV64XVENTANACONDOPS-NEXT:    addi sp, sp, -32
+; RV64XVENTANACONDOPS-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64XVENTANACONDOPS-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64XVENTANACONDOPS-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64XVENTANACONDOPS-NEXT:    mv s0, a2
+; RV64XVENTANACONDOPS-NEXT:    andi a0, a0, 1
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc s1, a1, a0
+; RV64XVENTANACONDOPS-NEXT:  .LBB56_1: # %bb2
+; RV64XVENTANACONDOPS-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64XVENTANACONDOPS-NEXT:    mv a0, s1
+; RV64XVENTANACONDOPS-NEXT:    call bar
+; RV64XVENTANACONDOPS-NEXT:    sllw s1, s1, s0
+; RV64XVENTANACONDOPS-NEXT:    bnez a0, .LBB56_1
+; RV64XVENTANACONDOPS-NEXT:  # %bb.2: # %bb7
+; RV64XVENTANACONDOPS-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64XVENTANACONDOPS-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64XVENTANACONDOPS-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64XVENTANACONDOPS-NEXT:    addi sp, sp, 32
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: sextw_removal_maskc:
+; RV64XTHEADCONDMOV:       # %bb.0: # %bb
+; RV64XTHEADCONDMOV-NEXT:    addi sp, sp, -32
+; RV64XTHEADCONDMOV-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64XTHEADCONDMOV-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64XTHEADCONDMOV-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64XTHEADCONDMOV-NEXT:    mv s0, a2
+; RV64XTHEADCONDMOV-NEXT:    mv s1, a1
+; RV64XTHEADCONDMOV-NEXT:    andi a0, a0, 1
+; RV64XTHEADCONDMOV-NEXT:    th.mveqz s1, zero, a0
+; RV64XTHEADCONDMOV-NEXT:  .LBB56_1: # %bb2
+; RV64XTHEADCONDMOV-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64XTHEADCONDMOV-NEXT:    sext.w a0, s1
+; RV64XTHEADCONDMOV-NEXT:    call bar
+; RV64XTHEADCONDMOV-NEXT:    sllw s1, s1, s0
+; RV64XTHEADCONDMOV-NEXT:    bnez a0, .LBB56_1
+; RV64XTHEADCONDMOV-NEXT:  # %bb.2: # %bb7
+; RV64XTHEADCONDMOV-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64XTHEADCONDMOV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64XTHEADCONDMOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64XTHEADCONDMOV-NEXT:    addi sp, sp, 32
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: sextw_removal_maskc:
+; RV64ZICOND:       # %bb.0: # %bb
+; RV64ZICOND-NEXT:    addi sp, sp, -32
+; RV64ZICOND-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64ZICOND-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64ZICOND-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64ZICOND-NEXT:    mv s0, a2
+; RV64ZICOND-NEXT:    andi a0, a0, 1
+; RV64ZICOND-NEXT:    czero.eqz s1, a1, a0
+; RV64ZICOND-NEXT:  .LBB56_1: # %bb2
+; RV64ZICOND-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64ZICOND-NEXT:    mv a0, s1
+; RV64ZICOND-NEXT:    call bar
+; RV64ZICOND-NEXT:    sllw s1, s1, s0
+; RV64ZICOND-NEXT:    bnez a0, .LBB56_1
+; RV64ZICOND-NEXT:  # %bb.2: # %bb7
+; RV64ZICOND-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64ZICOND-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64ZICOND-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64ZICOND-NEXT:    addi sp, sp, 32
+; RV64ZICOND-NEXT:    ret
+bb:
+  %i = select i1 %c, i32 %arg, i32 0
+  br label %bb2
+
+bb2:                                              ; preds = %bb2, %bb
+  %i3 = phi i32 [ %i, %bb ], [ %i5, %bb2 ]
+  %i4 = tail call signext i32 @bar(i32 signext %i3)
+  %i5 = shl i32 %i3, %arg1
+  %i6 = icmp eq i32 %i4, 0
+  br i1 %i6, label %bb7, label %bb2
+
+bb7:                                              ; preds = %bb2
+  ret void
+}
+declare signext i32 @bar(i32 signext)
+
+define void @sextw_removal_maskcn(i1 %c, i32 signext %arg, i32 signext %arg1) nounwind {
+; RV64I-LABEL: sextw_removal_maskcn:
+; RV64I:       # %bb.0: # %bb
+; RV64I-NEXT:    addi sp, sp, -32
+; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    mv s0, a2
+; RV64I-NEXT:    andi a0, a0, 1
+; RV64I-NEXT:    addiw a0, a0, -1
+; RV64I-NEXT:    and s1, a0, a1
+; RV64I-NEXT:  .LBB57_1: # %bb2
+; RV64I-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64I-NEXT:    mv a0, s1
+; RV64I-NEXT:    call bar
+; RV64I-NEXT:    sllw s1, s1, s0
+; RV64I-NEXT:    bnez a0, .LBB57_1
+; RV64I-NEXT:  # %bb.2: # %bb7
+; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 32
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: sextw_removal_maskcn:
+; RV64XVENTANACONDOPS:       # %bb.0: # %bb
+; RV64XVENTANACONDOPS-NEXT:    addi sp, sp, -32
+; RV64XVENTANACONDOPS-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64XVENTANACONDOPS-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64XVENTANACONDOPS-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64XVENTANACONDOPS-NEXT:    mv s0, a2
+; RV64XVENTANACONDOPS-NEXT:    andi a0, a0, 1
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn s1, a1, a0
+; RV64XVENTANACONDOPS-NEXT:  .LBB57_1: # %bb2
+; RV64XVENTANACONDOPS-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64XVENTANACONDOPS-NEXT:    mv a0, s1
+; RV64XVENTANACONDOPS-NEXT:    call bar
+; RV64XVENTANACONDOPS-NEXT:    sllw s1, s1, s0
+; RV64XVENTANACONDOPS-NEXT:    bnez a0, .LBB57_1
+; RV64XVENTANACONDOPS-NEXT:  # %bb.2: # %bb7
+; RV64XVENTANACONDOPS-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64XVENTANACONDOPS-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64XVENTANACONDOPS-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64XVENTANACONDOPS-NEXT:    addi sp, sp, 32
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: sextw_removal_maskcn:
+; RV64XTHEADCONDMOV:       # %bb.0: # %bb
+; RV64XTHEADCONDMOV-NEXT:    addi sp, sp, -32
+; RV64XTHEADCONDMOV-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64XTHEADCONDMOV-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64XTHEADCONDMOV-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64XTHEADCONDMOV-NEXT:    mv s0, a2
+; RV64XTHEADCONDMOV-NEXT:    mv s1, a1
+; RV64XTHEADCONDMOV-NEXT:    andi a0, a0, 1
+; RV64XTHEADCONDMOV-NEXT:    th.mvnez s1, zero, a0
+; RV64XTHEADCONDMOV-NEXT:  .LBB57_1: # %bb2
+; RV64XTHEADCONDMOV-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64XTHEADCONDMOV-NEXT:    sext.w a0, s1
+; RV64XTHEADCONDMOV-NEXT:    call bar
+; RV64XTHEADCONDMOV-NEXT:    sllw s1, s1, s0
+; RV64XTHEADCONDMOV-NEXT:    bnez a0, .LBB57_1
+; RV64XTHEADCONDMOV-NEXT:  # %bb.2: # %bb7
+; RV64XTHEADCONDMOV-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64XTHEADCONDMOV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64XTHEADCONDMOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64XTHEADCONDMOV-NEXT:    addi sp, sp, 32
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: sextw_removal_maskcn:
+; RV64ZICOND:       # %bb.0: # %bb
+; RV64ZICOND-NEXT:    addi sp, sp, -32
+; RV64ZICOND-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64ZICOND-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64ZICOND-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64ZICOND-NEXT:    mv s0, a2
+; RV64ZICOND-NEXT:    andi a0, a0, 1
+; RV64ZICOND-NEXT:    czero.nez s1, a1, a0
+; RV64ZICOND-NEXT:  .LBB57_1: # %bb2
+; RV64ZICOND-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64ZICOND-NEXT:    mv a0, s1
+; RV64ZICOND-NEXT:    call bar
+; RV64ZICOND-NEXT:    sllw s1, s1, s0
+; RV64ZICOND-NEXT:    bnez a0, .LBB57_1
+; RV64ZICOND-NEXT:  # %bb.2: # %bb7
+; RV64ZICOND-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64ZICOND-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64ZICOND-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64ZICOND-NEXT:    addi sp, sp, 32
+; RV64ZICOND-NEXT:    ret
+bb:
+  %i = select i1 %c, i32 0, i32 %arg
+  br label %bb2
+
+bb2:                                              ; preds = %bb2, %bb
+  %i3 = phi i32 [ %i, %bb ], [ %i5, %bb2 ]
+  %i4 = tail call signext i32 @bar(i32 signext %i3)
+  %i5 = shl i32 %i3, %arg1
+  %i6 = icmp eq i32 %i4, 0
+  br i1 %i6, label %bb7, label %bb2
+
+bb7:                                              ; preds = %bb2
+  ret void
+}
+
+define i32 @setune_32(float %a, float %b, i32 %rs1, i32 %rs2) {
+; RV64I-LABEL: setune_32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    feq.s a2, fa0, fa1
+; RV64I-NEXT:    beqz a2, .LBB58_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:  .LBB58_2:
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: setune_32:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    feq.s a2, fa0, fa1
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a1, a1, a2
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a0, a2
+; RV64XVENTANACONDOPS-NEXT:    or a0, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: setune_32:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    feq.s a2, fa0, fa1
+; RV64XTHEADCONDMOV-NEXT:    th.mvnez a0, a1, a2
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: setune_32:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    feq.s a2, fa0, fa1
+; RV64ZICOND-NEXT:    czero.eqz a1, a1, a2
+; RV64ZICOND-NEXT:    czero.nez a0, a0, a2
+; RV64ZICOND-NEXT:    or a0, a0, a1
+; RV64ZICOND-NEXT:    ret
+  %rc = fcmp une float %a, %b
+  %sel = select i1 %rc, i32 %rs1, i32 %rs2
+  ret i32 %sel
+}
+
+define i64 @setune_64(float %a, float %b, i64 %rs1, i64 %rs2) {
+; RV64I-LABEL: setune_64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    feq.s a2, fa0, fa1
+; RV64I-NEXT:    beqz a2, .LBB59_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:  .LBB59_2:
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: setune_64:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    feq.s a2, fa0, fa1
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a1, a1, a2
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn a0, a0, a2
+; RV64XVENTANACONDOPS-NEXT:    or a0, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: setune_64:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    feq.s a2, fa0, fa1
+; RV64XTHEADCONDMOV-NEXT:    th.mvnez a0, a1, a2
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: setune_64:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    feq.s a2, fa0, fa1
+; RV64ZICOND-NEXT:    czero.eqz a1, a1, a2
+; RV64ZICOND-NEXT:    czero.nez a0, a0, a2
+; RV64ZICOND-NEXT:    or a0, a0, a1
+; RV64ZICOND-NEXT:    ret
+  %rc = fcmp une float %a, %b
+  %sel = select i1 %rc, i64 %rs1, i64 %rs2
+  ret i64 %sel
+}
+
+; Test that we can ComputeNumSignBits across basic blocks when the live out is
+; RISCVISD::SELECT_CC. There should be no slli+srai or sext.h in the output.
+define signext i16 @numsignbits(i16 signext %0, i16 signext %1, i16 signext %2, i16 signext %3) nounwind {
+; RV64I-LABEL: numsignbits:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    mv s0, a3
+; RV64I-NEXT:    beqz a0, .LBB60_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    mv s0, a2
+; RV64I-NEXT:  .LBB60_2:
+; RV64I-NEXT:    beqz a1, .LBB60_4
+; RV64I-NEXT:  # %bb.3:
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    call bat
+; RV64I-NEXT:  .LBB60_4:
+; RV64I-NEXT:    mv a0, s0
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: numsignbits:
+; RV64XVENTANACONDOPS:       # %bb.0:
+; RV64XVENTANACONDOPS-NEXT:    addi sp, sp, -16
+; RV64XVENTANACONDOPS-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64XVENTANACONDOPS-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a2, a2, a0
+; RV64XVENTANACONDOPS-NEXT:    vt.maskcn s0, a3, a0
+; RV64XVENTANACONDOPS-NEXT:    or s0, s0, a2
+; RV64XVENTANACONDOPS-NEXT:    beqz a1, .LBB60_2
+; RV64XVENTANACONDOPS-NEXT:  # %bb.1:
+; RV64XVENTANACONDOPS-NEXT:    mv a0, s0
+; RV64XVENTANACONDOPS-NEXT:    call bat
+; RV64XVENTANACONDOPS-NEXT:  .LBB60_2:
+; RV64XVENTANACONDOPS-NEXT:    mv a0, s0
+; RV64XVENTANACONDOPS-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64XVENTANACONDOPS-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64XVENTANACONDOPS-NEXT:    addi sp, sp, 16
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: numsignbits:
+; RV64XTHEADCONDMOV:       # %bb.0:
+; RV64XTHEADCONDMOV-NEXT:    addi sp, sp, -16
+; RV64XTHEADCONDMOV-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64XTHEADCONDMOV-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
+; RV64XTHEADCONDMOV-NEXT:    th.mveqz a2, a3, a0
+; RV64XTHEADCONDMOV-NEXT:    sext.w s0, a2
+; RV64XTHEADCONDMOV-NEXT:    beqz a1, .LBB60_2
+; RV64XTHEADCONDMOV-NEXT:  # %bb.1:
+; RV64XTHEADCONDMOV-NEXT:    mv a0, s0
+; RV64XTHEADCONDMOV-NEXT:    call bat
+; RV64XTHEADCONDMOV-NEXT:  .LBB60_2:
+; RV64XTHEADCONDMOV-NEXT:    mv a0, s0
+; RV64XTHEADCONDMOV-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64XTHEADCONDMOV-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64XTHEADCONDMOV-NEXT:    addi sp, sp, 16
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: numsignbits:
+; RV64ZICOND:       # %bb.0:
+; RV64ZICOND-NEXT:    addi sp, sp, -16
+; RV64ZICOND-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64ZICOND-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
+; RV64ZICOND-NEXT:    czero.eqz a2, a2, a0
+; RV64ZICOND-NEXT:    czero.nez s0, a3, a0
+; RV64ZICOND-NEXT:    or s0, s0, a2
+; RV64ZICOND-NEXT:    beqz a1, .LBB60_2
+; RV64ZICOND-NEXT:  # %bb.1:
+; RV64ZICOND-NEXT:    mv a0, s0
+; RV64ZICOND-NEXT:    call bat
+; RV64ZICOND-NEXT:  .LBB60_2:
+; RV64ZICOND-NEXT:    mv a0, s0
+; RV64ZICOND-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64ZICOND-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64ZICOND-NEXT:    addi sp, sp, 16
+; RV64ZICOND-NEXT:    ret
+  %5 = icmp eq i16 %0, 0
+  %6 = select i1 %5, i16 %3, i16 %2
+  %7 = icmp eq i16 %1, 0
+  br i1 %7, label %9, label %8
+
+8:                                                ; preds = %4
+  tail call void @bat(i16 signext %6)
+  br label %9
+
+9:                                                ; preds = %8, %4
+  ret i16 %6
+}
+
+declare void @bat(i16 signext)
+
+define i64 @single_bit(i64 %x) {
+; RV64I-LABEL: single_bit:
+; RV64I:       # %bb.0: # %entry
+; RV64I-NEXT:    slli a1, a0, 53
+; RV64I-NEXT:    srai a1, a1, 63
+; RV64I-NEXT:    and a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: single_bit:
+; RV64XVENTANACONDOPS:       # %bb.0: # %entry
+; RV64XVENTANACONDOPS-NEXT:    andi a1, a0, 1024
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: single_bit:
+; RV64XTHEADCONDMOV:       # %bb.0: # %entry
+; RV64XTHEADCONDMOV-NEXT:    slli a1, a0, 53
+; RV64XTHEADCONDMOV-NEXT:    srai a1, a1, 63
+; RV64XTHEADCONDMOV-NEXT:    and a0, a1, a0
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: single_bit:
+; RV64ZICOND:       # %bb.0: # %entry
+; RV64ZICOND-NEXT:    andi a1, a0, 1024
+; RV64ZICOND-NEXT:    czero.eqz a0, a0, a1
+; RV64ZICOND-NEXT:    ret
+entry:
+  %and = and i64 %x, 1024
+  %tobool.not = icmp eq i64 %and, 0
+  %cond = select i1 %tobool.not, i64 0, i64 %x
+  ret i64 %cond
+}
+
+; Test to fold select with single bit check to (and (sra (shl x))).
+define i64 @single_bit2(i64 %x) {
+; RV64I-LABEL: single_bit2:
+; RV64I:       # %bb.0: # %entry
+; RV64I-NEXT:    slli a1, a0, 52
+; RV64I-NEXT:    srai a1, a1, 63
+; RV64I-NEXT:    and a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV64XVENTANACONDOPS-LABEL: single_bit2:
+; RV64XVENTANACONDOPS:       # %bb.0: # %entry
+; RV64XVENTANACONDOPS-NEXT:    bexti a1, a0, 11
+; RV64XVENTANACONDOPS-NEXT:    vt.maskc a0, a0, a1
+; RV64XVENTANACONDOPS-NEXT:    ret
+;
+; RV64XTHEADCONDMOV-LABEL: single_bit2:
+; RV64XTHEADCONDMOV:       # %bb.0: # %entry
+; RV64XTHEADCONDMOV-NEXT:    slli a1, a0, 52
+; RV64XTHEADCONDMOV-NEXT:    srai a1, a1, 63
+; RV64XTHEADCONDMOV-NEXT:    and a0, a1, a0
+; RV64XTHEADCONDMOV-NEXT:    ret
+;
+; RV64ZICOND-LABEL: single_bit2:
+; RV64ZICOND:       # %bb.0: # %entry
+; RV64ZICOND-NEXT:    bexti a1, a0, 11
+; RV64ZICOND-NEXT:    czero.eqz a0, a0, a1
+; RV64ZICOND-NEXT:    ret
+entry:
+  %and = and i64 %x, 2048
+  %tobool.not = icmp eq i64 %and, 0
+  %cond = select i1 %tobool.not, i64 0, i64 %x
+  ret i64 %cond
+}
