[llvm] [RISCV][SDAG] Improve codegen of select with constants if zicond is available (PR #82456)
Yingwei Zheng via llvm-commits
llvm-commits at lists.llvm.org
Thu Feb 22 07:21:32 PST 2024
https://github.com/dtcxzyw updated https://github.com/llvm/llvm-project/pull/82456
From 0adba1b6938735d66881eebff7cb142f471efd6c Mon Sep 17 00:00:00 2001
From: Yingwei Zheng <dtcxzyw2333 at gmail.com>
Date: Thu, 22 Feb 2024 23:10:12 +0800
Subject: [PATCH 1/2] [RISCV][SDAG] Add pre-commit tests. NFC.
---
llvm/test/CodeGen/RISCV/select.ll | 281 ++++++++++++++++++++++++++++++
1 file changed, 281 insertions(+)
diff --git a/llvm/test/CodeGen/RISCV/select.ll b/llvm/test/CodeGen/RISCV/select.ll
index e01984b7c5843a..14667ae5053fa3 100644
--- a/llvm/test/CodeGen/RISCV/select.ll
+++ b/llvm/test/CodeGen/RISCV/select.ll
@@ -1626,3 +1626,284 @@ define i32 @select_cst_unknown(i32 signext %a, i32 signext %b) {
%ret = select i1 %cond, i32 5, i32 -7
ret i32 %ret
}
+
+define i32 @select_cst1(i1 zeroext %cond) {
+; RV32IM-LABEL: select_cst1:
+; RV32IM: # %bb.0:
+; RV32IM-NEXT: mv a1, a0
+; RV32IM-NEXT: li a0, 10
+; RV32IM-NEXT: bnez a1, .LBB43_2
+; RV32IM-NEXT: # %bb.1:
+; RV32IM-NEXT: li a0, 20
+; RV32IM-NEXT: .LBB43_2:
+; RV32IM-NEXT: ret
+;
+; RV64IM-LABEL: select_cst1:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: mv a1, a0
+; RV64IM-NEXT: li a0, 10
+; RV64IM-NEXT: bnez a1, .LBB43_2
+; RV64IM-NEXT: # %bb.1:
+; RV64IM-NEXT: li a0, 20
+; RV64IM-NEXT: .LBB43_2:
+; RV64IM-NEXT: ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_cst1:
+; RV64IMXVTCONDOPS: # %bb.0:
+; RV64IMXVTCONDOPS-NEXT: li a1, 20
+; RV64IMXVTCONDOPS-NEXT: vt.maskcn a1, a1, a0
+; RV64IMXVTCONDOPS-NEXT: li a2, 10
+; RV64IMXVTCONDOPS-NEXT: vt.maskc a0, a2, a0
+; RV64IMXVTCONDOPS-NEXT: or a0, a0, a1
+; RV64IMXVTCONDOPS-NEXT: ret
+;
+; CHECKZICOND-LABEL: select_cst1:
+; CHECKZICOND: # %bb.0:
+; CHECKZICOND-NEXT: li a1, 20
+; CHECKZICOND-NEXT: czero.nez a1, a1, a0
+; CHECKZICOND-NEXT: li a2, 10
+; CHECKZICOND-NEXT: czero.eqz a0, a2, a0
+; CHECKZICOND-NEXT: or a0, a0, a1
+; CHECKZICOND-NEXT: ret
+ %ret = select i1 %cond, i32 10, i32 20
+ ret i32 %ret
+}
+
+define i32 @select_cst2(i1 zeroext %cond) {
+; RV32IM-LABEL: select_cst2:
+; RV32IM: # %bb.0:
+; RV32IM-NEXT: mv a1, a0
+; RV32IM-NEXT: li a0, 10
+; RV32IM-NEXT: bnez a1, .LBB44_2
+; RV32IM-NEXT: # %bb.1:
+; RV32IM-NEXT: lui a0, 5
+; RV32IM-NEXT: addi a0, a0, -480
+; RV32IM-NEXT: .LBB44_2:
+; RV32IM-NEXT: ret
+;
+; RV64IM-LABEL: select_cst2:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: mv a1, a0
+; RV64IM-NEXT: li a0, 10
+; RV64IM-NEXT: bnez a1, .LBB44_2
+; RV64IM-NEXT: # %bb.1:
+; RV64IM-NEXT: lui a0, 5
+; RV64IM-NEXT: addiw a0, a0, -480
+; RV64IM-NEXT: .LBB44_2:
+; RV64IM-NEXT: ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_cst2:
+; RV64IMXVTCONDOPS: # %bb.0:
+; RV64IMXVTCONDOPS-NEXT: li a1, 10
+; RV64IMXVTCONDOPS-NEXT: vt.maskc a1, a1, a0
+; RV64IMXVTCONDOPS-NEXT: lui a2, 5
+; RV64IMXVTCONDOPS-NEXT: addiw a2, a2, -480
+; RV64IMXVTCONDOPS-NEXT: vt.maskcn a0, a2, a0
+; RV64IMXVTCONDOPS-NEXT: or a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT: ret
+;
+; RV32IMZICOND-LABEL: select_cst2:
+; RV32IMZICOND: # %bb.0:
+; RV32IMZICOND-NEXT: li a1, 10
+; RV32IMZICOND-NEXT: czero.eqz a1, a1, a0
+; RV32IMZICOND-NEXT: lui a2, 5
+; RV32IMZICOND-NEXT: addi a2, a2, -480
+; RV32IMZICOND-NEXT: czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT: or a0, a1, a0
+; RV32IMZICOND-NEXT: ret
+;
+; RV64IMZICOND-LABEL: select_cst2:
+; RV64IMZICOND: # %bb.0:
+; RV64IMZICOND-NEXT: li a1, 10
+; RV64IMZICOND-NEXT: czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT: lui a2, 5
+; RV64IMZICOND-NEXT: addiw a2, a2, -480
+; RV64IMZICOND-NEXT: czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT: or a0, a1, a0
+; RV64IMZICOND-NEXT: ret
+ %ret = select i1 %cond, i32 10, i32 20000
+ ret i32 %ret
+}
+
+define i32 @select_cst3(i1 zeroext %cond) {
+; RV32IM-LABEL: select_cst3:
+; RV32IM: # %bb.0:
+; RV32IM-NEXT: bnez a0, .LBB45_2
+; RV32IM-NEXT: # %bb.1:
+; RV32IM-NEXT: lui a0, 5
+; RV32IM-NEXT: addi a0, a0, -480
+; RV32IM-NEXT: ret
+; RV32IM-NEXT: .LBB45_2:
+; RV32IM-NEXT: lui a0, 7
+; RV32IM-NEXT: addi a0, a0, 1328
+; RV32IM-NEXT: ret
+;
+; RV64IM-LABEL: select_cst3:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: bnez a0, .LBB45_2
+; RV64IM-NEXT: # %bb.1:
+; RV64IM-NEXT: lui a0, 5
+; RV64IM-NEXT: addiw a0, a0, -480
+; RV64IM-NEXT: ret
+; RV64IM-NEXT: .LBB45_2:
+; RV64IM-NEXT: lui a0, 7
+; RV64IM-NEXT: addiw a0, a0, 1328
+; RV64IM-NEXT: ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_cst3:
+; RV64IMXVTCONDOPS: # %bb.0:
+; RV64IMXVTCONDOPS-NEXT: lui a1, 5
+; RV64IMXVTCONDOPS-NEXT: addiw a1, a1, -480
+; RV64IMXVTCONDOPS-NEXT: vt.maskcn a1, a1, a0
+; RV64IMXVTCONDOPS-NEXT: lui a2, 7
+; RV64IMXVTCONDOPS-NEXT: addiw a2, a2, 1328
+; RV64IMXVTCONDOPS-NEXT: vt.maskc a0, a2, a0
+; RV64IMXVTCONDOPS-NEXT: or a0, a0, a1
+; RV64IMXVTCONDOPS-NEXT: ret
+;
+; RV32IMZICOND-LABEL: select_cst3:
+; RV32IMZICOND: # %bb.0:
+; RV32IMZICOND-NEXT: lui a1, 5
+; RV32IMZICOND-NEXT: addi a1, a1, -480
+; RV32IMZICOND-NEXT: czero.nez a1, a1, a0
+; RV32IMZICOND-NEXT: lui a2, 7
+; RV32IMZICOND-NEXT: addi a2, a2, 1328
+; RV32IMZICOND-NEXT: czero.eqz a0, a2, a0
+; RV32IMZICOND-NEXT: or a0, a0, a1
+; RV32IMZICOND-NEXT: ret
+;
+; RV64IMZICOND-LABEL: select_cst3:
+; RV64IMZICOND: # %bb.0:
+; RV64IMZICOND-NEXT: lui a1, 5
+; RV64IMZICOND-NEXT: addiw a1, a1, -480
+; RV64IMZICOND-NEXT: czero.nez a1, a1, a0
+; RV64IMZICOND-NEXT: lui a2, 7
+; RV64IMZICOND-NEXT: addiw a2, a2, 1328
+; RV64IMZICOND-NEXT: czero.eqz a0, a2, a0
+; RV64IMZICOND-NEXT: or a0, a0, a1
+; RV64IMZICOND-NEXT: ret
+ %ret = select i1 %cond, i32 30000, i32 20000
+ ret i32 %ret
+}
+
+define i32 @select_cst4(i1 zeroext %cond) {
+; CHECK-LABEL: select_cst4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: neg a0, a0
+; CHECK-NEXT: xori a0, a0, 2047
+; CHECK-NEXT: ret
+ %ret = select i1 %cond, i32 -2048, i32 2047
+ ret i32 %ret
+}
+
+define i32 @select_cst5(i1 zeroext %cond) {
+; RV32IM-LABEL: select_cst5:
+; RV32IM: # %bb.0:
+; RV32IM-NEXT: mv a1, a0
+; RV32IM-NEXT: li a0, 2047
+; RV32IM-NEXT: bnez a1, .LBB47_2
+; RV32IM-NEXT: # %bb.1:
+; RV32IM-NEXT: lui a0, 1
+; RV32IM-NEXT: addi a0, a0, -2047
+; RV32IM-NEXT: .LBB47_2:
+; RV32IM-NEXT: ret
+;
+; RV64IM-LABEL: select_cst5:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: mv a1, a0
+; RV64IM-NEXT: li a0, 2047
+; RV64IM-NEXT: bnez a1, .LBB47_2
+; RV64IM-NEXT: # %bb.1:
+; RV64IM-NEXT: lui a0, 1
+; RV64IM-NEXT: addiw a0, a0, -2047
+; RV64IM-NEXT: .LBB47_2:
+; RV64IM-NEXT: ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_cst5:
+; RV64IMXVTCONDOPS: # %bb.0:
+; RV64IMXVTCONDOPS-NEXT: li a1, 2047
+; RV64IMXVTCONDOPS-NEXT: vt.maskc a1, a1, a0
+; RV64IMXVTCONDOPS-NEXT: lui a2, 1
+; RV64IMXVTCONDOPS-NEXT: addiw a2, a2, -2047
+; RV64IMXVTCONDOPS-NEXT: vt.maskcn a0, a2, a0
+; RV64IMXVTCONDOPS-NEXT: or a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT: ret
+;
+; RV32IMZICOND-LABEL: select_cst5:
+; RV32IMZICOND: # %bb.0:
+; RV32IMZICOND-NEXT: li a1, 2047
+; RV32IMZICOND-NEXT: czero.eqz a1, a1, a0
+; RV32IMZICOND-NEXT: lui a2, 1
+; RV32IMZICOND-NEXT: addi a2, a2, -2047
+; RV32IMZICOND-NEXT: czero.nez a0, a2, a0
+; RV32IMZICOND-NEXT: or a0, a1, a0
+; RV32IMZICOND-NEXT: ret
+;
+; RV64IMZICOND-LABEL: select_cst5:
+; RV64IMZICOND: # %bb.0:
+; RV64IMZICOND-NEXT: li a1, 2047
+; RV64IMZICOND-NEXT: czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT: lui a2, 1
+; RV64IMZICOND-NEXT: addiw a2, a2, -2047
+; RV64IMZICOND-NEXT: czero.nez a0, a2, a0
+; RV64IMZICOND-NEXT: or a0, a1, a0
+; RV64IMZICOND-NEXT: ret
+ %ret = select i1 %cond, i32 2047, i32 2049
+ ret i32 %ret
+}
+
+define i32 @select_cst6(i1 zeroext %cond) {
+; RV32IM-LABEL: select_cst6:
+; RV32IM: # %bb.0:
+; RV32IM-NEXT: bnez a0, .LBB48_2
+; RV32IM-NEXT: # %bb.1:
+; RV32IM-NEXT: li a0, 2047
+; RV32IM-NEXT: ret
+; RV32IM-NEXT: .LBB48_2:
+; RV32IM-NEXT: lui a0, 1
+; RV32IM-NEXT: addi a0, a0, -2047
+; RV32IM-NEXT: ret
+;
+; RV64IM-LABEL: select_cst6:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: bnez a0, .LBB48_2
+; RV64IM-NEXT: # %bb.1:
+; RV64IM-NEXT: li a0, 2047
+; RV64IM-NEXT: ret
+; RV64IM-NEXT: .LBB48_2:
+; RV64IM-NEXT: lui a0, 1
+; RV64IM-NEXT: addiw a0, a0, -2047
+; RV64IM-NEXT: ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_cst6:
+; RV64IMXVTCONDOPS: # %bb.0:
+; RV64IMXVTCONDOPS-NEXT: li a1, 2047
+; RV64IMXVTCONDOPS-NEXT: vt.maskcn a1, a1, a0
+; RV64IMXVTCONDOPS-NEXT: lui a2, 1
+; RV64IMXVTCONDOPS-NEXT: addiw a2, a2, -2047
+; RV64IMXVTCONDOPS-NEXT: vt.maskc a0, a2, a0
+; RV64IMXVTCONDOPS-NEXT: or a0, a0, a1
+; RV64IMXVTCONDOPS-NEXT: ret
+;
+; RV32IMZICOND-LABEL: select_cst6:
+; RV32IMZICOND: # %bb.0:
+; RV32IMZICOND-NEXT: li a1, 2047
+; RV32IMZICOND-NEXT: czero.nez a1, a1, a0
+; RV32IMZICOND-NEXT: lui a2, 1
+; RV32IMZICOND-NEXT: addi a2, a2, -2047
+; RV32IMZICOND-NEXT: czero.eqz a0, a2, a0
+; RV32IMZICOND-NEXT: or a0, a0, a1
+; RV32IMZICOND-NEXT: ret
+;
+; RV64IMZICOND-LABEL: select_cst6:
+; RV64IMZICOND: # %bb.0:
+; RV64IMZICOND-NEXT: li a1, 2047
+; RV64IMZICOND-NEXT: czero.nez a1, a1, a0
+; RV64IMZICOND-NEXT: lui a2, 1
+; RV64IMZICOND-NEXT: addiw a2, a2, -2047
+; RV64IMZICOND-NEXT: czero.eqz a0, a2, a0
+; RV64IMZICOND-NEXT: or a0, a0, a1
+; RV64IMZICOND-NEXT: ret
+ %ret = select i1 %cond, i32 2049, i32 2047
+ ret i32 %ret
+}
From 929308f790d567eef59b068930ce6ed6a170e70b Mon Sep 17 00:00:00 2001
From: Yingwei Zheng <dtcxzyw2333 at gmail.com>
Date: Thu, 22 Feb 2024 23:11:15 +0800
Subject: [PATCH 2/2] [RISCV][SDAG] Improve codegen of select with constants if
zicond is available
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 20 +++
llvm/test/CodeGen/RISCV/select.ll | 169 +++++++-------------
2 files changed, 80 insertions(+), 109 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index cf0dc36a51b61b..6bf02cf8c0f875 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -7379,6 +7379,26 @@ SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
if (SDValue V = combineSelectToBinOp(Op.getNode(), DAG, Subtarget))
return V;
+ // (select c, c1, c2) -> (add (czero_nez c2 - c1, c), c1)
+ // (select c, c1, c2) -> (add (czero_eqz c1 - c2, c), c2)
+ if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV)) {
+ const APInt &TrueVal = TrueV->getAsAPIntVal();
+ const APInt &FalseVal = FalseV->getAsAPIntVal();
+ const int TrueValCost = RISCVMatInt::getIntMatCost(
+ TrueVal, Subtarget.getXLen(), Subtarget, /*CompressionCost=*/true);
+ const int FalseValCost = RISCVMatInt::getIntMatCost(
+ FalseVal, Subtarget.getXLen(), Subtarget, /*CompressionCost=*/true);
+ bool IsCZERO_NEZ = TrueValCost <= FalseValCost;
+ SDValue LHSVal = DAG.getConstant(
+ IsCZERO_NEZ ? FalseVal - TrueVal : TrueVal - FalseVal, DL, VT);
+ SDValue RHSVal =
+ DAG.getConstant(IsCZERO_NEZ ? TrueVal : FalseVal, DL, VT);
+ SDValue CMOV =
+ DAG.getNode(IsCZERO_NEZ ? RISCVISD::CZERO_NEZ : RISCVISD::CZERO_EQZ,
+ DL, VT, LHSVal, CondV);
+ return DAG.getNode(ISD::ADD, DL, VT, CMOV, RHSVal);
+ }
+
// (select c, t, f) -> (or (czero_eqz t, c), (czero_nez f, c))
// Unless we have the short forward branch optimization.
if (!Subtarget.hasConditionalMoveFusion())
diff --git a/llvm/test/CodeGen/RISCV/select.ll b/llvm/test/CodeGen/RISCV/select.ll
index 14667ae5053fa3..e07e52091e9e71 100644
--- a/llvm/test/CodeGen/RISCV/select.ll
+++ b/llvm/test/CodeGen/RISCV/select.ll
@@ -1606,21 +1606,17 @@ define i32 @select_cst_unknown(i32 signext %a, i32 signext %b) {
; RV64IMXVTCONDOPS-LABEL: select_cst_unknown:
; RV64IMXVTCONDOPS: # %bb.0:
; RV64IMXVTCONDOPS-NEXT: slt a0, a0, a1
-; RV64IMXVTCONDOPS-NEXT: li a1, -7
-; RV64IMXVTCONDOPS-NEXT: vt.maskcn a1, a1, a0
-; RV64IMXVTCONDOPS-NEXT: li a2, 5
-; RV64IMXVTCONDOPS-NEXT: vt.maskc a0, a2, a0
-; RV64IMXVTCONDOPS-NEXT: or a0, a0, a1
+; RV64IMXVTCONDOPS-NEXT: li a1, -12
+; RV64IMXVTCONDOPS-NEXT: vt.maskcn a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT: addi a0, a0, 5
; RV64IMXVTCONDOPS-NEXT: ret
;
; CHECKZICOND-LABEL: select_cst_unknown:
; CHECKZICOND: # %bb.0:
; CHECKZICOND-NEXT: slt a0, a0, a1
-; CHECKZICOND-NEXT: li a1, -7
-; CHECKZICOND-NEXT: czero.nez a1, a1, a0
-; CHECKZICOND-NEXT: li a2, 5
-; CHECKZICOND-NEXT: czero.eqz a0, a2, a0
-; CHECKZICOND-NEXT: or a0, a0, a1
+; CHECKZICOND-NEXT: li a1, -12
+; CHECKZICOND-NEXT: czero.nez a0, a1, a0
+; CHECKZICOND-NEXT: addi a0, a0, 5
; CHECKZICOND-NEXT: ret
%cond = icmp slt i32 %a, %b
%ret = select i1 %cond, i32 5, i32 -7
@@ -1650,20 +1646,16 @@ define i32 @select_cst1(i1 zeroext %cond) {
;
; RV64IMXVTCONDOPS-LABEL: select_cst1:
; RV64IMXVTCONDOPS: # %bb.0:
-; RV64IMXVTCONDOPS-NEXT: li a1, 20
-; RV64IMXVTCONDOPS-NEXT: vt.maskcn a1, a1, a0
-; RV64IMXVTCONDOPS-NEXT: li a2, 10
-; RV64IMXVTCONDOPS-NEXT: vt.maskc a0, a2, a0
-; RV64IMXVTCONDOPS-NEXT: or a0, a0, a1
+; RV64IMXVTCONDOPS-NEXT: li a1, 10
+; RV64IMXVTCONDOPS-NEXT: vt.maskcn a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT: addi a0, a0, 10
; RV64IMXVTCONDOPS-NEXT: ret
;
; CHECKZICOND-LABEL: select_cst1:
; CHECKZICOND: # %bb.0:
-; CHECKZICOND-NEXT: li a1, 20
-; CHECKZICOND-NEXT: czero.nez a1, a1, a0
-; CHECKZICOND-NEXT: li a2, 10
-; CHECKZICOND-NEXT: czero.eqz a0, a2, a0
-; CHECKZICOND-NEXT: or a0, a0, a1
+; CHECKZICOND-NEXT: li a1, 10
+; CHECKZICOND-NEXT: czero.nez a0, a1, a0
+; CHECKZICOND-NEXT: addi a0, a0, 10
; CHECKZICOND-NEXT: ret
%ret = select i1 %cond, i32 10, i32 20
ret i32 %ret
@@ -1694,32 +1686,26 @@ define i32 @select_cst2(i1 zeroext %cond) {
;
; RV64IMXVTCONDOPS-LABEL: select_cst2:
; RV64IMXVTCONDOPS: # %bb.0:
-; RV64IMXVTCONDOPS-NEXT: li a1, 10
-; RV64IMXVTCONDOPS-NEXT: vt.maskc a1, a1, a0
-; RV64IMXVTCONDOPS-NEXT: lui a2, 5
-; RV64IMXVTCONDOPS-NEXT: addiw a2, a2, -480
-; RV64IMXVTCONDOPS-NEXT: vt.maskcn a0, a2, a0
-; RV64IMXVTCONDOPS-NEXT: or a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT: lui a1, 5
+; RV64IMXVTCONDOPS-NEXT: addiw a1, a1, -490
+; RV64IMXVTCONDOPS-NEXT: vt.maskcn a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT: addi a0, a0, 10
; RV64IMXVTCONDOPS-NEXT: ret
;
; RV32IMZICOND-LABEL: select_cst2:
; RV32IMZICOND: # %bb.0:
-; RV32IMZICOND-NEXT: li a1, 10
-; RV32IMZICOND-NEXT: czero.eqz a1, a1, a0
-; RV32IMZICOND-NEXT: lui a2, 5
-; RV32IMZICOND-NEXT: addi a2, a2, -480
-; RV32IMZICOND-NEXT: czero.nez a0, a2, a0
-; RV32IMZICOND-NEXT: or a0, a1, a0
+; RV32IMZICOND-NEXT: lui a1, 5
+; RV32IMZICOND-NEXT: addi a1, a1, -490
+; RV32IMZICOND-NEXT: czero.nez a0, a1, a0
+; RV32IMZICOND-NEXT: addi a0, a0, 10
; RV32IMZICOND-NEXT: ret
;
; RV64IMZICOND-LABEL: select_cst2:
; RV64IMZICOND: # %bb.0:
-; RV64IMZICOND-NEXT: li a1, 10
-; RV64IMZICOND-NEXT: czero.eqz a1, a1, a0
-; RV64IMZICOND-NEXT: lui a2, 5
-; RV64IMZICOND-NEXT: addiw a2, a2, -480
-; RV64IMZICOND-NEXT: czero.nez a0, a2, a0
-; RV64IMZICOND-NEXT: or a0, a1, a0
+; RV64IMZICOND-NEXT: lui a1, 5
+; RV64IMZICOND-NEXT: addiw a1, a1, -490
+; RV64IMZICOND-NEXT: czero.nez a0, a1, a0
+; RV64IMZICOND-NEXT: addi a0, a0, 10
; RV64IMZICOND-NEXT: ret
%ret = select i1 %cond, i32 10, i32 20000
ret i32 %ret
@@ -1752,35 +1738,32 @@ define i32 @select_cst3(i1 zeroext %cond) {
;
; RV64IMXVTCONDOPS-LABEL: select_cst3:
; RV64IMXVTCONDOPS: # %bb.0:
-; RV64IMXVTCONDOPS-NEXT: lui a1, 5
-; RV64IMXVTCONDOPS-NEXT: addiw a1, a1, -480
-; RV64IMXVTCONDOPS-NEXT: vt.maskcn a1, a1, a0
-; RV64IMXVTCONDOPS-NEXT: lui a2, 7
-; RV64IMXVTCONDOPS-NEXT: addiw a2, a2, 1328
-; RV64IMXVTCONDOPS-NEXT: vt.maskc a0, a2, a0
-; RV64IMXVTCONDOPS-NEXT: or a0, a0, a1
+; RV64IMXVTCONDOPS-NEXT: lui a1, 1048574
+; RV64IMXVTCONDOPS-NEXT: addiw a1, a1, -1808
+; RV64IMXVTCONDOPS-NEXT: vt.maskcn a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT: lui a1, 7
+; RV64IMXVTCONDOPS-NEXT: addiw a1, a1, 1328
+; RV64IMXVTCONDOPS-NEXT: add a0, a0, a1
; RV64IMXVTCONDOPS-NEXT: ret
;
; RV32IMZICOND-LABEL: select_cst3:
; RV32IMZICOND: # %bb.0:
-; RV32IMZICOND-NEXT: lui a1, 5
-; RV32IMZICOND-NEXT: addi a1, a1, -480
-; RV32IMZICOND-NEXT: czero.nez a1, a1, a0
-; RV32IMZICOND-NEXT: lui a2, 7
-; RV32IMZICOND-NEXT: addi a2, a2, 1328
-; RV32IMZICOND-NEXT: czero.eqz a0, a2, a0
-; RV32IMZICOND-NEXT: or a0, a0, a1
+; RV32IMZICOND-NEXT: lui a1, 1048574
+; RV32IMZICOND-NEXT: addi a1, a1, -1808
+; RV32IMZICOND-NEXT: czero.nez a0, a1, a0
+; RV32IMZICOND-NEXT: lui a1, 7
+; RV32IMZICOND-NEXT: addi a1, a1, 1328
+; RV32IMZICOND-NEXT: add a0, a0, a1
; RV32IMZICOND-NEXT: ret
;
; RV64IMZICOND-LABEL: select_cst3:
; RV64IMZICOND: # %bb.0:
-; RV64IMZICOND-NEXT: lui a1, 5
-; RV64IMZICOND-NEXT: addiw a1, a1, -480
-; RV64IMZICOND-NEXT: czero.nez a1, a1, a0
-; RV64IMZICOND-NEXT: lui a2, 7
-; RV64IMZICOND-NEXT: addiw a2, a2, 1328
-; RV64IMZICOND-NEXT: czero.eqz a0, a2, a0
-; RV64IMZICOND-NEXT: or a0, a0, a1
+; RV64IMZICOND-NEXT: lui a1, 1048574
+; RV64IMZICOND-NEXT: addiw a1, a1, -1808
+; RV64IMZICOND-NEXT: czero.nez a0, a1, a0
+; RV64IMZICOND-NEXT: lui a1, 7
+; RV64IMZICOND-NEXT: addiw a1, a1, 1328
+; RV64IMZICOND-NEXT: add a0, a0, a1
; RV64IMZICOND-NEXT: ret
%ret = select i1 %cond, i32 30000, i32 20000
ret i32 %ret
@@ -1821,33 +1804,17 @@ define i32 @select_cst5(i1 zeroext %cond) {
;
; RV64IMXVTCONDOPS-LABEL: select_cst5:
; RV64IMXVTCONDOPS: # %bb.0:
-; RV64IMXVTCONDOPS-NEXT: li a1, 2047
-; RV64IMXVTCONDOPS-NEXT: vt.maskc a1, a1, a0
-; RV64IMXVTCONDOPS-NEXT: lui a2, 1
-; RV64IMXVTCONDOPS-NEXT: addiw a2, a2, -2047
-; RV64IMXVTCONDOPS-NEXT: vt.maskcn a0, a2, a0
-; RV64IMXVTCONDOPS-NEXT: or a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT: li a1, 2
+; RV64IMXVTCONDOPS-NEXT: vt.maskcn a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT: addi a0, a0, 2047
; RV64IMXVTCONDOPS-NEXT: ret
;
-; RV32IMZICOND-LABEL: select_cst5:
-; RV32IMZICOND: # %bb.0:
-; RV32IMZICOND-NEXT: li a1, 2047
-; RV32IMZICOND-NEXT: czero.eqz a1, a1, a0
-; RV32IMZICOND-NEXT: lui a2, 1
-; RV32IMZICOND-NEXT: addi a2, a2, -2047
-; RV32IMZICOND-NEXT: czero.nez a0, a2, a0
-; RV32IMZICOND-NEXT: or a0, a1, a0
-; RV32IMZICOND-NEXT: ret
-;
-; RV64IMZICOND-LABEL: select_cst5:
-; RV64IMZICOND: # %bb.0:
-; RV64IMZICOND-NEXT: li a1, 2047
-; RV64IMZICOND-NEXT: czero.eqz a1, a1, a0
-; RV64IMZICOND-NEXT: lui a2, 1
-; RV64IMZICOND-NEXT: addiw a2, a2, -2047
-; RV64IMZICOND-NEXT: czero.nez a0, a2, a0
-; RV64IMZICOND-NEXT: or a0, a1, a0
-; RV64IMZICOND-NEXT: ret
+; CHECKZICOND-LABEL: select_cst5:
+; CHECKZICOND: # %bb.0:
+; CHECKZICOND-NEXT: li a1, 2
+; CHECKZICOND-NEXT: czero.nez a0, a1, a0
+; CHECKZICOND-NEXT: addi a0, a0, 2047
+; CHECKZICOND-NEXT: ret
%ret = select i1 %cond, i32 2047, i32 2049
ret i32 %ret
}
@@ -1877,33 +1844,17 @@ define i32 @select_cst6(i1 zeroext %cond) {
;
; RV64IMXVTCONDOPS-LABEL: select_cst6:
; RV64IMXVTCONDOPS: # %bb.0:
-; RV64IMXVTCONDOPS-NEXT: li a1, 2047
-; RV64IMXVTCONDOPS-NEXT: vt.maskcn a1, a1, a0
-; RV64IMXVTCONDOPS-NEXT: lui a2, 1
-; RV64IMXVTCONDOPS-NEXT: addiw a2, a2, -2047
-; RV64IMXVTCONDOPS-NEXT: vt.maskc a0, a2, a0
-; RV64IMXVTCONDOPS-NEXT: or a0, a0, a1
+; RV64IMXVTCONDOPS-NEXT: li a1, 2
+; RV64IMXVTCONDOPS-NEXT: vt.maskc a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT: addi a0, a0, 2047
; RV64IMXVTCONDOPS-NEXT: ret
;
-; RV32IMZICOND-LABEL: select_cst6:
-; RV32IMZICOND: # %bb.0:
-; RV32IMZICOND-NEXT: li a1, 2047
-; RV32IMZICOND-NEXT: czero.nez a1, a1, a0
-; RV32IMZICOND-NEXT: lui a2, 1
-; RV32IMZICOND-NEXT: addi a2, a2, -2047
-; RV32IMZICOND-NEXT: czero.eqz a0, a2, a0
-; RV32IMZICOND-NEXT: or a0, a0, a1
-; RV32IMZICOND-NEXT: ret
-;
-; RV64IMZICOND-LABEL: select_cst6:
-; RV64IMZICOND: # %bb.0:
-; RV64IMZICOND-NEXT: li a1, 2047
-; RV64IMZICOND-NEXT: czero.nez a1, a1, a0
-; RV64IMZICOND-NEXT: lui a2, 1
-; RV64IMZICOND-NEXT: addiw a2, a2, -2047
-; RV64IMZICOND-NEXT: czero.eqz a0, a2, a0
-; RV64IMZICOND-NEXT: or a0, a0, a1
-; RV64IMZICOND-NEXT: ret
+; CHECKZICOND-LABEL: select_cst6:
+; CHECKZICOND: # %bb.0:
+; CHECKZICOND-NEXT: li a1, 2
+; CHECKZICOND-NEXT: czero.eqz a0, a1, a0
+; CHECKZICOND-NEXT: addi a0, a0, 2047
+; CHECKZICOND-NEXT: ret
%ret = select i1 %cond, i32 2049, i32 2047
ret i32 %ret
}