[llvm] [RISCV] Select (add C, x) -> (add.uw C|0xffffffff00000000, x) (PR #143375)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Jun 9 05:36:10 PDT 2025
llvmbot wrote:
<!--LLVM PR SUMMARY COMMENT-->
@llvm/pr-subscribers-backend-risc-v
Author: Piotr Fusik (pfusik)
<details>
<summary>Changes</summary>
Emits fewer instructions for certain 64-bit constants: for `(add x, C)` where `C` is the zero-extension of a 32-bit value with bit 31 set, select `add.uw` with the constant materialized as the cheaper sign-extended value `C | 0xffffffff00000000` (since `add.uw` zero-extends the low 32 bits of its first operand).
---
Full diff: https://github.com/llvm/llvm-project/pull/143375.diff
4 Files Affected:
- (modified) llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp (+35-20)
- (modified) llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h (+3)
- (modified) llvm/lib/Target/RISCV/RISCVInstrInfoZb.td (+4)
- (modified) llvm/test/CodeGen/RISCV/rv64zba.ll (+137)
``````````diff
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 494d6ed03292a..5be03bb33d3f5 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -3296,6 +3296,33 @@ bool RISCVDAGToDAGISel::selectSHXADD_UWOp(SDValue N, unsigned ShAmt,
return false;
}
+bool RISCVDAGToDAGISel::selectImm64IfCheaper(int64_t Imm, int64_t OrigImm,
+ SDValue N, SDValue &Val) {
+ int OrigCost = RISCVMatInt::getIntMatCost(APInt(64, OrigImm), 64, *Subtarget,
+ /*CompressionCost=*/true);
+ int Cost = RISCVMatInt::getIntMatCost(APInt(64, Imm), 64, *Subtarget,
+ /*CompressionCost=*/true);
+ if (OrigCost <= Cost)
+ return false;
+
+ Val = selectImm(CurDAG, SDLoc(N), N->getSimpleValueType(0), Imm, *Subtarget);
+ return true;
+}
+
+bool RISCVDAGToDAGISel::selectZExtImm32(SDValue N, SDValue &Val) {
+ if (!isa<ConstantSDNode>(N))
+ return false;
+ int64_t Imm = cast<ConstantSDNode>(N)->getSExtValue();
+ if ((Imm >> 31) != 1)
+ return false;
+
+ if (any_of(N->users(),
+ [](const SDNode *U) { return U->getOpcode() != ISD::ADD; }))
+ return false;
+
+ return selectImm64IfCheaper(0xffffffff00000000 | Imm, Imm, N, Val);
+}
+
bool RISCVDAGToDAGISel::selectNegImm(SDValue N, SDValue &Val) {
if (!isa<ConstantSDNode>(N))
return false;
@@ -3319,15 +3346,7 @@ bool RISCVDAGToDAGISel::selectNegImm(SDValue N, SDValue &Val) {
}
}
- int OrigImmCost = RISCVMatInt::getIntMatCost(APInt(64, Imm), 64, *Subtarget,
- /*CompressionCost=*/true);
- int NegImmCost = RISCVMatInt::getIntMatCost(APInt(64, -Imm), 64, *Subtarget,
- /*CompressionCost=*/true);
- if (OrigImmCost <= NegImmCost)
- return false;
-
- Val = selectImm(CurDAG, SDLoc(N), N->getSimpleValueType(0), -Imm, *Subtarget);
- return true;
+ return selectImm64IfCheaper(-Imm, Imm, N, Val);
}
bool RISCVDAGToDAGISel::selectInvLogicImm(SDValue N, SDValue &Val) {
@@ -3362,19 +3381,15 @@ bool RISCVDAGToDAGISel::selectInvLogicImm(SDValue N, SDValue &Val) {
}
}
- // For 64-bit constants, the instruction sequences get complex,
- // so we select inverted only if it's cheaper.
- if (!isInt<32>(Imm)) {
- int OrigImmCost = RISCVMatInt::getIntMatCost(APInt(64, Imm), 64, *Subtarget,
- /*CompressionCost=*/true);
- int NegImmCost = RISCVMatInt::getIntMatCost(APInt(64, ~Imm), 64, *Subtarget,
- /*CompressionCost=*/true);
- if (OrigImmCost <= NegImmCost)
- return false;
+ if (isInt<32>(Imm)) {
+ Val =
+ selectImm(CurDAG, SDLoc(N), N->getSimpleValueType(0), ~Imm, *Subtarget);
+ return true;
}
- Val = selectImm(CurDAG, SDLoc(N), N->getSimpleValueType(0), ~Imm, *Subtarget);
- return true;
+ // For 64-bit constants, the instruction sequences get complex,
+ // so we select inverted only if it's cheaper.
+ return selectImm64IfCheaper(~Imm, Imm, N, Val);
}
static bool vectorPseudoHasAllNBitUsers(SDNode *User, unsigned UserOpNo,
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
index f199c2031b9a9..45e39614f21b8 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -118,6 +118,7 @@ class RISCVDAGToDAGISel : public SelectionDAGISel {
return selectSHXADD_UWOp(N, ShAmt, Val);
}
+ bool selectZExtImm32(SDValue N, SDValue &Val);
bool selectNegImm(SDValue N, SDValue &Val);
bool selectInvLogicImm(SDValue N, SDValue &Val);
@@ -199,6 +200,8 @@ class RISCVDAGToDAGISel : public SelectionDAGISel {
bool doPeepholeMergeVVMFold();
bool doPeepholeNoRegPassThru();
bool performCombineVMergeAndVOps(SDNode *N);
+ bool selectImm64IfCheaper(int64_t Imm, int64_t OrigImm, SDValue N,
+ SDValue &Val);
};
class RISCVDAGToDAGISelLegacy : public SelectionDAGISelLegacy {
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
index 4353e94bdb1d0..4806bcc1d63de 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
@@ -704,9 +704,13 @@ def : CSImm12MulBy4Pat<SH2ADD>;
def : CSImm12MulBy8Pat<SH3ADD>;
} // Predicates = [HasStdExtZba]
+def zExtImm32 : ComplexPattern<i64, 1, "selectZExtImm32", [], [], 0>;
+
multiclass ADD_UWPat<Instruction add_uw> {
def : Pat<(i64 (add_like_non_imm12 (and GPR:$rs1, 0xFFFFFFFF), GPR:$rs2)),
(add_uw GPR:$rs1, GPR:$rs2)>;
+ def : Pat<(i64 (add_like zExtImm32:$rs1, GPR:$rs2)),
+ (add_uw zExtImm32:$rs1, GPR:$rs2)>;
def : Pat<(i64 (and GPR:$rs, 0xFFFFFFFF)), (add_uw GPR:$rs, (XLenVT X0))>;
}
diff --git a/llvm/test/CodeGen/RISCV/rv64zba.ll b/llvm/test/CodeGen/RISCV/rv64zba.ll
index e71117739b125..5668ec1f230c2 100644
--- a/llvm/test/CodeGen/RISCV/rv64zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zba.ll
@@ -4411,3 +4411,140 @@ define ptr @udiv1280_gep(ptr %p, i16 zeroext %i) {
%add.ptr = getelementptr i64, ptr %p, i64 %idx.ext
ret ptr %add.ptr
}
+
+define i64 @adduw_m1(i64 %x) {
+; RV64I-LABEL: adduw_m1:
+; RV64I: # %bb.0:
+; RV64I-NEXT: li a1, -1
+; RV64I-NEXT: srli a1, a1, 32
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: adduw_m1:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: li a1, -1
+; RV64ZBA-NEXT: add.uw a0, a1, a0
+; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: adduw_m1:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: li a1, -1
+; RV64XANDESPERF-NEXT: nds.lea.b.ze a0, a0, a1
+; RV64XANDESPERF-NEXT: ret
+ %a = add i64 %x, 4294967295
+ ret i64 %a
+}
+
+define i64 @adduw_m3(i64 %x) {
+; RV64I-LABEL: adduw_m3:
+; RV64I: # %bb.0:
+; RV64I-NEXT: li a1, 1
+; RV64I-NEXT: slli a1, a1, 32
+; RV64I-NEXT: addi a1, a1, -3
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: adduw_m3:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: li a1, -3
+; RV64ZBA-NEXT: add.uw a0, a1, a0
+; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: adduw_m3:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: li a1, -3
+; RV64XANDESPERF-NEXT: nds.lea.b.ze a0, a0, a1
+; RV64XANDESPERF-NEXT: ret
+ %a = add i64 %x, 4294967293
+ ret i64 %a
+}
+
+define i64 @adduw_3shl30(i64 %x) {
+; RV64I-LABEL: adduw_3shl30:
+; RV64I: # %bb.0:
+; RV64I-NEXT: li a1, 3
+; RV64I-NEXT: slli a1, a1, 30
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: adduw_3shl30:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: lui a1, 786432
+; RV64ZBA-NEXT: add.uw a0, a1, a0
+; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: adduw_3shl30:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: lui a1, 786432
+; RV64XANDESPERF-NEXT: nds.lea.b.ze a0, a0, a1
+; RV64XANDESPERF-NEXT: ret
+ %a = add i64 %x, 3221225472
+ ret i64 %a
+}
+
+define i64 @adduw_m3_multiuse(i64 %x, i64 %y) {
+; RV64I-LABEL: adduw_m3_multiuse:
+; RV64I: # %bb.0:
+; RV64I-NEXT: li a2, 1
+; RV64I-NEXT: slli a2, a2, 32
+; RV64I-NEXT: addi a2, a2, -3
+; RV64I-NEXT: add a0, a0, a2
+; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: adduw_m3_multiuse:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: li a2, -3
+; RV64ZBA-NEXT: add.uw a0, a2, a0
+; RV64ZBA-NEXT: add.uw a1, a2, a1
+; RV64ZBA-NEXT: or a0, a0, a1
+; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: adduw_m3_multiuse:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: li a2, -3
+; RV64XANDESPERF-NEXT: nds.lea.b.ze a0, a0, a2
+; RV64XANDESPERF-NEXT: nds.lea.b.ze a1, a1, a2
+; RV64XANDESPERF-NEXT: or a0, a0, a1
+; RV64XANDESPERF-NEXT: ret
+ %a = add i64 %x, 4294967293
+ %b = add i64 %y, 4294967293
+ %c = or i64 %a, %b
+ ret i64 %c
+}
+
+define i64 @add_or_m3(i64 %x) {
+; RV64I-LABEL: add_or_m3:
+; RV64I: # %bb.0:
+; RV64I-NEXT: li a1, 1
+; RV64I-NEXT: slli a1, a1, 32
+; RV64I-NEXT: addi a1, a1, -3
+; RV64I-NEXT: or a2, a0, a1
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: add a0, a0, a2
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: add_or_m3:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: li a1, -3
+; RV64ZBA-NEXT: zext.w a1, a1
+; RV64ZBA-NEXT: or a2, a0, a1
+; RV64ZBA-NEXT: add a0, a0, a1
+; RV64ZBA-NEXT: add a0, a0, a2
+; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: add_or_m3:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: li a1, 1
+; RV64XANDESPERF-NEXT: slli a1, a1, 32
+; RV64XANDESPERF-NEXT: addi a1, a1, -3
+; RV64XANDESPERF-NEXT: or a2, a0, a1
+; RV64XANDESPERF-NEXT: add a0, a0, a1
+; RV64XANDESPERF-NEXT: add a0, a0, a2
+; RV64XANDESPERF-NEXT: ret
+ %a = add i64 %x, 4294967293
+ %o = or i64 %x, 4294967293
+ %c = add i64 %a, %o
+ ret i64 %c
+}
``````````
</details>
https://github.com/llvm/llvm-project/pull/143375
More information about the llvm-commits
mailing list