[llvm] [RISCV] Combine ADDD(lo, hi, x, 0) -> WADDAU(lo, hi, x, 0). Combine WADDAU (WADDAU lo, hi, x, 0), y, 0 -> WADDAU lo, hi, x, y (PR #181396)
via llvm-commits
llvm-commits at lists.llvm.org
Fri Feb 13 10:27:20 PST 2026
- Previous message: [llvm] [RISCV] Combine ADDD(lo, hi, x, 0) -> WADDAU(lo, hi, x, 0). Combine WADDAU (WADDAU lo, hi, x, 0), y, 0 -> WADDAU lo, hi, x, y (PR #181396)
- Next message: [llvm] [RISCV] Combine ADDD(lo, hi, x, 0) -> WADDAU(lo, hi, x, 0). Combine WADDAU (WADDAU lo, hi, x, 0), y, 0 -> WADDAU lo, hi, x, y (PR #181396)
- Messages sorted by:
[ date ]
[ thread ]
[ subject ]
[ author ]
llvmbot wrote:
<!--LLVM PR SUMMARY COMMENT-->
@llvm/pr-subscribers-backend-risc-v
Author: Craig Topper (topperc)
<details>
<summary>Changes</summary>
WADDAU is rd += zext(rs1) + zext(rs2)
If we only have one 32-bit input, we can force rs2 to zero, which
avoids having to zero the upper part of a register pair in order to use ADDD.
Unfortunately, WADDAU clobbers rd so it might need a GPRPair copy
if we need the old value of rd. We might need to look into that in
the future. Maybe convertToThreeAddress could turn
it back into ADDD+WADDU or ADDD+LI.
Assisted-by: claude
---
Full diff: https://github.com/llvm/llvm-project/pull/181396.diff
4 Files Affected:
- (modified) llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp (+27-18)
- (modified) llvm/lib/Target/RISCV/RISCVISelLowering.cpp (+48)
- (modified) llvm/lib/Target/RISCV/RISCVInstrInfoP.td (+9)
- (modified) llvm/test/CodeGen/RISCV/rv32p.ll (+40-6)
``````````diff
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 021fe05a3d7fd..9dd4c9f25600a 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -1991,7 +1991,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
// Fall through to regular ADDD selection.
[[fallthrough]];
case RISCVISD::SUBD:
- case RISCVISD::PPAIRE_DB: {
+ case RISCVISD::PPAIRE_DB:
+ case RISCVISD::WADDAU: {
assert(!Subtarget->is64Bit() && "Unexpected opcode");
assert((Node->getOpcode() != RISCVISD::PPAIRE_DB ||
Subtarget->enablePExtSIMDCodeGen()) &&
@@ -2009,25 +2010,33 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
SDValue Op1Lo = Node->getOperand(2);
SDValue Op1Hi = Node->getOperand(3);
- SDValue Op1 = buildGPRPair(CurDAG, DL, MVT::Untyped, Op1Lo, Op1Hi);
- unsigned Opc;
- switch (Node->getOpcode()) {
- default:
- llvm_unreachable("Unexpected opcode");
- case RISCVISD::ADDD:
- Opc = RISCV::ADDD;
- break;
- case RISCVISD::SUBD:
- Opc = RISCV::SUBD;
- break;
- case RISCVISD::PPAIRE_DB:
- Opc = RISCV::PPAIRE_DB;
- break;
- }
+ MachineSDNode *New;
+ if (Node->getOpcode() == RISCVISD::WADDAU) {
+ // WADDAU: rd = rd + zext(rs1) + zext(rs2)
+ // Op0 is the accumulator (GPRPair), Op1Lo and Op1Hi are the two 32-bit
+ // values to add.
+ New = CurDAG->getMachineNode(RISCV::WADDAU, DL, MVT::Untyped, Op0, Op1Lo,
+ Op1Hi);
+ } else {
+ SDValue Op1 = buildGPRPair(CurDAG, DL, MVT::Untyped, Op1Lo, Op1Hi);
- MachineSDNode *New =
- CurDAG->getMachineNode(Opc, DL, MVT::Untyped, Op0, Op1);
+ unsigned Opc;
+ switch (Node->getOpcode()) {
+ default:
+ llvm_unreachable("Unexpected opcode");
+ case RISCVISD::ADDD:
+ Opc = RISCV::ADDD;
+ break;
+ case RISCVISD::SUBD:
+ Opc = RISCV::SUBD;
+ break;
+ case RISCVISD::PPAIRE_DB:
+ Opc = RISCV::PPAIRE_DB;
+ break;
+ }
+ New = CurDAG->getMachineNode(Opc, DL, MVT::Untyped, Op0, Op1);
+ }
auto [Lo, Hi] = extractGPRPair(CurDAG, DL, SDValue(New, 0));
ReplaceUses(SDValue(Node, 0), Lo);
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index b368127c64c98..66fa30540d95d 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -21350,6 +21350,54 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
}
break;
}
+ case RISCVISD::ADDD: {
+ assert(!Subtarget.is64Bit() && Subtarget.hasStdExtP() &&
+ "ADDD is only for RV32 with P extension");
+
+ SDValue Op0Lo = N->getOperand(0);
+ SDValue Op0Hi = N->getOperand(1);
+ SDValue Op1Lo = N->getOperand(2);
+ SDValue Op1Hi = N->getOperand(3);
+
+ // (ADDD lo, hi, x, 0) -> (WADDAU lo, hi, x, 0)
+ if (isNullConstant(Op1Hi)) {
+ SDValue Result =
+ DAG.getNode(RISCVISD::WADDAU, DL, DAG.getVTList(MVT::i32, MVT::i32),
+ Op0Lo, Op0Hi, Op1Lo, DAG.getConstant(0, DL, MVT::i32));
+ return DCI.CombineTo(N, Result.getValue(0), Result.getValue(1));
+ }
+ // (ADDD x, 0, lo, hi) -> (WADDAU lo, hi, x, 0)
+ if (isNullConstant(Op0Hi)) {
+ SDValue Result =
+ DAG.getNode(RISCVISD::WADDAU, DL, DAG.getVTList(MVT::i32, MVT::i32),
+ Op1Lo, Op1Hi, Op0Lo, DAG.getConstant(0, DL, MVT::i32));
+ return DCI.CombineTo(N, Result.getValue(0), Result.getValue(1));
+ }
+ break;
+ }
+ case RISCVISD::WADDAU: {
+ assert(!Subtarget.is64Bit() && Subtarget.hasStdExtP() &&
+ "WADDAU is only for RV32 with P extension");
+ // (WADDAU (WADDAU lo, hi, x, 0), y, 0) -> (WADDAU lo, hi, x, y)
+ SDValue Op0Lo = N->getOperand(0);
+ SDValue Op0Hi = N->getOperand(1);
+ SDValue Op1 = N->getOperand(2);
+ SDValue Op2 = N->getOperand(3);
+
+ // Check if this WADDAU has a zero second operand and the accumulator
+ // comes from another WADDAU with a zero second operand.
+ // FIXME: Canonicalize zero Op1 to Op2.
+ if (isNullConstant(Op2) && Op0Lo.getOpcode() == RISCVISD::WADDAU &&
+ Op0Lo.getNode() == Op0Hi.getNode() && Op0Lo.getResNo() == 0 &&
+ Op0Hi.getResNo() == 1 && Op0Lo.hasOneUse() && Op0Hi.hasOneUse() &&
+ isNullConstant(Op0Lo.getOperand(3))) {
+ SDValue Result = DAG.getNode(
+ RISCVISD::WADDAU, DL, DAG.getVTList(MVT::i32, MVT::i32),
+ Op0Lo.getOperand(0), Op0Lo.getOperand(1), Op0Lo.getOperand(2), Op1);
+ return DCI.CombineTo(N, Result.getValue(0), Result.getValue(1));
+ }
+ break;
+ }
case RISCVISD::FMV_W_X_RV64: {
// If the input to FMV_W_X_RV64 is just FMV_X_ANYEXTW_RV64 the the
// conversion is unnecessary and can be replaced with the
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoP.td b/llvm/lib/Target/RISCV/RISCVInstrInfoP.td
index d154687d22754..857d9f4575e04 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoP.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoP.td
@@ -1493,6 +1493,15 @@ def riscv_addd : RVSDNode<"ADDD", SDT_RISCVIntBinOpD,
[SDNPCommutative, SDNPAssociative]>;
def riscv_subd : RVSDNode<"SUBD", SDT_RISCVIntBinOpD>;
+def SDT_RISCVWideningAddSubAccumulate : SDTypeProfile<2, 4, [SDTCisVT<0, i32>,
+ SDTCisVT<1, i32>,
+ SDTCisSameAs<0, 2>,
+ SDTCisSameAs<1, 3>,
+ SDTCisVT<4, i32>,
+ SDTCisVT<5, i32>]>;
+// Widening add accumulate unsigned: rd = rd + zext(rs1) + zext(rs2)
+def riscv_waddau : RVSDNode<"WADDAU", SDT_RISCVWideningAddSubAccumulate>;
+
def riscv_wmulsu : RVSDNode<"WMULSU", SDTIntBinHiLoOp>;
// Narrowing shift: res = nsrl(lo, hi, shamt) is equivalent to
diff --git a/llvm/test/CodeGen/RISCV/rv32p.ll b/llvm/test/CodeGen/RISCV/rv32p.ll
index 3f6b3e8184cf0..1cc42bc9c592f 100644
--- a/llvm/test/CodeGen/RISCV/rv32p.ll
+++ b/llvm/test/CodeGen/RISCV/rv32p.ll
@@ -186,16 +186,15 @@ define i64 @cls_i64(i64 %x) {
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: xor a0, a0, a2
; CHECK-NEXT: clz a0, a0
-; CHECK-NEXT: addi a0, a0, 32
+; CHECK-NEXT: addi a2, a0, 32
; CHECK-NEXT: j .LBB15_3
; CHECK-NEXT: .LBB15_2:
; CHECK-NEXT: xor a1, a1, a2
-; CHECK-NEXT: clz a0, a1
+; CHECK-NEXT: clz a2, a1
; CHECK-NEXT: .LBB15_3:
-; CHECK-NEXT: li a1, 0
-; CHECK-NEXT: li a2, -1
-; CHECK-NEXT: mv a3, a2
-; CHECK-NEXT: addd a0, a0, a2
+; CHECK-NEXT: li a0, -1
+; CHECK-NEXT: mv a1, a0
+; CHECK-NEXT: waddau a0, a2, zero
; CHECK-NEXT: ret
%a = ashr i64 %x, 63
%b = xor i64 %x, %a
@@ -1049,3 +1048,38 @@ define i32 @mvmn_xor_i32(i32 %b, i32 %mask, i32 %a) nounwind {
%xor2 = xor i32 %and, %a
ret i32 %xor2
}
+
+; acc + zext(a) -> waddau acc, a, 0
+define i64 @waddau_zext(i64 %acc, i32 %a) nounwind {
+; CHECK-LABEL: waddau_zext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: waddau a0, a2, zero
+; CHECK-NEXT: ret
+ %ext_a = zext i32 %a to i64
+ %sum = add i64 %acc, %ext_a
+ ret i64 %sum
+}
+
+; zext(a) + acc -> waddau acc, a, 0
+define i64 @waddau_zext_commuted(i64 %acc, i32 %a) nounwind {
+; CHECK-LABEL: waddau_zext_commuted:
+; CHECK: # %bb.0:
+; CHECK-NEXT: waddau a0, a2, zero
+; CHECK-NEXT: ret
+ %ext_a = zext i32 %a to i64
+ %sum = add i64 %ext_a, %acc
+ ret i64 %sum
+}
+
+; acc + zext(a) + zext(b) -> waddau acc, a, b
+define i64 @waddau_zext_chain(i64 %acc, i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: waddau_zext_chain:
+; CHECK: # %bb.0:
+; CHECK-NEXT: waddau a0, a2, a3
+; CHECK-NEXT: ret
+ %ext_a = zext i32 %a to i64
+ %ext_b = zext i32 %b to i64
+ %sum1 = add i64 %acc, %ext_a
+ %sum2 = add i64 %sum1, %ext_b
+ ret i64 %sum2
+}
``````````
</details>
https://github.com/llvm/llvm-project/pull/181396
- Previous message: [llvm] [RISCV] Combine ADDD(lo, hi, x, 0) -> WADDAU(lo, hi, x, 0). Combine WADDAU (WADDAU lo, hi, x, 0), y, 0 -> WADDAU lo, hi, x, y (PR #181396)
- Next message: [llvm] [RISCV] Combine ADDD(lo, hi, x, 0) -> WADDAU(lo, hi, x, 0). Combine WADDAU (WADDAU lo, hi, x, 0), y, 0 -> WADDAU lo, hi, x, y (PR #181396)
- Messages sorted by:
[ date ]
[ thread ]
[ subject ]
[ author ]
More information about the llvm-commits
mailing list