[llvm] [RISCV] Handle more (add x, C) -> (sub x, -C) cases (PR #138705)
Piotr Fusik via llvm-commits
llvm-commits at lists.llvm.org
Tue May 6 08:23:19 PDT 2025
https://github.com/pfusik updated https://github.com/llvm/llvm-project/pull/138705
From 27f0ffc64dd02bcd9fd4e3755b7f28fae6375142 Mon Sep 17 00:00:00 2001
From: Piotr Fusik <p.fusik at samsung.com>
Date: Tue, 6 May 2025 17:15:28 +0200
Subject: [PATCH 1/2] [RISCV] Handle more (add x, C) -> (sub x, -C) cases
This is a follow-up to #137309, adding support for:
- a constant with multiple uses, as long as every use is an add
- vector splats (vadd.vx -> vsub.vx)
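For example, when a constant is shared by two adds (see the new
add_multiuse_const test below), the negated constant is materialized once
and both adds lower to sub:

  li   a2, -1
  srli a2, a2, 24
  sub  a0, a0, a2
  sub  a1, a1, a2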
---
llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp | 6 ++-
.../Target/RISCV/RISCVInstrInfoVPseudos.td | 30 +++++++++++
.../Target/RISCV/RISCVInstrInfoVSDPatterns.td | 13 +++++
.../Target/RISCV/RISCVInstrInfoVVLPatterns.td | 13 +++++
llvm/test/CodeGen/RISCV/add-imm64-to-sub.ll | 17 +++++-
llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll | 54 +++++++++++++++++++
6 files changed, 131 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 86bdb4c7fd24c..a0d96a1f264d5 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -3207,11 +3207,15 @@ bool RISCVDAGToDAGISel::selectSHXADD_UWOp(SDValue N, unsigned ShAmt,
}
bool RISCVDAGToDAGISel::selectNegImm(SDValue N, SDValue &Val) {
- if (!isa<ConstantSDNode>(N) || !N.hasOneUse())
+ if (!isa<ConstantSDNode>(N))
return false;
int64_t Imm = cast<ConstantSDNode>(N)->getSExtValue();
if (isInt<32>(Imm))
return false;
+
+ if (any_of(N->users(), [](const SDNode *U) { return U->getOpcode() != ISD::ADD; }))
+ return false;
+
int OrigImmCost = RISCVMatInt::getIntMatCost(APInt(64, Imm), 64, *Subtarget,
/*CompressionCost=*/true);
int NegImmCost = RISCVMatInt::getIntMatCost(APInt(64, -Imm), 64, *Subtarget,
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 5edcfdf2654a4..44e7dfa22c378 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -6228,6 +6228,36 @@ foreach vti = AllIntegerVectors in {
}
}
+// (add v, C) -> (sub v, -C) if -C cheaper to materialize
+defvar I64IntegerVectors = !filter(vti, AllIntegerVectors, !eq(vti.SEW, 64));
+foreach vti = I64IntegerVectors in {
+ let Predicates = [HasVInstructionsI64] in {
+ def : Pat<(vti.Vector (int_riscv_vadd (vti.Vector vti.RegClass:$passthru),
+ (vti.Vector vti.RegClass:$rs1),
+ (i64 negImm:$rs2),
+ VLOpFrag)),
+ (!cast<Instruction>("PseudoVSUB_VX_"#vti.LMul.MX)
+ vti.RegClass:$passthru,
+ vti.RegClass:$rs1,
+ negImm:$rs2,
+ GPR:$vl, vti.Log2SEW, TU_MU)>;
+ def : Pat<(vti.Vector (int_riscv_vadd_mask (vti.Vector vti.RegClass:$passthru),
+ (vti.Vector vti.RegClass:$rs1),
+ (i64 negImm:$rs2),
+ (vti.Mask VMV0:$vm),
+ VLOpFrag,
+ (i64 timm:$policy))),
+ (!cast<Instruction>("PseudoVSUB_VX_"#vti.LMul.MX#"_MASK")
+ vti.RegClass:$passthru,
+ vti.RegClass:$rs1,
+ negImm:$rs2,
+ (vti.Mask VMV0:$vm),
+ GPR:$vl,
+ vti.Log2SEW,
+ (i64 timm:$policy))>;
+ }
+}
+
//===----------------------------------------------------------------------===//
// 11.2. Vector Widening Integer Add/Subtract
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index 93228f2a9e167..c083ac6f57643 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -907,6 +907,19 @@ foreach vti = AllIntegerVectors in {
}
}
+// (add v, C) -> (sub v, -C) if -C cheaper to materialize
+foreach vti = I64IntegerVectors in {
+ let Predicates = [HasVInstructionsI64] in {
+ def : Pat<(add (vti.Vector vti.RegClass:$rs1),
+ (vti.Vector (SplatPat (i64 negImm:$rs2)))),
+ (!cast<Instruction>("PseudoVSUB_VX_"#vti.LMul.MX)
+ (vti.Vector (IMPLICIT_DEF)),
+ vti.RegClass:$rs1,
+ negImm:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
+ }
+}
+
// 11.2. Vector Widening Integer Add and Subtract
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<add, sext_oneuse, "PseudoVWADD">;
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<add, zext_oneuse, "PseudoVWADDU">;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index 2b0b31c79c7a7..5975bcd2a323b 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -1957,6 +1957,19 @@ foreach vti = AllIntegerVectors in {
}
}
+// (add v, C) -> (sub v, -C) if -C cheaper to materialize
+foreach vti = I64IntegerVectors in {
+ let Predicates = [HasVInstructionsI64] in {
+ def : Pat<(riscv_add_vl (vti.Vector vti.RegClass:$rs1),
+ (vti.Vector (SplatPat (i64 negImm:$rs2))),
+ vti.RegClass:$passthru, (vti.Mask VMV0:$vm), VLOpFrag),
+ (!cast<Instruction>("PseudoVSUB_VX_"#vti.LMul.MX#"_MASK")
+ vti.RegClass:$passthru, vti.RegClass:$rs1,
+ negImm:$rs2, (vti.Mask VMV0:$vm),
+ GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+ }
+}
+
// 11.2. Vector Widening Integer Add/Subtract
defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwadd_vl, riscv_vwadd_w_vl, "PseudoVWADD">;
defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwaddu_vl, riscv_vwaddu_w_vl, "PseudoVWADDU">;
diff --git a/llvm/test/CodeGen/RISCV/add-imm64-to-sub.ll b/llvm/test/CodeGen/RISCV/add-imm64-to-sub.ll
index ddcf4e1a8aa77..3c02efbfe02f9 100644
--- a/llvm/test/CodeGen/RISCV/add-imm64-to-sub.ll
+++ b/llvm/test/CodeGen/RISCV/add-imm64-to-sub.ll
@@ -56,6 +56,21 @@ define i64 @add_multiuse(i64 %x) {
; CHECK-NEXT: and a0, a0, a1
; CHECK-NEXT: ret
%add = add i64 %x, -1099511627775
- %xor = and i64 %add, -1099511627775
+ %and = and i64 %add, -1099511627775
+ ret i64 %and
+}
+
+define i64 @add_multiuse_const(i64 %x, i64 %y) {
+; CHECK-LABEL: add_multiuse_const:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, -1
+; CHECK-NEXT: srli a2, a2, 24
+; CHECK-NEXT: sub a0, a0, a2
+; CHECK-NEXT: sub a1, a1, a2
+; CHECK-NEXT: xor a0, a0, a1
+; CHECK-NEXT: ret
+ %a = add i64 %x, -1099511627775
+ %b = add i64 %y, -1099511627775
+ %xor = xor i64 %a, %b
ret i64 %xor
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll
index ac22e11d30cdc..a95ad7f744af3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll
@@ -865,3 +865,57 @@ define <vscale x 8 x i32> @vadd_vv_mask_negative1_nxv8i32(<vscale x 8 x i32> %va
%vd = add <vscale x 8 x i32> %vc, %vs
ret <vscale x 8 x i32> %vd
}
+
+define <vscale x 1 x i64> @vadd_vx_imm64_to_sub(<vscale x 1 x i64> %va) nounwind {
+; RV32-LABEL: vadd_vx_imm64_to_sub:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: li a0, -256
+; RV32-NEXT: li a1, 1
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: sw a0, 12(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vadd.vv v8, v8, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vadd_vx_imm64_to_sub:
+; RV64: # %bb.0:
+; RV64-NEXT: li a0, -1
+; RV64-NEXT: slli a0, a0, 40
+; RV64-NEXT: addi a0, a0, 1
+; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma
+; RV64-NEXT: vadd.vx v8, v8, a0
+; RV64-NEXT: ret
+ %vc = add <vscale x 1 x i64> splat (i64 -1099511627775), %va
+ ret <vscale x 1 x i64> %vc
+}
+
+define <vscale x 1 x i64> @vadd_vx_imm64_to_sub_swapped(<vscale x 1 x i64> %va) nounwind {
+; RV32-LABEL: vadd_vx_imm64_to_sub_swapped:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: li a0, -256
+; RV32-NEXT: li a1, 1
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: sw a0, 12(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vadd.vv v8, v8, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vadd_vx_imm64_to_sub_swapped:
+; RV64: # %bb.0:
+; RV64-NEXT: li a0, -1
+; RV64-NEXT: slli a0, a0, 40
+; RV64-NEXT: addi a0, a0, 1
+; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma
+; RV64-NEXT: vadd.vx v8, v8, a0
+; RV64-NEXT: ret
+ %vc = add <vscale x 1 x i64> %va, splat (i64 -1099511627775)
+ ret <vscale x 1 x i64> %vc
+}
From 665ae26e940b159da843157f494e0f88a0417b9e Mon Sep 17 00:00:00 2001
From: Piotr Fusik <p.fusik at samsung.com>
Date: Tue, 6 May 2025 17:22:58 +0200
Subject: [PATCH 2/2] [RISCV] clang-format
---
llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index a0d96a1f264d5..e250a7d432218 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -3213,7 +3213,8 @@ bool RISCVDAGToDAGISel::selectNegImm(SDValue N, SDValue &Val) {
if (isInt<32>(Imm))
return false;
- if (any_of(N->users(), [](const SDNode *U) { return U->getOpcode() != ISD::ADD; }))
+ if (any_of(N->users(),
+ [](const SDNode *U) { return U->getOpcode() != ISD::ADD; }))
return false;
int OrigImmCost = RISCVMatInt::getIntMatCost(APInt(64, Imm), 64, *Subtarget,