[llvm] 7a098ae - [RISCV] Implement computeKnownBitsForTargetNode for SHL_ADD (#159105)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Sep 17 09:03:36 PDT 2025
Author: Piotr Fusik
Date: 2025-09-17T18:03:32+02:00
New Revision: 7a098aecb65cc6ca9c877825c9436a51c36ab48d
URL: https://github.com/llvm/llvm-project/commit/7a098aecb65cc6ca9c877825c9436a51c36ab48d
DIFF: https://github.com/llvm/llvm-project/commit/7a098aecb65cc6ca9c877825c9436a51c36ab48d.diff
LOG: [RISCV] Implement computeKnownBitsForTargetNode for SHL_ADD (#159105)
Added:
Modified:
llvm/lib/Target/RISCV/RISCVISelLowering.cpp
llvm/test/CodeGen/RISCV/rv32zba.ll
llvm/test/CodeGen/RISCV/rv64zba.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 10b3f0b213811..d98872c484d0b 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -21568,6 +21568,16 @@ void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
Known = Known.sext(BitWidth);
break;
}
+ case RISCVISD::SHL_ADD: {
+ KnownBits Known2;
+ Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
+ unsigned ShAmt = Op.getConstantOperandVal(1);
+ Known <<= ShAmt;
+ Known.Zero.setLowBits(ShAmt); // the <<= operator left these bits unknown
+ Known2 = DAG.computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1);
+ Known = KnownBits::add(Known, Known2);
+ break;
+ }
case RISCVISD::CTZW: {
KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
unsigned PossibleTZ = Known2.trunc(32).countMaxTrailingZeros();
diff --git a/llvm/test/CodeGen/RISCV/rv32zba.ll b/llvm/test/CodeGen/RISCV/rv32zba.ll
index 0d490682d1ff3..a6dbd94caad4f 100644
--- a/llvm/test/CodeGen/RISCV/rv32zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zba.ll
@@ -1265,3 +1265,38 @@ define i64 @select9i64(i1 zeroext %x) {
%select = select i1 %x, i64 9, i64 0
ret i64 %select
}
+
+define ptr @shl_add_knownbits(ptr %p, i32 %i) {
+; RV32I-LABEL: shl_add_knownbits:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a1, a1, 18
+; RV32I-NEXT: srli a1, a1, 18
+; RV32I-NEXT: slli a2, a1, 1
+; RV32I-NEXT: slli a1, a1, 3
+; RV32I-NEXT: sub a1, a1, a2
+; RV32I-NEXT: srli a1, a1, 3
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: ret
+;
+; RV32ZBA-LABEL: shl_add_knownbits:
+; RV32ZBA: # %bb.0:
+; RV32ZBA-NEXT: slli a1, a1, 18
+; RV32ZBA-NEXT: srli a1, a1, 18
+; RV32ZBA-NEXT: sh1add a1, a1, a1
+; RV32ZBA-NEXT: srli a1, a1, 2
+; RV32ZBA-NEXT: add a0, a0, a1
+; RV32ZBA-NEXT: ret
+;
+; RV32XANDESPERF-LABEL: shl_add_knownbits:
+; RV32XANDESPERF: # %bb.0:
+; RV32XANDESPERF-NEXT: nds.bfoz a1, a1, 13, 0
+; RV32XANDESPERF-NEXT: nds.lea.h a1, a1, a1
+; RV32XANDESPERF-NEXT: srli a1, a1, 2
+; RV32XANDESPERF-NEXT: add a0, a0, a1
+; RV32XANDESPERF-NEXT: ret
+ %and = and i32 %i, 16383
+ %mul = mul i32 %and, 6
+ %shr = lshr i32 %mul, 3
+ %r = getelementptr i8, ptr %p, i32 %shr
+ ret ptr %r
+}
diff --git a/llvm/test/CodeGen/RISCV/rv64zba.ll b/llvm/test/CodeGen/RISCV/rv64zba.ll
index 95f9f7116fe68..c028d25169749 100644
--- a/llvm/test/CodeGen/RISCV/rv64zba.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zba.ll
@@ -4636,3 +4636,38 @@ define i32 @select9(i1 zeroext %x) {
%select = select i1 %x, i32 9, i32 0
ret i32 %select
}
+
+define ptr @shl_add_knownbits(ptr %p, i64 %i) {
+; RV64I-LABEL: shl_add_knownbits:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 50
+; RV64I-NEXT: srli a1, a1, 50
+; RV64I-NEXT: slli a2, a1, 1
+; RV64I-NEXT: slli a1, a1, 3
+; RV64I-NEXT: sub a1, a1, a2
+; RV64I-NEXT: srli a1, a1, 3
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBA-LABEL: shl_add_knownbits:
+; RV64ZBA: # %bb.0:
+; RV64ZBA-NEXT: slli a1, a1, 50
+; RV64ZBA-NEXT: srli a1, a1, 50
+; RV64ZBA-NEXT: sh1add a1, a1, a1
+; RV64ZBA-NEXT: srli a1, a1, 2
+; RV64ZBA-NEXT: add a0, a0, a1
+; RV64ZBA-NEXT: ret
+;
+; RV64XANDESPERF-LABEL: shl_add_knownbits:
+; RV64XANDESPERF: # %bb.0:
+; RV64XANDESPERF-NEXT: nds.bfoz a1, a1, 13, 0
+; RV64XANDESPERF-NEXT: nds.lea.h a1, a1, a1
+; RV64XANDESPERF-NEXT: srli a1, a1, 2
+; RV64XANDESPERF-NEXT: add a0, a0, a1
+; RV64XANDESPERF-NEXT: ret
+ %and = and i64 %i, 16383
+ %mul = mul i64 %and, 6
+ %shr = lshr i64 %mul, 3
+ %r = getelementptr i8, ptr %p, i64 %shr
+ ret ptr %r
+}
More information about the llvm-commits mailing list