[llvm] [SelectionDAG][RISCV] Teach computeKnownBits to use range metadata for atomic_load. (PR #137119)
Matt Arsenault via llvm-commits
llvm-commits at lists.llvm.org
Wed Apr 23 23:50:43 PDT 2025
================
@@ -4382,6 +4382,38 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
     Known.Zero |= APInt::getBitsSetFrom(BitWidth, VT.getScalarSizeInBits());
     break;
   }
+  case ISD::ATOMIC_LOAD: {
+    // If we are looking at the loaded value.
+    if (Op.getResNo() == 0) {
+      auto *AT = cast<AtomicSDNode>(Op);
+      unsigned ScalarMemorySize = AT->getMemoryVT().getScalarSizeInBits();
+      KnownBits KnownScalarMemory(ScalarMemorySize);
+      if (const MDNode *MD = AT->getRanges())
+        computeKnownBitsFromRangeMetadata(*MD, KnownScalarMemory);
+
+      switch (AT->getExtensionType()) {
+      case ISD::ZEXTLOAD:
+        Known = KnownScalarMemory.zext(BitWidth);
+        break;
+      case ISD::SEXTLOAD:
+        Known = KnownScalarMemory.sext(BitWidth);
+        break;
+      case ISD::EXTLOAD:
+        if (TLI->getExtendForAtomicOps() == ISD::ZERO_EXTEND)
+          Known = KnownScalarMemory.zext(BitWidth);
+        else if (TLI->getExtendForAtomicOps() == ISD::SIGN_EXTEND)
+          Known = KnownScalarMemory.sext(BitWidth);
----------------
arsenm wrote:
Avoid the repeated getExtendForAtomicOps calls and switch over it instead? We should get rid of it eventually.
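A minimal sketch of that suggestion, querying the target once and switching over the result. It reuses the KnownScalarMemory, BitWidth, and TLI names from the diff above; the default arm falling back to anyext is an assumption, not part of the posted patch:

      case ISD::EXTLOAD:
        // Ask the target once which extension atomic ops perform.
        switch (TLI->getExtendForAtomicOps()) {
        case ISD::ZERO_EXTEND:
          Known = KnownScalarMemory.zext(BitWidth);
          break;
        case ISD::SIGN_EXTEND:
          Known = KnownScalarMemory.sext(BitWidth);
          break;
        default:
          // Any-extend: the bits above the memory width are unknown.
          Known = KnownScalarMemory.anyext(BitWidth);
          break;
        }
        break;

The ZEXTLOAD and SEXTLOAD arms would stay as they are; only the EXTLOAD arm changes shape.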
https://github.com/llvm/llvm-project/pull/137119
More information about the llvm-commits mailing list