[llvm] 213eb42 - Revert "[Metadata] Add a resize capability to MDNodes and add a push_back interface to MDNodes"
Wolfgang Pieb via llvm-commits
llvm-commits at lists.llvm.org
Tue Jun 7 15:49:39 PDT 2022
Author: Wolfgang Pieb
Date: 2022-06-07T15:48:31-07:00
New Revision: 213eb424e8e1d659d5bf9e440fbaf5d478ce3a11
URL: https://github.com/llvm/llvm-project/commit/213eb424e8e1d659d5bf9e440fbaf5d478ce3a11
DIFF: https://github.com/llvm/llvm-project/commit/213eb424e8e1d659d5bf9e440fbaf5d478ce3a11.diff
LOG: Revert "[Metadata] Add a resize capability to MDNodes and add a push_back interface to MDNodes"
This reverts commit e3f6eda8c6ebc567755377911746d4ca2367e649.
Failure in unittest on https://lab.llvm.org/buildbot/#/builders/171/builds/15666
Added:
Modified:
llvm/include/llvm/IR/Metadata.h
llvm/lib/IR/Metadata.cpp
llvm/unittests/IR/MetadataTest.cpp
Removed:
################################################################################
diff --git a/llvm/include/llvm/IR/Metadata.h b/llvm/include/llvm/IR/Metadata.h
index be359d94f812b..0810a814da819 100644
--- a/llvm/include/llvm/IR/Metadata.h
+++ b/llvm/include/llvm/IR/Metadata.h
@@ -934,101 +934,38 @@ struct TempMDNodeDeleter {
/// If an unresolved node is part of a cycle, \a resolveCycles() needs
/// to be called on some member of the cycle once all temporary nodes have been
/// replaced.
-///
-/// MDNodes can be large or small, as well as resizable or non-resizable.
-/// Large MDNodes' operands are allocated in a separate storage vector,
-/// whereas small MDNodes' operands are co-allocated. Distinct and temporary
-/// MDnodes are resizable, but only MDTuples support this capability.
-///
-/// Clients can add operands to resizable MDNodes using push_back().
class MDNode : public Metadata {
friend class ReplaceableMetadataImpl;
friend class LLVMContextImpl;
friend class DIArgList;
- /// The header that is coallocated with an MDNode along with its "small"
- /// operands. It is located immediately before the main body of the node.
- /// The operands are in turn located immediately before the header.
- /// For resizable MDNodes, the space for the storage vector is also allocated
- /// immediately before the header, overlapping with the operands.
+ /// The header that is coallocated with an MDNode, along with the operands.
+ /// It is located immediately before the main body of the node. The operands
+ /// are in turn located immediately before the header.
struct Header {
- bool IsResizable : 1;
- bool IsLarge : 1;
- size_t SmallSize : 4;
- size_t SmallNumOps : 4;
- size_t : sizeof(size_t) * CHAR_BIT - 10;
-
+ unsigned NumOperands;
unsigned NumUnresolved = 0;
- using LargeStorageVector = SmallVector<MDOperand, 0>;
-
- static constexpr size_t NumOpsFitInVector =
- sizeof(LargeStorageVector) / sizeof(MDOperand);
- static_assert(
- NumOpsFitInVector * sizeof(MDOperand) == sizeof(LargeStorageVector),
- "sizeof(LargeStorageVector) must be a multiple of sizeof(MDOperand)");
-
- static constexpr size_t MaxSmallSize = 15;
static constexpr size_t getOpSize(unsigned NumOps) {
return sizeof(MDOperand) * NumOps;
}
- /// Returns the number of operands the node has space for based on its
- /// allocation characteristics.
- static size_t getSmallSize(size_t NumOps, bool IsResizable, bool IsLarge) {
- return IsLarge ? NumOpsFitInVector
- : std::max(NumOps, NumOpsFitInVector * IsResizable);
- }
- /// Returns the number of bytes allocated for operands and header.
- static size_t getAllocSize(StorageType Storage, size_t NumOps) {
- return getOpSize(
- getSmallSize(NumOps, isResizable(Storage), isLarge(NumOps))) +
- sizeof(Header);
- }
-
- /// Only temporary and distinct nodes are resizable.
- static bool isResizable(StorageType Storage) { return Storage != Uniqued; }
- static bool isLarge(size_t NumOps) { return NumOps > MaxSmallSize; }
-
- size_t getAllocSize() const {
- return getOpSize(SmallSize) + sizeof(Header);
+ static constexpr size_t getAllocSize(unsigned NumOps) {
+ return getOpSize(NumOps) + sizeof(Header);
}
void *getAllocation() {
return reinterpret_cast<char *>(this + 1) -
- alignTo(getAllocSize(), alignof(uint64_t));
- }
-
- void *getLargePtr() const;
- void *getSmallPtr();
-
- LargeStorageVector &getLarge() {
- assert(IsLarge);
- return *reinterpret_cast<LargeStorageVector *>(getLargePtr());
+ alignTo(getAllocSize(NumOperands), alignof(uint64_t));
}
- const LargeStorageVector &getLarge() const {
- assert(IsLarge);
- return *reinterpret_cast<const LargeStorageVector *>(getLargePtr());
- }
-
- void resizeSmall(size_t NumOps);
- void resizeSmallToLarge(size_t NumOps);
- void resize(size_t NumOps);
-
- explicit Header(size_t NumOps, StorageType Storage);
+ explicit Header(unsigned NumOperands);
~Header();
-
MutableArrayRef<MDOperand> operands() {
- if (IsLarge)
- return getLarge();
return makeMutableArrayRef(
- reinterpret_cast<MDOperand *>(this) - SmallSize, SmallNumOps);
+ reinterpret_cast<MDOperand *>(this) - NumOperands, NumOperands);
}
-
ArrayRef<MDOperand> operands() const {
- if (IsLarge)
- return getLarge();
- return makeArrayRef(reinterpret_cast<const MDOperand *>(this) - SmallSize,
- SmallNumOps);
+ return makeArrayRef(
+ reinterpret_cast<const MDOperand *>(this) - NumOperands, NumOperands);
}
};
@@ -1045,7 +982,7 @@ class MDNode : public Metadata {
ArrayRef<Metadata *> Ops1, ArrayRef<Metadata *> Ops2 = None);
~MDNode() = default;
- void *operator new(size_t Size, size_t NumOps, StorageType Storage);
+ void *operator new(size_t Size, unsigned NumOps, StorageType Storage);
void operator delete(void *Mem);
/// Required by std, but never called.
@@ -1209,17 +1146,6 @@ class MDNode : public Metadata {
static T *storeImpl(T *N, StorageType Storage, StoreT &Store);
template <class T> static T *storeImpl(T *N, StorageType Storage);
- /// Resize the node to hold \a NumOps operands.
- ///
- /// \pre \a isTemporary() or \a isDistinct()
- /// \pre MetadataID == MDTupleKind
- void resize(size_t NumOps) {
- assert(!isUniqued() && "Resizing is not supported for uniqued nodes");
- assert(getMetadataID() == MDTupleKind &&
- "Resizing is not supported for this node kind");
- getHeader().resize(NumOps);
- }
-
private:
void handleChangedOperand(void *Ref, Metadata *New);
@@ -1281,7 +1207,7 @@ class MDNode : public Metadata {
}
/// Return number of MDNode operands.
- unsigned getNumOperands() const { return getHeader().operands().size(); }
+ unsigned getNumOperands() const { return getHeader().NumOperands; }
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Metadata *MD) {
@@ -1366,16 +1292,6 @@ class MDTuple : public MDNode {
/// Return a (temporary) clone of this.
TempMDTuple clone() const { return cloneImpl(); }
- /// Append an element to the tuple. This will resize the node.
- void push_back(Metadata *MD) {
- size_t NumOps = getNumOperands();
- resize(NumOps + 1);
- setOperand(NumOps, MD);
- }
-
- /// Shrink the operands by 1.
- void pop_back() { resize(getNumOperands() - 1); }
-
static bool classof(const Metadata *MD) {
return MD->getMetadataID() == MDTupleKind;
}
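For context, the hunk above deletes the resize/push_back interface described in the removed doc comments. A minimal sketch of how that interface was used against the pre-revert tree follows (illustrative only; only distinct and temporary MDTuples were resizable, so the example uses a distinct node):

  #include <cassert>
  #include "llvm/ADT/None.h"
  #include "llvm/IR/LLVMContext.h"
  #include "llvm/IR/Metadata.h"
  using namespace llvm;

  // Sketch of the reverted push_back()/pop_back() interface. Uniqued nodes
  // were never resizable, so start from a distinct (or temporary) tuple.
  void appendAndShrink(LLVMContext &Ctx, Metadata *Op) {
    MDTuple *Tuple = MDTuple::getDistinct(Ctx, None); // empty, resizable
    Tuple->push_back(Op);                             // grows to one operand
    assert(Tuple->getNumOperands() == 1u);
    Tuple->pop_back();                                // back to zero operands
  }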
diff --git a/llvm/lib/IR/Metadata.cpp b/llvm/lib/IR/Metadata.cpp
index 3bbf3fe99c4a0..55db2d22dcaef 100644
--- a/llvm/lib/IR/Metadata.cpp
+++ b/llvm/lib/IR/Metadata.cpp
@@ -521,13 +521,13 @@ StringRef MDString::getString() const {
"Alignment is insufficient after objects prepended to " #CLASS);
#include "llvm/IR/Metadata.def"
-void *MDNode::operator new(size_t Size, size_t NumOps, StorageType Storage) {
+void *MDNode::operator new(size_t Size, unsigned NumOps,
+ StorageType /* Storage */) {
// uint64_t is the most aligned type we need support (ensured by static_assert
// above)
- size_t AllocSize =
- alignTo(Header::getAllocSize(Storage, NumOps), alignof(uint64_t));
+ size_t AllocSize = alignTo(Header::getAllocSize(NumOps), alignof(uint64_t));
char *Mem = reinterpret_cast<char *>(::operator new(AllocSize + Size));
- Header *H = new (Mem + AllocSize - sizeof(Header)) Header(NumOps, Storage);
+ Header *H = new (Mem + AllocSize - sizeof(Header)) Header(NumOps);
return reinterpret_cast<void *>(H + 1);
}
@@ -566,88 +566,17 @@ TempMDNode MDNode::clone() const {
}
}
-MDNode::Header::Header(size_t NumOps, StorageType Storage) {
- IsLarge = isLarge(NumOps);
- IsResizable = isResizable(Storage);
- SmallSize = getSmallSize(NumOps, IsResizable, IsLarge);
- if (IsLarge) {
- SmallNumOps = 0;
- new (getLargePtr()) LargeStorageVector();
- getLarge().resize(NumOps);
- return;
- }
- SmallNumOps = NumOps;
- MDOperand *O = reinterpret_cast<MDOperand *>(this) - SmallSize;
- for (MDOperand *E = O + SmallSize; O != E;)
- (void)new (O++) MDOperand();
-}
-
-MDNode::Header::~Header() {
- if (IsLarge) {
- getLarge().~LargeStorageVector();
- return;
- }
+MDNode::Header::Header(unsigned NumOps) {
+ NumOperands = NumOps;
MDOperand *O = reinterpret_cast<MDOperand *>(this);
- for (MDOperand *E = O - SmallSize; O != E; --O)
- (void)(O - 1)->~MDOperand();
-}
-
-void *MDNode::Header::getLargePtr() const {
- static_assert(alignof(LargeStorageVector) <= alignof(Header),
- "LargeStorageVector too strongly aligned");
- return reinterpret_cast<char *>(const_cast<Header *>(this)) -
- sizeof(LargeStorageVector);
-}
-
-void *MDNode::Header::getSmallPtr() {
- static_assert(alignof(MDOperand) <= alignof(Header),
- "MDOperand too strongly aligned");
- return reinterpret_cast<char *>(const_cast<Header *>(this)) -
- sizeof(MDOperand) * SmallSize;
-}
-
-void MDNode::Header::resize(size_t NumOps) {
- assert(IsResizable && "Node is not resizable");
- if (operands().size() == NumOps)
- return;
-
- if (IsLarge)
- getLarge().resize(NumOps);
- else if (NumOps <= SmallSize)
- resizeSmall(NumOps);
- else
- resizeSmallToLarge(NumOps);
+ for (MDOperand *E = O - NumOps; O != E; --O)
+ (void)new (O - 1) MDOperand();
}
-void MDNode::Header::resizeSmall(size_t NumOps) {
- assert(!IsLarge && "Expected a small MDNode");
- assert(NumOps <= SmallSize && "NumOps too large for small resize");
-
- MutableArrayRef<MDOperand> ExistingOps = operands();
-  assert(NumOps != ExistingOps.size() && "Expected a different size");
-
- int NumNew = (int)NumOps - (int)ExistingOps.size();
- MDOperand *O = ExistingOps.end();
- if (NumNew > 0) {
- for (int I = 0, E = NumNew; I != E; ++I)
- new (O++) MDOperand();
- } else {
- for (int I = 0, E = NumNew; I != E; --I)
- (--O)->~MDOperand();
- }
- SmallNumOps = NumOps;
- assert(O == operands().end() && "Operands not (un)initialized until the end");
-}
-
-void MDNode::Header::resizeSmallToLarge(size_t NumOps) {
- assert(!IsLarge && "Expected a small MDNode");
- assert(NumOps > SmallSize && "Expected NumOps to be larger than allocation");
- LargeStorageVector NewOps;
- NewOps.resize(NumOps);
- llvm::move(operands(), NewOps.begin());
- resizeSmall(0);
- new (getLargePtr()) LargeStorageVector(std::move(NewOps));
- IsLarge = true;
+MDNode::Header::~Header() {
+ MDOperand *O = reinterpret_cast<MDOperand *>(this) - NumOperands;
+ for (MDOperand *E = O + NumOperands; O != E; ++O)
+ (void)O->~MDOperand();
}
static bool isOperandUnresolved(Metadata *Op) {
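The restored Header above relies on a single co-allocation: the operands sit immediately before the Header, which sits immediately before the node body. A simplified, self-contained sketch of that layout (placeholder types, alignment rounding and the matching operator delete omitted for brevity; these are not LLVM's actual classes):

  #include <cstddef>
  #include <new>

  namespace sketch {
  struct Operand { void *Value = nullptr; };

  struct Header {
    unsigned NumOperands;
    explicit Header(unsigned N) : NumOperands(N) {
      // Operands are constructed in the space directly before the header.
      Operand *O = reinterpret_cast<Operand *>(this) - NumOperands;
      for (unsigned I = 0; I != NumOperands; ++I)
        new (O + I) Operand();
    }
  };

  struct Node {
    // One allocation laid out as: [Operand x NumOps][Header][Node body].
    // The real code additionally rounds the prefix up to alignof(uint64_t)
    // and recomputes the allocation start when freeing the node.
    static void *operator new(size_t Size, unsigned NumOps) {
      size_t Prefix = sizeof(Operand) * NumOps + sizeof(Header);
      char *Mem = static_cast<char *>(::operator new(Prefix + Size));
      Header *H = new (Mem + Prefix - sizeof(Header)) Header(NumOps);
      return H + 1; // node body starts right after the header
    }
  };
  } // namespace sketch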
diff --git a/llvm/unittests/IR/MetadataTest.cpp b/llvm/unittests/IR/MetadataTest.cpp
index f6dbab3311c26..41ea3e9c2f5ca 100644
--- a/llvm/unittests/IR/MetadataTest.cpp
+++ b/llvm/unittests/IR/MetadataTest.cpp
@@ -3631,128 +3631,4 @@ TEST_F(MDTupleAllocationTest, Tracking) {
EXPECT_EQ(NewOps2.get(), static_cast<Metadata *>(Value2));
}
-TEST_F(MDTupleAllocationTest, Resize) {
- MDTuple *A = getTuple();
- Metadata *Value1 = getConstantAsMetadata();
- Metadata *Value2 = getConstantAsMetadata();
- Metadata *Value3 = getConstantAsMetadata();
-
- EXPECT_EQ(A->getNumOperands(), 0u);
-
- // Add a couple of elements to it, which resizes the node.
- A->push_back(Value1);
- EXPECT_EQ(A->getNumOperands(), 1u);
- EXPECT_EQ(A->getOperand(0), Value1);
-
- A->push_back(Value2);
- EXPECT_EQ(A->getNumOperands(), 2u);
- EXPECT_EQ(A->getOperand(0), Value1);
- EXPECT_EQ(A->getOperand(1), Value2);
-
- // Append another element, which should resize the node
- // to a "large" node, though not detectable by the user.
- A->push_back(Value3);
- EXPECT_EQ(A->getNumOperands(), 3u);
- EXPECT_EQ(A->getOperand(0), Value1);
- EXPECT_EQ(A->getOperand(1), Value2);
- EXPECT_EQ(A->getOperand(2), Value3);
-
- // Remove the last element
- A->pop_back();
- EXPECT_EQ(A->getNumOperands(), 2u);
- EXPECT_EQ(A->getOperand(1), Value2);
-
- // Allocate a node with 4 operands.
- Metadata *Value4 = getConstantAsMetadata();
- Metadata *Value5 = getConstantAsMetadata();
-
- Metadata *Ops[] = {Value1, Value2, Value3, Value4};
- MDTuple *B = MDTuple::getDistinct(Context, Ops);
-
- EXPECT_EQ(B->getNumOperands(), 4u);
- B->pop_back();
- EXPECT_EQ(B->getNumOperands(), 3u);
- B->push_back(Value5);
- EXPECT_EQ(B->getNumOperands(), 4u);
- EXPECT_EQ(B->getOperand(0), Value1);
- EXPECT_EQ(B->getOperand(1), Value2);
- EXPECT_EQ(B->getOperand(2), Value3);
- EXPECT_EQ(B->getOperand(3), Value5);
-
- // Check that we can resize temporary nodes as well.
- auto Temp1 = MDTuple::getTemporary(Context, None);
- EXPECT_EQ(Temp1->getNumOperands(), 0u);
-
- Temp1->push_back(Value1);
- EXPECT_EQ(Temp1->getNumOperands(), 1u);
- EXPECT_EQ(Temp1->getOperand(0), Value1);
-
- for (int i = 0; i < 11; i++)
- Temp1->push_back(Value2);
- EXPECT_EQ(Temp1->getNumOperands(), 12u);
- EXPECT_EQ(Temp1->getOperand(2), Value2);
- EXPECT_EQ(Temp1->getOperand(11), Value2);
-
- // Allocate a node that starts off as a large one.
- Metadata *OpsLarge[] = {Value1, Value2, Value3, Value4,
- Value1, Value2, Value3, Value4,
- Value1, Value2, Value3, Value4,
- Value1, Value2, Value3, Value4,
- Value1, Value2, Value3, Value4};
- MDTuple *C = MDTuple::getDistinct(Context, OpsLarge);
- EXPECT_EQ(C->getNumOperands(), 20u);
- EXPECT_EQ(C->getOperand(7), Value4);
- EXPECT_EQ(C->getOperand(13), Value2);
-
- C->push_back(Value1);
- C->push_back(Value2);
- EXPECT_EQ(C->getNumOperands(), 22u);
- EXPECT_EQ(C->getOperand(21), Value2);
- C->pop_back();
- EXPECT_EQ(C->getNumOperands(), 21u);
- EXPECT_EQ(C->getOperand(20), Value1);
-}
-
-TEST_F(MDTupleAllocationTest, Tracking2) {
- // Resize a tuple and check that we can still RAUW one of its operands.
- auto *Value1 = getConstantAsMetadata();
- MDTuple *A = getTuple();
- A->push_back(Value1);
- A->push_back(Value1);
- A->push_back(Value1); // Causes a resize to large.
- EXPECT_EQ(A->getOperand(0), Value1);
- EXPECT_EQ(A->getOperand(1), Value1);
- EXPECT_EQ(A->getOperand(2), Value1);
-
- auto *Value2 = getConstantAsMetadata();
- Value *V1 = Value1->getValue();
- Value *V2 = Value2->getValue();
- ValueAsMetadata::handleRAUW(V1, V2);
-
- EXPECT_EQ(A->getOperand(0), Value2);
- EXPECT_EQ(A->getOperand(1), Value2);
- EXPECT_EQ(A->getOperand(2), Value2);
-}
-
-#ifdef GTEST_HAS_DEATH_TEST
-typedef MetadataTest MDTupleAllocationDeathTest;
-TEST_F(MDTupleAllocationDeathTest, ResizeRejected) {
- MDTuple *A = MDTuple::get(Context, None);
- auto *Value1 = getConstantAsMetadata();
- EXPECT_DEATH(A->push_back(Value1),
- "Resizing is not supported for uniqued nodes");
-
- // Check that a node, which has been allocated as a temporary,
- // cannot be resized after it has been uniqued.
- auto *Value2 = getConstantAsMetadata();
- auto B = MDTuple::getTemporary(Context, {Value2});
- B->push_back(Value2);
- MDTuple *BUniqued = MDNode::replaceWithUniqued(std::move(B));
- EXPECT_EQ(BUniqued->getNumOperands(), 2u);
- EXPECT_EQ(BUniqued->getOperand(1), Value2);
- EXPECT_DEATH(BUniqued->push_back(Value2),
- "Resizing is not supported for uniqued nodes");
-}
-#endif
-
} // end namespace
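The deleted tests above exercised push_back() directly. With that interface reverted, one common way to "append" an operand is to rebuild the tuple from its existing operands. A minimal sketch (appendOperand is a hypothetical helper name; MDTuple::get returns a uniqued node, so use getDistinct() if distinctness must be preserved):

  #include "llvm/ADT/SmallVector.h"
  #include "llvm/IR/Metadata.h"
  using namespace llvm;

  // Rebuild-instead-of-resize: collect the current operands, add the new
  // one, and create a fresh tuple.
  MDTuple *appendOperand(LLVMContext &Ctx, const MDTuple *N, Metadata *Extra) {
    SmallVector<Metadata *, 8> Ops(N->op_begin(), N->op_end());
    Ops.push_back(Extra);
    return MDTuple::get(Ctx, Ops); // uniqued; getDistinct() keeps it distinct
  }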