[llvm] e7fbb38 - [SROA] Break typed pointer support
Nikita Popov via llvm-commits
llvm-commits at lists.llvm.org
Thu Jan 26 02:42:44 PST 2023
Author: Nikita Popov
Date: 2023-01-26T11:42:12+01:00
New Revision: e7fbb381c8476ab740bc1a494ce3eeb1dc551981
URL: https://github.com/llvm/llvm-project/commit/e7fbb381c8476ab740bc1a494ce3eeb1dc551981
DIFF: https://github.com/llvm/llvm-project/commit/e7fbb381c8476ab740bc1a494ce3eeb1dc551981.diff
LOG: [SROA] Break typed pointer support
This removes typed pointer support in a prominent place in the
optimization pipeline, to ensure that any non-trivial consumers
of tip-of-tree LLVM are aware that this is no longer a supported
configuration.
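Concretely, with opaque pointers a byte-offset access no longer needs the
"natural" struct/array GEP walk that the deleted helpers below implemented:
an i8 GEP plus an optional address-space cast is always sufficient. A minimal
sketch of that pattern, mirroring the simplified getAdjustedPtr() in the diff
(the helper name and includes here are illustrative, not part of this commit):

    // Illustrative sketch: byte-offset adjustment under opaque pointers.
    #include "llvm/IR/IRBuilder.h"

    using namespace llvm;

    static Value *adjustByBytes(IRBuilder<> &IRB, Value *Ptr, uint64_t Bytes,
                                Type *DestPtrTy) {
      // Any byte offset becomes a plain i8 GEP; no element type is consulted.
      if (Bytes != 0)
        Ptr = IRB.CreateInBoundsGEP(IRB.getInt8Ty(), Ptr, IRB.getInt64(Bytes),
                                    "off");
      // Only an address-space mismatch can remain to be cast away.
      return IRB.CreatePointerBitCastOrAddrSpaceCast(Ptr, DestPtrTy, "cast");
    }

For a nonzero offset this emits e.g. `getelementptr inbounds i8, ptr %p, i64 8`;
the trailing cast folds to a no-op unless the address spaces differ.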
Added:
Modified:
llvm/lib/Transforms/Scalar/SROA.cpp
Removed:
################################################################################
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index 8339981e1bdc4..da277ff62b73c 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -1654,238 +1654,18 @@ static bool rewriteSelectInstMemOps(SelectInst &SI,
return CFGChanged;
}
-/// Build a GEP out of a base pointer and indices.
-///
-/// This will return the BasePtr if that is valid, or build a new GEP
-/// instruction using the IRBuilder if GEP-ing is needed.
-static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr,
- SmallVectorImpl<Value *> &Indices,
- const Twine &NamePrefix) {
- if (Indices.empty())
- return BasePtr;
-
- // A single zero index is a no-op, so check for this and avoid building a GEP
- // in that case.
- if (Indices.size() == 1 && cast<ConstantInt>(Indices.back())->isZero())
- return BasePtr;
-
- // buildGEP() is only called for non-opaque pointers.
- return IRB.CreateInBoundsGEP(
- BasePtr->getType()->getNonOpaquePointerElementType(), BasePtr, Indices,
- NamePrefix + "sroa_idx");
-}
-
-/// Get a natural GEP off of the BasePtr walking through Ty toward
-/// TargetTy without changing the offset of the pointer.
-///
-/// This routine assumes we've already established a properly offset GEP with
-/// Indices, and arrived at the Ty type. The goal is to continue to GEP with
-/// zero-indices down through type layers until we find one the same as
-/// TargetTy. If we can't find one with the same type, we at least try to use
-/// one with the same size. If none of that works, we just produce the GEP as
-/// indicated by Indices to have the correct offset.
-static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL,
- Value *BasePtr, Type *Ty, Type *TargetTy,
- SmallVectorImpl<Value *> &Indices,
- const Twine &NamePrefix) {
- if (Ty == TargetTy)
- return buildGEP(IRB, BasePtr, Indices, NamePrefix);
-
- // Offset size to use for the indices.
- unsigned OffsetSize = DL.getIndexTypeSizeInBits(BasePtr->getType());
-
- // See if we can descend into a struct and locate a field with the correct
- // type.
- unsigned NumLayers = 0;
- Type *ElementTy = Ty;
- do {
- if (ElementTy->isPointerTy())
- break;
-
- if (ArrayType *ArrayTy = dyn_cast<ArrayType>(ElementTy)) {
- ElementTy = ArrayTy->getElementType();
- Indices.push_back(IRB.getIntN(OffsetSize, 0));
- } else if (VectorType *VectorTy = dyn_cast<VectorType>(ElementTy)) {
- ElementTy = VectorTy->getElementType();
- Indices.push_back(IRB.getInt32(0));
- } else if (StructType *STy = dyn_cast<StructType>(ElementTy)) {
- if (STy->element_begin() == STy->element_end())
- break; // Nothing left to descend into.
- ElementTy = *STy->element_begin();
- Indices.push_back(IRB.getInt32(0));
- } else {
- break;
- }
- ++NumLayers;
- } while (ElementTy != TargetTy);
- if (ElementTy != TargetTy)
- Indices.erase(Indices.end() - NumLayers, Indices.end());
-
- return buildGEP(IRB, BasePtr, Indices, NamePrefix);
-}
-
-/// Get a natural GEP from a base pointer to a particular offset and
-/// resulting in a particular type.
-///
-/// The goal is to produce a "natural" looking GEP that works with the existing
-/// composite types to arrive at the appropriate offset and element type for
-/// a pointer. TargetTy is the element type the returned GEP should point-to if
-/// possible. We recurse by decreasing Offset, adding the appropriate index to
-/// Indices, and setting Ty to the result subtype.
-///
-/// If no natural GEP can be constructed, this function returns null.
-static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &DL,
- Value *Ptr, APInt Offset, Type *TargetTy,
- SmallVectorImpl<Value *> &Indices,
- const Twine &NamePrefix) {
- PointerType *Ty = cast<PointerType>(Ptr->getType());
-
- // Don't consider any GEPs through an i8* as natural unless the TargetTy is
- // an i8.
- if (Ty == IRB.getInt8PtrTy(Ty->getAddressSpace()) && TargetTy->isIntegerTy(8))
- return nullptr;
-
- Type *ElementTy = Ty->getNonOpaquePointerElementType();
- if (!ElementTy->isSized())
- return nullptr; // We can't GEP through an unsized element.
-
- SmallVector<APInt> IntIndices = DL.getGEPIndicesForOffset(ElementTy, Offset);
- if (Offset != 0)
- return nullptr;
-
- for (const APInt &Index : IntIndices)
- Indices.push_back(IRB.getInt(Index));
- return getNaturalGEPWithType(IRB, DL, Ptr, ElementTy, TargetTy, Indices,
- NamePrefix);
-}
-
/// Compute an adjusted pointer from Ptr by Offset bytes where the
/// resulting pointer has PointerTy.
-///
-/// This tries very hard to compute a "natural" GEP which arrives at the offset
-/// and produces the pointer type desired. Where it cannot, it will try to use
-/// the natural GEP to arrive at the offset and bitcast to the type. Where that
-/// fails, it will try to use an existing i8* and GEP to the byte offset and
-/// bitcast to the type.
-///
-/// The strategy for finding the more natural GEPs is to peel off layers of the
-/// pointer, walking back through bit casts and GEPs, searching for a base
-/// pointer from which we can compute a natural GEP with the desired
-/// properties. The algorithm tries to fold as many constant indices into
-/// a single GEP as possible, thus making each GEP more independent of the
-/// surrounding code.
static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr,
APInt Offset, Type *PointerTy,
const Twine &NamePrefix) {
- // Create i8 GEP for opaque pointers.
- if (Ptr->getType()->isOpaquePointerTy()) {
- if (Offset != 0)
- Ptr = IRB.CreateInBoundsGEP(IRB.getInt8Ty(), Ptr, IRB.getInt(Offset),
- NamePrefix + "sroa_idx");
- return IRB.CreatePointerBitCastOrAddrSpaceCast(Ptr, PointerTy,
- NamePrefix + "sroa_cast");
- }
-
- // Even though we don't look through PHI nodes, we could be called on an
- // instruction in an unreachable block, which may be on a cycle.
- SmallPtrSet<Value *, 4> Visited;
- Visited.insert(Ptr);
- SmallVector<Value *, 4> Indices;
-
- // We may end up computing an offset pointer that has the wrong type. If we
- // never are able to compute one directly that has the correct type, we'll
- // fall back to it, so keep it and the base it was computed from around here.
- Value *OffsetPtr = nullptr;
- Value *OffsetBasePtr;
-
- // Remember any i8 pointer we come across to re-use if we need to do a raw
- // byte offset.
- Value *Int8Ptr = nullptr;
- APInt Int8PtrOffset(Offset.getBitWidth(), 0);
-
- PointerType *TargetPtrTy = cast<PointerType>(PointerTy);
- Type *TargetTy = TargetPtrTy->getNonOpaquePointerElementType();
-
- // As `addrspacecast` is , `Ptr` (the storage pointer) may have different
- // address space from the expected `PointerTy` (the pointer to be used).
- // Adjust the pointer type based on the original storage pointer.
- auto AS = cast<PointerType>(Ptr->getType())->getAddressSpace();
- PointerTy = TargetTy->getPointerTo(AS);
-
- do {
- // First fold any existing GEPs into the offset.
- while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
- APInt GEPOffset(Offset.getBitWidth(), 0);
- if (!GEP->accumulateConstantOffset(DL, GEPOffset))
- break;
- Offset += GEPOffset;
- Ptr = GEP->getPointerOperand();
- if (!Visited.insert(Ptr).second)
- break;
- }
-
- // See if we can perform a natural GEP here.
- Indices.clear();
- if (Value *P = getNaturalGEPWithOffset(IRB, DL, Ptr, Offset, TargetTy,
- Indices, NamePrefix)) {
- // If we have a new natural pointer at the offset, clear out any old
- // offset pointer we computed. Unless it is the base pointer or
- // a non-instruction, we built a GEP we don't need. Zap it.
- if (OffsetPtr && OffsetPtr != OffsetBasePtr)
- if (Instruction *I = dyn_cast<Instruction>(OffsetPtr)) {
- assert(I->use_empty() && "Built a GEP with uses some how!");
- I->eraseFromParent();
- }
- OffsetPtr = P;
- OffsetBasePtr = Ptr;
- // If we also found a pointer of the right type, we're done.
- if (P->getType() == PointerTy)
- break;
- }
-
- // Stash this pointer if we've found an i8*.
- if (Ptr->getType()->isIntegerTy(8)) {
- Int8Ptr = Ptr;
- Int8PtrOffset = Offset;
- }
-
- // Peel off a layer of the pointer and update the offset appropriately.
- if (Operator::getOpcode(Ptr) == Instruction::BitCast) {
- Ptr = cast<Operator>(Ptr)->getOperand(0);
- } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
- if (GA->isInterposable())
- break;
- Ptr = GA->getAliasee();
- } else {
- break;
- }
- assert(Ptr->getType()->isPointerTy() && "Unexpected operand type!");
- } while (Visited.insert(Ptr).second);
-
- if (!OffsetPtr) {
- if (!Int8Ptr) {
- Int8Ptr = IRB.CreateBitCast(
- Ptr, IRB.getInt8PtrTy(PointerTy->getPointerAddressSpace()),
- NamePrefix + "sroa_raw_cast");
- Int8PtrOffset = Offset;
- }
-
- OffsetPtr = Int8PtrOffset == 0
- ? Int8Ptr
- : IRB.CreateInBoundsGEP(IRB.getInt8Ty(), Int8Ptr,
- IRB.getInt(Int8PtrOffset),
- NamePrefix + "sroa_raw_idx");
- }
- Ptr = OffsetPtr;
-
- // On the off chance we were targeting i8*, guard the bitcast here.
- if (cast<PointerType>(Ptr->getType()) != TargetPtrTy) {
- Ptr = IRB.CreatePointerBitCastOrAddrSpaceCast(Ptr,
- TargetPtrTy,
- NamePrefix + "sroa_cast");
- }
-
- return Ptr;
+ assert(Ptr->getType()->isOpaquePointerTy() &&
+ "Only opaque pointers supported");
+ if (Offset != 0)
+ Ptr = IRB.CreateInBoundsGEP(IRB.getInt8Ty(), Ptr, IRB.getInt(Offset),
+ NamePrefix + "sroa_idx");
+ return IRB.CreatePointerBitCastOrAddrSpaceCast(Ptr, PointerTy,
+ NamePrefix + "sroa_cast");
}
/// Compute the adjusted alignment for a load or store from an offset.
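For downstream consumers the practical takeaway is to make sure IR is built
in opaque pointer mode before it reaches SROA. A rough sketch, assuming the
transition-era LLVMContext switches (setOpaquePointers / supportsTypedPointers)
that were still around at the time of this commit:

    #include "llvm/IR/LLVMContext.h"
    #include <cassert>

    using namespace llvm;

    // Sketch only: force opaque pointer mode on a context so typed pointers
    // never reach the assert added to getAdjustedPtr() above. The two
    // LLVMContext calls are assumed transition-era APIs, not part of this diff.
    void configureForOpaquePointers(LLVMContext &Ctx) {
      Ctx.setOpaquePointers(true);
      assert(!Ctx.supportsTypedPointers() &&
             "context is still configured for typed pointers");
    }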