[llvm] 243f056 - [llvm] Replace uses of Type::getPointerTo (NFC)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Jun 28 06:26:55 PDT 2023
Author: Youngsuk Kim
Date: 2023-06-28T09:21:34-04:00
New Revision: 243f0566dc414e8bb6e15c7a6ae490d0e3cd0656
URL: https://github.com/llvm/llvm-project/commit/243f0566dc414e8bb6e15c7a6ae490d0e3cd0656
DIFF: https://github.com/llvm/llvm-project/commit/243f0566dc414e8bb6e15c7a6ae490d0e3cd0656.diff
LOG: [llvm] Replace uses of Type::getPointerTo (NFC)
Partial progress towards removing in-tree uses of `Type::getPointerTo`,
before we can deprecate the API.
If the API is used solely to support an unnecessary bitcast, get rid of
the bitcast as well.
Reviewed By: nikic
Differential Revision: https://reviews.llvm.org/D153933
Added:
Modified:
llvm/lib/AsmParser/LLParser.cpp
llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
llvm/lib/Target/ARM/ARMParallelDSP.cpp
llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp
llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
llvm/lib/Transforms/Utils/ValueMapper.cpp
llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
Removed:
################################################################################
diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp
index 7ee5184a1e43cb..1638791e3f5eb2 100644
--- a/llvm/lib/AsmParser/LLParser.cpp
+++ b/llvm/lib/AsmParser/LLParser.cpp
@@ -1279,7 +1279,7 @@ bool LLParser::parseGlobal(const std::string &Name, LocTy NameLoc,
GV->setUnnamedAddr(UnnamedAddr);
if (GVal) {
- if (GVal->getType() != Ty->getPointerTo(AddrSpace))
+ if (GVal->getAddressSpace() != AddrSpace)
return error(
TyLoc,
"forward reference and definition of global have different types");
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
index cd289e6470f2c3..733fdaeaa7e0ba 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
@@ -512,10 +512,8 @@ bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToVector(AllocaInst &Alloca) {
case Instruction::Load: {
Value *Ptr = cast<LoadInst>(Inst)->getPointerOperand();
Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
- Type *VecPtrTy = VectorTy->getPointerTo(Alloca.getAddressSpace());
- Value *BitCast = Builder.CreateBitCast(&Alloca, VecPtrTy);
Value *VecValue =
- Builder.CreateAlignedLoad(VectorTy, BitCast, Alloca.getAlign());
+ Builder.CreateAlignedLoad(VectorTy, &Alloca, Alloca.getAlign());
Value *ExtractElement = Builder.CreateExtractElement(VecValue, Index);
if (Inst->getType() != VecEltTy)
ExtractElement =
@@ -528,15 +526,13 @@ bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToVector(AllocaInst &Alloca) {
StoreInst *SI = cast<StoreInst>(Inst);
Value *Ptr = SI->getPointerOperand();
Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
- Type *VecPtrTy = VectorTy->getPointerTo(Alloca.getAddressSpace());
- Value *BitCast = Builder.CreateBitCast(&Alloca, VecPtrTy);
Value *VecValue =
- Builder.CreateAlignedLoad(VectorTy, BitCast, Alloca.getAlign());
+ Builder.CreateAlignedLoad(VectorTy, &Alloca, Alloca.getAlign());
Value *Elt = SI->getValueOperand();
if (Elt->getType() != VecEltTy)
Elt = Builder.CreateBitOrPointerCast(Elt, VecEltTy);
Value *NewVecValue = Builder.CreateInsertElement(VecValue, Elt, Index);
- Builder.CreateAlignedStore(NewVecValue, BitCast, Alloca.getAlign());
+ Builder.CreateAlignedStore(NewVecValue, &Alloca, Alloca.getAlign());
Inst->eraseFromParent();
break;
}
@@ -556,12 +552,10 @@ bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToVector(AllocaInst &Alloca) {
Mask.push_back(Idx);
}
}
- Type *VecPtrTy = VectorTy->getPointerTo(Alloca.getAddressSpace());
- Value *BitCast = Builder.CreateBitCast(&Alloca, VecPtrTy);
Value *VecValue =
- Builder.CreateAlignedLoad(VectorTy, BitCast, Alloca.getAlign());
+ Builder.CreateAlignedLoad(VectorTy, &Alloca, Alloca.getAlign());
Value *NewVecValue = Builder.CreateShuffleVector(VecValue, Mask);
- Builder.CreateAlignedStore(NewVecValue, BitCast, Alloca.getAlign());
+ Builder.CreateAlignedStore(NewVecValue, &Alloca, Alloca.getAlign());
Inst->eraseFromParent();
} else if (MemSetInst *MSI = dyn_cast<MemSetInst>(Inst)) {
diff --git a/llvm/lib/Target/ARM/ARMParallelDSP.cpp b/llvm/lib/Target/ARM/ARMParallelDSP.cpp
index c8531507fd8a0a..1efda5d1c93755 100644
--- a/llvm/lib/Target/ARM/ARMParallelDSP.cpp
+++ b/llvm/lib/Target/ARM/ARMParallelDSP.cpp
@@ -760,12 +760,10 @@ LoadInst* ARMParallelDSP::CreateWideLoad(MemInstList &Loads,
IRBuilder<NoFolder> IRB(DomLoad->getParent(),
++BasicBlock::iterator(DomLoad));
- // Bitcast the pointer to a wider type and create the wide load, while making
- // sure to maintain the original alignment as this prevents ldrd from being
- // generated when it could be illegal due to memory alignment.
- const unsigned AddrSpace = DomLoad->getPointerAddressSpace();
- Value *VecPtr = IRB.CreateBitCast(Base->getPointerOperand(),
- LoadTy->getPointerTo(AddrSpace));
+ // Create the wide load, while making sure to maintain the original alignment
+ // as this prevents ldrd from being generated when it could be illegal due to
+ // memory alignment.
+ Value *VecPtr = Base->getPointerOperand();
LoadInst *WideLoad = IRB.CreateAlignedLoad(LoadTy, VecPtr, Base->getAlign());
// Make sure everything is in the correct order in the basic block.
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp
index 0e03890088ef09..4b8fdcf3a5b3e0 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp
@@ -1726,10 +1726,8 @@ void WebAssemblyLowerEmscriptenEHSjLj::handleLongjmpableCallsForWasmSjLj(
// that requires multivalue support in the toolchain, which is currently not
// very reliable. We instead throw and catch a pointer to a struct value of
// type 'struct __WasmLongjmpArgs', which is defined in Emscripten.
- Instruction *CatchCI =
+ Instruction *LongjmpArgs =
IRB.CreateCall(CatchF, {IRB.getInt32(WebAssembly::C_LONGJMP)}, "thrown");
- Value *LongjmpArgs =
- IRB.CreateBitCast(CatchCI, LongjmpArgsTy->getPointerTo(), "longjmp.args");
Value *EnvField =
IRB.CreateConstGEP2_32(LongjmpArgsTy, LongjmpArgs, 0, 0, "env_gep");
Value *ValField =
diff --git a/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp b/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
index 33af27456d0c86..bdd050669355b1 100644
--- a/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
+++ b/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
@@ -1706,8 +1706,7 @@ void DevirtModule::applyVirtualConstProp(CallSiteInfo &CSInfo, StringRef FnName,
Call.replaceAndErase("virtual-const-prop-1-bit", FnName, RemarksEnabled,
OREGetter, IsBitSet);
} else {
- Value *ValAddr = B.CreateBitCast(Addr, RetType->getPointerTo());
- Value *Val = B.CreateLoad(RetType, ValAddr);
+ Value *Val = B.CreateLoad(RetType, Addr);
NumVirtConstProp++;
Call.replaceAndErase("virtual-const-prop", FnName, RemarksEnabled,
OREGetter, Val);
diff --git a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
index 808368ca05424d..634e4508f91070 100644
--- a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
@@ -2156,9 +2156,8 @@ std::pair<Value *, Value *> DFSanFunction::loadShadowFast(
ShadowSize == 4 ? Type::getInt32Ty(*DFS.Ctx) : Type::getInt64Ty(*DFS.Ctx);
IRBuilder<> IRB(Pos);
- Value *WideAddr = IRB.CreateBitCast(ShadowAddr, WideShadowTy->getPointerTo());
Value *CombinedWideShadow =
- IRB.CreateAlignedLoad(WideShadowTy, WideAddr, ShadowAlign);
+ IRB.CreateAlignedLoad(WideShadowTy, ShadowAddr, ShadowAlign);
unsigned WideShadowBitWidth = WideShadowTy->getIntegerBitWidth();
const uint64_t BytesPerWideShadow = WideShadowBitWidth / DFS.ShadowWidthBits;
@@ -2195,10 +2194,10 @@ std::pair<Value *, Value *> DFSanFunction::loadShadowFast(
// shadow).
for (uint64_t ByteOfs = BytesPerWideShadow; ByteOfs < Size;
ByteOfs += BytesPerWideShadow) {
- WideAddr = IRB.CreateGEP(WideShadowTy, WideAddr,
- ConstantInt::get(DFS.IntptrTy, 1));
+ ShadowAddr = IRB.CreateGEP(WideShadowTy, ShadowAddr,
+ ConstantInt::get(DFS.IntptrTy, 1));
Value *NextWideShadow =
- IRB.CreateAlignedLoad(WideShadowTy, WideAddr, ShadowAlign);
+ IRB.CreateAlignedLoad(WideShadowTy, ShadowAddr, ShadowAlign);
CombinedWideShadow = IRB.CreateOr(CombinedWideShadow, NextWideShadow);
if (ShouldTrackOrigins) {
Value *NextOrigin = DFS.loadNextOrigin(Pos, OriginAlign, &OriginAddr);
diff --git a/llvm/lib/Transforms/Utils/ValueMapper.cpp b/llvm/lib/Transforms/Utils/ValueMapper.cpp
index cad523ce9e3eb3..3446e31cc2ef17 100644
--- a/llvm/lib/Transforms/Utils/ValueMapper.cpp
+++ b/llvm/lib/Transforms/Utils/ValueMapper.cpp
@@ -1034,7 +1034,7 @@ void Mapper::mapAppendingVariable(GlobalVariable &GV, Constant *InitPrefix,
if (IsOldCtorDtor) {
// FIXME: This upgrade is done during linking to support the C API. See
// also IRLinker::linkAppendingVarProto() in IRMover.cpp.
- VoidPtrTy = Type::getInt8Ty(GV.getContext())->getPointerTo();
+ VoidPtrTy = PointerType::getUnqual(GV.getContext());
auto &ST = *cast<StructType>(NewMembers.front()->getType());
Type *Tys[3] = {ST.getElementType(0), ST.getElementType(1), VoidPtrTy};
EltTy = StructType::get(GV.getContext(), Tys, false);
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index f76e49274c5905..20a2d4bd38ea23 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -9393,9 +9393,9 @@ class BoUpSLP::ShuffleInstructionBuilder final : public BaseShuffleAnalysis {
// process to keep correct order.
auto *VecTy = FixedVectorType::get(E->Scalars.front()->getType(),
E->getVectorFactor());
- Value *Vec = Builder.CreateAlignedLoad(
- VecTy, PoisonValue::get(VecTy->getPointerTo()), MaybeAlign());
- return Vec;
+ return Builder.CreateAlignedLoad(
+ VecTy, PoisonValue::get(PointerType::getUnqual(VecTy->getContext())),
+ MaybeAlign());
}
/// Adds 2 input vectors and the mask for their shuffling.
void add(Value *V1, Value *V2, ArrayRef<int> Mask) {
@@ -10371,20 +10371,17 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
LoadInst *LI = cast<LoadInst>(VL0);
Instruction *NewLI;
- unsigned AS = LI->getPointerAddressSpace();
Value *PO = LI->getPointerOperand();
if (E->State == TreeEntry::Vectorize) {
- Value *VecPtr = Builder.CreateBitCast(PO, VecTy->getPointerTo(AS));
- NewLI = Builder.CreateAlignedLoad(VecTy, VecPtr, LI->getAlign());
+ NewLI = Builder.CreateAlignedLoad(VecTy, PO, LI->getAlign());
- // The pointer operand uses an in-tree scalar so we add the new BitCast
- // or LoadInst to ExternalUses list to make sure that an extract will
+ // The pointer operand uses an in-tree scalar so we add the new
+ // LoadInst to ExternalUses list to make sure that an extract will
// be generated in the future.
if (TreeEntry *Entry = getTreeEntry(PO)) {
// Find which lane we need to extract.
unsigned FoundLane = Entry->findLaneForValue(PO);
- ExternalUses.emplace_back(
- PO, PO != VecPtr ? cast<User>(VecPtr) : NewLI, FoundLane);
+ ExternalUses.emplace_back(PO, NewLI, FoundLane);
}
} else {
assert(E->State == TreeEntry::ScatterVectorize && "Unhandled state");
More information about the llvm-commits
mailing list