[llvm] e53b28c - [llvm] Drop some bitcasts and references related to typed pointers
Bjorn Pettersson via llvm-commits
llvm-commits at lists.llvm.org
Thu Aug 10 06:08:30 PDT 2023
Author: Bjorn Pettersson
Date: 2023-08-10T15:07:07+02:00
New Revision: e53b28c8330102bf21ac717ade2bfac9b1f09517
URL: https://github.com/llvm/llvm-project/commit/e53b28c8330102bf21ac717ade2bfac9b1f09517
DIFF: https://github.com/llvm/llvm-project/commit/e53b28c8330102bf21ac717ade2bfac9b1f09517.diff
LOG: [llvm] Drop some bitcasts and references related to typed pointers
Differential Revision: https://reviews.llvm.org/D157551
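With opaque pointers, all pointers in a given address space share the single
type 'ptr', so pointer-to-pointer bitcasts are no-ops and can simply be
dropped. A minimal sketch of the recurring pattern in this patch (illustrative
pseudo-helper names, not an excerpt from the diff):

    // Before, with typed pointers: cast the address to i8* first.
    Value *PtrVal =
        Builder.CreateBitCast(Ptr, Type::getInt8PtrTy(Ctx, AS));
    useAsBytePointer(PtrVal);

    // After, with opaque pointers: the operand is already
    // 'ptr addrspace(AS)', so it is used directly.
    useAsBytePointer(Ptr);

Likewise, type construction via Type::getInt8PtrTy(Ctx, AS) or
Ty->getPointerTo(AS) is replaced by the pointee-agnostic
PointerType::get(Ctx, AS) (or IRBuilder::getPtrTy(AS)).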
Added:
Modified:
llvm/lib/CodeGen/AtomicExpandPass.cpp
llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
llvm/lib/ExecutionEngine/ExecutionEngine.cpp
llvm/lib/IR/AutoUpgrade.cpp
llvm/lib/IR/Instructions.cpp
llvm/lib/IR/Type.cpp
llvm/lib/IR/Verifier.cpp
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
llvm/lib/Target/AArch64/AArch64StackTagging.cpp
llvm/lib/Target/AArch64/SVEIntrinsicOpts.cpp
llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
llvm/lib/Target/ARM/ARMISelLowering.cpp
llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp
llvm/lib/Target/DirectX/DXILPrepare.cpp
llvm/lib/Target/PowerPC/PPCISelLowering.cpp
llvm/lib/Target/PowerPC/PPCLoopInstrFormPrep.cpp
llvm/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp
llvm/lib/Target/X86/X86ISelLowering.cpp
llvm/lib/Target/X86/X86LowerAMXType.cpp
llvm/lib/Transforms/CFGuard/CFGuard.cpp
llvm/lib/Transforms/Coroutines/Coroutines.cpp
llvm/lib/Transforms/Scalar/SROA.cpp
llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
llvm/unittests/Analysis/ScalarEvolutionTest.cpp
llvm/unittests/Transforms/Utils/CloningTest.cpp
llvm/unittests/Transforms/Utils/LocalTest.cpp
llvm/unittests/Transforms/Utils/ScalarEvolutionExpanderTest.cpp
llvm/unittests/Transforms/Utils/ValueMapperTest.cpp
Removed:
################################################################################
diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp
index d98d286e039897..ccf3e9ec649210 100644
--- a/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -547,8 +547,6 @@ static void createCmpXchgInstFun(IRBuilderBase &Builder, Value *Addr,
bool NeedBitcast = OrigTy->isFloatingPointTy();
if (NeedBitcast) {
IntegerType *IntTy = Builder.getIntNTy(OrigTy->getPrimitiveSizeInBits());
- unsigned AS = Addr->getType()->getPointerAddressSpace();
- Addr = Builder.CreateBitCast(Addr, IntTy->getPointerTo(AS));
NewVal = Builder.CreateBitCast(NewVal, IntTy);
Loaded = Builder.CreateBitCast(Loaded, IntTy);
}
@@ -721,7 +719,6 @@ static PartwordMaskValues createMaskInstrs(IRBuilderBase &Builder,
assert(ValueSize < MinWordSize);
PointerType *PtrTy = cast<PointerType>(Addr->getType());
- Type *WordPtrType = PMV.WordType->getPointerTo(PtrTy->getAddressSpace());
IntegerType *IntTy = DL.getIntPtrType(Ctx, PtrTy->getAddressSpace());
Value *PtrLSB;
@@ -755,10 +752,6 @@ static PartwordMaskValues createMaskInstrs(IRBuilderBase &Builder,
PMV.Inv_Mask = Builder.CreateNot(PMV.Mask, "Inv_Mask");
- // Cast for typed pointers.
- PMV.AlignedAddr =
- Builder.CreateBitCast(PMV.AlignedAddr, WordPtrType, "AlignedAddr");
-
return PMV;
}
@@ -1841,11 +1834,8 @@ bool AtomicExpand::expandAtomicOpToLibcall(
// variables.
AllocaInst *AllocaCASExpected = nullptr;
- Value *AllocaCASExpected_i8 = nullptr;
AllocaInst *AllocaValue = nullptr;
- Value *AllocaValue_i8 = nullptr;
AllocaInst *AllocaResult = nullptr;
- Value *AllocaResult_i8 = nullptr;
Type *ResultTy;
SmallVector<Value *, 6> Args;
@@ -1862,23 +1852,17 @@ bool AtomicExpand::expandAtomicOpToLibcall(
// implementation and that addresses are convertible. For systems without
// that property, we'd need to extend this mechanism to support AS-specific
// families of atomic intrinsics.
- auto PtrTypeAS = PointerOperand->getType()->getPointerAddressSpace();
- Value *PtrVal =
- Builder.CreateBitCast(PointerOperand, Type::getInt8PtrTy(Ctx, PtrTypeAS));
- PtrVal = Builder.CreateAddrSpaceCast(PtrVal, Type::getInt8PtrTy(Ctx));
+ Value *PtrVal = PointerOperand;
+ PtrVal = Builder.CreateAddrSpaceCast(PtrVal, PointerType::getUnqual(Ctx));
Args.push_back(PtrVal);
// 'expected' argument, if present.
if (CASExpected) {
AllocaCASExpected = AllocaBuilder.CreateAlloca(CASExpected->getType());
AllocaCASExpected->setAlignment(AllocaAlignment);
- unsigned AllocaAS = AllocaCASExpected->getType()->getPointerAddressSpace();
-
- AllocaCASExpected_i8 = Builder.CreateBitCast(
- AllocaCASExpected, Type::getInt8PtrTy(Ctx, AllocaAS));
- Builder.CreateLifetimeStart(AllocaCASExpected_i8, SizeVal64);
+ Builder.CreateLifetimeStart(AllocaCASExpected, SizeVal64);
Builder.CreateAlignedStore(CASExpected, AllocaCASExpected, AllocaAlignment);
- Args.push_back(AllocaCASExpected_i8);
+ Args.push_back(AllocaCASExpected);
}
// 'val' argument ('desired' for cas), if present.
@@ -1890,11 +1874,9 @@ bool AtomicExpand::expandAtomicOpToLibcall(
} else {
AllocaValue = AllocaBuilder.CreateAlloca(ValueOperand->getType());
AllocaValue->setAlignment(AllocaAlignment);
- AllocaValue_i8 =
- Builder.CreateBitCast(AllocaValue, Type::getInt8PtrTy(Ctx));
- Builder.CreateLifetimeStart(AllocaValue_i8, SizeVal64);
+ Builder.CreateLifetimeStart(AllocaValue, SizeVal64);
Builder.CreateAlignedStore(ValueOperand, AllocaValue, AllocaAlignment);
- Args.push_back(AllocaValue_i8);
+ Args.push_back(AllocaValue);
}
}
@@ -1902,11 +1884,8 @@ bool AtomicExpand::expandAtomicOpToLibcall(
if (!CASExpected && HasResult && !UseSizedLibcall) {
AllocaResult = AllocaBuilder.CreateAlloca(I->getType());
AllocaResult->setAlignment(AllocaAlignment);
- unsigned AllocaAS = AllocaResult->getType()->getPointerAddressSpace();
- AllocaResult_i8 =
- Builder.CreateBitCast(AllocaResult, Type::getInt8PtrTy(Ctx, AllocaAS));
- Builder.CreateLifetimeStart(AllocaResult_i8, SizeVal64);
- Args.push_back(AllocaResult_i8);
+ Builder.CreateLifetimeStart(AllocaResult, SizeVal64);
+ Args.push_back(AllocaResult);
}
// 'ordering' ('success_order' for cas) argument.
@@ -1938,7 +1917,7 @@ bool AtomicExpand::expandAtomicOpToLibcall(
// And then, extract the results...
if (ValueOperand && !UseSizedLibcall)
- Builder.CreateLifetimeEnd(AllocaValue_i8, SizeVal64);
+ Builder.CreateLifetimeEnd(AllocaValue, SizeVal64);
if (CASExpected) {
// The final result from the CAS is {load of 'expected' alloca, bool result
@@ -1947,7 +1926,7 @@ bool AtomicExpand::expandAtomicOpToLibcall(
Value *V = PoisonValue::get(FinalResultTy);
Value *ExpectedOut = Builder.CreateAlignedLoad(
CASExpected->getType(), AllocaCASExpected, AllocaAlignment);
- Builder.CreateLifetimeEnd(AllocaCASExpected_i8, SizeVal64);
+ Builder.CreateLifetimeEnd(AllocaCASExpected, SizeVal64);
V = Builder.CreateInsertValue(V, ExpectedOut, 0);
V = Builder.CreateInsertValue(V, Result, 1);
I->replaceAllUsesWith(V);
@@ -1958,7 +1937,7 @@ bool AtomicExpand::expandAtomicOpToLibcall(
else {
V = Builder.CreateAlignedLoad(I->getType(), AllocaResult,
AllocaAlignment);
- Builder.CreateLifetimeEnd(AllocaResult_i8, SizeVal64);
+ Builder.CreateLifetimeEnd(AllocaResult, SizeVal64);
}
I->replaceAllUsesWith(V);
}
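For context, the libcall expansion above no longer needs the *_i8 shadow
values: the allocas themselves are handed to the lifetime intrinsics and to
the call. A standalone sketch of that shape (assuming ValTy, Val, SizeVal64,
AllocaAlignment, and the builders from the surrounding function; not code
copied from this file):

    AllocaInst *Tmp = AllocaBuilder.CreateAlloca(ValTy);
    Tmp->setAlignment(AllocaAlignment);
    Builder.CreateLifetimeStart(Tmp, SizeVal64); // no i8* cast of Tmp
    Builder.CreateAlignedStore(Val, Tmp, AllocaAlignment);
    Args.push_back(Tmp); // the 'ptr'-typed alloca is a valid argument as-is
    // ... emit the libcall ...
    Builder.CreateLifetimeEnd(Tmp, SizeVal64);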
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 6b95424bff04d4..c01189c76e08e6 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -9618,7 +9618,7 @@ SDValue TargetLowering::LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
// Access to address of TLS variable xyz is lowered to a function call:
// __emutls_get_address( address of global variable named "__emutls_v.xyz" )
EVT PtrVT = getPointerTy(DAG.getDataLayout());
- PointerType *VoidPtrType = Type::getInt8PtrTy(*DAG.getContext());
+ PointerType *VoidPtrType = PointerType::get(*DAG.getContext(), 0);
SDLoc dl(GA);
ArgListTy Args;
diff --git a/llvm/lib/ExecutionEngine/ExecutionEngine.cpp b/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
index 768d8450133726..98d7dcb8ec12bd 100644
--- a/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
+++ b/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
@@ -430,7 +430,7 @@ int ExecutionEngine::runFunctionAsMain(Function *Fn,
// Check main() type
unsigned NumArgs = Fn->getFunctionType()->getNumParams();
FunctionType *FTy = Fn->getFunctionType();
- Type* PPInt8Ty = Type::getInt8PtrTy(Fn->getContext())->getPointerTo();
+ Type *PPInt8Ty = PointerType::get(Fn->getContext(), 0);
// Check the argument types.
if (NumArgs > 3)
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 698b6257f513bf..ce1c3a354bd1a2 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -1409,15 +1409,15 @@ GlobalVariable *llvm::UpgradeGlobalVariable(GlobalVariable *GV) {
LLVMContext &C = GV->getContext();
IRBuilder<> IRB(C);
auto EltTy = StructType::get(STy->getElementType(0), STy->getElementType(1),
- IRB.getInt8PtrTy());
+ IRB.getPtrTy());
Constant *Init = GV->getInitializer();
unsigned N = Init->getNumOperands();
std::vector<Constant *> NewCtors(N);
for (unsigned i = 0; i != N; ++i) {
auto Ctor = cast<Constant>(Init->getOperand(i));
- NewCtors[i] = ConstantStruct::get(
- EltTy, Ctor->getAggregateElement(0u), Ctor->getAggregateElement(1),
- Constant::getNullValue(IRB.getInt8PtrTy()));
+ NewCtors[i] = ConstantStruct::get(EltTy, Ctor->getAggregateElement(0u),
+ Ctor->getAggregateElement(1),
+ Constant::getNullValue(IRB.getPtrTy()));
}
Constant *NewInit = ConstantArray::get(ArrayType::get(EltTy, N), NewCtors);
@@ -4430,10 +4430,10 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
}
// Create a new call with an added null annotation attribute argument.
- NewCall = Builder.CreateCall(
- NewFn,
- {CI->getArgOperand(0), CI->getArgOperand(1), CI->getArgOperand(2),
- CI->getArgOperand(3), Constant::getNullValue(Builder.getInt8PtrTy())});
+ NewCall =
+ Builder.CreateCall(NewFn, {CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(2), CI->getArgOperand(3),
+ Constant::getNullValue(Builder.getPtrTy())});
NewCall->takeName(CI);
CI->replaceAllUsesWith(NewCall);
CI->eraseFromParent();
@@ -4446,10 +4446,10 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
return;
}
// Create a new call with an added null annotation attribute argument.
- NewCall = Builder.CreateCall(
- NewFn,
- {CI->getArgOperand(0), CI->getArgOperand(1), CI->getArgOperand(2),
- CI->getArgOperand(3), Constant::getNullValue(Builder.getInt8PtrTy())});
+ NewCall =
+ Builder.CreateCall(NewFn, {CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(2), CI->getArgOperand(3),
+ Constant::getNullValue(Builder.getPtrTy())});
NewCall->takeName(CI);
CI->replaceAllUsesWith(NewCall);
CI->eraseFromParent();
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
index 42a927ca8214b7..e60f8f981d636f 100644
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -830,7 +830,7 @@ static Instruction *createMalloc(Instruction *InsertBefore,
// Create the call to Malloc.
BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
Module *M = BB->getParent()->getParent();
- Type *BPTy = Type::getInt8PtrTy(BB->getContext());
+ Type *BPTy = PointerType::getUnqual(BB->getContext());
FunctionCallee MallocFunc = MallocF;
if (!MallocFunc)
// prototype malloc as "void *malloc(size_t)"
@@ -926,20 +926,14 @@ static Instruction *createFree(Value *Source,
Module *M = BB->getParent()->getParent();
Type *VoidTy = Type::getVoidTy(M->getContext());
- Type *IntPtrTy = Type::getInt8PtrTy(M->getContext());
+ Type *VoidPtrTy = PointerType::getUnqual(M->getContext());
// prototype free as "void free(void*)"
- FunctionCallee FreeFunc = M->getOrInsertFunction("free", VoidTy, IntPtrTy);
+ FunctionCallee FreeFunc = M->getOrInsertFunction("free", VoidTy, VoidPtrTy);
CallInst *Result = nullptr;
- Value *PtrCast = Source;
- if (InsertBefore) {
- if (Source->getType() != IntPtrTy)
- PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertBefore);
- Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "", InsertBefore);
- } else {
- if (Source->getType() != IntPtrTy)
- PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertAtEnd);
- Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "");
- }
+ if (InsertBefore)
+ Result = CallInst::Create(FreeFunc, Source, Bundles, "", InsertBefore);
+ else
+ Result = CallInst::Create(FreeFunc, Source, Bundles, "");
Result->setTailCall();
if (Function *F = dyn_cast<Function>(FreeFunc.getCallee()))
Result->setCallingConv(F->getCallingConv());
diff --git a/llvm/lib/IR/Type.cpp b/llvm/lib/IR/Type.cpp
index 24d244fdccb37f..f88d3ace64c26f 100644
--- a/llvm/lib/IR/Type.cpp
+++ b/llvm/lib/IR/Type.cpp
@@ -835,7 +835,7 @@ static TargetTypeInfo getTargetTypeInfo(const TargetExtType *Ty) {
LLVMContext &C = Ty->getContext();
StringRef Name = Ty->getName();
if (Name.startswith("spirv."))
- return TargetTypeInfo(Type::getInt8PtrTy(C, 0), TargetExtType::HasZeroInit,
+ return TargetTypeInfo(PointerType::get(C, 0), TargetExtType::HasZeroInit,
TargetExtType::CanBeGlobal);
// Opaque types in the AArch64 name space.
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index 2374aeb8c1d666..064b9b2181f082 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -796,8 +796,7 @@ void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
if (ArrayType *ATy = dyn_cast<ArrayType>(GV.getValueType())) {
StructType *STy = dyn_cast<StructType>(ATy->getElementType());
PointerType *FuncPtrTy =
- FunctionType::get(Type::getVoidTy(Context), false)->
- getPointerTo(DL.getProgramAddressSpace());
+ PointerType::get(Context, DL.getProgramAddressSpace());
Check(STy && (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
STy->getTypeAtIndex(1) == FuncPtrTy,
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 4d322cecaf8df2..725dcb5301ea8c 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -14984,15 +14984,6 @@ bool AArch64TargetLowering::lowerInterleavedLoad(
// The base address of the load.
Value *BaseAddr = LI->getPointerOperand();
- if (NumLoads > 1) {
- // We will compute the pointer operand of each load from the original base
- // address using GEPs. Cast the base address to a pointer to the scalar
- // element type.
- BaseAddr = Builder.CreateBitCast(
- BaseAddr,
- LDVTy->getElementType()->getPointerTo(LI->getPointerAddressSpace()));
- }
-
Type *PtrTy = LI->getPointerOperandType();
Type *PredTy = VectorType::get(Type::getInt1Ty(LDVTy->getContext()),
LDVTy->getElementCount());
@@ -15030,11 +15021,9 @@ bool AArch64TargetLowering::lowerInterleavedLoad(
CallInst *LdN;
if (UseScalable)
- LdN = Builder.CreateCall(
- LdNFunc, {PTrue, Builder.CreateBitCast(BaseAddr, PtrTy)}, "ldN");
+ LdN = Builder.CreateCall(LdNFunc, {PTrue, BaseAddr}, "ldN");
else
- LdN = Builder.CreateCall(LdNFunc, Builder.CreateBitCast(BaseAddr, PtrTy),
- "ldN");
+ LdN = Builder.CreateCall(LdNFunc, BaseAddr, "ldN");
// Extract and store the sub-vectors returned by the load intrinsic.
for (unsigned i = 0; i < Shuffles.size(); i++) {
@@ -15154,15 +15143,6 @@ bool AArch64TargetLowering::lowerInterleavedStore(StoreInst *SI,
// The base address of the store.
Value *BaseAddr = SI->getPointerOperand();
- if (NumStores > 1) {
- // We will compute the pointer operand of each store from the original base
- // address using GEPs. Cast the base address to a pointer to the scalar
- // element type.
- BaseAddr = Builder.CreateBitCast(
- BaseAddr,
- SubVecTy->getElementType()->getPointerTo(SI->getPointerAddressSpace()));
- }
-
auto Mask = SVI->getShuffleMask();
// Sanity check if all the indices are NOT in range.
@@ -15245,7 +15225,7 @@ bool AArch64TargetLowering::lowerInterleavedStore(StoreInst *SI,
BaseAddr = Builder.CreateConstGEP1_32(SubVecTy->getElementType(),
BaseAddr, LaneLen * Factor);
- Ops.push_back(Builder.CreateBitCast(BaseAddr, PtrTy));
+ Ops.push_back(BaseAddr);
Builder.CreateCall(StNFunc, Ops);
}
return true;
@@ -24393,7 +24373,6 @@ Value *AArch64TargetLowering::emitLoadLinked(IRBuilderBase &Builder,
IsAcquire ? Intrinsic::aarch64_ldaxp : Intrinsic::aarch64_ldxp;
Function *Ldxr = Intrinsic::getDeclaration(M, Int);
- Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
Value *LoHi = Builder.CreateCall(Ldxr, Addr, "lohi");
Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
@@ -24442,7 +24421,6 @@ Value *AArch64TargetLowering::emitStoreConditional(IRBuilderBase &Builder,
Value *Lo = Builder.CreateTrunc(Val, Int64Ty, "lo");
Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 64), Int64Ty, "hi");
- Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
return Builder.CreateCall(Stxr, {Lo, Hi, Addr});
}
@@ -24490,7 +24468,7 @@ static Value *UseTlsOffset(IRBuilderBase &IRB, unsigned Offset) {
return IRB.CreatePointerCast(
IRB.CreateConstGEP1_32(IRB.getInt8Ty(), IRB.CreateCall(ThreadPointerFunc),
Offset),
- IRB.getInt8PtrTy()->getPointerTo(0));
+ IRB.getPtrTy(0));
}
Value *AArch64TargetLowering::getIRStackGuard(IRBuilderBase &IRB) const {
diff --git a/llvm/lib/Target/AArch64/AArch64StackTagging.cpp b/llvm/lib/Target/AArch64/AArch64StackTagging.cpp
index 3ac86b3cde2ed9..63bca97d7313fb 100644
--- a/llvm/lib/Target/AArch64/AArch64StackTagging.cpp
+++ b/llvm/lib/Target/AArch64/AArch64StackTagging.cpp
@@ -435,7 +435,7 @@ void AArch64StackTagging::tagAlloca(AllocaInst *AI, Instruction *InsertBefore,
void AArch64StackTagging::untagAlloca(AllocaInst *AI, Instruction *InsertBefore,
uint64_t Size) {
IRBuilder<> IRB(InsertBefore);
- IRB.CreateCall(SetTagFunc, {IRB.CreatePointerCast(AI, IRB.getInt8PtrTy()),
+ IRB.CreateCall(SetTagFunc, {IRB.CreatePointerCast(AI, IRB.getPtrTy()),
ConstantInt::get(IRB.getInt64Ty(), Size)});
}
@@ -564,7 +564,7 @@ bool AArch64StackTagging::runOnFunction(Function &Fn) {
}
} else {
uint64_t Size = *Info.AI->getAllocationSize(*DL);
- Value *Ptr = IRB.CreatePointerCast(TagPCall, IRB.getInt8PtrTy());
+ Value *Ptr = IRB.CreatePointerCast(TagPCall, IRB.getPtrTy());
tagAlloca(AI, &*IRB.GetInsertPoint(), Ptr, Size);
for (auto *RI : SInfo.RetVec) {
untagAlloca(AI, RI, Size);
diff --git a/llvm/lib/Target/AArch64/SVEIntrinsicOpts.cpp b/llvm/lib/Target/AArch64/SVEIntrinsicOpts.cpp
index c5a6cb7af405e6..880ff8498b876e 100644
--- a/llvm/lib/Target/AArch64/SVEIntrinsicOpts.cpp
+++ b/llvm/lib/Target/AArch64/SVEIntrinsicOpts.cpp
@@ -325,10 +325,7 @@ bool SVEIntrinsicOpts::optimizePredicateStore(Instruction *I) {
IRBuilder<> Builder(I->getContext());
Builder.SetInsertPoint(I);
- auto *PtrBitCast = Builder.CreateBitCast(
- Store->getPointerOperand(),
- PredType->getPointerTo(Store->getPointerAddressSpace()));
- Builder.CreateStore(BitCast->getOperand(0), PtrBitCast);
+ Builder.CreateStore(BitCast->getOperand(0), Store->getPointerOperand());
Store->eraseFromParent();
if (IntrI->getNumUses() == 0)
@@ -385,10 +382,7 @@ bool SVEIntrinsicOpts::optimizePredicateLoad(Instruction *I) {
IRBuilder<> Builder(I->getContext());
Builder.SetInsertPoint(Load);
- auto *PtrBitCast = Builder.CreateBitCast(
- Load->getPointerOperand(),
- PredType->getPointerTo(Load->getPointerAddressSpace()));
- auto *LoadPred = Builder.CreateLoad(PredType, PtrBitCast);
+ auto *LoadPred = Builder.CreateLoad(PredType, Load->getPointerOperand());
BitCast->replaceAllUsesWith(LoadPred);
BitCast->eraseFromParent();
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
index dadc0c92ef8bc9..e83b5edea5efe1 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
@@ -392,8 +392,8 @@ void MetadataStreamerYamlV2::emitHiddenKernelArgs(const Function &Func,
if (HiddenArgNumBytes >= 24)
emitKernelArg(DL, Int64Ty, Align(8), ValueKind::HiddenGlobalOffsetZ);
- auto Int8PtrTy = Type::getInt8PtrTy(Func.getContext(),
- AMDGPUAS::GLOBAL_ADDRESS);
+ auto Int8PtrTy =
+ PointerType::get(Func.getContext(), AMDGPUAS::GLOBAL_ADDRESS);
if (HiddenArgNumBytes >= 32) {
// We forbid the use of features requiring hostcall when compiling OpenCL
@@ -824,7 +824,7 @@ void MetadataStreamerMsgPackV3::emitHiddenKernelArgs(
Args);
auto Int8PtrTy =
- Type::getInt8PtrTy(Func.getContext(), AMDGPUAS::GLOBAL_ADDRESS);
+ PointerType::get(Func.getContext(), AMDGPUAS::GLOBAL_ADDRESS);
if (HiddenArgNumBytes >= 32) {
// We forbid the use of features requiring hostcall when compiling OpenCL
@@ -1044,7 +1044,7 @@ void MetadataStreamerMsgPackV5::emitHiddenKernelArgs(
Offset += 6; // Reserved.
auto Int8PtrTy =
- Type::getInt8PtrTy(Func.getContext(), AMDGPUAS::GLOBAL_ADDRESS);
+ PointerType::get(Func.getContext(), AMDGPUAS::GLOBAL_ADDRESS);
if (M->getNamedMetadata("llvm.printf.fmts")) {
emitKernelArg(DL, Int8PtrTy, Align(8), "hidden_printf_buffer", Offset,
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index c95e2eea1dc73b..5cb1f434d30e3c 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -21454,7 +21454,6 @@ Value *ARMTargetLowering::emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy,
IsAcquire ? Intrinsic::arm_ldaexd : Intrinsic::arm_ldrexd;
Function *Ldrex = Intrinsic::getDeclaration(M, Int);
- Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
Value *LoHi = Builder.CreateCall(Ldrex, Addr, "lohi");
Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
@@ -21504,7 +21503,6 @@ Value *ARMTargetLowering::emitStoreConditional(IRBuilderBase &Builder,
Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 32), Int32Ty, "hi");
if (!Subtarget->isLittle())
std::swap(Lo, Hi);
- Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
return Builder.CreateCall(Strex, {Lo, Hi, Addr});
}
@@ -21640,8 +21638,8 @@ bool ARMTargetLowering::lowerInterleavedLoad(
auto createLoadIntrinsic = [&](Value *BaseAddr) {
if (Subtarget->hasNEON()) {
- Type *Int8Ptr = Builder.getInt8PtrTy(LI->getPointerAddressSpace());
- Type *Tys[] = {VecTy, Int8Ptr};
+ Type *PtrTy = Builder.getPtrTy(LI->getPointerAddressSpace());
+ Type *Tys[] = {VecTy, PtrTy};
static const Intrinsic::ID LoadInts[3] = {Intrinsic::arm_neon_vld2,
Intrinsic::arm_neon_vld3,
Intrinsic::arm_neon_vld4};
@@ -21649,7 +21647,7 @@ bool ARMTargetLowering::lowerInterleavedLoad(
Intrinsic::getDeclaration(LI->getModule(), LoadInts[Factor - 2], Tys);
SmallVector<Value *, 2> Ops;
- Ops.push_back(Builder.CreateBitCast(BaseAddr, Int8Ptr));
+ Ops.push_back(BaseAddr);
Ops.push_back(Builder.getInt32(LI->getAlign().value()));
return Builder.CreateCall(VldnFunc, Ops, "vldN");
@@ -21658,14 +21656,13 @@ bool ARMTargetLowering::lowerInterleavedLoad(
"expected interleave factor of 2 or 4 for MVE");
Intrinsic::ID LoadInts =
Factor == 2 ? Intrinsic::arm_mve_vld2q : Intrinsic::arm_mve_vld4q;
- Type *VecEltTy =
- VecTy->getElementType()->getPointerTo(LI->getPointerAddressSpace());
- Type *Tys[] = {VecTy, VecEltTy};
+ Type *PtrTy = Builder.getPtrTy(LI->getPointerAddressSpace());
+ Type *Tys[] = {VecTy, PtrTy};
Function *VldnFunc =
Intrinsic::getDeclaration(LI->getModule(), LoadInts, Tys);
SmallVector<Value *, 2> Ops;
- Ops.push_back(Builder.CreateBitCast(BaseAddr, VecEltTy));
+ Ops.push_back(BaseAddr);
return Builder.CreateCall(VldnFunc, Ops, "vldN");
}
};
@@ -21792,13 +21789,6 @@ bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI,
// and sub-vector type to something legal.
LaneLen /= NumStores;
SubVecTy = FixedVectorType::get(SubVecTy->getElementType(), LaneLen);
-
- // We will compute the pointer operand of each store from the original base
- // address using GEPs. Cast the base address to a pointer to the scalar
- // element type.
- BaseAddr = Builder.CreateBitCast(
- BaseAddr,
- SubVecTy->getElementType()->getPointerTo(SI->getPointerAddressSpace()));
}
assert(isTypeLegal(EVT::getEVT(SubVecTy)) && "Illegal vstN vector type!");
@@ -21811,14 +21801,14 @@ bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI,
static const Intrinsic::ID StoreInts[3] = {Intrinsic::arm_neon_vst2,
Intrinsic::arm_neon_vst3,
Intrinsic::arm_neon_vst4};
- Type *Int8Ptr = Builder.getInt8PtrTy(SI->getPointerAddressSpace());
- Type *Tys[] = {Int8Ptr, SubVecTy};
+ Type *PtrTy = Builder.getPtrTy(SI->getPointerAddressSpace());
+ Type *Tys[] = {PtrTy, SubVecTy};
Function *VstNFunc = Intrinsic::getDeclaration(
SI->getModule(), StoreInts[Factor - 2], Tys);
SmallVector<Value *, 6> Ops;
- Ops.push_back(Builder.CreateBitCast(BaseAddr, Int8Ptr));
+ Ops.push_back(BaseAddr);
append_range(Ops, Shuffles);
Ops.push_back(Builder.getInt32(SI->getAlign().value()));
Builder.CreateCall(VstNFunc, Ops);
@@ -21827,14 +21817,13 @@ bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI,
"expected interleave factor of 2 or 4 for MVE");
Intrinsic::ID StoreInts =
Factor == 2 ? Intrinsic::arm_mve_vst2q : Intrinsic::arm_mve_vst4q;
- Type *EltPtrTy = SubVecTy->getElementType()->getPointerTo(
- SI->getPointerAddressSpace());
- Type *Tys[] = {EltPtrTy, SubVecTy};
+ Type *PtrTy = Builder.getPtrTy(SI->getPointerAddressSpace());
+ Type *Tys[] = {PtrTy, SubVecTy};
Function *VstNFunc =
Intrinsic::getDeclaration(SI->getModule(), StoreInts, Tys);
SmallVector<Value *, 6> Ops;
- Ops.push_back(Builder.CreateBitCast(BaseAddr, EltPtrTy));
+ Ops.push_back(BaseAddr);
append_range(Ops, Shuffles);
for (unsigned F = 0; F < Factor; F++) {
Ops.push_back(Builder.getInt32(F));
diff --git a/llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp b/llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp
index 5ac6d481e3d942..48e63e04b1197e 100644
--- a/llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp
+++ b/llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp
@@ -244,7 +244,7 @@ Value *MVEGatherScatterLowering::decomposePtr(Value *Ptr, Value *&Offsets,
if (PtrTy->getNumElements() != 4 || MemoryTy->getScalarSizeInBits() == 32)
return nullptr;
Value *Zero = ConstantInt::get(Builder.getInt32Ty(), 0);
- Value *BasePtr = Builder.CreateIntToPtr(Zero, Builder.getInt8PtrTy());
+ Value *BasePtr = Builder.CreateIntToPtr(Zero, Builder.getPtrTy());
Offsets = Builder.CreatePtrToInt(
Ptr, FixedVectorType::get(Builder.getInt32Ty(), 4));
Scale = 0;
@@ -1224,7 +1224,7 @@ bool MVEGatherScatterLowering::optimiseAddress(Value *Address, BasicBlock *BB,
// pointer.
if (Offsets && Base && Base != GEP) {
assert(Scale == 1 && "Expected to fold GEP to a scale of 1");
- Type *BaseTy = Builder.getInt8PtrTy();
+ Type *BaseTy = Builder.getPtrTy();
if (auto *VecTy = dyn_cast<FixedVectorType>(Base->getType()))
BaseTy = FixedVectorType::get(BaseTy, VecTy);
GetElementPtrInst *NewAddress = GetElementPtrInst::Create(
diff --git a/llvm/lib/Target/DirectX/DXILPrepare.cpp b/llvm/lib/Target/DirectX/DXILPrepare.cpp
index 660ca415b1a469..300924017c89f8 100644
--- a/llvm/lib/Target/DirectX/DXILPrepare.cpp
+++ b/llvm/lib/Target/DirectX/DXILPrepare.cpp
@@ -98,7 +98,7 @@ class DXILPrepareModule : public ModulePass {
PointerType *PtrTy = cast<PointerType>(Operand->getType());
return Builder.Insert(
CastInst::Create(Instruction::BitCast, Operand,
- Builder.getInt8PtrTy(PtrTy->getAddressSpace())));
+ Builder.getPtrTy(PtrTy->getAddressSpace())));
}
public:
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 3ed0a261eb769a..9175603b088510 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -18537,9 +18537,7 @@ Value *PPCTargetLowering::emitMaskedAtomicRMWIntrinsic(
Value *IncrLo = Builder.CreateTrunc(Incr, Int64Ty, "incr_lo");
Value *IncrHi =
Builder.CreateTrunc(Builder.CreateLShr(Incr, 64), Int64Ty, "incr_hi");
- Value *Addr =
- Builder.CreateBitCast(AlignedAddr, Type::getInt8PtrTy(M->getContext()));
- Value *LoHi = Builder.CreateCall(RMW, {Addr, IncrLo, IncrHi});
+ Value *LoHi = Builder.CreateCall(RMW, {AlignedAddr, IncrLo, IncrHi});
Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
Lo = Builder.CreateZExt(Lo, ValTy, "lo64");
@@ -18564,11 +18562,9 @@ Value *PPCTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
Value *NewLo = Builder.CreateTrunc(NewVal, Int64Ty, "new_lo");
Value *NewHi =
Builder.CreateTrunc(Builder.CreateLShr(NewVal, 64), Int64Ty, "new_hi");
- Value *Addr =
- Builder.CreateBitCast(AlignedAddr, Type::getInt8PtrTy(M->getContext()));
emitLeadingFence(Builder, CI, Ord);
Value *LoHi =
- Builder.CreateCall(IntCmpXchg, {Addr, CmpLo, CmpHi, NewLo, NewHi});
+ Builder.CreateCall(IntCmpXchg, {AlignedAddr, CmpLo, CmpHi, NewLo, NewHi});
emitTrailingFence(Builder, CI, Ord);
Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
diff --git a/llvm/lib/Target/PowerPC/PPCLoopInstrFormPrep.cpp b/llvm/lib/Target/PowerPC/PPCLoopInstrFormPrep.cpp
index f29a7af1bdf1c4..4df59ba3231c30 100644
--- a/llvm/lib/Target/PowerPC/PPCLoopInstrFormPrep.cpp
+++ b/llvm/lib/Target/PowerPC/PPCLoopInstrFormPrep.cpp
@@ -660,8 +660,8 @@ PPCLoopInstrFormPrep::rewriteForBase(Loop *L, const SCEVAddRecExpr *BasePtrSCEV,
Type *I8Ty = Type::getInt8Ty(BaseMemI->getParent()->getContext());
Type *I8PtrTy =
- Type::getInt8PtrTy(BaseMemI->getParent()->getContext(),
- BasePtr->getType()->getPointerAddressSpace());
+ PointerType::get(BaseMemI->getParent()->getContext(),
+ BasePtr->getType()->getPointerAddressSpace());
bool IsConstantInc = false;
const SCEV *BasePtrIncSCEV = BasePtrSCEV->getStepRecurrence(*SE);
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp
index b3fe110a092bd5..2aebc94aa663f4 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp
@@ -247,8 +247,7 @@ bool FixFunctionBitcasts::runOnModule(Module &M) {
if (F.getName() == "main") {
Main = &F;
LLVMContext &C = M.getContext();
- Type *MainArgTys[] = {Type::getInt32Ty(C),
- PointerType::get(Type::getInt8PtrTy(C), 0)};
+ Type *MainArgTys[] = {Type::getInt32Ty(C), PointerType::get(C, 0)};
FunctionType *MainTy = FunctionType::get(Type::getInt32Ty(C), MainArgTys,
/*isVarArg=*/false);
if (shouldFixMainFunction(F.getFunctionType(), MainTy)) {
@@ -256,8 +255,7 @@ bool FixFunctionBitcasts::runOnModule(Module &M) {
<< *F.getFunctionType() << "\n");
Value *Args[] = {UndefValue::get(MainArgTys[0]),
UndefValue::get(MainArgTys[1])};
- Value *Casted =
- ConstantExpr::getBitCast(Main, PointerType::get(MainTy, 0));
+ Value *Casted = ConstantExpr::getBitCast(Main, PointerType::get(C, 0));
CallMain = CallInst::Create(MainTy, Casted, Args, "call_main");
Uses.push_back(std::make_pair(CallMain, &F));
}
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 6cc0b73314606f..b84b408f4f66d5 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -18486,8 +18486,8 @@ static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
SDLoc dl(GA);
// Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
- Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
- is64Bit ? 257 : 256));
+ Value *Ptr = Constant::getNullValue(
+ PointerType::get(*DAG.getContext(), is64Bit ? 257 : 256));
SDValue ThreadPointer =
DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0, dl),
@@ -26104,7 +26104,7 @@ SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
SDLoc dl(Op);
EVT PtrVT = getPointerTy(DAG.getDataLayout());
// Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
- Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(
+ Value *Ptr = Constant::getNullValue(PointerType::get(
*DAG.getContext(), Subtarget.is64Bit() ? X86AS::FS : X86AS::GS));
return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
DAG.getIntPtrConstant(0, dl), MachinePointerInfo(Ptr));
diff --git a/llvm/lib/Target/X86/X86LowerAMXType.cpp b/llvm/lib/Target/X86/X86LowerAMXType.cpp
index 0416f0f0d2ec9f..69e0af6a5e9296 100644
--- a/llvm/lib/Target/X86/X86LowerAMXType.cpp
+++ b/llvm/lib/Target/X86/X86LowerAMXType.cpp
@@ -244,8 +244,7 @@ void X86LowerAMXType::combineLoadBitcast(LoadInst *LD, BitCastInst *Bitcast) {
IRBuilder<> Builder(Bitcast);
// Use the maximum column as stride.
Value *Stride = Builder.getInt64(64);
- Value *I8Ptr =
- Builder.CreateBitCast(LD->getOperand(0), Builder.getInt8PtrTy());
+ Value *I8Ptr = LD->getOperand(0);
std::array<Value *, 4> Args = {Row, Col, I8Ptr, Stride};
Value *NewInst = Builder.CreateIntrinsic(Intrinsic::x86_tileloadd64_internal,
@@ -272,8 +271,7 @@ void X86LowerAMXType::combineBitcastStore(BitCastInst *Bitcast, StoreInst *ST) {
// Use the maximum column as stride. It must be the same with load
// stride.
Value *Stride = Builder.getInt64(64);
- Value *I8Ptr =
- Builder.CreateBitCast(ST->getOperand(1), Builder.getInt8PtrTy());
+ Value *I8Ptr = ST->getOperand(1);
std::array<Value *, 5> Args = {Row, Col, I8Ptr, Stride, Tile};
Builder.CreateIntrinsic(Intrinsic::x86_tilestored64_internal, std::nullopt,
Args);
@@ -301,7 +299,7 @@ bool X86LowerAMXType::transformBitcast(BitCastInst *Bitcast) {
auto Prepare = [&](Type *MemTy) {
AllocaAddr = createAllocaInstAtEntry(Builder, Bitcast->getParent(), MemTy);
- I8Ptr = Builder.CreateBitCast(AllocaAddr, Builder.getInt8PtrTy());
+ I8Ptr = AllocaAddr;
Stride = Builder.getInt64(64);
};
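The AMX lowering follows the same pattern: the tile load/store intrinsics
take the memory operand as a plain 'ptr', so the pointer from the original
load or store is forwarded unmodified. Schematically (simplified; assumes
Row and Col have been computed as in the pass):

    Value *Stride = Builder.getInt64(64); // maximum column as stride
    Value *Ptr = LD->getPointerOperand(); // already 'ptr', no bitcast
    std::array<Value *, 4> Args = {Row, Col, Ptr, Stride};
    Value *NewInst = Builder.CreateIntrinsic(
        Intrinsic::x86_tileloadd64_internal, std::nullopt, Args);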
diff --git a/llvm/lib/Transforms/CFGuard/CFGuard.cpp b/llvm/lib/Transforms/CFGuard/CFGuard.cpp
index bf823ac55497ed..3eca0e4b594732 100644
--- a/llvm/lib/Transforms/CFGuard/CFGuard.cpp
+++ b/llvm/lib/Transforms/CFGuard/CFGuard.cpp
@@ -177,8 +177,7 @@ void CFGuard::insertCFGuardCheck(CallBase *CB) {
// Create new call instruction. The CFGuard check should always be a call,
// even if the original CallBase is an Invoke or CallBr instruction.
CallInst *GuardCheck =
- B.CreateCall(GuardFnType, GuardCheckLoad,
- {B.CreateBitCast(CalledOperand, B.getInt8PtrTy())}, Bundles);
+ B.CreateCall(GuardFnType, GuardCheckLoad, {CalledOperand}, Bundles);
// Ensure that the first argument is passed in the correct register
// (e.g. ECX on 32-bit X86 targets).
diff --git a/llvm/lib/Transforms/Coroutines/Coroutines.cpp b/llvm/lib/Transforms/Coroutines/Coroutines.cpp
index cde74c5e693be6..61cfbecfbe9be8 100644
--- a/llvm/lib/Transforms/Coroutines/Coroutines.cpp
+++ b/llvm/lib/Transforms/Coroutines/Coroutines.cpp
@@ -37,7 +37,7 @@ using namespace llvm;
// Construct the lowerer base class and initialize its members.
coro::LowererBase::LowererBase(Module &M)
: TheModule(M), Context(M.getContext()),
- Int8Ptr(Type::getInt8PtrTy(Context)),
+ Int8Ptr(PointerType::get(Context, 0)),
ResumeFnType(FunctionType::get(Type::getVoidTy(Context), Int8Ptr,
/*isVarArg=*/false)),
NullPtr(ConstantPointerNull::get(Int8Ptr)) {}
@@ -137,8 +137,9 @@ void coro::replaceCoroFree(CoroIdInst *CoroId, bool Elide) {
return;
Value *Replacement =
- Elide ? ConstantPointerNull::get(Type::getInt8PtrTy(CoroId->getContext()))
- : CoroFrees.front()->getFrame();
+ Elide
+ ? ConstantPointerNull::get(PointerType::get(CoroId->getContext(), 0))
+ : CoroFrees.front()->getFrame();
for (CoroFreeInst *CF : CoroFrees) {
CF->replaceAllUsesWith(Replacement);
@@ -267,7 +268,7 @@ void coro::Shape::buildFrom(Function &F) {
if (!CoroBegin) {
// Replace coro.frame which are supposed to be lowered to the result of
// coro.begin with undef.
- auto *Undef = UndefValue::get(Type::getInt8PtrTy(F.getContext()));
+ auto *Undef = UndefValue::get(PointerType::get(F.getContext(), 0));
for (CoroFrameInst *CF : CoroFrames) {
CF->replaceAllUsesWith(Undef);
CF->eraseFromParent();
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index 20e48e6e0f8e96..3cf2985df24aba 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -4268,7 +4268,7 @@ bool SROAPass::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
for (;;) {
auto *PartTy = Type::getIntNTy(LI->getContext(), PartSize * 8);
auto AS = LI->getPointerAddressSpace();
- auto *PartPtrTy = PartTy->getPointerTo(AS);
+ auto *PartPtrTy = LI->getPointerOperandType();
LoadInst *PLoad = IRB.CreateAlignedLoad(
PartTy,
getAdjustedPtr(IRB, DL, BasePtr,
@@ -4323,8 +4323,7 @@ bool SROAPass::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
for (int Idx = 0, Size = SplitLoads.size(); Idx < Size; ++Idx) {
LoadInst *PLoad = SplitLoads[Idx];
uint64_t PartOffset = Idx == 0 ? 0 : Offsets.Splits[Idx - 1];
- auto *PartPtrTy =
- PLoad->getType()->getPointerTo(SI->getPointerAddressSpace());
+ auto *PartPtrTy = SI->getPointerOperandType();
auto AS = SI->getPointerAddressSpace();
StoreInst *PStore = IRB.CreateAlignedStore(
@@ -4404,8 +4403,8 @@ bool SROAPass::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
int Idx = 0, Size = Offsets.Splits.size();
for (;;) {
auto *PartTy = Type::getIntNTy(Ty->getContext(), PartSize * 8);
- auto *LoadPartPtrTy = PartTy->getPointerTo(LI->getPointerAddressSpace());
- auto *StorePartPtrTy = PartTy->getPointerTo(SI->getPointerAddressSpace());
+ auto *LoadPartPtrTy = LI->getPointerOperandType();
+ auto *StorePartPtrTy = SI->getPointerOperandType();
// Either lookup a split load or create one.
LoadInst *PLoad;
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index ef4b540cbfc95b..1159c4983d6b7c 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -9689,8 +9689,7 @@ void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
PartPtr = Builder.CreateGEP(ScalarDataTy, Ptr, Increment, "", InBounds);
}
- unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
- return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
+ return PartPtr;
};
// Handle Stores:
diff --git a/llvm/unittests/Analysis/ScalarEvolutionTest.cpp b/llvm/unittests/Analysis/ScalarEvolutionTest.cpp
index 9365464d58b305..425e7292b1a581 100644
--- a/llvm/unittests/Analysis/ScalarEvolutionTest.cpp
+++ b/llvm/unittests/Analysis/ScalarEvolutionTest.cpp
@@ -741,7 +741,7 @@ TEST_F(ScalarEvolutionsTest, SCEVExitLimitForgetLoop) {
NIM.setDataLayout(DataLayout);
Type *T_int64 = Type::getInt64Ty(Context);
- Type *T_pint64 = T_int64->getPointerTo(10);
+ Type *T_pint64 = PointerType::get(Context, 10);
FunctionType *FTy =
FunctionType::get(Type::getVoidTy(Context), {T_pint64}, false);
@@ -839,7 +839,7 @@ TEST_F(ScalarEvolutionsTest, SCEVExitLimitForgetValue) {
NIM.setDataLayout(DataLayout);
Type *T_int64 = Type::getInt64Ty(Context);
- Type *T_pint64 = T_int64->getPointerTo(10);
+ Type *T_pint64 = PointerType::get(Context, 10);
FunctionType *FTy =
FunctionType::get(Type::getVoidTy(Context), {T_pint64}, false);
diff --git a/llvm/unittests/Transforms/Utils/CloningTest.cpp b/llvm/unittests/Transforms/Utils/CloningTest.cpp
index 01ad41f8f75aaf..e083c75bf9c92d 100644
--- a/llvm/unittests/Transforms/Utils/CloningTest.cpp
+++ b/llvm/unittests/Transforms/Utils/CloningTest.cpp
@@ -927,7 +927,7 @@ class CloneModule : public ::testing::Test {
// Add ifuncs
{
const unsigned AddrSpace = 123;
- auto *FuncPtrTy = Type::getInt8Ty(C)->getPointerTo(123);
+ auto *FuncPtrTy = PointerType::get(C, AddrSpace);
auto *FuncTy = FunctionType::get(FuncPtrTy, false);
auto *ResolverF = Function::Create(FuncTy, GlobalValue::PrivateLinkage,
diff --git a/llvm/unittests/Transforms/Utils/LocalTest.cpp b/llvm/unittests/Transforms/Utils/LocalTest.cpp
index c888668f7aec38..0525952d15d0a3 100644
--- a/llvm/unittests/Transforms/Utils/LocalTest.cpp
+++ b/llvm/unittests/Transforms/Utils/LocalTest.cpp
@@ -1151,7 +1151,7 @@ TEST(Local, CanReplaceOperandWithVariable) {
// Test that it's invalid to replace gcroot operands, even though it can't use
// immarg.
- Type *PtrPtr = B.getInt8Ty()->getPointerTo(0);
+ Type *PtrPtr = B.getPtrTy(0);
Value *Alloca = B.CreateAlloca(PtrPtr, (unsigned)0);
CallInst *GCRoot = B.CreateIntrinsic(Intrinsic::gcroot, {},
{Alloca, Constant::getNullValue(PtrPtr)});
diff --git a/llvm/unittests/Transforms/Utils/ScalarEvolutionExpanderTest.cpp b/llvm/unittests/Transforms/Utils/ScalarEvolutionExpanderTest.cpp
index 5fb7db9eccb1f5..ca2fc71c4d3801 100644
--- a/llvm/unittests/Transforms/Utils/ScalarEvolutionExpanderTest.cpp
+++ b/llvm/unittests/Transforms/Utils/ScalarEvolutionExpanderTest.cpp
@@ -153,7 +153,7 @@ TEST_F(ScalarEvolutionExpanderTest, SCEVZeroExtendExprNonIntegral) {
Type *T_int1 = Type::getInt1Ty(Context);
Type *T_int64 = Type::getInt64Ty(Context);
- Type *T_pint64 = T_int64->getPointerTo(10);
+ Type *T_pint64 = PointerType::get(Context, 10);
FunctionType *FTy =
FunctionType::get(Type::getVoidTy(Context), {T_pint64}, false);
@@ -227,7 +227,7 @@ TEST_F(ScalarEvolutionExpanderTest, SCEVExpanderIsSafeToExpandAt) {
NIM.setDataLayout(DataLayout);
Type *T_int64 = Type::getInt64Ty(Context);
- Type *T_pint64 = T_int64->getPointerTo(10);
+ Type *T_pint64 = PointerType::get(Context, 10);
FunctionType *FTy =
FunctionType::get(Type::getVoidTy(Context), {T_pint64}, false);
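Across the unit-test updates, pointer types are likewise constructed without
naming a pointee. A generic snippet in the same style as the tests above
(illustrative, not one of the modified tests):

    LLVMContext Context;
    // An address-space-10 pointer; under opaque pointers the pointee
    // type is irrelevant, so no element type is supplied.
    Type *T_pint64 = PointerType::get(Context, 10);
    FunctionType *FTy =
        FunctionType::get(Type::getVoidTy(Context), {T_pint64}, false);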
diff --git a/llvm/unittests/Transforms/Utils/ValueMapperTest.cpp b/llvm/unittests/Transforms/Utils/ValueMapperTest.cpp
index af20f5af36bb43..17083b3846430d 100644
--- a/llvm/unittests/Transforms/Utils/ValueMapperTest.cpp
+++ b/llvm/unittests/Transforms/Utils/ValueMapperTest.cpp
@@ -66,7 +66,7 @@ TEST(ValueMapperTest, mapMDNodeCycle) {
TEST(ValueMapperTest, mapMDNodeDuplicatedCycle) {
LLVMContext Context;
- auto *PtrTy = Type::getInt8Ty(Context)->getPointerTo();
+ auto *PtrTy = PointerType::get(Context, 0);
std::unique_ptr<GlobalVariable> G0 = std::make_unique<GlobalVariable>(
PtrTy, false, GlobalValue::ExternalLinkage, nullptr, "G0");
std::unique_ptr<GlobalVariable> G1 = std::make_unique<GlobalVariable>(