[clang] [clang][CodeGen] Simplify code based on opaque pointers (PR #65624)
Björn Pettersson via cfe-commits
cfe-commits at lists.llvm.org
Thu Sep 14 11:17:10 PDT 2023
https://github.com/bjope updated https://github.com/llvm/llvm-project/pull/65624:
From 6ccf70a6aa245b83667ef6547c869035dd06da6f Mon Sep 17 00:00:00 2001
From: Bjorn Pettersson <bjorn.a.pettersson at ericsson.com>
Date: Thu, 7 Sep 2023 13:08:22 +0200
Subject: [PATCH] [clang][CodeGen] Simplify code based on opaque pointers
- Update CodeGenTypeCache to use a single union for all pointers in
  address space zero (a simplified sketch of the layout follows below).
- Introduce an UnqualPtrTy in CodeGenTypeCache, and use that (for
  example instead of llvm::PointerType::getUnqual) in some places.
- Drop some redundant bit/pointer casts from ptr to ptr.
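
A minimal sketch of the resulting cache layout (simplified and illustrative;
the real union lives in clang/lib/CodeGen/CodeGenTypeCache.h, and the
constructor below is not part of the patch):

    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/LLVMContext.h"

    struct PointerCacheSketch {
      // With opaque pointers every address-space-0 pointer is the same
      // llvm::PointerType, so one union member can back all the old aliases.
      union {
        llvm::PointerType *UnqualPtrTy;
        llvm::PointerType *VoidPtrTy;
        llvm::PointerType *Int8PtrTy;
        llvm::PointerType *VoidPtrPtrTy;
        llvm::PointerType *Int8PtrPtrTy;
      };

      explicit PointerCacheSketch(llvm::LLVMContext &Ctx) {
        // A single initialization covers every alias in the union; call
        // sites that previously built llvm::PointerType::getUnqual(Ctx)
        // can instead reuse the cached UnqualPtrTy directly.
        UnqualPtrTy = llvm::PointerType::get(Ctx, /*AddressSpace=*/0);
      }
    };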
---
clang/lib/CodeGen/CGAtomic.cpp | 3 +-
clang/lib/CodeGen/CGBlocks.cpp | 20 ++----
clang/lib/CodeGen/CGBuiltin.cpp | 99 ++++++++-------------------
clang/lib/CodeGen/CGCUDANV.cpp | 9 +--
clang/lib/CodeGen/CGExpr.cpp | 4 +-
clang/lib/CodeGen/CGObjCRuntime.cpp | 4 +-
clang/lib/CodeGen/CGOpenCLRuntime.cpp | 16 ++---
clang/lib/CodeGen/CodeGenModule.cpp | 13 ++--
clang/lib/CodeGen/CodeGenTypeCache.h | 7 +-
clang/lib/CodeGen/ItaniumCXXABI.cpp | 39 +++++------
clang/lib/CodeGen/Targets/PPC.cpp | 2 +-
clang/lib/CodeGen/Targets/Sparc.cpp | 2 +-
clang/lib/CodeGen/Targets/X86.cpp | 10 +--
13 files changed, 81 insertions(+), 147 deletions(-)
diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
index 222b0a192c85e20..83ad6739015b8d2 100644
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -87,8 +87,7 @@ namespace {
llvm::Value *StoragePtr = CGF.Builder.CreateConstGEP1_64(
CGF.Int8Ty, BitFieldPtr, OffsetInChars.getQuantity());
StoragePtr = CGF.Builder.CreateAddrSpaceCast(
- StoragePtr, llvm::PointerType::getUnqual(CGF.getLLVMContext()),
- "atomic_bitfield_base");
+ StoragePtr, CGF.UnqualPtrTy, "atomic_bitfield_base");
BFI = OrigBFI;
BFI.Offset = Offset;
BFI.StorageSize = AtomicSizeInBits;
diff --git a/clang/lib/CodeGen/CGBlocks.cpp b/clang/lib/CodeGen/CGBlocks.cpp
index 4f64012fc1a5c39..aa3e730d28efae5 100644
--- a/clang/lib/CodeGen/CGBlocks.cpp
+++ b/clang/lib/CodeGen/CGBlocks.cpp
@@ -1189,8 +1189,8 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E,
}
} else {
// Bitcast the block literal to a generic block literal.
- BlockPtr = Builder.CreatePointerCast(
- BlockPtr, llvm::PointerType::get(GenBlockTy, 0), "block.literal");
+ BlockPtr =
+ Builder.CreatePointerCast(BlockPtr, UnqualPtrTy, "block.literal");
// Get pointer to the block invoke function
llvm::Value *FuncPtr = Builder.CreateStructGEP(GenBlockTy, BlockPtr, 3);
@@ -1208,12 +1208,6 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E,
const CGFunctionInfo &FnInfo =
CGM.getTypes().arrangeBlockFunctionCall(Args, FuncTy);
- // Cast the function pointer to the right type.
- llvm::Type *BlockFTy = CGM.getTypes().GetFunctionType(FnInfo);
-
- llvm::Type *BlockFTyPtr = llvm::PointerType::getUnqual(BlockFTy);
- Func = Builder.CreatePointerCast(Func, BlockFTyPtr);
-
// Prepare the callee.
CGCallee Callee(CGCalleeInfo(), Func);
@@ -2589,11 +2583,11 @@ const BlockByrefInfo &CodeGenFunction::getBlockByrefInfo(const VarDecl *D) {
SmallVector<llvm::Type *, 8> types;
// void *__isa;
- types.push_back(Int8PtrTy);
+ types.push_back(VoidPtrTy);
size += getPointerSize();
// void *__forwarding;
- types.push_back(llvm::PointerType::getUnqual(byrefType));
+ types.push_back(VoidPtrTy);
size += getPointerSize();
// int32_t __flags;
@@ -2608,11 +2602,11 @@ const BlockByrefInfo &CodeGenFunction::getBlockByrefInfo(const VarDecl *D) {
bool hasCopyAndDispose = getContext().BlockRequiresCopying(Ty, D);
if (hasCopyAndDispose) {
/// void *__copy_helper;
- types.push_back(Int8PtrTy);
+ types.push_back(VoidPtrTy);
size += getPointerSize();
/// void *__destroy_helper;
- types.push_back(Int8PtrTy);
+ types.push_back(VoidPtrTy);
size += getPointerSize();
}
@@ -2621,7 +2615,7 @@ const BlockByrefInfo &CodeGenFunction::getBlockByrefInfo(const VarDecl *D) {
if (getContext().getByrefLifetime(Ty, Lifetime, HasByrefExtendedLayout) &&
HasByrefExtendedLayout) {
/// void *__byref_variable_layout;
- types.push_back(Int8PtrTy);
+ types.push_back(VoidPtrTy);
size += CharUnits::fromQuantity(PointerSizeInBytes);
}
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 52868ca260290b7..18f2a03c7995233 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -978,9 +978,8 @@ static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
llvm::IntegerType *IntType = llvm::IntegerType::get(
CGF.getLLVMContext(),
CGF.getContext().getTypeSize(E->getArg(1)->getType()));
- llvm::Type *PtrType = llvm::PointerType::getUnqual(CGF.getLLVMContext());
llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGF.Int8Ty, {PtrType, IntType}, false);
+ llvm::FunctionType::get(CGF.Int8Ty, {CGF.UnqualPtrTy, IntType}, false);
llvm::InlineAsm *IA =
llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
@@ -1119,7 +1118,7 @@ static llvm::Value *emitPPCLoadReserveIntrinsic(CodeGenFunction &CGF,
Constraints += MachineClobbers;
}
- llvm::Type *PtrType = llvm::PointerType::getUnqual(CGF.getLLVMContext());
+ llvm::Type *PtrType = CGF.UnqualPtrTy;
llvm::FunctionType *FTy = llvm::FunctionType::get(RetType, {PtrType}, false);
llvm::InlineAsm *IA =
@@ -4954,8 +4953,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// Type of the generic packet parameter.
unsigned GenericAS =
getContext().getTargetAddressSpace(LangAS::opencl_generic);
- llvm::Type *I8PTy = llvm::PointerType::get(
- llvm::Type::getInt8Ty(getLLVMContext()), GenericAS);
+ llvm::Type *I8PTy = llvm::PointerType::get(getLLVMContext(), GenericAS);
// Testing which overloaded version we should generate the call for.
if (2U == E->getNumArgs()) {
@@ -5100,11 +5098,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BIto_local:
case Builtin::BIto_private: {
auto Arg0 = EmitScalarExpr(E->getArg(0));
- auto NewArgT = llvm::PointerType::get(Int8Ty,
- CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
- auto NewRetT = llvm::PointerType::get(Int8Ty,
- CGM.getContext().getTargetAddressSpace(
- E->getType()->getPointeeType().getAddressSpace()));
+ auto NewArgT = llvm::PointerType::get(
+ getLLVMContext(),
+ CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
+ auto NewRetT = llvm::PointerType::get(
+ getLLVMContext(),
+ CGM.getContext().getTargetAddressSpace(
+ E->getType()->getPointeeType().getAddressSpace()));
auto FTy = llvm::FunctionType::get(NewRetT, {NewArgT}, false);
llvm::Value *NewArg;
if (Arg0->getType()->getPointerAddressSpace() !=
@@ -7356,13 +7356,9 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vld1q_x3_v:
case NEON::BI__builtin_neon_vld1_x4_v:
case NEON::BI__builtin_neon_vld1q_x4_v: {
- llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType());
- Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
- llvm::Type *Tys[2] = { VTy, PTy };
+ llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN");
- Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld2_v:
@@ -7381,8 +7377,6 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
Value *Align = getAlignmentValue32(PtrOp1);
Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, NameHint);
- Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld1_dup_v:
@@ -7406,8 +7400,6 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
Ops[I] = Builder.CreateBitCast(Ops[I], Ty);
Ops.push_back(getAlignmentValue32(PtrOp1));
Ops[1] = Builder.CreateCall(F, ArrayRef(Ops).slice(1), NameHint);
- Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vmovl_v: {
@@ -7586,16 +7578,15 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vst1q_x3_v:
case NEON::BI__builtin_neon_vst1_x4_v:
case NEON::BI__builtin_neon_vst1q_x4_v: {
- llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType());
// TODO: Currently in AArch32 mode the pointer operand comes first, whereas
// in AArch64 it comes last. We may want to stick to one or another.
if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be ||
Arch == llvm::Triple::aarch64_32) {
- llvm::Type *Tys[2] = { VTy, PTy };
+ llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
}
- llvm::Type *Tys[2] = { PTy, VTy };
+ llvm::Type *Tys[2] = {UnqualPtrTy, VTy};
return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
}
case NEON::BI__builtin_neon_vsubhn_v: {
@@ -7617,7 +7608,6 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
}
case NEON::BI__builtin_neon_vtrn_v:
case NEON::BI__builtin_neon_vtrnq_v: {
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Value *SV = nullptr;
@@ -7645,7 +7635,6 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
}
case NEON::BI__builtin_neon_vuzp_v:
case NEON::BI__builtin_neon_vuzpq_v: {
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Value *SV = nullptr;
@@ -7668,7 +7657,6 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
}
case NEON::BI__builtin_neon_vzip_v:
case NEON::BI__builtin_neon_vzipq_v: {
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Value *SV = nullptr;
@@ -8190,12 +8178,11 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
llvm::Type *RealResTy = ConvertType(Ty);
llvm::Type *IntTy =
llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(Ty));
- llvm::Type *PtrTy = llvm::PointerType::getUnqual(getLLVMContext());
Function *F = CGM.getIntrinsic(
BuiltinID == clang::ARM::BI__builtin_arm_ldaex ? Intrinsic::arm_ldaex
: Intrinsic::arm_ldrex,
- PtrTy);
+ UnqualPtrTy);
CallInst *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
Val->addParamAttr(
0, Attribute::get(getLLVMContext(), Attribute::ElementType, IntTy));
@@ -10361,13 +10348,12 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
llvm::Type *RealResTy = ConvertType(Ty);
llvm::Type *IntTy =
llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(Ty));
- llvm::Type *PtrTy = llvm::PointerType::getUnqual(getLLVMContext());
Function *F =
CGM.getIntrinsic(BuiltinID == clang::AArch64::BI__builtin_arm_ldaex
? Intrinsic::aarch64_ldaxr
: Intrinsic::aarch64_ldxr,
- PtrTy);
+ UnqualPtrTy);
CallInst *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
Val->addParamAttr(
0, Attribute::get(getLLVMContext(), Attribute::ElementType, IntTy));
@@ -10707,8 +10693,6 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
BuiltinID == AArch64::BI__writex18word ||
BuiltinID == AArch64::BI__writex18dword ||
BuiltinID == AArch64::BI__writex18qword) {
- llvm::Type *IntTy = ConvertType(E->getArg(1)->getType());
-
// Read x18 as i8*
LLVMContext &Context = CGM.getLLVMContext();
llvm::Metadata *Ops[] = {llvm::MDString::get(Context, "x18")};
@@ -10717,12 +10701,11 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
llvm::Function *F =
CGM.getIntrinsic(llvm::Intrinsic::read_register, {Int64Ty});
llvm::Value *X18 = Builder.CreateCall(F, Metadata);
- X18 = Builder.CreateIntToPtr(X18, llvm::PointerType::get(Int8Ty, 0));
+ X18 = Builder.CreateIntToPtr(X18, Int8PtrTy);
// Store val at x18 + offset
Value *Offset = Builder.CreateZExt(EmitScalarExpr(E->getArg(0)), Int64Ty);
Value *Ptr = Builder.CreateGEP(Int8Ty, X18, Offset);
- Ptr = Builder.CreatePointerCast(Ptr, llvm::PointerType::get(IntTy, 0));
Value *Val = EmitScalarExpr(E->getArg(1));
StoreInst *Store = Builder.CreateAlignedStore(Val, Ptr, CharUnits::One());
return Store;
@@ -10742,12 +10725,11 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
llvm::Function *F =
CGM.getIntrinsic(llvm::Intrinsic::read_register, {Int64Ty});
llvm::Value *X18 = Builder.CreateCall(F, Metadata);
- X18 = Builder.CreateIntToPtr(X18, llvm::PointerType::get(Int8Ty, 0));
+ X18 = Builder.CreateIntToPtr(X18, Int8PtrTy);
// Load x18 + offset
Value *Offset = Builder.CreateZExt(EmitScalarExpr(E->getArg(0)), Int64Ty);
Value *Ptr = Builder.CreateGEP(Int8Ty, X18, Offset);
- Ptr = Builder.CreatePointerCast(Ptr, llvm::PointerType::get(IntTy, 0));
LoadInst *Load = Builder.CreateAlignedLoad(IntTy, Ptr, CharUnits::One());
return Load;
}
@@ -12413,9 +12395,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vst1q_lane_v:
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
- Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
- return Builder.CreateAlignedStore(Ops[1], Builder.CreateBitCast(Ops[0], Ty),
- PtrOp0.getAlignment());
+ return Builder.CreateAlignedStore(Ops[1], Ops[0], PtrOp0.getAlignment());
case NEON::BI__builtin_neon_vstl1_lane_s64:
case NEON::BI__builtin_neon_vstl1q_lane_s64: {
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
@@ -12427,50 +12407,42 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vld2_v:
case NEON::BI__builtin_neon_vld2q_v: {
- llvm::Type *PTy = llvm::PointerType::getUnqual(getLLVMContext());
- llvm::Type *Tys[2] = { VTy, PTy };
+ llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld3_v:
case NEON::BI__builtin_neon_vld3q_v: {
- llvm::Type *PTy = llvm::PointerType::getUnqual(getLLVMContext());
- llvm::Type *Tys[2] = { VTy, PTy };
+ llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld4_v:
case NEON::BI__builtin_neon_vld4q_v: {
- llvm::Type *PTy = llvm::PointerType::getUnqual(getLLVMContext());
- llvm::Type *Tys[2] = { VTy, PTy };
+ llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
- Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::PointerType::getUnqual(Ops[1]->getType()));
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld2_dup_v:
case NEON::BI__builtin_neon_vld2q_dup_v: {
- llvm::Type *PTy = llvm::PointerType::getUnqual(getLLVMContext());
- llvm::Type *Tys[2] = { VTy, PTy };
+ llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld3_dup_v:
case NEON::BI__builtin_neon_vld3q_dup_v: {
- llvm::Type *PTy = llvm::PointerType::getUnqual(getLLVMContext());
- llvm::Type *Tys[2] = { VTy, PTy };
+ llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld4_dup_v:
case NEON::BI__builtin_neon_vld4q_dup_v: {
- llvm::Type *PTy = llvm::PointerType::getUnqual(getLLVMContext());
- llvm::Type *Tys[2] = { VTy, PTy };
+ llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
@@ -14716,12 +14688,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
BuiltinID == X86::BI__builtin_ia32_movntss)
Src = Builder.CreateExtractElement(Src, (uint64_t)0, "extract");
- // Convert the type of the pointer to a pointer to the stored type.
- Value *BC = Builder.CreateBitCast(
- Ptr, llvm::PointerType::getUnqual(Src->getType()), "cast");
-
// Unaligned nontemporal store of the scalar value.
- StoreInst *SI = Builder.CreateDefaultAlignedStore(Src, BC);
+ StoreInst *SI = Builder.CreateDefaultAlignedStore(Src, Ptr);
SI->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
SI->setAlignment(llvm::Align(1));
return SI;
@@ -15783,8 +15751,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__readfsdword:
case X86::BI__readfsqword: {
llvm::Type *IntTy = ConvertType(E->getType());
- Value *Ptr =
- Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 257));
+ Value *Ptr = Builder.CreateIntToPtr(
+ Ops[0], llvm::PointerType::get(getLLVMContext(), 257));
LoadInst *Load = Builder.CreateAlignedLoad(
IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
Load->setVolatile(true);
@@ -15795,8 +15763,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__readgsdword:
case X86::BI__readgsqword: {
llvm::Type *IntTy = ConvertType(E->getType());
- Value *Ptr =
- Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 256));
+ Value *Ptr = Builder.CreateIntToPtr(
+ Ops[0], llvm::PointerType::get(getLLVMContext(), 256));
LoadInst *Load = Builder.CreateAlignedLoad(
IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
Load->setVolatile(true);
@@ -15810,8 +15778,6 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
for (int i = 0; i < 3; ++i) {
Value *Extract = Builder.CreateExtractValue(Call, i + 1);
Value *Ptr = Builder.CreateConstGEP1_32(Int8Ty, Ops[2], i * 16);
- Ptr = Builder.CreateBitCast(
- Ptr, llvm::PointerType::getUnqual(Extract->getType()));
Builder.CreateAlignedStore(Extract, Ptr, Align(1));
}
@@ -15826,8 +15792,6 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
for (int i = 0; i < 4; ++i) {
Value *Extract = Builder.CreateExtractValue(Call, i + 1);
Value *Ptr = Builder.CreateConstGEP1_32(Int8Ty, Ops[3], i * 16);
- Ptr = Builder.CreateBitCast(
- Ptr, llvm::PointerType::getUnqual(Extract->getType()));
Builder.CreateAlignedStore(Extract, Ptr, Align(1));
}
@@ -20516,10 +20480,7 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
llvm::MDNode *NontemporalNode = llvm::MDNode::get(
getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
- Value *BC = Builder.CreateBitCast(
- Ops[0], llvm::PointerType::getUnqual(Ops[1]->getType()), "cast");
-
- StoreInst *Store = Builder.CreateDefaultAlignedStore(Ops[1], BC);
+ StoreInst *Store = Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
Store->setMetadata(llvm::LLVMContext::MD_nontemporal, NontemporalNode);
Store->setMetadata(CGM.getModule().getMDKindID("riscv-nontemporal-domain"),
RISCVDomainNode);
diff --git a/clang/lib/CodeGen/CGCUDANV.cpp b/clang/lib/CodeGen/CGCUDANV.cpp
index 08769c98dc298a0..6a6e96e5aad2239 100644
--- a/clang/lib/CodeGen/CGCUDANV.cpp
+++ b/clang/lib/CodeGen/CGCUDANV.cpp
@@ -226,18 +226,15 @@ CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM)
TheModule(CGM.getModule()),
RelocatableDeviceCode(CGM.getLangOpts().GPURelocatableDeviceCode),
DeviceMC(InitDeviceMC(CGM)) {
- CodeGen::CodeGenTypes &Types = CGM.getTypes();
- ASTContext &Ctx = CGM.getContext();
-
IntTy = CGM.IntTy;
SizeTy = CGM.SizeTy;
VoidTy = CGM.VoidTy;
Zeros[0] = llvm::ConstantInt::get(SizeTy, 0);
Zeros[1] = Zeros[0];
- CharPtrTy = llvm::PointerType::getUnqual(Types.ConvertType(Ctx.CharTy));
- VoidPtrTy = cast<llvm::PointerType>(Types.ConvertType(Ctx.VoidPtrTy));
- VoidPtrPtrTy = llvm::PointerType::getUnqual(CGM.getLLVMContext());
+ CharPtrTy = CGM.UnqualPtrTy;
+ VoidPtrTy = CGM.UnqualPtrTy;
+ VoidPtrPtrTy = CGM.UnqualPtrTy;
}
llvm::FunctionCallee CGNVCUDARuntime::getSetupArgumentFn() const {
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
index 76bbeba468db643..005cadbcb5cbdcc 100644
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -1212,7 +1212,7 @@ LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
const char *Name) {
ErrorUnsupported(E, Name);
llvm::Type *ElTy = ConvertType(E->getType());
- llvm::Type *Ty = llvm::PointerType::getUnqual(ElTy);
+ llvm::Type *Ty = UnqualPtrTy;
return MakeAddrLValue(
Address(llvm::UndefValue::get(Ty), ElTy, CharUnits::One()), E->getType());
}
@@ -4599,7 +4599,7 @@ std::optional<LValue> HandleConditionalOperatorLValueSimpleCase(
if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Live->IgnoreParens())) {
CGF.EmitCXXThrowExpr(ThrowExpr);
llvm::Type *ElemTy = CGF.ConvertType(Dead->getType());
- llvm::Type *Ty = llvm::PointerType::getUnqual(ElemTy);
+ llvm::Type *Ty = CGF.UnqualPtrTy;
return CGF.MakeAddrLValue(
Address(llvm::UndefValue::get(Ty), ElemTy, CharUnits::One()),
Dead->getType());
diff --git a/clang/lib/CodeGen/CGObjCRuntime.cpp b/clang/lib/CodeGen/CGObjCRuntime.cpp
index 634a3d5a938dfd9..424564f97599952 100644
--- a/clang/lib/CodeGen/CGObjCRuntime.cpp
+++ b/clang/lib/CodeGen/CGObjCRuntime.cpp
@@ -63,12 +63,10 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
CGF.CGM.getContext().getObjCObjectPointerType(InterfaceTy);
QualType IvarTy =
Ivar->getUsageType(ObjectPtrTy).withCVRQualifiers(CVRQualifiers);
- llvm::Type *LTy = CGF.CGM.getTypes().ConvertTypeForMem(IvarTy);
- llvm::Value *V = CGF.Builder.CreateBitCast(BaseValue, CGF.Int8PtrTy);
+ llvm::Value *V = BaseValue;
V = CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, V, Offset, "add.ptr");
if (!Ivar->isBitField()) {
- V = CGF.Builder.CreateBitCast(V, llvm::PointerType::getUnqual(LTy));
LValue LV = CGF.MakeNaturalAlignAddrLValue(V, IvarTy);
return LV;
}
diff --git a/clang/lib/CodeGen/CGOpenCLRuntime.cpp b/clang/lib/CodeGen/CGOpenCLRuntime.cpp
index 4167792abafc70e..33838a6552c8d70 100644
--- a/clang/lib/CodeGen/CGOpenCLRuntime.cpp
+++ b/clang/lib/CodeGen/CGOpenCLRuntime.cpp
@@ -71,8 +71,7 @@ llvm::PointerType *CGOpenCLRuntime::getPointerType(const Type *T,
llvm::LLVMContext &Ctx = CGM.getLLVMContext();
uint32_t AddrSpc = CGM.getContext().getTargetAddressSpace(
CGM.getContext().getOpenCLTypeAddrSpace(T));
- auto *PTy =
- llvm::PointerType::get(llvm::StructType::create(Ctx, Name), AddrSpc);
+ auto *PTy = llvm::PointerType::get(Ctx, AddrSpc);
CachedTys[Name] = PTy;
return PTy;
}
@@ -90,10 +89,9 @@ llvm::Type *CGOpenCLRuntime::getPipeType(const PipeType *T) {
llvm::Type *CGOpenCLRuntime::getPipeType(const PipeType *T, StringRef Name,
llvm::Type *&PipeTy) {
if (!PipeTy)
- PipeTy = llvm::PointerType::get(llvm::StructType::create(
- CGM.getLLVMContext(), Name),
- CGM.getContext().getTargetAddressSpace(
- CGM.getContext().getOpenCLTypeAddrSpace(T)));
+ PipeTy = llvm::PointerType::get(
+ CGM.getLLVMContext(), CGM.getContext().getTargetAddressSpace(
+ CGM.getContext().getOpenCLTypeAddrSpace(T)));
return PipeTy;
}
@@ -105,10 +103,10 @@ llvm::Type *CGOpenCLRuntime::getSamplerType(const Type *T) {
CGM, CGM.getContext().OCLSamplerTy.getTypePtr()))
SamplerTy = TransTy;
else
+ // struct opencl.sampler_t*
SamplerTy = llvm::PointerType::get(
- llvm::StructType::create(CGM.getLLVMContext(), "opencl.sampler_t"),
- CGM.getContext().getTargetAddressSpace(
- CGM.getContext().getOpenCLTypeAddrSpace(T)));
+ CGM.getLLVMContext(), CGM.getContext().getTargetAddressSpace(
+ CGM.getContext().getOpenCLTypeAddrSpace(T)));
return SamplerTy;
}
diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
index 8b0c9340775cbe9..130cedcd4f2bf68 100644
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -360,13 +360,14 @@ CodeGenModule::CodeGenModule(ASTContext &C,
IntTy = llvm::IntegerType::get(LLVMContext, C.getTargetInfo().getIntWidth());
IntPtrTy = llvm::IntegerType::get(LLVMContext,
C.getTargetInfo().getMaxPointerWidth());
- Int8PtrTy = Int8Ty->getPointerTo(0);
- Int8PtrPtrTy = Int8PtrTy->getPointerTo(0);
+ Int8PtrTy = llvm::PointerType::get(LLVMContext, 0);
const llvm::DataLayout &DL = M.getDataLayout();
- AllocaInt8PtrTy = Int8Ty->getPointerTo(DL.getAllocaAddrSpace());
- GlobalsInt8PtrTy = Int8Ty->getPointerTo(DL.getDefaultGlobalsAddressSpace());
- ConstGlobalsPtrTy = Int8Ty->getPointerTo(
- C.getTargetAddressSpace(GetGlobalConstantAddressSpace()));
+ AllocaInt8PtrTy =
+ llvm::PointerType::get(LLVMContext, DL.getAllocaAddrSpace());
+ GlobalsInt8PtrTy =
+ llvm::PointerType::get(LLVMContext, DL.getDefaultGlobalsAddressSpace());
+ ConstGlobalsPtrTy = llvm::PointerType::get(
+ LLVMContext, C.getTargetAddressSpace(GetGlobalConstantAddressSpace()));
ASTAllocaAddressSpace = getTargetCodeGenInfo().getASTAllocaAddressSpace();
// Build C++20 Module initializers.
diff --git a/clang/lib/CodeGen/CodeGenTypeCache.h b/clang/lib/CodeGen/CodeGenTypeCache.h
index e848dc3b449c827..083d69214fb3c29 100644
--- a/clang/lib/CodeGen/CodeGenTypeCache.h
+++ b/clang/lib/CodeGen/CodeGenTypeCache.h
@@ -51,14 +51,11 @@ struct CodeGenTypeCache {
llvm::IntegerType *PtrDiffTy;
};
- /// void* in address space 0
+ /// void*, void** in address space 0
union {
+ llvm::PointerType *UnqualPtrTy;
llvm::PointerType *VoidPtrTy;
llvm::PointerType *Int8PtrTy;
- };
-
- /// void** in address space 0
- union {
llvm::PointerType *VoidPtrPtrTy;
llvm::PointerType *Int8PtrPtrTy;
};
diff --git a/clang/lib/CodeGen/ItaniumCXXABI.cpp b/clang/lib/CodeGen/ItaniumCXXABI.cpp
index 385dcf21f724e9c..0c89871420bdd3d 100644
--- a/clang/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -750,9 +750,9 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
} else {
llvm::Value *VFPAddr =
CGF.Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
- VirtualFn = CGF.Builder.CreateAlignedLoad(
- llvm::PointerType::getUnqual(CGF.getLLVMContext()), VFPAddr,
- CGF.getPointerAlign(), "memptr.virtualfn");
+ VirtualFn = CGF.Builder.CreateAlignedLoad(CGF.UnqualPtrTy, VFPAddr,
+ CGF.getPointerAlign(),
+ "memptr.virtualfn");
}
}
assert(VirtualFn && "Virtual fuction pointer not created!");
@@ -792,9 +792,8 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
// In the non-virtual path, the function pointer is actually a
// function pointer.
CGF.EmitBlock(FnNonVirtual);
- llvm::Value *NonVirtualFn = Builder.CreateIntToPtr(
- FnAsInt, llvm::PointerType::getUnqual(CGF.getLLVMContext()),
- "memptr.nonvirtualfn");
+ llvm::Value *NonVirtualFn =
+ Builder.CreateIntToPtr(FnAsInt, CGF.UnqualPtrTy, "memptr.nonvirtualfn");
// Check the function pointer if CFI on member function pointers is enabled.
if (ShouldEmitCFICheck) {
@@ -833,8 +832,7 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
// We're done.
CGF.EmitBlock(FnEnd);
- llvm::PHINode *CalleePtr =
- Builder.CreatePHI(llvm::PointerType::getUnqual(CGF.getLLVMContext()), 2);
+ llvm::PHINode *CalleePtr = Builder.CreatePHI(CGF.UnqualPtrTy, 2);
CalleePtr->addIncoming(VirtualFn, FnVirtual);
CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);
@@ -1238,8 +1236,7 @@ void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
// Grab the vtable pointer as an intptr_t*.
auto *ClassDecl =
cast<CXXRecordDecl>(ElementType->castAs<RecordType>()->getDecl());
- llvm::Value *VTable = CGF.GetVTablePtr(
- Ptr, llvm::PointerType::getUnqual(CGF.getLLVMContext()), ClassDecl);
+ llvm::Value *VTable = CGF.GetVTablePtr(Ptr, CGF.UnqualPtrTy, ClassDecl);
// Track back to entry -2 and pull out the offset there.
llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
@@ -1591,9 +1588,8 @@ llvm::Value *ItaniumCXXABI::emitDynamicCastToVoid(CodeGenFunction &CGF,
llvm::Value *OffsetToTop;
if (CGM.getItaniumVTableContext().isRelativeLayout()) {
// Get the vtable pointer.
- llvm::Value *VTable = CGF.GetVTablePtr(
- ThisAddr, llvm::PointerType::getUnqual(CGF.getLLVMContext()),
- ClassDecl);
+ llvm::Value *VTable =
+ CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, ClassDecl);
// Get the offset-to-top from the vtable.
OffsetToTop =
@@ -1605,9 +1601,8 @@ llvm::Value *ItaniumCXXABI::emitDynamicCastToVoid(CodeGenFunction &CGF,
CGF.ConvertType(CGF.getContext().getPointerDiffType());
// Get the vtable pointer.
- llvm::Value *VTable = CGF.GetVTablePtr(
- ThisAddr, llvm::PointerType::getUnqual(CGF.getLLVMContext()),
- ClassDecl);
+ llvm::Value *VTable =
+ CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, ClassDecl);
// Get the offset-to-top from the vtable.
OffsetToTop =
@@ -2308,8 +2303,8 @@ llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
// cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
// We can't simply ignore this load using nosanitize metadata because
// the metadata may be lost.
- llvm::FunctionType *FTy = llvm::FunctionType::get(
- CGF.SizeTy, llvm::PointerType::getUnqual(CGF.getLLVMContext()), false);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGF.SizeTy, CGF.UnqualPtrTy, false);
llvm::FunctionCallee F =
CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
@@ -2652,7 +2647,7 @@ static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
// We're assuming that the destructor function is something we can
// reasonably call with the default CC.
- llvm::Type *dtorTy = llvm::PointerType::getUnqual(CGF.getLLVMContext());
+ llvm::Type *dtorTy = CGF.UnqualPtrTy;
// Preserve address space of addr.
auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0;
@@ -4654,8 +4649,7 @@ static void InitCatchParam(CodeGenFunction &CGF,
auto catchRD = CatchType->getAsCXXRecordDecl();
CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);
- llvm::Type *PtrTy =
- llvm::PointerType::getUnqual(CGF.getLLVMContext()); // addrspace 0 ok
+ llvm::Type *PtrTy = CGF.UnqualPtrTy; // addrspace 0 ok
// Check for a copy expression. If we don't have a copy expression,
// that means a trivial copy is okay.
@@ -4843,8 +4837,7 @@ void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
llvm::FunctionCallee Dtor,
llvm::Constant *Addr) {
if (D.getTLSKind() != VarDecl::TLS_None) {
- llvm::PointerType *PtrTy =
- llvm::PointerType::getUnqual(CGF.getLLVMContext());
+ llvm::PointerType *PtrTy = CGF.UnqualPtrTy;
// extern "C" int __pt_atexit_np(int flags, int(*)(int,...), ...);
llvm::FunctionType *AtExitTy =
diff --git a/clang/lib/CodeGen/Targets/PPC.cpp b/clang/lib/CodeGen/Targets/PPC.cpp
index 9cdd2aa07791d31..6f4c150ef3149e8 100644
--- a/clang/lib/CodeGen/Targets/PPC.cpp
+++ b/clang/lib/CodeGen/Targets/PPC.cpp
@@ -431,7 +431,7 @@ Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
llvm::Type *DirectTy = CGF.ConvertType(Ty), *ElementTy = DirectTy;
if (isIndirect)
- DirectTy = llvm::PointerType::getUnqual(CGF.getLLVMContext());
+ DirectTy = CGF.UnqualPtrTy;
// Case 1: consume registers.
Address RegAddr = Address::invalid();
diff --git a/clang/lib/CodeGen/Targets/Sparc.cpp b/clang/lib/CodeGen/Targets/Sparc.cpp
index f5cafaa973150a2..a337a52a94eca94 100644
--- a/clang/lib/CodeGen/Targets/Sparc.cpp
+++ b/clang/lib/CodeGen/Targets/Sparc.cpp
@@ -286,7 +286,7 @@ Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
CGBuilderTy &Builder = CGF.Builder;
Address Addr = Address(Builder.CreateLoad(VAListAddr, "ap.cur"),
getVAListElementType(CGF), SlotSize);
- llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
+ llvm::Type *ArgPtrTy = CGF.UnqualPtrTy;
auto TypeInfo = getContext().getTypeInfoInChars(Ty);
diff --git a/clang/lib/CodeGen/Targets/X86.cpp b/clang/lib/CodeGen/Targets/X86.cpp
index b2e2f6789cce328..2af240350438840 100644
--- a/clang/lib/CodeGen/Targets/X86.cpp
+++ b/clang/lib/CodeGen/Targets/X86.cpp
@@ -2978,9 +2978,7 @@ static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF,
// AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
- llvm::Value *Res =
- CGF.Builder.CreateBitCast(overflow_arg_area,
- llvm::PointerType::getUnqual(LTy));
+ llvm::Value *Res = overflow_arg_area;
// AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
// l->overflow_arg_area + sizeof(type).
@@ -3083,8 +3081,6 @@ Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
llvm::Type *TyHi = ST->getElementType(1);
assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
"Unexpected ABI info for mixed regs");
- llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
- llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
llvm::Value *GPAddr =
CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, gp_offset);
llvm::Value *FPAddr =
@@ -3095,13 +3091,13 @@ Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
// Copy the first element.
// FIXME: Our choice of alignment here and below is probably pessimistic.
llvm::Value *V = CGF.Builder.CreateAlignedLoad(
- TyLo, CGF.Builder.CreateBitCast(RegLoAddr, PTyLo),
+ TyLo, RegLoAddr,
CharUnits::fromQuantity(getDataLayout().getABITypeAlign(TyLo)));
CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
// Copy the second element.
V = CGF.Builder.CreateAlignedLoad(
- TyHi, CGF.Builder.CreateBitCast(RegHiAddr, PTyHi),
+ TyHi, RegHiAddr,
CharUnits::fromQuantity(getDataLayout().getABITypeAlign(TyHi)));
CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));