[llvm-commits] [dragonegg] r166673 - in /dragonegg/trunk: include/dragonegg/Internals.h src/Backend.cpp src/ConstantConversion.cpp src/Convert.cpp src/TypeConversion.cpp
Duncan Sands
baldrick at free.fr
Thu Oct 25 02:19:01 PDT 2012
Author: baldrick
Date: Thu Oct 25 04:19:00 2012
New Revision: 166673
URL: http://llvm.org/viewvc/llvm-project?rev=166673&view=rev
Log:
Rename variables from TD (TargetData) to DL (DataLayout).
Modified:
dragonegg/trunk/include/dragonegg/Internals.h
dragonegg/trunk/src/Backend.cpp
dragonegg/trunk/src/ConstantConversion.cpp
dragonegg/trunk/src/Convert.cpp
dragonegg/trunk/src/TypeConversion.cpp
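The change is mechanical: every local TD that refers to LLVM's (now renamed) DataLayout becomes DL, matching LLVM's own rename of TargetData to DataLayout. For readers unfamiliar with the API, the standalone sketch below is not part of this patch; it is a minimal, hedged illustration of the kinds of queries these DL variables answer throughout the diff. It assumes an LLVM install where the headers live under llvm/IR/ (the 2012 tree this patch targets included "llvm/DataLayout.h" instead) and where the older, pre-Align getABITypeAlignment accessor is still available.

// Standalone sketch, not part of this patch: illustrates the DataLayout
// queries that the renamed DL variables answer throughout this commit.
// Header paths are the later llvm/IR/ ones; the 2012 tree used
// "llvm/DataLayout.h", and getABITypeAlignment is the pre-Align accessor.
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdint>

using namespace llvm;

int main() {
  LLVMContext Context;
  // A simple layout string for a 64-bit little-endian target.
  const DataLayout DL("e-p:64:64:64-i64:64:64-f64:64:64");

  Type *I17 = IntegerType::get(Context, 17); // an oddly sized integer
  Type *F64 = Type::getDoubleTy(Context);

  // The three queries this patch touches most often, under their new DL name.
  uint64_t SizeBits  = DL.getTypeSizeInBits(I17);      // 17
  uint64_t AllocBits = DL.getTypeAllocSizeInBits(I17); // 32: the 24-bit store
                                                       // size rounded up to the
                                                       // 4-byte ABI alignment
  unsigned AbiAlign  = DL.getABITypeAlignment(F64);    // 8 bytes

  outs() << SizeBits << " " << AllocBits << " " << AbiAlign << "\n";
  return 0;
}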
Modified: dragonegg/trunk/include/dragonegg/Internals.h
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/include/dragonegg/Internals.h?rev=166673&r1=166672&r2=166673&view=diff
==============================================================================
--- dragonegg/trunk/include/dragonegg/Internals.h (original)
+++ dragonegg/trunk/include/dragonegg/Internals.h Thu Oct 25 04:19:00 2012
@@ -222,7 +222,7 @@
///
class TreeToLLVM {
// State that is initialized when the function starts.
- const DataLayout &TD;
+ const DataLayout &DL;
tree_node *FnDecl;
Function *Fn;
BasicBlock *ReturnBB;
Modified: dragonegg/trunk/src/Backend.cpp
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/src/Backend.cpp?rev=166673&r1=166672&r2=166673&view=diff
==============================================================================
--- dragonegg/trunk/src/Backend.cpp (original)
+++ dragonegg/trunk/src/Backend.cpp Thu Oct 25 04:19:00 2012
@@ -275,8 +275,8 @@
// TODO: Change getTypeSizeInBits for aggregate types so it is no longer
// rounded up to the alignment.
uint64_t gcc_size = getInt64(DECL_SIZE(decl), true);
- const DataLayout *TD = TheTarget->getDataLayout();
- unsigned Align = 8 * TD->getABITypeAlignment(Ty);
+ const DataLayout *DL = TheTarget->getDataLayout();
+ unsigned Align = 8 * DL->getABITypeAlignment(Ty);
return TheTarget->getDataLayout()->getTypeAllocSizeInBits(Ty) ==
((gcc_size + Align - 1) / Align) * Align;
}
Modified: dragonegg/trunk/src/ConstantConversion.cpp
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/src/ConstantConversion.cpp?rev=166673&r1=166672&r2=166673&view=diff
==============================================================================
--- dragonegg/trunk/src/ConstantConversion.cpp (original)
+++ dragonegg/trunk/src/ConstantConversion.cpp Thu Oct 25 04:19:00 2012
@@ -819,7 +819,7 @@
/// ConvertArrayCONSTRUCTOR - Convert a CONSTRUCTOR with array or vector type.
static Constant *ConvertArrayCONSTRUCTOR(tree exp, TargetFolder &Folder) {
- const DataLayout &TD = getDataLayout();
+ const DataLayout &DL = getDataLayout();
tree init_type = main_type(exp);
Type *InitTy = ConvertType(init_type);
@@ -829,7 +829,7 @@
// Check that the element type has a known, constant size.
assert(isSizeCompatible(elt_type) && "Variable sized array element!");
- uint64_t EltSize = TD.getTypeAllocSizeInBits(EltTy);
+ uint64_t EltSize = DL.getTypeAllocSizeInBits(EltTy);
/// Elts - The initial values to use for the array elements. A null entry
/// means that the corresponding array element should be default initialized.
@@ -856,7 +856,7 @@
FOR_EACH_CONSTRUCTOR_ELT(CONSTRUCTOR_ELTS(exp), ix, elt_index, elt_value) {
// Find and decode the constructor's value.
Constant *Val = ConvertInitializerWithCast(elt_value, elt_type, Folder);
- uint64_t ValSize = TD.getTypeAllocSizeInBits(Val->getType());
+ uint64_t ValSize = DL.getTypeAllocSizeInBits(Val->getType());
assert(ValSize <= EltSize && "Element initial value too big!");
// If the initial value is smaller than the element size then pad it out.
@@ -929,16 +929,16 @@
// While there, compute the maximum element alignment.
bool isHomogeneous = true;
Type *ActualEltTy = Elts[0]->getType();
- unsigned MaxAlign = TD.getABITypeAlignment(ActualEltTy);
+ unsigned MaxAlign = DL.getABITypeAlignment(ActualEltTy);
for (unsigned i = 1; i != NumElts; ++i)
if (Elts[i]->getType() != ActualEltTy) {
- MaxAlign = std::max(TD.getABITypeAlignment(Elts[i]->getType()), MaxAlign);
+ MaxAlign = std::max(DL.getABITypeAlignment(Elts[i]->getType()), MaxAlign);
isHomogeneous = false;
}
// We guarantee that initializers are always at least as big as the LLVM type
// for the initializer. If needed, append padding to ensure this.
- uint64_t TypeSize = TD.getTypeAllocSizeInBits(InitTy);
+ uint64_t TypeSize = DL.getTypeAllocSizeInBits(InitTy);
if (NumElts * EltSize < TypeSize) {
unsigned PadBits = TypeSize - NumElts * EltSize;
assert(PadBits % BITS_PER_UNIT == 0 && "Non-unit type size?");
@@ -1022,7 +1022,7 @@
/// isSafeToReturnContentsDirectly - Return whether the current value for the
/// constant properly represents the bits in the range and so can be handed to
/// the user as is.
- bool isSafeToReturnContentsDirectly(const DataLayout &TD) const {
+ bool isSafeToReturnContentsDirectly(const DataLayout &DL) const {
// If there is no constant (allowed when the range is empty) then one needs
// to be created.
if (!C)
@@ -1038,7 +1038,7 @@
return false;
// If the constant is wider than the range then it needs to be truncated
// before being passed to the user.
- unsigned AllocBits = TD.getTypeAllocSizeInBits(Ty);
+ unsigned AllocBits = DL.getTypeAllocSizeInBits(Ty);
return AllocBits <= (unsigned)R.getWidth();
}
@@ -1071,18 +1071,18 @@
/// larger than the width of the range. Unlike the other methods for this
/// class, this one requires that the width of the range be a multiple of an
/// address unit, which usually means a multiple of 8.
- Constant *extractContents(const DataLayout &TD) {
+ Constant *extractContents(const DataLayout &DL) {
assert(R.getWidth() % BITS_PER_UNIT == 0 && "Boundaries not aligned?");
/// If the current value for the constant can be used to represent the bits
/// in the range then just return it.
- if (isSafeToReturnContentsDirectly(TD))
+ if (isSafeToReturnContentsDirectly(DL))
return C;
// If the range is empty then return a constant with zero size.
if (R.empty()) {
// Return an empty array. Remember the returned value as an optimization
// in case we are called again.
C = UndefValue::get(GetUnitType(Context, 0));
- assert(isSafeToReturnContentsDirectly(TD) && "Unit over aligned?");
+ assert(isSafeToReturnContentsDirectly(DL) && "Unit over aligned?");
return C;
}
// If the type is something like i17 then round it up to a multiple of a
@@ -1094,7 +1094,7 @@
BITS_PER_UNIT);
Ty = IntegerType::get(Context, BitWidth);
C = TheFolder->CreateZExtOrBitCast(C, Ty);
- if (isSafeToReturnContentsDirectly(TD))
+ if (isSafeToReturnContentsDirectly(DL))
return C;
}
// Turn the contents into a bunch of bytes. Remember the returned value as
@@ -1106,7 +1106,7 @@
C = InterpretAsType(C, GetUnitType(Context, Units), R.getFirst() - Starts,
Folder);
Starts = R.getFirst();
- assert(isSafeToReturnContentsDirectly(TD) && "Unit over aligned?");
+ assert(isSafeToReturnContentsDirectly(DL) && "Unit over aligned?");
return C;
}
};
@@ -1137,10 +1137,10 @@
// FIXME: This new logic, especially the handling of bitfields, is untested
// and probably wrong on big-endian machines.
IntervalList<FieldContents, int, 8> Layout;
- const DataLayout &TD = getDataLayout();
+ const DataLayout &DL = getDataLayout();
tree type = main_type(exp);
Type *Ty = ConvertType(type);
- uint64_t TypeSize = TD.getTypeAllocSizeInBits(Ty);
+ uint64_t TypeSize = DL.getTypeAllocSizeInBits(Ty);
// Ensure that fields without an initial value are default initialized by
// explicitly setting the starting value for all fields to be zero. If an
@@ -1182,7 +1182,7 @@
if (!FieldTy->isSized())
// An incomplete type - this field cannot be default initialized.
continue;
- BitWidth = TD.getTypeAllocSizeInBits(FieldTy);
+ BitWidth = DL.getTypeAllocSizeInBits(FieldTy);
if (FirstBit + BitWidth > TypeSize)
BitWidth = TypeSize - FirstBit;
}
@@ -1225,7 +1225,7 @@
// size from the initial value.
uint64_t BitWidth = isInt64(DECL_SIZE(field), true) ?
getInt64(DECL_SIZE(field), true) :
- TD.getTypeAllocSizeInBits(Init->getType());
+ DL.getTypeAllocSizeInBits(Init->getType());
uint64_t LastBit = FirstBit + BitWidth;
// Set the bits occupied by the field to the initial value.
@@ -1245,8 +1245,8 @@
for (unsigned i = 0, e = Layout.getNumIntervals(); i != e; ++i) {
FieldContents F = Layout.getInterval(i);
unsigned First = F.getRange().getFirst();
- Constant *Val = F.extractContents(TD);
- unsigned Alignment = TD.getABITypeAlignment(Val->getType()) * 8;
+ Constant *Val = F.extractContents(DL);
+ unsigned Alignment = DL.getABITypeAlignment(Val->getType()) * 8;
if (Alignment > MaxAlign || First % Alignment) {
Pack = true;
break;
@@ -1261,7 +1261,7 @@
for (unsigned i = 0, e = Layout.getNumIntervals(); i != e; ++i) {
FieldContents F = Layout.getInterval(i);
unsigned First = F.getRange().getFirst();
- Constant *Val = F.extractContents(TD);
+ Constant *Val = F.extractContents(DL);
assert(EndOfPrevious <= First && "Previous field too big!");
// If there is a gap then we may need to fill it with padding.
@@ -1273,7 +1273,7 @@
if (!Pack) {
// If the field's alignment will take care of the gap then there is no
// need for padding.
- unsigned Alignment = TD.getABITypeAlignment(Val->getType()) * 8;
+ unsigned Alignment = DL.getABITypeAlignment(Val->getType()) * 8;
if (First == (EndOfPrevious + Alignment - 1) / Alignment * Alignment)
NeedPadding = false;
}
@@ -1288,7 +1288,7 @@
// Append the field.
Elts.push_back(Val);
- EndOfPrevious = First + TD.getTypeAllocSizeInBits(Val->getType());
+ EndOfPrevious = First + DL.getTypeAllocSizeInBits(Val->getType());
}
// We guarantee that initializers are always at least as big as the LLVM type
Modified: dragonegg/trunk/src/Convert.cpp
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/src/Convert.cpp?rev=166673&r1=166672&r2=166673&view=diff
==============================================================================
--- dragonegg/trunk/src/Convert.cpp (original)
+++ dragonegg/trunk/src/Convert.cpp Thu Oct 25 04:19:00 2012
@@ -581,7 +581,7 @@
}
TreeToLLVM::TreeToLLVM(tree fndecl) :
- TD(getDataLayout()), Builder(Context, *TheFolder) {
+ DL(getDataLayout()), Builder(Context, *TheFolder) {
FnDecl = fndecl;
AllocaInsertionPoint = 0;
Fn = 0;
@@ -1355,7 +1355,7 @@
Builder.CreateBitCast(ResultLV.Ptr, Type::getInt8PtrTy(Context));
ResultLV.Ptr =
Builder.CreateGEP(ResultLV.Ptr,
- ConstantInt::get(TD.getIntPtrType(Context, 0),
+ ConstantInt::get(DL.getIntPtrType(Context, 0),
ReturnOffset),
flag_verbose_asm ? "rtvl" : "");
ResultLV.setAlignment(MinAlign(ResultLV.getAlignment(), ReturnOffset));
@@ -1839,7 +1839,7 @@
if (EltTy->isPointerTy()) {
// A pointer/vector of pointer - use inttoptr.
assert(OrigEltTy->getPrimitiveSizeInBits() ==
- TD.getPointerSizeInBits(
+ DL.getPointerSizeInBits(
cast<PointerType>(EltTy)->getAddressSpace())
&& "Pointer type not same size!");
return Builder.CreateIntToPtr(V, Ty);
@@ -1859,7 +1859,7 @@
return V;
if (OrigEltTy->isPointerTy()) {
// A pointer/vector of pointer - form a (vector of) pointer sized integers.
- Type *NewTy = TD.getIntPtrType(OrigTy);
+ Type *NewTy = DL.getIntPtrType(OrigTy);
return Builder.CreatePtrToInt(V, NewTy);
}
// Everything else.
@@ -1941,7 +1941,7 @@
AllocaInst *AI = CreateTemporary(Ty);
// MemRefs do not allow alignment 0.
if (!AI->getAlignment())
- AI->setAlignment(TD.getPrefTypeAlignment(Ty));
+ AI->setAlignment(DL.getPrefTypeAlignment(Ty));
return MemRef(AI, AI->getAlignment(), false);
}
@@ -2220,7 +2220,7 @@
unsigned Align) {
Type *SBP = Type::getInt8PtrTy(Context);
- Type *IntPtr = TD.getIntPtrType(DestPtr->getType());
+ Type *IntPtr = DL.getIntPtrType(DestPtr->getType());
Value *Ops[5] = {
Builder.CreateBitCast(DestPtr, SBP),
Builder.CreateBitCast(SrcPtr, SBP),
@@ -2238,7 +2238,7 @@
Value *TreeToLLVM::EmitMemMove(Value *DestPtr, Value *SrcPtr, Value *Size,
unsigned Align) {
Type *SBP = Type::getInt8PtrTy(Context);
- Type *IntPtr = TD.getIntPtrType(DestPtr->getType());
+ Type *IntPtr = DL.getIntPtrType(DestPtr->getType());
Value *Ops[5] = {
Builder.CreateBitCast(DestPtr, SBP),
Builder.CreateBitCast(SrcPtr, SBP),
@@ -2256,7 +2256,7 @@
Value *TreeToLLVM::EmitMemSet(Value *DestPtr, Value *SrcVal, Value *Size,
unsigned Align) {
Type *SBP = Type::getInt8PtrTy(Context);
- Type *IntPtr = TD.getIntPtrType(DestPtr->getType());
+ Type *IntPtr = DL.getIntPtrType(DestPtr->getType());
Value *Ops[5] = {
Builder.CreateBitCast(DestPtr, SBP),
Builder.CreateIntCast(SrcVal, Type::getInt8Ty(Context), /*isSigned*/true),
@@ -2905,7 +2905,7 @@
// LLVM does not support vectors of pointers, so turn any pointers into
// integers.
if (isa<PointerType>(Elt->getType()))
- Elt = Builder.CreatePtrToInt(Elt, TD.getIntPtrType(Elt->getType()));
+ Elt = Builder.CreatePtrToInt(Elt, DL.getIntPtrType(Elt->getType()));
assert(Elt->getType() == VTy->getElementType() &&
"Unexpected type for vector constructor!");
BuildVecOps.push_back(Elt);
@@ -3416,8 +3416,8 @@
// a temporary then load the value out later.
Target = CreateTempLoc(ConvertType(gimple_call_return_type(stmt)));
- if (TD.getTypeAllocSize(Call->getType()) <=
- TD.getTypeAllocSize(cast<PointerType>(Target.Ptr->getType())
+ if (DL.getTypeAllocSize(Call->getType()) <=
+ DL.getTypeAllocSize(cast<PointerType>(Target.Ptr->getType())
->getElementType())) {
Value *Dest = Builder.CreateBitCast(Target.Ptr,
Call->getType()->getPointerTo());
@@ -3454,7 +3454,7 @@
if (Call->getType()->canLosslesslyBitCastTo(RetTy))
return Builder.CreateBitCast(Call, RetTy); // Simple case.
// Probably a scalar to complex conversion.
- assert(TD.getTypeAllocSize(Call->getType()) == TD.getTypeAllocSize(RetTy) &&
+ assert(DL.getTypeAllocSize(Call->getType()) == DL.getTypeAllocSize(RetTy) &&
"Size mismatch in scalar to scalar conversion!");
Value *Tmp = CreateTemporary(Call->getType());
Builder.CreateStore(Call, Tmp);
@@ -3473,11 +3473,11 @@
Type *AggTy = cast<PointerType>(Ptr->getType())->getElementType();
// MaxStoreSize - The maximum number of bytes we can store without overflowing
// the aggregate.
- int64_t MaxStoreSize = TD.getTypeAllocSize(AggTy);
+ int64_t MaxStoreSize = DL.getTypeAllocSize(AggTy);
if (Client.Offset) {
Ptr = Builder.CreateBitCast(Ptr, Type::getInt8PtrTy(Context));
Ptr = Builder.CreateGEP(Ptr,
- ConstantInt::get(TD.getIntPtrType(Ptr->getType()),
+ ConstantInt::get(DL.getIntPtrType(Ptr->getType()),
Client.Offset),
flag_verbose_asm ? "ro" : "");
Align = MinAlign(Align, Client.Offset);
@@ -3486,7 +3486,7 @@
assert(MaxStoreSize > 0 && "Storing off end of aggregate?");
Value *Val = Call;
// Check whether storing the scalar directly would overflow the aggregate.
- if (TD.getTypeStoreSize(Call->getType()) > (uint64_t)MaxStoreSize) {
+ if (DL.getTypeStoreSize(Call->getType()) > (uint64_t)MaxStoreSize) {
// Chop down the size of the scalar to the maximum number of bytes that can
// be stored without overflowing the destination.
// TODO: Check whether this works correctly on big-endian machines.
@@ -5290,7 +5290,7 @@
// Extract to a temporary then load the value out later.
MemRef Target = CreateTempLoc(CplxTy);
- assert(TD.getTypeAllocSize(CI->getType()) <= TD.getTypeAllocSize(CplxTy)
+ assert(DL.getTypeAllocSize(CI->getType()) <= DL.getTypeAllocSize(CplxTy)
&& "Complex number returned in too large registers!");
Value *Dest = Builder.CreateBitCast(Target.Ptr,
CI->getType()->getPointerTo());
@@ -5302,7 +5302,7 @@
return CI; // Normal scalar return.
// Probably { float, float } being returned as a double.
- assert(TD.getTypeAllocSize(CI->getType()) == TD.getTypeAllocSize(CplxTy) &&
+ assert(DL.getTypeAllocSize(CI->getType()) == DL.getTypeAllocSize(CplxTy) &&
"Size mismatch in scalar to scalar conversion!");
Value *Tmp = CreateTemporary(CI->getType());
Builder.CreateStore(CI, Tmp);
@@ -5675,7 +5675,7 @@
if (!validate_gimple_arglist(stmt, INTEGER_TYPE, POINTER_TYPE, VOID_TYPE))
return false;
- Type *IntPtr = TD.getIntPtrType(Context, 0);
+ Type *IntPtr = DL.getIntPtrType(Context, 0);
Value *Offset = EmitMemory(gimple_call_arg(stmt, 0));
Value *Handler = EmitMemory(gimple_call_arg(stmt, 1));
@@ -6038,7 +6038,7 @@
Value *Ptr = POINTER_TYPE_OVERFLOW_UNDEFINED ?
Builder.CreateInBoundsGEP(ArrayAddr, IndexVal, GEPName) :
Builder.CreateGEP(ArrayAddr, IndexVal, GEPName);
- unsigned Alignment = MinAlign(ArrayAlign, TD.getABITypeAlignment(EltTy));
+ unsigned Alignment = MinAlign(ArrayAlign, DL.getABITypeAlignment(EltTy));
return LValue(Builder.CreateBitCast(Ptr,
PointerType::getUnqual(ConvertType(TREE_TYPE(exp)))),
Alignment);
@@ -6090,11 +6090,11 @@
unsigned BitSize = (unsigned)TREE_INT_CST_LOW(TREE_OPERAND(exp, 1));
Type *ValTy = ConvertType(TREE_TYPE(exp));
- unsigned ValueSizeInBits = TD.getTypeSizeInBits(ValTy);
+ unsigned ValueSizeInBits = DL.getTypeSizeInBits(ValTy);
assert(BitSize <= ValueSizeInBits &&
"ValTy isn't large enough to hold the value loaded!");
- assert(ValueSizeInBits == TD.getTypeAllocSizeInBits(ValTy) &&
+ assert(ValueSizeInBits == DL.getTypeAllocSizeInBits(ValTy) &&
"FIXME: BIT_FIELD_REF logic is broken for non-round types");
// BIT_FIELD_REF values can have BitStart values that are quite large. We
@@ -6326,7 +6326,7 @@
else
// IMAGPART alignment = MinAlign(Ptr.Alignment, sizeof field);
Alignment = MinAlign(Ptr.getAlignment(),
- TD.getTypeAllocSize(Ptr.Ptr->getType()));
+ DL.getTypeAllocSize(Ptr.Ptr->getType()));
return LValue(Builder.CreateStructGEP(Ptr.Ptr, Idx, flag_verbose_asm ?
"prtxpr" : ""), Alignment);
}
@@ -7739,7 +7739,7 @@
// Store the vectors to successive memory locations in a temporary.
tree elt_type = TREE_TYPE(TREE_TYPE(op0));
Type *EltTy = ConvertType(elt_type);
- unsigned Align = TD.getABITypeAlignment(EltTy);
+ unsigned Align = DL.getABITypeAlignment(EltTy);
// The temporary is a struct containing the pair of input vectors.
Type *TmpTy = StructType::get(ConvertType(TREE_TYPE(op0)),
ConvertType(TREE_TYPE(op1)), NULL);
@@ -8108,7 +8108,7 @@
assert(!LV.isBitfield() && "Inline asm can't have bitfield operand");
// Small structs and unions can be treated as integers.
- uint64_t TySize = TD.getTypeSizeInBits(LLVMTy);
+ uint64_t TySize = DL.getTypeSizeInBits(LLVMTy);
if (TySize == 1 || TySize == 8 || TySize == 16 ||
TySize == 32 || TySize == 64 || (TySize == 128 && !AllowsMem)) {
LLVMTy = IntegerType::get(Context, (unsigned)TySize);
@@ -8156,8 +8156,8 @@
"output constraint of incompatible type!");
return;
}
- uint64_t OTyBits = TD.getTypeSizeInBits(OTy);
- uint64_t OpTyBits = TD.getTypeSizeInBits(OpTy);
+ uint64_t OTyBits = DL.getTypeSizeInBits(OTy);
+ uint64_t OpTyBits = DL.getTypeSizeInBits(OpTy);
if (OTyBits == 0 || OpTyBits == 0) {
error("unsupported inline asm: input constraint with a matching "
"output constraint of incompatible type!");
Modified: dragonegg/trunk/src/TypeConversion.cpp
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/src/TypeConversion.cpp?rev=166673&r1=166672&r2=166673&view=diff
==============================================================================
--- dragonegg/trunk/src/TypeConversion.cpp (original)
+++ dragonegg/trunk/src/TypeConversion.cpp Thu Oct 25 04:19:00 2012
@@ -997,7 +997,7 @@
/// isSafeToReturnContentsDirectly - Return whether the current value for the
/// type properly represents the bits in the range and so can be handed to the
/// user as is.
- bool isSafeToReturnContentsDirectly(const DataLayout &TD) const {
+ bool isSafeToReturnContentsDirectly(const DataLayout &DL) const {
// If there is no type (allowed when the range is empty) then one needs to
// be created.
if (!Ty)
@@ -1012,7 +1012,7 @@
return false;
// If the type is wider than the range then it needs to be truncated before
// being passed to the user.
- uint64_t AllocBits = TD.getTypeAllocSizeInBits(Ty);
+ uint64_t AllocBits = DL.getTypeAllocSizeInBits(Ty);
return AllocBits <= R.getWidth();
}
@@ -1044,18 +1044,18 @@
/// than the width of the range. Unlike the other methods for this class this
/// one requires that the width of the range be a multiple of an address unit,
/// which usually means a multiple of 8.
- Type *extractContents(const DataLayout &TD) {
+ Type *extractContents(const DataLayout &DL) {
assert(R.getWidth() % BITS_PER_UNIT == 0 && "Boundaries not aligned?");
/// If the current value for the type can be used to represent the bits in
/// the range then just return it.
- if (isSafeToReturnContentsDirectly(TD))
+ if (isSafeToReturnContentsDirectly(DL))
return Ty;
// If the range is empty then return a type with zero size.
if (R.empty()) {
// Return an empty array. Remember the returned value as an optimization
// in case we are called again.
Ty = GetUnitType(Context, 0);
- assert(isSafeToReturnContentsDirectly(TD) && "Unit over aligned?");
+ assert(isSafeToReturnContentsDirectly(DL) && "Unit over aligned?");
return Ty;
}
// If the type is something like i17 then round it up to a multiple of a
@@ -1064,7 +1064,7 @@
unsigned BitWidth = RoundUpToAlignment(Ty->getPrimitiveSizeInBits(),
BITS_PER_UNIT);
Ty = IntegerType::get(Context, BitWidth);
- if (isSafeToReturnContentsDirectly(TD))
+ if (isSafeToReturnContentsDirectly(DL))
return Ty;
}
// Represent the range using an array of bytes. Remember the returned type
@@ -1075,7 +1075,7 @@
uint64_t Units = R.getWidth() / BITS_PER_UNIT;
Ty = GetUnitType(Context, Units);
Starts = R.getFirst();
- assert(isSafeToReturnContentsDirectly(TD) && "Unit over aligned?");
+ assert(isSafeToReturnContentsDirectly(DL) && "Unit over aligned?");
return Ty;
}
};
@@ -1106,7 +1106,7 @@
assert(TYPE_SIZE(type) && "Incomplete types should be handled elsewhere!");
IntervalList<TypedRange, uint64_t, 8> Layout;
- const DataLayout &TD = getDataLayout();
+ const DataLayout &DL = getDataLayout();
// Get the size of the type in bits. If the type has variable or ginormous
// size then it is convenient to pretend it is "infinitely" big.
@@ -1147,7 +1147,7 @@
// If the field has variable or unknown size then use the size of the
// LLVM type instead as it gives the minimum size the field may have.
assert(FieldTy->isSized() && "Type field has no size!");
- BitWidth = TD.getTypeAllocSizeInBits(FieldTy);
+ BitWidth = DL.getTypeAllocSizeInBits(FieldTy);
if (FirstBit + BitWidth > TypeSize)
BitWidth = TypeSize - FirstBit;
}
@@ -1181,8 +1181,8 @@
for (unsigned i = 0, e = Layout.getNumIntervals(); i != e; ++i) {
TypedRange F = Layout.getInterval(i);
uint64_t First = F.getRange().getFirst();
- Type *Ty = F.extractContents(TD);
- unsigned Alignment = TD.getABITypeAlignment(Ty) * 8;
+ Type *Ty = F.extractContents(DL);
+ unsigned Alignment = DL.getABITypeAlignment(Ty) * 8;
if (Alignment > MaxAlign || First % Alignment) {
Pack = true;
break;
@@ -1197,7 +1197,7 @@
for (unsigned i = 0, e = Layout.getNumIntervals(); i != e; ++i) {
TypedRange F = Layout.getInterval(i);
uint64_t First = F.getRange().getFirst();
- Type *Ty = F.extractContents(TD);
+ Type *Ty = F.extractContents(DL);
assert(EndOfPrevious <= First && "Previous field too big!");
// If there is a gap then we may need to fill it with padding.
@@ -1209,7 +1209,7 @@
if (!Pack) {
// If the field's alignment will take care of the gap then there is no
// need for padding.
- unsigned Alignment = TD.getABITypeAlignment(Ty) * 8;
+ unsigned Alignment = DL.getABITypeAlignment(Ty) * 8;
if (First == (EndOfPrevious + Alignment - 1) / Alignment * Alignment)
NeedPadding = false;
}
@@ -1224,7 +1224,7 @@
// Append the field.
Elts.push_back(Ty);
- EndOfPrevious = First + TD.getTypeAllocSizeInBits(Ty);
+ EndOfPrevious = First + DL.getTypeAllocSizeInBits(Ty);
}
// If the GCC type has a sensible size then we guarantee that LLVM type has