[llvm-commits] [dragonegg] r135371 - in /dragonegg/trunk: include/dragonegg/ABI.h include/dragonegg/Internals.h include/x86/dragonegg/Target.h src/Backend.cpp src/Constants.cpp src/Convert.cpp src/DefaultABI.cpp src/Types.cpp src/x86/Target.cpp
Chris Lattner
sabre at nondot.org
Sun Jul 17 21:25:32 PDT 2011
Author: lattner
Date: Sun Jul 17 23:25:32 2011
New Revision: 135371
URL: http://llvm.org/viewvc/llvm-project?rev=135371&view=rev
Log:
untested patch to de-constify llvm::Type, patch by David Blaikie!
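LLVM mainline was dropping const-correctness from llvm::Type across the tree
at this time as part of the type system rework, so clients such as dragonegg
have to follow suit. The change is mechanical: every "const Type *" (and every
container of them) becomes "Type *". A minimal standalone sketch of the
pattern, using illustrative names that are not from the patch:

    #include "llvm/DerivedTypes.h"
    #include "llvm/LLVMContext.h"
    #include <vector>
    using namespace llvm;

    void deconstify_pattern(LLVMContext &Context) {
      // Before: const Type *EltTy = ...;  std::vector<const Type*> Elts;
      ArrayType *ATy = ArrayType::get(Type::getInt8Ty(Context), 16);
      Type *EltTy = ATy->getElementType();  // const qualifier dropped
      std::vector<Type*> Elts(1, EltTy);    // container element type follows
      (void)Elts;                           // silence unused-variable warning
    }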
Modified:
dragonegg/trunk/include/dragonegg/ABI.h
dragonegg/trunk/include/dragonegg/Internals.h
dragonegg/trunk/include/x86/dragonegg/Target.h
dragonegg/trunk/src/Backend.cpp
dragonegg/trunk/src/Constants.cpp
dragonegg/trunk/src/Convert.cpp
dragonegg/trunk/src/DefaultABI.cpp
dragonegg/trunk/src/Types.cpp
dragonegg/trunk/src/x86/Target.cpp
Modified: dragonegg/trunk/include/dragonegg/ABI.h
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/include/dragonegg/ABI.h?rev=135371&r1=135370&r2=135371&view=diff
==============================================================================
--- dragonegg/trunk/include/dragonegg/ABI.h (original)
+++ dragonegg/trunk/include/dragonegg/ABI.h Sun Jul 17 23:25:32 2011
@@ -47,32 +47,32 @@
/// HandleScalarResult - This callback is invoked if the function returns a
/// simple scalar result value, which is of type RetTy.
- virtual void HandleScalarResult(const Type * /*RetTy*/) {}
+ virtual void HandleScalarResult(Type * /*RetTy*/) {}
/// HandleAggregateResultAsScalar - This callback is invoked if the function
/// returns an aggregate value by bit converting it to the specified scalar
/// type and returning that. The bit conversion should start at byte Offset
/// within the struct, and ScalarTy is not necessarily big enough to cover
/// the entire struct.
- virtual void HandleAggregateResultAsScalar(const Type * /*ScalarTy*/,
+ virtual void HandleAggregateResultAsScalar(Type * /*ScalarTy*/,
unsigned /*Offset*/ = 0) {}
/// HandleAggregateResultAsAggregate - This callback is invoked if the function
/// returns an aggregate value using multiple return values.
- virtual void HandleAggregateResultAsAggregate(const Type * /*AggrTy*/) {}
+ virtual void HandleAggregateResultAsAggregate(Type * /*AggrTy*/) {}
/// HandleAggregateShadowResult - This callback is invoked if the function
/// returns an aggregate value by using a "shadow" first parameter, which is
/// a pointer to the aggregate, of type PtrArgTy. If RetPtr is set to true,
/// the pointer argument itself is returned from the function.
- virtual void HandleAggregateShadowResult(const PointerType * /*PtrArgTy*/,
+ virtual void HandleAggregateShadowResult(PointerType * /*PtrArgTy*/,
bool /*RetPtr*/) {}
/// HandleScalarShadowResult - This callback is invoked if the function
/// returns a scalar value by using a "shadow" first parameter, which is a
/// pointer to the scalar, of type PtrArgTy. If RetPtr is set to true,
/// the pointer argument itself is returned from the function.
- virtual void HandleScalarShadowResult(const PointerType * /*PtrArgTy*/,
+ virtual void HandleScalarShadowResult(PointerType * /*PtrArgTy*/,
bool /*RetPtr*/) {}
@@ -80,32 +80,32 @@
/// LLVM argument to pass. It is only used for first class types.
/// If RealSize is non-zero then it specifies the number of bytes to access
/// from LLVMTy.
- virtual void HandleScalarArgument(const llvm::Type * /*LLVMTy*/,
+ virtual void HandleScalarArgument(llvm::Type * /*LLVMTy*/,
tree_node * /*type*/,
unsigned /*RealSize*/ = 0) {}
/// HandleByInvisibleReferenceArgument - This callback is invoked if a pointer
/// (of type PtrTy) to the argument is passed rather than the argument itself.
- virtual void HandleByInvisibleReferenceArgument(const llvm::Type * /*PtrTy*/,
+ virtual void HandleByInvisibleReferenceArgument(llvm::Type * /*PtrTy*/,
tree_node * /*type*/) {}
/// HandleByValArgument - This callback is invoked if the aggregate function
/// argument is passed by value.
- virtual void HandleByValArgument(const llvm::Type * /*LLVMTy*/,
+ virtual void HandleByValArgument(llvm::Type * /*LLVMTy*/,
tree_node * /*type*/) {}
/// HandleFCAArgument - This callback is invoked if the aggregate function
/// argument is passed by value as a first class aggregate.
- virtual void HandleFCAArgument(const llvm::Type * /*LLVMTy*/,
+ virtual void HandleFCAArgument(llvm::Type * /*LLVMTy*/,
tree_node * /*type*/) {}
/// EnterField - Called when we're about to enter the field of a struct
/// or union. FieldNo is the number of the element we are entering in the
/// LLVM Struct, StructTy is the LLVM type of the struct we are entering.
virtual void EnterField(unsigned /*FieldNo*/,
- const llvm::Type * /*StructTy*/) {}
+ llvm::Type * /*StructTy*/) {}
virtual void ExitField() {}
- virtual void HandlePad(const llvm::Type * /*LLVMTy*/) {}
+ virtual void HandlePad(llvm::Type * /*LLVMTy*/) {}
};
// LLVM_SHOULD_NOT_RETURN_COMPLEX_IN_MEMORY - A hook to allow
@@ -146,8 +146,8 @@
// returned as a scalar, otherwise return NULL. This is the default
// target independent implementation.
static inline
-const Type* getLLVMScalarTypeForStructReturn(tree_node *type, unsigned *Offset) {
- const Type *Ty = ConvertType(type);
+Type* getLLVMScalarTypeForStructReturn(tree_node *type, unsigned *Offset) {
+ Type *Ty = ConvertType(type);
unsigned Size = getTargetData().getTypeAllocSize(Ty);
*Offset = 0;
if (Size == 0)
@@ -172,7 +172,7 @@
// returns as multiple values, otherwise return NULL. This is the default
// target independent implementation.
static inline
-const Type* getLLVMAggregateTypeForStructReturn(tree_node * /*type*/) {
+Type* getLLVMAggregateTypeForStructReturn(tree_node * /*type*/) {
return NULL;
}
@@ -327,25 +327,25 @@
/// argument and invokes methods on the client that indicate how its pieces
/// should be handled. This handles things like decimating structures into
/// their fields.
- void HandleArgument(tree_node *type, std::vector<const Type*> &ScalarElts,
+ void HandleArgument(tree_node *type, std::vector<Type*> &ScalarElts,
Attributes *Attributes = NULL);
/// HandleUnion - Handle a UNION_TYPE or QUAL_UNION_TYPE tree.
///
- void HandleUnion(tree_node *type, std::vector<const Type*> &ScalarElts);
+ void HandleUnion(tree_node *type, std::vector<Type*> &ScalarElts);
/// PassInIntegerRegisters - Given an aggregate value that should be passed in
/// integer registers, convert it to a structure containing ints and pass all
/// of the struct elements in. If Size is set we pass only that many bytes.
void PassInIntegerRegisters(tree_node *type,
- std::vector<const Type*> &ScalarElts,
+ std::vector<Type*> &ScalarElts,
unsigned origSize, bool DontCheckAlignment);
/// PassInMixedRegisters - Given an aggregate value that should be passed in
/// mixed integer, floating point, and vector registers, convert it to a
/// structure containing the specified struct elements in.
- void PassInMixedRegisters(const Type *Ty, std::vector<const Type*> &OrigElts,
- std::vector<const Type*> &ScalarElts);
+ void PassInMixedRegisters(Type *Ty, std::vector<Type*> &OrigElts,
+ std::vector<Type*> &ScalarElts);
};
#endif /* DRAGONEGG_ABI_H */
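The signature changes above matter beyond dragonegg's own files: the
DefaultABIClient callbacks are virtual, and under C++03 there is no
"override" keyword, so a subclass that keeps "const Type *" in a callback
silently declares an unrelated overload instead of an override, and the base
class's empty default runs instead. A minimal sketch of an updated client,
assuming a hypothetical subclass that only counts scalar arguments (the class
and its body are invented; the base class and signature come from this
header):

    struct CountingClient : public DefaultABIClient {
      unsigned NumScalarArgs;
      CountingClient() : NumScalarArgs(0) {}
      // Must now take Type*, not const Type*, or it stops overriding.
      virtual void HandleScalarArgument(llvm::Type * /*LLVMTy*/,
                                        tree_node * /*type*/,
                                        unsigned /*RealSize*/ = 0) {
        ++NumScalarArgs;
      }
    };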
Modified: dragonegg/trunk/include/dragonegg/Internals.h
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/include/dragonegg/Internals.h?rev=135371&r1=135370&r2=135371&view=diff
==============================================================================
--- dragonegg/trunk/include/dragonegg/Internals.h (original)
+++ dragonegg/trunk/include/dragonegg/Internals.h Sun Jul 17 23:25:32 2011
@@ -202,12 +202,12 @@
/// ConvertType - Returns the LLVM type to use for memory that holds a value
/// of the given GCC type (getRegType should be used for values in registers).
- const Type *ConvertType(tree_node *type);
+ Type *ConvertType(tree_node *type);
/// ConvertFunctionType - Convert the specified FUNCTION_TYPE or METHOD_TYPE
/// tree to an LLVM type. This does the same thing that ConvertType does, but
/// it also returns the function's LLVM calling convention and attributes.
- const FunctionType *ConvertFunctionType(tree_node *type,
+ FunctionType *ConvertFunctionType(tree_node *type,
tree_node *decl,
tree_node *static_chain,
CallingConv::ID &CallingConv,
@@ -216,14 +216,14 @@
/// ConvertArgListToFnType - Given a DECL_ARGUMENTS list on an GCC tree,
/// return the LLVM type corresponding to the function. This is useful for
/// turning "T foo(...)" functions into "T foo(void)" functions.
- const FunctionType *ConvertArgListToFnType(tree_node *type,
+ FunctionType *ConvertArgListToFnType(tree_node *type,
tree_node *arglist,
tree_node *static_chain,
CallingConv::ID &CallingConv,
AttrListPtr &PAL);
private:
- const Type *ConvertRECORD(tree_node *type);
+ Type *ConvertRECORD(tree_node *type);
bool DecodeStructFields(tree_node *Field, StructTypeConversionInfo &Info);
void DecodeStructBitField(tree_node *Field, StructTypeConversionInfo &Info);
void SelectUnionMember(tree_node *type, StructTypeConversionInfo &Info);
@@ -234,22 +234,22 @@
/// getRegType - Returns the LLVM type to use for registers that hold a value
/// of the scalar GCC type 'type'. All of the EmitReg* routines use this to
/// determine the LLVM type to return.
-const Type *getRegType(tree_node *type);
+Type *getRegType(tree_node *type);
/// ConvertType - Returns the LLVM type to use for memory that holds a value
/// of the given GCC type (getRegType should be used for values in registers).
-inline const Type *ConvertType(tree_node *type) {
+inline Type *ConvertType(tree_node *type) {
return TheTypeConverter->ConvertType(type);
}
/// getPointerToType - Returns the LLVM register type to use for a pointer to
/// the given GCC type.
-const Type *getPointerToType(tree_node *type);
+Type *getPointerToType(tree_node *type);
/// getDefaultValue - Return the default value to use for a constant or global
/// that has no value specified. For example in C-like languages such variables
/// are initialized to zero, while in Ada they hold an undefined value.
-inline Constant *getDefaultValue(const Type *Ty) {
+inline Constant *getDefaultValue(Type *Ty) {
return flag_default_initialize_globals ?
Constant::getNullValue(Ty) : UndefValue::get(Ty);
}
@@ -258,19 +258,19 @@
/// otherwise returns an array of such integers with 'NumUnits' elements. For
/// example, on a machine which has 16 bit bytes returns an i16 or an array of
/// i16.
-extern const Type *GetUnitType(LLVMContext &C, unsigned NumUnits = 1);
+extern Type *GetUnitType(LLVMContext &C, unsigned NumUnits = 1);
/// GetUnitPointerType - Returns an LLVM pointer type which points to memory one
/// address unit wide. For example, on a machine which has 16 bit bytes returns
/// an i16*.
-extern const Type *GetUnitPointerType(LLVMContext &C, unsigned AddrSpace = 0);
+extern Type *GetUnitPointerType(LLVMContext &C, unsigned AddrSpace = 0);
/// GetFieldIndex - Return the index of the field in the given LLVM type that
/// corresponds to the GCC field declaration 'decl'. This means that the LLVM
/// and GCC fields start in the same byte (if 'decl' is a bitfield, this means
/// that its first bit is within the byte the LLVM field starts at). Returns
/// INT_MAX if there is no such LLVM field.
-int GetFieldIndex(tree_node *decl, const Type *Ty);
+int GetFieldIndex(tree_node *decl, Type *Ty);
/// isPassedByInvisibleReference - Return true if the specified type should be
/// passed by 'invisible reference'. In other words, instead of passing the
@@ -475,22 +475,22 @@
/// CastToAnyType - Cast the specified value to the specified type regardless
/// of the types involved. This is an inferred cast.
- Value *CastToAnyType (Value *V, bool VSigned, const Type *Ty, bool TySigned);
+ Value *CastToAnyType (Value *V, bool VSigned, Type *Ty, bool TySigned);
/// CastToUIntType - Cast the specified value to the specified type assuming
/// that V's type and Ty are integral types. This arbitrates between BitCast,
/// Trunc and ZExt.
- Value *CastToUIntType(Value *V, const Type *Ty);
+ Value *CastToUIntType(Value *V, Type *Ty);
/// CastToSIntType - Cast the specified value to the specified type assuming
/// that V's type and Ty are integral types. This arbitrates between BitCast,
/// Trunc and SExt.
- Value *CastToSIntType(Value *V, const Type *Ty);
+ Value *CastToSIntType(Value *V, Type *Ty);
/// CastToFPType - Cast the specified value to the specified type assuming
/// that V's type and Ty are floating point types. This arbitrates between
/// BitCast, FPTrunc and FPExt.
- Value *CastToFPType(Value *V, const Type *Ty);
+ Value *CastToFPType(Value *V, Type *Ty);
/// CreateAnyAdd - Add two LLVM scalar values with the given GCC type. Does
/// not support complex numbers. The type is used to set overflow flags.
@@ -511,10 +511,10 @@
/// CreateTemporary - Create a new alloca instruction of the specified type,
/// inserting it into the entry block and returning it. The resulting
/// instruction's type is a pointer to the specified type.
- AllocaInst *CreateTemporary(const Type *Ty, unsigned align=0);
+ AllocaInst *CreateTemporary(Type *Ty, unsigned align=0);
/// CreateTempLoc - Like CreateTemporary, but returns a MemRef.
- MemRef CreateTempLoc(const Type *Ty);
+ MemRef CreateTempLoc(Type *Ty);
/// EmitAggregateCopy - Copy the elements from SrcLoc to DestLoc, using the
/// GCC type specified by GCCType to know which elements to copy.
@@ -649,7 +649,7 @@
/// that the original and target types are LLVM register types that correspond
/// to GCC scalar types t1 and t2 satisfying useless_type_conversion_p(t1, t2)
/// or useless_type_conversion_p(t2, t1).
- Value *TriviallyTypeConvert(Value *V, const Type *Ty) {
+ Value *TriviallyTypeConvert(Value *V, Type *Ty) {
// If useless_type_conversion_p(t1, t2) holds then the corresponding LLVM
// register types are either equal or are both pointer types.
if (V->getType() == Ty)
@@ -891,7 +891,7 @@
tree_node *fndecl,
const MemRef *DestLoc,
Value *&Result,
- const Type *ResultType,
+ Type *ResultType,
std::vector<Value*> &Ops);
public:
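Internals.h keeps its register/memory split after de-constification:
getRegType gives the LLVM type for a value held in a register, ConvertType
gives the type for the same value held in memory, and both now hand back a
plain Type*. A hedged usage sketch (the wrapper function and its argument are
placeholders, not code from the patch):

    void sketch(tree_node *type) {
      Type *RegTy = getRegType(type);          // value as held in a register
      Type *MemTy = ConvertType(type);         // value as held in memory
      Constant *Init = getDefaultValue(MemTy); // zero or undef, depending on
                                               // flag_default_initialize_globals
      (void)RegTy; (void)Init;
    }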
Modified: dragonegg/trunk/include/x86/dragonegg/Target.h
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/include/x86/dragonegg/Target.h?rev=135371&r1=135370&r2=135371&view=diff
==============================================================================
--- dragonegg/trunk/include/x86/dragonegg/Target.h (original)
+++ dragonegg/trunk/include/x86/dragonegg/Target.h Sun Jul 17 23:25:32 2011
@@ -125,7 +125,7 @@
#define LLVM_SHOULD_PASS_AGGREGATE_IN_INTEGER_REGS(X, Y, Z) \
llvm_x86_should_pass_aggregate_in_integer_regs((X), (Y), (Z))
-extern const Type *llvm_x86_scalar_type_for_struct_return(tree_node *type,
+extern Type *llvm_x86_scalar_type_for_struct_return(tree_node *type,
unsigned *Offset);
/* LLVM_SCALAR_TYPE_FOR_STRUCT_RETURN - Return LLVM Type if X can be
@@ -133,7 +133,7 @@
#define LLVM_SCALAR_TYPE_FOR_STRUCT_RETURN(X, Y) \
llvm_x86_scalar_type_for_struct_return((X), (Y))
-extern const Type *llvm_x86_aggr_type_for_struct_return(tree_node *type);
+extern Type *llvm_x86_aggr_type_for_struct_return(tree_node *type);
/* LLVM_AGGR_TYPE_FOR_STRUCT_RETURN - Return LLVM Type if X can be
returned as an aggregate, otherwise return NULL. */
@@ -188,7 +188,7 @@
llvm_x86_should_not_return_complex_in_memory((X))
extern bool
-llvm_x86_should_pass_aggregate_as_fca(tree_node *type, const Type *);
+llvm_x86_should_pass_aggregate_as_fca(tree_node *type, Type *);
/* LLVM_SHOULD_PASS_AGGREGATE_AS_FCA - Return true if an aggregate of the
specified type should be passed as a first-class aggregate. */
@@ -197,18 +197,18 @@
llvm_x86_should_pass_aggregate_as_fca(X, TY)
#endif
-extern bool llvm_x86_should_pass_aggregate_in_memory(tree_node *, const Type *);
+extern bool llvm_x86_should_pass_aggregate_in_memory(tree_node *, Type *);
#define LLVM_SHOULD_PASS_AGGREGATE_USING_BYVAL_ATTR(X, TY) \
llvm_x86_should_pass_aggregate_in_memory(X, TY)
extern bool
-llvm_x86_64_should_pass_aggregate_in_mixed_regs(tree_node *, const Type *Ty,
- std::vector<const Type*>&);
+llvm_x86_64_should_pass_aggregate_in_mixed_regs(tree_node *, Type *Ty,
+ std::vector<Type*>&);
extern bool
-llvm_x86_32_should_pass_aggregate_in_mixed_regs(tree_node *, const Type *Ty,
- std::vector<const Type*>&);
+llvm_x86_32_should_pass_aggregate_in_mixed_regs(tree_node *, Type *Ty,
+ std::vector<Type*>&);
#define LLVM_SHOULD_PASS_AGGREGATE_IN_MIXED_REGS(T, TY, CC, E) \
(TARGET_64BIT ? \
@@ -216,8 +216,8 @@
llvm_x86_32_should_pass_aggregate_in_mixed_regs((T), (TY), (E)))
extern
-bool llvm_x86_64_aggregate_partially_passed_in_regs(std::vector<const Type*>&,
- std::vector<const Type*>&,
+bool llvm_x86_64_aggregate_partially_passed_in_regs(std::vector<Type*>&,
+ std::vector<Type*>&,
bool);
#define LLVM_AGGREGATE_PARTIALLY_PASSED_IN_REGS(E, SE, ISR, CC) \
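One reason this patch has to touch every signature at once:
std::vector<const Type*> and std::vector<Type*> are distinct, unrelated
template instantiations, so a vector built on one side of a call boundary
cannot be passed to the other side if only one of them was updated. A hedged
sketch of a caller of the x86-64 hook above (the wrapper function is
invented; the hook's signature comes from this header):

    void sketch(tree_node *type, Type *Ty) {
      std::vector<Type*> Elts;  // was std::vector<const Type*>
      if (llvm_x86_64_should_pass_aggregate_in_mixed_regs(type, Ty, Elts)) {
        // Elts now holds the register pieces the aggregate is split into.
      }
    }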
Modified: dragonegg/trunk/src/Backend.cpp
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/src/Backend.cpp?rev=135371&r1=135370&r2=135371&view=diff
==============================================================================
--- dragonegg/trunk/src/Backend.cpp (original)
+++ dragonegg/trunk/src/Backend.cpp Sun Jul 17 23:25:32 2011
@@ -243,7 +243,7 @@
return true;
assert(isInt64(DECL_SIZE(decl), true) && "Global decl with variable size!");
- const Type *Ty = GV->getType()->getElementType();
+ Type *Ty = GV->getType()->getElementType();
// If the LLVM type has no size then a useful comparison cannot be made.
if (!Ty->isSized())
return true;
@@ -688,9 +688,9 @@
LLVMContext &Context = getGlobalContext();
- const Type *FPTy =
+ Type *FPTy =
FunctionType::get(Type::getVoidTy(Context),
- std::vector<const Type*>(), false);
+ std::vector<Type*>(), false);
FPTy = FPTy->getPointerTo();
for (unsigned i = 0, e = Tors.size(); i != e; ++i) {
@@ -743,7 +743,7 @@
Constant *lineNo = ConstantInt::get(Type::getInt32Ty(Context),
DECL_SOURCE_LINE(decl));
Constant *file = ConvertMetadataStringToGV(DECL_SOURCE_FILE(decl));
- const Type *SBP = Type::getInt8PtrTy(Context);
+ Type *SBP = Type::getInt8PtrTy(Context);
file = TheFolder->CreateBitCast(file, SBP);
// There may be multiple annotate attributes. Pass return of lookup_attr
@@ -805,7 +805,7 @@
if (DECL_INITIAL(decl) == 0 || DECL_INITIAL(decl) == error_mark_node) {
// Reconvert the type in case the forward def of the global and the real def
// differ in type (e.g. declared as 'int A[]', and defined as 'int A[100]').
- const Type *Ty = ConvertType(TREE_TYPE(decl));
+ Type *Ty = ConvertType(TREE_TYPE(decl));
Init = getDefaultValue(Ty);
} else {
// Temporarily set an initializer for the global, so we don't infinitely
@@ -1070,7 +1070,7 @@
if (FnEntry == 0) {
CallingConv::ID CC;
AttrListPtr PAL;
- const FunctionType *Ty =
+ FunctionType *Ty =
TheTypeConverter->ConvertFunctionType(TREE_TYPE(decl), decl, NULL,
CC, PAL);
FnEntry = Function::Create(Ty, Function::ExternalLinkage, Name, TheModule);
@@ -1112,7 +1112,7 @@
} else {
assert((TREE_CODE(decl) == VAR_DECL ||
TREE_CODE(decl) == CONST_DECL) && "Not a function or var decl?");
- const Type *Ty = ConvertType(TREE_TYPE(decl));
+ Type *Ty = ConvertType(TREE_TYPE(decl));
GlobalVariable *GV ;
// If we have "extern void foo", make the global have type {} instead of
@@ -1259,7 +1259,7 @@
//FIXME // adaptor which would be simpler and more efficient. In the meantime, just
//FIXME // adapt the adaptor.
//FIXME raw_os_ostream RO(FS);
-//FIXME WriteTypeSymbolic(RO, (const Type*)LLVM, TheModule);
+//FIXME WriteTypeSymbolic(RO, (Type*)LLVM, TheModule);
//FIXME}
/// extractRegisterName - Get a register name given its decl. In 4.2 unlike 4.0
@@ -1720,7 +1720,7 @@
if (!AttributeUsedGlobals.empty()) {
std::vector<Constant *> AUGs;
- const Type *SBP = Type::getInt8PtrTy(Context);
+ Type *SBP = Type::getInt8PtrTy(Context);
for (SmallSetVector<Constant *,32>::iterator
AI = AttributeUsedGlobals.begin(),
AE = AttributeUsedGlobals.end(); AI != AE; ++AI) {
@@ -1739,7 +1739,7 @@
if (!AttributeCompilerUsedGlobals.empty()) {
std::vector<Constant *> ACUGs;
- const Type *SBP = Type::getInt8PtrTy(Context);
+ Type *SBP = Type::getInt8PtrTy(Context);
for (SmallSetVector<Constant *,32>::iterator
AI = AttributeCompilerUsedGlobals.begin(),
AE = AttributeCompilerUsedGlobals.end(); AI != AE; ++AI) {
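The hunk at line 688 of Backend.cpp shows the same pattern on function types:
building the pointer-to-"void()" type used for the static constructor and
destructor tables, now const-free in both the return type and the (empty)
parameter list. A standalone sketch equivalent to that snippet (the helper
name is illustrative):

    #include "llvm/DerivedTypes.h"
    #include "llvm/LLVMContext.h"
    #include <vector>
    using namespace llvm;

    Type *getVoidFnPtrTy(LLVMContext &Context) {
      Type *FPTy = FunctionType::get(Type::getVoidTy(Context),
                                     std::vector<Type*>(), false);
      return FPTy->getPointerTo();
    }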
Modified: dragonegg/trunk/src/Constants.cpp
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/src/Constants.cpp?rev=135371&r1=135370&r2=135371&view=diff
==============================================================================
--- dragonegg/trunk/src/Constants.cpp (original)
+++ dragonegg/trunk/src/Constants.cpp Sun Jul 17 23:25:32 2011
@@ -173,7 +173,7 @@
if (R == r)
return *this;
assert(!r.empty() && "Empty ranges did not evaluate as equal?");
- const Type *ExtTy = IntegerType::get(Context, r.getWidth());
+ Type *ExtTy = IntegerType::get(Context, r.getWidth());
// If the slice contains no bits then every bit of the extension is zero.
if (empty())
return BitSlice(r, Constant::getNullValue(ExtTy));
@@ -206,7 +206,7 @@
// Quick exit if the desired range matches that of the slice.
if (R == r)
return Contents;
- const Type *RetTy = IntegerType::get(Context, r.getWidth());
+ Type *RetTy = IntegerType::get(Context, r.getWidth());
// If the slice contains no bits then every returned bit is undefined.
if (empty())
return UndefValue::get(RetTy);
@@ -267,7 +267,7 @@
C = Folder.CreateLShr(C, ShiftAmt);
}
// Truncate to the new type.
- const Type *RedTy = IntegerType::get(Context, r.getWidth());
+ Type *RedTy = IntegerType::get(Context, r.getWidth());
C = Folder.CreateTruncOrBitCast(C, RedTy);
return BitSlice(r, C);
}
@@ -281,7 +281,7 @@
return BitSlice();
// Sanitize the range to make life easier in what follows.
- const Type *Ty = C->getType();
+ Type *Ty = C->getType();
int StoreSize = getTargetData().getTypeStoreSizeInBits(Ty);
R = R.Meet(SignedRange(0, StoreSize));
@@ -295,7 +295,7 @@
DieAbjectly("Unsupported type!");
case Type::PointerTyID: {
// Cast to an integer with the same number of bits and return that.
- const IntegerType *IntTy = getTargetData().getIntPtrType(Context);
+ IntegerType *IntTy = getTargetData().getIntPtrType(Context);
return BitSlice(0, StoreSize, Folder.CreatePtrToInt(C, IntTy));
}
case Type::DoubleTyID:
@@ -307,7 +307,7 @@
case Type::X86_MMXTyID: {
// Bitcast to an integer with the same number of bits and return that.
unsigned BitWidth = Ty->getPrimitiveSizeInBits();
- const IntegerType *IntTy = IntegerType::get(Context, BitWidth);
+ IntegerType *IntTy = IntegerType::get(Context, BitWidth);
C = Folder.CreateBitCast(C, IntTy);
// Be careful about where the bits are placed in case this is a funky type
// like i1. If the width is a multiple of the address unit then there is
@@ -319,8 +319,8 @@
}
case Type::ArrayTyID: {
- const ArrayType *ATy = cast<ArrayType>(Ty);
- const Type *EltTy = ATy->getElementType();
+ ArrayType *ATy = cast<ArrayType>(Ty);
+ Type *EltTy = ATy->getElementType();
const unsigned Stride = getTargetData().getTypeAllocSizeInBits(EltTy);
assert(Stride > 0 && "Store size smaller than alloc size?");
// Elements with indices in [FirstElt, LastElt) overlap the range.
@@ -343,7 +343,7 @@
}
case Type::StructTyID: {
- const StructType *STy = cast<StructType>(Ty);
+ StructType *STy = cast<StructType>(Ty);
const StructLayout *SL = getTargetData().getStructLayout(STy);
// Fields with indices in [FirstIdx, LastIdx) overlap the range.
unsigned FirstIdx = SL->getElementContainingOffset((R.getFirst()+7)/8);
@@ -355,7 +355,7 @@
// Extract the field.
Constant *Field = Folder.CreateExtractValue(C, i);
// View it as a bunch of bits.
- const Type *FieldTy = Field->getType();
+ Type *FieldTy = Field->getType();
unsigned FieldStoreSize = getTargetData().getTypeStoreSizeInBits(FieldTy);
BitSlice FieldBits = ViewAsBits(Field, SignedRange(0, FieldStoreSize),
Folder);
@@ -366,8 +366,8 @@
}
case Type::VectorTyID: {
- const VectorType *VTy = cast<VectorType>(Ty);
- const Type *EltTy = VTy->getElementType();
+ VectorType *VTy = cast<VectorType>(Ty);
+ Type *EltTy = VTy->getElementType();
const unsigned Stride = getTargetData().getTypeAllocSizeInBits(EltTy);
assert(Stride > 0 && "Store size smaller than alloc size?");
// Elements with indices in [FirstElt, LastElt) overlap the range.
@@ -397,7 +397,7 @@
/// same constant as you would get by storing the bits of 'C' to memory (with
/// the first bit stored being 'StartingBit') and then loading out a (constant)
/// value of type 'Ty' from the stored to memory location.
-static Constant *InterpretAsType(Constant *C, const Type* Ty, int StartingBit,
+static Constant *InterpretAsType(Constant *C, Type* Ty, int StartingBit,
TargetFolder &Folder) {
if (C->getType() == Ty)
return C;
@@ -426,7 +426,7 @@
case Type::PointerTyID: {
// Interpret as an integer with the same number of bits then cast back to
// the original type.
- const IntegerType *IntTy = getTargetData().getIntPtrType(Context);
+ IntegerType *IntTy = getTargetData().getIntPtrType(Context);
C = InterpretAsType(C, IntTy, StartingBit, Folder);
return Folder.CreateIntToPtr(C, Ty);
}
@@ -439,15 +439,15 @@
// Interpret as an integer with the same number of bits then cast back to
// the original type.
unsigned BitWidth = Ty->getPrimitiveSizeInBits();
- const IntegerType *IntTy = IntegerType::get(Context, BitWidth);
+ IntegerType *IntTy = IntegerType::get(Context, BitWidth);
return Folder.CreateBitCast(InterpretAsType(C, IntTy, StartingBit, Folder),
Ty);
}
case Type::ArrayTyID: {
// Interpret each array element in turn.
- const ArrayType *ATy = cast<ArrayType>(Ty);
- const Type *EltTy = ATy->getElementType();
+ ArrayType *ATy = cast<ArrayType>(Ty);
+ Type *EltTy = ATy->getElementType();
const unsigned Stride = getTargetData().getTypeAllocSizeInBits(EltTy);
const unsigned NumElts = ATy->getNumElements();
std::vector<Constant*> Vals(NumElts);
@@ -458,7 +458,7 @@
case Type::StructTyID: {
// Interpret each struct field in turn.
- const StructType *STy = cast<StructType>(Ty);
+ StructType *STy = cast<StructType>(Ty);
const StructLayout *SL = getTargetData().getStructLayout(STy);
unsigned NumElts = STy->getNumElements();
std::vector<Constant*> Vals(NumElts);
@@ -471,8 +471,8 @@
case Type::VectorTyID: {
// Interpret each vector element in turn.
- const VectorType *VTy = cast<VectorType>(Ty);
- const Type *EltTy = VTy->getElementType();
+ VectorType *VTy = cast<VectorType>(Ty);
+ Type *EltTy = VTy->getElementType();
const unsigned Stride = getTargetData().getTypeAllocSizeInBits(EltTy);
const unsigned NumElts = VTy->getNumElements();
SmallVector<Constant*, 16> Vals(NumElts);
@@ -509,7 +509,7 @@
// This roundabout approach means we get the right result on both little and
// big endian machines.
uint64_t Size = getInt64(TYPE_SIZE(type), true);
- const Type *MemTy = IntegerType::get(Context, Size);
+ Type *MemTy = IntegerType::get(Context, Size);
C = InterpretAsType(C, MemTy, StartingBit, Folder);
return Folder.CreateTruncOrBitCast(C, getRegType(type));
}
@@ -539,7 +539,7 @@
unsigned NumElts = TYPE_VECTOR_SUBPARTS(type);
unsigned Stride = GET_MODE_BITSIZE(TYPE_MODE(elt_type));
SmallVector<Constant*, 16> Vals(NumElts);
- const IntegerType *IntPtrTy = getTargetData().getIntPtrType(Context);
+ IntegerType *IntPtrTy = getTargetData().getIntPtrType(Context);
for (unsigned i = 0; i != NumElts; ++i) {
Vals[i] = ExtractRegisterFromConstantImpl(C, elt_type,
StartingBit+i*Stride, Folder);
@@ -599,7 +599,7 @@
// to an i32. This approach means we get the right result on both little
// and big endian machines.
uint64_t Size = getInt64(TYPE_SIZE(type), true);
- const Type *MemTy = IntegerType::get(Context, Size);
+ Type *MemTy = IntegerType::get(Context, Size);
// We can extend in any way, but get nicer IR by respecting signedness.
bool isSigned = !TYPE_UNSIGNED(type);
Result = isSigned ? Folder.CreateSExtOrBitCast(C, MemTy) :
@@ -677,8 +677,8 @@
return C;
// No cast is needed if the LLVM types are the same. This occurs often since
// many different GCC types usually map to the same LLVM type.
- const Type *SrcTy = getRegType(TREE_TYPE(exp));
- const Type *DestTy = getRegType(type);
+ Type *SrcTy = getRegType(TREE_TYPE(exp));
+ Type *DestTy = getRegType(type);
if (SrcTy == DestTy)
return C;
@@ -718,8 +718,8 @@
static Constant *ConvertSTRING_CST(tree exp, TargetFolder &) {
// TODO: Enhance GCC's native_encode_expr to handle arbitrary strings and not
// just those with a byte component type; then ConvertCST can handle strings.
- const ArrayType *StrTy = cast<ArrayType>(ConvertType(TREE_TYPE(exp)));
- const Type *ElTy = StrTy->getElementType();
+ ArrayType *StrTy = cast<ArrayType>(ConvertType(TREE_TYPE(exp)));
+ Type *ElTy = StrTy->getElementType();
unsigned Len = (unsigned)TREE_STRING_LENGTH(exp);
@@ -797,10 +797,10 @@
const TargetData &TD = getTargetData();
tree init_type = TREE_TYPE(exp);
- const Type *InitTy = ConvertType(init_type);
+ Type *InitTy = ConvertType(init_type);
tree elt_type = TREE_TYPE(init_type);
- const Type *EltTy = ConvertType(elt_type);
+ Type *EltTy = ConvertType(elt_type);
// Check that the element type has a known, constant size.
assert(isSequentialCompatible(init_type) && "Variable sized array element!");
@@ -903,7 +903,7 @@
// an array of unions, and the various unions had different parts initialized.
// While there, compute the maximum element alignment.
bool UseStruct = false;
- const Type *ActualEltTy = Elts[0]->getType();
+ Type *ActualEltTy = Elts[0]->getType();
unsigned MaxAlign = TD.getABITypeAlignment(ActualEltTy);
for (unsigned i = 1; i != NumElts; ++i)
if (Elts[i]->getType() != ActualEltTy) {
@@ -960,7 +960,7 @@
Constant *getAsBits() const {
if (R.empty())
return 0;
- const Type *IntTy = IntegerType::get(Context, R.getWidth());
+ Type *IntTy = IntegerType::get(Context, R.getWidth());
return InterpretAsType(C, IntTy, R.getFirst() - Starts, Folder);
}
@@ -978,7 +978,7 @@
return false;
// If the constant is wider than the range then it needs to be truncated
// before being passed to the user.
- const Type *Ty = C->getType();
+ Type *Ty = C->getType();
unsigned AllocBits = TD.getTypeAllocSizeInBits(Ty);
return AllocBits <= (unsigned)R.getWidth();
}
@@ -1084,7 +1084,7 @@
assert(FirstBit <= TypeSize && "Field off end of type!");
// Determine the width of the field.
uint64_t BitWidth;
- const Type *FieldTy = ConvertType(TREE_TYPE(field));
+ Type *FieldTy = ConvertType(TREE_TYPE(field));
if (isInt64(DECL_SIZE(field), true)) {
// The field has a size and it is a constant, so use it. Note that
// this size may be smaller than the type size. For example, if the
@@ -1325,7 +1325,7 @@
// The initializer should always be at least as big as the constructor's type,
// and except in the cases of incomplete types or types with variable size the
// sizes should be the same.
- const Type *Ty = ConvertType(TREE_TYPE(exp));
+ Type *Ty = ConvertType(TREE_TYPE(exp));
if (Ty->isSized()) {
uint64_t InitSize = getTargetData().getTypeAllocSizeInBits(Init->getType());
uint64_t TypeSize = getTargetData().getTypeAllocSizeInBits(Ty);
@@ -1414,7 +1414,7 @@
// Avoid any assumptions about how the array type is represented in LLVM by
// doing the GEP on a pointer to the first array element.
Constant *ArrayAddr = AddressOfImpl(array, Folder);
- const Type *EltTy = ConvertType(TREE_TYPE(TREE_TYPE(array)));
+ Type *EltTy = ConvertType(TREE_TYPE(TREE_TYPE(array)));
ArrayAddr = Folder.CreateBitCast(ArrayAddr, EltTy->getPointerTo());
return POINTER_TYPE_OVERFLOW_UNDEFINED ?
@@ -1453,7 +1453,7 @@
assert(BitStart == 0 &&
"It's a bitfield reference or we didn't get to the field!");
- const Type *UnitPtrTy = GetUnitPointerType(Context);
+ Type *UnitPtrTy = GetUnitPointerType(Context);
Constant *StructAddr = AddressOfImpl(TREE_OPERAND(exp, 0), Folder);
Constant *FieldPtr = Folder.CreateBitCast(StructAddr, UnitPtrTy);
FieldPtr = Folder.CreateInBoundsGetElementPtr(FieldPtr, &Offset, 1);
@@ -1533,7 +1533,7 @@
// Ensure that the address has the expected type. It is simpler to do this
// once here rather than in every AddressOf helper.
- const Type *Ty;
+ Type *Ty;
if (VOID_TYPE_P(TREE_TYPE(exp)))
Ty = GetUnitPointerType(Context); // void* -> i8*.
else
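Only the pointer qualifier changed in Constants.cpp, not the semantics:
InterpretAsType still answers "what constant would a load of type Ty see if
the bits of C were stored to memory starting at StartingBit". To make that
concrete with public APIs only (an illustration, not code from the patch):
viewing the 32 stored bits of the i32 constant 0x3f800000 as a float is a
constant bitcast, which folds to the float 1.0.

    #include "llvm/Constants.h"
    #include "llvm/LLVMContext.h"
    using namespace llvm;

    Constant *int_bits_as_float(LLVMContext &C) {
      Constant *I = ConstantInt::get(Type::getInt32Ty(C), 0x3f800000u);
      // Same 32 stored bits viewed as a float: folds to ConstantFP 1.0.
      return ConstantExpr::getBitCast(I, Type::getFloatTy(C));
    }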
Modified: dragonegg/trunk/src/Convert.cpp
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/src/Convert.cpp?rev=135371&r1=135370&r2=135371&view=diff
==============================================================================
--- dragonegg/trunk/src/Convert.cpp (original)
+++ dragonegg/trunk/src/Convert.cpp Sun Jul 17 23:25:32 2011
@@ -92,7 +92,7 @@
/// is used before being defined (this can occur because basic blocks are not
/// output in dominator order). Replaced with the correct value when the SSA
/// name's definition is encountered.
-static Value *GetSSAPlaceholder(const Type *Ty) {
+static Value *GetSSAPlaceholder(Type *Ty) {
// Cannot use a constant, since there is no way to distinguish a fake value
// from a real value. So use an instruction with no parent. This needs to
// be an instruction that can return a struct type, since the SSA name might
@@ -254,7 +254,7 @@
/// llvm_store_scalar_argument - Store scalar argument ARGVAL of type
/// LLVMTY at location LOC.
static void llvm_store_scalar_argument(Value *Loc, Value *ArgVal,
- const llvm::Type *LLVMTy,
+ llvm::Type *LLVMTy,
unsigned RealSize,
LLVMBuilder &Builder) {
if (RealSize) {
@@ -262,7 +262,7 @@
assert(!BYTES_BIG_ENDIAN && "Unsupported case - please report");
// Do byte wise store because actual argument type does not match LLVMTy.
assert(ArgVal->getType()->isIntegerTy() && "Expected an integer value!");
- const Type *StoreType = IntegerType::get(Context, RealSize * 8);
+ Type *StoreType = IntegerType::get(Context, RealSize * 8);
Loc = Builder.CreateBitCast(Loc, StoreType->getPointerTo());
if (ArgVal->getType()->getPrimitiveSizeInBits() >=
StoreType->getPrimitiveSizeInBits())
@@ -310,7 +310,7 @@
/// getCallingConv - This provides the desired CallingConv for the function.
CallingConv::ID& getCallingConv(void) { return CallingConv; }
- void HandlePad(const llvm::Type * /*LLVMTy*/) {
+ void HandlePad(llvm::Type * /*LLVMTy*/) {
++AI;
}
@@ -329,7 +329,7 @@
LocStack.clear();
}
- void HandleAggregateShadowResult(const PointerType * /*PtrArgTy*/,
+ void HandleAggregateShadowResult(PointerType * /*PtrArgTy*/,
bool /*RetPtr*/) {
// If the function returns a structure by value, we transform the function
// to take a pointer to the result as the first argument of the function
@@ -364,7 +364,7 @@
++AI;
}
- void HandleScalarShadowResult(const PointerType * /*PtrArgTy*/,
+ void HandleScalarShadowResult(PointerType * /*PtrArgTy*/,
bool /*RetPtr*/) {
assert(AI != Builder.GetInsertBlock()->getParent()->arg_end() &&
"No explicit return value?");
@@ -374,7 +374,7 @@
++AI;
}
- void HandleScalarArgument(const llvm::Type *LLVMTy, tree /*type*/,
+ void HandleScalarArgument(llvm::Type *LLVMTy, tree /*type*/,
unsigned RealSize = 0) {
Value *ArgVal = AI;
if (ArgVal->getType() != LLVMTy) {
@@ -403,7 +403,7 @@
++AI;
}
- void HandleByValArgument(const llvm::Type * /*LLVMTy*/, tree type) {
+ void HandleByValArgument(llvm::Type * /*LLVMTy*/, tree type) {
if (LLVM_BYVAL_ALIGNMENT_TOO_SMALL(type)) {
// Incoming object on stack is insufficiently aligned for the type.
// Make a correctly aligned copy.
@@ -413,8 +413,8 @@
// bytes, but only 10 are copied. If the object is really a union
// we might need the other bytes. We must also be careful to use
// the smaller alignment.
- const Type *SBP = Type::getInt8PtrTy(Context);
- const Type *IntPtr = getTargetData().getIntPtrType(Context);
+ Type *SBP = Type::getInt8PtrTy(Context);
+ Type *IntPtr = getTargetData().getIntPtrType(Context);
Value *Ops[5] = {
Builder.CreateCast(Instruction::BitCast, Loc, SBP),
Builder.CreateCast(Instruction::BitCast, AI, SBP),
@@ -423,7 +423,7 @@
Builder.getInt32(LLVM_BYVAL_ALIGNMENT(type)),
Builder.getFalse()
};
- const Type *ArgTypes[3] = {SBP, SBP, IntPtr };
+ Type *ArgTypes[3] = {SBP, SBP, IntPtr };
Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
Intrinsic::memcpy,
ArgTypes, 3), Ops);
@@ -433,7 +433,7 @@
++AI;
}
- void HandleFCAArgument(const llvm::Type * /*LLVMTy*/, tree /*type*/) {
+ void HandleFCAArgument(llvm::Type * /*LLVMTy*/, tree /*type*/) {
// Store the FCA argument into alloca.
assert(!LocStack.empty());
Value *Loc = LocStack.back();
@@ -442,12 +442,12 @@
++AI;
}
- void HandleAggregateResultAsScalar(const Type * /*ScalarTy*/,
+ void HandleAggregateResultAsScalar(Type * /*ScalarTy*/,
unsigned Offset = 0) {
this->Offset = Offset;
}
- void EnterField(unsigned FieldNo, const llvm::Type *StructTy) {
+ void EnterField(unsigned FieldNo, llvm::Type *StructTy) {
NameStack.push_back(NameStack.back()+"."+utostr(FieldNo));
Value *Loc = LocStack.back();
@@ -466,13 +466,13 @@
// isPassedByVal - Return true if an aggregate of the specified type will be
// passed in memory byval.
-static bool isPassedByVal(tree type, const Type *Ty,
- std::vector<const Type*> &ScalarArgs,
+static bool isPassedByVal(tree type, Type *Ty,
+ std::vector<Type*> &ScalarArgs,
bool isShadowRet, CallingConv::ID &/*CC*/) {
if (LLVM_SHOULD_PASS_AGGREGATE_USING_BYVAL_ATTR(type, Ty))
return true;
- std::vector<const Type*> Args;
+ std::vector<Type*> Args;
if (LLVM_SHOULD_PASS_AGGREGATE_IN_MIXED_REGS(type, Ty, CC, Args) &&
LLVM_AGGREGATE_PARTIALLY_PASSED_IN_REGS(Args, ScalarArgs, isShadowRet,
CC))
@@ -490,7 +490,7 @@
// Determine the FunctionType and calling convention for this function.
tree static_chain = cfun->static_chain_decl;
- const FunctionType *FTy;
+ FunctionType *FTy;
CallingConv::ID CallingConv;
AttrListPtr PAL;
@@ -678,12 +678,12 @@
tree Args = static_chain ? static_chain : DECL_ARGUMENTS(FnDecl);
// Scalar arguments processed so far.
- std::vector<const Type*> ScalarArgs;
+ std::vector<Type*> ScalarArgs;
while (Args) {
const char *Name = "unnamed_arg";
if (DECL_NAME(Args)) Name = IDENTIFIER_POINTER(DECL_NAME(Args));
- const Type *ArgTy = ConvertType(TREE_TYPE(Args));
+ Type *ArgTy = ConvertType(TREE_TYPE(Args));
bool isInvRef = isPassedByInvisibleReference(TREE_TYPE(Args));
if (isInvRef ||
(ArgTy->isVectorTy() &&
@@ -905,7 +905,7 @@
RetVals.push_back(RetVal);
} else {
Value *RetVal = DECL_LOCAL(TreeRetVal);
- if (const StructType *STy = dyn_cast<StructType>(Fn->getReturnType())) {
+ if (StructType *STy = dyn_cast<StructType>(Fn->getReturnType())) {
Value *R1 = Builder.CreateBitCast(RetVal, STy->getPointerTo());
llvm::Value *Idxs[2];
@@ -1069,7 +1069,7 @@
continue;
// Create the LLVM phi node.
- const Type *Ty = getRegType(TREE_TYPE(gimple_phi_result(gcc_phi)));
+ Type *Ty = getRegType(TREE_TYPE(gimple_phi_result(gcc_phi)));
PHINode *PHI = Builder.CreatePHI(Ty, gimple_phi_num_args(gcc_phi));
// The phi defines the associated ssa name.
@@ -1303,8 +1303,8 @@
/// CastToAnyType - Cast the specified value to the specified type making no
/// assumptions about the types of the arguments. This creates an inferred cast.
Value *TreeToLLVM::CastToAnyType(Value *V, bool VisSigned,
- const Type* DestTy, bool DestIsSigned) {
- const Type *SrcTy = V->getType();
+ Type* DestTy, bool DestIsSigned) {
+ Type *SrcTy = V->getType();
// Eliminate useless casts of a type to itself.
if (SrcTy == DestTy)
@@ -1317,12 +1317,12 @@
unsigned SrcBits = SrcTy->getScalarSizeInBits();
unsigned DestBits = DestTy->getScalarSizeInBits();
if (SrcBits && !isa<IntegerType>(SrcTy)) {
- const Type *IntTy = IntegerType::get(Context, SrcBits);
+ Type *IntTy = IntegerType::get(Context, SrcBits);
V = Builder.CreateBitCast(V, IntTy);
return CastToAnyType(V, VisSigned, DestTy, DestIsSigned);
}
if (DestBits && !isa<IntegerType>(DestTy)) {
- const Type *IntTy = IntegerType::get(Context, DestBits);
+ Type *IntTy = IntegerType::get(Context, DestBits);
V = CastToAnyType(V, VisSigned, IntTy, DestIsSigned);
return Builder.CreateBitCast(V, DestTy);
}
@@ -1340,7 +1340,7 @@
/// CastToFPType - Cast the specified value to the specified type assuming
/// that the value and type are floating point.
-Value *TreeToLLVM::CastToFPType(Value *V, const Type* Ty) {
+Value *TreeToLLVM::CastToFPType(Value *V, Type* Ty) {
unsigned SrcBits = V->getType()->getPrimitiveSizeInBits();
unsigned DstBits = Ty->getPrimitiveSizeInBits();
if (SrcBits == DstBits)
@@ -1385,7 +1385,7 @@
/// CreateTemporary - Create a new alloca instruction of the specified type,
/// inserting it into the entry block and returning it. The resulting
/// instruction's type is a pointer to the specified type.
-AllocaInst *TreeToLLVM::CreateTemporary(const Type *Ty, unsigned align) {
+AllocaInst *TreeToLLVM::CreateTemporary(Type *Ty, unsigned align) {
if (AllocaInsertionPoint == 0) {
// Create a dummy instruction in the entry block as a marker to insert new
// alloc instructions before. It doesn't matter what this instruction is,
@@ -1402,7 +1402,7 @@
}
/// CreateTempLoc - Like CreateTemporary, but returns a MemRef.
-MemRef TreeToLLVM::CreateTempLoc(const Type *Ty) {
+MemRef TreeToLLVM::CreateTempLoc(Type *Ty) {
AllocaInst *AI = CreateTemporary(Ty);
// MemRefs do not allow alignment 0.
if (!AI->getAlignment())
@@ -1448,7 +1448,7 @@
// The cost of a record type is the sum of the costs of its fields.
if (TREE_CODE(type) == RECORD_TYPE) {
- const Type *Ty = ConvertType(type);
+ Type *Ty = ConvertType(type);
unsigned TotalCost = 0;
for (tree Field = TYPE_FIELDS(type); Field; Field = TREE_CHAIN(Field)) {
assert(TREE_CODE(Field) == FIELD_DECL && "Lang data not freed?");
@@ -1497,7 +1497,7 @@
if (TREE_CODE(type) == RECORD_TYPE) {
// Ensure the source and destination are pointers to the record type.
- const Type *Ty = ConvertType(type);
+ Type *Ty = ConvertType(type);
DestLoc.Ptr = Builder.CreateBitCast(DestLoc.Ptr, Ty->getPointerTo());
SrcLoc.Ptr = Builder.CreateBitCast(SrcLoc.Ptr, Ty->getPointerTo());
@@ -1528,7 +1528,7 @@
assert(TREE_CODE(type) == ARRAY_TYPE && "Expected an array!");
// Turn the source and destination into pointers to the component type.
- const Type *CompType = ConvertType(TREE_TYPE(type));
+ Type *CompType = ConvertType(TREE_TYPE(type));
DestLoc.Ptr = Builder.CreateBitCast(DestLoc.Ptr, CompType->getPointerTo());
SrcLoc.Ptr = Builder.CreateBitCast(SrcLoc.Ptr, CompType->getPointerTo());
@@ -1592,7 +1592,7 @@
if (TREE_CODE(type) == RECORD_TYPE) {
// Ensure the pointer is to the record type.
- const Type *Ty = ConvertType(type);
+ Type *Ty = ConvertType(type);
DestLoc.Ptr = Builder.CreateBitCast(DestLoc.Ptr, Ty->getPointerTo());
// Zero each field in turn.
@@ -1617,7 +1617,7 @@
assert(TREE_CODE(type) == ARRAY_TYPE && "Expected an array!");
// Turn the pointer into a pointer to the component type.
- const Type *CompType = ConvertType(TREE_TYPE(type));
+ Type *CompType = ConvertType(TREE_TYPE(type));
DestLoc.Ptr = Builder.CreateBitCast(DestLoc.Ptr, CompType->getPointerTo());
// Zero each component in turn.
@@ -1659,8 +1659,8 @@
Value *TreeToLLVM::EmitMemCpy(Value *DestPtr, Value *SrcPtr, Value *Size,
unsigned Align) {
- const Type *SBP = Type::getInt8PtrTy(Context);
- const Type *IntPtr = TD.getIntPtrType(Context);
+ Type *SBP = Type::getInt8PtrTy(Context);
+ Type *IntPtr = TD.getIntPtrType(Context);
Value *Ops[5] = {
Builder.CreateBitCast(DestPtr, SBP),
Builder.CreateBitCast(SrcPtr, SBP),
@@ -1668,7 +1668,7 @@
Builder.getInt32(Align),
Builder.getFalse()
};
- const Type *ArgTypes[3] = { SBP, SBP, IntPtr };
+ Type *ArgTypes[3] = { SBP, SBP, IntPtr };
Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::memcpy,
ArgTypes, 3), Ops);
@@ -1677,8 +1677,8 @@
Value *TreeToLLVM::EmitMemMove(Value *DestPtr, Value *SrcPtr, Value *Size,
unsigned Align) {
- const Type *SBP = Type::getInt8PtrTy(Context);
- const Type *IntPtr = TD.getIntPtrType(Context);
+ Type *SBP = Type::getInt8PtrTy(Context);
+ Type *IntPtr = TD.getIntPtrType(Context);
Value *Ops[5] = {
Builder.CreateBitCast(DestPtr, SBP),
Builder.CreateBitCast(SrcPtr, SBP),
@@ -1686,7 +1686,7 @@
Builder.getInt32(Align),
Builder.getFalse()
};
- const Type *ArgTypes[3] = { SBP, SBP, IntPtr };
+ Type *ArgTypes[3] = { SBP, SBP, IntPtr };
Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::memmove,
ArgTypes, 3), Ops);
@@ -1695,8 +1695,8 @@
Value *TreeToLLVM::EmitMemSet(Value *DestPtr, Value *SrcVal, Value *Size,
unsigned Align) {
- const Type *SBP = Type::getInt8PtrTy(Context);
- const Type *IntPtr = TD.getIntPtrType(Context);
+ Type *SBP = Type::getInt8PtrTy(Context);
+ Type *IntPtr = TD.getIntPtrType(Context);
Value *Ops[5] = {
Builder.CreateBitCast(DestPtr, SBP),
Builder.CreateIntCast(SrcVal, Type::getInt8Ty(Context), /*isSigned*/true),
@@ -1704,7 +1704,7 @@
Builder.getInt32(Align),
Builder.getFalse()
};
- const Type *ArgTypes[2] = { SBP, IntPtr };
+ Type *ArgTypes[2] = { SBP, IntPtr };
Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::memset,
ArgTypes, 2), Ops);
@@ -1722,7 +1722,7 @@
// The idea is that it's a pointer to type "Value"
// which is opaque* but the routine expects i8** and i8*.
- const PointerType *Ty = Type::getInt8PtrTy(Context);
+ PointerType *Ty = Type::getInt8PtrTy(Context);
V = Builder.CreateBitCast(V, Ty->getPointerTo());
Value *Ops[2] = {
@@ -1749,7 +1749,7 @@
Constant *lineNo =
ConstantInt::get(Type::getInt32Ty(Context), DECL_SOURCE_LINE(decl));
Constant *file = ConvertMetadataStringToGV(DECL_SOURCE_FILE(decl));
- const Type *SBP = Type::getInt8PtrTy(Context);
+ Type *SBP = Type::getInt8PtrTy(Context);
file = TheFolder->CreateBitCast(file, SBP);
// There may be multiple annotate attributes. Pass return of lookup_attr
@@ -1769,7 +1769,7 @@
// Assert it's a string, and then get that string.
assert(TREE_CODE(val) == STRING_CST &&
"Annotate attribute arg should always be a string");
- const Type *SBP = Type::getInt8PtrTy(Context);
+ Type *SBP = Type::getInt8PtrTy(Context);
Constant *strGV = AddressOf(val);
Value *Ops[4] = {
Builder.CreateBitCast(V, SBP),
@@ -1801,7 +1801,7 @@
return;
tree type = TREE_TYPE(decl);
- const Type *Ty; // Type to allocate
+ Type *Ty; // Type to allocate
Value *Size = 0; // Amount to alloca (null for 1)
if (DECL_SIZE(decl) == 0) { // Variable with incomplete type.
@@ -1852,7 +1852,7 @@
{
// We should null out local variables so that a stack crawl
// before initialization doesn't get garbage results to follow.
- const Type *T = cast<PointerType>(AI->getType())->getElementType();
+ Type *T = cast<PointerType>(AI->getType())->getElementType();
EmitTypeGcroot(AI);
Builder.CreateStore(Constant::getNullValue(T), AI);
}
@@ -2280,7 +2280,7 @@
// Generate an explicit call to _Unwind_Resume_or_Rethrow.
// FIXME: On ARM this should be a call to __cxa_end_cleanup with no arguments.
- std::vector<const Type*> Params(1, Type::getInt8PtrTy(Context));
+ std::vector<Type*> Params(1, Type::getInt8PtrTy(Context));
FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context), Params,
false);
Constant *RewindFn =
@@ -2329,7 +2329,7 @@
LValue LV = EmitLV(exp);
LV.Volatile = TREE_THIS_VOLATILE(exp);
// TODO: Arrange for Volatile to already be set in the LValue.
- const Type *Ty = ConvertType(TREE_TYPE(exp));
+ Type *Ty = ConvertType(TREE_TYPE(exp));
unsigned Alignment = LV.getAlignment();
if (!LV.isBitfield()) {
@@ -2340,7 +2340,7 @@
if (!LV.BitSize)
return Constant::getNullValue(Ty);
- const Type *ValTy = cast<PointerType>(LV.Ptr->getType())->getElementType();
+ Type *ValTy = cast<PointerType>(LV.Ptr->getType())->getElementType();
unsigned ValSizeInBits = ValTy->getPrimitiveSizeInBits();
// The number of loads needed to read the entire bitfield.
@@ -2448,8 +2448,8 @@
/// DestLoc.
Value *TreeToLLVM::EmitCONSTRUCTOR(tree exp, const MemRef *DestLoc) {
tree type = TREE_TYPE(exp);
- const Type *Ty = ConvertType(type);
- if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
+ Type *Ty = ConvertType(type);
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
assert(DestLoc == 0 && "Dest location for vector value?");
std::vector<Value *> BuildVecOps;
BuildVecOps.reserve(VTy->getNumElements());
@@ -2460,7 +2460,7 @@
FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), idx, value) {
Value *Elt = EmitRegister(value);
- if (const VectorType *EltTy = dyn_cast<VectorType>(Elt->getType())) {
+ if (VectorType *EltTy = dyn_cast<VectorType>(Elt->getType())) {
// GCC allows vectors to be built up from vectors. Extract all of the
// vector elements and add them to the list of build vector operands.
for (unsigned i = 0, e = EltTy->getNumElements(); i != e; ++i) {
@@ -2526,7 +2526,7 @@
/// llvm_load_scalar_argument - Load value located at LOC.
static Value *llvm_load_scalar_argument(Value *L,
- const llvm::Type *LLVMTy,
+ llvm::Type *LLVMTy,
unsigned RealSize,
LLVMBuilder &Builder) {
if (!RealSize)
@@ -2535,7 +2535,7 @@
// Not clear what this is supposed to do on big endian machines...
assert(!BYTES_BIG_ENDIAN && "Unsupported case - please report");
assert(LLVMTy->isIntegerTy() && "Expected an integer value!");
- const Type *LoadType = IntegerType::get(Context, RealSize * 8);
+ Type *LoadType = IntegerType::get(Context, RealSize * 8);
L = Builder.CreateBitCast(L, LoadType->getPointerTo());
Value *Val = Builder.CreateLoad(L);
if (LoadType->getPrimitiveSizeInBits() >= LLVMTy->getPrimitiveSizeInBits())
@@ -2557,7 +2557,7 @@
struct FunctionCallArgumentConversion : public DefaultABIClient {
SmallVector<Value*, 16> &CallOperands;
SmallVector<Value*, 2> LocStack;
- const FunctionType *FTy;
+ FunctionType *FTy;
const MemRef *DestLoc;
bool useReturnSlot;
LLVMBuilder &Builder;
@@ -2569,7 +2569,7 @@
unsigned Offset;
FunctionCallArgumentConversion(SmallVector<Value*, 16> &ops,
- const FunctionType *FnTy,
+ FunctionType *FnTy,
const MemRef *destloc,
bool ReturnSlotOpt,
LLVMBuilder &b,
@@ -2608,7 +2608,7 @@
}
// Get the value of the current location (of type Ty).
- Value *getValue(const Type *Ty) {
+ Value *getValue(Type *Ty) {
assert(!LocStack.empty());
Value *Loc = LocStack.back();
if (Loc) {
@@ -2651,7 +2651,7 @@
/// HandleScalarResult - This callback is invoked if the function returns a
/// simple scalar result value.
- void HandleScalarResult(const Type * /*RetTy*/) {
+ void HandleScalarResult(Type * /*RetTy*/) {
// There is nothing to do here if we return a scalar or void.
assert(DestLoc == 0 &&
"Call returns a scalar but caller expects aggregate!");
@@ -2660,14 +2660,14 @@
/// HandleAggregateResultAsScalar - This callback is invoked if the function
/// returns an aggregate value by bit converting it to the specified scalar
/// type and returning that.
- void HandleAggregateResultAsScalar(const Type * /*ScalarTy*/,
+ void HandleAggregateResultAsScalar(Type * /*ScalarTy*/,
unsigned Offset = 0) {
this->Offset = Offset;
}
/// HandleAggregateResultAsAggregate - This callback is invoked if the
/// function returns an aggregate value using multiple return values.
- void HandleAggregateResultAsAggregate(const Type * /*AggrTy*/) {
+ void HandleAggregateResultAsAggregate(Type * /*AggrTy*/) {
// There is nothing to do here.
isAggrRet = true;
}
@@ -2676,7 +2676,7 @@
/// returns an aggregate value by using a "shadow" first parameter. If
/// RetPtr is set to true, the pointer argument itself is returned from the
/// function.
- void HandleAggregateShadowResult(const PointerType *PtrArgTy, bool /*RetPtr*/) {
+ void HandleAggregateShadowResult(PointerType *PtrArgTy, bool /*RetPtr*/) {
// We need to pass memory to write the return value into.
// FIXME: alignment and volatility are being ignored!
assert(!DestLoc || PtrArgTy == DestLoc->Ptr->getType());
@@ -2701,7 +2701,7 @@
isShadowRet = true;
}
- void HandlePad(const llvm::Type *LLVMTy) {
+ void HandlePad(llvm::Type *LLVMTy) {
CallOperands.push_back(UndefValue::get(LLVMTy));
}
@@ -2709,7 +2709,7 @@
/// returns a scalar value by using a "shadow" first parameter, which is a
/// pointer to the scalar, of type PtrArgTy. If RetPtr is set to true,
/// the pointer argument itself is returned from the function.
- void HandleScalarShadowResult(const PointerType *PtrArgTy,
+ void HandleScalarShadowResult(PointerType *PtrArgTy,
bool /*RetPtr*/) {
assert(DestLoc == 0 &&
"Call returns a scalar but caller expects aggregate!");
@@ -2724,7 +2724,7 @@
/// HandleScalarArgument - This is the primary callback that specifies an
/// LLVM argument to pass. It is only used for first class types.
- void HandleScalarArgument(const llvm::Type *LLVMTy, tree type,
+ void HandleScalarArgument(llvm::Type *LLVMTy, tree type,
unsigned RealSize = 0) {
Value *Loc = NULL;
if (RealSize) {
@@ -2735,7 +2735,7 @@
// Perform any implicit type conversions.
if (CallOperands.size() < FTy->getNumParams()) {
- const Type *CalledTy= FTy->getParamType(CallOperands.size());
+ Type *CalledTy= FTy->getParamType(CallOperands.size());
if (Loc->getType() != CalledTy) {
assert(type && "Inconsistent parameter types?");
bool isSigned = !TYPE_UNSIGNED(type);
@@ -2749,7 +2749,7 @@
/// HandleByInvisibleReferenceArgument - This callback is invoked if a
/// pointer (of type PtrTy) to the argument is passed rather than the
/// argument itself.
- void HandleByInvisibleReferenceArgument(const llvm::Type *PtrTy,
+ void HandleByInvisibleReferenceArgument(llvm::Type *PtrTy,
tree /*type*/) {
Value *Loc = getAddress();
Loc = Builder.CreateBitCast(Loc, PtrTy);
@@ -2759,7 +2759,7 @@
/// HandleByValArgument - This callback is invoked if the aggregate function
/// argument is passed by value. It is lowered to a parameter passed by
/// reference with an additional parameter attribute "ByVal".
- void HandleByValArgument(const llvm::Type *LLVMTy, tree /*type*/) {
+ void HandleByValArgument(llvm::Type *LLVMTy, tree /*type*/) {
Value *Loc = getAddress();
assert(LLVMTy->getPointerTo() == Loc->getType());
(void)LLVMTy; // Otherwise unused if asserts off - avoid compiler warning.
@@ -2768,7 +2768,7 @@
/// HandleFCAArgument - This callback is invoked if the aggregate function
/// argument is passed as a first class aggregate.
- void HandleFCAArgument(const llvm::Type *LLVMTy, tree /*type*/) {
+ void HandleFCAArgument(llvm::Type *LLVMTy, tree /*type*/) {
Value *Loc = getAddress();
assert(LLVMTy->getPointerTo() == Loc->getType());
(void)LLVMTy; // Otherwise unused if asserts off - avoid compiler warning.
@@ -2778,7 +2778,7 @@
/// EnterField - Called when we're about to enter the field of a struct
/// or union. FieldNo is the number of the element we are entering in the
/// LLVM Struct, StructTy is the LLVM type of the struct we are entering.
- void EnterField(unsigned FieldNo, const llvm::Type *StructTy) {
+ void EnterField(unsigned FieldNo, llvm::Type *StructTy) {
Value *Loc = getAddress();
Loc = Builder.CreateBitCast(Loc, StructTy->getPointerTo());
pushAddress(Builder.CreateStructGEP(Loc, FieldNo, "elt"));
@@ -2843,8 +2843,8 @@
#endif
SmallVector<Value*, 16> CallOperands;
- const PointerType *PFTy = cast<PointerType>(Callee->getType());
- const FunctionType *FTy = cast<FunctionType>(PFTy->getElementType());
+ PointerType *PFTy = cast<PointerType>(Callee->getType());
+ FunctionType *FTy = cast<FunctionType>(PFTy->getElementType());
FunctionCallArgumentConversion Client(CallOperands, FTy, DestLoc,
gimple_call_return_slot_opt_p(stmt),
Builder, CallingConvention);
@@ -2860,11 +2860,11 @@
CallOperands.push_back(EmitMemory(gimple_call_chain(stmt)));
// Loop over the arguments, expanding them and adding them to the op list.
- std::vector<const Type*> ScalarArgs;
+ std::vector<Type*> ScalarArgs;
for (unsigned i = 0, e = gimple_call_num_args(stmt); i != e; ++i) {
tree arg = gimple_call_arg(stmt, i);
tree type = TREE_TYPE(arg);
- const Type *ArgTy = ConvertType(type);
+ Type *ArgTy = ConvertType(type);
// Push the argument.
if (ArgTy->isSingleValueType()) {
@@ -2921,11 +2921,11 @@
Constant *RealCallee = CE->getOperand(0);
assert(RealCallee->getType()->isPointerTy() &&
"Bitcast to ptr not from ptr?");
- const PointerType *RealPT = cast<PointerType>(RealCallee->getType());
- if (const FunctionType *RealFT =
+ PointerType *RealPT = cast<PointerType>(RealCallee->getType());
+ if (FunctionType *RealFT =
dyn_cast<FunctionType>(RealPT->getElementType())) {
- const PointerType *ActualPT = cast<PointerType>(Callee->getType());
- const FunctionType *ActualFT =
+ PointerType *ActualPT = cast<PointerType>(Callee->getType());
+ FunctionType *ActualFT =
cast<FunctionType>(ActualPT->getElementType());
if (RealFT->getReturnType() == ActualFT->getReturnType() &&
RealFT->getNumParams() == 0)
@@ -3010,7 +3010,7 @@
}
if (!DestLoc) {
- const Type *RetTy = ConvertType(gimple_call_return_type(stmt));
+ Type *RetTy = ConvertType(gimple_call_return_type(stmt));
if (Call->getType() == RetTy)
return Call; // Normal scalar return.
@@ -3035,7 +3035,7 @@
// beginning of the aggregate (x86-64).
Value *Ptr = DestLoc->Ptr;
// AggTy - The type of the aggregate being stored to.
- const Type *AggTy = cast<PointerType>(Ptr->getType())->getElementType();
+ Type *AggTy = cast<PointerType>(Ptr->getType())->getElementType();
// MaxStoreSize - The maximum number of bytes we can store without overflowing
// the aggregate.
int64_t MaxStoreSize = TD.getTypeAllocSize(AggTy);
@@ -3057,7 +3057,7 @@
Builder.CreateStore(Call, Tmp);
// Load the desired number of bytes back out again as an integer of the
// appropriate size.
- const Type *SmallTy = IntegerType::get(Context, MaxStoreSize*8);
+ Type *SmallTy = IntegerType::get(Context, MaxStoreSize*8);
Tmp = Builder.CreateBitCast(Tmp, PointerType::getUnqual(SmallTy));
Val = Builder.CreateLoad(Tmp);
// Store the integer rather than the call result to the aggregate.
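The spill-and-reload trick above amounts to this sketch (same 2011-era API
caveat; truncateResult is an invented name): the full call result is stored to
a stack slot, and exactly MaxStoreSize bytes are read back as one integer so
the later store into the aggregate cannot overflow it.

#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"
#include "llvm/Support/IRBuilder.h"

using namespace llvm;

// Reload only MaxStoreSize bytes of a spilled call result as an integer.
static Value *truncateResult(IRBuilder<> &Builder, LLVMContext &Context,
                             Value *Call, Value *Tmp, int64_t MaxStoreSize) {
  Builder.CreateStore(Call, Tmp);                  // spill the full result
  Type *SmallTy = IntegerType::get(Context, unsigned(MaxStoreSize * 8));
  Tmp = Builder.CreateBitCast(Tmp, PointerType::getUnqual(SmallTy));
  return Builder.CreateLoad(Tmp);                  // read back only iN
}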
@@ -3097,11 +3097,11 @@
#endif
va_end(ops);
- const Type *RetTy = TREE_CODE(ret_type) == VOID_TYPE ?
+ Type *RetTy = TREE_CODE(ret_type) == VOID_TYPE ?
Type::getVoidTy(Context) : getRegType(ret_type);
// The LLVM argument types.
- std::vector<const Type*> ArgTys;
+ std::vector<Type*> ArgTys;
ArgTys.reserve(Args.size());
for (unsigned i = 0, e = Args.size(); i != e; ++i)
ArgTys.push_back(Args[i]->getType());
@@ -3115,7 +3115,7 @@
#endif
// Get the function declaration for the callee.
- const FunctionType *FTy = FunctionType::get(RetTy, ArgTys, /*isVarArg*/false);
+ FunctionType *FTy = FunctionType::get(RetTy, ArgTys, /*isVarArg*/false);
Constant *Func = TheModule->getOrInsertFunction(CalleeName, FTy);
// If the function already existed with the wrong prototype then don't try to
@@ -3153,15 +3153,15 @@
/// Reads from register variables are handled by emitting an inline asm node
/// that copies the value out of the specified register.
Value *TreeToLLVM::EmitReadOfRegisterVariable(tree decl) {
- const Type *MemTy = ConvertType(TREE_TYPE(decl));
- const Type *RegTy = getRegType(TREE_TYPE(decl));
+ Type *MemTy = ConvertType(TREE_TYPE(decl));
+ Type *RegTy = getRegType(TREE_TYPE(decl));
// If there was an error, return something bogus.
if (ValidateRegisterVariable(decl))
return UndefValue::get(RegTy);
// Turn this into a 'tmp = call Ty asm "", "={reg}"()'.
- FunctionType *FTy = FunctionType::get(MemTy, std::vector<const Type*>(),
+ FunctionType *FTy = FunctionType::get(MemTy, std::vector<Type*>(),
false);
const char *Name = extractRegisterName(decl);
@@ -3186,7 +3186,7 @@
RHS = Reg2Mem(RHS, TREE_TYPE(decl), Builder);
// Turn this into a 'call void asm sideeffect "", "{reg}"(Ty %RHS)'.
- std::vector<const Type*> ArgTys;
+ std::vector<Type*> ArgTys;
ArgTys.push_back(RHS->getType());
FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context), ArgTys,
false);
@@ -3678,7 +3678,7 @@
Value *&Result) {
#ifdef LLVM_TARGET_INTRINSIC_LOWER
// Get the result type and operand list in an easy to consume format.
- const Type *ResultType = ConvertType(TREE_TYPE(TREE_TYPE(fndecl)));
+ Type *ResultType = ConvertType(TREE_TYPE(TREE_TYPE(fndecl)));
std::vector<Value*> Operands;
for (unsigned i = 0, e = gimple_call_num_args(stmt); i != e; ++i) {
tree OpVal = gimple_call_arg(stmt, i);
@@ -3718,12 +3718,12 @@
Value *
TreeToLLVM::BuildBinaryAtomicBuiltin(gimple stmt, Intrinsic::ID id) {
tree return_type = gimple_call_return_type(stmt);
- const Type *ResultTy = ConvertType(return_type);
+ Type *ResultTy = ConvertType(return_type);
Value* C[2] = {
EmitMemory(gimple_call_arg(stmt, 0)),
EmitMemory(gimple_call_arg(stmt, 1))
};
- const Type* Ty[2];
+ Type* Ty[2];
Ty[0] = ResultTy;
Ty[1] = ResultTy->getPointerTo();
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
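Since the atomic intrinsics are overloaded, both the value type and the
pointer type have to be supplied when looking up the declaration. A sketch of
the shape (the pointer-and-count form of getDeclaration matches the tree this
patch targets; emitBinaryAtomic is an invented name, and Val is assumed
already converted to ResultTy):

#include "llvm/Intrinsics.h"
#include "llvm/Module.h"
#include "llvm/Support/IRBuilder.h"

using namespace llvm;

// Look up an overloaded binary atomic intrinsic and call it on (Ptr, Val).
static Value *emitBinaryAtomic(IRBuilder<> &Builder, Module *TheModule,
                               Intrinsic::ID id, Value *Ptr, Value *Val,
                               Type *ResultTy) {
  Type *Ty[2] = { ResultTy, ResultTy->getPointerTo() };
  Ptr = Builder.CreateBitCast(Ptr, Ty[1]);   // match the pointer overload
  Function *F = Intrinsic::getDeclaration(TheModule, id, Ty, 2);
  return Builder.CreateCall2(F, Ptr, Val);
}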
@@ -3763,8 +3763,8 @@
tree new_val = gimple_call_arg(stmt, 2);
// The type loaded from/stored to memory.
- const Type *MemTy = IntegerType::get(Context, Bits);
- const Type *MemPtrTy = MemTy->getPointerTo();
+ Type *MemTy = IntegerType::get(Context, Bits);
+ Type *MemPtrTy = MemTy->getPointerTo();
Value *Ptr = Builder.CreateBitCast(EmitRegister(ptr), MemPtrTy);
Value *Old_Val = CastToAnyType(EmitRegister(old_val),
@@ -3783,7 +3783,7 @@
#endif
Value *Ops[3] = { Ptr, Old_Val, New_Val };
- const Type* Ty[2] = { MemTy, MemPtrTy };
+ Type* Ty[2] = { MemTy, MemPtrTy };
Value *Result =
Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
Intrinsic::atomic_cmp_swap,
@@ -3834,7 +3834,7 @@
if (IntrinsicID == Intrinsic::not_intrinsic) {
error_at(gimple_location(stmt),
"unsupported target builtin %<%s%> used", BuiltinName);
- const Type *ResTy = ConvertType(gimple_call_return_type(stmt));
+ Type *ResTy = ConvertType(gimple_call_return_type(stmt));
if (ResTy->isSingleValueType())
Result = UndefValue::get(ResTy);
return true;
@@ -3940,7 +3940,7 @@
};
// Grab the current return type.
- const Type* Ty = ConvertType(gimple_call_return_type(stmt));
+ Type* Ty = ConvertType(gimple_call_return_type(stmt));
// Manually coerce the arg to the correct pointer type.
Args[0] = Builder.CreateBitCast(Args[0], Type::getInt8PtrTy(Context));
@@ -3963,7 +3963,7 @@
Value *Amt = EmitMemory(gimple_call_arg(stmt, 0));
EmitBuiltinUnaryOp(Amt, Result, Intrinsic::ctlz);
tree return_type = gimple_call_return_type(stmt);
- const Type *DestTy = ConvertType(return_type);
+ Type *DestTy = ConvertType(return_type);
Result = Builder.CreateIntCast(Result, DestTy,
/*isSigned*/!TYPE_UNSIGNED(return_type),
"cast");
@@ -3975,7 +3975,7 @@
Value *Amt = EmitMemory(gimple_call_arg(stmt, 0));
EmitBuiltinUnaryOp(Amt, Result, Intrinsic::cttz);
tree return_type = gimple_call_return_type(stmt);
- const Type *DestTy = ConvertType(return_type);
+ Type *DestTy = ConvertType(return_type);
Result = Builder.CreateIntCast(Result, DestTy,
/*isSigned*/!TYPE_UNSIGNED(return_type),
"cast");
@@ -3989,7 +3989,7 @@
Result = Builder.CreateBinOp(Instruction::And, Result,
ConstantInt::get(Result->getType(), 1));
tree return_type = gimple_call_return_type(stmt);
- const Type *DestTy = ConvertType(return_type);
+ Type *DestTy = ConvertType(return_type);
Result = Builder.CreateIntCast(Result, DestTy,
/*isSigned*/!TYPE_UNSIGNED(return_type),
"cast");
@@ -4001,7 +4001,7 @@
Value *Amt = EmitMemory(gimple_call_arg(stmt, 0));
EmitBuiltinUnaryOp(Amt, Result, Intrinsic::ctpop);
tree return_type = gimple_call_return_type(stmt);
- const Type *DestTy = ConvertType(return_type);
+ Type *DestTy = ConvertType(return_type);
Result = Builder.CreateIntCast(Result, DestTy,
/*isSigned*/!TYPE_UNSIGNED(return_type),
"cast");
@@ -4012,7 +4012,7 @@
Value *Amt = EmitMemory(gimple_call_arg(stmt, 0));
EmitBuiltinUnaryOp(Amt, Result, Intrinsic::bswap);
tree return_type = gimple_call_return_type(stmt);
- const Type *DestTy = ConvertType(return_type);
+ Type *DestTy = ConvertType(return_type);
Result = Builder.CreateIntCast(Result, DestTy,
/*isSigned*/!TYPE_UNSIGNED(return_type),
"cast");
@@ -4157,13 +4157,13 @@
//TODO location_t locus = gimple_location(stmt);
//TODO Constant *lineNo = ConstantInt::get(Type::getInt32Ty, LOCATION_LINE(locus));
//TODO Constant *file = ConvertMetadataStringToGV(LOCATION_FILE(locus));
-//TODO const Type *SBP= Type::getInt8PtrTy(Context);
+//TODO Type *SBP= Type::getInt8PtrTy(Context);
//TODO file = TheFolder->CreateBitCast(file, SBP);
//TODO
//TODO // Get arguments.
//TODO tree arglist = CALL_EXPR_ARGS(stmt);
//TODO Value *ExprVal = EmitMemory(gimple_call_arg(stmt, 0));
-//TODO const Type *Ty = ExprVal->getType();
+//TODO Type *Ty = ExprVal->getType();
//TODO Value *StrVal = EmitMemory(gimple_call_arg(stmt, 1));
//TODO
//TODO SmallVector<Value *, 4> Args;
@@ -4325,12 +4325,12 @@
case BUILT_IN_ADD_AND_FETCH_2:
case BUILT_IN_ADD_AND_FETCH_4: {
tree return_type = gimple_call_return_type(stmt);
- const Type *ResultTy = ConvertType(return_type);
+ Type *ResultTy = ConvertType(return_type);
Value* C[2] = {
EmitMemory(gimple_call_arg(stmt, 0)),
EmitMemory(gimple_call_arg(stmt, 1))
};
- const Type* Ty[2];
+ Type* Ty[2];
Ty[0] = ResultTy;
Ty[1] = ResultTy->getPointerTo();
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
@@ -4373,12 +4373,12 @@
case BUILT_IN_SUB_AND_FETCH_2:
case BUILT_IN_SUB_AND_FETCH_4: {
tree return_type = gimple_call_return_type(stmt);
- const Type *ResultTy = ConvertType(return_type);
+ Type *ResultTy = ConvertType(return_type);
Value* C[2] = {
EmitMemory(gimple_call_arg(stmt, 0)),
EmitMemory(gimple_call_arg(stmt, 1))
};
- const Type* Ty[2];
+ Type* Ty[2];
Ty[0] = ResultTy;
Ty[1] = ResultTy->getPointerTo();
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
@@ -4421,12 +4421,12 @@
case BUILT_IN_OR_AND_FETCH_2:
case BUILT_IN_OR_AND_FETCH_4: {
tree return_type = gimple_call_return_type(stmt);
- const Type *ResultTy = ConvertType(return_type);
+ Type *ResultTy = ConvertType(return_type);
Value* C[2] = {
EmitMemory(gimple_call_arg(stmt, 0)),
EmitMemory(gimple_call_arg(stmt, 1))
};
- const Type* Ty[2];
+ Type* Ty[2];
Ty[0] = ResultTy;
Ty[1] = ResultTy->getPointerTo();
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
@@ -4469,12 +4469,12 @@
case BUILT_IN_AND_AND_FETCH_2:
case BUILT_IN_AND_AND_FETCH_4: {
tree return_type = gimple_call_return_type(stmt);
- const Type *ResultTy = ConvertType(return_type);
+ Type *ResultTy = ConvertType(return_type);
Value* C[2] = {
EmitMemory(gimple_call_arg(stmt, 0)),
EmitMemory(gimple_call_arg(stmt, 1))
};
- const Type* Ty[2];
+ Type* Ty[2];
Ty[0] = ResultTy;
Ty[1] = ResultTy->getPointerTo();
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
@@ -4517,12 +4517,12 @@
case BUILT_IN_XOR_AND_FETCH_2:
case BUILT_IN_XOR_AND_FETCH_4: {
tree return_type = gimple_call_return_type(stmt);
- const Type *ResultTy = ConvertType(return_type);
+ Type *ResultTy = ConvertType(return_type);
Value* C[2] = {
EmitMemory(gimple_call_arg(stmt, 0)),
EmitMemory(gimple_call_arg(stmt, 1))
};
- const Type* Ty[2];
+ Type* Ty[2];
Ty[0] = ResultTy;
Ty[1] = ResultTy->getPointerTo();
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
@@ -4565,12 +4565,12 @@
case BUILT_IN_NAND_AND_FETCH_2:
case BUILT_IN_NAND_AND_FETCH_4: {
tree return_type = gimple_call_return_type(stmt);
- const Type *ResultTy = ConvertType(return_type);
+ Type *ResultTy = ConvertType(return_type);
Value* C[2] = {
EmitMemory(gimple_call_arg(stmt, 0)),
EmitMemory(gimple_call_arg(stmt, 1))
};
- const Type* Ty[2];
+ Type* Ty[2];
Ty[0] = ResultTy;
Ty[1] = ResultTy->getPointerTo();
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
@@ -4614,7 +4614,7 @@
// The argument has typically been coerced to "volatile void*"; the
// only way to find the size of the operation is from the builtin
// opcode.
- const Type *Ty;
+ Type *Ty;
switch(DECL_FUNCTION_CODE(fndecl)) {
case BUILT_IN_LOCK_RELEASE_16: // not handled; should use SSE on x86
default:
@@ -4677,7 +4677,7 @@
// FIXME: HACK: Just ignore these.
{
- const Type *Ty = ConvertType(gimple_call_return_type(stmt));
+ Type *Ty = ConvertType(gimple_call_return_type(stmt));
if (!Ty->isVoidTy())
Result = Constant::getNullValue(Ty);
return true;
@@ -4693,7 +4693,7 @@
// varying type. Make sure that we specify the actual type for "iAny"
// by passing it as the 3rd and 4th parameters. This isn't needed for
// most intrinsics, but is needed for ctpop, cttz, ctlz.
- const Type *Ty = InVal->getType();
+ Type *Ty = InVal->getType();
Result = Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Id, &Ty, 1),
InVal);
return true;
@@ -4701,7 +4701,7 @@
Value *TreeToLLVM::EmitBuiltinSQRT(gimple stmt) {
Value *Amt = EmitMemory(gimple_call_arg(stmt, 0));
- const Type* Ty = Amt->getType();
+ Type* Ty = Amt->getType();
return Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
Intrinsic::sqrt, &Ty, 1),
@@ -4714,7 +4714,7 @@
Value *Val = EmitMemory(gimple_call_arg(stmt, 0));
Value *Pow = EmitMemory(gimple_call_arg(stmt, 1));
- const Type *Ty = Val->getType();
+ Type *Ty = Val->getType();
Pow = Builder.CreateIntCast(Pow, Type::getInt32Ty(Context), /*isSigned*/true);
SmallVector<Value *,2> Args;
@@ -4731,7 +4731,7 @@
Value *Val = EmitMemory(gimple_call_arg(stmt, 0));
Value *Pow = EmitMemory(gimple_call_arg(stmt, 1));
- const Type *Ty = Val->getType();
+ Type *Ty = Val->getType();
SmallVector<Value *,2> Args;
Args.push_back(Val);
@@ -4755,7 +4755,7 @@
// Then type cast the result of the "ceil" call.
tree type = gimple_call_return_type(stmt);
- const Type *RetTy = getRegType(type);
+ Type *RetTy = getRegType(type);
return TYPE_UNSIGNED(type) ? Builder.CreateFPToUI(Call, RetTy) :
Builder.CreateFPToSI(Call, RetTy);
}
@@ -4774,7 +4774,7 @@
// Then type cast the result of the "floor" call.
tree type = gimple_call_return_type(stmt);
- const Type *RetTy = getRegType(type);
+ Type *RetTy = getRegType(type);
return TYPE_UNSIGNED(type) ? Builder.CreateFPToUI(Call, RetTy) :
Builder.CreateFPToSI(Call, RetTy);
}
@@ -4796,9 +4796,9 @@
Value *CosPtr = CreateTemporary(Val->getType());
// Get the LLVM function declaration for sincos.
- const Type *ArgTys[3] =
+ Type *ArgTys[3] =
{ Val->getType(), SinPtr->getType(), CosPtr->getType() };
- const FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context),
+ FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context),
ArgTys, /*isVarArg*/false);
Constant *Func = TheModule->getOrInsertFunction(Name, FTy);
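The sincos declaration above follows the usual getOrInsertFunction recipe:
void return, the value argument, and two output pointers. A sketch
(declareSincos is an invented name; the real code takes the argument types
from the values it already has in hand):

#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"

using namespace llvm;

// Declare (or find) "void sincos(T, T*, T*)" for the given value type.
static Constant *declareSincos(Module *TheModule, LLVMContext &Context,
                               Type *ValTy, const char *Name) {
  Type *ArgTys[3] = { ValTy, ValTy->getPointerTo(), ValTy->getPointerTo() };
  FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context),
                                        ArgTys, /*isVarArg*/ false);
  return TheModule->getOrInsertFunction(Name, FTy);
}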
@@ -4839,7 +4839,7 @@
// Get the GCC and LLVM function types for cexp.
tree cplx_type = gimple_call_return_type(stmt);
tree fntype = build_function_type_list(cplx_type, cplx_type, NULL_TREE);
- const FunctionType *FTy = cast<FunctionType>(ConvertType(fntype));
+ FunctionType *FTy = cast<FunctionType>(ConvertType(fntype));
// Get the LLVM function declaration for cexp.
Constant *Func = TheModule->getOrInsertFunction(Name, FTy);
@@ -4873,7 +4873,7 @@
// Push the argument.
bool PassedInMemory;
- const Type *CplxTy = CplxArg->getType();
+ Type *CplxTy = CplxArg->getType();
if (LLVM_SHOULD_PASS_AGGREGATE_AS_FCA(cplx_type, CplxTy)) {
Client.pushValue(CplxArg);
PassedInMemory = false;
@@ -4886,7 +4886,7 @@
}
Attributes Attrs = Attribute::None;
- std::vector<const Type*> ScalarArgs;
+ std::vector<Type*> ScalarArgs;
ABIConverter.HandleArgument(cplx_type, ScalarArgs, &Attrs);
assert(Attrs == Attribute::None && "Got attributes but none given!");
Client.clear();
@@ -4922,7 +4922,7 @@
"Size mismatch in scalar to scalar conversion!");
Value *Tmp = CreateTemporary(CI->getType());
Builder.CreateStore(CI, Tmp);
- const Type *CplxPtrTy = CplxTy->getPointerTo();
+ Type *CplxPtrTy = CplxTy->getPointerTo();
return Builder.CreateLoad(Builder.CreateBitCast(Tmp, CplxPtrTy));
}
}
@@ -5263,7 +5263,7 @@
if (!validate_gimple_arglist(stmt, INTEGER_TYPE, POINTER_TYPE, VOID_TYPE))
return false;
- const Type *IntPtr = TD.getIntPtrType(Context);
+ Type *IntPtr = TD.getIntPtrType(Context);
Value *Offset = EmitMemory(gimple_call_arg(stmt, 0));
Value *Handler = EmitMemory(gimple_call_arg(stmt, 1));
@@ -5431,7 +5431,7 @@
Arg2 = EmitMemory(Arg2T);
}
- static const Type *VPTy = Type::getInt8PtrTy(Context);
+ static Type *VPTy = Type::getInt8PtrTy(Context);
// FIXME: This ignores alignment and volatility of the arguments.
SmallVector<Value *, 2> Args;
@@ -5447,7 +5447,7 @@
if (!validate_gimple_arglist(stmt, POINTER_TYPE, VOID_TYPE))
return false;
- const Type *ResultTy = ConvertType(gimple_call_return_type(stmt));
+ Type *ResultTy = ConvertType(gimple_call_return_type(stmt));
// The adjusted value is stored as a pointer at the start of the storage GCC
// allocated for the trampoline - load it out and return it.
@@ -5480,10 +5480,10 @@
// The trampoline machine code itself is stored in a stack temporary that we
// create (one for each init_trampoline) in the function where init_trampoline
// is called.
- static const Type *VPTy = Type::getInt8PtrTy(Context);
+ static Type *VPTy = Type::getInt8PtrTy(Context);
// Create a stack temporary to hold the trampoline machine code.
- const Type *TrampType = ArrayType::get(Type::getInt8Ty(Context),
+ Type *TrampType = ArrayType::get(Type::getInt8Ty(Context),
TRAMPOLINE_SIZE);
AllocaInst *TrampTmp = CreateTemporary(TrampType);
TrampTmp->setAlignment(TRAMPOLINE_ALIGNMENT);
@@ -5529,7 +5529,7 @@
Value *TreeToLLVM::CreateComplex(Value *Real, Value *Imag) {
assert(Real->getType() == Imag->getType() && "Component type mismatch!");
- const Type *EltTy = Real->getType();
+ Type *EltTy = Real->getType();
Value *Result = UndefValue::get(StructType::get(EltTy, EltTy, NULL));
Result = Builder.CreateInsertValue(Result, Real, 0);
Result = Builder.CreateInsertValue(Result, Imag, 1);
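CreateComplex shows the in-register representation of complex values: a
two-element struct of the component type, filled in with insertvalue.
Extracted as a standalone sketch (createComplex is an invented name):

#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Support/IRBuilder.h"

using namespace llvm;

// Build the {EltTy, EltTy} struct used for complex values in registers.
static Value *createComplex(IRBuilder<> &Builder, Value *Real, Value *Imag) {
  Type *EltTy = Real->getType();
  Value *Result = UndefValue::get(StructType::get(EltTy, EltTy, NULL));
  Result = Builder.CreateInsertValue(Result, Real, 0);
  return Builder.CreateInsertValue(Result, Imag, 1);
}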
@@ -5549,7 +5549,7 @@
Value *TreeToLLVM::EmitFieldAnnotation(Value *FieldPtr, tree FieldDecl) {
tree AnnotateAttr = lookup_attribute("annotate", DECL_ATTRIBUTES(FieldDecl));
- const Type *SBP = Type::getInt8PtrTy(Context);
+ Type *SBP = Type::getInt8PtrTy(Context);
Function *Fn = Intrinsic::getDeclaration(TheModule,
Intrinsic::ptr_annotation,
@@ -5595,7 +5595,7 @@
File, LineNo
};
- const Type* FieldPtrType = FieldPtr->getType();
+ Type* FieldPtrType = FieldPtr->getType();
FieldPtr = Builder.CreateCall(Fn, Ops);
FieldPtr = Builder.CreateBitCast(FieldPtr, FieldPtrType);
}
@@ -5636,7 +5636,7 @@
ArrayAddr = ArrayAddrLV.Ptr;
ArrayAlign = ArrayAddrLV.getAlignment();
- const Type *IntPtrTy = getTargetData().getIntPtrType(Context);
+ Type *IntPtrTy = getTargetData().getIntPtrType(Context);
IndexVal = Builder.CreateIntCast(IndexVal, IntPtrTy,
/*isSigned*/!TYPE_UNSIGNED(IndexType));
@@ -5644,7 +5644,7 @@
if (isSequentialCompatible(ArrayTreeType)) {
// Avoid any assumptions about how the array type is represented in LLVM by
// doing the GEP on a pointer to the first array element.
- const Type *EltTy = ConvertType(ElementType);
+ Type *EltTy = ConvertType(ElementType);
ArrayAddr = Builder.CreateBitCast(ArrayAddr, EltTy->getPointerTo());
Value *Ptr = POINTER_TYPE_OVERFLOW_UNDEFINED ?
Builder.CreateInBoundsGEP(ArrayAddr, IndexVal) :
@@ -5678,7 +5678,7 @@
"Alignment not a power of two!");
assert(TYPE_ALIGN(ElementType) >= 8 && "Unit size not a multiple of 8 bits!");
// ScaleType is chosen to correct for the division in ScaleFactor.
- const Type *ScaleType = IntegerType::get(Context, TYPE_ALIGN(ElementType));
+ Type *ScaleType = IntegerType::get(Context, TYPE_ALIGN(ElementType));
ArrayAddr = Builder.CreateBitCast(ArrayAddr, ScaleType->getPointerTo());
IndexVal = Builder.CreateMul(IndexVal, ScaleFactor);
@@ -5697,7 +5697,7 @@
unsigned BitStart = (unsigned)TREE_INT_CST_LOW(TREE_OPERAND(exp, 2));
unsigned BitSize = (unsigned)TREE_INT_CST_LOW(TREE_OPERAND(exp, 1));
- const Type *ValTy = ConvertType(TREE_TYPE(exp));
+ Type *ValTy = ConvertType(TREE_TYPE(exp));
unsigned ValueSizeInBits = TD.getTypeSizeInBits(ValTy);
assert(BitSize <= ValueSizeInBits &&
@@ -5737,14 +5737,14 @@
TREE_CODE(DECL_CONTEXT(FieldDecl)) == UNION_TYPE ||
TREE_CODE(DECL_CONTEXT(FieldDecl)) == QUAL_UNION_TYPE));
- const Type *StructTy = ConvertType(DECL_CONTEXT(FieldDecl));
+ Type *StructTy = ConvertType(DECL_CONTEXT(FieldDecl));
assert((!StructAddrLV.isBitfield() ||
StructAddrLV.BitStart == 0) && "structs cannot be bitfields!");
StructAddrLV.Ptr = Builder.CreateBitCast(StructAddrLV.Ptr,
StructTy->getPointerTo());
- const Type *FieldTy = ConvertType(TREE_TYPE(FieldDecl));
+ Type *FieldTy = ConvertType(TREE_TYPE(FieldDecl));
// BitStart - This is the actual offset of the field from the start of the
// struct, in bits. For bitfields this may be on a non-byte boundary.
@@ -5794,7 +5794,7 @@
BitStart -= ByteOffset*8;
}
- const Type *BytePtrTy = Type::getInt8PtrTy(Context);
+ Type *BytePtrTy = Type::getInt8PtrTy(Context);
FieldPtr = Builder.CreateBitCast(StructAddrLV.Ptr, BytePtrTy);
FieldPtr = Builder.CreateInBoundsGEP(FieldPtr, Offset);
FieldPtr = Builder.CreateBitCast(FieldPtr, FieldTy->getPointerTo());
@@ -5813,7 +5813,7 @@
if (!isBitfield(FieldDecl)) {
assert(BitStart == 0 && "Not a bitfield but not at a byte offset!");
// Make sure we return a pointer to the right type.
- const Type *EltTy = ConvertType(TREE_TYPE(exp));
+ Type *EltTy = ConvertType(TREE_TYPE(exp));
FieldPtr = Builder.CreateBitCast(FieldPtr, EltTy->getPointerTo());
return LValue(FieldPtr, LVAlign);
}
@@ -5826,7 +5826,7 @@
"Variable sized bitfield?");
unsigned BitfieldSize = TREE_INT_CST_LOW(DECL_SIZE(FieldDecl));
- const Type *LLVMFieldTy =
+ Type *LLVMFieldTy =
cast<PointerType>(FieldPtr->getType())->getElementType();
// If the LLVM notion of the field type contains the entire bitfield being
@@ -5913,19 +5913,19 @@
Value *Decl = DEFINITION_LOCAL(exp);
if (Decl == 0) {
if (errorcount || sorrycount) {
- const Type *Ty = ConvertType(TREE_TYPE(exp));
- const PointerType *PTy = Ty->getPointerTo();
+ Type *Ty = ConvertType(TREE_TYPE(exp));
+ PointerType *PTy = Ty->getPointerTo();
LValue LV(ConstantPointerNull::get(PTy), 1);
return LV;
}
DieAbjectly("Referencing decl that hasn't been laid out!", exp);
}
- const Type *Ty = ConvertType(TREE_TYPE(exp));
+ Type *Ty = ConvertType(TREE_TYPE(exp));
// If we have "extern void foo", make the global have type {} instead of
// type void.
if (Ty->isVoidTy()) Ty = StructType::get(Context);
- const PointerType *PTy = Ty->getPointerTo();
+ PointerType *PTy = Ty->getPointerTo();
unsigned Alignment = Ty->isSized() ? TD.getABITypeAlignment(Ty) : 1;
if (DECL_ALIGN(exp)) {
if (DECL_USER_ALIGN(exp) || 8 * Alignment < (unsigned)DECL_ALIGN(exp))
@@ -6202,7 +6202,7 @@
ConstantInt *CI = ConstantInt::get(Context, getIntegerValue(reg));
// The destination can be a pointer, integer or floating point type so we need
// a generalized cast here.
- const Type *Ty = getRegType(TREE_TYPE(reg));
+ Type *Ty = getRegType(TREE_TYPE(reg));
Instruction::CastOps opcode = CastInst::getCastOpcode(CI, false, Ty,
!TYPE_UNSIGNED(TREE_TYPE(reg)));
return TheFolder->CreateCast(opcode, CI, Ty);
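CastInst::getCastOpcode is what makes this "generalized cast" work: it picks
trunc/zext/sext/inttoptr/uitofp/sitofp as appropriate for the destination. A
sketch using ConstantExpr::getCast in place of the folder call (generalCast is
an invented name):

#include "llvm/Constants.h"
#include "llvm/Instructions.h"

using namespace llvm;

// Cast an integer constant to a pointer, integer or floating point type,
// letting getCastOpcode select the right cast instruction.
static Constant *generalCast(ConstantInt *CI, Type *Ty, bool DestIsSigned) {
  Instruction::CastOps opcode =
      CastInst::getCastOpcode(CI, /*SrcIsSigned*/ false, Ty, DestIsSigned);
  return ConstantExpr::getCast(opcode, CI, Ty);
}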
@@ -6276,7 +6276,7 @@
// Convert the elements.
SmallVector<Constant*, 16> Elts;
- const IntegerType *IntTy = getTargetData().getIntPtrType(Context);
+ IntegerType *IntTy = getTargetData().getIntPtrType(Context);
for (tree elt = TREE_VECTOR_CST_ELTS(reg); elt; elt = TREE_CHAIN(elt)) {
Constant *Elt = EmitRegisterConstant(TREE_VALUE(elt));
// LLVM does not support vectors of pointers, so turn any pointers into
@@ -6299,8 +6299,8 @@
/// Mem2Reg - Convert a value of in-memory type (that given by ConvertType)
/// to in-register type (that given by getRegType).
Value *TreeToLLVM::Mem2Reg(Value *V, tree type, LLVMBuilder &Builder) {
- const Type *MemTy = V->getType();
- const Type *RegTy = getRegType(type);
+ Type *MemTy = V->getType();
+ Type *RegTy = getRegType(type);
assert(MemTy == ConvertType(type) && "Not of memory type!");
if (MemTy == RegTy)
@@ -6341,8 +6341,8 @@
/// Reg2Mem - Convert a value of in-register type (that given by getRegType)
/// to in-memory type (that given by ConvertType).
Value *TreeToLLVM::Reg2Mem(Value *V, tree type, LLVMBuilder &Builder) {
- const Type *RegTy = V->getType();
- const Type *MemTy = ConvertType(type);
+ Type *RegTy = V->getType();
+ Type *MemTy = ConvertType(type);
assert(RegTy == getRegType(type) && "Not of register type!");
if (RegTy == MemTy)
@@ -6390,7 +6390,7 @@
Value *TreeToLLVM::LoadRegisterFromMemory(MemRef Loc, tree type,
LLVMBuilder &Builder) {
// NOTE: Needs to be kept in sync with getRegType.
- const Type *MemTy = ConvertType(type);
+ Type *MemTy = ConvertType(type);
Value *Ptr = Builder.CreateBitCast(Loc.Ptr, MemTy->getPointerTo());
LoadInst *LI = Builder.CreateLoad(Ptr, Loc.Volatile);
LI->setAlignment(Loc.getAlignment());
@@ -6403,7 +6403,7 @@
void TreeToLLVM::StoreRegisterToMemory(Value *V, MemRef Loc, tree type,
LLVMBuilder &Builder) {
// NOTE: Needs to be kept in sync with getRegType.
- const Type *MemTy = ConvertType(type);
+ Type *MemTy = ConvertType(type);
Value *Ptr = Builder.CreateBitCast(Loc.Ptr, MemTy->getPointerTo());
StoreInst *SI = Builder.CreateStore(Reg2Mem(V, type, Builder), Ptr,
Loc.Volatile);
@@ -6413,7 +6413,7 @@
/// VectorHighElements - Return a vector of half the length, consisting of the
/// elements of the given vector with indices in the top half.
Value *TreeToLLVM::VectorHighElements(Value *Vec) {
- const VectorType *Ty = cast<VectorType>(Vec->getType());
+ VectorType *Ty = cast<VectorType>(Vec->getType());
assert(!(Ty->getNumElements() & 1) && "Vector has odd number of elements!");
unsigned NumElts = Ty->getNumElements() / 2;
SmallVector<Constant*, 8> Mask;
@@ -6427,7 +6427,7 @@
/// VectorLowElements - Return a vector of half the length, consisting of the
/// elements of the given vector with indices in the bottom half.
Value *TreeToLLVM::VectorLowElements(Value *Vec) {
- const VectorType *Ty = cast<VectorType>(Vec->getType());
+ VectorType *Ty = cast<VectorType>(Vec->getType());
assert(!(Ty->getNumElements() & 1) && "Vector has odd number of elements!");
unsigned NumElts = Ty->getNumElements() / 2;
SmallVector<Constant*, 8> Mask;
@@ -6533,12 +6533,12 @@
} else if (TREE_CODE(TREE_TYPE(op)) == VECTOR_TYPE) {
// Clear the sign bits.
Value *Op = EmitRegister(op);
- const VectorType *VecTy = cast<VectorType>(Op->getType());
+ VectorType *VecTy = cast<VectorType>(Op->getType());
// Mask = ~(1 << (Bits-1)).
unsigned Bits = VecTy->getElementType()->getPrimitiveSizeInBits();
- const Type *IntTy = IntegerType::get(Context, Bits);
- const Type *IntVecTy = VectorType::get(IntTy, VecTy->getNumElements());
+ Type *IntTy = IntegerType::get(Context, Bits);
+ Type *IntVecTy = VectorType::get(IntTy, VecTy->getNumElements());
APInt API = APInt::getAllOnesValue(Bits);
API.clearBit(Bits-1);
Constant *Mask = ConstantInt::get(IntVecTy, API);
@@ -6726,7 +6726,7 @@
// where v = <v0, v1, undef, undef>. The first element of w is the max/min
// of x0,x1,x2,x3.
Value *Val = EmitRegister(op);
- const Type *Ty = Val->getType();
+ Type *Ty = Val->getType();
CmpInst::Predicate Pred =
CmpInst::Predicate(FLOAT_TYPE_P(TREE_TYPE(op)) ?
@@ -6772,7 +6772,7 @@
// w = <v0, undef, undef, undef> + <v1, undef, undef, undef>
// where v = <v0, v1, undef, undef>. The first element of w is x0+x1+x2+x3.
Value *Val = EmitRegister(op);
- const Type *Ty = Val->getType();
+ Type *Ty = Val->getType();
unsigned Length = TYPE_VECTOR_SUBPARTS(TREE_TYPE(op));
assert(Length > 1 && !(Length & (Length - 1)) && "Length not a power of 2!");
@@ -6845,12 +6845,12 @@
// with all elements equal.
assert(LHS->getType()->isVectorTy() &&
"Shifting a scalar by a vector amount!");
- const VectorType *VecTy = cast<VectorType>(LHS->getType());
+ VectorType *VecTy = cast<VectorType>(LHS->getType());
RHS = CastToAnyType(RHS, /*isSigned*/false, VecTy->getElementType(),
/*isSigned*/false);
RHS = Builder.CreateInsertElement(UndefValue::get(VecTy), RHS,
Builder.getInt32(0));
- const Type *MaskTy = VectorType::get(Type::getInt32Ty(Context),
+ Type *MaskTy = VectorType::get(Type::getInt32Ty(Context),
VecTy->getNumElements());
RHS = Builder.CreateShuffleVector(RHS, UndefValue::get(VecTy),
ConstantInt::get(MaskTy, 0));
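The insertelement-plus-shufflevector pair above is the standard splat idiom:
put the scalar in lane 0, then shuffle with an all-zero mask so every output
lane reads lane 0. As a standalone sketch (broadcastScalar is an invented
name; the amount is assumed already cast to the element type):

#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"
#include "llvm/Support/IRBuilder.h"

using namespace llvm;

// Splat a scalar shift amount across all lanes of VecTy.
static Value *broadcastScalar(IRBuilder<> &Builder, LLVMContext &Context,
                              Value *Scalar, VectorType *VecTy) {
  Value *V = Builder.CreateInsertElement(UndefValue::get(VecTy), Scalar,
                                         Builder.getInt32(0));
  Type *MaskTy = VectorType::get(Type::getInt32Ty(Context),
                                 VecTy->getNumElements());
  // A zero splat mask: every output lane selects input lane 0.
  return Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
                                     ConstantInt::get(MaskTy, 0));
}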
@@ -6862,7 +6862,7 @@
Value *TreeToLLVM::EmitReg_VecShiftOp(tree op0, tree op1, bool isLeftShift) {
Value *LHS = EmitRegister(op0); // A vector.
Value *Amt = EmitRegister(op1); // An integer.
- const VectorType *VecTy = cast<VectorType>(LHS->getType());
+ VectorType *VecTy = cast<VectorType>(LHS->getType());
unsigned Bits = VecTy->getPrimitiveSizeInBits();
// If the shift is by a multiple of the element size then emit a shuffle.
@@ -6945,7 +6945,7 @@
// LHS CDiv RHS = (LHS - Sign(RHS)) Div RHS + 1
// otherwise.
- const Type *Ty = getRegType(TREE_TYPE(op0));
+ Type *Ty = getRegType(TREE_TYPE(op0));
Constant *Zero = ConstantInt::get(Ty, 0);
Constant *One = ConstantInt::get(Ty, 1);
Constant *MinusOne = Constant::getAllOnesValue(Ty);
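The identity in the comment above can be checked in plain C++. The first case
of the comment falls outside this hunk; the same-sign guard below is one way
to complete it (an editorial reconstruction, not quoted from the patch),
verified exhaustively over a small range:

#include <cassert>

static int sign(int x) { return (x > 0) - (x < 0); }

// Ceiling division for signed ints on top of C's truncating '/'.
static int cdiv(int lhs, int rhs) {
  if (sign(lhs) == sign(rhs) && lhs != 0)
    return (lhs - sign(rhs)) / rhs + 1;  // rounds up, divisible or not
  return lhs / rhs;  // negative quotient: truncation toward zero rounds up
}

int main() {
  for (int a = -20; a <= 20; ++a)
    for (int b = -20; b <= 20; ++b) {
      if (b == 0) continue;
      int q = cdiv(a, b);
      // q == ceil(a/b) iff q >= a/b > q-1 as rationals.
      bool ge = (b > 0) ? (q * b >= a) : (q * b <= a);
      bool gt = (b > 0) ? ((q - 1) * b < a) : ((q - 1) * b > a);
      assert(ge && gt);
    }
  return 0;
}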
@@ -7033,7 +7033,7 @@
// same sign, so FDiv is the same as Div.
return Builder.CreateUDiv(LHS, RHS, "fdiv");
- const Type *Ty = getRegType(TREE_TYPE(op0));
+ Type *Ty = getRegType(TREE_TYPE(op0));
Constant *Zero = ConstantInt::get(Ty, 0);
Constant *One = ConstantInt::get(Ty, 1);
Constant *MinusOne = Constant::getAllOnesValue(Ty);
@@ -7080,7 +7080,7 @@
// LHS and RHS values must have the same sign if their type is unsigned.
return Builder.CreateURem(LHS, RHS);
- const Type *Ty = getRegType(TREE_TYPE(op0));
+ Type *Ty = getRegType(TREE_TYPE(op0));
Constant *Zero = ConstantInt::get(Ty, 0);
// The two possible values for Mod.
@@ -7236,7 +7236,7 @@
// required to ensure correct results. The details depend on whether
// we are doing signed or unsigned arithmetic.
- const Type *Ty = getRegType(TREE_TYPE(op0));
+ Type *Ty = getRegType(TREE_TYPE(op0));
Constant *Zero = ConstantInt::get(Ty, 0);
Constant *Two = ConstantInt::get(Ty, 2);
@@ -7416,7 +7416,7 @@
// Truncate the input elements to the output element type, e.g. <2 x double>
// -> <2 x float>.
unsigned Length = TYPE_VECTOR_SUBPARTS(TREE_TYPE(op0));
- const Type *DestTy = VectorType::get(getRegType(TREE_TYPE(type)), Length);
+ Type *DestTy = VectorType::get(getRegType(TREE_TYPE(type)), Length);
LHS = CastToAnyType(LHS, !TYPE_UNSIGNED(TREE_TYPE(op0)), DestTy,
!TYPE_UNSIGNED(TREE_TYPE(type)));
RHS = CastToAnyType(RHS, !TYPE_UNSIGNED(TREE_TYPE(op0)), DestTy,
@@ -7440,7 +7440,7 @@
// Extend the input elements to the output element type, e.g. <2 x float>
// -> <2 x double>.
- const Type *DestTy = getRegType(type);
+ Type *DestTy = getRegType(type);
return CastToAnyType(Op, !TYPE_UNSIGNED(TREE_TYPE(op0)), DestTy,
!TYPE_UNSIGNED(TREE_TYPE(type)));
}
@@ -7454,7 +7454,7 @@
// Extend the input elements to the output element type, e.g. <2 x float>
// -> <2 x double>.
- const Type *DestTy = getRegType(type);
+ Type *DestTy = getRegType(type);
return CastToAnyType(Op, !TYPE_UNSIGNED(TREE_TYPE(op0)), DestTy,
!TYPE_UNSIGNED(TREE_TYPE(type)));
}
@@ -7601,7 +7601,7 @@
// CallResultTypes - The inline asm call may return one or more results. The
// types of the results are recorded here along with a flag indicating whether
// the corresponding GCC type is signed.
- SmallVector<std::pair<const Type *, bool>, 4> CallResultTypes;
+ SmallVector<std::pair<Type *, bool>, 4> CallResultTypes;
// CallResultDests - Each result returned by the inline asm call is stored in
// a memory location. These are listed here along with a flag indicating if
@@ -7673,7 +7673,7 @@
}
LValue Dest;
- const Type *DestValTy = ConvertType(TREE_TYPE(Operand));
+ Type *DestValTy = ConvertType(TREE_TYPE(Operand));
if (TREE_CODE(Operand) == SSA_NAME) {
// The ASM is defining an ssa name. Store the output to a temporary, then
// load it out again later as the ssa name.
@@ -7718,10 +7718,10 @@
return;
bool isIndirect = false;
if (AllowsReg || !AllowsMem) { // Register operand.
- const Type *LLVMTy = ConvertType(type);
+ Type *LLVMTy = ConvertType(type);
Value *Op = 0;
- const Type *OpTy = LLVMTy;
+ Type *OpTy = LLVMTy;
if (LLVMTy->isSingleValueType()) {
if (TREE_CODE(Val)==ADDR_EXPR &&
TREE_CODE(TREE_OPERAND(Val,0))==LABEL_DECL) {
@@ -7773,7 +7773,7 @@
unsigned Match = atoi(Constraint);
// This output might have gotten put in either CallResult or CallArg
// depending on whether it's a register or not. Find its type.
- const Type *OTy = 0;
+ Type *OTy = 0;
unsigned OutputIndex = ~0U;
if (Match < OutputLocations.size()) {
// Indices here known to be within range.
@@ -7931,7 +7931,7 @@
}
// Compute the return type to use for the asm call.
- const Type *CallResultType;
+ Type *CallResultType;
switch (CallResultTypes.size()) {
// If there are no results then the return type is void!
case 0: CallResultType = Type::getVoidTy(Context); break;
@@ -7940,7 +7940,7 @@
// If the asm returns multiple results then create a struct type with the
// result types as its fields, and use it for the return type.
default:
- std::vector<const Type*> Fields(CallResultTypes.size());
+ std::vector<Type*> Fields(CallResultTypes.size());
for (unsigned i = 0, e = CallResultTypes.size(); i != e; ++i)
Fields[i] = CallResultTypes[i].first;
CallResultType = StructType::get(Context, Fields);
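The three-way switch above is cut off by the hunk boundary; its shape is
presumably as follows (a sketch with an invented helper name, assuming the
single-result case simply returns the sole recorded type):

#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"
#include <vector>

using namespace llvm;

// Compute the return type for an inline asm call from its output types.
static Type *asmResultType(LLVMContext &Context,
                           const std::vector<Type*> &ResultTypes) {
  switch (ResultTypes.size()) {
  case 0:                                   // no outputs -> void
    return Type::getVoidTy(Context);
  case 1:                                   // one output -> plain scalar
    return ResultTypes[0];
  default:                                  // many outputs -> a struct,
    return StructType::get(Context, ResultTypes);  // split by extractvalue
  }
}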
@@ -7948,12 +7948,12 @@
}
// Compute the types of the arguments to the asm call.
- std::vector<const Type*> CallArgTypes(CallOps.size());
+ std::vector<Type*> CallArgTypes(CallOps.size());
for (unsigned i = 0, e = CallOps.size(); i != e; ++i)
CallArgTypes[i] = CallOps[i]->getType();
// Get the type of the called asm "function".
- const FunctionType *FTy =
+ FunctionType *FTy =
FunctionType::get(CallResultType, CallArgTypes, false);
// Remove the leading comma if we have operands.
@@ -7980,7 +7980,7 @@
bool ValIsSigned = CallResultTypes[i].second;
Value *Dest = CallResultDests[i].first;
- const Type *DestTy = cast<PointerType>(Dest->getType())->getElementType();
+ Type *DestTy = cast<PointerType>(Dest->getType())->getElementType();
bool DestIsSigned = CallResultDests[i].second;
Val = CastToAnyType(Val, ValIsSigned, DestTy, DestIsSigned);
Builder.CreateStore(Val, Dest);
@@ -8531,7 +8531,7 @@
CallingConv::ID CallingConv;
AttrListPtr PAL;
- const Type *Ty =
+ Type *Ty =
TheTypeConverter->ConvertFunctionType(function_type,
fndecl,
gimple_call_chain(stmt),
@@ -8593,7 +8593,7 @@
unsigned Alignment = LV.getAlignment();
- const Type *ValTy = cast<PointerType>(LV.Ptr->getType())->getElementType();
+ Type *ValTy = cast<PointerType>(LV.Ptr->getType())->getElementType();
unsigned ValSizeInBits = ValTy->getPrimitiveSizeInBits();
// The number of stores needed to write the entire bitfield.
Modified: dragonegg/trunk/src/DefaultABI.cpp
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/src/DefaultABI.cpp?rev=135371&r1=135370&r2=135371&view=diff
==============================================================================
--- dragonegg/trunk/src/DefaultABI.cpp (original)
+++ dragonegg/trunk/src/DefaultABI.cpp Sun Jul 17 23:25:32 2011
@@ -102,7 +102,7 @@
ignoreZeroLength, false)
: 0;
case ARRAY_TYPE:
- const ArrayType *Ty = dyn_cast<ArrayType>(ConvertType(type));
+ ArrayType *Ty = dyn_cast<ArrayType>(ConvertType(type));
if (!Ty || Ty->getNumElements() != 1)
return 0;
return isSingleElementStructOrArray(TREE_TYPE(type), false, false);
@@ -129,7 +129,7 @@
/// handles things like returning structures via hidden parameters.
void DefaultABI::HandleReturnType(tree type, tree fn, bool isBuiltin) {
unsigned Offset = 0;
- const Type *Ty = ConvertType(type);
+ Type *Ty = ConvertType(type);
if (Ty->isVectorTy()) {
// Vector handling is weird on x86. In particular, builtin and
// non-builtin functions with the same return type can use different
@@ -154,10 +154,10 @@
} else {
// Otherwise return as an integer value large enough to hold the entire
// aggregate.
- if (const Type *AggrTy = LLVM_AGGR_TYPE_FOR_STRUCT_RETURN(type,
+ if (Type *AggrTy = LLVM_AGGR_TYPE_FOR_STRUCT_RETURN(type,
C.getCallingConv()))
C.HandleAggregateResultAsAggregate(AggrTy);
- else if (const Type* ScalarTy =
+ else if (Type* ScalarTy =
LLVM_SCALAR_TYPE_FOR_STRUCT_RETURN(type, &Offset))
C.HandleAggregateResultAsScalar(ScalarTy, Offset);
else
@@ -181,21 +181,21 @@
/// argument and invokes methods on the client that indicate how its pieces
/// should be handled. This handles things like decimating structures into
/// their fields.
-void DefaultABI::HandleArgument(tree type, std::vector<const Type*> &ScalarElts,
+void DefaultABI::HandleArgument(tree type, std::vector<Type*> &ScalarElts,
Attributes *Attributes) {
unsigned Size = 0;
bool DontCheckAlignment = false;
- const Type *Ty = ConvertType(type);
+ Type *Ty = ConvertType(type);
// Figure out if this field is zero bits wide, e.g. {} or [0 x int]. Do
// not include variable sized fields here.
- std::vector<const Type*> Elts;
+ std::vector<Type*> Elts;
if (Ty->isVoidTy()) {
// Handle void explicitly as an opaque type.
- const Type *OpTy = OpaqueType::get(getGlobalContext());
+ Type *OpTy = OpaqueType::get(getGlobalContext());
C.HandleScalarArgument(OpTy, type);
ScalarElts.push_back(OpTy);
} else if (isPassedByInvisibleReference(type)) { // variable size -> by-ref.
- const Type *PtrTy = Ty->getPointerTo();
+ Type *PtrTy = Ty->getPointerTo();
C.HandleByInvisibleReferenceArgument(PtrTy, type);
ScalarElts.push_back(PtrTy);
} else if (Ty->isVectorTy()) {
@@ -252,7 +252,7 @@
for (tree Field = TYPE_FIELDS(type); Field; Field = TREE_CHAIN(Field))
if (TREE_CODE(Field) == FIELD_DECL) {
const tree Ftype = TREE_TYPE(Field);
- const Type *FTy = ConvertType(Ftype);
+ Type *FTy = ConvertType(Ftype);
unsigned FNo = GetFieldIndex(Field, Ty);
assert(FNo < INT_MAX && "Case not handled yet!");
@@ -282,7 +282,7 @@
// Array with padding?
if (Ty->isStructTy())
Ty = cast<StructType>(Ty)->getTypeAtIndex(0U);
- const ArrayType *ATy = cast<ArrayType>(Ty);
+ ArrayType *ATy = cast<ArrayType>(Ty);
for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
C.EnterField(i, Ty);
HandleArgument(TREE_TYPE(type), ScalarElts);
@@ -294,7 +294,7 @@
}
/// HandleUnion - Handle a UNION_TYPE or QUAL_UNION_TYPE tree.
-void DefaultABI::HandleUnion(tree type, std::vector<const Type*> &ScalarElts) {
+void DefaultABI::HandleUnion(tree type, std::vector<Type*> &ScalarElts) {
if (TYPE_TRANSPARENT_AGGR(type)) {
tree Field = TYPE_FIELDS(type);
assert(Field && "Transparent union must have some elements!");
@@ -338,7 +338,7 @@
/// integer registers, convert it to a structure containing ints and pass all
/// of the struct elements in. If Size is set we pass only that many bytes.
void DefaultABI::PassInIntegerRegisters(tree type,
- std::vector<const Type*> &ScalarElts,
+ std::vector<Type*> &ScalarElts,
unsigned origSize,
bool DontCheckAlignment) {
unsigned Size;
@@ -360,8 +360,8 @@
unsigned ArraySize = Size / ElementSize;
// Put as much of the aggregate as possible into an array.
- const Type *ATy = NULL;
- const Type *ArrayElementType = NULL;
+ Type *ATy = NULL;
+ Type *ArrayElementType = NULL;
if (ArraySize) {
Size = Size % ElementSize;
ArrayElementType = (UseInt64 ?
@@ -372,7 +372,7 @@
// Pass any leftover bytes as a separate element following the array.
unsigned LastEltRealSize = 0;
- const llvm::Type *LastEltTy = 0;
+ llvm::Type *LastEltTy = 0;
if (Size > 4) {
LastEltTy = Type::getInt64Ty(getGlobalContext());
} else if (Size > 2) {
@@ -387,12 +387,12 @@
LastEltRealSize = Size;
}
- std::vector<const Type*> Elts;
+ std::vector<Type*> Elts;
if (ATy)
Elts.push_back(ATy);
if (LastEltTy)
Elts.push_back(LastEltTy);
- const StructType *STy = StructType::get(getGlobalContext(), Elts, false);
+ StructType *STy = StructType::get(getGlobalContext(), Elts, false);
unsigned i = 0;
if (ArraySize) {
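Putting the pieces of PassInIntegerRegisters together: as much of the
aggregate as possible becomes an array of register-sized integers, and any
leftover bytes become one trailing narrower element. A sketch (the i16/i8
tail cases fall outside the hunk and are filled in here as assumptions;
integerPieces is an invented name):

#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"
#include <vector>

using namespace llvm;

// Model the struct type used to pass a Size-byte aggregate in integer regs.
static StructType *integerPieces(LLVMContext &C, unsigned Size, bool UseInt64) {
  unsigned ElementSize = UseInt64 ? 8 : 4;
  Type *ElementTy = UseInt64 ? Type::getInt64Ty(C) : Type::getInt32Ty(C);
  std::vector<Type*> Elts;
  if (unsigned ArraySize = Size / ElementSize) {
    Elts.push_back(ArrayType::get(ElementTy, ArraySize));
    Size %= ElementSize;                 // bytes not covered by the array
  }
  if (Size > 4)       Elts.push_back(Type::getInt64Ty(C));
  else if (Size > 2)  Elts.push_back(Type::getInt32Ty(C));
  else if (Size > 1)  Elts.push_back(Type::getInt16Ty(C));  // assumed
  else if (Size == 1) Elts.push_back(Type::getInt8Ty(C));   // assumed
  return StructType::get(C, Elts, /*isPacked*/ false);
}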
@@ -417,23 +417,23 @@
/// PassInMixedRegisters - Given an aggregate value that should be passed in
/// mixed integer, floating point, and vector registers, convert it to a
/// structure and pass all of the specified struct elements in.
-void DefaultABI::PassInMixedRegisters(const Type *Ty,
- std::vector<const Type*> &OrigElts,
- std::vector<const Type*> &ScalarElts) {
+void DefaultABI::PassInMixedRegisters(Type *Ty,
+ std::vector<Type*> &OrigElts,
+ std::vector<Type*> &ScalarElts) {
// We use VoidTy in OrigElts to mean "this is a word in the aggregate
// that occupies storage but has no useful information, and is not passed
// anywhere". Happens on x86-64.
- std::vector<const Type*> Elts(OrigElts);
- const Type* wordType = getTargetData().getPointerSize() == 4 ?
+ std::vector<Type*> Elts(OrigElts);
+ Type* wordType = getTargetData().getPointerSize() == 4 ?
Type::getInt32Ty(getGlobalContext()) : Type::getInt64Ty(getGlobalContext());
for (unsigned i=0, e=Elts.size(); i!=e; ++i)
if (OrigElts[i]->isVoidTy())
Elts[i] = wordType;
- const StructType *STy = StructType::get(getGlobalContext(), Elts, false);
+ StructType *STy = StructType::get(getGlobalContext(), Elts, false);
unsigned Size = getTargetData().getTypeAllocSize(STy);
- const StructType *InSTy = dyn_cast<StructType>(Ty);
+ StructType *InSTy = dyn_cast<StructType>(Ty);
unsigned InSize = 0;
// If the sizes of Ty and STy do not match then the last element is
// accessing extra bits.
@@ -442,7 +442,7 @@
InSize = getTargetData().getTypeAllocSize(InSTy);
if (InSize < Size) {
unsigned N = STy->getNumElements();
- const llvm::Type *LastEltTy = STy->getElementType(N-1);
+ llvm::Type *LastEltTy = STy->getElementType(N-1);
if (LastEltTy->isIntegerTy())
LastEltSizeDiff =
getTargetData().getTypeAllocSize(LastEltTy) - (Size - InSize);
Modified: dragonegg/trunk/src/Types.cpp
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/src/Types.cpp?rev=135371&r1=135370&r2=135371&view=diff
==============================================================================
--- dragonegg/trunk/src/Types.cpp (original)
+++ dragonegg/trunk/src/Types.cpp Sun Jul 17 23:25:32 2011
@@ -59,7 +59,7 @@
// GET_TYPE_LLVM/SET_TYPE_LLVM - Associate an LLVM type with each TREE type.
// These are lazily computed by ConvertType.
-const Type *llvm_set_type(tree Tr, const Type *Ty) {
+Type *llvm_set_type(tree Tr, Type *Ty) {
assert(TYPE_P(Tr) && "Expected a gcc type!");
// Check that the LLVM and GCC types have the same size, or, if the type has
@@ -79,14 +79,14 @@
}
#endif
- return (const Type *)llvm_set_cached(Tr, Ty);
+ return (Type *)llvm_set_cached(Tr, Ty);
}
#define SET_TYPE_LLVM(NODE, TYPE) llvm_set_type(NODE, TYPE)
-const Type *llvm_get_type(tree Tr) {
+Type *llvm_get_type(tree Tr) {
assert(TYPE_P(Tr) && "Expected a gcc type!");
- return (const Type *)llvm_get_cached(Tr);
+ return (Type *)llvm_get_cached(Tr);
}
#define GET_TYPE_LLVM(NODE) llvm_get_type(NODE)
@@ -103,7 +103,7 @@
//TODO ConstantStruct *LTypesNames = cast<ConstantStruct>(GV->getOperand(0));
//TODO
//TODO for (unsigned i = 0; i < LTypesNames->getNumOperands(); ++i) {
-//TODO const Type *Ty = NULL;
+//TODO Type *Ty = NULL;
//TODO
//TODO if (ConstantArray *CA =
//TODO dyn_cast<ConstantArray>(LTypesNames->getOperand(i))) {
@@ -131,7 +131,7 @@
//TODO return;
//TODO
//TODO std::vector<Constant *> LTypesNames;
-//TODO std::map < const Type *, std::string > TypeNameMap;
+//TODO std::map < Type *, std::string > TypeNameMap;
//TODO
//TODO // Collect Type Names in advance.
//TODO const TypeSymbolTable &ST = TheModule->getTypeSymbolTable();
@@ -141,9 +141,9 @@
//TODO }
//TODO
//TODO // Populate LTypesNames vector.
-//TODO for (std::vector<const Type *>::iterator I = LTypes.begin(),
+//TODO for (std::vector<Type *>::iterator I = LTypes.begin(),
//TODO E = LTypes.end(); I != E; ++I) {
-//TODO const Type *Ty = *I;
+//TODO Type *Ty = *I;
//TODO
//TODO // Give names to nameless types.
//TODO if (Ty && TypeNameMap[Ty].empty()) {
@@ -184,7 +184,7 @@
static FunctionType *GetFunctionType(const PATypeHolder &Res,
std::vector<PATypeHolder> &ArgTys,
bool isVarArg) {
- std::vector<const Type*> ArgTysP;
+ std::vector<Type*> ArgTysP;
ArgTysP.reserve(ArgTys.size());
for (unsigned i = 0, e = ArgTys.size(); i != e; ++i)
ArgTysP.push_back(ArgTys[i]);
@@ -220,9 +220,9 @@
/// otherwise returns an array of such integers with 'NumUnits' elements. For
/// example, on a machine which has 16 bit bytes returns an i16 or an array of
/// i16.
-const Type *GetUnitType(LLVMContext &C, unsigned NumUnits) {
+Type *GetUnitType(LLVMContext &C, unsigned NumUnits) {
assert(!(BITS_PER_UNIT & 7) && "Unit size not a multiple of 8 bits!");
- const Type *UnitTy = IntegerType::get(C, BITS_PER_UNIT);
+ Type *UnitTy = IntegerType::get(C, BITS_PER_UNIT);
if (NumUnits == 1)
return UnitTy;
return ArrayType::get(UnitTy, NumUnits);
@@ -231,7 +231,7 @@
/// GetUnitPointerType - Returns an LLVM pointer type which points to memory one
/// address unit wide. For example, on a machine which has 16 bit bytes returns
/// an i16*.
-const Type *GetUnitPointerType(LLVMContext &C, unsigned AddrSpace) {
+Type *GetUnitPointerType(LLVMContext &C, unsigned AddrSpace) {
return GetUnitType(C)->getPointerTo(AddrSpace);
}
@@ -304,7 +304,7 @@
{
const OpaqueType *OldTy = cast_or_null<OpaqueType>(GET_TYPE_LLVM(old_type));
if (OldTy) {
- const Type *NewTy = ConvertType (new_type);
+ Type *NewTy = ConvertType (new_type);
const_cast<OpaqueType*>(OldTy)->refineAbstractTypeTo(NewTy);
}
}
@@ -321,17 +321,17 @@
namespace {
class TypeRefinementDatabase : public AbstractTypeUser {
virtual void refineAbstractType(const DerivedType *OldTy,
- const Type *NewTy);
+ Type *NewTy);
virtual void typeBecameConcrete(const DerivedType *AbsTy);
// TypeUsers - For each abstract LLVM type, we keep track of all of the GCC
// types that point to it.
- std::map<const Type*, std::vector<tree> > TypeUsers;
+ std::map<Type*, std::vector<tree> > TypeUsers;
public:
/// setType - call SET_TYPE_LLVM(type, Ty), associating the type with the
/// specified tree type. In addition, if the LLVM type is an abstract type,
/// we add it to our data structure to track it.
- inline const Type *setType(tree type, const Type *Ty) {
+ inline Type *setType(tree type, Type *Ty) {
if (GET_TYPE_LLVM(type))
RemoveTypeFromTable(type);
@@ -354,9 +354,9 @@
/// RemoveTypeFromTable - We're about to change the LLVM type of 'type'
///
void TypeRefinementDatabase::RemoveTypeFromTable(tree type) {
- const Type *Ty = GET_TYPE_LLVM(type);
+ Type *Ty = GET_TYPE_LLVM(type);
if (!Ty->isAbstract()) return;
- std::map<const Type*, std::vector<tree> >::iterator I = TypeUsers.find(Ty);
+ std::map<Type*, std::vector<tree> >::iterator I = TypeUsers.find(Ty);
assert(I != TypeUsers.end() && "Using an abstract type but not in table?");
bool FoundIt = false;
@@ -381,10 +381,10 @@
/// its internal state to reference NewType instead of OldType.
///
void TypeRefinementDatabase::refineAbstractType(const DerivedType *OldTy,
- const Type *NewTy) {
+ Type *NewTy) {
if (OldTy == NewTy && OldTy->isAbstract()) return; // Nothing to do.
- std::map<const Type*, std::vector<tree> >::iterator I = TypeUsers.find(OldTy);
+ std::map<Type*, std::vector<tree> >::iterator I = TypeUsers.find(OldTy);
assert(I != TypeUsers.end() && "Using an abstract type but not in table?");
if (!NewTy->isAbstract()) {
@@ -408,7 +408,7 @@
TypeUsers.erase(I);
// Next, remove OldTy's entry in the TargetData object if it has one.
- if (const StructType *STy = dyn_cast<StructType>(OldTy))
+ if (StructType *STy = dyn_cast<StructType>(OldTy))
getTargetData().InvalidateStructLayoutInfo(STy);
OldTy->removeAbstractTypeUser(this);
@@ -439,7 +439,7 @@
/// and GCC fields start in the same byte (if 'decl' is a bitfield, this means
/// that its first bit is within the byte the LLVM field starts at). Returns
/// INT_MAX if there is no such LLVM field.
-int GetFieldIndex(tree decl, const Type *Ty) {
+int GetFieldIndex(tree decl, Type *Ty) {
assert(TREE_CODE(decl) == FIELD_DECL && "Expected a FIELD_DECL!");
assert(Ty == ConvertType(DECL_CONTEXT(decl)) && "Field not for this type!");
@@ -453,7 +453,7 @@
// O(N) rather than O(N log N) if all N fields are used. It's not clear if it
// would really be a win though.
- const StructType *STy = dyn_cast<StructType>(Ty);
+ StructType *STy = dyn_cast<StructType>(Ty);
// If this is not a struct type, then for sure there is no corresponding LLVM
// field (we do not require GCC record types to be converted to LLVM structs).
if (!STy)
@@ -490,7 +490,7 @@
/// getRegType - Returns the LLVM type to use for registers that hold a value
/// of the scalar GCC type 'type'. All of the EmitReg* routines use this to
/// determine the LLVM type to return.
-const Type *getRegType(tree type) {
+Type *getRegType(tree type) {
// NOTE: Any changes made here need to be reflected in LoadRegisterFromMemory,
// StoreRegisterToMemory and ExtractRegisterFromConstant.
assert(!AGGREGATE_TYPE_P(type) && "Registers must have a scalar type!");
@@ -510,7 +510,7 @@
return IntegerType::get(Context, TYPE_PRECISION(type));
case COMPLEX_TYPE: {
- const Type *EltTy = getRegType(TREE_TYPE(type));
+ Type *EltTy = getRegType(TREE_TYPE(type));
return StructType::get(EltTy, EltTy, NULL);
}
@@ -542,7 +542,7 @@
case VECTOR_TYPE: {
// LLVM does not support vectors of pointers, so turn any pointers into
// integers.
- const Type *EltTy = POINTER_TYPE_P(TREE_TYPE(type)) ?
+ Type *EltTy = POINTER_TYPE_P(TREE_TYPE(type)) ?
getTargetData().getIntPtrType(Context) : getRegType(TREE_TYPE(type));
return VectorType::get(EltTy, TYPE_VECTOR_SUBPARTS(type));
}
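Because LLVM at this point has no vectors of pointers, pointer elements are
mapped to an intptr-sized integer before building the vector type. As a
standalone sketch (vectorRegType is an invented name):

#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"
#include "llvm/Target/TargetData.h"

using namespace llvm;

// Register type for a GCC vector: pointers become intptr-sized integers.
static Type *vectorRegType(LLVMContext &Context, const TargetData &TD,
                           Type *EltTy, unsigned NumElts, bool EltIsPointer) {
  if (EltIsPointer)
    EltTy = TD.getIntPtrType(Context);
  return VectorType::get(EltTy, NumElts);
}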
@@ -552,7 +552,7 @@
/// getPointerToType - Returns the LLVM register type to use for a pointer to
/// the given GCC type.
-const Type *getPointerToType(tree type) {
+Type *getPointerToType(tree type) {
if (VOID_TYPE_P(type))
// void* -> byte*
return GetUnitPointerType(Context);
@@ -560,12 +560,12 @@
return ConvertType(type)->getPointerTo();
}
-const Type *TypeConverter::ConvertType(tree type) {
+Type *TypeConverter::ConvertType(tree type) {
if (type == error_mark_node) return Type::getInt32Ty(Context);
// LLVM doesn't care about variants such as const, volatile, or restrict.
type = TYPE_MAIN_VARIANT(type);
- const Type *Ty;
+ Type *Ty;
switch (TREE_CODE(type)) {
default:
@@ -643,7 +643,7 @@
case POINTER_TYPE:
case REFERENCE_TYPE:
- if (const PointerType *PTy = cast_or_null<PointerType>(GET_TYPE_LLVM(type))){
+ if (PointerType *PTy = cast_or_null<PointerType>(GET_TYPE_LLVM(type))){
// We already converted this type. If this isn't a case where we have to
// reparse it, just return it.
if (PointersToReresolve.empty() || PointersToReresolve.back() != type ||
@@ -668,7 +668,7 @@
ConvertingStruct = true;
// Note that we know that PTy cannot be resolved or invalidated here.
- const Type *Actual = ConvertType(TREE_TYPE(type));
+ Type *Actual = ConvertType(TREE_TYPE(type));
assert(GET_TYPE_LLVM(type) == PTy && "Pointer invalidated!");
// Restore ConvertingStruct for the caller.
@@ -732,7 +732,7 @@
if ((Ty = GET_TYPE_LLVM(type)))
return Ty;
- const Type *ElementTy = ConvertType(TREE_TYPE(type));
+ Type *ElementTy = ConvertType(TREE_TYPE(type));
uint64_t NumElements = ArrayLengthOf(type);
if (NumElements == NO_LENGTH) // Variable length array?
@@ -751,7 +751,7 @@
uint64_t PadBits = getInt64(TYPE_SIZE(type), true) -
getTargetData().getTypeAllocSizeInBits(Ty);
if (PadBits) {
- const Type *Padding = ArrayType::get(Type::getInt8Ty(Context), PadBits / 8);
+ Type *Padding = ArrayType::get(Type::getInt8Ty(Context), PadBits / 8);
Ty = StructType::get(Ty, Padding, NULL);
}
}
@@ -807,26 +807,26 @@
/// HandleScalarResult - This callback is invoked if the function returns a
/// simple scalar result value.
- void HandleScalarResult(const Type *RetTy) {
+ void HandleScalarResult(Type *RetTy) {
this->RetTy = RetTy;
}
/// HandleAggregateResultAsScalar - This callback is invoked if the function
/// returns an aggregate value by bit converting it to the specified scalar
/// type and returning that.
- void HandleAggregateResultAsScalar(const Type *ScalarTy, unsigned Offset=0) {
+ void HandleAggregateResultAsScalar(Type *ScalarTy, unsigned Offset=0) {
RetTy = ScalarTy;
this->Offset = Offset;
}
/// HandleAggregateResultAsAggregate - This callback is invoked if the function
/// returns an aggregate value using multiple return values.
- void HandleAggregateResultAsAggregate(const Type *AggrTy) {
+ void HandleAggregateResultAsAggregate(Type *AggrTy) {
RetTy = AggrTy;
}
/// HandleShadowResult - Handle an aggregate or scalar shadow argument.
- void HandleShadowResult(const PointerType *PtrArgTy, bool RetPtr) {
+ void HandleShadowResult(PointerType *PtrArgTy, bool RetPtr) {
// This function either returns void or the shadow argument,
// depending on the target.
RetTy = RetPtr ? PtrArgTy : Type::getVoidTy(Context);
@@ -842,7 +842,7 @@
/// returns an aggregate value by using a "shadow" first parameter, which is
/// a pointer to the aggregate, of type PtrArgTy. If RetPtr is set to true,
/// the pointer argument itself is returned from the function.
- void HandleAggregateShadowResult(const PointerType *PtrArgTy,
+ void HandleAggregateShadowResult(PointerType *PtrArgTy,
bool RetPtr) {
HandleShadowResult(PtrArgTy, RetPtr);
}
@@ -851,15 +851,15 @@
/// returns a scalar value by using a "shadow" first parameter, which is a
/// pointer to the scalar, of type PtrArgTy. If RetPtr is set to true,
/// the pointer argument itself is returned from the function.
- void HandleScalarShadowResult(const PointerType *PtrArgTy, bool RetPtr) {
+ void HandleScalarShadowResult(PointerType *PtrArgTy, bool RetPtr) {
HandleShadowResult(PtrArgTy, RetPtr);
}
- void HandlePad(const llvm::Type *LLVMTy) {
+ void HandlePad(llvm::Type *LLVMTy) {
HandleScalarArgument(LLVMTy, 0, 0);
}
- void HandleScalarArgument(const llvm::Type *LLVMTy, tree type,
+ void HandleScalarArgument(llvm::Type *LLVMTy, tree type,
unsigned /*RealSize*/ = 0) {
if (KNRPromotion) {
if (type == float_type_node)
@@ -873,7 +873,7 @@
/// HandleByInvisibleReferenceArgument - This callback is invoked if a pointer
/// (of type PtrTy) to the argument is passed rather than the argument itself.
- void HandleByInvisibleReferenceArgument(const llvm::Type *PtrTy,
+ void HandleByInvisibleReferenceArgument(llvm::Type *PtrTy,
tree /*type*/) {
ArgTypes.push_back(PtrTy);
}
@@ -881,13 +881,13 @@
/// HandleByValArgument - This callback is invoked if the aggregate function
/// argument is passed by value. It is lowered to a parameter passed by
/// reference with an additional parameter attribute "ByVal".
- void HandleByValArgument(const llvm::Type *LLVMTy, tree type) {
+ void HandleByValArgument(llvm::Type *LLVMTy, tree type) {
HandleScalarArgument(LLVMTy->getPointerTo(), type);
}
/// HandleFCAArgument - This callback is invoked if the aggregate function
/// argument is a first class aggregate passed by value.
- void HandleFCAArgument(const llvm::Type *LLVMTy, tree /*type*/) {
+ void HandleFCAArgument(llvm::Type *LLVMTy, tree /*type*/) {
ArgTypes.push_back(LLVMTy);
}
};
@@ -915,7 +915,7 @@
/// for the function. This method takes the DECL_ARGUMENTS list (Args), and
/// fills in Result with the argument types for the function. It returns the
/// specified result type for the function.
-const FunctionType *TypeConverter::
+FunctionType *TypeConverter::
ConvertArgListToFnType(tree type, tree Args, tree static_chain,
CallingConv::ID &CallingConv, AttrListPtr &PAL) {
tree ReturnType = TREE_TYPE(type);
@@ -951,7 +951,7 @@
Attrs.push_back(AttributeWithIndex::get(ArgTys.size(),
Attribute::StructRet | Attribute::NoAlias));
- std::vector<const Type*> ScalarArgs;
+ std::vector<Type*> ScalarArgs;
if (static_chain) {
// Pass the static chain as the first parameter.
ABIConverter.HandleArgument(TREE_TYPE(static_chain), ScalarArgs);
@@ -979,7 +979,7 @@
return GetFunctionType(RetTy, ArgTys, false);
}
-const FunctionType *TypeConverter::
+FunctionType *TypeConverter::
ConvertFunctionType(tree type, tree decl, tree static_chain,
CallingConv::ID &CallingConv, AttrListPtr &PAL) {
PATypeHolder RetTy = Type::getVoidTy(Context);
@@ -1059,7 +1059,7 @@
Attrs.push_back(AttributeWithIndex::get(ArgTypes.size(),
Attribute::StructRet | Attribute::NoAlias));
- std::vector<const Type*> ScalarArgs;
+ std::vector<Type*> ScalarArgs;
if (static_chain) {
// Pass the static chain as the first parameter.
ABIConverter.HandleArgument(TREE_TYPE(static_chain), ScalarArgs);
@@ -1174,7 +1174,7 @@
/// StructTypeConversionInfo - A temporary structure that is used when
/// translating a RECORD_TYPE to an LLVM type.
struct StructTypeConversionInfo {
- std::vector<const Type*> Elements;
+ std::vector<Type*> Elements;
std::vector<uint64_t> ElementOffsetInBytes;
std::vector<uint64_t> ElementSizeInBytes;
std::vector<bool> PaddingElement; // True if field is used for padding
@@ -1217,19 +1217,19 @@
/// getTypeAlignment - Return the alignment of the specified type in bytes.
///
- unsigned getTypeAlignment(const Type *Ty) const {
+ unsigned getTypeAlignment(Type *Ty) const {
return Packed ? 1 : TD.getABITypeAlignment(Ty);
}
/// getTypeSize - Return the size of the specified type in bytes.
///
- uint64_t getTypeSize(const Type *Ty) const {
+ uint64_t getTypeSize(Type *Ty) const {
return TD.getTypeAllocSize(Ty);
}
/// getLLVMType - Return the LLVM type for the specified object.
///
- const Type *getLLVMType() const {
+ Type *getLLVMType() const {
// Use Packed type if Packed is set or all struct fields are bitfields.
// Empty struct is not packed unless packed is set.
return StructType::get(Context, Elements,
@@ -1268,7 +1268,7 @@
if (NoOfBytesToRemove == 0)
return;
- const Type *LastType = Elements.back();
+ Type *LastType = Elements.back();
unsigned PadBytes = 0;
if (LastType->isIntegerTy(8))
@@ -1285,7 +1285,7 @@
assert (PadBytes > 0 && "Unable to remove extra bytes");
// Update last element type and size, element offset is unchanged.
- const Type *Pad = ArrayType::get(Type::getInt8Ty(Context), PadBytes);
+ Type *Pad = ArrayType::get(Type::getInt8Ty(Context), PadBytes);
unsigned OriginalSize = ElementSizeInBytes.back();
Elements.pop_back();
Elements.push_back(Pad);
@@ -1299,8 +1299,8 @@
/// layout is sized properly. Return false if unable to handle ByteOffset.
/// In this case caller should redo this struct as a packed structure.
bool ResizeLastElementIfOverlapsWith(uint64_t ByteOffset, tree /*Field*/,
- const Type *Ty) {
- const Type *SavedTy = NULL;
+ Type *Ty) {
+ Type *SavedTy = NULL;
if (!Elements.empty()) {
assert(ElementOffsetInBytes.back() <= ByteOffset &&
@@ -1320,7 +1320,7 @@
// field we just popped. Otherwise we might end up with a
// gcc non-bitfield being mapped to an LLVM field with a
// different offset.
- const Type *Pad = Type::getInt8Ty(Context);
+ Type *Pad = Type::getInt8Ty(Context);
if (PoppedOffset != EndOffset + 1)
Pad = ArrayType::get(Pad, PoppedOffset - EndOffset);
addElement(Pad, EndOffset, PoppedOffset - EndOffset);
@@ -1343,7 +1343,7 @@
// padding.
if (NextByteOffset < ByteOffset) {
uint64_t CurOffset = getNewElementByteOffset(1);
- const Type *Pad = Type::getInt8Ty(Context);
+ Type *Pad = Type::getInt8Ty(Context);
if (SavedTy && LastFieldStartsAtNonByteBoundry)
// We want to reuse SavedType to access this bit field.
// e.g. struct __attribute__((packed)) {
@@ -1384,7 +1384,7 @@
/// addElement - Add an element to the structure with the specified type,
/// offset and size.
- void addElement(const Type *Ty, uint64_t Offset, uint64_t Size,
+ void addElement(Type *Ty, uint64_t Offset, uint64_t Size,
bool ExtraPadding = false) {
Elements.push_back(Ty);
ElementOffsetInBytes.push_back(Offset);
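A hypothetical call sequence (not from the patch) showing how addElement
records fields and padding in the parallel vectors, e.g. when laying out
struct { char c; int i; }:
    Info.addElement(Type::getInt8Ty(Context), 0, 1);           // 'c' at byte 0
    Type *Pad = ArrayType::get(Type::getInt8Ty(Context), 3);
    Info.addElement(Pad, 1, 3, /*ExtraPadding=*/true);         // bytes 1-3: pad
    Info.addElement(Type::getInt32Ty(Context), 4, 4);          // 'i' at byte 4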
@@ -1426,7 +1426,7 @@
// Figure out the LLVM type that we will use for the new field.
// Note, Size is not necessarily the size of the new field. It indicates the
// additional bits required after FirstUnallocatedByte to cover the new field.
- const Type *NewFieldTy = 0;
+ Type *NewFieldTy = 0;
// First try an ABI-aligned field including (some of) the Extra bits.
// This field must satisfy Size <= w && w <= XSize.
@@ -1506,7 +1506,7 @@
// If Field has user defined alignment and it does not match Ty alignment
// then convert to a packed struct and try again.
if (TYPE_USER_ALIGN(TREE_TYPE(Field))) {
- const Type *Ty = ConvertType(TREE_TYPE(Field));
+ Type *Ty = ConvertType(TREE_TYPE(Field));
if (TYPE_ALIGN(TREE_TYPE(Field)) !=
8 * Info.getTypeAlignment(Ty))
return false;
@@ -1523,7 +1523,7 @@
assert((StartOffsetInBits & 7) == 0 && "Non-bit-field has non-byte offset!");
uint64_t StartOffsetInBytes = StartOffsetInBits/8;
- const Type *Ty = ConvertType(TREE_TYPE(Field));
+ Type *Ty = ConvertType(TREE_TYPE(Field));
// If this field is packed then the struct may need padding fields
// before this field.
@@ -1676,7 +1676,7 @@
PadBytes = StartOffsetInBits/8-FirstUnallocatedByte;
if (PadBytes) {
- const Type *Pad = Type::getInt8Ty(Context);
+ Type *Pad = Type::getInt8Ty(Context);
if (PadBytes != 1)
Pad = ArrayType::get(Pad, PadBytes);
Info.addElement(Pad, FirstUnallocatedByte, PadBytes);
@@ -1714,7 +1714,7 @@
StructTypeConversionInfo &Info) {
bool FindBiggest = TREE_CODE(type) != QUAL_UNION_TYPE;
- const Type *UnionTy = 0;
+ Type *UnionTy = 0;
tree GccUnionTy = 0;
tree UnionField = 0;
unsigned MinAlign = ~0U;
@@ -1737,7 +1737,7 @@
integer_zerop(DECL_SIZE(Field)))
continue;
- const Type *TheTy = ConvertType(TheGccTy);
+ Type *TheTy = ConvertType(TheGccTy);
unsigned Align = Info.getTypeAlignment(TheTy);
uint64_t Size = Info.getTypeSize(TheTy);
@@ -1801,8 +1801,8 @@
// For LLVM purposes, we build a new type for B-within-D that
// has the correct size and layout for that usage.
-const Type *TypeConverter::ConvertRECORD(tree type) {
- if (const Type *Ty = GET_TYPE_LLVM(type)) {
+Type *TypeConverter::ConvertRECORD(tree type) {
+ if (Type *Ty = GET_TYPE_LLVM(type)) {
// If we already compiled this type, and if it was not a forward
// definition that is now defined, use the old type.
if (!Ty->isOpaqueTy() || TYPE_SIZE(type) == 0)
@@ -1810,7 +1810,7 @@
}
if (TYPE_SIZE(type) == 0) { // Forward declaration?
- const Type *Ty = OpaqueType::get(Context);
+ Type *Ty = OpaqueType::get(Context);
return TypeDB.setType(type, Ty);
}
@@ -1890,12 +1890,12 @@
Info->getTypeAlignment(Type::getInt32Ty(Context))) == 0) {
// Insert array of i32.
unsigned Int32ArraySize = (GCCTypeSize-LLVMStructSize) / 4;
- const Type *PadTy =
+ Type *PadTy =
ArrayType::get(Type::getInt32Ty(Context), Int32ArraySize);
Info->addElement(PadTy, GCCTypeSize - LLVMLastElementEnd,
Int32ArraySize, true /* Padding Element */);
} else {
- const Type *PadTy = ArrayType::get(Type::getInt8Ty(Context),
+ Type *PadTy = ArrayType::get(Type::getInt8Ty(Context),
GCCTypeSize-LLVMStructSize);
Info->addElement(PadTy, GCCTypeSize - LLVMLastElementEnd,
GCCTypeSize - LLVMLastElementEnd,
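The two branches above choose the granularity of the tail pad. A sketch with
assumed sizes (8 pad bytes needed):
    // If the gap is 32-bit aligned the converter prefers an i32 array:
    Type *PadTy = ArrayType::get(Type::getInt32Ty(Context), 2);    // [2 x i32]
    // ... otherwise the byte-granular branch falls back to raw bytes:
    Type *BytePadTy = ArrayType::get(Type::getInt8Ty(Context), 8); // [8 x i8]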
@@ -1906,7 +1906,7 @@
} else
Info->RemoveExtraBytes();
- const Type *ResultTy = Info->getLLVMType();
+ Type *ResultTy = Info->getLLVMType();
const OpaqueType *OldTy = cast_or_null<OpaqueType>(GET_TYPE_LLVM(type));
TypeDB.setType(type, ResultTy);
Modified: dragonegg/trunk/src/x86/Target.cpp
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/src/x86/Target.cpp?rev=135371&r1=135370&r2=135371&view=diff
==============================================================================
--- dragonegg/trunk/src/x86/Target.cpp (original)
+++ dragonegg/trunk/src/x86/Target.cpp Sun Jul 17 23:25:32 2011
@@ -51,9 +51,9 @@
/// BitCastToIntVector - Bitcast the vector operand to a vector of integers of
/// the same length.
static Value *BitCastToIntVector(Value *Op, LLVMBuilder &Builder) {
- const VectorType *VecTy = cast<VectorType>(Op->getType());
- const Type *EltTy = VecTy->getElementType();
- const Type *IntTy = IntegerType::get(Context,EltTy->getPrimitiveSizeInBits());
+ VectorType *VecTy = cast<VectorType>(Op->getType());
+ Type *EltTy = VecTy->getElementType();
+ Type *IntTy = IntegerType::get(Context,EltTy->getPrimitiveSizeInBits());
return Builder.CreateBitCast(Op, VectorType::get(IntTy,
VecTy->getNumElements()));
}
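A usage sketch of the helper above, with an assumed <4 x float> operand:
    // Op has type <4 x float>; EltTy is float (32 bits), so IntTy is i32 and
    // the returned value has type <4 x i32>, ready for lane-wise bit tricks.
    Value *IntVec = BitCastToIntVector(Op, Builder);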
@@ -83,7 +83,7 @@
tree fndecl,
const MemRef * /*DestLoc*/,
Value *&Result,
- const Type *ResultType,
+ Type *ResultType,
std::vector<Value*> &Ops) {
// DECL_FUNCTION_CODE contains a value of the enumerated type ix86_builtins,
// declared in i386.c. If this type was visible to us then we could simply
@@ -379,7 +379,7 @@
return true;
}
//TODO IX86_BUILTIN_LOADQ: {
-//TODO const PointerType *i64Ptr = Type::getInt64PtrTy(Context);
+//TODO PointerType *i64Ptr = Type::getInt64PtrTy(Context);
//TODO Ops[0] = Builder.CreateBitCast(Ops[0], i64Ptr);
//TODO Ops[0] = Builder.CreateLoad(Ops[0]);
//TODO Value *Zero = ConstantInt::get(Type::getInt64Ty(Context), 0);
@@ -391,7 +391,7 @@
//TODO }
case loadups: {
VectorType *v4f32 = VectorType::get(Type::getFloatTy(Context), 4);
- const PointerType *v4f32Ptr = v4f32->getPointerTo();
+ PointerType *v4f32Ptr = v4f32->getPointerTo();
Value *BC = Builder.CreateBitCast(Ops[0], v4f32Ptr);
LoadInst *LI = Builder.CreateLoad(BC);
LI->setAlignment(1);
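This case and the loadupd/loaddqu cases that follow all share one shape:
bitcast the pointer to the vector type, load, and relax the alignment to 1.
A minimal sketch of the common pattern as a hypothetical helper
(EmitUnalignedLoad is invented, not in the patch):
    static Value *EmitUnalignedLoad(Value *Ptr, VectorType *VecTy,
                                    LLVMBuilder &Builder) {
      Value *BC = Builder.CreateBitCast(Ptr, VecTy->getPointerTo());
      LoadInst *LI = Builder.CreateLoad(BC);
      LI->setAlignment(1);  // movups/movupd/movdqu tolerate any alignment
      return LI;
    }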
@@ -400,7 +400,7 @@
}
case loadupd: {
VectorType *v2f64 = VectorType::get(Type::getDoubleTy(Context), 2);
- const PointerType *v2f64Ptr = v2f64->getPointerTo();
+ PointerType *v2f64Ptr = v2f64->getPointerTo();
Value *BC = Builder.CreateBitCast(Ops[0], v2f64Ptr);
LoadInst *LI = Builder.CreateLoad(BC);
LI->setAlignment(1);
@@ -409,7 +409,7 @@
}
case loaddqu: {
VectorType *v16i8 = VectorType::get(Type::getInt8Ty(Context), 16);
- const PointerType *v16i8Ptr = v16i8->getPointerTo();
+ PointerType *v16i8Ptr = v16i8->getPointerTo();
Value *BC = Builder.CreateBitCast(Ops[0], v16i8Ptr);
LoadInst *LI = Builder.CreateLoad(BC);
LI->setAlignment(1);
@@ -418,7 +418,7 @@
}
case storeups: {
VectorType *v4f32 = VectorType::get(Type::getFloatTy(Context), 4);
- const PointerType *v4f32Ptr = v4f32->getPointerTo();
+ PointerType *v4f32Ptr = v4f32->getPointerTo();
Value *BC = Builder.CreateBitCast(Ops[0], v4f32Ptr);
StoreInst *SI = Builder.CreateStore(Ops[1], BC);
SI->setAlignment(1);
@@ -426,7 +426,7 @@
}
case storeupd: {
VectorType *v2f64 = VectorType::get(Type::getDoubleTy(Context), 2);
- const PointerType *v2f64Ptr = v2f64->getPointerTo();
+ PointerType *v2f64Ptr = v2f64->getPointerTo();
Value *BC = Builder.CreateBitCast(Ops[0], v2f64Ptr);
StoreInst *SI = Builder.CreateStore(Ops[1], BC);
SI->setAlignment(1);
@@ -434,14 +434,14 @@
}
case storedqu: {
VectorType *v16i8 = VectorType::get(Type::getInt8Ty(Context), 16);
- const PointerType *v16i8Ptr = v16i8->getPointerTo();
+ PointerType *v16i8Ptr = v16i8->getPointerTo();
Value *BC = Builder.CreateBitCast(Ops[0], v16i8Ptr);
StoreInst *SI = Builder.CreateStore(Ops[1], BC);
SI->setAlignment(1);
return true;
}
case loadhps: {
- const PointerType *f64Ptr = Type::getDoublePtrTy(Context);
+ PointerType *f64Ptr = Type::getDoublePtrTy(Context);
Ops[1] = Builder.CreateBitCast(Ops[1], f64Ptr);
Value *Load = Builder.CreateLoad(Ops[1]);
Ops[1] = BuildVector(Load, UndefValue::get(Type::getDoubleTy(Context)), NULL);
@@ -451,7 +451,7 @@
return true;
}
case loadlps: {
- const PointerType *f64Ptr = Type::getDoublePtrTy(Context);
+ PointerType *f64Ptr = Type::getDoublePtrTy(Context);
Ops[1] = Builder.CreateBitCast(Ops[1], f64Ptr);
Value *Load = Builder.CreateLoad(Ops[1]);
Ops[1] = BuildVector(Load, UndefValue::get(Type::getDoubleTy(Context)), NULL);
@@ -478,7 +478,7 @@
}
case storehps: {
VectorType *v2f64 = VectorType::get(Type::getDoubleTy(Context), 2);
- const PointerType *f64Ptr = Type::getDoublePtrTy(Context);
+ PointerType *f64Ptr = Type::getDoublePtrTy(Context);
Ops[0] = Builder.CreateBitCast(Ops[0], f64Ptr);
Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), 1);
Ops[1] = Builder.CreateBitCast(Ops[1], v2f64);
@@ -488,7 +488,7 @@
}
case storelps: {
VectorType *v2f64 = VectorType::get(Type::getDoubleTy(Context), 2);
- const PointerType *f64Ptr = Type::getDoublePtrTy(Context);
+ PointerType *f64Ptr = Type::getDoublePtrTy(Context);
Ops[0] = Builder.CreateBitCast(Ops[0], f64Ptr);
Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), 0);
Ops[1] = Builder.CreateBitCast(Ops[1], v2f64);
@@ -661,9 +661,9 @@
// If palignr is shifting the pair of input vectors less than 9 bytes,
// emit a shuffle instruction.
if (shiftVal <= 8) {
- const Type *IntTy = Type::getInt32Ty(Context);
- const Type *EltTy = Type::getInt8Ty(Context);
- const Type *VecTy = VectorType::get(EltTy, 8);
+ Type *IntTy = Type::getInt32Ty(Context);
+ Type *EltTy = Type::getInt8Ty(Context);
+ Type *VecTy = VectorType::get(EltTy, 8);
Ops[1] = Builder.CreateBitCast(Ops[1], VecTy);
Ops[0] = Builder.CreateBitCast(Ops[0], VecTy);
@@ -681,8 +681,8 @@
// than 16 bytes, emit a logical right shift of the destination.
if (shiftVal < 16) {
// MMX has these as 1 x i64 vectors for some odd optimization reasons.
- const Type *EltTy = Type::getInt64Ty(Context);
- const Type *VecTy = VectorType::get(EltTy, 1);
+ Type *EltTy = Type::getInt64Ty(Context);
+ Type *VecTy = VectorType::get(EltTy, 1);
Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
Ops[1] = ConstantInt::get(VecTy, (shiftVal-8) * 8);
@@ -714,9 +714,9 @@
// If palignr is shifting the pair of input vectors less than 17 bytes,
// emit a shuffle instruction.
if (shiftVal <= 16) {
- const Type *IntTy = Type::getInt32Ty(Context);
- const Type *EltTy = Type::getInt8Ty(Context);
- const Type *VecTy = VectorType::get(EltTy, 16);
+ Type *IntTy = Type::getInt32Ty(Context);
+ Type *EltTy = Type::getInt8Ty(Context);
+ Type *VecTy = VectorType::get(EltTy, 16);
Ops[1] = Builder.CreateBitCast(Ops[1], VecTy);
Ops[0] = Builder.CreateBitCast(Ops[0], VecTy);
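The mask construction falls outside the hunk, but the lowering amounts to:
byte i of the result is byte (shiftVal + i) of the concatenation
Ops[1]:Ops[0]. A hedged sketch of what that shuffle could look like for the
16-byte case (assumed continuation, not shown in the diff):
    std::vector<Constant*> Indices;
    for (unsigned i = 0; i != 16; ++i)
      Indices.push_back(ConstantInt::get(IntTy, shiftVal + i));
    Value *Mask = ConstantVector::get(Indices);
    Result = Builder.CreateShuffleVector(Ops[1], Ops[0], Mask, "palignr");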
@@ -733,9 +733,9 @@
// If palignr is shifting the pair of input vectors more than 16 but less
// than 32 bytes, emit a logical right shift of the destination.
if (shiftVal < 32) {
- const Type *EltTy = Type::getInt64Ty(Context);
- const Type *VecTy = VectorType::get(EltTy, 2);
- const Type *IntTy = Type::getInt32Ty(Context);
+ Type *EltTy = Type::getInt64Ty(Context);
+ Type *VecTy = VectorType::get(EltTy, 2);
+ Type *IntTy = Type::getInt32Ty(Context);
Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
Ops[1] = ConstantInt::get(IntTy, (shiftVal-16) * 8);
@@ -788,9 +788,9 @@
// As rsqrtss is declared as taking a <4 x float> operand, mulch the operand
// into a vector.
Value *X = Ops[0];
- const Type *FloatTy = Type::getFloatTy(Context);
+ Type *FloatTy = Type::getFloatTy(Context);
Value *AsFloat = Builder.CreateFPTrunc(X, FloatTy);
- const Type *V4SFTy = VectorType::get(FloatTy, 4);
+ Type *V4SFTy = VectorType::get(FloatTy, 4);
Value *AsVec = Builder.CreateInsertElement(UndefValue::get(V4SFTy), AsFloat,
Builder.getInt32(0));
// Take the reciprocal square root of the vector and mulch it back into a
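The rest of this pattern is cut off by the hunk boundary: the <4 x float>
intrinsic runs on the widened vector, then lane 0 is pulled back out. A hedged
sketch, assuming dragonegg's usual TheModule global and the standard
intrinsic-declaration helper:
    Function *RSqrt = Intrinsic::getDeclaration(TheModule,
                                                Intrinsic::x86_sse_rsqrt_ss);
    Value *AsVec2 = Builder.CreateCall(RSqrt, AsVec);
    Value *Lane0 = Builder.CreateExtractElement(AsVec2, Builder.getInt32(0));
    Result = Builder.CreateFPExt(Lane0, X->getType());  // back to original FP type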
@@ -854,10 +854,10 @@
}
/* Returns true if all elements of the type are integer or pointer types. */
-static bool llvm_x86_is_all_integer_types(const Type *Ty) {
+static bool llvm_x86_is_all_integer_types(Type *Ty) {
for (Type::subtype_iterator I = Ty->subtype_begin(), E = Ty->subtype_end();
I != E; ++I) {
- const Type *STy = I->get();
+ Type *STy = I->get();
if (!STy->isIntOrIntVectorTy() && !STy->isPointerTy())
return false;
}
@@ -869,8 +869,8 @@
It also returns a vector of types that correspond to the registers used
for parameter passing. This is only called for x86-32. */
bool
-llvm_x86_32_should_pass_aggregate_in_mixed_regs(tree TreeType, const Type *Ty,
- std::vector<const Type*> &Elts){
+llvm_x86_32_should_pass_aggregate_in_mixed_regs(tree TreeType, Type *Ty,
+ std::vector<Type*> &Elts){
// If this is a small fixed size type, investigate it.
HOST_WIDE_INT SrcSize = int_size_in_bytes(TreeType);
if (SrcSize <= 0 || SrcSize > 16)
@@ -882,11 +882,11 @@
// Note that we can't support passing all structs this way. For example,
// {i16, i16} should be passed in one 32-bit unit, which is not how "i16, i16"
// would be passed as stand-alone arguments.
- const StructType *STy = dyn_cast<StructType>(Ty);
+ StructType *STy = dyn_cast<StructType>(Ty);
if (!STy || STy->isPacked()) return false;
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- const Type *EltTy = STy->getElementType(i);
+ Type *EltTy = STy->getElementType(i);
// 32 and 64-bit integers are fine, as are float and double. Long double
// (which can be picked as the type for a union of 16 bytes) is not fine,
// as loads and stores of it get only 10 bytes.
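Call-site sketch (caller shape assumed) for the hook above; on success Elts
describes the pieces, one register-sized LLVM type per slot:
    std::vector<Type*> Elts;
    if (llvm_x86_32_should_pass_aggregate_in_mixed_regs(TreeType, Ty, Elts)) {
      // e.g. for struct { int a; float b; } one would expect
      // Elts == { i32, float }, each piece passed as a stand-alone argument.
    }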
@@ -911,16 +911,16 @@
/* It returns true if an aggregate of the specified type should be passed as a
first class aggregate. */
-bool llvm_x86_should_pass_aggregate_as_fca(tree type, const Type *Ty) {
+bool llvm_x86_should_pass_aggregate_as_fca(tree type, Type *Ty) {
if (TREE_CODE(type) != COMPLEX_TYPE)
return false;
- const StructType *STy = dyn_cast<StructType>(Ty);
+ StructType *STy = dyn_cast<StructType>(Ty);
if (!STy || STy->isPacked()) return false;
// FIXME: Currently codegen isn't lowering most _Complex types in a way that
// makes it ABI compatible for x86-64. Same for _Complex char and _Complex
// short in 32-bit.
- const Type *EltTy = STy->getElementType(0);
+ Type *EltTy = STy->getElementType(0);
return !((TARGET_64BIT && (EltTy->isIntegerTy() ||
EltTy == Type::getFloatTy(Context) ||
EltTy == Type::getDoubleTy(Context))) ||
@@ -929,7 +929,7 @@
/* Target hook for llvm-abi.h. It returns true if an aggregate of the
specified type should be passed in memory. */
-bool llvm_x86_should_pass_aggregate_in_memory(tree TreeType, const Type *Ty) {
+bool llvm_x86_should_pass_aggregate_in_memory(tree TreeType, Type *Ty) {
if (llvm_x86_should_pass_aggregate_as_fca(TreeType, Ty))
return false;
@@ -942,7 +942,7 @@
return false;
if (!TARGET_64BIT) {
- std::vector<const Type*> Elts;
+ std::vector<Type*> Elts;
return !llvm_x86_32_should_pass_aggregate_in_mixed_regs(TreeType, Ty, Elts);
}
return llvm_x86_64_should_pass_aggregate_in_memory(TreeType, Mode);
@@ -950,11 +950,11 @@
/* count_num_registers_uses - Return the number of GPR and XMM parameter
registers used so far. The caller is responsible for initializing the outputs. */
-static void count_num_registers_uses(std::vector<const Type*> &ScalarElts,
+static void count_num_registers_uses(std::vector<Type*> &ScalarElts,
unsigned &NumGPRs, unsigned &NumXMMs) {
for (unsigned i = 0, e = ScalarElts.size(); i != e; ++i) {
- const Type *Ty = ScalarElts[i];
- if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
+ Type *Ty = ScalarElts[i];
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
if (!TARGET_MACHO)
continue;
if (VTy->getNumElements() == 1)
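Per its comment, the counts are accumulated into the output parameters, so a
caller zeroes them first. Usage sketch:
    unsigned NumGPRs = 0, NumXMMs = 0;   // caller initializes the outputs
    count_num_registers_uses(ScalarElts, NumGPRs, NumXMMs);
    // NumGPRs/NumXMMs now reflect the registers consumed by ScalarElts.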
@@ -984,8 +984,8 @@
part of the aggregate, return true. That means the aggregate should instead
be passed in memory. */
bool
-llvm_x86_64_aggregate_partially_passed_in_regs(std::vector<const Type*> &Elts,
- std::vector<const Type*> &ScalarElts,
+llvm_x86_64_aggregate_partially_passed_in_regs(std::vector<Type*> &Elts,
+ std::vector<Type*> &ScalarElts,
bool isShadowReturn) {
// Counting number of GPRs and XMMs used so far. According to AMD64 ABI
// document: "If there are no registers available for any eightbyte of an
@@ -1031,8 +1031,8 @@
It also returns a vector of types that correspond to the registers used
for parameter passing. This is only called for x86-64. */
bool
-llvm_x86_64_should_pass_aggregate_in_mixed_regs(tree TreeType, const Type *Ty,
- std::vector<const Type*> &Elts){
+llvm_x86_64_should_pass_aggregate_in_mixed_regs(tree TreeType, Type *Ty,
+ std::vector<Type*> &Elts){
if (llvm_x86_should_pass_aggregate_as_fca(TreeType, Ty))
return false;
@@ -1077,12 +1077,12 @@
assert(0 && "Not yet handled!");
} else if ((NumClasses-i) == 2) {
if (Class[i+1] == X86_64_SSEUP_CLASS) {
- const Type *Ty = ConvertType(TreeType);
- if (const StructType *STy = dyn_cast<StructType>(Ty))
+ Type *Ty = ConvertType(TreeType);
+ if (StructType *STy = dyn_cast<StructType>(Ty))
// Look past the struct wrapper.
if (STy->getNumElements() == 1)
Ty = STy->getElementType(0);
- if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
if (VTy->getNumElements() == 2) {
if (VTy->getElementType()->isIntegerTy()) {
Elts.push_back(VectorType::get(Type::getInt64Ty(Context), 2));
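Concretely (an assumed example, not from the patch): a single-field struct
wrapping a 16-byte vector classifies as SSE + SSEUP eightbytes, the wrapper is
stripped, and an integer element type contributes <2 x i64>:
    // Hypothetical user code reaching this branch (GCC vector extension):
    typedef long long v2di __attribute__((vector_size(16)));
    struct V { v2di v; };  // ConvertType gives { <2 x i64> }; the one-element
                           // wrapper is stripped and <2 x i64> lands in Elts.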
@@ -1267,13 +1267,13 @@
// llvm_suitable_multiple_ret_value_type - Return TRUE if a return value
// of type TY should be returned using a multiple-value return instruction.
-static bool llvm_suitable_multiple_ret_value_type(const Type *Ty,
+static bool llvm_suitable_multiple_ret_value_type(Type *Ty,
tree TreeType) {
if (!TARGET_64BIT)
return false;
- const StructType *STy = dyn_cast<StructType>(Ty);
+ StructType *STy = dyn_cast<StructType>(Ty);
if (!STy)
return false;
@@ -1304,9 +1304,9 @@
// llvm_x86_scalar_type_for_struct_return - Return the LLVM type if TYPE
// can be returned as a scalar; otherwise return NULL.
-const Type *llvm_x86_scalar_type_for_struct_return(tree type, unsigned *Offset) {
+Type *llvm_x86_scalar_type_for_struct_return(tree type, unsigned *Offset) {
*Offset = 0;
- const Type *Ty = ConvertType(type);
+ Type *Ty = ConvertType(type);
unsigned Size = getTargetData().getTypeAllocSize(Ty);
if (Size == 0)
return Type::getVoidTy(Context);
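Usage sketch (caller shape assumed): the hook either produces a scalar type
in which to return the struct, with *Offset locating its bytes, or NULL to
fall back to the aggregate paths:
    unsigned Offset;
    Type *RetTy = llvm_x86_scalar_type_for_struct_return(type, &Offset);
    if (RetTy) {
      // Return the struct as this scalar; its bytes start at Offset.
    } else {
      // Fall back to returning the aggregate in memory or as multiple values.
    }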
@@ -1391,8 +1391,8 @@
/// The original implementation of this routine is based on
/// llvm_x86_64_should_pass_aggregate_in_mixed_regs code.
void
-llvm_x86_64_get_multiple_return_reg_classes(tree TreeType, const Type * /*Ty*/,
- std::vector<const Type*> &Elts) {
+llvm_x86_64_get_multiple_return_reg_classes(tree TreeType, Type * /*Ty*/,
+ std::vector<Type*> &Elts) {
enum x86_64_reg_class Class[MAX_CLASSES];
enum machine_mode Mode = type_natural_mode(TreeType, NULL);
HOST_WIDE_INT Bytes =
@@ -1441,12 +1441,12 @@
assert(0 && "Not yet handled!");
} else if ((NumClasses-i) == 2) {
if (Class[i+1] == X86_64_SSEUP_CLASS) {
- const Type *Ty = ConvertType(TreeType);
- if (const StructType *STy = dyn_cast<StructType>(Ty))
+ Type *Ty = ConvertType(TreeType);
+ if (StructType *STy = dyn_cast<StructType>(Ty))
// Look past the struct wrapper.
if (STy->getNumElements() == 1)
Ty = STy->getElementType(0);
- if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
if (VTy->getNumElements() == 2) {
if (VTy->getElementType()->isIntegerTy())
Elts.push_back(VectorType::get(Type::getInt64Ty(Context), 2));
@@ -1517,13 +1517,13 @@
// Return the LLVM type if TYPE can be returned as an aggregate,
// otherwise return NULL.
-const Type *llvm_x86_aggr_type_for_struct_return(tree type) {
- const Type *Ty = ConvertType(type);
+Type *llvm_x86_aggr_type_for_struct_return(tree type) {
+ Type *Ty = ConvertType(type);
if (!llvm_suitable_multiple_ret_value_type(Ty, type))
return NULL;
- const StructType *STy = cast<StructType>(Ty);
- std::vector<const Type *> ElementTypes;
+ StructType *STy = cast<StructType>(Ty);
+ std::vector<Type *> ElementTypes;
// Special handling for _Complex.
if (llvm_x86_should_not_return_complex_in_memory(type)) {
@@ -1532,7 +1532,7 @@
return StructType::get(Context, ElementTypes, STy->isPacked());
}
- std::vector<const Type*> GCCElts;
+ std::vector<Type*> GCCElts;
llvm_x86_64_get_multiple_return_reg_classes(type, Ty, GCCElts);
return StructType::get(Context, GCCElts, false);
}
@@ -1552,7 +1552,7 @@
LLVMBuilder &Builder,
bool isVolatile) {
Value *EVI = Builder.CreateExtractValue(Src, SrcFieldNo, "mrv_gr");
- const StructType *STy = cast<StructType>(Src->getType());
+ StructType *STy = cast<StructType>(Src->getType());
Value *Idxs[3];
Idxs[0] = ConstantInt::get(Type::getInt32Ty(Context), 0);
Idxs[1] = ConstantInt::get(Type::getInt32Ty(Context), DestFieldNo);
@@ -1574,11 +1574,11 @@
bool isVolatile,
LLVMBuilder &Builder) {
- const StructType *STy = cast<StructType>(Src->getType());
+ StructType *STy = cast<StructType>(Src->getType());
unsigned NumElements = STy->getNumElements();
- const PointerType *PTy = cast<PointerType>(Dest->getType());
- const StructType *DestTy = cast<StructType>(PTy->getElementType());
+ PointerType *PTy = cast<PointerType>(Dest->getType());
+ StructType *DestTy = cast<StructType>(PTy->getElementType());
unsigned SNO = 0;
unsigned DNO = 0;
@@ -1610,7 +1610,7 @@
while (SNO < NumElements) {
- const Type *DestElemType = DestTy->getElementType(DNO);
+ Type *DestElemType = DestTy->getElementType(DNO);
// Directly access first class values using getresult.
if (DestElemType->isSingleValueType()) {
@@ -1643,14 +1643,14 @@
// Access array elements individually. Note, the Src and Dest types may
// not match. For example { <2 x float>, float } and { float[3]; }
- const ArrayType *ATy = cast<ArrayType>(DestElemType);
+ ArrayType *ATy = cast<ArrayType>(DestElemType);
unsigned ArraySize = ATy->getNumElements();
unsigned DElemNo = 0; // DestTy's DNO field's element number
while (DElemNo < ArraySize) {
unsigned i = 0;
unsigned Size = 1;
- if (const VectorType *SElemTy =
+ if (VectorType *SElemTy =
dyn_cast<VectorType>(STy->getElementType(SNO))) {
Size = SElemTy->getNumElements();
if (SElemTy->getElementType()->getTypeID() == Type::FloatTyID