[llvm-commits] [llvm-gcc-4.2] r134982 - in /llvm-gcc-4.2/trunk/gcc: config/alpha/llvm-alpha-target.h config/alpha/llvm-alpha.cpp config/arm/llvm-arm-target.h config/arm/llvm-arm.cpp config/i386/llvm-i386-target.h config/i386/llvm-i386.cpp config/mips/llvm-mips-target.h config/mips/llvm-mips.cpp config/rs6000/llvm-rs6000.cpp config/rs6000/rs6000.h llvm-abi-default.cpp llvm-abi.h llvm-backend.cpp llvm-convert.cpp llvm-internal.h llvm-types.cpp
Jay Foad
jay.foad at gmail.com
Tue Jul 12 07:06:49 PDT 2011
Author: foad
Date: Tue Jul 12 09:06:48 2011
New Revision: 134982
URL: http://llvm.org/viewvc/llvm-project?rev=134982&view=rev
Log:
Second attempt at de-constifying LLVM Types in FunctionType::get(),
StructType::get() and TargetData::getIntPtrType().
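For readers skimming the diff below, the change is mechanical: every caller that previously built
std::vector<const Type*> element lists or held const Type* / const StructType* results now uses the
non-const forms, matching the de-constified signatures on the LLVM side. A minimal caller-side sketch
of the "after" pattern follows; it is illustrative only and not part of the patch (the helper name
makeVoidFnType is hypothetical, and the header paths are those of the LLVM tree of the same era):

    // Illustrative sketch only, not part of this commit.  Shows the
    // caller-side pattern the patch applies throughout llvm-gcc: element
    // lists and result types are plain Type* rather than const Type*.
    #include "llvm/DerivedTypes.h"
    #include "llvm/LLVMContext.h"
    #include "llvm/Target/TargetData.h"
    #include <vector>
    using namespace llvm;

    static FunctionType *makeVoidFnType(LLVMContext &Context,
                                         const TargetData &TD) {
      // Before r134982 these locals had to be const-qualified
      // (std::vector<const Type*>, const StructType*, const Type*).
      std::vector<Type*> Elts;
      Elts.push_back(Type::getInt32Ty(Context));
      Elts.push_back(TD.getIntPtrType(Context));
      StructType *STy = StructType::get(Context, Elts, /*isPacked=*/false);
      (void)STy;  // llvm-gcc would use this as an aggregate argument type
      return FunctionType::get(Type::getVoidTy(Context),
                               std::vector<Type*>(), /*isVarArg=*/false);
    }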
Modified:
llvm-gcc-4.2/trunk/gcc/config/alpha/llvm-alpha-target.h
llvm-gcc-4.2/trunk/gcc/config/alpha/llvm-alpha.cpp
llvm-gcc-4.2/trunk/gcc/config/arm/llvm-arm-target.h
llvm-gcc-4.2/trunk/gcc/config/arm/llvm-arm.cpp
llvm-gcc-4.2/trunk/gcc/config/i386/llvm-i386-target.h
llvm-gcc-4.2/trunk/gcc/config/i386/llvm-i386.cpp
llvm-gcc-4.2/trunk/gcc/config/mips/llvm-mips-target.h
llvm-gcc-4.2/trunk/gcc/config/mips/llvm-mips.cpp
llvm-gcc-4.2/trunk/gcc/config/rs6000/llvm-rs6000.cpp
llvm-gcc-4.2/trunk/gcc/config/rs6000/rs6000.h
llvm-gcc-4.2/trunk/gcc/llvm-abi-default.cpp
llvm-gcc-4.2/trunk/gcc/llvm-abi.h
llvm-gcc-4.2/trunk/gcc/llvm-backend.cpp
llvm-gcc-4.2/trunk/gcc/llvm-convert.cpp
llvm-gcc-4.2/trunk/gcc/llvm-internal.h
llvm-gcc-4.2/trunk/gcc/llvm-types.cpp
Modified: llvm-gcc-4.2/trunk/gcc/config/alpha/llvm-alpha-target.h
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/config/alpha/llvm-alpha-target.h?rev=134982&r1=134981&r2=134982&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/config/alpha/llvm-alpha-target.h (original)
+++ llvm-gcc-4.2/trunk/gcc/config/alpha/llvm-alpha-target.h Tue Jul 12 09:06:48 2011
@@ -25,7 +25,7 @@
#ifdef LLVM_ABI_H
-extern bool llvm_alpha_should_return_scalar_as_shadow(const Type* Ty);
+extern bool llvm_alpha_should_return_scalar_as_shadow(Type* Ty);
/* check if i128 should be a shadow return */
#define LLVM_SHOULD_RETURN_SCALAR_AS_SHADOW(X) \
Modified: llvm-gcc-4.2/trunk/gcc/config/alpha/llvm-alpha.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/config/alpha/llvm-alpha.cpp?rev=134982&r1=134981&r2=134982&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/config/alpha/llvm-alpha.cpp (original)
+++ llvm-gcc-4.2/trunk/gcc/config/alpha/llvm-alpha.cpp Tue Jul 12 09:06:48 2011
@@ -102,7 +102,7 @@
unsigned FnCode,
const MemRef *DestLoc,
Value *&Result,
- const Type *ResultType,
+ Type *ResultType,
std::vector<Value*> &Ops) {
switch (FnCode) {
case ALPHA_BUILTIN_UMULH: {
@@ -184,7 +184,7 @@
return false;
}
-bool llvm_alpha_should_return_scalar_as_shadow(const Type* Ty) {
+bool llvm_alpha_should_return_scalar_as_shadow(Type* Ty) {
if (Ty && Ty->isIntegerTy(128))
return true;
return false;
Modified: llvm-gcc-4.2/trunk/gcc/config/arm/llvm-arm-target.h
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/config/arm/llvm-arm-target.h?rev=134982&r1=134981&r2=134982&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/config/arm/llvm-arm-target.h (original)
+++ llvm-gcc-4.2/trunk/gcc/config/arm/llvm-arm-target.h Tue Jul 12 09:06:48 2011
@@ -48,16 +48,16 @@
#ifdef LLVM_ABI_H
extern bool
-llvm_arm_should_pass_aggregate_in_mixed_regs(tree, const Type *Ty,
+llvm_arm_should_pass_aggregate_in_mixed_regs(tree, Type *Ty,
CallingConv::ID&,
- std::vector<const Type*>&);
+ std::vector<Type*>&);
#define LLVM_SHOULD_PASS_AGGREGATE_IN_MIXED_REGS(T, TY, CC, E) \
llvm_arm_should_pass_aggregate_in_mixed_regs((T), (TY), (CC), (E))
struct DefaultABIClient;
extern bool
-llvm_arm_try_pass_aggregate_custom(tree, std::vector<const Type*>&,
+llvm_arm_try_pass_aggregate_custom(tree, std::vector<Type*>&,
CallingConv::ID&,
struct DefaultABIClient*);
@@ -65,15 +65,15 @@
llvm_arm_try_pass_aggregate_custom((T), (E), (CC), (C))
extern
-bool llvm_arm_aggregate_partially_passed_in_regs(std::vector<const Type*>&,
- std::vector<const Type*>&,
+bool llvm_arm_aggregate_partially_passed_in_regs(std::vector<Type*>&,
+ std::vector<Type*>&,
CallingConv::ID&);
#define LLVM_AGGREGATE_PARTIALLY_PASSED_IN_REGS(E, SE, CC) \
llvm_arm_aggregate_partially_passed_in_regs((E), (SE), (CC))
-extern const Type *llvm_arm_aggr_type_for_struct_return(tree type,
- CallingConv::ID &CC);
+extern Type *llvm_arm_aggr_type_for_struct_return(tree type,
+ CallingConv::ID &CC);
/* LLVM_AGGR_TYPE_FOR_STRUCT_RETURN - Return LLVM Type if X can be
returned as an aggregate, otherwise return NULL. */
@@ -99,7 +99,7 @@
llvm_arm_should_pass_or_return_aggregate_in_regs((X), (CC))
extern
-bool llvm_arm_should_pass_aggregate_using_byval_attr(tree, const Type *,
+bool llvm_arm_should_pass_aggregate_using_byval_attr(tree, Type *,
CallingConv::ID &CC);
#define LLVM_SHOULD_PASS_AGGREGATE_USING_BYVAL_ATTR(X, TY, CC) \
Modified: llvm-gcc-4.2/trunk/gcc/config/arm/llvm-arm.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/config/arm/llvm-arm.cpp?rev=134982&r1=134981&r2=134982&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/config/arm/llvm-arm.cpp (original)
+++ llvm-gcc-4.2/trunk/gcc/config/arm/llvm-arm.cpp Tue Jul 12 09:06:48 2011
@@ -58,7 +58,7 @@
error(msg, &EXPR_LOCATION(exp));
// Set the Result to an undefined value.
- const Type *ResTy = ConvertType(TREE_TYPE(exp));
+ Type *ResTy = ConvertType(TREE_TYPE(exp));
if (ResTy->isSingleValueType())
Result = UndefValue::get(ResTy);
@@ -246,13 +246,13 @@
unsigned FnCode,
const MemRef *DestLoc,
Value *&Result,
- const Type *ResultType,
+ Type *ResultType,
std::vector<Value*> &Ops) {
neon_datatype datatype = neon_datatype_unspecified;
bool isRounded = false;
Intrinsic::ID intID = Intrinsic::not_intrinsic;
Function *intFn;
- const Type* intOpTypes[2];
+ Type* intOpTypes[2];
if (FnCode < ARM_BUILTIN_NEON_BASE)
return false;
@@ -1415,7 +1415,7 @@
const VectorType *VTy = dyn_cast<const VectorType>(ResultType);
assert(VTy && "expected a vector type for vabdl result");
- const llvm::Type *DTy = VectorType::getTruncatedElementVectorType(VTy);
+ llvm::Type *DTy = VectorType::getTruncatedElementVectorType(VTy);
intFn = Intrinsic::getDeclaration(TheModule, intID, &DTy, 1);
Ops[0] = Builder.CreateCall2(intFn, Ops[0], Ops[1]);
Result = Builder.CreateZExt(Ops[0], ResultType);
@@ -1445,7 +1445,7 @@
const VectorType *VTy = dyn_cast<const VectorType>(ResultType);
assert(VTy && "expected a vector type for vabal result");
- const llvm::Type *DTy = VectorType::getTruncatedElementVectorType(VTy);
+ llvm::Type *DTy = VectorType::getTruncatedElementVectorType(VTy);
intFn = Intrinsic::getDeclaration(TheModule, intID, &DTy, 1);
Ops[1] = Builder.CreateCall2(intFn, Ops[1], Ops[2]);
Ops[1] = Builder.CreateZExt(Ops[1], ResultType);
@@ -2033,7 +2033,7 @@
case NEON_BUILTIN_vld4: {
const StructType *STy = dyn_cast<const StructType>(ResultType);
assert(STy && "expected a struct type");
- const Type *VTy = STy->getElementType(0);
+ Type *VTy = STy->getElementType(0);
switch (neon_code) {
case NEON_BUILTIN_vld2: intID = Intrinsic::arm_neon_vld2; break;
case NEON_BUILTIN_vld3: intID = Intrinsic::arm_neon_vld3; break;
@@ -2062,9 +2062,9 @@
case NEON_BUILTIN_vld2_lane:
case NEON_BUILTIN_vld3_lane:
case NEON_BUILTIN_vld4_lane: {
- const StructType *STy = dyn_cast<const StructType>(ResultType);
+ StructType *STy = dyn_cast<StructType>(ResultType);
assert(STy && "expected a struct type");
- const VectorType *VTy = dyn_cast<const VectorType>(STy->getElementType(0));
+ VectorType *VTy = dyn_cast<VectorType>(STy->getElementType(0));
assert(VTy && "expected a vector type");
if (!isValidLane(Ops[2], VTy->getNumElements()))
return UnexpectedError("%Hinvalid lane number", exp, Result);
@@ -2105,9 +2105,9 @@
case NEON_BUILTIN_vld2_dup:
case NEON_BUILTIN_vld3_dup:
case NEON_BUILTIN_vld4_dup: {
- const StructType *STy = dyn_cast<const StructType>(ResultType);
+ StructType *STy = dyn_cast<StructType>(ResultType);
assert(STy && "expected a struct type");
- const VectorType *VTy = dyn_cast<const VectorType>(STy->getElementType(0));
+ VectorType *VTy = dyn_cast<VectorType>(STy->getElementType(0));
assert(VTy && "expected a vector type");
intOpTypes[0] = VTy;
@@ -2168,7 +2168,7 @@
}
case NEON_BUILTIN_vst1: {
- const Type *VTy = Ops[1]->getType();
+ Type *VTy = Ops[1]->getType();
intID = Intrinsic::arm_neon_vst1;
intFn = Intrinsic::getDeclaration(TheModule, intID, &VTy, 1);
Type *VPTy = PointerType::getUnqual(Type::getInt8Ty(Context));
@@ -2184,7 +2184,7 @@
case NEON_BUILTIN_vst4: {
const StructType *STy = dyn_cast<const StructType>(Ops[1]->getType());
assert(STy && "expected a struct type");
- const Type *VTy = STy->getElementType(0);
+ Type *VTy = STy->getElementType(0);
switch (neon_code) {
case NEON_BUILTIN_vst2: intID = Intrinsic::arm_neon_vst2; break;
case NEON_BUILTIN_vst3: intID = Intrinsic::arm_neon_vst3; break;
@@ -2224,9 +2224,9 @@
case NEON_BUILTIN_vst2_lane:
case NEON_BUILTIN_vst3_lane:
case NEON_BUILTIN_vst4_lane: {
- const StructType *STy = dyn_cast<const StructType>(Ops[1]->getType());
+ StructType *STy = dyn_cast<StructType>(Ops[1]->getType());
assert(STy && "expected a struct type");
- const VectorType *VTy = dyn_cast<const VectorType>(STy->getElementType(0));
+ VectorType *VTy = dyn_cast<VectorType>(STy->getElementType(0));
assert(VTy && "expected a vector type");
if (!isValidLane(Ops[2], VTy->getNumElements()))
return UnexpectedError("%Hinvalid lane number", exp, Result);
@@ -2521,11 +2521,11 @@
// Walk over an LLVM Type that we know is a homogeneous aggregate and
// push the proper LLVM Types that represent the register types to pass
// that struct member in.
-static void push_elts(const Type *Ty, std::vector<const Type*> &Elts)
+static void push_elts(const Type *Ty, std::vector<Type*> &Elts)
{
for (Type::subtype_iterator I = Ty->subtype_begin(), E = Ty->subtype_end();
I != E; ++I) {
- const Type *STy = *I;
+ Type *STy = *I;
if (const VectorType *VTy = dyn_cast<VectorType>(STy)) {
switch (VTy->getBitWidth())
{
@@ -2538,8 +2538,8 @@
default:
assert (0 && "invalid vector type");
}
- } else if (const ArrayType *ATy = dyn_cast<ArrayType>(STy)) {
- const Type *ETy = ATy->getElementType();
+ } else if (ArrayType *ATy = dyn_cast<ArrayType>(STy)) {
+ Type *ETy = ATy->getElementType();
for (uint64_t i = ATy->getNumElements(); i > 0; --i)
Elts.push_back(ETy);
@@ -2550,7 +2550,7 @@
}
}
-static unsigned count_num_words(std::vector<const Type*> &ScalarElts) {
+static unsigned count_num_words(std::vector<Type*> &ScalarElts) {
unsigned NumWords = 0;
for (unsigned i = 0, e = ScalarElts.size(); i != e; ++i) {
const Type *Ty = ScalarElts[i];
@@ -2574,7 +2574,7 @@
// the IL a bit more explicit about how arguments are handled.
extern bool
llvm_arm_try_pass_aggregate_custom(tree type,
- std::vector<const Type*>& ScalarElts,
+ std::vector<Type*>& ScalarElts,
CallingConv::ID& CC,
struct DefaultABIClient* C) {
if (CC != CallingConv::ARM_AAPCS && CC != CallingConv::C)
@@ -2596,14 +2596,14 @@
// First, build a type that will be bitcast to the original one and
// from where elements will be extracted.
- std::vector<const Type*> Elts;
- const Type* Int32Ty = Type::getInt32Ty(getGlobalContext());
+ std::vector<Type*> Elts;
+ Type* Int32Ty = Type::getInt32Ty(getGlobalContext());
const unsigned NumRegularArgs = Size / 4;
for (unsigned i = 0; i < NumRegularArgs; ++i) {
Elts.push_back(Int32Ty);
}
const unsigned RestSize = Size % 4;
- const llvm::Type *RestType = NULL;
+ llvm::Type *RestType = NULL;
if (RestSize> 2) {
RestType = Type::getInt32Ty(getGlobalContext());
} else if (RestSize > 1) {
@@ -2613,7 +2613,7 @@
}
if (RestType)
Elts.push_back(RestType);
- const StructType *STy = StructType::get(getGlobalContext(), Elts, false);
+ StructType *STy = StructType::get(getGlobalContext(), Elts, false);
if (AddPad) {
ScalarElts.push_back(Int32Ty);
@@ -2641,9 +2641,9 @@
// for parameter passing. This only applies to AAPCS-VFP "homogeneous
// aggregates" as specified in 4.3.5 of the AAPCS spec.
bool
-llvm_arm_should_pass_aggregate_in_mixed_regs(tree TreeType, const Type *Ty,
+llvm_arm_should_pass_aggregate_in_mixed_regs(tree TreeType, Type *Ty,
CallingConv::ID &CC,
- std::vector<const Type*> &Elts) {
+ std::vector<Type*> &Elts) {
if (!llvm_arm_should_pass_or_return_aggregate_in_regs(TreeType, CC))
return false;
@@ -2686,7 +2686,7 @@
// count_num_registers_uses - Simulate argument passing reg allocation in SPRs.
// Caller is expected to zero out SPRs. Returns true if all of ScalarElts fit
// in registers.
-static bool count_num_registers_uses(std::vector<const Type*> &ScalarElts,
+static bool count_num_registers_uses(std::vector<Type*> &ScalarElts,
bool *SPRs) {
for (unsigned i = 0, e = ScalarElts.size(); i != e; ++i) {
const Type *Ty = ScalarElts[i];
@@ -2734,8 +2734,8 @@
// part of the aggregate, return true. That means the aggregate should instead
// be passed in memory.
bool
-llvm_arm_aggregate_partially_passed_in_regs(std::vector<const Type*> &Elts,
- std::vector<const Type*> &ScalarElts,
+llvm_arm_aggregate_partially_passed_in_regs(std::vector<Type*> &Elts,
+ std::vector<Type*> &ScalarElts,
CallingConv::ID &CC) {
// Homogeneous aggregates are an AAPCS-VFP feature.
if ((CC != CallingConv::ARM_AAPCS_VFP) ||
@@ -2756,14 +2756,14 @@
// Return LLVM Type if TYPE can be returned as an aggregate,
// otherwise return NULL.
-const Type *llvm_arm_aggr_type_for_struct_return(tree TreeType,
- CallingConv::ID &CC) {
+Type *llvm_arm_aggr_type_for_struct_return(tree TreeType,
+ CallingConv::ID &CC) {
if (!llvm_arm_should_pass_or_return_aggregate_in_regs(TreeType, CC))
return NULL;
// Walk Ty and push LLVM types corresponding to register types onto
// Elts.
- std::vector<const Type*> Elts;
+ std::vector<Type*> Elts;
const Type *Ty = ConvertType(TreeType);
push_elts(Ty, Elts);
@@ -2876,7 +2876,7 @@
/* Target hook for llvm-abi.h. It returns true if an aggregate of the
specified type should be passed with the 'byval' attribute. */
bool llvm_arm_should_pass_aggregate_using_byval_attr(tree TreeType,
- const Type *Ty,
+ Type *Ty,
CallingConv::ID &CC) {
if (CC == CallingConv::ARM_APCS ||
(CC == CallingConv::C && !TARGET_AAPCS_BASED)) {
Modified: llvm-gcc-4.2/trunk/gcc/config/i386/llvm-i386-target.h
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/config/i386/llvm-i386-target.h?rev=134982&r1=134981&r2=134982&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/config/i386/llvm-i386-target.h (original)
+++ llvm-gcc-4.2/trunk/gcc/config/i386/llvm-i386-target.h Tue Jul 12 09:06:48 2011
@@ -153,15 +153,15 @@
#define LLVM_SHOULD_PASS_AGGREGATE_IN_INTEGER_REGS(X, Y, Z) \
llvm_x86_should_pass_aggregate_in_integer_regs((X), (Y), (Z))
-extern const Type *llvm_x86_scalar_type_for_struct_return(tree type,
- unsigned *Offset);
+extern Type *llvm_x86_scalar_type_for_struct_return(tree type,
+ unsigned *Offset);
/* LLVM_SCALAR_TYPE_FOR_STRUCT_RETURN - Return LLVM Type if X can be
returned as a scalar, otherwise return NULL. */
#define LLVM_SCALAR_TYPE_FOR_STRUCT_RETURN(X, Y) \
llvm_x86_scalar_type_for_struct_return((X), (Y))
-extern const Type *llvm_x86_aggr_type_for_struct_return(tree type);
+extern Type *llvm_x86_aggr_type_for_struct_return(tree type);
/* LLVM_AGGR_TYPE_FOR_STRUCT_RETURN - Return LLVM Type if X can be
returned as an aggregate, otherwise return NULL. */
@@ -216,7 +216,7 @@
llvm_x86_should_not_return_complex_in_memory((X))
extern bool
-llvm_x86_should_pass_aggregate_as_fca(tree type, const Type *);
+llvm_x86_should_pass_aggregate_as_fca(tree type, Type *);
/* LLVM_SHOULD_PASS_AGGREGATE_AS_FCA - Return true if an aggregate of the
specified type should be passed as a first-class aggregate. */
@@ -225,18 +225,18 @@
llvm_x86_should_pass_aggregate_as_fca(X, TY)
#endif
-extern bool llvm_x86_should_pass_aggregate_in_memory(tree, const Type *);
+extern bool llvm_x86_should_pass_aggregate_in_memory(tree, Type *);
#define LLVM_SHOULD_PASS_AGGREGATE_USING_BYVAL_ATTR(X, TY, CC) \
llvm_x86_should_pass_aggregate_in_memory(X, TY)
extern bool
-llvm_x86_64_should_pass_aggregate_in_mixed_regs(tree, const Type *Ty,
- std::vector<const Type*>&);
+llvm_x86_64_should_pass_aggregate_in_mixed_regs(tree, Type *Ty,
+ std::vector<Type*>&);
extern bool
-llvm_x86_32_should_pass_aggregate_in_mixed_regs(tree, const Type *Ty,
- std::vector<const Type*>&);
+llvm_x86_32_should_pass_aggregate_in_mixed_regs(tree, Type *Ty,
+ std::vector<Type*>&);
#define LLVM_SHOULD_PASS_AGGREGATE_IN_MIXED_REGS(T, TY, CC, E) \
(TARGET_64BIT ? \
@@ -244,8 +244,8 @@
llvm_x86_32_should_pass_aggregate_in_mixed_regs((T), (TY), (E)))
extern
-bool llvm_x86_64_aggregate_partially_passed_in_regs(std::vector<const Type*>&,
- std::vector<const Type*>&);
+bool llvm_x86_64_aggregate_partially_passed_in_regs(std::vector<Type*>&,
+ std::vector<Type*>&);
#define LLVM_AGGREGATE_PARTIALLY_PASSED_IN_REGS(E, SE, CC) \
(TARGET_64BIT ? \
Modified: llvm-gcc-4.2/trunk/gcc/config/i386/llvm-i386.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/config/i386/llvm-i386.cpp?rev=134982&r1=134981&r2=134982&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/config/i386/llvm-i386.cpp (original)
+++ llvm-gcc-4.2/trunk/gcc/config/i386/llvm-i386.cpp Tue Jul 12 09:06:48 2011
@@ -95,7 +95,7 @@
unsigned FnCode,
const MemRef *DestLoc,
Value *&Result,
- const Type *ResultType,
+ Type *ResultType,
std::vector<Value*> &Ops) {
switch (FnCode) {
default: break;
@@ -1108,8 +1108,8 @@
It also returns a vector of types that correspond to the registers used
for parameter passing. This is only called for x86-32. */
bool
-llvm_x86_32_should_pass_aggregate_in_mixed_regs(tree TreeType, const Type *Ty,
- std::vector<const Type*> &Elts){
+llvm_x86_32_should_pass_aggregate_in_mixed_regs(tree TreeType, Type *Ty,
+ std::vector<Type*> &Elts){
// If this is a small fixed size type, investigate it.
HOST_WIDE_INT SrcSize = int_size_in_bytes(TreeType);
if (SrcSize <= 0 || SrcSize > 16)
@@ -1125,7 +1125,7 @@
if (!STy || STy->isPacked()) return false;
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- const Type *EltTy = STy->getElementType(i);
+ Type *EltTy = STy->getElementType(i);
// 32 and 64-bit integers are fine, as are float and double. Long double
// (which can be picked as the type for a union of 16 bytes) is not fine,
// as loads and stores of it get only 10 bytes.
@@ -1147,7 +1147,7 @@
/* It returns true if an aggregate of the specified type should be passed as a
first class aggregate. */
-bool llvm_x86_should_pass_aggregate_as_fca(tree type, const Type *Ty) {
+bool llvm_x86_should_pass_aggregate_as_fca(tree type, Type *Ty) {
if (TREE_CODE(type) != COMPLEX_TYPE)
return false;
const StructType *STy = dyn_cast<StructType>(Ty);
@@ -1165,7 +1165,7 @@
/* Target hook for llvm-abi.h. It returns true if an aggregate of the
specified type should be passed in memory. */
-bool llvm_x86_should_pass_aggregate_in_memory(tree TreeType, const Type *Ty) {
+bool llvm_x86_should_pass_aggregate_in_memory(tree TreeType, Type *Ty) {
if (llvm_x86_should_pass_aggregate_as_fca(TreeType, Ty))
return false;
@@ -1178,7 +1178,7 @@
return false;
if (!TARGET_64BIT) {
- std::vector<const Type*> Elts;
+ std::vector<Type*> Elts;
return !llvm_x86_32_should_pass_aggregate_in_mixed_regs(TreeType, Ty, Elts);
}
return llvm_x86_64_should_pass_aggregate_in_memory(TreeType, Mode);
@@ -1186,7 +1186,7 @@
/* count_num_registers_uses - Return the number of GPRs and XMMs parameter
register used so far. Caller is responsible for initializing outputs. */
-static void count_num_registers_uses(std::vector<const Type*> &ScalarElts,
+static void count_num_registers_uses(std::vector<Type*> &ScalarElts,
unsigned &NumGPRs, unsigned &NumXMMs) {
for (unsigned i = 0, e = ScalarElts.size(); i != e; ++i) {
const Type *Ty = ScalarElts[i];
@@ -1220,8 +1220,8 @@
part of the aggregate, return true. That means the aggregate should instead
be passed in memory. */
bool
-llvm_x86_64_aggregate_partially_passed_in_regs(std::vector<const Type*> &Elts,
- std::vector<const Type*> &ScalarElts) {
+llvm_x86_64_aggregate_partially_passed_in_regs(std::vector<Type*> &Elts,
+ std::vector<Type*> &ScalarElts) {
// Counting number of GPRs and XMMs used so far. According to AMD64 ABI
// document: "If there are no registers available for any eightbyte of an
// argument, the whole argument is passed on the stack." X86-64 uses 6
@@ -1266,8 +1266,8 @@
It also returns a vector of types that correspond to the registers used
for parameter passing. This is only called for x86-64. */
bool
-llvm_x86_64_should_pass_aggregate_in_mixed_regs(tree TreeType, const Type *Ty,
- std::vector<const Type*> &Elts){
+llvm_x86_64_should_pass_aggregate_in_mixed_regs(tree TreeType, Type *Ty,
+ std::vector<Type*> &Elts){
if (llvm_x86_should_pass_aggregate_as_fca(TreeType, Ty))
return false;
@@ -1544,7 +1544,7 @@
// llvm_x86_scalar_type_for_struct_return - Return LLVM type if TYPE
// can be returned as a scalar, otherwise return NULL.
-const Type *llvm_x86_scalar_type_for_struct_return(tree type, unsigned *Offset) {
+Type *llvm_x86_scalar_type_for_struct_return(tree type, unsigned *Offset) {
*Offset = 0;
const Type *Ty = ConvertType(type);
unsigned Size = getTargetData().getTypeAllocSize(Ty);
@@ -1633,7 +1633,7 @@
/// llvm_x86_64_should_pass_aggregate_in_mixed_regs code.
void
llvm_x86_64_get_multiple_return_reg_classes(tree TreeType, const Type *Ty,
- std::vector<const Type*> &Elts){
+ std::vector<Type*> &Elts){
enum x86_64_reg_class Class[MAX_CLASSES];
enum machine_mode Mode = ix86_getNaturalModeForType(TreeType);
HOST_WIDE_INT Bytes =
@@ -1768,13 +1768,13 @@
// Return LLVM Type if TYPE can be returned as an aggregate,
// otherwise return NULL.
-const Type *llvm_x86_aggr_type_for_struct_return(tree type) {
+Type *llvm_x86_aggr_type_for_struct_return(tree type) {
const Type *Ty = ConvertType(type);
if (!llvm_suitable_multiple_ret_value_type(Ty, type))
return NULL;
const StructType *STy = cast<StructType>(Ty);
- std::vector<const Type *> ElementTypes;
+ std::vector<Type *> ElementTypes;
// Special handling for _Complex.
if (llvm_x86_should_not_return_complex_in_memory(type)) {
@@ -1783,7 +1783,7 @@
return StructType::get(Context, ElementTypes, STy->isPacked());
}
- std::vector<const Type*> GCCElts;
+ std::vector<Type*> GCCElts;
llvm_x86_64_get_multiple_return_reg_classes(type, Ty, GCCElts);
return StructType::get(Context, GCCElts, false);
}
Modified: llvm-gcc-4.2/trunk/gcc/config/mips/llvm-mips-target.h
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/config/mips/llvm-mips-target.h?rev=134982&r1=134981&r2=134982&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/config/mips/llvm-mips-target.h (original)
+++ llvm-gcc-4.2/trunk/gcc/config/mips/llvm-mips-target.h Tue Jul 12 09:06:48 2011
@@ -22,7 +22,7 @@
#ifdef LLVM_ABI_H
-extern bool llvm_mips_should_pass_aggregate_in_memory(tree, const Type *);
+extern bool llvm_mips_should_pass_aggregate_in_memory(tree, Type *);
/* LLVM_SHOULD_PASS_AGGREGATE_USING_BYVAL_ATTR - Return true if this aggregate
value should be passed by value, i.e. passing its address with the byval
@@ -39,7 +39,7 @@
#define LLVM_SHOULD_NOT_RETURN_COMPLEX_IN_MEMORY(X) \
llvm_mips_should_not_return_complex_in_memory((X))
-extern const Type *llvm_mips_aggr_type_for_struct_return(tree type);
+extern Type *llvm_mips_aggr_type_for_struct_return(tree type);
/* LLVM_AGGR_TYPE_FOR_STRUCT_RETURN - Return LLVM Type if X can be
returned as an aggregate, otherwise return NULL. */
Modified: llvm-gcc-4.2/trunk/gcc/config/mips/llvm-mips.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/config/mips/llvm-mips.cpp?rev=134982&r1=134981&r2=134982&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/config/mips/llvm-mips.cpp (original)
+++ llvm-gcc-4.2/trunk/gcc/config/mips/llvm-mips.cpp Tue Jul 12 09:06:48 2011
@@ -32,7 +32,7 @@
/* Target hook for llvm-abi.h. It returns true if an aggregate of the
specified type should be passed in memory. In mips EABI this is
true for aggregates with size > 32-bits. */
-bool llvm_mips_should_pass_aggregate_in_memory(tree TreeType, const Type *Ty) {
+bool llvm_mips_should_pass_aggregate_in_memory(tree TreeType, Type *Ty) {
if (mips_abi == ABI_EABI)
{
enum machine_mode mode = TYPE_MODE(TreeType);
@@ -63,11 +63,11 @@
// Return LLVM Type if TYPE can be returned as an aggregate,
// otherwise return NULL.
-const Type *llvm_mips_aggr_type_for_struct_return(tree type) {
- const Type *Ty = ConvertType(type);
+Type *llvm_mips_aggr_type_for_struct_return(tree type) {
+ Type *Ty = ConvertType(type);
- const StructType *STy = cast<StructType>(Ty);
- std::vector<const Type *> ElementTypes;
+ StructType *STy = cast<StructType>(Ty);
+ std::vector<Type *> ElementTypes;
// Special handling for _Complex.
if (llvm_mips_should_not_return_complex_in_memory(type)) {
Modified: llvm-gcc-4.2/trunk/gcc/config/rs6000/llvm-rs6000.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/config/rs6000/llvm-rs6000.cpp?rev=134982&r1=134981&r2=134982&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/config/rs6000/llvm-rs6000.cpp (original)
+++ llvm-gcc-4.2/trunk/gcc/config/rs6000/llvm-rs6000.cpp Tue Jul 12 09:06:48 2011
@@ -94,7 +94,7 @@
unsigned FnCode,
const MemRef *DestLoc,
Value *&Result,
- const Type *ResultType,
+ Type *ResultType,
std::vector<Value*> &Ops) {
switch (FnCode) {
default: break;
@@ -391,7 +391,7 @@
return false;
}
-static unsigned count_num_registers_uses(std::vector<const Type*> &ScalarElts) {
+static unsigned count_num_registers_uses(std::vector<Type*> &ScalarElts) {
unsigned NumGPRs = 0;
for (unsigned i = 0, e = ScalarElts.size(); i != e; ++i) {
if (NumGPRs >= 8)
@@ -423,7 +423,7 @@
/// arguments are always passed in general purpose registers, never in
/// Floating-point registers or vector registers.
bool llvm_rs6000_try_pass_aggregate_custom(tree type,
- std::vector<const Type*> &ScalarElts,
+ std::vector<Type*> &ScalarElts,
const CallingConv::ID &CC,
struct DefaultABIClient* C) {
if (!isSVR4ABI())
@@ -432,8 +432,8 @@
// Eight GPR's are availabe for parameter passing.
const unsigned NumArgRegs = 8;
unsigned NumGPR = count_num_registers_uses(ScalarElts);
- const Type *Ty = ConvertType(type);
- const Type* Int32Ty = Type::getInt32Ty(getGlobalContext());
+ Type *Ty = ConvertType(type);
+ Type* Int32Ty = Type::getInt32Ty(getGlobalContext());
if (Ty->isSingleValueType()) {
if (Ty->isIntegerTy()) {
unsigned TypeSize = Ty->getPrimitiveSizeInBits();
@@ -469,7 +469,7 @@
if (TREE_CODE(type) == COMPLEX_TYPE) {
unsigned SrcSize = int_size_in_bytes(type);
unsigned NumRegs = (SrcSize + 3) / 4;
- std::vector<const Type*> Elts;
+ std::vector<Type*> Elts;
// This looks very strange, but matches the old code.
if (SrcSize == 8) {
@@ -491,7 +491,7 @@
for (unsigned int i = 0; i < NumRegs; ++i) {
Elts.push_back(Int32Ty);
}
- const StructType *STy = StructType::get(getGlobalContext(), Elts, false);
+ StructType *STy = StructType::get(getGlobalContext(), Elts, false);
for (unsigned int i = 0; i < NumRegs; ++i) {
C->EnterField(i, STy);
C->HandleScalarArgument(Int32Ty, 0);
@@ -505,7 +505,7 @@
/* Target hook for llvm-abi.h. It returns true if an aggregate of the
specified type should be passed using the byval mechanism. */
-bool llvm_rs6000_should_pass_aggregate_byval(tree TreeType, const Type *Ty) {
+bool llvm_rs6000_should_pass_aggregate_byval(tree TreeType, Type *Ty) {
/* FIXME byval not implemented for ppc64. */
if (TARGET_64BIT)
return false;
@@ -554,8 +554,8 @@
It also returns a vector of types that correspond to the registers used
for parameter passing. */
bool
-llvm_rs6000_should_pass_aggregate_in_mixed_regs(tree TreeType, const Type* Ty,
- std::vector<const Type*>&Elts) {
+llvm_rs6000_should_pass_aggregate_in_mixed_regs(tree TreeType, Type* Ty,
+ std::vector<Type*>&Elts) {
// FIXME there are plenty of ppc64 cases that need this.
if (TARGET_64BIT)
return false;
Modified: llvm-gcc-4.2/trunk/gcc/config/rs6000/rs6000.h
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/config/rs6000/rs6000.h?rev=134982&r1=134981&r2=134982&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/config/rs6000/rs6000.h (original)
+++ llvm-gcc-4.2/trunk/gcc/config/rs6000/rs6000.h Tue Jul 12 09:06:48 2011
@@ -3490,14 +3490,14 @@
#ifdef LLVM_ABI_H
extern bool llvm_rs6000_try_pass_aggregate_custom(tree,
- std::vector<const Type*>&,
+ std::vector<Type*>&,
const CallingConv::ID &,
struct DefaultABIClient*);
#define LLVM_TRY_PASS_AGGREGATE_CUSTOM(T, E, CC, C) \
llvm_rs6000_try_pass_aggregate_custom((T), (E), (CC), (C))
-extern bool llvm_rs6000_should_pass_aggregate_byval(tree, const Type *);
+extern bool llvm_rs6000_should_pass_aggregate_byval(tree, Type *);
#define LLVM_SHOULD_PASS_AGGREGATE_USING_BYVAL_ATTR(X, TY, CC) \
llvm_rs6000_should_pass_aggregate_byval((X), (TY))
@@ -3508,8 +3508,8 @@
#define LLVM_SHOULD_PASS_VECTOR_IN_INTEGER_REGS(X) \
llvm_rs6000_should_pass_vector_in_integer_regs((X))
-extern bool llvm_rs6000_should_pass_aggregate_in_mixed_regs(tree, const Type*,
- std::vector<const Type*>&);
+extern bool llvm_rs6000_should_pass_aggregate_in_mixed_regs(tree, Type*,
+ std::vector<Type*>&);
/* FIXME this is needed for 64-bit */
#define LLVM_SHOULD_PASS_AGGREGATE_IN_MIXED_REGS(T, TY, CC, E) \
Modified: llvm-gcc-4.2/trunk/gcc/llvm-abi-default.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/llvm-abi-default.cpp?rev=134982&r1=134981&r2=134982&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/llvm-abi-default.cpp (original)
+++ llvm-gcc-4.2/trunk/gcc/llvm-abi-default.cpp Tue Jul 12 09:06:48 2011
@@ -9,9 +9,9 @@
/// on the client that indicate how its pieces should be handled. This
/// handles things like returning structures via hidden parameters.
void DefaultABI::HandleReturnType(tree type, tree fn, bool isBuiltin,
- std::vector<const Type*> &ScalarElts) {
+ std::vector<Type*> &ScalarElts) {
unsigned Offset = 0;
- const Type *Ty = ConvertType(type);
+ Type *Ty = ConvertType(type);
if (Ty->isVectorTy()) {
// Vector handling is weird on x86. In particular builtin and
// non-builtin function of the same return types can use different
@@ -36,10 +36,10 @@
} else {
// Otherwise return as an integer value large enough to hold the entire
// aggregate.
- if (const Type *AggrTy = LLVM_AGGR_TYPE_FOR_STRUCT_RETURN(type,
+ if (Type *AggrTy = LLVM_AGGR_TYPE_FOR_STRUCT_RETURN(type,
C.getCallingConv()))
C.HandleAggregateResultAsAggregate(AggrTy);
- else if (const Type* ScalarTy =
+ else if (Type* ScalarTy =
LLVM_SCALAR_TYPE_FOR_STRUCT_RETURN(type, &Offset))
C.HandleAggregateResultAsScalar(ScalarTy, Offset);
else {
@@ -53,7 +53,7 @@
// FIXME: should return the hidden first argument for some targets
// (e.g. ELF i386).
- const PointerType *PTy = Ty->getPointerTo();
+ PointerType *PTy = Ty->getPointerTo();
C.HandleAggregateShadowResult(PTy, false);
ScalarElts.push_back(PTy);
}
@@ -64,21 +64,21 @@
/// argument and invokes methods on the client that indicate how its pieces
/// should be handled. This handles things like decimating structures into
/// their fields.
-void DefaultABI::HandleArgument(tree type, std::vector<const Type*> &ScalarElts,
+void DefaultABI::HandleArgument(tree type, std::vector<Type*> &ScalarElts,
Attributes *Attributes) {
unsigned Size = 0;
bool DontCheckAlignment = false;
- const Type *Ty = ConvertType(type);
+ Type *Ty = ConvertType(type);
// Figure out if this field is zero bits wide, e.g. {} or [0 x int]. Do
// not include variable sized fields here.
- std::vector<const Type*> Elts;
+ std::vector<Type*> Elts;
if (Ty->isVoidTy()) {
// Handle void explicitly as a {} type.
- const Type *OpTy = StructType::get(getGlobalContext());
+ Type *OpTy = StructType::get(getGlobalContext());
C.HandleScalarArgument(OpTy, type);
ScalarElts.push_back(OpTy);
} else if (isPassedByInvisibleReference(type)) { // variable size -> by-ref.
- const Type *PtrTy = Ty->getPointerTo();
+ Type *PtrTy = Ty->getPointerTo();
C.HandleByInvisibleReferenceArgument(PtrTy, type);
ScalarElts.push_back(PtrTy);
} else if (Ty->isVectorTy()) {
@@ -134,7 +134,7 @@
for (tree Field = TYPE_FIELDS(type); Field; Field = TREE_CHAIN(Field))
if (TREE_CODE(Field) == FIELD_DECL) {
const tree Ftype = getDeclaredType(Field);
- const Type *FTy = ConvertType(Ftype);
+ Type *FTy = ConvertType(Ftype);
unsigned FNo = GET_LLVM_FIELD_INDEX(Field);
assert(FNo != ~0U && "Case not handled yet!");
@@ -161,7 +161,7 @@
(TREE_CODE(type) == QUAL_UNION_TYPE)) {
HandleUnion(type, ScalarElts);
} else if (TREE_CODE(type) == ARRAY_TYPE) {
- const ArrayType *ATy = cast<ArrayType>(Ty);
+ ArrayType *ATy = cast<ArrayType>(Ty);
for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
C.EnterField(i, Ty);
HandleArgument(TREE_TYPE(type), ScalarElts);
@@ -174,7 +174,7 @@
}
/// HandleUnion - Handle a UNION_TYPE or QUAL_UNION_TYPE tree.
-void DefaultABI::HandleUnion(tree type, std::vector<const Type*> &ScalarElts) {
+void DefaultABI::HandleUnion(tree type, std::vector<Type*> &ScalarElts) {
if (TYPE_TRANSPARENT_UNION(type)) {
tree Field = TYPE_FIELDS(type);
assert(Field && "Transparent union must have some elements!");
@@ -218,7 +218,7 @@
/// integer registers, convert it to a structure containing ints and pass all
/// of the struct elements in. If Size is set we pass only that many bytes.
void DefaultABI::PassInIntegerRegisters(tree type,
- std::vector<const Type*> &ScalarElts,
+ std::vector<Type*> &ScalarElts,
unsigned origSize,
bool DontCheckAlignment) {
unsigned Size;
@@ -240,8 +240,8 @@
unsigned ArraySize = Size / ElementSize;
// Put as much of the aggregate as possible into an array.
- const Type *ATy = NULL;
- const Type *ArrayElementType = NULL;
+ Type *ATy = NULL;
+ Type *ArrayElementType = NULL;
if (ArraySize) {
Size = Size % ElementSize;
ArrayElementType = (UseInt64 ?
@@ -252,7 +252,7 @@
// Pass any leftover bytes as a separate element following the array.
unsigned LastEltRealSize = 0;
- const llvm::Type *LastEltTy = 0;
+ llvm::Type *LastEltTy = 0;
if (Size > 4) {
LastEltTy = Type::getInt64Ty(getGlobalContext());
} else if (Size > 2) {
@@ -267,12 +267,12 @@
LastEltRealSize = Size;
}
- std::vector<const Type*> Elts;
+ std::vector<Type*> Elts;
if (ATy)
Elts.push_back(ATy);
if (LastEltTy)
Elts.push_back(LastEltTy);
- const StructType *STy = StructType::get(getGlobalContext(), Elts, false);
+ StructType *STy = StructType::get(getGlobalContext(), Elts, false);
unsigned i = 0;
if (ArraySize) {
@@ -297,23 +297,23 @@
/// PassInMixedRegisters - Given an aggregate value that should be passed in
/// mixed integer, floating point, and vector registers, convert it to a
/// structure containing the specified struct elements in.
-void DefaultABI::PassInMixedRegisters(const Type *Ty,
- std::vector<const Type*> &OrigElts,
- std::vector<const Type*> &ScalarElts) {
+void DefaultABI::PassInMixedRegisters(Type *Ty,
+ std::vector<Type*> &OrigElts,
+ std::vector<Type*> &ScalarElts) {
// We use VoidTy in OrigElts to mean "this is a word in the aggregate
// that occupies storage but has no useful information, and is not passed
// anywhere". Happens on x86-64.
- std::vector<const Type*> Elts(OrigElts);
- const Type* wordType = getTargetData().getPointerSize() == 4 ?
+ std::vector<Type*> Elts(OrigElts);
+ Type* wordType = getTargetData().getPointerSize() == 4 ?
Type::getInt32Ty(getGlobalContext()) : Type::getInt64Ty(getGlobalContext());
for (unsigned i=0, e=Elts.size(); i!=e; ++i)
if (OrigElts[i]->isVoidTy())
Elts[i] = wordType;
- const StructType *STy = StructType::get(getGlobalContext(), Elts, false);
+ StructType *STy = StructType::get(getGlobalContext(), Elts, false);
unsigned Size = getTargetData().getTypeAllocSize(STy);
- const StructType *InSTy = dyn_cast<StructType>(Ty);
+ StructType *InSTy = dyn_cast<StructType>(Ty);
unsigned InSize = 0;
// If Ty and STy size does not match then last element is accessing
// extra bits.
@@ -322,7 +322,7 @@
InSize = getTargetData().getTypeAllocSize(InSTy);
if (InSize < Size) {
unsigned N = STy->getNumElements();
- const llvm::Type *LastEltTy = STy->getElementType(N-1);
+ llvm::Type *LastEltTy = STy->getElementType(N-1);
if (LastEltTy->isIntegerTy())
LastEltSizeDiff =
getTargetData().getTypeAllocSize(LastEltTy) - (Size - InSize);
Modified: llvm-gcc-4.2/trunk/gcc/llvm-abi.h
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/llvm-abi.h?rev=134982&r1=134981&r2=134982&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/llvm-abi.h (original)
+++ llvm-gcc-4.2/trunk/gcc/llvm-abi.h Tue Jul 12 09:06:48 2011
@@ -59,57 +59,57 @@
/// HandleScalarResult - This callback is invoked if the function returns a
/// simple scalar result value, which is of type RetTy.
- virtual void HandleScalarResult(const Type *RetTy) {}
+ virtual void HandleScalarResult(Type *RetTy) {}
/// HandleAggregateResultAsScalar - This callback is invoked if the function
/// returns an aggregate value by bit converting it to the specified scalar
/// type and returning that. The bit conversion should start at byte Offset
/// within the struct, and ScalarTy is not necessarily big enough to cover
/// the entire struct.
- virtual void HandleAggregateResultAsScalar(const Type *ScalarTy, unsigned Offset=0) {}
+ virtual void HandleAggregateResultAsScalar(Type *ScalarTy, unsigned Offset=0) {}
/// HandleAggregateResultAsAggregate - This callback is invoked if the function
/// returns an aggregate value using multiple return values.
- virtual void HandleAggregateResultAsAggregate(const Type *AggrTy) {}
+ virtual void HandleAggregateResultAsAggregate(Type *AggrTy) {}
/// HandleAggregateShadowResult - This callback is invoked if the function
/// returns an aggregate value by using a "shadow" first parameter, which is
/// a pointer to the aggregate, of type PtrArgTy. If RetPtr is set to true,
/// the pointer argument itself is returned from the function.
- virtual void HandleAggregateShadowResult(const PointerType *PtrArgTy, bool RetPtr){}
+ virtual void HandleAggregateShadowResult(PointerType *PtrArgTy, bool RetPtr){}
/// HandleScalarShadowResult - This callback is invoked if the function
/// returns a scalar value by using a "shadow" first parameter, which is a
/// pointer to the scalar, of type PtrArgTy. If RetPtr is set to true,
/// the pointer argument itself is returned from the function.
- virtual void HandleScalarShadowResult(const PointerType *PtrArgTy, bool RetPtr) {}
+ virtual void HandleScalarShadowResult(PointerType *PtrArgTy, bool RetPtr) {}
/// HandleScalarArgument - This is the primary callback that specifies an
/// LLVM argument to pass. It is only used for first class types.
/// If RealSize is non Zero then it specifies number of bytes to access
/// from LLVMTy.
- virtual void HandleScalarArgument(const llvm::Type *LLVMTy, tree type,
+ virtual void HandleScalarArgument(llvm::Type *LLVMTy, tree type,
unsigned RealSize = 0) {}
/// HandleByInvisibleReferenceArgument - This callback is invoked if a pointer
/// (of type PtrTy) to the argument is passed rather than the argument itself.
- virtual void HandleByInvisibleReferenceArgument(const llvm::Type *PtrTy, tree type) {}
+ virtual void HandleByInvisibleReferenceArgument(llvm::Type *PtrTy, tree type) {}
/// HandleByValArgument - This callback is invoked if the aggregate function
/// argument is passed by value.
- virtual void HandleByValArgument(const llvm::Type *LLVMTy, tree type) {}
+ virtual void HandleByValArgument(llvm::Type *LLVMTy, tree type) {}
/// HandleFCAArgument - This callback is invoked if the aggregate function
/// argument is passed by value as a first class aggregate.
- virtual void HandleFCAArgument(const llvm::Type * /*LLVMTy*/, tree /*type*/){}
+ virtual void HandleFCAArgument(llvm::Type * /*LLVMTy*/, tree /*type*/){}
/// EnterField - Called when we're about the enter the field of a struct
/// or union. FieldNo is the number of the element we are entering in the
/// LLVM Struct, StructTy is the LLVM type of the struct we are entering.
- virtual void EnterField(unsigned FieldNo, const llvm::Type *StructTy) {}
+ virtual void EnterField(unsigned FieldNo, llvm::Type *StructTy) {}
virtual void ExitField() {}
- virtual void HandlePad(const llvm::Type *LLVMTy) {}
+ virtual void HandlePad(llvm::Type *LLVMTy) {}
};
/// isAggregateTreeType - Return true if the specified GCC type is an aggregate
@@ -200,7 +200,7 @@
ignoreZeroLength, false)
: 0;
case ARRAY_TYPE:
- const ArrayType *Ty = dyn_cast<ArrayType>(ConvertType(type));
+ ArrayType *Ty = dyn_cast<ArrayType>(ConvertType(type));
if (!Ty || Ty->getNumElements() != 1)
return 0;
return isSingleElementStructOrArray(TREE_TYPE(type), false, false);
@@ -221,8 +221,8 @@
// returned as a scalar, otherwise return NULL. This is the default
// target independent implementation.
static inline
-const Type* getLLVMScalarTypeForStructReturn(tree type, unsigned *Offset) {
- const Type *Ty = ConvertType(type);
+Type* getLLVMScalarTypeForStructReturn(tree type, unsigned *Offset) {
+ Type *Ty = ConvertType(type);
unsigned Size = getTargetData().getTypeAllocSize(Ty);
*Offset = 0;
if (Size == 0)
@@ -246,7 +246,7 @@
// getLLVMAggregateTypeForStructReturn - Return LLVM type if TY can be
// returns as multiple values, otherwise return NULL. This is the default
// target independent implementation.
-static inline const Type* getLLVMAggregateTypeForStructReturn(tree type) {
+static inline Type* getLLVMAggregateTypeForStructReturn(tree type) {
return NULL;
}
@@ -407,31 +407,31 @@
/// on the client that indicate how its pieces should be handled. This
/// handles things like returning structures via hidden parameters.
void HandleReturnType(tree type, tree fn, bool isBuiltin,
- std::vector<const Type*> &ScalarElts);
+ std::vector<Type*> &ScalarElts);
/// HandleArgument - This is invoked by the target-independent code for each
/// argument type passed into the function. It potentially breaks down the
/// argument and invokes methods on the client that indicate how its pieces
/// should be handled. This handles things like decimating structures into
/// their fields.
- void HandleArgument(tree type, std::vector<const Type*> &ScalarElts,
+ void HandleArgument(tree type, std::vector<Type*> &ScalarElts,
Attributes *Attributes = NULL);
/// HandleUnion - Handle a UNION_TYPE or QUAL_UNION_TYPE tree.
///
- void HandleUnion(tree type, std::vector<const Type*> &ScalarElts);
+ void HandleUnion(tree type, std::vector<Type*> &ScalarElts);
/// PassInIntegerRegisters - Given an aggregate value that should be passed in
/// integer registers, convert it to a structure containing ints and pass all
/// of the struct elements in. If Size is set we pass only that many bytes.
- void PassInIntegerRegisters(tree type, std::vector<const Type*> &ScalarElts,
+ void PassInIntegerRegisters(tree type, std::vector<Type*> &ScalarElts,
unsigned origSize, bool DontCheckAlignment);
/// PassInMixedRegisters - Given an aggregate value that should be passed in
/// mixed integer, floating point, and vector registers, convert it to a
/// structure containing the specified struct elements in.
- void PassInMixedRegisters(const Type *Ty, std::vector<const Type*> &OrigElts,
- std::vector<const Type*> &ScalarElts);
+ void PassInMixedRegisters(Type *Ty, std::vector<Type*> &OrigElts,
+ std::vector<Type*> &ScalarElts);
};
#endif /* LLVM_ABI_H */
Modified: llvm-gcc-4.2/trunk/gcc/llvm-backend.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/llvm-backend.cpp?rev=134982&r1=134981&r2=134982&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/llvm-backend.cpp (original)
+++ llvm-gcc-4.2/trunk/gcc/llvm-backend.cpp Tue Jul 12 09:06:48 2011
@@ -915,8 +915,7 @@
LLVMContext &Context = getGlobalContext();
const Type *FPTy =
- FunctionType::get(Type::getVoidTy(Context),
- std::vector<const Type*>(), false);
+ FunctionType::get(Type::getVoidTy(Context), std::vector<Type*>(), false);
FPTy = FPTy->getPointerTo();
for (unsigned i = 0, e = Tors.size(); i != e; ++i) {
Modified: llvm-gcc-4.2/trunk/gcc/llvm-convert.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/llvm-convert.cpp?rev=134982&r1=134981&r2=134982&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/llvm-convert.cpp (original)
+++ llvm-gcc-4.2/trunk/gcc/llvm-convert.cpp Tue Jul 12 09:06:48 2011
@@ -278,7 +278,7 @@
/// getCallingConv - This provides the desired CallingConv for the function.
CallingConv::ID& getCallingConv(void) { return CallingConv; }
- void HandlePad(const llvm::Type *LLVMTy) {
+ void HandlePad(llvm::Type *LLVMTy) {
++AI;
}
@@ -297,7 +297,7 @@
LocStack.clear();
}
- void HandleAggregateShadowResult(const PointerType *PtrArgTy,
+ void HandleAggregateShadowResult(PointerType *PtrArgTy,
bool RetPtr) {
// If the function returns a structure by value, we transform the function
// to take a pointer to the result as the first argument of the function
@@ -332,7 +332,7 @@
++AI;
}
- void HandleScalarShadowResult(const PointerType *PtrArgTy, bool RetPtr) {
+ void HandleScalarShadowResult(PointerType *PtrArgTy, bool RetPtr) {
assert(AI != Builder.GetInsertBlock()->getParent()->arg_end() &&
"No explicit return value?");
AI->setName("scalar.result");
@@ -341,7 +341,7 @@
++AI;
}
- void HandleScalarArgument(const llvm::Type *LLVMTy, tree type,
+ void HandleScalarArgument(llvm::Type *LLVMTy, tree type,
unsigned RealSize = 0) {
Value *ArgVal = AI;
LLVMTy = LLVM_ADJUST_MMX_PARAMETER_TYPE(LLVMTy);
@@ -371,7 +371,7 @@
++AI;
}
- void HandleByValArgument(const llvm::Type *LLVMTy, tree type) {
+ void HandleByValArgument(llvm::Type *LLVMTy, tree type) {
if (LLVM_BYVAL_ALIGNMENT_TOO_SMALL(type)) {
// Incoming object on stack is insufficiently aligned for the type.
// Make a correctly aligned copy.
@@ -381,8 +381,8 @@
// bytes, but only 10 are copied. If the object is really a union
// we might need the other bytes. We must also be careful to use
// the smaller alignment.
- const Type *SBP = Type::getInt8PtrTy(Context);
- const Type *IntPtr = getTargetData().getIntPtrType(Context);
+ Type *SBP = Type::getInt8PtrTy(Context);
+ Type *IntPtr = getTargetData().getIntPtrType(Context);
Value *Ops[5] = {
Builder.CreateCast(Instruction::BitCast, Loc, SBP),
Builder.CreateCast(Instruction::BitCast, AI, SBP),
@@ -392,7 +392,7 @@
LLVM_BYVAL_ALIGNMENT(type)),
ConstantInt::get(Type::getInt1Ty(Context), false)
};
- const Type *ArgTypes[3] = {SBP, SBP, IntPtr };
+ Type *ArgTypes[3] = {SBP, SBP, IntPtr };
Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
Intrinsic::memcpy,
ArgTypes, 3), Ops, Ops+5);
@@ -402,7 +402,7 @@
++AI;
}
- void HandleFCAArgument(const llvm::Type *LLVMTy, tree /*type*/) {
+ void HandleFCAArgument(llvm::Type *LLVMTy, tree /*type*/) {
// Store the FCA argument into alloca.
assert(!LocStack.empty());
Value *Loc = LocStack.back();
@@ -411,11 +411,11 @@
++AI;
}
- void HandleAggregateResultAsScalar(const Type *ScalarTy, unsigned Offset=0){
+ void HandleAggregateResultAsScalar(Type *ScalarTy, unsigned Offset=0){
this->Offset = Offset;
}
- void EnterField(unsigned FieldNo, const llvm::Type *StructTy) {
+ void EnterField(unsigned FieldNo, llvm::Type *StructTy) {
NameStack.push_back(NameStack.back()+"."+utostr(FieldNo));
Value *Loc = LocStack.back();
@@ -434,13 +434,13 @@
// isPassedByVal - Return true if an aggregate of the specified type will be
// passed in memory byval.
-static bool isPassedByVal(tree type, const Type *Ty,
- std::vector<const Type*> &ScalarArgs,
+static bool isPassedByVal(tree type, Type *Ty,
+ std::vector<Type*> &ScalarArgs,
CallingConv::ID &CC) {
if (LLVM_SHOULD_PASS_AGGREGATE_USING_BYVAL_ATTR(type, Ty, CC))
return true;
- std::vector<const Type*> Args;
+ std::vector<Type*> Args;
if (LLVM_SHOULD_PASS_AGGREGATE_IN_MIXED_REGS(type, Ty, CC, Args) &&
LLVM_AGGREGATE_PARTIALLY_PASSED_IN_REGS(Args, ScalarArgs,
CC))
@@ -677,7 +677,7 @@
DefaultABI ABIConverter(Client);
// Scalar arguments processed so far.
- std::vector<const Type*> ScalarArgs;
+ std::vector<Type*> ScalarArgs;
// Handle the DECL_RESULT.
ABIConverter.HandleReturnType(TREE_TYPE(TREE_TYPE(FnDecl)), FnDecl,
@@ -693,7 +693,7 @@
const char *Name = "unnamed_arg";
if (DECL_NAME(Args)) Name = IDENTIFIER_POINTER(DECL_NAME(Args));
- const Type *ArgTy = ConvertType(TREE_TYPE(Args));
+ Type *ArgTy = ConvertType(TREE_TYPE(Args));
bool isInvRef = isPassedByInvisibleReference(TREE_TYPE(Args));
if (isInvRef ||
(ArgTy->isVectorTy() &&
@@ -1655,8 +1655,8 @@
Value *TreeToLLVM::EmitMemCpy(Value *DestPtr, Value *SrcPtr, Value *Size,
unsigned Align) {
- const Type *SBP = Type::getInt8PtrTy(Context);
- const Type *IntPtr = TD.getIntPtrType(Context);
+ Type *SBP = Type::getInt8PtrTy(Context);
+ Type *IntPtr = TD.getIntPtrType(Context);
Value *Ops[5] = {
BitCastToType(DestPtr, SBP),
BitCastToType(SrcPtr, SBP),
@@ -1665,7 +1665,7 @@
ConstantInt::get(Type::getInt1Ty(Context), false)
};
- const Type *ArgTypes[3] = {SBP, SBP, IntPtr };
+ Type *ArgTypes[3] = {SBP, SBP, IntPtr };
Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::memcpy,
ArgTypes, 3), Ops, Ops+5);
return Ops[0];
@@ -1673,8 +1673,8 @@
Value *TreeToLLVM::EmitMemMove(Value *DestPtr, Value *SrcPtr, Value *Size,
unsigned Align) {
- const Type *SBP = Type::getInt8PtrTy(Context);
- const Type *IntPtr = TD.getIntPtrType(Context);
+ Type *SBP = Type::getInt8PtrTy(Context);
+ Type *IntPtr = TD.getIntPtrType(Context);
Value *Ops[5] = {
BitCastToType(DestPtr, SBP),
BitCastToType(SrcPtr, SBP),
@@ -1682,7 +1682,7 @@
ConstantInt::get(Type::getInt32Ty(Context), Align),
ConstantInt::get(Type::getInt1Ty(Context), false)
};
- const Type *ArgTypes[3] = {SBP, SBP, IntPtr };
+ Type *ArgTypes[3] = {SBP, SBP, IntPtr };
Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::memmove,
ArgTypes, 3), Ops, Ops+5);
@@ -1691,8 +1691,8 @@
Value *TreeToLLVM::EmitMemSet(Value *DestPtr, Value *SrcVal, Value *Size,
unsigned Align) {
- const Type *SBP = Type::getInt8PtrTy(Context);
- const Type *IntPtr = TD.getIntPtrType(Context);
+ Type *SBP = Type::getInt8PtrTy(Context);
+ Type *IntPtr = TD.getIntPtrType(Context);
Value *Ops[5] = {
BitCastToType(DestPtr, SBP),
CastToSIntType(SrcVal, Type::getInt8Ty(Context)),
@@ -1701,7 +1701,7 @@
ConstantInt::get(Type::getInt1Ty(Context), false)
};
- const Type *ArgTypes[2] = {SBP, IntPtr };
+ Type *ArgTypes[2] = {SBP, IntPtr };
Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::memset,
ArgTypes, 2), Ops, Ops+5);
return Ops[0];
@@ -2744,7 +2744,7 @@
struct FunctionCallArgumentConversion : public DefaultABIClient {
SmallVector<Value*, 16> &CallOperands;
SmallVector<Value*, 2> LocStack;
- const FunctionType *FTy;
+ FunctionType *FTy;
const MemRef *DestLoc;
bool useReturnSlot;
LLVMBuilder &Builder;
@@ -2756,7 +2756,7 @@
unsigned Offset;
FunctionCallArgumentConversion(SmallVector<Value*, 16> &ops,
- const FunctionType *FnTy,
+ FunctionType *FnTy,
const MemRef *destloc,
bool ReturnSlotOpt,
LLVMBuilder &b,
@@ -2838,7 +2838,7 @@
/// HandleScalarResult - This callback is invoked if the function returns a
/// simple scalar result value.
- void HandleScalarResult(const Type *RetTy) {
+ void HandleScalarResult(Type *RetTy) {
// There is nothing to do here if we return a scalar or void.
assert(DestLoc == 0 &&
"Call returns a scalar but caller expects aggregate!");
@@ -2847,14 +2847,14 @@
/// HandleAggregateResultAsScalar - This callback is invoked if the function
/// returns an aggregate value by bit converting it to the specified scalar
/// type and returning that.
- void HandleAggregateResultAsScalar(const Type *ScalarTy,
+ void HandleAggregateResultAsScalar(Type *ScalarTy,
unsigned Offset = 0) {
this->Offset = Offset;
}
/// HandleAggregateResultAsAggregate - This callback is invoked if the
/// function returns an aggregate value using multiple return values.
- void HandleAggregateResultAsAggregate(const Type *AggrTy) {
+ void HandleAggregateResultAsAggregate(Type *AggrTy) {
// There is nothing to do here.
isAggrRet = true;
}
@@ -2863,7 +2863,7 @@
/// returns an aggregate value by using a "shadow" first parameter. If
/// RetPtr is set to true, the pointer argument itself is returned from the
/// function.
- void HandleAggregateShadowResult(const PointerType *PtrArgTy,
+ void HandleAggregateShadowResult(PointerType *PtrArgTy,
bool RetPtr) {
// We need to pass memory to write the return value into.
// FIXME: alignment and volatility are being ignored!
@@ -2889,7 +2889,7 @@
isShadowRet = true;
}
- void HandlePad(const llvm::Type *LLVMTy) {
+ void HandlePad(llvm::Type *LLVMTy) {
CallOperands.push_back(UndefValue::get(LLVMTy));
}
@@ -2897,7 +2897,7 @@
/// returns a scalar value by using a "shadow" first parameter, which is a
/// pointer to the scalar, of type PtrArgTy. If RetPtr is set to true,
/// the pointer argument itself is returned from the function.
- void HandleScalarShadowResult(const PointerType *PtrArgTy, bool RetPtr) {
+ void HandleScalarShadowResult(PointerType *PtrArgTy, bool RetPtr) {
assert(DestLoc == 0 &&
"Call returns a scalar but caller expects aggregate!");
// Create a buffer to hold the result. The result will be loaded out of
@@ -2911,7 +2911,7 @@
/// HandleScalarArgument - This is the primary callback that specifies an
/// LLVM argument to pass. It is only used for first class types.
- void HandleScalarArgument(const llvm::Type *LLVMTy, tree type,
+ void HandleScalarArgument(llvm::Type *LLVMTy, tree type,
unsigned RealSize = 0) {
Value *Loc = NULL;
if (RealSize) {
@@ -2936,7 +2936,7 @@
/// HandleByInvisibleReferenceArgument - This callback is invoked if a
/// pointer (of type PtrTy) to the argument is passed rather than the
/// argument itself.
- void HandleByInvisibleReferenceArgument(const llvm::Type *PtrTy, tree type){
+ void HandleByInvisibleReferenceArgument(llvm::Type *PtrTy, tree type){
Value *Loc = getAddress();
Loc = Builder.CreateBitCast(Loc, PtrTy);
CallOperands.push_back(Loc);
@@ -2945,7 +2945,7 @@
/// HandleByValArgument - This callback is invoked if the aggregate function
/// argument is passed by value. It is lowered to a parameter passed by
/// reference with an additional parameter attribute "ByVal".
- void HandleByValArgument(const llvm::Type *LLVMTy, tree type) {
+ void HandleByValArgument(llvm::Type *LLVMTy, tree type) {
Value *Loc = getAddress();
assert(LLVMTy->getPointerTo() == Loc->getType());
CallOperands.push_back(Loc);
@@ -2953,7 +2953,7 @@
/// HandleFCAArgument - This callback is invoked if the aggregate function
/// argument is passed as a first class aggregate.
- void HandleFCAArgument(const llvm::Type *LLVMTy, tree /*type*/) {
+ void HandleFCAArgument(llvm::Type *LLVMTy, tree /*type*/) {
Value *Loc = getAddress();
assert(LLVMTy->getPointerTo() == Loc->getType());
CallOperands.push_back(Builder.CreateLoad(Loc));
@@ -2962,7 +2962,7 @@
/// EnterField - Called when we're about the enter the field of a struct
/// or union. FieldNo is the number of the element we are entering in the
/// LLVM Struct, StructTy is the LLVM type of the struct we are entering.
- void EnterField(unsigned FieldNo, const llvm::Type *StructTy) {
+ void EnterField(unsigned FieldNo, llvm::Type *StructTy) {
Value *Loc = getAddress();
Loc = Builder.CreateBitCast(Loc, StructTy->getPointerTo());
pushAddress(Builder.CreateStructGEP(Loc, FieldNo, "elt"));
@@ -3026,15 +3026,15 @@
#endif
SmallVector<Value*, 16> CallOperands;
- const PointerType *PFTy = cast<PointerType>(Callee->getType());
- const FunctionType *FTy = cast<FunctionType>(PFTy->getElementType());
+ PointerType *PFTy = cast<PointerType>(Callee->getType());
+ FunctionType *FTy = cast<FunctionType>(PFTy->getElementType());
FunctionCallArgumentConversion Client(CallOperands, FTy, DestLoc,
CALL_EXPR_RETURN_SLOT_OPT(exp),
Builder, CallingConvention);
DefaultABI ABIConverter(Client);
// Handle the result, including struct returns.
- std::vector<const Type*> ScalarArgs;
+ std::vector<Type*> ScalarArgs;
ABIConverter.HandleReturnType(TREE_TYPE(exp),
fndecl ? fndecl : exp,
fndecl ? DECL_BUILT_IN(fndecl) : false,
@@ -3047,7 +3047,7 @@
// Loop over the arguments, expanding them and adding them to the op list.
for (tree arg = TREE_OPERAND(exp, 1); arg; arg = TREE_CHAIN(arg)) {
tree type = TREE_TYPE(TREE_VALUE(arg));
- const Type *ArgTy = ConvertType(type);
+ Type *ArgTy = ConvertType(type);
// Push the argument.
if (ArgTy->isSingleValueType()) {
@@ -4354,7 +4354,7 @@
}
// Turn this into a 'tmp = call Ty asm "", "={reg}"()'.
- FunctionType *FTy = FunctionType::get(Ty, std::vector<const Type*>(),false);
+ FunctionType *FTy = FunctionType::get(Ty, std::vector<Type*>(),false);
const char *Name = extractRegisterName(decl);
int RegNum = decode_reg_name(Name);
@@ -4370,7 +4370,7 @@
/// that copies the value out of the specified register.
Value *TreeToLLVM::EmitMoveOfRegVariableToRightReg(Instruction *I, tree var) {
// Create a 'call void asm sideeffect "", "{reg}"(Ty %RHS)'.
- const Type *Ty = I->getType();
+ Type *Ty = I->getType();
// If there was an error, return something bogus.
if (ValidateRegisterVariable(var)) {
@@ -4379,7 +4379,7 @@
return 0; // Just don't copy something into DestLoc.
}
- std::vector<const Type*> ArgTys;
+ std::vector<Type*> ArgTys;
ArgTys.push_back(Ty);
FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context),
ArgTys, false);
@@ -4392,7 +4392,7 @@
Call->setDoesNotThrow();
// Create another asm with the same reg, this time producing an output.
// Turn this into a 'tmp = call Ty asm "", "={reg}"()'.
- FunctionType *FTy2 = FunctionType::get(Ty, std::vector<const Type*>(),
+ FunctionType *FTy2 = FunctionType::get(Ty, std::vector<Type*>(),
false);
InlineAsm *IA2 = InlineAsm::get(FTy2, "", "={"+std::string(Name)+"}",
true);
@@ -4409,8 +4409,8 @@
return;
// Turn this into a 'call void asm sideeffect "", "{reg}"(Ty %RHS)'.
- std::vector<const Type*> ArgTys;
- const Type* Ty = ConvertType(TREE_TYPE(decl));
+ std::vector<Type*> ArgTys;
+ Type* Ty = ConvertType(TREE_TYPE(decl));
if (LLVM_IS_DECL_MMX_REGISTER(decl))
Ty = Type::getX86_MMXTy(Context);
ArgTys.push_back(Ty);
@@ -4869,14 +4869,14 @@
}
std::vector<Value*> CallOps;
- std::vector<const Type*> CallArgTypes;
+ std::vector<Type*> CallArgTypes;
std::string NewAsmStr = ConvertInlineAsmStr(exp, NumOutputs+NumInputs);
std::string ConstraintStr;
bool HasSideEffects = ASM_VOLATILE_P(exp) || !ASM_OUTPUTS(exp);
// StoreCallResultAddr - The pointer to store the result of the call through.
SmallVector<Value *, 4> StoreCallResultAddrs;
- SmallVector<const Type *, 4> CallResultTypes;
+ SmallVector<Type *, 4> CallResultTypes;
SmallVector<bool, 4> CallResultIsSigned;
SmallVector<std::pair<bool, unsigned>, 4> OutputLocations;
@@ -4936,8 +4936,7 @@
}
LValue Dest = EmitLV(Operand);
- const Type *DestValTy =
- cast<PointerType>(Dest.Ptr->getType())->getElementType();
+ Type *DestValTy = cast<PointerType>(Dest.Ptr->getType())->getElementType();
assert(!Dest.isBitfield() && "Cannot assign into a bitfield!");
if (!AllowsMem && DestValTy->isSingleValueType()) {// Reg dest -> asm return
@@ -5077,7 +5076,7 @@
}
}
- const Type* AdjTy = LLVM_ADJUST_MMX_INLINE_PARAMETER_TYPE(
+ Type* AdjTy = LLVM_ADJUST_MMX_INLINE_PARAMETER_TYPE(
Constraint, Op->getType());
if (AdjTy != Op->getType())
Op = BitCastToType(Op, AdjTy);
@@ -5158,9 +5157,7 @@
case 0: CallResultType = Type::getVoidTy(Context); break;
case 1: CallResultType = CallResultTypes[0]; break;
default:
- std::vector<const Type*> TmpVec(CallResultTypes.begin(),
- CallResultTypes.end());
- CallResultType = StructType::get(Context, TmpVec);
+ CallResultType = StructType::get(Context, CallResultTypes);
break;
}
@@ -5308,7 +5305,7 @@
Value *&Result) {
#ifdef LLVM_TARGET_INTRINSIC_LOWER
// Get the result type and operand line in an easy to consume format.
- const Type *ResultType = ConvertType(TREE_TYPE(TREE_TYPE(fndecl)));
+ Type *ResultType = ConvertType(TREE_TYPE(TREE_TYPE(fndecl)));
std::vector<Value*> Operands;
for (tree Op = TREE_OPERAND(exp, 1); Op; Op = TREE_CHAIN(Op)) {
tree OpVal = TREE_VALUE(Op);
@@ -5352,13 +5349,13 @@
Value *
TreeToLLVM::BuildBinaryAtomicBuiltin(tree exp, Intrinsic::ID id) {
- const Type *ResultTy = ConvertType(TREE_TYPE(exp));
+ Type *ResultTy = ConvertType(TREE_TYPE(exp));
tree arglist = TREE_OPERAND(exp, 1);
Value* C[2] = {
Emit(TREE_VALUE(arglist), 0),
Emit(TREE_VALUE(TREE_CHAIN(arglist)), 0)
};
- const Type* Ty[2];
+ Type* Ty[2];
Ty[0] = ResultTy;
Ty[1] = ResultTy->getPointerTo();
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
@@ -5391,14 +5388,14 @@
Value *
TreeToLLVM::BuildCmpAndSwapAtomicBuiltin(tree exp, tree type, bool isBool) {
- const Type *ResultTy = ConvertType(type);
+ Type *ResultTy = ConvertType(type);
tree arglist = TREE_OPERAND(exp, 1);
Value* C[3] = {
Emit(TREE_VALUE(arglist), 0),
Emit(TREE_VALUE(TREE_CHAIN(arglist)), 0),
Emit(TREE_VALUE(TREE_CHAIN(TREE_CHAIN(arglist))), 0)
};
- const Type* Ty[2];
+ Type* Ty[2];
Ty[0] = ResultTy;
Ty[1] = ResultTy->getPointerTo();
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
@@ -5571,7 +5568,7 @@
};
// Grab the current return type.
- const Type* Ty;
+ Type* Ty;
Ty = ConvertType(TREE_TYPE(exp));
// Manually coerce the arg to the correct pointer type.
@@ -5767,7 +5764,7 @@
// Get arguments.
tree arglist = TREE_OPERAND(exp, 1);
Value *ExprVal = Emit(TREE_VALUE(arglist), 0);
- const Type *Ty = ExprVal->getType();
+ Type *Ty = ExprVal->getType();
Value *StrVal = Emit(TREE_VALUE(TREE_CHAIN(arglist)), 0);
SmallVector<Value *, 4> Args;
@@ -5927,13 +5924,13 @@
case BUILT_IN_ADD_AND_FETCH_1:
case BUILT_IN_ADD_AND_FETCH_2:
case BUILT_IN_ADD_AND_FETCH_4: {
- const Type *ResultTy = ConvertType(TREE_TYPE(exp));
+ Type *ResultTy = ConvertType(TREE_TYPE(exp));
tree arglist = TREE_OPERAND(exp, 1);
Value* C[2] = {
Emit(TREE_VALUE(arglist), 0),
Emit(TREE_VALUE(TREE_CHAIN(arglist)), 0)
};
- const Type* Ty[2];
+ Type* Ty[2];
Ty[0] = ResultTy;
Ty[1] = ResultTy->getPointerTo();
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
@@ -5974,13 +5971,13 @@
case BUILT_IN_SUB_AND_FETCH_1:
case BUILT_IN_SUB_AND_FETCH_2:
case BUILT_IN_SUB_AND_FETCH_4: {
- const Type *ResultTy = ConvertType(TREE_TYPE(exp));
+ Type *ResultTy = ConvertType(TREE_TYPE(exp));
tree arglist = TREE_OPERAND(exp, 1);
Value* C[2] = {
Emit(TREE_VALUE(arglist), 0),
Emit(TREE_VALUE(TREE_CHAIN(arglist)), 0)
};
- const Type* Ty[2];
+ Type* Ty[2];
Ty[0] = ResultTy;
Ty[1] = ResultTy->getPointerTo();
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
@@ -6021,13 +6018,13 @@
case BUILT_IN_OR_AND_FETCH_1:
case BUILT_IN_OR_AND_FETCH_2:
case BUILT_IN_OR_AND_FETCH_4: {
- const Type *ResultTy = ConvertType(TREE_TYPE(exp));
+ Type *ResultTy = ConvertType(TREE_TYPE(exp));
tree arglist = TREE_OPERAND(exp, 1);
Value* C[2] = {
Emit(TREE_VALUE(arglist), 0),
Emit(TREE_VALUE(TREE_CHAIN(arglist)), 0)
};
- const Type* Ty[2];
+ Type* Ty[2];
Ty[0] = ResultTy;
Ty[1] = ResultTy->getPointerTo();
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
@@ -6068,13 +6065,13 @@
case BUILT_IN_AND_AND_FETCH_1:
case BUILT_IN_AND_AND_FETCH_2:
case BUILT_IN_AND_AND_FETCH_4: {
- const Type *ResultTy = ConvertType(TREE_TYPE(exp));
+ Type *ResultTy = ConvertType(TREE_TYPE(exp));
tree arglist = TREE_OPERAND(exp, 1);
Value* C[2] = {
Emit(TREE_VALUE(arglist), 0),
Emit(TREE_VALUE(TREE_CHAIN(arglist)), 0)
};
- const Type* Ty[2];
+ Type* Ty[2];
Ty[0] = ResultTy;
Ty[1] = ResultTy->getPointerTo();
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
@@ -6115,13 +6112,13 @@
case BUILT_IN_XOR_AND_FETCH_1:
case BUILT_IN_XOR_AND_FETCH_2:
case BUILT_IN_XOR_AND_FETCH_4: {
- const Type *ResultTy = ConvertType(TREE_TYPE(exp));
+ Type *ResultTy = ConvertType(TREE_TYPE(exp));
tree arglist = TREE_OPERAND(exp, 1);
Value* C[2] = {
Emit(TREE_VALUE(arglist), 0),
Emit(TREE_VALUE(TREE_CHAIN(arglist)), 0)
};
- const Type* Ty[2];
+ Type* Ty[2];
Ty[0] = ResultTy;
Ty[1] = ResultTy->getPointerTo();
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
@@ -6162,13 +6159,13 @@
case BUILT_IN_NAND_AND_FETCH_1:
case BUILT_IN_NAND_AND_FETCH_2:
case BUILT_IN_NAND_AND_FETCH_4: {
- const Type *ResultTy = ConvertType(TREE_TYPE(exp));
+ Type *ResultTy = ConvertType(TREE_TYPE(exp));
tree arglist = TREE_OPERAND(exp, 1);
Value* C[2] = {
Emit(TREE_VALUE(arglist), 0),
Emit(TREE_VALUE(TREE_CHAIN(arglist)), 0)
};
- const Type* Ty[2];
+ Type* Ty[2];
Ty[0] = ResultTy;
Ty[1] = ResultTy->getPointerTo();
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
@@ -6293,7 +6290,7 @@
// varying type. Make sure that we specify the actual type for "iAny"
// by passing it as the 3rd and 4th parameters. This isn't needed for
// most intrinsics, but is needed for ctpop, cttz, ctlz.
- const Type *Ty = InVal->getType();
+ Type *Ty = InVal->getType();
Result = Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Id, &Ty, 1),
InVal);
return true;
@@ -6301,7 +6298,7 @@
Value *TreeToLLVM::EmitBuiltinSQRT(tree exp) {
Value *Amt = Emit(TREE_VALUE(TREE_OPERAND(exp, 1)), 0);
- const Type* Ty = Amt->getType();
+ Type* Ty = Amt->getType();
return Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
Intrinsic::sqrt, &Ty, 1),
@@ -6315,7 +6312,7 @@
Value *Val = Emit(TREE_VALUE(ArgList), 0);
Value *Pow = Emit(TREE_VALUE(TREE_CHAIN(ArgList)), 0);
- const Type *Ty = Val->getType();
+ Type *Ty = Val->getType();
Pow = CastToSIntType(Pow, Type::getInt32Ty(Context));
SmallVector<Value *,2> Args;
@@ -6333,7 +6330,7 @@
Value *Val = Emit(TREE_VALUE(ArgList), 0);
Value *Pow = Emit(TREE_VALUE(TREE_CHAIN(ArgList)), 0);
- const Type *Ty = Val->getType();
+ Type *Ty = Val->getType();
SmallVector<Value *,2> Args;
Args.push_back(Val);
@@ -7079,7 +7076,7 @@
Value *TreeToLLVM::EmitFieldAnnotation(Value *FieldPtr, tree FieldDecl) {
tree AnnotateAttr = lookup_attribute("annotate", DECL_ATTRIBUTES(FieldDecl));
- const Type *SBP = Type::getInt8PtrTy(Context);
+ Type *SBP = Type::getInt8PtrTy(Context);
Function *Fn = Intrinsic::getDeclaration(TheModule,
Intrinsic::ptr_annotation,
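As an illustration (not part of the patch itself): a minimal standalone sketch of the call-site shape after de-constification in the llvm-convert.cpp hunks above, where the argument list handed to FunctionType::get() now holds plain Type* elements. Header paths and the exact FunctionType::get(Result, Params, isVarArg) overload are assumed from the LLVM tree of that period.
  // Illustration only: mirrors the "std::vector<Type*> ArgTys" pattern above.
  #include "llvm/DerivedTypes.h"
  #include "llvm/LLVMContext.h"
  #include <vector>
  using namespace llvm;

  FunctionType *makeVoidFnTaking(Type *Ty, LLVMContext &Context) {
    std::vector<Type*> ArgTys;   // elements are Type*, no longer const Type*
    ArgTys.push_back(Ty);
    // Suitable for a 'call void asm sideeffect "", "{reg}"(Ty %RHS)'-style call.
    return FunctionType::get(Type::getVoidTy(Context), ArgTys, /*isVarArg=*/false);
  }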
Modified: llvm-gcc-4.2/trunk/gcc/llvm-internal.h
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/llvm-internal.h?rev=134982&r1=134981&r2=134982&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/llvm-internal.h (original)
+++ llvm-gcc-4.2/trunk/gcc/llvm-internal.h Tue Jul 12 09:06:48 2011
@@ -129,7 +129,7 @@
public:
TypeConverter() : RecursionStatus(CS_Normal) {}
- const Type *ConvertType(tree_node *type);
+ Type *ConvertType(tree_node *type);
/// GCCTypeOverlapsWithLLVMTypePadding - Return true if the specified GCC type
/// has any data that overlaps with structure padding in the specified LLVM
@@ -140,11 +140,11 @@
/// ConvertFunctionType - Convert the specified FUNCTION_TYPE or METHOD_TYPE
/// tree to an LLVM type. This does the same thing that ConvertType does, but
/// it also returns the function's LLVM calling convention and attributes.
- const FunctionType *ConvertFunctionType(tree_node *type,
- tree_node *decl,
- tree_node *static_chain,
- CallingConv::ID &CallingConv,
- AttrListPtr &PAL);
+ FunctionType *ConvertFunctionType(tree_node *type,
+ tree_node *decl,
+ tree_node *static_chain,
+ CallingConv::ID &CallingConv,
+ AttrListPtr &PAL);
/// ConvertArgListToFnType - Given a DECL_ARGUMENTS list on an GCC tree,
/// return the LLVM type corresponding to the function. This is useful for
@@ -156,8 +156,8 @@
AttrListPtr &PAL);
private:
- const Type *ConvertRECORD(tree_node *type, tree_node *orig_type);
- const Type *ConvertUNION(tree_node *type, tree_node *orig_type);
+ Type *ConvertRECORD(tree_node *type, tree_node *orig_type);
+ Type *ConvertUNION(tree_node *type, tree_node *orig_type);
bool DecodeStructFields(tree_node *Field, StructTypeConversionInfo &Info);
void DecodeStructBitField(tree_node *Field, StructTypeConversionInfo &Info);
void SelectUnionMember(tree_node *type, StructTypeConversionInfo &Info);
@@ -167,7 +167,7 @@
/// ConvertType - Convert the specified tree type to an LLVM type.
///
-inline const Type *ConvertType(tree_node *type) {
+inline Type *ConvertType(tree_node *type) {
return TheTypeConverter->ConvertType(type);
}
@@ -615,7 +615,7 @@
unsigned FnCode,
const MemRef *DestLoc,
Value *&Result,
- const Type *ResultType,
+ Type *ResultType,
std::vector<Value*> &Ops);
public:
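A brief illustrative note on why these llvm-internal.h signatures drop const: under the rewritten type system, a named struct handed out by the converter may need its body filled in later, and that mutation is only expressible through a non-const pointer. The sketch below is hypothetical and assumes the StructType::createNamed/setBody API of that period (createNamed also appears in the llvm-types.cpp hunks further down).
  #include "llvm/DerivedTypes.h"
  #include "llvm/LLVMContext.h"
  #include <vector>
  using namespace llvm;

  StructType *makeRecordSkeleton(LLVMContext &Context) {
    // Create a named, initially opaque struct (as a forward declaration might)...
    StructType *STy = StructType::createNamed(Context, "struct.example");
    // ...and complete it later; doing so requires Type*, not const Type*.
    std::vector<Type*> Fields;
    Fields.push_back(Type::getInt32Ty(Context));
    Fields.push_back(Type::getDoubleTy(Context));
    STy->setBody(Fields, /*isPacked=*/false);
    return STy;
  }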
Modified: llvm-gcc-4.2/trunk/gcc/llvm-types.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/llvm-types.cpp?rev=134982&r1=134981&r2=134982&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/llvm-types.cpp (original)
+++ llvm-gcc-4.2/trunk/gcc/llvm-types.cpp Tue Jul 12 09:06:48 2011
@@ -59,8 +59,8 @@
// a map.
// Collection of LLVM Types
-static std::vector<const Type *> LTypes;
-typedef DenseMap<const Type *, unsigned> LTypesMapTy;
+static std::vector<Type *> LTypes;
+typedef DenseMap<Type *, unsigned> LTypesMapTy;
static LTypesMapTy LTypesMap;
static LLVMContext &Context = getGlobalContext();
@@ -72,7 +72,7 @@
(TYPE_CHECK (NODE)->type.symtab.llvm = index)
// Note down LLVM type for GCC tree node.
-static const Type * llvm_set_type(tree Tr, const Type *Ty) {
+static Type * llvm_set_type(tree Tr, Type *Ty) {
#ifndef NDEBUG
// For x86 long double, llvm records the size of the data (80) while
// gcc's TYPE_SIZE including alignment padding. getTypeAllocSizeInBits
@@ -107,12 +107,12 @@
return Ty;
}
-#define SET_TYPE_LLVM(NODE, TYPE) (const Type *)llvm_set_type(NODE, TYPE)
+#define SET_TYPE_LLVM(NODE, TYPE) llvm_set_type(NODE, TYPE)
// Get LLVM Type for the GCC tree node based on LTypes vector index.
// When GCC tree node is initialized, it has 0 as the index value. This is
// why all recorded indexes are offset by 1.
-extern "C" const Type *llvm_get_type(unsigned Index) {
+extern "C" Type *llvm_get_type(unsigned Index) {
if (Index == 0)
return NULL;
assert ((Index - 1) < LTypes.size() && "Invalid LLVM Type index");
@@ -120,10 +120,10 @@
}
#define GET_TYPE_LLVM(NODE) \
- (const Type *)llvm_get_type( TYPE_CHECK (NODE)->type.symtab.llvm)
+ llvm_get_type( TYPE_CHECK (NODE)->type.symtab.llvm)
// Erase type from LTypes vector
-static void llvmEraseLType(const Type *Ty) {
+static void llvmEraseLType(Type *Ty) {
LTypesMapTy::iterator I = LTypesMap.find(Ty);
if (I != LTypesMap.end()) {
@@ -161,7 +161,7 @@
return;
// Convert the LTypes list to a list of pointers.
- std::vector<const Type*> PTys;
+ std::vector<Type*> PTys;
for (unsigned i = 0, e = LTypes.size(); i != e; ++i) {
// Cannot form pointer to void. Use i8 as a sentinel.
if (LTypes[i]->isVoidTy())
@@ -488,7 +488,7 @@
// Main Type Conversion Routines
//===----------------------------------------------------------------------===//
-const Type *TypeConverter::ConvertType(tree orig_type) {
+Type *TypeConverter::ConvertType(tree orig_type) {
if (orig_type == error_mark_node) return Type::getInt32Ty(Context);
// LLVM doesn't care about variants such as const, volatile, or restrict.
@@ -504,7 +504,7 @@
case QUAL_UNION_TYPE:
case UNION_TYPE: return ConvertRECORD(type, orig_type);
case BOOLEAN_TYPE: {
- if (const Type *Ty = GET_TYPE_LLVM(type))
+ if (Type *Ty = GET_TYPE_LLVM(type))
return Ty;
return SET_TYPE_LLVM(type,
IntegerType::get(Context, TREE_INT_CST_LOW(TYPE_SIZE(type))));
@@ -513,25 +513,25 @@
// Use of an enum that is implicitly declared?
if (TYPE_SIZE(orig_type) == 0) {
// If we already compiled this type, use the old type.
- if (const Type *Ty = GET_TYPE_LLVM(orig_type))
+ if (Type *Ty = GET_TYPE_LLVM(orig_type))
return Ty;
// Just mark it as a named type for now.
- const Type *Ty = StructType::createNamed(Context,
- GetTypeName("enum.", orig_type));
+ Type *Ty = StructType::createNamed(Context,
+ GetTypeName("enum.", orig_type));
return SET_TYPE_LLVM(orig_type, Ty);
}
// FALL THROUGH.
type = orig_type;
case INTEGER_TYPE: {
- if (const Type *Ty = GET_TYPE_LLVM(type)) return Ty;
+ if (Type *Ty = GET_TYPE_LLVM(type)) return Ty;
// The ARM port defines __builtin_neon_xi as a 511-bit type because GCC's
// type precision field has only 9 bits. Treat this as a special case.
int precision = TYPE_PRECISION(type) == 511 ? 512 : TYPE_PRECISION(type);
return SET_TYPE_LLVM(type, IntegerType::get(Context, precision));
}
case REAL_TYPE:
- if (const Type *Ty = GET_TYPE_LLVM(type)) return Ty;
+ if (Type *Ty = GET_TYPE_LLVM(type)) return Ty;
switch (TYPE_PRECISION(type)) {
default:
fprintf(stderr, "Unknown FP type!\n");
@@ -557,13 +557,13 @@
}
case COMPLEX_TYPE: {
- if (const Type *Ty = GET_TYPE_LLVM(type)) return Ty;
- const Type *Ty = ConvertType(TREE_TYPE(type));
+ if (Type *Ty = GET_TYPE_LLVM(type)) return Ty;
+ Type *Ty = ConvertType(TREE_TYPE(type));
return SET_TYPE_LLVM(type, StructType::get(Ty, Ty, NULL));
}
case VECTOR_TYPE: {
- if (const Type *Ty = GET_TYPE_LLVM(type)) return Ty;
- const Type *Ty = ConvertType(TREE_TYPE(type));
+ if (Type *Ty = GET_TYPE_LLVM(type)) return Ty;
+ Type *Ty = ConvertType(TREE_TYPE(type));
Ty = VectorType::get(Ty, TYPE_VECTOR_SUBPARTS(type));
return SET_TYPE_LLVM(type, Ty);
}
@@ -576,7 +576,7 @@
if (RecursionStatus == CS_Struct)
RecursionStatus = CS_StructPtr;
- const Type *Ty = ConvertType(TREE_TYPE(type));
+ Type *Ty = ConvertType(TREE_TYPE(type));
RecursionStatus = SavedCS;
@@ -587,7 +587,7 @@
case METHOD_TYPE:
case FUNCTION_TYPE: {
- if (const Type *Ty = GET_TYPE_LLVM(type))
+ if (Type *Ty = GET_TYPE_LLVM(type))
return Ty;
// No declaration to pass through, passing NULL.
@@ -597,7 +597,7 @@
CallingConv, PAL));
}
case ARRAY_TYPE: {
- if (const Type *Ty = GET_TYPE_LLVM(type))
+ if (Type *Ty = GET_TYPE_LLVM(type))
return Ty;
uint64_t ElementSize;
@@ -664,14 +664,14 @@
namespace {
class FunctionTypeConversion : public DefaultABIClient {
- const Type *&RetTy;
- std::vector<const Type*> &ArgTypes;
+ Type *&RetTy;
+ std::vector<Type*> &ArgTypes;
CallingConv::ID &CallingConv;
bool isShadowRet;
bool KNRPromotion;
unsigned Offset;
public:
- FunctionTypeConversion(const Type *&retty, std::vector<const Type*> &AT,
+ FunctionTypeConversion(Type *&retty, std::vector<Type*> &AT,
CallingConv::ID &CC, bool KNR)
: RetTy(retty), ArgTypes(AT), CallingConv(CC), KNRPromotion(KNR), Offset(0) {
CallingConv = CallingConv::C;
@@ -685,26 +685,26 @@
/// HandleScalarResult - This callback is invoked if the function returns a
/// simple scalar result value.
- void HandleScalarResult(const Type *RetTy) {
+ void HandleScalarResult(Type *RetTy) {
this->RetTy = RetTy;
}
/// HandleAggregateResultAsScalar - This callback is invoked if the function
/// returns an aggregate value by bit converting it to the specified scalar
/// type and returning that.
- void HandleAggregateResultAsScalar(const Type *ScalarTy, unsigned Offset=0) {
+ void HandleAggregateResultAsScalar(Type *ScalarTy, unsigned Offset=0) {
RetTy = ScalarTy;
this->Offset = Offset;
}
/// HandleAggregateResultAsAggregate - This callback is invoked if the function
/// returns an aggregate value using multiple return values.
- void HandleAggregateResultAsAggregate(const Type *AggrTy) {
+ void HandleAggregateResultAsAggregate(Type *AggrTy) {
RetTy = AggrTy;
}
/// HandleShadowResult - Handle an aggregate or scalar shadow argument.
- void HandleShadowResult(const PointerType *PtrArgTy, bool RetPtr) {
+ void HandleShadowResult(PointerType *PtrArgTy, bool RetPtr) {
// This function either returns void or the shadow argument,
// depending on the target.
RetTy = RetPtr ? PtrArgTy : Type::getVoidTy(Context);
@@ -720,8 +720,7 @@
/// returns an aggregate value by using a "shadow" first parameter, which is
/// a pointer to the aggregate, of type PtrArgTy. If RetPtr is set to true,
/// the pointer argument itself is returned from the function.
- void HandleAggregateShadowResult(const PointerType *PtrArgTy,
- bool RetPtr) {
+ void HandleAggregateShadowResult(PointerType *PtrArgTy, bool RetPtr) {
HandleShadowResult(PtrArgTy, RetPtr);
}
@@ -729,15 +728,15 @@
/// returns a scalar value by using a "shadow" first parameter, which is a
/// pointer to the scalar, of type PtrArgTy. If RetPtr is set to true,
/// the pointer argument itself is returned from the function.
- void HandleScalarShadowResult(const PointerType *PtrArgTy, bool RetPtr) {
+ void HandleScalarShadowResult(PointerType *PtrArgTy, bool RetPtr) {
HandleShadowResult(PtrArgTy, RetPtr);
}
- void HandlePad(const llvm::Type *LLVMTy) {
+ void HandlePad(llvm::Type *LLVMTy) {
HandleScalarArgument(LLVMTy, 0, 0);
}
- void HandleScalarArgument(const llvm::Type *LLVMTy, tree type,
+ void HandleScalarArgument(llvm::Type *LLVMTy, tree type,
unsigned RealSize = 0) {
if (KNRPromotion) {
if (type == float_type_node)
@@ -752,20 +751,20 @@
/// HandleByInvisibleReferenceArgument - This callback is invoked if a pointer
/// (of type PtrTy) to the argument is passed rather than the argument itself.
- void HandleByInvisibleReferenceArgument(const llvm::Type *PtrTy, tree type) {
+ void HandleByInvisibleReferenceArgument(llvm::Type *PtrTy, tree type) {
ArgTypes.push_back(PtrTy);
}
/// HandleByValArgument - This callback is invoked if the aggregate function
/// argument is passed by value. It is lowered to a parameter passed by
/// reference with an additional parameter attribute "ByVal".
- void HandleByValArgument(const llvm::Type *LLVMTy, tree type) {
+ void HandleByValArgument(llvm::Type *LLVMTy, tree type) {
HandleScalarArgument(LLVMTy->getPointerTo(), type);
}
/// HandleFCAArgument - This callback is invoked if the aggregate function
/// argument is a first class aggregate passed by value.
- void HandleFCAArgument(const llvm::Type *LLVMTy,
+ void HandleFCAArgument(llvm::Type *LLVMTy,
tree type ATTRIBUTE_UNUSED) {
ArgTypes.push_back(LLVMTy);
}
@@ -798,8 +797,8 @@
ConvertArgListToFnType(tree type, tree Args, tree static_chain,
CallingConv::ID &CallingConv, AttrListPtr &PAL) {
tree ReturnType = TREE_TYPE(type);
- std::vector<const Type*> ArgTys;
- const Type *RetTy = Type::getVoidTy(Context);
+ std::vector<Type*> ArgTys;
+ Type *RetTy = Type::getVoidTy(Context);
FunctionTypeConversion Client(RetTy, ArgTys, CallingConv, true /*K&R*/);
DefaultABI ABIConverter(Client);
@@ -808,7 +807,7 @@
TARGET_ADJUST_LLVM_CC(CallingConv, type);
#endif
- std::vector<const Type*> ScalarArgs;
+ std::vector<Type*> ScalarArgs;
// Builtins are always prototyped, so this isn't one.
ABIConverter.HandleReturnType(ReturnType, current_function_decl, false,
ScalarArgs);
@@ -865,11 +864,11 @@
return FunctionType::get(RetTy, ArgTys, false);
}
-const FunctionType *TypeConverter::
+FunctionType *TypeConverter::
ConvertFunctionType(tree type, tree decl, tree static_chain,
CallingConv::ID &CallingConv, AttrListPtr &PAL) {
- const Type *RetTy = Type::getVoidTy(Context);
- std::vector<const Type *> ArgTypes;
+ Type *RetTy = Type::getVoidTy(Context);
+ std::vector<Type *> ArgTypes;
bool isVarArg = false;
FunctionTypeConversion Client(RetTy, ArgTypes, CallingConv, false/*not K&R*/);
DefaultABI ABIConverter(Client);
@@ -879,7 +878,7 @@
TARGET_ADJUST_LLVM_CC(CallingConv, type);
#endif
- std::vector<const Type*> ScalarArgs;
+ std::vector<Type*> ScalarArgs;
ABIConverter.HandleReturnType(TREE_TYPE(type), current_function_decl,
decl ? DECL_BUILT_IN(decl) : false,
ScalarArgs);
@@ -973,7 +972,7 @@
for (; Args && TREE_VALUE(Args) != void_type_node; Args = TREE_CHAIN(Args)){
tree ArgTy = TREE_VALUE(Args);
if (!isPassedByInvisibleReference(ArgTy))
- if (const StructType *STy = dyn_cast<StructType>(ConvertType(ArgTy)))
+ if (StructType *STy = dyn_cast<StructType>(ConvertType(ArgTy)))
if (STy->isOpaque()) {
// If we are passing an opaque struct by value, we don't know how many
// arguments it will turn into. Because we can't handle this yet,
@@ -1649,7 +1648,7 @@
// If Field has user defined alignment and it does not match Ty alignment
// then convert to a packed struct and try again.
if (TYPE_USER_ALIGN(DECL_BIT_FIELD_TYPE(Field))) {
- const Type *Ty = ConvertType(getDeclaredType(Field));
+ Type *Ty = ConvertType(getDeclaredType(Field));
if (TYPE_ALIGN(DECL_BIT_FIELD_TYPE(Field)) !=
8 * Info.getTypeAlignment(Ty))
return false;
@@ -1666,7 +1665,7 @@
assert((StartOffsetInBits & 7) == 0 && "Non-bit-field has non-byte offset!");
uint64_t StartOffsetInBytes = StartOffsetInBits/8;
- const Type *Ty = ConvertType(getDeclaredType(Field));
+ Type *Ty = ConvertType(getDeclaredType(Field));
// If this field is packed then the struct may need padding fields
// before this field.
@@ -1875,7 +1874,7 @@
TREE_INT_CST_LOW(DECL_SIZE(Field)) == 0)
continue;
- const Type *TheTy = ConvertType(TheGccTy);
+ Type *TheTy = ConvertType(TheGccTy);
unsigned Size = Info.getTypeSize(TheTy);
unsigned Align = Info.getTypeAlignment(TheTy);
@@ -1939,9 +1938,9 @@
//
// For LLVM purposes, we build a new type for B-within-D that
// has the correct size and layout for that usage.
-const Type *TypeConverter::ConvertRECORD(tree type, tree orig_type) {
+Type *TypeConverter::ConvertRECORD(tree type, tree orig_type) {
bool IsStruct = TREE_CODE(type) == RECORD_TYPE;
- if (const StructType *Ty = cast_or_null<StructType>(GET_TYPE_LLVM(type))) {
+ if (StructType *Ty = cast_or_null<StructType>(GET_TYPE_LLVM(type))) {
// If we already compiled this type, and if it was not a forward
// definition that is now defined, use the old type.
if (!Ty->isOpaque() || TYPE_SIZE(type) == 0)
@@ -2060,7 +2059,7 @@
} else {
uint64_t FieldOffsetInBits = getFieldOffsetInBits(Field);
tree FieldType = getDeclaredType(Field);
- const Type *FieldTy = ConvertType(FieldType);
+ Type *FieldTy = ConvertType(FieldType);
// If this is a bitfield, we may want to adjust the FieldOffsetInBits
// to produce safe code. In particular, bitfields will be
@@ -2101,7 +2100,7 @@
if (IsStruct)
RestoreOriginalFields(type);
- const StructType *ResultTy = cast<StructType>(GET_TYPE_LLVM(type));
+ StructType *ResultTy = cast<StructType>(GET_TYPE_LLVM(type));
Info->fillInLLVMType((StructType*)ResultTy);
StructTypeInfoMap[type] = Info;
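One more hedged illustration (assuming the ArrayRef-based StructType::get(Context, ...) overload of this era): the inline-asm result handling earlier in the patch simplifies because a container of Type* can now be passed straight through, without first being copied into a std::vector<const Type*>.
  #include "llvm/DerivedTypes.h"
  #include "llvm/LLVMContext.h"
  #include "llvm/ADT/ArrayRef.h"
  using namespace llvm;

  Type *buildAsmResultType(LLVMContext &Context, ArrayRef<Type*> CallResultTypes) {
    switch (CallResultTypes.size()) {
    case 0:  return Type::getVoidTy(Context);   // asm produces no results
    case 1:  return CallResultTypes[0];         // single result, use it directly
    default: // No temporary std::vector<const Type*> copy is needed any more.
      return StructType::get(Context, CallResultTypes);
    }
  }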