[llvm-commits] [llvm-gcc-4.2] r78947 - in /llvm-gcc-4.2/trunk/gcc: config/arm/llvm-arm.cpp config/i386/llvm-i386.cpp config/rs6000/llvm-rs6000.cpp llvm-abi.h llvm-backend.cpp llvm-convert.cpp llvm-linker-hack.cpp llvm-types.cpp
Owen Anderson
resistor at mac.com
Thu Aug 13 14:58:17 PDT 2009
Author: resistor
Date: Thu Aug 13 16:58:17 2009
New Revision: 78947
URL: http://llvm.org/viewvc/llvm-project?rev=78947&view=rev
Log:
Update for LLVM API change.
Modified:
llvm-gcc-4.2/trunk/gcc/config/arm/llvm-arm.cpp
llvm-gcc-4.2/trunk/gcc/config/i386/llvm-i386.cpp
llvm-gcc-4.2/trunk/gcc/config/rs6000/llvm-rs6000.cpp
llvm-gcc-4.2/trunk/gcc/llvm-abi.h
llvm-gcc-4.2/trunk/gcc/llvm-backend.cpp
llvm-gcc-4.2/trunk/gcc/llvm-convert.cpp
llvm-gcc-4.2/trunk/gcc/llvm-linker-hack.cpp
llvm-gcc-4.2/trunk/gcc/llvm-types.cpp
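
The pattern applied throughout is the LLVMContext migration: primitive type singletons such as Type::Int32Ty and width-only constructors such as IntegerType::get(128) now take an explicit LLVMContext, normally obtained via getGlobalContext(). The sketch below shows the before/after shape of the change, assuming the LLVM 2.6-era headers this tree builds against; the function name Example and the surrounding scaffolding are illustrative only and do not appear in this patch.

    #include "llvm/BasicBlock.h"
    #include "llvm/Constants.h"
    #include "llvm/DerivedTypes.h"
    #include "llvm/LLVMContext.h"
    using namespace llvm;

    // One context shared by the translation unit, the same arrangement the
    // patch adds to llvm-arm.cpp as a file-static reference.
    static LLVMContext &Context = getGlobalContext();

    static void Example() {
      // Old API (pre-r78947 style):
      //   Constant *Zero = ConstantInt::get(Type::Int32Ty, 0);
      //   const Type *I128 = IntegerType::get(128);
      //   BasicBlock *BB   = BasicBlock::Create("entry");
      // New API: the owning LLVMContext is passed explicitly.
      Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
      const Type *I128 = IntegerType::get(Context, 128);
      BasicBlock *BB = BasicBlock::Create(Context, "entry");
      (void)Zero; (void)I128;
      delete BB; // unparented block; a real caller would insert it into a Function
    }

Files that already have a local Context reference use it directly; llvm-abi.h, being a header, calls getGlobalContext() at each use site instead.
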
Modified: llvm-gcc-4.2/trunk/gcc/config/arm/llvm-arm.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/config/arm/llvm-arm.cpp?rev=78947&r1=78946&r2=78947&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/config/arm/llvm-arm.cpp (original)
+++ llvm-gcc-4.2/trunk/gcc/config/arm/llvm-arm.cpp Thu Aug 13 16:58:17 2009
@@ -28,6 +28,7 @@
#include "llvm/DerivedTypes.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
+#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
extern "C" {
@@ -37,6 +38,8 @@
#include "insn-config.h"
#include "recog.h"
+static LLVMContext &Context = getGlobalContext();
+
enum neon_itype { neon_itype_dummy };
extern enum insn_code locate_neon_builtin_icode
(int fcode, neon_itype *itype, enum neon_builtins *neon_code);
@@ -142,14 +145,14 @@
Value *Undef = UndefValue::get(ResultType);
Value *Result =
Builder.CreateInsertElement(Undef, Val,
- ConstantInt::get(Type::Int32Ty, 0));
+ ConstantInt::get(Type::getInt32Ty(Context), 0));
// Use a shuffle to move the value into the other lanes.
unsigned NUnits = VTy->getNumElements();
if (NUnits > 1) {
std::vector<Constant*> Idxs;
for (unsigned i = 0; i != NUnits; ++i)
- Idxs.push_back(ConstantInt::get(Type::Int32Ty, 0));
+ Idxs.push_back(ConstantInt::get(Type::getInt32Ty(Context), 0));
Result = Builder.CreateShuffleVector(Result, Undef,
ConstantVector::get(Idxs));
}
@@ -164,7 +167,7 @@
std::vector<Constant*> Idxs;
LLVMContext &Context = getGlobalContext();
for (unsigned i = 0; i != NUnits; ++i)
- Idxs.push_back(ConstantInt::get(Type::Int32Ty, LaneVal));
+ Idxs.push_back(ConstantInt::get(Type::getInt32Ty(Context), LaneVal));
return Builder.CreateShuffleVector(Vec, UndefValue::get(Vec->getType()),
ConstantVector::get(Idxs));
}
@@ -213,7 +216,7 @@
// Right shifts are represented in NEON intrinsics by a negative shift count.
LLVMContext &Context = getGlobalContext();
- Cnt = ConstantInt::get(IntegerType::get(ElemBits),
+    Cnt = ConstantInt::get(IntegerType::get(Context, ElemBits),
NegateRightShift ? -CntVal : CntVal);
Op = BuildConstantSplatVector(GET_MODE_NUNITS(Mode), Cnt);
return true;
@@ -1703,7 +1706,7 @@
unsigned NUnits = GET_MODE_NUNITS(insn_data[icode].operand[0].mode);
std::vector<Constant*> Idxs;
for (unsigned i = 0; i != NUnits; ++i)
- Idxs.push_back(ConstantInt::get(Type::Int32Ty, i));
+ Idxs.push_back(ConstantInt::get(Type::getInt32Ty(Context), i));
Result = Builder.CreateShuffleVector(Ops[0], Ops[1],
ConstantVector::get(Idxs));
break;
@@ -1715,7 +1718,7 @@
std::vector<Constant*> Idxs;
unsigned Idx = (neon_code == NEON_BUILTIN_vget_low ? 0 : NUnits);
for (unsigned i = 0; i != NUnits; ++i)
- Idxs.push_back(ConstantInt::get(Type::Int32Ty, Idx++));
+ Idxs.push_back(ConstantInt::get(Type::getInt32Ty(Context), Idx++));
Result = Builder.CreateShuffleVector(Ops[0],
UndefValue::get(Ops[0]->getType()),
ConstantVector::get(Idxs));
@@ -1779,7 +1782,7 @@
// Translate to a vector shuffle.
std::vector<Constant*> Idxs;
for (unsigned i = 0; i != NUnits; ++i)
- Idxs.push_back(ConstantInt::get(Type::Int32Ty, i + ImmVal));
+ Idxs.push_back(ConstantInt::get(Type::getInt32Ty(Context), i + ImmVal));
Result = Builder.CreateShuffleVector(Ops[0], Ops[1],
ConstantVector::get(Idxs));
break;
@@ -1805,7 +1808,7 @@
unsigned NUnits = VTy->getNumElements();
for (unsigned c = ChunkElts; c <= NUnits; c += ChunkElts) {
for (unsigned i = 0; i != ChunkElts; ++i) {
- Idxs.push_back(ConstantInt::get(Type::Int32Ty, c - i - 1));
+ Idxs.push_back(ConstantInt::get(Type::getInt32Ty(Context), c - i - 1));
}
}
Result = Builder.CreateShuffleVector(Ops[0], UndefValue::get(ResultType),
@@ -1981,7 +1984,7 @@
case NEON_BUILTIN_vld1: {
intID = Intrinsic::arm_neon_vld1;
intFn = Intrinsic::getDeclaration(TheModule, intID, &ResultType, 1);
- Type *VPTy = PointerType::getUnqual(Type::Int8Ty);
+ Type *VPTy = PointerType::getUnqual(Type::getInt8Ty(Context));
Result = Builder.CreateCall(intFn, BitCastToType(Ops[0], VPTy));
break;
}
@@ -1999,7 +2002,7 @@
default: assert(false);
}
intFn = Intrinsic::getDeclaration(TheModule, intID, &VTy, 1);
- Type *VPTy = PointerType::getUnqual(Type::Int8Ty);
+ Type *VPTy = PointerType::getUnqual(Type::getInt8Ty(Context));
Result = Builder.CreateCall(intFn, BitCastToType(Ops[0], VPTy));
Builder.CreateStore(Result, DestLoc->Ptr);
Result = 0;
@@ -2026,9 +2029,9 @@
Result = BitCastToType(Ops[1], VTy);
for (unsigned n = 0; n != NumVecs; ++n) {
Value *Addr = (n == 0) ? Ops[0] :
- Builder.CreateGEP(Ops[0], ConstantInt::get(Type::Int32Ty, n));
+ Builder.CreateGEP(Ops[0], ConstantInt::get(Type::getInt32Ty(Context), n));
Value *Elt = Builder.CreateLoad(Addr);
- Value *Ndx = ConstantInt::get(Type::Int32Ty,
+ Value *Ndx = ConstantInt::get(Type::getInt32Ty(Context),
LaneVal + (n * NUnits));
Result = Builder.CreateInsertElement(Result, Elt, Ndx);
}
@@ -2056,10 +2059,10 @@
Result = UndefValue::get(VTy);
for (unsigned n = 0; n != NumVecs; ++n) {
Value *Addr = (n == 0) ? Ops[0] :
- Builder.CreateGEP(Ops[0], ConstantInt::get(Type::Int32Ty, n));
+ Builder.CreateGEP(Ops[0], ConstantInt::get(Type::getInt32Ty(Context), n));
Value *Elt = Builder.CreateLoad(Addr);
// Insert the value into one lane of the result.
- Value *Ndx = ConstantInt::get(Type::Int32Ty, n * NUnits);
+ Value *Ndx = ConstantInt::get(Type::getInt32Ty(Context), n * NUnits);
Result = Builder.CreateInsertElement(Result, Elt, Ndx);
}
// Use a shuffle to move the value into the other lanes of the vector.
@@ -2067,7 +2070,7 @@
std::vector<Constant*> Idxs;
for (unsigned n = 0; n != NumVecs; ++n) {
for (unsigned i = 0; i != NUnits; ++i)
- Idxs.push_back(ConstantInt::get(Type::Int32Ty, n * NUnits));
+ Idxs.push_back(ConstantInt::get(Type::getInt32Ty(Context), n * NUnits));
}
Result = Builder.CreateShuffleVector(Result, UndefValue::get(VTy),
ConstantVector::get(Idxs));
@@ -2082,7 +2085,7 @@
const Type *VTy = Ops[1]->getType();
intID = Intrinsic::arm_neon_vst1;
intFn = Intrinsic::getDeclaration(TheModule, intID, &VTy, 1);
- Type *VPTy = PointerType::getUnqual(Type::Int8Ty);
+ Type *VPTy = PointerType::getUnqual(Type::getInt8Ty(Context));
Builder.CreateCall2(intFn, BitCastToType(Ops[0], VPTy), Ops[1]);
Result = 0;
break;
@@ -2109,7 +2112,7 @@
default: assert(false);
}
std::vector<Value*> Args;
- Type *VPTy = PointerType::getUnqual(Type::Int8Ty);
+ Type *VPTy = PointerType::getUnqual(Type::getInt8Ty(Context));
Args.push_back(BitCastToType(Ops[0], VPTy));
for (unsigned n = 0; n < NumVecs; ++n) {
Args.push_back(Builder.CreateExtractValue(Ops[1], n));
@@ -2142,8 +2145,8 @@
Value *Vec = Builder.CreateLoad(Tmp);
for (unsigned n = 0; n != NumVecs; ++n) {
Value *Addr = (n == 0) ? Ops[0] :
- Builder.CreateGEP(Ops[0], ConstantInt::get(Type::Int32Ty, n));
- Value *Ndx = ConstantInt::get(Type::Int32Ty,
+ Builder.CreateGEP(Ops[0], ConstantInt::get(Type::getInt32Ty(Context), n));
+ Value *Ndx = ConstantInt::get(Type::getInt32Ty(Context),
LaneVal + (n * NUnits));
Builder.CreateStore(Builder.CreateExtractElement(Vec, Ndx), Addr);
}
Modified: llvm-gcc-4.2/trunk/gcc/config/i386/llvm-i386.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/config/i386/llvm-i386.cpp?rev=78947&r1=78946&r2=78947&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/config/i386/llvm-i386.cpp (original)
+++ llvm-gcc-4.2/trunk/gcc/config/i386/llvm-i386.cpp Thu Aug 13 16:58:17 2009
@@ -113,11 +113,11 @@
case IX86_BUILTIN_ANDNPD:
if (cast<VectorType>(ResultType)->getNumElements() == 4) // v4f32
Ops[0] = Builder.CreateBitCast(Ops[0],
- VectorType::get(Type::Int32Ty, 4),
+ VectorType::get(Type::getInt32Ty(Context), 4),
"tmp");
else // v2f64
Ops[0] = Builder.CreateBitCast(Ops[0],
- VectorType::get(Type::Int64Ty, 2),
+ VectorType::get(Type::getInt64Ty(Context), 2),
"tmp");
Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType(), "tmp");
@@ -273,24 +273,24 @@
Result = BuildVectorShuffle(Ops[0], Ops[1], 2, 1);
return true;
case IX86_BUILTIN_MOVQ: {
- Value *Zero = ConstantInt::get(Type::Int32Ty, 0);
+ Value *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
Result = BuildVector(Zero, Zero, Zero, Zero, NULL);
Result = BuildVectorShuffle(Result, Ops[0], 4, 5, 2, 3);
return true;
}
case IX86_BUILTIN_LOADQ: {
- PointerType *i64Ptr = PointerType::getUnqual(Type::Int64Ty);
+ PointerType *i64Ptr = PointerType::getUnqual(Type::getInt64Ty(Context));
Ops[0] = Builder.CreateBitCast(Ops[0], i64Ptr, "tmp");
Ops[0] = Builder.CreateLoad(Ops[0], "tmp");
- Value *Zero = ConstantInt::get(Type::Int64Ty, 0);
+ Value *Zero = ConstantInt::get(Type::getInt64Ty(Context), 0);
Result = BuildVector(Zero, Zero, NULL);
- Value *Idx = ConstantInt::get(Type::Int32Ty, 0);
+ Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), 0);
Result = Builder.CreateInsertElement(Result, Ops[0], Idx, "tmp");
Result = Builder.CreateBitCast(Result, ResultType, "tmp");
return true;
}
case IX86_BUILTIN_LOADUPS: {
- VectorType *v4f32 = VectorType::get(Type::FloatTy, 4);
+ VectorType *v4f32 = VectorType::get(Type::getFloatTy(Context), 4);
PointerType *v4f32Ptr = PointerType::getUnqual(v4f32);
Value *BC = Builder.CreateBitCast(Ops[0], v4f32Ptr, "tmp");
LoadInst *LI = Builder.CreateLoad(BC, "tmp");
@@ -299,7 +299,7 @@
return true;
}
case IX86_BUILTIN_LOADUPD: {
- VectorType *v2f64 = VectorType::get(Type::DoubleTy, 2);
+ VectorType *v2f64 = VectorType::get(Type::getDoubleTy(Context), 2);
PointerType *v2f64Ptr = PointerType::getUnqual(v2f64);
Value *BC = Builder.CreateBitCast(Ops[0], v2f64Ptr, "tmp");
LoadInst *LI = Builder.CreateLoad(BC, "tmp");
@@ -308,7 +308,7 @@
return true;
}
case IX86_BUILTIN_LOADDQU: {
- VectorType *v16i8 = VectorType::get(Type::Int8Ty, 16);
+ VectorType *v16i8 = VectorType::get(Type::getInt8Ty(Context), 16);
PointerType *v16i8Ptr = PointerType::getUnqual(v16i8);
Value *BC = Builder.CreateBitCast(Ops[0], v16i8Ptr, "tmp");
LoadInst *LI = Builder.CreateLoad(BC, "tmp");
@@ -317,7 +317,7 @@
return true;
}
case IX86_BUILTIN_STOREUPS: {
- VectorType *v4f32 = VectorType::get(Type::FloatTy, 4);
+ VectorType *v4f32 = VectorType::get(Type::getFloatTy(Context), 4);
PointerType *v4f32Ptr = PointerType::getUnqual(v4f32);
Value *BC = Builder.CreateBitCast(Ops[0], v4f32Ptr, "tmp");
StoreInst *SI = Builder.CreateStore(Ops[1], BC);
@@ -326,7 +326,7 @@
return true;
}
case IX86_BUILTIN_STOREUPD: {
- VectorType *v2f64 = VectorType::get(Type::DoubleTy, 2);
+ VectorType *v2f64 = VectorType::get(Type::getDoubleTy(Context), 2);
PointerType *v2f64Ptr = PointerType::getUnqual(v2f64);
Value *BC = Builder.CreateBitCast(Ops[0], v2f64Ptr, "tmp");
StoreInst *SI = Builder.CreateStore(Ops[1], BC);
@@ -335,7 +335,7 @@
return true;
}
case IX86_BUILTIN_STOREDQU: {
- VectorType *v16i8 = VectorType::get(Type::Int8Ty, 16);
+ VectorType *v16i8 = VectorType::get(Type::getInt8Ty(Context), 16);
PointerType *v16i8Ptr = PointerType::getUnqual(v16i8);
Value *BC = Builder.CreateBitCast(Ops[0], v16i8Ptr, "tmp");
StoreInst *SI = Builder.CreateStore(Ops[1], BC);
@@ -344,20 +344,20 @@
return true;
}
case IX86_BUILTIN_LOADHPS: {
- PointerType *f64Ptr = PointerType::getUnqual(Type::DoubleTy);
+ PointerType *f64Ptr = PointerType::getUnqual(Type::getDoubleTy(Context));
Ops[1] = Builder.CreateBitCast(Ops[1], f64Ptr, "tmp");
Value *Load = Builder.CreateLoad(Ops[1], "tmp");
- Ops[1] = BuildVector(Load, UndefValue::get(Type::DoubleTy), NULL);
+ Ops[1] = BuildVector(Load, UndefValue::get(Type::getDoubleTy(Context)), NULL);
Ops[1] = Builder.CreateBitCast(Ops[1], ResultType, "tmp");
Result = BuildVectorShuffle(Ops[0], Ops[1], 0, 1, 4, 5);
Result = Builder.CreateBitCast(Result, ResultType, "tmp");
return true;
}
case IX86_BUILTIN_LOADLPS: {
- PointerType *f64Ptr = PointerType::getUnqual(Type::DoubleTy);
+ PointerType *f64Ptr = PointerType::getUnqual(Type::getDoubleTy(Context));
Ops[1] = Builder.CreateBitCast(Ops[1], f64Ptr, "tmp");
Value *Load = Builder.CreateLoad(Ops[1], "tmp");
- Ops[1] = BuildVector(Load, UndefValue::get(Type::DoubleTy), NULL);
+ Ops[1] = BuildVector(Load, UndefValue::get(Type::getDoubleTy(Context)), NULL);
Ops[1] = Builder.CreateBitCast(Ops[1], ResultType, "tmp");
Result = BuildVectorShuffle(Ops[0], Ops[1], 4, 5, 2, 3);
Result = Builder.CreateBitCast(Result, ResultType, "tmp");
@@ -365,7 +365,7 @@
}
case IX86_BUILTIN_LOADHPD: {
Value *Load = Builder.CreateLoad(Ops[1], "tmp");
- Ops[1] = BuildVector(Load, UndefValue::get(Type::DoubleTy), NULL);
+ Ops[1] = BuildVector(Load, UndefValue::get(Type::getDoubleTy(Context)), NULL);
Ops[1] = Builder.CreateBitCast(Ops[1], ResultType, "tmp");
Result = BuildVectorShuffle(Ops[0], Ops[1], 0, 2);
Result = Builder.CreateBitCast(Result, ResultType, "tmp");
@@ -373,27 +373,27 @@
}
case IX86_BUILTIN_LOADLPD: {
Value *Load = Builder.CreateLoad(Ops[1], "tmp");
- Ops[1] = BuildVector(Load, UndefValue::get(Type::DoubleTy), NULL);
+ Ops[1] = BuildVector(Load, UndefValue::get(Type::getDoubleTy(Context)), NULL);
Ops[1] = Builder.CreateBitCast(Ops[1], ResultType, "tmp");
Result = BuildVectorShuffle(Ops[0], Ops[1], 2, 1);
Result = Builder.CreateBitCast(Result, ResultType, "tmp");
return true;
}
case IX86_BUILTIN_STOREHPS: {
- VectorType *v2f64 = VectorType::get(Type::DoubleTy, 2);
- PointerType *f64Ptr = PointerType::getUnqual(Type::DoubleTy);
+ VectorType *v2f64 = VectorType::get(Type::getDoubleTy(Context), 2);
+ PointerType *f64Ptr = PointerType::getUnqual(Type::getDoubleTy(Context));
Ops[0] = Builder.CreateBitCast(Ops[0], f64Ptr, "tmp");
- Value *Idx = ConstantInt::get(Type::Int32Ty, 1);
+ Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), 1);
Ops[1] = Builder.CreateBitCast(Ops[1], v2f64, "tmp");
Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "tmp");
Result = Builder.CreateStore(Ops[1], Ops[0]);
return true;
}
case IX86_BUILTIN_STORELPS: {
- VectorType *v2f64 = VectorType::get(Type::DoubleTy, 2);
- PointerType *f64Ptr = PointerType::getUnqual(Type::DoubleTy);
+ VectorType *v2f64 = VectorType::get(Type::getDoubleTy(Context), 2);
+ PointerType *f64Ptr = PointerType::getUnqual(Type::getDoubleTy(Context));
Ops[0] = Builder.CreateBitCast(Ops[0], f64Ptr, "tmp");
- Value *Idx = ConstantInt::get(Type::Int32Ty, 0);
+ Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), 0);
Ops[1] = Builder.CreateBitCast(Ops[1], v2f64, "tmp");
Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "tmp");
Result = Builder.CreateStore(Ops[1], Ops[0]);
@@ -411,13 +411,13 @@
case IX86_BUILTIN_VEC_INIT_V4HI:
// Sometimes G++ promotes arguments to int.
for (unsigned i = 0; i != 4; ++i)
- Ops[i] = Builder.CreateIntCast(Ops[i], Type::Int16Ty, false, "tmp");
+ Ops[i] = Builder.CreateIntCast(Ops[i], Type::getInt16Ty(Context), false, "tmp");
Result = BuildVector(Ops[0], Ops[1], Ops[2], Ops[3], NULL);
return true;
case IX86_BUILTIN_VEC_INIT_V8QI:
// Sometimes G++ promotes arguments to int.
for (unsigned i = 0; i != 8; ++i)
- Ops[i] = Builder.CreateIntCast(Ops[i], Type::Int8Ty, false, "tmp");
+ Ops[i] = Builder.CreateIntCast(Ops[i], Type::getInt8Ty(Context), false, "tmp");
Result = BuildVector(Ops[0], Ops[1], Ops[2], Ops[3],
Ops[4], Ops[5], Ops[6], Ops[7], NULL);
return true;
@@ -433,13 +433,13 @@
return true;
case IX86_BUILTIN_VEC_SET_V16QI:
// Sometimes G++ promotes arguments to int.
- Ops[1] = Builder.CreateIntCast(Ops[1], Type::Int8Ty, false, "tmp");
+ Ops[1] = Builder.CreateIntCast(Ops[1], Type::getInt8Ty(Context), false, "tmp");
Result = Builder.CreateInsertElement(Ops[0], Ops[1], Ops[2], "tmp");
return true;
case IX86_BUILTIN_VEC_SET_V4HI:
case IX86_BUILTIN_VEC_SET_V8HI:
// GCC sometimes doesn't produce the right element type.
- Ops[1] = Builder.CreateIntCast(Ops[1], Type::Int16Ty, false, "tmp");
+ Ops[1] = Builder.CreateIntCast(Ops[1], Type::getInt16Ty(Context), false, "tmp");
Result = Builder.CreateInsertElement(Ops[0], Ops[1], Ops[2], "tmp");
return true;
case IX86_BUILTIN_VEC_SET_V4SI:
@@ -479,7 +479,7 @@
case IX86_BUILTIN_CMPNGEPS: PredCode = 6; flip = true; break;
case IX86_BUILTIN_CMPORDPS: PredCode = 7; break;
}
- Value *Pred = ConstantInt::get(Type::Int8Ty, PredCode);
+ Value *Pred = ConstantInt::get(Type::getInt8Ty(Context), PredCode);
Value *Arg0 = Ops[0];
Value *Arg1 = Ops[1];
if (flip) std::swap(Arg0, Arg1);
@@ -512,7 +512,7 @@
case IX86_BUILTIN_CMPNLESS: PredCode = 6; break;
case IX86_BUILTIN_CMPORDSS: PredCode = 7; break;
}
- Value *Pred = ConstantInt::get(Type::Int8Ty, PredCode);
+ Value *Pred = ConstantInt::get(Type::getInt8Ty(Context), PredCode);
Value *CallOps[3] = { Ops[0], Ops[1], Pred };
Result = Builder.CreateCall(cmpss, CallOps, CallOps+3, "tmp");
Result = Builder.CreateBitCast(Result, ResultType, "tmp");
@@ -549,7 +549,7 @@
case IX86_BUILTIN_CMPNGEPD: PredCode = 6; flip = true; break;
case IX86_BUILTIN_CMPORDPD: PredCode = 7; break;
}
- Value *Pred = ConstantInt::get(Type::Int8Ty, PredCode);
+ Value *Pred = ConstantInt::get(Type::getInt8Ty(Context), PredCode);
Value *Arg0 = Ops[0];
Value *Arg1 = Ops[1];
if (flip) std::swap(Arg0, Arg1);
@@ -581,7 +581,7 @@
case IX86_BUILTIN_CMPNLESD: PredCode = 6; break;
case IX86_BUILTIN_CMPORDSD: PredCode = 7; break;
}
- Value *Pred = ConstantInt::get(Type::Int8Ty, PredCode);
+ Value *Pred = ConstantInt::get(Type::getInt8Ty(Context), PredCode);
Value *CallOps[3] = { Ops[0], Ops[1], Pred };
Result = Builder.CreateCall(cmpsd, CallOps, CallOps+3, "tmp");
Result = Builder.CreateBitCast(Result, ResultType, "tmp");
@@ -590,19 +590,19 @@
case IX86_BUILTIN_LDMXCSR: {
Function *ldmxcsr =
Intrinsic::getDeclaration(TheModule, Intrinsic::x86_sse_ldmxcsr);
- Value *Ptr = CreateTemporary(Type::Int32Ty);
+ Value *Ptr = CreateTemporary(Type::getInt32Ty(Context));
Builder.CreateStore(Ops[0], Ptr);
Ptr = Builder.CreateBitCast(Ptr,
- PointerType::getUnqual(Type::Int8Ty), "tmp");
+ PointerType::getUnqual(Type::getInt8Ty(Context)), "tmp");
Result = Builder.CreateCall(ldmxcsr, Ptr);
return true;
}
case IX86_BUILTIN_STMXCSR: {
Function *stmxcsr =
Intrinsic::getDeclaration(TheModule, Intrinsic::x86_sse_stmxcsr);
- Value *Ptr = CreateTemporary(Type::Int32Ty);
+ Value *Ptr = CreateTemporary(Type::getInt32Ty(Context));
Value *BPtr = Builder.CreateBitCast(Ptr,
- PointerType::getUnqual(Type::Int8Ty), "tmp");
+ PointerType::getUnqual(Type::getInt8Ty(Context)), "tmp");
Builder.CreateCall(stmxcsr, BPtr);
Result = Builder.CreateLoad(Ptr, "tmp");
@@ -672,10 +672,10 @@
// 32 and 64-bit integers are fine, as are float and double. Long double
// (which can be picked as the type for a union of 16 bytes) is not fine,
// as loads and stores of it get only 10 bytes.
- if (EltTy == Type::Int32Ty ||
- EltTy == Type::Int64Ty ||
- EltTy == Type::FloatTy ||
- EltTy == Type::DoubleTy ||
+ if (EltTy == Type::getInt32Ty(Context) ||
+ EltTy == Type::getInt64Ty(Context) ||
+ EltTy == Type::getFloatTy(Context) ||
+ EltTy == Type::getDoubleTy(Context) ||
isa<PointerType>(EltTy)) {
Elts.push_back(EltTy);
continue;
@@ -704,10 +704,10 @@
// short in 32-bit.
const Type *EltTy = STy->getElementType(0);
return !((TARGET_64BIT && (EltTy->isInteger() ||
- EltTy == Type::FloatTy ||
- EltTy == Type::DoubleTy)) ||
- EltTy == Type::Int16Ty ||
- EltTy == Type::Int8Ty);
+ EltTy == Type::getFloatTy(Context) ||
+ EltTy == Type::getDoubleTy(Context))) ||
+ EltTy == Type::getInt16Ty(Context) ||
+ EltTy == Type::getInt8Ty(Context));
}
/* Target hook for llvm-abi.h. It returns true if an aggregate of the
@@ -748,7 +748,7 @@
++NumXMMs;
} else if (Ty->isInteger() || isa<PointerType>(Ty)) {
++NumGPRs;
- } else if (Ty==Type::VoidTy) {
+ } else if (Ty==Type::getVoidTy(Context)) {
// Padding bytes that are not passed anywhere
;
} else {
@@ -836,7 +836,7 @@
switch (Class[i]) {
case X86_64_INTEGER_CLASS:
case X86_64_INTEGERSI_CLASS:
- Elts.push_back(Type::Int64Ty);
+ Elts.push_back(Type::getInt64Ty(Context));
totallyEmpty = false;
Bytes -= 8;
break;
@@ -851,10 +851,10 @@
// 5. 2 x SSE, size is 16: 2 x Double.
if ((NumClasses-i) == 1) {
if (Bytes == 8) {
- Elts.push_back(Type::DoubleTy);
+ Elts.push_back(Type::getDoubleTy(Context));
Bytes -= 8;
} else if (Bytes == 4) {
- Elts.push_back (Type::FloatTy);
+ Elts.push_back (Type::getFloatTy(Context));
Bytes -= 4;
} else
assert(0 && "Not yet handled!");
@@ -868,46 +868,46 @@
if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
if (VTy->getNumElements() == 2) {
if (VTy->getElementType()->isInteger()) {
- Elts.push_back(VectorType::get(Type::Int64Ty, 2));
+ Elts.push_back(VectorType::get(Type::getInt64Ty(Context), 2));
} else {
- Elts.push_back(VectorType::get(Type::DoubleTy, 2));
+ Elts.push_back(VectorType::get(Type::getDoubleTy(Context), 2));
}
Bytes -= 8;
} else {
assert(VTy->getNumElements() == 4);
if (VTy->getElementType()->isInteger()) {
- Elts.push_back(VectorType::get(Type::Int32Ty, 4));
+ Elts.push_back(VectorType::get(Type::getInt32Ty(Context), 4));
} else {
- Elts.push_back(VectorType::get(Type::FloatTy, 4));
+ Elts.push_back(VectorType::get(Type::getFloatTy(Context), 4));
}
Bytes -= 4;
}
} else if (llvm_x86_is_all_integer_types(Ty)) {
- Elts.push_back(VectorType::get(Type::Int32Ty, 4));
+ Elts.push_back(VectorType::get(Type::getInt32Ty(Context), 4));
Bytes -= 4;
} else {
- Elts.push_back(VectorType::get(Type::FloatTy, 4));
+ Elts.push_back(VectorType::get(Type::getFloatTy(Context), 4));
Bytes -= 4;
}
} else if (Class[i+1] == X86_64_SSESF_CLASS) {
assert(Bytes == 12 && "Not yet handled!");
- Elts.push_back(Type::DoubleTy);
- Elts.push_back(Type::FloatTy);
+ Elts.push_back(Type::getDoubleTy(Context));
+ Elts.push_back(Type::getFloatTy(Context));
Bytes -= 12;
} else if (Class[i+1] == X86_64_SSE_CLASS) {
- Elts.push_back(Type::DoubleTy);
- Elts.push_back(Type::DoubleTy);
+ Elts.push_back(Type::getDoubleTy(Context));
+ Elts.push_back(Type::getDoubleTy(Context));
Bytes -= 16;
} else if (Class[i+1] == X86_64_SSEDF_CLASS && Bytes == 16) {
- Elts.push_back(VectorType::get(Type::FloatTy, 2));
- Elts.push_back(Type::DoubleTy);
+ Elts.push_back(VectorType::get(Type::getFloatTy(Context), 2));
+ Elts.push_back(Type::getDoubleTy(Context));
} else if (Class[i+1] == X86_64_INTEGER_CLASS) {
- Elts.push_back(VectorType::get(Type::FloatTy, 2));
- Elts.push_back(Type::Int64Ty);
+ Elts.push_back(VectorType::get(Type::getFloatTy(Context), 2));
+ Elts.push_back(Type::getInt64Ty(Context));
} else if (Class[i+1] == X86_64_NO_CLASS) {
// padding bytes, don't pass
- Elts.push_back(Type::DoubleTy);
- Elts.push_back(Type::VoidTy);
+ Elts.push_back(Type::getDoubleTy(Context));
+ Elts.push_back(Type::getVoidTy(Context));
Bytes -= 16;
} else
assert(0 && "Not yet handled!");
@@ -917,12 +917,12 @@
break;
case X86_64_SSESF_CLASS:
totallyEmpty = false;
- Elts.push_back(Type::FloatTy);
+ Elts.push_back(Type::getFloatTy(Context));
Bytes -= 4;
break;
case X86_64_SSEDF_CLASS:
totallyEmpty = false;
- Elts.push_back(Type::DoubleTy);
+ Elts.push_back(Type::getDoubleTy(Context));
Bytes -= 8;
break;
case X86_64_X87_CLASS:
@@ -932,7 +932,7 @@
case X86_64_NO_CLASS:
// Padding bytes that are not passed (unless the entire object consists
// of padding)
- Elts.push_back(Type::VoidTy);
+ Elts.push_back(Type::getVoidTy(Context));
Bytes -= 8;
break;
default: assert(0 && "Unexpected register class!");
@@ -1092,13 +1092,13 @@
const Type *Ty = ConvertType(type);
unsigned Size = getTargetData().getTypeAllocSize(Ty);
if (Size == 0)
- return Type::VoidTy;
+ return Type::getVoidTy(Context);
else if (Size == 1)
- return Type::Int8Ty;
+ return Type::getInt8Ty(Context);
else if (Size == 2)
- return Type::Int16Ty;
+ return Type::getInt16Ty(Context);
else if (Size <= 4)
- return Type::Int32Ty;
+ return Type::getInt32Ty(Context);
// Check if Ty should be returned using multiple value return instruction.
if (llvm_suitable_multiple_ret_value_type(Ty, type))
@@ -1111,7 +1111,7 @@
enum machine_mode Mode = ix86_getNaturalModeForType(type);
int NumClasses = ix86_ClassifyArgument(Mode, type, Class, 0);
if (NumClasses == 0)
- return Type::Int64Ty;
+ return Type::getInt64Ty(Context);
if (NumClasses == 1) {
if (Class[0] == X86_64_INTEGERSI_CLASS ||
@@ -1121,13 +1121,13 @@
(Mode == BLKmode) ? int_size_in_bytes(type) :
(int) GET_MODE_SIZE(Mode);
if (Bytes>4)
- return Type::Int64Ty;
+ return Type::getInt64Ty(Context);
else if (Bytes>2)
- return Type::Int32Ty;
+ return Type::getInt32Ty(Context);
else if (Bytes>1)
- return Type::Int16Ty;
+ return Type::getInt16Ty(Context);
else
- return Type::Int8Ty;
+ return Type::getInt8Ty(Context);
}
assert(0 && "Unexpected type!");
}
@@ -1136,22 +1136,22 @@
if (Class[0] == X86_64_INTEGER_CLASS ||
Class[0] == X86_64_NO_CLASS ||
Class[0] == X86_64_INTEGERSI_CLASS)
- return Type::Int64Ty;
+ return Type::getInt64Ty(Context);
else if (Class[0] == X86_64_SSE_CLASS || Class[0] == X86_64_SSEDF_CLASS)
- return Type::DoubleTy;
+ return Type::getDoubleTy(Context);
else if (Class[0] == X86_64_SSESF_CLASS)
- return Type::FloatTy;
+ return Type::getFloatTy(Context);
assert(0 && "Unexpected type!");
}
if (Class[0] == X86_64_NO_CLASS) {
*Offset = 8;
if (Class[1] == X86_64_INTEGERSI_CLASS ||
Class[1] == X86_64_INTEGER_CLASS)
- return Type::Int64Ty;
+ return Type::getInt64Ty(Context);
else if (Class[1] == X86_64_SSE_CLASS || Class[1] == X86_64_SSEDF_CLASS)
- return Type::DoubleTy;
+ return Type::getDoubleTy(Context);
else if (Class[1] == X86_64_SSESF_CLASS)
- return Type::FloatTy;
+ return Type::getFloatTy(Context);
assert(0 && "Unexpected type!");
}
assert(0 && "Unexpected type!");
@@ -1159,11 +1159,11 @@
assert(0 && "Unexpected type!");
} else {
if (Size <= 8)
- return Type::Int64Ty;
+ return Type::getInt64Ty(Context);
else if (Size <= 16)
- return IntegerType::get(128);
+ return IntegerType::get(Context, 128);
else if (Size <= 32)
- return IntegerType::get(256);
+ return IntegerType::get(Context, 256);
}
return NULL;
}
@@ -1201,7 +1201,7 @@
switch (Class[i]) {
case X86_64_INTEGER_CLASS:
case X86_64_INTEGERSI_CLASS:
- Elts.push_back(Type::Int64Ty);
+ Elts.push_back(Type::getInt64Ty(Context));
Bytes -= 8;
break;
case X86_64_SSE_CLASS:
@@ -1215,10 +1215,10 @@
// 6. 1 x SSE, 1 x NO: Second is padding, pass as double.
if ((NumClasses-i) == 1) {
if (Bytes == 8) {
- Elts.push_back(Type::DoubleTy);
+ Elts.push_back(Type::getDoubleTy(Context));
Bytes -= 8;
} else if (Bytes == 4) {
- Elts.push_back(Type::FloatTy);
+ Elts.push_back(Type::getFloatTy(Context));
Bytes -= 4;
} else
assert(0 && "Not yet handled!");
@@ -1232,42 +1232,42 @@
if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
if (VTy->getNumElements() == 2) {
if (VTy->getElementType()->isInteger())
- Elts.push_back(VectorType::get(Type::Int64Ty, 2));
+ Elts.push_back(VectorType::get(Type::getInt64Ty(Context), 2));
else
- Elts.push_back(VectorType::get(Type::DoubleTy, 2));
+ Elts.push_back(VectorType::get(Type::getDoubleTy(Context), 2));
Bytes -= 8;
} else {
assert(VTy->getNumElements() == 4);
if (VTy->getElementType()->isInteger())
- Elts.push_back(VectorType::get(Type::Int32Ty, 4));
+ Elts.push_back(VectorType::get(Type::getInt32Ty(Context), 4));
else
- Elts.push_back(VectorType::get(Type::FloatTy, 4));
+ Elts.push_back(VectorType::get(Type::getFloatTy(Context), 4));
Bytes -= 4;
}
} else if (llvm_x86_is_all_integer_types(Ty)) {
- Elts.push_back(VectorType::get(Type::Int32Ty, 4));
+ Elts.push_back(VectorType::get(Type::getInt32Ty(Context), 4));
Bytes -= 4;
} else {
- Elts.push_back(VectorType::get(Type::FloatTy, 4));
+ Elts.push_back(VectorType::get(Type::getFloatTy(Context), 4));
Bytes -= 4;
}
} else if (Class[i+1] == X86_64_SSESF_CLASS) {
assert(Bytes == 12 && "Not yet handled!");
- Elts.push_back(Type::DoubleTy);
- Elts.push_back(Type::FloatTy);
+ Elts.push_back(Type::getDoubleTy(Context));
+ Elts.push_back(Type::getFloatTy(Context));
Bytes -= 12;
} else if (Class[i+1] == X86_64_SSE_CLASS) {
- Elts.push_back(Type::DoubleTy);
- Elts.push_back(Type::DoubleTy);
+ Elts.push_back(Type::getDoubleTy(Context));
+ Elts.push_back(Type::getDoubleTy(Context));
Bytes -= 16;
} else if (Class[i+1] == X86_64_SSEDF_CLASS && Bytes == 16) {
- Elts.push_back(VectorType::get(Type::FloatTy, 2));
- Elts.push_back(Type::DoubleTy);
+ Elts.push_back(VectorType::get(Type::getFloatTy(Context), 2));
+ Elts.push_back(Type::getDoubleTy(Context));
} else if (Class[i+1] == X86_64_INTEGER_CLASS) {
- Elts.push_back(VectorType::get(Type::FloatTy, 2));
- Elts.push_back(Type::Int64Ty);
+ Elts.push_back(VectorType::get(Type::getFloatTy(Context), 2));
+ Elts.push_back(Type::getInt64Ty(Context));
} else if (Class[i+1] == X86_64_NO_CLASS) {
- Elts.push_back(Type::DoubleTy);
+ Elts.push_back(Type::getDoubleTy(Context));
Bytes -= 16;
} else {
assert(0 && "Not yet handled!");
@@ -1277,21 +1277,21 @@
assert(0 && "Not yet handled!");
break;
case X86_64_SSESF_CLASS:
- Elts.push_back(Type::FloatTy);
+ Elts.push_back(Type::getFloatTy(Context));
Bytes -= 4;
break;
case X86_64_SSEDF_CLASS:
- Elts.push_back(Type::DoubleTy);
+ Elts.push_back(Type::getDoubleTy(Context));
Bytes -= 8;
break;
case X86_64_X87_CLASS:
case X86_64_X87UP_CLASS:
case X86_64_COMPLEX_X87_CLASS:
- Elts.push_back(Type::X86_FP80Ty);
+ Elts.push_back(Type::getX86_FP80Ty(Context));
break;
case X86_64_NO_CLASS:
// padding bytes.
- Elts.push_back(Type::Int64Ty);
+ Elts.push_back(Type::getInt64Ty(Context));
break;
default: assert(0 && "Unexpected register class!");
}
@@ -1311,8 +1311,8 @@
// Special handling for _Complex.
if (llvm_x86_should_not_return_complex_in_memory(type)) {
- ElementTypes.push_back(Type::X86_FP80Ty);
- ElementTypes.push_back(Type::X86_FP80Ty);
+ ElementTypes.push_back(Type::getX86_FP80Ty(Context));
+ ElementTypes.push_back(Type::getX86_FP80Ty(Context));
return StructType::get(Context, ElementTypes, STy->isPacked());
}
@@ -1338,12 +1338,12 @@
Value *EVI = Builder.CreateExtractValue(Src, SrcFieldNo, "mrv_gr");
const StructType *STy = cast<StructType>(Src->getType());
llvm::Value *Idxs[3];
- Idxs[0] = ConstantInt::get(llvm::Type::Int32Ty, 0);
- Idxs[1] = ConstantInt::get(llvm::Type::Int32Ty, DestFieldNo);
- Idxs[2] = ConstantInt::get(llvm::Type::Int32Ty, DestElemNo);
+ Idxs[0] = ConstantInt::get(llvm::Type::getInt32Ty(Context), 0);
+ Idxs[1] = ConstantInt::get(llvm::Type::getInt32Ty(Context), DestFieldNo);
+ Idxs[2] = ConstantInt::get(llvm::Type::getInt32Ty(Context), DestElemNo);
Value *GEP = Builder.CreateGEP(Dest, Idxs, Idxs+3, "mrv_gep");
if (isa<VectorType>(STy->getElementType(SrcFieldNo))) {
- Value *ElemIndex = ConstantInt::get(Type::Int32Ty, SrcElemNo);
+ Value *ElemIndex = ConstantInt::get(Type::getInt32Ty(Context), SrcElemNo);
Value *EVIElem = Builder.CreateExtractElement(EVI, ElemIndex, "mrv");
Builder.CreateStore(EVIElem, GEP, isVolatile);
} else {
@@ -1376,12 +1376,12 @@
Value *EVI = Builder.CreateExtractValue(Src, 0, "mrv_gr");
- Value *E0Index = ConstantInt::get(Type::Int32Ty, 0);
+ Value *E0Index = ConstantInt::get(Type::getInt32Ty(Context), 0);
Value *EVI0 = Builder.CreateExtractElement(EVI, E0Index, "mrv.v");
Value *GEP0 = Builder.CreateStructGEP(Dest, 0, "mrv_gep");
Builder.CreateStore(EVI0, GEP0, isVolatile);
- Value *E1Index = ConstantInt::get(Type::Int32Ty, 1);
+ Value *E1Index = ConstantInt::get(Type::getInt32Ty(Context), 1);
Value *EVI1 = Builder.CreateExtractElement(EVI, E1Index, "mrv.v");
Value *GEP1 = Builder.CreateStructGEP(Dest, 1, "mrv_gep");
Builder.CreateStore(EVI1, GEP1, isVolatile);
@@ -1408,16 +1408,16 @@
// Special treatment for _Complex.
if (const StructType *ComplexType = dyn_cast<StructType>(DestElemType)) {
llvm::Value *Idxs[3];
- Idxs[0] = ConstantInt::get(llvm::Type::Int32Ty, 0);
- Idxs[1] = ConstantInt::get(llvm::Type::Int32Ty, DNO);
+ Idxs[0] = ConstantInt::get(llvm::Type::getInt32Ty(Context), 0);
+ Idxs[1] = ConstantInt::get(llvm::Type::getInt32Ty(Context), DNO);
- Idxs[2] = ConstantInt::get(llvm::Type::Int32Ty, 0);
+ Idxs[2] = ConstantInt::get(llvm::Type::getInt32Ty(Context), 0);
Value *GEP = Builder.CreateGEP(Dest, Idxs, Idxs+3, "mrv_gep");
Value *EVI = Builder.CreateExtractValue(Src, 0, "mrv_gr");
Builder.CreateStore(EVI, GEP, isVolatile);
++SNO;
- Idxs[2] = ConstantInt::get(llvm::Type::Int32Ty, 1);
+ Idxs[2] = ConstantInt::get(llvm::Type::getInt32Ty(Context), 1);
GEP = Builder.CreateGEP(Dest, Idxs, Idxs+3, "mrv_gep");
EVI = Builder.CreateExtractValue(Src, 1, "mrv_gr");
Builder.CreateStore(EVI, GEP, isVolatile);
Modified: llvm-gcc-4.2/trunk/gcc/config/rs6000/llvm-rs6000.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/config/rs6000/llvm-rs6000.cpp?rev=78947&r1=78946&r2=78947&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/config/rs6000/llvm-rs6000.cpp (original)
+++ llvm-gcc-4.2/trunk/gcc/config/rs6000/llvm-rs6000.cpp Thu Aug 13 16:58:17 2009
@@ -56,7 +56,7 @@
const Type *ResultType,
std::vector<Value*> &Ops,
LLVMBuilder &Builder, Value *&Result) {
- const Type *VoidPtrTy = PointerType::getUnqual(Type::Int8Ty);
+ const Type *VoidPtrTy = PointerType::getUnqual(Type::getInt8Ty(Context));
Function *IntFn = Intrinsic::getDeclaration(TheModule, IID);
@@ -71,7 +71,7 @@
Ops[OpNum] = Ptr;
Value *V = Builder.CreateCall(IntFn, &Ops[0], &Ops[0]+Ops.size());
- if (V->getType() != Type::VoidTy) {
+ if (V->getType() != Type::getVoidTy(Context)) {
V->setName("tmp");
Result = V;
}
@@ -80,10 +80,10 @@
// GetAltivecTypeNumFromType - Given an LLVM type, return a unique ID for
// the type in the range 0-3.
static int GetAltivecTypeNumFromType(const Type *Ty) {
- return ((Ty == Type::Int32Ty) ? 0 : \
- ((Ty == Type::Int16Ty) ? 1 : \
- ((Ty == Type::Int8Ty) ? 2 : \
- ((Ty == Type::FloatTy) ? 3 : -1))));
+ return ((Ty == Type::getInt32Ty(Context)) ? 0 : \
+ ((Ty == Type::getInt16Ty(Context)) ? 1 : \
+ ((Ty == Type::getInt8Ty(Context)) ? 2 : \
+ ((Ty == Type::getFloatTy(Context)) ? 3 : -1))));
}
// TargetIntrinsicLower - To handle builtins, we want to expand the
@@ -177,30 +177,30 @@
return true;
case ALTIVEC_BUILTIN_VSPLTISB:
if (Constant *Elt = dyn_cast<ConstantInt>(Ops[0])) {
- Elt = ConstantExpr::getIntegerCast(Elt, Type::Int8Ty, true);
+ Elt = ConstantExpr::getIntegerCast(Elt, Type::getInt8Ty(Context), true);
Result = BuildVector(Elt, Elt, Elt, Elt, Elt, Elt, Elt, Elt,
Elt, Elt, Elt, Elt, Elt, Elt, Elt, Elt, NULL);
} else {
error("%Helement must be an immediate", &EXPR_LOCATION(exp));
- Result = UndefValue::get(VectorType::get(Type::Int8Ty, 16));
+ Result = UndefValue::get(VectorType::get(Type::getInt8Ty(Context), 16));
}
return true;
case ALTIVEC_BUILTIN_VSPLTISH:
if (Constant *Elt = dyn_cast<ConstantInt>(Ops[0])) {
- Elt = ConstantExpr::getIntegerCast(Elt, Type::Int16Ty, true);
+ Elt = ConstantExpr::getIntegerCast(Elt, Type::getInt16Ty(Context), true);
Result = BuildVector(Elt, Elt, Elt, Elt, Elt, Elt, Elt, Elt, NULL);
} else {
error("%Helement must be an immediate", &EXPR_LOCATION(exp));
- Result = UndefValue::get(VectorType::get(Type::Int16Ty, 8));
+ Result = UndefValue::get(VectorType::get(Type::getInt16Ty(Context), 8));
}
return true;
case ALTIVEC_BUILTIN_VSPLTISW:
if (Constant *Elt = dyn_cast<ConstantInt>(Ops[0])) {
- Elt = ConstantExpr::getIntegerCast(Elt, Type::Int32Ty, true);
+ Elt = ConstantExpr::getIntegerCast(Elt, Type::getInt32Ty(Context), true);
Result = BuildVector(Elt, Elt, Elt, Elt, NULL);
} else {
error("%Hmask must be an immediate", &EXPR_LOCATION(exp));
- Result = UndefValue::get(VectorType::get(Type::Int32Ty, 4));
+ Result = UndefValue::get(VectorType::get(Type::getInt32Ty(Context), 4));
}
return true;
case ALTIVEC_BUILTIN_VSPLTB:
@@ -248,7 +248,7 @@
if (ConstantInt *Elt = dyn_cast<ConstantInt>(Ops[2])) {
/* Map all of these to a shuffle. */
unsigned Amt = Elt->getZExtValue() & 15;
- VectorType *v16i8 = VectorType::get(Type::Int8Ty, 16);
+ VectorType *v16i8 = VectorType::get(Type::getInt8Ty(Context), 16);
Ops[0] = Builder.CreateBitCast(Ops[0], v16i8, "tmp");
Ops[1] = Builder.CreateBitCast(Ops[1], v16i8, "tmp");
Result = BuildVectorShuffle(Ops[0], Ops[1],
@@ -298,9 +298,9 @@
return true;
case ALTIVEC_BUILTIN_ABS_V4SF: {
// and out sign bits
- VectorType *v4i32 = VectorType::get(Type::Int32Ty, 4);
+ VectorType *v4i32 = VectorType::get(Type::getInt32Ty(Context), 4);
Ops[0] = Builder.CreateBitCast(Ops[0], v4i32, "tmp");
- Constant *C = ConstantInt::get(Type::Int32Ty, 0x7FFFFFFF);
+ Constant *C = ConstantInt::get(Type::getInt32Ty(Context), 0x7FFFFFFF);
C = ConstantVector::get(std::vector<Constant*>(4, C));
Result = Builder.CreateAnd(Ops[0], C, "tmp");
Result = Builder.CreateBitCast(Result, ResultType, "tmp");
@@ -356,7 +356,7 @@
case ALTIVEC_BUILTIN_VPERM_8HI:
case ALTIVEC_BUILTIN_VPERM_16QI: {
// Operation is identical on all types; we have a single intrinsic.
- const Type *VecTy = VectorType::get(Type::Int32Ty, 4);
+ const Type *VecTy = VectorType::get(Type::getInt32Ty(Context), 4);
Value *Op0 = CastToType(Instruction::BitCast, Ops[0], VecTy);
Value *Op1 = CastToType(Instruction::BitCast, Ops[1], VecTy);
Value *ActualOps[] = { Op0, Op1, Ops[2]};
@@ -371,7 +371,7 @@
case ALTIVEC_BUILTIN_VSEL_8HI:
case ALTIVEC_BUILTIN_VSEL_16QI: {
// Operation is identical on all types; we have a single intrinsic.
- const Type *VecTy = VectorType::get(Type::Int32Ty, 4);
+ const Type *VecTy = VectorType::get(Type::getInt32Ty(Context), 4);
Value *Op0 = CastToType(Instruction::BitCast, Ops[0], VecTy);
Value *Op1 = CastToType(Instruction::BitCast, Ops[1], VecTy);
Value *Op2 = CastToType(Instruction::BitCast, Ops[2], VecTy);
@@ -454,17 +454,17 @@
abort();
case 32:
// Pass _Complex long double in eight registers.
- Elts.push_back(Type::Int32Ty);
- Elts.push_back(Type::Int32Ty);
- Elts.push_back(Type::Int32Ty);
- Elts.push_back(Type::Int32Ty);
+ Elts.push_back(Type::getInt32Ty(Context));
+ Elts.push_back(Type::getInt32Ty(Context));
+ Elts.push_back(Type::getInt32Ty(Context));
+ Elts.push_back(Type::getInt32Ty(Context));
// FALLTHROUGH
case 16:
// Pass _Complex long long/double in four registers.
- Elts.push_back(Type::Int32Ty);
- Elts.push_back(Type::Int32Ty);
- Elts.push_back(Type::Int32Ty);
- Elts.push_back(Type::Int32Ty);
+ Elts.push_back(Type::getInt32Ty(Context));
+ Elts.push_back(Type::getInt32Ty(Context));
+ Elts.push_back(Type::getInt32Ty(Context));
+ Elts.push_back(Type::getInt32Ty(Context));
break;
case 8:
// Pass _Complex int/long/float in two registers.
@@ -472,12 +472,12 @@
// which will be decomposed into two i32 elements. The first element will
// have the split attribute set, which is used to trigger 8-byte alignment
// in the backend.
- Elts.push_back(Type::Int64Ty);
+ Elts.push_back(Type::getInt64Ty(Context));
break;
case 4:
case 2:
// Pass _Complex short/char in one register.
- Elts.push_back(Type::Int32Ty);
+ Elts.push_back(Type::getInt32Ty(Context));
break;
}
Modified: llvm-gcc-4.2/trunk/gcc/llvm-abi.h
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/llvm-abi.h?rev=78947&r1=78946&r2=78947&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/llvm-abi.h (original)
+++ llvm-gcc-4.2/trunk/gcc/llvm-abi.h Thu Aug 13 16:58:17 2009
@@ -212,19 +212,19 @@
unsigned Size = getTargetData().getTypeAllocSize(Ty);
*Offset = 0;
if (Size == 0)
- return Type::VoidTy;
+ return Type::getVoidTy(getGlobalContext());
else if (Size == 1)
- return Type::Int8Ty;
+ return Type::getInt8Ty(getGlobalContext());
else if (Size == 2)
- return Type::Int16Ty;
+ return Type::getInt16Ty(getGlobalContext());
else if (Size <= 4)
- return Type::Int32Ty;
+ return Type::getInt32Ty(getGlobalContext());
else if (Size <= 8)
- return Type::Int64Ty;
+ return Type::getInt64Ty(getGlobalContext());
else if (Size <= 16)
- return IntegerType::get(128);
+ return IntegerType::get(getGlobalContext(), 128);
else if (Size <= 32)
- return IntegerType::get(256);
+ return IntegerType::get(getGlobalContext(), 256);
return NULL;
}
@@ -286,7 +286,7 @@
#endif
// LLVM_BYVAL_ALIGNMENT - Returns the alignment of the type in bytes, if known,
-// in the context of its use as a function parameter.
+// in the getGlobalContext() of its use as a function parameter.
// Note that the alignment in the TYPE node is usually the alignment appropriate
// when the type is used within a struct, which may or may not be appropriate
// here.
@@ -389,7 +389,7 @@
C.HandleScalarShadowResult(PointerType::getUnqual(Ty), false);
else
C.HandleScalarResult(Ty);
- } else if (Ty->isSingleValueType() || Ty == Type::VoidTy) {
+ } else if (Ty->isSingleValueType() || Ty == Type::getVoidTy(getGlobalContext())) {
// Return scalar values normally.
C.HandleScalarResult(Ty);
} else if (doNotUseShadowReturn(type, fn)) {
@@ -435,7 +435,7 @@
// Figure out if this field is zero bits wide, e.g. {} or [0 x int]. Do
// not include variable sized fields here.
std::vector<const Type*> Elts;
- if (Ty == Type::VoidTy) {
+ if (Ty == Type::getVoidTy(getGlobalContext())) {
// Handle void explicitly as an opaque type.
const Type *OpTy = OpaqueType::get();
C.HandleScalarArgument(OpTy, type);
@@ -590,7 +590,8 @@
// don't bitcast aggregate value to Int64 if its alignment is different
// from Int64 alignment. ARM backend needs this.
unsigned Align = TYPE_ALIGN(type)/8;
- unsigned Int64Align = getTargetData().getABITypeAlignment(Type::Int64Ty);
+ unsigned Int64Align =
+ getTargetData().getABITypeAlignment(Type::getInt64Ty(getGlobalContext()));
bool UseInt64 = DontCheckAlignment ? true : (Align >= Int64Align);
// FIXME: In cases where we can, we should use the original struct.
@@ -605,21 +606,22 @@
const Type *ArrayElementType = NULL;
if (ArraySize) {
Size = Size % ElementSize;
- ArrayElementType = (UseInt64)?Type::Int64Ty:Type::Int32Ty;
+ ArrayElementType = (UseInt64) ?
+ Type::getInt64Ty(getGlobalContext()) : Type::getInt32Ty(getGlobalContext());
ATy = ArrayType::get(ArrayElementType, ArraySize);
Elts.push_back(ATy);
}
if (Size >= 4) {
- Elts.push_back(Type::Int32Ty);
+ Elts.push_back(Type::getInt32Ty(getGlobalContext()));
Size -= 4;
}
if (Size >= 2) {
- Elts.push_back(Type::Int16Ty);
+ Elts.push_back(Type::getInt16Ty(getGlobalContext()));
Size -= 2;
}
if (Size >= 1) {
- Elts.push_back(Type::Int8Ty);
+ Elts.push_back(Type::getInt8Ty(getGlobalContext()));
Size -= 1;
}
assert(Size == 0 && "Didn't cover value?");
@@ -655,10 +657,10 @@
// that occupies storage but has no useful information, and is not passed
// anywhere". Happens on x86-64.
std::vector<const Type*> Elts(OrigElts);
- const Type* wordType = getTargetData().getPointerSize() == 4 ? Type::Int32Ty :
- Type::Int64Ty;
+ const Type* wordType = getTargetData().getPointerSize() == 4 ?
+ Type::getInt32Ty(getGlobalContext()) : Type::getInt64Ty(getGlobalContext());
for (unsigned i=0, e=Elts.size(); i!=e; ++i)
- if (OrigElts[i]==Type::VoidTy)
+ if (OrigElts[i]==Type::getVoidTy(getGlobalContext()))
Elts[i] = wordType;
const StructType *STy = StructType::get(getGlobalContext(), Elts, false);
@@ -680,7 +682,7 @@
}
}
for (unsigned i = 0, e = Elts.size(); i != e; ++i) {
- if (OrigElts[i] != Type::VoidTy) {
+ if (OrigElts[i] != Type::getVoidTy(getGlobalContext())) {
C.EnterField(i, STy);
unsigned RealSize = 0;
if (LastEltSizeDiff && i == (e - 1))
@@ -740,7 +742,7 @@
C.HandleScalarShadowResult(PointerType::getUnqual(Ty), false);
else
C.HandleScalarResult(Ty);
- } else if (Ty->isSingleValueType() || Ty == Type::VoidTy) {
+ } else if (Ty->isSingleValueType() || Ty == Type::getVoidTy(getGlobalContext())) {
// Return scalar values normally.
C.HandleScalarResult(Ty);
} else if (doNotUseShadowReturn(type, fn)) {
@@ -1029,7 +1031,8 @@
// don't bitcast aggregate value to Int64 if its alignment is different
// from Int64 alignment. ARM backend needs this.
unsigned Align = TYPE_ALIGN(type)/8;
- unsigned Int64Align = getTargetData().getABITypeAlignment(Type::Int64Ty);
+ unsigned Int64Align =
+ getTargetData().getABITypeAlignment(Type::getInt64Ty(getGlobalContext()));
bool UseInt64 = DontCheckAlignment ? true : (Align >= Int64Align);
// FIXME: In cases where we can, we should use the original struct.
@@ -1044,21 +1047,22 @@
const Type *ArrayElementType = NULL;
if (ArraySize) {
Size = Size % ElementSize;
- ArrayElementType = (UseInt64)?Type::Int64Ty:Type::Int32Ty;
+ ArrayElementType = (UseInt64) ?
+ Type::getInt64Ty(getGlobalContext()) : Type::getInt32Ty(getGlobalContext());
ATy = ArrayType::get(ArrayElementType, ArraySize);
Elts.push_back(ATy);
}
if (Size >= 4) {
- Elts.push_back(Type::Int32Ty);
+ Elts.push_back(Type::getInt32Ty(getGlobalContext()));
Size -= 4;
}
if (Size >= 2) {
- Elts.push_back(Type::Int16Ty);
+ Elts.push_back(Type::getInt16Ty(getGlobalContext()));
Size -= 2;
}
if (Size >= 1) {
- Elts.push_back(Type::Int8Ty);
+ Elts.push_back(Type::getInt8Ty(getGlobalContext()));
Size -= 1;
}
assert(Size == 0 && "Didn't cover value?");
@@ -1096,10 +1100,10 @@
// that occupies storage but has no useful information, and is not passed
// anywhere". Happens on x86-64.
std::vector<const Type*> Elts(OrigElts);
- const Type* wordType = getTargetData().getPointerSize() == 4 ? Type::Int32Ty :
- Type::Int64Ty;
+ const Type* wordType = getTargetData().getPointerSize() == 4
+ ? Type::getInt32Ty(getGlobalContext()) : Type::getInt64Ty(getGlobalContext());
for (unsigned i=0, e=Elts.size(); i!=e; ++i)
- if (OrigElts[i]==Type::VoidTy)
+ if (OrigElts[i]==Type::getVoidTy(getGlobalContext()))
Elts[i] = wordType;
const StructType *STy = StructType::get(getGlobalContext(), Elts, false);
@@ -1121,7 +1125,7 @@
}
}
for (unsigned i = 0, e = Elts.size(); i != e; ++i) {
- if (OrigElts[i] != Type::VoidTy) {
+ if (OrigElts[i] != Type::getVoidTy(getGlobalContext())) {
C.EnterField(i, STy);
unsigned RealSize = 0;
if (LastEltSizeDiff && i == (e - 1))
Modified: llvm-gcc-4.2/trunk/gcc/llvm-backend.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/llvm-backend.cpp?rev=78947&r1=78946&r2=78947&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/llvm-backend.cpp (original)
+++ llvm-gcc-4.2/trunk/gcc/llvm-backend.cpp Thu Aug 13 16:58:17 2009
@@ -289,7 +289,7 @@
else
// Non constant values, e.g. arguments, are not at global scope.
// When PCH is read, only global scope values are used.
- ValuesForPCH.push_back(Constant::getNullValue(Type::Int32Ty));
+ ValuesForPCH.push_back(Constant::getNullValue(Type::getInt32Ty(Context)));
}
// Create string table.
@@ -813,11 +813,12 @@
LLVMContext &Context = getGlobalContext();
const Type *FPTy =
- FunctionType::get(Type::VoidTy, std::vector<const Type*>(), false);
+ FunctionType::get(Type::getVoidTy(Context),
+ std::vector<const Type*>(), false);
FPTy = PointerType::getUnqual(FPTy);
for (unsigned i = 0, e = Tors.size(); i != e; ++i) {
- StructInit[0] = ConstantInt::get(Type::Int32Ty, Tors[i].second);
+ StructInit[0] = ConstantInt::get(Type::getInt32Ty(Context), Tors[i].second);
// __attribute__(constructor) can be on a function with any type. Make sure
// the pointer is void()*.
@@ -853,7 +854,7 @@
if (!AttributeUsedGlobals.empty()) {
std::vector<Constant *> AUGs;
- const Type *SBP= PointerType::getUnqual(Type::Int8Ty);
+ const Type *SBP= PointerType::getUnqual(Type::getInt8Ty(Context));
for (SmallSetVector<Constant *,32>::iterator
AI = AttributeUsedGlobals.begin(),
AE = AttributeUsedGlobals.end(); AI != AE; ++AI) {
@@ -872,7 +873,7 @@
if (!AttributeCompilerUsedGlobals.empty()) {
std::vector<Constant *> ACUGs;
- const Type *SBP= PointerType::getUnqual(Type::Int8Ty);
+ const Type *SBP= PointerType::getUnqual(Type::getInt8Ty(Context));
for (SmallSetVector<Constant *,32>::iterator
AI = AttributeCompilerUsedGlobals.begin(),
AE = AttributeCompilerUsedGlobals.end(); AI != AE; ++AI) {
@@ -1126,7 +1127,7 @@
/// global if possible.
Constant* ConvertMetadataStringToGV(const char *str) {
- Constant *Init = ConstantArray::get(std::string(str));
+ Constant *Init = ConstantArray::get(getGlobalContext(), std::string(str));
// Use cached string if it exists.
static std::map<Constant*, GlobalVariable*> StringCSTCache;
@@ -1154,9 +1155,10 @@
return;
// Get file and line number
- Constant *lineNo = ConstantInt::get(Type::Int32Ty, DECL_SOURCE_LINE(decl));
+ Constant *lineNo = ConstantInt::get(Type::getInt32Ty(Context),
+ DECL_SOURCE_LINE(decl));
Constant *file = ConvertMetadataStringToGV(DECL_SOURCE_FILE(decl));
- const Type *SBP= PointerType::getUnqual(Type::Int8Ty);
+ const Type *SBP= PointerType::getUnqual(Type::getInt8Ty(Context));
file = TheFolder->CreateBitCast(file, SBP);
// There may be multiple annotate attributes. Pass return of lookup_attr
@@ -1625,7 +1627,7 @@
// If we have "extern void foo", make the global have type {} instead of
// type void.
- if (Ty == Type::VoidTy)
+ if (Ty == Type::getVoidTy(Context))
Ty = StructType::get(Context);
if (Name[0] == 0) { // Global has no name.
Modified: llvm-gcc-4.2/trunk/gcc/llvm-convert.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/llvm-convert.cpp?rev=78947&r1=78946&r2=78947&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/llvm-convert.cpp (original)
+++ llvm-gcc-4.2/trunk/gcc/llvm-convert.cpp Thu Aug 13 16:58:17 2009
@@ -203,7 +203,7 @@
if (DECL_NAME(LabelDecl))
Name = IDENTIFIER_POINTER(DECL_NAME(LabelDecl));
- BasicBlock *NewBB = BasicBlock::Create(Name);
+ BasicBlock *NewBB = BasicBlock::Create(Context, Name);
SET_DECL_LLVM(LabelDecl, NewBB);
return NewBB;
}
@@ -219,7 +219,7 @@
assert(!BYTES_BIG_ENDIAN && "Unsupported case - please report");
// Do byte wise store because actual argument type does not match LLVMTy.
assert(isa<IntegerType>(ArgVal->getType()) && "Expected an integer value!");
- const Type *StoreType = IntegerType::get(RealSize * 8);
+ const Type *StoreType = IntegerType::get(Context, RealSize * 8);
Loc = Builder.CreateBitCast(Loc, StoreType->getPointerTo());
if (ArgVal->getType()->getPrimitiveSizeInBits() >=
StoreType->getPrimitiveSizeInBits())
@@ -325,7 +325,7 @@
// If this is GCC being sloppy about pointer types, insert a bitcast.
// See PR1083 for an example.
ArgVal = Builder.CreateBitCast(ArgVal, LLVMTy);
- } else if (ArgVal->getType() == Type::DoubleTy) {
+ } else if (ArgVal->getType() == Type::getDoubleTy(Context)) {
// If this is a K&R float parameter, it got promoted to double. Insert
// the truncation to float now.
ArgVal = Builder.CreateFPTrunc(ArgVal, LLVMTy,
@@ -334,7 +334,8 @@
// If this is just a mismatch between integer types, this is due
// to K&R prototypes, where the forward proto defines the arg as int
// and the actual impls is a short or char.
- assert(ArgVal->getType() == Type::Int32Ty && LLVMTy->isInteger() &&
+ assert(ArgVal->getType() == Type::getInt32Ty(Context) &&
+ LLVMTy->isInteger() &&
"Lowerings don't match?");
ArgVal = Builder.CreateTrunc(ArgVal, LLVMTy,NameStack.back().c_str());
}
@@ -538,7 +539,7 @@
Fn->setDoesNotThrow();
// Create a new basic block for the function.
- Builder.SetInsertPoint(BasicBlock::Create("entry", Fn));
+ Builder.SetInsertPoint(BasicBlock::Create(Context, "entry", Fn));
if (TheDebugInfo)
TheDebugInfo->EmitFunctionStart(FnDecl, Fn, Builder.GetInsertBlock());
@@ -643,10 +644,10 @@
FunctionContext =
CreateTempLoc (ConvertType(sjlj_fc_type_node));
llvm::Value *Idxs[2];
- Idxs[0] = ConstantInt::get(llvm::Type::Int32Ty, 0);
+ Idxs[0] = ConstantInt::get(llvm::Type::getInt32Ty(Context), 0);
// Assign the unwind personality function address
- Idxs[1] = ConstantInt::get(llvm::Type::Int32Ty, 3);
+ Idxs[1] = ConstantInt::get(llvm::Type::getInt32Ty(Context), 3);
Value *FieldPtr = Builder.CreateGEP (FunctionContext.Ptr, Idxs, Idxs+2,
"personality_gep");
const Type *FieldTy =
@@ -656,7 +657,7 @@
Builder.CreateStore(Val, FieldPtr);
// Load the address for the language specific data area (LSDA)
- Idxs[1] = ConstantInt::get(llvm::Type::Int32Ty, 4);
+ Idxs[1] = ConstantInt::get(llvm::Type::getInt32Ty(Context), 4);
FieldPtr
= Builder.CreateGEP (FunctionContext.Ptr, Idxs, Idxs+2, "lsda_gep");
FieldTy = cast<PointerType>(FieldPtr->getType())->getElementType();
@@ -668,10 +669,10 @@
// builtin_setjmp() stuff goes here.
// 1. Save the frame pointer.
- Idxs[1] = ConstantInt::get(llvm::Type::Int32Ty, 5);
+ Idxs[1] = ConstantInt::get(llvm::Type::getInt32Ty(Context), 5);
FieldPtr
= Builder.CreateGEP (FunctionContext.Ptr, Idxs, Idxs+2, "jbuf_gep");
- Idxs[1] = ConstantInt::get(llvm::Type::Int32Ty, 0);
+ Idxs[1] = ConstantInt::get(llvm::Type::getInt32Ty(Context), 0);
Value *ElemPtr
= Builder.CreateGEP (FieldPtr, Idxs, Idxs+2, "jbuf_fp_gep");
FieldTy = cast<PointerType>(ElemPtr->getType())->getElementType();
@@ -680,21 +681,22 @@
// helper function.
Val = Builder.CreateCall
(Intrinsic::getDeclaration(TheModule, Intrinsic::frameaddress),
- ConstantInt::get(llvm::Type::Int32Ty, 0));
+ ConstantInt::get(llvm::Type::getInt32Ty(Context), 0));
Val = BitCastToType(Val, FieldTy);
Builder.CreateStore(Val, ElemPtr);
- FieldPtr = BitCastToType(FieldPtr, llvm::Type::Int8Ty->getPointerTo());
+ FieldPtr = BitCastToType(FieldPtr,
+ llvm::Type::getInt8Ty(Context)->getPointerTo());
Value *DispatchVal = Builder.CreateCall
(Intrinsic::getDeclaration(TheModule, Intrinsic::eh_sjlj_setjmp),
FieldPtr);
// check the return value of the setjmp. non-zero goes to dispatcher
- Value *Zero = ConstantInt::get(llvm::Type::Int32Ty, 0);
+ Value *Zero = ConstantInt::get(llvm::Type::getInt32Ty(Context), 0);
Value *Compare = Builder.CreateICmpEQ(DispatchVal, Zero);
// Branch on the compare.
- DispatchBB = BasicBlock::Create("dispatch");
- BasicBlock *PostEntryBB = BasicBlock::Create("post_entry");
+ DispatchBB = BasicBlock::Create(Context, "dispatch");
+ BasicBlock *PostEntryBB = BasicBlock::Create(Context, "post_entry");
Builder.CreateCondBr(Compare, PostEntryBB, DispatchBB);
EmitBlock(PostEntryBB);
@@ -705,7 +707,7 @@
}
// Create a new block for the return node, but don't insert it yet.
- ReturnBB = BasicBlock::Create("return");
+ ReturnBB = BasicBlock::Create(Context, "return");
}
Function *TreeToLLVM::FinishFunctionBody() {
@@ -724,7 +726,7 @@
SmallVector <Value *, 4> RetVals;
// If the function returns a value, get it into a register and return it now.
- if (Fn->getReturnType() != Type::VoidTy) {
+ if (Fn->getReturnType() != Type::getVoidTy(Context)) {
if (!isAggregateTreeType(TREE_TYPE(DECL_RESULT(FnDecl)))) {
// If the DECL_RESULT is a scalar type, just load out the return value
// and return it.
@@ -741,9 +743,9 @@
Value *R1 = BitCastToType(RetVal, PointerType::getUnqual(STy));
llvm::Value *Idxs[2];
- Idxs[0] = ConstantInt::get(llvm::Type::Int32Ty, 0);
+ Idxs[0] = ConstantInt::get(llvm::Type::getInt32Ty(Context), 0);
for (unsigned ri = 0; ri < STy->getNumElements(); ++ri) {
- Idxs[1] = ConstantInt::get(llvm::Type::Int32Ty, ri);
+ Idxs[1] = ConstantInt::get(llvm::Type::getInt32Ty(Context), ri);
Value *GEP = Builder.CreateGEP(R1, Idxs, Idxs+2, "mrv_gep");
Value *E = Builder.CreateLoad(GEP, "mrv");
RetVals.push_back(E);
@@ -756,9 +758,9 @@
// beginning of the aggregate (x86-64).
if (ReturnOffset) {
RetVal = BitCastToType(RetVal,
- PointerType::getUnqual(Type::Int8Ty));
+ PointerType::getUnqual(Type::getInt8Ty(Context)));
RetVal = Builder.CreateGEP(RetVal,
- ConstantInt::get(TD.getIntPtrType(), ReturnOffset));
+ ConstantInt::get(TD.getIntPtrType(Context), ReturnOffset));
}
RetVal = BitCastToType(RetVal,
PointerType::getUnqual(Fn->getReturnType()));
@@ -850,7 +852,7 @@
break;
if (e && e->dest != bb->next_bb) {
Builder.CreateBr(getLabelDeclBlock(tree_block_label (e->dest)));
- EmitBlock(BasicBlock::Create(""));
+ EmitBlock(BasicBlock::Create(Context, ""));
}
}
@@ -1167,7 +1169,7 @@
// Handle 'trunc (zext i1 X to T2) to i1' as X, because this occurs all over
// the place.
if (ZExtInst *CI = dyn_cast<ZExtInst>(V))
- if (Ty == Type::Int1Ty && CI->getOperand(0)->getType() == Type::Int1Ty)
+ if (Ty == Type::getInt1Ty(Context) && CI->getOperand(0)->getType() == Type::getInt1Ty(Context))
return CI->getOperand(0);
return Builder.CreateCast(Instruction::CastOps(opcode), V, Ty,
@@ -1251,8 +1253,8 @@
// it is dead. This allows us to insert allocas in order without having to
// scan for an insertion point. Use BitCast for int -> int
AllocaInsertionPoint = CastInst::Create(Instruction::BitCast,
- Constant::getNullValue(Type::Int32Ty),
- Type::Int32Ty, "alloca point");
+ Constant::getNullValue(Type::getInt32Ty(Context)),
+ Type::getInt32Ty(Context), "alloca point");
// Insert it as the first instruction in the entry block.
Fn->begin()->getInstList().insert(Fn->begin()->begin(),
AllocaInsertionPoint);
@@ -1462,19 +1464,19 @@
}
}
- EmitMemSet(DestLoc.Ptr, ConstantInt::get(Type::Int8Ty, 0),
+ EmitMemSet(DestLoc.Ptr, ConstantInt::get(Type::getInt8Ty(Context), 0),
Emit(TYPE_SIZE_UNIT(type), 0), DestLoc.getAlignment());
}
Value *TreeToLLVM::EmitMemCpy(Value *DestPtr, Value *SrcPtr, Value *Size,
unsigned Align) {
- const Type *SBP = PointerType::getUnqual(Type::Int8Ty);
- const Type *IntPtr = TD.getIntPtrType();
+ const Type *SBP = PointerType::getUnqual(Type::getInt8Ty(Context));
+ const Type *IntPtr = TD.getIntPtrType(Context);
Value *Ops[4] = {
BitCastToType(DestPtr, SBP),
BitCastToType(SrcPtr, SBP),
CastToSIntType(Size, IntPtr),
- ConstantInt::get(Type::Int32Ty, Align)
+ ConstantInt::get(Type::getInt32Ty(Context), Align)
};
Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::memcpy,
@@ -1484,13 +1486,13 @@
Value *TreeToLLVM::EmitMemMove(Value *DestPtr, Value *SrcPtr, Value *Size,
unsigned Align) {
- const Type *SBP = PointerType::getUnqual(Type::Int8Ty);
- const Type *IntPtr = TD.getIntPtrType();
+ const Type *SBP = PointerType::getUnqual(Type::getInt8Ty(Context));
+ const Type *IntPtr = TD.getIntPtrType(Context);
Value *Ops[4] = {
BitCastToType(DestPtr, SBP),
BitCastToType(SrcPtr, SBP),
CastToSIntType(Size, IntPtr),
- ConstantInt::get(Type::Int32Ty, Align)
+ ConstantInt::get(Type::getInt32Ty(Context), Align)
};
Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::memmove,
@@ -1500,13 +1502,13 @@
Value *TreeToLLVM::EmitMemSet(Value *DestPtr, Value *SrcVal, Value *Size,
unsigned Align) {
- const Type *SBP = PointerType::getUnqual(Type::Int8Ty);
- const Type *IntPtr = TD.getIntPtrType();
+ const Type *SBP = PointerType::getUnqual(Type::getInt8Ty(Context));
+ const Type *IntPtr = TD.getIntPtrType(Context);
Value *Ops[4] = {
BitCastToType(DestPtr, SBP),
- CastToSIntType(SrcVal, Type::Int8Ty),
+ CastToSIntType(SrcVal, Type::getInt8Ty(Context)),
CastToSIntType(Size, IntPtr),
- ConstantInt::get(Type::Int32Ty, Align)
+ ConstantInt::get(Type::getInt32Ty(Context), Align)
};
Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::memset,
@@ -1525,7 +1527,7 @@
// The idea is that it's a pointer to type "Value"
// which is opaque* but the routine expects i8** and i8*.
- const PointerType *Ty = PointerType::getUnqual(Type::Int8Ty);
+ const PointerType *Ty = PointerType::getUnqual(Type::getInt8Ty(Context));
V = Builder.CreateBitCast(V, PointerType::getUnqual(Ty));
Value *Ops[2] = {
@@ -1550,9 +1552,9 @@
// Get file and line number
Constant *lineNo =
- ConstantInt::get(Type::Int32Ty, DECL_SOURCE_LINE(decl));
+ ConstantInt::get(Type::getInt32Ty(Context), DECL_SOURCE_LINE(decl));
Constant *file = ConvertMetadataStringToGV(DECL_SOURCE_FILE(decl));
- const Type *SBP= PointerType::getUnqual(Type::Int8Ty);
+ const Type *SBP= PointerType::getUnqual(Type::getInt8Ty(Context));
file = Builder.getFolder().CreateBitCast(file, SBP);
// There may be multiple annotate attributes. Pass return of lookup_attr
@@ -1572,7 +1574,7 @@
// Assert its a string, and then get that string.
assert(TREE_CODE(val) == STRING_CST &&
"Annotate attribute arg should always be a string");
- const Type *SBP = PointerType::getUnqual(Type::Int8Ty);
+ const Type *SBP = PointerType::getUnqual(Type::getInt8Ty(Context));
Constant *strGV = TreeConstantToLLVM::EmitLV_STRING_CST(val);
Value *Ops[4] = {
BitCastToType(V, SBP),
@@ -1662,9 +1664,9 @@
} else {
// Compute the variable's size in bytes.
Size = Emit(DECL_SIZE_UNIT(decl), 0);
- Ty = Type::Int8Ty;
+ Ty = Type::getInt8Ty(Context);
}
- Size = CastToUIntType(Size, Type::Int32Ty);
+ Size = CastToUIntType(Size, Type::getInt32Ty(Context));
}
unsigned Alignment = 0; // Alignment in bytes.
@@ -1740,7 +1742,7 @@
// Assign the new ID, update AddressTakenBBNumbers to remember it.
uint64_t BlockNo = ++NumAddressTakenBlocks;
BlockNo &= ~0ULL >> (64-TD.getPointerSizeInBits());
- Val = ConstantInt::get(TD.getIntPtrType(), BlockNo);
+ Val = ConstantInt::get(TD.getIntPtrType(Context), BlockNo);
// Add it to the switch statement in the indirect goto block.
cast<SwitchInst>(getIndirectGotoBlock()->getTerminator())->addCase(Val, BB);
@@ -1753,10 +1755,10 @@
if (IndirectGotoBlock) return IndirectGotoBlock;
// Create a temporary for the value to be switched on.
- IndirectGotoValue = CreateTemporary(TD.getIntPtrType());
+ IndirectGotoValue = CreateTemporary(TD.getIntPtrType(Context));
// Create the block, emit a load, and emit the switch in the block.
- IndirectGotoBlock = BasicBlock::Create("indirectgoto");
+ IndirectGotoBlock = BasicBlock::Create(Context, "indirectgoto");
Value *Ld = new LoadInst(IndirectGotoValue, "gotodest", IndirectGotoBlock);
SwitchInst::Create(Ld, IndirectGotoBlock, 0, IndirectGotoBlock);
@@ -1787,7 +1789,7 @@
// Store the destination block to the GotoValue alloca.
Value *V = Emit(TREE_OPERAND(exp, 0), 0);
- V = CastToType(Instruction::PtrToInt, V, TD.getIntPtrType());
+ V = CastToType(Instruction::PtrToInt, V, TD.getIntPtrType(Context));
Builder.CreateStore(V, IndirectGotoValue);
// NOTE: This is HORRIBLY INCORRECT in the presence of exception handlers.
@@ -1796,7 +1798,7 @@
//
Builder.CreateBr(DestBB);
}
- EmitBlock(BasicBlock::Create(""));
+ EmitBlock(BasicBlock::Create(Context, ""));
return 0;
}
@@ -1817,7 +1819,7 @@
// Emit a branch to the exit label.
Builder.CreateBr(ReturnBB);
- EmitBlock(BasicBlock::Create(""));
+ EmitBlock(BasicBlock::Create(Context, ""));
return 0;
}
@@ -1874,11 +1876,11 @@
if (FPPred == ~0U) {
Cond = Emit(exp_cond, 0);
// Comparison against zero to convert the result to i1.
- if (Cond->getType() != Type::Int1Ty)
+ if (Cond->getType() != Type::getInt1Ty(Context))
Cond = Builder.CreateIsNotNull(Cond, "toBool");
} else {
- Cond = EmitCompare(exp_cond, UIPred, SIPred, FPPred, Type::Int1Ty);
- assert(Cond->getType() == Type::Int1Ty);
+ Cond = EmitCompare(exp_cond, UIPred, SIPred, FPPred, Type::getInt1Ty(Context));
+ assert(Cond->getType() == Type::getInt1Ty(Context));
}
tree Then = COND_EXPR_THEN(exp);
@@ -1889,7 +1891,7 @@
BasicBlock *ThenDest = getLabelDeclBlock(TREE_OPERAND(Then, 0));
BasicBlock *ElseDest = getLabelDeclBlock(TREE_OPERAND(Else, 0));
Builder.CreateCondBr(Cond, ThenDest, ElseDest);
- EmitBlock(BasicBlock::Create(""));
+ EmitBlock(BasicBlock::Create(Context, ""));
return 0;
}
@@ -1903,7 +1905,7 @@
// Emit the switch instruction.
SwitchInst *SI = Builder.CreateSwitch(SwitchExp, Builder.GetInsertBlock(),
TREE_VEC_LENGTH(Cases));
- EmitBlock(BasicBlock::Create(""));
+ EmitBlock(BasicBlock::Create(Context, ""));
// Default location starts out as fall-through
SI->setSuccessor(0, Builder.GetInsertBlock());
@@ -1953,7 +1955,7 @@
Value *Diff = Builder.CreateSub(SwitchExp, LowC);
Value *Cond = Builder.CreateICmpULE(Diff,
ConstantInt::get(Context, Range));
- BasicBlock *False_Block = BasicBlock::Create("case_false");
+ BasicBlock *False_Block = BasicBlock::Create(Context, "case_false");
Builder.CreateCondBr(Cond, Dest, False_Block);
EmitBlock(False_Block);
}
@@ -1965,7 +1967,7 @@
else {
Builder.CreateBr(DefaultDest);
// Emit a "fallthrough" block, which is almost certainly dead.
- EmitBlock(BasicBlock::Create(""));
+ EmitBlock(BasicBlock::Create(Context, ""));
}
}
@@ -1978,9 +1980,9 @@
// Check to see if the exception values have been constructed.
if (ExceptionValue) return;
- const Type *IntPtr = TD.getIntPtrType();
+ const Type *IntPtr = TD.getIntPtrType(Context);
- ExceptionValue = CreateTemporary(PointerType::getUnqual(Type::Int8Ty));
+ ExceptionValue = CreateTemporary(PointerType::getUnqual(Type::getInt8Ty(Context)));
ExceptionValue->setName("eh_exception");
ExceptionSelectorValue = CreateTemporary(IntPtr);
@@ -1989,11 +1991,11 @@
FuncEHException = Intrinsic::getDeclaration(TheModule,
Intrinsic::eh_exception);
FuncEHSelector = Intrinsic::getDeclaration(TheModule,
- (IntPtr == Type::Int32Ty ?
+ (IntPtr == Type::getInt32Ty(Context) ?
Intrinsic::eh_selector_i32 :
Intrinsic::eh_selector_i64));
FuncEHGetTypeID = Intrinsic::getDeclaration(TheModule,
- (IntPtr == Type::Int32Ty ?
+ (IntPtr == Type::getInt32Ty(Context) ?
Intrinsic::eh_typeid_for_i32 :
Intrinsic::eh_typeid_for_i64));
}
@@ -2005,7 +2007,7 @@
BasicBlock *&PostPad = PostPads[RegionNo];
if (!PostPad)
- PostPad = BasicBlock::Create("ppad");
+ PostPad = BasicBlock::Create(Context, "ppad");
return PostPad;
}
@@ -2022,8 +2024,8 @@
EmitBlock(DispatchBB);
// Get the call_site value
llvm::Value *Idxs[2];
- Idxs[0] = ConstantInt::get(llvm::Type::Int32Ty, 0);
- Idxs[1] = ConstantInt::get(llvm::Type::Int32Ty, 1);
+ Idxs[0] = ConstantInt::get(llvm::Type::getInt32Ty(Context), 0);
+ Idxs[1] = ConstantInt::get(llvm::Type::getInt32Ty(Context), 1);
Value *FieldPtr = Builder.CreateGEP (FunctionContext.Ptr, Idxs, Idxs+2,
"call_site_gep");
Value *CallSite = Builder.CreateLoad(FieldPtr, "call_site");
@@ -2034,10 +2036,10 @@
for (unsigned region = 1 ; region < LandingPads.size() ; ++region) {
if (LandingPads[region]) {
if (!FirstPad) FirstPad = region;
- Value *RegionNo = ConstantInt::get(llvm::Type::Int32Ty, region - 1);
+ Value *RegionNo = ConstantInt::get(llvm::Type::getInt32Ty(Context), region - 1);
Value *Compare = Builder.CreateICmpEQ(CallSite, RegionNo);
// Branch on the compare.
- BasicBlock *NextDispatch = BasicBlock::Create("dispatch");
+ BasicBlock *NextDispatch = BasicBlock::Create(Context, "dispatch");
Builder.CreateCondBr(Compare, LandingPads[region], NextDispatch);
EmitBlock(NextDispatch);
}
@@ -2064,11 +2066,11 @@
// Get the exception value from the function context
llvm::Value *Idxs[2];
- Idxs[0] = ConstantInt::get(llvm::Type::Int32Ty, 0);
- Idxs[1] = ConstantInt::get(llvm::Type::Int32Ty, 2);
+ Idxs[0] = ConstantInt::get(llvm::Type::getInt32Ty(Context), 0);
+ Idxs[1] = ConstantInt::get(llvm::Type::getInt32Ty(Context), 2);
Value *FCDataPtr
= Builder.CreateGEP (FunctionContext.Ptr, Idxs, Idxs+2, "fc_data_gep");
- Idxs[1] = ConstantInt::get(llvm::Type::Int32Ty, 0);
+ Idxs[1] = ConstantInt::get(llvm::Type::getInt32Ty(Context), 0);
Value *ElemPtr
= Builder.CreateGEP (FCDataPtr, Idxs, Idxs+2, "exception_gep");
Value *Ex = Builder.CreateLoad (ElemPtr);
@@ -2083,7 +2085,7 @@
assert(llvm_eh_personality_libfunc
&& "no exception handling personality function!");
Args.push_back(BitCastToType(DECL_LLVM(llvm_eh_personality_libfunc),
- PointerType::getUnqual(Type::Int8Ty)));
+ PointerType::getUnqual(Type::getInt8Ty(Context))));
// Add selections for each handler.
foreach_reachable_handler(i, false, AddHandler, &Handlers);
@@ -2099,7 +2101,7 @@
tree TypeList = get_eh_type_list(region);
unsigned Length = list_length(TypeList);
Args.reserve(Args.size() + Length + 1);
- Args.push_back(ConstantInt::get(Type::Int32Ty, Length + 1));
+ Args.push_back(ConstantInt::get(Type::getInt32Ty(Context), Length + 1));
// Add the type infos.
for (; TypeList; TypeList = TREE_CHAIN(TypeList)) {
@@ -2113,7 +2115,7 @@
if (!TypeList) {
// Catch-all - push a null pointer.
Args.push_back(Constant::getNullValue(
- PointerType::getUnqual(Type::Int8Ty)));
+ PointerType::getUnqual(Type::getInt8Ty(Context))));
} else {
// Add the type infos.
for (; TypeList; TypeList = TREE_CHAIN(TypeList)) {
@@ -2134,13 +2136,13 @@
// DWARF.
if (USING_SJLJ_EXCEPTIONS || !lang_eh_catch_all) {
// Use a "cleanup" - this should be good enough for most languages.
- Catch_All = ConstantInt::get(Type::Int32Ty, 0);
+ Catch_All = ConstantInt::get(Type::getInt32Ty(Context), 0);
} else {
tree catch_all_type = lang_eh_catch_all();
if (catch_all_type == NULL_TREE)
// Use a C++ style null catch-all object.
Catch_All =
- Constant::getNullValue(PointerType::getUnqual(Type::Int8Ty));
+ Constant::getNullValue(PointerType::getUnqual(Type::getInt8Ty(Context)));
else
// This language has a type that catches all others.
Catch_All = Emit(catch_all_type, 0);
@@ -2151,7 +2153,7 @@
Builder.CreateCall(FuncEHSelector, Args.begin(), Args.end(),
"eh_select");
// Fetch and store the exception selector.
- Idxs[1] = ConstantInt::get(llvm::Type::Int32Ty, 1);
+ Idxs[1] = ConstantInt::get(llvm::Type::getInt32Ty(Context), 1);
ElemPtr = Builder.CreateGEP (FCDataPtr, Idxs, Idxs+2, "handler_gep");
Value *Select = Builder.CreateLoad (ElemPtr);
Builder.CreateStore(Select, ExceptionSelectorValue);
@@ -2191,7 +2193,7 @@
assert(llvm_eh_personality_libfunc
&& "no exception handling personality function!");
Args.push_back(BitCastToType(DECL_LLVM(llvm_eh_personality_libfunc),
- PointerType::getUnqual(Type::Int8Ty)));
+ PointerType::getUnqual(Type::getInt8Ty(Context))));
// Add selections for each handler.
foreach_reachable_handler(i, false, AddHandler, &Handlers);
@@ -2209,7 +2211,7 @@
tree TypeList = get_eh_type_list(region);
unsigned Length = list_length(TypeList);
Args.reserve(Args.size() + Length + 1);
- Args.push_back(ConstantInt::get(Type::Int32Ty, Length + 1));
+ Args.push_back(ConstantInt::get(Type::getInt32Ty(Context), Length + 1));
// Add the type infos.
for (; TypeList; TypeList = TREE_CHAIN(TypeList)) {
@@ -2223,7 +2225,7 @@
if (!TypeList) {
// Catch-all - push a null pointer.
Args.push_back(
- Constant::getNullValue(PointerType::getUnqual(Type::Int8Ty))
+ Constant::getNullValue(PointerType::getUnqual(Type::getInt8Ty(Context)))
);
} else {
// Add the type infos.
@@ -2244,13 +2246,13 @@
Value *CatchAll;
if (!lang_eh_catch_all) {
// Use a "cleanup" - this should be good enough for most languages.
- CatchAll = ConstantInt::get(Type::Int32Ty, 0);
+ CatchAll = ConstantInt::get(Type::getInt32Ty(Context), 0);
} else {
tree catch_all_type = lang_eh_catch_all();
if (catch_all_type == NULL_TREE)
// Use a C++ style null catch-all object.
CatchAll = Constant::getNullValue(
- PointerType::getUnqual(Type::Int8Ty));
+ PointerType::getUnqual(Type::getInt8Ty(Context)));
else
// This language has a type that catches all others.
CatchAll = Emit(catch_all_type, 0);
@@ -2303,7 +2305,7 @@
Value *Compare = Builder.CreateICmpSLT(Select, Zero);
// Branch on the compare.
- BasicBlock *NoFilterBB = BasicBlock::Create("nofilter");
+ BasicBlock *NoFilterBB = BasicBlock::Create(Context, "nofilter");
Builder.CreateCondBr(Compare, Dest, NoFilterBB);
EmitBlock(NoFilterBB);
} else if (RegionKind > 0) {
@@ -2314,7 +2316,7 @@
for (; TypeList; TypeList = TREE_CHAIN (TypeList)) {
Value *TType = Emit(lookup_type_for_runtime(TREE_VALUE(TypeList)), 0);
TType = BitCastToType(TType,
- PointerType::getUnqual(Type::Int8Ty));
+ PointerType::getUnqual(Type::getInt8Ty(Context)));
// Call get eh type id.
Value *TypeID = Builder.CreateCall(FuncEHGetTypeID, TType, "eh_typeid");
@@ -2348,7 +2350,7 @@
}
// If there is no such catch, execute a RESX if the comparison fails.
- NoCatchBB = BasicBlock::Create("nocatch");
+ NoCatchBB = BasicBlock::Create(Context, "nocatch");
// Branch on the compare.
Builder.CreateCondBr(Cond, Dest, NoCatchBB);
EmitBlock(NoCatchBB);
@@ -2377,7 +2379,7 @@
"Must-not-throw region handled by runtime?");
// Unwinding continues in the caller.
if (!UnwindBB)
- UnwindBB = BasicBlock::Create("Unwind");
+ UnwindBB = BasicBlock::Create(Context, "Unwind");
Builder.CreateBr(UnwindBB);
}
@@ -2394,8 +2396,8 @@
// Mark the call_site as -1 since we're signalling to continue
// the unwind now.
llvm::Value *Idxs[2];
- Idxs[0] = ConstantInt::get(llvm::Type::Int32Ty, 0);
- Idxs[1] = ConstantInt::get(llvm::Type::Int32Ty, 1);
+ Idxs[0] = ConstantInt::get(llvm::Type::getInt32Ty(Context), 0);
+ Idxs[1] = ConstantInt::get(llvm::Type::getInt32Ty(Context), 1);
Value *FieldPtr = Builder.CreateGEP (FunctionContext.Ptr, Idxs, Idxs+2,
"call_site_gep");
const Type *FieldTy = cast<PointerType>(FieldPtr->getType())->getElementType();
@@ -2507,7 +2509,7 @@
Value *Ptr = Index ?
Builder.CreateGEP(LV.Ptr,
- ConstantInt::get(Type::Int32Ty, Index)) :
+ ConstantInt::get(Type::getInt32Ty(Context), Index)) :
LV.Ptr;
LoadInst *LI = Builder.CreateLoad(Ptr, isVolatile);
LI->setAlignment(Alignment);
@@ -2609,7 +2611,7 @@
//
if (fndecl && TREE_THIS_VOLATILE(fndecl)) {
Builder.CreateUnreachable();
- EmitBlock(BasicBlock::Create(""));
+ EmitBlock(BasicBlock::Create(Context, ""));
}
return Result;
}
@@ -2625,7 +2627,7 @@
// Not clear what this is supposed to do on big endian machines...
assert(!BYTES_BIG_ENDIAN && "Unsupported case - please report");
assert(isa<IntegerType>(LLVMTy) && "Expected an integer value!");
- const Type *LoadType = IntegerType::get(RealSize * 8);
+ const Type *LoadType = IntegerType::get(Context, RealSize * 8);
L = Builder.CreateBitCast(L, LoadType->getPointerTo());
Value *Val = Builder.CreateLoad(L);
if (LoadType->getPrimitiveSizeInBits() >= LLVMTy->getPrimitiveSizeInBits())
@@ -2899,7 +2901,7 @@
// Create a landing pad if one didn't exist already.
if (!ThisPad)
- ThisPad = BasicBlock::Create("lpad");
+ ThisPad = BasicBlock::Create(Context, "lpad");
LandingPad = ThisPad;
@@ -2907,12 +2909,12 @@
// Mark the call site so we'll dispatch to the right landing pad
// when we get an exception passed back.
llvm::Value *Idxs[2];
- Idxs[0] = ConstantInt::get(llvm::Type::Int32Ty, 0);
- Idxs[1] = ConstantInt::get(llvm::Type::Int32Ty, 1);
+ Idxs[0] = ConstantInt::get(llvm::Type::getInt32Ty(Context), 0);
+ Idxs[1] = ConstantInt::get(llvm::Type::getInt32Ty(Context), 1);
Value *FieldPtr = Builder.CreateGEP (FunctionContext.Ptr,
Idxs, Idxs+2, "call_site_gep");
// FieldPtr = BitCastToType(FieldPtr,
-// llvm::Type::Int32Ty->getPointerTo());
+// llvm::Type::getInt32Ty(Context)->getPointerTo());
const Type *FieldTy =
cast<PointerType>(FieldPtr->getType())->getElementType();
Constant *CallSiteIdx = ConstantInt::get(FieldTy, RegionNo, true);
@@ -3023,7 +3025,7 @@
cast<CallInst>(Call)->setCallingConv(CallingConvention);
cast<CallInst>(Call)->setAttributes(PAL);
} else {
- BasicBlock *NextBlock = BasicBlock::Create("invcont");
+ BasicBlock *NextBlock = BasicBlock::Create(Context, "invcont");
Call = Builder.CreateInvoke(Callee, NextBlock, LandingPad,
CallOperands.begin(), CallOperands.end());
cast<InvokeInst>(Call)->setCallingConv(CallingConvention);
@@ -3034,7 +3036,7 @@
if (Client.isShadowReturn())
return Client.EmitShadowResult(TREE_TYPE(exp), DestLoc);
- if (Call->getType() == Type::VoidTy)
+ if (Call->getType() == Type::getVoidTy(Context))
return 0;
if (Client.isAggrReturn()) {
@@ -3055,9 +3057,9 @@
Value *Ptr = DestLoc->Ptr;
if (Client.Offset) {
- Ptr = BitCastToType(Ptr, PointerType::getUnqual(Type::Int8Ty));
+ Ptr = BitCastToType(Ptr, PointerType::getUnqual(Type::getInt8Ty(Context)));
Ptr = Builder.CreateGEP(Ptr,
- ConstantInt::get(TD.getIntPtrType(), Client.Offset));
+ ConstantInt::get(TD.getIntPtrType(Context), Client.Offset));
}
Ptr = BitCastToType(Ptr, PointerType::getUnqual(Call->getType()));
StoreInst *St = Builder.CreateStore(Call, Ptr, DestLoc->Volatile);
@@ -3237,7 +3239,7 @@
ThisLastBitPlusOne = LV.BitStart+LV.BitSize;
Value *Ptr = Index ?
- Builder.CreateGEP(LV.Ptr, ConstantInt::get(Type::Int32Ty, Index)) :
+ Builder.CreateGEP(LV.Ptr, ConstantInt::get(Type::getInt32Ty(Context), Index)) :
LV.Ptr;
LoadInst *LI = Builder.CreateLoad(Ptr, isVolatile);
LI->setAlignment(Alignment);
@@ -3259,7 +3261,7 @@
// Next, if this doesn't touch the top bit, mask out any bits that shouldn't
// be set in the result.
uint64_t MaskVal = ((1ULL << BitsInVal)-1) << FirstBitInVal;
- Constant *Mask = ConstantInt::get(Type::Int64Ty, MaskVal);
+ Constant *Mask = ConstantInt::get(Type::getInt64Ty(Context), MaskVal);
Mask = Builder.getFolder().CreateTruncOrBitCast(Mask, ValTy);
if (FirstBitInVal+BitsInVal != ValSizeInBits)
@@ -3297,7 +3299,7 @@
assert(!isAggregateTreeType(TREE_TYPE(Op))
&& "Aggregate to scalar nop_expr!");
Value *OpVal = Emit(Op, DestLoc);
- if (Ty == Type::VoidTy) return 0;
+ if (Ty == Type::getVoidTy(Context)) return 0;
return CastToAnyType(OpVal, OpIsSigned, Ty, ExpIsSigned);
} else if (isAggregateTreeType(TREE_TYPE(Op))) {
// Aggregate to aggregate copy.
@@ -3405,7 +3407,7 @@
if (isa<PointerType>(DestTy)) // ptr->ptr is a simple bitcast.
return Builder.CreateBitCast(OpVal, DestTy);
// Otherwise, ptrtoint to intptr_t first.
- OpVal = Builder.CreatePtrToInt(OpVal, TD.getIntPtrType());
+ OpVal = Builder.CreatePtrToInt(OpVal, TD.getIntPtrType(Context));
}
// If the destination type is a pointer, use inttoptr.
@@ -3426,7 +3428,7 @@
// GCC allows NEGATE_EXPR on pointers as well. Cast to int, negate, cast
// back.
- V = CastToAnyType(V, false, TD.getIntPtrType(), false);
+ V = CastToAnyType(V, false, TD.getIntPtrType(Context), false);
V = Builder.CreateNeg(V);
return CastToType(Instruction::IntToPtr, V, ConvertType(TREE_TYPE(exp)));
}
@@ -3509,9 +3511,9 @@
unsigned NumElements = VTy->getNumElements();
const Type *EltTy = VTy->getElementType();
return VectorType::get(
- IntegerType::get(EltTy->getPrimitiveSizeInBits()), NumElements);
+ IntegerType::get(Context, EltTy->getPrimitiveSizeInBits()), NumElements);
}
- return IntegerType::get(Ty->getPrimitiveSizeInBits());
+ return IntegerType::get(Context, Ty->getPrimitiveSizeInBits());
}
Value *TreeToLLVM::EmitBIT_NOT_EXPR(tree exp) {
@@ -3533,7 +3535,7 @@
Value *TreeToLLVM::EmitTRUTH_NOT_EXPR(tree exp) {
Value *V = Emit(TREE_OPERAND(exp, 0), 0);
- if (V->getType() != Type::Int1Ty)
+ if (V->getType() != Type::getInt1Ty(Context))
V = Builder.CreateICmpNE(V,
Constant::getNullValue(V->getType()), "toBool");
V = Builder.CreateNot(V, (V->getNameStr()+"not").c_str());
@@ -3581,7 +3583,7 @@
Result = Builder.CreateICmp(pred, LHS, RHS);
}
}
- assert(Result->getType() == Type::Int1Ty && "Expected i1 result for compare");
+ assert(Result->getType() == Type::getInt1Ty(Context) && "Expected i1 result for compare");
if (DestTy == 0)
DestTy = ConvertType(TREE_TYPE(exp));
@@ -3673,7 +3675,7 @@
// If this is a subtract, we want to step backwards.
if (Opc == Instruction::Sub)
EltOffset = -EltOffset;
- Constant *C = ConstantInt::get(Type::Int64Ty, EltOffset);
+ Constant *C = ConstantInt::get(Type::getInt64Ty(Context), EltOffset);
Value *V = flag_wrapv ?
Builder.CreateGEP(LHS, C) :
Builder.CreateInBoundsGEP(LHS, C);
@@ -3685,7 +3687,7 @@
Value *RHS = Emit(TREE_OPERAND(exp, 1), 0);
- const Type *IntPtrTy = TD.getIntPtrType();
+ const Type *IntPtrTy = TD.getIntPtrType(Context);
bool LHSIsSigned = !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp, 0)));
bool RHSIsSigned = !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp, 1)));
LHS = CastToAnyType(LHS, LHSIsSigned, IntPtrTy, false);
@@ -4060,11 +4062,11 @@
"Must-not-throw region handled by runtime?");
// Unwinding continues in the caller.
if (!UnwindBB)
- UnwindBB = BasicBlock::Create("Unwind");
+ UnwindBB = BasicBlock::Create(Context, "Unwind");
Builder.CreateBr(UnwindBB);
}
- EmitBlock(BasicBlock::Create(""));
+ EmitBlock(BasicBlock::Create(Context, ""));
return 0;
}
@@ -4107,7 +4109,7 @@
// Turn this into a 'call void asm sideeffect "", "{reg}"(Ty %RHS)'.
std::vector<const Type*> ArgTys;
ArgTys.push_back(ConvertType(TREE_TYPE(decl)));
- FunctionType *FTy = FunctionType::get(Type::VoidTy, ArgTys, false);
+ FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context), ArgTys, false);
const char *Name = reg_names[decode_reg_name(extractRegisterName(decl))];
@@ -4692,7 +4694,7 @@
uint64_t TySize = TD.getTypeSizeInBits(LLVMTy);
if (TySize == 1 || TySize == 8 || TySize == 16 ||
TySize == 32 || TySize == 64) {
- LLVMTy = IntegerType::get(TySize);
+ LLVMTy = IntegerType::get(Context, TySize);
Op = Builder.CreateLoad(BitCastToType(LV.Ptr,
PointerType::getUnqual(LLVMTy)));
} else {
@@ -4819,7 +4821,7 @@
const Type *CallResultType;
switch (CallResultTypes.size()) {
- case 0: CallResultType = Type::VoidTy; break;
+ case 0: CallResultType = Type::getVoidTy(Context); break;
case 1: CallResultType = CallResultTypes[0]; break;
default:
std::vector<const Type*> TmpVec(CallResultTypes.begin(),
@@ -4895,7 +4897,7 @@
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
Result = Builder.CreateInsertElement(Result, Ops[i],
- ConstantInt::get(Type::Int32Ty, i));
+ ConstantInt::get(Type::getInt32Ty(Context), i));
return Result;
}
@@ -4935,10 +4937,10 @@
for (unsigned i = 0; i != NumElements; ++i) {
int idx = va_arg(VA, int);
if (idx == -1)
- Idxs.push_back(UndefValue::get(Type::Int32Ty));
+ Idxs.push_back(UndefValue::get(Type::getInt32Ty(Context)));
else {
assert((unsigned)idx < 2*NumElements && "Element index out of range!");
- Idxs.push_back(ConstantInt::get(Type::Int32Ty, idx));
+ Idxs.push_back(ConstantInt::get(Type::getInt32Ty(Context), idx));
}
}
va_end(VA);
@@ -4994,12 +4996,12 @@
void TreeToLLVM::EmitMemoryBarrier(bool ll, bool ls, bool sl, bool ss) {
Value* C[5];
- C[0] = ConstantInt::get(Type::Int1Ty, ll);
- C[1] = ConstantInt::get(Type::Int1Ty, ls);
- C[2] = ConstantInt::get(Type::Int1Ty, sl);
- C[3] = ConstantInt::get(Type::Int1Ty, ss);
+ C[0] = ConstantInt::get(Type::getInt1Ty(Context), ll);
+ C[1] = ConstantInt::get(Type::getInt1Ty(Context), ls);
+ C[2] = ConstantInt::get(Type::getInt1Ty(Context), sl);
+ C[3] = ConstantInt::get(Type::getInt1Ty(Context), ss);
// Be conservatively safe.
- C[4] = ConstantInt::get(Type::Int1Ty, true);
+ C[4] = ConstantInt::get(Type::getInt1Ty(Context), true);
Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
Intrinsic::memory_barrier),
@@ -5182,9 +5184,9 @@
// This treats everything as unknown, and is minimally defensible as
// correct, although completely useless.
if (tree_low_cst (ObjSizeTree, 0) < 2)
- Result = Constant::getAllOnesValue(TD.getIntPtrType());
+ Result = Constant::getAllOnesValue(TD.getIntPtrType(Context));
else
- Result = ConstantInt::get(TD.getIntPtrType(), 0);
+ Result = ConstantInt::get(TD.getIntPtrType(Context), 0);
return true;
}
// Unary bit counting intrinsics.
@@ -5343,7 +5345,7 @@
Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::trap));
// Emit an explicit unreachable instruction.
Builder.CreateUnreachable();
- EmitBlock(BasicBlock::Create(""));
+ EmitBlock(BasicBlock::Create(Context, ""));
return true;
// Convert annotation built-in to llvm.annotation intrinsic.
@@ -5351,9 +5353,9 @@
// Get file and line number
location_t locus = EXPR_LOCATION (exp);
- Constant *lineNo = ConstantInt::get(Type::Int32Ty, locus.line);
+ Constant *lineNo = ConstantInt::get(Type::getInt32Ty(Context), locus.line);
Constant *file = ConvertMetadataStringToGV(locus.file);
- const Type *SBP= PointerType::getUnqual(Type::Int8Ty);
+ const Type *SBP= PointerType::getUnqual(Type::getInt8Ty(Context));
file = Builder.getFolder().CreateBitCast(file, SBP);
// Get arguments.
@@ -5380,8 +5382,8 @@
case BUILT_IN_SYNCHRONIZE: {
// We assume like gcc appears to, that this only applies to cached memory.
Value* C[5];
- C[0] = C[1] = C[2] = C[3] = ConstantInt::get(Type::Int1Ty, 1);
- C[4] = ConstantInt::get(Type::Int1Ty, 0);
+ C[0] = C[1] = C[2] = C[3] = ConstantInt::get(Type::getInt1Ty(Context), 1);
+ C[4] = ConstantInt::get(Type::getInt1Ty(Context), 0);
Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
Intrinsic::memory_barrier),
@@ -5805,7 +5807,7 @@
// FIXME: HACK: Just ignore these.
{
const Type *Ty = ConvertType(TREE_TYPE(exp));
- if (Ty != Type::VoidTy)
+ if (Ty != Type::getVoidTy(Context))
Result = Constant::getNullValue(Ty);
return true;
}
@@ -5843,7 +5845,7 @@
Value *Val = Emit(TREE_VALUE(ArgList), 0);
Value *Pow = Emit(TREE_VALUE(TREE_CHAIN(ArgList)), 0);
const Type *Ty = Val->getType();
- Pow = CastToSIntType(Pow, Type::Int32Ty);
+ Pow = CastToSIntType(Pow, Type::getInt32Ty(Context));
SmallVector<Value *,2> Args;
Args.push_back(Val);
@@ -5986,7 +5988,7 @@
unsigned DstAlign = getPointerAlignment(Dst);
Value *DstV = Emit(Dst, 0);
- Value *Val = Constant::getNullValue(Type::Int32Ty);
+ Value *Val = Constant::getNullValue(Type::getInt32Ty(Context));
Value *Len = Emit(TREE_VALUE(TREE_CHAIN(arglist)), 0);
EmitMemSet(DstV, Val, Len, DstAlign);
return true;
@@ -6012,7 +6014,7 @@
ReadWrite = 0;
} else {
ReadWrite = Builder.getFolder().CreateIntCast(cast<Constant>(ReadWrite),
- Type::Int32Ty, false);
+ Type::getInt32Ty(Context), false);
}
if (TREE_CHAIN(TREE_CHAIN(arglist))) {
@@ -6025,18 +6027,18 @@
Locality = 0;
} else {
Locality = Builder.getFolder().CreateIntCast(cast<Constant>(Locality),
- Type::Int32Ty, false);
+ Type::getInt32Ty(Context), false);
}
}
}
// Default to highly local read.
if (ReadWrite == 0)
- ReadWrite = Constant::getNullValue(Type::Int32Ty);
+ ReadWrite = Constant::getNullValue(Type::getInt32Ty(Context));
if (Locality == 0)
- Locality = ConstantInt::get(Type::Int32Ty, 3);
+ Locality = ConstantInt::get(Type::getInt32Ty(Context), 3);
- Ptr = BitCastToType(Ptr, PointerType::getUnqual(Type::Int8Ty));
+ Ptr = BitCastToType(Ptr, PointerType::getUnqual(Type::getInt8Ty(Context)));
Value *Ops[3] = { Ptr, ReadWrite, Locality };
Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::prefetch),
@@ -6079,7 +6081,7 @@
// Unfortunately, these constants are defined as RTL expressions and
// should be handled separately.
- Result = BitCastToType(Ptr, PointerType::getUnqual(Type::Int8Ty));
+ Result = BitCastToType(Ptr, PointerType::getUnqual(Type::getInt8Ty(Context)));
return true;
}
@@ -6095,7 +6097,7 @@
// needed for: MIPS, Sparc. Unfortunately, these constants are defined
// as RTL expressions and should be handled separately.
- Result = BitCastToType(Ptr, PointerType::getUnqual(Type::Int8Ty));
+ Result = BitCastToType(Ptr, PointerType::getUnqual(Type::getInt8Ty(Context)));
return true;
}
@@ -6146,7 +6148,7 @@
// FIXME: is i32 always enough here?
Result = Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
Intrinsic::eh_dwarf_cfa),
- ConstantInt::get(Type::Int32Ty, cfa_offset));
+ ConstantInt::get(Type::getInt32Ty(Context), cfa_offset));
return true;
}
@@ -6195,15 +6197,15 @@
if (!validate_arglist(arglist, INTEGER_TYPE, POINTER_TYPE, VOID_TYPE))
return false;
- const Type *IntPtr = TD.getIntPtrType();
+ const Type *IntPtr = TD.getIntPtrType(Context);
Value *Offset = Emit(TREE_VALUE(arglist), 0);
Value *Handler = Emit(TREE_VALUE(TREE_CHAIN(arglist)), 0);
- Intrinsic::ID IID = (IntPtr == Type::Int32Ty ?
+ Intrinsic::ID IID = (IntPtr == Type::getInt32Ty(Context) ?
Intrinsic::eh_return_i32 : Intrinsic::eh_return_i64);
Offset = Builder.CreateIntCast(Offset, IntPtr, true);
- Handler = BitCastToType(Handler, PointerType::getUnqual(Type::Int8Ty));
+ Handler = BitCastToType(Handler, PointerType::getUnqual(Type::getInt8Ty(Context)));
SmallVector<Value *, 2> Args;
Args.push_back(Offset);
@@ -6211,7 +6213,7 @@
Builder.CreateCall(Intrinsic::getDeclaration(TheModule, IID),
Args.begin(), Args.end());
Result = Builder.CreateUnreachable();
- EmitBlock(BasicBlock::Create(""));
+ EmitBlock(BasicBlock::Create(Context, ""));
return true;
}
@@ -6232,7 +6234,7 @@
}
Value *Addr = BitCastToType(Emit(TREE_VALUE(arglist), 0),
- PointerType::getUnqual(Type::Int8Ty));
+ PointerType::getUnqual(Type::getInt8Ty(Context)));
Constant *Size, *Idx;
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) {
@@ -6253,21 +6255,21 @@
if (rnum < 0)
continue;
- Size = ConstantInt::get(Type::Int8Ty, size);
- Idx = ConstantInt::get(Type::Int32Ty, rnum);
+ Size = ConstantInt::get(Type::getInt8Ty(Context), size);
+ Idx = ConstantInt::get(Type::getInt32Ty(Context), rnum);
Builder.CreateStore(Size, Builder.CreateGEP(Addr, Idx), false);
}
}
if (!wrote_return_column) {
- Size = ConstantInt::get(Type::Int8Ty, GET_MODE_SIZE (Pmode));
- Idx = ConstantInt::get(Type::Int32Ty, DWARF_FRAME_RETURN_COLUMN);
+ Size = ConstantInt::get(Type::getInt8Ty(Context), GET_MODE_SIZE (Pmode));
+ Idx = ConstantInt::get(Type::getInt32Ty(Context), DWARF_FRAME_RETURN_COLUMN);
Builder.CreateStore(Size, Builder.CreateGEP(Addr, Idx), false);
}
#ifdef DWARF_ALT_FRAME_RETURN_COLUMN
- Size = ConstantInt::get(Type::Int8Ty, GET_MODE_SIZE (Pmode));
- Idx = ConstantInt::get(Type::Int32Ty, DWARF_ALT_FRAME_RETURN_COLUMN);
+ Size = ConstantInt::get(Type::getInt8Ty(Context), GET_MODE_SIZE (Pmode));
+ Idx = ConstantInt::get(Type::getInt32Ty(Context), DWARF_ALT_FRAME_RETURN_COLUMN);
Builder.CreateStore(Size, Builder.CreateGEP(Addr, Idx), false);
#endif
@@ -6294,7 +6296,7 @@
return false;
Value *Ptr = Emit(TREE_VALUE(arglist), 0);
- Ptr = BitCastToType(Ptr, PointerType::getUnqual(Type::Int8Ty));
+ Ptr = BitCastToType(Ptr, PointerType::getUnqual(Type::getInt8Ty(Context)));
Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
Intrinsic::stackrestore), Ptr);
@@ -6307,8 +6309,8 @@
if (!validate_arglist(arglist, INTEGER_TYPE, VOID_TYPE))
return false;
Value *Amt = Emit(TREE_VALUE(arglist), 0);
- Amt = CastToSIntType(Amt, Type::Int32Ty);
- Result = Builder.CreateAlloca(Type::Int8Ty, Amt);
+ Amt = CastToSIntType(Amt, Type::getInt32Ty(Context));
+ Result = Builder.CreateAlloca(Type::getInt8Ty(Context), Amt);
return true;
}
@@ -6348,14 +6350,14 @@
Intrinsic::vastart);
const Type *FTy =
cast<PointerType>(llvm_va_start_fn->getType())->getElementType();
- ArgVal = BitCastToType(ArgVal, PointerType::getUnqual(Type::Int8Ty));
+ ArgVal = BitCastToType(ArgVal, PointerType::getUnqual(Type::getInt8Ty(Context)));
Builder.CreateCall(llvm_va_start_fn, ArgVal);
return true;
}
bool TreeToLLVM::EmitBuiltinVAEnd(tree exp) {
Value *Arg = Emit(TREE_VALUE(TREE_OPERAND(exp, 1)), 0);
- Arg = BitCastToType(Arg, PointerType::getUnqual(Type::Int8Ty));
+ Arg = BitCastToType(Arg, PointerType::getUnqual(Type::getInt8Ty(Context)));
Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::vaend),
Arg);
return true;
@@ -6380,7 +6382,7 @@
Arg2 = Emit(Arg2T, 0);
}
- static const Type *VPTy = PointerType::getUnqual(Type::Int8Ty);
+ static const Type *VPTy = PointerType::getUnqual(Type::getInt8Ty(Context));
// FIXME: This ignores alignment and volatility of the arguments.
SmallVector<Value *, 2> Args;
@@ -6398,7 +6400,7 @@
VOID_TYPE))
return false;
- static const Type *VPTy = PointerType::getUnqual(Type::Int8Ty);
+ static const Type *VPTy = PointerType::getUnqual(Type::getInt8Ty(Context));
Value *Tramp = Emit(TREE_VALUE(arglist), 0);
Tramp = BitCastToType(Tramp, VPTy);
@@ -6593,7 +6595,7 @@
tree AnnotateAttr = lookup_attribute("annotate", DECL_ATTRIBUTES(FieldDecl));
const Type *OrigPtrTy = FieldPtr->getType();
- const Type *SBP = PointerType::getUnqual(Type::Int8Ty);
+ const Type *SBP = PointerType::getUnqual(Type::getInt8Ty(Context));
Function *Fn = Intrinsic::getDeclaration(TheModule,
Intrinsic::ptr_annotation,
@@ -6601,7 +6603,7 @@
// Get file and line number. FIXME: Should this be for the decl or the
// use. Is there a location info for the use?
- Constant *LineNo = ConstantInt::get(Type::Int32Ty,
+ Constant *LineNo = ConstantInt::get(Type::getInt32Ty(Context),
DECL_SOURCE_LINE(FieldDecl));
Constant *File = ConvertMetadataStringToGV(DECL_SOURCE_FILE(FieldDecl));
@@ -6693,7 +6695,7 @@
Value *IndexVal = Emit(Index, 0);
- const Type *IntPtrTy = getTargetData().getIntPtrType();
+ const Type *IntPtrTy = getTargetData().getIntPtrType(Context);
if (TYPE_UNSIGNED(IndexType)) // if the index is unsigned
// ZExt it to retain its value in the larger type
IndexVal = CastToUIntType(IndexVal, IntPtrTy);
@@ -6723,7 +6725,7 @@
// float foo(int w, float A[][w], int g) { return A[g][0]; }
ArrayAddr = BitCastToType(ArrayAddr,
- PointerType::getUnqual(Type::Int8Ty));
+ PointerType::getUnqual(Type::getInt8Ty(Context)));
if (VOID_TYPE_P(TREE_TYPE(ArrayTreeType)))
return LValue(Builder.CreateGEP(ArrayAddr, IndexVal), 1);
@@ -6766,7 +6768,7 @@
// than this. e.g. check out when compiling unwind-dw2-fde-darwin.c.
Ptr.Ptr = BitCastToType(Ptr.Ptr, PointerType::getUnqual(ValTy));
Ptr.Ptr = Builder.CreateGEP(Ptr.Ptr,
- ConstantInt::get(Type::Int32Ty, UnitOffset));
+ ConstantInt::get(Type::getInt32Ty(Context), UnitOffset));
BitStart -= UnitOffset*ValueSizeInBits;
}
@@ -6909,7 +6911,7 @@
// sized like an i24 there may be trouble: incrementing a T* will move
// the position by 32 bits not 24, leaving the upper 8 of those 32 bits
// inaccessible. Avoid this by rounding up the size appropriately.
- FieldTy = IntegerType::get(TD.getTypeAllocSizeInBits(FieldTy));
+ FieldTy = IntegerType::get(Context, TD.getTypeAllocSizeInBits(FieldTy));
assert(FieldTy->getPrimitiveSizeInBits() ==
TD.getTypeAllocSizeInBits(FieldTy) && "Field type not sequential!");
@@ -6943,7 +6945,7 @@
unsigned ByteOffset = NumAlignmentUnits*ByteAlignment;
LVAlign = MinAlign(LVAlign, ByteOffset);
- Constant *Offset = ConstantInt::get(TD.getIntPtrType(), ByteOffset);
+ Constant *Offset = ConstantInt::get(TD.getIntPtrType(Context), ByteOffset);
FieldPtr = CastToType(Instruction::PtrToInt, FieldPtr,
Offset->getType());
FieldPtr = Builder.CreateAdd(FieldPtr, Offset);
@@ -7047,7 +7049,7 @@
const Type *Ty = ConvertType(TREE_TYPE(exp));
// If we have "extern void foo", make the global have type {} instead of
// type void.
- if (Ty == Type::VoidTy) Ty = StructType::get(Context);
+ if (Ty == Type::getVoidTy(Context)) Ty = StructType::get(Context);
const PointerType *PTy = PointerType::getUnqual(Ty);
unsigned Alignment = Ty->isSized() ? TD.getABITypeAlignment(Ty) : 1;
if (DECL_ALIGN(exp)) {
@@ -7253,7 +7255,7 @@
// Build the value as a ulong constant, then constant fold it to the right
// type. This handles overflow and other things appropriately.
uint64_t IntValue = getINTEGER_CSTVal(exp);
- ConstantInt *C = ConstantInt::get(Type::Int64Ty, IntValue);
+ ConstantInt *C = ConstantInt::get(Type::getInt64Ty(Context), IntValue);
// The destination type can be a pointer, integer or floating point
// so we need a generalized cast here
Instruction::CastOps opcode = CastInst::getCastOpcode(C, false, Ty,
@@ -7269,7 +7271,7 @@
int UArr[2];
double V;
};
- if (Ty==Type::FloatTy || Ty==Type::DoubleTy) {
+ if (Ty==Type::getFloatTy(Context) || Ty==Type::getDoubleTy(Context)) {
REAL_VALUE_TO_TARGET_DOUBLE(TREE_REAL_CST(exp), RealArr);
// Here's how this works:
@@ -7295,9 +7297,9 @@
std::swap(UArr[0], UArr[1]);
return
- ConstantFP::get(Context, Ty==Type::FloatTy ?
+ ConstantFP::get(Context, Ty==Type::getFloatTy(Context) ?
APFloat((float)V) : APFloat(V));
- } else if (Ty==Type::X86_FP80Ty) {
+ } else if (Ty==Type::getX86_FP80Ty(Context)) {
long RealArr[4];
uint64_t UArr[2];
REAL_VALUE_TO_TARGET_LONG_DOUBLE(TREE_REAL_CST(exp), RealArr);
@@ -7305,7 +7307,7 @@
((uint64_t)((uint32_t)RealArr[1]) << 32);
UArr[1] = (uint16_t)RealArr[2];
return ConstantFP::get(Context, APFloat(APInt(80, 2, UArr)));
- } else if (Ty==Type::PPC_FP128Ty) {
+ } else if (Ty==Type::getPPC_FP128Ty(Context)) {
long RealArr[4];
uint64_t UArr[2];
REAL_VALUE_TO_TARGET_LONG_DOUBLE(TREE_REAL_CST(exp), RealArr);
@@ -7346,11 +7348,11 @@
unsigned Len = (unsigned)TREE_STRING_LENGTH(exp);
std::vector<Constant*> Elts;
- if (ElTy == Type::Int8Ty) {
+ if (ElTy == Type::getInt8Ty(Context)) {
const unsigned char *InStr =(const unsigned char *)TREE_STRING_POINTER(exp);
for (unsigned i = 0; i != Len; ++i)
- Elts.push_back(ConstantInt::get(Type::Int8Ty, InStr[i]));
- } else if (ElTy == Type::Int16Ty) {
+ Elts.push_back(ConstantInt::get(Type::getInt8Ty(Context), InStr[i]));
+ } else if (ElTy == Type::getInt16Ty(Context)) {
assert((Len&1) == 0 &&
"Length in bytes should be a multiple of element size");
const uint16_t *InStr =
@@ -7360,11 +7362,11 @@
// but we're going to treat them as ordinary shorts from here, with
// host endianness. Adjust if necessary.
if (llvm::sys::isBigEndianHost() == BYTES_BIG_ENDIAN)
- Elts.push_back(ConstantInt::get(Type::Int16Ty, InStr[i]));
+ Elts.push_back(ConstantInt::get(Type::getInt16Ty(Context), InStr[i]));
else
- Elts.push_back(ConstantInt::get(Type::Int16Ty, ByteSwap_16(InStr[i])));
+ Elts.push_back(ConstantInt::get(Type::getInt16Ty(Context), ByteSwap_16(InStr[i])));
}
- } else if (ElTy == Type::Int32Ty) {
+ } else if (ElTy == Type::getInt32Ty(Context)) {
assert((Len&3) == 0 &&
"Length in bytes should be a multiple of element size");
const uint32_t *InStr = (const unsigned *)TREE_STRING_POINTER(exp);
@@ -7373,9 +7375,9 @@
// but we're going to treat them as ordinary ints from here, with
// host endianness. Adjust if necessary.
if (llvm::sys::isBigEndianHost() == BYTES_BIG_ENDIAN)
- Elts.push_back(ConstantInt::get(Type::Int32Ty, InStr[i]));
+ Elts.push_back(ConstantInt::get(Type::getInt32Ty(Context), InStr[i]));
else
- Elts.push_back(ConstantInt::get(Type::Int32Ty, ByteSwap_32(InStr[i])));
+ Elts.push_back(ConstantInt::get(Type::getInt32Ty(Context), ByteSwap_32(InStr[i])));
}
} else {
assert(0 && "Unknown character type!");
@@ -7448,7 +7450,7 @@
bool RHSIsSigned = !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp,1)));
Instruction::CastOps opcode;
if (isa<PointerType>(LHS->getType())) {
- const Type *IntPtrTy = getTargetData().getIntPtrType();
+ const Type *IntPtrTy = getTargetData().getIntPtrType(Context);
opcode = CastInst::getCastOpcode(LHS, LHSIsSigned, IntPtrTy, false);
LHS = TheFolder->CreateCast(opcode, LHS, IntPtrTy);
opcode = CastInst::getCastOpcode(RHS, RHSIsSigned, IntPtrTy, false);
@@ -7663,7 +7665,7 @@
}
// Otherwise, there is padding here. Insert explicit zeros.
- const Type *PadTy = Type::Int8Ty;
+ const Type *PadTy = Type::getInt8Ty(Context);
if (AlignedEltOffs-EltOffs != 1)
PadTy = ArrayType::get(PadTy, AlignedEltOffs-EltOffs);
ResultElts.insert(ResultElts.begin()+i,
@@ -7750,7 +7752,7 @@
// Insert enough padding to fully fill in the hole. Insert padding from
// NextFieldByteStart (not LLVMNaturalByteOffset) because the padding will
// not get the same alignment as "Val".
- const Type *FillTy = Type::Int8Ty;
+ const Type *FillTy = Type::getInt8Ty(Context);
if (GCCFieldOffsetInBits/8-NextFieldByteStart != 1)
FillTy = ArrayType::get(FillTy,
GCCFieldOffsetInBits/8-NextFieldByteStart);
@@ -7781,7 +7783,7 @@
// been an anonymous bitfield or other thing that shoved it over. No matter,
// just insert some i8 padding until there are bits to fill in.
while (GCCFieldOffsetInBits > NextFieldByteStart*8) {
- ResultElts.push_back(ConstantInt::get(Type::Int8Ty, 0));
+ ResultElts.push_back(ConstantInt::get(Type::getInt8Ty(Context), 0));
++NextFieldByteStart;
}
@@ -7804,7 +7806,7 @@
if (GCCFieldOffsetInBits < NextFieldByteStart*8) {
unsigned ValBitSize = ValC->getBitWidth();
assert(!ResultElts.empty() && "Bitfield starts before first element?");
- assert(ResultElts.back()->getType() == Type::Int8Ty &&
+ assert(ResultElts.back()->getType() == Type::getInt8Ty(Context) &&
isa<ConstantInt>(ResultElts.back()) &&
"Merging bitfield with non-bitfield value?");
assert(NextFieldByteStart*8 - GCCFieldOffsetInBits < 8 &&
@@ -7937,7 +7939,7 @@
// If the LLVM Size is too small, add some tail padding to fill it in.
if (LLVMNaturalSize < GCCStructSize) {
- const Type *FillTy = Type::Int8Ty;
+ const Type *FillTy = Type::getInt8Ty(Context);
if (GCCStructSize - NextFieldByteStart != 1)
FillTy = ArrayType::get(FillTy, GCCStructSize - NextFieldByteStart);
ResultElts.push_back(Constant::getNullValue(FillTy));
@@ -8057,9 +8059,9 @@
const Type *FillTy;
assert(UnionSize > InitSize && "Init shouldn't be larger than union!");
if (UnionSize - InitSize == 1)
- FillTy = Type::Int8Ty;
+ FillTy = Type::getInt8Ty(Context);
else
- FillTy = ArrayType::get(Type::Int8Ty, UnionSize - InitSize);
+ FillTy = ArrayType::get(Type::getInt8Ty(Context), UnionSize - InitSize);
Elts.push_back(Constant::getNullValue(FillTy));
}
}
@@ -8157,7 +8159,7 @@
// itself (allowed in GCC but not in LLVM) then the global is changed to have
// the type of the initializer. Correct for this now.
const Type *Ty = ConvertType(TREE_TYPE(exp));
- if (Ty == Type::VoidTy) Ty = Type::Int8Ty; // void* -> i8*.
+ if (Ty == Type::getVoidTy(Context)) Ty = Type::getInt8Ty(Context); // void* -> i8*.
return TheFolder->CreateBitCast(Val, Ty->getPointerTo());
}
@@ -8178,7 +8180,7 @@
BasicBlock *BB = getLabelDeclBlock(exp);
Constant *C = TheTreeToLLVM->getIndirectGotoBlockNumber(BB);
return
- TheFolder->CreateIntToPtr(C, PointerType::getUnqual(Type::Int8Ty));
+ TheFolder->CreateIntToPtr(C, PointerType::getUnqual(Type::getInt8Ty(Context)));
}
Constant *TreeConstantToLLVM::EmitLV_COMPLEX_CST(tree exp) {
@@ -8255,7 +8257,7 @@
Constant *IndexVal = Convert(Index);
- const Type *IntPtrTy = getTargetData().getIntPtrType();
+ const Type *IntPtrTy = getTargetData().getIntPtrType(Context);
if (IndexVal->getType() != IntPtrTy)
IndexVal = TheFolder->CreateIntCast(IndexVal, IntPtrTy,
!TYPE_UNSIGNED(IndexType));
@@ -8295,8 +8297,8 @@
Constant *Ops[] = {
StructAddrLV,
- Constant::getNullValue(Type::Int32Ty),
- ConstantInt::get(Type::Int32Ty, MemberIndex)
+ Constant::getNullValue(Type::getInt32Ty(Context)),
+ ConstantInt::get(Type::getInt32Ty(Context), MemberIndex)
};
FieldPtr = TheFolder->CreateGetElementPtr(StructAddrLV, Ops+1, 2);
Modified: llvm-gcc-4.2/trunk/gcc/llvm-linker-hack.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/llvm-linker-hack.cpp?rev=78947&r1=78946&r2=78947&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/llvm-linker-hack.cpp (original)
+++ llvm-gcc-4.2/trunk/gcc/llvm-linker-hack.cpp Thu Aug 13 16:58:17 2009
@@ -95,7 +95,7 @@
llvm::createFunctionAttrsPass();
llvm::createPrintModulePass(0);
- llvm::getGlobalContext();
+ llvm::Type::getInt8Ty(llvm::getGlobalContext());
llvm::PrettyStackTraceProgram::PrettyStackTraceProgram(0, 0);
llvm::DIFactory::DIFactory(*MP->getModule());
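
[Editor's side note, not part of the commit: llvm-linker-hack.cpp, as its name suggests, appears to exist only so that LLVM entry points stay referenced and the corresponding objects are pulled in at link time; the hunk above swaps the bare getGlobalContext() reference for one that also touches the new per-context type getters. A hypothetical stand-alone equivalent of such a reference-list entry — the function name llvm_api_reference_sketch is made up for illustration — might look like:

#include "llvm/LLVMContext.h"
#include "llvm/DerivedTypes.h"

// Never called; the call below exists only so the linker keeps the
// referenced LLVM code (the same trick the real llvm-linker-hack.cpp uses).
void llvm_api_reference_sketch() {
  llvm::Type::getInt8Ty(llvm::getGlobalContext());
}
]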
Modified: llvm-gcc-4.2/trunk/gcc/llvm-types.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/llvm-types.cpp?rev=78947&r1=78946&r2=78947&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/llvm-types.cpp (original)
+++ llvm-gcc-4.2/trunk/gcc/llvm-types.cpp Thu Aug 13 16:58:17 2009
@@ -188,7 +188,7 @@
}
const std::string &TypeName = TypeNameMap[*I];
- LTypesNames.push_back(ConstantArray::get(TypeName, false));
+ LTypesNames.push_back(ConstantArray::get(Context, TypeName, false));
}
// Create string table.
@@ -693,7 +693,7 @@
//===----------------------------------------------------------------------===//
const Type *TypeConverter::ConvertType(tree orig_type) {
- if (orig_type == error_mark_node) return Type::Int32Ty;
+ if (orig_type == error_mark_node) return Type::getInt32Ty(Context);
// LLVM doesn't care about variants such as const, volatile, or restrict.
tree type = TYPE_MAIN_VARIANT(orig_type);
@@ -703,7 +703,7 @@
fprintf(stderr, "Unknown type to convert:\n");
debug_tree(type);
abort();
- case VOID_TYPE: return SET_TYPE_LLVM(type, Type::VoidTy);
+ case VOID_TYPE: return SET_TYPE_LLVM(type, Type::getVoidTy(Context));
case RECORD_TYPE: return ConvertRECORD(type, orig_type);
case QUAL_UNION_TYPE:
case UNION_TYPE: return ConvertUNION(type, orig_type);
@@ -711,7 +711,7 @@
if (const Type *Ty = GET_TYPE_LLVM(type))
return Ty;
return SET_TYPE_LLVM(type,
- IntegerType::get(TREE_INT_CST_LOW(TYPE_SIZE(type))));
+ IntegerType::get(Context, TREE_INT_CST_LOW(TYPE_SIZE(type))));
}
case ENUMERAL_TYPE:
// Use of an enum that is implicitly declared?
@@ -731,7 +731,7 @@
// The ARM port defines __builtin_neon_xi as a 511-bit type because GCC's
// type precision field has only 9 bits. Treat this as a special case.
int precision = TYPE_PRECISION(type) == 511 ? 512 : TYPE_PRECISION(type);
- return SET_TYPE_LLVM(type, IntegerType::get(precision));
+ return SET_TYPE_LLVM(type, IntegerType::get(Context, precision));
}
case REAL_TYPE:
if (const Type *Ty = GET_TYPE_LLVM(type)) return Ty;
@@ -740,9 +740,9 @@
fprintf(stderr, "Unknown FP type!\n");
debug_tree(type);
abort();
- case 32: return SET_TYPE_LLVM(type, Type::FloatTy);
- case 64: return SET_TYPE_LLVM(type, Type::DoubleTy);
- case 80: return SET_TYPE_LLVM(type, Type::X86_FP80Ty);
+ case 32: return SET_TYPE_LLVM(type, Type::getFloatTy(Context));
+ case 64: return SET_TYPE_LLVM(type, Type::getDoubleTy(Context));
+ case 80: return SET_TYPE_LLVM(type, Type::getX86_FP80Ty(Context));
case 128:
#ifdef TARGET_POWERPC
return SET_TYPE_LLVM(type, Type::PPC_FP128Ty);
@@ -752,8 +752,8 @@
#else
// 128-bit long doubles map onto { double, double }.
return SET_TYPE_LLVM(type,
- StructType::get(Context, Type::DoubleTy,
- Type::DoubleTy, NULL));
+ StructType::get(Context, Type::getDoubleTy(Context),
+ Type::getDoubleTy(Context), NULL));
#endif
}
@@ -805,8 +805,8 @@
// Restore ConvertingStruct for the caller.
ConvertingStruct = false;
- if (Actual == Type::VoidTy)
- Actual = Type::Int8Ty; // void* -> sbyte*
+ if (Actual == Type::getVoidTy(Context))
+ Actual = Type::getInt8Ty(Context); // void* -> sbyte*
// Update the type, potentially updating TYPE_LLVM(type).
const OpaqueType *OT = cast<OpaqueType>(Ty->getElementType());
@@ -840,8 +840,8 @@
Ty = ConvertType(TREE_TYPE(type));
}
- if (Ty == Type::VoidTy)
- Ty = Type::Int8Ty; // void* -> sbyte*
+ if (Ty == Type::getVoidTy(Context))
+ Ty = Type::getInt8Ty(Context); // void* -> sbyte*
return TypeDB.setType(type, PointerType::getUnqual(Ty));
}
@@ -873,7 +873,7 @@
// that the gcc array type has constant size, using an i8 for the element
// type ensures we can produce an LLVM array of the right size.
ElementSize = 8;
- ElementTy = Type::Int8Ty;
+ ElementTy = Type::getInt8Ty(Context);
}
uint64_t NumElements;
@@ -912,8 +912,8 @@
// integer directly.
switch (getTargetData().getPointerSize()) {
default: assert(0 && "Unknown pointer size!");
- case 4: return Type::Int32Ty;
- case 8: return Type::Int64Ty;
+ case 4: return Type::getInt32Ty(Context);
+ case 8: return Type::getInt64Ty(Context);
}
}
}
@@ -964,7 +964,7 @@
void HandleShadowResult(const PointerType *PtrArgTy, bool RetPtr) {
// This function either returns void or the shadow argument,
// depending on the target.
- RetTy = RetPtr ? PtrArgTy : Type::VoidTy;
+ RetTy = RetPtr ? PtrArgTy : Type::getVoidTy(Context);
// In any case, there is a dummy shadow argument though!
ArgTypes.push_back(PtrArgTy);
@@ -995,9 +995,9 @@
if (KNRPromotion) {
if (type == float_type_node)
LLVMTy = ConvertType(double_type_node);
- else if (LLVMTy == Type::Int16Ty || LLVMTy == Type::Int8Ty ||
- LLVMTy == Type::Int1Ty)
- LLVMTy = Type::Int32Ty;
+ else if (LLVMTy == Type::getInt16Ty(Context) || LLVMTy == Type::getInt8Ty(Context) ||
+ LLVMTy == Type::getInt1Ty(Context))
+ LLVMTy = Type::getInt32Ty(Context);
}
ArgTypes.push_back(LLVMTy);
}
@@ -1050,7 +1050,7 @@
unsigned &CallingConv, AttrListPtr &PAL) {
tree ReturnType = TREE_TYPE(type);
std::vector<PATypeHolder> ArgTys;
- PATypeHolder RetTy(Type::VoidTy);
+ PATypeHolder RetTy(Type::getVoidTy(Context));
FunctionTypeConversion Client(RetTy, ArgTys, CallingConv, true /*K&R*/);
TheLLVMABI<FunctionTypeConversion> ABIConverter(Client);
@@ -1112,7 +1112,7 @@
const FunctionType *TypeConverter::
ConvertFunctionType(tree type, tree decl, tree static_chain,
unsigned &CallingConv, AttrListPtr &PAL) {
- PATypeHolder RetTy = Type::VoidTy;
+ PATypeHolder RetTy = Type::getVoidTy(Context);
std::vector<PATypeHolder> ArgTypes;
bool isVarArg = false;
FunctionTypeConversion Client(RetTy, ArgTypes, CallingConv, false/*not K&R*/);
@@ -1397,13 +1397,13 @@
const Type *LastType = Elements.back();
unsigned PadBytes = 0;
- if (LastType == Type::Int8Ty)
+ if (LastType == Type::getInt8Ty(Context))
PadBytes = 1 - NoOfBytesToRemove;
- else if (LastType == Type::Int16Ty)
+ else if (LastType == Type::getInt16Ty(Context))
PadBytes = 2 - NoOfBytesToRemove;
- else if (LastType == Type::Int32Ty)
+ else if (LastType == Type::getInt32Ty(Context))
PadBytes = 4 - NoOfBytesToRemove;
- else if (LastType == Type::Int64Ty)
+ else if (LastType == Type::getInt64Ty(Context))
PadBytes = 8 - NoOfBytesToRemove;
else
return;
@@ -1411,7 +1411,7 @@
assert (PadBytes > 0 && "Unable to remove extra bytes");
// Update last element type and size, element offset is unchanged.
- const Type *Pad = ArrayType::get(Type::Int8Ty, PadBytes);
+ const Type *Pad = ArrayType::get(Type::getInt8Ty(Context), PadBytes);
unsigned OriginalSize = ElementSizeInBytes.back();
Elements.pop_back();
Elements.push_back(Pad);
@@ -1446,7 +1446,7 @@
// field we just popped. Otherwise we might end up with a
// gcc non-bitfield being mapped to an LLVM field with a
// different offset.
- const Type *Pad = Type::Int8Ty;
+ const Type *Pad = Type::getInt8Ty(Context);
if (PoppedOffset != EndOffset + 1)
Pad = ArrayType::get(Pad, PoppedOffset - EndOffset);
addElement(Pad, EndOffset, PoppedOffset - EndOffset);
@@ -1469,7 +1469,7 @@
// padding.
if (NextByteOffset < ByteOffset) {
uint64_t CurOffset = getNewElementByteOffset(1);
- const Type *Pad = Type::Int8Ty;
+ const Type *Pad = Type::getInt8Ty(Context);
if (SavedTy && LastFieldStartsAtNonByteBoundry)
// We want to reuse SavedType to access this bit field.
// e.g. struct __attribute__((packed)) {
@@ -1606,21 +1606,21 @@
// additional bits required after FirstunallocatedByte to cover new field.
const Type *NewFieldTy;
if (Size <= 8)
- NewFieldTy = Type::Int8Ty;
+ NewFieldTy = Type::getInt8Ty(Context);
else if (Size <= 16)
- NewFieldTy = Type::Int16Ty;
+ NewFieldTy = Type::getInt16Ty(Context);
else if (Size <= 32)
- NewFieldTy = Type::Int32Ty;
+ NewFieldTy = Type::getInt32Ty(Context);
else {
assert(Size <= 64 && "Bitfield too large!");
- NewFieldTy = Type::Int64Ty;
+ NewFieldTy = Type::getInt64Ty(Context);
}
// Check that the alignment of NewFieldTy won't cause a gap in the structure!
unsigned ByteAlignment = getTypeAlignment(NewFieldTy);
if (FirstUnallocatedByte & (ByteAlignment-1)) {
// Instead of inserting a nice whole field, insert a small array of ubytes.
- NewFieldTy = ArrayType::get(Type::Int8Ty, (Size+7)/8);
+ NewFieldTy = ArrayType::get(Type::getInt8Ty(Context), (Size+7)/8);
}
// Finally, add the new field.
@@ -2082,7 +2082,7 @@
PadBytes = StartOffsetInBits/8-FirstUnallocatedByte;
if (PadBytes) {
- const Type *Pad = Type::Int8Ty;
+ const Type *Pad = Type::getInt8Ty(Context);
if (PadBytes != 1)
Pad = ArrayType::get(Pad, PadBytes);
Info.addElement(Pad, FirstUnallocatedByte, PadBytes);
@@ -2192,20 +2192,20 @@
// If only one byte is needed then insert i8.
if (GCCTypeSize-LLVMLastElementEnd == 1)
- Info->addElement(Type::Int8Ty, 1, 1);
+ Info->addElement(Type::getInt8Ty(Context), 1, 1);
else {
if (((GCCTypeSize-LLVMStructSize) % 4) == 0 &&
(Info->getAlignmentAsLLVMStruct() %
- Info->getTypeAlignment(Type::Int32Ty)) == 0) {
+ Info->getTypeAlignment(Type::getInt32Ty(Context))) == 0) {
// insert array of i32
unsigned Int32ArraySize = (GCCTypeSize-LLVMStructSize)/4;
const Type *PadTy =
- ArrayType::get(Type::Int32Ty, Int32ArraySize);
+ ArrayType::get(Type::getInt32Ty(Context), Int32ArraySize);
Info->addElement(PadTy, GCCTypeSize - LLVMLastElementEnd,
Int32ArraySize, true /* Padding Element */);
} else {
const Type *PadTy =
- ArrayType::get(Type::Int8Ty, GCCTypeSize-LLVMStructSize);
+ ArrayType::get(Type::getInt8Ty(Context), GCCTypeSize-LLVMStructSize);
Info->addElement(PadTy, GCCTypeSize - LLVMLastElementEnd,
GCCTypeSize - LLVMLastElementEnd,
true /* Padding Element */);
@@ -2440,7 +2440,7 @@
if (EltSize != GCCTypeSize) {
assert(EltSize < GCCTypeSize &&
"LLVM type size doesn't match GCC type size!");
- const Type *PadTy = Type::Int8Ty;
+ const Type *PadTy = Type::getInt8Ty(Context);
if (GCCTypeSize-EltSize != 1)
PadTy = ArrayType::get(PadTy, GCCTypeSize-EltSize);
UnionElts.push_back(PadTy);
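
[Editor's summary, not part of the patch: the change applied throughout is mechanical — the static primitive type objects (Type::Int32Ty, Type::Int8Ty, Type::VoidTy, ...) become per-context getters (Type::getInt32Ty(Context), ...), while IntegerType::get, BasicBlock::Create, ConstantArray::get and TargetData::getIntPtrType gain a leading LLVMContext argument. As a minimal reference sketch of the post-r78947 idiom in isolation (the helper makeEntryBlock is invented for illustration and does not appear in the patch):

#include "llvm/LLVMContext.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Constants.h"
#include "llvm/BasicBlock.h"
#include "llvm/Function.h"

using namespace llvm;

// Pre-r78947:  ConstantInt::get(Type::Int32Ty, 0); BasicBlock::Create("entry", F);
// Post-r78947: every primitive type lookup and block creation is qualified by a context.
static BasicBlock *makeEntryBlock(Function *F, LLVMContext &Context) {
  BasicBlock *BB = BasicBlock::Create(Context, "entry", F);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0); // i32 0
  (void)Zero; // unused in this standalone sketch
  return BB;
}
]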