[llvm-commits] [llvm-gcc-4.2] r75704 - in /llvm-gcc-4.2/trunk/gcc: config/i386/llvm-i386.cpp config/rs6000/llvm-rs6000.cpp llvm-backend.cpp llvm-convert.cpp llvm-types.cpp
Owen Anderson
resistor at mac.com
Tue Jul 14 16:10:12 PDT 2009
Author: resistor
Date: Tue Jul 14 18:10:12 2009
New Revision: 75704
URL: http://llvm.org/viewvc/llvm-project?rev=75704&view=rev
Log:
Update for LLVM API change.
Go ahead and context-ize a bunch of stuff while I'm at it.
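The change applied throughout is mechanical: static factory calls such as VectorType::get, ConstantInt::get, PointerType::getUnqual, ConstantVector::get and UndefValue::get become the equivalent accessors on an LLVMContext obtained from getGlobalContext(). A minimal sketch of the before/after shape, modeled on the hunks below; the helper name LowerAndMask is invented for illustration, LLVMBuilder is the IRBuilder typedef these lowering routines already take in this tree, and the Context accessors are the transitional API this revision builds against:

    #include "llvm/Constants.h"
    #include "llvm/DerivedTypes.h"
    #include "llvm/LLVMContext.h"
    using namespace llvm;

    // One file-scope handle, as this patch adds to each target file.
    static LLVMContext &Context = getGlobalContext();

    // Hypothetical helper: bitcast a v4f32 operand to v4i32 and clear the
    // sign bits, with the old static factories noted next to the new calls.
    static Value *LowerAndMask(LLVMBuilder &Builder, Value *Op) {
      // Was: VectorType::get(Type::Int32Ty, 4)
      const VectorType *V4i32 = Context.getVectorType(Type::Int32Ty, 4);
      Op = Builder.CreateBitCast(Op, V4i32, "tmp");
      // Was: ConstantInt::get(Type::Int32Ty, 0x7FFFFFFF)
      Constant *Mask = Context.getConstantInt(Type::Int32Ty, 0x7FFFFFFF);
      return Builder.CreateAnd(Op, Mask, "tmp");
    }

Note that these LLVMContext accessors were a transitional interface; later LLVM releases moved the factories back onto the Type and Constant classes (taking a context argument), so the exact spellings above only match the API at the revision this commit targets.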
Modified:
llvm-gcc-4.2/trunk/gcc/config/i386/llvm-i386.cpp
llvm-gcc-4.2/trunk/gcc/config/rs6000/llvm-rs6000.cpp
llvm-gcc-4.2/trunk/gcc/llvm-backend.cpp
llvm-gcc-4.2/trunk/gcc/llvm-convert.cpp
llvm-gcc-4.2/trunk/gcc/llvm-types.cpp
Modified: llvm-gcc-4.2/trunk/gcc/config/i386/llvm-i386.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/config/i386/llvm-i386.cpp?rev=75704&r1=75703&r2=75704&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/config/i386/llvm-i386.cpp (original)
+++ llvm-gcc-4.2/trunk/gcc/config/i386/llvm-i386.cpp Tue Jul 14 18:10:12 2009
@@ -30,6 +30,7 @@
#include "llvm/DerivedTypes.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
+#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm-i386-target.h"
@@ -37,6 +38,8 @@
#include "toplev.h"
}
+static LLVMContext &Context = getGlobalContext();
+
/* TargetIntrinsicLower - For builtins that we want to expand to normal LLVM
* code, emit the code now. If we can handle the code, this macro should emit
* the code, return true.
@@ -109,10 +112,12 @@
case IX86_BUILTIN_XORPD:
case IX86_BUILTIN_ANDNPD:
if (cast<VectorType>(ResultType)->getNumElements() == 4) // v4f32
- Ops[0] = Builder.CreateBitCast(Ops[0], VectorType::get(Type::Int32Ty, 4),
+ Ops[0] = Builder.CreateBitCast(Ops[0],
+ Context.getVectorType(Type::Int32Ty, 4),
"tmp");
else // v2f64
- Ops[0] = Builder.CreateBitCast(Ops[0], VectorType::get(Type::Int64Ty, 2),
+ Ops[0] = Builder.CreateBitCast(Ops[0],
+ Context.getVectorType(Type::Int64Ty, 2),
"tmp");
Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType(), "tmp");
@@ -268,25 +273,25 @@
Result = BuildVectorShuffle(Ops[0], Ops[1], 2, 1);
return true;
case IX86_BUILTIN_MOVQ: {
- Value *Zero = ConstantInt::get(Type::Int32Ty, 0);
+ Value *Zero = Context.getConstantInt(Type::Int32Ty, 0);
Result = BuildVector(Zero, Zero, Zero, Zero, NULL);
Result = BuildVectorShuffle(Result, Ops[0], 4, 5, 2, 3);
return true;
}
case IX86_BUILTIN_LOADQ: {
- PointerType *i64Ptr = PointerType::getUnqual(Type::Int64Ty);
+ PointerType *i64Ptr = Context.getPointerTypeUnqual(Type::Int64Ty);
Ops[0] = Builder.CreateBitCast(Ops[0], i64Ptr, "tmp");
Ops[0] = Builder.CreateLoad(Ops[0], "tmp");
- Value *Zero = ConstantInt::get(Type::Int64Ty, 0);
+ Value *Zero = Context.getConstantInt(Type::Int64Ty, 0);
Result = BuildVector(Zero, Zero, NULL);
- Value *Idx = ConstantInt::get(Type::Int32Ty, 0);
+ Value *Idx = Context.getConstantInt(Type::Int32Ty, 0);
Result = Builder.CreateInsertElement(Result, Ops[0], Idx, "tmp");
Result = Builder.CreateBitCast(Result, ResultType, "tmp");
return true;
}
case IX86_BUILTIN_LOADUPS: {
- VectorType *v4f32 = VectorType::get(Type::FloatTy, 4);
- PointerType *v4f32Ptr = PointerType::getUnqual(v4f32);
+ VectorType *v4f32 = Context.getVectorType(Type::FloatTy, 4);
+ PointerType *v4f32Ptr = Context.getPointerTypeUnqual(v4f32);
Value *BC = Builder.CreateBitCast(Ops[0], v4f32Ptr, "tmp");
LoadInst *LI = Builder.CreateLoad(BC, "tmp");
LI->setAlignment(1);
@@ -294,8 +299,8 @@
return true;
}
case IX86_BUILTIN_LOADUPD: {
- VectorType *v2f64 = VectorType::get(Type::DoubleTy, 2);
- PointerType *v2f64Ptr = PointerType::getUnqual(v2f64);
+ VectorType *v2f64 = Context.getVectorType(Type::DoubleTy, 2);
+ PointerType *v2f64Ptr = Context.getPointerTypeUnqual(v2f64);
Value *BC = Builder.CreateBitCast(Ops[0], v2f64Ptr, "tmp");
LoadInst *LI = Builder.CreateLoad(BC, "tmp");
LI->setAlignment(1);
@@ -303,8 +308,8 @@
return true;
}
case IX86_BUILTIN_LOADDQU: {
- VectorType *v16i8 = VectorType::get(Type::Int8Ty, 16);
- PointerType *v16i8Ptr = PointerType::getUnqual(v16i8);
+ VectorType *v16i8 = Context.getVectorType(Type::Int8Ty, 16);
+ PointerType *v16i8Ptr = Context.getPointerTypeUnqual(v16i8);
Value *BC = Builder.CreateBitCast(Ops[0], v16i8Ptr, "tmp");
LoadInst *LI = Builder.CreateLoad(BC, "tmp");
LI->setAlignment(1);
@@ -312,8 +317,8 @@
return true;
}
case IX86_BUILTIN_STOREUPS: {
- VectorType *v4f32 = VectorType::get(Type::FloatTy, 4);
- PointerType *v4f32Ptr = PointerType::getUnqual(v4f32);
+ VectorType *v4f32 = Context.getVectorType(Type::FloatTy, 4);
+ PointerType *v4f32Ptr = Context.getPointerTypeUnqual(v4f32);
Value *BC = Builder.CreateBitCast(Ops[0], v4f32Ptr, "tmp");
StoreInst *SI = Builder.CreateStore(Ops[1], BC);
SI->setAlignment(1);
@@ -321,8 +326,8 @@
return true;
}
case IX86_BUILTIN_STOREUPD: {
- VectorType *v2f64 = VectorType::get(Type::DoubleTy, 2);
- PointerType *v2f64Ptr = PointerType::getUnqual(v2f64);
+ VectorType *v2f64 = Context.getVectorType(Type::DoubleTy, 2);
+ PointerType *v2f64Ptr = Context.getPointerTypeUnqual(v2f64);
Value *BC = Builder.CreateBitCast(Ops[0], v2f64Ptr, "tmp");
StoreInst *SI = Builder.CreateStore(Ops[1], BC);
SI->setAlignment(1);
@@ -330,8 +335,8 @@
return true;
}
case IX86_BUILTIN_STOREDQU: {
- VectorType *v16i8 = VectorType::get(Type::Int8Ty, 16);
- PointerType *v16i8Ptr = PointerType::getUnqual(v16i8);
+ VectorType *v16i8 = Context.getVectorType(Type::Int8Ty, 16);
+ PointerType *v16i8Ptr = Context.getPointerTypeUnqual(v16i8);
Value *BC = Builder.CreateBitCast(Ops[0], v16i8Ptr, "tmp");
StoreInst *SI = Builder.CreateStore(Ops[1], BC);
SI->setAlignment(1);
@@ -339,20 +344,20 @@
return true;
}
case IX86_BUILTIN_LOADHPS: {
- PointerType *f64Ptr = PointerType::getUnqual(Type::DoubleTy);
+ PointerType *f64Ptr = Context.getPointerTypeUnqual(Type::DoubleTy);
Ops[1] = Builder.CreateBitCast(Ops[1], f64Ptr, "tmp");
Value *Load = Builder.CreateLoad(Ops[1], "tmp");
- Ops[1] = BuildVector(Load, UndefValue::get(Type::DoubleTy), NULL);
+ Ops[1] = BuildVector(Load, Context.getUndef(Type::DoubleTy), NULL);
Ops[1] = Builder.CreateBitCast(Ops[1], ResultType, "tmp");
Result = BuildVectorShuffle(Ops[0], Ops[1], 0, 1, 4, 5);
Result = Builder.CreateBitCast(Result, ResultType, "tmp");
return true;
}
case IX86_BUILTIN_LOADLPS: {
- PointerType *f64Ptr = PointerType::getUnqual(Type::DoubleTy);
+ PointerType *f64Ptr = Context.getPointerTypeUnqual(Type::DoubleTy);
Ops[1] = Builder.CreateBitCast(Ops[1], f64Ptr, "tmp");
Value *Load = Builder.CreateLoad(Ops[1], "tmp");
- Ops[1] = BuildVector(Load, UndefValue::get(Type::DoubleTy), NULL);
+ Ops[1] = BuildVector(Load, Context.getUndef(Type::DoubleTy), NULL);
Ops[1] = Builder.CreateBitCast(Ops[1], ResultType, "tmp");
Result = BuildVectorShuffle(Ops[0], Ops[1], 4, 5, 2, 3);
Result = Builder.CreateBitCast(Result, ResultType, "tmp");
@@ -360,7 +365,7 @@
}
case IX86_BUILTIN_LOADHPD: {
Value *Load = Builder.CreateLoad(Ops[1], "tmp");
- Ops[1] = BuildVector(Load, UndefValue::get(Type::DoubleTy), NULL);
+ Ops[1] = BuildVector(Load, Context.getUndef(Type::DoubleTy), NULL);
Ops[1] = Builder.CreateBitCast(Ops[1], ResultType, "tmp");
Result = BuildVectorShuffle(Ops[0], Ops[1], 0, 2);
Result = Builder.CreateBitCast(Result, ResultType, "tmp");
@@ -368,27 +373,27 @@
}
case IX86_BUILTIN_LOADLPD: {
Value *Load = Builder.CreateLoad(Ops[1], "tmp");
- Ops[1] = BuildVector(Load, UndefValue::get(Type::DoubleTy), NULL);
+ Ops[1] = BuildVector(Load, Context.getUndef(Type::DoubleTy), NULL);
Ops[1] = Builder.CreateBitCast(Ops[1], ResultType, "tmp");
Result = BuildVectorShuffle(Ops[0], Ops[1], 2, 1);
Result = Builder.CreateBitCast(Result, ResultType, "tmp");
return true;
}
case IX86_BUILTIN_STOREHPS: {
- VectorType *v2f64 = VectorType::get(Type::DoubleTy, 2);
- PointerType *f64Ptr = PointerType::getUnqual(Type::DoubleTy);
+ VectorType *v2f64 = Context.getVectorType(Type::DoubleTy, 2);
+ PointerType *f64Ptr = Context.getPointerTypeUnqual(Type::DoubleTy);
Ops[0] = Builder.CreateBitCast(Ops[0], f64Ptr, "tmp");
- Value *Idx = ConstantInt::get(Type::Int32Ty, 1);
+ Value *Idx = Context.getConstantInt(Type::Int32Ty, 1);
Ops[1] = Builder.CreateBitCast(Ops[1], v2f64, "tmp");
Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "tmp");
Result = Builder.CreateStore(Ops[1], Ops[0]);
return true;
}
case IX86_BUILTIN_STORELPS: {
- VectorType *v2f64 = VectorType::get(Type::DoubleTy, 2);
- PointerType *f64Ptr = PointerType::getUnqual(Type::DoubleTy);
+ VectorType *v2f64 = Context.getVectorType(Type::DoubleTy, 2);
+ PointerType *f64Ptr = Context.getPointerTypeUnqual(Type::DoubleTy);
Ops[0] = Builder.CreateBitCast(Ops[0], f64Ptr, "tmp");
- Value *Idx = ConstantInt::get(Type::Int32Ty, 0);
+ Value *Idx = Context.getConstantInt(Type::Int32Ty, 0);
Ops[1] = Builder.CreateBitCast(Ops[1], v2f64, "tmp");
Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "tmp");
Result = Builder.CreateStore(Ops[1], Ops[0]);
@@ -463,7 +468,7 @@
case IX86_BUILTIN_CMPNGEPS: PredCode = 6; flip = true; break;
case IX86_BUILTIN_CMPORDPS: PredCode = 7; break;
}
- Value *Pred = ConstantInt::get(Type::Int8Ty, PredCode);
+ Value *Pred = Context.getConstantInt(Type::Int8Ty, PredCode);
Value *Arg0 = Ops[0];
Value *Arg1 = Ops[1];
if (flip) std::swap(Arg0, Arg1);
@@ -496,7 +501,7 @@
case IX86_BUILTIN_CMPNLESS: PredCode = 6; break;
case IX86_BUILTIN_CMPORDSS: PredCode = 7; break;
}
- Value *Pred = ConstantInt::get(Type::Int8Ty, PredCode);
+ Value *Pred = Context.getConstantInt(Type::Int8Ty, PredCode);
Value *CallOps[3] = { Ops[0], Ops[1], Pred };
Result = Builder.CreateCall(cmpss, CallOps, CallOps+3, "tmp");
Result = Builder.CreateBitCast(Result, ResultType, "tmp");
@@ -533,7 +538,7 @@
case IX86_BUILTIN_CMPNGEPD: PredCode = 6; flip = true; break;
case IX86_BUILTIN_CMPORDPD: PredCode = 7; break;
}
- Value *Pred = ConstantInt::get(Type::Int8Ty, PredCode);
+ Value *Pred = Context.getConstantInt(Type::Int8Ty, PredCode);
Value *Arg0 = Ops[0];
Value *Arg1 = Ops[1];
if (flip) std::swap(Arg0, Arg1);
@@ -565,7 +570,7 @@
case IX86_BUILTIN_CMPNLESD: PredCode = 6; break;
case IX86_BUILTIN_CMPORDSD: PredCode = 7; break;
}
- Value *Pred = ConstantInt::get(Type::Int8Ty, PredCode);
+ Value *Pred = Context.getConstantInt(Type::Int8Ty, PredCode);
Value *CallOps[3] = { Ops[0], Ops[1], Pred };
Result = Builder.CreateCall(cmpsd, CallOps, CallOps+3, "tmp");
Result = Builder.CreateBitCast(Result, ResultType, "tmp");
@@ -576,7 +581,7 @@
Intrinsic::getDeclaration(TheModule, Intrinsic::x86_sse_ldmxcsr);
Value *Ptr = CreateTemporary(Type::Int32Ty);
Builder.CreateStore(Ops[0], Ptr);
- Ptr = Builder.CreateBitCast(Ptr, PointerType::getUnqual(Type::Int8Ty), "tmp");
+ Ptr = Builder.CreateBitCast(Ptr, Context.getPointerTypeUnqual(Type::Int8Ty), "tmp");
Result = Builder.CreateCall(ldmxcsr, Ptr);
return true;
}
@@ -584,7 +589,7 @@
Function *stmxcsr =
Intrinsic::getDeclaration(TheModule, Intrinsic::x86_sse_stmxcsr);
Value *Ptr = CreateTemporary(Type::Int32Ty);
- Value *BPtr = Builder.CreateBitCast(Ptr, PointerType::getUnqual(Type::Int8Ty),
+ Value *BPtr = Builder.CreateBitCast(Ptr, Context.getPointerTypeUnqual(Type::Int8Ty),
"tmp");
Builder.CreateCall(stmxcsr, BPtr);
@@ -851,25 +856,25 @@
if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
if (VTy->getNumElements() == 2) {
if (VTy->getElementType()->isInteger()) {
- Elts.push_back(VectorType::get(Type::Int64Ty, 2));
+ Elts.push_back(Context.getVectorType(Type::Int64Ty, 2));
} else {
- Elts.push_back(VectorType::get(Type::DoubleTy, 2));
+ Elts.push_back(Context.getVectorType(Type::DoubleTy, 2));
}
Bytes -= 8;
} else {
assert(VTy->getNumElements() == 4);
if (VTy->getElementType()->isInteger()) {
- Elts.push_back(VectorType::get(Type::Int32Ty, 4));
+ Elts.push_back(Context.getVectorType(Type::Int32Ty, 4));
} else {
- Elts.push_back(VectorType::get(Type::FloatTy, 4));
+ Elts.push_back(Context.getVectorType(Type::FloatTy, 4));
}
Bytes -= 4;
}
} else if (llvm_x86_is_all_integer_types(Ty)) {
- Elts.push_back(VectorType::get(Type::Int32Ty, 4));
+ Elts.push_back(Context.getVectorType(Type::Int32Ty, 4));
Bytes -= 4;
} else {
- Elts.push_back(VectorType::get(Type::FloatTy, 4));
+ Elts.push_back(Context.getVectorType(Type::FloatTy, 4));
Bytes -= 4;
}
} else if (Class[i+1] == X86_64_SSESF_CLASS) {
@@ -882,10 +887,10 @@
Elts.push_back(Type::DoubleTy);
Bytes -= 16;
} else if (Class[i+1] == X86_64_SSEDF_CLASS && Bytes == 16) {
- Elts.push_back(VectorType::get(Type::FloatTy, 2));
+ Elts.push_back(Context.getVectorType(Type::FloatTy, 2));
Elts.push_back(Type::DoubleTy);
} else if (Class[i+1] == X86_64_INTEGER_CLASS) {
- Elts.push_back(VectorType::get(Type::FloatTy, 2));
+ Elts.push_back(Context.getVectorType(Type::FloatTy, 2));
Elts.push_back(Type::Int64Ty);
} else if (Class[i+1] == X86_64_NO_CLASS) {
// padding bytes, don't pass
@@ -1144,9 +1149,9 @@
if (Size <= 8)
return Type::Int64Ty;
else if (Size <= 16)
- return IntegerType::get(128);
+ return Context.getIntegerType(128);
else if (Size <= 32)
- return IntegerType::get(256);
+ return Context.getIntegerType(256);
}
return NULL;
}
@@ -1215,23 +1220,23 @@
if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
if (VTy->getNumElements() == 2) {
if (VTy->getElementType()->isInteger())
- Elts.push_back(VectorType::get(Type::Int64Ty, 2));
+ Elts.push_back(Context.getVectorType(Type::Int64Ty, 2));
else
- Elts.push_back(VectorType::get(Type::DoubleTy, 2));
+ Elts.push_back(Context.getVectorType(Type::DoubleTy, 2));
Bytes -= 8;
} else {
assert(VTy->getNumElements() == 4);
if (VTy->getElementType()->isInteger())
- Elts.push_back(VectorType::get(Type::Int32Ty, 4));
+ Elts.push_back(Context.getVectorType(Type::Int32Ty, 4));
else
- Elts.push_back(VectorType::get(Type::FloatTy, 4));
+ Elts.push_back(Context.getVectorType(Type::FloatTy, 4));
Bytes -= 4;
}
} else if (llvm_x86_is_all_integer_types(Ty)) {
- Elts.push_back(VectorType::get(Type::Int32Ty, 4));
+ Elts.push_back(Context.getVectorType(Type::Int32Ty, 4));
Bytes -= 4;
} else {
- Elts.push_back(VectorType::get(Type::FloatTy, 4));
+ Elts.push_back(Context.getVectorType(Type::FloatTy, 4));
Bytes -= 4;
}
} else if (Class[i+1] == X86_64_SSESF_CLASS) {
@@ -1244,10 +1249,10 @@
Elts.push_back(Type::DoubleTy);
Bytes -= 16;
} else if (Class[i+1] == X86_64_SSEDF_CLASS && Bytes == 16) {
- Elts.push_back(VectorType::get(Type::FloatTy, 2));
+ Elts.push_back(Context.getVectorType(Type::FloatTy, 2));
Elts.push_back(Type::DoubleTy);
} else if (Class[i+1] == X86_64_INTEGER_CLASS) {
- Elts.push_back(VectorType::get(Type::FloatTy, 2));
+ Elts.push_back(Context.getVectorType(Type::FloatTy, 2));
Elts.push_back(Type::Int64Ty);
} else if (Class[i+1] == X86_64_NO_CLASS) {
Elts.push_back(Type::DoubleTy);
@@ -1296,12 +1301,12 @@
if (llvm_x86_should_not_return_complex_in_memory(type)) {
ElementTypes.push_back(Type::X86_FP80Ty);
ElementTypes.push_back(Type::X86_FP80Ty);
- return StructType::get(ElementTypes, STy->isPacked());
+ return Context.getStructType(ElementTypes, STy->isPacked());
}
std::vector<const Type*> GCCElts;
llvm_x86_64_get_multiple_return_reg_classes(type, Ty, GCCElts);
- return StructType::get(GCCElts, false);
+ return Context.getStructType(GCCElts, false);
}
// llvm_x86_extract_mrv_array_element - Helper function that help extract
@@ -1321,12 +1326,12 @@
Value *EVI = Builder.CreateExtractValue(Src, SrcFieldNo, "mrv_gr");
const StructType *STy = cast<StructType>(Src->getType());
llvm::Value *Idxs[3];
- Idxs[0] = ConstantInt::get(llvm::Type::Int32Ty, 0);
- Idxs[1] = ConstantInt::get(llvm::Type::Int32Ty, DestFieldNo);
- Idxs[2] = ConstantInt::get(llvm::Type::Int32Ty, DestElemNo);
+ Idxs[0] = Context.getConstantInt(llvm::Type::Int32Ty, 0);
+ Idxs[1] = Context.getConstantInt(llvm::Type::Int32Ty, DestFieldNo);
+ Idxs[2] = Context.getConstantInt(llvm::Type::Int32Ty, DestElemNo);
Value *GEP = Builder.CreateGEP(Dest, Idxs, Idxs+3, "mrv_gep");
if (isa<VectorType>(STy->getElementType(SrcFieldNo))) {
- Value *ElemIndex = ConstantInt::get(Type::Int32Ty, SrcElemNo);
+ Value *ElemIndex = Context.getConstantInt(Type::Int32Ty, SrcElemNo);
Value *EVIElem = Builder.CreateExtractElement(EVI, ElemIndex, "mrv");
Builder.CreateStore(EVIElem, GEP, isVolatile);
} else {
@@ -1359,12 +1364,12 @@
Value *EVI = Builder.CreateExtractValue(Src, 0, "mrv_gr");
- Value *E0Index = ConstantInt::get(Type::Int32Ty, 0);
+ Value *E0Index = Context.getConstantInt(Type::Int32Ty, 0);
Value *EVI0 = Builder.CreateExtractElement(EVI, E0Index, "mrv.v");
Value *GEP0 = Builder.CreateStructGEP(Dest, 0, "mrv_gep");
Builder.CreateStore(EVI0, GEP0, isVolatile);
- Value *E1Index = ConstantInt::get(Type::Int32Ty, 1);
+ Value *E1Index = Context.getConstantInt(Type::Int32Ty, 1);
Value *EVI1 = Builder.CreateExtractElement(EVI, E1Index, "mrv.v");
Value *GEP1 = Builder.CreateStructGEP(Dest, 1, "mrv_gep");
Builder.CreateStore(EVI1, GEP1, isVolatile);
@@ -1391,16 +1396,16 @@
// Special treatement for _Complex.
if (const StructType *ComplexType = dyn_cast<StructType>(DestElemType)) {
llvm::Value *Idxs[3];
- Idxs[0] = ConstantInt::get(llvm::Type::Int32Ty, 0);
- Idxs[1] = ConstantInt::get(llvm::Type::Int32Ty, DNO);
+ Idxs[0] = Context.getConstantInt(llvm::Type::Int32Ty, 0);
+ Idxs[1] = Context.getConstantInt(llvm::Type::Int32Ty, DNO);
- Idxs[2] = ConstantInt::get(llvm::Type::Int32Ty, 0);
+ Idxs[2] = Context.getConstantInt(llvm::Type::Int32Ty, 0);
Value *GEP = Builder.CreateGEP(Dest, Idxs, Idxs+3, "mrv_gep");
Value *EVI = Builder.CreateExtractValue(Src, 0, "mrv_gr");
Builder.CreateStore(EVI, GEP, isVolatile);
++SNO;
- Idxs[2] = ConstantInt::get(llvm::Type::Int32Ty, 1);
+ Idxs[2] = Context.getConstantInt(llvm::Type::Int32Ty, 1);
GEP = Builder.CreateGEP(Dest, Idxs, Idxs+3, "mrv_gep");
EVI = Builder.CreateExtractValue(Src, 1, "mrv_gr");
Builder.CreateStore(EVI, GEP, isVolatile);
Modified: llvm-gcc-4.2/trunk/gcc/config/rs6000/llvm-rs6000.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/config/rs6000/llvm-rs6000.cpp?rev=75704&r1=75703&r2=75704&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/config/rs6000/llvm-rs6000.cpp (original)
+++ llvm-gcc-4.2/trunk/gcc/config/rs6000/llvm-rs6000.cpp Tue Jul 14 18:10:12 2009
@@ -29,6 +29,7 @@
#include "llvm/DerivedTypes.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
+#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
extern "C" {
@@ -43,6 +44,8 @@
#endif
}
+static LLVMContext &Context = getGlobalContext();
+
// MergeIntPtrOperand - This merges the int and pointer operands of a GCC
// intrinsic into a single operand for the LLVM intrinsic. For example, this
// turns LVX(4, p) -> llvm.lvx(gep P, 4). OPNUM specifies the operand number
@@ -53,7 +56,7 @@
const Type *ResultType,
std::vector<Value*> &Ops,
LLVMBuilder &Builder, Value *&Result) {
- const Type *VoidPtrTy = PointerType::getUnqual(Type::Int8Ty);
+ const Type *VoidPtrTy = Context.getPointerTypeUnqual(Type::Int8Ty);
Function *IntFn = Intrinsic::getDeclaration(TheModule, IID);
@@ -174,30 +177,30 @@
return true;
case ALTIVEC_BUILTIN_VSPLTISB:
if (Constant *Elt = dyn_cast<ConstantInt>(Ops[0])) {
- Elt = ConstantExpr::getIntegerCast(Elt, Type::Int8Ty, true);
+ Elt = Context.getConstantExprIntegerCast(Elt, Type::Int8Ty, true);
Result = BuildVector(Elt, Elt, Elt, Elt, Elt, Elt, Elt, Elt,
Elt, Elt, Elt, Elt, Elt, Elt, Elt, Elt, NULL);
} else {
error("%Helement must be an immediate", &EXPR_LOCATION(exp));
- Result = UndefValue::get(VectorType::get(Type::Int8Ty, 16));
+ Result = Context.getUndef(Context.getVectorType(Type::Int8Ty, 16));
}
return true;
case ALTIVEC_BUILTIN_VSPLTISH:
if (Constant *Elt = dyn_cast<ConstantInt>(Ops[0])) {
- Elt = ConstantExpr::getIntegerCast(Elt, Type::Int16Ty, true);
+ Elt = Context.getConstantExprIntegerCast(Elt, Type::Int16Ty, true);
Result = BuildVector(Elt, Elt, Elt, Elt, Elt, Elt, Elt, Elt, NULL);
} else {
error("%Helement must be an immediate", &EXPR_LOCATION(exp));
- Result = UndefValue::get(VectorType::get(Type::Int16Ty, 8));
+ Result = Context.getUndef(Context.getVectorType(Type::Int16Ty, 8));
}
return true;
case ALTIVEC_BUILTIN_VSPLTISW:
if (Constant *Elt = dyn_cast<ConstantInt>(Ops[0])) {
- Elt = ConstantExpr::getIntegerCast(Elt, Type::Int32Ty, true);
+ Elt = Context.getConstantExprIntegerCast(Elt, Type::Int32Ty, true);
Result = BuildVector(Elt, Elt, Elt, Elt, NULL);
} else {
error("%Hmask must be an immediate", &EXPR_LOCATION(exp));
- Result = UndefValue::get(VectorType::get(Type::Int32Ty, 4));
+ Result = Context.getUndef(Context.getVectorType(Type::Int32Ty, 4));
}
return true;
case ALTIVEC_BUILTIN_VSPLTB:
@@ -245,7 +248,7 @@
if (ConstantInt *Elt = dyn_cast<ConstantInt>(Ops[2])) {
/* Map all of these to a shuffle. */
unsigned Amt = Elt->getZExtValue() & 15;
- VectorType *v16i8 = VectorType::get(Type::Int8Ty, 16);
+ VectorType *v16i8 = Context.getVectorType(Type::Int8Ty, 16);
Ops[0] = Builder.CreateBitCast(Ops[0], v16i8, "tmp");
Ops[1] = Builder.CreateBitCast(Ops[1], v16i8, "tmp");
Result = BuildVectorShuffle(Ops[0], Ops[1],
@@ -295,10 +298,10 @@
return true;
case ALTIVEC_BUILTIN_ABS_V4SF: {
// and out sign bits
- VectorType *v4i32 = VectorType::get(Type::Int32Ty, 4);
+ VectorType *v4i32 = Context.getVectorType(Type::Int32Ty, 4);
Ops[0] = Builder.CreateBitCast(Ops[0], v4i32, "tmp");
- Constant *C = ConstantInt::get(Type::Int32Ty, 0x7FFFFFFF);
- C = ConstantVector::get(std::vector<Constant*>(4, C));
+ Constant *C = Context.getConstantInt(Type::Int32Ty, 0x7FFFFFFF);
+ C = Context.getConstantVector(std::vector<Constant*>(4, C));
Result = Builder.CreateAnd(Ops[0], C, "tmp");
Result = Builder.CreateBitCast(Result, ResultType, "tmp");
return true;
@@ -353,7 +356,7 @@
case ALTIVEC_BUILTIN_VPERM_8HI:
case ALTIVEC_BUILTIN_VPERM_16QI: {
// Operation is identical on all types; we have a single intrinsic.
- const Type *VecTy = VectorType::get(Type::Int32Ty, 4);
+ const Type *VecTy = Context.getVectorType(Type::Int32Ty, 4);
Value *Op0 = CastToType(Instruction::BitCast, Ops[0], VecTy);
Value *Op1 = CastToType(Instruction::BitCast, Ops[1], VecTy);
Value *ActualOps[] = { Op0, Op1, Ops[2]};
@@ -368,7 +371,7 @@
case ALTIVEC_BUILTIN_VSEL_8HI:
case ALTIVEC_BUILTIN_VSEL_16QI: {
// Operation is identical on all types; we have a single intrinsic.
- const Type *VecTy = VectorType::get(Type::Int32Ty, 4);
+ const Type *VecTy = Context.getVectorType(Type::Int32Ty, 4);
Value *Op0 = CastToType(Instruction::BitCast, Ops[0], VecTy);
Value *Op1 = CastToType(Instruction::BitCast, Ops[1], VecTy);
Value *Op2 = CastToType(Instruction::BitCast, Ops[2], VecTy);
Modified: llvm-gcc-4.2/trunk/gcc/llvm-backend.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/llvm-backend.cpp?rev=75704&r1=75703&r2=75704&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/llvm-backend.cpp (original)
+++ llvm-gcc-4.2/trunk/gcc/llvm-backend.cpp Tue Jul 14 18:10:12 2009
@@ -271,6 +271,8 @@
if (LLVMValues.empty())
return;
+ LLVMContext &Context = getGlobalContext();
+
std::vector<Constant *> ValuesForPCH;
for (std::vector<Value *>::iterator I = LLVMValues.begin(),
E = LLVMValues.end(); I != E; ++I) {
@@ -279,11 +281,11 @@
else
// Non constant values, e.g. arguments, are not at global scope.
// When PCH is read, only global scope values are used.
- ValuesForPCH.push_back(getGlobalContext().getNullValue(Type::Int32Ty));
+ ValuesForPCH.push_back(Context.getNullValue(Type::Int32Ty));
}
// Create string table.
- Constant *LLVMValuesTable = ConstantStruct::get(ValuesForPCH, false);
+ Constant *LLVMValuesTable = Context.getConstantStruct(ValuesForPCH, false);
// Create variable to hold this string table.
new GlobalVariable(*TheModule, LLVMValuesTable->getType(), true,
@@ -789,25 +791,26 @@
/// initializer suitable for the llvm.global_[cd]tors globals.
static void CreateStructorsList(std::vector<std::pair<Constant*, int> > &Tors,
const char *Name) {
+ LLVMContext &Context = getGlobalContext();
+
std::vector<Constant*> InitList;
std::vector<Constant*> StructInit;
StructInit.resize(2);
- const Type *FPTy = FunctionType::get(Type::VoidTy, std::vector<const Type*>(),
- false);
- FPTy = PointerType::getUnqual(FPTy);
+ const Type *FPTy =
+ Context.getFunctionType(Type::VoidTy, std::vector<const Type*>(), false);
+ FPTy = Context.getPointerTypeUnqual(FPTy);
for (unsigned i = 0, e = Tors.size(); i != e; ++i) {
- StructInit[0] = ConstantInt::get(Type::Int32Ty, Tors[i].second);
+ StructInit[0] = Context.getConstantInt(Type::Int32Ty, Tors[i].second);
// __attribute__(constructor) can be on a function with any type. Make sure
// the pointer is void()*.
StructInit[1] = TheFolder->CreateBitCast(Tors[i].first, FPTy);
- InitList.push_back(ConstantStruct::get(StructInit, false));
+ InitList.push_back(Context.getConstantStruct(StructInit, false));
}
- Constant *Array =
- ConstantArray::get(ArrayType::get(InitList[0]->getType(), InitList.size()),
- InitList);
+ Constant *Array = Context.getConstantArray(
+ Context.getArrayType(InitList[0]->getType(), InitList.size()), InitList);
new GlobalVariable(*TheModule, Array->getType(), false,
GlobalValue::AppendingLinkage,
Array, Name);
@@ -816,6 +819,7 @@
// llvm_asm_file_end - Finish the .s file.
void llvm_asm_file_end(void) {
timevar_push(TV_LLVM_PERFILE);
+ LLVMContext &Context = getGlobalContext();
performLateBackendInitialization();
createPerFunctionOptimizationPasses();
@@ -834,15 +838,15 @@
if (!AttributeUsedGlobals.empty()) {
std::vector<Constant *> AUGs;
- const Type *SBP= PointerType::getUnqual(Type::Int8Ty);
+ const Type *SBP= Context.getPointerTypeUnqual(Type::Int8Ty);
for (SmallSetVector<Constant *,32>::iterator AI = AttributeUsedGlobals.begin(),
AE = AttributeUsedGlobals.end(); AI != AE; ++AI) {
Constant *C = *AI;
AUGs.push_back(TheFolder->CreateBitCast(C, SBP));
}
- ArrayType *AT = ArrayType::get(SBP, AUGs.size());
- Constant *Init = ConstantArray::get(AT, AUGs);
+ ArrayType *AT = Context.getArrayType(SBP, AUGs.size());
+ Constant *Init = Context.getConstantArray(AT, AUGs);
GlobalValue *gv = new GlobalVariable(*TheModule, AT, false,
GlobalValue::AppendingLinkage, Init,
"llvm.used");
@@ -852,8 +856,8 @@
// Add llvm.global.annotations
if (!AttributeAnnotateGlobals.empty()) {
- Constant *Array =
- ConstantArray::get(ArrayType::get(AttributeAnnotateGlobals[0]->getType(),
+ Constant *Array = Context.getConstantArray(
+ Context.getArrayType(AttributeAnnotateGlobals[0]->getType(),
AttributeAnnotateGlobals.size()),
AttributeAnnotateGlobals);
GlobalValue *gv = new GlobalVariable(*TheModule, Array->getType(), false,
@@ -987,6 +991,8 @@
TREE_ASM_WRITTEN(decl) = 1;
return; // Do not process broken code.
}
+
+ LLVMContext &Context = getGlobalContext();
timevar_push(TV_LLVM_GLOBALS);
@@ -1055,7 +1061,7 @@
handleVisibility(decl, GA);
if (GA->getType()->canLosslesslyBitCastTo(V->getType()))
- V->replaceAllUsesWith(ConstantExpr::getBitCast(GA, V->getType()));
+ V->replaceAllUsesWith(Context.getConstantExprBitCast(GA, V->getType()));
else if (!V->use_empty()) {
error ("%J Alias %qD used with invalid type!", decl, decl);
timevar_pop(TV_LLVM_GLOBALS);
@@ -1082,7 +1088,7 @@
// Convert string to global value. Use existing global if possible.
Constant* ConvertMetadataStringToGV(const char *str) {
- Constant *Init = ConstantArray::get(std::string(str));
+ Constant *Init = getGlobalContext().getConstantArray(std::string(str));
// Use cached string if it exists.
static std::map<Constant*, GlobalVariable*> StringCSTCache;
@@ -1102,6 +1108,7 @@
/// AddAnnotateAttrsToGlobal - Adds decls that have a
/// annotate attribute to a vector to be emitted later.
void AddAnnotateAttrsToGlobal(GlobalValue *GV, tree decl) {
+ LLVMContext &Context = getGlobalContext();
// Handle annotate attribute on global.
tree annotateAttr = lookup_attribute("annotate", DECL_ATTRIBUTES (decl));
@@ -1109,9 +1116,10 @@
return;
// Get file and line number
- Constant *lineNo = ConstantInt::get(Type::Int32Ty, DECL_SOURCE_LINE(decl));
+ Constant *lineNo =
+ Context.getConstantInt(Type::Int32Ty, DECL_SOURCE_LINE(decl));
Constant *file = ConvertMetadataStringToGV(DECL_SOURCE_FILE(decl));
- const Type *SBP= PointerType::getUnqual(Type::Int8Ty);
+ const Type *SBP= Context.getPointerTypeUnqual(Type::Int8Ty);
file = TheFolder->CreateBitCast(file, SBP);
// There may be multiple annotate attributes. Pass return of lookup_attr
@@ -1139,7 +1147,8 @@
lineNo
};
- AttributeAnnotateGlobals.push_back(ConstantStruct::get(Element, 4, false));
+ AttributeAnnotateGlobals.push_back(
+ Context.getConstantStruct(Element, 4, false));
}
// Get next annotate attribute.
@@ -1178,6 +1187,8 @@
// been set. Don't crash.
// We can also get here when DECL_LLVM has not been set for some object
// referenced in the initializer. Don't crash then either.
+ LLVMContext &Context = getGlobalContext();
+
if (errorcount || sorrycount)
return;
@@ -1188,7 +1199,7 @@
handleVisibility(decl, GV);
// Temporary to avoid infinite recursion (see comments emit_global_to_llvm)
- GV->setInitializer(UndefValue::get(GV->getType()->getElementType()));
+ GV->setInitializer(Context.getUndef(GV->getType()->getElementType()));
// Convert the initializer over.
Constant *Init = TreeConstantToLLVM::Convert(DECL_INITIAL(decl));
@@ -1243,6 +1254,8 @@
if (!TYPE_SIZE(TREE_TYPE(decl)))
return;
+ LLVMContext &Context = getGlobalContext();
+
timevar_push(TV_LLVM_GLOBALS);
// Get or create the global variable now.
@@ -1266,7 +1279,7 @@
// on it". When constructing the initializer it might refer to itself.
// this can happen for things like void *G = &G;
//
- GV->setInitializer(UndefValue::get(GV->getType()->getElementType()));
+ GV->setInitializer(Context.getUndef(GV->getType()->getElementType()));
Init = TreeConstantToLLVM::Convert(DECL_INITIAL(decl));
}
@@ -1401,6 +1414,7 @@
/// well-formed. If not, emit error messages and return true. If so, return
/// false.
bool ValidateRegisterVariable(tree decl) {
+ LLVMContext &Context = getGlobalContext();
int RegNumber = decode_reg_name(extractRegisterName(decl));
const Type *Ty = ConvertType(TREE_TYPE(decl));
@@ -1431,10 +1445,10 @@
if (TREE_THIS_VOLATILE(decl))
warning(0, "volatile register variables don%'t work as you might wish");
- SET_DECL_LLVM(decl, ConstantInt::getFalse());
+ SET_DECL_LLVM(decl, Context.getConstantIntFalse());
return false; // Everything ok.
}
- SET_DECL_LLVM(decl, ConstantInt::getTrue());
+ SET_DECL_LLVM(decl, Context.getConstantIntTrue());
return true;
}
@@ -1461,6 +1475,8 @@
else if (TREE_CODE(decl) == TYPE_DECL || TREE_CODE(decl) == LABEL_DECL)
abort ();
#endif
+
+ LLVMContext &Context = getGlobalContext();
// For a duplicate declaration, we can be called twice on the
// same DECL node. Don't discard the LLVM already made.
@@ -1567,7 +1583,7 @@
// If we have "extern void foo", make the global have type {} instead of
// type void.
- if (Ty == Type::VoidTy) Ty = StructType::get(NULL, NULL);
+ if (Ty == Type::VoidTy) Ty = Context.getStructType(NULL, NULL);
if (Name[0] == 0) { // Global has no name.
GV = new GlobalVariable(*TheModule, Ty, false,
Modified: llvm-gcc-4.2/trunk/gcc/llvm-convert.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/llvm-convert.cpp?rev=75704&r1=75703&r2=75704&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/llvm-convert.cpp (original)
+++ llvm-gcc-4.2/trunk/gcc/llvm-convert.cpp Tue Jul 14 18:10:12 2009
@@ -72,6 +72,8 @@
extern enum machine_mode reg_raw_mode[FIRST_PSEUDO_REGISTER];
}
+static LLVMContext &Context = getGlobalContext();
+
// Check for GCC bug 17347: C++ FE sometimes creates bogus ctor trees
// which we should throw out
#define BOGUS_CTOR(exp) \
@@ -152,7 +154,7 @@
}
TreeToLLVM::TreeToLLVM(tree fndecl) :
- TD(getTargetData()), Builder(getGlobalContext(), *TheFolder) {
+ TD(getTargetData()), Builder(Context, *TheFolder) {
FnDecl = fndecl;
Fn = 0;
ReturnBB = UnwindBB = 0;
@@ -216,7 +218,7 @@
assert(!BYTES_BIG_ENDIAN && "Unsupported case - please report");
// Do byte wise store because actual argument type does not match LLVMTy.
assert(isa<IntegerType>(ArgVal->getType()) && "Expected an integer value!");
- const Type *StoreType = IntegerType::get(RealSize * 8);
+ const Type *StoreType = Context.getIntegerType(RealSize * 8);
Loc = Builder.CreateBitCast(Loc, StoreType->getPointerTo());
if (ArgVal->getType()->getPrimitiveSizeInBits() >=
StoreType->getPrimitiveSizeInBits())
@@ -226,7 +228,7 @@
Builder.CreateStore(ArgVal, Loc);
} else {
// This cast only involves pointers, therefore BitCast.
- Loc = Builder.CreateBitCast(Loc, PointerType::getUnqual(LLVMTy));
+ Loc = Builder.CreateBitCast(Loc, Context.getPointerTypeUnqual(LLVMTy));
Builder.CreateStore(ArgVal, Loc);
}
}
@@ -366,7 +368,7 @@
Value *Loc = LocStack.back();
// This cast only involves pointers, therefore BitCast.
- Loc = Builder.CreateBitCast(Loc, PointerType::getUnqual(StructTy));
+ Loc = Builder.CreateBitCast(Loc, Context.getPointerTypeUnqual(StructTy));
Loc = Builder.CreateStructGEP(Loc, FieldNo);
LocStack.push_back(Loc);
@@ -645,12 +647,12 @@
} else {
Value *RetVal = DECL_LLVM(DECL_RESULT(FnDecl));
if (const StructType *STy = dyn_cast<StructType>(Fn->getReturnType())) {
- Value *R1 = BitCastToType(RetVal, PointerType::getUnqual(STy));
+ Value *R1 = BitCastToType(RetVal, Context.getPointerTypeUnqual(STy));
llvm::Value *Idxs[2];
- Idxs[0] = ConstantInt::get(llvm::Type::Int32Ty, 0);
+ Idxs[0] = Context.getConstantInt(llvm::Type::Int32Ty, 0);
for (unsigned ri = 0; ri < STy->getNumElements(); ++ri) {
- Idxs[1] = ConstantInt::get(llvm::Type::Int32Ty, ri);
+ Idxs[1] = Context.getConstantInt(llvm::Type::Int32Ty, ri);
Value *GEP = Builder.CreateGEP(R1, Idxs, Idxs+2, "mrv_gep");
Value *E = Builder.CreateLoad(GEP, "mrv");
RetVals.push_back(E);
@@ -662,12 +664,13 @@
// pointer and loading. The load does not necessarily start at the
// beginning of the aggregate (x86-64).
if (ReturnOffset) {
- RetVal = BitCastToType(RetVal, PointerType::getUnqual(Type::Int8Ty));
+ RetVal = BitCastToType(RetVal,
+ Context.getPointerTypeUnqual(Type::Int8Ty));
RetVal = Builder.CreateGEP(RetVal,
- ConstantInt::get(TD.getIntPtrType(), ReturnOffset));
+ Context.getConstantInt(TD.getIntPtrType(), ReturnOffset));
}
RetVal = BitCastToType(RetVal,
- PointerType::getUnqual(Fn->getReturnType()));
+ Context.getPointerTypeUnqual(Fn->getReturnType()));
RetVal = Builder.CreateLoad(RetVal, "retval");
RetVals.push_back(RetVal);
}
@@ -1146,13 +1149,13 @@
// it is dead. This allows us to insert allocas in order without having to
// scan for an insertion point. Use BitCast for int -> int
AllocaInsertionPoint = CastInst::Create(Instruction::BitCast,
- getGlobalContext().getNullValue(Type::Int32Ty),
+ Context.getNullValue(Type::Int32Ty),
Type::Int32Ty, "alloca point");
// Insert it as the first instruction in the entry block.
Fn->begin()->getInstList().insert(Fn->begin()->begin(),
AllocaInsertionPoint);
}
- return new AllocaInst(Ty, 0, "memtmp", AllocaInsertionPoint);
+ return new AllocaInst(Context, Ty, 0, "memtmp", AllocaInsertionPoint);
}
/// CreateTempLoc - Like CreateTemporary, but returns a MemRef.
@@ -1295,8 +1298,10 @@
!TheTypeConverter->GCCTypeOverlapsWithLLVMTypePadding(type, LLVMTy) &&
// Don't copy tons of tiny elements.
CountAggregateElements(LLVMTy) <= 8) {
- DestLoc.Ptr = BitCastToType(DestLoc.Ptr, PointerType::getUnqual(LLVMTy));
- SrcLoc.Ptr = BitCastToType(SrcLoc.Ptr, PointerType::getUnqual(LLVMTy));
+ DestLoc.Ptr = BitCastToType(DestLoc.Ptr,
+ Context.getPointerTypeUnqual(LLVMTy));
+ SrcLoc.Ptr = BitCastToType(SrcLoc.Ptr,
+ Context.getPointerTypeUnqual(LLVMTy));
CopyAggregate(DestLoc, SrcLoc, Builder, type);
return;
}
@@ -1313,7 +1318,7 @@
const Type *ElTy =
cast<PointerType>(DestLoc.Ptr->getType())->getElementType();
if (ElTy->isSingleValueType()) {
- StoreInst *St = Builder.CreateStore(getGlobalContext().getNullValue(ElTy),
+ StoreInst *St = Builder.CreateStore(Context.getNullValue(ElTy),
DestLoc.Ptr, DestLoc.Volatile);
St->setAlignment(DestLoc.getAlignment());
} else if (const StructType *STy = dyn_cast<StructType>(ElTy)) {
@@ -1348,25 +1353,26 @@
if (!TheTypeConverter->GCCTypeOverlapsWithLLVMTypePadding(type, LLVMTy) &&
// Don't zero tons of tiny elements.
CountAggregateElements(LLVMTy) <= 8) {
- DestLoc.Ptr = BitCastToType(DestLoc.Ptr, PointerType::getUnqual(LLVMTy));
+ DestLoc.Ptr = BitCastToType(DestLoc.Ptr,
+ Context.getPointerTypeUnqual(LLVMTy));
ZeroAggregate(DestLoc, Builder);
return;
}
}
- EmitMemSet(DestLoc.Ptr, ConstantInt::get(Type::Int8Ty, 0),
+ EmitMemSet(DestLoc.Ptr, Context.getConstantInt(Type::Int8Ty, 0),
Emit(TYPE_SIZE_UNIT(type), 0), DestLoc.getAlignment());
}
Value *TreeToLLVM::EmitMemCpy(Value *DestPtr, Value *SrcPtr, Value *Size,
unsigned Align) {
- const Type *SBP = PointerType::getUnqual(Type::Int8Ty);
+ const Type *SBP = Context.getPointerTypeUnqual(Type::Int8Ty);
const Type *IntPtr = TD.getIntPtrType();
Value *Ops[4] = {
BitCastToType(DestPtr, SBP),
BitCastToType(SrcPtr, SBP),
CastToSIntType(Size, IntPtr),
- ConstantInt::get(Type::Int32Ty, Align)
+ Context.getConstantInt(Type::Int32Ty, Align)
};
Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::memcpy,
@@ -1376,13 +1382,13 @@
Value *TreeToLLVM::EmitMemMove(Value *DestPtr, Value *SrcPtr, Value *Size,
unsigned Align) {
- const Type *SBP = PointerType::getUnqual(Type::Int8Ty);
+ const Type *SBP = Context.getPointerTypeUnqual(Type::Int8Ty);
const Type *IntPtr = TD.getIntPtrType();
Value *Ops[4] = {
BitCastToType(DestPtr, SBP),
BitCastToType(SrcPtr, SBP),
CastToSIntType(Size, IntPtr),
- ConstantInt::get(Type::Int32Ty, Align)
+ Context.getConstantInt(Type::Int32Ty, Align)
};
Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::memmove,
@@ -1392,13 +1398,13 @@
Value *TreeToLLVM::EmitMemSet(Value *DestPtr, Value *SrcVal, Value *Size,
unsigned Align) {
- const Type *SBP = PointerType::getUnqual(Type::Int8Ty);
+ const Type *SBP = Context.getPointerTypeUnqual(Type::Int8Ty);
const Type *IntPtr = TD.getIntPtrType();
Value *Ops[4] = {
BitCastToType(DestPtr, SBP),
CastToSIntType(SrcVal, Type::Int8Ty),
CastToSIntType(Size, IntPtr),
- ConstantInt::get(Type::Int32Ty, Align)
+ Context.getConstantInt(Type::Int32Ty, Align)
};
Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::memset,
@@ -1417,12 +1423,12 @@
// The idea is that it's a pointer to type "Value"
// which is opaque* but the routine expects i8** and i8*.
- const PointerType *Ty = PointerType::getUnqual(Type::Int8Ty);
- V = Builder.CreateBitCast(V, PointerType::getUnqual(Ty));
+ const PointerType *Ty = Context.getPointerTypeUnqual(Type::Int8Ty);
+ V = Builder.CreateBitCast(V, Context.getPointerTypeUnqual(Ty));
Value *Ops[2] = {
V,
- ConstantPointerNull::get(Ty)
+ Context.getConstantPointerNull(Ty)
};
Builder.CreateCall(gcrootFun, Ops, Ops+2);
@@ -1441,9 +1447,10 @@
Intrinsic::var_annotation);
// Get file and line number
- Constant *lineNo = ConstantInt::get(Type::Int32Ty, DECL_SOURCE_LINE(decl));
+ Constant *lineNo =
+ Context.getConstantInt(Type::Int32Ty, DECL_SOURCE_LINE(decl));
Constant *file = ConvertMetadataStringToGV(DECL_SOURCE_FILE(decl));
- const Type *SBP= PointerType::getUnqual(Type::Int8Ty);
+ const Type *SBP= Context.getPointerTypeUnqual(Type::Int8Ty);
file = Builder.getFolder().CreateBitCast(file, SBP);
// There may be multiple annotate attributes. Pass return of lookup_attr
@@ -1463,7 +1470,7 @@
// Assert its a string, and then get that string.
assert(TREE_CODE(val) == STRING_CST &&
"Annotate attribute arg should always be a string");
- const Type *SBP = PointerType::getUnqual(Type::Int8Ty);
+ const Type *SBP = Context.getPointerTypeUnqual(Type::Int8Ty);
Constant *strGV = TreeConstantToLLVM::EmitLV_STRING_CST(val);
Value *Ops[4] = {
BitCastToType(V, SBP),
@@ -1602,7 +1609,7 @@
// before initialization doesn't get garbage results to follow.
const Type *T = cast<PointerType>(AI->getType())->getElementType();
EmitTypeGcroot(AI, decl);
- Builder.CreateStore(getGlobalContext().getNullValue(T), AI);
+ Builder.CreateStore(Context.getNullValue(T), AI);
}
if (TheDebugInfo) {
@@ -1631,7 +1638,7 @@
// Assign the new ID, update AddressTakenBBNumbers to remember it.
uint64_t BlockNo = ++NumAddressTakenBlocks;
BlockNo &= ~0ULL >> (64-TD.getPointerSizeInBits());
- Val = ConstantInt::get(TD.getIntPtrType(), BlockNo);
+ Val = Context.getConstantInt(TD.getIntPtrType(), BlockNo);
// Add it to the switch statement in the indirect goto block.
cast<SwitchInst>(getIndirectGotoBlock()->getTerminator())->addCase(Val, BB);
@@ -1837,12 +1844,12 @@
SI->addCase(LowC, Dest);
if (LowC == HighC) break; // Emitted the last one.
CurrentValue++;
- LowC = ConstantInt::get(CurrentValue);
+ LowC = Context.getConstantInt(CurrentValue);
}
} else {
// The range is too big to add to the switch - emit an "if".
Value *Diff = Builder.CreateSub(SwitchExp, LowC);
- Value *Cond = Builder.CreateICmpULE(Diff, ConstantInt::get(Range));
+ Value *Cond = Builder.CreateICmpULE(Diff, Context.getConstantInt(Range));
BasicBlock *False_Block = BasicBlock::Create("case_false");
Builder.CreateCondBr(Cond, Dest, False_Block);
EmitBlock(False_Block);
@@ -1870,7 +1877,7 @@
const Type *IntPtr = TD.getIntPtrType();
- ExceptionValue = CreateTemporary(PointerType::getUnqual(Type::Int8Ty));
+ ExceptionValue = CreateTemporary(Context.getPointerTypeUnqual(Type::Int8Ty));
ExceptionValue->setName("eh_exception");
ExceptionSelectorValue = CreateTemporary(IntPtr);
@@ -1932,7 +1939,7 @@
assert(llvm_eh_personality_libfunc
&& "no exception handling personality function!");
Args.push_back(BitCastToType(DECL_LLVM(llvm_eh_personality_libfunc),
- PointerType::getUnqual(Type::Int8Ty)));
+ Context.getPointerTypeUnqual(Type::Int8Ty)));
// Add selections for each handler.
foreach_reachable_handler(i, false, AddHandler, &Handlers);
@@ -1950,7 +1957,7 @@
tree TypeList = get_eh_type_list(region);
unsigned Length = list_length(TypeList);
Args.reserve(Args.size() + Length + 1);
- Args.push_back(ConstantInt::get(Type::Int32Ty, Length + 1));
+ Args.push_back(Context.getConstantInt(Type::Int32Ty, Length + 1));
// Add the type infos.
for (; TypeList; TypeList = TREE_CHAIN(TypeList)) {
@@ -1964,7 +1971,7 @@
if (!TypeList) {
// Catch-all - push a null pointer.
Args.push_back(
- getGlobalContext().getNullValue(PointerType::getUnqual(Type::Int8Ty))
+ Context.getNullValue(Context.getPointerTypeUnqual(Type::Int8Ty))
);
} else {
// Add the type infos.
@@ -1985,13 +1992,13 @@
Value *Catch_All;
if (!lang_eh_catch_all) {
// Use a "cleanup" - this should be good enough for most languages.
- Catch_All = ConstantInt::get(Type::Int32Ty, 0);
+ Catch_All = Context.getConstantInt(Type::Int32Ty, 0);
} else {
tree catch_all_type = lang_eh_catch_all();
if (catch_all_type == NULL_TREE)
// Use a C++ style null catch-all object.
- Catch_All = getGlobalContext().getNullValue(
- PointerType::getUnqual(Type::Int8Ty));
+ Catch_All = Context.getNullValue(
+ Context.getPointerTypeUnqual(Type::Int8Ty));
else
// This language has a type that catches all others.
Catch_All = Emit(catch_all_type, 0);
@@ -2041,7 +2048,7 @@
Value *Select = Builder.CreateLoad(ExceptionSelectorValue);
// Compare with the filter action value.
- Value *Zero = ConstantInt::get(Select->getType(), 0);
+ Value *Zero = Context.getConstantInt(Select->getType(), 0);
Value *Compare = Builder.CreateICmpSLT(Select, Zero);
// Branch on the compare.
@@ -2055,7 +2062,8 @@
Value *Cond = NULL;
for (; TypeList; TypeList = TREE_CHAIN (TypeList)) {
Value *TType = Emit(lookup_type_for_runtime(TREE_VALUE(TypeList)), 0);
- TType = BitCastToType(TType, PointerType::getUnqual(Type::Int8Ty));
+ TType = BitCastToType(TType,
+ Context.getPointerTypeUnqual(Type::Int8Ty));
// Call get eh type id.
Value *TypeID = Builder.CreateCall(FuncEHGetTypeID, TType, "eh_typeid");
@@ -2182,7 +2190,7 @@
if (!LV.isBitfield()) {
if (!DestLoc) {
// Scalar value: emit a load.
- Value *Ptr = BitCastToType(LV.Ptr, PointerType::getUnqual(Ty));
+ Value *Ptr = BitCastToType(LV.Ptr, Context.getPointerTypeUnqual(Ty));
LoadInst *LI = Builder.CreateLoad(Ptr, isVolatile);
LI->setAlignment(Alignment);
return LI;
@@ -2194,7 +2202,7 @@
} else {
// This is a bitfield reference.
if (!LV.BitSize)
- return getGlobalContext().getNullValue(Ty);
+ return Context.getNullValue(Ty);
const Type *ValTy = cast<PointerType>(LV.Ptr->getType())->getElementType();
unsigned ValSizeInBits = ValTy->getPrimitiveSizeInBits();
@@ -2219,7 +2227,8 @@
ThisLastBitPlusOne = LV.BitStart+LV.BitSize;
Value *Ptr = Index ?
- Builder.CreateGEP(LV.Ptr, ConstantInt::get(Type::Int32Ty, Index)) :
+ Builder.CreateGEP(LV.Ptr,
+ Context.getConstantInt(Type::Int32Ty, Index)) :
LV.Ptr;
LoadInst *LI = Builder.CreateLoad(Ptr, isVolatile);
LI->setAlignment(Alignment);
@@ -2236,7 +2245,7 @@
// expression.
if (FirstBitInVal+BitsInVal != ValSizeInBits) {
- Value *ShAmt = ConstantInt::get(ValTy, ValSizeInBits -
+ Value *ShAmt = Context.getConstantInt(ValTy, ValSizeInBits -
(FirstBitInVal+BitsInVal));
Val = Builder.CreateShl(Val, ShAmt);
}
@@ -2244,13 +2253,13 @@
// Shift right required?
if (ValSizeInBits != BitsInVal) {
bool AddSignBits = !TYPE_UNSIGNED(TREE_TYPE(exp)) && !Result;
- Value *ShAmt = ConstantInt::get(ValTy, ValSizeInBits-BitsInVal);
+ Value *ShAmt = Context.getConstantInt(ValTy, ValSizeInBits-BitsInVal);
Val = AddSignBits ?
Builder.CreateAShr(Val, ShAmt) : Builder.CreateLShr(Val, ShAmt);
}
if (Result) {
- Value *ShAmt = ConstantInt::get(ValTy, BitsInVal);
+ Value *ShAmt = Context.getConstantInt(ValTy, BitsInVal);
Result = Builder.CreateShl(Result, ShAmt);
Result = Builder.CreateOr(Result, Val);
} else {
@@ -2310,7 +2319,7 @@
// If this is a direct call to a function using a static chain then we need
// to ensure the function type is the one just calculated: it has an extra
// parameter for the chain.
- Callee = BitCastToType(Callee, PointerType::getUnqual(Ty));
+ Callee = BitCastToType(Callee, Context.getPointerTypeUnqual(Ty));
// EmitCall(exp, DestLoc);
Value *Result = EmitCallOf(Callee, exp, DestLoc, PAL);
@@ -2332,12 +2341,12 @@
unsigned RealSize,
LLVMBuilder &Builder) {
if (!RealSize)
- return UndefValue::get(LLVMTy);
+ return Context.getUndef(LLVMTy);
// Not clear what this is supposed to do on big endian machines...
assert(!BYTES_BIG_ENDIAN && "Unsupported case - please report");
assert(isa<IntegerType>(LLVMTy) && "Expected an integer value!");
- const Type *LoadType = IntegerType::get(RealSize * 8);
+ const Type *LoadType = Context.getIntegerType(RealSize * 8);
L = Builder.CreateBitCast(L, LoadType->getPointerTo());
Value *Val = Builder.CreateLoad(L);
if (LoadType->getPrimitiveSizeInBits() >= LLVMTy->getPrimitiveSizeInBits())
@@ -2410,7 +2419,7 @@
Value *Loc = LocStack.back();
if (Loc) {
// An address. Convert to the right type and load the value out.
- Loc = Builder.CreateBitCast(Loc, PointerType::getUnqual(Ty));
+ Loc = Builder.CreateBitCast(Loc, Context.getPointerTypeUnqual(Ty));
return Builder.CreateLoad(Loc, "val");
} else {
// A value - just return it.
@@ -2553,7 +2562,7 @@
/// reference with an additional parameter attribute "ByVal".
void HandleByValArgument(const llvm::Type *LLVMTy, tree type) {
Value *Loc = getAddress();
- assert(PointerType::getUnqual(LLVMTy) == Loc->getType());
+ assert(Context.getPointerTypeUnqual(LLVMTy) == Loc->getType());
CallOperands.push_back(Loc);
}
@@ -2561,7 +2570,7 @@
/// argument is passed as a first class aggregate.
void HandleFCAArgument(const llvm::Type *LLVMTy, tree type) {
Value *Loc = getAddress();
- assert(PointerType::getUnqual(LLVMTy) == Loc->getType());
+ assert(Context.getPointerTypeUnqual(LLVMTy) == Loc->getType());
CallOperands.push_back(Builder.CreateLoad(Loc));
}
@@ -2570,7 +2579,7 @@
/// LLVM Struct, StructTy is the LLVM type of the struct we are entering.
void EnterField(unsigned FieldNo, const llvm::Type *StructTy) {
Value *Loc = getAddress();
- Loc = Builder.CreateBitCast(Loc, PointerType::getUnqual(StructTy));
+ Loc = Builder.CreateBitCast(Loc, Context.getPointerTypeUnqual(StructTy));
pushAddress(Builder.CreateStructGEP(Loc, FieldNo, "elt"));
}
void ExitField() {
@@ -2731,7 +2740,7 @@
if (Client.isAggrReturn()) {
Value *Dest = BitCastToType(DestLoc->Ptr,
- PointerType::getUnqual(Call->getType()));
+ Context.getPointerTypeUnqual(Call->getType()));
LLVM_EXTRACT_MULTIPLE_RETURN_VALUE(Call,Dest,DestLoc->Volatile,Builder);
return 0;
}
@@ -2747,11 +2756,11 @@
Value *Ptr = DestLoc->Ptr;
if (Client.Offset) {
- Ptr = BitCastToType(Ptr, PointerType::getUnqual(Type::Int8Ty));
+ Ptr = BitCastToType(Ptr, Context.getPointerTypeUnqual(Type::Int8Ty));
Ptr = Builder.CreateGEP(Ptr,
- ConstantInt::get(TD.getIntPtrType(), Client.Offset));
+ Context.getConstantInt(TD.getIntPtrType(), Client.Offset));
}
- Ptr = BitCastToType(Ptr, PointerType::getUnqual(Call->getType()));
+ Ptr = BitCastToType(Ptr, Context.getPointerTypeUnqual(Call->getType()));
StoreInst *St = Builder.CreateStore(Call, Ptr, DestLoc->Volatile);
St->setAlignment(DestLoc->getAlignment());
return 0;
@@ -2882,7 +2891,8 @@
if (PT->getElementType()->canLosslesslyBitCastTo(RHS->getType()))
RHS = CastToAnyType(RHS, RHSSigned, PT->getElementType(), LHSSigned);
else
- LV.Ptr = BitCastToType(LV.Ptr, PointerType::getUnqual(RHS->getType()));
+ LV.Ptr = BitCastToType(LV.Ptr,
+ Context.getPointerTypeUnqual(RHS->getType()));
StoreInst *SI = Builder.CreateStore(RHS, LV.Ptr, isVolatile);
SI->setAlignment(Alignment);
return RHS;
@@ -2929,7 +2939,7 @@
ThisLastBitPlusOne = LV.BitStart+LV.BitSize;
Value *Ptr = Index ?
- Builder.CreateGEP(LV.Ptr, ConstantInt::get(Type::Int32Ty, Index)) :
+ Builder.CreateGEP(LV.Ptr, Context.getConstantInt(Type::Int32Ty, Index)) :
LV.Ptr;
LoadInst *LI = Builder.CreateLoad(Ptr, isVolatile);
LI->setAlignment(Alignment);
@@ -2944,14 +2954,14 @@
// If not storing into the zero'th bit, shift the Src value to the left.
if (FirstBitInVal) {
- Value *ShAmt = ConstantInt::get(ValTy, FirstBitInVal);
+ Value *ShAmt = Context.getConstantInt(ValTy, FirstBitInVal);
NewVal = Builder.CreateShl(NewVal, ShAmt);
}
// Next, if this doesn't touch the top bit, mask out any bits that shouldn't
// be set in the result.
uint64_t MaskVal = ((1ULL << BitsInVal)-1) << FirstBitInVal;
- Constant *Mask = ConstantInt::get(Type::Int64Ty, MaskVal);
+ Constant *Mask = Context.getConstantInt(Type::Int64Ty, MaskVal);
Mask = Builder.getFolder().CreateTruncOrBitCast(Mask, ValTy);
if (FirstBitInVal+BitsInVal != ValSizeInBits)
@@ -2968,7 +2978,7 @@
SI->setAlignment(Alignment);
if (I + 1 < Strides) {
- Value *ShAmt = ConstantInt::get(ValTy, BitsInVal);
+ Value *ShAmt = Context.getConstantInt(ValTy, BitsInVal);
BitSource = Builder.CreateLShr(BitSource, ShAmt);
}
}
@@ -2994,7 +3004,7 @@
} else if (isAggregateTreeType(TREE_TYPE(Op))) {
// Aggregate to aggregate copy.
MemRef NewLoc = *DestLoc;
- NewLoc.Ptr = BitCastToType(DestLoc->Ptr, PointerType::getUnqual(Ty));
+ NewLoc.Ptr = BitCastToType(DestLoc->Ptr, Context.getPointerTypeUnqual(Ty));
Value *OpVal = Emit(Op, &NewLoc);
assert(OpVal == 0 && "Shouldn't cast scalar to aggregate!");
return 0;
@@ -3003,7 +3013,7 @@
// Scalar to aggregate copy.
Value *OpVal = Emit(Op, 0);
Value *Ptr = BitCastToType(DestLoc->Ptr,
- PointerType::getUnqual(OpVal->getType()));
+ Context.getPointerTypeUnqual(OpVal->getType()));
StoreInst *St = Builder.CreateStore(OpVal, Ptr, DestLoc->Volatile);
St->setAlignment(DestLoc->getAlignment());
return 0;
@@ -3031,7 +3041,7 @@
// Make the destination look like the source type.
const Type *OpTy = ConvertType(TREE_TYPE(Op));
- Target.Ptr = BitCastToType(Target.Ptr, PointerType::getUnqual(OpTy));
+ Target.Ptr = BitCastToType(Target.Ptr, Context.getPointerTypeUnqual(OpTy));
// Needs to be in sync with EmitLV.
switch (TREE_CODE(Op)) {
@@ -3071,7 +3081,7 @@
// Target holds the temporary created above.
const Type *ExpTy = ConvertType(TREE_TYPE(exp));
return Builder.CreateLoad(BitCastToType(Target.Ptr,
- PointerType::getUnqual(ExpTy)));
+ Context.getPointerTypeUnqual(ExpTy)));
}
if (DestLoc) {
@@ -3080,7 +3090,7 @@
Value *OpVal = Emit(Op, 0);
assert(OpVal && "Expected a scalar result!");
Value *Ptr = BitCastToType(DestLoc->Ptr,
- PointerType::getUnqual(OpVal->getType()));
+ Context.getPointerTypeUnqual(OpVal->getType()));
StoreInst *St = Builder.CreateStore(OpVal, Ptr, DestLoc->Volatile);
St->setAlignment(DestLoc->getAlignment());
return 0;
@@ -3169,7 +3179,7 @@
ICmpInst::Predicate pred = TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp, 0))) ?
ICmpInst::ICMP_UGE : ICmpInst::ICMP_SGE;
Value *Cmp = Builder.CreateICmp(pred, Op,
- getGlobalContext().getNullValue(Op->getType()), "abscond");
+ Context.getNullValue(Op->getType()), "abscond");
return Builder.CreateSelect(Cmp, Op, OpN, "abs");
}
@@ -3200,10 +3210,10 @@
if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
unsigned NumElements = VTy->getNumElements();
const Type *EltTy = VTy->getElementType();
- return VectorType::get(IntegerType::get(EltTy->getPrimitiveSizeInBits()),
- NumElements);
+ return Context.getVectorType(
+ Context.getIntegerType(EltTy->getPrimitiveSizeInBits()), NumElements);
}
- return IntegerType::get(Ty->getPrimitiveSizeInBits());
+ return Context.getIntegerType(Ty->getPrimitiveSizeInBits());
}
Value *TreeToLLVM::EmitBIT_NOT_EXPR(tree exp) {
@@ -3226,7 +3236,7 @@
Value *V = Emit(TREE_OPERAND(exp, 0), 0);
if (V->getType() != Type::Int1Ty)
V = Builder.CreateICmpNE(V,
- getGlobalContext().getNullValue(V->getType()), "toBool");
+ Context.getNullValue(V->getType()), "toBool");
V = Builder.CreateNot(V, (V->getName()+"not").c_str());
return CastToUIntType(V, ConvertType(TREE_TYPE(exp)));
}
@@ -3356,7 +3366,7 @@
// If this is a subtract, we want to step backwards.
if (Opc == Instruction::Sub)
EltOffset = -EltOffset;
- Constant *C = ConstantInt::get(Type::Int64Ty, EltOffset);
+ Constant *C = Context.getConstantInt(Type::Int64Ty, EltOffset);
Value *V = Builder.CreateGEP(LHS, C);
return BitCastToType(V, ConvertType(TREE_TYPE(exp)));
}
@@ -3385,10 +3395,10 @@
// This is a truth operation like the strict &&,||,^^. Convert to bool as
// a test against zero
LHS = Builder.CreateICmpNE(LHS,
- getGlobalContext().getNullValue(LHS->getType()),
+ Context.getNullValue(LHS->getType()),
"toBool");
RHS = Builder.CreateICmpNE(RHS,
- getGlobalContext().getNullValue(RHS->getType()),
+ Context.getNullValue(RHS->getType()),
"toBool");
Value *Res = Builder.CreateBinOp((Instruction::BinaryOps)Opc, LHS, RHS);
@@ -3418,7 +3428,8 @@
(Amt->getName()+".cast").c_str());
Value *TypeSize =
- ConstantInt::get(In->getType(), In->getType()->getPrimitiveSizeInBits());
+ Context.getConstantInt(In->getType(),
+ In->getType()->getPrimitiveSizeInBits());
// Do the two shifts.
Value *V1 = Builder.CreateBinOp((Instruction::BinaryOps)Opc1, In, Amt);
@@ -3476,7 +3487,7 @@
if (isPowerOf2_64(IntValue)) {
// Create an ashr instruction, by the log of the division amount.
Value *LHS = Emit(TREE_OPERAND(exp, 0), 0);
- return Builder.CreateAShr(LHS, ConstantInt::get(LHS->getType(),
+ return Builder.CreateAShr(LHS, Context.getConstantInt(LHS->getType(),
Log2_64(IntValue)));
}
}
@@ -3497,7 +3508,7 @@
return EmitBinOp(exp, DestLoc, Instruction::URem);
const Type *Ty = ConvertType(TREE_TYPE(exp));
- Constant *Zero = ConstantInt::get(Ty, 0);
+ Constant *Zero = Context.getConstantInt(Ty, 0);
Value *LHS = Emit(TREE_OPERAND(exp, 0), 0);
Value *RHS = Emit(TREE_OPERAND(exp, 1), 0);
@@ -3528,9 +3539,9 @@
// otherwise.
const Type *Ty = ConvertType(TREE_TYPE(exp));
- Constant *Zero = ConstantInt::get(Ty, 0);
- Constant *One = ConstantInt::get(Ty, 1);
- Constant *MinusOne = getGlobalContext().getAllOnesValue(Ty);
+ Constant *Zero = Context.getConstantInt(Ty, 0);
+ Constant *One = Context.getConstantInt(Ty, 1);
+ Constant *MinusOne = Context.getAllOnesValue(Ty);
Value *LHS = Emit(TREE_OPERAND(exp, 0), 0);
Value *RHS = Emit(TREE_OPERAND(exp, 1), 0);
@@ -3600,9 +3611,9 @@
return Builder.CreateUDiv(LHS, RHS, "fdiv");
const Type *Ty = ConvertType(TREE_TYPE(exp));
- Constant *Zero = ConstantInt::get(Ty, 0);
- Constant *One = ConstantInt::get(Ty, 1);
- Constant *MinusOne = getGlobalContext().getAllOnesValue(Ty);
+ Constant *Zero = Context.getConstantInt(Ty, 0);
+ Constant *One = Context.getConstantInt(Ty, 1);
+ Constant *MinusOne = Context.getAllOnesValue(Ty);
// In the case of signed arithmetic, we calculate FDiv as follows:
// LHS FDiv RHS = (LHS + Sign(RHS) * Offset) Div RHS - Offset,
@@ -3646,8 +3657,8 @@
// we are doing signed or unsigned arithmetic.
const Type *Ty = ConvertType(TREE_TYPE(exp));
- Constant *Zero = ConstantInt::get(Ty, 0);
- Constant *Two = ConstantInt::get(Ty, 2);
+ Constant *Zero = Context.getConstantInt(Ty, 0);
+ Constant *Two = Context.getConstantInt(Ty, 2);
Value *LHS = Emit(TREE_OPERAND(exp, 0), 0);
Value *RHS = Emit(TREE_OPERAND(exp, 1), 0);
@@ -3777,12 +3788,13 @@
// If there was an error, return something bogus.
if (ValidateRegisterVariable(decl)) {
if (Ty->isSingleValueType())
- return UndefValue::get(Ty);
+ return Context.getUndef(Ty);
return 0; // Just don't copy something into DestLoc.
}
// Turn this into a 'tmp = call Ty asm "", "={reg}"()'.
- FunctionType *FTy = FunctionType::get(Ty, std::vector<const Type*>(),false);
+ FunctionType *FTy =
+ Context.getFunctionType(Ty, std::vector<const Type*>(),false);
const char *Name = extractRegisterName(decl);
InlineAsm *IA = InlineAsm::get(FTy, "", "={"+std::string(Name)+"}", false);
@@ -3801,7 +3813,7 @@
// Turn this into a 'call void asm sideeffect "", "{reg}"(Ty %RHS)'.
std::vector<const Type*> ArgTys;
ArgTys.push_back(ConvertType(TREE_TYPE(decl)));
- FunctionType *FTy = FunctionType::get(Type::VoidTy, ArgTys, false);
+ FunctionType *FTy = Context.getFunctionType(Type::VoidTy, ArgTys, false);
const char *Name = extractRegisterName(decl);
InlineAsm *IA = InlineAsm::get(FTy, "", "{"+std::string(Name)+"}", true);
@@ -4371,9 +4383,9 @@
uint64_t TySize = TD.getTypeSizeInBits(LLVMTy);
if (TySize == 1 || TySize == 8 || TySize == 16 ||
TySize == 32 || TySize == 64) {
- LLVMTy = IntegerType::get(TySize);
+ LLVMTy = Context.getIntegerType(TySize);
Op = Builder.CreateLoad(BitCastToType(LV.Ptr,
- PointerType::getUnqual(LLVMTy)));
+ Context.getPointerTypeUnqual(LLVMTy)));
} else {
// Otherwise, emit our value as a lvalue and let the codegen deal with
// it.
@@ -4415,7 +4427,7 @@
Op = CastToAnyType(Op, !TYPE_UNSIGNED(type),
OTy, CallResultIsSigned[Match]);
if (BYTES_BIG_ENDIAN) {
- Constant *ShAmt = ConstantInt::get(Op->getType(),
+ Constant *ShAmt = Context.getConstantInt(Op->getType(),
OTyBits-OpTyBits);
Op = Builder.CreateLShr(Op, ShAmt);
}
@@ -4505,12 +4517,12 @@
default:
std::vector<const Type*> TmpVec(CallResultTypes.begin(),
CallResultTypes.end());
- CallResultType = StructType::get(TmpVec);
+ CallResultType = Context.getStructType(TmpVec);
break;
}
const FunctionType *FTy =
- FunctionType::get(CallResultType, CallArgTypes, false);
+ Context.getFunctionType(CallResultType, CallArgTypes, false);
// Remove the leading comma if we have operands.
if (!ConstraintStr.empty())
@@ -4567,16 +4579,16 @@
std::vector<Constant*> CstOps;
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
CstOps.push_back(cast<Constant>(Ops[i]));
- return ConstantVector::get(CstOps);
+ return Context.getConstantVector(CstOps);
}
// Otherwise, insertelement the values to build the vector.
Value *Result =
- UndefValue::get(VectorType::get(Ops[0]->getType(), Ops.size()));
+ Context.getUndef(Context.getVectorType(Ops[0]->getType(), Ops.size()));
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
Result = Builder.CreateInsertElement(Result, Ops[i],
- ConstantInt::get(Type::Int32Ty, i));
+ Context.getConstantInt(Type::Int32Ty, i));
return Result;
}
@@ -4616,16 +4628,17 @@
for (unsigned i = 0; i != NumElements; ++i) {
int idx = va_arg(VA, int);
if (idx == -1)
- Idxs.push_back(UndefValue::get(Type::Int32Ty));
+ Idxs.push_back(Context.getUndef(Type::Int32Ty));
else {
assert((unsigned)idx < 2*NumElements && "Element index out of range!");
- Idxs.push_back(ConstantInt::get(Type::Int32Ty, idx));
+ Idxs.push_back(Context.getConstantInt(Type::Int32Ty, idx));
}
}
va_end(VA);
// Turn this into the appropriate shuffle operation.
- return Builder.CreateShuffleVector(InVec1, InVec2, ConstantVector::get(Idxs));
+ return Builder.CreateShuffleVector(InVec1, InVec2,
+ Context.getConstantVector(Idxs));
}
//===----------------------------------------------------------------------===//
@@ -4666,12 +4679,12 @@
void TreeToLLVM::EmitMemoryBarrier(bool ll, bool ls, bool sl, bool ss) {
Value* C[5];
- C[0] = ConstantInt::get(Type::Int1Ty, ll);
- C[1] = ConstantInt::get(Type::Int1Ty, ls);
- C[2] = ConstantInt::get(Type::Int1Ty, sl);
- C[3] = ConstantInt::get(Type::Int1Ty, ss);
+ C[0] = Context.getConstantInt(Type::Int1Ty, ll);
+ C[1] = Context.getConstantInt(Type::Int1Ty, ls);
+ C[2] = Context.getConstantInt(Type::Int1Ty, sl);
+ C[3] = Context.getConstantInt(Type::Int1Ty, ss);
// Be conservatively safe.
- C[4] = ConstantInt::get(Type::Int1Ty, true);
+ C[4] = Context.getConstantInt(Type::Int1Ty, true);
Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
Intrinsic::memory_barrier),
@@ -4688,7 +4701,7 @@
};
const Type* Ty[2];
Ty[0] = ResultTy;
- Ty[1] = PointerType::getUnqual(ResultTy);
+ Ty[1] = Context.getPointerTypeUnqual(ResultTy);
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
C[1] = Builder.CreateIntCast(C[1], Ty[0], "cast");
// The gcc builtins are also full memory barriers.
@@ -4713,7 +4726,7 @@
};
const Type* Ty[2];
Ty[0] = ResultTy;
- Ty[1] = PointerType::getUnqual(ResultTy);
+ Ty[1] = Context.getPointerTypeUnqual(ResultTy);
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
C[1] = Builder.CreateIntCast(C[1], Ty[0], "cast");
C[2] = Builder.CreateIntCast(C[2], Ty[0], "cast");
@@ -4769,7 +4782,7 @@
BuiltinName);
const Type *ResTy = ConvertType(TREE_TYPE(exp));
if (ResTy->isSingleValueType())
- Result = UndefValue::get(ResTy);
+ Result = Context.getUndef(ResTy);
return true;
}
@@ -4854,9 +4867,9 @@
// This treats everything as unknown, and is minimally defensible as
// correct, although completely useless.
if (tree_low_cst (ObjSizeTree, 0) < 2)
- Result = getGlobalContext().getAllOnesValue(TD.getIntPtrType());
+ Result = Context.getAllOnesValue(TD.getIntPtrType());
else
- Result = ConstantInt::get(TD.getIntPtrType(), 0);
+ Result = Context.getConstantInt(TD.getIntPtrType(), 0);
return true;
}
// Unary bit counting intrinsics.
@@ -4886,7 +4899,7 @@
Value *Amt = Emit(TREE_VALUE(TREE_OPERAND(exp, 1)), 0);
EmitBuiltinUnaryOp(Amt, Result, Intrinsic::ctpop);
Result = Builder.CreateBinOp(Instruction::And, Result,
- ConstantInt::get(Result->getType(), 1));
+ Context.getConstantInt(Result->getType(), 1));
return true;
}
case BUILT_IN_POPCOUNT: // These GCC builtins always return int.
@@ -4993,13 +5006,14 @@
// the ffs, but should ignore the return type of ffs.
Value *Amt = Emit(TREE_VALUE(TREE_OPERAND(exp, 1)), 0);
EmitBuiltinUnaryOp(Amt, Result, Intrinsic::cttz);
- Result = Builder.CreateAdd(Result, ConstantInt::get(Result->getType(), 1));
+ Result = Builder.CreateAdd(Result,
+ Context.getConstantInt(Result->getType(), 1));
Result = CastToUIntType(Result, ConvertType(TREE_TYPE(exp)));
Value *Cond =
Builder.CreateICmpEQ(Amt,
- getGlobalContext().getNullValue(Amt->getType()));
+ Context.getNullValue(Amt->getType()));
Result = Builder.CreateSelect(Cond,
- getGlobalContext().getNullValue(Result->getType()),
+ Context.getNullValue(Result->getType()),
Result);
return true;
}
@@ -5022,9 +5036,9 @@
// Get file and line number
location_t locus = EXPR_LOCATION (exp);
- Constant *lineNo = ConstantInt::get(Type::Int32Ty, locus.line);
+ Constant *lineNo = Context.getConstantInt(Type::Int32Ty, locus.line);
Constant *file = ConvertMetadataStringToGV(locus.file);
- const Type *SBP= PointerType::getUnqual(Type::Int8Ty);
+ const Type *SBP= Context.getPointerTypeUnqual(Type::Int8Ty);
file = Builder.getFolder().CreateBitCast(file, SBP);
// Get arguments.
@@ -5051,8 +5065,8 @@
case BUILT_IN_SYNCHRONIZE: {
// We assume like gcc appears to, that this only applies to cached memory.
Value* C[5];
- C[0] = C[1] = C[2] = C[3] = ConstantInt::get(Type::Int1Ty, 1);
- C[4] = ConstantInt::get(Type::Int1Ty, 0);
+ C[0] = C[1] = C[2] = C[3] = Context.getConstantInt(Type::Int1Ty, 1);
+ C[4] = Context.getConstantInt(Type::Int1Ty, 0);
Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
Intrinsic::memory_barrier),
@@ -5197,7 +5211,7 @@
};
const Type* Ty[2];
Ty[0] = ResultTy;
- Ty[1] = PointerType::getUnqual(ResultTy);
+ Ty[1] = Context.getPointerTypeUnqual(ResultTy);
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
C[1] = Builder.CreateIntCast(C[1], Ty[0], "cast");
@@ -5235,7 +5249,7 @@
};
const Type* Ty[2];
Ty[0] = ResultTy;
- Ty[1] = PointerType::getUnqual(ResultTy);
+ Ty[1] = Context.getPointerTypeUnqual(ResultTy);
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
C[1] = Builder.CreateIntCast(C[1], Ty[0], "cast");
@@ -5273,7 +5287,7 @@
};
const Type* Ty[2];
Ty[0] = ResultTy;
- Ty[1] = PointerType::getUnqual(ResultTy);
+ Ty[1] = Context.getPointerTypeUnqual(ResultTy);
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
C[1] = Builder.CreateIntCast(C[1], Ty[0], "cast");
@@ -5311,7 +5325,7 @@
};
const Type* Ty[2];
Ty[0] = ResultTy;
- Ty[1] = PointerType::getUnqual(ResultTy);
+ Ty[1] = Context.getPointerTypeUnqual(ResultTy);
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
C[1] = Builder.CreateIntCast(C[1], Ty[0], "cast");
@@ -5349,7 +5363,7 @@
};
const Type* Ty[2];
Ty[0] = ResultTy;
- Ty[1] = PointerType::getUnqual(ResultTy);
+ Ty[1] = Context.getPointerTypeUnqual(ResultTy);
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
C[1] = Builder.CreateIntCast(C[1], Ty[0], "cast");
@@ -5387,7 +5401,7 @@
};
const Type* Ty[2];
Ty[0] = ResultTy;
- Ty[1] = PointerType::getUnqual(ResultTy);
+ Ty[1] = Context.getPointerTypeUnqual(ResultTy);
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
C[1] = Builder.CreateIntCast(C[1], Ty[0], "cast");
@@ -5477,7 +5491,7 @@
{
const Type *Ty = ConvertType(TREE_TYPE(exp));
if (Ty != Type::VoidTy)
- Result = getGlobalContext().getNullValue(Ty);
+ Result = Context.getNullValue(Ty);
return true;
}
#endif // FIXME: Should handle these GCC extensions eventually.
@@ -5542,7 +5556,7 @@
}
bool TreeToLLVM::EmitBuiltinConstantP(tree exp, Value *&Result) {
- Result = getGlobalContext().getNullValue(ConvertType(TREE_TYPE(exp)));
+ Result = Context.getNullValue(ConvertType(TREE_TYPE(exp)));
return true;
}
@@ -5657,7 +5671,7 @@
unsigned DstAlign = getPointerAlignment(Dst);
Value *DstV = Emit(Dst, 0);
- Value *Val = getGlobalContext().getNullValue(Type::Int32Ty);
+ Value *Val = Context.getNullValue(Type::Int32Ty);
Value *Len = Emit(TREE_VALUE(TREE_CHAIN(arglist)), 0);
EmitMemSet(DstV, Val, Len, DstAlign);
return true;
@@ -5703,11 +5717,11 @@
// Default to highly local read.
if (ReadWrite == 0)
- ReadWrite = getGlobalContext().getNullValue(Type::Int32Ty);
+ ReadWrite = Context.getNullValue(Type::Int32Ty);
if (Locality == 0)
- Locality = ConstantInt::get(Type::Int32Ty, 3);
+ Locality = Context.getConstantInt(Type::Int32Ty, 3);
- Ptr = BitCastToType(Ptr, PointerType::getUnqual(Type::Int8Ty));
+ Ptr = BitCastToType(Ptr, Context.getPointerTypeUnqual(Type::Int8Ty));
Value *Ops[3] = { Ptr, ReadWrite, Locality };
Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::prefetch),
@@ -5750,7 +5764,7 @@
// Unfortunately, these constants are defined as RTL expressions and
// should be handled separately.
- Result = BitCastToType(Ptr, PointerType::getUnqual(Type::Int8Ty));
+ Result = BitCastToType(Ptr, Context.getPointerTypeUnqual(Type::Int8Ty));
return true;
}
@@ -5766,7 +5780,7 @@
// needed for: MIPS, Sparc. Unfortunately, these constants are defined
// as RTL expressions and should be handled separately.
- Result = BitCastToType(Ptr, PointerType::getUnqual(Type::Int8Ty));
+ Result = BitCastToType(Ptr, Context.getPointerTypeUnqual(Type::Int8Ty));
return true;
}
@@ -5817,7 +5831,7 @@
// FIXME: is i32 always enough here?
Result = Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
Intrinsic::eh_dwarf_cfa),
- ConstantInt::get(Type::Int32Ty, cfa_offset));
+ Context.getConstantInt(Type::Int32Ty, cfa_offset));
return true;
}
@@ -5827,7 +5841,7 @@
return false;
unsigned int dwarf_regnum = DWARF_FRAME_REGNUM(STACK_POINTER_REGNUM);
- Result = ConstantInt::get(ConvertType(TREE_TYPE(exp)), dwarf_regnum);
+ Result = Context.getConstantInt(ConvertType(TREE_TYPE(exp)), dwarf_regnum);
return true;
}
@@ -5854,7 +5868,7 @@
iwhich = DWARF_FRAME_REGNUM (iwhich);
- Result = ConstantInt::get(ConvertType(TREE_TYPE(exp)), iwhich);
+ Result = Context.getConstantInt(ConvertType(TREE_TYPE(exp)), iwhich);
#endif
return true;
@@ -5874,7 +5888,7 @@
Intrinsic::eh_return_i32 : Intrinsic::eh_return_i64);
Offset = Builder.CreateIntCast(Offset, IntPtr, true);
- Handler = BitCastToType(Handler, PointerType::getUnqual(Type::Int8Ty));
+ Handler = BitCastToType(Handler, Context.getPointerTypeUnqual(Type::Int8Ty));
SmallVector<Value *, 2> Args;
Args.push_back(Offset);
@@ -5903,7 +5917,7 @@
}
Value *Addr = BitCastToType(Emit(TREE_VALUE(arglist), 0),
- PointerType::getUnqual(Type::Int8Ty));
+ Context.getPointerTypeUnqual(Type::Int8Ty));
Constant *Size, *Idx;
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) {
@@ -5924,21 +5938,21 @@
if (rnum < 0)
continue;
- Size = ConstantInt::get(Type::Int8Ty, size);
- Idx = ConstantInt::get(Type::Int32Ty, rnum);
+ Size = Context.getConstantInt(Type::Int8Ty, size);
+ Idx = Context.getConstantInt(Type::Int32Ty, rnum);
Builder.CreateStore(Size, Builder.CreateGEP(Addr, Idx), false);
}
}
if (!wrote_return_column) {
- Size = ConstantInt::get(Type::Int8Ty, GET_MODE_SIZE (Pmode));
- Idx = ConstantInt::get(Type::Int32Ty, DWARF_FRAME_RETURN_COLUMN);
+ Size = Context.getConstantInt(Type::Int8Ty, GET_MODE_SIZE (Pmode));
+ Idx = Context.getConstantInt(Type::Int32Ty, DWARF_FRAME_RETURN_COLUMN);
Builder.CreateStore(Size, Builder.CreateGEP(Addr, Idx), false);
}
#ifdef DWARF_ALT_FRAME_RETURN_COLUMN
- Size = ConstantInt::get(Type::Int8Ty, GET_MODE_SIZE (Pmode));
- Idx = ConstantInt::get(Type::Int32Ty, DWARF_ALT_FRAME_RETURN_COLUMN);
+ Size = Context.getConstantInt(Type::Int8Ty, GET_MODE_SIZE (Pmode));
+ Idx = Context.getConstantInt(Type::Int32Ty, DWARF_ALT_FRAME_RETURN_COLUMN);
Builder.CreateStore(Size, Builder.CreateGEP(Addr, Idx), false);
#endif
@@ -5965,7 +5979,7 @@
return false;
Value *Ptr = Emit(TREE_VALUE(arglist), 0);
- Ptr = BitCastToType(Ptr, PointerType::getUnqual(Type::Int8Ty));
+ Ptr = BitCastToType(Ptr, Context.getPointerTypeUnqual(Type::Int8Ty));
Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
Intrinsic::stackrestore), Ptr);
@@ -6019,14 +6033,14 @@
Intrinsic::vastart);
const Type *FTy =
cast<PointerType>(llvm_va_start_fn->getType())->getElementType();
- ArgVal = BitCastToType(ArgVal, PointerType::getUnqual(Type::Int8Ty));
+ ArgVal = BitCastToType(ArgVal, Context.getPointerTypeUnqual(Type::Int8Ty));
Builder.CreateCall(llvm_va_start_fn, ArgVal);
return true;
}
bool TreeToLLVM::EmitBuiltinVAEnd(tree exp) {
Value *Arg = Emit(TREE_VALUE(TREE_OPERAND(exp, 1)), 0);
- Arg = BitCastToType(Arg, PointerType::getUnqual(Type::Int8Ty));
+ Arg = BitCastToType(Arg, Context.getPointerTypeUnqual(Type::Int8Ty));
Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::vaend),
Arg);
return true;
@@ -6051,7 +6065,7 @@
Arg2 = Emit(Arg2T, 0);
}
- static const Type *VPTy = PointerType::getUnqual(Type::Int8Ty);
+ static const Type *VPTy = Context.getPointerTypeUnqual(Type::Int8Ty);
// FIXME: This ignores alignment and volatility of the arguments.
SmallVector<Value *, 2> Args;
@@ -6069,7 +6083,7 @@
VOID_TYPE))
return false;
- static const Type *VPTy = PointerType::getUnqual(Type::Int8Ty);
+ static const Type *VPTy = Context.getPointerTypeUnqual(Type::Int8Ty);
Value *Tramp = Emit(TREE_VALUE(arglist), 0);
Tramp = BitCastToType(Tramp, VPTy);
@@ -6264,7 +6278,7 @@
tree AnnotateAttr = lookup_attribute("annotate", DECL_ATTRIBUTES(FieldDecl));
const Type *OrigPtrTy = FieldPtr->getType();
- const Type *SBP = PointerType::getUnqual(Type::Int8Ty);
+ const Type *SBP = Context.getPointerTypeUnqual(Type::Int8Ty);
Function *Fn = Intrinsic::getDeclaration(TheModule,
Intrinsic::ptr_annotation,
@@ -6272,7 +6286,7 @@
// Get file and line number. FIXME: Should this be for the decl or the
// use. Is there a location info for the use?
- Constant *LineNo = ConstantInt::get(Type::Int32Ty,
+ Constant *LineNo = Context.getConstantInt(Type::Int32Ty,
DECL_SOURCE_LINE(FieldDecl));
Constant *File = ConvertMetadataStringToGV(DECL_SOURCE_FILE(FieldDecl));
@@ -6376,14 +6390,14 @@
if (isSequentialCompatible(ArrayTreeType)) {
SmallVector<Value*, 2> Idx;
if (TREE_CODE(ArrayTreeType) == ARRAY_TYPE)
- Idx.push_back(ConstantInt::get(IntPtrTy, 0));
+ Idx.push_back(Context.getConstantInt(IntPtrTy, 0));
Idx.push_back(IndexVal);
Value *Ptr = Builder.CreateGEP(ArrayAddr, Idx.begin(), Idx.end());
const Type *ElementTy = ConvertType(ElementType);
unsigned Alignment = MinAlign(ArrayAlign, TD.getABITypeAlignment(ElementTy));
return LValue(BitCastToType(Ptr,
- PointerType::getUnqual(ConvertType(TREE_TYPE(exp)))),
+ Context.getPointerTypeUnqual(ConvertType(TREE_TYPE(exp)))),
Alignment);
}
@@ -6391,7 +6405,8 @@
// much nicer in cases like:
// float foo(int w, float A[][w], int g) { return A[g][0]; }
- ArrayAddr = BitCastToType(ArrayAddr, PointerType::getUnqual(Type::Int8Ty));
+ ArrayAddr = BitCastToType(ArrayAddr,
+ Context.getPointerTypeUnqual(Type::Int8Ty));
if (VOID_TYPE_P(TREE_TYPE(ArrayTreeType)))
return LValue(Builder.CreateGEP(ArrayAddr, IndexVal), 1);
@@ -6404,7 +6419,7 @@
cast<ConstantInt>(IndexVal)->getZExtValue());
Value *Ptr = Builder.CreateGEP(ArrayAddr, IndexVal);
return LValue(BitCastToType(Ptr,
- PointerType::getUnqual(ConvertType(TREE_TYPE(exp)))),
+ Context.getPointerTypeUnqual(ConvertType(TREE_TYPE(exp)))),
Alignment);
}
@@ -6430,19 +6445,19 @@
if (unsigned UnitOffset = BitStart / ValueSizeInBits) {
// TODO: If Ptr.Ptr is a struct type or something, we can do much better
// than this. e.g. check out when compiling unwind-dw2-fde-darwin.c.
- Ptr.Ptr = BitCastToType(Ptr.Ptr, PointerType::getUnqual(ValTy));
+ Ptr.Ptr = BitCastToType(Ptr.Ptr, Context.getPointerTypeUnqual(ValTy));
Ptr.Ptr = Builder.CreateGEP(Ptr.Ptr,
- ConstantInt::get(Type::Int32Ty, UnitOffset));
+ Context.getConstantInt(Type::Int32Ty, UnitOffset));
BitStart -= UnitOffset*ValueSizeInBits;
}
// If this is referring to the whole field, return the whole thing.
if (BitStart == 0 && BitSize == ValueSizeInBits) {
- return LValue(BitCastToType(Ptr.Ptr, PointerType::getUnqual(ValTy)),
+ return LValue(BitCastToType(Ptr.Ptr, Context.getPointerTypeUnqual(ValTy)),
Ptr.getAlignment());
}
- return LValue(BitCastToType(Ptr.Ptr, PointerType::getUnqual(ValTy)), 1,
+ return LValue(BitCastToType(Ptr.Ptr, Context.getPointerTypeUnqual(ValTy)), 1,
BitStart, BitSize);
}
@@ -6465,7 +6480,7 @@
StructAddrLV.BitStart == 0) && "structs cannot be bitfields!");
StructAddrLV.Ptr = BitCastToType(StructAddrLV.Ptr,
- PointerType::getUnqual(StructTy));
+ Context.getPointerTypeUnqual(StructTy));
const Type *FieldTy = ConvertType(getDeclaredType(FieldDecl));
// BitStart - This is the actual offset of the field from the start of the
@@ -6531,7 +6546,7 @@
unsigned ByteOffset = BitStart/8;
if (ByteOffset > 0) {
Offset = Builder.CreateAdd(Offset,
- ConstantInt::get(Offset->getType(), ByteOffset));
+ Context.getConstantInt(Offset->getType(), ByteOffset));
BitStart -= ByteOffset*8;
// If the base is known to be 8-byte aligned, and we're adding a 4-byte
// offset, the field is known to be 4-byte aligned.
@@ -6542,7 +6557,7 @@
Offset->getType());
Ptr = Builder.CreateAdd(Ptr, Offset);
FieldPtr = CastToType(Instruction::IntToPtr, Ptr,
- PointerType::getUnqual(FieldTy));
+ Context.getPointerTypeUnqual(FieldTy));
}
if (isBitfield(FieldDecl)) {
@@ -6575,14 +6590,14 @@
// sized like an i24 there may be trouble: incrementing a T* will move
// the position by 32 bits not 24, leaving the upper 8 of those 32 bits
// inaccessible. Avoid this by rounding up the size appropriately.
- FieldTy = IntegerType::get(TD.getTypeAllocSizeInBits(FieldTy));
+ FieldTy = Context.getIntegerType(TD.getTypeAllocSizeInBits(FieldTy));
assert(FieldTy->getPrimitiveSizeInBits() ==
TD.getTypeAllocSizeInBits(FieldTy) && "Field type not sequential!");
// If this is a bitfield, the field may span multiple fields in the LLVM
// type. As such, cast the pointer to be a pointer to the declared type.
- FieldPtr = BitCastToType(FieldPtr, PointerType::getUnqual(FieldTy));
+ FieldPtr = BitCastToType(FieldPtr, Context.getPointerTypeUnqual(FieldTy));
unsigned LLVMValueBitSize = FieldTy->getPrimitiveSizeInBits();
// Finally, because bitfields can span LLVM fields, and because the start
@@ -6609,12 +6624,12 @@
unsigned ByteOffset = NumAlignmentUnits*ByteAlignment;
LVAlign = MinAlign(LVAlign, ByteOffset);
- Constant *Offset = ConstantInt::get(TD.getIntPtrType(), ByteOffset);
+ Constant *Offset = Context.getConstantInt(TD.getIntPtrType(), ByteOffset);
FieldPtr = CastToType(Instruction::PtrToInt, FieldPtr,
Offset->getType());
FieldPtr = Builder.CreateAdd(FieldPtr, Offset);
FieldPtr = CastToType(Instruction::IntToPtr, FieldPtr,
- PointerType::getUnqual(FieldTy));
+ Context.getPointerTypeUnqual(FieldTy));
// Adjust bitstart to account for the pointer movement.
BitStart -= ByteOffset*8;
@@ -6635,7 +6650,7 @@
} else {
// Make sure we return a pointer to the right type.
const Type *EltTy = ConvertType(TREE_TYPE(exp));
- FieldPtr = BitCastToType(FieldPtr, PointerType::getUnqual(EltTy));
+ FieldPtr = BitCastToType(FieldPtr, Context.getPointerTypeUnqual(EltTy));
}
assert(BitStart == 0 &&
@@ -6676,8 +6691,8 @@
if (Decl == 0) {
if (errorcount || sorrycount) {
const Type *Ty = ConvertType(TREE_TYPE(exp));
- const PointerType *PTy = PointerType::getUnqual(Ty);
- LValue LV(ConstantPointerNull::get(PTy), 1);
+ const PointerType *PTy = Context.getPointerTypeUnqual(Ty);
+ LValue LV(Context.getConstantPointerNull(PTy), 1);
return LV;
}
assert(0 && "INTERNAL ERROR: Referencing decl that hasn't been laid out");
@@ -6713,8 +6728,8 @@
const Type *Ty = ConvertType(TREE_TYPE(exp));
// If we have "extern void foo", make the global have type {} instead of
// type void.
- if (Ty == Type::VoidTy) Ty = StructType::get(NULL, NULL);
- const PointerType *PTy = PointerType::getUnqual(Ty);
+ if (Ty == Type::VoidTy) Ty = Context.getStructType(NULL, NULL);
+ const PointerType *PTy = Context.getPointerTypeUnqual(Ty);
unsigned Alignment = Ty->isSized() ? TD.getABITypeAlignment(Ty) : 1;
if (DECL_ALIGN(exp)) {
if (DECL_USER_ALIGN(exp) || 8 * Alignment < (unsigned)DECL_ALIGN(exp))
@@ -6730,7 +6745,7 @@
unsigned Alignment = TD.getABITypeAlignment(cast<PointerType>(ExceptionValue->
getType())->getElementType());
return LValue(BitCastToType(ExceptionValue,
- PointerType::getUnqual(ConvertType(TREE_TYPE(exp)))),
+ Context.getPointerTypeUnqual(ConvertType(TREE_TYPE(exp)))),
Alignment);
}
@@ -6759,7 +6774,7 @@
LValue LV = EmitLV(Op);
// The type is the type of the expression.
LV.Ptr = BitCastToType(LV.Ptr,
- PointerType::getUnqual(ConvertType(TREE_TYPE(exp))));
+ Context.getPointerTypeUnqual(ConvertType(TREE_TYPE(exp))));
return LV;
} else {
// If the input is a scalar, emit to a temporary.
@@ -6767,7 +6782,7 @@
StoreInst *S = Builder.CreateStore(Emit(Op, 0), Dest);
// The type is the type of the expression.
Dest = BitCastToType(Dest,
- PointerType::getUnqual(ConvertType(TREE_TYPE(exp))));
+ Context.getPointerTypeUnqual(ConvertType(TREE_TYPE(exp))));
return LValue(Dest, 1);
}
}
@@ -6808,7 +6823,7 @@
std::vector<Value *> BuildVecOps;
// Insert zero initializers for any uninitialized values.
- Constant *Zero = getGlobalContext().getNullValue(PTy->getElementType());
+ Constant *Zero = Context.getNullValue(PTy->getElementType());
BuildVecOps.resize(cast<VectorType>(Ty)->getNumElements(), Zero);
// Insert all of the elements here.
@@ -6862,7 +6877,7 @@
// Scalar value. Evaluate to a register, then do the store.
Value *V = Emit(tree_value, 0);
Value *Ptr = BitCastToType(DestLoc->Ptr,
- PointerType::getUnqual(V->getType()));
+ Context.getPointerTypeUnqual(V->getType()));
StoreInst *St = Builder.CreateStore(V, Ptr, DestLoc->Volatile);
St->setAlignment(DestLoc->getAlignment());
}
@@ -6912,14 +6927,14 @@
assert(HOST_BITS_PER_WIDE_INT == 64 &&
"i128 only supported on 64-bit system");
uint64_t Bits[] = { TREE_INT_CST_LOW(exp), TREE_INT_CST_HIGH(exp) };
- return ConstantInt::get(APInt(128, 2, Bits));
+ return Context.getConstantInt(APInt(128, 2, Bits));
}
}
// Build the value as a ulong constant, then constant fold it to the right
// type. This handles overflow and other things appropriately.
uint64_t IntValue = getINTEGER_CSTVal(exp);
- ConstantInt *C = ConstantInt::get(Type::Int64Ty, IntValue);
+ ConstantInt *C = Context.getConstantInt(Type::Int64Ty, IntValue);
// The destination type can be a pointer, integer or floating point
// so we need a generalized cast here
Instruction::CastOps opcode = CastInst::getCastOpcode(C, false, Ty,
@@ -6960,7 +6975,8 @@
if (llvm::sys::isBigEndianHost() != FLOAT_WORDS_BIG_ENDIAN)
std::swap(UArr[0], UArr[1]);
- return ConstantFP::get(Ty==Type::FloatTy ? APFloat((float)V) : APFloat(V));
+ return
+ Context.getConstantFP(Ty==Type::FloatTy ? APFloat((float)V) : APFloat(V));
} else if (Ty==Type::X86_FP80Ty) {
long RealArr[4];
uint64_t UArr[2];
@@ -6968,7 +6984,7 @@
UArr[0] = ((uint64_t)((uint32_t)RealArr[0])) |
((uint64_t)((uint32_t)RealArr[1]) << 32);
UArr[1] = (uint16_t)RealArr[2];
- return ConstantFP::get(APFloat(APInt(80, 2, UArr)));
+ return Context.getConstantFP(APFloat(APInt(80, 2, UArr)));
} else if (Ty==Type::PPC_FP128Ty) {
long RealArr[4];
uint64_t UArr[2];
@@ -6978,7 +6994,7 @@
((uint64_t)((uint32_t)RealArr[1]));
UArr[1] = ((uint64_t)((uint32_t)RealArr[2]) << 32) |
((uint64_t)((uint32_t)RealArr[3]));
- return ConstantFP::get(APFloat(APInt(128, 2, UArr)));
+ return Context.getConstantFP(APFloat(APInt(128, 2, UArr)));
}
assert(0 && "Floating point type not handled yet");
return 0; // outwit compiler warning
@@ -6986,7 +7002,7 @@
Constant *TreeConstantToLLVM::ConvertVECTOR_CST(tree exp) {
if (!TREE_VECTOR_CST_ELTS(exp))
- return getGlobalContext().getNullValue(ConvertType(TREE_TYPE(exp)));
+ return Context.getNullValue(ConvertType(TREE_TYPE(exp)));
std::vector<Constant*> Elts;
for (tree elt = TREE_VECTOR_CST_ELTS(exp); elt; elt = TREE_CHAIN(elt))
@@ -6995,12 +7011,12 @@
// The vector should be zero filled if insufficient elements are provided.
if (Elts.size() < TYPE_VECTOR_SUBPARTS(TREE_TYPE(exp))) {
tree EltType = TREE_TYPE(TREE_TYPE(exp));
- Constant *Zero = getGlobalContext().getNullValue(ConvertType(EltType));
+ Constant *Zero = Context.getNullValue(ConvertType(EltType));
while (Elts.size() < TYPE_VECTOR_SUBPARTS(TREE_TYPE(exp)))
Elts.push_back(Zero);
}
- return ConstantVector::get(Elts);
+ return Context.getConstantVector(Elts);
}
Constant *TreeConstantToLLVM::ConvertSTRING_CST(tree exp) {
@@ -7013,20 +7029,20 @@
if (ElTy == Type::Int8Ty) {
const unsigned char *InStr =(const unsigned char *)TREE_STRING_POINTER(exp);
for (unsigned i = 0; i != Len; ++i)
- Elts.push_back(ConstantInt::get(Type::Int8Ty, InStr[i]));
+ Elts.push_back(Context.getConstantInt(Type::Int8Ty, InStr[i]));
} else if (ElTy == Type::Int16Ty) {
assert((Len&1) == 0 &&
"Length in bytes should be a multiple of element size");
const unsigned short *InStr =
(const unsigned short *)TREE_STRING_POINTER(exp);
for (unsigned i = 0; i != Len/2; ++i)
- Elts.push_back(ConstantInt::get(Type::Int16Ty, InStr[i]));
+ Elts.push_back(Context.getConstantInt(Type::Int16Ty, InStr[i]));
} else if (ElTy == Type::Int32Ty) {
assert((Len&3) == 0 &&
"Length in bytes should be a multiple of element size");
const unsigned *InStr = (const unsigned *)TREE_STRING_POINTER(exp);
for (unsigned i = 0; i != Len/4; ++i)
- Elts.push_back(ConstantInt::get(Type::Int32Ty, InStr[i]));
+ Elts.push_back(Context.getConstantInt(Type::Int32Ty, InStr[i]));
} else {
assert(0 && "Unknown character type!");
}
@@ -7039,7 +7055,7 @@
tree Domain = TYPE_DOMAIN(TREE_TYPE(exp));
if (!Domain || !TYPE_MAX_VALUE(Domain)) {
ConstantSize = Len;
- StrTy = ArrayType::get(ElTy, Len);
+ StrTy = Context.getArrayType(ElTy, Len);
}
}
@@ -7048,19 +7064,19 @@
Elts.resize(ConstantSize);
} else {
// Fill the end of the string with nulls.
- Constant *C = getGlobalContext().getNullValue(ElTy);
+ Constant *C = Context.getNullValue(ElTy);
for (; Len != ConstantSize; ++Len)
Elts.push_back(C);
}
}
- return ConstantArray::get(StrTy, Elts);
+ return Context.getConstantArray(StrTy, Elts);
}
Constant *TreeConstantToLLVM::ConvertCOMPLEX_CST(tree exp) {
std::vector<Constant*> Elts;
Elts.push_back(Convert(TREE_REALPART(exp)));
Elts.push_back(Convert(TREE_IMAGPART(exp)));
- return ConstantStruct::get(Elts, false);
+ return Context.getConstantStruct(Elts, false);
}
Constant *TreeConstantToLLVM::ConvertNOP_EXPR(tree exp) {
@@ -7122,7 +7138,7 @@
// when array is filled during program initialization.
if (CONSTRUCTOR_ELTS(exp) == 0 ||
VEC_length(constructor_elt, CONSTRUCTOR_ELTS(exp)) == 0) // All zeros?
- return getGlobalContext().getNullValue(ConvertType(TREE_TYPE(exp)));
+ return Context.getNullValue(ConvertType(TREE_TYPE(exp)));
switch (TREE_CODE(TREE_TYPE(exp))) {
default:
@@ -7217,8 +7233,8 @@
// Zero length array.
if (ResultElts.empty())
- return ConstantArray::get(cast<ArrayType>(ConvertType(TREE_TYPE(exp))),
- ResultElts);
+ return Context.getConstantArray(
+ cast<ArrayType>(ConvertType(TREE_TYPE(exp))), ResultElts);
assert(SomeVal && "If we had some initializer, we should have some value!");
// Do a post-pass over all of the elements. We're taking care of two things
@@ -7229,7 +7245,7 @@
// of an array. This can occur in cases where we have an array of
// unions, and the various unions had different pieces init'd.
const Type *ElTy = SomeVal->getType();
- Constant *Filler = getGlobalContext().getNullValue(ElTy);
+ Constant *Filler = Context.getNullValue(ElTy);
bool AllEltsSameType = true;
for (unsigned i = 0, e = ResultElts.size(); i != e; ++i) {
if (ResultElts[i] == 0)
@@ -7240,13 +7256,13 @@
if (TREE_CODE(InitType) == VECTOR_TYPE) {
assert(AllEltsSameType && "Vector of heterogeneous element types?");
- return ConstantVector::get(ResultElts);
+ return Context.getConstantVector(ResultElts);
}
if (AllEltsSameType)
- return ConstantArray::get(ArrayType::get(ElTy, ResultElts.size()),
- ResultElts);
- return ConstantStruct::get(ResultElts, false);
+ return Context.getConstantArray(
+ Context.getArrayType(ElTy, ResultElts.size()), ResultElts);
+ return Context.getConstantStruct(ResultElts, false);
}
@@ -7313,9 +7329,9 @@
// Otherwise, there is padding here. Insert explicit zeros.
const Type *PadTy = Type::Int8Ty;
if (AlignedEltOffs-EltOffs != 1)
- PadTy = ArrayType::get(PadTy, AlignedEltOffs-EltOffs);
+ PadTy = Context.getArrayType(PadTy, AlignedEltOffs-EltOffs);
ResultElts.insert(ResultElts.begin()+i,
- getGlobalContext().getNullValue(PadTy));
+ Context.getNullValue(PadTy));
++e; // One extra element to scan.
}
@@ -7397,9 +7413,9 @@
// not get the same alignment as "Val".
const Type *FillTy = Type::Int8Ty;
if (GCCFieldOffsetInBits/8-NextFieldByteStart != 1)
- FillTy = ArrayType::get(FillTy,
+ FillTy = Context.getArrayType(FillTy,
GCCFieldOffsetInBits/8-NextFieldByteStart);
- ResultElts.push_back(getGlobalContext().getNullValue(FillTy));
+ ResultElts.push_back(Context.getNullValue(FillTy));
NextFieldByteStart = GCCFieldOffsetInBits/8;
@@ -7426,7 +7442,7 @@
// been an anonymous bitfield or other thing that shoved it over. No matter,
// just insert some i8 padding until there are bits to fill in.
while (GCCFieldOffsetInBits > NextFieldByteStart*8) {
- ResultElts.push_back(ConstantInt::get(Type::Int8Ty, 0));
+ ResultElts.push_back(Context.getConstantInt(Type::Int8Ty, 0));
++NextFieldByteStart;
}
@@ -7475,7 +7491,7 @@
APInt Tmp = ValC->getValue();
Tmp = Tmp.lshr(BitsInPreviousField);
Tmp = Tmp.trunc(ValBitSize-BitsInPreviousField);
- ValC = ConstantInt::get(Tmp);
+ ValC = Context.getConstantInt(Tmp);
} else {
// Big endian, take bits from the top of the field value.
ValForPrevField = ValForPrevField.lshr(ValBitSize-BitsInPreviousField);
@@ -7483,7 +7499,7 @@
APInt Tmp = ValC->getValue();
Tmp = Tmp.trunc(ValBitSize-BitsInPreviousField);
- ValC = ConstantInt::get(Tmp);
+ ValC = Context.getConstantInt(Tmp);
}
// Okay, we're going to insert ValForPrevField into the previous i8, extend
@@ -7500,7 +7516,7 @@
// "or" in the previous value and install it.
const APInt &LastElt = cast<ConstantInt>(ResultElts.back())->getValue();
- ResultElts.back() = ConstantInt::get(ValForPrevField | LastElt);
+ ResultElts.back() = Context.getConstantInt(ValForPrevField | LastElt);
// If the whole bit-field fit into the previous field, we're done.
if (ValC == 0) return;
@@ -7518,7 +7534,7 @@
// Little endian lays out low bits first.
APInt Tmp = Val;
Tmp.trunc(8);
- ValToAppend = ConstantInt::get(Tmp);
+ ValToAppend = Context.getConstantInt(Tmp);
Val = Val.lshr(8);
} else {
@@ -7526,17 +7542,17 @@
APInt Tmp = Val;
Tmp = Tmp.lshr(Tmp.getBitWidth()-8);
Tmp.trunc(8);
- ValToAppend = ConstantInt::get(Tmp);
+ ValToAppend = Context.getConstantInt(Tmp);
}
} else if (Val.getBitWidth() == 8) {
- ValToAppend = ConstantInt::get(Val);
+ ValToAppend = Context.getConstantInt(Val);
} else {
APInt Tmp = Val;
Tmp.zext(8);
if (BYTES_BIG_ENDIAN)
Tmp = Tmp << 8-Val.getBitWidth();
- ValToAppend = ConstantInt::get(Tmp);
+ ValToAppend = Context.getConstantInt(Tmp);
}
ResultElts.push_back(ValToAppend);
@@ -7584,8 +7600,8 @@
if (LLVMNaturalSize < GCCStructSize) {
const Type *FillTy = Type::Int8Ty;
if (GCCStructSize - NextFieldByteStart != 1)
- FillTy = ArrayType::get(FillTy, GCCStructSize - NextFieldByteStart);
- ResultElts.push_back(getGlobalContext().getNullValue(FillTy));
+ FillTy = Context.getArrayType(FillTy, GCCStructSize - NextFieldByteStart);
+ ResultElts.push_back(Context.getNullValue(FillTy));
NextFieldByteStart = GCCStructSize;
// At this point, we know that our struct should have the right size.
@@ -7647,7 +7663,7 @@
// Fields are allowed to be smaller than their type. Simply discard
// the unwanted upper bits in the field value.
APInt ValAsInt = cast<ConstantInt>(Val)->getValue();
- Val = ConstantInt::get(ValAsInt.trunc(FieldSizeInBits));
+ Val = Context.getConstantInt(ValAsInt.trunc(FieldSizeInBits));
}
LayoutInfo.AddBitFieldToRecordConstant(cast<ConstantInt>(Val),
GCCFieldOffsetInBits);
@@ -7663,7 +7679,8 @@
LayoutInfo.HandleTailPadding(getInt64(StructTypeSizeTree, true));
// Okay, we're done, return the computed elements.
- return ConstantStruct::get(LayoutInfo.ResultElts, LayoutInfo.StructIsPacked);
+ return
+ Context.getConstantStruct(LayoutInfo.ResultElts, LayoutInfo.StructIsPacked);
}
Constant *TreeConstantToLLVM::ConvertUnionCONSTRUCTOR(tree exp) {
@@ -7691,11 +7708,11 @@
if (UnionSize - InitSize == 1)
FillTy = Type::Int8Ty;
else
- FillTy = ArrayType::get(Type::Int8Ty, UnionSize - InitSize);
- Elts.push_back(getGlobalContext().getNullValue(FillTy));
+ FillTy = Context.getArrayType(Type::Int8Ty, UnionSize - InitSize);
+ Elts.push_back(Context.getNullValue(FillTy));
}
}
- return ConstantStruct::get(Elts, false);
+ return Context.getConstantStruct(Elts, false);
}
//===----------------------------------------------------------------------===//
@@ -7809,7 +7826,8 @@
BasicBlock *BB = getLabelDeclBlock(exp);
Constant *C = TheTreeToLLVM->getIndirectGotoBlockNumber(BB);
- return TheFolder->CreateIntToPtr(C, PointerType::getUnqual(Type::Int8Ty));
+ return
+ TheFolder->CreateIntToPtr(C, Context.getPointerTypeUnqual(Type::Int8Ty));
}
Constant *TreeConstantToLLVM::EmitLV_COMPLEX_CST(tree exp) {
@@ -7909,7 +7927,7 @@
std::vector<Value*> Idx;
if (TREE_CODE(ArrayType) == ARRAY_TYPE)
- Idx.push_back(ConstantInt::get(IntPtrTy, 0));
+ Idx.push_back(Context.getConstantInt(IntPtrTy, 0));
Idx.push_back(IndexVal);
return TheFolder->CreateGetElementPtr(ArrayAddr, &Idx[0], Idx.size());
@@ -7925,7 +7943,7 @@
tree FieldDecl = TREE_OPERAND(exp, 1);
StructAddrLV = TheFolder->CreateBitCast(StructAddrLV,
- PointerType::getUnqual(StructTy));
+ Context.getPointerTypeUnqual(StructTy));
const Type *FieldTy = ConvertType(getDeclaredType(FieldDecl));
// BitStart - This is the actual offset of the field from the start of the
@@ -7942,14 +7960,14 @@
Constant *Ops[] = {
StructAddrLV,
- getGlobalContext().getNullValue(Type::Int32Ty),
- ConstantInt::get(Type::Int32Ty, MemberIndex)
+ Context.getNullValue(Type::Int32Ty),
+ Context.getConstantInt(Type::Int32Ty, MemberIndex)
};
FieldPtr = TheFolder->CreateGetElementPtr(StructAddrLV, Ops+1, 2);
FieldPtr = ConstantFoldInstOperands(Instruction::GetElementPtr,
FieldPtr->getType(), Ops,
- 3, &getGlobalContext(), &TD);
+ 3, &Context, &TD);
// Now that we did an offset from the start of the struct, subtract off
// the offset from BitStart.
@@ -7962,13 +7980,14 @@
Constant *Offset = Convert(field_offset);
Constant *Ptr = TheFolder->CreatePtrToInt(StructAddrLV, Offset->getType());
Ptr = TheFolder->CreateAdd(Ptr, Offset);
- FieldPtr = TheFolder->CreateIntToPtr(Ptr, PointerType::getUnqual(FieldTy));
+ FieldPtr = TheFolder->CreateIntToPtr(Ptr,
+ Context.getPointerTypeUnqual(FieldTy));
}
// Make sure we return a result of the right type.
- if (PointerType::getUnqual(FieldTy) != FieldPtr->getType())
+ if (Context.getPointerTypeUnqual(FieldTy) != FieldPtr->getType())
FieldPtr = TheFolder->CreateBitCast(FieldPtr,
- PointerType::getUnqual(FieldTy));
+ Context.getPointerTypeUnqual(FieldTy));
assert(BitStart == 0 &&
"It's a bitfield reference or we didn't get to the field!");
Modified: llvm-gcc-4.2/trunk/gcc/llvm-types.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/llvm-types.cpp?rev=75704&r1=75703&r2=75704&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/llvm-types.cpp (original)
+++ llvm-gcc-4.2/trunk/gcc/llvm-types.cpp Tue Jul 14 18:10:12 2009
@@ -63,6 +63,8 @@
typedef DenseMap<const Type *, unsigned> LTypesMapTy;
static LTypesMapTy LTypesMap;
+static LLVMContext &Context = getGlobalContext();
+
// GET_TYPE_LLVM/SET_TYPE_LLVM - Associate an LLVM type with each TREE type.
// These are lazily computed by ConvertType.
@@ -186,11 +188,11 @@
}
const std::string &TypeName = TypeNameMap[*I];
- LTypesNames.push_back(ConstantArray::get(TypeName, false));
+ LTypesNames.push_back(Context.getConstantArray(TypeName, false));
}
// Create string table.
- Constant *LTypesNameTable = ConstantStruct::get(LTypesNames, false);
+ Constant *LTypesNameTable = Context.getConstantStruct(LTypesNames, false);
// Create variable to hold this string table.
GlobalVariable *GV = new GlobalVariable(*TheModule,
@@ -221,7 +223,7 @@
for (unsigned i = 0, e = ArgTys.size(); i != e; ++i)
ArgTysP.push_back(ArgTys[i]);
- return FunctionType::get(Res, ArgTysP, isVarArg);
+ return Context.getFunctionType(Res, ArgTysP, isVarArg);
}
//===----------------------------------------------------------------------===//
@@ -709,7 +711,7 @@
if (const Type *Ty = GET_TYPE_LLVM(type))
return Ty;
return SET_TYPE_LLVM(type,
- IntegerType::get(TREE_INT_CST_LOW(TYPE_SIZE(type))));
+ Context.getIntegerType(TREE_INT_CST_LOW(TYPE_SIZE(type))));
}
case ENUMERAL_TYPE:
// Use of an enum that is implicitly declared?
@@ -718,7 +720,7 @@
if (const Type *Ty = GET_TYPE_LLVM(orig_type))
return Ty;
- const Type *Ty = OpaqueType::get();
+ const Type *Ty = Context.getOpaqueType();
TheModule->addTypeName(GetTypeName("enum.", orig_type), Ty);
return TypeDB.setType(orig_type, Ty);
}
@@ -726,7 +728,7 @@
type = orig_type;
case INTEGER_TYPE:
if (const Type *Ty = GET_TYPE_LLVM(type)) return Ty;
- return SET_TYPE_LLVM(type, IntegerType::get(TYPE_PRECISION(type)));
+ return SET_TYPE_LLVM(type, Context.getIntegerType(TYPE_PRECISION(type)));
case REAL_TYPE:
if (const Type *Ty = GET_TYPE_LLVM(type)) return Ty;
switch (TYPE_PRECISION(type)) {
@@ -745,7 +747,8 @@
return SET_TYPE_LLVM(type, Type::FP128Ty);
#else
// 128-bit long doubles map onto { double, double }.
- return SET_TYPE_LLVM(type, StructType::get(Type::DoubleTy, Type::DoubleTy,
+ return SET_TYPE_LLVM(type,
+ Context.getStructType(Type::DoubleTy, Type::DoubleTy,
NULL));
#endif
}
@@ -754,13 +757,13 @@
if (const Type *Ty = GET_TYPE_LLVM(type)) return Ty;
const Type *Ty = ConvertType(TREE_TYPE(type));
assert(!Ty->isAbstract() && "should use TypeDB.setType()");
- return SET_TYPE_LLVM(type, StructType::get(Ty, Ty, NULL));
+ return SET_TYPE_LLVM(type, Context.getStructType(Ty, Ty, NULL));
}
case VECTOR_TYPE: {
if (const Type *Ty = GET_TYPE_LLVM(type)) return Ty;
const Type *Ty = ConvertType(TREE_TYPE(type));
assert(!Ty->isAbstract() && "should use TypeDB.setType()");
- Ty = VectorType::get(Ty, TYPE_VECTOR_SUBPARTS(type));
+ Ty = Context.getVectorType(Ty, TYPE_VECTOR_SUBPARTS(type));
return SET_TYPE_LLVM(type, Ty);
}
@@ -817,7 +820,7 @@
if (Ty == 0) {
PointersToReresolve.push_back(type);
return TypeDB.setType(type,
- PointerType::getUnqual(OpaqueType::get()));
+ Context.getPointerTypeUnqual(Context.getOpaqueType()));
}
// A type has already been computed. However, this may be some sort of
@@ -835,7 +838,7 @@
if (Ty->getTypeID() == Type::VoidTyID)
Ty = Type::Int8Ty; // void* -> sbyte*
- return TypeDB.setType(type, PointerType::getUnqual(Ty));
+ return TypeDB.setType(type, Context.getPointerTypeUnqual(Ty));
}
case METHOD_TYPE:
@@ -897,7 +900,7 @@
NumElements /= ElementSize;
}
- return TypeDB.setType(type, ArrayType::get(ElementTy, NumElements));
+ return TypeDB.setType(type, Context.getArrayType(ElementTy, NumElements));
}
case OFFSET_TYPE:
// Handle OFFSET_TYPE specially. This is used for pointers to members,
@@ -1005,7 +1008,7 @@
/// argument is passed by value. It is lowered to a parameter passed by
/// reference with an additional parameter attribute "ByVal".
void HandleByValArgument(const llvm::Type *LLVMTy, tree type) {
- HandleScalarArgument(PointerType::getUnqual(LLVMTy), type);
+ HandleScalarArgument(Context.getPointerTypeUnqual(LLVMTy), type);
}
/// HandleFCAArgument - This callback is invoked if the aggregate function
@@ -1351,7 +1354,7 @@
const Type *getLLVMType() const {
// Use Packed type if Packed is set or all struct fields are bitfields.
// Empty struct is not packed unless packed is set.
- return StructType::get(Elements,
+ return Context.getStructType(Elements,
Packed || (!Elements.empty() && AllBitFields));
}
@@ -1404,7 +1407,7 @@
assert (PadBytes > 0 && "Unable to remove extra bytes");
// Update last element type and size, element offset is unchanged.
- const Type *Pad = ArrayType::get(Type::Int8Ty, PadBytes);
+ const Type *Pad = Context.getArrayType(Type::Int8Ty, PadBytes);
unsigned OriginalSize = ElementSizeInBytes.back();
Elements.pop_back();
Elements.push_back(Pad);
@@ -1441,7 +1444,7 @@
// different offset.
const Type *Pad = Type::Int8Ty;
if (PoppedOffset != EndOffset + 1)
- Pad = ArrayType::get(Pad, PoppedOffset - EndOffset);
+ Pad = Context.getArrayType(Pad, PoppedOffset - EndOffset);
addElement(Pad, EndOffset, PoppedOffset - EndOffset);
}
}
@@ -1473,7 +1476,7 @@
// In this example, previous field is C and D is current field.
addElement(SavedTy, CurOffset, ByteOffset - CurOffset);
else if (ByteOffset - CurOffset != 1)
- Pad = ArrayType::get(Pad, ByteOffset - CurOffset);
+ Pad = Context.getArrayType(Pad, ByteOffset - CurOffset);
addElement(Pad, CurOffset, ByteOffset - CurOffset);
}
return true;
@@ -1613,7 +1616,7 @@
unsigned ByteAlignment = getTypeAlignment(NewFieldTy);
if (FirstUnallocatedByte & (ByteAlignment-1)) {
// Instead of inserting a nice whole field, insert a small array of ubytes.
- NewFieldTy = ArrayType::get(Type::Int8Ty, (Size+7)/8);
+ NewFieldTy = Context.getArrayType(Type::Int8Ty, (Size+7)/8);
}
// Finally, add the new field.
@@ -2077,7 +2080,7 @@
if (PadBytes) {
const Type *Pad = Type::Int8Ty;
if (PadBytes != 1)
- Pad = ArrayType::get(Pad, PadBytes);
+ Pad = Context.getArrayType(Pad, PadBytes);
Info.addElement(Pad, FirstUnallocatedByte, PadBytes);
}
@@ -2129,7 +2132,7 @@
}
if (TYPE_SIZE(type) == 0) { // Forward declaration?
- const Type *Ty = OpaqueType::get();
+ const Type *Ty = Context.getOpaqueType();
TheModule->addTypeName(GetTypeName("struct.", orig_type), Ty);
return TypeDB.setType(type, Ty);
}
@@ -2192,12 +2195,13 @@
Info->getTypeAlignment(Type::Int32Ty)) == 0) {
// insert array of i32
unsigned Int32ArraySize = (GCCTypeSize-LLVMStructSize)/4;
- const Type *PadTy = ArrayType::get(Type::Int32Ty, Int32ArraySize);
+ const Type *PadTy =
+ Context.getArrayType(Type::Int32Ty, Int32ArraySize);
Info->addElement(PadTy, GCCTypeSize - LLVMLastElementEnd,
Int32ArraySize, true /* Padding Element */);
} else {
const Type *PadTy =
- ArrayType::get(Type::Int8Ty, GCCTypeSize-LLVMStructSize);
+ Context.getArrayType(Type::Int8Ty, GCCTypeSize-LLVMStructSize);
Info->addElement(PadTy, GCCTypeSize - LLVMLastElementEnd,
GCCTypeSize - LLVMLastElementEnd,
true /* Padding Element */);
@@ -2306,7 +2310,7 @@
}
if (TYPE_SIZE(type) == 0) { // Forward declaraion?
- const Type *Ty = OpaqueType::get();
+ const Type *Ty = Context.getOpaqueType();
TheModule->addTypeName(GetTypeName("union.", orig_type), Ty);
return TypeDB.setType(type, Ty);
}
@@ -2434,13 +2438,13 @@
"LLVM type size doesn't match GCC type size!");
const Type *PadTy = Type::Int8Ty;
if (GCCTypeSize-EltSize != 1)
- PadTy = ArrayType::get(PadTy, GCCTypeSize-EltSize);
+ PadTy = Context.getArrayType(PadTy, GCCTypeSize-EltSize);
UnionElts.push_back(PadTy);
}
}
bool isPacked = 8 * EltAlign > TYPE_ALIGN(type);
- const Type *ResultTy = StructType::get(UnionElts, isPacked);
+ const Type *ResultTy = Context.getStructType(UnionElts, isPacked);
const OpaqueType *OldTy = cast_or_null<OpaqueType>(GET_TYPE_LLVM(type));
TypeDB.setType(type, ResultTy);