[llvm-commits] [gcc-plugin] r79452 - in /gcc-plugin/trunk: i386/llvm-i386-target.h i386/llvm-i386.cpp llvm-abi.h llvm-backend.cpp llvm-convert.cpp llvm-debug.cpp llvm-internal.h llvm-types.cpp
Duncan Sands
baldrick at free.fr
Wed Aug 19 12:49:08 PDT 2009
Author: baldrick
Date: Wed Aug 19 14:49:08 2009
New Revision: 79452
URL: http://llvm.org/viewvc/llvm-project?rev=79452&view=rev
Log:
Resync with llvm-gcc revision 79340. This gets
things compiling again.
Modified:
gcc-plugin/trunk/i386/llvm-i386-target.h
gcc-plugin/trunk/i386/llvm-i386.cpp
gcc-plugin/trunk/llvm-abi.h
gcc-plugin/trunk/llvm-backend.cpp
gcc-plugin/trunk/llvm-convert.cpp
gcc-plugin/trunk/llvm-debug.cpp
gcc-plugin/trunk/llvm-internal.h
gcc-plugin/trunk/llvm-types.cpp
Modified: gcc-plugin/trunk/i386/llvm-i386-target.h
URL: http://llvm.org/viewvc/llvm-project/gcc-plugin/trunk/i386/llvm-i386-target.h?rev=79452&r1=79451&r2=79452&view=diff
==============================================================================
--- gcc-plugin/trunk/i386/llvm-i386-target.h (original)
+++ gcc-plugin/trunk/i386/llvm-i386-target.h Wed Aug 19 14:49:08 2009
@@ -210,7 +210,7 @@
llvm_x86_32_should_pass_aggregate_in_mixed_regs(tree, const Type *Ty,
std::vector<const Type*>&);
-#define LLVM_SHOULD_PASS_AGGREGATE_IN_MIXED_REGS(T, TY, E) \
+#define LLVM_SHOULD_PASS_AGGREGATE_IN_MIXED_REGS(T, TY, CC, E) \
(TARGET_64BIT ? \
llvm_x86_64_should_pass_aggregate_in_mixed_regs((T), (TY), (E)) : \
llvm_x86_32_should_pass_aggregate_in_mixed_regs((T), (TY), (E)))
@@ -220,7 +220,7 @@
std::vector<const Type*>&,
bool);
-#define LLVM_AGGREGATE_PARTIALLY_PASSED_IN_REGS(E, SE, ISR) \
+#define LLVM_AGGREGATE_PARTIALLY_PASSED_IN_REGS(E, SE, ISR, CC) \
(TARGET_64BIT ? \
llvm_x86_64_aggregate_partially_passed_in_regs((E), (SE), (ISR)) : \
false)
Modified: gcc-plugin/trunk/i386/llvm-i386.cpp
URL: http://llvm.org/viewvc/llvm-project/gcc-plugin/trunk/i386/llvm-i386.cpp?rev=79452&r1=79451&r2=79452&view=diff
==============================================================================
--- gcc-plugin/trunk/i386/llvm-i386.cpp (original)
+++ gcc-plugin/trunk/i386/llvm-i386.cpp Wed Aug 19 14:49:08 2009
@@ -125,11 +125,11 @@
case IX86_BUILTIN_ANDNPD:
if (cast<VectorType>(ResultType)->getNumElements() == 4) // v4f32
Ops[0] = Builder.CreateBitCast(Ops[0],
- Context.getVectorType(Type::Int32Ty, 4),
+ VectorType::get(Type::getInt32Ty(Context), 4),
"tmp");
else // v2f64
Ops[0] = Builder.CreateBitCast(Ops[0],
- Context.getVectorType(Type::Int64Ty, 2),
+ VectorType::get(Type::getInt64Ty(Context), 2),
"tmp");
Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType(), "tmp");
@@ -285,25 +285,25 @@
Result = BuildVectorShuffle(Ops[0], Ops[1], 2, 1);
return true;
case IX86_BUILTIN_MOVQ: {
- Value *Zero = Context.getConstantInt(Type::Int32Ty, 0);
+ Value *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
Result = BuildVector(Zero, Zero, Zero, Zero, NULL);
Result = BuildVectorShuffle(Result, Ops[0], 4, 5, 2, 3);
return true;
}
case IX86_BUILTIN_LOADQ: {
- PointerType *i64Ptr = Context.getPointerTypeUnqual(Type::Int64Ty);
+ PointerType *i64Ptr = PointerType::getUnqual(Type::getInt64Ty(Context));
Ops[0] = Builder.CreateBitCast(Ops[0], i64Ptr, "tmp");
Ops[0] = Builder.CreateLoad(Ops[0], "tmp");
- Value *Zero = Context.getConstantInt(Type::Int64Ty, 0);
+ Value *Zero = ConstantInt::get(Type::getInt64Ty(Context), 0);
Result = BuildVector(Zero, Zero, NULL);
- Value *Idx = Context.getConstantInt(Type::Int32Ty, 0);
+ Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), 0);
Result = Builder.CreateInsertElement(Result, Ops[0], Idx, "tmp");
Result = Builder.CreateBitCast(Result, ResultType, "tmp");
return true;
}
case IX86_BUILTIN_LOADUPS: {
- VectorType *v4f32 = Context.getVectorType(Type::FloatTy, 4);
- PointerType *v4f32Ptr = Context.getPointerTypeUnqual(v4f32);
+ VectorType *v4f32 = VectorType::get(Type::getFloatTy(Context), 4);
+ PointerType *v4f32Ptr = PointerType::getUnqual(v4f32);
Value *BC = Builder.CreateBitCast(Ops[0], v4f32Ptr, "tmp");
LoadInst *LI = Builder.CreateLoad(BC, "tmp");
LI->setAlignment(1);
@@ -311,8 +311,8 @@
return true;
}
case IX86_BUILTIN_LOADUPD: {
- VectorType *v2f64 = Context.getVectorType(Type::DoubleTy, 2);
- PointerType *v2f64Ptr = Context.getPointerTypeUnqual(v2f64);
+ VectorType *v2f64 = VectorType::get(Type::getDoubleTy(Context), 2);
+ PointerType *v2f64Ptr = PointerType::getUnqual(v2f64);
Value *BC = Builder.CreateBitCast(Ops[0], v2f64Ptr, "tmp");
LoadInst *LI = Builder.CreateLoad(BC, "tmp");
LI->setAlignment(1);
@@ -320,8 +320,8 @@
return true;
}
case IX86_BUILTIN_LOADDQU: {
- VectorType *v16i8 = Context.getVectorType(Type::Int8Ty, 16);
- PointerType *v16i8Ptr = Context.getPointerTypeUnqual(v16i8);
+ VectorType *v16i8 = VectorType::get(Type::getInt8Ty(Context), 16);
+ PointerType *v16i8Ptr = PointerType::getUnqual(v16i8);
Value *BC = Builder.CreateBitCast(Ops[0], v16i8Ptr, "tmp");
LoadInst *LI = Builder.CreateLoad(BC, "tmp");
LI->setAlignment(1);
@@ -329,8 +329,8 @@
return true;
}
case IX86_BUILTIN_STOREUPS: {
- VectorType *v4f32 = Context.getVectorType(Type::FloatTy, 4);
- PointerType *v4f32Ptr = Context.getPointerTypeUnqual(v4f32);
+ VectorType *v4f32 = VectorType::get(Type::getFloatTy(Context), 4);
+ PointerType *v4f32Ptr = PointerType::getUnqual(v4f32);
Value *BC = Builder.CreateBitCast(Ops[0], v4f32Ptr, "tmp");
StoreInst *SI = Builder.CreateStore(Ops[1], BC);
SI->setAlignment(1);
@@ -338,8 +338,8 @@
return true;
}
case IX86_BUILTIN_STOREUPD: {
- VectorType *v2f64 = Context.getVectorType(Type::DoubleTy, 2);
- PointerType *v2f64Ptr = Context.getPointerTypeUnqual(v2f64);
+ VectorType *v2f64 = VectorType::get(Type::getDoubleTy(Context), 2);
+ PointerType *v2f64Ptr = PointerType::getUnqual(v2f64);
Value *BC = Builder.CreateBitCast(Ops[0], v2f64Ptr, "tmp");
StoreInst *SI = Builder.CreateStore(Ops[1], BC);
SI->setAlignment(1);
@@ -347,8 +347,8 @@
return true;
}
case IX86_BUILTIN_STOREDQU: {
- VectorType *v16i8 = Context.getVectorType(Type::Int8Ty, 16);
- PointerType *v16i8Ptr = Context.getPointerTypeUnqual(v16i8);
+ VectorType *v16i8 = VectorType::get(Type::getInt8Ty(Context), 16);
+ PointerType *v16i8Ptr = PointerType::getUnqual(v16i8);
Value *BC = Builder.CreateBitCast(Ops[0], v16i8Ptr, "tmp");
StoreInst *SI = Builder.CreateStore(Ops[1], BC);
SI->setAlignment(1);
@@ -356,20 +356,20 @@
return true;
}
case IX86_BUILTIN_LOADHPS: {
- PointerType *f64Ptr = Context.getPointerTypeUnqual(Type::DoubleTy);
+ PointerType *f64Ptr = PointerType::getUnqual(Type::getDoubleTy(Context));
Ops[1] = Builder.CreateBitCast(Ops[1], f64Ptr, "tmp");
Value *Load = Builder.CreateLoad(Ops[1], "tmp");
- Ops[1] = BuildVector(Load, Context.getUndef(Type::DoubleTy), NULL);
+ Ops[1] = BuildVector(Load, UndefValue::get(Type::getDoubleTy(Context)), NULL);
Ops[1] = Builder.CreateBitCast(Ops[1], ResultType, "tmp");
Result = BuildVectorShuffle(Ops[0], Ops[1], 0, 1, 4, 5);
Result = Builder.CreateBitCast(Result, ResultType, "tmp");
return true;
}
case IX86_BUILTIN_LOADLPS: {
- PointerType *f64Ptr = Context.getPointerTypeUnqual(Type::DoubleTy);
+ PointerType *f64Ptr = PointerType::getUnqual(Type::getDoubleTy(Context));
Ops[1] = Builder.CreateBitCast(Ops[1], f64Ptr, "tmp");
Value *Load = Builder.CreateLoad(Ops[1], "tmp");
- Ops[1] = BuildVector(Load, Context.getUndef(Type::DoubleTy), NULL);
+ Ops[1] = BuildVector(Load, UndefValue::get(Type::getDoubleTy(Context)), NULL);
Ops[1] = Builder.CreateBitCast(Ops[1], ResultType, "tmp");
Result = BuildVectorShuffle(Ops[0], Ops[1], 4, 5, 2, 3);
Result = Builder.CreateBitCast(Result, ResultType, "tmp");
@@ -377,7 +377,7 @@
}
case IX86_BUILTIN_LOADHPD: {
Value *Load = Builder.CreateLoad(Ops[1], "tmp");
- Ops[1] = BuildVector(Load, Context.getUndef(Type::DoubleTy), NULL);
+ Ops[1] = BuildVector(Load, UndefValue::get(Type::getDoubleTy(Context)), NULL);
Ops[1] = Builder.CreateBitCast(Ops[1], ResultType, "tmp");
Result = BuildVectorShuffle(Ops[0], Ops[1], 0, 2);
Result = Builder.CreateBitCast(Result, ResultType, "tmp");
@@ -385,27 +385,27 @@
}
case IX86_BUILTIN_LOADLPD: {
Value *Load = Builder.CreateLoad(Ops[1], "tmp");
- Ops[1] = BuildVector(Load, Context.getUndef(Type::DoubleTy), NULL);
+ Ops[1] = BuildVector(Load, UndefValue::get(Type::getDoubleTy(Context)), NULL);
Ops[1] = Builder.CreateBitCast(Ops[1], ResultType, "tmp");
Result = BuildVectorShuffle(Ops[0], Ops[1], 2, 1);
Result = Builder.CreateBitCast(Result, ResultType, "tmp");
return true;
}
case IX86_BUILTIN_STOREHPS: {
- VectorType *v2f64 = Context.getVectorType(Type::DoubleTy, 2);
- PointerType *f64Ptr = Context.getPointerTypeUnqual(Type::DoubleTy);
+ VectorType *v2f64 = VectorType::get(Type::getDoubleTy(Context), 2);
+ PointerType *f64Ptr = PointerType::getUnqual(Type::getDoubleTy(Context));
Ops[0] = Builder.CreateBitCast(Ops[0], f64Ptr, "tmp");
- Value *Idx = Context.getConstantInt(Type::Int32Ty, 1);
+ Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), 1);
Ops[1] = Builder.CreateBitCast(Ops[1], v2f64, "tmp");
Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "tmp");
Result = Builder.CreateStore(Ops[1], Ops[0]);
return true;
}
case IX86_BUILTIN_STORELPS: {
- VectorType *v2f64 = Context.getVectorType(Type::DoubleTy, 2);
- PointerType *f64Ptr = Context.getPointerTypeUnqual(Type::DoubleTy);
+ VectorType *v2f64 = VectorType::get(Type::getDoubleTy(Context), 2);
+ PointerType *f64Ptr = PointerType::getUnqual(Type::getDoubleTy(Context));
Ops[0] = Builder.CreateBitCast(Ops[0], f64Ptr, "tmp");
- Value *Idx = Context.getConstantInt(Type::Int32Ty, 0);
+ Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), 0);
Ops[1] = Builder.CreateBitCast(Ops[1], v2f64, "tmp");
Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "tmp");
Result = Builder.CreateStore(Ops[1], Ops[0]);
@@ -423,13 +423,13 @@
case IX86_BUILTIN_VEC_INIT_V4HI:
// Sometimes G++ promotes arguments to int.
for (unsigned i = 0; i != 4; ++i)
- Ops[i] = Builder.CreateIntCast(Ops[i], Type::Int16Ty, false, "tmp");
+ Ops[i] = Builder.CreateIntCast(Ops[i], Type::getInt16Ty(Context), false, "tmp");
Result = BuildVector(Ops[0], Ops[1], Ops[2], Ops[3], NULL);
return true;
case IX86_BUILTIN_VEC_INIT_V8QI:
// Sometimes G++ promotes arguments to int.
for (unsigned i = 0; i != 8; ++i)
- Ops[i] = Builder.CreateIntCast(Ops[i], Type::Int8Ty, false, "tmp");
+ Ops[i] = Builder.CreateIntCast(Ops[i], Type::getInt8Ty(Context), false, "tmp");
Result = BuildVector(Ops[0], Ops[1], Ops[2], Ops[3],
Ops[4], Ops[5], Ops[6], Ops[7], NULL);
return true;
@@ -443,10 +443,21 @@
case IX86_BUILTIN_VEC_EXT_V16QI:
Result = Builder.CreateExtractElement(Ops[0], Ops[1], "tmp");
return true;
+ case IX86_BUILTIN_VEC_SET_V16QI:
+ // Sometimes G++ promotes arguments to int.
+ Ops[1] = Builder.CreateIntCast(Ops[1], Type::getInt8Ty(Context), false, "tmp");
+ Result = Builder.CreateInsertElement(Ops[0], Ops[1], Ops[2], "tmp");
+ return true;
case IX86_BUILTIN_VEC_SET_V4HI:
case IX86_BUILTIN_VEC_SET_V8HI:
// GCC sometimes doesn't produce the right element type.
- Ops[1] = Builder.CreateIntCast(Ops[1], Type::Int16Ty, false, "tmp");
+ Ops[1] = Builder.CreateIntCast(Ops[1], Type::getInt16Ty(Context), false, "tmp");
+ Result = Builder.CreateInsertElement(Ops[0], Ops[1], Ops[2], "tmp");
+ return true;
+ case IX86_BUILTIN_VEC_SET_V4SI:
+ Result = Builder.CreateInsertElement(Ops[0], Ops[1], Ops[2], "tmp");
+ return true;
+ case IX86_BUILTIN_VEC_SET_V2DI:
Result = Builder.CreateInsertElement(Ops[0], Ops[1], Ops[2], "tmp");
return true;
case IX86_BUILTIN_CMPEQPS:
@@ -480,7 +491,7 @@
case IX86_BUILTIN_CMPNGEPS: PredCode = 6; flip = true; break;
case IX86_BUILTIN_CMPORDPS: PredCode = 7; break;
}
- Value *Pred = Context.getConstantInt(Type::Int8Ty, PredCode);
+ Value *Pred = ConstantInt::get(Type::getInt8Ty(Context), PredCode);
Value *Arg0 = Ops[0];
Value *Arg1 = Ops[1];
if (flip) std::swap(Arg0, Arg1);
@@ -513,7 +524,7 @@
case IX86_BUILTIN_CMPNLESS: PredCode = 6; break;
case IX86_BUILTIN_CMPORDSS: PredCode = 7; break;
}
- Value *Pred = Context.getConstantInt(Type::Int8Ty, PredCode);
+ Value *Pred = ConstantInt::get(Type::getInt8Ty(Context), PredCode);
Value *CallOps[3] = { Ops[0], Ops[1], Pred };
Result = Builder.CreateCall(cmpss, CallOps, CallOps+3, "tmp");
Result = Builder.CreateBitCast(Result, ResultType, "tmp");
@@ -550,7 +561,7 @@
case IX86_BUILTIN_CMPNGEPD: PredCode = 6; flip = true; break;
case IX86_BUILTIN_CMPORDPD: PredCode = 7; break;
}
- Value *Pred = Context.getConstantInt(Type::Int8Ty, PredCode);
+ Value *Pred = ConstantInt::get(Type::getInt8Ty(Context), PredCode);
Value *Arg0 = Ops[0];
Value *Arg1 = Ops[1];
if (flip) std::swap(Arg0, Arg1);
@@ -582,7 +593,7 @@
case IX86_BUILTIN_CMPNLESD: PredCode = 6; break;
case IX86_BUILTIN_CMPORDSD: PredCode = 7; break;
}
- Value *Pred = Context.getConstantInt(Type::Int8Ty, PredCode);
+ Value *Pred = ConstantInt::get(Type::getInt8Ty(Context), PredCode);
Value *CallOps[3] = { Ops[0], Ops[1], Pred };
Result = Builder.CreateCall(cmpsd, CallOps, CallOps+3, "tmp");
Result = Builder.CreateBitCast(Result, ResultType, "tmp");
@@ -591,18 +602,19 @@
case IX86_BUILTIN_LDMXCSR: {
Function *ldmxcsr =
Intrinsic::getDeclaration(TheModule, Intrinsic::x86_sse_ldmxcsr);
- Value *Ptr = CreateTemporary(Type::Int32Ty);
+ Value *Ptr = CreateTemporary(Type::getInt32Ty(Context));
Builder.CreateStore(Ops[0], Ptr);
- Ptr = Builder.CreateBitCast(Ptr, Context.getPointerTypeUnqual(Type::Int8Ty), "tmp");
+ Ptr = Builder.CreateBitCast(Ptr,
+ PointerType::getUnqual(Type::getInt8Ty(Context)), "tmp");
Result = Builder.CreateCall(ldmxcsr, Ptr);
return true;
}
case IX86_BUILTIN_STMXCSR: {
Function *stmxcsr =
Intrinsic::getDeclaration(TheModule, Intrinsic::x86_sse_stmxcsr);
- Value *Ptr = CreateTemporary(Type::Int32Ty);
- Value *BPtr = Builder.CreateBitCast(Ptr, Context.getPointerTypeUnqual(Type::Int8Ty),
- "tmp");
+ Value *Ptr = CreateTemporary(Type::getInt32Ty(Context));
+ Value *BPtr = Builder.CreateBitCast(Ptr,
+ PointerType::getUnqual(Type::getInt8Ty(Context)), "tmp");
Builder.CreateCall(stmxcsr, BPtr);
Result = Builder.CreateLoad(Ptr, "tmp");
@@ -672,10 +684,10 @@
// 32 and 64-bit integers are fine, as are float and double. Long double
// (which can be picked as the type for a union of 16 bytes) is not fine,
// as loads and stores of it get only 10 bytes.
- if (EltTy == Type::Int32Ty ||
- EltTy == Type::Int64Ty ||
- EltTy == Type::FloatTy ||
- EltTy == Type::DoubleTy ||
+ if (EltTy == Type::getInt32Ty(Context) ||
+ EltTy == Type::getInt64Ty(Context) ||
+ EltTy == Type::getFloatTy(Context) ||
+ EltTy == Type::getDoubleTy(Context) ||
isa<PointerType>(EltTy)) {
Elts.push_back(EltTy);
continue;
@@ -704,10 +716,10 @@
// short in 32-bit.
const Type *EltTy = STy->getElementType(0);
return !((TARGET_64BIT && (EltTy->isInteger() ||
- EltTy == Type::FloatTy ||
- EltTy == Type::DoubleTy)) ||
- EltTy == Type::Int16Ty ||
- EltTy == Type::Int8Ty);
+ EltTy == Type::getFloatTy(Context) ||
+ EltTy == Type::getDoubleTy(Context))) ||
+ EltTy == Type::getInt16Ty(Context) ||
+ EltTy == Type::getInt8Ty(Context));
}
/* Target hook for llvm-abi.h. It returns true if an aggregate of the
@@ -748,7 +760,7 @@
++NumXMMs;
} else if (Ty->isInteger() || isa<PointerType>(Ty)) {
++NumGPRs;
- } else if (Ty==Type::VoidTy) {
+ } else if (Ty==Type::getVoidTy(Context)) {
// Padding bytes that are not passed anywhere
;
} else {
@@ -836,7 +848,7 @@
switch (Class[i]) {
case X86_64_INTEGER_CLASS:
case X86_64_INTEGERSI_CLASS:
- Elts.push_back(Type::Int64Ty);
+ Elts.push_back(Type::getInt64Ty(Context));
totallyEmpty = false;
Bytes -= 8;
break;
@@ -851,10 +863,10 @@
// 5. 2 x SSE, size is 16: 2 x Double.
if ((NumClasses-i) == 1) {
if (Bytes == 8) {
- Elts.push_back(Type::DoubleTy);
+ Elts.push_back(Type::getDoubleTy(Context));
Bytes -= 8;
} else if (Bytes == 4) {
- Elts.push_back (Type::FloatTy);
+ Elts.push_back (Type::getFloatTy(Context));
Bytes -= 4;
} else
assert(0 && "Not yet handled!");
@@ -868,46 +880,46 @@
if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
if (VTy->getNumElements() == 2) {
if (VTy->getElementType()->isInteger()) {
- Elts.push_back(Context.getVectorType(Type::Int64Ty, 2));
+ Elts.push_back(VectorType::get(Type::getInt64Ty(Context), 2));
} else {
- Elts.push_back(Context.getVectorType(Type::DoubleTy, 2));
+ Elts.push_back(VectorType::get(Type::getDoubleTy(Context), 2));
}
Bytes -= 8;
} else {
assert(VTy->getNumElements() == 4);
if (VTy->getElementType()->isInteger()) {
- Elts.push_back(Context.getVectorType(Type::Int32Ty, 4));
+ Elts.push_back(VectorType::get(Type::getInt32Ty(Context), 4));
} else {
- Elts.push_back(Context.getVectorType(Type::FloatTy, 4));
+ Elts.push_back(VectorType::get(Type::getFloatTy(Context), 4));
}
Bytes -= 4;
}
} else if (llvm_x86_is_all_integer_types(Ty)) {
- Elts.push_back(Context.getVectorType(Type::Int32Ty, 4));
+ Elts.push_back(VectorType::get(Type::getInt32Ty(Context), 4));
Bytes -= 4;
} else {
- Elts.push_back(Context.getVectorType(Type::FloatTy, 4));
+ Elts.push_back(VectorType::get(Type::getFloatTy(Context), 4));
Bytes -= 4;
}
} else if (Class[i+1] == X86_64_SSESF_CLASS) {
assert(Bytes == 12 && "Not yet handled!");
- Elts.push_back(Type::DoubleTy);
- Elts.push_back(Type::FloatTy);
+ Elts.push_back(Type::getDoubleTy(Context));
+ Elts.push_back(Type::getFloatTy(Context));
Bytes -= 12;
} else if (Class[i+1] == X86_64_SSE_CLASS) {
- Elts.push_back(Type::DoubleTy);
- Elts.push_back(Type::DoubleTy);
+ Elts.push_back(Type::getDoubleTy(Context));
+ Elts.push_back(Type::getDoubleTy(Context));
Bytes -= 16;
} else if (Class[i+1] == X86_64_SSEDF_CLASS && Bytes == 16) {
- Elts.push_back(Context.getVectorType(Type::FloatTy, 2));
- Elts.push_back(Type::DoubleTy);
+ Elts.push_back(VectorType::get(Type::getFloatTy(Context), 2));
+ Elts.push_back(Type::getDoubleTy(Context));
} else if (Class[i+1] == X86_64_INTEGER_CLASS) {
- Elts.push_back(Context.getVectorType(Type::FloatTy, 2));
- Elts.push_back(Type::Int64Ty);
+ Elts.push_back(VectorType::get(Type::getFloatTy(Context), 2));
+ Elts.push_back(Type::getInt64Ty(Context));
} else if (Class[i+1] == X86_64_NO_CLASS) {
// padding bytes, don't pass
- Elts.push_back(Type::DoubleTy);
- Elts.push_back(Type::VoidTy);
+ Elts.push_back(Type::getDoubleTy(Context));
+ Elts.push_back(Type::getVoidTy(Context));
Bytes -= 16;
} else
assert(0 && "Not yet handled!");
@@ -917,12 +929,12 @@
break;
case X86_64_SSESF_CLASS:
totallyEmpty = false;
- Elts.push_back(Type::FloatTy);
+ Elts.push_back(Type::getFloatTy(Context));
Bytes -= 4;
break;
case X86_64_SSEDF_CLASS:
totallyEmpty = false;
- Elts.push_back(Type::DoubleTy);
+ Elts.push_back(Type::getDoubleTy(Context));
Bytes -= 8;
break;
case X86_64_X87_CLASS:
@@ -932,7 +944,7 @@
case X86_64_NO_CLASS:
// Padding bytes that are not passed (unless the entire object consists
// of padding)
- Elts.push_back(Type::VoidTy);
+ Elts.push_back(Type::getVoidTy(Context));
Bytes -= 8;
break;
default: assert(0 && "Unexpected register class!");
@@ -1092,13 +1104,13 @@
const Type *Ty = ConvertType(type);
unsigned Size = getTargetData().getTypeAllocSize(Ty);
if (Size == 0)
- return Type::VoidTy;
+ return Type::getVoidTy(Context);
else if (Size == 1)
- return Type::Int8Ty;
+ return Type::getInt8Ty(Context);
else if (Size == 2)
- return Type::Int16Ty;
+ return Type::getInt16Ty(Context);
else if (Size <= 4)
- return Type::Int32Ty;
+ return Type::getInt32Ty(Context);
// Check if Ty should be returned using multiple value return instruction.
if (llvm_suitable_multiple_ret_value_type(Ty, type))
@@ -1111,7 +1123,7 @@
enum machine_mode Mode = type_natural_mode(type, NULL);
int NumClasses = classify_argument(Mode, type, Class, 0);
if (NumClasses == 0)
- return Type::Int64Ty;
+ return Type::getInt64Ty(Context);
if (NumClasses == 1) {
if (Class[0] == X86_64_INTEGERSI_CLASS ||
@@ -1121,13 +1133,13 @@
(Mode == BLKmode) ? int_size_in_bytes(type) :
(int) GET_MODE_SIZE(Mode);
if (Bytes>4)
- return Type::Int64Ty;
+ return Type::getInt64Ty(Context);
else if (Bytes>2)
- return Type::Int32Ty;
+ return Type::getInt32Ty(Context);
else if (Bytes>1)
- return Type::Int16Ty;
+ return Type::getInt16Ty(Context);
else
- return Type::Int8Ty;
+ return Type::getInt8Ty(Context);
}
assert(0 && "Unexpected type!");
}
@@ -1136,22 +1148,22 @@
if (Class[0] == X86_64_INTEGER_CLASS ||
Class[0] == X86_64_NO_CLASS ||
Class[0] == X86_64_INTEGERSI_CLASS)
- return Type::Int64Ty;
+ return Type::getInt64Ty(Context);
else if (Class[0] == X86_64_SSE_CLASS || Class[0] == X86_64_SSEDF_CLASS)
- return Type::DoubleTy;
+ return Type::getDoubleTy(Context);
else if (Class[0] == X86_64_SSESF_CLASS)
- return Type::FloatTy;
+ return Type::getFloatTy(Context);
assert(0 && "Unexpected type!");
}
if (Class[0] == X86_64_NO_CLASS) {
*Offset = 8;
if (Class[1] == X86_64_INTEGERSI_CLASS ||
Class[1] == X86_64_INTEGER_CLASS)
- return Type::Int64Ty;
+ return Type::getInt64Ty(Context);
else if (Class[1] == X86_64_SSE_CLASS || Class[1] == X86_64_SSEDF_CLASS)
- return Type::DoubleTy;
+ return Type::getDoubleTy(Context);
else if (Class[1] == X86_64_SSESF_CLASS)
- return Type::FloatTy;
+ return Type::getFloatTy(Context);
assert(0 && "Unexpected type!");
}
assert(0 && "Unexpected type!");
@@ -1159,11 +1171,11 @@
assert(0 && "Unexpected type!");
} else {
if (Size <= 8)
- return Type::Int64Ty;
+ return Type::getInt64Ty(Context);
else if (Size <= 16)
- return Context.getIntegerType(128);
+ return IntegerType::get(Context, 128);
else if (Size <= 32)
- return Context.getIntegerType(256);
+ return IntegerType::get(Context, 256);
}
return NULL;
}
@@ -1201,7 +1213,7 @@
switch (Class[i]) {
case X86_64_INTEGER_CLASS:
case X86_64_INTEGERSI_CLASS:
- Elts.push_back(Type::Int64Ty);
+ Elts.push_back(Type::getInt64Ty(Context));
Bytes -= 8;
break;
case X86_64_SSE_CLASS:
@@ -1215,10 +1227,10 @@
// 6. 1 x SSE, 1 x NO: Second is padding, pass as double.
if ((NumClasses-i) == 1) {
if (Bytes == 8) {
- Elts.push_back(Type::DoubleTy);
+ Elts.push_back(Type::getDoubleTy(Context));
Bytes -= 8;
} else if (Bytes == 4) {
- Elts.push_back(Type::FloatTy);
+ Elts.push_back(Type::getFloatTy(Context));
Bytes -= 4;
} else
assert(0 && "Not yet handled!");
@@ -1232,42 +1244,42 @@
if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
if (VTy->getNumElements() == 2) {
if (VTy->getElementType()->isInteger())
- Elts.push_back(Context.getVectorType(Type::Int64Ty, 2));
+ Elts.push_back(VectorType::get(Type::getInt64Ty(Context), 2));
else
- Elts.push_back(Context.getVectorType(Type::DoubleTy, 2));
+ Elts.push_back(VectorType::get(Type::getDoubleTy(Context), 2));
Bytes -= 8;
} else {
assert(VTy->getNumElements() == 4);
if (VTy->getElementType()->isInteger())
- Elts.push_back(Context.getVectorType(Type::Int32Ty, 4));
+ Elts.push_back(VectorType::get(Type::getInt32Ty(Context), 4));
else
- Elts.push_back(Context.getVectorType(Type::FloatTy, 4));
+ Elts.push_back(VectorType::get(Type::getFloatTy(Context), 4));
Bytes -= 4;
}
} else if (llvm_x86_is_all_integer_types(Ty)) {
- Elts.push_back(Context.getVectorType(Type::Int32Ty, 4));
+ Elts.push_back(VectorType::get(Type::getInt32Ty(Context), 4));
Bytes -= 4;
} else {
- Elts.push_back(Context.getVectorType(Type::FloatTy, 4));
+ Elts.push_back(VectorType::get(Type::getFloatTy(Context), 4));
Bytes -= 4;
}
} else if (Class[i+1] == X86_64_SSESF_CLASS) {
assert(Bytes == 12 && "Not yet handled!");
- Elts.push_back(Type::DoubleTy);
- Elts.push_back(Type::FloatTy);
+ Elts.push_back(Type::getDoubleTy(Context));
+ Elts.push_back(Type::getFloatTy(Context));
Bytes -= 12;
} else if (Class[i+1] == X86_64_SSE_CLASS) {
- Elts.push_back(Type::DoubleTy);
- Elts.push_back(Type::DoubleTy);
+ Elts.push_back(Type::getDoubleTy(Context));
+ Elts.push_back(Type::getDoubleTy(Context));
Bytes -= 16;
} else if (Class[i+1] == X86_64_SSEDF_CLASS && Bytes == 16) {
- Elts.push_back(Context.getVectorType(Type::FloatTy, 2));
- Elts.push_back(Type::DoubleTy);
+ Elts.push_back(VectorType::get(Type::getFloatTy(Context), 2));
+ Elts.push_back(Type::getDoubleTy(Context));
} else if (Class[i+1] == X86_64_INTEGER_CLASS) {
- Elts.push_back(Context.getVectorType(Type::FloatTy, 2));
- Elts.push_back(Type::Int64Ty);
+ Elts.push_back(VectorType::get(Type::getFloatTy(Context), 2));
+ Elts.push_back(Type::getInt64Ty(Context));
} else if (Class[i+1] == X86_64_NO_CLASS) {
- Elts.push_back(Type::DoubleTy);
+ Elts.push_back(Type::getDoubleTy(Context));
Bytes -= 16;
} else {
assert(0 && "Not yet handled!");
@@ -1277,21 +1289,21 @@
assert(0 && "Not yet handled!");
break;
case X86_64_SSESF_CLASS:
- Elts.push_back(Type::FloatTy);
+ Elts.push_back(Type::getFloatTy(Context));
Bytes -= 4;
break;
case X86_64_SSEDF_CLASS:
- Elts.push_back(Type::DoubleTy);
+ Elts.push_back(Type::getDoubleTy(Context));
Bytes -= 8;
break;
case X86_64_X87_CLASS:
case X86_64_X87UP_CLASS:
case X86_64_COMPLEX_X87_CLASS:
- Elts.push_back(Type::X86_FP80Ty);
+ Elts.push_back(Type::getX86_FP80Ty(Context));
break;
case X86_64_NO_CLASS:
// padding bytes.
- Elts.push_back(Type::Int64Ty);
+ Elts.push_back(Type::getInt64Ty(Context));
break;
default: assert(0 && "Unexpected register class!");
}
@@ -1311,14 +1323,14 @@
// Special handling for _Complex.
if (llvm_x86_should_not_return_complex_in_memory(type)) {
- ElementTypes.push_back(Type::X86_FP80Ty);
- ElementTypes.push_back(Type::X86_FP80Ty);
- return Context.getStructType(ElementTypes, STy->isPacked());
+ ElementTypes.push_back(Type::getX86_FP80Ty(Context));
+ ElementTypes.push_back(Type::getX86_FP80Ty(Context));
+ return StructType::get(Context, ElementTypes, STy->isPacked());
}
std::vector<const Type*> GCCElts;
llvm_x86_64_get_multiple_return_reg_classes(type, Ty, GCCElts);
- return Context.getStructType(GCCElts, false);
+ return StructType::get(Context, GCCElts, false);
}
// llvm_x86_extract_mrv_array_element - Helper function that help extract
@@ -1338,12 +1350,12 @@
Value *EVI = Builder.CreateExtractValue(Src, SrcFieldNo, "mrv_gr");
const StructType *STy = cast<StructType>(Src->getType());
llvm::Value *Idxs[3];
- Idxs[0] = Context.getConstantInt(llvm::Type::Int32Ty, 0);
- Idxs[1] = Context.getConstantInt(llvm::Type::Int32Ty, DestFieldNo);
- Idxs[2] = Context.getConstantInt(llvm::Type::Int32Ty, DestElemNo);
+ Idxs[0] = ConstantInt::get(llvm::Type::getInt32Ty(Context), 0);
+ Idxs[1] = ConstantInt::get(llvm::Type::getInt32Ty(Context), DestFieldNo);
+ Idxs[2] = ConstantInt::get(llvm::Type::getInt32Ty(Context), DestElemNo);
Value *GEP = Builder.CreateGEP(Dest, Idxs, Idxs+3, "mrv_gep");
if (isa<VectorType>(STy->getElementType(SrcFieldNo))) {
- Value *ElemIndex = Context.getConstantInt(Type::Int32Ty, SrcElemNo);
+ Value *ElemIndex = ConstantInt::get(Type::getInt32Ty(Context), SrcElemNo);
Value *EVIElem = Builder.CreateExtractElement(EVI, ElemIndex, "mrv");
Builder.CreateStore(EVIElem, GEP, isVolatile);
} else {
@@ -1376,12 +1388,12 @@
Value *EVI = Builder.CreateExtractValue(Src, 0, "mrv_gr");
- Value *E0Index = Context.getConstantInt(Type::Int32Ty, 0);
+ Value *E0Index = ConstantInt::get(Type::getInt32Ty(Context), 0);
Value *EVI0 = Builder.CreateExtractElement(EVI, E0Index, "mrv.v");
Value *GEP0 = Builder.CreateStructGEP(Dest, 0, "mrv_gep");
Builder.CreateStore(EVI0, GEP0, isVolatile);
- Value *E1Index = Context.getConstantInt(Type::Int32Ty, 1);
+ Value *E1Index = ConstantInt::get(Type::getInt32Ty(Context), 1);
Value *EVI1 = Builder.CreateExtractElement(EVI, E1Index, "mrv.v");
Value *GEP1 = Builder.CreateStructGEP(Dest, 1, "mrv_gep");
Builder.CreateStore(EVI1, GEP1, isVolatile);
@@ -1408,16 +1420,16 @@
// Special treatement for _Complex.
if (const StructType *ComplexType = dyn_cast<StructType>(DestElemType)) {
llvm::Value *Idxs[3];
- Idxs[0] = Context.getConstantInt(llvm::Type::Int32Ty, 0);
- Idxs[1] = Context.getConstantInt(llvm::Type::Int32Ty, DNO);
+ Idxs[0] = ConstantInt::get(llvm::Type::getInt32Ty(Context), 0);
+ Idxs[1] = ConstantInt::get(llvm::Type::getInt32Ty(Context), DNO);
- Idxs[2] = Context.getConstantInt(llvm::Type::Int32Ty, 0);
+ Idxs[2] = ConstantInt::get(llvm::Type::getInt32Ty(Context), 0);
Value *GEP = Builder.CreateGEP(Dest, Idxs, Idxs+3, "mrv_gep");
Value *EVI = Builder.CreateExtractValue(Src, 0, "mrv_gr");
Builder.CreateStore(EVI, GEP, isVolatile);
++SNO;
- Idxs[2] = Context.getConstantInt(llvm::Type::Int32Ty, 1);
+ Idxs[2] = ConstantInt::get(llvm::Type::getInt32Ty(Context), 1);
GEP = Builder.CreateGEP(Dest, Idxs, Idxs+3, "mrv_gep");
EVI = Builder.CreateExtractValue(Src, 1, "mrv_gr");
Builder.CreateStore(EVI, GEP, isVolatile);
Modified: gcc-plugin/trunk/llvm-abi.h
URL: http://llvm.org/viewvc/llvm-project/gcc-plugin/trunk/llvm-abi.h?rev=79452&r1=79451&r2=79452&view=diff
==============================================================================
--- gcc-plugin/trunk/llvm-abi.h (original)
+++ gcc-plugin/trunk/llvm-abi.h Wed Aug 19 14:49:08 2009
@@ -29,9 +29,9 @@
#define LLVM_ABI_H
// LLVM headers
+#include "llvm/Attributes.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
-#include "llvm/Attributes.h"
#include "llvm/Target/TargetData.h"
// System headers
@@ -218,19 +218,19 @@
unsigned Size = getTargetData().getTypeAllocSize(Ty);
*Offset = 0;
if (Size == 0)
- return Type::VoidTy;
+ return Type::getVoidTy(getGlobalContext());
else if (Size == 1)
- return Type::Int8Ty;
+ return Type::getInt8Ty(getGlobalContext());
else if (Size == 2)
- return Type::Int16Ty;
+ return Type::getInt16Ty(getGlobalContext());
else if (Size <= 4)
- return Type::Int32Ty;
+ return Type::getInt32Ty(getGlobalContext());
else if (Size <= 8)
- return Type::Int64Ty;
+ return Type::getInt64Ty(getGlobalContext());
else if (Size <= 16)
- return IntegerType::get(128);
+ return IntegerType::get(getGlobalContext(), 128);
else if (Size <= 32)
- return IntegerType::get(256);
+ return IntegerType::get(getGlobalContext(), 256);
return NULL;
}
@@ -277,7 +277,7 @@
// registers. The routine should also return by reference a vector of the
// types of the registers being used. The default is false.
#ifndef LLVM_SHOULD_PASS_AGGREGATE_IN_MIXED_REGS
-#define LLVM_SHOULD_PASS_AGGREGATE_IN_MIXED_REGS(T, TY, E) \
+#define LLVM_SHOULD_PASS_AGGREGATE_IN_MIXED_REGS(T, TY, CC, E) \
false
#endif
@@ -287,12 +287,12 @@
// the aggregate. Note, this routine should return false if none of the needed
// registers are available.
#ifndef LLVM_AGGREGATE_PARTIALLY_PASSED_IN_REGS
-#define LLVM_AGGREGATE_PARTIALLY_PASSED_IN_REGS(E, SE, ISR) \
+#define LLVM_AGGREGATE_PARTIALLY_PASSED_IN_REGS(E, SE, ISR, CC) \
false
#endif
// LLVM_BYVAL_ALIGNMENT - Returns the alignment of the type in bytes, if known,
-// in the context of its use as a function parameter.
+// in the context of its use as a function parameter.
// Note that the alignment in the TYPE node is usually the alignment appropriate
// when the type is used within a struct, which may or may not be appropriate
// here.
@@ -384,7 +384,7 @@
void HandleReturnType(tree type, tree fn, bool isBuiltin) {
unsigned Offset = 0;
const Type *Ty = ConvertType(type);
- if (Ty->getTypeID() == Type::VectorTyID) {
+ if (isa<VectorType>(Ty)) {
// Vector handling is weird on x86. In particular builtin and
// non-builtin function of the same return types can use different
// calling conventions.
@@ -395,7 +395,7 @@
C.HandleScalarShadowResult(PointerType::getUnqual(Ty), false);
else
C.HandleScalarResult(Ty);
- } else if (Ty->isSingleValueType() || Ty == Type::VoidTy) {
+ } else if (Ty->isSingleValueType() || Ty == Type::getVoidTy(getGlobalContext())) {
// Return scalar values normally.
C.HandleScalarResult(Ty);
} else if (doNotUseShadowReturn(type, fn)) {
@@ -441,11 +441,16 @@
// Figure out if this field is zero bits wide, e.g. {} or [0 x int]. Do
// not include variable sized fields here.
std::vector<const Type*> Elts;
- if (isPassedByInvisibleReference(type)) { // variable size -> by-ref.
+ if (Ty == Type::getVoidTy(getGlobalContext())) {
+ // Handle void explicitly as an opaque type.
+ const Type *OpTy = OpaqueType::get(getGlobalContext());
+ C.HandleScalarArgument(OpTy, type);
+ ScalarElts.push_back(OpTy);
+ } else if (isPassedByInvisibleReference(type)) { // variable size -> by-ref.
const Type *PtrTy = PointerType::getUnqual(Ty);
C.HandleByInvisibleReferenceArgument(PtrTy, type);
ScalarElts.push_back(PtrTy);
- } else if (Ty->getTypeID()==Type::VectorTyID) {
+ } else if (isa<VectorType>(Ty)) {
if (LLVM_SHOULD_PASS_VECTOR_IN_INTEGER_REGS(type)) {
PassInIntegerRegisters(type, Ty, ScalarElts, 0, false);
} else if (LLVM_SHOULD_PASS_VECTOR_USING_BYVAL_ATTR(type)) {
@@ -464,9 +469,12 @@
ScalarElts.push_back(Ty);
} else if (LLVM_SHOULD_PASS_AGGREGATE_AS_FCA(type, Ty)) {
C.HandleFCAArgument(Ty, type);
- } else if (LLVM_SHOULD_PASS_AGGREGATE_IN_MIXED_REGS(type, Ty, Elts)) {
+ } else if (LLVM_SHOULD_PASS_AGGREGATE_IN_MIXED_REGS(type, Ty,
+ C.getCallingConv(),
+ Elts)) {
if (!LLVM_AGGREGATE_PARTIALLY_PASSED_IN_REGS(Elts, ScalarElts,
- C.isShadowReturn()))
+ C.isShadowReturn(),
+ C.getCallingConv()))
PassInMixedRegisters(type, Ty, Elts, ScalarElts);
else {
C.HandleByValArgument(Ty, type);
@@ -591,7 +599,8 @@
// don't bitcast aggregate value to Int64 if its alignment is different
// from Int64 alignment. ARM backend needs this.
unsigned Align = TYPE_ALIGN(type)/8;
- unsigned Int64Align = getTargetData().getABITypeAlignment(Type::Int64Ty);
+ unsigned Int64Align =
+ getTargetData().getABITypeAlignment(Type::getInt64Ty(getGlobalContext()));
bool UseInt64 = DontCheckAlignment ? true : (Align >= Int64Align);
// FIXME: In cases where we can, we should use the original struct.
@@ -606,25 +615,26 @@
const Type *ArrayElementType = NULL;
if (ArraySize) {
Size = Size % ElementSize;
- ArrayElementType = (UseInt64)?Type::Int64Ty:Type::Int32Ty;
+ ArrayElementType = (UseInt64) ?
+ Type::getInt64Ty(getGlobalContext()) : Type::getInt32Ty(getGlobalContext());
ATy = ArrayType::get(ArrayElementType, ArraySize);
Elts.push_back(ATy);
}
if (Size >= 4) {
- Elts.push_back(Type::Int32Ty);
+ Elts.push_back(Type::getInt32Ty(getGlobalContext()));
Size -= 4;
}
if (Size >= 2) {
- Elts.push_back(Type::Int16Ty);
+ Elts.push_back(Type::getInt16Ty(getGlobalContext()));
Size -= 2;
}
if (Size >= 1) {
- Elts.push_back(Type::Int8Ty);
+ Elts.push_back(Type::getInt8Ty(getGlobalContext()));
Size -= 1;
}
assert(Size == 0 && "Didn't cover value?");
- const StructType *STy = StructType::get(Elts, false);
+ const StructType *STy = StructType::get(getGlobalContext(), Elts, false);
unsigned i = 0;
if (ArraySize) {
@@ -656,13 +666,13 @@
// that occupies storage but has no useful information, and is not passed
// anywhere". Happens on x86-64.
std::vector<const Type*> Elts(OrigElts);
- const Type* wordType = getTargetData().getPointerSize() == 4 ? Type::Int32Ty :
- Type::Int64Ty;
+ const Type* wordType = getTargetData().getPointerSize() == 4 ?
+ Type::getInt32Ty(getGlobalContext()) : Type::getInt64Ty(getGlobalContext());
for (unsigned i=0, e=Elts.size(); i!=e; ++i)
- if (OrigElts[i]==Type::VoidTy)
+ if (OrigElts[i]==Type::getVoidTy(getGlobalContext()))
Elts[i] = wordType;
- const StructType *STy = StructType::get(Elts, false);
+ const StructType *STy = StructType::get(getGlobalContext(), Elts, false);
unsigned Size = getTargetData().getTypeAllocSize(STy);
const StructType *InSTy = dyn_cast<StructType>(Ty);
@@ -681,7 +691,7 @@
}
}
for (unsigned i = 0, e = Elts.size(); i != e; ++i) {
- if (OrigElts[i] != Type::VoidTy) {
+ if (OrigElts[i] != Type::getVoidTy(getGlobalContext())) {
C.EnterField(i, STy);
unsigned RealSize = 0;
if (LastEltSizeDiff && i == (e - 1))
@@ -730,7 +740,7 @@
void HandleReturnType(tree type, tree fn, bool isBuiltin) {
unsigned Offset = 0;
const Type *Ty = ConvertType(type);
- if (Ty->getTypeID() == Type::VectorTyID) {
+ if (isa<VectorType>(Ty)) {
// Vector handling is weird on x86. In particular builtin and
// non-builtin function of the same return types can use different
// calling conventions.
@@ -741,7 +751,7 @@
C.HandleScalarShadowResult(PointerType::getUnqual(Ty), false);
else
C.HandleScalarResult(Ty);
- } else if (Ty->isSingleValueType() || Ty == Type::VoidTy) {
+ } else if (Ty->isSingleValueType() || Ty == Type::getVoidTy(getGlobalContext())) {
// Return scalar values normally.
C.HandleScalarResult(Ty);
} else if (doNotUseShadowReturn(type, fn)) {
@@ -819,7 +829,7 @@
if (Attributes) {
*Attributes |= Attr;
}
- } else if (Ty->getTypeID()==Type::VectorTyID) {
+ } else if (isa<VectorType>(Ty)) {
if (LLVM_SHOULD_PASS_VECTOR_IN_INTEGER_REGS(type)) {
PassInIntegerRegisters(type, Ty, ScalarElts, 0, false);
} else if (LLVM_SHOULD_PASS_VECTOR_USING_BYVAL_ATTR(type)) {
@@ -858,7 +868,7 @@
Attr |= Attribute::InReg;
NumGPR = NumArgRegs;
}
- } else if (Ty->getTypeID() == Type::PointerTyID) {
+ } else if (isa<PointerType>(Ty)) {
if (NumGPR < NumArgRegs) {
NumGPR++;
} else {
@@ -866,15 +876,16 @@
}
// We don't care about arguments passed in Floating-point or vector
// registers.
- } else if (!(Ty->isFloatingPoint() ||
- Ty->getTypeID() == Type::VectorTyID)) {
+ } else if (!(Ty->isFloatingPoint() || isa<VectorType>(Ty))) {
abort();
}
if (Attributes) {
*Attributes |= Attr;
}
- } else if (LLVM_SHOULD_PASS_AGGREGATE_IN_MIXED_REGS(type, Ty, Elts)) {
+ } else if (LLVM_SHOULD_PASS_AGGREGATE_IN_MIXED_REGS(type, Ty,
+ C.getCallingConv(),
+ Elts)) {
HOST_WIDE_INT SrcSize = int_size_in_bytes(type);
// With the SVR4 ABI, the only aggregates which are passed in registers
@@ -1031,7 +1042,8 @@
// don't bitcast aggregate value to Int64 if its alignment is different
// from Int64 alignment. ARM backend needs this.
unsigned Align = TYPE_ALIGN(type)/8;
- unsigned Int64Align = getTargetData().getABITypeAlignment(Type::Int64Ty);
+ unsigned Int64Align =
+ getTargetData().getABITypeAlignment(Type::getInt64Ty(getGlobalContext()));
bool UseInt64 = DontCheckAlignment ? true : (Align >= Int64Align);
// FIXME: In cases where we can, we should use the original struct.
@@ -1046,25 +1058,26 @@
const Type *ArrayElementType = NULL;
if (ArraySize) {
Size = Size % ElementSize;
- ArrayElementType = (UseInt64)?Type::Int64Ty:Type::Int32Ty;
+ ArrayElementType = (UseInt64) ?
+ Type::getInt64Ty(getGlobalContext()) : Type::getInt32Ty(getGlobalContext());
ATy = ArrayType::get(ArrayElementType, ArraySize);
Elts.push_back(ATy);
}
if (Size >= 4) {
- Elts.push_back(Type::Int32Ty);
+ Elts.push_back(Type::getInt32Ty(getGlobalContext()));
Size -= 4;
}
if (Size >= 2) {
- Elts.push_back(Type::Int16Ty);
+ Elts.push_back(Type::getInt16Ty(getGlobalContext()));
Size -= 2;
}
if (Size >= 1) {
- Elts.push_back(Type::Int8Ty);
+ Elts.push_back(Type::getInt8Ty(getGlobalContext()));
Size -= 1;
}
assert(Size == 0 && "Didn't cover value?");
- const StructType *STy = StructType::get(Elts, false);
+ const StructType *STy = StructType::get(getGlobalContext(), Elts, false);
unsigned i = 0;
if (ArraySize) {
@@ -1098,13 +1111,13 @@
// that occupies storage but has no useful information, and is not passed
// anywhere". Happens on x86-64.
std::vector<const Type*> Elts(OrigElts);
- const Type* wordType = getTargetData().getPointerSize() == 4 ? Type::Int32Ty :
- Type::Int64Ty;
+ const Type* wordType = getTargetData().getPointerSize() == 4
+ ? Type::getInt32Ty(getGlobalContext()) : Type::getInt64Ty(getGlobalContext());
for (unsigned i=0, e=Elts.size(); i!=e; ++i)
- if (OrigElts[i]==Type::VoidTy)
+ if (OrigElts[i]==Type::getVoidTy(getGlobalContext()))
Elts[i] = wordType;
- const StructType *STy = StructType::get(Elts, false);
+ const StructType *STy = StructType::get(getGlobalContext(), Elts, false);
unsigned Size = getTargetData().getTypeAllocSize(STy);
const StructType *InSTy = dyn_cast<StructType>(Ty);
@@ -1123,7 +1136,7 @@
}
}
for (unsigned i = 0, e = Elts.size(); i != e; ++i) {
- if (OrigElts[i] != Type::VoidTy) {
+ if (OrigElts[i] != Type::getVoidTy(getGlobalContext())) {
C.EnterField(i, STy);
unsigned RealSize = 0;
if (LastEltSizeDiff && i == (e - 1))
Modified: gcc-plugin/trunk/llvm-backend.cpp
URL: http://llvm.org/viewvc/llvm-project/gcc-plugin/trunk/llvm-backend.cpp?rev=79452&r1=79451&r2=79452&view=diff
==============================================================================
--- gcc-plugin/trunk/llvm-backend.cpp (original)
+++ gcc-plugin/trunk/llvm-backend.cpp Wed Aug 19 14:49:08 2009
@@ -67,6 +67,7 @@
#include "cgraph.h"
#include "diagnostic.h"
+#include "except.h"
#include "flags.h"
#include "function.h"
#include "gcc-plugin.h"
@@ -75,7 +76,6 @@
#include "output.h"
#include "params.h"
#include "plugin-version.h"
-#include "tm.h"
#include "toplev.h"
#include "tree-inline.h"
#include "tree-flow.h"
@@ -117,6 +117,7 @@
std::vector<std::pair<Constant*, int> > StaticCtors, StaticDtors;
SmallSetVector<Constant*, 32> AttributeUsedGlobals;
+SmallSetVector<Constant*, 32> AttributeCompilerUsedGlobals;
std::vector<Constant*> AttributeAnnotateGlobals;
/// PerFunctionPasses - This is the list of cleanup passes run per-function
@@ -151,7 +152,7 @@
//TODO/// function-local decls to be recycled after the function is done.
//TODOstatic std::vector<unsigned> LocalLLVMValueIDs;
//TODO
-//TODO// Remember the LLVM value for GCC tree node.
+//TODO/// llvm_set_decl - Remember the LLVM value for GCC tree node.
//TODOvoid llvm_set_decl(tree Tr, Value *V) {
//TODO
//TODO // If there is not any value then do not add new LLVMValues entry.
@@ -179,7 +180,8 @@
//TODO LocalLLVMValueIDs.push_back(Index);
//TODO}
//TODO
-//TODO// Return TRUE if there is a LLVM Value associate with GCC tree node.
+//TODO/// llvm_set_decl_p - Return TRUE if there is an LLVM Value associated with
+//TODO/// the GCC tree node.
//TODObool llvm_set_decl_p(tree Tr) {
//TODO unsigned Index = GET_DECL_LLVM_INDEX(Tr);
//TODO if (Index == 0)
@@ -188,10 +190,10 @@
//TODO return LLVMValues[Index - 1] != 0;
//TODO}
//TODO
-//TODO// Get LLVM Value for the GCC tree node based on LLVMValues vector index.
-//TODO// If there is not any value associated then use make_decl_llvm() to
-//TODO// make LLVM value. When GCC tree node is initialized, it has 0 as the
-//TODO// index value. This is why all recorded indices are offset by 1.
+//TODO/// llvm_get_decl - Get LLVM Value for the GCC tree node based on LLVMValues
+//TODO/// vector index. If there is not any value associated then use
+//TODO/// make_decl_llvm() to make LLVM value. When GCC tree node is initialized, it
+//TODO/// has 0 as the index value. This is why all recorded indices are offset by 1.
//TODOValue *llvm_get_decl(tree Tr) {
//TODO
//TODO unsigned Index = GET_DECL_LLVM_INDEX(Tr);
@@ -219,6 +221,11 @@
//TODO AttributeUsedGlobals.insert(New);
//TODO }
//TODO
+//TODO if (AttributeCompilerUsedGlobals.count(Old)) {
+//TODO AttributeCompilerUsedGlobals.remove(Old);
+//TODO AttributeCompilerUsedGlobals.insert(New);
+//TODO }
+//TODO
//TODO for (unsigned i = 0, e = StaticCtors.size(); i != e; ++i) {
//TODO if (StaticCtors[i].first == Old)
//TODO StaticCtors[i].first = New;
@@ -250,7 +257,7 @@
//TODO LLVMValuesMap[New] = Idx+1;
//TODO}
//TODO
-//TODO// Read LLVM Types string table
+//TODO/// readLLVMValues - Read the LLVM Values string table
//TODOvoid readLLVMValues() {
//TODO GlobalValue *V = TheModule->getNamedGlobal("llvm.pch.values");
//TODO if (!V)
@@ -279,9 +286,9 @@
//TODO GV->eraseFromParent();
//TODO}
//TODO
-//TODO// GCC tree's uses LLVMValues vector's index to reach LLVM Values.
-//TODO// Create a string table to hold these LLVM Values' names. This string
-//TODO// table will be used to recreate LTypes vector after loading PCH.
+//TODO/// writeLLVMValues - GCC trees use the LLVMValues vector's index to reach LLVM
+//TODO/// Values. Create a string table to hold these LLVM Values' names. This string
+//TODO/// table will be used to recreate the LLVMValues vector after loading PCH.
//TODOvoid writeLLVMValues() {
//TODO if (LLVMValues.empty())
//TODO return;
@@ -296,11 +303,11 @@
//TODO else
//TODO // Non constant values, e.g. arguments, are not at global scope.
//TODO // When PCH is read, only global scope values are used.
-//TODO ValuesForPCH.push_back(Context.getNullValue(Type::Int32Ty));
+//TODO ValuesForPCH.push_back(Constant::getNullValue(Type::getInt32Ty(Context)));
//TODO }
//TODO
//TODO // Create string table.
-//TODO Constant *LLVMValuesTable = Context.getConstantStruct(ValuesForPCH, false);
+//TODO Constant *LLVMValuesTable = ConstantStruct::get(Context, ValuesForPCH, false);
//TODO
//TODO // Create variable to hold this string table.
//TODO new GlobalVariable(*TheModule, LLVMValuesTable->getType(), true,
@@ -328,8 +335,7 @@
//TODO }
//TODO}
-
-// Forward decl visibility style to global.
+/// handleVisibility - Forward decl visibility style to global.
void handleVisibility(tree decl, GlobalValue *GV) {
// If decl has visibility specified explicitely (via attribute) - honour
// it. Otherwise (e.g. visibility specified via -fvisibility=hidden) honour
@@ -485,8 +491,9 @@
// Create the TargetMachine we will be generating code with.
// FIXME: Figure out how to select the target and pass down subtarget info.
std::string Err;
+ std::string Triple = TheModule->getTargetTriple();
const Target *TME =
- TargetRegistry::getClosestStaticTargetForModule(*TheModule, Err);
+ TargetRegistry::lookupTarget(Triple, Err);
if (!TME)
llvm_report_error(Err);
@@ -499,7 +506,7 @@
//TODO LLVM_SET_SUBTARGET_FEATURES(Features);
//TODO FeatureStr = Features.getString();
//TODO#endif
- TheTarget = TME->createTargetMachine(*TheModule, FeatureStr);
+ TheTarget = TME->createTargetMachine(Triple, FeatureStr);
assert(TheTarget->getTargetData()->isBigEndian() == BYTES_BIG_ENDIAN);
TheFolder = new TargetFolder(TheTarget->getTargetData(), getGlobalContext());
@@ -520,10 +527,14 @@
//TODO TheDebugInfo = new DebugInfo(TheModule);
//TODO}
//TODO
-//TODO/// Set backend options that may only be known at codegen time.
+//TODO/// performLateBackendInitialization - Set backend options that may only be
+//TODO/// known at codegen time.
//TODOvoid performLateBackendInitialization(void) {
//TODO // The Ada front-end sets flag_exceptions only after processing the file.
-//TODO ExceptionHandling = flag_exceptions;
+//TODO if (USING_SJLJ_EXCEPTIONS)
+//TODO SjLjExceptionHandling = flag_exceptions;
+//TODO else
+//TODO DwarfExceptionHandling = flag_exceptions;
//TODO for (Module::iterator I = TheModule->begin(), E = TheModule->end();
//TODO I != E; ++I)
//TODO if (!I->isDeclaration()) {
@@ -554,7 +565,7 @@
//TODOstatic formatted_raw_ostream *AsmOutRawStream = 0;
//TODOoFILEstream *AsmIntermediateOutStream = 0;
//TODO
-//TODO/// Read bytecode from PCH file. Initialize TheModule and setup
+//TODO/// llvm_pch_read - Read bytecode from PCH file. Initialize TheModule and setup
//TODO/// LTypes vector.
//TODOvoid llvm_pch_read(const unsigned char *Buffer, unsigned Size) {
//TODO std::string ModuleName = TheModule->getModuleIdentifier();
@@ -598,7 +609,7 @@
//TODO flag_llvm_pch_read = 1;
//TODO}
//TODO
-//TODO// Initialize PCH writing.
+//TODO/// llvm_pch_write_init - Initialize PCH writing.
//TODOvoid llvm_pch_write_init(void) {
//TODO timevar_push(TV_LLVM_INIT);
//TODO AsmOutStream = new oFILEstream(asm_out_file);
@@ -805,7 +816,7 @@
//TODO }
//TODO}
//TODO
-//TODO// llvm_asm_file_start - Start the .s file.
+//TODO/// llvm_asm_file_start - Start the .s file.
//TODOvoid llvm_asm_file_start(void) {
//TODO timevar_push(TV_LLVM_INIT);
//TODO AsmOutStream = new oFILEstream(asm_out_file);
@@ -827,6 +838,7 @@
//TODO sys::Program::ChangeStdoutToBinary();
//TODO
//TODO AttributeUsedGlobals.clear();
+//TODO AttributeCompilerUsedGlobals.clear();
//TODO timevar_pop(TV_LLVM_INIT);
//TODO}
@@ -834,32 +846,33 @@
/// initializer suitable for the llvm.global_[cd]tors globals.
static void CreateStructorsList(std::vector<std::pair<Constant*, int> > &Tors,
const char *Name) {
- LLVMContext &Context = getGlobalContext();
-
std::vector<Constant*> InitList;
std::vector<Constant*> StructInit;
StructInit.resize(2);
+ LLVMContext &Context = getGlobalContext();
+
const Type *FPTy =
- Context.getFunctionType(Type::VoidTy, std::vector<const Type*>(), false);
- FPTy = Context.getPointerTypeUnqual(FPTy);
+ FunctionType::get(Type::getVoidTy(Context),
+ std::vector<const Type*>(), false);
+ FPTy = PointerType::getUnqual(FPTy);
for (unsigned i = 0, e = Tors.size(); i != e; ++i) {
- StructInit[0] = Context.getConstantInt(Type::Int32Ty, Tors[i].second);
+ StructInit[0] = ConstantInt::get(Type::getInt32Ty(Context), Tors[i].second);
// __attribute__(constructor) can be on a function with any type. Make sure
// the pointer is void()*.
StructInit[1] = TheFolder->CreateBitCast(Tors[i].first, FPTy);
- InitList.push_back(Context.getConstantStruct(StructInit, false));
+ InitList.push_back(ConstantStruct::get(Context, StructInit, false));
}
- Constant *Array = Context.getConstantArray(
- Context.getArrayType(InitList[0]->getType(), InitList.size()), InitList);
+ Constant *Array = ConstantArray::get(
+ ArrayType::get(InitList[0]->getType(), InitList.size()), InitList);
new GlobalVariable(*TheModule, Array->getType(), false,
GlobalValue::AppendingLinkage,
Array, Name);
}
-//TODO// llvm_asm_file_end - Finish the .s file.
+//TODO/// llvm_asm_file_end - Finish the .s file.
//TODOvoid llvm_asm_file_end(void) {
//TODO timevar_push(TV_LLVM_PERFILE);
//TODO LLVMContext &Context = getGlobalContext();
@@ -881,26 +894,46 @@
//TODO
//TODO if (!AttributeUsedGlobals.empty()) {
//TODO std::vector<Constant *> AUGs;
-//TODO const Type *SBP= Context.getPointerTypeUnqual(Type::Int8Ty);
-//TODO for (SmallSetVector<Constant *,32>::iterator AI = AttributeUsedGlobals.begin(),
+//TODO const Type *SBP= PointerType::getUnqual(Type::getInt8Ty(Context));
+//TODO for (SmallSetVector<Constant *,32>::iterator
+//TODO AI = AttributeUsedGlobals.begin(),
//TODO AE = AttributeUsedGlobals.end(); AI != AE; ++AI) {
//TODO Constant *C = *AI;
//TODO AUGs.push_back(TheFolder->CreateBitCast(C, SBP));
//TODO }
//TODO
-//TODO ArrayType *AT = Context.getArrayType(SBP, AUGs.size());
-//TODO Constant *Init = Context.getConstantArray(AT, AUGs);
+//TODO ArrayType *AT = ArrayType::get(SBP, AUGs.size());
+//TODO Constant *Init = ConstantArray::get(AT, AUGs);
//TODO GlobalValue *gv = new GlobalVariable(*TheModule, AT, false,
-//TODO GlobalValue::AppendingLinkage, Init,
-//TODO "llvm.used");
+//TODO GlobalValue::AppendingLinkage, Init,
+//TODO "llvm.used");
//TODO gv->setSection("llvm.metadata");
//TODO AttributeUsedGlobals.clear();
//TODO }
//TODO
+//TODO if (!AttributeCompilerUsedGlobals.empty()) {
+//TODO std::vector<Constant *> ACUGs;
+//TODO const Type *SBP= PointerType::getUnqual(Type::getInt8Ty(Context));
+//TODO for (SmallSetVector<Constant *,32>::iterator
+//TODO AI = AttributeCompilerUsedGlobals.begin(),
+//TODO AE = AttributeCompilerUsedGlobals.end(); AI != AE; ++AI) {
+//TODO Constant *C = *AI;
+//TODO ACUGs.push_back(TheFolder->CreateBitCast(C, SBP));
+//TODO }
+//TODO
+//TODO ArrayType *AT = ArrayType::get(SBP, ACUGs.size());
+//TODO Constant *Init = ConstantArray::get(AT, ACUGs);
+//TODO GlobalValue *gv = new GlobalVariable(*TheModule, AT, false,
+//TODO GlobalValue::AppendingLinkage, Init,
+//TODO "llvm.compiler.used");
+//TODO gv->setSection("llvm.metadata");
+//TODO AttributeCompilerUsedGlobals.clear();
+//TODO }
+//TODO
//TODO // Add llvm.global.annotations
//TODO if (!AttributeAnnotateGlobals.empty()) {
-//TODO Constant *Array = Context.getConstantArray(
-//TODO Context.getArrayType(AttributeAnnotateGlobals[0]->getType(),
+//TODO Constant *Array = ConstantArray::get(
+//TODO ArrayType::get(AttributeAnnotateGlobals[0]->getType(),
//TODO AttributeAnnotateGlobals.size()),
//TODO AttributeAnnotateGlobals);
//TODO GlobalValue *gv = new GlobalVariable(*TheModule, Array->getType(), false,
@@ -976,8 +1009,8 @@
//TODO llvm_shutdown();
//TODO}
//TODO
-//TODO// llvm_emit_code_for_current_function - Top level interface for emitting a
-//TODO// function to the .s file.
+//TODO/// llvm_emit_code_for_current_function - Top level interface for emitting a
+//TODO/// function to the .s file.
//TODOvoid llvm_emit_code_for_current_function(tree fndecl) {
//TODO if (cfun->nonlocal_goto_save_area)
//TODO sorry("%Jnon-local gotos not supported by LLVM", fndecl);
@@ -1028,7 +1061,7 @@
//TODO timevar_pop(TV_LLVM_FUNCS);
//TODO}
-// emit_alias_to_llvm - Given decl and target emit alias to target.
+/// emit_alias_to_llvm - Given decl and target emit alias to target.
void emit_alias_to_llvm(tree decl, tree target, tree target_decl) {
if (errorcount || sorrycount) {
TREE_ASM_WRITTEN(decl) = 1;
@@ -1090,6 +1123,8 @@
// A weak alias has TREE_PUBLIC set but not the other bits.
if (false)//FIXME DECL_LLVM_PRIVATE(decl))
Linkage = GlobalValue::PrivateLinkage;
+ else if (false)//FIXME DECL_LLVM_LINKER_PRIVATE(decl))
+ Linkage = GlobalValue::LinkerPrivateLinkage;
else if (DECL_WEAK(decl))
// The user may have explicitly asked for weak linkage - ignore flag_odr.
Linkage = GlobalValue::WeakAnyLinkage;
@@ -1104,7 +1139,7 @@
handleVisibility(decl, GA);
if (GA->getType()->canLosslesslyBitCastTo(V->getType()))
- V->replaceAllUsesWith(Context.getConstantExprBitCast(GA, V->getType()));
+ V->replaceAllUsesWith(ConstantExpr::getBitCast(GA, V->getType()));
else if (!V->use_empty()) {
error ("%J Alias %qD used with invalid type!", decl, decl);
//TODO timevar_pop(TV_LLVM_GLOBALS);
@@ -1128,10 +1163,11 @@
return;
}
-// Convert string to global value. Use existing global if possible.
+/// ConvertMetadataStringToGV - Convert string to global value. Use existing
+/// global if possible.
Constant* ConvertMetadataStringToGV(const char *str) {
- Constant *Init = getGlobalContext().getConstantArray(std::string(str));
+ Constant *Init = ConstantArray::get(getGlobalContext(), std::string(str));
// Use cached string if it exists.
static std::map<Constant*, GlobalVariable*> StringCSTCache;
@@ -1148,8 +1184,8 @@
}
-/// AddAnnotateAttrsToGlobal - Adds decls that have a
-/// annotate attribute to a vector to be emitted later.
+/// AddAnnotateAttrsToGlobal - Adds decls that have a annotate attribute to a
+/// vector to be emitted later.
void AddAnnotateAttrsToGlobal(GlobalValue *GV, tree decl) {
LLVMContext &Context = getGlobalContext();
@@ -1159,10 +1195,10 @@
return;
// Get file and line number
- Constant *lineNo =
- Context.getConstantInt(Type::Int32Ty, DECL_SOURCE_LINE(decl));
+ Constant *lineNo = ConstantInt::get(Type::getInt32Ty(Context),
+ DECL_SOURCE_LINE(decl));
Constant *file = ConvertMetadataStringToGV(DECL_SOURCE_FILE(decl));
- const Type *SBP= Context.getPointerTypeUnqual(Type::Int8Ty);
+ const Type *SBP= PointerType::getUnqual(Type::getInt8Ty(Context));
file = TheFolder->CreateBitCast(file, SBP);
// There may be multiple annotate attributes. Pass return of lookup_attr
@@ -1191,7 +1227,7 @@
};
AttributeAnnotateGlobals.push_back(
- Context.getConstantStruct(Element, 4, false));
+ ConstantStruct::get(Context, Element, 4, false));
}
// Get next annotate attribute.
@@ -1242,7 +1278,7 @@
handleVisibility(decl, GV);
// Temporary to avoid infinite recursion (see comments emit_global_to_llvm)
- GV->setInitializer(Context.getUndef(GV->getType()->getElementType()));
+ GV->setInitializer(UndefValue::get(GV->getType()->getElementType()));
// Convert the initializer over.
Constant *Init = TreeConstantToLLVM::Convert(DECL_INITIAL(decl));
@@ -1310,7 +1346,7 @@
// This global should be zero initialized. Reconvert the type in case the
// forward def of the global and the real def differ in type (e.g. declared
// as 'int A[]', and defined as 'int A[100]').
- Init = getGlobalContext().getNullValue(ConvertType(TREE_TYPE(decl)));
+ Init = Constant::getNullValue(ConvertType(TREE_TYPE(decl)));
} else {
assert((TREE_CONSTANT(DECL_INITIAL(decl)) ||
TREE_CODE(DECL_INITIAL(decl)) == STRING_CST) &&
@@ -1322,7 +1358,7 @@
// on it". When constructing the initializer it might refer to itself.
// this can happen for things like void *G = &G;
//
- GV->setInitializer(Context.getUndef(GV->getType()->getElementType()));
+ GV->setInitializer(UndefValue::get(GV->getType()->getElementType()));
Init = TreeConstantToLLVM::Convert(DECL_INITIAL(decl));
}
@@ -1351,10 +1387,14 @@
GV->setThreadLocal(true);
// Set the linkage.
- GlobalValue::LinkageTypes Linkage = GV->getLinkage();
+ GlobalValue::LinkageTypes Linkage;
+
if (CODE_CONTAINS_STRUCT (TREE_CODE (decl), TS_DECL_WITH_VIS)
&& false) {// FIXME DECL_LLVM_PRIVATE(decl)) {
Linkage = GlobalValue::PrivateLinkage;
+ } else if (CODE_CONTAINS_STRUCT (TREE_CODE (decl), TS_DECL_WITH_VIS)
+ && false) {//FIXME DECL_LLVM_LINKER_PRIVATE(decl)) {
+ Linkage = GlobalValue::LinkerPrivateLinkage;
} else if (!TREE_PUBLIC(decl)) {
Linkage = GlobalValue::InternalLinkage;
} else if (DECL_WEAK(decl)) {
@@ -1368,6 +1408,8 @@
Linkage = GlobalValue::CommonLinkage;
} else if (DECL_COMDAT(decl)) {
Linkage = GlobalValue::getLinkOnceLinkage(flag_odr);
+ } else {
+ Linkage = GV->getLinkage();
}
// Allow loads from constants to be folded even if the constant has weak
@@ -1412,8 +1454,12 @@
}
// Handle used decls
- if (DECL_PRESERVE_P (decl))
- AttributeUsedGlobals.insert(GV);
+ if (DECL_PRESERVE_P (decl)) {
+ if (false)//FIXME DECL_LLVM_LINKER_PRIVATE (decl))
+ AttributeCompilerUsedGlobals.insert(GV);
+ else
+ AttributeUsedGlobals.insert(GV);
+ }
// Add annotate attributes for globals
if (DECL_ATTRIBUTES(decl))
@@ -1453,12 +1499,9 @@
int RegNumber = decode_reg_name(extractRegisterName(decl));
const Type *Ty = ConvertType(TREE_TYPE(decl));
- // If this has already been processed, don't emit duplicate error messages.
- if (DECL_LLVM_SET_P(decl)) {
- // Error state encoded into DECL_LLVM.
- return cast<ConstantInt>(DECL_LLVM(decl))->getZExtValue();
- }
-
+ if (errorcount || sorrycount)
+ return true; // Do not process broken code.
+
/* Detect errors in declaring global registers. */
if (RegNumber == -1)
error("%Jregister name not specified for %qD", decl, decl);
@@ -1479,25 +1522,23 @@
else {
if (TREE_THIS_VOLATILE(decl))
warning(0, "volatile register variables don%'t work as you might wish");
-
- SET_DECL_LLVM(decl, Context.getFalse());
+
return false; // Everything ok.
}
- SET_DECL_LLVM(decl, Context.getTrue());
+
return true;
}
-// make_decl_llvm - Create the DECL_RTL for a VAR_DECL or FUNCTION_DECL. DECL
-// should have static storage duration. In other words, it should not be an
-// automatic variable, including PARM_DECLs.
-//
-// There is, however, one exception: this function handles variables explicitly
-// placed in a particular register by the user.
-//
-// This function corresponds to make_decl_rtl in varasm.c, and is implicitly
-// called by DECL_LLVM if a decl doesn't have an LLVM set.
-//
+/// make_decl_llvm - Create the DECL_RTL for a VAR_DECL or FUNCTION_DECL. DECL
+/// should have static storage duration. In other words, it should not be an
+/// automatic variable, including PARM_DECLs.
+///
+/// There is, however, one exception: this function handles variables explicitly
+/// placed in a particular register by the user.
+///
+/// This function corresponds to make_decl_rtl in varasm.c, and is implicitly
+/// called by DECL_LLVM if a decl doesn't have an LLVM set.
void make_decl_llvm(tree decl) {
#ifdef ENABLE_CHECKING
// Check that we are not being given an automatic variable.
@@ -1569,7 +1610,7 @@
// when we have something like __builtin_memset and memset in the same file.
Function *FnEntry = TheModule->getFunction(Name);
if (FnEntry == 0) {
- unsigned CC;
+ CallingConv::ID CC;
AttrListPtr PAL;
const FunctionType *Ty =
TheTypeConverter->ConvertFunctionType(TREE_TYPE(decl), decl, NULL,
@@ -1618,7 +1659,8 @@
// If we have "extern void foo", make the global have type {} instead of
// type void.
- if (Ty == Type::VoidTy) Ty = Context.getStructType(NULL, NULL);
+ if (Ty == Type::getVoidTy(Context))
+ Ty = StructType::get(Context);
if (Name[0] == 0) { // Global has no name.
GV = new GlobalVariable(*TheModule, Ty, false,
@@ -1718,9 +1760,9 @@
return "";
}
-// llvm_mark_decl_weak - Used by varasm.c, called when a decl is found to be
-// weak, but it already had an llvm object created for it. This marks the LLVM
-// object weak as well.
+/// llvm_mark_decl_weak - Used by varasm.c, called when a decl is found to be
+/// weak, but it already had an llvm object created for it. This marks the LLVM
+/// object weak as well.
void llvm_mark_decl_weak(tree decl) {
assert(DECL_LLVM_SET_P(decl) && DECL_WEAK(decl) &&
isa<GlobalValue>(DECL_LLVM(decl)) && "Decl isn't marked weak!");
@@ -1747,10 +1789,9 @@
}
}
-// llvm_emit_ctor_dtor - Called to emit static ctors/dtors to LLVM code. fndecl
-// is a 'void()' FUNCTION_DECL for the code, initprio is the init priority, and
-// isCtor indicates whether this is a ctor or dtor.
-//
+/// llvm_emit_ctor_dtor - Called to emit static ctors/dtors to LLVM code.
+/// fndecl is a 'void()' FUNCTION_DECL for the code, initprio is the init
+/// priority, and isCtor indicates whether this is a ctor or dtor.
void llvm_emit_ctor_dtor(tree FnDecl, int InitPrio, int isCtor) {
mark_decl_referenced(FnDecl); // Inform cgraph that we used the global.
@@ -1765,9 +1806,8 @@
return;
}
-// llvm_emit_file_scope_asm - Emit the specified string as a file-scope inline
-// asm block.
-//
+/// llvm_emit_file_scope_asm - Emit the specified string as a file-scope inline
+/// asm block.
void llvm_emit_file_scope_asm(const char *string) {
if (TheModule->getModuleInlineAsm().empty())
TheModule->setModuleInlineAsm(string);
@@ -1776,18 +1816,16 @@
string);
}
-//FIXME// print_llvm - Print the specified LLVM chunk like an operand, called by
-//FIXME// print-tree.c for tree dumps.
-//FIXME//
+//FIXME/// print_llvm - Print the specified LLVM chunk like an operand, called by
+//FIXME/// print-tree.c for tree dumps.
//FIXMEvoid print_llvm(FILE *file, void *LLVM) {
//FIXME oFILEstream FS(file);
//FIXME FS << "LLVM: ";
//FIXME WriteAsOperand(FS, (Value*)LLVM, true, TheModule);
//FIXME}
//FIXME
-//FIXME// print_llvm_type - Print the specified LLVM type symbolically, called by
-//FIXME// print-tree.c for tree dumps.
-//FIXME//
+//FIXME/// print_llvm_type - Print the specified LLVM type symbolically, called by
+//FIXME/// print-tree.c for tree dumps.
//FIXMEvoid print_llvm_type(FILE *file, void *LLVM) {
//FIXME oFILEstream FS(file);
//FIXME FS << "LLVM: ";
@@ -1799,13 +1837,12 @@
//FIXME WriteTypeSymbolic(RO, (const Type*)LLVM, TheModule);
//FIXME}
-// Get a register name given its decl. In 4.2 unlike 4.0 these names
-// have been run through set_user_assembler_name which means they may
-// have a leading \1 at this point; compensate.
-
+/// extractRegisterName - Get a register name given its decl. In 4.2 unlike 4.0
+/// these names have been run through set_user_assembler_name which means they
+/// may have a leading \1 at this point; compensate.
const char* extractRegisterName(tree decl) {
const char* Name = IDENTIFIER_POINTER(DECL_ASSEMBLER_NAME(decl));
- return (*Name==1) ? Name+1 : Name;
+ return (*Name == 1) ? Name + 1 : Name;
}
Modified: gcc-plugin/trunk/llvm-convert.cpp
URL: http://llvm.org/viewvc/llvm-project/gcc-plugin/trunk/llvm-convert.cpp?rev=79452&r1=79451&r2=79452&view=diff
==============================================================================
--- gcc-plugin/trunk/llvm-convert.cpp (original)
+++ gcc-plugin/trunk/llvm-convert.cpp Wed Aug 19 14:49:08 2009
@@ -36,7 +36,6 @@
#include "llvm/System/Host.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
-#include "llvm/Target/TargetAsmInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
@@ -215,7 +214,7 @@
if (DECL_NAME(LabelDecl))
Name = IDENTIFIER_POINTER(DECL_NAME(LabelDecl));
- BasicBlock *NewBB = BasicBlock::Create(Name);
+ BasicBlock *NewBB = BasicBlock::Create(Context, Name);
SET_DECL_LLVM(LabelDecl, NewBB);
return NewBB;
}
@@ -231,7 +230,7 @@
assert(!BYTES_BIG_ENDIAN && "Unsupported case - please report");
// Do byte wise store because actual argument type does not match LLVMTy.
assert(isa<IntegerType>(ArgVal->getType()) && "Expected an integer value!");
- const Type *StoreType = Context.getIntegerType(RealSize * 8);
+ const Type *StoreType = IntegerType::get(Context, RealSize * 8);
Loc = Builder.CreateBitCast(Loc, StoreType->getPointerTo());
if (ArgVal->getType()->getPrimitiveSizeInBits() >=
StoreType->getPrimitiveSizeInBits())
@@ -241,7 +240,7 @@
Builder.CreateStore(ArgVal, Loc);
} else {
// This cast only involves pointers, therefore BitCast.
- Loc = Builder.CreateBitCast(Loc, Context.getPointerTypeUnqual(LLVMTy));
+ Loc = Builder.CreateBitCast(Loc, PointerType::getUnqual(LLVMTy));
Builder.CreateStore(ArgVal, Loc);
}
}
@@ -263,13 +262,17 @@
std::vector<Value*> LocStack;
std::vector<std::string> NameStack;
unsigned Offset;
+ CallingConv::ID &CallingConv;
bool isShadowRet;
FunctionPrologArgumentConversion(tree FnDecl,
Function::arg_iterator &ai,
- const LLVMBuilder &B)
- : FunctionDecl(FnDecl), AI(ai), Builder(B), Offset(0),
+ const LLVMBuilder &B, CallingConv::ID &CC)
+ : FunctionDecl(FnDecl), AI(ai), Builder(B), Offset(0), CallingConv(CC),
isShadowRet(false) {}
+ /// getCallingConv - This provides the desired CallingConv for the function.
+ CallingConv::ID& getCallingConv(void) { return CallingConv; }
+
bool isShadowReturn() {
return isShadowRet;
}
@@ -337,7 +340,7 @@
// If this is GCC being sloppy about pointer types, insert a bitcast.
// See PR1083 for an example.
ArgVal = Builder.CreateBitCast(ArgVal, LLVMTy);
- } else if (ArgVal->getType() == Type::DoubleTy) {
+ } else if (ArgVal->getType() == Type::getDoubleTy(Context)) {
// If this is a K&R float parameter, it got promoted to double. Insert
// the truncation to float now.
ArgVal = Builder.CreateFPTrunc(ArgVal, LLVMTy,
@@ -346,7 +349,8 @@
// If this is just a mismatch between integer types, this is due
// to K&R prototypes, where the forward proto defines the arg as int
// and the actual impls is a short or char.
- assert(ArgVal->getType() == Type::Int32Ty && LLVMTy->isInteger() &&
+ assert(ArgVal->getType() == Type::getInt32Ty(Context) &&
+ LLVMTy->isInteger() &&
"Lowerings don't match?");
ArgVal = Builder.CreateTrunc(ArgVal, LLVMTy,NameStack.back().c_str());
}
@@ -381,7 +385,7 @@
Value *Loc = LocStack.back();
// This cast only involves pointers, therefore BitCast.
- Loc = Builder.CreateBitCast(Loc, Context.getPointerTypeUnqual(StructTy));
+ Loc = Builder.CreateBitCast(Loc, PointerType::getUnqual(StructTy));
Loc = Builder.CreateStructGEP(Loc, FieldNo);
LocStack.push_back(Loc);
@@ -397,13 +401,14 @@
// passed in memory byval.
static bool isPassedByVal(tree type, const Type *Ty,
std::vector<const Type*> &ScalarArgs,
- bool isShadowRet) {
+ bool isShadowRet, CallingConv::ID &CC) {
if (LLVM_SHOULD_PASS_AGGREGATE_USING_BYVAL_ATTR(type, Ty))
return true;
std::vector<const Type*> Args;
- if (LLVM_SHOULD_PASS_AGGREGATE_IN_MIXED_REGS(type, Ty, Args) &&
- LLVM_AGGREGATE_PARTIALLY_PASSED_IN_REGS(Args, ScalarArgs, isShadowRet))
+ if (LLVM_SHOULD_PASS_AGGREGATE_IN_MIXED_REGS(type, Ty, CC, Args) &&
+ LLVM_AGGREGATE_PARTIALLY_PASSED_IN_REGS(Args, ScalarArgs, isShadowRet,
+ CC))
// We want to pass the whole aggregate in registers but only some of the
// registers are available.
return true;
@@ -419,7 +424,7 @@
// Determine the FunctionType and calling convention for this function.
tree static_chain = cfun->static_chain_decl;
const FunctionType *FTy;
- unsigned CallingConv;
+ CallingConv::ID CallingConv;
AttrListPtr PAL;
// If the function has no arguments and is varargs (...), turn it into a
@@ -445,7 +450,7 @@
if (DECL_LLVM_SET_P(FnDecl) &&
cast<PointerType>(DECL_LLVM(FnDecl)->getType())->getElementType() == FTy){
Fn = cast<Function>(DECL_LLVM(FnDecl));
- assert(Fn->getCallingConv() == CallingConv &&
+ assert(Fn->getCallingConv() == static_cast<unsigned>(CallingConv) &&
"Calling convention disagreement between prototype and impl!");
// The visibility can be changed from the last time we've seen this
// function. Set to current.
@@ -457,7 +462,7 @@
assert(FnEntry->isDeclaration() &&
"Multiple fns with same name and neither are external!");
FnEntry->setName(""); // Clear name to avoid conflicts.
- assert(FnEntry->getCallingConv() == CallingConv &&
+ assert(FnEntry->getCallingConv() == static_cast<unsigned>(CallingConv) &&
"Calling convention disagreement between prototype and impl!");
}
@@ -486,6 +491,8 @@
// Compute the linkage that the function should get.
if (false) {//FIXME DECL_LLVM_PRIVATE(FnDecl)) {
Fn->setLinkage(Function::PrivateLinkage);
+ } else if (false) {//FIXME DECL_LLVM_LINKER_PRIVATE(FnDecl)) {
+ Fn->setLinkage(Function::LinkerPrivateLinkage);
} else if (!TREE_PUBLIC(FnDecl) /*|| lang_hooks.llvm_is_in_anon(subr)*/) {
Fn->setLinkage(Function::InternalLinkage);
} else if (DECL_COMDAT(FnDecl)) {
@@ -544,7 +551,7 @@
Fn->setDoesNotThrow();
// Create a new basic block for the function.
- Builder.SetInsertPoint(BasicBlock::Create("entry", Fn));
+ Builder.SetInsertPoint(BasicBlock::Create(Context, "entry", Fn));
if (TheDebugInfo)
TheDebugInfo->EmitFunctionStart(FnDecl, Fn, Builder.GetInsertBlock());
@@ -555,7 +562,7 @@
Function::arg_iterator AI = Fn->arg_begin();
// Rename and alloca'ify real arguments.
- FunctionPrologArgumentConversion Client(FnDecl, AI, Builder);
+ FunctionPrologArgumentConversion Client(FnDecl, AI, Builder, CallingConv);
TheLLVMABI<FunctionPrologArgumentConversion> ABIConverter(Client);
// Handle the DECL_RESULT.
@@ -576,11 +583,11 @@
const Type *ArgTy = ConvertType(TREE_TYPE(Args));
bool isInvRef = isPassedByInvisibleReference(TREE_TYPE(Args));
if (isInvRef ||
- (ArgTy->getTypeID()==Type::VectorTyID &&
+ (isa<VectorType>(ArgTy) &&
LLVM_SHOULD_PASS_VECTOR_USING_BYVAL_ATTR(TREE_TYPE(Args))) ||
(!ArgTy->isSingleValueType() &&
isPassedByVal(TREE_TYPE(Args), ArgTy, ScalarArgs,
- Client.isShadowReturn()))) {
+ Client.isShadowReturn(), CallingConv))) {
// If the value is passed by 'invisible reference' or 'byval reference',
// the l-value for the argument IS the argument itself.
AI->setName(Name);
@@ -638,19 +645,19 @@
//TODO if (!DECL_LLVM_SET_P(TREE_VALUE(t)))
//TODO EmitAutomaticVariableDecl(TREE_VALUE(t));
//TODO }
-
+
// Create a new block for the return node, but don't insert it yet.
- ReturnBB = BasicBlock::Create("return");
+ ReturnBB = BasicBlock::Create(Context, "return");
}
Function *TreeToLLVM::FinishFunctionBody() {
// Insert the return block at the end of the function.
EmitBlock(ReturnBB);
-
+
SmallVector <Value *, 4> RetVals;
// If the function returns a value, get it into a register and return it now.
- if (Fn->getReturnType() != Type::VoidTy) {
+ if (Fn->getReturnType() != Type::getVoidTy(Context)) {
if (!isAggregateTreeType(TREE_TYPE(DECL_RESULT(FnDecl)))) {
// If the DECL_RESULT is a scalar type, just load out the return value
// and return it.
@@ -664,12 +671,12 @@
} else {
Value *RetVal = DECL_LLVM(DECL_RESULT(FnDecl));
if (const StructType *STy = dyn_cast<StructType>(Fn->getReturnType())) {
- Value *R1 = BitCastToType(RetVal, Context.getPointerTypeUnqual(STy));
+ Value *R1 = BitCastToType(RetVal, PointerType::getUnqual(STy));
llvm::Value *Idxs[2];
- Idxs[0] = Context.getConstantInt(llvm::Type::Int32Ty, 0);
+ Idxs[0] = ConstantInt::get(llvm::Type::getInt32Ty(Context), 0);
for (unsigned ri = 0; ri < STy->getNumElements(); ++ri) {
- Idxs[1] = Context.getConstantInt(llvm::Type::Int32Ty, ri);
+ Idxs[1] = ConstantInt::get(llvm::Type::getInt32Ty(Context), ri);
Value *GEP = Builder.CreateGEP(R1, Idxs, Idxs+2, "mrv_gep");
Value *E = Builder.CreateLoad(GEP, "mrv");
RetVals.push_back(E);
@@ -682,12 +689,12 @@
// beginning of the aggregate (x86-64).
if (ReturnOffset) {
RetVal = BitCastToType(RetVal,
- Context.getPointerTypeUnqual(Type::Int8Ty));
+ PointerType::getUnqual(Type::getInt8Ty(Context)));
RetVal = Builder.CreateGEP(RetVal,
- Context.getConstantInt(TD.getIntPtrType(), ReturnOffset));
+ ConstantInt::get(TD.getIntPtrType(Context), ReturnOffset));
}
RetVal = BitCastToType(RetVal,
- Context.getPointerTypeUnqual(Fn->getReturnType()));
+ PointerType::getUnqual(Fn->getReturnType()));
RetVal = Builder.CreateLoad(RetVal, "retval");
RetVals.push_back(RetVal);
}
@@ -714,7 +721,7 @@
// block.
if (IndirectGotoBlock) {
EmitBlock(IndirectGotoBlock);
-
+
// Change the default destination to go to one of the other destinations, if
// there is any other dest.
SwitchInst *SI = cast<SwitchInst>(IndirectGotoBlock->getTerminator());
@@ -777,7 +784,7 @@
break;
if (e && e->dest != bb->next_bb) {
Builder.CreateBr(getLabelDeclBlock(gimple_block_label (e->dest)));
- EmitBlock(BasicBlock::Create(""));
+ EmitBlock(BasicBlock::Create(Context, ""));
}
}
@@ -983,6 +990,10 @@
// result is not used then GCC sometimes sets the tree type to VOID_TYPE, so
// don't take VOID_TYPE too seriously here.
assert((Result == 0 || VOID_TYPE_P(TREE_TYPE(exp)) ||
+ // FIXME: The vector stuff isn't straight-forward. Sometimes X86 can
+ // pass it back as a scalar value. Disable checking if it's a
+ // vector. This should be made better, though.
+ isa<VectorType>(ConvertType(TREE_TYPE(exp))) ||
Result->getType() == ConvertType(TREE_TYPE(exp))) &&
"Value has wrong type!");
return Result;
@@ -1090,11 +1101,11 @@
// Handle 'trunc (zext i1 X to T2) to i1' as X, because this occurs all over
// the place.
if (ZExtInst *CI = dyn_cast<ZExtInst>(V))
- if (Ty == Type::Int1Ty && CI->getOperand(0)->getType() == Type::Int1Ty)
+ if (Ty == Type::getInt1Ty(Context) && CI->getOperand(0)->getType() == Type::getInt1Ty(Context))
return CI->getOperand(0);
return Builder.CreateCast(Instruction::CastOps(opcode), V, Ty,
- V->getNameStart());
+ V->getName().data());
}
/// CastToAnyType - Cast the specified value to the specified type making no
@@ -1174,8 +1185,8 @@
// it is dead. This allows us to insert allocas in order without having to
// scan for an insertion point. Use BitCast for int -> int
AllocaInsertionPoint = CastInst::Create(Instruction::BitCast,
- Context.getNullValue(Type::Int32Ty),
- Type::Int32Ty, "alloca point");
+ Constant::getNullValue(Type::getInt32Ty(Context)),
+ Type::getInt32Ty(Context), "alloca point");
// Insert it as the first instruction in the entry block.
Fn->begin()->getInstList().insert(Fn->begin()->begin(),
AllocaInsertionPoint);
@@ -1324,9 +1335,9 @@
// Don't copy tons of tiny elements.
CountAggregateElements(LLVMTy) <= 8) {
DestLoc.Ptr = BitCastToType(DestLoc.Ptr,
- Context.getPointerTypeUnqual(LLVMTy));
+ PointerType::getUnqual(LLVMTy));
SrcLoc.Ptr = BitCastToType(SrcLoc.Ptr,
- Context.getPointerTypeUnqual(LLVMTy));
+ PointerType::getUnqual(LLVMTy));
CopyAggregate(DestLoc, SrcLoc, Builder, type);
return;
}
@@ -1343,7 +1354,7 @@
const Type *ElTy =
cast<PointerType>(DestLoc.Ptr->getType())->getElementType();
if (ElTy->isSingleValueType()) {
- StoreInst *St = Builder.CreateStore(Context.getNullValue(ElTy),
+ StoreInst *St = Builder.CreateStore(Constant::getNullValue(ElTy),
DestLoc.Ptr, DestLoc.Volatile);
St->setAlignment(DestLoc.getAlignment());
} else if (const StructType *STy = dyn_cast<StructType>(ElTy)) {
@@ -1379,25 +1390,25 @@
// Don't zero tons of tiny elements.
CountAggregateElements(LLVMTy) <= 8) {
DestLoc.Ptr = BitCastToType(DestLoc.Ptr,
- Context.getPointerTypeUnqual(LLVMTy));
+ PointerType::getUnqual(LLVMTy));
ZeroAggregate(DestLoc, Builder);
return;
}
}
- EmitMemSet(DestLoc.Ptr, Context.getConstantInt(Type::Int8Ty, 0),
+ EmitMemSet(DestLoc.Ptr, ConstantInt::get(Type::getInt8Ty(Context), 0),
Emit(TYPE_SIZE_UNIT(type), 0), DestLoc.getAlignment());
}
Value *TreeToLLVM::EmitMemCpy(Value *DestPtr, Value *SrcPtr, Value *Size,
unsigned Align) {
- const Type *SBP = Context.getPointerTypeUnqual(Type::Int8Ty);
- const Type *IntPtr = TD.getIntPtrType();
+ const Type *SBP = PointerType::getUnqual(Type::getInt8Ty(Context));
+ const Type *IntPtr = TD.getIntPtrType(Context);
Value *Ops[4] = {
BitCastToType(DestPtr, SBP),
BitCastToType(SrcPtr, SBP),
CastToSIntType(Size, IntPtr),
- Context.getConstantInt(Type::Int32Ty, Align)
+ ConstantInt::get(Type::getInt32Ty(Context), Align)
};
Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::memcpy,
@@ -1407,13 +1418,13 @@
Value *TreeToLLVM::EmitMemMove(Value *DestPtr, Value *SrcPtr, Value *Size,
unsigned Align) {
- const Type *SBP = Context.getPointerTypeUnqual(Type::Int8Ty);
- const Type *IntPtr = TD.getIntPtrType();
+ const Type *SBP = PointerType::getUnqual(Type::getInt8Ty(Context));
+ const Type *IntPtr = TD.getIntPtrType(Context);
Value *Ops[4] = {
BitCastToType(DestPtr, SBP),
BitCastToType(SrcPtr, SBP),
CastToSIntType(Size, IntPtr),
- Context.getConstantInt(Type::Int32Ty, Align)
+ ConstantInt::get(Type::getInt32Ty(Context), Align)
};
Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::memmove,
@@ -1423,13 +1434,13 @@
Value *TreeToLLVM::EmitMemSet(Value *DestPtr, Value *SrcVal, Value *Size,
unsigned Align) {
- const Type *SBP = Context.getPointerTypeUnqual(Type::Int8Ty);
- const Type *IntPtr = TD.getIntPtrType();
+ const Type *SBP = PointerType::getUnqual(Type::getInt8Ty(Context));
+ const Type *IntPtr = TD.getIntPtrType(Context);
Value *Ops[4] = {
BitCastToType(DestPtr, SBP),
- CastToSIntType(SrcVal, Type::Int8Ty),
+ CastToSIntType(SrcVal, Type::getInt8Ty(Context)),
CastToSIntType(Size, IntPtr),
- Context.getConstantInt(Type::Int32Ty, Align)
+ ConstantInt::get(Type::getInt32Ty(Context), Align)
};
Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::memset,
@@ -1448,12 +1459,12 @@
// The idea is that it's a pointer to type "Value"
// which is opaque* but the routine expects i8** and i8*.
- const PointerType *Ty = Context.getPointerTypeUnqual(Type::Int8Ty);
- V = Builder.CreateBitCast(V, Context.getPointerTypeUnqual(Ty));
+ const PointerType *Ty = PointerType::getUnqual(Type::getInt8Ty(Context));
+ V = Builder.CreateBitCast(V, PointerType::getUnqual(Ty));
Value *Ops[2] = {
V,
- Context.getConstantPointerNull(Ty)
+ ConstantPointerNull::get(Ty)
};
Builder.CreateCall(gcrootFun, Ops, Ops+2);
@@ -1473,9 +1484,9 @@
// Get file and line number
Constant *lineNo =
- Context.getConstantInt(Type::Int32Ty, DECL_SOURCE_LINE(decl));
+ ConstantInt::get(Type::getInt32Ty(Context), DECL_SOURCE_LINE(decl));
Constant *file = ConvertMetadataStringToGV(DECL_SOURCE_FILE(decl));
- const Type *SBP= Context.getPointerTypeUnqual(Type::Int8Ty);
+ const Type *SBP= PointerType::getUnqual(Type::getInt8Ty(Context));
file = Builder.getFolder().CreateBitCast(file, SBP);
// There may be multiple annotate attributes. Pass return of lookup_attr
@@ -1495,7 +1506,7 @@
// Assert its a string, and then get that string.
assert(TREE_CODE(val) == STRING_CST &&
"Annotate attribute arg should always be a string");
- const Type *SBP = Context.getPointerTypeUnqual(Type::Int8Ty);
+ const Type *SBP = PointerType::getUnqual(Type::getInt8Ty(Context));
Constant *strGV = TreeConstantToLLVM::EmitLV_STRING_CST(val);
Value *Ops[4] = {
BitCastToType(V, SBP),
@@ -1585,9 +1596,9 @@
} else {
// Compute the variable's size in bytes.
Size = Emit(DECL_SIZE_UNIT(decl), 0);
- Ty = Type::Int8Ty;
+ Ty = Type::getInt8Ty(Context);
}
- Size = CastToUIntType(Size, Type::Int32Ty);
+ Size = CastToUIntType(Size, Type::getInt32Ty(Context));
}
unsigned Alignment = 0; // Alignment in bytes.
@@ -1634,7 +1645,7 @@
// before initialization doesn't get garbage results to follow.
const Type *T = cast<PointerType>(AI->getType())->getElementType();
EmitTypeGcroot(AI, decl);
- Builder.CreateStore(Context.getNullValue(T), AI);
+ Builder.CreateStore(Constant::getNullValue(T), AI);
}
if (TheDebugInfo) {
@@ -1663,7 +1674,7 @@
// Assign the new ID, update AddressTakenBBNumbers to remember it.
uint64_t BlockNo = ++NumAddressTakenBlocks;
BlockNo &= ~0ULL >> (64-TD.getPointerSizeInBits());
- Val = Context.getConstantInt(TD.getIntPtrType(), BlockNo);
+ Val = ConstantInt::get(TD.getIntPtrType(Context), BlockNo);
// Add it to the switch statement in the indirect goto block.
cast<SwitchInst>(getIndirectGotoBlock()->getTerminator())->addCase(Val, BB);
@@ -1676,10 +1687,10 @@
if (IndirectGotoBlock) return IndirectGotoBlock;
// Create a temporary for the value to be switched on.
- IndirectGotoValue = CreateTemporary(TD.getIntPtrType());
+ IndirectGotoValue = CreateTemporary(TD.getIntPtrType(Context));
// Create the block, emit a load, and emit the switch in the block.
- IndirectGotoBlock = BasicBlock::Create("indirectgoto");
+ IndirectGotoBlock = BasicBlock::Create(Context, "indirectgoto");
Value *Ld = new LoadInst(IndirectGotoValue, "gotodest", IndirectGotoBlock);
SwitchInst::Create(Ld, IndirectGotoBlock, 0, IndirectGotoBlock);
@@ -1710,7 +1721,7 @@
// Store the destination block to the GotoValue alloca.
Value *V = Emit(TREE_OPERAND(exp, 0), 0);
- V = CastToType(Instruction::PtrToInt, V, TD.getIntPtrType());
+ V = CastToType(Instruction::PtrToInt, V, TD.getIntPtrType(Context));
Builder.CreateStore(V, IndirectGotoValue);
// NOTE: This is HORRIBLY INCORRECT in the presence of exception handlers.
@@ -1719,7 +1730,7 @@
//
Builder.CreateBr(DestBB);
}
- EmitBlock(BasicBlock::Create(""));
+ EmitBlock(BasicBlock::Create(Context, ""));
return 0;
}
@@ -1740,7 +1751,7 @@
// Emit a branch to the exit label.
Builder.CreateBr(ReturnBB);
- EmitBlock(BasicBlock::Create(""));
+ EmitBlock(BasicBlock::Create(Context, ""));
return 0;
}
@@ -1797,11 +1808,11 @@
if (FPPred == ~0U) {
Cond = Emit(exp_cond, 0);
// Comparison against zero to convert the result to i1.
- if (Cond->getType() != Type::Int1Ty)
+ if (Cond->getType() != Type::getInt1Ty(Context))
Cond = Builder.CreateIsNotNull(Cond, "toBool");
} else {
- Cond = EmitCompare(exp_cond, UIPred, SIPred, FPPred, Type::Int1Ty);
- assert(Cond->getType() == Type::Int1Ty);
+ Cond = EmitCompare(exp_cond, UIPred, SIPred, FPPred, Type::getInt1Ty(Context));
+ assert(Cond->getType() == Type::getInt1Ty(Context));
}
tree Then = COND_EXPR_THEN(exp);
@@ -1812,7 +1823,7 @@
BasicBlock *ThenDest = getLabelDeclBlock(TREE_OPERAND(Then, 0));
BasicBlock *ElseDest = getLabelDeclBlock(TREE_OPERAND(Else, 0));
Builder.CreateCondBr(Cond, ThenDest, ElseDest);
- EmitBlock(BasicBlock::Create(""));
+ EmitBlock(BasicBlock::Create(Context, ""));
return 0;
}
@@ -1826,7 +1837,7 @@
// Emit the switch instruction.
SwitchInst *SI = Builder.CreateSwitch(SwitchExp, Builder.GetInsertBlock(),
TREE_VEC_LENGTH(Cases));
- EmitBlock(BasicBlock::Create(""));
+ EmitBlock(BasicBlock::Create(Context, ""));
// Default location starts out as fall-through
SI->setSuccessor(0, Builder.GetInsertBlock());
@@ -1869,13 +1880,14 @@
SI->addCase(LowC, Dest);
if (LowC == HighC) break; // Emitted the last one.
CurrentValue++;
- LowC = Context.getConstantInt(CurrentValue);
+ LowC = ConstantInt::get(Context, CurrentValue);
}
} else {
// The range is too big to add to the switch - emit an "if".
Value *Diff = Builder.CreateSub(SwitchExp, LowC);
- Value *Cond = Builder.CreateICmpULE(Diff, Context.getConstantInt(Range));
- BasicBlock *False_Block = BasicBlock::Create("case_false");
+ Value *Cond = Builder.CreateICmpULE(Diff,
+ ConstantInt::get(Context, Range));
+ BasicBlock *False_Block = BasicBlock::Create(Context, "case_false");
Builder.CreateCondBr(Cond, Dest, False_Block);
EmitBlock(False_Block);
}
@@ -1887,7 +1899,7 @@
else {
Builder.CreateBr(DefaultDest);
// Emit a "fallthrough" block, which is almost certainly dead.
- EmitBlock(BasicBlock::Create(""));
+ EmitBlock(BasicBlock::Create(Context, ""));
}
}
@@ -1900,9 +1912,9 @@
// Check to see if the exception values have been constructed.
if (ExceptionValue) return;
- const Type *IntPtr = TD.getIntPtrType();
+ const Type *IntPtr = TD.getIntPtrType(Context);
- ExceptionValue = CreateTemporary(Context.getPointerTypeUnqual(Type::Int8Ty));
+ ExceptionValue = CreateTemporary(PointerType::getUnqual(Type::getInt8Ty(Context)));
ExceptionValue->setName("eh_exception");
ExceptionSelectorValue = CreateTemporary(IntPtr);
@@ -1911,11 +1923,11 @@
FuncEHException = Intrinsic::getDeclaration(TheModule,
Intrinsic::eh_exception);
FuncEHSelector = Intrinsic::getDeclaration(TheModule,
- (IntPtr == Type::Int32Ty ?
+ (IntPtr == Type::getInt32Ty(Context) ?
Intrinsic::eh_selector_i32 :
Intrinsic::eh_selector_i64));
FuncEHGetTypeID = Intrinsic::getDeclaration(TheModule,
- (IntPtr == Type::Int32Ty ?
+ (IntPtr == Type::getInt32Ty(Context) ?
Intrinsic::eh_typeid_for_i32 :
Intrinsic::eh_typeid_for_i64));
}
@@ -1927,7 +1939,7 @@
BasicBlock *&PostPad = PostPads[RegionNo];
if (!PostPad)
- PostPad = BasicBlock::Create("ppad");
+ PostPad = BasicBlock::Create(Context, "ppad");
return PostPad;
}
@@ -1965,7 +1977,7 @@
//FIXME assert(llvm_eh_personality_libfunc
//FIXME && "no exception handling personality function!");
//FIXME Args.push_back(BitCastToType(DECL_LLVM(llvm_eh_personality_libfunc),
-//FIXME Context.getPointerTypeUnqual(Type::Int8Ty)));
+//FIXME PointerType::getUnqual(Type::getInt8Ty(Context))));
// Add selections for each handler.
foreach_reachable_handler(i, false, false, AddHandler, &Handlers);
@@ -1984,7 +1996,7 @@
//FIXME tree TypeList = get_eh_type_list(region);
//FIXME unsigned Length = list_length(TypeList);
//FIXME Args.reserve(Args.size() + Length + 1);
-//FIXME Args.push_back(Context.getConstantInt(Type::Int32Ty, Length + 1));
+//FIXME Args.push_back(ConstantInt::get(Type::getInt32Ty, Length + 1));
//FIXME
//FIXME // Add the type infos.
//FIXME for (; TypeList; TypeList = TREE_CHAIN(TypeList)) {
@@ -1998,7 +2010,7 @@
//FIXME if (!TypeList) {
//FIXME // Catch-all - push a null pointer.
//FIXME Args.push_back(
-//FIXME Context.getNullValue(Context.getPointerTypeUnqual(Type::Int8Ty))
+//FIXME Constant::getNullValue(PointerType::getUnqual(Type::getInt8Ty(Context)))
//FIXME );
//FIXME } else {
//FIXME // Add the type infos.
@@ -2016,29 +2028,28 @@
// what exception is being unwound, append a catch-all.
// The representation of a catch-all is language specific.
- Value *Catch_All;
+ Value *CatchAll;
abort();//FIXME
-//FIXME if (!lang_eh_catch_all) {
+//FIXME if (USING_SJLJ_EXCEPTIONS || !lang_eh_catch_all) {
//FIXME // Use a "cleanup" - this should be good enough for most languages.
-//FIXME Catch_All = Context.getConstantInt(Type::Int32Ty, 0);
+//FIXME CatchAll = ConstantInt::get(Type::getInt32Ty, 0);
//FIXME } else {
//FIXME tree catch_all_type = lang_eh_catch_all();
//FIXME if (catch_all_type == NULL_TREE)
//FIXME // Use a C++ style null catch-all object.
-//FIXME Catch_All = Context.getNullValue(
-//FIXME Context.getPointerTypeUnqual(Type::Int8Ty));
+//FIXME CatchAll = Constant::getNullValue(
+//FIXME PointerType::getUnqual(Type::getInt8Ty(Context)));
//FIXME else
//FIXME // This language has a type that catches all others.
-//FIXME Catch_All = Emit(catch_all_type, 0);
+//FIXME CatchAll = Emit(catch_all_type, 0);
//FIXME }
- Args.push_back(Catch_All);
+ Args.push_back(CatchAll);
}
// Emit the selector call.
Value *Select = Builder.CreateCall(FuncEHSelector, Args.begin(), Args.end(),
"eh_select");
Builder.CreateStore(Select, ExceptionSelectorValue);
-
// Branch to the post landing pad for the first reachable handler.
assert(!Handlers.empty() && "Landing pad but no handler?");
Builder.CreateBr(getPostPad(get_eh_region_number(*Handlers.begin())));
@@ -2077,11 +2088,11 @@
//FIXME Value *Select = Builder.CreateLoad(ExceptionSelectorValue);
//FIXME
//FIXME // Compare with the filter action value.
-//FIXME Value *Zero = Context.getConstantInt(Select->getType(), 0);
+//FIXME Value *Zero = ConstantInt::get(Select->getType(), 0);
//FIXME Value *Compare = Builder.CreateICmpSLT(Select, Zero);
//FIXME
//FIXME // Branch on the compare.
-//FIXME BasicBlock *NoFilterBB = BasicBlock::Create("nofilter");
+//FIXME BasicBlock *NoFilterBB = BasicBlock::Create(Context, "nofilter");
//FIXME Builder.CreateCondBr(Compare, Dest, NoFilterBB);
//FIXME EmitBlock(NoFilterBB);
//FIXME } else if (RegionKind > 0) {
@@ -2092,7 +2103,7 @@
//FIXME for (; TypeList; TypeList = TREE_CHAIN (TypeList)) {
//FIXME Value *TType = Emit(lookup_type_for_runtime(TREE_VALUE(TypeList)), 0);
//FIXME TType = BitCastToType(TType,
-//FIXME Context.getPointerTypeUnqual(Type::Int8Ty));
+//FIXME PointerType::getUnqual(Type::getInt8Ty(Context)));
//FIXME
//FIXME // Call get eh type id.
//FIXME Value *TypeID = Builder.CreateCall(FuncEHGetTypeID, TType, "eh_typeid");
@@ -2126,7 +2137,7 @@
//FIXME }
//FIXME
//FIXME // If there is no such catch, execute a RESX if the comparison fails.
-//FIXME NoCatchBB = BasicBlock::Create("nocatch");
+//FIXME NoCatchBB = BasicBlock::Create(Context, "nocatch");
//FIXME // Branch on the compare.
//FIXME Builder.CreateCondBr(Cond, Dest, NoCatchBB);
//FIXME EmitBlock(NoCatchBB);
@@ -2155,7 +2166,7 @@
"Must-not-throw region handled by runtime?");
// Unwinding continues in the caller.
if (!UnwindBB)
- UnwindBB = BasicBlock::Create("Unwind");
+ UnwindBB = BasicBlock::Create(Context, "Unwind");
Builder.CreateBr(UnwindBB);
}
@@ -2238,7 +2249,7 @@
if (!LV.isBitfield()) {
if (!DestLoc) {
// Scalar value: emit a load.
- Value *Ptr = BitCastToType(LV.Ptr, Context.getPointerTypeUnqual(Ty));
+ Value *Ptr = BitCastToType(LV.Ptr, PointerType::getUnqual(Ty));
LoadInst *LI = Builder.CreateLoad(Ptr, isVolatile);
LI->setAlignment(Alignment);
return LI;
@@ -2250,7 +2261,7 @@
} else {
// This is a bitfield reference.
if (!LV.BitSize)
- return Context.getNullValue(Ty);
+ return Constant::getNullValue(Ty);
const Type *ValTy = cast<PointerType>(LV.Ptr->getType())->getElementType();
unsigned ValSizeInBits = ValTy->getPrimitiveSizeInBits();
@@ -2276,7 +2287,7 @@
Value *Ptr = Index ?
Builder.CreateGEP(LV.Ptr,
- Context.getConstantInt(Type::Int32Ty, Index)) :
+ ConstantInt::get(Type::getInt32Ty(Context), Index)) :
LV.Ptr;
LoadInst *LI = Builder.CreateLoad(Ptr, isVolatile);
LI->setAlignment(Alignment);
@@ -2293,7 +2304,7 @@
// expression.
if (FirstBitInVal+BitsInVal != ValSizeInBits) {
- Value *ShAmt = Context.getConstantInt(ValTy, ValSizeInBits -
+ Value *ShAmt = ConstantInt::get(ValTy, ValSizeInBits -
(FirstBitInVal+BitsInVal));
Val = Builder.CreateShl(Val, ShAmt);
}
@@ -2301,13 +2312,13 @@
// Shift right required?
if (ValSizeInBits != BitsInVal) {
bool AddSignBits = !TYPE_UNSIGNED(TREE_TYPE(exp)) && !Result;
- Value *ShAmt = Context.getConstantInt(ValTy, ValSizeInBits-BitsInVal);
+ Value *ShAmt = ConstantInt::get(ValTy, ValSizeInBits-BitsInVal);
Val = AddSignBits ?
Builder.CreateAShr(Val, ShAmt) : Builder.CreateLShr(Val, ShAmt);
}
if (Result) {
- Value *ShAmt = Context.getConstantInt(ValTy, BitsInVal);
+ Value *ShAmt = ConstantInt::get(ValTy, BitsInVal);
Result = Builder.CreateShl(Result, ShAmt);
Result = Builder.CreateOr(Result, Val);
} else {
@@ -2355,7 +2366,7 @@
TREE_CODE(TREE_TYPE (TREE_OPERAND (exp, 0))) == REFERENCE_TYPE)
&& "Not calling a function pointer?");
tree function_type = TREE_TYPE(TREE_TYPE (TREE_OPERAND (exp, 0)));
- unsigned CallingConv;
+ CallingConv::ID CallingConv;
AttrListPtr PAL;
const Type *Ty = TheTypeConverter->ConvertFunctionType(function_type,
@@ -2366,7 +2377,7 @@
// If this is a direct call to a function using a static chain then we need
// to ensure the function type is the one just calculated: it has an extra
// parameter for the chain.
- Callee = BitCastToType(Callee, Context.getPointerTypeUnqual(Ty));
+ Callee = BitCastToType(Callee, PointerType::getUnqual(Ty));
// EmitCall(exp, DestLoc);
Value *Result = EmitCallOf(Callee, exp, DestLoc, PAL);
@@ -2377,7 +2388,7 @@
//
if (fndecl && TREE_THIS_VOLATILE(fndecl)) {
Builder.CreateUnreachable();
- EmitBlock(BasicBlock::Create(""));
+ EmitBlock(BasicBlock::Create(Context, ""));
}
return Result;
}
@@ -2388,12 +2399,12 @@
unsigned RealSize,
LLVMBuilder &Builder) {
if (!RealSize)
- return Context.getUndef(LLVMTy);
+ return UndefValue::get(LLVMTy);
// Not clear what this is supposed to do on big endian machines...
assert(!BYTES_BIG_ENDIAN && "Unsupported case - please report");
assert(isa<IntegerType>(LLVMTy) && "Expected an integer value!");
- const Type *LoadType = Context.getIntegerType(RealSize * 8);
+ const Type *LoadType = IntegerType::get(Context, RealSize * 8);
L = Builder.CreateBitCast(L, LoadType->getPointerTo());
Value *Val = Builder.CreateLoad(L);
if (LoadType->getPrimitiveSizeInBits() >= LLVMTy->getPrimitiveSizeInBits())
@@ -2421,6 +2432,7 @@
LLVMBuilder &Builder;
Value *TheValue;
MemRef RetBuf;
+ CallingConv::ID &CallingConv;
bool isShadowRet;
bool isAggrRet;
unsigned Offset;
@@ -2429,10 +2441,14 @@
const FunctionType *FnTy,
const MemRef *destloc,
bool ReturnSlotOpt,
- LLVMBuilder &b)
+ LLVMBuilder &b,
+ CallingConv::ID &CC)
: CallOperands(ops), FTy(FnTy), DestLoc(destloc),
- useReturnSlot(ReturnSlotOpt), Builder(b), isShadowRet(false),
- isAggrRet(false), Offset(0) { }
+ useReturnSlot(ReturnSlotOpt), Builder(b), CallingConv(CC),
+ isShadowRet(false), isAggrRet(false), Offset(0) { }
+
+ /// getCallingConv - This provides the desired CallingConv for the function.
+ CallingConv::ID& getCallingConv(void) { return CallingConv; }
// Push the address of an argument.
void pushAddress(Value *Loc) {
@@ -2466,7 +2482,7 @@
Value *Loc = LocStack.back();
if (Loc) {
// An address. Convert to the right type and load the value out.
- Loc = Builder.CreateBitCast(Loc, Context.getPointerTypeUnqual(Ty));
+ Loc = Builder.CreateBitCast(Loc, PointerType::getUnqual(Ty));
return Builder.CreateLoad(Loc, "val");
} else {
// A value - just return it.
@@ -2609,7 +2625,7 @@
/// reference with an additional parameter attribute "ByVal".
void HandleByValArgument(const llvm::Type *LLVMTy, tree type) {
Value *Loc = getAddress();
- assert(Context.getPointerTypeUnqual(LLVMTy) == Loc->getType());
+ assert(PointerType::getUnqual(LLVMTy) == Loc->getType());
CallOperands.push_back(Loc);
}
@@ -2617,7 +2633,7 @@
/// argument is passed as a first class aggregate.
void HandleFCAArgument(const llvm::Type *LLVMTy, tree type) {
Value *Loc = getAddress();
- assert(Context.getPointerTypeUnqual(LLVMTy) == Loc->getType());
+ assert(PointerType::getUnqual(LLVMTy) == Loc->getType());
CallOperands.push_back(Builder.CreateLoad(Loc));
}
@@ -2626,7 +2642,7 @@
/// LLVM Struct, StructTy is the LLVM type of the struct we are entering.
void EnterField(unsigned FieldNo, const llvm::Type *StructTy) {
Value *Loc = getAddress();
- Loc = Builder.CreateBitCast(Loc, Context.getPointerTypeUnqual(StructTy));
+ Loc = Builder.CreateBitCast(Loc, PointerType::getUnqual(StructTy));
pushAddress(Builder.CreateStructGEP(Loc, FieldNo, "elt"));
}
void ExitField() {
@@ -2667,7 +2683,7 @@
// Create a landing pad if one didn't exist already.
if (!ThisPad)
- ThisPad = BasicBlock::Create("lpad");
+ ThisPad = BasicBlock::Create(Context, "lpad");
LandingPad = ThisPad;
} else {
@@ -2692,7 +2708,7 @@
const FunctionType *FTy = cast<FunctionType>(PFTy->getElementType());
FunctionCallArgumentConversion Client(CallOperands, FTy, DestLoc,
CALL_EXPR_RETURN_SLOT_OPT(exp),
- Builder);
+ Builder, CallingConvention);
TheLLVMABI<FunctionCallArgumentConversion> ABIConverter(Client);
// Handle the result, including struct returns.
@@ -2771,7 +2787,7 @@
cast<CallInst>(Call)->setCallingConv(CallingConvention);
cast<CallInst>(Call)->setAttributes(PAL);
} else {
- BasicBlock *NextBlock = BasicBlock::Create("invcont");
+ BasicBlock *NextBlock = BasicBlock::Create(Context, "invcont");
Call = Builder.CreateInvoke(Callee, NextBlock, LandingPad,
CallOperands.begin(), CallOperands.end());
cast<InvokeInst>(Call)->setCallingConv(CallingConvention);
@@ -2782,12 +2798,12 @@
if (Client.isShadowReturn())
return Client.EmitShadowResult(TREE_TYPE(exp), DestLoc);
- if (Call->getType() == Type::VoidTy)
+ if (Call->getType() == Type::getVoidTy(Context))
return 0;
if (Client.isAggrReturn()) {
Value *Dest = BitCastToType(DestLoc->Ptr,
- Context.getPointerTypeUnqual(Call->getType()));
+ PointerType::getUnqual(Call->getType()));
LLVM_EXTRACT_MULTIPLE_RETURN_VALUE(Call,Dest,DestLoc->Volatile,Builder);
return 0;
}
@@ -2803,11 +2819,11 @@
Value *Ptr = DestLoc->Ptr;
if (Client.Offset) {
- Ptr = BitCastToType(Ptr, Context.getPointerTypeUnqual(Type::Int8Ty));
+ Ptr = BitCastToType(Ptr, PointerType::getUnqual(Type::getInt8Ty(Context)));
Ptr = Builder.CreateGEP(Ptr,
- Context.getConstantInt(TD.getIntPtrType(), Client.Offset));
+ ConstantInt::get(TD.getIntPtrType(Context), Client.Offset));
}
- Ptr = BitCastToType(Ptr, Context.getPointerTypeUnqual(Call->getType()));
+ Ptr = BitCastToType(Ptr, PointerType::getUnqual(Call->getType()));
StoreInst *St = Builder.CreateStore(Call, Ptr, DestLoc->Volatile);
St->setAlignment(DestLoc->getAlignment());
return 0;
@@ -2901,7 +2917,7 @@
// defined - ensure it can be uniquely identified by not folding the cast.
Instruction::CastOps opc = CastInst::getCastOpcode(RHS, RHSSigned,
LHSTy, LHSSigned);
- CastInst *Cast = CastInst::Create(opc, RHS, LHSTy, RHS->getNameStart());
+ CastInst *Cast = CastInst::Create(opc, RHS, LHSTy, RHS->getName());
if (opc == Instruction::BitCast && RHS->getType() == LHSTy)
// Simplify this no-op bitcast once the function is emitted.
UniquedValues.push_back(cast<BitCastInst>(Cast));
@@ -2939,7 +2955,7 @@
RHS = CastToAnyType(RHS, RHSSigned, PT->getElementType(), LHSSigned);
else
LV.Ptr = BitCastToType(LV.Ptr,
- Context.getPointerTypeUnqual(RHS->getType()));
+ PointerType::getUnqual(RHS->getType()));
StoreInst *SI = Builder.CreateStore(RHS, LV.Ptr, isVolatile);
SI->setAlignment(Alignment);
return RHS;
@@ -2986,7 +3002,7 @@
ThisLastBitPlusOne = LV.BitStart+LV.BitSize;
Value *Ptr = Index ?
- Builder.CreateGEP(LV.Ptr, Context.getConstantInt(Type::Int32Ty, Index)) :
+ Builder.CreateGEP(LV.Ptr, ConstantInt::get(Type::getInt32Ty(Context), Index)) :
LV.Ptr;
LoadInst *LI = Builder.CreateLoad(Ptr, isVolatile);
LI->setAlignment(Alignment);
@@ -3001,14 +3017,14 @@
// If not storing into the zero'th bit, shift the Src value to the left.
if (FirstBitInVal) {
- Value *ShAmt = Context.getConstantInt(ValTy, FirstBitInVal);
+ Value *ShAmt = ConstantInt::get(ValTy, FirstBitInVal);
NewVal = Builder.CreateShl(NewVal, ShAmt);
}
// Next, if this doesn't touch the top bit, mask out any bits that shouldn't
// be set in the result.
uint64_t MaskVal = ((1ULL << BitsInVal)-1) << FirstBitInVal;
- Constant *Mask = Context.getConstantInt(Type::Int64Ty, MaskVal);
+ Constant *Mask = ConstantInt::get(Type::getInt64Ty(Context), MaskVal);
Mask = Builder.getFolder().CreateTruncOrBitCast(Mask, ValTy);
if (FirstBitInVal+BitsInVal != ValSizeInBits)
@@ -3025,7 +3041,7 @@
SI->setAlignment(Alignment);
if (I + 1 < Strides) {
- Value *ShAmt = Context.getConstantInt(ValTy, BitsInVal);
+ Value *ShAmt = ConstantInt::get(ValTy, BitsInVal);
BitSource = Builder.CreateLShr(BitSource, ShAmt);
}
}
@@ -3046,12 +3062,12 @@
assert(!isAggregateTreeType(TREE_TYPE(Op))
&& "Aggregate to scalar nop_expr!");
Value *OpVal = Emit(Op, DestLoc);
- if (Ty == Type::VoidTy) return 0;
+ if (Ty == Type::getVoidTy(Context)) return 0;
return CastToAnyType(OpVal, OpIsSigned, Ty, ExpIsSigned);
} else if (isAggregateTreeType(TREE_TYPE(Op))) {
// Aggregate to aggregate copy.
MemRef NewLoc = *DestLoc;
- NewLoc.Ptr = BitCastToType(DestLoc->Ptr, Context.getPointerTypeUnqual(Ty));
+ NewLoc.Ptr = BitCastToType(DestLoc->Ptr, PointerType::getUnqual(Ty));
Value *OpVal = Emit(Op, &NewLoc);
assert(OpVal == 0 && "Shouldn't cast scalar to aggregate!");
return 0;
@@ -3060,7 +3076,7 @@
// Scalar to aggregate copy.
Value *OpVal = Emit(Op, 0);
Value *Ptr = BitCastToType(DestLoc->Ptr,
- Context.getPointerTypeUnqual(OpVal->getType()));
+ PointerType::getUnqual(OpVal->getType()));
StoreInst *St = Builder.CreateStore(OpVal, Ptr, DestLoc->Volatile);
St->setAlignment(DestLoc->getAlignment());
return 0;
@@ -3088,7 +3104,7 @@
// Make the destination look like the source type.
const Type *OpTy = ConvertType(TREE_TYPE(Op));
- Target.Ptr = BitCastToType(Target.Ptr, Context.getPointerTypeUnqual(OpTy));
+ Target.Ptr = BitCastToType(Target.Ptr, PointerType::getUnqual(OpTy));
// Needs to be in sync with EmitLV.
switch (TREE_CODE(Op)) {
@@ -3128,7 +3144,7 @@
// Target holds the temporary created above.
const Type *ExpTy = ConvertType(TREE_TYPE(exp));
return Builder.CreateLoad(BitCastToType(Target.Ptr,
- Context.getPointerTypeUnqual(ExpTy)));
+ PointerType::getUnqual(ExpTy)));
}
if (DestLoc) {
@@ -3137,7 +3153,7 @@
Value *OpVal = Emit(Op, 0);
assert(OpVal && "Expected a scalar result!");
Value *Ptr = BitCastToType(DestLoc->Ptr,
- Context.getPointerTypeUnqual(OpVal->getType()));
+ PointerType::getUnqual(OpVal->getType()));
StoreInst *St = Builder.CreateStore(OpVal, Ptr, DestLoc->Volatile);
St->setAlignment(DestLoc->getAlignment());
return 0;
@@ -3154,7 +3170,7 @@
if (isa<PointerType>(DestTy)) // ptr->ptr is a simple bitcast.
return Builder.CreateBitCast(OpVal, DestTy);
// Otherwise, ptrtoint to intptr_t first.
- OpVal = Builder.CreatePtrToInt(OpVal, TD.getIntPtrType());
+ OpVal = Builder.CreatePtrToInt(OpVal, TD.getIntPtrType(Context));
}
// If the destination type is a pointer, use inttoptr.
@@ -3175,7 +3191,7 @@
// GCC allows NEGATE_EXPR on pointers as well. Cast to int, negate, cast
// back.
- V = CastToAnyType(V, false, TD.getIntPtrType(), false);
+ V = CastToAnyType(V, false, TD.getIntPtrType(Context), false);
V = Builder.CreateNeg(V);
return CastToType(Instruction::IntToPtr, V, ConvertType(TREE_TYPE(exp)));
}
@@ -3222,11 +3238,11 @@
Value *TreeToLLVM::EmitABS_EXPR(tree exp) {
Value *Op = Emit(TREE_OPERAND(exp, 0), 0);
if (!Op->getType()->isFloatingPoint()) {
- Value *OpN = Builder.CreateNeg(Op, (Op->getName()+"neg").c_str());
+ Value *OpN = Builder.CreateNeg(Op, (Op->getNameStr()+"neg").c_str());
ICmpInst::Predicate pred = TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp, 0))) ?
ICmpInst::ICMP_UGE : ICmpInst::ICMP_SGE;
Value *Cmp = Builder.CreateICmp(pred, Op,
- Context.getNullValue(Op->getType()), "abscond");
+ Constant::getNullValue(Op->getType()), "abscond");
return Builder.CreateSelect(Cmp, Op, OpN, "abs");
}
@@ -3257,10 +3273,10 @@
if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
unsigned NumElements = VTy->getNumElements();
const Type *EltTy = VTy->getElementType();
- return Context.getVectorType(
- Context.getIntegerType(EltTy->getPrimitiveSizeInBits()), NumElements);
+ return VectorType::get(
+ IntegerType::get(Context, EltTy->getPrimitiveSizeInBits()), NumElements);
}
- return Context.getIntegerType(Ty->getPrimitiveSizeInBits());
+ return IntegerType::get(Context, Ty->getPrimitiveSizeInBits());
}
Value *TreeToLLVM::EmitBIT_NOT_EXPR(tree exp) {
@@ -3276,15 +3292,16 @@
cast<VectorType>(Ty)->getElementType()->isFloatingPoint())) {
Op = BitCastToType(Op, getSuitableBitCastIntType(Ty));
}
- return BitCastToType(Builder.CreateNot(Op, (Op->getName()+"not").c_str()),Ty);
+ return BitCastToType(Builder.CreateNot(Op,
+ (Op->getNameStr()+"not").c_str()),Ty);
}
Value *TreeToLLVM::EmitTRUTH_NOT_EXPR(tree exp) {
Value *V = Emit(TREE_OPERAND(exp, 0), 0);
- if (V->getType() != Type::Int1Ty)
+ if (V->getType() != Type::getInt1Ty(Context))
V = Builder.CreateICmpNE(V,
- Context.getNullValue(V->getType()), "toBool");
- V = Builder.CreateNot(V, (V->getName()+"not").c_str());
+ Constant::getNullValue(V->getType()), "toBool");
+ V = Builder.CreateNot(V, (V->getNameStr()+"not").c_str());
return CastToUIntType(V, ConvertType(TREE_TYPE(exp)));
}
@@ -3329,7 +3346,7 @@
Result = Builder.CreateICmp(pred, LHS, RHS);
}
}
- assert(Result->getType() == Type::Int1Ty && "Expected i1 result for compare");
+ assert(Result->getType() == Type::getInt1Ty(Context) && "Expected i1 result for compare");
if (DestTy == 0)
DestTy = ConvertType(TREE_TYPE(exp));
@@ -3360,6 +3377,8 @@
bool LHSIsSigned = !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp, 0)));
bool RHSIsSigned = !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp, 1)));
bool TyIsSigned = !TYPE_UNSIGNED(TREE_TYPE(exp));
+ bool IsExactDiv = TREE_CODE(exp) == EXACT_DIV_EXPR;
+ bool IsPlus = TREE_CODE(exp) == PLUS_EXPR;
LHS = CastToAnyType(LHS, LHSIsSigned, Ty, TyIsSigned);
RHS = CastToAnyType(RHS, RHSIsSigned, Ty, TyIsSigned);
@@ -3378,7 +3397,13 @@
RHS = BitCastToType(RHS, Ty);
}
- Value *V = Builder.CreateBinOp((Instruction::BinaryOps)Opc, LHS, RHS);
+ Value *V;
+ if (Opc == Instruction::SDiv && IsExactDiv)
+ V = Builder.CreateExactSDiv(LHS, RHS);
+ else if (Opc == Instruction::Add && IsPlus && TyIsSigned && !flag_wrapv)
+ V = Builder.CreateNSWAdd(LHS, RHS);
+ else
+ V = Builder.CreateBinOp((Instruction::BinaryOps)Opc, LHS, RHS);
if (ResTy != Ty)
V = BitCastToType(V, ResTy);
return V;
@@ -3413,8 +3438,10 @@
// If this is a subtract, we want to step backwards.
if (Opc == Instruction::Sub)
EltOffset = -EltOffset;
- Constant *C = Context.getConstantInt(Type::Int64Ty, EltOffset);
- Value *V = Builder.CreateGEP(LHS, C);
+ Constant *C = ConstantInt::get(Type::getInt64Ty(Context), EltOffset);
+ Value *V = flag_wrapv ?
+ Builder.CreateGEP(LHS, C) :
+ Builder.CreateInBoundsGEP(LHS, C);
return BitCastToType(V, ConvertType(TREE_TYPE(exp)));
}
}
@@ -3423,7 +3450,7 @@
Value *RHS = Emit(TREE_OPERAND(exp, 1), 0);
- const Type *IntPtrTy = TD.getIntPtrType();
+ const Type *IntPtrTy = TD.getIntPtrType(Context);
bool LHSIsSigned = !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp, 0)));
bool RHSIsSigned = !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp, 1)));
LHS = CastToAnyType(LHS, LHSIsSigned, IntPtrTy, false);
@@ -3442,10 +3469,10 @@
// This is a truth operation like the strict &&,||,^^. Convert to bool as
// a test against zero
LHS = Builder.CreateICmpNE(LHS,
- Context.getNullValue(LHS->getType()),
+ Constant::getNullValue(LHS->getType()),
"toBool");
RHS = Builder.CreateICmpNE(RHS,
- Context.getNullValue(RHS->getType()),
+ Constant::getNullValue(RHS->getType()),
"toBool");
Value *Res = Builder.CreateBinOp((Instruction::BinaryOps)Opc, LHS, RHS);
@@ -3462,7 +3489,7 @@
Value *RHS = Emit(TREE_OPERAND(exp, 1), 0);
if (RHS->getType() != LHS->getType())
RHS = Builder.CreateIntCast(RHS, LHS->getType(), false,
- (RHS->getName()+".cast").c_str());
+ (RHS->getNameStr()+".cast").c_str());
return Builder.CreateBinOp((Instruction::BinaryOps)Opc, LHS, RHS);
}
@@ -3472,10 +3499,10 @@
Value *Amt = Emit(TREE_OPERAND(exp, 1), 0);
if (Amt->getType() != In->getType())
Amt = Builder.CreateIntCast(Amt, In->getType(), false,
- (Amt->getName()+".cast").c_str());
+ (Amt->getNameStr()+".cast").c_str());
Value *TypeSize =
- Context.getConstantInt(In->getType(),
+ ConstantInt::get(In->getType(),
In->getType()->getPrimitiveSizeInBits());
// Do the two shifts.
@@ -3523,22 +3550,7 @@
// Unsigned EXACT_DIV_EXPR -> normal udiv.
if (TYPE_UNSIGNED(TREE_TYPE(exp)))
return EmitBinOp(exp, DestLoc, Instruction::UDiv);
-
- // If this is a signed EXACT_DIV_EXPR by a constant, and we know that
- // the RHS is a multiple of two, we strength reduce the result to use
- // a signed SHR here. We have no way in LLVM to represent EXACT_DIV_EXPR
- // precisely, so this transform can't currently be performed at the LLVM
- // level. This is commonly used for pointer subtraction.
- if (TREE_CODE(TREE_OPERAND(exp, 1)) == INTEGER_CST) {
- uint64_t IntValue = getINTEGER_CSTVal(TREE_OPERAND(exp, 1));
- if (isPowerOf2_64(IntValue)) {
- // Create an ashr instruction, by the log of the division amount.
- Value *LHS = Emit(TREE_OPERAND(exp, 0), 0);
- return Builder.CreateAShr(LHS, Context.getConstantInt(LHS->getType(),
- Log2_64(IntValue)));
- }
- }
-
+
// Otherwise, emit this as a normal signed divide.
return EmitBinOp(exp, DestLoc, Instruction::SDiv);
}
@@ -3555,7 +3567,7 @@
return EmitBinOp(exp, DestLoc, Instruction::URem);
const Type *Ty = ConvertType(TREE_TYPE(exp));
- Constant *Zero = Context.getConstantInt(Ty, 0);
+ Constant *Zero = ConstantInt::get(Ty, 0);
Value *LHS = Emit(TREE_OPERAND(exp, 0), 0);
Value *RHS = Emit(TREE_OPERAND(exp, 1), 0);
@@ -3586,9 +3598,9 @@
// otherwise.
const Type *Ty = ConvertType(TREE_TYPE(exp));
- Constant *Zero = Context.getConstantInt(Ty, 0);
- Constant *One = Context.getConstantInt(Ty, 1);
- Constant *MinusOne = Context.getAllOnesValue(Ty);
+ Constant *Zero = ConstantInt::get(Ty, 0);
+ Constant *One = ConstantInt::get(Ty, 1);
+ Constant *MinusOne = Constant::getAllOnesValue(Ty);
Value *LHS = Emit(TREE_OPERAND(exp, 0), 0);
Value *RHS = Emit(TREE_OPERAND(exp, 1), 0);
@@ -3658,9 +3670,9 @@
return Builder.CreateUDiv(LHS, RHS, "fdiv");
const Type *Ty = ConvertType(TREE_TYPE(exp));
- Constant *Zero = Context.getConstantInt(Ty, 0);
- Constant *One = Context.getConstantInt(Ty, 1);
- Constant *MinusOne = Context.getAllOnesValue(Ty);
+ Constant *Zero = ConstantInt::get(Ty, 0);
+ Constant *One = ConstantInt::get(Ty, 1);
+ Constant *MinusOne = Constant::getAllOnesValue(Ty);
// In the case of signed arithmetic, we calculate FDiv as follows:
// LHS FDiv RHS = (LHS + Sign(RHS) * Offset) Div RHS - Offset,
@@ -3704,8 +3716,8 @@
// we are doing signed or unsigned arithmetic.
const Type *Ty = ConvertType(TREE_TYPE(exp));
- Constant *Zero = Context.getConstantInt(Ty, 0);
- Constant *Two = Context.getConstantInt(Ty, 2);
+ Constant *Zero = ConstantInt::get(Ty, 0);
+ Constant *Two = ConstantInt::get(Ty, 2);
Value *LHS = Emit(TREE_OPERAND(exp, 0), 0);
Value *RHS = Emit(TREE_OPERAND(exp, 1), 0);
@@ -3813,11 +3825,11 @@
"Must-not-throw region handled by runtime?");
// Unwinding continues in the caller.
if (!UnwindBB)
- UnwindBB = BasicBlock::Create("Unwind");
+ UnwindBB = BasicBlock::Create(Context, "Unwind");
Builder.CreateBr(UnwindBB);
}
- EmitBlock(BasicBlock::Create(""));
+ EmitBlock(BasicBlock::Create(Context, ""));
return 0;
}
@@ -3835,13 +3847,12 @@
// If there was an error, return something bogus.
if (ValidateRegisterVariable(decl)) {
if (Ty->isSingleValueType())
- return Context.getUndef(Ty);
+ return UndefValue::get(Ty);
return 0; // Just don't copy something into DestLoc.
}
// Turn this into a 'tmp = call Ty asm "", "={reg}"()'.
- FunctionType *FTy =
- Context.getFunctionType(Ty, std::vector<const Type*>(),false);
+ FunctionType *FTy = FunctionType::get(Ty, std::vector<const Type*>(),false);
const char *Name = reg_names[decode_reg_name(extractRegisterName(decl))];
@@ -3861,7 +3872,7 @@
// Turn this into a 'call void asm sideeffect "", "{reg}"(Ty %RHS)'.
std::vector<const Type*> ArgTys;
ArgTys.push_back(ConvertType(TREE_TYPE(decl)));
- FunctionType *FTy = Context.getFunctionType(Type::VoidTy, ArgTys, false);
+ FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context), ArgTys, false);
const char *Name = reg_names[decode_reg_name(extractRegisterName(decl))];
@@ -4446,9 +4457,9 @@
uint64_t TySize = TD.getTypeSizeInBits(LLVMTy);
if (TySize == 1 || TySize == 8 || TySize == 16 ||
TySize == 32 || TySize == 64) {
- LLVMTy = Context.getIntegerType(TySize);
+ LLVMTy = IntegerType::get(Context, TySize);
Op = Builder.CreateLoad(BitCastToType(LV.Ptr,
- Context.getPointerTypeUnqual(LLVMTy)));
+ PointerType::getUnqual(LLVMTy)));
} else {
// Otherwise, emit our value as a lvalue and let the codegen deal with
// it.
@@ -4490,7 +4501,7 @@
Op = CastToAnyType(Op, !TYPE_UNSIGNED(type),
OTy, CallResultIsSigned[Match]);
if (BYTES_BIG_ENDIAN) {
- Constant *ShAmt = Context.getConstantInt(Op->getType(),
+ Constant *ShAmt = ConstantInt::get(Op->getType(),
OTyBits-OpTyBits);
Op = Builder.CreateLShr(Op, ShAmt);
}
@@ -4567,17 +4578,17 @@
const Type *CallResultType;
switch (CallResultTypes.size()) {
- case 0: CallResultType = Type::VoidTy; break;
+ case 0: CallResultType = Type::getVoidTy(Context); break;
case 1: CallResultType = CallResultTypes[0]; break;
default:
std::vector<const Type*> TmpVec(CallResultTypes.begin(),
CallResultTypes.end());
- CallResultType = Context.getStructType(TmpVec);
+ CallResultType = StructType::get(Context, TmpVec);
break;
}
const FunctionType *FTy =
- Context.getFunctionType(CallResultType, CallArgTypes, false);
+ FunctionType::get(CallResultType, CallArgTypes, false);
// Remove the leading comma if we have operands.
if (!ConstraintStr.empty())
@@ -4634,16 +4645,16 @@
std::vector<Constant*> CstOps;
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
CstOps.push_back(cast<Constant>(Ops[i]));
- return Context.getConstantVector(CstOps);
+ return ConstantVector::get(CstOps);
}
// Otherwise, insertelement the values to build the vector.
Value *Result =
- Context.getUndef(Context.getVectorType(Ops[0]->getType(), Ops.size()));
+ UndefValue::get(VectorType::get(Ops[0]->getType(), Ops.size()));
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
Result = Builder.CreateInsertElement(Result, Ops[i],
- Context.getConstantInt(Type::Int32Ty, i));
+ ConstantInt::get(Type::getInt32Ty(Context), i));
return Result;
}
@@ -4683,17 +4694,17 @@
for (unsigned i = 0; i != NumElements; ++i) {
int idx = va_arg(VA, int);
if (idx == -1)
- Idxs.push_back(Context.getUndef(Type::Int32Ty));
+ Idxs.push_back(UndefValue::get(Type::getInt32Ty(Context)));
else {
assert((unsigned)idx < 2*NumElements && "Element index out of range!");
- Idxs.push_back(Context.getConstantInt(Type::Int32Ty, idx));
+ Idxs.push_back(ConstantInt::get(Type::getInt32Ty(Context), idx));
}
}
va_end(VA);
// Turn this into the appropriate shuffle operation.
return Builder.CreateShuffleVector(InVec1, InVec2,
- Context.getConstantVector(Idxs));
+ ConstantVector::get(Idxs));
}
//===----------------------------------------------------------------------===//
@@ -4714,8 +4725,16 @@
// Get the result type and operand line in an easy to consume format.
const Type *ResultType = ConvertType(TREE_TYPE(TREE_TYPE(fndecl)));
std::vector<Value*> Operands;
- for (tree Op = TREE_OPERAND(exp, 1); Op; Op = TREE_CHAIN(Op))
- Operands.push_back(Emit(TREE_VALUE(Op), 0));
+ for (tree Op = TREE_OPERAND(exp, 1); Op; Op = TREE_CHAIN(Op)) {
+ tree OpVal = TREE_VALUE(Op);
+ if (isAggregateTreeType(TREE_TYPE(OpVal))) {
+ MemRef OpLoc = CreateTempLoc(ConvertType(TREE_TYPE(OpVal)));
+ Emit(OpVal, &OpLoc);
+ Operands.push_back(Builder.CreateLoad(OpLoc.Ptr));
+ } else {
+ Operands.push_back(Emit(OpVal, NULL));
+ }
+ }
unsigned FnCode = DECL_FUNCTION_CODE(fndecl);
return LLVM_TARGET_INTRINSIC_LOWER(exp, FnCode, DestLoc, Result, ResultType,
@@ -4734,12 +4753,12 @@
void TreeToLLVM::EmitMemoryBarrier(bool ll, bool ls, bool sl, bool ss) {
Value* C[5];
- C[0] = Context.getConstantInt(Type::Int1Ty, ll);
- C[1] = Context.getConstantInt(Type::Int1Ty, ls);
- C[2] = Context.getConstantInt(Type::Int1Ty, sl);
- C[3] = Context.getConstantInt(Type::Int1Ty, ss);
+ C[0] = ConstantInt::get(Type::getInt1Ty(Context), ll);
+ C[1] = ConstantInt::get(Type::getInt1Ty(Context), ls);
+ C[2] = ConstantInt::get(Type::getInt1Ty(Context), sl);
+ C[3] = ConstantInt::get(Type::getInt1Ty(Context), ss);
// Be conservatively safe.
- C[4] = Context.getConstantInt(Type::Int1Ty, true);
+ C[4] = ConstantInt::get(Type::getInt1Ty(Context), true);
Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
Intrinsic::memory_barrier),
@@ -4756,7 +4775,7 @@
};
const Type* Ty[2];
Ty[0] = ResultTy;
- Ty[1] = Context.getPointerTypeUnqual(ResultTy);
+ Ty[1] = PointerType::getUnqual(ResultTy);
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
C[1] = Builder.CreateIntCast(C[1], Ty[0], "cast");
// The gcc builtins are also full memory barriers.
@@ -4781,7 +4800,7 @@
};
const Type* Ty[2];
Ty[0] = ResultTy;
- Ty[1] = Context.getPointerTypeUnqual(ResultTy);
+ Ty[1] = PointerType::getUnqual(ResultTy);
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
C[1] = Builder.CreateIntCast(C[1], Ty[0], "cast");
C[2] = Builder.CreateIntCast(C[2], Ty[0], "cast");
@@ -4837,7 +4856,7 @@
BuiltinName);
const Type *ResTy = ConvertType(TREE_TYPE(exp));
if (ResTy->isSingleValueType())
- Result = Context.getUndef(ResTy);
+ Result = UndefValue::get(ResTy);
return true;
}
@@ -4921,9 +4940,9 @@
// This treats everything as unknown, and is minimally defensible as
// correct, although completely useless.
if (tree_low_cst (ObjSizeTree, 0) < 2)
- Result = Context.getAllOnesValue(TD.getIntPtrType());
+ Result = Constant::getAllOnesValue(TD.getIntPtrType(Context));
else
- Result = Context.getConstantInt(TD.getIntPtrType(), 0);
+ Result = ConstantInt::get(TD.getIntPtrType(Context), 0);
return true;
}
// Unary bit counting intrinsics.
@@ -4953,7 +4972,7 @@
Value *Amt = Emit(TREE_VALUE(TREE_OPERAND(exp, 1)), 0);
EmitBuiltinUnaryOp(Amt, Result, Intrinsic::ctpop);
Result = Builder.CreateBinOp(Instruction::And, Result,
- Context.getConstantInt(Result->getType(), 1));
+ ConstantInt::get(Result->getType(), 1));
return true;
}
case BUILT_IN_POPCOUNT: // These GCC builtins always return int.
@@ -5061,13 +5080,13 @@
Value *Amt = Emit(TREE_VALUE(TREE_OPERAND(exp, 1)), 0);
EmitBuiltinUnaryOp(Amt, Result, Intrinsic::cttz);
Result = Builder.CreateAdd(Result,
- Context.getConstantInt(Result->getType(), 1));
+ ConstantInt::get(Result->getType(), 1));
Result = CastToUIntType(Result, ConvertType(TREE_TYPE(exp)));
Value *Cond =
Builder.CreateICmpEQ(Amt,
- Context.getNullValue(Amt->getType()));
+ Constant::getNullValue(Amt->getType()));
Result = Builder.CreateSelect(Cond,
- Context.getNullValue(Result->getType()),
+ Constant::getNullValue(Result->getType()),
Result);
return true;
}
@@ -5082,7 +5101,7 @@
Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::trap));
// Emit an explicit unreachable instruction.
Builder.CreateUnreachable();
- EmitBlock(BasicBlock::Create(""));
+ EmitBlock(BasicBlock::Create(Context, ""));
return true;
//TODO // Convert annotation built-in to llvm.annotation intrinsic.
@@ -5090,9 +5109,9 @@
//TODO
//TODO // Get file and line number
//TODO location_t locus = EXPR_LOCATION (exp);
-//TODO Constant *lineNo = ConstantInt::get(Type::Int32Ty, LOCATION_LINE(locus));
+//TODO Constant *lineNo = ConstantInt::get(Type::getInt32Ty, LOCATION_LINE(locus));
//TODO Constant *file = ConvertMetadataStringToGV(LOCATION_FILE(locus));
-//TODO const Type *SBP= PointerType::getUnqual(Type::Int8Ty);
+//TODO const Type *SBP= PointerType::getUnqual(Type::getInt8Ty(Context));
//TODO file = Builder.getFolder().CreateBitCast(file, SBP);
//TODO
//TODO // Get arguments.
@@ -5119,8 +5138,8 @@
case BUILT_IN_SYNCHRONIZE: {
// We assume like gcc appears to, that this only applies to cached memory.
Value* C[5];
- C[0] = C[1] = C[2] = C[3] = Context.getConstantInt(Type::Int1Ty, 1);
- C[4] = Context.getConstantInt(Type::Int1Ty, 0);
+ C[0] = C[1] = C[2] = C[3] = ConstantInt::get(Type::getInt1Ty(Context), 1);
+ C[4] = ConstantInt::get(Type::getInt1Ty(Context), 0);
Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
Intrinsic::memory_barrier),
@@ -5265,7 +5284,7 @@
};
const Type* Ty[2];
Ty[0] = ResultTy;
- Ty[1] = Context.getPointerTypeUnqual(ResultTy);
+ Ty[1] = PointerType::getUnqual(ResultTy);
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
C[1] = Builder.CreateIntCast(C[1], Ty[0], "cast");
@@ -5303,7 +5322,7 @@
};
const Type* Ty[2];
Ty[0] = ResultTy;
- Ty[1] = Context.getPointerTypeUnqual(ResultTy);
+ Ty[1] = PointerType::getUnqual(ResultTy);
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
C[1] = Builder.CreateIntCast(C[1], Ty[0], "cast");
@@ -5341,7 +5360,7 @@
};
const Type* Ty[2];
Ty[0] = ResultTy;
- Ty[1] = Context.getPointerTypeUnqual(ResultTy);
+ Ty[1] = PointerType::getUnqual(ResultTy);
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
C[1] = Builder.CreateIntCast(C[1], Ty[0], "cast");
@@ -5379,7 +5398,7 @@
};
const Type* Ty[2];
Ty[0] = ResultTy;
- Ty[1] = Context.getPointerTypeUnqual(ResultTy);
+ Ty[1] = PointerType::getUnqual(ResultTy);
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
C[1] = Builder.CreateIntCast(C[1], Ty[0], "cast");
@@ -5417,7 +5436,7 @@
};
const Type* Ty[2];
Ty[0] = ResultTy;
- Ty[1] = Context.getPointerTypeUnqual(ResultTy);
+ Ty[1] = PointerType::getUnqual(ResultTy);
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
C[1] = Builder.CreateIntCast(C[1], Ty[0], "cast");
@@ -5455,7 +5474,7 @@
};
const Type* Ty[2];
Ty[0] = ResultTy;
- Ty[1] = Context.getPointerTypeUnqual(ResultTy);
+ Ty[1] = PointerType::getUnqual(ResultTy);
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
C[1] = Builder.CreateIntCast(C[1], Ty[0], "cast");
@@ -5544,8 +5563,8 @@
// FIXME: HACK: Just ignore these.
{
const Type *Ty = ConvertType(TREE_TYPE(exp));
- if (Ty != Type::VoidTy)
- Result = Context.getNullValue(Ty);
+ if (Ty != Type::getVoidTy(Context))
+ Result = Constant::getNullValue(Ty);
return true;
}
#endif // FIXME: Should handle these GCC extensions eventually.
@@ -5582,7 +5601,7 @@
Value *Val = Emit(TREE_VALUE(ArgList), 0);
Value *Pow = Emit(TREE_VALUE(TREE_CHAIN(ArgList)), 0);
const Type *Ty = Val->getType();
- Pow = CastToSIntType(Pow, Type::Int32Ty);
+ Pow = CastToSIntType(Pow, Type::getInt32Ty(Context));
SmallVector<Value *,2> Args;
Args.push_back(Val);
@@ -5610,7 +5629,7 @@
}
bool TreeToLLVM::EmitBuiltinConstantP(tree exp, Value *&Result) {
- Result = Context.getNullValue(ConvertType(TREE_TYPE(exp)));
+ Result = Constant::getNullValue(ConvertType(TREE_TYPE(exp)));
return true;
}
@@ -5725,7 +5744,7 @@
unsigned DstAlign = getPointerAlignment(Dst);
Value *DstV = Emit(Dst, 0);
- Value *Val = Context.getNullValue(Type::Int32Ty);
+ Value *Val = Constant::getNullValue(Type::getInt32Ty(Context));
Value *Len = Emit(TREE_VALUE(TREE_CHAIN(arglist)), 0);
EmitMemSet(DstV, Val, Len, DstAlign);
return true;
@@ -5751,7 +5770,7 @@
ReadWrite = 0;
} else {
ReadWrite = Builder.getFolder().CreateIntCast(cast<Constant>(ReadWrite),
- Type::Int32Ty, false);
+ Type::getInt32Ty(Context), false);
}
if (TREE_CHAIN(TREE_CHAIN(arglist))) {
@@ -5764,18 +5783,18 @@
Locality = 0;
} else {
Locality = Builder.getFolder().CreateIntCast(cast<Constant>(Locality),
- Type::Int32Ty, false);
+ Type::getInt32Ty(Context), false);
}
}
}
// Default to highly local read.
if (ReadWrite == 0)
- ReadWrite = Context.getNullValue(Type::Int32Ty);
+ ReadWrite = Constant::getNullValue(Type::getInt32Ty(Context));
if (Locality == 0)
- Locality = Context.getConstantInt(Type::Int32Ty, 3);
+ Locality = ConstantInt::get(Type::getInt32Ty(Context), 3);
- Ptr = BitCastToType(Ptr, Context.getPointerTypeUnqual(Type::Int8Ty));
+ Ptr = BitCastToType(Ptr, PointerType::getUnqual(Type::getInt8Ty(Context)));
Value *Ops[3] = { Ptr, ReadWrite, Locality };
Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::prefetch),
@@ -5818,7 +5837,7 @@
// Unfortunately, these constants are defined as RTL expressions and
// should be handled separately.
- Result = BitCastToType(Ptr, Context.getPointerTypeUnqual(Type::Int8Ty));
+ Result = BitCastToType(Ptr, PointerType::getUnqual(Type::getInt8Ty(Context)));
return true;
}
@@ -5834,7 +5853,7 @@
// needed for: MIPS, Sparc. Unfortunately, these constants are defined
// as RTL expressions and should be handled separately.
- Result = BitCastToType(Ptr, Context.getPointerTypeUnqual(Type::Int8Ty));
+ Result = BitCastToType(Ptr, PointerType::getUnqual(Type::getInt8Ty(Context)));
return true;
}
@@ -5885,7 +5904,7 @@
// FIXME: is i32 always enough here?
Result = Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
Intrinsic::eh_dwarf_cfa),
- Context.getConstantInt(Type::Int32Ty, cfa_offset));
+ ConstantInt::get(Type::getInt32Ty(Context), cfa_offset));
return true;
}
@@ -5895,7 +5914,7 @@
return false;
unsigned int dwarf_regnum = DWARF_FRAME_REGNUM(STACK_POINTER_REGNUM);
- Result = Context.getConstantInt(ConvertType(TREE_TYPE(exp)), dwarf_regnum);
+ Result = ConstantInt::get(ConvertType(TREE_TYPE(exp)), dwarf_regnum);
return true;
}
@@ -5922,7 +5941,7 @@
iwhich = DWARF_FRAME_REGNUM (iwhich);
- Result = Context.getConstantInt(ConvertType(TREE_TYPE(exp)), iwhich);
+ Result = ConstantInt::get(ConvertType(TREE_TYPE(exp)), iwhich);
#endif
return true;
@@ -5934,15 +5953,15 @@
if (!validate_arglist(arglist, INTEGER_TYPE, POINTER_TYPE, VOID_TYPE))
return false;
- const Type *IntPtr = TD.getIntPtrType();
+ const Type *IntPtr = TD.getIntPtrType(Context);
Value *Offset = Emit(TREE_VALUE(arglist), 0);
Value *Handler = Emit(TREE_VALUE(TREE_CHAIN(arglist)), 0);
- Intrinsic::ID IID = (IntPtr == Type::Int32Ty ?
+ Intrinsic::ID IID = (IntPtr == Type::getInt32Ty(Context) ?
Intrinsic::eh_return_i32 : Intrinsic::eh_return_i64);
Offset = Builder.CreateIntCast(Offset, IntPtr, true);
- Handler = BitCastToType(Handler, Context.getPointerTypeUnqual(Type::Int8Ty));
+ Handler = BitCastToType(Handler, PointerType::getUnqual(Type::getInt8Ty(Context)));
SmallVector<Value *, 2> Args;
Args.push_back(Offset);
@@ -5950,7 +5969,7 @@
Builder.CreateCall(Intrinsic::getDeclaration(TheModule, IID),
Args.begin(), Args.end());
Result = Builder.CreateUnreachable();
- EmitBlock(BasicBlock::Create(""));
+ EmitBlock(BasicBlock::Create(Context, ""));
return true;
}
@@ -5971,7 +5990,7 @@
}
Value *Addr = BitCastToType(Emit(TREE_VALUE(arglist), 0),
- Context.getPointerTypeUnqual(Type::Int8Ty));
+ PointerType::getUnqual(Type::getInt8Ty(Context)));
Constant *Size, *Idx;
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) {
@@ -5992,21 +6011,21 @@
if (rnum < 0)
continue;
- Size = Context.getConstantInt(Type::Int8Ty, size);
- Idx = Context.getConstantInt(Type::Int32Ty, rnum);
+ Size = ConstantInt::get(Type::getInt8Ty(Context), size);
+ Idx = ConstantInt::get(Type::getInt32Ty(Context), rnum);
Builder.CreateStore(Size, Builder.CreateGEP(Addr, Idx), false);
}
}
if (!wrote_return_column) {
- Size = Context.getConstantInt(Type::Int8Ty, GET_MODE_SIZE (Pmode));
- Idx = Context.getConstantInt(Type::Int32Ty, DWARF_FRAME_RETURN_COLUMN);
+ Size = ConstantInt::get(Type::getInt8Ty(Context), GET_MODE_SIZE (Pmode));
+ Idx = ConstantInt::get(Type::getInt32Ty(Context), DWARF_FRAME_RETURN_COLUMN);
Builder.CreateStore(Size, Builder.CreateGEP(Addr, Idx), false);
}
#ifdef DWARF_ALT_FRAME_RETURN_COLUMN
- Size = Context.getConstantInt(Type::Int8Ty, GET_MODE_SIZE (Pmode));
- Idx = Context.getConstantInt(Type::Int32Ty, DWARF_ALT_FRAME_RETURN_COLUMN);
+ Size = ConstantInt::get(Type::getInt8Ty(Context), GET_MODE_SIZE (Pmode));
+ Idx = ConstantInt::get(Type::getInt32Ty(Context), DWARF_ALT_FRAME_RETURN_COLUMN);
Builder.CreateStore(Size, Builder.CreateGEP(Addr, Idx), false);
#endif
@@ -6033,7 +6052,7 @@
return false;
Value *Ptr = Emit(TREE_VALUE(arglist), 0);
- Ptr = BitCastToType(Ptr, Context.getPointerTypeUnqual(Type::Int8Ty));
+ Ptr = BitCastToType(Ptr, PointerType::getUnqual(Type::getInt8Ty(Context)));
Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
Intrinsic::stackrestore), Ptr);
@@ -6046,8 +6065,8 @@
if (!validate_arglist(arglist, INTEGER_TYPE, VOID_TYPE))
return false;
Value *Amt = Emit(TREE_VALUE(arglist), 0);
- Amt = CastToSIntType(Amt, Type::Int32Ty);
- Result = Builder.CreateAlloca(Type::Int8Ty, Amt);
+ Amt = CastToSIntType(Amt, Type::getInt32Ty(Context));
+ Result = Builder.CreateAlloca(Type::getInt8Ty(Context), Amt);
return true;
}
@@ -6087,14 +6106,14 @@
Intrinsic::vastart);
const Type *FTy =
cast<PointerType>(llvm_va_start_fn->getType())->getElementType();
- ArgVal = BitCastToType(ArgVal, Context.getPointerTypeUnqual(Type::Int8Ty));
+ ArgVal = BitCastToType(ArgVal, PointerType::getUnqual(Type::getInt8Ty(Context)));
Builder.CreateCall(llvm_va_start_fn, ArgVal);
return true;
}
bool TreeToLLVM::EmitBuiltinVAEnd(tree exp) {
Value *Arg = Emit(TREE_VALUE(TREE_OPERAND(exp, 1)), 0);
- Arg = BitCastToType(Arg, Context.getPointerTypeUnqual(Type::Int8Ty));
+ Arg = BitCastToType(Arg, PointerType::getUnqual(Type::getInt8Ty(Context)));
Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::vaend),
Arg);
return true;
@@ -6119,7 +6138,7 @@
Arg2 = Emit(Arg2T, 0);
}
- static const Type *VPTy = Context.getPointerTypeUnqual(Type::Int8Ty);
+ static const Type *VPTy = PointerType::getUnqual(Type::getInt8Ty(Context));
// FIXME: This ignores alignment and volatility of the arguments.
SmallVector<Value *, 2> Args;
@@ -6137,7 +6156,7 @@
VOID_TYPE))
return false;
- static const Type *VPTy = Context.getPointerTypeUnqual(Type::Int8Ty);
+ static const Type *VPTy = PointerType::getUnqual(Type::getInt8Ty(Context));
Value *Tramp = Emit(TREE_VALUE(arglist), 0);
Tramp = BitCastToType(Tramp, VPTy);
@@ -6332,7 +6351,7 @@
tree AnnotateAttr = lookup_attribute("annotate", DECL_ATTRIBUTES(FieldDecl));
const Type *OrigPtrTy = FieldPtr->getType();
- const Type *SBP = Context.getPointerTypeUnqual(Type::Int8Ty);
+ const Type *SBP = PointerType::getUnqual(Type::getInt8Ty(Context));
Function *Fn = Intrinsic::getDeclaration(TheModule,
Intrinsic::ptr_annotation,
@@ -6340,7 +6359,7 @@
// Get file and line number. FIXME: Should this be for the decl or the
// use. Is there a location info for the use?
- Constant *LineNo = Context.getConstantInt(Type::Int32Ty,
+ Constant *LineNo = ConstantInt::get(Type::getInt32Ty(Context),
DECL_SOURCE_LINE(FieldDecl));
Constant *File = ConvertMetadataStringToGV(DECL_SOURCE_FILE(FieldDecl));
@@ -6370,7 +6389,7 @@
// attribute on a whole struct from one on the first element of the
// struct.
BitCastInst *CastFieldPtr = new BitCastInst(FieldPtr, SBP,
- FieldPtr->getNameStart());
+ FieldPtr->getName());
Builder.Insert(CastFieldPtr);
Value *Ops[4] = {
@@ -6431,7 +6450,7 @@
Value *IndexVal = Emit(Index, 0);
- const Type *IntPtrTy = getTargetData().getIntPtrType();
+ const Type *IntPtrTy = getTargetData().getIntPtrType(Context);
if (TYPE_UNSIGNED(IndexType)) // if the index is unsigned
// ZExt it to retain its value in the larger type
IndexVal = CastToUIntType(IndexVal, IntPtrTy);
@@ -6443,14 +6462,16 @@
if (isSequentialCompatible(ArrayTreeType)) {
SmallVector<Value*, 2> Idx;
if (TREE_CODE(ArrayTreeType) == ARRAY_TYPE)
- Idx.push_back(Context.getConstantInt(IntPtrTy, 0));
+ Idx.push_back(ConstantInt::get(IntPtrTy, 0));
Idx.push_back(IndexVal);
- Value *Ptr = Builder.CreateGEP(ArrayAddr, Idx.begin(), Idx.end());
+ Value *Ptr = flag_wrapv ?
+ Builder.CreateGEP(ArrayAddr, Idx.begin(), Idx.end()) :
+ Builder.CreateInBoundsGEP(ArrayAddr, Idx.begin(), Idx.end());
const Type *ElementTy = ConvertType(ElementType);
unsigned Alignment = MinAlign(ArrayAlign, TD.getABITypeAlignment(ElementTy));
return LValue(BitCastToType(Ptr,
- Context.getPointerTypeUnqual(ConvertType(TREE_TYPE(exp)))),
+ PointerType::getUnqual(ConvertType(TREE_TYPE(exp)))),
Alignment);
}
@@ -6459,7 +6480,7 @@
// float foo(int w, float A[][w], int g) { return A[g][0]; }
ArrayAddr = BitCastToType(ArrayAddr,
- Context.getPointerTypeUnqual(Type::Int8Ty));
+ PointerType::getUnqual(Type::getInt8Ty(Context)));
if (VOID_TYPE_P(TREE_TYPE(ArrayTreeType)))
return LValue(Builder.CreateGEP(ArrayAddr, IndexVal), 1);
@@ -6470,9 +6491,11 @@
if (isa<ConstantInt>(IndexVal))
Alignment = MinAlign(ArrayAlign,
cast<ConstantInt>(IndexVal)->getZExtValue());
- Value *Ptr = Builder.CreateGEP(ArrayAddr, IndexVal);
+ Value *Ptr = flag_wrapv ?
+ Builder.CreateGEP(ArrayAddr, IndexVal) :
+ Builder.CreateInBoundsGEP(ArrayAddr, IndexVal);
return LValue(BitCastToType(Ptr,
- Context.getPointerTypeUnqual(ConvertType(TREE_TYPE(exp)))),
+ PointerType::getUnqual(ConvertType(TREE_TYPE(exp)))),
Alignment);
}
@@ -6498,19 +6521,19 @@
if (unsigned UnitOffset = BitStart / ValueSizeInBits) {
// TODO: If Ptr.Ptr is a struct type or something, we can do much better
// than this. e.g. check out when compiling unwind-dw2-fde-darwin.c.
- Ptr.Ptr = BitCastToType(Ptr.Ptr, Context.getPointerTypeUnqual(ValTy));
+ Ptr.Ptr = BitCastToType(Ptr.Ptr, PointerType::getUnqual(ValTy));
Ptr.Ptr = Builder.CreateGEP(Ptr.Ptr,
- Context.getConstantInt(Type::Int32Ty, UnitOffset));
+ ConstantInt::get(Type::getInt32Ty(Context), UnitOffset));
BitStart -= UnitOffset*ValueSizeInBits;
}
// If this is referring to the whole field, return the whole thing.
if (BitStart == 0 && BitSize == ValueSizeInBits) {
- return LValue(BitCastToType(Ptr.Ptr, Context.getPointerTypeUnqual(ValTy)),
+ return LValue(BitCastToType(Ptr.Ptr, PointerType::getUnqual(ValTy)),
Ptr.getAlignment());
}
- return LValue(BitCastToType(Ptr.Ptr, Context.getPointerTypeUnqual(ValTy)), 1,
+ return LValue(BitCastToType(Ptr.Ptr, PointerType::getUnqual(ValTy)), 1,
BitStart, BitSize);
}
@@ -6533,7 +6556,7 @@
StructAddrLV.BitStart == 0) && "structs cannot be bitfields!");
StructAddrLV.Ptr = BitCastToType(StructAddrLV.Ptr,
- Context.getPointerTypeUnqual(StructTy));
+ PointerType::getUnqual(StructTy));
const Type *FieldTy = ConvertType(getDeclaredType(FieldDecl));
// BitStart - This is the actual offset of the field from the start of the
@@ -6590,7 +6613,7 @@
unsigned ByteOffset = BitStart/8;
if (ByteOffset > 0) {
Offset = Builder.CreateAdd(Offset,
- Context.getConstantInt(Offset->getType(), ByteOffset));
+ ConstantInt::get(Offset->getType(), ByteOffset));
BitStart -= ByteOffset*8;
// If the base is known to be 8-byte aligned, and we're adding a 4-byte
// offset, the field is known to be 4-byte aligned.
@@ -6601,7 +6624,7 @@
Offset->getType());
Ptr = Builder.CreateAdd(Ptr, Offset);
FieldPtr = CastToType(Instruction::IntToPtr, Ptr,
- Context.getPointerTypeUnqual(FieldTy));
+ PointerType::getUnqual(FieldTy));
}
if (isBitfield(FieldDecl)) {
@@ -6634,14 +6657,14 @@
// sized like an i24 there may be trouble: incrementing a T* will move
// the position by 32 bits not 24, leaving the upper 8 of those 32 bits
// inaccessible. Avoid this by rounding up the size appropriately.
- FieldTy = Context.getIntegerType(TD.getTypeAllocSizeInBits(FieldTy));
+ FieldTy = IntegerType::get(Context, TD.getTypeAllocSizeInBits(FieldTy));
assert(FieldTy->getPrimitiveSizeInBits() ==
TD.getTypeAllocSizeInBits(FieldTy) && "Field type not sequential!");
// If this is a bitfield, the field may span multiple fields in the LLVM
// type. As such, cast the pointer to be a pointer to the declared type.
- FieldPtr = BitCastToType(FieldPtr, Context.getPointerTypeUnqual(FieldTy));
+ FieldPtr = BitCastToType(FieldPtr, PointerType::getUnqual(FieldTy));
unsigned LLVMValueBitSize = FieldTy->getPrimitiveSizeInBits();
// Finally, because bitfields can span LLVM fields, and because the start
@@ -6668,12 +6691,12 @@
unsigned ByteOffset = NumAlignmentUnits*ByteAlignment;
LVAlign = MinAlign(LVAlign, ByteOffset);
- Constant *Offset = Context.getConstantInt(TD.getIntPtrType(), ByteOffset);
+ Constant *Offset = ConstantInt::get(TD.getIntPtrType(Context), ByteOffset);
FieldPtr = CastToType(Instruction::PtrToInt, FieldPtr,
Offset->getType());
FieldPtr = Builder.CreateAdd(FieldPtr, Offset);
FieldPtr = CastToType(Instruction::IntToPtr, FieldPtr,
- Context.getPointerTypeUnqual(FieldTy));
+ PointerType::getUnqual(FieldTy));
// Adjust bitstart to account for the pointer movement.
BitStart -= ByteOffset*8;
@@ -6694,7 +6717,7 @@
} else {
// Make sure we return a pointer to the right type.
const Type *EltTy = ConvertType(TREE_TYPE(exp));
- FieldPtr = BitCastToType(FieldPtr, Context.getPointerTypeUnqual(EltTy));
+ FieldPtr = BitCastToType(FieldPtr, PointerType::getUnqual(EltTy));
}
assert(BitStart == 0 &&
@@ -6735,8 +6758,8 @@
if (Decl == 0) {
if (errorcount || sorrycount) {
const Type *Ty = ConvertType(TREE_TYPE(exp));
- const PointerType *PTy = Context.getPointerTypeUnqual(Ty);
- LValue LV(Context.getConstantPointerNull(PTy), 1);
+ const PointerType *PTy = PointerType::getUnqual(Ty);
+ LValue LV(ConstantPointerNull::get(PTy), 1);
return LV;
}
assert(0 && "INTERNAL ERROR: Referencing decl that hasn't been laid out");
@@ -6772,8 +6795,8 @@
const Type *Ty = ConvertType(TREE_TYPE(exp));
// If we have "extern void foo", make the global have type {} instead of
// type void.
- if (Ty == Type::VoidTy) Ty = Context.getStructType(NULL, NULL);
- const PointerType *PTy = Context.getPointerTypeUnqual(Ty);
+ if (Ty == Type::getVoidTy(Context)) Ty = StructType::get(Context);
+ const PointerType *PTy = PointerType::getUnqual(Ty);
unsigned Alignment = Ty->isSized() ? TD.getABITypeAlignment(Ty) : 1;
if (DECL_ALIGN(exp)) {
if (DECL_USER_ALIGN(exp) || 8 * Alignment < (unsigned)DECL_ALIGN(exp))
@@ -6789,7 +6812,7 @@
unsigned Alignment = TD.getABITypeAlignment(cast<PointerType>(ExceptionValue->
getType())->getElementType());
return LValue(BitCastToType(ExceptionValue,
- Context.getPointerTypeUnqual(ConvertType(TREE_TYPE(exp)))),
+ PointerType::getUnqual(ConvertType(TREE_TYPE(exp)))),
Alignment);
}
@@ -6818,7 +6841,7 @@
LValue LV = EmitLV(Op);
// The type is the type of the expression.
LV.Ptr = BitCastToType(LV.Ptr,
- Context.getPointerTypeUnqual(ConvertType(TREE_TYPE(exp))));
+ PointerType::getUnqual(ConvertType(TREE_TYPE(exp))));
return LV;
} else {
// If the input is a scalar, emit to a temporary.
@@ -6826,7 +6849,7 @@
StoreInst *S = Builder.CreateStore(Emit(Op, 0), Dest);
// The type is the type of the expression.
Dest = BitCastToType(Dest,
- Context.getPointerTypeUnqual(ConvertType(TREE_TYPE(exp))));
+ PointerType::getUnqual(ConvertType(TREE_TYPE(exp))));
return LValue(Dest, 1);
}
}
@@ -6867,7 +6890,7 @@
std::vector<Value *> BuildVecOps;
// Insert zero initializers for any uninitialized values.
- Constant *Zero = Context.getNullValue(PTy->getElementType());
+ Constant *Zero = Constant::getNullValue(PTy->getElementType());
BuildVecOps.resize(cast<VectorType>(Ty)->getNumElements(), Zero);
// Insert all of the elements here.
@@ -6921,7 +6944,7 @@
// Scalar value. Evaluate to a register, then do the store.
Value *V = Emit(tree_value, 0);
Value *Ptr = BitCastToType(DestLoc->Ptr,
- Context.getPointerTypeUnqual(V->getType()));
+ PointerType::getUnqual(V->getType()));
StoreInst *St = Builder.CreateStore(V, Ptr, DestLoc->Volatile);
St->setAlignment(DestLoc->getAlignment());
}
@@ -6966,14 +6989,14 @@
assert(HOST_BITS_PER_WIDE_INT == 64 &&
"i128 only supported on 64-bit system");
uint64_t Bits[] = { TREE_INT_CST_LOW(exp), TREE_INT_CST_HIGH(exp) };
- return Context.getConstantInt(APInt(128, 2, Bits));
+ return ConstantInt::get(Context, APInt(128, 2, Bits));
}
}
// Build the value as a ulong constant, then constant fold it to the right
// type. This handles overflow and other things appropriately.
uint64_t IntValue = getINTEGER_CSTVal(exp);
- ConstantInt *C = Context.getConstantInt(Type::Int64Ty, IntValue);
+ ConstantInt *C = ConstantInt::get(Type::getInt64Ty(Context), IntValue);
// The destination type can be a pointer, integer or floating point
// so we need a generalized cast here
Instruction::CastOps opcode = CastInst::getCastOpcode(C, false, Ty,
@@ -6989,7 +7012,7 @@
int UArr[2];
double V;
};
- if (Ty==Type::FloatTy || Ty==Type::DoubleTy) {
+ if (Ty==Type::getFloatTy(Context) || Ty==Type::getDoubleTy(Context)) {
REAL_VALUE_TO_TARGET_DOUBLE(TREE_REAL_CST(exp), RealArr);
// Here's how this works:
@@ -7006,7 +7029,7 @@
// do not match. FLOAT_WORDS_BIG_ENDIAN describes the target endianness.
// The host's used to be available in HOST_WORDS_BIG_ENDIAN, but the gcc
// maintainers removed this in a fit of cleanliness between 4.0
- // and 4.2. For now, host and target endianness must match.
+ // and 4.2. llvm::sys has a substitute.
UArr[0] = RealArr[0]; // Long -> int convert
UArr[1] = RealArr[1];
@@ -7015,16 +7038,17 @@
std::swap(UArr[0], UArr[1]);
return
- Context.getConstantFP(Ty==Type::FloatTy ? APFloat((float)V) : APFloat(V));
- } else if (Ty==Type::X86_FP80Ty) {
+ ConstantFP::get(Context, Ty==Type::getFloatTy(Context) ?
+ APFloat((float)V) : APFloat(V));
+ } else if (Ty==Type::getX86_FP80Ty(Context)) {
long RealArr[4];
uint64_t UArr[2];
REAL_VALUE_TO_TARGET_LONG_DOUBLE(TREE_REAL_CST(exp), RealArr);
UArr[0] = ((uint64_t)((uint32_t)RealArr[0])) |
((uint64_t)((uint32_t)RealArr[1]) << 32);
UArr[1] = (uint16_t)RealArr[2];
- return Context.getConstantFP(APFloat(APInt(80, 2, UArr)));
- } else if (Ty==Type::PPC_FP128Ty) {
+ return ConstantFP::get(Context, APFloat(APInt(80, 2, UArr)));
+ } else if (Ty==Type::getPPC_FP128Ty(Context)) {
long RealArr[4];
uint64_t UArr[2];
REAL_VALUE_TO_TARGET_LONG_DOUBLE(TREE_REAL_CST(exp), RealArr);
@@ -7033,7 +7057,7 @@
((uint64_t)((uint32_t)RealArr[1]));
UArr[1] = ((uint64_t)((uint32_t)RealArr[2]) << 32) |
((uint64_t)((uint32_t)RealArr[3]));
- return Context.getConstantFP(APFloat(APInt(128, 2, UArr)));
+ return ConstantFP::get(Context, APFloat(APInt(128, 2, UArr)));
}
assert(0 && "Floating point type not handled yet");
return 0; // outwit compiler warning
@@ -7041,7 +7065,7 @@
Constant *TreeConstantToLLVM::ConvertVECTOR_CST(tree exp) {
if (!TREE_VECTOR_CST_ELTS(exp))
- return Context.getNullValue(ConvertType(TREE_TYPE(exp)));
+ return Constant::getNullValue(ConvertType(TREE_TYPE(exp)));
std::vector<Constant*> Elts;
for (tree elt = TREE_VECTOR_CST_ELTS(exp); elt; elt = TREE_CHAIN(elt))
@@ -7050,12 +7074,12 @@
// The vector should be zero filled if insufficient elements are provided.
if (Elts.size() < TYPE_VECTOR_SUBPARTS(TREE_TYPE(exp))) {
tree EltType = TREE_TYPE(TREE_TYPE(exp));
- Constant *Zero = Context.getNullValue(ConvertType(EltType));
+ Constant *Zero = Constant::getNullValue(ConvertType(EltType));
while (Elts.size() < TYPE_VECTOR_SUBPARTS(TREE_TYPE(exp)))
Elts.push_back(Zero);
}
- return Context.getConstantVector(Elts);
+ return ConstantVector::get(Elts);
}
Constant *TreeConstantToLLVM::ConvertSTRING_CST(tree exp) {
@@ -7065,23 +7089,37 @@
unsigned Len = (unsigned)TREE_STRING_LENGTH(exp);
std::vector<Constant*> Elts;
- if (ElTy == Type::Int8Ty) {
+ if (ElTy == Type::getInt8Ty(Context)) {
const unsigned char *InStr =(const unsigned char *)TREE_STRING_POINTER(exp);
for (unsigned i = 0; i != Len; ++i)
- Elts.push_back(Context.getConstantInt(Type::Int8Ty, InStr[i]));
- } else if (ElTy == Type::Int16Ty) {
+ Elts.push_back(ConstantInt::get(Type::getInt8Ty(Context), InStr[i]));
+ } else if (ElTy == Type::getInt16Ty(Context)) {
assert((Len&1) == 0 &&
"Length in bytes should be a multiple of element size");
- const unsigned short *InStr =
+ const uint16_t *InStr =
(const unsigned short *)TREE_STRING_POINTER(exp);
- for (unsigned i = 0; i != Len/2; ++i)
- Elts.push_back(Context.getConstantInt(Type::Int16Ty, InStr[i]));
- } else if (ElTy == Type::Int32Ty) {
+ for (unsigned i = 0; i != Len/2; ++i) {
+ // gcc has constructed the initializer elements in the target endianness,
+ // but we're going to treat them as ordinary shorts from here, with
+ // host endianness. Adjust if necessary.
+ if (llvm::sys::isBigEndianHost() == BYTES_BIG_ENDIAN)
+ Elts.push_back(ConstantInt::get(Type::getInt16Ty(Context), InStr[i]));
+ else
+ Elts.push_back(ConstantInt::get(Type::getInt16Ty(Context), ByteSwap_16(InStr[i])));
+ }
+ } else if (ElTy == Type::getInt32Ty(Context)) {
assert((Len&3) == 0 &&
"Length in bytes should be a multiple of element size");
- const unsigned *InStr = (const unsigned *)TREE_STRING_POINTER(exp);
- for (unsigned i = 0; i != Len/4; ++i)
- Elts.push_back(Context.getConstantInt(Type::Int32Ty, InStr[i]));
+ const uint32_t *InStr = (const unsigned *)TREE_STRING_POINTER(exp);
+ for (unsigned i = 0; i != Len/4; ++i) {
+ // gcc has constructed the initializer elements in the target endianness,
+ // but we're going to treat them as ordinary ints from here, with
+ // host endianness. Adjust if necessary.
+ if (llvm::sys::isBigEndianHost() == BYTES_BIG_ENDIAN)
+ Elts.push_back(ConstantInt::get(Type::getInt32Ty(Context), InStr[i]));
+ else
+ Elts.push_back(ConstantInt::get(Type::getInt32Ty(Context), ByteSwap_32(InStr[i])));
+ }
} else {
assert(0 && "Unknown character type!");
}
@@ -7096,7 +7134,7 @@
tree Domain = TYPE_DOMAIN(TREE_TYPE(exp));
if (!Domain || !TYPE_MAX_VALUE(Domain)) {
ConstantSize = LenInElts;
- StrTy = Context.getArrayType(ElTy, LenInElts);
+ StrTy = ArrayType::get(ElTy, LenInElts);
}
}
@@ -7105,19 +7143,19 @@
Elts.resize(ConstantSize);
} else {
// Fill the end of the string with nulls.
- Constant *C = Context.getNullValue(ElTy);
+ Constant *C = Constant::getNullValue(ElTy);
for (; LenInElts != ConstantSize; ++LenInElts)
Elts.push_back(C);
}
}
- return Context.getConstantArray(StrTy, Elts);
+ return ConstantArray::get(StrTy, Elts);
}
Constant *TreeConstantToLLVM::ConvertCOMPLEX_CST(tree exp) {
std::vector<Constant*> Elts;
Elts.push_back(Convert(TREE_REALPART(exp)));
Elts.push_back(Convert(TREE_IMAGPART(exp)));
- return Context.getConstantStruct(Elts, false);
+ return ConstantStruct::get(Context, Elts, false);
}
Constant *TreeConstantToLLVM::ConvertNOP_EXPR(tree exp) {
@@ -7153,7 +7191,7 @@
bool RHSIsSigned = !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp,1)));
Instruction::CastOps opcode;
if (isa<PointerType>(LHS->getType())) {
- const Type *IntPtrTy = getTargetData().getIntPtrType();
+ const Type *IntPtrTy = getTargetData().getIntPtrType(Context);
opcode = CastInst::getCastOpcode(LHS, LHSIsSigned, IntPtrTy, false);
LHS = TheFolder->CreateCast(opcode, LHS, IntPtrTy);
opcode = CastInst::getCastOpcode(RHS, RHSIsSigned, IntPtrTy, false);
@@ -7179,7 +7217,7 @@
// when array is filled during program initialization.
if (CONSTRUCTOR_ELTS(exp) == 0 ||
VEC_length(constructor_elt, CONSTRUCTOR_ELTS(exp)) == 0) // All zeros?
- return Context.getNullValue(ConvertType(TREE_TYPE(exp)));
+ return Constant::getNullValue(ConvertType(TREE_TYPE(exp)));
switch (TREE_CODE(TREE_TYPE(exp))) {
default:
@@ -7274,7 +7312,7 @@
// Zero length array.
if (ResultElts.empty())
- return Context.getConstantArray(
+ return ConstantArray::get(
cast<ArrayType>(ConvertType(TREE_TYPE(exp))), ResultElts);
assert(SomeVal && "If we had some initializer, we should have some value!");
@@ -7286,7 +7324,7 @@
// of an array. This can occur in cases where we have an array of
// unions, and the various unions had different pieces init'd.
const Type *ElTy = SomeVal->getType();
- Constant *Filler = Context.getNullValue(ElTy);
+ Constant *Filler = Constant::getNullValue(ElTy);
bool AllEltsSameType = true;
for (unsigned i = 0, e = ResultElts.size(); i != e; ++i) {
if (ResultElts[i] == 0)
@@ -7297,13 +7335,13 @@
if (TREE_CODE(InitType) == VECTOR_TYPE) {
assert(AllEltsSameType && "Vector of heterogeneous element types?");
- return Context.getConstantVector(ResultElts);
+ return ConstantVector::get(ResultElts);
}
if (AllEltsSameType)
- return Context.getConstantArray(
- Context.getArrayType(ElTy, ResultElts.size()), ResultElts);
- return Context.getConstantStruct(ResultElts, false);
+ return ConstantArray::get(
+ ArrayType::get(ElTy, ResultElts.size()), ResultElts);
+ return ConstantStruct::get(Context, ResultElts, false);
}
@@ -7368,11 +7406,14 @@
}
// Otherwise, there is padding here. Insert explicit zeros.
- const Type *PadTy = Type::Int8Ty;
+ const Type *PadTy = Type::getInt8Ty(Context);
if (AlignedEltOffs-EltOffs != 1)
- PadTy = Context.getArrayType(PadTy, AlignedEltOffs-EltOffs);
+ PadTy = ArrayType::get(PadTy, AlignedEltOffs-EltOffs);
ResultElts.insert(ResultElts.begin()+i,
- Context.getNullValue(PadTy));
+ Constant::getNullValue(PadTy));
+
+ // The padding is now element "i" and just bumped us up to "AlignedEltOffs".
+ EltOffs = AlignedEltOffs;
++e; // One extra element to scan.
}
@@ -7452,11 +7493,11 @@
// Insert enough padding to fully fill in the hole. Insert padding from
// NextFieldByteStart (not LLVMNaturalByteOffset) because the padding will
// not get the same alignment as "Val".
- const Type *FillTy = Type::Int8Ty;
+ const Type *FillTy = Type::getInt8Ty(Context);
if (GCCFieldOffsetInBits/8-NextFieldByteStart != 1)
- FillTy = Context.getArrayType(FillTy,
+ FillTy = ArrayType::get(FillTy,
GCCFieldOffsetInBits/8-NextFieldByteStart);
- ResultElts.push_back(Context.getNullValue(FillTy));
+ ResultElts.push_back(Constant::getNullValue(FillTy));
NextFieldByteStart = GCCFieldOffsetInBits/8;
@@ -7483,7 +7524,7 @@
// been an anonymous bitfield or other thing that shoved it over. No matter,
// just insert some i8 padding until there are bits to fill in.
while (GCCFieldOffsetInBits > NextFieldByteStart*8) {
- ResultElts.push_back(Context.getConstantInt(Type::Int8Ty, 0));
+ ResultElts.push_back(ConstantInt::get(Type::getInt8Ty(Context), 0));
++NextFieldByteStart;
}
@@ -7506,7 +7547,7 @@
if (GCCFieldOffsetInBits < NextFieldByteStart*8) {
unsigned ValBitSize = ValC->getBitWidth();
assert(!ResultElts.empty() && "Bitfield starts before first element?");
- assert(ResultElts.back()->getType() == Type::Int8Ty &&
+ assert(ResultElts.back()->getType() == Type::getInt8Ty(Context) &&
isa<ConstantInt>(ResultElts.back()) &&
"Merging bitfield with non-bitfield value?");
assert(NextFieldByteStart*8 - GCCFieldOffsetInBits < 8 &&
@@ -7532,7 +7573,7 @@
APInt Tmp = ValC->getValue();
Tmp = Tmp.lshr(BitsInPreviousField);
Tmp = Tmp.trunc(ValBitSize-BitsInPreviousField);
- ValC = Context.getConstantInt(Tmp);
+ ValC = ConstantInt::get(Context, Tmp);
} else {
// Big endian, take bits from the top of the field value.
ValForPrevField = ValForPrevField.lshr(ValBitSize-BitsInPreviousField);
@@ -7540,7 +7581,7 @@
APInt Tmp = ValC->getValue();
Tmp = Tmp.trunc(ValBitSize-BitsInPreviousField);
- ValC = Context.getConstantInt(Tmp);
+ ValC = ConstantInt::get(Context, Tmp);
}
// Okay, we're going to insert ValForPrevField into the previous i8, extend
@@ -7557,7 +7598,7 @@
// "or" in the previous value and install it.
const APInt &LastElt = cast<ConstantInt>(ResultElts.back())->getValue();
- ResultElts.back() = Context.getConstantInt(ValForPrevField | LastElt);
+ ResultElts.back() = ConstantInt::get(Context, ValForPrevField | LastElt);
// If the whole bit-field fit into the previous field, we're done.
if (ValC == 0) return;
@@ -7575,7 +7616,7 @@
// Little endian lays out low bits first.
APInt Tmp = Val;
Tmp.trunc(8);
- ValToAppend = Context.getConstantInt(Tmp);
+ ValToAppend = ConstantInt::get(Context, Tmp);
Val = Val.lshr(8);
} else {
@@ -7583,17 +7624,17 @@
APInt Tmp = Val;
Tmp = Tmp.lshr(Tmp.getBitWidth()-8);
Tmp.trunc(8);
- ValToAppend = Context.getConstantInt(Tmp);
+ ValToAppend = ConstantInt::get(Context, Tmp);
}
} else if (Val.getBitWidth() == 8) {
- ValToAppend = Context.getConstantInt(Val);
+ ValToAppend = ConstantInt::get(Context, Val);
} else {
APInt Tmp = Val;
Tmp.zext(8);
if (BYTES_BIG_ENDIAN)
Tmp = Tmp << 8-Val.getBitWidth();
- ValToAppend = Context.getConstantInt(Tmp);
+ ValToAppend = ConstantInt::get(Context, Tmp);
}
ResultElts.push_back(ValToAppend);
@@ -7639,10 +7680,10 @@
// If the LLVM Size is too small, add some tail padding to fill it in.
if (LLVMNaturalSize < GCCStructSize) {
- const Type *FillTy = Type::Int8Ty;
+ const Type *FillTy = Type::getInt8Ty(Context);
if (GCCStructSize - NextFieldByteStart != 1)
- FillTy = Context.getArrayType(FillTy, GCCStructSize - NextFieldByteStart);
- ResultElts.push_back(Context.getNullValue(FillTy));
+ FillTy = ArrayType::get(FillTy, GCCStructSize - NextFieldByteStart);
+ ResultElts.push_back(Constant::getNullValue(FillTy));
NextFieldByteStart = GCCStructSize;
// At this point, we know that our struct should have the right size.
@@ -7695,19 +7736,31 @@
if (!isBitfield(Field))
LayoutInfo.AddFieldToRecordConstant(Val, GCCFieldOffsetInBits);
else {
- assert(isa<ConstantInt>(Val) && "Can only init bitfield with constant");
+ // Bitfields can only be initialized with constants (integer constant
+ // expressions).
+ ConstantInt *ValC = cast<ConstantInt>(Val);
uint64_t FieldSizeInBits = getInt64(DECL_SIZE(Field), true);
uint64_t ValueSizeInBits = Val->getType()->getPrimitiveSizeInBits();
+
+ // G++ has various bugs handling {} initializers where it doesn't
+ // synthesize a zero node of the right type. Instead of figuring out G++,
+ // just hack around it by special casing zero and allowing it to be the
+ // wrong size.
+ if (ValueSizeInBits < FieldSizeInBits && ValC->isZero()) {
+ APInt ValAsInt = ValC->getValue();
+ ValC = ConstantInt::get(Context, ValAsInt.zext(FieldSizeInBits));
+ ValueSizeInBits = FieldSizeInBits;
+ }
+
assert(ValueSizeInBits >= FieldSizeInBits &&
"disagreement between LLVM and GCC on bitfield size");
if (ValueSizeInBits != FieldSizeInBits) {
// Fields are allowed to be smaller than their type. Simply discard
// the unwanted upper bits in the field value.
- APInt ValAsInt = cast<ConstantInt>(Val)->getValue();
- Val = Context.getConstantInt(ValAsInt.trunc(FieldSizeInBits));
+ APInt ValAsInt = ValC->getValue();
+ ValC = ConstantInt::get(Context, ValAsInt.trunc(FieldSizeInBits));
}
- LayoutInfo.AddBitFieldToRecordConstant(cast<ConstantInt>(Val),
- GCCFieldOffsetInBits);
+ LayoutInfo.AddBitFieldToRecordConstant(ValC, GCCFieldOffsetInBits);
}
}
@@ -7720,8 +7773,8 @@
LayoutInfo.HandleTailPadding(getInt64(StructTypeSizeTree, true));
// Okay, we're done, return the computed elements.
- return
- Context.getConstantStruct(LayoutInfo.ResultElts, LayoutInfo.StructIsPacked);
+ return ConstantStruct::get(Context, LayoutInfo.ResultElts,
+ LayoutInfo.StructIsPacked);
}
Constant *TreeConstantToLLVM::ConvertUnionCONSTRUCTOR(tree exp) {
@@ -7747,13 +7800,13 @@
const Type *FillTy;
assert(UnionSize > InitSize && "Init shouldn't be larger than union!");
if (UnionSize - InitSize == 1)
- FillTy = Type::Int8Ty;
+ FillTy = Type::getInt8Ty(Context);
else
- FillTy = Context.getArrayType(Type::Int8Ty, UnionSize - InitSize);
- Elts.push_back(Context.getNullValue(FillTy));
+ FillTy = ArrayType::get(Type::getInt8Ty(Context), UnionSize - InitSize);
+ Elts.push_back(Constant::getNullValue(FillTy));
}
}
- return Context.getConstantStruct(Elts, false);
+ return ConstantStruct::get(Context, Elts, false);
}
//===----------------------------------------------------------------------===//
@@ -7847,7 +7900,7 @@
// itself (allowed in GCC but not in LLVM) then the global is changed to have
// the type of the initializer. Correct for this now.
const Type *Ty = ConvertType(TREE_TYPE(exp));
- if (Ty == Type::VoidTy) Ty = Type::Int8Ty; // void* -> i8*.
+ if (Ty == Type::getVoidTy(Context)) Ty = Type::getInt8Ty(Context); // void* -> i8*.
return TheFolder->CreateBitCast(Val, Ty->getPointerTo());
}
@@ -7868,7 +7921,7 @@
BasicBlock *BB = getLabelDeclBlock(exp);
Constant *C = TheTreeToLLVM->getIndirectGotoBlockNumber(BB);
return
- TheFolder->CreateIntToPtr(C, Context.getPointerTypeUnqual(Type::Int8Ty));
+ TheFolder->CreateIntToPtr(C, PointerType::getUnqual(Type::getInt8Ty(Context)));
}
Constant *TreeConstantToLLVM::EmitLV_COMPLEX_CST(tree exp) {
@@ -7906,13 +7959,6 @@
GV->setAlignment(TYPE_ALIGN(TREE_TYPE(exp)) / 8);
if (SlotP) *SlotP = GV;
-#ifdef LLVM_CSTRING_SECTION
- // For Darwin, try to put it into the .cstring section.
- const TargetAsmInfo *TAI = TheTarget->getTargetAsmInfo();
- if (TAI && TAI->SectionKindForGlobal(GV) == SectionKind::RODataMergeStr)
- // The Darwin linker will coalesce strings in this section.
- GV->setSection(LLVM_CSTRING_SECTION);
-#endif // LLVM_CSTRING_SECTION
return GV;
}
@@ -7945,14 +7991,14 @@
Constant *IndexVal = Convert(Index);
- const Type *IntPtrTy = getTargetData().getIntPtrType();
+ const Type *IntPtrTy = getTargetData().getIntPtrType(Context);
if (IndexVal->getType() != IntPtrTy)
IndexVal = TheFolder->CreateIntCast(IndexVal, IntPtrTy,
!TYPE_UNSIGNED(IndexType));
std::vector<Value*> Idx;
if (TREE_CODE(ArrayType) == ARRAY_TYPE)
- Idx.push_back(Context.getConstantInt(IntPtrTy, 0));
+ Idx.push_back(ConstantInt::get(IntPtrTy, 0));
Idx.push_back(IndexVal);
return TheFolder->CreateGetElementPtr(ArrayAddr, &Idx[0], Idx.size());
@@ -7968,7 +8014,7 @@
tree FieldDecl = TREE_OPERAND(exp, 1);
StructAddrLV = TheFolder->CreateBitCast(StructAddrLV,
- Context.getPointerTypeUnqual(StructTy));
+ PointerType::getUnqual(StructTy));
const Type *FieldTy = ConvertType(getDeclaredType(FieldDecl));
// BitStart - This is the actual offset of the field from the start of the
@@ -7985,8 +8031,8 @@
Constant *Ops[] = {
StructAddrLV,
- Context.getNullValue(Type::Int32Ty),
- Context.getConstantInt(Type::Int32Ty, MemberIndex)
+ Constant::getNullValue(Type::getInt32Ty(Context)),
+ ConstantInt::get(Type::getInt32Ty(Context), MemberIndex)
};
FieldPtr = TheFolder->CreateGetElementPtr(StructAddrLV, Ops+1, 2);
@@ -8006,13 +8052,13 @@
Constant *Ptr = TheFolder->CreatePtrToInt(StructAddrLV, Offset->getType());
Ptr = TheFolder->CreateAdd(Ptr, Offset);
FieldPtr = TheFolder->CreateIntToPtr(Ptr,
- Context.getPointerTypeUnqual(FieldTy));
+ PointerType::getUnqual(FieldTy));
}
// Make sure we return a result of the right type.
- if (Context.getPointerTypeUnqual(FieldTy) != FieldPtr->getType())
+ if (PointerType::getUnqual(FieldTy) != FieldPtr->getType())
FieldPtr = TheFolder->CreateBitCast(FieldPtr,
- Context.getPointerTypeUnqual(FieldTy));
+ PointerType::getUnqual(FieldTy));
assert(BitStart == 0 &&
"It's a bitfield reference or we didn't get to the field!");
Modified: gcc-plugin/trunk/llvm-debug.cpp
URL: http://llvm.org/viewvc/llvm-project/gcc-plugin/trunk/llvm-debug.cpp?rev=79452&r1=79451&r2=79452&view=diff
==============================================================================
--- gcc-plugin/trunk/llvm-debug.cpp (original)
+++ gcc-plugin/trunk/llvm-debug.cpp Wed Aug 19 14:49:08 2009
@@ -200,7 +200,7 @@
// Use llvm value name as linkage name if it is available.
if (DECL_LLVM_SET_P(Node)) {
Value *V = DECL_LLVM(Node);
- return V->getNameStart();
+ return V->getName().data();
}
tree decl_name = DECL_NAME(Node);
@@ -500,12 +500,13 @@
// FIXME - handle dynamic ranges
tree MinValue = TYPE_MIN_VALUE(Domain);
tree MaxValue = TYPE_MAX_VALUE(Domain);
- if (MinValue && MaxValue &&
- isInt64(MinValue, 0) && isInt64(MaxValue, 0)) {
- uint64_t Low = getINTEGER_CSTVal(MinValue);
- uint64_t Hi = getINTEGER_CSTVal(MaxValue);
- Subscripts.push_back(DebugFactory.GetOrCreateSubrange(Low, Hi));
- }
+ uint64_t Low = 0;
+ uint64_t Hi = 0;
+ if (MinValue && isInt64(MinValue, 0))
+ Low = getINTEGER_CSTVal(MinValue);
+ if (MaxValue && isInt64(MaxValue, 0))
+ Hi = getINTEGER_CSTVal(MaxValue);
+ Subscripts.push_back(DebugFactory.GetOrCreateSubrange(Low, Hi));
}
EltTy = TREE_TYPE(atype);
}
@@ -589,7 +590,7 @@
// recursive) and replace all uses of the forward declaration with the
// final definition.
expanded_location Loc = GetNodeLocation(TREE_CHAIN(type), false);
- llvm::DIType FwdDecl =
+ llvm::DICompositeType FwdDecl =
DebugFactory.CreateCompositeType(Tag,
findRegion(type),
GetNodeName(type),
@@ -698,7 +699,7 @@
llvm::DIArray Elements =
DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
- llvm::DIType RealDecl =
+ llvm::DICompositeType RealDecl =
DebugFactory.CreateCompositeType(Tag, findRegion(type),
GetNodeName(type),
getOrCreateCompileUnit(Loc.file),
@@ -709,8 +710,7 @@
// Now that we have a real decl for the struct, replace anything using the
// old decl with the new one. This will recursively update the debug info.
- FwdDecl.getGV()->replaceAllUsesWith(RealDecl.getGV());
- FwdDecl.getGV()->eraseFromParent();
+ FwdDecl.replaceAllUsesWith(RealDecl);
return RealDecl;
}
Modified: gcc-plugin/trunk/llvm-internal.h
URL: http://llvm.org/viewvc/llvm-project/gcc-plugin/trunk/llvm-internal.h?rev=79452&r1=79451&r2=79452&view=diff
==============================================================================
--- gcc-plugin/trunk/llvm-internal.h (original)
+++ gcc-plugin/trunk/llvm-internal.h Wed Aug 19 14:49:08 2009
@@ -28,6 +28,7 @@
#define LLVM_INTERNAL_H
// LLVM headers
+#include "llvm/CallingConv.h"
#include "llvm/Intrinsics.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/SmallVector.h"
@@ -151,7 +152,7 @@
const FunctionType *ConvertFunctionType(tree_node *type,
tree_node *decl,
tree_node *static_chain,
- unsigned &CallingConv,
+ CallingConv::ID &CallingConv,
AttrListPtr &PAL);
/// ConvertArgListToFnType - Given a DECL_ARGUMENTS list on an GCC tree,
@@ -160,7 +161,7 @@
const FunctionType *ConvertArgListToFnType(tree_node *type,
tree_node *arglist,
tree_node *static_chain,
- unsigned &CallingConv,
+ CallingConv::ID &CallingConv,
AttrListPtr &PAL);
private:
@@ -445,6 +446,12 @@
Value *EmitMemMove(Value *DestPtr, Value *SrcPtr, Value *Size, unsigned Align);
Value *EmitMemSet(Value *DestPtr, Value *SrcVal, Value *Size, unsigned Align);
+ /// EmitSjLjDispatcher - Emit SJLJ EH dispatcher
+ void EmitSjLjDispatcher();
+
+ /// EmitSjLjLandingPads - Emit SJLJ EH landing pads.
+ void EmitSjLjLandingPads();
+
/// EmitLandingPads - Emit EH landing pads.
void EmitLandingPads();
Modified: gcc-plugin/trunk/llvm-types.cpp
URL: http://llvm.org/viewvc/llvm-project/gcc-plugin/trunk/llvm-types.cpp?rev=79452&r1=79451&r2=79452&view=diff
==============================================================================
--- gcc-plugin/trunk/llvm-types.cpp (original)
+++ gcc-plugin/trunk/llvm-types.cpp Wed Aug 19 14:49:08 2009
@@ -169,11 +169,11 @@
//TODO }
//TODO
//TODO const std::string &TypeName = TypeNameMap[*I];
-//TODO LTypesNames.push_back(Context.getConstantArray(TypeName, false));
+//TODO LTypesNames.push_back(ConstantArray::get(Context, TypeName, false));
//TODO }
//TODO
//TODO // Create string table.
-//TODO Constant *LTypesNameTable = Context.getConstantStruct(LTypesNames, false);
+//TODO Constant *LTypesNameTable = ConstantStruct::get(Context, LTypesNames, false);
//TODO
//TODO // Create variable to hold this string table.
//TODO GlobalVariable *GV = new GlobalVariable(*TheModule,
@@ -204,7 +204,7 @@
for (unsigned i = 0, e = ArgTys.size(); i != e; ++i)
ArgTysP.push_back(ArgTys[i]);
- return Context.getFunctionType(Res, ArgTysP, isVarArg);
+ return FunctionType::get(Res, ArgTysP, isVarArg);
}
//===----------------------------------------------------------------------===//
@@ -672,7 +672,7 @@
//===----------------------------------------------------------------------===//
const Type *TypeConverter::ConvertType(tree orig_type) {
- if (orig_type == error_mark_node) return Type::Int32Ty;
+ if (orig_type == error_mark_node) return Type::getInt32Ty(Context);
// LLVM doesn't care about variants such as const, volatile, or restrict.
tree type = TYPE_MAIN_VARIANT(orig_type);
@@ -682,7 +682,7 @@
fprintf(stderr, "Unknown type to convert:\n");
debug_tree(type);
abort();
- case VOID_TYPE: return SET_TYPE_LLVM(type, Type::VoidTy);
+ case VOID_TYPE: return SET_TYPE_LLVM(type, Type::getVoidTy(Context));
case RECORD_TYPE: return ConvertRECORD(type, orig_type);
case QUAL_UNION_TYPE:
case UNION_TYPE: return ConvertUNION(type, orig_type);
@@ -690,7 +690,7 @@
if (const Type *Ty = GET_TYPE_LLVM(type))
return Ty;
return SET_TYPE_LLVM(type,
- Context.getIntegerType(TREE_INT_CST_LOW(TYPE_SIZE(type))));
+ IntegerType::get(Context, TREE_INT_CST_LOW(TYPE_SIZE(type))));
}
case ENUMERAL_TYPE:
// Use of an enum that is implicitly declared?
@@ -699,15 +699,19 @@
if (const Type *Ty = GET_TYPE_LLVM(orig_type))
return Ty;
- const Type *Ty = Context.getOpaqueType();
+ const Type *Ty = OpaqueType::get(Context);
TheModule->addTypeName(GetTypeName("enum.", orig_type), Ty);
return TypeDB.setType(orig_type, Ty);
}
// FALL THROUGH.
type = orig_type;
- case INTEGER_TYPE:
+ case INTEGER_TYPE: {
if (const Type *Ty = GET_TYPE_LLVM(type)) return Ty;
- return SET_TYPE_LLVM(type, Context.getIntegerType(TYPE_PRECISION(type)));
+ // The ARM port defines __builtin_neon_xi as a 511-bit type because GCC's
+ // type precision field has only 9 bits. Treat this as a special case.
+ int precision = TYPE_PRECISION(type) == 511 ? 512 : TYPE_PRECISION(type);
+ return SET_TYPE_LLVM(type, IntegerType::get(Context, precision));
+ }
case REAL_TYPE:
if (const Type *Ty = GET_TYPE_LLVM(type)) return Ty;
switch (TYPE_PRECISION(type)) {
@@ -715,20 +719,20 @@
fprintf(stderr, "Unknown FP type!\n");
debug_tree(type);
abort();
- case 32: return SET_TYPE_LLVM(type, Type::FloatTy);
- case 64: return SET_TYPE_LLVM(type, Type::DoubleTy);
- case 80: return SET_TYPE_LLVM(type, Type::X86_FP80Ty);
+ case 32: return SET_TYPE_LLVM(type, Type::getFloatTy(Context));
+ case 64: return SET_TYPE_LLVM(type, Type::getDoubleTy(Context));
+ case 80: return SET_TYPE_LLVM(type, Type::getX86_FP80Ty(Context));
case 128:
#ifdef TARGET_POWERPC
- return SET_TYPE_LLVM(type, Type::PPC_FP128Ty);
+ return SET_TYPE_LLVM(type, Type::getPPC_FP128Ty(Context));
#elif 0
// This is for IEEE double extended, e.g. Sparc
- return SET_TYPE_LLVM(type, Type::FP128Ty);
+ return SET_TYPE_LLVM(type, Type::getFP128Ty(Context));
#else
// 128-bit long doubles map onto { double, double }.
return SET_TYPE_LLVM(type,
- Context.getStructType(Type::DoubleTy, Type::DoubleTy,
- NULL));
+ StructType::get(Context, Type::getDoubleTy(Context),
+ Type::getDoubleTy(Context), NULL));
#endif
}
@@ -736,13 +740,13 @@
if (const Type *Ty = GET_TYPE_LLVM(type)) return Ty;
const Type *Ty = ConvertType(TREE_TYPE(type));
assert(!Ty->isAbstract() && "should use TypeDB.setType()");
- return SET_TYPE_LLVM(type, Context.getStructType(Ty, Ty, NULL));
+ return SET_TYPE_LLVM(type, StructType::get(Context, Ty, Ty, NULL));
}
case VECTOR_TYPE: {
if (const Type *Ty = GET_TYPE_LLVM(type)) return Ty;
const Type *Ty = ConvertType(TREE_TYPE(type));
assert(!Ty->isAbstract() && "should use TypeDB.setType()");
- Ty = Context.getVectorType(Ty, TYPE_VECTOR_SUBPARTS(type));
+ Ty = VectorType::get(Ty, TYPE_VECTOR_SUBPARTS(type));
return SET_TYPE_LLVM(type, Ty);
}
@@ -779,8 +783,8 @@
// Restore ConvertingStruct for the caller.
ConvertingStruct = false;
- if (Actual->getTypeID() == Type::VoidTyID)
- Actual = Type::Int8Ty; // void* -> sbyte*
+ if (Actual == Type::getVoidTy(Context))
+ Actual = Type::getInt8Ty(Context); // void* -> sbyte*
// Update the type, potentially updating TYPE_LLVM(type).
const OpaqueType *OT = cast<OpaqueType>(Ty->getElementType());
@@ -798,7 +802,7 @@
if (Ty == 0) {
PointersToReresolve.push_back(type);
return TypeDB.setType(type,
- Context.getPointerTypeUnqual(Context.getOpaqueType()));
+ PointerType::getUnqual(OpaqueType::get(Context)));
}
// A type has already been computed. However, this may be some sort of
@@ -814,9 +818,9 @@
Ty = ConvertType(TREE_TYPE(type));
}
- if (Ty->getTypeID() == Type::VoidTyID)
- Ty = Type::Int8Ty; // void* -> sbyte*
- return TypeDB.setType(type, Context.getPointerTypeUnqual(Ty));
+ if (Ty == Type::getVoidTy(Context))
+ Ty = Type::getInt8Ty(Context); // void* -> sbyte*
+ return TypeDB.setType(type, PointerType::getUnqual(Ty));
}
case METHOD_TYPE:
@@ -825,7 +829,7 @@
return Ty;
// No declaration to pass through, passing NULL.
- unsigned CallingConv;
+ CallingConv::ID CallingConv;
AttrListPtr PAL;
return TypeDB.setType(type, ConvertFunctionType(type, NULL, NULL,
CallingConv, PAL));
@@ -847,7 +851,7 @@
// that the gcc array type has constant size, using an i8 for the element
// type ensures we can produce an LLVM array of the right size.
ElementSize = 8;
- ElementTy = Type::Int8Ty;
+ ElementTy = Type::getInt8Ty(Context);
}
uint64_t NumElements;
@@ -878,7 +882,7 @@
NumElements /= ElementSize;
}
- return TypeDB.setType(type, Context.getArrayType(ElementTy, NumElements));
+ return TypeDB.setType(type, ArrayType::get(ElementTy, NumElements));
}
case OFFSET_TYPE:
// Handle OFFSET_TYPE specially. This is used for pointers to members,
@@ -886,8 +890,8 @@
// integer directly.
switch (getTargetData().getPointerSize()) {
default: assert(0 && "Unknown pointer size!");
- case 4: return Type::Int32Ty;
- case 8: return Type::Int64Ty;
+ case 4: return Type::getInt32Ty(Context);
+ case 8: return Type::getInt64Ty(Context);
}
}
}
@@ -900,18 +904,21 @@
class FunctionTypeConversion : public DefaultABIClient {
PATypeHolder &RetTy;
std::vector<PATypeHolder> &ArgTypes;
- unsigned &CallingConv;
+ CallingConv::ID &CallingConv;
bool isShadowRet;
bool KNRPromotion;
unsigned Offset;
public:
FunctionTypeConversion(PATypeHolder &retty, std::vector<PATypeHolder> &AT,
- unsigned &CC, bool KNR)
+ CallingConv::ID &CC, bool KNR)
: RetTy(retty), ArgTypes(AT), CallingConv(CC), KNRPromotion(KNR), Offset(0) {
CallingConv = CallingConv::C;
isShadowRet = false;
}
+ /// getCallingConv - This provides the desired CallingConv for the function.
+ CallingConv::ID& getCallingConv(void) { return CallingConv; }
+
bool isShadowReturn() const { return isShadowRet; }
/// HandleScalarResult - This callback is invoked if the function returns a
@@ -938,7 +945,7 @@
void HandleShadowResult(const PointerType *PtrArgTy, bool RetPtr) {
// This function either returns void or the shadow argument,
// depending on the target.
- RetTy = RetPtr ? PtrArgTy : Type::VoidTy;
+ RetTy = RetPtr ? PtrArgTy : Type::getVoidTy(Context);
// In any case, there is a dummy shadow argument though!
ArgTypes.push_back(PtrArgTy);
@@ -969,9 +976,9 @@
if (KNRPromotion) {
if (type == float_type_node)
LLVMTy = ConvertType(double_type_node);
- else if (LLVMTy == Type::Int16Ty || LLVMTy == Type::Int8Ty ||
- LLVMTy == Type::Int1Ty)
- LLVMTy = Type::Int32Ty;
+ else if (LLVMTy == Type::getInt16Ty(Context) || LLVMTy == Type::getInt8Ty(Context) ||
+ LLVMTy == Type::getInt1Ty(Context))
+ LLVMTy = Type::getInt32Ty(Context);
}
ArgTypes.push_back(LLVMTy);
}
@@ -986,7 +993,7 @@
/// argument is passed by value. It is lowered to a parameter passed by
/// reference with an additional parameter attribute "ByVal".
void HandleByValArgument(const llvm::Type *LLVMTy, tree type) {
- HandleScalarArgument(Context.getPointerTypeUnqual(LLVMTy), type);
+ HandleScalarArgument(PointerType::getUnqual(LLVMTy), type);
}
/// HandleFCAArgument - This callback is invoked if the aggregate function
@@ -1021,10 +1028,10 @@
/// specified result type for the function.
const FunctionType *TypeConverter::
ConvertArgListToFnType(tree type, tree Args, tree static_chain,
- unsigned &CallingConv, AttrListPtr &PAL) {
+ CallingConv::ID &CallingConv, AttrListPtr &PAL) {
tree ReturnType = TREE_TYPE(type);
std::vector<PATypeHolder> ArgTys;
- PATypeHolder RetTy(Type::VoidTy);
+ PATypeHolder RetTy(Type::getVoidTy(Context));
FunctionTypeConversion Client(RetTy, ArgTys, CallingConv, true /*K&R*/);
TheLLVMABI<FunctionTypeConversion> ABIConverter(Client);
@@ -1085,8 +1092,8 @@
const FunctionType *TypeConverter::
ConvertFunctionType(tree type, tree decl, tree static_chain,
- unsigned &CallingConv, AttrListPtr &PAL) {
- PATypeHolder RetTy = Type::VoidTy;
+ CallingConv::ID &CallingConv, AttrListPtr &PAL) {
+ PATypeHolder RetTy = Type::getVoidTy(Context);
std::vector<PATypeHolder> ArgTypes;
bool isVarArg = false;
FunctionTypeConversion Client(RetTy, ArgTypes, CallingConv, false/*not K&R*/);
@@ -1331,7 +1338,7 @@
const Type *getLLVMType() const {
// Use Packed type if Packed is set or all struct fields are bitfields.
// Empty struct is not packed unless packed is set.
- return Context.getStructType(Elements,
+ return StructType::get(Context, Elements,
Packed || (!Elements.empty() && AllBitFields));
}
@@ -1370,13 +1377,13 @@
const Type *LastType = Elements.back();
unsigned PadBytes = 0;
- if (LastType == Type::Int8Ty)
+ if (LastType == Type::getInt8Ty(Context))
PadBytes = 1 - NoOfBytesToRemove;
- else if (LastType == Type::Int16Ty)
+ else if (LastType == Type::getInt16Ty(Context))
PadBytes = 2 - NoOfBytesToRemove;
- else if (LastType == Type::Int32Ty)
+ else if (LastType == Type::getInt32Ty(Context))
PadBytes = 4 - NoOfBytesToRemove;
- else if (LastType == Type::Int64Ty)
+ else if (LastType == Type::getInt64Ty(Context))
PadBytes = 8 - NoOfBytesToRemove;
else
return;
@@ -1384,7 +1391,7 @@
assert (PadBytes > 0 && "Unable to remove extra bytes");
// Update last element type and size, element offset is unchanged.
- const Type *Pad = Context.getArrayType(Type::Int8Ty, PadBytes);
+ const Type *Pad = ArrayType::get(Type::getInt8Ty(Context), PadBytes);
unsigned OriginalSize = ElementSizeInBytes.back();
Elements.pop_back();
Elements.push_back(Pad);
@@ -1419,9 +1426,9 @@
// field we just popped. Otherwise we might end up with a
// gcc non-bitfield being mapped to an LLVM field with a
// different offset.
- const Type *Pad = Type::Int8Ty;
+ const Type *Pad = Type::getInt8Ty(Context);
if (PoppedOffset != EndOffset + 1)
- Pad = Context.getArrayType(Pad, PoppedOffset - EndOffset);
+ Pad = ArrayType::get(Pad, PoppedOffset - EndOffset);
addElement(Pad, EndOffset, PoppedOffset - EndOffset);
}
}
@@ -1442,7 +1449,7 @@
// padding.
if (NextByteOffset < ByteOffset) {
uint64_t CurOffset = getNewElementByteOffset(1);
- const Type *Pad = Type::Int8Ty;
+ const Type *Pad = Type::getInt8Ty(Context);
if (SavedTy && LastFieldStartsAtNonByteBoundry)
// We want to reuse SavedType to access this bit field.
// e.g. struct __attribute__((packed)) {
@@ -1453,7 +1460,7 @@
// In this example, previous field is C and D is current field.
addElement(SavedTy, CurOffset, ByteOffset - CurOffset);
else if (ByteOffset - CurOffset != 1)
- Pad = Context.getArrayType(Pad, ByteOffset - CurOffset);
+ Pad = ArrayType::get(Pad, ByteOffset - CurOffset);
addElement(Pad, CurOffset, ByteOffset - CurOffset);
}
return true;
@@ -1579,21 +1586,21 @@
// additional bits required after FirstUnallocatedByte to cover new field.
const Type *NewFieldTy;
if (Size <= 8)
- NewFieldTy = Type::Int8Ty;
+ NewFieldTy = Type::getInt8Ty(Context);
else if (Size <= 16)
- NewFieldTy = Type::Int16Ty;
+ NewFieldTy = Type::getInt16Ty(Context);
else if (Size <= 32)
- NewFieldTy = Type::Int32Ty;
+ NewFieldTy = Type::getInt32Ty(Context);
else {
assert(Size <= 64 && "Bitfield too large!");
- NewFieldTy = Type::Int64Ty;
+ NewFieldTy = Type::getInt64Ty(Context);
}
// Check that the alignment of NewFieldTy won't cause a gap in the structure!
unsigned ByteAlignment = getTypeAlignment(NewFieldTy);
if (FirstUnallocatedByte & (ByteAlignment-1)) {
// Instead of inserting a nice whole field, insert a small array of ubytes.
- NewFieldTy = Context.getArrayType(Type::Int8Ty, (Size+7)/8);
+ NewFieldTy = ArrayType::get(Type::getInt8Ty(Context), (Size+7)/8);
}
// Finally, add the new field.
@@ -1824,9 +1831,9 @@
PadBytes = StartOffsetInBits/8-FirstUnallocatedByte;
if (PadBytes) {
- const Type *Pad = Type::Int8Ty;
+ const Type *Pad = Type::getInt8Ty(Context);
if (PadBytes != 1)
- Pad = Context.getArrayType(Pad, PadBytes);
+ Pad = ArrayType::get(Pad, PadBytes);
Info.addElement(Pad, FirstUnallocatedByte, PadBytes);
}
@@ -1878,7 +1885,7 @@
}
if (TYPE_SIZE(type) == 0) { // Forward declaration?
- const Type *Ty = Context.getOpaqueType();
+ const Type *Ty = OpaqueType::get(Context);
TheModule->addTypeName(GetTypeName("struct.", orig_type), Ty);
return TypeDB.setType(type, Ty);
}
@@ -1928,20 +1935,20 @@
// If only one byte is needed then insert i8.
if (GCCTypeSize-LLVMLastElementEnd == 1)
- Info->addElement(Type::Int8Ty, 1, 1);
+ Info->addElement(Type::getInt8Ty(Context), 1, 1);
else {
if (((GCCTypeSize-LLVMStructSize) % 4) == 0 &&
(Info->getAlignmentAsLLVMStruct() %
- Info->getTypeAlignment(Type::Int32Ty)) == 0) {
+ Info->getTypeAlignment(Type::getInt32Ty(Context))) == 0) {
// insert array of i32
unsigned Int32ArraySize = (GCCTypeSize-LLVMStructSize)/4;
const Type *PadTy =
- Context.getArrayType(Type::Int32Ty, Int32ArraySize);
+ ArrayType::get(Type::getInt32Ty(Context), Int32ArraySize);
Info->addElement(PadTy, GCCTypeSize - LLVMLastElementEnd,
Int32ArraySize, true /* Padding Element */);
} else {
const Type *PadTy =
- Context.getArrayType(Type::Int8Ty, GCCTypeSize-LLVMStructSize);
+ ArrayType::get(Type::getInt8Ty(Context), GCCTypeSize-LLVMStructSize);
Info->addElement(PadTy, GCCTypeSize - LLVMLastElementEnd,
GCCTypeSize - LLVMLastElementEnd,
true /* Padding Element */);
@@ -2046,7 +2053,7 @@
}
if (TYPE_SIZE(type) == 0) { // Forward declaration?
- const Type *Ty = Context.getOpaqueType();
+ const Type *Ty = OpaqueType::get(Context);
TheModule->addTypeName(GetTypeName("union.", orig_type), Ty);
return TypeDB.setType(type, Ty);
}
@@ -2172,15 +2179,15 @@
if (EltSize != GCCTypeSize) {
assert(EltSize < GCCTypeSize &&
"LLVM type size doesn't match GCC type size!");
- const Type *PadTy = Type::Int8Ty;
+ const Type *PadTy = Type::getInt8Ty(Context);
if (GCCTypeSize-EltSize != 1)
- PadTy = Context.getArrayType(PadTy, GCCTypeSize-EltSize);
+ PadTy = ArrayType::get(PadTy, GCCTypeSize-EltSize);
UnionElts.push_back(PadTy);
}
}
bool isPacked = 8 * EltAlign > TYPE_ALIGN(type);
- const Type *ResultTy = Context.getStructType(UnionElts, isPacked);
+ const Type *ResultTy = StructType::get(Context, UnionElts, isPacked);
const OpaqueType *OldTy = cast_or_null<OpaqueType>(GET_TYPE_LLVM(type));
TypeDB.setType(type, ResultTy);
More information about the llvm-commits
mailing list