[dragonegg] r176016 - Format with clang-format.
Duncan Sands
baldrick at free.fr
Mon Feb 25 02:54:26 PST 2013
Author: baldrick
Date: Mon Feb 25 04:54:25 2013
New Revision: 176016
URL: http://llvm.org/viewvc/llvm-project?rev=176016&view=rev
Log:
Format with clang-format.
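(The log does not record the exact command used, but a whole-tree reformat like
this is typically done with clang-format's in-place mode. A minimal sketch,
assuming clang-format is on PATH and using the built-in LLVM style; the globs
below are illustrative only, the actual files touched are listed under
"Modified" further down:

  clang-format -i -style=LLVM \
      include/*/dragonegg/*.h include/dragonegg/*.h include/dragonegg/ADT/*.h \
      src/*.cpp src/arm/Target.cpp src/x86/Target.cpp
)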
Modified:
dragonegg/trunk/include/arm/dragonegg/Target.h
dragonegg/trunk/include/dragonegg/ABI.h
dragonegg/trunk/include/dragonegg/ADT/IntervalList.h
dragonegg/trunk/include/dragonegg/Debug.h
dragonegg/trunk/include/dragonegg/Internals.h
dragonegg/trunk/include/dragonegg/TypeConversion.h
dragonegg/trunk/include/x86/dragonegg/Target.h
dragonegg/trunk/src/Aliasing.cpp
dragonegg/trunk/src/Backend.cpp
dragonegg/trunk/src/Cache.cpp
dragonegg/trunk/src/ConstantConversion.cpp
dragonegg/trunk/src/Convert.cpp
dragonegg/trunk/src/Debug.cpp
dragonegg/trunk/src/DefaultABI.cpp
dragonegg/trunk/src/Trees.cpp
dragonegg/trunk/src/TypeConversion.cpp
dragonegg/trunk/src/arm/Target.cpp
dragonegg/trunk/src/x86/Target.cpp
Modified: dragonegg/trunk/include/arm/dragonegg/Target.h
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/include/arm/dragonegg/Target.h?rev=176016&r1=176015&r2=176016&view=diff
==============================================================================
--- dragonegg/trunk/include/arm/dragonegg/Target.h (original)
+++ dragonegg/trunk/include/arm/dragonegg/Target.h Mon Feb 25 04:54:25 2013
@@ -70,8 +70,8 @@ extern bool llvm_arm_aggregate_partially
#define LLVM_AGGREGATE_PARTIALLY_PASSED_IN_REGS(E, SE, ISR, CC) \
llvm_arm_aggregate_partially_passed_in_regs((E), (SE), (CC))
-extern Type *llvm_arm_aggr_type_for_struct_return(tree_node *type,
- CallingConv::ID CC);
+extern Type *
+llvm_arm_aggr_type_for_struct_return(tree_node *type, CallingConv::ID CC);
/* LLVM_AGGR_TYPE_FOR_STRUCT_RETURN - Return LLVM Type if X can be
returned as an aggregate, otherwise return NULL. */
@@ -263,21 +263,35 @@ extern bool llvm_arm_should_pass_or_retu
* with function notes.
*/
#define LLVM_OVERRIDE_TARGET_ARCH() \
- (TARGET_THUMB ? \
- (arm_arch7 ? "thumbv7" : \
- (arm_arch_thumb2 ? "thumbv6t2" : \
- (arm_tune == cortexm0 ? "thumbv6m" : \
- (arm_arch6 ? "thumbv6" : \
- (arm_arch5e ? "thumbv5e" : \
- (arm_arch5 ? "thumbv5" : \
- (arm_arch4t ? "thumbv4t" : ""))))))) : \
- (arm_arch7 ? "armv7" : \
- (arm_arch_thumb2 ? "armv6t2" : \
- (arm_arch6 ? "armv6" : \
- (arm_arch5e ? "armv5e" : \
- (arm_arch5 ? "armv5" : \
- (arm_arch4t ? "armv4t" : \
- (arm_arch4 ? "armv4" : ""))))))))
+ (TARGET_THUMB \
+ ? (arm_arch7 \
+ ? "thumbv7" \
+ : (arm_arch_thumb2 \
+ ? "thumbv6t2" \
+ : (arm_tune == cortexm0 \
+ ? "thumbv6m" \
+ : (arm_arch6 \
+ ? "thumbv6" \
+ : (arm_arch5e \
+ ? "thumbv5e" \
+ : (arm_arch5 \
+ ? "thumbv5" \
+ : (arm_arch4t ? "thumbv4t" \
+ : ""))))))) \
+ : (arm_arch7 \
+ ? "armv7" \
+ : (arm_arch_thumb2 \
+ ? "armv6t2" \
+ : (arm_arch6 \
+ ? "armv6" \
+ : (arm_arch5e \
+ ? "armv5e" \
+ : (arm_arch5 \
+ ? "armv5" \
+ : (arm_arch4t \
+ ? "armv4t" \
+ : (arm_arch4 ? "armv4" \
+ : ""))))))))
#if 0
// Dragonegg should make flag_mkernel and flag_apple_kext option later on.
@@ -322,9 +336,10 @@ extern bool llvm_arm_should_pass_or_retu
so use the incoming register name if it exists. Otherwise, use the default
register names to match the backend. */
#define LLVM_GET_REG_NAME(REG_NAME, REG_NUM) \
- ((REG_NUM) == 10 ? "r10" : (REG_NUM) == 11 ? "r11" : (REG_NUM) == 12 ? \
- "r12" : (REG_NUM) >= FIRST_VFP_REGNUM && REG_NAME != 0 ? REG_NAME : \
- reg_names[REG_NUM])
+ ((REG_NUM) == 10 ? "r10" : (REG_NUM) == \
+ 11 ? "r11" : (REG_NUM) == \
+ 12 ? "r12" : (REG_NUM) >= FIRST_VFP_REGNUM && \
+ REG_NAME != 0 ? REG_NAME : reg_names[REG_NUM])
/* Define a static enumeration of the NEON builtins to be used when
converting to LLVM intrinsics. These names are derived from the
Modified: dragonegg/trunk/include/dragonegg/ABI.h
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/include/dragonegg/ABI.h?rev=176016&r1=176015&r2=176016&view=diff
==============================================================================
--- dragonegg/trunk/include/dragonegg/ABI.h (original)
+++ dragonegg/trunk/include/dragonegg/ABI.h Mon Feb 25 04:54:25 2013
@@ -143,8 +143,8 @@ extern bool isZeroSizedStructOrUnion(tre
// getLLVMScalarTypeForStructReturn - Return LLVM Type if TY can be
// returned as a scalar, otherwise return NULL. This is the default
// target independent implementation.
-inline Type *getLLVMScalarTypeForStructReturn(tree_node *type,
- unsigned *Offset) {
+inline Type *
+getLLVMScalarTypeForStructReturn(tree_node *type, unsigned *Offset) {
Type *Ty = ConvertType(type);
uint64_t Size = getDataLayout().getTypeAllocSize(Ty);
*Offset = 0;
Modified: dragonegg/trunk/include/dragonegg/ADT/IntervalList.h
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/include/dragonegg/ADT/IntervalList.h?rev=176016&r1=176015&r2=176016&view=diff
==============================================================================
--- dragonegg/trunk/include/dragonegg/ADT/IntervalList.h (original)
+++ dragonegg/trunk/include/dragonegg/ADT/IntervalList.h Mon Feb 25 04:54:25 2013
@@ -61,8 +61,8 @@ template <class T, typename U, unsigned
for (unsigned i = 0, e = (unsigned) Intervals.size(); i < e; ++i) {
if (Intervals[i].getRange().empty())
return false;
- if (i && Intervals[i].getRange().getFirst() <
- Intervals[i - 1].getRange().getLast())
+ if (i && Intervals[i].getRange().getFirst() < Intervals[i - 1]
+ .getRange().getLast())
return false;
}
return true;
@@ -106,10 +106,10 @@ void IntervalList<T, U, N>::AddInterval(
}
// Check for overlap with existing intervals.
- iterator Lo = std::lower_bound(Intervals.begin(), Intervals.end(), Interval,
- CmpFirst);
- iterator Hi = std::upper_bound(Intervals.begin(), Intervals.end(), Interval,
- CmpLast);
+ iterator Lo =
+ std::lower_bound(Intervals.begin(), Intervals.end(), Interval, CmpFirst);
+ iterator Hi =
+ std::upper_bound(Intervals.begin(), Intervals.end(), Interval, CmpLast);
if (Lo < Hi) {
// Intervals with index in [Lo, Hi) are those completely covered by the new
// interval. Throw them away.
Modified: dragonegg/trunk/include/dragonegg/Debug.h
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/include/dragonegg/Debug.h?rev=176016&r1=176015&r2=176016&view=diff
==============================================================================
--- dragonegg/trunk/include/dragonegg/Debug.h (original)
+++ dragonegg/trunk/include/dragonegg/Debug.h Mon Feb 25 04:54:25 2013
@@ -80,10 +80,10 @@ public:
/// CreateCompileUnit - Create a new descriptor for the specified compile
/// unit.
- void CreateCompileUnit(
- unsigned LangID, StringRef Filename, StringRef Directory,
- StringRef Producer, bool isMain = false, bool isOptimized = false,
- StringRef Flags = "", unsigned RunTimeVer = 0);
+ void CreateCompileUnit(unsigned LangID, StringRef Filename,
+ StringRef Directory, StringRef Producer,
+ bool isMain = false, bool isOptimized = false,
+ StringRef Flags = "", unsigned RunTimeVer = 0);
/// CreateFile - Create a new descriptor for the specified file.
DIFile CreateFile(StringRef Filename, StringRef Directory);
@@ -115,7 +115,7 @@ public:
unsigned Tag, DIDescriptor Context, StringRef Name, DIFile F,
unsigned LineNumber, uint64_t SizeInBits, uint64_t AlignInBits,
uint64_t OffsetInBits, unsigned Flags, DIType DerivedFrom,
- DIArray Elements, unsigned RunTimeLang = 0, MDNode * ContainingType = 0);
+ DIArray Elements, unsigned RunTimeLang = 0, MDNode *ContainingType = 0);
/// CreateTemporaryType - Create a temporary forward-declared type.
DIType CreateTemporaryType();
@@ -131,12 +131,12 @@ public:
StringRef LinkageName, DIFile F, unsigned LineNo, DIType Ty,
bool isLocalToUnit, bool isDefinition, unsigned VK = 0,
unsigned VIndex = 0, DIType ContainingType = DIType(), unsigned Flags = 0,
- bool isOptimized = false, Function * Fn = 0);
+ bool isOptimized = false, Function *Fn = 0);
/// CreateSubprogramDefinition - Create new subprogram descriptor for the
/// given declaration.
- DISubprogram CreateSubprogramDefinition(DISubprogram& SPDeclaration,
- unsigned LineNo, Function* Fn);
+ DISubprogram CreateSubprogramDefinition(DISubprogram &SPDeclaration,
+ unsigned LineNo, Function *Fn);
/// CreateGlobalVariable - Create a new descriptor for the specified global.
DIGlobalVariable CreateGlobalVariable(
@@ -180,12 +180,12 @@ public:
MDNode *OrigLoc = 0);
/// InsertDeclare - Insert a new llvm.dbg.declare intrinsic call.
- Instruction *InsertDeclare(llvm::Value *Storage, DIVariable D,
- BasicBlock *InsertAtEnd);
+ Instruction *
+ InsertDeclare(llvm::Value *Storage, DIVariable D, BasicBlock *InsertAtEnd);
/// InsertDeclare - Insert a new llvm.dbg.declare intrinsic call.
- Instruction *InsertDeclare(llvm::Value *Storage, DIVariable D,
- Instruction *InsertBefore);
+ Instruction *
+ InsertDeclare(llvm::Value *Storage, DIVariable D, Instruction *InsertBefore);
/// InsertDbgValueIntrinsic - Insert a new llvm.dbg.value intrinsic call.
Instruction *InsertDbgValueIntrinsic(llvm::Value *V, uint64_t Offset,
Modified: dragonegg/trunk/include/dragonegg/Internals.h
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/include/dragonegg/Internals.h?rev=176016&r1=176015&r2=176016&view=diff
==============================================================================
--- dragonegg/trunk/include/dragonegg/Internals.h (original)
+++ dragonegg/trunk/include/dragonegg/Internals.h Mon Feb 25 04:54:25 2013
@@ -146,8 +146,8 @@ bool isPaddingElement(tree_node *, unsig
/// that has no value specified. For example in C like languages such variables
/// are initialized to zero, while in Ada they hold an undefined value.
inline Constant *getDefaultValue(Type *Ty) {
- return flag_default_initialize_globals ? Constant::getNullValue(Ty) :
- UndefValue::get(Ty);
+ return flag_default_initialize_globals ? Constant::getNullValue(Ty)
+ : UndefValue::get(Ty);
}
/// isPassedByInvisibleReference - Return true if the specified type should be
@@ -323,10 +323,10 @@ public:
/// CastToAnyType - Cast the specified value to the specified type regardless
/// of the types involved. This is an inferred cast.
- Value *CastToAnyType(Value *Src, bool SrcIsSigned, Type *DstTy,
- bool DstIsSigned);
- Constant *CastToAnyType(Constant *Src, bool SrcIsSigned, Type *DstTy,
- bool DstIsSigned);
+ Value *
+ CastToAnyType(Value *Src, bool SrcIsSigned, Type *DstTy, bool DstIsSigned);
+ Constant *
+ CastToAnyType(Constant *Src, bool SrcIsSigned, Type *DstTy, bool DstIsSigned);
/// CastFromSameSizeInteger - Cast an integer (or vector of integer) value to
/// the given scalar (resp. vector of scalar) type of the same bitwidth.
@@ -420,8 +420,8 @@ private : // Helper functions.
/// llvm.memset call with the specified operands. Returns DestPtr bitcast
/// to i8*.
Value *EmitMemCpy(Value *DestPtr, Value *SrcPtr, Value *Size, unsigned Align);
- Value *EmitMemMove(Value *DestPtr, Value *SrcPtr, Value *Size,
- unsigned Align);
+ Value *
+ EmitMemMove(Value *DestPtr, Value *SrcPtr, Value *Size, unsigned Align);
Value *EmitMemSet(Value *DestPtr, Value *SrcVal, Value *Size, unsigned Align);
/// EmitLandingPads - Emit EH landing pads.
@@ -568,14 +568,14 @@ private:
#endif
Value *EmitReg_VEC_PACK_FIX_TRUNC_EXPR(tree_node *type, tree_node *op0,
tree_node *op1);
- Value *EmitReg_VEC_PACK_TRUNC_EXPR(tree_node *type, tree_node *op0,
- tree_node *op1);
+ Value *
+ EmitReg_VEC_PACK_TRUNC_EXPR(tree_node *type, tree_node *op0, tree_node *op1);
Value *EmitReg_VEC_WIDEN_MULT_HI_EXPR(tree_node *type, tree_node *op0,
tree_node *op1);
Value *EmitReg_VEC_WIDEN_MULT_LO_EXPR(tree_node *type, tree_node *op0,
tree_node *op1);
- Value *EmitReg_WIDEN_MULT_EXPR(tree_node *type, tree_node *op0,
- tree_node *op1);
+ Value *
+ EmitReg_WIDEN_MULT_EXPR(tree_node *type, tree_node *op0, tree_node *op1);
// Ternary expressions.
Value *EmitReg_CondExpr(tree_node *op0, tree_node *op1, tree_node *op2);
@@ -592,8 +592,7 @@ private:
Value *EmitCallOf(Value *Callee, gimple_statement_d *stmt,
const MemRef *DestLoc, const AttributeSet &PAL);
CallInst *EmitSimpleCall(StringRef CalleeName, tree_node *ret_type,
- /* arguments */ ...)
- END_WITH_NULL;
+ /* arguments */ ...) END_WITH_NULL;
Value *EmitFieldAnnotation(Value *FieldPtr, tree_node *FieldDecl);
// Inline Assembly and Register Variables.
@@ -606,8 +605,8 @@ private:
Value *BuildVectorShuffle(Value *InVec1, Value *InVec2, ...);
Value *BuildBinaryAtomic(gimple_statement_d *stmt, AtomicRMWInst::BinOp Kind,
unsigned PostOp = 0);
- Value *BuildCmpAndSwapAtomic(gimple_statement_d *stmt, unsigned Bits,
- bool isBool);
+ Value *
+ BuildCmpAndSwapAtomic(gimple_statement_d *stmt, unsigned Bits, bool isBool);
// Builtin Function Expansion.
bool EmitBuiltinCall(gimple_statement_d *stmt, tree_node *fndecl,
@@ -616,8 +615,8 @@ private:
tree_node *fndecl, const MemRef *DestLoc,
Value *&Result);
bool EmitBuiltinUnaryOp(Value *InVal, Value *&Result, Intrinsic::ID Id);
- Value *EmitBuiltinBitCountIntrinsic(gimple_statement_d *stmt,
- Intrinsic::ID Id);
+ Value *
+ EmitBuiltinBitCountIntrinsic(gimple_statement_d *stmt, Intrinsic::ID Id);
Value *EmitBuiltinSQRT(gimple_statement_d *stmt);
Value *EmitBuiltinPOWI(gimple_statement_d *stmt);
Value *EmitBuiltinPOW(gimple_statement_d *stmt);
Modified: dragonegg/trunk/include/dragonegg/TypeConversion.h
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/include/dragonegg/TypeConversion.h?rev=176016&r1=176015&r2=176016&view=diff
==============================================================================
--- dragonegg/trunk/include/dragonegg/TypeConversion.h (original)
+++ dragonegg/trunk/include/dragonegg/TypeConversion.h Mon Feb 25 04:54:25 2013
@@ -62,8 +62,8 @@ extern llvm::Type *GetUnitType(llvm::LLV
/// GetUnitPointerType - Returns an LLVM pointer type which points to memory one
/// address unit wide. For example, on a machine which has 16 bit bytes returns
/// an i16*.
-extern llvm::Type *GetUnitPointerType(llvm::LLVMContext &C,
- unsigned AddrSpace = 0);
+extern llvm::Type *
+GetUnitPointerType(llvm::LLVMContext &C, unsigned AddrSpace = 0);
/// isSizeCompatible - Return true if the specified gcc type is guaranteed to be
/// turned by ConvertType into an LLVM type of the same size (i.e. TYPE_SIZE the
@@ -88,9 +88,9 @@ extern llvm::Type *ConvertType(tree_node
/// ConvertFunctionType - Convert the specified FUNCTION_TYPE or METHOD_TYPE
/// tree to an LLVM type. This does the same thing that ConvertType does, but
/// it also returns the function's LLVM calling convention and attributes.
-extern llvm::FunctionType *ConvertFunctionType(
- tree_node *type, tree_node *decl, tree_node *static_chain,
- llvm::CallingConv::ID &CC, llvm::AttributeSet &PAL);
+extern llvm::FunctionType *
+ConvertFunctionType(tree_node *type, tree_node *decl, tree_node *static_chain,
+ llvm::CallingConv::ID &CC, llvm::AttributeSet &PAL);
/// ConvertArgListToFnType - Given a DECL_ARGUMENTS list on an GCC tree,
/// return the LLVM type corresponding to the function. This is useful for
Modified: dragonegg/trunk/include/x86/dragonegg/Target.h
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/include/x86/dragonegg/Target.h?rev=176016&r1=176015&r2=176016&view=diff
==============================================================================
--- dragonegg/trunk/include/x86/dragonegg/Target.h (original)
+++ dragonegg/trunk/include/x86/dragonegg/Target.h Mon Feb 25 04:54:25 2013
@@ -125,8 +125,8 @@ extern bool llvm_x86_should_pass_aggrega
#define LLVM_SHOULD_PASS_AGGREGATE_IN_INTEGER_REGS(X, Y, Z) \
llvm_x86_should_pass_aggregate_in_integer_regs((X), (Y), (Z))
-extern Type *llvm_x86_scalar_type_for_struct_return(tree_node *type,
- unsigned *Offset);
+extern Type *
+llvm_x86_scalar_type_for_struct_return(tree_node *type, unsigned *Offset);
/* LLVM_SCALAR_TYPE_FOR_STRUCT_RETURN - Return LLVM Type if X can be
returned as a scalar, otherwise return NULL. */
@@ -205,9 +205,9 @@ extern bool llvm_x86_32_should_pass_aggr
tree_node *, Type *Ty, std::vector<Type *> &);
#define LLVM_SHOULD_PASS_AGGREGATE_IN_MIXED_REGS(T, TY, CC, E) \
- (TARGET_64BIT ? \
- llvm_x86_64_should_pass_aggregate_in_mixed_regs((T), (TY), (E)) : \
- llvm_x86_32_should_pass_aggregate_in_mixed_regs((T), (TY), (E)))
+ (TARGET_64BIT \
+ ? llvm_x86_64_should_pass_aggregate_in_mixed_regs((T), (TY), (E)) \
+ : llvm_x86_32_should_pass_aggregate_in_mixed_regs((T), (TY), (E)))
extern bool llvm_x86_64_aggregate_partially_passed_in_regs(
std::vector<Type *> &, std::vector<Type *> &, bool);
Modified: dragonegg/trunk/src/Aliasing.cpp
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/src/Aliasing.cpp?rev=176016&r1=176015&r2=176016&view=diff
==============================================================================
--- dragonegg/trunk/src/Aliasing.cpp (original)
+++ dragonegg/trunk/src/Aliasing.cpp Mon Feb 25 04:54:25 2013
@@ -120,7 +120,7 @@ MDNode *describeAliasSet(tree t) {
// If there is a path from this node to any leaf node then it is not a leaf
// node and can be discarded.
- for (unsigned i = 0, e = (unsigned)LeafNodes.size(); i != e; ++i)
+ for (unsigned i = 0, e = (unsigned) LeafNodes.size(); i != e; ++i)
if (alias_set_subset_of(LeafNodes[i], alias_set)) {
NodeTags[alias_set] = 0;
return 0;
@@ -129,7 +129,7 @@ MDNode *describeAliasSet(tree t) {
// If there is a path from any leaf node to this one then no longer consider
// that node to be a leaf.
- for (unsigned i = (unsigned)LeafNodes.size(); i;) {
+ for (unsigned i = (unsigned) LeafNodes.size(); i;) {
alias_set_type leaf_set = LeafNodes[--i];
if (alias_set_subset_of(alias_set, leaf_set)) {
LeafNodes.erase(LeafNodes.begin() + i);
@@ -143,8 +143,8 @@ MDNode *describeAliasSet(tree t) {
// Create metadata describing the new node hanging off root. The name doesn't
// matter much but needs to be unique for the compilation unit.
- tree type = TYPE_CANONICAL(
- TYPE_MAIN_VARIANT(isa<TYPE>(t) ? t : TREE_TYPE(t)));
+ tree type =
+ TYPE_CANONICAL(TYPE_MAIN_VARIANT(isa<TYPE>(t) ? t : TREE_TYPE(t)));
std::string TreeName =
("alias set " + Twine(alias_set) + ": " + getDescriptiveName(type)).str();
MDBuilder MDHelper(Context);
Modified: dragonegg/trunk/src/Backend.cpp
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/src/Backend.cpp?rev=176016&r1=176015&r2=176016&view=diff
==============================================================================
--- dragonegg/trunk/src/Backend.cpp (original)
+++ dragonegg/trunk/src/Backend.cpp Mon Feb 25 04:54:25 2013
@@ -214,8 +214,8 @@ void handleVisibility(tree decl, GlobalV
/// CodeGenOptLevel - The optimization level to be used by the code generators.
static CodeGenOpt::Level CodeGenOptLevel() {
- int OptLevel = LLVMCodeGenOptimizeArg >= 0 ? LLVMCodeGenOptimizeArg :
- optimize;
+ int OptLevel =
+ LLVMCodeGenOptimizeArg >= 0 ? LLVMCodeGenOptimizeArg : optimize;
if (OptLevel <= 0)
return CodeGenOpt::None;
if (OptLevel == 1)
@@ -608,8 +608,8 @@ static void InitializeBackend(void) {
// vectorizer using -fplugin-arg-dragonegg-llvm-option=-vectorize
PassBuilder.Vectorize = PassManagerBuilder().Vectorize;
- PassBuilder.LibraryInfo = new TargetLibraryInfo((Triple)
- TheModule->getTargetTriple());
+ PassBuilder.LibraryInfo =
+ new TargetLibraryInfo((Triple) TheModule->getTargetTriple());
if (flag_no_simplify_libcalls)
PassBuilder.LibraryInfo->disableAllFunctions();
@@ -732,8 +732,8 @@ static void createPerModuleOptimizationP
// FIXME: This is disabled right now until bugs can be worked out. Reenable
// this for fast -O0 compiles!
if (PerModulePasses || 1) {
- FunctionPassManager *PM = CodeGenPasses = new FunctionPassManager(
- TheModule);
+ FunctionPassManager *PM = CodeGenPasses =
+ new FunctionPassManager(TheModule);
PM->add(new DataLayout(*TheTarget->getDataLayout()));
TheTarget->addAnalysisPasses(*PM);
@@ -766,8 +766,8 @@ static void CreateStructorsList(std::vec
LLVMContext &Context = getGlobalContext();
- Type *FPTy = FunctionType::get(Type::getVoidTy(Context),
- std::vector<Type *>(), false);
+ Type *FPTy =
+ FunctionType::get(Type::getVoidTy(Context), std::vector<Type *>(), false);
FPTy = FPTy->getPointerTo();
for (unsigned i = 0, e = Tors.size(); i != e; ++i) {
@@ -778,9 +778,8 @@ static void CreateStructorsList(std::vec
StructInit[1] = TheFolder->CreateBitCast(Tors[i].first, FPTy);
InitList.push_back(ConstantStruct::getAnon(Context, StructInit));
}
- Constant *Array = ConstantArray::get(ArrayType::get(InitList[0]->getType(),
- InitList.size()),
- InitList);
+ Constant *Array = ConstantArray::get(
+ ArrayType::get(InitList[0]->getType(), InitList.size()), InitList);
new GlobalVariable(*TheModule, Array->getType(), false,
GlobalValue::AppendingLinkage, Array, Name);
}
@@ -798,9 +797,9 @@ Constant *ConvertMetadataStringToGV(cons
return Slot;
// Create a new string global.
- GlobalVariable *GV = new GlobalVariable(*TheModule, Init->getType(), true,
- GlobalVariable::PrivateLinkage, Init,
- ".str");
+ GlobalVariable *GV =
+ new GlobalVariable(*TheModule, Init->getType(), true,
+ GlobalVariable::PrivateLinkage, Init, ".str");
GV->setSection("llvm.metadata");
Slot = GV;
return GV;
@@ -818,8 +817,8 @@ void AddAnnotateAttrsToGlobal(GlobalValu
return;
// Get file and line number
- Constant *lineNo = ConstantInt::get(Type::getInt32Ty(Context),
- DECL_SOURCE_LINE(decl));
+ Constant *lineNo =
+ ConstantInt::get(Type::getInt32Ty(Context), DECL_SOURCE_LINE(decl));
Constant *file = ConvertMetadataStringToGV(DECL_SOURCE_FILE(decl));
Type *SBP = Type::getInt8PtrTy(Context);
file = TheFolder->CreateBitCast(file, SBP);
@@ -902,7 +901,8 @@ static void emit_alias(tree decl, tree t
GlobalValue *Aliasee = 0;
if (isa<IDENTIFIER_NODE>(target)) {
- StringRef AliaseeName(IDENTIFIER_POINTER(target), IDENTIFIER_LENGTH(target));
+ StringRef AliaseeName(IDENTIFIER_POINTER(target),
+ IDENTIFIER_LENGTH(target));
if (!lookup_attribute("weakref", DECL_ATTRIBUTES(decl))) {
Aliasee = TheModule->getNamedValue(AliaseeName);
if (!Aliasee)
@@ -916,13 +916,12 @@ static void emit_alias(tree decl, tree t
// weakref to external symbol.
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
Aliasee = new GlobalVariable(
- *TheModule, GV->getType()->getElementType(),
- GV->isConstant(), GlobalVariable::ExternalWeakLinkage,
- NULL, AliaseeName);
+ *TheModule, GV->getType()->getElementType(), GV->isConstant(),
+ GlobalVariable::ExternalWeakLinkage, NULL, AliaseeName);
else if (Function *F = dyn_cast<Function>(V))
Aliasee = Function::Create(F->getFunctionType(),
- Function::ExternalWeakLinkage,
- AliaseeName, TheModule);
+ Function::ExternalWeakLinkage, AliaseeName,
+ TheModule);
else
llvm_unreachable("Unsuported global value");
}
@@ -934,8 +933,8 @@ static void emit_alias(tree decl, tree t
if (Linkage != GlobalValue::InternalLinkage) {
// Create the LLVM alias.
- GlobalAlias *GA = new GlobalAlias(Aliasee->getType(), Linkage, "", Aliasee,
- TheModule);
+ GlobalAlias *GA =
+ new GlobalAlias(Aliasee->getType(), Linkage, "", Aliasee, TheModule);
handleVisibility(decl, GA);
// Associate it with decl instead of V.
@@ -1020,9 +1019,9 @@ static void emit_global(tree decl) {
// global union, and the LLVM type followed a union initializer that is
// different from the union element used for the type.
GV->removeFromParent();
- GlobalVariable *NGV = new GlobalVariable(
- *TheModule, Init->getType(), GV->isConstant(),
- GlobalValue::ExternalLinkage, 0, GV->getName());
+ GlobalVariable *NGV =
+ new GlobalVariable(*TheModule, Init->getType(), GV->isConstant(),
+ GlobalValue::ExternalLinkage, 0, GV->getName());
NGV->setInitializer(Init);
GV->replaceAllUsesWith(TheFolder->CreateBitCast(NGV, GV->getType()));
changeLLVMConstant(GV, NGV);
@@ -1111,8 +1110,8 @@ static void emit_global(tree decl) {
if (DECL_SECTION_NAME(decl)) {
GV->setSection(TREE_STRING_POINTER(DECL_SECTION_NAME(decl)));
#ifdef LLVM_IMPLICIT_TARGET_GLOBAL_VAR_SECTION
- } else if (
- const char *Section = LLVM_IMPLICIT_TARGET_GLOBAL_VAR_SECTION(decl)) {
+ } else if (const char *Section =
+ LLVM_IMPLICIT_TARGET_GLOBAL_VAR_SECTION(decl)) {
GV->setSection(Section);
#endif
}
@@ -1296,10 +1295,10 @@ Value *make_decl_llvm(tree decl) {
if (FnEntry == 0) {
CallingConv::ID CC;
AttributeSet PAL;
- FunctionType *Ty = ConvertFunctionType(TREE_TYPE(decl), decl, NULL, CC,
- PAL);
- FnEntry = Function::Create(Ty, Function::ExternalLinkage, Name,
- TheModule);
+ FunctionType *Ty =
+ ConvertFunctionType(TREE_TYPE(decl), decl, NULL, CC, PAL);
+ FnEntry =
+ Function::Create(Ty, Function::ExternalLinkage, Name, TheModule);
FnEntry->setCallingConv(CC);
FnEntry->setAttributes(PAL);
@@ -1464,8 +1463,8 @@ Value *make_definition_llvm(tree decl) {
/// Fn is a 'void()' ctor/dtor function to be run, initprio is the init
/// priority, and isCtor indicates whether this is a ctor or dtor.
void register_ctor_dtor(Function *Fn, int InitPrio, bool isCtor) {
- (isCtor ? &StaticCtors : &StaticDtors)->push_back(std::make_pair(Fn,
- InitPrio));
+ (isCtor ? &StaticCtors : &StaticDtors)
+ ->push_back(std::make_pair(Fn, InitPrio));
}
/// extractRegisterName - Get a register name given its decl. In 4.2 unlike 4.0
@@ -1666,16 +1665,16 @@ static unsigned int rtl_emit_function(vo
/// pass_rtl_emit_function - RTL pass that converts a function to LLVM IR.
static struct rtl_opt_pass pass_rtl_emit_function = { {
- RTL_PASS, "rtl_emit_function", /* name */
- NULL, /* gate */
- rtl_emit_function, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_NONE, /* tv_id */
+ RTL_PASS, "rtl_emit_function", /* name */
+ NULL, /* gate */
+ rtl_emit_function, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_NONE, /* tv_id */
PROP_ssa | PROP_gimple_leh | PROP_cfg, /* properties_required */
- 0, /* properties_provided */
- PROP_ssa | PROP_trees, /* properties_destroyed */
+ 0, /* properties_provided */
+ PROP_ssa | PROP_trees, /* properties_destroyed */
TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts, /* todo_flags_start */
TODO_ggc_collect /* todo_flags_finish */
} };
@@ -1705,8 +1704,8 @@ static void emit_cgraph_weakrefs() {
for (struct cgraph_node *node = cgraph_nodes; node; node = node->next)
if (node->alias && DECL_EXTERNAL(node->decl) &&
lookup_attribute("weakref", DECL_ATTRIBUTES(node->decl)))
- emit_alias(node->decl, node->thunk.alias ? node->thunk.alias :
- get_alias_symbol(node->decl));
+ emit_alias(node->decl, node->thunk.alias ? node->thunk.alias
+ : get_alias_symbol(node->decl));
}
/// emit_varpool_weakrefs - Output any varpool weak references to external
@@ -1715,8 +1714,8 @@ static void emit_varpool_weakrefs() {
for (struct varpool_node *vnode = varpool_nodes; vnode; vnode = vnode->next)
if (vnode->alias && DECL_EXTERNAL(vnode->decl) &&
lookup_attribute("weakref", DECL_ATTRIBUTES(vnode->decl)))
- emit_alias(vnode->decl, vnode->alias_of ? vnode->alias_of :
- get_alias_symbol(vnode->decl));
+ emit_alias(vnode->decl, vnode->alias_of ? vnode->alias_of
+ : get_alias_symbol(vnode->decl));
}
#endif
@@ -1835,8 +1834,8 @@ static void llvm_finish_unit(void */*gcc
if (!AttributeUsedGlobals.empty()) {
std::vector<Constant *> AUGs;
Type *SBP = Type::getInt8PtrTy(Context);
- for (SmallSetVector<Constant *, 32>::iterator AI = AttributeUsedGlobals
- .begin(), AE = AttributeUsedGlobals.end();
+ for (SmallSetVector<Constant *, 32>::iterator AI =
+ AttributeUsedGlobals.begin(), AE = AttributeUsedGlobals.end();
AI != AE; ++AI) {
Constant *C = *AI;
AUGs.push_back(TheFolder->CreateBitCast(C, SBP));
@@ -1844,9 +1843,9 @@ static void llvm_finish_unit(void */*gcc
ArrayType *AT = ArrayType::get(SBP, AUGs.size());
Constant *Init = ConstantArray::get(AT, AUGs);
- GlobalValue *gv = new GlobalVariable(*TheModule, AT, false,
- GlobalValue::AppendingLinkage, Init,
- "llvm.used");
+ GlobalValue *gv =
+ new GlobalVariable(*TheModule, AT, false, GlobalValue::AppendingLinkage,
+ Init, "llvm.used");
gv->setSection("llvm.metadata");
AttributeUsedGlobals.clear();
}
@@ -1855,8 +1854,8 @@ static void llvm_finish_unit(void */*gcc
std::vector<Constant *> ACUGs;
Type *SBP = Type::getInt8PtrTy(Context);
for (SmallSetVector<Constant *, 32>::iterator AI =
- AttributeCompilerUsedGlobals.begin(), AE =
- AttributeCompilerUsedGlobals.end();
+ AttributeCompilerUsedGlobals
+ .begin(), AE = AttributeCompilerUsedGlobals.end();
AI != AE; ++AI) {
Constant *C = *AI;
ACUGs.push_back(TheFolder->CreateBitCast(C, SBP));
@@ -1864,9 +1863,9 @@ static void llvm_finish_unit(void */*gcc
ArrayType *AT = ArrayType::get(SBP, ACUGs.size());
Constant *Init = ConstantArray::get(AT, ACUGs);
- GlobalValue *gv = new GlobalVariable(*TheModule, AT, false,
- GlobalValue::AppendingLinkage, Init,
- "llvm.compiler.used");
+ GlobalValue *gv =
+ new GlobalVariable(*TheModule, AT, false, GlobalValue::AppendingLinkage,
+ Init, "llvm.compiler.used");
gv->setSection("llvm.metadata");
AttributeCompilerUsedGlobals.clear();
}
@@ -1874,9 +1873,9 @@ static void llvm_finish_unit(void */*gcc
// Add llvm.global.annotations
if (!AttributeAnnotateGlobals.empty()) {
Constant *Array = ConstantArray::get(
- ArrayType::get(AttributeAnnotateGlobals[0]->getType(),
- AttributeAnnotateGlobals.size()),
- AttributeAnnotateGlobals);
+ ArrayType::get(AttributeAnnotateGlobals[0]->getType(),
+ AttributeAnnotateGlobals.size()),
+ AttributeAnnotateGlobals);
GlobalValue *gv = new GlobalVariable(*TheModule, Array->getType(), false,
GlobalValue::AppendingLinkage, Array,
"llvm.global.annotations");
@@ -1931,17 +1930,17 @@ static bool gate_null(void) { return fal
/// pass_gimple_null - Gimple pass that does nothing.
static struct gimple_opt_pass pass_gimple_null = {
{ GIMPLE_PASS, "*gimple_null", /* name */
- gate_null, /* gate */
- NULL, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_NONE, /* tv_id */
- 0, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- 0 /* todo_flags_finish */
+ gate_null, /* gate */
+ NULL, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_NONE, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0 /* todo_flags_finish */
}
};
@@ -1960,78 +1959,78 @@ static bool gate_correct_state(void) { r
/// newly inserted functions are processed before being converted to LLVM IR.
static struct gimple_opt_pass pass_gimple_correct_state = {
{ GIMPLE_PASS, "*gimple_correct_state", /* name */
- gate_correct_state, /* gate */
- execute_correct_state, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_NONE, /* tv_id */
- 0, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- 0 /* todo_flags_finish */
+ gate_correct_state, /* gate */
+ execute_correct_state, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_NONE, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0 /* todo_flags_finish */
}
};
/// pass_ipa_null - IPA pass that does nothing.
static struct ipa_opt_pass_d pass_ipa_null = {
{ IPA_PASS, "*ipa_null", /* name */
- gate_null, /* gate */
- NULL, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_NONE, /* tv_id */
- 0, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- 0 /* todo_flags_finish */
-}, NULL, /* generate_summary */
- NULL, /* write_summary */
- NULL, /* read_summary */
+ gate_null, /* gate */
+ NULL, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_NONE, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0 /* todo_flags_finish */
+}, NULL, /* generate_summary */
+ NULL, /* write_summary */
+ NULL, /* read_summary */
#if (GCC_MINOR > 5)
- NULL, /* write_optimization_summary */
- NULL, /* read_optimization_summary */
+ NULL, /* write_optimization_summary */
+ NULL, /* read_optimization_summary */
#else
- NULL, /* function_read_summary */
+ NULL, /* function_read_summary */
#endif
- NULL, /* stmt_fixup */
- 0, /* function_transform_todo_flags_start */
- NULL, /* function_transform */
- NULL /* variable_transform */
+ NULL, /* stmt_fixup */
+ 0, /* function_transform_todo_flags_start */
+ NULL, /* function_transform */
+ NULL /* variable_transform */
};
/// pass_rtl_null - RTL pass that does nothing.
static struct rtl_opt_pass pass_rtl_null = { { RTL_PASS, "*rtl_null", /* name */
- gate_null, /* gate */
- NULL, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
+ gate_null, /* gate */
+ NULL, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
TV_NONE, /* tv_id */
0, /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- 0 /* todo_flags_finish */
+ 0 /* todo_flags_finish */
} };
/// pass_simple_ipa_null - Simple IPA pass that does nothing.
static struct simple_ipa_opt_pass pass_simple_ipa_null = {
{ SIMPLE_IPA_PASS, "*simple_ipa_null", /* name */
- gate_null, /* gate */
- NULL, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_NONE, /* tv_id */
- 0, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- 0 /* todo_flags_finish */
+ gate_null, /* gate */
+ NULL, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_NONE, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0 /* todo_flags_finish */
}
};
@@ -2055,9 +2054,9 @@ static FlagDescriptor PluginFlags[] = {
/// llvm_plugin_info - Information about this plugin. Users can access this
/// using "gcc --help -v".
static struct plugin_info llvm_plugin_info = {
- LLVM_VERSION, // version
- // TODO provide something useful here
- NULL // help
+ LLVM_VERSION, // version
+ // TODO provide something useful here
+ NULL // help
};
#ifndef DISABLE_VERSION_CHECK
Modified: dragonegg/trunk/src/Cache.cpp
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/src/Cache.cpp?rev=176016&r1=176015&r2=176016&view=diff
==============================================================================
--- dragonegg/trunk/src/Cache.cpp (original)
+++ dragonegg/trunk/src/Cache.cpp Mon Feb 25 04:54:25 2013
@@ -139,7 +139,7 @@ void setCachedInteger(tree t, int Val) {
intCache = htab_create_ggc(1024, tree2int_hash, tree2int_eq, 0);
tree_map_base in = { t };
- tree2int **slot = (tree2int * *) htab_find_slot(intCache, &in, INSERT);
+ tree2int **slot = (tree2int **)htab_find_slot(intCache, &in, INSERT);
assert(slot && "Failed to create hash table slot!");
if (!*slot) {
@@ -176,7 +176,7 @@ void setCachedType(tree t, Type *Ty) {
if (!TypeCache)
TypeCache = htab_create_ggc(1024, tree2Type_hash, tree2Type_eq, 0);
- tree2Type **slot = (tree2Type * *) htab_find_slot(TypeCache, &in, INSERT);
+ tree2Type **slot = (tree2Type **)htab_find_slot(TypeCache, &in, INSERT);
assert(slot && "Failed to create hash table slot!");
if (!*slot) {
@@ -220,11 +220,10 @@ void setCachedValue(tree t, Value *V) {
}
if (!WeakVHCache)
- WeakVHCache = htab_create_ggc(1024, tree2WeakVH_hash, tree2WeakVH_eq,
- DestructWeakVH);
+ WeakVHCache =
+ htab_create_ggc(1024, tree2WeakVH_hash, tree2WeakVH_eq, DestructWeakVH);
- tree2WeakVH **slot = (tree2WeakVH * *)
- htab_find_slot(WeakVHCache, &in, INSERT);
+ tree2WeakVH **slot = (tree2WeakVH **)htab_find_slot(WeakVHCache, &in, INSERT);
assert(slot && "Failed to create hash table slot!");
if (*slot) {
Modified: dragonegg/trunk/src/ConstantConversion.cpp
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/src/ConstantConversion.cpp?rev=176016&r1=176015&r2=176016&view=diff
==============================================================================
--- dragonegg/trunk/src/ConstantConversion.cpp (original)
+++ dragonegg/trunk/src/ConstantConversion.cpp Mon Feb 25 04:54:25 2013
@@ -308,8 +308,8 @@ static BitSlice ViewAsBits(Constant *C,
// nothing to worry about: the bits occupy the range [0, StoreSize). But
// if not then endianness matters: on big-endian machines there are padding
// bits at the start, while on little-endian machines they are at the end.
- return BYTES_BIG_ENDIAN ? BitSlice(StoreSize - BitWidth, StoreSize, C) :
- BitSlice(0, BitWidth, C);
+ return BYTES_BIG_ENDIAN ? BitSlice(StoreSize - BitWidth, StoreSize, C)
+ : BitSlice(0, BitWidth, C);
}
case Type::ArrayTyID: {
@@ -344,8 +344,8 @@ static BitSlice ViewAsBits(Constant *C,
const StructLayout *SL = getDataLayout().getStructLayout(STy);
// Fields with indices in [FirstIdx, LastIdx) overlap the range.
unsigned FirstIdx = SL->getElementContainingOffset(R.getFirst() / 8);
- unsigned LastIdx = 1 +
- SL->getElementContainingOffset((R.getLast() - 1) / 8);
+ unsigned LastIdx =
+ 1 + SL->getElementContainingOffset((R.getLast() - 1) / 8);
// Visit all fields that overlap the requested range, accumulating their
// bits in Bits.
BitSlice Bits;
@@ -404,8 +404,8 @@ static BitSlice ViewAsBits(Constant *C,
/// same constant as you would get by storing the bits of 'C' to memory (with
/// the first bit stored being 'StartingBit') and then loading out a (constant)
/// value of type 'Ty' from the stored to memory location.
-static Constant *InterpretAsType(Constant *C, Type *Ty, int StartingBit,
- TargetFolder &Folder) {
+static Constant *
+InterpretAsType(Constant *C, Type *Ty, int StartingBit, TargetFolder &Folder) {
// Efficient handling for some common cases.
if (C->getType() == Ty)
return C;
@@ -426,16 +426,16 @@ static Constant *InterpretAsType(Constan
// Convert the constant into a bunch of bits. Only the bits to be "loaded"
// out are needed, so rather than converting the entire constant this only
// converts enough to get all of the required bits.
- BitSlice Bits = ViewAsBits(C, SignedRange(StartingBit,
- StartingBit + StoreSize), Folder);
+ BitSlice Bits = ViewAsBits(
+ C, SignedRange(StartingBit, StartingBit + StoreSize), Folder);
// Extract the bits used by the integer. If the integer width is a multiple
// of the address unit then the endianness of the target doesn't matter. If
// not then the padding bits come at the start on big-endian machines and at
// the end on little-endian machines.
Bits = Bits.Displace(-StartingBit);
- return BYTES_BIG_ENDIAN ?
- Bits.getBits(SignedRange(StoreSize - BitWidth, StoreSize), Folder) :
- Bits.getBits(SignedRange(0, BitWidth), Folder);
+ return BYTES_BIG_ENDIAN
+ ? Bits.getBits(SignedRange(StoreSize - BitWidth, StoreSize), Folder)
+ : Bits.getBits(SignedRange(0, BitWidth), Folder);
}
case Type::PointerTyID: {
@@ -478,8 +478,9 @@ static Constant *InterpretAsType(Constan
unsigned NumElts = STy->getNumElements();
std::vector<Constant *> Vals(NumElts);
for (unsigned i = 0; i != NumElts; ++i)
- Vals[i] = InterpretAsType(C, STy->getElementType(i), StartingBit +
- SL->getElementOffsetInBits(i), Folder);
+ Vals[i] =
+ InterpretAsType(C, STy->getElementType(i),
+ StartingBit + SL->getElementOffsetInBits(i), Folder);
return ConstantStruct::get(STy, Vals); // TODO: Use ArrayRef constructor.
}
@@ -556,8 +557,8 @@ static Constant *ExtractRegisterFromCons
unsigned Stride = GET_MODE_BITSIZE(TYPE_MODE(elt_type));
SmallVector<Constant *, 16> Vals(NumElts);
for (unsigned i = 0; i != NumElts; ++i)
- Vals[i] = ExtractRegisterFromConstantImpl(C, elt_type, StartingBit +
- i * Stride, Folder);
+ Vals[i] = ExtractRegisterFromConstantImpl(
+ C, elt_type, StartingBit + i * Stride, Folder);
return ConstantVector::get(Vals);
}
@@ -569,8 +570,8 @@ static Constant *ExtractRegisterFromCons
/// getRegType, and is what you would get by storing the constant to memory and
/// using LoadRegisterFromMemory to load a register value back out starting from
/// byte StartingByte.
-Constant *ExtractRegisterFromConstant(Constant *C, tree type,
- int StartingByte) {
+Constant *
+ExtractRegisterFromConstant(Constant *C, tree type, int StartingByte) {
TargetFolder Folder(&getDataLayout());
return ExtractRegisterFromConstantImpl(C, type, StartingByte, Folder);
}
@@ -590,8 +591,8 @@ static Constant *getAsRegister(tree exp,
/// to the given GCC type) into an in-memory constant. The result has the
/// property that applying ExtractRegisterFromConstant to it gives you the
/// original in-register constant back again.
-static Constant *RepresentAsMemory(Constant *C, tree type,
- TargetFolder &Folder) {
+static Constant *
+RepresentAsMemory(Constant *C, tree type, TargetFolder &Folder) {
// NOTE: Needs to be kept in sync with ExtractRegisterFromConstant.
assert(C->getType() == getRegType(type) && "Constant has wrong type!");
Constant *Result;
@@ -612,8 +613,8 @@ static Constant *RepresentAsMemory(Const
unsigned Size = GET_MODE_BITSIZE(TYPE_MODE(type));
Type *MemTy = IntegerType::get(Context, Size);
bool isSigned = !TYPE_UNSIGNED(type);
- Result = isSigned ? Folder.CreateSExtOrBitCast(C, MemTy) :
- Folder.CreateZExtOrBitCast(C, MemTy);
+ Result = isSigned ? Folder.CreateSExtOrBitCast(C, MemTy)
+ : Folder.CreateZExtOrBitCast(C, MemTy);
break;
}
@@ -679,8 +680,8 @@ static Constant *RepresentAsMemory(Const
/// and pointless type changes in the IR, and for making explicit the implicit
/// scalar casts that GCC allows in "assignments" such as initializing a record
/// field.
-static Constant *ConvertInitializerWithCast(tree exp, tree type,
- TargetFolder &Folder) {
+static Constant *
+ConvertInitializerWithCast(tree exp, tree type, TargetFolder &Folder) {
// Convert the initializer. Note that the type of the returned value may be
// pretty much anything.
Constant *C = ConvertInitializerImpl(exp, Folder);
@@ -709,8 +710,8 @@ static Constant *ConvertInitializerWithC
// Cast to the desired type.
bool SrcIsSigned = !TYPE_UNSIGNED(TREE_TYPE(exp));
bool DestIsSigned = !TYPE_UNSIGNED(type);
- Instruction::CastOps opcode = CastInst::getCastOpcode(C, SrcIsSigned, DestTy,
- DestIsSigned);
+ Instruction::CastOps opcode =
+ CastInst::getCastOpcode(C, SrcIsSigned, DestTy, DestIsSigned);
C = Folder.CreateCast(opcode, C, DestTy);
return RepresentAsMemory(C, type, Folder);
@@ -720,8 +721,8 @@ static Constant *ConvertInitializerWithC
/// moment only INTEGER_CST, REAL_CST, COMPLEX_CST and VECTOR_CST are supported.
static Constant *ConvertCST(tree exp, TargetFolder &) {
const tree type = main_type(exp);
- unsigned SizeInChars = (TREE_INT_CST_LOW(TYPE_SIZE(type)) + CHAR_BIT - 1) /
- CHAR_BIT;
+ unsigned SizeInChars =
+ (TREE_INT_CST_LOW(TYPE_SIZE(type)) + CHAR_BIT - 1) / CHAR_BIT;
// Encode the constant in Buffer in target format.
SmallVector<uint8_t, 16> Buffer(SizeInChars);
unsigned CharsWritten = native_encode_expr(exp, &Buffer[0], SizeInChars);
@@ -742,8 +743,8 @@ static Constant *ConvertSTRING_CST(tree
std::vector<Constant *> Elts;
if (ElTy->isIntegerTy(8)) {
- const unsigned char *InStr = (const unsigned char *)TREE_STRING_POINTER(
- exp);
+ const unsigned char *InStr =
+ (const unsigned char *)TREE_STRING_POINTER(exp);
for (unsigned i = 0; i != Len; ++i)
Elts.push_back(ConstantInt::get(Type::getInt8Ty(Context), InStr[i]));
} else if (ElTy->isIntegerTy(16)) {
@@ -757,8 +758,8 @@ static Constant *ConvertSTRING_CST(tree
if (llvm::sys::isBigEndianHost() == BYTES_BIG_ENDIAN)
Elts.push_back(ConstantInt::get(Type::getInt16Ty(Context), InStr[i]));
else
- Elts.push_back(ConstantInt::get(Type::getInt16Ty(Context),
- ByteSwap_16(InStr[i])));
+ Elts.push_back(
+ ConstantInt::get(Type::getInt16Ty(Context), ByteSwap_16(InStr[i])));
}
} else if (ElTy->isIntegerTy(32)) {
assert((Len & 3) == 0 &&
@@ -771,15 +772,15 @@ static Constant *ConvertSTRING_CST(tree
if (llvm::sys::isBigEndianHost() == BYTES_BIG_ENDIAN)
Elts.push_back(ConstantInt::get(Type::getInt32Ty(Context), InStr[i]));
else
- Elts.push_back(ConstantInt::get(Type::getInt32Ty(Context),
- ByteSwap_32(InStr[i])));
+ Elts.push_back(
+ ConstantInt::get(Type::getInt32Ty(Context), ByteSwap_32(InStr[i])));
}
} else {
llvm_unreachable("Unknown character type!");
}
- unsigned LenInElts = Len / TREE_INT_CST_LOW(
- TYPE_SIZE_UNIT(main_type(main_type(exp))));
+ unsigned LenInElts =
+ Len / TREE_INT_CST_LOW(TYPE_SIZE_UNIT(main_type(main_type(exp))));
unsigned ConstantSize = StrTy->getNumElements();
if (LenInElts != ConstantSize) {
@@ -830,8 +831,9 @@ static Constant *ConvertArrayCONSTRUCTOR
// Resize to the number of array elements if known. This ensures that every
// element will be at least default initialized even if no initial value is
// given for it.
- uint64_t TypeElts = isa<ARRAY_TYPE>(init_type) ? ArrayLengthOf(init_type) :
- TYPE_VECTOR_SUBPARTS(init_type);
+ uint64_t TypeElts =
+ isa<ARRAY_TYPE>(init_type) ? ArrayLengthOf(init_type)
+ : TYPE_VECTOR_SUBPARTS(init_type);
if (TypeElts != NO_LENGTH)
Elts.resize(TypeElts);
@@ -961,9 +963,9 @@ static Constant *ConvertArrayCONSTRUCTOR
if (NumSameType == 1)
StructElt = Elts[First];
else
- StructElt = ConstantArray::get(ArrayType::get(Ty, NumSameType),
- ArrayRef<Constant *>(&Elts[First],
- NumSameType));
+ StructElt =
+ ConstantArray::get(ArrayType::get(Ty, NumSameType),
+ ArrayRef<Constant *>(&Elts[First], NumSameType));
StructElts.push_back(StructElt);
First = Last;
}
@@ -1028,8 +1030,8 @@ class FieldContents {
public:
/// get - Fill the range [first, last) with the given constant.
- static FieldContents get(int first, int last, Constant *c,
- TargetFolder &folder) {
+ static FieldContents
+ get(int first, int last, Constant *c, TargetFolder &folder) {
return FieldContents(SignedRange(first, last), c, first, folder);
}
@@ -1077,8 +1079,8 @@ public:
if ((C->getType()->getPrimitiveSizeInBits() % BITS_PER_UNIT) != 0) {
Type *Ty = C->getType();
assert(Ty->isIntegerTy() && "Non-integer type with non-byte size!");
- unsigned BitWidth = RoundUpToAlignment(Ty->getPrimitiveSizeInBits(),
- BITS_PER_UNIT);
+ unsigned BitWidth =
+ RoundUpToAlignment(Ty->getPrimitiveSizeInBits(), BITS_PER_UNIT);
Ty = IntegerType::get(Context, BitWidth);
C = TheFolder->CreateZExtOrBitCast(C, Ty);
if (isSafeToReturnContentsDirectly(DL))
@@ -1206,16 +1208,16 @@ static Constant *ConvertRecordCONSTRUCTO
assert(isa<FIELD_DECL>(field) && "Initial value not for a field!");
assert(OffsetIsLLVMCompatible(field) && "Field position not known!");
// Turn the initial value for this field into an LLVM constant.
- Constant *Init = ConvertInitializerWithCast(value, main_type(field),
- Folder);
+ Constant *Init =
+ ConvertInitializerWithCast(value, main_type(field), Folder);
// Work out the range of bits occupied by the field.
uint64_t FirstBit = getFieldOffsetInBits(field);
assert(FirstBit <= TypeSize && "Field off end of type!");
// If a size was specified for the field then use it. Otherwise take the
// size from the initial value.
- uint64_t BitWidth = isInt64(DECL_SIZE(field), true) ?
- getInt64(DECL_SIZE(field), true) :
- DL.getTypeAllocSizeInBits(Init->getType());
+ uint64_t BitWidth = isInt64(DECL_SIZE(field), true)
+ ? getInt64(DECL_SIZE(field), true)
+ : DL.getTypeAllocSizeInBits(Init->getType());
uint64_t LastBit = FirstBit + BitWidth;
// Set the bits occupied by the field to the initial value.
@@ -1364,9 +1366,9 @@ static Constant *ConvertPOINTER_PLUS_EXP
// Convert the pointer into an i8* and add the offset to it.
Ptr = Folder.CreateBitCast(Ptr, GetUnitPointerType(Context));
- Constant *Result = POINTER_TYPE_OVERFLOW_UNDEFINED ?
- Folder.CreateInBoundsGetElementPtr(Ptr, Idx) :
- Folder.CreateGetElementPtr(Ptr, Idx);
+ Constant *Result = POINTER_TYPE_OVERFLOW_UNDEFINED
+ ? Folder.CreateInBoundsGetElementPtr(Ptr, Idx)
+ : Folder.CreateGetElementPtr(Ptr, Idx);
// The result may be of a different pointer type.
Result = Folder.CreateBitCast(Result, getRegType(TREE_TYPE(exp)));
@@ -1535,9 +1537,9 @@ static Constant *AddressOfARRAY_REF(tree
Type *EltTy = ConvertType(main_type(main_type(array)));
ArrayAddr = Folder.CreateBitCast(ArrayAddr, EltTy->getPointerTo());
- return POINTER_TYPE_OVERFLOW_UNDEFINED ?
- Folder.CreateInBoundsGetElementPtr(ArrayAddr, IndexVal) :
- Folder.CreateGetElementPtr(ArrayAddr, IndexVal);
+ return POINTER_TYPE_OVERFLOW_UNDEFINED
+ ? Folder.CreateInBoundsGetElementPtr(ArrayAddr, IndexVal)
+ : Folder.CreateGetElementPtr(ArrayAddr, IndexVal);
}
/// AddressOfCOMPONENT_REF - Return the address of a field in a record.
@@ -1552,8 +1554,8 @@ static Constant *AddressOfCOMPONENT_REF(
// (DECL_OFFSET_ALIGN / BITS_PER_UNIT). Convert to units.
unsigned factor = DECL_OFFSET_ALIGN(field_decl) / BITS_PER_UNIT;
if (factor != 1)
- Offset = Folder.CreateMul(Offset,
- ConstantInt::get(Offset->getType(), factor));
+ Offset =
+ Folder.CreateMul(Offset, ConstantInt::get(Offset->getType(), factor));
} else {
assert(DECL_FIELD_OFFSET(field_decl) && "Field offset not available!");
Offset = getAsRegister(DECL_FIELD_OFFSET(field_decl), Folder);
@@ -1564,8 +1566,8 @@ static Constant *AddressOfCOMPONENT_REF(
// Incorporate as much of it as possible into the pointer computation.
uint64_t Units = BitStart / BITS_PER_UNIT;
if (Units > 0) {
- Offset = Folder.CreateAdd(Offset,
- ConstantInt::get(Offset->getType(), Units));
+ Offset =
+ Folder.CreateAdd(Offset, ConstantInt::get(Offset->getType(), Units));
BitStart -= Units * BITS_PER_UNIT;
(void) BitStart;
}
@@ -1581,8 +1583,8 @@ static Constant *AddressOfCOMPONENT_REF(
}
/// AddressOfCOMPOUND_LITERAL_EXPR - Return the address of a compound literal.
-static Constant *AddressOfCOMPOUND_LITERAL_EXPR(tree exp,
- TargetFolder &Folder) {
+static Constant *
+AddressOfCOMPOUND_LITERAL_EXPR(tree exp, TargetFolder &Folder) {
tree decl = DECL_EXPR_DECL(COMPOUND_LITERAL_EXPR_DECL_EXPR(exp));
return AddressOfImpl(decl, Folder);
}
Modified: dragonegg/trunk/src/Convert.cpp
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/src/Convert.cpp?rev=176016&r1=176015&r2=176016&view=diff
==============================================================================
--- dragonegg/trunk/src/Convert.cpp (original)
+++ dragonegg/trunk/src/Convert.cpp Mon Feb 25 04:54:25 2013
@@ -161,8 +161,8 @@ static StringRef SelectFPName(tree type,
/// DisplaceLocationByUnits - Move a memory location by a fixed number of units.
/// This uses an "inbounds" getelementptr, so the displacement should remain
/// inside the original object.
-MemRef DisplaceLocationByUnits(MemRef Loc, int32_t Offset,
- LLVMBuilder &Builder) {
+MemRef
+DisplaceLocationByUnits(MemRef Loc, int32_t Offset, LLVMBuilder &Builder) {
// Convert to a byte pointer and displace by the offset.
unsigned AddrSpace = Loc.Ptr->getType()->getPointerAddressSpace();
Type *UnitPtrTy = GetUnitPointerType(Context, AddrSpace);
@@ -175,25 +175,25 @@ MemRef DisplaceLocationByUnits(MemRef Lo
}
/// LoadFromLocation - Load a value of the given type from a memory location.
-static LoadInst *LoadFromLocation(MemRef Loc, Type *Ty, MDNode *AliasTag,
- LLVMBuilder &Builder) {
+static LoadInst *
+LoadFromLocation(MemRef Loc, Type *Ty, MDNode *AliasTag, LLVMBuilder &Builder) {
unsigned AddrSpace = Loc.Ptr->getType()->getPointerAddressSpace();
Value *Ptr = Builder.CreateBitCast(Loc.Ptr, Ty->getPointerTo(AddrSpace));
- LoadInst *LI = Builder.CreateAlignedLoad(Ptr, Loc.getAlignment(),
- Loc.Volatile);
+ LoadInst *LI =
+ Builder.CreateAlignedLoad(Ptr, Loc.getAlignment(), Loc.Volatile);
if (AliasTag)
LI->setMetadata(LLVMContext::MD_tbaa, AliasTag);
return LI;
}
/// StoreToLocation - Store a value to the given memory location.
-static StoreInst *StoreToLocation(Value *V, MemRef Loc, MDNode *AliasTag,
- LLVMBuilder &Builder) {
+static StoreInst *
+StoreToLocation(Value *V, MemRef Loc, MDNode *AliasTag, LLVMBuilder &Builder) {
Type *Ty = V->getType();
unsigned AddrSpace = Loc.Ptr->getType()->getPointerAddressSpace();
Value *Ptr = Builder.CreateBitCast(Loc.Ptr, Ty->getPointerTo(AddrSpace));
- StoreInst *SI = Builder.CreateAlignedStore(V, Ptr, Loc.getAlignment(),
- Loc.Volatile);
+ StoreInst *SI =
+ Builder.CreateAlignedStore(V, Ptr, Loc.getAlignment(), Loc.Volatile);
if (AliasTag)
SI->setMetadata(LLVMContext::MD_tbaa, AliasTag);
return SI;
@@ -613,7 +613,7 @@ static bool isLocalDecl(tree decl) {
return
// GCC bug workaround: RESULT_DECL may not have DECL_CONTEXT set in thunks.
(!DECL_CONTEXT(decl) && isa<RESULT_DECL>(decl)) ||
- // Usual case.
+ // Usual case.
(DECL_CONTEXT(decl) == current_function_decl &&
!DECL_EXTERNAL(decl) && // External variables are not local.
!TREE_STATIC(decl) && // Static variables not considered local.
@@ -951,8 +951,8 @@ void TreeToLLVM::StartFunctionBody() {
// If a previous proto existed with the wrong type, replace any uses of it
// with the actual function and delete the proto.
if (FnEntry) {
- FnEntry->replaceAllUsesWith(TheFolder->CreateBitCast(Fn,
- FnEntry->getType()));
+ FnEntry->replaceAllUsesWith(
+ TheFolder->CreateBitCast(Fn, FnEntry->getType()));
changeLLVMConstant(FnEntry, Fn);
FnEntry->eraseFromParent();
}
@@ -1130,10 +1130,10 @@ void TreeToLLVM::StartFunctionBody() {
// Loading the value of a PARM_DECL at this point yields its initial value.
// Remember this for use when materializing the reads implied by SSA default
// definitions.
- SSAInsertionPoint = Builder.Insert(CastInst::Create(
- Instruction::BitCast,
- Constant::getNullValue(Type::getInt32Ty(Context)),
- Type::getInt32Ty(Context)), "ssa point");
+ SSAInsertionPoint = Builder.Insert(
+ CastInst::Create(Instruction::BitCast,
+ Constant::getNullValue(Type::getInt32Ty(Context)),
+ Type::getInt32Ty(Context)), "ssa point");
// If this function has nested functions, we should handle a potential
// nonlocal_goto_save_area.
@@ -1314,14 +1314,14 @@ Function *TreeToLLVM::FinishFunctionBody
!isa<COMPLEX_TYPE>(TREE_TYPE(TreeRetVal))) {
// If the DECL_RESULT is a scalar type, just load out the return value
// and return it.
- LoadInst *Load = Builder.CreateAlignedLoad(ResultLV.Ptr,
- ResultLV.getAlignment());
+ LoadInst *Load =
+ Builder.CreateAlignedLoad(ResultLV.Ptr, ResultLV.getAlignment());
RetVals.push_back(Builder.CreateBitCast(Load, Fn->getReturnType()));
} else {
uint64_t ResultSize = getDataLayout().getTypeAllocSize(
- ConvertType(TREE_TYPE(TreeRetVal)));
- uint64_t ReturnSize = getDataLayout().getTypeAllocSize(
- Fn->getReturnType());
+ ConvertType(TREE_TYPE(TreeRetVal)));
+ uint64_t ReturnSize =
+ getDataLayout().getTypeAllocSize(Fn->getReturnType());
// The load does not necessarily start at the beginning of the aggregate
// (x86-64).
@@ -1331,15 +1331,15 @@ Function *TreeToLLVM::FinishFunctionBody
} else {
// Advance to the point we want to load from.
if (ReturnOffset) {
- ResultLV.Ptr = Builder.CreateBitCast(ResultLV.Ptr,
- Type::getInt8PtrTy(Context));
- ResultLV.Ptr = Builder.CreateGEP(
- ResultLV.Ptr,
- ConstantInt::get(DL.getIntPtrType(Context, 0),
- ReturnOffset),
- flag_verbose_asm ? "rtvl" : "");
- ResultLV.setAlignment(MinAlign(ResultLV.getAlignment(),
- ReturnOffset));
+ ResultLV.Ptr = Builder
+ .CreateBitCast(ResultLV.Ptr, Type::getInt8PtrTy(Context));
+ ResultLV.Ptr =
+ Builder.CreateGEP(ResultLV.Ptr,
+ ConstantInt::get(DL.getIntPtrType(Context, 0),
+ ReturnOffset),
+ flag_verbose_asm ? "rtvl" : "");
+ ResultLV.setAlignment(
+ MinAlign(ResultLV.getAlignment(), ReturnOffset));
ResultSize -= ReturnOffset;
}
@@ -1361,9 +1361,8 @@ Function *TreeToLLVM::FinishFunctionBody
Idxs[1] = Builder.getInt32(ri);
Value *GEP = Builder.CreateGEP(ReturnLoc.Ptr, Idxs,
flag_verbose_asm ? "mrv_gep" : "");
- Value *E = Builder.CreateAlignedLoad(GEP, /*Align*/ Packed,
- flag_verbose_asm ? "mrv" :
- "");
+ Value *E = Builder.CreateAlignedLoad(
+ GEP, /*Align*/ Packed, flag_verbose_asm ? "mrv" : "");
RetVals.push_back(E);
}
// If the return type specifies an empty struct then return one.
@@ -1433,8 +1432,8 @@ Function *TreeToLLVM::FinishFunctionBody
#else
// When checks are enabled, complain if an SSA name was used but not defined.
#endif
- for (DenseMap<tree, TrackingVH<Value> >::const_iterator I = SSANames
- .begin(), E = SSANames.end();
+ for (DenseMap<tree, TrackingVH<Value> >::const_iterator I =
+ SSANames.begin(), E = SSANames.end();
I != E; ++I) {
Value *NameDef = I->second;
// If this is not a placeholder then the SSA name was defined.
@@ -1657,9 +1656,8 @@ void TreeToLLVM::EmitAggregate(tree exp,
}
LValue LV = EmitLV(exp);
assert(!LV.isBitfield() && "Bitfields containing aggregates not supported!");
- EmitAggregateCopy(DestLoc,
- MemRef(LV.Ptr, LV.getAlignment(), TREE_THIS_VOLATILE(exp)),
- TREE_TYPE(exp));
+ EmitAggregateCopy(DestLoc, MemRef(LV.Ptr, LV.getAlignment(),
+ TREE_THIS_VOLATILE(exp)), TREE_TYPE(exp));
}
/// get_constant_alignment - Return the alignment of constant EXP in bits.
@@ -1799,8 +1797,8 @@ Value *TreeToLLVM::CastToAnyType(Value *
// The types are different so we must cast. Use getCastOpcode to create an
// inferred cast opcode.
- Instruction::CastOps opc = CastInst::getCastOpcode(Src, SrcIsSigned, DestTy,
- DestIsSigned);
+ Instruction::CastOps opc =
+ CastInst::getCastOpcode(Src, SrcIsSigned, DestTy, DestIsSigned);
// Generate the cast and return it.
return Builder.CreateCast(opc, Src, DestTy);
@@ -1834,8 +1832,8 @@ Constant *TreeToLLVM::CastToAnyType(Cons
// The types are different so we must cast. Use getCastOpcode to create an
// inferred cast opcode.
- Instruction::CastOps opc = CastInst::getCastOpcode(Src, SrcIsSigned, DestTy,
- DestIsSigned);
+ Instruction::CastOps opc =
+ CastInst::getCastOpcode(Src, SrcIsSigned, DestTy, DestIsSigned);
// Generate the cast and return it.
return TheFolder->CreateCast(opc, Src, DestTy);
@@ -1854,10 +1852,9 @@ Value *TreeToLLVM::CastFromSameSizeInteg
}
if (EltTy->isPointerTy()) {
// A pointer/vector of pointer - use inttoptr.
- assert(
- V->getType()->getScalarType()->getPrimitiveSizeInBits() ==
- DL.getPointerSizeInBits(cast<PointerType>(EltTy)->getAddressSpace()) &&
- "Pointer type not same size!");
+ assert(V->getType()->getScalarType()->getPrimitiveSizeInBits() ==
+ DL.getPointerSizeInBits(cast<PointerType>(
+ EltTy)->getAddressSpace()) && "Pointer type not same size!");
return Builder.CreateIntToPtr(V, Ty);
}
// Everything else.
@@ -1895,8 +1892,8 @@ Value *TreeToLLVM::CastToFPType(Value *V
unsigned DstBits = Ty->getPrimitiveSizeInBits();
if (SrcBits == DstBits)
return V;
- Instruction::CastOps opcode = (SrcBits > DstBits ? Instruction::FPTrunc :
- Instruction::FPExt);
+ Instruction::CastOps opcode =
+ (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt);
return Builder.CreateCast(opcode, V, Ty);
}
@@ -1943,10 +1940,9 @@ AllocaInst *TreeToLLVM::CreateTemporary(
// alloc instructions before. It doesn't matter what this instruction is,
// it is dead. This allows us to insert allocas in order without having to
// scan for an insertion point. Use BitCast for int -> int
- AllocaInsertionPoint =
- CastInst::Create(Instruction::BitCast,
- Constant::getNullValue(Type::getInt32Ty(Context)),
- Type::getInt32Ty(Context), "alloca point");
+ AllocaInsertionPoint = CastInst::Create(
+ Instruction::BitCast, Constant::getNullValue(Type::getInt32Ty(Context)),
+ Type::getInt32Ty(Context), "alloca point");
// Insert it as the first instruction in the entry block.
Fn->begin()->getInstList()
.insert(Fn->begin()->begin(), AllocaInsertionPoint);
@@ -2053,9 +2049,9 @@ void TreeToLLVM::CopyElementByElement(Me
if (!isa<AGGREGATE_TYPE>(type)) {
// Copy scalar.
MDNode *AliasTag = describeAliasSet(type);
- StoreRegisterToMemory(LoadRegisterFromMemory(SrcLoc, type, AliasTag,
- Builder),
- DestLoc, type, AliasTag, Builder);
+ StoreRegisterToMemory(
+ LoadRegisterFromMemory(SrcLoc, type, AliasTag, Builder), DestLoc, type,
+ AliasTag, Builder);
return;
}
@@ -2075,12 +2071,10 @@ void TreeToLLVM::CopyElementByElement(Me
// Get the address of the field.
int FieldIdx = GetFieldIndex(Field, Ty);
assert(FieldIdx != INT_MAX && "Should not be copying if no LLVM field!");
- Value *DestFieldPtr = Builder.CreateStructGEP(DestLoc.Ptr, FieldIdx,
- flag_verbose_asm ? "df" :
- "");
- Value *SrcFieldPtr = Builder.CreateStructGEP(SrcLoc.Ptr, FieldIdx,
- flag_verbose_asm ? "sf" :
- "");
+ Value *DestFieldPtr = Builder.CreateStructGEP(
+ DestLoc.Ptr, FieldIdx, flag_verbose_asm ? "df" : "");
+ Value *SrcFieldPtr = Builder.CreateStructGEP(
+ SrcLoc.Ptr, FieldIdx, flag_verbose_asm ? "sf" : "");
// Compute the field's alignment.
unsigned DestFieldAlign = DestLoc.getAlignment();
@@ -2113,9 +2107,9 @@ void TreeToLLVM::CopyElementByElement(Me
Value *DestCompPtr = DestLoc.Ptr, *SrcCompPtr = SrcLoc.Ptr;
if (i) {
DestCompPtr = Builder.CreateConstInBoundsGEP1_32(
- DestCompPtr, i, flag_verbose_asm ? "da" : "");
+ DestCompPtr, i, flag_verbose_asm ? "da" : "");
SrcCompPtr = Builder.CreateConstInBoundsGEP1_32(
- SrcCompPtr, i, flag_verbose_asm ? "sa" : "");
+ SrcCompPtr, i, flag_verbose_asm ? "sa" : "");
}
// Compute the component's alignment.
@@ -2209,7 +2203,7 @@ void TreeToLLVM::ZeroElementByElement(Me
Value *CompPtr = DestLoc.Ptr;
if (i)
CompPtr = Builder.CreateConstInBoundsGEP1_32(
- CompPtr, i, flag_verbose_asm ? "za" : "");
+ CompPtr, i, flag_verbose_asm ? "za" : "");
// Compute the component's alignment.
unsigned CompAlign = DestLoc.getAlignment();
@@ -2312,12 +2306,12 @@ void TreeToLLVM::EmitAnnotateIntrinsic(V
if (!annotateAttr)
return;
- Function *annotateFun = Intrinsic::getDeclaration(TheModule,
- Intrinsic::var_annotation);
+ Function *annotateFun =
+ Intrinsic::getDeclaration(TheModule, Intrinsic::var_annotation);
// Get file and line number
- Constant *lineNo = ConstantInt::get(Type::getInt32Ty(Context),
- DECL_SOURCE_LINE(decl));
+ Constant *lineNo =
+ ConstantInt::get(Type::getInt32Ty(Context), DECL_SOURCE_LINE(decl));
Constant *file = ConvertMetadataStringToGV(DECL_SOURCE_FILE(decl));
Type *SBP = Type::getInt8PtrTy(Context);
file = TheFolder->CreateBitCast(file, SBP);
@@ -2575,8 +2569,8 @@ void TreeToLLVM::EmitLandingPads() {
// the start of the corresponding landing pad. At this point each exception
// handling region has its own landing pad, which is only reachable via the
// unwind edges of the region's invokes.
- Type *UnwindDataTy = StructType::get(Builder.getInt8PtrTy(),
- Builder.getInt32Ty(), NULL);
+ Type *UnwindDataTy =
+ StructType::get(Builder.getInt8PtrTy(), Builder.getInt32Ty(), NULL);
for (unsigned LPadNo = 1; LPadNo < NormalInvokes.size(); ++LPadNo) {
// Get the list of invokes for this GCC landing pad.
SmallVector<InvokeInst *, 8> &InvokesForPad = NormalInvokes[LPadNo];
@@ -2603,9 +2597,8 @@ void TreeToLLVM::EmitLandingPads() {
"No exception handling personality!");
personality = lang_hooks.eh_personality();
}
- LandingPadInst *LPadInst = Builder.CreateLandingPad(UnwindDataTy,
- DECL_LLVM(personality),
- 0, "exc");
+ LandingPadInst *LPadInst = Builder.CreateLandingPad(
+ UnwindDataTy, DECL_LLVM(personality), 0, "exc");
// Store the exception pointer if made use of elsewhere.
if (RegionNo < ExceptionPtrs.size() && ExceptionPtrs[RegionNo]) {
@@ -2641,8 +2634,8 @@ void TreeToLLVM::EmitLandingPads() {
}
// Add the list of typeinfos as a filter clause.
- ArrayType *FilterTy = ArrayType::get(Builder.getInt8PtrTy(),
- TypeInfos.size());
+ ArrayType *FilterTy =
+ ArrayType::get(Builder.getInt8PtrTy(), TypeInfos.size());
LPadInst->addClause(ConstantArray::get(FilterTy, TypeInfos));
break;
}
@@ -2652,8 +2645,8 @@ void TreeToLLVM::EmitLandingPads() {
case ERT_MUST_NOT_THROW: {
// Same as a zero-length filter: add an empty filter clause.
ArrayType *FilterTy = ArrayType::get(Builder.getInt8PtrTy(), 0);
- LPadInst->addClause(ConstantArray::get(FilterTy,
- ArrayRef<Constant *>()));
+ LPadInst->addClause(
+ ConstantArray::get(FilterTy, ArrayRef<Constant *>()));
AllCaught = true;
break;
}
@@ -2728,13 +2721,12 @@ void TreeToLLVM::EmitFailureBlocks() {
// Generate a landingpad instruction with an empty (i.e. catch-all) filter
// clause.
- Type *UnwindDataTy = StructType::get(Builder.getInt8PtrTy(),
- Builder.getInt32Ty(), NULL);
+ Type *UnwindDataTy =
+ StructType::get(Builder.getInt8PtrTy(), Builder.getInt32Ty(), NULL);
tree personality = DECL_FUNCTION_PERSONALITY(FnDecl);
assert(personality && "No-throw region but no personality function!");
LandingPadInst *LPadInst = Builder.CreateLandingPad(
- UnwindDataTy, DECL_LLVM(personality), 1,
- "exc");
+ UnwindDataTy, DECL_LLVM(personality), 1, "exc");
ArrayType *FilterTy = ArrayType::get(Builder.getInt8PtrTy(), 0);
LPadInst->addClause(ConstantArray::get(FilterTy, ArrayRef<Constant *>()));
@@ -2835,10 +2827,10 @@ Value *TreeToLLVM::EmitLoadOfLValue(tree
// Shift the sign bit of the bitfield to the sign bit position in the loaded
// type. This zaps any extra bits occurring after the end of the bitfield.
unsigned FirstBitInVal = BYTES_BIG_ENDIAN ? LoadSizeInBits - LV.BitStart -
- LV.BitSize : LV.BitStart;
+ LV.BitSize : LV.BitStart;
if (FirstBitInVal + LV.BitSize != LoadSizeInBits) {
Value *ShAmt = ConstantInt::get(LoadType, LoadSizeInBits -
- (FirstBitInVal + LV.BitSize));
+ (FirstBitInVal + LV.BitSize));
Val = Builder.CreateShl(Val, ShAmt);
}
// Shift the first bit of the bitfield to be bit zero. This zaps any extra
@@ -2846,8 +2838,8 @@ Value *TreeToLLVM::EmitLoadOfLValue(tree
// this also duplicates the sign bit, giving a sign extended value.
bool isSigned = !TYPE_UNSIGNED(TREE_TYPE(exp));
Value *ShAmt = ConstantInt::get(LoadType, LoadSizeInBits - LV.BitSize);
- Val = isSigned ? Builder.CreateAShr(Val, ShAmt) :
- Builder.CreateLShr(Val, ShAmt);
+ Val = isSigned ? Builder.CreateAShr(Val, ShAmt)
+ : Builder.CreateLShr(Val, ShAmt);
// Get the bits as a value of the correct type.
// FIXME: This assumes the result is an integer.
@@ -2867,9 +2859,9 @@ Value *TreeToLLVM::EmitADDR_EXPR(tree ex
#if (GCC_MINOR < 7)
Value *TreeToLLVM::EmitCondExpr(tree exp) {
- return TriviallyTypeConvert(EmitReg_CondExpr(
- TREE_OPERAND(exp, 0), TREE_OPERAND(exp, 1), TREE_OPERAND(exp, 2)),
- getRegType(TREE_TYPE(exp)));
+ return TriviallyTypeConvert(
+ EmitReg_CondExpr(TREE_OPERAND(exp, 0), TREE_OPERAND(exp, 1),
+ TREE_OPERAND(exp, 2)), getRegType(TREE_TYPE(exp)));
}
#endif
@@ -2922,7 +2914,7 @@ Value *TreeToLLVM::EmitCONSTRUCTOR(tree
// Start out with the value zero'd out.
EmitAggregateZero(*DestLoc, type);
- VEC(constructor_elt, gc) * elt = CONSTRUCTOR_ELTS(exp);
+ VEC(constructor_elt, gc) *elt = CONSTRUCTOR_ELTS(exp);
switch (TREE_CODE(TREE_TYPE(exp))) {
case ARRAY_TYPE:
case RECORD_TYPE:
@@ -3211,8 +3203,8 @@ struct FunctionCallArgumentConversion :
void EnterField(unsigned FieldNo, llvm::Type *StructTy) {
Value *Loc = getAddress();
Loc = Builder.CreateBitCast(Loc, StructTy->getPointerTo());
- pushAddress(Builder.CreateStructGEP(Loc, FieldNo,
- flag_verbose_asm ? "elt" : ""));
+ pushAddress(
+ Builder.CreateStructGEP(Loc, FieldNo, flag_verbose_asm ? "elt" : ""));
}
void ExitField() {
assert(!LocStack.empty());
@@ -3265,8 +3257,8 @@ Value *TreeToLLVM::EmitCallOf(Value *Cal
}
tree fndecl = gimple_call_fndecl(stmt);
- tree fntype = fndecl ? TREE_TYPE(fndecl) :
- TREE_TYPE(TREE_TYPE(gimple_call_fn(stmt)));
+ tree fntype =
+ fndecl ? TREE_TYPE(fndecl) : TREE_TYPE(TREE_TYPE(gimple_call_fn(stmt)));
// Determine the calling convention.
CallingConv::ID CallingConvention = CallingConv::C;
@@ -3345,8 +3337,8 @@ Value *TreeToLLVM::EmitCallOf(Value *Cal
// If the caller and callee disagree about a parameter type but the difference
// is trivial, correct the type used by the caller.
- for (unsigned i = 0, e = std::min((unsigned) CallOperands.size(),
- FTy->getNumParams());
+ for (unsigned i =
+ 0, e = std::min((unsigned) CallOperands.size(), FTy->getNumParams());
i != e; ++i) {
Type *ExpectedTy = FTy->getParamType(i);
Type *ActualTy = CallOperands[i]->getType();
@@ -3406,10 +3398,10 @@ Value *TreeToLLVM::EmitCallOf(Value *Cal
Target = CreateTempLoc(ConvertType(gimple_call_return_type(stmt)));
if (DL.getTypeAllocSize(Call->getType()) <=
- DL.getTypeAllocSize(
- cast<PointerType>(Target.Ptr->getType())->getElementType())) {
- Value *Dest = Builder.CreateBitCast(Target.Ptr,
- Call->getType()->getPointerTo());
+ DL.getTypeAllocSize(cast<PointerType>(Target.Ptr->getType())
+ ->getElementType())) {
+ Value *Dest =
+ Builder.CreateBitCast(Target.Ptr, Call->getType()->getPointerTo());
LLVM_EXTRACT_MULTIPLE_RETURN_VALUE(Call, Dest, Target.Volatile, Builder);
} else {
// The call will return an aggregate value in registers, but
@@ -3421,10 +3413,11 @@ Value *TreeToLLVM::EmitCallOf(Value *Cal
AllocaInst *biggerTmp = CreateTemporary(Call->getType());
LLVM_EXTRACT_MULTIPLE_RETURN_VALUE(Call, biggerTmp, /*Volatile=*/ false,
Builder);
- EmitAggregateCopy(Target, MemRef(Builder.CreateBitCast(
- biggerTmp, Call->getType()->getPointerTo()),
- Target.getAlignment(), Target.Volatile),
- gimple_call_return_type(stmt));
+ EmitAggregateCopy(
+ Target, MemRef(Builder.CreateBitCast(biggerTmp,
+ Call->getType()->getPointerTo()),
+ Target.getAlignment(), Target.Volatile),
+ gimple_call_return_type(stmt));
}
return DestLoc ? 0 : Builder.CreateLoad(Target.Ptr);
@@ -3445,8 +3438,8 @@ Value *TreeToLLVM::EmitCallOf(Value *Cal
"Size mismatch in scalar to scalar conversion!");
Value *Tmp = CreateTemporary(Call->getType());
Builder.CreateStore(Call, Tmp);
- return Builder.CreateLoad(Builder.CreateBitCast(Tmp,
- RetTy->getPointerTo()));
+ return Builder.CreateLoad(
+ Builder.CreateBitCast(Tmp, RetTy->getPointerTo()));
}
// If the caller expects an aggregate, we have a situation where the ABI for
@@ -3464,10 +3457,9 @@ Value *TreeToLLVM::EmitCallOf(Value *Cal
int64_t MaxStoreSize = DL.getTypeAllocSize(AggTy);
if (Client.Offset) {
Ptr = Builder.CreateBitCast(Ptr, Type::getInt8PtrTy(Context));
- Ptr = Builder.CreateGEP(Ptr,
- ConstantInt::get(DL.getIntPtrType(Ptr->getType()),
- Client.Offset),
- flag_verbose_asm ? "ro" : "");
+ Ptr = Builder.CreateGEP(
+ Ptr, ConstantInt::get(DL.getIntPtrType(Ptr->getType()), Client.Offset),
+ flag_verbose_asm ? "ro" : "");
Align = MinAlign(Align, Client.Offset);
MaxStoreSize -= Client.Offset;
}
@@ -3522,8 +3514,8 @@ CallInst *TreeToLLVM::EmitSimpleCall(Str
#endif
va_end(ops);
- Type *RetTy = isa<VOID_TYPE>(ret_type) ? Type::getVoidTy(Context) :
- getRegType(ret_type);
+ Type *RetTy = isa<VOID_TYPE>(ret_type) ? Type::getVoidTy(Context)
+ : getRegType(ret_type);
// The LLVM argument types.
std::vector<Type *> ArgTys;
@@ -3611,8 +3603,8 @@ void TreeToLLVM::EmitModifyOfRegisterVar
// Turn this into a 'call void asm sideeffect "", "{reg}"(Ty %RHS)'.
std::vector<Type *> ArgTys;
ArgTys.push_back(RHS->getType());
- FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context), ArgTys,
- false);
+ FunctionType *FTy =
+ FunctionType::get(Type::getVoidTy(Context), ArgTys, false);
const char *Name = extractRegisterName(decl);
Name = LLVM_GET_REG_NAME(Name, decode_reg_name(Name));
@@ -3836,8 +3828,8 @@ static std::string CanonicalizeConstrain
// what it is. Cache this information in AnalyzedRegClasses once computed.
static std::map<unsigned, int> AnalyzedRegClasses;
- std::map<unsigned, int>::iterator I = AnalyzedRegClasses.lower_bound(
- RegClass);
+ std::map<unsigned, int>::iterator I =
+ AnalyzedRegClasses.lower_bound(RegClass);
int RegMember;
if (I != AnalyzedRegClasses.end() && I->first == RegClass) {
@@ -3947,7 +3939,7 @@ static void ChooseConstraintTuple(gimple
// are incremented as we go to point to the beginning of each
// comma-separated alternative.
const char **RunningConstraints =
- (const char * *) alloca((NumInputs + NumOutputs) * sizeof(const char *));
+ (const char **)alloca((NumInputs + NumOutputs) * sizeof(const char *));
memcpy(RunningConstraints, Constraints,
(NumInputs + NumOutputs) * sizeof(const char *));
// The entire point of this loop is to compute CommasToSkip.
@@ -4054,8 +4046,8 @@ Value *TreeToLLVM::BuildVector(const std
}
// Otherwise, insertelement the values to build the vector.
- Value *Result = UndefValue::get(VectorType::get(Ops[0]->getType(),
- Ops.size()));
+ Value *Result =
+ UndefValue::get(VectorType::get(Ops[0]->getType(), Ops.size()));
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i));
@@ -4164,9 +4156,9 @@ Value *TreeToLLVM::BuildBinaryAtomic(gim
Ty[1] = ResultTy->getPointerTo();
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
C[1] = Builder.CreateIntCast(
- C[1], Ty[0], /*isSigned*/ !TYPE_UNSIGNED(return_type), "cast");
- Value *Result = Builder.CreateAtomicRMW(Kind, C[0], C[1],
- SequentiallyConsistent);
+ C[1], Ty[0], /*isSigned*/ !TYPE_UNSIGNED(return_type), "cast");
+ Value *Result =
+ Builder.CreateAtomicRMW(Kind, C[0], C[1], SequentiallyConsistent);
if (PostOp)
Result = Builder.CreateBinOp(Instruction::BinaryOps(PostOp), Result, C[1]);
@@ -4174,8 +4166,8 @@ Value *TreeToLLVM::BuildBinaryAtomic(gim
return Result;
}
-Value *TreeToLLVM::BuildCmpAndSwapAtomic(gimple stmt, unsigned Bits,
- bool isBool) {
+Value *
+TreeToLLVM::BuildCmpAndSwapAtomic(gimple stmt, unsigned Bits, bool isBool) {
tree ptr = gimple_call_arg(stmt, 0);
tree old_val = gimple_call_arg(stmt, 1);
tree new_val = gimple_call_arg(stmt, 2);
@@ -4185,16 +4177,16 @@ Value *TreeToLLVM::BuildCmpAndSwapAtomic
Type *MemPtrTy = MemTy->getPointerTo();
Value *Ptr = Builder.CreateBitCast(EmitRegister(ptr), MemPtrTy);
- Value *Old_Val = CastToAnyType(EmitRegister(old_val),
- !TYPE_UNSIGNED(TREE_TYPE(old_val)), MemTy,
- !TYPE_UNSIGNED(TREE_TYPE(old_val)));
- Value *New_Val = CastToAnyType(EmitRegister(new_val),
- !TYPE_UNSIGNED(TREE_TYPE(new_val)), MemTy,
- !TYPE_UNSIGNED(TREE_TYPE(new_val)));
+ Value *Old_Val =
+ CastToAnyType(EmitRegister(old_val), !TYPE_UNSIGNED(TREE_TYPE(old_val)),
+ MemTy, !TYPE_UNSIGNED(TREE_TYPE(old_val)));
+ Value *New_Val =
+ CastToAnyType(EmitRegister(new_val), !TYPE_UNSIGNED(TREE_TYPE(new_val)),
+ MemTy, !TYPE_UNSIGNED(TREE_TYPE(new_val)));
Value *C[3] = { Ptr, Old_Val, New_Val };
- Value *Result = Builder.CreateAtomicCmpXchg(C[0], C[1], C[2],
- SequentiallyConsistent);
+ Value *Result =
+ Builder.CreateAtomicCmpXchg(C[0], C[1], C[2], SequentiallyConsistent);
if (isBool)
Result = Builder.CreateICmpEQ(Result, Old_Val);
@@ -4228,8 +4220,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
// If this builtin directly corresponds to an LLVM intrinsic, get the
// IntrinsicID now.
const char *BuiltinName = IDENTIFIER_POINTER(DECL_NAME(fndecl));
- Intrinsic::ID IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(
- TargetPrefix, BuiltinName);
+ Intrinsic::ID IntrinsicID =
+ Intrinsic::getIntrinsicForGCCBuiltin(TargetPrefix, BuiltinName);
if (IntrinsicID == Intrinsic::not_intrinsic) {
error("unsupported target builtin %<%s%> used", BuiltinName);
Type *ResTy = ConvertType(gimple_call_return_type(stmt));
@@ -4239,12 +4231,12 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
}
// Finally, map the intrinsic ID back to a name.
- TargetBuiltinCache[FnCode] = Intrinsic::getDeclaration(TheModule,
- IntrinsicID);
+ TargetBuiltinCache[FnCode] =
+ Intrinsic::getDeclaration(TheModule, IntrinsicID);
}
- Result = EmitCallOf(TargetBuiltinCache[FnCode], stmt, DestLoc,
- AttributeSet());
+ Result =
+ EmitCallOf(TargetBuiltinCache[FnCode], stmt, DestLoc, AttributeSet());
return true;
}
@@ -4371,8 +4363,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
Args[1] = Builder.CreateIntCast(Args[1], Type::getInt1Ty(Context),
/*isSigned*/ false);
- Result = Builder.CreateCall(Intrinsic::getDeclaration(
- TheModule, Intrinsic::objectsize, Ty), Args);
+ Result = Builder.CreateCall(
+ Intrinsic::getDeclaration(TheModule, Intrinsic::objectsize, Ty), Args);
return true;
}
// Unary bit counting intrinsics.
@@ -4397,9 +4389,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
ConstantInt::get(Result->getType(), 1));
tree return_type = gimple_call_return_type(stmt);
Type *DestTy = ConvertType(return_type);
- Result = Builder.CreateIntCast(Result, DestTy,
- /*isSigned*/ !TYPE_UNSIGNED(return_type),
- "cast");
+ Result = Builder.CreateIntCast(
+ Result, DestTy, /*isSigned*/ !TYPE_UNSIGNED(return_type), "cast");
return true;
}
case BUILT_IN_POPCOUNT: // These GCC builtins always return int.
@@ -4409,9 +4400,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
EmitBuiltinUnaryOp(Amt, Result, Intrinsic::ctpop);
tree return_type = gimple_call_return_type(stmt);
Type *DestTy = ConvertType(return_type);
- Result = Builder.CreateIntCast(Result, DestTy,
- /*isSigned*/ !TYPE_UNSIGNED(return_type),
- "cast");
+ Result = Builder.CreateIntCast(
+ Result, DestTy, /*isSigned*/ !TYPE_UNSIGNED(return_type), "cast");
return true;
}
case BUILT_IN_BSWAP32:
@@ -4420,9 +4410,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
EmitBuiltinUnaryOp(Amt, Result, Intrinsic::bswap);
tree return_type = gimple_call_return_type(stmt);
Type *DestTy = ConvertType(return_type);
- Result = Builder.CreateIntCast(Result, DestTy,
- /*isSigned*/ !TYPE_UNSIGNED(return_type),
- "cast");
+ Result = Builder.CreateIntCast(
+ Result, DestTy, /*isSigned*/ !TYPE_UNSIGNED(return_type), "cast");
return true;
}
@@ -4508,17 +4497,16 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
// The argument and return type of cttz should match the argument type of
// the ffs, but should ignore the return type of ffs.
Value *Amt = EmitMemory(gimple_call_arg(stmt, 0));
- Result = Builder.CreateCall2(Intrinsic::getDeclaration(
- TheModule, Intrinsic::cttz, Amt->getType()),
- Amt, Builder.getTrue());
+ Result = Builder.CreateCall2(
+ Intrinsic::getDeclaration(TheModule, Intrinsic::cttz, Amt->getType()),
+ Amt, Builder.getTrue());
Result = Builder.CreateAdd(Result, ConstantInt::get(Result->getType(), 1));
- Result = Builder.CreateIntCast(Result,
- ConvertType(gimple_call_return_type(stmt)),
- /*isSigned*/ false);
- Value *Cond = Builder.CreateICmpEQ(Amt,
- Constant::getNullValue(Amt->getType()));
+ Result = Builder.CreateIntCast(
+ Result, ConvertType(gimple_call_return_type(stmt)), /*isSigned*/ false);
+ Value *Cond =
+ Builder.CreateICmpEQ(Amt, Constant::getNullValue(Amt->getType()));
Result = Builder.CreateSelect(
- Cond, Constant::getNullValue(Result->getType()), Result);
+ Cond, Constant::getNullValue(Result->getType()), Result);
return true;
}
#if (GCC_MINOR > 6)
@@ -4954,9 +4942,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
Value *C[2] = { EmitMemory(gimple_call_arg(stmt, 0)),
EmitMemory(gimple_call_arg(stmt, 1)) };
C[0] = Builder.CreateBitCast(C[0], ResultTy->getPointerTo());
- C[1] = Builder.CreateIntCast(C[1], ResultTy,
- /*isSigned*/ !TYPE_UNSIGNED(return_type),
- "cast");
+ C[1] = Builder.CreateIntCast(
+ C[1], ResultTy, /*isSigned*/ !TYPE_UNSIGNED(return_type), "cast");
Result = Builder.CreateAtomicRMW(AtomicRMWInst::Nand, C[0], C[1],
SequentiallyConsistent);
@@ -5079,7 +5066,7 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
return false;
}
- bool TreeToLLVM::EmitBuiltinUnaryOp(Value *InVal, Value *&Result,
+ bool TreeToLLVM::EmitBuiltinUnaryOp(Value * InVal, Value * &Result,
Intrinsic::ID Id) {
// The intrinsic might be overloaded in which case the argument is of
// varying type. Make sure that we specify the actual type for "iAny"
@@ -5094,9 +5081,9 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
Value *TreeToLLVM::EmitBuiltinBitCountIntrinsic(gimple stmt,
Intrinsic::ID Id) {
Value *Amt = EmitMemory(gimple_call_arg(stmt, 0));
- Value *Result = Builder.CreateCall2(Intrinsic::getDeclaration(
- TheModule, Id, Amt->getType()),
- Amt, Builder.getTrue());
+ Value *Result = Builder.CreateCall2(
+ Intrinsic::getDeclaration(TheModule, Id, Amt->getType()), Amt,
+ Builder.getTrue());
tree return_type = gimple_call_return_type(stmt);
Type *DestTy = ConvertType(return_type);
return Builder.CreateIntCast(
@@ -5159,8 +5146,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
// Then type cast the result of the "ceil" call.
tree type = gimple_call_return_type(stmt);
Type *RetTy = getRegType(type);
- return TYPE_UNSIGNED(type) ? Builder.CreateFPToUI(Call, RetTy) :
- Builder.CreateFPToSI(Call, RetTy);
+ return TYPE_UNSIGNED(type) ? Builder.CreateFPToUI(Call, RetTy)
+ : Builder.CreateFPToSI(Call, RetTy);
}
Value *TreeToLLVM::EmitBuiltinLFLOOR(gimple stmt) {
@@ -5179,8 +5166,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
// Then type cast the result of the "floor" call.
tree type = gimple_call_return_type(stmt);
Type *RetTy = getRegType(type);
- return TYPE_UNSIGNED(type) ? Builder.CreateFPToUI(Call, RetTy) :
- Builder.CreateFPToSI(Call, RetTy);
+ return TYPE_UNSIGNED(type) ? Builder.CreateFPToUI(Call, RetTy)
+ : Builder.CreateFPToSI(Call, RetTy);
}
Value *TreeToLLVM::EmitBuiltinCEXPI(gimple stmt) {
@@ -5212,8 +5199,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
#ifdef TARGET_ADJUST_LLVM_CC
// Query the target for the calling convention to use.
tree fntype = build_function_type_list(
- void_type_node, arg_type, TYPE_POINTER_TO(arg_type),
- TYPE_POINTER_TO(arg_type), NULL_TREE);
+ void_type_node, arg_type, TYPE_POINTER_TO(arg_type),
+ TYPE_POINTER_TO(arg_type), NULL_TREE);
TARGET_ADJUST_LLVM_CC(CC, fntype);
#endif
@@ -5263,8 +5250,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
// Form the complex number "0 + i*arg".
Value *Arg = EmitRegister(arg);
- Value *CplxArg = CreateComplex(Constant::getNullValue(Arg->getType()),
- Arg);
+ Value *CplxArg =
+ CreateComplex(Constant::getNullValue(Arg->getType()), Arg);
// Call cexp and return the result. This is rather painful because complex
// numbers may be passed in funky ways and we don't have a proper interface
@@ -5317,8 +5304,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
assert(DL.getTypeAllocSize(CI->getType()) <=
DL.getTypeAllocSize(CplxTy) &&
"Complex number returned in too large registers!");
- Value *Dest = Builder.CreateBitCast(Target.Ptr,
- CI->getType()->getPointerTo());
+ Value *Dest =
+ Builder.CreateBitCast(Target.Ptr, CI->getType()->getPointerTo());
LLVM_EXTRACT_MULTIPLE_RETURN_VALUE(CI, Dest, Target.Volatile,
Builder);
return Builder.CreateLoad(Target.Ptr);
@@ -5338,13 +5325,13 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
}
}
- bool TreeToLLVM::EmitBuiltinConstantP(gimple stmt, Value *&Result) {
- Result = Constant::getNullValue(
- ConvertType(gimple_call_return_type(stmt)));
+ bool TreeToLLVM::EmitBuiltinConstantP(gimple stmt, Value * &Result) {
+ Result =
+ Constant::getNullValue(ConvertType(gimple_call_return_type(stmt)));
return true;
}
- bool TreeToLLVM::EmitBuiltinExtendPointer(gimple stmt, Value *&Result) {
+ bool TreeToLLVM::EmitBuiltinExtendPointer(gimple stmt, Value * &Result) {
tree arg0 = gimple_call_arg(stmt, 0);
Value *Amt = EmitMemory(arg0);
bool AmtIsSigned = !TYPE_UNSIGNED(TREE_TYPE(arg0));
@@ -5359,7 +5346,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
/// size checking builtin calls (e.g. __builtin___memcpy_chk into the
/// plain non-checking calls. If the size of the argument is either -1 (unknown)
/// or large enough to ensure no overflow (> len), then it's safe to do so.
- static bool OptimizeIntoPlainBuiltIn(gimple stmt, Value *Len, Value *Size) {
+ static bool OptimizeIntoPlainBuiltIn(gimple stmt, Value * Len,
+ Value * Size) {
if (BitCastInst *SizeBC = dyn_cast<BitCastInst>(Size))
Size = SizeBC->getOperand(0);
ConstantInt *SizeCI = dyn_cast<ConstantInt>(Size);
@@ -5384,7 +5372,7 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
/// EmitBuiltinMemCopy - Emit an llvm.memcpy or llvm.memmove intrinsic,
/// depending on the value of isMemMove.
- bool TreeToLLVM::EmitBuiltinMemCopy(gimple stmt, Value *&Result,
+ bool TreeToLLVM::EmitBuiltinMemCopy(gimple stmt, Value * &Result,
bool isMemMove, bool SizeCheck) {
if (SizeCheck) {
if (!validate_gimple_arglist(stmt, POINTER_TYPE, POINTER_TYPE,
@@ -5411,13 +5399,13 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
return false;
}
- Result = isMemMove ?
- EmitMemMove(DstV, SrcV, Len, std::min(SrcAlign, DstAlign)) :
- EmitMemCpy(DstV, SrcV, Len, std::min(SrcAlign, DstAlign));
+ Result =
+ isMemMove ? EmitMemMove(DstV, SrcV, Len, std::min(SrcAlign, DstAlign))
+ : EmitMemCpy(DstV, SrcV, Len, std::min(SrcAlign, DstAlign));
return true;
}
- bool TreeToLLVM::EmitBuiltinMemSet(gimple stmt, Value *&Result,
+ bool TreeToLLVM::EmitBuiltinMemSet(gimple stmt, Value * &Result,
bool SizeCheck) {
if (SizeCheck) {
if (!validate_gimple_arglist(stmt, POINTER_TYPE, INTEGER_TYPE,
@@ -5445,7 +5433,7 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
return true;
}
- bool TreeToLLVM::EmitBuiltinBZero(gimple stmt, Value *&/*Result*/) {
+ bool TreeToLLVM::EmitBuiltinBZero(gimple stmt, Value * &/*Result*/) {
if (!validate_gimple_arglist(stmt, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE))
return false;
@@ -5511,21 +5499,21 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
Ptr = Builder.CreateBitCast(Ptr, Type::getInt8PtrTy(Context));
- Builder.CreateCall4(Intrinsic::getDeclaration(TheModule,
- Intrinsic::prefetch),
- Ptr, ReadWrite, Locality, Data);
+ Builder.CreateCall4(
+ Intrinsic::getDeclaration(TheModule, Intrinsic::prefetch), Ptr,
+ ReadWrite, Locality, Data);
return true;
}
/// EmitBuiltinReturnAddr - Emit an llvm.returnaddress or llvm.frameaddress
/// instruction, depending on whether isFrame is true or not.
- bool TreeToLLVM::EmitBuiltinReturnAddr(gimple stmt, Value *&Result,
+ bool TreeToLLVM::EmitBuiltinReturnAddr(gimple stmt, Value * &Result,
bool isFrame) {
if (!validate_gimple_arglist(stmt, INTEGER_TYPE, VOID_TYPE))
return false;
- ConstantInt *Level = dyn_cast<ConstantInt>(
- EmitMemory(gimple_call_arg(stmt, 0)));
+ ConstantInt *Level =
+ dyn_cast<ConstantInt>(EmitMemory(gimple_call_arg(stmt, 0)));
if (!Level) {
if (isFrame)
error("invalid argument to %<__builtin_frame_address%>");
@@ -5534,16 +5522,17 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
return false;
}
- Intrinsic::ID IID = !isFrame ? Intrinsic::returnaddress :
- Intrinsic::frameaddress;
- Result = Builder.CreateCall(Intrinsic::getDeclaration(TheModule, IID),
- Level);
+ Intrinsic::ID IID =
+ !isFrame ? Intrinsic::returnaddress : Intrinsic::frameaddress;
+ Result =
+ Builder.CreateCall(Intrinsic::getDeclaration(TheModule, IID), Level);
Result = Builder.CreateBitCast(
- Result, ConvertType(gimple_call_return_type(stmt)));
+ Result, ConvertType(gimple_call_return_type(stmt)));
return true;
}
- bool TreeToLLVM::EmitBuiltinExtractReturnAddr(gimple stmt, Value *&Result) {
+ bool TreeToLLVM::EmitBuiltinExtractReturnAddr(gimple stmt,
+ Value * &Result) {
Value *Ptr = EmitMemory(gimple_call_arg(stmt, 0));
// FIXME: Actually we should do something like this:
@@ -5558,7 +5547,7 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
return true;
}
- bool TreeToLLVM::EmitBuiltinFrobReturnAddr(gimple stmt, Value *&Result) {
+ bool TreeToLLVM::EmitBuiltinFrobReturnAddr(gimple stmt, Value * &Result) {
Value *Ptr = EmitMemory(gimple_call_arg(stmt, 0));
// FIXME: Actually we should do something like this:
@@ -5572,12 +5561,12 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
return true;
}
- bool TreeToLLVM::EmitBuiltinStackSave(gimple stmt, Value *&Result) {
+ bool TreeToLLVM::EmitBuiltinStackSave(gimple stmt, Value * &Result) {
if (!validate_gimple_arglist(stmt, VOID_TYPE))
return false;
Result = Builder.CreateCall(
- Intrinsic::getDeclaration(TheModule, Intrinsic::stacksave));
+ Intrinsic::getDeclaration(TheModule, Intrinsic::stacksave));
return true;
}
@@ -5600,7 +5589,7 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
return true;
}
- bool TreeToLLVM::EmitBuiltinEHFilter(gimple stmt, Value *&Result) {
+ bool TreeToLLVM::EmitBuiltinEHFilter(gimple stmt, Value * &Result) {
// Lookup the local that holds the selector value for this region.
unsigned RegionNo = tree_low_cst(gimple_call_arg(stmt, 0), 0);
AllocaInst *Filter = getExceptionFilter(RegionNo);
@@ -5613,7 +5602,7 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
return true;
}
- bool TreeToLLVM::EmitBuiltinEHPointer(gimple stmt, Value *&Result) {
+ bool TreeToLLVM::EmitBuiltinEHPointer(gimple stmt, Value * &Result) {
// Lookup the local that holds the exception pointer for this region.
unsigned RegionNo = tree_low_cst(gimple_call_arg(stmt, 0), 0);
AllocaInst *ExcPtr = getExceptionPtr(RegionNo);
@@ -5651,21 +5640,21 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
#define HARD_REGNO_CALL_PART_CLOBBERED(REGNO, MODE) 0
#endif
- bool TreeToLLVM::EmitBuiltinDwarfCFA(gimple stmt, Value *&Result) {
+ bool TreeToLLVM::EmitBuiltinDwarfCFA(gimple stmt, Value * &Result) {
if (!validate_gimple_arglist(stmt, VOID_TYPE))
return false;
int cfa_offset = ARG_POINTER_CFA_OFFSET(exp);
// FIXME: is i32 always enough here?
- Result = Builder.CreateCall(Intrinsic::getDeclaration(
- TheModule, Intrinsic::eh_dwarf_cfa),
- Builder.getInt32(cfa_offset));
+ Result = Builder.CreateCall(
+ Intrinsic::getDeclaration(TheModule, Intrinsic::eh_dwarf_cfa),
+ Builder.getInt32(cfa_offset));
return true;
}
- bool TreeToLLVM::EmitBuiltinDwarfSPColumn(gimple stmt, Value *&Result) {
+ bool TreeToLLVM::EmitBuiltinDwarfSPColumn(gimple stmt, Value * &Result) {
if (!validate_gimple_arglist(stmt, VOID_TYPE))
return false;
@@ -5676,7 +5665,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
return true;
}
- bool TreeToLLVM::EmitBuiltinEHReturnDataRegno(gimple stmt, Value *&Result) {
+ bool TreeToLLVM::EmitBuiltinEHReturnDataRegno(gimple stmt,
+ Value * &Result) {
#ifdef EH_RETURN_DATA_REGNO
if (!validate_gimple_arglist(stmt, INTEGER_TYPE, VOID_TYPE))
return false;
@@ -5696,14 +5686,14 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
iwhich = DWARF_FRAME_REGNUM(iwhich);
- Result = ConstantInt::get(ConvertType(gimple_call_return_type(stmt)),
- iwhich);
+ Result =
+ ConstantInt::get(ConvertType(gimple_call_return_type(stmt)), iwhich);
#endif
return true;
}
- bool TreeToLLVM::EmitBuiltinEHReturn(gimple stmt, Value *&/*Result*/) {
+ bool TreeToLLVM::EmitBuiltinEHReturn(gimple stmt, Value * &/*Result*/) {
if (!validate_gimple_arglist(stmt, INTEGER_TYPE, POINTER_TYPE, VOID_TYPE))
return false;
@@ -5711,8 +5701,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
Value *Offset = EmitMemory(gimple_call_arg(stmt, 0));
Value *Handler = EmitMemory(gimple_call_arg(stmt, 1));
- Intrinsic::ID IID = IntPtr->isIntegerTy(32) ? Intrinsic::eh_return_i32 :
- Intrinsic::eh_return_i64;
+ Intrinsic::ID IID = IntPtr->isIntegerTy(32) ? Intrinsic::eh_return_i32
+ : Intrinsic::eh_return_i64;
Offset = Builder.CreateIntCast(Offset, IntPtr, /*isSigned*/ true);
Handler = Builder.CreateBitCast(Handler, Type::getInt8PtrTy(Context));
@@ -5726,7 +5716,7 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
}
bool TreeToLLVM::EmitBuiltinInitDwarfRegSizes(gimple stmt,
- Value *&/*Result*/) {
+ Value * &/*Result*/) {
#ifdef DWARF2_UNWIND_INFO
unsigned int i;
bool wrote_return_column = false;
@@ -5764,9 +5754,9 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
Size = Builder.getInt8(size);
Idx = Builder.getInt32(rnum);
- Builder.CreateStore(Size,
- Builder.CreateGEP(Addr, Idx, flag_verbose_asm ?
- "rnum" : ""), false);
+ Builder.CreateStore(
+ Size, Builder.CreateGEP(Addr, Idx,
+ flag_verbose_asm ? "rnum" : ""), false);
}
}
@@ -5781,8 +5771,9 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
#ifdef DWARF_ALT_FRAME_RETURN_COLUMN
Size = Builder.getInt8(GET_MODE_SIZE(Pmode));
Idx = Builder.getInt32(DWARF_ALT_FRAME_RETURN_COLUMN);
- Builder.CreateStore(Size, Builder.CreateGEP(Addr, Idx, flag_verbose_asm ?
- "acol" : ""), false);
+ Builder.CreateStore(
+ Size, Builder.CreateGEP(Addr, Idx, flag_verbose_asm ? "acol" : ""),
+ false);
#endif
#endif /* DWARF2_UNWIND_INFO */
@@ -5792,12 +5783,12 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
return true;
}
- bool TreeToLLVM::EmitBuiltinUnwindInit(gimple stmt, Value *&/*Result*/) {
+ bool TreeToLLVM::EmitBuiltinUnwindInit(gimple stmt, Value * &/*Result*/) {
if (!validate_gimple_arglist(stmt, VOID_TYPE))
return false;
- Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
- Intrinsic::eh_unwind_init));
+ Builder.CreateCall(
+ Intrinsic::getDeclaration(TheModule, Intrinsic::eh_unwind_init));
return true;
}
@@ -5814,7 +5805,7 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
return true;
}
- bool TreeToLLVM::EmitBuiltinAlloca(gimple stmt, Value *&Result) {
+ bool TreeToLLVM::EmitBuiltinAlloca(gimple stmt, Value * &Result) {
if (!validate_gimple_arglist(stmt, INTEGER_TYPE, VOID_TYPE))
return false;
Value *Amt = EmitMemory(gimple_call_arg(stmt, 0));
@@ -5824,7 +5815,7 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
return true;
}
- bool TreeToLLVM::EmitBuiltinAllocaWithAlign(gimple stmt, Value *&Result) {
+ bool TreeToLLVM::EmitBuiltinAllocaWithAlign(gimple stmt, Value * &Result) {
if (!validate_gimple_arglist(stmt, INTEGER_TYPE, INTEGER_TYPE, VOID_TYPE))
return false;
Value *Amt = EmitMemory(gimple_call_arg(stmt, 0));
@@ -5836,29 +5827,29 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
}
#if (GCC_MINOR > 6)
- bool TreeToLLVM::EmitBuiltinAssumeAligned(gimple stmt, Value *&Result) {
+ bool TreeToLLVM::EmitBuiltinAssumeAligned(gimple stmt, Value * &Result) {
if (!validate_gimple_arglist(stmt, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE))
return false;
// Return the pointer argument. TODO: Pass the alignment information on to
// the optimizers.
Value *Ptr = EmitRegister(gimple_call_arg(stmt, 0));
// Bitcast it to the return type.
- Ptr = TriviallyTypeConvert(Ptr,
- getRegType(gimple_call_return_type(stmt)));
+ Ptr =
+ TriviallyTypeConvert(Ptr, getRegType(gimple_call_return_type(stmt)));
Result = Reg2Mem(Ptr, gimple_call_return_type(stmt), Builder);
return true;
}
#endif
- bool TreeToLLVM::EmitBuiltinExpect(gimple stmt, Value *&Result) {
+ bool TreeToLLVM::EmitBuiltinExpect(gimple stmt, Value * &Result) {
tree type = gimple_call_return_type(stmt);
if (gimple_call_num_args(stmt) < 2) {
Result = Constant::getNullValue(ConvertType(type));
return true;
}
Type *ArgTy = getRegType(type);
- Value *ExpectIntr = Intrinsic::getDeclaration(TheModule,
- Intrinsic::expect, ArgTy);
+ Value *ExpectIntr =
+ Intrinsic::getDeclaration(TheModule, Intrinsic::expect, ArgTy);
Value *ArgValue = EmitRegister(gimple_call_arg(stmt, 0));
Value *ExpectedValue = EmitRegister(gimple_call_arg(stmt, 1));
Result = Builder.CreateCall2(ExpectIntr, ArgValue, ExpectedValue);
@@ -5879,8 +5870,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
return true;
}
- Constant *va_start = Intrinsic::getDeclaration(TheModule,
- Intrinsic::vastart);
+ Constant *va_start =
+ Intrinsic::getDeclaration(TheModule, Intrinsic::vastart);
Value *ArgVal = EmitMemory(gimple_call_arg(stmt, 0));
ArgVal = Builder.CreateBitCast(ArgVal, Type::getInt8PtrTy(Context));
Builder.CreateCall(va_start, ArgVal);
@@ -5926,12 +5917,12 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
return true;
}
- bool TreeToLLVM::EmitBuiltinAdjustTrampoline(gimple stmt, Value *&Result) {
+ bool TreeToLLVM::EmitBuiltinAdjustTrampoline(gimple stmt, Value * &Result) {
if (!validate_gimple_arglist(stmt, POINTER_TYPE, VOID_TYPE))
return false;
- Function *Intr = Intrinsic::getDeclaration(TheModule,
- Intrinsic::adjust_trampoline);
+ Function *Intr =
+ Intrinsic::getDeclaration(TheModule, Intrinsic::adjust_trampoline);
Value *Arg = Builder.CreateBitCast(EmitRegister(gimple_call_arg(stmt, 0)),
Builder.getInt8PtrTy());
Result = Builder.CreateCall(Intr, Arg);
@@ -5939,7 +5930,7 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
}
bool TreeToLLVM::EmitBuiltinInitTrampoline(gimple stmt,
- Value *&/*Result*/) {
+ Value * &/*Result*/) {
if (!validate_gimple_arglist(stmt, POINTER_TYPE, POINTER_TYPE,
POINTER_TYPE, VOID_TYPE))
return false;
@@ -5953,8 +5944,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
Builder.CreateBitCast(Func, VPTy),
Builder.CreateBitCast(Chain, VPTy) };
- Function *Intr = Intrinsic::getDeclaration(TheModule,
- Intrinsic::init_trampoline);
+ Function *Intr =
+ Intrinsic::getDeclaration(TheModule, Intrinsic::init_trampoline);
Builder.CreateCall(Intr, Ops);
return true;
}
@@ -5963,7 +5954,7 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
// ... Complex Math Expressions ...
//===----------------------------------------------------------------------===//
- Value *TreeToLLVM::CreateComplex(Value *Real, Value *Imag) {
+ Value *TreeToLLVM::CreateComplex(Value * Real, Value * Imag) {
assert(Real->getType() == Imag->getType() && "Component type mismatch!");
Type *EltTy = Real->getType();
Value *Result = UndefValue::get(StructType::get(EltTy, EltTy, NULL));
@@ -5972,7 +5963,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
return Result;
}
- void TreeToLLVM::SplitComplex(Value *Complex, Value *&Real, Value *&Imag) {
+ void TreeToLLVM::SplitComplex(Value * Complex, Value * &Real,
+ Value * &Imag) {
Real = Builder.CreateExtractValue(Complex, 0);
Imag = Builder.CreateExtractValue(Complex, 1);
}
@@ -5981,14 +5973,14 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
// ... L-Value Expressions ...
//===----------------------------------------------------------------------===//
- Value *TreeToLLVM::EmitFieldAnnotation(Value *FieldPtr, tree FieldDecl) {
- tree AnnotateAttr = lookup_attribute("annotate",
- DECL_ATTRIBUTES(FieldDecl));
+ Value *TreeToLLVM::EmitFieldAnnotation(Value * FieldPtr, tree FieldDecl) {
+ tree AnnotateAttr =
+ lookup_attribute("annotate", DECL_ATTRIBUTES(FieldDecl));
Type *SBP = Type::getInt8PtrTy(Context);
- Function *An = Intrinsic::getDeclaration(TheModule,
- Intrinsic::ptr_annotation, SBP);
+ Function *An =
+ Intrinsic::getDeclaration(TheModule, Intrinsic::ptr_annotation, SBP);
// Get file and line number. FIXME: Should this be for the decl or the
// use. Is there a location info for the use?
@@ -6021,8 +6013,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
// the GEP that is critical to distinguish between an annotate
// attribute on a whole struct from one on the first element of the
// struct.
- BitCastInst *CastFieldPtr = new BitCastInst(FieldPtr, SBP,
- FieldPtr->getName());
+ BitCastInst *CastFieldPtr =
+ new BitCastInst(FieldPtr, SBP, FieldPtr->getName());
Builder.Insert(CastFieldPtr);
Value *Ops[4] = { CastFieldPtr, Builder.CreateBitCast(strGV, SBP),
@@ -6061,8 +6053,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
tree LowerBound = array_ref_low_bound(exp);
if (!integer_zerop(LowerBound))
IndexVal = Builder.CreateSub(
- IndexVal, EmitRegisterWithCast(LowerBound, IndexType),
- "", hasNUW(TREE_TYPE(Index)), hasNSW(TREE_TYPE(Index)));
+ IndexVal, EmitRegisterWithCast(LowerBound, IndexType), "",
+ hasNUW(TREE_TYPE(Index)), hasNSW(TREE_TYPE(Index)));
LValue ArrayAddrLV = EmitLV(Array);
assert(!ArrayAddrLV.isBitfield() && "Arrays cannot be bitfields!");
@@ -6080,14 +6072,14 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
Type *EltTy = ConvertType(ElementType);
ArrayAddr = Builder.CreateBitCast(ArrayAddr, EltTy->getPointerTo());
StringRef GEPName = flag_verbose_asm ? "ar" : "";
- Value *Ptr = POINTER_TYPE_OVERFLOW_UNDEFINED ?
- Builder.CreateInBoundsGEP(ArrayAddr, IndexVal, GEPName) :
- Builder.CreateGEP(ArrayAddr, IndexVal, GEPName);
- unsigned Alignment = MinAlign(ArrayAlign,
- DL.getABITypeAlignment(EltTy));
- return LValue(Builder.CreateBitCast(
- Ptr, PointerType::getUnqual(ConvertType(TREE_TYPE(exp)))),
- Alignment);
+ Value *Ptr = POINTER_TYPE_OVERFLOW_UNDEFINED
+ ? Builder.CreateInBoundsGEP(ArrayAddr, IndexVal, GEPName)
+ : Builder.CreateGEP(ArrayAddr, IndexVal, GEPName);
+ unsigned Alignment =
+ MinAlign(ArrayAlign, DL.getABITypeAlignment(EltTy));
+ return LValue(
+ Builder.CreateBitCast(Ptr, PointerType::getUnqual(ConvertType(
+ TREE_TYPE(exp)))), Alignment);
}
// Otherwise, just do raw, low-level pointer arithmetic. FIXME: this could be
@@ -6095,12 +6087,12 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
// float foo(int w, float A[][w], int g) { return A[g][0]; }
if (isa<VOID_TYPE>(TREE_TYPE(ArrayTreeType))) {
- ArrayAddr = Builder.CreateBitCast(ArrayAddr,
- Type::getInt8PtrTy(Context));
+ ArrayAddr =
+ Builder.CreateBitCast(ArrayAddr, Type::getInt8PtrTy(Context));
StringRef GEPName = flag_verbose_asm ? "va" : "";
- ArrayAddr = POINTER_TYPE_OVERFLOW_UNDEFINED ?
- Builder.CreateInBoundsGEP(ArrayAddr, IndexVal, GEPName) :
- Builder.CreateGEP(ArrayAddr, IndexVal, GEPName);
+ ArrayAddr = POINTER_TYPE_OVERFLOW_UNDEFINED
+ ? Builder.CreateInBoundsGEP(ArrayAddr, IndexVal, GEPName)
+ : Builder.CreateGEP(ArrayAddr, IndexVal, GEPName);
return LValue(ArrayAddr, 1);
}
@@ -6111,8 +6103,7 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
// ScaleFactor is the size of the element type in units divided by (exactly)
// TYPE_ALIGN_UNIT(ElementType).
Value *ScaleFactor = Builder.CreateIntCast(
- EmitRegister(TREE_OPERAND(exp, 3)), IntPtrTy,
- /*isSigned*/ false);
+ EmitRegister(TREE_OPERAND(exp, 3)), IntPtrTy, /*isSigned*/ false);
assert(isPowerOf2_32(TYPE_ALIGN(ElementType)) &&
"Alignment not a power of two!");
assert(TYPE_ALIGN(ElementType) >= 8 &&
@@ -6124,11 +6115,12 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
IndexVal = Builder.CreateMul(IndexVal, ScaleFactor);
unsigned Alignment = MinAlign(ArrayAlign, TYPE_ALIGN(ElementType) / 8);
StringRef GEPName = flag_verbose_asm ? "ra" : "";
- Value *Ptr = POINTER_TYPE_OVERFLOW_UNDEFINED ?
- Builder.CreateInBoundsGEP(ArrayAddr, IndexVal, GEPName) :
- Builder.CreateGEP(ArrayAddr, IndexVal, GEPName);
- return LValue(Builder.CreateBitCast(
- Ptr, PointerType::getUnqual(ConvertType(TREE_TYPE(exp)))), Alignment);
+ Value *Ptr = POINTER_TYPE_OVERFLOW_UNDEFINED
+ ? Builder.CreateInBoundsGEP(ArrayAddr, IndexVal, GEPName)
+ : Builder.CreateGEP(ArrayAddr, IndexVal, GEPName);
+ return LValue(
+ Builder.CreateBitCast(Ptr, PointerType::getUnqual(ConvertType(
+ TREE_TYPE(exp)))), Alignment);
}
LValue TreeToLLVM::EmitLV_BIT_FIELD_REF(tree exp) {
@@ -6184,14 +6176,14 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
assert((!StructAddrLV.isBitfield() || StructAddrLV.BitStart == 0) &&
"structs cannot be bitfields!");
- StructAddrLV.Ptr = Builder.CreateBitCast(StructAddrLV.Ptr,
- StructTy->getPointerTo());
+ StructAddrLV.Ptr =
+ Builder.CreateBitCast(StructAddrLV.Ptr, StructTy->getPointerTo());
Type *FieldTy = ConvertType(TREE_TYPE(FieldDecl));
// BitStart - This is the actual offset of the field from the start of the
// struct, in bits. For bitfields this may be on a non-byte boundary.
- uint64_t FieldBitOffset = getInt64(DECL_FIELD_BIT_OFFSET(FieldDecl),
- true);
+ uint64_t FieldBitOffset =
+ getInt64(DECL_FIELD_BIT_OFFSET(FieldDecl), true);
unsigned BitStart;
Value *FieldPtr;
@@ -6215,7 +6207,7 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
unsigned factor = DECL_OFFSET_ALIGN(FieldDecl) / 8;
if (factor != 1)
Offset = Builder.CreateMul(
- Offset, ConstantInt::get(Offset->getType(), factor));
+ Offset, ConstantInt::get(Offset->getType(), factor));
} else {
assert(DECL_FIELD_OFFSET(FieldDecl) && "Field offset not available!");
Offset = EmitRegister(DECL_FIELD_OFFSET(FieldDecl));
@@ -6223,7 +6215,7 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
unsigned factor = BITS_PER_UNIT / 8;
if (factor != 1)
Offset = Builder.CreateMul(
- Offset, ConstantInt::get(Offset->getType(), factor));
+ Offset, ConstantInt::get(Offset->getType(), factor));
}
// Here BitStart gives the offset of the field in bits from Offset.
@@ -6232,8 +6224,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
// Incorporate as much of it as possible into the pointer computation.
unsigned ByteOffset = BitStart / 8;
if (ByteOffset > 0) {
- Offset = Builder.CreateAdd(Offset, ConstantInt::get(Offset->getType(),
- ByteOffset));
+ Offset = Builder.CreateAdd(
+ Offset, ConstantInt::get(Offset->getType(), ByteOffset));
BitStart -= ByteOffset * 8;
}
@@ -6297,12 +6289,12 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
LValue TreeToLLVM::EmitLV_INDIRECT_REF(tree exp) {
// The lvalue is just the address.
- LValue LV = LValue(EmitRegister(TREE_OPERAND(exp, 0)),
- expr_align(exp) / 8);
+ LValue LV =
+ LValue(EmitRegister(TREE_OPERAND(exp, 0)), expr_align(exp) / 8);
// May need to change pointer type, for example when INDIRECT_REF is applied
// to a void*, resulting in a non-void type.
- LV.Ptr = Builder.CreateBitCast(LV.Ptr, ConvertType(TREE_TYPE(exp))
- ->getPointerTo());
+ LV.Ptr = Builder
+ .CreateBitCast(LV.Ptr, ConvertType(TREE_TYPE(exp))->getPointerTo());
return LV;
}
@@ -6315,9 +6307,9 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
Addr = Builder.CreateBitCast(Addr, GetUnitPointerType(Context));
APInt Offset = getAPIntValue(TREE_OPERAND(exp, 1));
// The address is always inside the referenced object, so "inbounds".
- Addr = Builder.CreateInBoundsGEP(Addr,
- ConstantInt::get(Context, Offset),
- flag_verbose_asm ? "mrf" : "");
+ Addr =
+ Builder.CreateInBoundsGEP(Addr, ConstantInt::get(Context, Offset),
+ flag_verbose_asm ? "mrf" : "");
}
// Ensure the pointer has the right type.
@@ -6352,8 +6344,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
LValue LV = LValue(EmitRegister(TREE_OPERAND(exp, 0)), Alignment / 8);
// May need to change pointer type, for example when MISALIGNED_INDIRECT_REF
// is applied to a void*, resulting in a non-void type.
- LV.Ptr = Builder.CreateBitCast(LV.Ptr, ConvertType(TREE_TYPE(exp))
- ->getPointerTo());
+ LV.Ptr = Builder
+ .CreateBitCast(LV.Ptr, ConvertType(TREE_TYPE(exp))->getPointerTo());
return LV;
}
#endif
@@ -6362,8 +6354,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
// The address is the address of the operand.
LValue LV = EmitLV(TREE_OPERAND(exp, 0));
// The type is the type of the expression.
- LV.Ptr = Builder.CreateBitCast(LV.Ptr, ConvertType(TREE_TYPE(exp))
- ->getPointerTo());
+ LV.Ptr = Builder
+ .CreateBitCast(LV.Ptr, ConvertType(TREE_TYPE(exp))->getPointerTo());
return LV;
}
@@ -6384,8 +6376,9 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
// IMAGPART alignment = MinAlign(Ptr.Alignment, sizeof field);
Alignment = MinAlign(Ptr.getAlignment(),
DL.getTypeAllocSize(Ptr.Ptr->getType()));
- return LValue(Builder.CreateStructGEP(Ptr.Ptr, Idx, flag_verbose_asm ?
- "prtxpr" : ""), Alignment);
+ return LValue(Builder.CreateStructGEP(Ptr.Ptr, Idx,
+ flag_verbose_asm ? "prtxpr" : ""),
+ Alignment);
}
LValue TreeToLLVM::EmitLV_SSA_NAME(tree exp) {
@@ -6428,8 +6421,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
}
if (TMR_OFFSET(exp) && !integer_zerop(TMR_OFFSET(exp))) {
- Constant *Off = ConstantInt::get(Context,
- getAPIntValue(TMR_OFFSET(exp)));
+ Constant *Off =
+ ConstantInt::get(Context, getAPIntValue(TMR_OFFSET(exp)));
Delta = Delta ? Builder.CreateAdd(Delta, Off) : Off;
}
@@ -6437,9 +6430,9 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
// Advance the base pointer by the given number of units.
Addr = Builder.CreateBitCast(Addr, GetUnitPointerType(Context));
StringRef GEPName = flag_verbose_asm ? "" : "tmrf";
- Addr = POINTER_TYPE_OVERFLOW_UNDEFINED ?
- Builder.CreateInBoundsGEP(Addr, Delta, GEPName) :
- Builder.CreateGEP(Addr, Delta, GEPName);
+ Addr = POINTER_TYPE_OVERFLOW_UNDEFINED
+ ? Builder.CreateInBoundsGEP(Addr, Delta, GEPName)
+ : Builder.CreateGEP(Addr, Delta, GEPName);
}
// The result can be of a different pointer type even if we didn't advance it.
@@ -6470,8 +6463,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
/// EmitMinInvariant - The given value is constant in this function. Return the
/// corresponding LLVM value. Only creates code in the entry block.
Value *TreeToLLVM::EmitMinInvariant(tree reg) {
- Value *V = isa<ADDR_EXPR>(reg) ? EmitInvariantAddress(reg) :
- EmitRegisterConstant(reg);
+ Value *V = isa<ADDR_EXPR>(reg) ? EmitInvariantAddress(reg)
+ : EmitRegisterConstant(reg);
assert(V->getType() == getRegType(TREE_TYPE(reg)) &&
"Gimple min invariant has wrong type!");
return V;
@@ -6572,7 +6565,7 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
/// The following kinds of expressions are currently supported: INTEGER_CST,
/// REAL_CST, COMPLEX_CST, VECTOR_CST, STRING_CST.
static unsigned EncodeExpr(tree exp,
- SmallVectorImpl<unsigned char> &Buffer) {
+ SmallVectorImpl<unsigned char> & Buffer) {
const tree type = TREE_TYPE(exp);
unsigned SizeInBytes = (TREE_INT_CST_LOW(TYPE_SIZE(type)) + 7) / 8;
Buffer.resize(SizeInBytes);
@@ -6601,8 +6594,7 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
// a generalized cast here
Type *Ty = getRegType(TREE_TYPE(reg));
Instruction::CastOps opcode = CastInst::getCastOpcode(
- CI, false, Ty,
- !TYPE_UNSIGNED(TREE_TYPE(reg)));
+ CI, false, Ty, !TYPE_UNSIGNED(TREE_TYPE(reg)));
return TheFolder->CreateCast(opcode, CI, Ty);
}
@@ -6691,7 +6683,7 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
/// VectorHighElements - Return a vector of half the length, consisting of the
/// elements of the given vector with indices in the top half.
- Value *TreeToLLVM::VectorHighElements(Value *Vec) {
+ Value *TreeToLLVM::VectorHighElements(Value * Vec) {
VectorType *Ty = cast<VectorType>(Vec->getType());
assert(!(Ty->getNumElements() & 1) &&
"Vector has odd number of elements!");
@@ -6706,7 +6698,7 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
/// VectorLowElements - Return a vector of half the length, consisting of the
/// elements of the given vector with indices in the bottom half.
- Value *TreeToLLVM::VectorLowElements(Value *Vec) {
+ Value *TreeToLLVM::VectorLowElements(Value * Vec) {
VectorType *Ty = cast<VectorType>(Vec->getType());
assert(!(Ty->getNumElements() & 1) &&
"Vector has odd number of elements!");
@@ -6809,8 +6801,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
// Use it to load the parameter value.
MemRef ParamLoc(DECL_LOCAL_IF_SET(var), Alignment, false);
- Value *Def = LoadRegisterFromMemory(ParamLoc, TREE_TYPE(reg), 0,
- SSABuilder);
+ Value *Def =
+ LoadRegisterFromMemory(ParamLoc, TREE_TYPE(reg), 0, SSABuilder);
if (flag_verbose_asm)
NameValue(Def, reg);
@@ -6822,11 +6814,11 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
if (!isa<FLOAT_TYPE>(TREE_TYPE(op))) {
Value *Op = EmitRegister(op);
Value *OpN = Builder.CreateNeg(Op, Op->getName() + "neg");
- ICmpInst::Predicate pred = TYPE_UNSIGNED(TREE_TYPE(op)) ?
- ICmpInst::ICMP_UGE : ICmpInst::ICMP_SGE;
- Value *Cmp = Builder.CreateICmp(pred, Op,
- Constant::getNullValue(Op->getType()),
- "abscond");
+ ICmpInst::Predicate pred =
+ TYPE_UNSIGNED(TREE_TYPE(op)) ? ICmpInst::ICMP_UGE
+ : ICmpInst::ICMP_SGE;
+ Value *Cmp = Builder.CreateICmp(
+ pred, Op, Constant::getNullValue(Op->getType()), "abscond");
return Builder.CreateSelect(Cmp, Op, OpN, Op->getName() + "abs");
}
@@ -7064,10 +7056,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
Value *Val = EmitRegister(op);
Type *Ty = Val->getType();
- CmpInst::Predicate Pred = CmpInst::Predicate(
- isa<FLOAT_TYPE>(TREE_TYPE(op)) ? FPPred :
- TYPE_UNSIGNED(TREE_TYPE(op)) ? UIPred :
- SIPred);
+ CmpInst::Predicate Pred = CmpInst::Predicate(isa<FLOAT_TYPE>(TREE_TYPE(
+ op)) ? FPPred : TYPE_UNSIGNED(TREE_TYPE(op)) ? UIPred : SIPred);
unsigned Length = (unsigned) TYPE_VECTOR_SUBPARTS(TREE_TYPE(op));
assert(Length > 1 && !(Length & (Length - 1)) &&
@@ -7090,9 +7080,9 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
ConstantVector::get(Mask));
// Replace Val with the max/min of the extracted elements.
- Value *Compare = isa<FLOAT_TYPE>(TREE_TYPE(op)) ?
- Builder.CreateFCmp(Pred, LHS, RHS) :
- Builder.CreateICmp(Pred, LHS, RHS);
+ Value *Compare =
+ isa<FLOAT_TYPE>(TREE_TYPE(op)) ? Builder.CreateFCmp(Pred, LHS, RHS)
+ : Builder.CreateICmp(Pred, LHS, RHS);
Val = Builder.CreateSelect(Compare, LHS, RHS);
// Repeat, using half as many elements.
@@ -7153,14 +7143,13 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
Amt->getName() + ".cast");
Value *TypeSize = ConstantInt::get(
- In->getType(),
- In->getType()->getPrimitiveSizeInBits());
+ In->getType(), In->getType()->getPrimitiveSizeInBits());
// Do the two shifts.
Value *V1 = Builder.CreateBinOp((Instruction::BinaryOps) Opc1, In, Amt);
Value *OtherShift = Builder.CreateSub(TypeSize, Amt);
- Value *V2 = Builder.CreateBinOp((Instruction::BinaryOps) Opc2, In,
- OtherShift);
+ Value *V2 =
+ Builder.CreateBinOp((Instruction::BinaryOps) Opc2, In, OtherShift);
// Or the two together to return them.
Value *Merge = Builder.CreateOr(V1, V2);
@@ -7221,8 +7210,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
unsigned EltBits = VecTy->getElementType()->getPrimitiveSizeInBits();
if (!(ShiftAmt % EltBits)) {
// A shift by an integral number of elements.
- unsigned EltOffset = ShiftAmt /
- EltBits; // Shift by this many elements.
+ unsigned EltOffset =
+ ShiftAmt / EltBits; // Shift by this many elements.
// Shuffle the elements sideways by the appropriate number of elements.
unsigned Length = VecTy->getNumElements();
SmallVector<Constant *, 8> Mask;
@@ -7255,8 +7244,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
Amt->getName() + ".cast");
// Perform the shift.
- LHS = Builder.CreateBinOp(isLeftShift ? Instruction::Shl :
- Instruction::LShr, LHS, Amt);
+ LHS = Builder.CreateBinOp(isLeftShift ? Instruction::Shl
+ : Instruction::LShr, LHS, Amt);
// Turn the result back into a vector.
return Builder.CreateBitCast(LHS, VecTy);
@@ -7308,8 +7297,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
// Determine the signs of LHS and RHS, and whether they have the same sign.
Value *LHSIsPositive = Builder.CreateICmpSGE(LHS, Zero);
Value *RHSIsPositive = Builder.CreateICmpSGE(RHS, Zero);
- Value *HaveSameSign = Builder.CreateICmpEQ(LHSIsPositive,
- RHSIsPositive);
+ Value *HaveSameSign =
+ Builder.CreateICmpEQ(LHSIsPositive, RHSIsPositive);
// Offset equals 1 if LHS and RHS have the same sign and LHS is not zero.
Value *LHSNotZero = Builder.CreateICmpNE(LHS, Zero);
@@ -7372,15 +7361,15 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
// reason for accessing tree operands directly rather than taking advantage
// of COND_EXPR_COND and friends is that the latter fail for VEC_COND_EXPR,
// which is also handled here.
- Value *CondVal = COMPARISON_CLASS_P(op0) ?
- EmitCompare(TREE_OPERAND(op0, 0), TREE_OPERAND(op0, 1),
- TREE_CODE(op0)) :
- EmitRegister(op0);
+ Value *CondVal = COMPARISON_CLASS_P(op0)
+ ? EmitCompare(TREE_OPERAND(op0, 0), TREE_OPERAND(op0, 1),
+ TREE_CODE(op0))
+ : EmitRegister(op0);
// Ensure the condition has i1 type.
if (!CondVal->getType()->getScalarType()->isIntegerTy(1))
CondVal = Builder.CreateICmpNE(
- CondVal, Constant::getNullValue(CondVal->getType()));
+ CondVal, Constant::getNullValue(CondVal->getType()));
// Emit the true and false values.
Value *TrueVal = EmitRegister(op1);
@@ -7570,9 +7559,9 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
// Convert the pointer into an i8* and add the offset to it.
Ptr = Builder.CreateBitCast(Ptr, GetUnitPointerType(Context));
StringRef GEPName = flag_verbose_asm ? "pp" : "";
- return POINTER_TYPE_OVERFLOW_UNDEFINED ?
- Builder.CreateInBoundsGEP(Ptr, Idx, GEPName) :
- Builder.CreateGEP(Ptr, Idx, GEPName);
+ return POINTER_TYPE_OVERFLOW_UNDEFINED
+ ? Builder.CreateInBoundsGEP(Ptr, Idx, GEPName)
+ : Builder.CreateGEP(Ptr, Idx, GEPName);
}
Value *TreeToLLVM::EmitReg_RDIV_EXPR(tree op0, tree op1) {
@@ -7645,8 +7634,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
// Determine the signs of LHS and RHS, and whether they have the same sign.
Value *LHSIsPositive = Builder.CreateICmpSGE(LHS, Zero);
Value *RHSIsPositive = Builder.CreateICmpSGE(RHS, Zero);
- Value *HaveSameSign = Builder.CreateICmpEQ(LHSIsPositive,
- RHSIsPositive);
+ Value *HaveSameSign =
+ Builder.CreateICmpEQ(LHSIsPositive, RHSIsPositive);
// Calculate |LHS| ...
Value *MinusLHS = Builder.CreateNeg(LHS);
@@ -7725,14 +7714,14 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
Value *Tmp4 = Builder.CreateMul(RHSr, RHSr); // c*c
Value *Tmp5 = Builder.CreateMul(RHSi, RHSi); // d*d
Value *Tmp6 = Builder.CreateAdd(Tmp4, Tmp5); // cc+dd
- DSTr = TYPE_UNSIGNED(elt_type) ? Builder.CreateUDiv(Tmp3, Tmp6) :
- Builder.CreateSDiv(Tmp3, Tmp6);
+ DSTr = TYPE_UNSIGNED(elt_type) ? Builder.CreateUDiv(Tmp3, Tmp6)
+ : Builder.CreateSDiv(Tmp3, Tmp6);
Value *Tmp7 = Builder.CreateMul(LHSi, RHSr); // b*c
Value *Tmp8 = Builder.CreateMul(LHSr, RHSi); // a*d
Value *Tmp9 = Builder.CreateSub(Tmp7, Tmp8); // bc-ad
- DSTi = TYPE_UNSIGNED(elt_type) ? Builder.CreateUDiv(Tmp9, Tmp6) :
- Builder.CreateSDiv(Tmp9, Tmp6);
+ DSTi = TYPE_UNSIGNED(elt_type) ? Builder.CreateUDiv(Tmp9, Tmp6)
+ : Builder.CreateSDiv(Tmp9, Tmp6);
return CreateComplex(DSTr, DSTi);
}
@@ -7748,8 +7737,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
Value *TreeToLLVM::EmitReg_TRUNC_MOD_EXPR(tree op0, tree op1) {
Value *LHS = EmitRegister(op0);
Value *RHS = EmitRegister(op1);
- return TYPE_UNSIGNED(TREE_TYPE(op0)) ? Builder.CreateURem(LHS, RHS) :
- Builder.CreateSRem(LHS, RHS);
+ return TYPE_UNSIGNED(TREE_TYPE(op0)) ? Builder.CreateURem(LHS, RHS)
+ : Builder.CreateSRem(LHS, RHS);
}
#if (GCC_MINOR < 7)
@@ -7849,14 +7838,14 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
// The GCC semantics are that mask indices off the end are wrapped back into
// range, so reduce the mask modulo 2*Length.
assert(!(Length & (Length - 1)) && "Vector length not a power of two!");
- Mask = Builder.CreateAnd(Mask, ConstantInt::get(Mask->getType(),
- 2 * Length - 1));
+ Mask = Builder.CreateAnd(
+ Mask, ConstantInt::get(Mask->getType(), 2 * Length - 1));
// Convert to a vector of i32, as required by the shufflevector instruction.
Type *MaskTy = VectorType::get(Builder.getInt32Ty(), Length);
tree mask_elt_type = TREE_TYPE(TREE_TYPE(op2));
- Value *Mask32 = Builder.CreateIntCast(Mask, MaskTy,
- !TYPE_UNSIGNED(mask_elt_type));
+ Value *Mask32 =
+ Builder.CreateIntCast(Mask, MaskTy, !TYPE_UNSIGNED(mask_elt_type));
// Use a shufflevector instruction if this directly corresponds to one, i.e.
// if the mask is a vector of constant integers or undef.
@@ -7872,13 +7861,13 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
ConvertType(TREE_TYPE(op1)), NULL);
AllocaInst *Tmp = CreateTemporary(TmpTy, Align);
// Store the first vector to the first element of the pair.
- Value *Tmp0 = Builder.CreateStructGEP(Tmp, 0,
- flag_verbose_asm ? "vp1s" : "");
+ Value *Tmp0 =
+ Builder.CreateStructGEP(Tmp, 0, flag_verbose_asm ? "vp1s" : "");
StoreRegisterToMemory(V0, MemRef(Tmp0, Align, /*Volatile*/ false),
TREE_TYPE(op0), 0, Builder);
// Store the second vector to the second element of the pair.
- Value *Tmp1 = Builder.CreateStructGEP(Tmp, 1,
- flag_verbose_asm ? "vp2s" : "");
+ Value *Tmp1 =
+ Builder.CreateStructGEP(Tmp, 1, flag_verbose_asm ? "vp2s" : "");
StoreRegisterToMemory(V1, MemRef(Tmp1, Align, /*Volatile*/ false),
TREE_TYPE(op1), 0, Builder);
@@ -8017,9 +8006,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
/// form instead of list form. This way of doing things is forced on us by
/// GCC routines like parse_output_constraint which rummage around inside the
/// array.
- const char **Constraints = (const char * *)
- alloca((NumOutputs + NumInputs) *
- sizeof(const char *));
+ const char **Constraints = (const char **)alloca(
+ (NumOutputs + NumInputs) * sizeof(const char *));
// Initialize the Constraints array.
for (unsigned i = 0; i != NumOutputs; ++i) {
@@ -8028,8 +8016,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
if (TREE_TYPE(TREE_VALUE(Output)) == error_mark_node)
return;
// Record the output constraint.
- const char *Constraint = TREE_STRING_POINTER(
- TREE_VALUE(TREE_PURPOSE(Output)));
+ const char *Constraint =
+ TREE_STRING_POINTER(TREE_VALUE(TREE_PURPOSE(Output)));
Constraints[i] = Constraint;
}
for (unsigned i = 0; i != NumInputs; ++i) {
@@ -8038,8 +8026,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
if (TREE_TYPE(TREE_VALUE(Input)) == error_mark_node)
return;
// Record the input constraint.
- const char *Constraint = TREE_STRING_POINTER(
- TREE_VALUE(TREE_PURPOSE(Input)));
+ const char *Constraint =
+ TREE_STRING_POINTER(TREE_VALUE(TREE_PURPOSE(Input)));
Constraints[NumOutputs + i] = Constraint;
}
@@ -8186,8 +8174,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
bool IsSigned = !TYPE_UNSIGNED(TREE_TYPE(Operand));
CallResultDests.push_back(std::make_pair(Dest.Ptr, IsSigned));
CallResultTypes.push_back(std::make_pair(DestValTy, IsSigned));
- OutputLocations.push_back(std::make_pair(true,
- CallResultTypes.size() - 1));
+ OutputLocations.push_back(
+ std::make_pair(true, CallResultTypes.size() - 1));
} else {
ConstraintStr += ",=*";
ConstraintStr += SimplifiedConstraint;
@@ -8243,7 +8231,7 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
TySize == 64 || (TySize == 128 && !AllowsMem)) {
LLVMTy = IntegerType::get(Context, (unsigned) TySize);
Op = Builder.CreateLoad(
- Builder.CreateBitCast(LV.Ptr, LLVMTy->getPointerTo()));
+ Builder.CreateBitCast(LV.Ptr, LLVMTy->getPointerTo()));
} else {
// Codegen only supports indirect operands with mem constraints.
if (!AllowsMem)
@@ -8264,8 +8252,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
// type, then cast it to the larger type and shift the value if the target
// is big endian.
if (ISDIGIT(Constraint[0])) {
- unsigned Match = (unsigned)
- atoi(Constraint); // Unsigned - no minus sign
+ unsigned Match =
+ (unsigned) atoi(Constraint); // Unsigned - no minus sign
// This output might have gotten put in either CallResult or CallArg
// depending whether it's a register or not. Find its type.
Type *OTy = 0;
@@ -8452,8 +8440,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
CallArgTypes[i] = CallOps[i]->getType();
// Get the type of the called asm "function".
- FunctionType *FTy = FunctionType::get(CallResultType, CallArgTypes,
- false);
+ FunctionType *FTy =
+ FunctionType::get(CallResultType, CallArgTypes, false);
// Remove the leading comma if we have operands.
if (!ConstraintStr.empty())
@@ -8466,10 +8454,10 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
}
std::string NewAsmStr = ConvertInlineAsmStr(stmt, NumOutputs + NumInputs);
- Value *Asm = InlineAsm::get(FTy, NewAsmStr, ConstraintStr,
- HasSideEffects);
- CallInst *CV = Builder.CreateCall(Asm, CallOps, CallResultTypes.empty() ?
- "" : "asmtmp");
+ Value *Asm =
+ InlineAsm::get(FTy, NewAsmStr, ConstraintStr, HasSideEffects);
+ CallInst *CV = Builder.CreateCall(
+ Asm, CallOps, CallResultTypes.empty() ? "" : "asmtmp");
CV->setDoesNotThrow();
if (gimple_has_location(stmt)) {
// Pass the location of the asm using a !srcloc metadata.
@@ -8480,8 +8468,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
// If the call produces a value, store it into the destination.
for (unsigned i = 0, NumResults = (unsigned) CallResultTypes.size();
i != NumResults; ++i) {
- Value *Val = NumResults == 1 ? CV :
- Builder.CreateExtractValue(CV, i, "asmresult");
+ Value *Val = NumResults ==
+ 1 ? CV : Builder.CreateExtractValue(CV, i, "asmresult");
bool ValIsSigned = CallResultTypes[i].second;
Value *Dest = CallResultDests[i].first;
@@ -8520,12 +8508,13 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
// saying that a RESULT_DECL is dead means we are dead - which is why we
// don't even consider it.
if (isa<PARM_DECL>(lhs) || isa<VAR_DECL>(lhs)) {
- Value *LHSAddr = Builder.CreateBitCast(DECL_LOCAL(lhs),
- Builder.getInt8PtrTy());
- uint64_t LHSSize = isInt64(DECL_SIZE(lhs), true) ?
- getInt64(DECL_SIZE(lhs), true) / 8 : ~0UL;
- Function *EndIntr = Intrinsic::getDeclaration(
- TheModule, Intrinsic::lifetime_end);
+ Value *LHSAddr =
+ Builder.CreateBitCast(DECL_LOCAL(lhs), Builder.getInt8PtrTy());
+ uint64_t LHSSize =
+ isInt64(DECL_SIZE(lhs), true) ? getInt64(DECL_SIZE(lhs), true) / 8
+ : ~0UL;
+ Function *EndIntr =
+ Intrinsic::getDeclaration(TheModule, Intrinsic::lifetime_end);
Builder.CreateCall2(EndIntr, Builder.getInt64(LHSSize), LHSAddr);
}
return;
@@ -8624,8 +8613,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
// Catches.
Value *Filter = NULL;
SmallSet<Value *, 8> AlreadyCaught; // Typeinfos known caught.
- Function *TypeIDIntr = Intrinsic::getDeclaration(
- TheModule, Intrinsic::eh_typeid_for);
+ Function *TypeIDIntr =
+ Intrinsic::getDeclaration(TheModule, Intrinsic::eh_typeid_for);
for (eh_catch c = region->u.eh_try.first_catch; c; c = c->next_catch) {
BasicBlock *Dest = getLabelDeclBlock(c->label);
if (!c->type_list) {
@@ -8691,8 +8680,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
// region then the reraised exception may be caught by the current function,
// in which case it can be simplified into a branch.
int DstLPadNo = lookup_stmt_eh_lp(stmt);
- eh_region dst_rgn = DstLPadNo ? get_eh_region_from_lp_number(DstLPadNo) :
- NULL;
+ eh_region dst_rgn =
+ DstLPadNo ? get_eh_region_from_lp_number(DstLPadNo) : NULL;
eh_region src_rgn = get_eh_region_from_number(gimple_resx_region(stmt));
if (!src_rgn) {
@@ -8730,8 +8719,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
// Unwind the exception out of the function using a resume instruction.
Value *ExcPtr = Builder.CreateLoad(getExceptionPtr(src_rgn->index));
Value *Filter = Builder.CreateLoad(getExceptionFilter(src_rgn->index));
- Type *UnwindDataTy = StructType::get(Builder.getInt8PtrTy(),
- Builder.getInt32Ty(), NULL);
+ Type *UnwindDataTy =
+ StructType::get(Builder.getInt8PtrTy(), Builder.getInt32Ty(), NULL);
Value *UnwindData = UndefValue::get(UnwindDataTy);
UnwindData = Builder.CreateInsertValue(UnwindData, ExcPtr, 0, "exc_ptr");
UnwindData = Builder.CreateInsertValue(UnwindData, Filter, 1, "filter");
@@ -8769,9 +8758,9 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
// Create the switch instruction.
tree default_label = CASE_LABEL(gimple_switch_label(stmt, 0));
- SwitchInst *SI = Builder.CreateSwitch(Index,
- getLabelDeclBlock(default_label),
- gimple_switch_num_labels(stmt));
+ SwitchInst *SI =
+ Builder.CreateSwitch(Index, getLabelDeclBlock(default_label),
+ gimple_switch_num_labels(stmt));
// Add the switch cases.
BasicBlock *IfBlock = 0; // Set if a range was output as an "if".
@@ -8811,8 +8800,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
BeginBlock(IfBlock);
}
Value *Diff = Builder.CreateSub(Index, LowC);
- Value *Cond = Builder.CreateICmpULE(Diff,
- ConstantInt::get(Context, Range));
+ Value *Cond =
+ Builder.CreateICmpULE(Diff, ConstantInt::get(Context, Range));
BasicBlock *False_Block = BasicBlock::Create(Context);
Builder.CreateCondBr(Cond, Dest, False_Block);
BeginBlock(False_Block);
@@ -8898,8 +8887,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
case UNLT_EXPR:
case UNORDERED_EXPR:
// The GCC result may be of any integer type.
- RHS = Builder.CreateZExt(EmitCompare(rhs1, rhs2, code),
- getRegType(type));
+ RHS =
+ Builder.CreateZExt(EmitCompare(rhs1, rhs2, code), getRegType(type));
break;
// Binary expressions.
@@ -8976,8 +8965,9 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
Instruction::Shl);
break;
case RSHIFT_EXPR:
- RHS = EmitReg_ShiftOp(rhs1, rhs2, TYPE_UNSIGNED(type) ?
- Instruction::LShr : Instruction::AShr);
+ RHS = EmitReg_ShiftOp(rhs1, rhs2,
+ TYPE_UNSIGNED(type) ? Instruction::LShr
+ : Instruction::AShr);
break;
case TRUNC_DIV_EXPR:
RHS = EmitReg_TRUNC_DIV_EXPR(rhs1, rhs2, /*isExact*/ false);
@@ -9075,8 +9065,8 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
// Exceptional (tcc_exceptional).
case CONSTRUCTOR:
// Vector constant constructors are gimple invariant.
- return is_gimple_constant(rhs) ? EmitRegisterConstant(rhs) :
- EmitCONSTRUCTOR(rhs, 0);
+ return is_gimple_constant(rhs) ? EmitRegisterConstant(rhs)
+ : EmitCONSTRUCTOR(rhs, 0);
// References (tcc_reference).
case ARRAY_REF:
@@ -9109,7 +9099,7 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
}
/// OutputCallRHS - Convert the RHS of a GIMPLE_CALL.
- Value *TreeToLLVM::OutputCallRHS(gimple stmt, const MemRef *DestLoc) {
+ Value *TreeToLLVM::OutputCallRHS(gimple stmt, const MemRef * DestLoc) {
// Check for a built-in function call. If we can lower it directly, do so
// now.
tree fndecl = gimple_call_fndecl(stmt);
@@ -9136,13 +9126,13 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
// with arguments none the less, then calculate the LLVM type from the list
// of arguments.
if (flag_functions_from_args) {
- tree *FirstArgAddr = gimple_call_num_args(stmt) > 0 ?
- gimple_call_arg_ptr(stmt, 0) : NULL;
+ tree *FirstArgAddr = gimple_call_num_args(stmt) >
+ 0 ? gimple_call_arg_ptr(stmt, 0) : NULL;
Ty = ConvertArgListToFnType(
- function_type,
- ArrayRef<tree>(FirstArgAddr, gimple_call_num_args(stmt)),
- gimple_call_chain(stmt), !flag_functions_from_args,
- CallingConv, PAL);
+ function_type,
+ ArrayRef<tree>(FirstArgAddr, gimple_call_num_args(stmt)),
+ gimple_call_chain(stmt), !flag_functions_from_args, CallingConv,
+ PAL);
} else {
Ty = ConvertFunctionType(function_type, fndecl, gimple_call_chain(stmt),
CallingConv, PAL);
@@ -9163,12 +9153,12 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
BeginBlock(BasicBlock::Create(Context));
}
- return Result ? Mem2Reg(Result, gimple_call_return_type(stmt), Builder) :
- 0;
+ return Result ? Mem2Reg(Result, gimple_call_return_type(stmt), Builder)
+ : 0;
}
/// WriteScalarToLHS - Store RHS, a non-aggregate value, into the given LHS.
- void TreeToLLVM::WriteScalarToLHS(tree lhs, Value *RHS) {
+ void TreeToLLVM::WriteScalarToLHS(tree lhs, Value * RHS) {
// May need a useless type conversion (useless_type_conversion_p).
RHS = TriviallyTypeConvert(RHS, getRegType(TREE_TYPE(lhs)));
@@ -9205,14 +9195,14 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
// Load and store the minimum number of bytes that covers the field.
unsigned LoadSizeInBits = LV.BitStart + LV.BitSize;
- LoadSizeInBits = (unsigned)
- RoundUpToAlignment(LoadSizeInBits, BITS_PER_UNIT);
+ LoadSizeInBits =
+ (unsigned) RoundUpToAlignment(LoadSizeInBits, BITS_PER_UNIT);
Type *LoadType = IntegerType::get(Context, LoadSizeInBits);
// Load the bits.
Value *Ptr = Builder.CreateBitCast(LV.Ptr, LoadType->getPointerTo());
- Value *Val = Builder.CreateAlignedLoad(Ptr, LV.getAlignment(),
- LV.Volatile);
+ Value *Val =
+ Builder.CreateAlignedLoad(Ptr, LV.getAlignment(), LV.Volatile);
// Get the right-hand side as a value of the same type.
// FIXME: This assumes the right-hand side is an integer.
@@ -9221,7 +9211,7 @@ bool TreeToLLVM::EmitBuiltinCall(gimple
// Shift the right-hand side so that its bits are in the right position.
unsigned FirstBitInVal = BYTES_BIG_ENDIAN ? LoadSizeInBits - LV.BitStart -
- LV.BitSize : LV.BitStart;
+ LV.BitSize : LV.BitStart;
if (FirstBitInVal) {
Value *ShAmt = ConstantInt::get(LoadType, FirstBitInVal);
RHS = Builder.CreateShl(RHS, ShAmt);
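As an aside for anyone skimming this hunk: the bit-field store path above works by loading the smallest whole number of bytes covering the field, shifting the right-hand side into position, merging it in, and storing back. A standalone C++ sketch of that strategy (illustrative only, not dragonegg code; little-endian bit numbering assumed, the real code also handles the big-endian layout):

  #include <cstdint>
  #include <cstdio>

  // Merge 'value' into the bit-field [bitStart, bitStart + bitSize) of 'word'.
  static uint32_t insertBits(uint32_t word, uint32_t value, unsigned bitStart,
                             unsigned bitSize) {
    const uint32_t mask =
        ((bitSize < 32 ? (1u << bitSize) : 0u) - 1u) << bitStart;
    return (word & ~mask) | ((value << bitStart) & mask);
  }

  int main() {
    // Overwrite a 5-bit field starting at bit 3.
    std::printf("0x%08x\n", insertBits(0xffffffffu, 0x5u, 3, 5)); // 0xffffff2f
  }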
Modified: dragonegg/trunk/src/Debug.cpp
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/src/Debug.cpp?rev=176016&r1=176015&r2=176016&view=diff
==============================================================================
--- dragonegg/trunk/src/Debug.cpp (original)
+++ dragonegg/trunk/src/Debug.cpp Mon Feb 25 04:54:25 2013
@@ -122,8 +122,8 @@ static uint64_t NodeAlignInBits(tree Nod
static tree FieldType(tree Field) {
if (isa<ERROR_MARK>(Field))
return integer_type_node;
- return DECL_BIT_FIELD_TYPE(Field) ? DECL_BIT_FIELD_TYPE(Field) :
- TREE_TYPE(Field);
+ return DECL_BIT_FIELD_TYPE(Field) ? DECL_BIT_FIELD_TYPE(Field)
+ : TREE_TYPE(Field);
}
/// GetNodeName - Returns the name stored in a node regardless of whether the
@@ -238,7 +238,8 @@ void DebugInfo::EmitFunctionStart(tree F
std::map<tree_node *, WeakVH>::iterator I = SPCache.find(FnDecl);
if (I != SPCache.end()) {
DISubprogram SPDecl(cast<MDNode>(I->second));
- DISubprogram SP = DebugFactory.CreateSubprogramDefinition(SPDecl, lineno, Fn);
+ DISubprogram SP =
+ DebugFactory.CreateSubprogramDefinition(SPDecl, lineno, Fn);
SPDecl->replaceAllUsesWith(SP);
// Push function on region stack.
@@ -254,16 +255,17 @@ void DebugInfo::EmitFunctionStart(tree F
DECL_ABSTRACT_ORIGIN(FnDecl) != FnDecl)
ArtificialFnWithAbstractOrigin = true;
- DIDescriptor SPContext = ArtificialFnWithAbstractOrigin ?
- getOrCreateFile(main_input_filename) :
- findRegion(DECL_CONTEXT(FnDecl));
+ DIDescriptor SPContext =
+ ArtificialFnWithAbstractOrigin ? getOrCreateFile(main_input_filename)
+ : findRegion(DECL_CONTEXT(FnDecl));
// Creating context may have triggered creation of this SP descriptor. So
// check the cache again.
I = SPCache.find(FnDecl);
if (I != SPCache.end()) {
DISubprogram SPDecl(cast<MDNode>(I->second));
- DISubprogram SP = DebugFactory.CreateSubprogramDefinition(SPDecl, lineno, Fn);
+ DISubprogram SP =
+ DebugFactory.CreateSubprogramDefinition(SPDecl, lineno, Fn);
SPDecl->replaceAllUsesWith(SP);
// Push function on region stack.
@@ -290,11 +292,9 @@ void DebugInfo::EmitFunctionStart(tree F
StringRef FnName = getFunctionName(FnDecl);
DISubprogram SP = DebugFactory.CreateSubprogram(
- SPContext, FnName, FnName, LinkageName,
- getOrCreateFile(Loc.file), lineno, FNType,
- Fn->hasInternalLinkage(), true /*definition*/,
- Virtuality, VIndex, ContainingType,
- DECL_ARTIFICIAL(FnDecl), optimize, Fn);
+ SPContext, FnName, FnName, LinkageName, getOrCreateFile(Loc.file), lineno,
+ FNType, Fn->hasInternalLinkage(), true /*definition*/, Virtuality, VIndex,
+ ContainingType, DECL_ARTIFICIAL(FnDecl), optimize, Fn);
SPCache[FnDecl] = WeakVH(SP);
@@ -310,9 +310,8 @@ DINameSpace DebugInfo::getOrCreateNameSp
return DINameSpace(cast<MDNode>(I->second));
expanded_location Loc = GetNodeLocation(Node, false);
- DINameSpace DNS = DebugFactory.CreateNameSpace(Context, GetNodeName(Node),
- getOrCreateFile(Loc.file),
- Loc.line);
+ DINameSpace DNS = DebugFactory.CreateNameSpace(
+ Context, GetNodeName(Node), getOrCreateFile(Loc.file), Loc.line);
NameSpaceCache[Node] = WeakVH(DNS);
return DNS;
@@ -379,12 +378,11 @@ void DebugInfo::EmitDeclare(tree decl, u
// If type info is not available then do not emit debug info for this var.
if (!Ty)
return;
- llvm::DIVariable D = DebugFactory.CreateVariable(Tag, VarScope, Name,
- getOrCreateFile(Loc.file),
- Loc.line, Ty, optimize);
+ llvm::DIVariable D = DebugFactory.CreateVariable(
+ Tag, VarScope, Name, getOrCreateFile(Loc.file), Loc.line, Ty, optimize);
- Instruction *Call = DebugFactory.InsertDeclare(AI, D,
- Builder.GetInsertBlock());
+ Instruction *Call =
+ DebugFactory.InsertDeclare(AI, D, Builder.GetInsertBlock());
Call->setDebugLoc(DebugLoc::get(Loc.line, 0, VarScope));
}
@@ -458,8 +456,8 @@ DIType DebugInfo::createBasicType(tree t
Encoding = DW_ATE_float;
break;
case COMPLEX_TYPE:
- Encoding = isa<REAL_TYPE>(TREE_TYPE(type)) ? DW_ATE_complex_float :
- DW_ATE_lo_user;
+ Encoding =
+ isa<REAL_TYPE>(TREE_TYPE(type)) ? DW_ATE_complex_float : DW_ATE_lo_user;
break;
case BOOLEAN_TYPE:
Encoding = DW_ATE_boolean;
@@ -522,8 +520,8 @@ DIType DebugInfo::createMethodType(tree
ProcessedFirstArg = true;
}
- llvm::DIArray EltTypeArray = DebugFactory.GetOrCreateArray(EltTys.data(),
- EltTys.size());
+ llvm::DIArray EltTypeArray =
+ DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
RegionStack.pop_back();
std::map<tree_node *, WeakVH>::iterator RI = RegionMap.find(type);
@@ -531,10 +529,9 @@ DIType DebugInfo::createMethodType(tree
RegionMap.erase(RI);
llvm::DIType RealType = DebugFactory.CreateCompositeType(
- llvm::dwarf::DW_TAG_subroutine_type,
- findRegion(TYPE_CONTEXT(type)), StringRef(),
- getOrCreateFile(main_input_filename), 0, 0, 0, 0,
- 0, llvm::DIType(), EltTypeArray);
+ llvm::dwarf::DW_TAG_subroutine_type, findRegion(TYPE_CONTEXT(type)),
+ StringRef(), getOrCreateFile(main_input_filename), 0, 0, 0, 0, 0,
+ llvm::DIType(), EltTypeArray);
// Now that we have a real decl for the struct, replace anything using the
// old decl with the new one. This will recursively update the debug info.
@@ -549,8 +546,8 @@ DIType DebugInfo::createPointerType(tree
DIType FromTy = getOrCreateType(TREE_TYPE(type));
// type* and type&
// FIXME: Should BLOCK_POINTER_TYP have its own DW_TAG?
- unsigned Tag = isa<REFERENCE_TYPE>(type) ? DW_TAG_reference_type :
- DW_TAG_pointer_type;
+ unsigned Tag =
+ isa<REFERENCE_TYPE>(type) ? DW_TAG_reference_type : DW_TAG_pointer_type;
unsigned Flags = 0;
// Check if this pointer type has a name.
@@ -558,21 +555,19 @@ DIType DebugInfo::createPointerType(tree
if (isa<TYPE_DECL>(TyName) && !DECL_ORIGINAL_TYPE(TyName)) {
expanded_location TypeNameLoc = GetNodeLocation(TyName);
DIType Ty = DebugFactory.CreateDerivedType(
- Tag, findRegion(DECL_CONTEXT(TyName)),
- GetNodeName(TyName), getOrCreateFile(TypeNameLoc.file),
- TypeNameLoc.line, 0 /*size*/, 0 /*align*/, 0 /*offset */,
- 0 /*flags*/, FromTy);
+ Tag, findRegion(DECL_CONTEXT(TyName)), GetNodeName(TyName),
+ getOrCreateFile(TypeNameLoc.file), TypeNameLoc.line, 0 /*size*/,
+ 0 /*align*/, 0 /*offset */, 0 /*flags*/, FromTy);
TypeCache[TyName] = WeakVH(Ty);
return Ty;
}
StringRef PName = FromTy.getName();
DIType PTy = DebugFactory.CreateDerivedType(
- Tag, findRegion(TYPE_CONTEXT(type)),
- Tag == DW_TAG_pointer_type ? StringRef() : PName,
- getOrCreateFile(main_input_filename), 0 /*line no*/,
- NodeSizeInBits(type), NodeAlignInBits(type), 0 /*offset */,
- Flags, FromTy);
+ Tag, findRegion(TYPE_CONTEXT(type)),
+ Tag == DW_TAG_pointer_type ? StringRef() : PName,
+ getOrCreateFile(main_input_filename), 0 /*line no*/, NodeSizeInBits(type),
+ NodeAlignInBits(type), 0 /*offset */, Flags, FromTy);
return PTy;
}
@@ -610,8 +605,8 @@ DIType DebugInfo::createArrayType(tree t
Subscripts.push_back(DebugFactory.GetOrCreateSubrange(0, Length));
}
- llvm::DIArray SubscriptArray = DebugFactory.GetOrCreateArray(
- Subscripts.data(), Subscripts.size());
+ llvm::DIArray SubscriptArray =
+ DebugFactory.GetOrCreateArray(Subscripts.data(), Subscripts.size());
expanded_location Loc = GetNodeLocation(type);
return DebugFactory.CreateCompositeType(
llvm::dwarf::DW_TAG_array_type, findRegion(TYPE_CONTEXT(type)),
@@ -635,8 +630,8 @@ DIType DebugInfo::createEnumType(tree ty
}
}
- llvm::DIArray EltArray = DebugFactory.GetOrCreateArray(Elements.data(),
- Elements.size());
+ llvm::DIArray EltArray =
+ DebugFactory.GetOrCreateArray(Elements.data(), Elements.size());
expanded_location Loc = { NULL, 0, 0, false };
if (TYPE_SIZE(type))
@@ -654,8 +649,8 @@ DIType DebugInfo::createEnumType(tree ty
DIType DebugInfo::createStructType(tree type) {
// struct { a; b; ... z; }; | union { a; b; ... z; };
- unsigned Tag = isa<RECORD_TYPE>(type) ? DW_TAG_structure_type :
- DW_TAG_union_type;
+ unsigned Tag =
+ isa<RECORD_TYPE>(type) ? DW_TAG_structure_type : DW_TAG_union_type;
unsigned RunTimeLang = 0;
//TODO if (TYPE_LANG_SPECIFIC (type)
@@ -699,11 +694,10 @@ DIType DebugInfo::createStructType(tree
// forward declaration,
if (TYPE_SIZE(type) == 0) {
- llvm::DICompositeType FwdDecl =
- DebugFactory.CreateCompositeType(
- Tag, TyContext, GetNodeName(type), getOrCreateFile(Loc.file),
- Loc.line, 0, 0, 0, SFlags | llvm::DIType::FlagFwdDecl,
- llvm::DIType(), llvm::DIArray(), RunTimeLang);
+ llvm::DICompositeType FwdDecl = DebugFactory.CreateCompositeType(
+ Tag, TyContext, GetNodeName(type), getOrCreateFile(Loc.file), Loc.line,
+ 0, 0, 0, SFlags | llvm::DIType::FlagFwdDecl, llvm::DIType(),
+ llvm::DIArray(), RunTimeLang);
return FwdDecl;
}
@@ -722,7 +716,7 @@ DIType DebugInfo::createStructType(tree
llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;
if (tree binfo = TYPE_BINFO(type)) {
- VEC(tree, gc) * accesses = BINFO_BASE_ACCESSES(binfo);
+ VEC(tree, gc) *accesses = BINFO_BASE_ACCESSES(binfo);
for (unsigned i = 0, e = BINFO_N_BASE_BINFOS(binfo); i != e; ++i) {
tree BInfo = BINFO_BASE_BINFO(binfo, i);
@@ -741,16 +735,15 @@ DIType DebugInfo::createStructType(tree
// Check for zero BINFO_OFFSET.
// FIXME : Is this correct ?
- unsigned Offset = BINFO_OFFSET(BInfo) ?
- getInt64(BINFO_OFFSET(BInfo), true) * 8 : 0;
+ unsigned Offset =
+ BINFO_OFFSET(BInfo) ? getInt64(BINFO_OFFSET(BInfo), true) * 8 : 0;
if (BINFO_VIRTUAL_P(BInfo))
Offset = 0 - getInt64(BINFO_VPTR_FIELD(BInfo), false);
// FIXME : name, size, align etc...
DIType DTy = DebugFactory.CreateDerivedType(
- DW_TAG_inheritance, findRegion(type),
- StringRef(), llvm::DIFile(), 0, 0, 0, Offset, BFlags,
- BaseClass);
+ DW_TAG_inheritance, findRegion(type), StringRef(), llvm::DIFile(), 0,
+ 0, 0, Offset, BFlags, BaseClass);
EltTys.push_back(DTy);
}
}
@@ -790,10 +783,10 @@ DIType DebugInfo::createStructType(tree
MFlags = llvm::DIType::FlagPrivate;
DIType DTy = DebugFactory.CreateDerivedType(
- DW_TAG_member, findRegion(DECL_CONTEXT(Member)),
- MemberName, getOrCreateFile(MemLoc.file), MemLoc.line,
- NodeSizeInBits(Member), NodeAlignInBits(FieldNodeType),
- int_bit_position(Member), MFlags, MemberType);
+ DW_TAG_member, findRegion(DECL_CONTEXT(Member)), MemberName,
+ getOrCreateFile(MemLoc.file), MemLoc.line, NodeSizeInBits(Member),
+ NodeAlignInBits(FieldNodeType), int_bit_position(Member), MFlags,
+ MemberType);
EltTys.push_back(DTy);
}
@@ -827,18 +820,17 @@ DIType DebugInfo::createStructType(tree
ContainingType = getOrCreateType(DECL_CONTEXT(Member));
}
DISubprogram SP = DebugFactory.CreateSubprogram(
- findRegion(DECL_CONTEXT(Member)), MemberName,
- MemberName, LinkageName,
- getOrCreateFile(MemLoc.file), MemLoc.line, SPTy,
- false, false, Virtuality, VIndex, ContainingType,
- DECL_ARTIFICIAL(Member), optimize);
+ findRegion(DECL_CONTEXT(Member)), MemberName, MemberName, LinkageName,
+ getOrCreateFile(MemLoc.file), MemLoc.line, SPTy, false, false,
+ Virtuality, VIndex, ContainingType, DECL_ARTIFICIAL(Member),
+ optimize);
EltTys.push_back(SP);
SPCache[Member] = WeakVH(SP);
}
}
- llvm::DIArray Elements = DebugFactory.GetOrCreateArray(EltTys.data(),
- EltTys.size());
+ llvm::DIArray Elements =
+ DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
RegionStack.pop_back();
std::map<tree_node *, WeakVH>::iterator RI = RegionMap.find(type);
@@ -850,12 +842,11 @@ DIType DebugInfo::createStructType(tree
tree vtype = DECL_FCONTEXT(TYPE_VFIELD(type));
ContainingType = getOrCreateType(vtype);
}
- llvm::DICompositeType RealDecl =
- DebugFactory.CreateCompositeType(
- Tag, findRegion(TYPE_CONTEXT(type)), GetNodeName(type),
- getOrCreateFile(Loc.file), Loc.line, NodeSizeInBits(type),
- NodeAlignInBits(type), 0, SFlags, llvm::DIType(), Elements,
- RunTimeLang, ContainingType);
+ llvm::DICompositeType RealDecl = DebugFactory.CreateCompositeType(
+ Tag, findRegion(TYPE_CONTEXT(type)), GetNodeName(type),
+ getOrCreateFile(Loc.file), Loc.line, NodeSizeInBits(type),
+ NodeAlignInBits(type), 0, SFlags, llvm::DIType(), Elements, RunTimeLang,
+ ContainingType);
RegionMap[type] = WeakVH(RealDecl);
// Now that we have a real decl for the struct, replace anything using the
@@ -877,10 +868,9 @@ DIType DebugInfo::createVariantType(tree
if (isa<TYPE_DECL>(TyDef) && DECL_ORIGINAL_TYPE(TyDef)) {
expanded_location TypeDefLoc = GetNodeLocation(TyDef);
Ty = DebugFactory.CreateDerivedType(
- DW_TAG_typedef, findRegion(DECL_CONTEXT(TyDef)),
- GetNodeName(TyDef), getOrCreateFile(TypeDefLoc.file),
- TypeDefLoc.line, 0 /*size*/, 0 /*align*/, 0 /*offset */,
- 0 /*flags*/, MainTy);
+ DW_TAG_typedef, findRegion(DECL_CONTEXT(TyDef)), GetNodeName(TyDef),
+ getOrCreateFile(TypeDefLoc.file), TypeDefLoc.line, 0 /*size*/,
+ 0 /*align*/, 0 /*offset */, 0 /*flags*/, MainTy);
TypeCache[TyDef] = WeakVH(Ty);
return Ty;
}
@@ -888,19 +878,19 @@ DIType DebugInfo::createVariantType(tree
if (TYPE_VOLATILE(type)) {
Ty = DebugFactory.CreateDerivedType(
- DW_TAG_volatile_type, findRegion(TYPE_CONTEXT(type)), StringRef(),
- getOrCreateFile(main_input_filename), 0 /*line no*/,
- NodeSizeInBits(type), NodeAlignInBits(type), 0 /*offset */,
- 0 /* flags */, MainTy);
+ DW_TAG_volatile_type, findRegion(TYPE_CONTEXT(type)), StringRef(),
+ getOrCreateFile(main_input_filename), 0 /*line no*/,
+ NodeSizeInBits(type), NodeAlignInBits(type), 0 /*offset */,
+ 0 /* flags */, MainTy);
MainTy = Ty;
}
if (TYPE_READONLY(type))
Ty = DebugFactory.CreateDerivedType(
- DW_TAG_const_type, findRegion(TYPE_CONTEXT(type)), StringRef(),
- getOrCreateFile(main_input_filename), 0 /*line no*/,
- NodeSizeInBits(type), NodeAlignInBits(type), 0 /*offset */,
- 0 /* flags */, MainTy);
+ DW_TAG_const_type, findRegion(TYPE_CONTEXT(type)), StringRef(),
+ getOrCreateFile(main_input_filename), 0 /*line no*/,
+ NodeSizeInBits(type), NodeAlignInBits(type), 0 /*offset */,
+ 0 /* flags */, MainTy);
if (TYPE_VOLATILE(type) || TYPE_READONLY(type)) {
TypeCache[type] = WeakVH(Ty);
@@ -1012,8 +1002,7 @@ void DebugInfo::Initialize() {
/// getOrCreateCompileUnit - Get the compile unit from the cache or
/// create a new one if necessary.
-void DebugInfo::getOrCreateCompileUnit(const char *FullPath,
- bool isMain) {
+void DebugInfo::getOrCreateCompileUnit(const char *FullPath, bool isMain) {
if (!FullPath) {
if (!strcmp(main_input_filename, ""))
FullPath = "<stdin>";
@@ -1084,12 +1073,11 @@ DIFactory::DIFactory(Module &m)
}
Constant *DIFactory::GetTagConstant(unsigned TAG) {
- assert((TAG &LLVMDebugVersionMask) == 0 &&
+ assert((TAG & LLVMDebugVersionMask) == 0 &&
"Tag too large for debug encoding!");
// llvm has moved forward. DIFactory does not emit debug info in updated form.
// Use LLVMDebugVersion10 directly here.
- return ConstantInt::get(Type::getInt32Ty(VMContext),
- TAG | LLVMDebugVersion);
+ return ConstantInt::get(Type::getInt32Ty(VMContext), TAG | LLVMDebugVersion);
}
//===----------------------------------------------------------------------===//
@@ -1119,11 +1107,12 @@ DIDescriptor DIFactory::CreateUnspecifie
/// CreateCompileUnit - Create a new descriptor for the specified compile
/// unit. Note that this does not unique compile units within the module.
-void DIFactory::CreateCompileUnit(
- unsigned LangID, StringRef Filename, StringRef Directory,
- StringRef Producer, bool isMain, bool isOptimized, StringRef Flags,
- unsigned RunTimeVer) {
- Builder.createCompileUnit(LangID, Filename, Directory, Producer, isOptimized, Flags, RunTimeVer);
+void DIFactory::CreateCompileUnit(unsigned LangID, StringRef Filename,
+ StringRef Directory, StringRef Producer,
+ bool isMain, bool isOptimized,
+ StringRef Flags, unsigned RunTimeVer) {
+ Builder.createCompileUnit(LangID, Filename, Directory, Producer, isOptimized,
+ Flags, RunTimeVer);
}
/// CreateFile - Create a new descriptor for the specified file.
@@ -1172,21 +1161,22 @@ DIDerivedType DIFactory::CreateDerivedTy
// an invalid typedef that is a typedef of no other type. (hence the null
// value as the last parameter in Elts, below)
if (!DerivedFrom.isValid()) {
- Value* Elts[] = {
- GetTagConstant(dwarf::DW_TAG_typedef),
- Context, MDString::get(VMContext, Name), F,
+ Value *Elts[] = {
+ GetTagConstant(dwarf::DW_TAG_typedef), Context,
+ MDString::get(VMContext, Name), F,
ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber),
- ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Size
- ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Align
- ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Offset
- ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Flags
+ ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Size
+ ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Align
+ ConstantInt::get(Type::getInt64Ty(VMContext), 0), // Offset
+ ConstantInt::get(Type::getInt32Ty(VMContext), 0), // Flags
NULL
};
return DIDerivedType(MDNode::get(VMContext, Elts));
}
return Builder.createTypedef(DerivedFrom, Name, F, LineNumber, Context);
case dwarf::DW_TAG_pointer_type:
- return Builder.createPointerType(DerivedFrom, SizeInBits, AlignInBits, Name);
+ return Builder.createPointerType(DerivedFrom, SizeInBits, AlignInBits,
+ Name);
case dwarf::DW_TAG_reference_type:
case dwarf::DW_TAG_rvalue_reference_type:
return Builder.createReferenceType(Tag, DerivedFrom);
@@ -1199,7 +1189,8 @@ DIDerivedType DIFactory::CreateDerivedTy
AlignInBits, OffsetInBits, Flags,
DerivedFrom);
case dwarf::DW_TAG_inheritance:
- return Builder.createInheritance(DIType(Context), DerivedFrom, OffsetInBits, Flags);
+ return Builder.createInheritance(DIType(Context), DerivedFrom, OffsetInBits,
+ Flags);
case dwarf::DW_TAG_friend:
case dwarf::DW_TAG_ptr_to_member_type:
break;
@@ -1259,8 +1250,8 @@ DISubprogram DIFactory::CreateSubprogram
isLocalToUnit, isDefinition, VK, VIndex, NULL,
Flags, isOptimized, Fn, NULL);
return Builder.createFunction(Context, Name, LinkageName, F, LineNo, Ty,
- isLocalToUnit, isDefinition,
- LineNo, Flags, isOptimized, Fn, NULL, NULL);
+ isLocalToUnit, isDefinition, LineNo, Flags,
+ isOptimized, Fn, NULL, NULL);
}
/// CreateSubprogramDefinition - Create new subprogram descriptor for the
@@ -1270,12 +1261,10 @@ DISubprogram DIFactory::CreateSubprogram
if (SP.isDefinition())
return DISubprogram(SP);
- return Builder.createFunction(SP.getContext(), SP.getName(),
- SP.getLinkageName(), SP.getFile(),
- SP.getLineNumber(), SP.getType(),
- SP.isLocalToUnit(), true, LineNo, SP.getFlags(),
- SP.isOptimized(), Fn, SP.getTemplateParams(),
- SP);
+ return Builder.createFunction(
+ SP.getContext(), SP.getName(), SP.getLinkageName(), SP.getFile(),
+ SP.getLineNumber(), SP.getType(), SP.isLocalToUnit(), true, LineNo,
+ SP.getFlags(), SP.isOptimized(), Fn, SP.getTemplateParams(), SP);
}
/// CreateGlobalVariable - Create a new descriptor for the specified global.
@@ -1283,7 +1272,8 @@ DIGlobalVariable DIFactory::CreateGlobal
DIDescriptor Context, StringRef Name, StringRef DisplayName,
StringRef LinkageName, DIFile F, unsigned LineNo, DIType Ty,
bool isLocalToUnit, bool isDefinition, llvm::GlobalVariable *Val) {
- return Builder.createStaticVariable(Context, Name, LinkageName, F, LineNo, Ty, isLocalToUnit, Val);
+ return Builder.createStaticVariable(Context, Name, LinkageName, F, LineNo, Ty,
+ isLocalToUnit, Val);
}
/// CreateGlobalVariable - Create a new descriptor for the specified constant.
@@ -1298,7 +1288,8 @@ DIGlobalVariable DIFactory::CreateGlobal
DIVariable DIFactory::CreateVariable(
unsigned Tag, DIDescriptor Context, StringRef Name, DIFile F,
unsigned LineNo, DIType Ty, bool AlwaysPreserve, unsigned Flags) {
- return Builder.createLocalVariable(Tag, Context, Name, F, LineNo, Ty, AlwaysPreserve, Flags);
+ return Builder.createLocalVariable(Tag, Context, Name, F, LineNo, Ty,
+ AlwaysPreserve, Flags);
}
/// CreateComplexVariable - Create a new descriptor for the specified variable
@@ -1306,7 +1297,8 @@ DIVariable DIFactory::CreateVariable(
DIVariable DIFactory::CreateComplexVariable(
unsigned Tag, DIDescriptor Context, StringRef Name, DIFile F,
unsigned LineNo, DIType Ty, Value *const *Addr, unsigned NumAddr) {
- return Builder.createComplexVariable(Tag, Context, Name, F, LineNo, Ty, ArrayRef<Value *>(Addr, NumAddr));
+ return Builder.createComplexVariable(Tag, Context, Name, F, LineNo, Ty,
+ ArrayRef<Value *>(Addr, NumAddr));
}
/// CreateBlock - This creates a descriptor for a lexical block with the
@@ -1396,6 +1388,4 @@ Instruction *DIFactory::InsertDbgValueIn
// RecordType - Record DIType in a module such that it is not lost even if
// it is not referenced through debug info anchors.
-void DIFactory::RecordType(DIType T) {
- Builder.retainType(T);
-}
+void DIFactory::RecordType(DIType T) { Builder.retainType(T); }
Modified: dragonegg/trunk/src/DefaultABI.cpp
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/src/DefaultABI.cpp?rev=176016&r1=176015&r2=176016&view=diff
==============================================================================
--- dragonegg/trunk/src/DefaultABI.cpp (original)
+++ dragonegg/trunk/src/DefaultABI.cpp Mon Feb 25 04:54:25 2013
@@ -110,9 +110,9 @@ tree isSingleElementStructOrArray(tree t
return 0; // More than one field.
}
}
- return FoundField ?
- isSingleElementStructOrArray(FoundField, ignoreZeroLength, false) :
- 0;
+ return FoundField
+ ? isSingleElementStructOrArray(FoundField, ignoreZeroLength, false)
+ : 0;
case ARRAY_TYPE:
ArrayType *Ty = dyn_cast<ArrayType>(ConvertType(type));
if (!Ty || Ty->getNumElements() != 1)
@@ -165,11 +165,11 @@ void DefaultABI::HandleReturnType(tree t
} else {
// Otherwise return as an integer value large enough to hold the entire
// aggregate.
- if (Type *AggrTy = LLVM_AGGR_TYPE_FOR_STRUCT_RETURN(type,
- C.getCallingConv()))
+ if (Type *AggrTy =
+ LLVM_AGGR_TYPE_FOR_STRUCT_RETURN(type, C.getCallingConv()))
C.HandleAggregateResultAsAggregate(AggrTy);
- else if (Type *ScalarTy = LLVM_SCALAR_TYPE_FOR_STRUCT_RETURN(type,
- &Offset))
+ else if (Type *ScalarTy =
+ LLVM_SCALAR_TYPE_FOR_STRUCT_RETURN(type, &Offset))
C.HandleAggregateResultAsScalar(ScalarTy, Offset);
else
llvm_unreachable("Unable to determine how to return this aggregate!");
@@ -231,9 +231,9 @@ void DefaultABI::HandleArgument(tree typ
} else if (LLVM_SHOULD_PASS_AGGREGATE_AS_FCA(type, Ty)) {
C.HandleFCAArgument(Ty, type);
} else if (LLVM_SHOULD_PASS_AGGREGATE_IN_MIXED_REGS(
- type, Ty, C.getCallingConv(), Elts)) {
+ type, Ty, C.getCallingConv(), Elts)) {
if (!LLVM_AGGREGATE_PARTIALLY_PASSED_IN_REGS(
- Elts, ScalarElts, C.isShadowReturn(), C.getCallingConv()))
+ Elts, ScalarElts, C.isShadowReturn(), C.getCallingConv()))
PassInMixedRegisters(Ty, Elts, ScalarElts);
else {
C.HandleByValArgument(Ty, type);
@@ -347,8 +347,8 @@ void DefaultABI::PassInIntegerRegisters(
// don't bitcast aggregate value to Int64 if its alignment is different
// from Int64 alignment. ARM backend needs this.
unsigned Align = TYPE_ALIGN(type) / 8;
- unsigned Int64Align = getDataLayout().getABITypeAlignment(
- Type::getInt64Ty(getGlobalContext()));
+ unsigned Int64Align =
+ getDataLayout().getABITypeAlignment(Type::getInt64Ty(getGlobalContext()));
bool UseInt64 = (DontCheckAlignment || Align >= Int64Align);
unsigned ElementSize = UseInt64 ? 8 : 4;
@@ -359,8 +359,8 @@ void DefaultABI::PassInIntegerRegisters(
Type *ArrayElementType = NULL;
if (ArraySize) {
Size = Size % ElementSize;
- ArrayElementType = (UseInt64 ? Type::getInt64Ty(getGlobalContext()) :
- Type::getInt32Ty(getGlobalContext()));
+ ArrayElementType = (UseInt64 ? Type::getInt64Ty(getGlobalContext())
+ : Type::getInt32Ty(getGlobalContext()));
ATy = ArrayType::get(ArrayElementType, ArraySize);
}
@@ -417,9 +417,9 @@ void DefaultABI::PassInMixedRegisters(Ty
// that occupies storage but has no useful information, and is not passed
// anywhere". Happens on x86-64.
std::vector<Type *> Elts(OrigElts);
- Type *wordType = getDataLayout().getPointerSize(0) == 4 ?
- Type::getInt32Ty(getGlobalContext()) :
- Type::getInt64Ty(getGlobalContext());
+ Type *wordType = getDataLayout().getPointerSize(0) == 4
+ ? Type::getInt32Ty(getGlobalContext())
+ : Type::getInt64Ty(getGlobalContext());
for (unsigned i = 0, e = Elts.size(); i != e; ++i)
if (OrigElts[i]->isVoidTy())
Elts[i] = wordType;
@@ -437,8 +437,8 @@ void DefaultABI::PassInMixedRegisters(Ty
unsigned N = STy->getNumElements();
llvm::Type *LastEltTy = STy->getElementType(N - 1);
if (LastEltTy->isIntegerTy())
- LastEltSizeDiff = getDataLayout().getTypeAllocSize(LastEltTy) -
- (Size - InSize);
+ LastEltSizeDiff =
+ getDataLayout().getTypeAllocSize(LastEltTy) - (Size - InSize);
}
}
for (unsigned i = 0, e = Elts.size(); i != e; ++i) {
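For reference, the PassInIntegerRegisters logic above uses 8-byte units only when the aggregate is aligned at least as strictly as i64, otherwise falls back to 4-byte units, and tracks the leftover tail. A minimal standalone sketch of that choice (plain C++, names are illustrative, not dragonegg code):

  #include <cstdio>

  // Split an aggregate of 'sizeInBytes' into whole integer units plus a tail,
  // choosing i64 units only when the aggregate's alignment permits it.
  static void splitAggregate(unsigned sizeInBytes, unsigned alignInBytes,
                             unsigned int64AbiAlign) {
    const bool useInt64 = alignInBytes >= int64AbiAlign;
    const unsigned elementSize = useInt64 ? 8 : 4;
    const unsigned wholeUnits = sizeInBytes / elementSize;
    const unsigned tailBytes = sizeInBytes % elementSize;
    std::printf("%u whole i%u units, %u tail bytes\n", wholeUnits,
                elementSize * 8, tailBytes);
  }

  int main() {
    splitAggregate(20, 4, 8); // 5 whole i32 units, 0 tail bytes
    splitAggregate(20, 8, 8); // 2 whole i64 units, 4 tail bytes
  }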
Modified: dragonegg/trunk/src/Trees.cpp
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/src/Trees.cpp?rev=176016&r1=176015&r2=176016&view=diff
==============================================================================
--- dragonegg/trunk/src/Trees.cpp (original)
+++ dragonegg/trunk/src/Trees.cpp Mon Feb 25 04:54:25 2013
@@ -53,8 +53,8 @@ using namespace llvm;
/// concatIfNotEmpty - Concatenate the given strings if they are both non-empty.
/// Otherwise return the empty string.
-static std::string concatIfNotEmpty(const std::string &Left,
- const std::string &Right) {
+static std::string
+concatIfNotEmpty(const std::string &Left, const std::string &Right) {
if (Left.empty() || Right.empty())
return std::string();
return Left + Right;
@@ -156,9 +156,9 @@ APInt getAPIntValue(const_tree exp, unsi
assert(integerPartWidth == 2 * HOST_BITS_PER_WIDE_INT &&
"Unsupported host integer width!");
unsigned ShiftAmt = HOST_BITS_PER_WIDE_INT;
- integerPart Part = integerPart((unsigned HOST_WIDE_INT) val.low) +
- (integerPart((unsigned HOST_WIDE_INT) val.high)
- << ShiftAmt);
+ integerPart Part =
+ integerPart((unsigned HOST_WIDE_INT) val.low) +
+ (integerPart((unsigned HOST_WIDE_INT) val.high) << ShiftAmt);
DefaultValue = APInt(DefaultWidth, Part);
}
@@ -166,8 +166,8 @@ APInt getAPIntValue(const_tree exp, unsi
return DefaultValue;
if (Bitwidth > DefaultWidth)
- return TYPE_UNSIGNED(TREE_TYPE(exp)) ? DefaultValue.zext(Bitwidth) :
- DefaultValue.sext(Bitwidth);
+ return TYPE_UNSIGNED(TREE_TYPE(exp)) ? DefaultValue.zext(Bitwidth)
+ : DefaultValue.sext(Bitwidth);
assert((TYPE_UNSIGNED(TREE_TYPE(exp)) ||
DefaultValue.trunc(Bitwidth).sext(DefaultWidth) == DefaultValue) &&
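The getAPIntValue hunk above glues the two HOST_WIDE_INT halves of a double_int back together by shifting the high half up by the host word width. The same recombination with plain 32-bit halves, as a self-contained sketch (not dragonegg code):

  #include <cstdint>
  #include <cstdio>

  int main() {
    const uint32_t low = 0xdeadbeefu;
    const uint32_t high = 0x00000001u;
    const unsigned shiftAmt = 32; // host word width in this sketch
    const uint64_t part = uint64_t(low) | (uint64_t(high) << shiftAmt);
    std::printf("0x%016llx\n",
                (unsigned long long) part); // 0x00000001deadbeef
  }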
Modified: dragonegg/trunk/src/TypeConversion.cpp
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/src/TypeConversion.cpp?rev=176016&r1=176015&r2=176016&view=diff
==============================================================================
--- dragonegg/trunk/src/TypeConversion.cpp (original)
+++ dragonegg/trunk/src/TypeConversion.cpp Mon Feb 25 04:54:25 2013
@@ -85,8 +85,8 @@ class ContainedTypeIterator {
public:
/// Dereference operator.
tree operator*() {
- return isa<TREE_LIST>(type_ref) ? TREE_VALUE(type_ref) :
- TREE_TYPE(type_ref);
+ return isa<TREE_LIST>(type_ref) ? TREE_VALUE(type_ref)
+ : TREE_TYPE(type_ref);
}
;
@@ -465,8 +465,9 @@ Type *getRegType(tree type) {
case REFERENCE_TYPE: {
// void* -> byte*
unsigned AS = TYPE_ADDR_SPACE(type);
- return isa<VOID_TYPE>(TREE_TYPE(type)) ? GetUnitPointerType(Context, AS) :
- ConvertType(TREE_TYPE(type))->getPointerTo(AS);
+ return isa<VOID_TYPE>(TREE_TYPE(type))
+ ? GetUnitPointerType(Context, AS)
+ : ConvertType(TREE_TYPE(type))->getPointerTo(AS);
}
case REAL_TYPE:
@@ -680,8 +681,8 @@ FunctionType *ConvertArgListToFnType(
#endif
if (RAttrBuilder.hasAttributes())
- Attrs.push_back(AttributeSet::get(Context, AttributeSet::ReturnIndex,
- RAttrBuilder));
+ Attrs.push_back(
+ AttributeSet::get(Context, AttributeSet::ReturnIndex, RAttrBuilder));
// If this function returns via a shadow argument, the dest loc is passed
// in as a pointer. Mark that pointer as struct-ret and noalias.
@@ -715,17 +716,16 @@ FunctionType *ConvertArgListToFnType(
PAttrBuilder.addAttribute(Attribute::NoAlias);
if (PAttrBuilder.hasAttributes())
- Attrs.push_back(AttributeSet::get(Context, ArgTys.size(),
- PAttrBuilder));
+ Attrs.push_back(AttributeSet::get(Context, ArgTys.size(), PAttrBuilder));
}
PAL = AttributeSet::get(Context, Attrs);
return FunctionType::get(RetTy, ArgTys, false);
}
-FunctionType *ConvertFunctionType(tree type, tree decl, tree static_chain,
- CallingConv::ID &CallingConv,
- AttributeSet &PAL) {
+FunctionType *
+ConvertFunctionType(tree type, tree decl, tree static_chain,
+ CallingConv::ID &CallingConv, AttributeSet &PAL) {
Type *RetTy = Type::getVoidTy(Context);
SmallVector<Type *, 8> ArgTypes;
FunctionTypeConversion Client(RetTy, ArgTypes, CallingConv,
@@ -795,8 +795,8 @@ FunctionType *ConvertFunctionType(tree t
RAttrBuilder.addAttribute(Attribute::NoAlias);
if (RAttrBuilder.hasAttributes())
- Attrs.push_back(AttributeSet::get(Context, AttributeSet::ReturnIndex,
- RAttrBuilder));
+ Attrs.push_back(
+ AttributeSet::get(Context, AttributeSet::ReturnIndex, RAttrBuilder));
// If this function returns via a shadow argument, the dest loc is passed
// in as a pointer. Mark that pointer as struct-ret and noalias.
@@ -811,8 +811,8 @@ FunctionType *ConvertFunctionType(tree t
// Pass the static chain as the first parameter.
ABIConverter.HandleArgument(TREE_TYPE(static_chain), ScalarArgs);
// Mark it as the chain argument.
- Attrs.push_back(AttributeSet::get(Context, ArgTypes.size(),
- Attribute::Nest));
+ Attrs.push_back(
+ AttributeSet::get(Context, ArgTypes.size(), Attribute::Nest));
}
#ifdef LLVM_TARGET_ENABLE_REGPARM
@@ -898,8 +898,8 @@ FunctionType *ConvertFunctionType(tree t
assert(RetTy && "Return type not specified!");
if (FnAttrBuilder.hasAttributes())
- Attrs.push_back(AttributeSet::get(Context, AttributeSet::FunctionIndex,
- FnAttrBuilder));
+ Attrs.push_back(
+ AttributeSet::get(Context, AttributeSet::FunctionIndex, FnAttrBuilder));
// Finally, make the function type and result attributes.
PAL = AttributeSet::get(Context, Attrs);
@@ -1044,8 +1044,8 @@ public:
// If the type is something like i17 then round it up to a multiple of a
// byte. This is not needed for correctness, but helps the optimizers.
if ((Ty->getPrimitiveSizeInBits() % BITS_PER_UNIT) != 0) {
- unsigned BitWidth = RoundUpToAlignment(Ty->getPrimitiveSizeInBits(),
- BITS_PER_UNIT);
+ unsigned BitWidth =
+ RoundUpToAlignment(Ty->getPrimitiveSizeInBits(), BITS_PER_UNIT);
Ty = IntegerType::get(Context, BitWidth);
if (isSafeToReturnContentsDirectly(DL))
return Ty;
@@ -1093,8 +1093,8 @@ static Type *ConvertRecordTypeRecursive(
// Get the size of the type in bits. If the type has variable or ginormous
// size then it is convenient to pretend it is "infinitely" big.
- uint64_t TypeSize = isInt64(TYPE_SIZE(type), true) ?
- getInt64(TYPE_SIZE(type), true) : ~0UL;
+ uint64_t TypeSize =
+ isInt64(TYPE_SIZE(type), true) ? getInt64(TYPE_SIZE(type), true) : ~0UL;
// Record all interesting fields so they can easily be visited backwards.
SmallVector<tree, 16> Fields;
@@ -1353,8 +1353,8 @@ static Type *ConvertTypeRecursive(tree t
CallingConv::ID CallingConv;
AttributeSet PAL;
// No declaration to pass through, passing NULL.
- return RememberTypeConversion(type, ConvertFunctionType(type, NULL, NULL,
- CallingConv, PAL));
+ return RememberTypeConversion(
+ type, ConvertFunctionType(type, NULL, NULL, CallingConv, PAL));
}
case POINTER_TYPE:
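The RoundUpToAlignment call above widens an odd-sized integer such as i17 to a whole number of bytes purely as a convenience for the optimizers. A tiny sketch of that rounding (plain C++, illustrative only, not the LLVM helper itself):

  #include <cstdio>

  // Round 'value' up to the next multiple of 'align'.
  static unsigned roundUpToAlignment(unsigned value, unsigned align) {
    return (value + align - 1) / align * align;
  }

  int main() {
    std::printf("%u\n", roundUpToAlignment(17, 8)); // 24: i17 becomes i24
  }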
Modified: dragonegg/trunk/src/arm/Target.cpp
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/src/arm/Target.cpp?rev=176016&r1=176015&r2=176016&view=diff
==============================================================================
--- dragonegg/trunk/src/arm/Target.cpp (original)
+++ dragonegg/trunk/src/arm/Target.cpp Mon Feb 25 04:54:25 2013
@@ -78,8 +78,8 @@ enum arm_fdts {
static bool vfp_arg_homogeneous_aggregate_p(enum machine_mode mode, tree type,
int *fdt_counts) {
bool result = false;
- HOST_WIDE_INT bytes = (mode == BLKmode) ? int_size_in_bytes(type) :
- (int) GET_MODE_SIZE(mode);
+ HOST_WIDE_INT bytes =
+ (mode == BLKmode) ? int_size_in_bytes(type) : (int) GET_MODE_SIZE(mode);
if (type && isa<AGGREGATE_TYPE>(type)) {
int i;
@@ -114,9 +114,8 @@ static bool vfp_arg_homogeneous_aggregat
if (TREE_TYPE(field) == error_mark_node)
continue;
- result = vfp_arg_homogeneous_aggregate_p(TYPE_MODE(TREE_TYPE(field)),
- TREE_TYPE(field),
- fdt_counts);
+ result = vfp_arg_homogeneous_aggregate_p(
+ TYPE_MODE(TREE_TYPE(field)), TREE_TYPE(field), fdt_counts);
if (!result)
return false;
}
@@ -128,9 +127,8 @@ static bool vfp_arg_homogeneous_aggregat
{
int array_fdt_counts[ARM_FDT_MAX] = { 0 };
- result = vfp_arg_homogeneous_aggregate_p(TYPE_MODE(TREE_TYPE(type)),
- TREE_TYPE(type),
- array_fdt_counts);
+ result = vfp_arg_homogeneous_aggregate_p(
+ TYPE_MODE(TREE_TYPE(type)), TREE_TYPE(type), array_fdt_counts);
cnt = bytes / int_size_in_bytes(TREE_TYPE(type));
for (i = 0; i < ARM_FDT_MAX; ++i)
@@ -165,8 +163,8 @@ static bool vfp_arg_homogeneous_aggregat
if (union_field_fdt_counts[i] > 4) // bail early if we can
return false;
- union_fdt_counts[i] = MAX(union_fdt_counts[i],
- union_field_fdt_counts[i]);
+ union_fdt_counts[i] =
+ MAX(union_fdt_counts[i], union_field_fdt_counts[i]);
union_field_fdt_counts[i] = 0; // clear it out for next iter
}
}
@@ -216,22 +214,24 @@ static bool vfp_arg_homogeneous_aggregat
switch (TREE_CODE(type)) {
case REAL_TYPE:
- idx = (TYPE_PRECISION(type) == 32) ? ARM_FDT_FLOAT :
- ((TYPE_PRECISION(type) == 64) ? ARM_FDT_DOUBLE : ARM_FDT_INVALID);
+ idx = (TYPE_PRECISION(type) == 32)
+ ? ARM_FDT_FLOAT
+ : ((TYPE_PRECISION(type) == 64) ? ARM_FDT_DOUBLE : ARM_FDT_INVALID);
cnt = 1;
break;
case COMPLEX_TYPE: {
tree subtype = TREE_TYPE(type);
- idx = (TYPE_PRECISION(subtype) == 32) ? ARM_FDT_FLOAT :
- ((TYPE_PRECISION(subtype) == 64) ? ARM_FDT_DOUBLE :
- ARM_FDT_INVALID);
+ idx = (TYPE_PRECISION(subtype) == 32)
+ ? ARM_FDT_FLOAT
+ : ((TYPE_PRECISION(subtype) == 64) ? ARM_FDT_DOUBLE
+ : ARM_FDT_INVALID);
cnt = 2;
} break;
case VECTOR_TYPE:
- idx = (bytes == 8) ? ARM_FDT_VECTOR_64 : (bytes == 16) ?
- ARM_FDT_VECTOR_128 : ARM_FDT_INVALID;
+ idx = (bytes == 8) ? ARM_FDT_VECTOR_64
+ : (bytes == 16) ? ARM_FDT_VECTOR_128 : ARM_FDT_INVALID;
cnt = 1;
break;
@@ -558,8 +558,8 @@ void llvm_arm_extract_multiple_return_va
unsigned i = 0;
unsigned Size = 1;
- if (const VectorType *SElemTy = dyn_cast<VectorType>(
- STy->getElementType(SNO))) {
+ if (const VectorType *SElemTy =
+ dyn_cast<VectorType>(STy->getElementType(SNO))) {
Size = SElemTy->getNumElements();
}
while (i < Size) {
Modified: dragonegg/trunk/src/x86/Target.cpp
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/src/x86/Target.cpp?rev=176016&r1=176015&r2=176016&view=diff
==============================================================================
--- dragonegg/trunk/src/x86/Target.cpp (original)
+++ dragonegg/trunk/src/x86/Target.cpp Mon Feb 25 04:54:25 2013
@@ -146,8 +146,8 @@ bool TreeToLLVM::TargetIntrinsicLower(
Handler = UnsupportedBuiltin;
const char *Identifier = IDENTIFIER_POINTER(DECL_NAME(fndecl));
HandlerEntry ToFind = { Identifier, SearchForHandler };
- const HandlerEntry *E = std::lower_bound(Handlers, Handlers + N, ToFind,
- HandlerLT);
+ const HandlerEntry *E =
+ std::lower_bound(Handlers, Handlers + N, ToFind, HandlerLT);
if ((E < Handlers + N) && !strcmp(E->Name, ToFind.Name))
Handler = E->Handler;
}
@@ -290,9 +290,9 @@ bool TreeToLLVM::TargetIntrinsicLower(
case shufps:
if (ConstantInt *Elt = dyn_cast<ConstantInt>(Ops[2])) {
int EV = Elt->getZExtValue();
- Result = BuildVectorShuffle(Ops[0], Ops[1], ((EV & 0x03) >> 0),
- ((EV & 0x0c) >> 2), ((EV & 0x30) >> 4) +
- 4, ((EV & 0xc0) >> 6) + 4);
+ Result = BuildVectorShuffle(
+ Ops[0], Ops[1], ((EV & 0x03) >> 0), ((EV & 0x0c) >> 2),
+ ((EV & 0x30) >> 4) + 4, ((EV & 0xc0) >> 6) + 4);
} else {
error_at(gimple_location(stmt), "mask must be an immediate");
Result = Ops[0];
@@ -324,9 +324,9 @@ bool TreeToLLVM::TargetIntrinsicLower(
if (ConstantInt *Elt = dyn_cast<ConstantInt>(Ops[1])) {
int EV = Elt->getZExtValue();
Result = BuildVectorShuffle(
- Ops[0], Ops[0], 0, 1, 2, 3, ((EV & 0x03) >> 0) + 4,
- ((EV & 0x0c) >> 2) + 4, ((EV & 0x30) >> 4) + 4,
- ((EV & 0xc0) >> 6) + 4);
+ Ops[0], Ops[0], 0, 1, 2, 3, ((EV & 0x03) >> 0) + 4,
+ ((EV & 0x0c) >> 2) + 4, ((EV & 0x30) >> 4) + 4,
+ ((EV & 0xc0) >> 6) + 4);
return true;
}
return false;
@@ -472,8 +472,8 @@ bool TreeToLLVM::TargetIntrinsicLower(
PointerType *f64Ptr = Type::getDoublePtrTy(Context);
Ops[1] = Builder.CreateBitCast(Ops[1], f64Ptr);
Value *Load = Builder.CreateLoad(Ops[1]);
- Ops[1] = BuildVector(Load, UndefValue::get(Type::getDoubleTy(Context)),
- NULL);
+ Ops[1] =
+ BuildVector(Load, UndefValue::get(Type::getDoubleTy(Context)), NULL);
Ops[1] = Builder.CreateBitCast(Ops[1], ResultType);
Result = BuildVectorShuffle(Ops[0], Ops[1], 0, 1, 4, 5);
Result = Builder.CreateBitCast(Result, ResultType);
@@ -483,8 +483,8 @@ bool TreeToLLVM::TargetIntrinsicLower(
PointerType *f64Ptr = Type::getDoublePtrTy(Context);
Ops[1] = Builder.CreateBitCast(Ops[1], f64Ptr);
Value *Load = Builder.CreateLoad(Ops[1]);
- Ops[1] = BuildVector(Load, UndefValue::get(Type::getDoubleTy(Context)),
- NULL);
+ Ops[1] =
+ BuildVector(Load, UndefValue::get(Type::getDoubleTy(Context)), NULL);
Ops[1] = Builder.CreateBitCast(Ops[1], ResultType);
Result = BuildVectorShuffle(Ops[0], Ops[1], 4, 5, 2, 3);
Result = Builder.CreateBitCast(Result, ResultType);
@@ -494,8 +494,8 @@ bool TreeToLLVM::TargetIntrinsicLower(
PointerType *f64Ptr = Type::getDoublePtrTy(Context);
Ops[1] = Builder.CreateBitCast(Ops[1], f64Ptr);
Value *Load = Builder.CreateLoad(Ops[1]);
- Ops[1] = BuildVector(Load, UndefValue::get(Type::getDoubleTy(Context)),
- NULL);
+ Ops[1] =
+ BuildVector(Load, UndefValue::get(Type::getDoubleTy(Context)), NULL);
Ops[1] = Builder.CreateBitCast(Ops[1], ResultType);
Result = BuildVectorShuffle(Ops[0], Ops[1], 0, 2);
Result = Builder.CreateBitCast(Result, ResultType);
@@ -505,8 +505,8 @@ bool TreeToLLVM::TargetIntrinsicLower(
PointerType *f64Ptr = Type::getDoublePtrTy(Context);
Ops[1] = Builder.CreateBitCast(Ops[1], f64Ptr);
Value *Load = Builder.CreateLoad(Ops[1]);
- Ops[1] = BuildVector(Load, UndefValue::get(Type::getDoubleTy(Context)),
- NULL);
+ Ops[1] =
+ BuildVector(Load, UndefValue::get(Type::getDoubleTy(Context)), NULL);
Ops[1] = Builder.CreateBitCast(Ops[1], ResultType);
Result = BuildVectorShuffle(Ops[0], Ops[1], 2, 1);
Result = Builder.CreateBitCast(Result, ResultType);
@@ -627,8 +627,8 @@ bool TreeToLLVM::TargetIntrinsicLower(
PredCode = 7;
goto CMPXXPS;
CMPXXPS : {
- Function *cmpps = Intrinsic::getDeclaration(TheModule,
- Intrinsic::x86_sse_cmp_ps);
+ Function *cmpps =
+ Intrinsic::getDeclaration(TheModule, Intrinsic::x86_sse_cmp_ps);
Value *Pred = ConstantInt::get(Type::getInt8Ty(Context), PredCode);
Value *Arg0 = Ops[0];
Value *Arg1 = Ops[1];
@@ -664,8 +664,8 @@ bool TreeToLLVM::TargetIntrinsicLower(
PredCode = 7;
goto CMPXXSS;
CMPXXSS : {
- Function *cmpss = Intrinsic::getDeclaration(TheModule,
- Intrinsic::x86_sse_cmp_ss);
+ Function *cmpss =
+ Intrinsic::getDeclaration(TheModule, Intrinsic::x86_sse_cmp_ss);
Value *Pred = ConstantInt::get(Type::getInt8Ty(Context), PredCode);
Value *CallOps[3] = { Ops[0], Ops[1], Pred };
Result = Builder.CreateCall(cmpss, CallOps);
@@ -713,8 +713,8 @@ bool TreeToLLVM::TargetIntrinsicLower(
PredCode = 7;
goto CMPXXPD;
CMPXXPD : {
- Function *cmppd = Intrinsic::getDeclaration(TheModule,
- Intrinsic::x86_sse2_cmp_pd);
+ Function *cmppd =
+ Intrinsic::getDeclaration(TheModule, Intrinsic::x86_sse2_cmp_pd);
Value *Pred = ConstantInt::get(Type::getInt8Ty(Context), PredCode);
Value *Arg0 = Ops[0];
Value *Arg1 = Ops[1];
@@ -751,8 +751,8 @@ bool TreeToLLVM::TargetIntrinsicLower(
PredCode = 7;
goto CMPXXSD;
CMPXXSD : {
- Function *cmpsd = Intrinsic::getDeclaration(TheModule,
- Intrinsic::x86_sse2_cmp_sd);
+ Function *cmpsd =
+ Intrinsic::getDeclaration(TheModule, Intrinsic::x86_sse2_cmp_sd);
Value *Pred = ConstantInt::get(Type::getInt8Ty(Context), PredCode);
Value *CallOps[3] = { Ops[0], Ops[1], Pred };
Result = Builder.CreateCall(cmpsd, CallOps);
@@ -760,8 +760,8 @@ bool TreeToLLVM::TargetIntrinsicLower(
return true;
}
case ldmxcsr: {
- Function *ldmxcsr = Intrinsic::getDeclaration(TheModule,
- Intrinsic::x86_sse_ldmxcsr);
+ Function *ldmxcsr =
+ Intrinsic::getDeclaration(TheModule, Intrinsic::x86_sse_ldmxcsr);
Value *Ptr = CreateTemporary(Type::getInt32Ty(Context));
Builder.CreateStore(Ops[0], Ptr);
Ptr = Builder.CreateBitCast(Ptr, Type::getInt8PtrTy(Context));
@@ -769,8 +769,8 @@ bool TreeToLLVM::TargetIntrinsicLower(
return true;
}
case stmxcsr: {
- Function *stmxcsr = Intrinsic::getDeclaration(TheModule,
- Intrinsic::x86_sse_stmxcsr);
+ Function *stmxcsr =
+ Intrinsic::getDeclaration(TheModule, Intrinsic::x86_sse_stmxcsr);
Value *Ptr = CreateTemporary(Type::getInt32Ty(Context));
Value *BPtr = Builder.CreateBitCast(Ptr, Type::getInt8PtrTy(Context));
Builder.CreateCall(stmxcsr, BPtr);
@@ -817,10 +817,10 @@ bool TreeToLLVM::TargetIntrinsicLower(
Ops[1] = Builder.CreateBitCast(Ops[1], MMXTy);
// create i32 constant
- Function *F = Intrinsic::getDeclaration(TheModule,
- Intrinsic::x86_mmx_psrl_q);
- Result = Builder.CreateCall(F, ArrayRef<Value *>(&Ops[0], 2),
- "palignr");
+ Function *F =
+ Intrinsic::getDeclaration(TheModule, Intrinsic::x86_mmx_psrl_q);
+ Result =
+ Builder.CreateCall(F, ArrayRef<Value *>(&Ops[0], 2), "palignr");
Result = Builder.CreateBitCast(Result, ResultType);
return true;
}
@@ -872,10 +872,10 @@ bool TreeToLLVM::TargetIntrinsicLower(
Ops[1] = ConstantInt::get(IntTy, (shiftVal - 16) * 8);
// create i32 constant
- Function *F = Intrinsic::getDeclaration(TheModule,
- Intrinsic::x86_sse2_psrl_dq);
- Result = Builder.CreateCall(F, ArrayRef<Value *>(&Ops[0], 2),
- "palignr");
+ Function *F =
+ Intrinsic::getDeclaration(TheModule, Intrinsic::x86_sse2_psrl_dq);
+ Result =
+ Builder.CreateCall(F, ArrayRef<Value *>(&Ops[0], 2), "palignr");
Result = Builder.CreateBitCast(Result, ResultType);
return true;
}
@@ -904,7 +904,7 @@ bool TreeToLLVM::TargetIntrinsicLower(
// Convert the type of the pointer to a pointer to the stored type.
unsigned AS = Ops[0]->getType()->getPointerAddressSpace();
Value *Ptr = Builder.CreateBitCast(
- Ops[0], PointerType::get(Ops[1]->getType(), AS), "cast");
+ Ops[0], PointerType::get(Ops[1]->getType(), AS), "cast");
StoreInst *SI = Builder.CreateAlignedStore(Ops[1], Ptr, 16);
SI->setMetadata(TheModule->getMDKindID("nontemporal"), Node);
@@ -913,8 +913,8 @@ bool TreeToLLVM::TargetIntrinsicLower(
case rsqrtf: {
// rsqrtss with a Newton-Raphson step to improve accuracy:
// rsqrtf(x) = rsqrtss(x) * -0.5 * (rsqrtss(x) * x * rsqrtss(x) - 3.0)
- Function *rsqrtss = Intrinsic::getDeclaration(TheModule,
- Intrinsic::x86_sse_rsqrt_ss);
+ Function *rsqrtss =
+ Intrinsic::getDeclaration(TheModule, Intrinsic::x86_sse_rsqrt_ss);
// As rsqrtss is declared as taking a <4 x float> operand, mulch the operand
// into a vector.
Value *X = Ops[0];
@@ -930,9 +930,9 @@ bool TreeToLLVM::TargetIntrinsicLower(
R = Builder.CreateFPExt(R, X->getType()); // rsqrtss(x)
// Perform the Newton-Raphson step.
- Value *RHS = Builder.CreateFAdd(Builder.CreateFMul(Builder.CreateFMul(R, X),
- R),
- ConstantFP::get(X->getType(), -3.0));
+ Value *RHS =
+ Builder.CreateFAdd(Builder.CreateFMul(Builder.CreateFMul(R, X), R),
+ ConstantFP::get(X->getType(), -3.0));
Value *LHS = Builder.CreateFMul(R, ConstantFP::get(X->getType(), -0.5));
Result = Builder.CreateFMul(LHS, RHS);
return true;
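
[Aside on the rsqrtf/rsqrtps_nr lowering above, not part of this patch: the comment's formula r' = r * -0.5 * (r*x*r - 3.0) is the standard Newton-Raphson refinement for an approximate reciprocal square root; one step roughly doubles the number of accurate bits in the rsqrtss/rsqrtps estimate. A minimal standalone C++ sketch, with a crude scalar guess standing in for the hardware estimate:

  #include <cmath>
  #include <cstdio>

  int main() {
    double x = 2.0;
    double r = 0.7;  // rough stand-in for the rsqrtss estimate of 1/sqrt(x)
    // One Newton-Raphson step, using the same expression as the lowering above.
    double refined = r * -0.5 * (r * x * r - 3.0);
    std::printf("estimate %f  refined %f  exact %f\n",
                r, refined, 1.0 / std::sqrt(x));
    return 0;
  }

With r = 0.7 the refined value is about 0.70686, versus the exact 0.70711, so a second multiply-based step (or the mulss/addss sequence emitted here) gets close to single-precision accuracy.]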
@@ -940,13 +940,13 @@ bool TreeToLLVM::TargetIntrinsicLower(
case rsqrtps_nr: {
// rsqrtps with a Newton-Raphson step to improve accuracy:
// rsqrtps_nr(x) = rsqrtps(x) * -0.5 * (rsqrtps(x) * x * rsqrtps(x) - 3.0)
- Function *rsqrtps = Intrinsic::getDeclaration(TheModule,
- Intrinsic::x86_sse_rsqrt_ps);
+ Function *rsqrtps =
+ Intrinsic::getDeclaration(TheModule, Intrinsic::x86_sse_rsqrt_ps);
Value *X = Ops[0]; // x
Value *R = Builder.CreateCall(rsqrtps, X); // rsqrtps(x)
- Value *RHS = Builder.CreateFAdd(Builder.CreateFMul(Builder.CreateFMul(R, X),
- R),
- ConstantFP::get(X->getType(), -3.0));
+ Value *RHS =
+ Builder.CreateFAdd(Builder.CreateFMul(Builder.CreateFMul(R, X), R),
+ ConstantFP::get(X->getType(), -3.0));
Value *LHS = Builder.CreateFMul(R, ConstantFP::get(X->getType(), -0.5));
Result = Builder.CreateFMul(LHS, RHS);
return true;
@@ -954,8 +954,8 @@ bool TreeToLLVM::TargetIntrinsicLower(
case sqrtps_nr: {
// Turn this into sqrtps without a Newton-Raphson step - sqrtps is already
// accurate enough.
- Function *sqrtps = Intrinsic::getDeclaration(TheModule,
- Intrinsic::x86_sse_sqrt_ps);
+ Function *sqrtps =
+ Intrinsic::getDeclaration(TheModule, Intrinsic::x86_sse_sqrt_ps);
Result = Builder.CreateCall(sqrtps, Ops[0]);
return true;
}
@@ -984,7 +984,7 @@ bool TreeToLLVM::TargetIntrinsicLower(
return false;
if (!MaskTy->getElementType()->isIntegerTy(32))
Mask = ConstantExpr::getIntegerCast(
- Mask, VectorType::get(Builder.getInt32Ty(), NElts), false);
+ Mask, VectorType::get(Builder.getInt32Ty(), NElts), false);
Result = Builder.CreateShuffleVector(Ops[0], Ops[1], Mask);
return true;
}
@@ -1016,8 +1016,8 @@ bool TreeToLLVM::TargetIntrinsicLower(
case pswapdsi: {
Type *MMXTy = Type::getX86_MMXTy(Context);
Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy);
- Function *pswapd = Intrinsic::getDeclaration(TheModule,
- Intrinsic::x86_3dnowa_pswapd);
+ Function *pswapd =
+ Intrinsic::getDeclaration(TheModule, Intrinsic::x86_3dnowa_pswapd);
Result = Builder.CreateCall(pswapd, Ops[0]);
Result = Builder.CreateBitCast(Result, ResultType);
return true;
@@ -1026,8 +1026,8 @@ bool TreeToLLVM::TargetIntrinsicLower(
// The value is usually passed in as an int rather than as a short.
Type *Int16Ty = Builder.getInt16Ty();
Result = Builder.CreateTruncOrBitCast(Ops[0], Int16Ty);
- Function *ctlz = Intrinsic::getDeclaration(TheModule, Intrinsic::ctlz,
- Int16Ty);
+ Function *ctlz =
+ Intrinsic::getDeclaration(TheModule, Intrinsic::ctlz, Int16Ty);
Result = Builder.CreateCall2(ctlz, Result, Builder.getTrue());
return true;
}
@@ -1035,8 +1035,8 @@ bool TreeToLLVM::TargetIntrinsicLower(
// The value is usually passed in as an int rather than as a short.
Type *Int16Ty = Builder.getInt16Ty();
Result = Builder.CreateTruncOrBitCast(Ops[0], Int16Ty);
- Function *cttz = Intrinsic::getDeclaration(TheModule, Intrinsic::cttz,
- Int16Ty);
+ Function *cttz =
+ Intrinsic::getDeclaration(TheModule, Intrinsic::cttz, Int16Ty);
Result = Builder.CreateCall2(cttz, Result, Builder.getTrue());
return true;
}
@@ -1119,8 +1119,9 @@ bool llvm_x86_32_should_pass_aggregate_i
// 32 and 64-bit integers are fine, as are float and double. Long double
// (which can be picked as the type for a union of 16 bytes) is not fine,
// as loads and stores of it get only 10 bytes.
- if (EltTy == Type::getInt32Ty(Context) || EltTy ==
- Type::getInt64Ty(Context) || EltTy == Type::getFloatTy(Context) ||
+ if (EltTy == Type::getInt32Ty(Context) ||
+ EltTy == Type::getInt64Ty(Context) ||
+ EltTy == Type::getFloatTy(Context) ||
EltTy == Type::getDoubleTy(Context) || EltTy->isPointerTy()) {
Elts.push_back(EltTy);
continue;
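
[Aside on the long double exclusion mentioned above, not part of this patch: on x86 the 16-byte slot can be typed as the 80-bit x87 extended format, which is padded out to 12 or 16 bytes in memory while its loads and stores only move 10 significant bytes, so copying an aggregate through that type would silently drop the padding bytes. A small illustrative program (assumes GCC/Clang on x86-64; __LDBL_MANT_DIG__ is the usual predefined macro):

  #include <cstdio>

  int main() {
    // Typically prints 16 for the size, but only 10 of those bytes
    // (64-bit mantissa + 16-bit exponent/sign) carry data.
    std::printf("sizeof(long double) = %zu\n", sizeof(long double));
    std::printf("mantissa bits       = %d\n", (int)__LDBL_MANT_DIG__);
    return 0;
  }
]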
@@ -1151,8 +1152,8 @@ bool llvm_x86_should_pass_aggregate_as_f
Type *EltTy = STy->getElementType(0);
return !((TARGET_64BIT &&
(EltTy->isIntegerTy() || EltTy == Type::getFloatTy(Context) ||
- EltTy == Type::getDoubleTy(Context))) ||
- EltTy->isIntegerTy(16) || EltTy->isIntegerTy(8));
+ EltTy == Type::getDoubleTy(Context))) || EltTy->isIntegerTy(16) ||
+ EltTy->isIntegerTy(8));
}
/* Target hook for llvm-abi.h. It returns true if an aggregate of the
@@ -1162,8 +1163,8 @@ bool llvm_x86_should_pass_aggregate_in_m
return false;
enum machine_mode Mode = type_natural_mode(TreeType, NULL);
- HOST_WIDE_INT Bytes = (Mode == BLKmode) ? int_size_in_bytes(TreeType) :
- (int) GET_MODE_SIZE(Mode);
+ HOST_WIDE_INT Bytes = (Mode == BLKmode) ? int_size_in_bytes(TreeType) : (int)
+ GET_MODE_SIZE(Mode);
// Zero sized array, struct, or class, not passed in memory.
if (Bytes == 0)
@@ -1265,8 +1266,8 @@ bool llvm_x86_64_should_pass_aggregate_i
enum x86_64_reg_class Class[MAX_CLASSES];
enum machine_mode Mode = type_natural_mode(TreeType, NULL);
bool totallyEmpty = true;
- HOST_WIDE_INT Bytes = (Mode == BLKmode) ? int_size_in_bytes(TreeType) :
- (int) GET_MODE_SIZE(Mode);
+ HOST_WIDE_INT Bytes = (Mode == BLKmode) ? int_size_in_bytes(TreeType) : (int)
+ GET_MODE_SIZE(Mode);
int NumClasses = classify_argument(Mode, TreeType, Class, 0);
if (!NumClasses)
return false;
@@ -1549,8 +1550,8 @@ Type *llvm_x86_scalar_type_for_struct_re
if (Class[0] == X86_64_INTEGERSI_CLASS ||
Class[0] == X86_64_INTEGER_CLASS) {
// one int register
- HOST_WIDE_INT Bytes = (Mode == BLKmode) ? int_size_in_bytes(type) :
- (int) GET_MODE_SIZE(Mode);
+ HOST_WIDE_INT Bytes = (Mode == BLKmode) ? int_size_in_bytes(type)
+ : (int) GET_MODE_SIZE(Mode);
if (Bytes > 4)
return Type::getInt64Ty(Context);
else if (Bytes > 2)
@@ -1607,8 +1608,8 @@ static void llvm_x86_64_get_multiple_ret
tree TreeType, Type */*Ty*/, std::vector<Type *> &Elts) {
enum x86_64_reg_class Class[MAX_CLASSES];
enum machine_mode Mode = type_natural_mode(TreeType, NULL);
- HOST_WIDE_INT Bytes = (Mode == BLKmode) ? int_size_in_bytes(TreeType) :
- (int) GET_MODE_SIZE(Mode);
+ HOST_WIDE_INT Bytes = (Mode == BLKmode) ? int_size_in_bytes(TreeType) : (int)
+ GET_MODE_SIZE(Mode);
int NumClasses = classify_argument(Mode, TreeType, Class, 0);
assert(NumClasses && "This type does not need multiple return registers!");
@@ -1859,8 +1860,8 @@ void llvm_x86_extract_multiple_return_va
unsigned i = 0;
unsigned Size = 1;
- if (VectorType *SElemTy = dyn_cast<
- VectorType>(STy->getElementType(SNO))) {
+ if (VectorType *SElemTy =
+ dyn_cast<VectorType>(STy->getElementType(SNO))) {
Size = SElemTy->getNumElements();
if (SElemTy->getElementType()->getTypeID() == Type::FloatTyID &&
Size == 4)
@@ -1896,8 +1897,8 @@ bool llvm_x86_should_pass_aggregate_in_i
if (NumClasses == 1 && (Class[0] == X86_64_INTEGER_CLASS ||
Class[0] == X86_64_INTEGERSI_CLASS)) {
// one int register
- HOST_WIDE_INT Bytes = (Mode == BLKmode) ? int_size_in_bytes(type) :
- (int) GET_MODE_SIZE(Mode);
+ HOST_WIDE_INT Bytes = (Mode == BLKmode) ? int_size_in_bytes(type) : (int)
+ GET_MODE_SIZE(Mode);
if (Bytes > 4)
*size = 8;
else if (Bytes > 2)