[llvm-commits] [dragonegg] r173245 - in /dragonegg/trunk/src: Aliasing.cpp Backend.cpp Cache.cpp ConstantConversion.cpp Convert.cpp Debug.cpp DefaultABI.cpp Trees.cpp TypeConversion.cpp arm/Target.cpp x86/Target.cpp
Duncan Sands
baldrick at free.fr
Wed Jan 23 01:54:28 PST 2013
Author: baldrick
Date: Wed Jan 23 03:54:28 2013
New Revision: 173245
URL: http://llvm.org/viewvc/llvm-project?rev=173245&view=rev
Log:
Reformat with the excellent clang-format.
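For anyone wanting to apply the same treatment to another tree: a reformat like this boils down to running clang-format over each source file. The sketch below is mine, not the exact command used for this commit; it assumes a clang-format binary on PATH whose builtin LLVM style matches what was applied here, and a -i flag for in-place editing (on builds without -i, redirect stdout to a temporary file and move it over the original).

  # Reformat every dragonegg source file in-place using LLVM style.
  for f in src/*.cpp src/arm/Target.cpp src/x86/Target.cpp; do
    clang-format -style=LLVM -i "$f"
  done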
Modified:
    dragonegg/trunk/src/Aliasing.cpp
    dragonegg/trunk/src/Backend.cpp
    dragonegg/trunk/src/Cache.cpp
    dragonegg/trunk/src/ConstantConversion.cpp
    dragonegg/trunk/src/Convert.cpp
    dragonegg/trunk/src/Debug.cpp
    dragonegg/trunk/src/DefaultABI.cpp
    dragonegg/trunk/src/Trees.cpp
    dragonegg/trunk/src/TypeConversion.cpp
    dragonegg/trunk/src/arm/Target.cpp
    dragonegg/trunk/src/x86/Target.cpp
Modified: dragonegg/trunk/src/Aliasing.cpp
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/src/Aliasing.cpp?rev=173245&r1=173244&r2=173245&view=diff
==============================================================================
--- dragonegg/trunk/src/Aliasing.cpp (original)
+++ dragonegg/trunk/src/Aliasing.cpp Wed Jan 23 03:54:28 2013
@@ -102,10 +102,10 @@
// For the moment we take a very simple approach: we only use the leaf nodes
// of GCC's DAG. This means that we do a good job for scalars and a poor job
// for record types, including complex types.
- static std::map<alias_set_type, MDNode*> NodeTags; // Node -> metadata map.
- static SmallVector<alias_set_type, 8> LeafNodes; // Current set of leaves.
+ static std::map<alias_set_type, MDNode *> NodeTags; // Node -> metadata map.
+ static SmallVector<alias_set_type, 8> LeafNodes; // Current set of leaves.
- std::map<alias_set_type, MDNode*>::iterator I = NodeTags.find(alias_set);
+ std::map<alias_set_type, MDNode *>::iterator I = NodeTags.find(alias_set);
if (I != NodeTags.end())
return I->second;
@@ -129,7 +129,7 @@
// If there is a path from any leaf node to this one then no longer consider
// that node to be a leaf.
- for (unsigned i = LeafNodes.size(); i; ) {
+ for (unsigned i = LeafNodes.size(); i;) {
alias_set_type leaf_set = LeafNodes[--i];
if (alias_set_subset_of(alias_set, leaf_set)) {
LeafNodes.erase(LeafNodes.begin() + i);
@@ -143,10 +143,10 @@
// Create metadata describing the new node hanging off root. The name doesn't
// matter much but needs to be unique for the compilation unit.
- tree type =
- TYPE_CANONICAL(TYPE_MAIN_VARIANT(isa<TYPE>(t) ? t : TREE_TYPE(t)));
- std::string TreeName = ("alias set " + Twine(alias_set) + ": " +
- getDescriptiveName(type)).str();
+ tree type = TYPE_CANONICAL(
+ TYPE_MAIN_VARIANT(isa<TYPE>(t) ? t : TREE_TYPE(t)));
+ std::string TreeName =
+ ("alias set " + Twine(alias_set) + ": " + getDescriptiveName(type)).str();
MDBuilder MDHelper(Context);
MDNode *AliasTag = MDHelper.createTBAANode(TreeName, getTBAARoot());
Modified: dragonegg/trunk/src/Backend.cpp
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/src/Backend.cpp?rev=173245&r1=173244&r2=173245&view=diff
==============================================================================
--- dragonegg/trunk/src/Backend.cpp (original)
+++ dragonegg/trunk/src/Backend.cpp Wed Jan 23 03:54:28 2013
@@ -87,7 +87,7 @@
#include "version.h"
// TODO: In GCC, add targhooks.h to the list of plugin headers and remove this.
-tree default_mangle_decl_assembler_name (tree, tree);
+tree default_mangle_decl_assembler_name(tree, tree);
#ifndef ENABLE_BUILD_WITH_CXX
} // extern "C"
#endif
@@ -131,10 +131,10 @@
static int LLVMCodeGenOptimizeArg = -1;
static int LLVMIROptimizeArg = -1;
-std::vector<std::pair<Constant*, int> > StaticCtors, StaticDtors;
-SmallSetVector<Constant*, 32> AttributeUsedGlobals;
-SmallSetVector<Constant*, 32> AttributeCompilerUsedGlobals;
-std::vector<Constant*> AttributeAnnotateGlobals;
+std::vector<std::pair<Constant *, int> > StaticCtors, StaticDtors;
+SmallSetVector<Constant *, 32> AttributeUsedGlobals;
+SmallSetVector<Constant *, 32> AttributeCompilerUsedGlobals;
+std::vector<Constant *> AttributeAnnotateGlobals;
/// PerFunctionPasses - This is the list of cleanup passes run per-function
/// as each is compiled. In cases where we are not doing IPO, it includes the
@@ -214,8 +214,8 @@
/// CodeGenOptLevel - The optimization level to be used by the code generators.
static CodeGenOpt::Level CodeGenOptLevel() {
- int OptLevel = LLVMCodeGenOptimizeArg >= 0 ?
- LLVMCodeGenOptimizeArg : optimize;
+ int OptLevel = LLVMCodeGenOptimizeArg >= 0 ? LLVMCodeGenOptimizeArg :
+ optimize;
if (OptLevel <= 0)
return CodeGenOpt::None;
if (OptLevel == 1)
@@ -278,7 +278,7 @@
const DataLayout *DL = TheTarget->getDataLayout();
unsigned Align = 8 * DL->getABITypeAlignment(Ty);
return TheTarget->getDataLayout()->getTypeAllocSizeInBits(Ty) ==
- ((gcc_size + Align - 1) / Align) * Align;
+ ((gcc_size + Align - 1) / Align) * Align;
}
#endif
@@ -287,20 +287,20 @@
#endif
namespace llvm {
-#define Declare2(TARG, MOD) extern "C" void LLVMInitialize ## TARG ## MOD()
+#define Declare2(TARG, MOD) extern "C" void LLVMInitialize##TARG##MOD()
#define Declare(T, M) Declare2(T, M)
- Declare(LLVM_TARGET_NAME, TargetInfo);
- Declare(LLVM_TARGET_NAME, Target);
- Declare(LLVM_TARGET_NAME, TargetMC);
- Declare(LLVM_TARGET_NAME, AsmPrinter);
+Declare(LLVM_TARGET_NAME, TargetInfo);
+Declare(LLVM_TARGET_NAME, Target);
+Declare(LLVM_TARGET_NAME, TargetMC);
+Declare(LLVM_TARGET_NAME, AsmPrinter);
#undef Declare
#undef Declare2
}
/// ConfigureLLVM - Initialized and configure LLVM.
static void ConfigureLLVM(void) {
- // Initialize the LLVM backend.
-#define DoInit2(TARG, MOD) LLVMInitialize ## TARG ## MOD()
+// Initialize the LLVM backend.
+#define DoInit2(TARG, MOD) LLVMInitialize##TARG##MOD()
#define DoInit(T, M) DoInit2(T, M)
DoInit(LLVM_TARGET_NAME, TargetInfo);
DoInit(LLVM_TARGET_NAME, Target);
@@ -310,7 +310,7 @@
#undef DoInit2
// Initialize LLVM command line options.
- std::vector<const char*> Args;
+ std::vector<const char *> Args;
Args.push_back(progname); // program name
//TODO // Allow targets to specify PIC options and other stuff to the corresponding
@@ -324,13 +324,13 @@
#ifdef LLVM_SET_MACHINE_OPTIONS
LLVM_SET_MACHINE_OPTIONS(Args);
#endif
-//TODO#ifdef LLVM_SET_IMPLICIT_FLOAT
-//TODO LLVM_SET_IMPLICIT_FLOAT(flag_no_implicit_float)
-//TODO#endif
+ //TODO#ifdef LLVM_SET_IMPLICIT_FLOAT
+ //TODO LLVM_SET_IMPLICIT_FLOAT(flag_no_implicit_float)
+ //TODO#endif
- if (time_report || !quiet_flag || flag_detailed_statistics)
+ if (time_report || !quiet_flag || flag_detailed_statistics)
Args.push_back("--time-passes");
- if (!quiet_flag || flag_detailed_statistics)
+ if (!quiet_flag || flag_detailed_statistics)
Args.push_back("--stats");
if (flag_verbose_asm)
Args.push_back("--asm-verbose");
@@ -349,27 +349,27 @@
// directly from the command line, do so now. This is mainly for debugging
// purposes, and shouldn't really be for general use.
-//TODO if (flag_limited_precision > 0) {
-//TODO std::string Arg("--limit-float-precision="+utostr(flag_limited_precision));
-//TODO ArgStrings.push_back(Arg);
-//TODO }
+ //TODO if (flag_limited_precision > 0) {
+ //TODO std::string Arg("--limit-float-precision="+utostr(flag_limited_precision));
+ //TODO ArgStrings.push_back(Arg);
+ //TODO }
for (unsigned i = 0, e = ArgStrings.size(); i != e; ++i)
Args.push_back(ArgStrings[i].c_str());
-//TODO std::vector<std::string> LLVM_Optns; // Avoid deallocation before opts parsed!
-//TODO if (llvm_optns) {
-//TODO llvm::SmallVector<llvm::StringRef, 16> Buf;
-//TODO SplitString(llvm_optns, Buf);
-//TODO for(unsigned i = 0, e = Buf.size(); i != e; ++i) {
-//TODO LLVM_Optns.push_back(Buf[i]);
-//TODO Args.push_back(LLVM_Optns.back().c_str());
-//TODO }
-//TODO }
-
- Args.push_back(0); // Null terminator.
- int pseudo_argc = Args.size()-1;
- llvm::cl::ParseCommandLineOptions(pseudo_argc, const_cast<char**>(&Args[0]));
+ //TODO std::vector<std::string> LLVM_Optns; // Avoid deallocation before opts parsed!
+ //TODO if (llvm_optns) {
+ //TODO llvm::SmallVector<llvm::StringRef, 16> Buf;
+ //TODO SplitString(llvm_optns, Buf);
+ //TODO for(unsigned i = 0, e = Buf.size(); i != e; ++i) {
+ //TODO LLVM_Optns.push_back(Buf[i]);
+ //TODO Args.push_back(LLVM_Optns.back().c_str());
+ //TODO }
+ //TODO }
+
+ Args.push_back(0); // Null terminator.
+ int pseudo_argc = Args.size() - 1;
+ llvm::cl::ParseCommandLineOptions(pseudo_argc, const_cast<char **>(&Args[0]));
ArgStrings.clear();
}
@@ -422,8 +422,7 @@
static void CreateTargetMachine(const std::string &TargetTriple) {
// FIXME: Figure out how to select the target and pass down subtarget info.
std::string Err;
- const Target *TME =
- TargetRegistry::lookupTarget(TargetTriple, Err);
+ const Target *TME = TargetRegistry::lookupTarget(TargetTriple, Err);
if (!TME)
report_fatal_error(Err);
@@ -471,7 +470,7 @@
#if (GCC_MINOR > 5)
fast_math_flags_set_p(&global_options);
#else
- fast_math_flags_set_p();
+ fast_math_flags_set_p();
#endif
Options.NoInfsFPMath = flag_finite_math_only;
Options.NoNaNsFPMath = flag_finite_math_only;
@@ -505,14 +504,14 @@
StringRef ModuleID = main_input_filename ? main_input_filename : "";
TheModule = new Module(ModuleID, getGlobalContext());
- // Insert a special .ident directive to identify the version of the plugin
- // which compiled this code. The format of the .ident string is patterned
- // after the ones produced by GCC.
+// Insert a special .ident directive to identify the version of the plugin
+// which compiled this code. The format of the .ident string is patterned
+// after the ones produced by GCC.
#ifdef IDENT_ASM_OP
if (!flag_no_ident) {
const char *pkg_version = "(GNU) ";
- if (strcmp ("(GCC) ", pkgversion_string))
+ if (strcmp("(GCC) ", pkgversion_string))
pkg_version = pkgversion_string;
std::string IdentString = IDENT_ASM_OP;
@@ -529,8 +528,8 @@
// Install information about the target triple and data layout into the module
// for optimizer use.
TheModule->setTargetTriple(TargetTriple);
- TheModule->setDataLayout(TheTarget->getDataLayout()->
- getStringRepresentation());
+ TheModule->setDataLayout(
+ TheTarget->getDataLayout()->getStringRepresentation());
}
/// flag_default_initialize_globals - Whether global variables with no explicit
@@ -609,8 +608,8 @@
// vectorizer using -fplugin-arg-dragonegg-llvm-option=-vectorize
PassBuilder.Vectorize = PassManagerBuilder().Vectorize;
- PassBuilder.LibraryInfo =
- new TargetLibraryInfo((Triple)TheModule->getTargetTriple());
+ PassBuilder.LibraryInfo = new TargetLibraryInfo((Triple)
+ TheModule->getTargetTriple());
if (flag_no_simplify_libcalls)
PassBuilder.LibraryInfo->disableAllFunctions();
@@ -657,8 +656,8 @@
if (!EmitIR && 0) {
FunctionPassManager *PM = PerFunctionPasses;
- // Request that addPassesToEmitFile run the Verifier after running
- // passes which modify the IR.
+// Request that addPassesToEmitFile run the Verifier after running
+// passes which modify the IR.
#ifndef NDEBUG
bool DisableVerify = false;
#else
@@ -704,8 +703,8 @@
// If full inliner is not run, check if always-inline is needed to handle
// functions that are marked as always_inline.
// TODO: Consider letting the GCC inliner do this.
- for (Module::iterator I = TheModule->begin(), E = TheModule->end();
- I != E; ++I)
+ for (Module::iterator I = TheModule->begin(), E = TheModule->end(); I != E;
+ ++I)
if (I->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
Attribute::AlwaysInline)) {
NeedAlwaysInliner = true;
@@ -713,7 +712,7 @@
}
if (NeedAlwaysInliner)
- InliningPass = createAlwaysInlinerPass(); // Inline always_inline funcs
+ InliningPass = createAlwaysInlinerPass(); // Inline always_inline funcs
}
PassBuilder.OptLevel = ModuleOptLevel();
@@ -733,13 +732,13 @@
// FIXME: This is disabled right now until bugs can be worked out. Reenable
// this for fast -O0 compiles!
if (PerModulePasses || 1) {
- FunctionPassManager *PM = CodeGenPasses =
- new FunctionPassManager(TheModule);
+ FunctionPassManager *PM = CodeGenPasses = new FunctionPassManager(
+ TheModule);
PM->add(new DataLayout(*TheTarget->getDataLayout()));
TheTarget->addAnalysisPasses(*PM);
- // Request that addPassesToEmitFile run the Verifier after running
- // passes which modify the IR.
+// Request that addPassesToEmitFile run the Verifier after running
+// passes which modify the IR.
#ifndef NDEBUG
bool DisableVerify = false;
#else
@@ -759,17 +758,16 @@
/// ConvertStructorsList - Convert a list of static ctors/dtors to an
/// initializer suitable for the llvm.global_[cd]tors globals.
-static void CreateStructorsList(std::vector<std::pair<Constant*, int> > &Tors,
+static void CreateStructorsList(std::vector<std::pair<Constant *, int> > &Tors,
const char *Name) {
- std::vector<Constant*> InitList;
- std::vector<Constant*> StructInit;
+ std::vector<Constant *> InitList;
+ std::vector<Constant *> StructInit;
StructInit.resize(2);
LLVMContext &Context = getGlobalContext();
- Type *FPTy =
- FunctionType::get(Type::getVoidTy(Context),
- std::vector<Type*>(), false);
+ Type *FPTy = FunctionType::get(Type::getVoidTy(Context),
+ std::vector<Type *>(), false);
FPTy = FPTy->getPointerTo();
for (unsigned i = 0, e = Tors.size(); i != e; ++i) {
@@ -780,28 +778,29 @@
StructInit[1] = TheFolder->CreateBitCast(Tors[i].first, FPTy);
InitList.push_back(ConstantStruct::getAnon(Context, StructInit));
}
- Constant *Array = ConstantArray::get(
- ArrayType::get(InitList[0]->getType(), InitList.size()), InitList);
+ Constant *Array = ConstantArray::get(ArrayType::get(InitList[0]->getType(),
+ InitList.size()),
+ InitList);
new GlobalVariable(*TheModule, Array->getType(), false,
- GlobalValue::AppendingLinkage,
- Array, Name);
+ GlobalValue::AppendingLinkage, Array, Name);
}
/// ConvertMetadataStringToGV - Convert string to global value. Use existing
/// global if possible.
-Constant* ConvertMetadataStringToGV(const char *str) {
+Constant *ConvertMetadataStringToGV(const char *str) {
Constant *Init = ConstantDataArray::getString(getGlobalContext(), str);
// Use cached string if it exists.
- static std::map<Constant*, GlobalVariable*> StringCSTCache;
+ static std::map<Constant *, GlobalVariable *> StringCSTCache;
GlobalVariable *&Slot = StringCSTCache[Init];
- if (Slot) return Slot;
+ if (Slot)
+ return Slot;
// Create a new string global.
GlobalVariable *GV = new GlobalVariable(*TheModule, Init->getType(), true,
- GlobalVariable::PrivateLinkage,
- Init, ".str");
+ GlobalVariable::PrivateLinkage, Init,
+ ".str");
GV->setSection("llvm.metadata");
Slot = GV;
return GV;
@@ -814,7 +813,7 @@
LLVMContext &Context = getGlobalContext();
// Handle annotate attribute on global.
- tree annotateAttr = lookup_attribute("annotate", DECL_ATTRIBUTES (decl));
+ tree annotateAttr = lookup_attribute("annotate", DECL_ATTRIBUTES(decl));
if (annotateAttr == 0)
return;
@@ -843,12 +842,9 @@
assert(isa<STRING_CST>(val) &&
"Annotate attribute arg should always be a string");
Constant *strGV = AddressOf(val);
- Constant *Element[4] = {
- TheFolder->CreateBitCast(GV,SBP),
- TheFolder->CreateBitCast(strGV,SBP),
- file,
- lineNo
- };
+ Constant *Element[4] = { TheFolder->CreateBitCast(GV, SBP),
+ TheFolder->CreateBitCast(strGV, SBP), file,
+ lineNo };
AttributeAnnotateGlobals.push_back(ConstantStruct::getAnon(Element));
}
@@ -916,16 +912,14 @@
} else {
// weakref to external symbol.
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
- Aliasee = new GlobalVariable(*TheModule,
- GV->getType()->getElementType(),
- GV->isConstant(),
- GlobalVariable::ExternalWeakLinkage, NULL,
- IDENTIFIER_POINTER(target));
+ Aliasee = new GlobalVariable(
+ *TheModule, GV->getType()->getElementType(),
+ GV->isConstant(), GlobalVariable::ExternalWeakLinkage,
+ NULL, IDENTIFIER_POINTER(target));
else if (Function *F = dyn_cast<Function>(V))
Aliasee = Function::Create(F->getFunctionType(),
Function::ExternalWeakLinkage,
- IDENTIFIER_POINTER(target),
- TheModule);
+ IDENTIFIER_POINTER(target), TheModule);
else
llvm_unreachable("Unsuported global value");
}
@@ -937,8 +931,8 @@
if (Linkage != GlobalValue::InternalLinkage) {
// Create the LLVM alias.
- GlobalAlias* GA = new GlobalAlias(Aliasee->getType(), Linkage, "",
- Aliasee, TheModule);
+ GlobalAlias *GA = new GlobalAlias(Aliasee->getType(), Linkage, "", Aliasee,
+ TheModule);
handleVisibility(decl, GA);
// Associate it with decl instead of V.
@@ -988,7 +982,7 @@
if (!TYPE_SIZE(TREE_TYPE(decl)))
return;
-//TODO timevar_push(TV_LLVM_GLOBALS);
+ //TODO timevar_push(TV_LLVM_GLOBALS);
// Get or create the global variable now.
GlobalVariable *GV = cast<GlobalVariable>(DECL_LLVM(decl));
@@ -1023,10 +1017,9 @@
// global union, and the LLVM type followed a union initializer that is
// different from the union element used for the type.
GV->removeFromParent();
- GlobalVariable *NGV = new GlobalVariable(*TheModule, Init->getType(),
- GV->isConstant(),
- GlobalValue::ExternalLinkage, 0,
- GV->getName());
+ GlobalVariable *NGV = new GlobalVariable(
+ *TheModule, Init->getType(), GV->isConstant(),
+ GlobalValue::ExternalLinkage, 0, GV->getName());
NGV->setInitializer(Init);
GV->replaceAllUsesWith(TheFolder->CreateBitCast(NGV, GV->getType()));
changeLLVMConstant(GV, NGV);
@@ -1062,9 +1055,9 @@
// Set the linkage.
GlobalValue::LinkageTypes Linkage;
- if (false) {// FIXME DECL_LLVM_PRIVATE(decl)) {
+ if (false) { // FIXME DECL_LLVM_PRIVATE(decl)) {
Linkage = GlobalValue::PrivateLinkage;
- } else if (false) {//FIXME DECL_LLVM_LINKER_PRIVATE(decl)) {
+ } else if (false) { //FIXME DECL_LLVM_LINKER_PRIVATE(decl)) {
Linkage = GlobalValue::LinkerPrivateLinkage;
} else if (!TREE_PUBLIC(decl)) {
Linkage = GlobalValue::InternalLinkage;
@@ -1073,7 +1066,7 @@
Linkage = GlobalValue::WeakAnyLinkage;
} else if (DECL_ONE_ONLY(decl)) {
Linkage = GlobalValue::getWeakLinkage(flag_odr);
- } else if (DECL_COMMON(decl) && // DECL_COMMON is only meaningful if no init
+ } else if (DECL_COMMON(decl) && // DECL_COMMON is only meaningful if no init
(!DECL_INITIAL(decl) || DECL_INITIAL(decl) == error_mark_node)) {
// llvm-gcc also includes DECL_VIRTUAL_P here.
Linkage = GlobalValue::CommonLinkage;
@@ -1115,8 +1108,8 @@
if (DECL_SECTION_NAME(decl)) {
GV->setSection(TREE_STRING_POINTER(DECL_SECTION_NAME(decl)));
#ifdef LLVM_IMPLICIT_TARGET_GLOBAL_VAR_SECTION
- } else if (const char *Section =
- LLVM_IMPLICIT_TARGET_GLOBAL_VAR_SECTION(decl)) {
+ } else if (
+ const char *Section = LLVM_IMPLICIT_TARGET_GLOBAL_VAR_SECTION(decl)) {
GV->setSection(Section);
#endif
}
@@ -1136,8 +1129,8 @@
GV->setAlignment(0);
// Handle used decls
- if (DECL_PRESERVE_P (decl)) {
- if (false)//FIXME DECL_LLVM_LINKER_PRIVATE (decl))
+ if (DECL_PRESERVE_P(decl)) {
+ if (false) //FIXME DECL_LLVM_LINKER_PRIVATE (decl))
AttributeCompilerUsedGlobals.insert(GV);
else
AttributeUsedGlobals.insert(GV);
@@ -1149,11 +1142,10 @@
#ifdef LLVM_IMPLICIT_TARGET_GLOBAL_VAR_SECTION
} else if (isa<CONST_DECL>(decl)) {
- if (const char *Section =
- LLVM_IMPLICIT_TARGET_GLOBAL_VAR_SECTION(decl)) {
+ if (const char *Section = LLVM_IMPLICIT_TARGET_GLOBAL_VAR_SECTION(decl)) {
GV->setSection(Section);
- /* LLVM LOCAL - begin radar 6389998 */
+/* LLVM LOCAL - begin radar 6389998 */
#ifdef TARGET_ADJUST_CFSTRING_NAME
TARGET_ADJUST_CFSTRING_NAME(GV, Section);
#endif
@@ -1175,17 +1167,16 @@
if (isa<VAR_DECL>(decl))
if (struct varpool_node *vnode =
#if (GCC_MINOR < 6)
- varpool_node(decl)
+ varpool_node(decl)
#else
varpool_get_node(decl)
#endif
)
emit_varpool_aliases(vnode);
-//TODO timevar_pop(TV_LLVM_GLOBALS);
+ //TODO timevar_pop(TV_LLVM_GLOBALS);
}
-
/// ValidateRegisterVariable - Check that a static "asm" variable is
/// well-formed. If not, emit error messages and return true. If so, return
/// false.
@@ -1194,7 +1185,7 @@
int RegNumber = decode_reg_name(RegName);
if (errorcount || sorrycount)
- return true; // Do not process broken code.
+ return true; // Do not process broken code.
/* Detect errors in declaring global registers. */
if (RegNumber == -1)
@@ -1211,19 +1202,17 @@
else if (DECL_INITIAL(decl) != 0 && TREE_STATIC(decl))
error("global register variable has initial value");
else if (isa<AGGREGATE_TYPE>(TREE_TYPE(decl)))
- sorry("LLVM cannot handle register variable %<%s%>, report a bug",
- RegName);
+ sorry("LLVM cannot handle register variable %<%s%>, report a bug", RegName);
else {
if (TREE_THIS_VOLATILE(decl))
warning(0, "volatile register variables don%'t work as you might wish");
- return false; // Everything ok.
+ return false; // Everything ok.
}
return true;
}
-
/// make_decl_llvm - Create the DECL_RTL for a VAR_DECL or FUNCTION_DECL. DECL
/// should have static storage duration. In other words, it should not be an
/// automatic variable, including PARM_DECLs.
@@ -1241,17 +1230,17 @@
#ifndef NDEBUG
// Check that we are not being given an automatic variable or a type or label.
// A weak alias has TREE_PUBLIC set but not the other bits.
- if (isa<PARM_DECL>(decl) || isa<RESULT_DECL>(decl) ||
- isa<TYPE_DECL>(decl) || isa<LABEL_DECL>(decl) ||
- (isa<VAR_DECL>(decl) && !TREE_STATIC(decl) &&
- !TREE_PUBLIC(decl) && !DECL_EXTERNAL(decl) && !DECL_REGISTER(decl))) {
+ if (isa<PARM_DECL>(decl) || isa<RESULT_DECL>(decl) || isa<TYPE_DECL>(decl) ||
+ isa<LABEL_DECL>(decl) ||
+ (isa<VAR_DECL>(decl) && !TREE_STATIC(decl) && !TREE_PUBLIC(decl) &&
+ !DECL_EXTERNAL(decl) && !DECL_REGISTER(decl))) {
debug_tree(decl);
llvm_unreachable("Cannot make a global for this kind of declaration!");
}
#endif
if (errorcount || sorrycount)
- return NULL; // Do not process broken code.
+ return NULL; // Do not process broken code.
LLVMContext &Context = getGlobalContext();
@@ -1265,7 +1254,7 @@
return NULL;
}
-//TODO timevar_push(TV_LLVM_GLOBALS);
+ //TODO timevar_push(TV_LLVM_GLOBALS);
std::string Name;
if (!isa<CONST_DECL>(decl)) // CONST_DECLs do not have assembler names.
@@ -1275,7 +1264,7 @@
// Also handle vars declared register invalidly.
if (!Name.empty() && Name[0] == 1) {
#ifdef REGISTER_PREFIX
- if (strlen (REGISTER_PREFIX) != 0) {
+ if (strlen(REGISTER_PREFIX) != 0) {
int reg_number = decode_reg_name(Name.c_str());
if (reg_number >= 0 || reg_number == -3)
error("register name given for non-register variable %q+D", decl);
@@ -1306,7 +1295,8 @@
AttributeSet PAL;
FunctionType *Ty = ConvertFunctionType(TREE_TYPE(decl), decl, NULL, CC,
PAL);
- FnEntry = Function::Create(Ty, Function::ExternalLinkage, Name, TheModule);
+ FnEntry = Function::Create(Ty, Function::ExternalLinkage, Name,
+ TheModule);
FnEntry->setCallingConv(CC);
FnEntry->setAttributes(PAL);
@@ -1315,7 +1305,7 @@
FnEntry->setLinkage(Function::ExternalWeakLinkage);
#ifdef TARGET_ADJUST_LLVM_LINKAGE
- TARGET_ADJUST_LLVM_LINKAGE(FnEntry,decl);
+ TARGET_ADJUST_LLVM_LINKAGE(FnEntry, decl);
#endif /* TARGET_ADJUST_LLVM_LINKAGE */
handleVisibility(decl, FnEntry);
@@ -1344,17 +1334,17 @@
}
return SET_DECL_LLVM(decl, FnEntry);
} else {
- assert((isa<VAR_DECL>(decl) ||
- isa<CONST_DECL>(decl)) && "Not a function or var decl?");
+ assert((isa<VAR_DECL>(decl) || isa<CONST_DECL>(decl)) &&
+ "Not a function or var decl?");
Type *Ty = ConvertType(TREE_TYPE(decl));
- GlobalVariable *GV ;
+ GlobalVariable *GV;
// If we have "extern void foo", make the global have type {} instead of
// type void.
if (Ty->isVoidTy())
Ty = StructType::get(Context);
- if (Name.empty()) { // Global has no name.
+ if (Name.empty()) { // Global has no name.
GV = new GlobalVariable(*TheModule, Ty, false,
GlobalValue::ExternalLinkage, 0, "");
@@ -1363,7 +1353,7 @@
GV->setLinkage(GlobalValue::ExternalWeakLinkage);
#ifdef TARGET_ADJUST_LLVM_LINKAGE
- TARGET_ADJUST_LLVM_LINKAGE(GV,decl);
+ TARGET_ADJUST_LLVM_LINKAGE(GV, decl);
#endif /* TARGET_ADJUST_LLVM_LINKAGE */
handleVisibility(decl, GV);
@@ -1381,7 +1371,7 @@
GV->setLinkage(GlobalValue::ExternalWeakLinkage);
#ifdef TARGET_ADJUST_LLVM_LINKAGE
- TARGET_ADJUST_LLVM_LINKAGE(GV,decl);
+ TARGET_ADJUST_LLVM_LINKAGE(GV, decl);
#endif /* TARGET_ADJUST_LLVM_LINKAGE */
handleVisibility(decl, GV);
@@ -1408,7 +1398,7 @@
}
} else {
- GV = GVE; // Global already created, reuse it.
+ GV = GVE; // Global already created, reuse it.
}
}
@@ -1427,9 +1417,8 @@
} else {
// Mark readonly globals with constant initializers constant.
if (DECL_INITIAL(decl) != error_mark_node && // uninitialized?
- DECL_INITIAL(decl) &&
- (TREE_CONSTANT(DECL_INITIAL(decl)) ||
- isa<STRING_CST>(DECL_INITIAL(decl))))
+ DECL_INITIAL(decl) && (TREE_CONSTANT(DECL_INITIAL(decl)) ||
+ isa<STRING_CST>(DECL_INITIAL(decl))))
GV->setConstant(true);
}
}
@@ -1443,7 +1432,7 @@
return SET_DECL_LLVM(decl, GV);
}
-//TODO timevar_pop(TV_LLVM_GLOBALS);
+ //TODO timevar_pop(TV_LLVM_GLOBALS);
}
/// make_definition_llvm - Ensures that the body or initial value of the given
@@ -1472,7 +1461,8 @@
/// Fn is a 'void()' ctor/dtor function to be run, initprio is the init
/// priority, and isCtor indicates whether this is a ctor or dtor.
void register_ctor_dtor(Function *Fn, int InitPrio, bool isCtor) {
- (isCtor ? &StaticCtors:&StaticDtors)->push_back(std::make_pair(Fn, InitPrio));
+ (isCtor ? &StaticCtors : &StaticDtors)->push_back(std::make_pair(Fn,
+ InitPrio));
}
/// extractRegisterName - Get a register name given its decl. In 4.2 unlike 4.0
@@ -1554,7 +1544,6 @@
return false;
}
-
//===----------------------------------------------------------------------===//
// Plugin interface
//===----------------------------------------------------------------------===//
@@ -1562,14 +1551,13 @@
// This plugin's code is licensed under the GPLv2 or later. The LLVM libraries
// use the GPL compatible University of Illinois/NCSA Open Source License. The
// plugin is GPL compatible.
-int plugin_is_GPL_compatible __attribute__ ((visibility("default")));
-
+int plugin_is_GPL_compatible __attribute__((visibility("default")));
/// llvm_start_unit - Perform late initialization. This is called by GCC just
/// before processing the compilation unit.
/// NOTE: called even when only doing syntax checking, so do not initialize the
/// module etc here.
-static void llvm_start_unit(void * /*gcc_data*/, void * /*user_data*/) {
+static void llvm_start_unit(void */*gcc_data*/, void */*user_data*/) {
if (!quiet_flag)
errs() << "Starting compilation unit\n";
@@ -1579,9 +1567,9 @@
// We have the same needs as GCC's LTO. Always claim to be doing LTO.
flag_lto =
#if (GCC_MINOR > 5)
- "";
+ "";
#else
- 1;
+ 1;
#endif
flag_generate_lto = 1;
flag_whole_program = 0;
@@ -1607,7 +1595,8 @@
static void emit_cgraph_aliases(struct cgraph_node *node) {
#if (GCC_MINOR < 7)
struct cgraph_node *alias, *next;
- for (alias = node->same_body; alias && alias->next; alias = alias->next) ;
+ for (alias = node->same_body; alias && alias->next; alias = alias->next)
+ ;
for (; alias; alias = next) {
next = alias->previous;
if (!alias->thunk.thunk_p)
@@ -1657,7 +1646,7 @@
/// rtl_emit_function - Turn a gimple function into LLVM IR. This is called
/// once for each function in the compilation unit if GCC optimizations are
/// enabled.
-static unsigned int rtl_emit_function (void) {
+static unsigned int rtl_emit_function(void) {
if (!errorcount && !sorrycount) {
InitializeBackend();
// Convert the function.
@@ -1673,27 +1662,20 @@
}
/// pass_rtl_emit_function - RTL pass that converts a function to LLVM IR.
-static struct rtl_opt_pass pass_rtl_emit_function =
-{
- {
- RTL_PASS,
- "rtl_emit_function", /* name */
- NULL, /* gate */
- rtl_emit_function, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_NONE, /* tv_id */
- PROP_ssa | PROP_gimple_leh | PROP_cfg,
- /* properties_required */
- 0, /* properties_provided */
- PROP_ssa | PROP_trees, /* properties_destroyed */
- TODO_verify_ssa | TODO_verify_flow
- | TODO_verify_stmts, /* todo_flags_start */
- TODO_ggc_collect /* todo_flags_finish */
- }
-};
-
+static struct rtl_opt_pass pass_rtl_emit_function = { {
+ RTL_PASS, "rtl_emit_function", /* name */
+ NULL, /* gate */
+ rtl_emit_function, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_NONE, /* tv_id */
+ PROP_ssa | PROP_gimple_leh | PROP_cfg, /* properties_required */
+ 0, /* properties_provided */
+ PROP_ssa | PROP_trees, /* properties_destroyed */
+ TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts, /* todo_flags_start */
+ TODO_ggc_collect /* todo_flags_finish */
+} };
/// emit_file_scope_asms - Output any file-scope assembly.
static void emit_file_scope_asms() {
@@ -1701,7 +1683,7 @@
tree string = can->asm_str;
if (isa<ADDR_EXPR>(string))
string = TREE_OPERAND(string, 0);
- TheModule->appendModuleInlineAsm(TREE_STRING_POINTER (string));
+ TheModule->appendModuleInlineAsm(TREE_STRING_POINTER(string));
}
// Remove the asms so gcc doesn't waste time outputting them.
cgraph_asm_nodes = NULL;
@@ -1720,8 +1702,8 @@
for (struct cgraph_node *node = cgraph_nodes; node; node = node->next)
if (node->alias && DECL_EXTERNAL(node->decl) &&
lookup_attribute("weakref", DECL_ATTRIBUTES(node->decl)))
- emit_alias(node->decl, node->thunk.alias ?
- node->thunk.alias : get_alias_symbol(node->decl));
+ emit_alias(node->decl, node->thunk.alias ? node->thunk.alias :
+ get_alias_symbol(node->decl));
}
/// emit_varpool_weakrefs - Output any varpool weak references to external
@@ -1730,14 +1712,14 @@
for (struct varpool_node *vnode = varpool_nodes; vnode; vnode = vnode->next)
if (vnode->alias && DECL_EXTERNAL(vnode->decl) &&
lookup_attribute("weakref", DECL_ATTRIBUTES(vnode->decl)))
- emit_alias(vnode->decl, vnode->alias_of ?
- vnode->alias_of : get_alias_symbol(vnode->decl));
+ emit_alias(vnode->decl, vnode->alias_of ? vnode->alias_of :
+ get_alias_symbol(vnode->decl));
}
#endif
/// llvm_emit_globals - Output GCC global variables, aliases and asm's to the
/// LLVM IR.
-static void llvm_emit_globals(void * /*gcc_data*/, void * /*user_data*/) {
+static void llvm_emit_globals(void */*gcc_data*/, void */*user_data*/) {
if (errorcount || sorrycount)
return; // Do not process broken code.
@@ -1759,15 +1741,16 @@
// If this variable must be output even if unused then output it.
tree decl = vnode->decl;
- if (vnode->analyzed && (
+ if (vnode->analyzed &&
+ (
#if (GCC_MINOR > 5)
- !varpool_can_remove_if_no_refs(vnode)
+ !varpool_can_remove_if_no_refs(vnode)
#else
- vnode->force_output || (!DECL_COMDAT(decl) &&
- (!DECL_ARTIFICIAL(decl) ||
- vnode->externally_visible))
+ vnode->force_output ||
+ (!DECL_COMDAT(decl) &&
+ (!DECL_ARTIFICIAL(decl) || vnode->externally_visible))
#endif
- ))
+ ))
// TODO: Remove the check on the following lines. It only exists to avoid
// outputting block addresses when not compiling the function containing
// the block. We need to support outputting block addresses at odd times
@@ -1775,7 +1758,7 @@
if (isa<VAR_DECL>(decl) && !DECL_EXTERNAL(decl) &&
(TREE_PUBLIC(decl) || DECL_PRESERVE_P(decl) ||
TREE_THIS_VOLATILE(decl)))
- emit_global(decl);
+ emit_global(decl);
}
#if (GCC_MINOR > 6)
@@ -1793,7 +1776,7 @@
emit_alias(p->decl, p->target);
}
-static void InlineAsmDiagnosticHandler(const SMDiagnostic &D, void * /*Data*/,
+static void InlineAsmDiagnosticHandler(const SMDiagnostic &D, void */*Data*/,
location_t loc) {
std::string S = D.getMessage().str(); // Ensure Message is not dangling.
const char *Message = S.c_str();
@@ -1812,11 +1795,11 @@
/// llvm_finish_unit - Finish the .s file. This is called by GCC once the
/// compilation unit has been completely processed.
-static void llvm_finish_unit(void * /*gcc_data*/, void * /*user_data*/) {
+static void llvm_finish_unit(void */*gcc_data*/, void */*user_data*/) {
if (errorcount || sorrycount)
return; // Do not process broken code.
-//TODO timevar_push(TV_LLVM_PERFILE);
+ //TODO timevar_push(TV_LLVM_PERFILE);
if (!quiet_flag)
errs() << "Finishing compilation unit\n";
@@ -1826,14 +1809,14 @@
createPerFunctionOptimizationPasses();
-//TODO for (Module::iterator I = TheModule->begin(), E = TheModule->end();
-//TODO I != E; ++I)
-//TODO if (!I->isDeclaration()) {
-//TODO if (flag_disable_red_zone)
-//TODO I->addFnAttr(Attribute::NoRedZone);
-//TODO if (flag_no_implicit_float)
-//TODO I->addFnAttr(Attribute::NoImplicitFloat);
-//TODO }
+ //TODO for (Module::iterator I = TheModule->begin(), E = TheModule->end();
+ //TODO I != E; ++I)
+ //TODO if (!I->isDeclaration()) {
+ //TODO if (flag_disable_red_zone)
+ //TODO I->addFnAttr(Attribute::NoRedZone);
+ //TODO if (flag_no_implicit_float)
+ //TODO I->addFnAttr(Attribute::NoImplicitFloat);
+ //TODO }
// Add an llvm.global_ctors global if needed.
if (!StaticCtors.empty())
@@ -1845,9 +1828,9 @@
if (!AttributeUsedGlobals.empty()) {
std::vector<Constant *> AUGs;
Type *SBP = Type::getInt8PtrTy(Context);
- for (SmallSetVector<Constant *,32>::iterator
- AI = AttributeUsedGlobals.begin(),
- AE = AttributeUsedGlobals.end(); AI != AE; ++AI) {
+ for (SmallSetVector<Constant *, 32>::iterator AI = AttributeUsedGlobals
+ .begin(), AE = AttributeUsedGlobals.end();
+ AI != AE; ++AI) {
Constant *C = *AI;
AUGs.push_back(TheFolder->CreateBitCast(C, SBP));
}
@@ -1864,9 +1847,10 @@
if (!AttributeCompilerUsedGlobals.empty()) {
std::vector<Constant *> ACUGs;
Type *SBP = Type::getInt8PtrTy(Context);
- for (SmallSetVector<Constant *,32>::iterator
- AI = AttributeCompilerUsedGlobals.begin(),
- AE = AttributeCompilerUsedGlobals.end(); AI != AE; ++AI) {
+ for (SmallSetVector<Constant *, 32>::iterator AI =
+ AttributeCompilerUsedGlobals.begin(), AE =
+ AttributeCompilerUsedGlobals.end();
+ AI != AE; ++AI) {
Constant *C = *AI;
ACUGs.push_back(TheFolder->CreateBitCast(C, SBP));
}
@@ -1883,9 +1867,9 @@
// Add llvm.global.annotations
if (!AttributeAnnotateGlobals.empty()) {
Constant *Array = ConstantArray::get(
- ArrayType::get(AttributeAnnotateGlobals[0]->getType(),
- AttributeAnnotateGlobals.size()),
- AttributeAnnotateGlobals);
+ ArrayType::get(AttributeAnnotateGlobals[0]->getType(),
+ AttributeAnnotateGlobals.size()),
+ AttributeAnnotateGlobals);
GlobalValue *gv = new GlobalVariable(*TheModule, Array->getType(), false,
GlobalValue::AppendingLinkage, Array,
"llvm.global.annotations");
@@ -1906,13 +1890,13 @@
if (CodeGenPasses) {
// Arrange for inline asm problems to be printed nicely.
LLVMContext::InlineAsmDiagHandlerTy OldHandler =
- Context.getInlineAsmDiagnosticHandler();
+ Context.getInlineAsmDiagnosticHandler();
void *OldHandlerData = Context.getInlineAsmDiagnosticContext();
Context.setInlineAsmDiagnosticHandler(InlineAsmDiagnosticHandler, 0);
CodeGenPasses->doInitialization();
- for (Module::iterator I = TheModule->begin(), E = TheModule->end();
- I != E; ++I)
+ for (Module::iterator I = TheModule->begin(), E = TheModule->end(); I != E;
+ ++I)
if (!I->isDeclaration())
CodeGenPasses->run(*I);
CodeGenPasses->doFinalization();
@@ -1922,7 +1906,7 @@
FormattedOutStream.flush();
OutStream->flush();
-//TODO timevar_pop(TV_LLVM_PERFILE);
+ //TODO timevar_pop(TV_LLVM_PERFILE);
// We have finished - shutdown the plugin. Doing this here ensures that timer
// info and other statistics are not intermingled with those produced by GCC.
@@ -1930,147 +1914,123 @@
}
/// llvm_finish - Run shutdown code when GCC exits.
-static void llvm_finish(void * /*gcc_data*/, void * /*user_data*/) {
+static void llvm_finish(void */*gcc_data*/, void */*user_data*/) {
FinalizePlugin();
}
-
/// gate_null - Gate method for a pass that does nothing.
-static bool gate_null (void) {
- return false;
-}
+static bool gate_null(void) { return false; }
/// pass_gimple_null - Gimple pass that does nothing.
-static struct gimple_opt_pass pass_gimple_null =
-{
- {
- GIMPLE_PASS,
- "*gimple_null", /* name */
- gate_null, /* gate */
- NULL, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_NONE, /* tv_id */
- 0, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- 0 /* todo_flags_finish */
- }
+static struct gimple_opt_pass pass_gimple_null = {
+ { GIMPLE_PASS, "*gimple_null", /* name */
+ gate_null, /* gate */
+ NULL, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_NONE, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0 /* todo_flags_finish */
+}
};
/// execute_correct_state - Correct the cgraph state to ensure that newly
/// inserted functions are processed before being converted to LLVM IR.
-static unsigned int execute_correct_state (void) {
+static unsigned int execute_correct_state(void) {
if (cgraph_state < CGRAPH_STATE_IPA_SSA)
cgraph_state = CGRAPH_STATE_IPA_SSA;
return 0;
}
/// gate_correct_state - Gate method for pass_gimple_correct_state.
-static bool gate_correct_state (void) {
- return true;
-}
+static bool gate_correct_state(void) { return true; }
/// pass_gimple_correct_state - Gimple pass that corrects the cgraph state so
/// newly inserted functions are processed before being converted to LLVM IR.
-static struct gimple_opt_pass pass_gimple_correct_state =
-{
- {
- GIMPLE_PASS,
- "*gimple_correct_state", /* name */
- gate_correct_state, /* gate */
- execute_correct_state, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_NONE, /* tv_id */
- 0, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- 0 /* todo_flags_finish */
- }
+static struct gimple_opt_pass pass_gimple_correct_state = {
+ { GIMPLE_PASS, "*gimple_correct_state", /* name */
+ gate_correct_state, /* gate */
+ execute_correct_state, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_NONE, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0 /* todo_flags_finish */
+}
};
/// pass_ipa_null - IPA pass that does nothing.
static struct ipa_opt_pass_d pass_ipa_null = {
- {
- IPA_PASS,
- "*ipa_null", /* name */
- gate_null, /* gate */
- NULL, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_NONE, /* tv_id */
- 0, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- 0 /* todo_flags_finish */
- },
- NULL, /* generate_summary */
- NULL, /* write_summary */
- NULL, /* read_summary */
+ { IPA_PASS, "*ipa_null", /* name */
+ gate_null, /* gate */
+ NULL, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_NONE, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0 /* todo_flags_finish */
+}, NULL, /* generate_summary */
+ NULL, /* write_summary */
+ NULL, /* read_summary */
#if (GCC_MINOR > 5)
- NULL, /* write_optimization_summary */
- NULL, /* read_optimization_summary */
+ NULL, /* write_optimization_summary */
+ NULL, /* read_optimization_summary */
#else
- NULL, /* function_read_summary */
+ NULL, /* function_read_summary */
#endif
- NULL, /* stmt_fixup */
- 0, /* function_transform_todo_flags_start */
- NULL, /* function_transform */
- NULL /* variable_transform */
+ NULL, /* stmt_fixup */
+ 0, /* function_transform_todo_flags_start */
+ NULL, /* function_transform */
+ NULL /* variable_transform */
};
/// pass_rtl_null - RTL pass that does nothing.
-static struct rtl_opt_pass pass_rtl_null =
-{
- {
- RTL_PASS,
- "*rtl_null", /* name */
- gate_null, /* gate */
- NULL, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_NONE, /* tv_id */
- 0, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- 0 /* todo_flags_finish */
- }
-};
+static struct rtl_opt_pass pass_rtl_null = { { RTL_PASS, "*rtl_null", /* name */
+ gate_null, /* gate */
+ NULL, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_NONE, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0 /* todo_flags_finish */
+} };
/// pass_simple_ipa_null - Simple IPA pass that does nothing.
-static struct simple_ipa_opt_pass pass_simple_ipa_null =
-{
- {
- SIMPLE_IPA_PASS,
- "*simple_ipa_null", /* name */
- gate_null, /* gate */
- NULL, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_NONE, /* tv_id */
- 0, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- 0 /* todo_flags_finish */
- }
+static struct simple_ipa_opt_pass pass_simple_ipa_null = {
+ { SIMPLE_IPA_PASS, "*simple_ipa_null", /* name */
+ gate_null, /* gate */
+ NULL, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_NONE, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0 /* todo_flags_finish */
+}
};
-
// Garbage collector roots.
extern const struct ggc_cache_tab gt_ggc_rc__gt_cache_h[];
-
/// PluginFlags - Flag arguments for the plugin.
struct FlagDescriptor {
@@ -2079,21 +2039,18 @@
};
static FlagDescriptor PluginFlags[] = {
- { "debug-pass-structure", &DebugPassStructure},
- { "debug-pass-arguments", &DebugPassArguments},
- { "enable-gcc-optzns", &EnableGCCOptimizations },
- { "emit-ir", &EmitIR },
- { "save-gcc-output", &SaveGCCOutput },
- { NULL, NULL } // Terminator.
+ { "debug-pass-structure", &DebugPassStructure },
+ { "debug-pass-arguments", &DebugPassArguments },
+ { "enable-gcc-optzns", &EnableGCCOptimizations }, { "emit-ir", &EmitIR },
+ { "save-gcc-output", &SaveGCCOutput }, { NULL, NULL } // Terminator.
};
-
/// llvm_plugin_info - Information about this plugin. Users can access this
/// using "gcc --help -v".
static struct plugin_info llvm_plugin_info = {
- LLVM_VERSION, // version
+ LLVM_VERSION, // version
// TODO provide something useful here
- NULL // help
+ NULL // help
};
#ifndef DISABLE_VERSION_CHECK
@@ -2101,27 +2058,25 @@
// Check that the running gcc has exactly the same version as the gcc we were
// built against. This strict check seems wise when developing against a fast
// moving gcc tree. TODO: Use a milder check if doing a "release build".
- return plugin_default_version_check (&gcc_version, plugged_in_version);
+ return plugin_default_version_check(&gcc_version, plugged_in_version);
}
#endif
-
/// plugin_init - Plugin initialization routine, called by GCC. This is the
/// first code executed in the plugin (except for constructors). Configure
/// the plugin and setup GCC, taking over optimization and code generation.
-int __attribute__ ((visibility("default")))
-plugin_init(struct plugin_name_args *plugin_info,
- struct plugin_gcc_version *
+int __attribute__((visibility("default"))) plugin_init(
+ struct plugin_name_args *plugin_info, struct plugin_gcc_version *
#ifndef DISABLE_VERSION_CHECK
- version
+ version
#endif
- ) {
+ ) {
const char *plugin_name = plugin_info->base_name;
struct register_pass_info pass_info;
#ifndef DISABLE_VERSION_CHECK
// Check that the plugin is compatible with the running gcc.
- if (!version_check (version)) {
+ if (!version_check(version)) {
errs() << "Incompatible plugin version\n";
return 1;
}
@@ -2143,7 +2098,8 @@
plugin_name, argv[i].key);
continue;
}
- if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || argv[i].value[1]) {
+ if (argv[i].value[0] < '0' || argv[i].value[0] > '9' ||
+ argv[i].value[1]) {
error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"),
plugin_name, argv[i].key, argv[i].value);
continue;
@@ -2201,15 +2157,15 @@
// Look for a matching flag.
bool Found = false;
for (FlagDescriptor *F = PluginFlags; F->Key; ++F)
- if (!strcmp (argv[i].key, F->Key)) {
+ if (!strcmp(argv[i].key, F->Key)) {
Found = true;
*F->Flag = true;
break;
}
if (!Found)
- warning(0, G_("unrecognised option '-fplugin-arg-%s-%s'"),
- plugin_name, argv[i].key);
+ warning(0, G_("unrecognised option '-fplugin-arg-%s-%s'"), plugin_name,
+ argv[i].key);
}
}
@@ -2219,20 +2175,20 @@
// Register our garbage collector roots.
register_callback(plugin_name, PLUGIN_REGISTER_GGC_CACHES, NULL,
- const_cast<ggc_cache_tab*>(gt_ggc_rc__gt_cache_h));
+ const_cast<ggc_cache_tab *>(gt_ggc_rc__gt_cache_h));
// Perform late initialization just before processing the compilation unit.
register_callback(plugin_name, PLUGIN_START_UNIT, llvm_start_unit, NULL);
// Turn off all gcc optimization passes.
if (!EnableGCCOptimizations) {
- // TODO: figure out a good way of turning off ipa optimization passes.
- // Could just set optimize to zero (after taking a copy), but this would
- // also impact front-end optimizations.
+// TODO: figure out a good way of turning off ipa optimization passes.
+// Could just set optimize to zero (after taking a copy), but this would
+// also impact front-end optimizations.
- // Leave pass_ipa_free_lang_data.
+// Leave pass_ipa_free_lang_data.
- // Leave pass_ipa_function_and_variable_visibility. Needed for correctness.
+// Leave pass_ipa_function_and_variable_visibility. Needed for correctness.
#if (GCC_MINOR < 6)
// Turn off pass_ipa_early_inline.
Modified: dragonegg/trunk/src/Cache.cpp
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/src/Cache.cpp?rev=173245&r1=173244&r2=173245&view=diff
==============================================================================
--- dragonegg/trunk/src/Cache.cpp (original)
+++ dragonegg/trunk/src/Cache.cpp Wed Jan 23 03:54:28 2013
@@ -65,10 +65,8 @@
#define tree2int_hash tree_map_base_hash
#define tree2int_marked_p tree_map_base_marked_p
-static GTY((if_marked("tree2int_marked_p"),
- param_is(struct tree2int)))
- htab_t intCache;
-
+static GTY((if_marked("tree2int_marked_p"), param_is(struct tree2int)))
+ htab_t intCache;
// Hash table mapping trees to Type*.
@@ -81,17 +79,15 @@
#ifndef IN_GCC
struct
#endif
- Type *GTY((skip)) Ty;
+ Type *GTY((skip)) Ty;
};
#define tree2Type_eq tree_map_base_eq
#define tree2Type_hash tree_map_base_hash
#define tree2Type_marked_p tree_map_base_marked_p
-static GTY((if_marked("tree2Type_marked_p"),
- param_is(struct tree2Type)))
- htab_t TypeCache;
-
+static GTY((if_marked("tree2Type_marked_p"), param_is(struct tree2Type)))
+ htab_t TypeCache;
// Hash table mapping trees to WeakVH.
@@ -104,17 +100,15 @@
#ifndef IN_GCC
struct
#endif
- WeakVH GTY((skip)) V;
+ WeakVH GTY((skip)) V;
};
#define tree2WeakVH_eq tree_map_base_eq
#define tree2WeakVH_hash tree_map_base_hash
#define tree2WeakVH_marked_p tree_map_base_marked_p
-static GTY((if_marked("tree2WeakVH_marked_p"),
- param_is(struct tree2WeakVH)))
- htab_t WeakVHCache;
-
+static GTY((if_marked("tree2WeakVH_marked_p"), param_is(struct tree2WeakVH)))
+ htab_t WeakVHCache;
// Include the garbage collector header.
#ifndef ENABLE_BUILD_WITH_CXX
@@ -145,15 +139,15 @@
intCache = htab_create_ggc(1024, tree2int_hash, tree2int_eq, 0);
tree_map_base in = { t };
- tree2int **slot = (tree2int **)htab_find_slot(intCache, &in, INSERT);
+ tree2int **slot = (tree2int * *) htab_find_slot(intCache, &in, INSERT);
assert(slot && "Failed to create hash table slot!");
if (!*slot) {
*slot =
#if (GCC_MINOR > 5)
- ggc_alloc_tree2int();
+ ggc_alloc_tree2int();
#else
- GGC_NEW(struct tree2int);
+ GGC_NEW(struct tree2int);
#endif
(*slot)->base.from = t;
}
@@ -182,15 +176,15 @@
if (!TypeCache)
TypeCache = htab_create_ggc(1024, tree2Type_hash, tree2Type_eq, 0);
- tree2Type **slot = (tree2Type **)htab_find_slot(TypeCache, &in, INSERT);
+ tree2Type **slot = (tree2Type * *) htab_find_slot(TypeCache, &in, INSERT);
assert(slot && "Failed to create hash table slot!");
if (!*slot) {
*slot =
#if (GCC_MINOR > 5)
- ggc_alloc_tree2Type();
+ ggc_alloc_tree2Type();
#else
- GGC_NEW(struct tree2Type);
+ GGC_NEW(struct tree2Type);
#endif
(*slot)->base.from = t;
}
@@ -209,7 +203,7 @@
}
static void DestructWeakVH(void *p) {
- ((WeakVH*)&((tree2WeakVH*)p)->V)->~WeakVH();
+ ((WeakVH *)&((tree2WeakVH *)p)->V)->~WeakVH();
}
/// setCachedValue - Associates the given value (which may be null) with the
@@ -229,7 +223,8 @@
WeakVHCache = htab_create_ggc(1024, tree2WeakVH_hash, tree2WeakVH_eq,
DestructWeakVH);
- tree2WeakVH **slot = (tree2WeakVH **)htab_find_slot(WeakVHCache, &in, INSERT);
+ tree2WeakVH **slot = (tree2WeakVH * *)
+ htab_find_slot(WeakVHCache, &in, INSERT);
assert(slot && "Failed to create hash table slot!");
if (*slot) {
@@ -239,12 +234,12 @@
*slot =
#if (GCC_MINOR > 5)
- ggc_alloc_tree2WeakVH();
+ ggc_alloc_tree2WeakVH();
#else
- GGC_NEW(struct tree2WeakVH);
+ GGC_NEW(struct tree2WeakVH);
#endif
(*slot)->base.from = t;
- WeakVH *W = new(&(*slot)->V) WeakVH(V);
+ WeakVH *W = new (&(*slot)->V) WeakVH(V);
assert(W == &(*slot)->V && "Pointer was displaced!");
- (void)W;
+ (void) W;
}
Modified: dragonegg/trunk/src/ConstantConversion.cpp
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/src/ConstantConversion.cpp?rev=173245&r1=173244&r2=173245&view=diff
==============================================================================
--- dragonegg/trunk/src/ConstantConversion.cpp (original)
+++ dragonegg/trunk/src/ConstantConversion.cpp Wed Jan 23 03:54:28 2013
@@ -54,7 +54,7 @@
#if (GCC_MINOR < 7)
#include "flags.h" // For POINTER_TYPE_OVERFLOW_UNDEFINED.
#endif
-#include "tm_p.h" // For CONSTANT_ALIGNMENT.
+#include "tm_p.h" // For CONSTANT_ALIGNMENT.
#ifndef ENABLE_BUILD_WITH_CXX
} // extern "C"
#endif
@@ -99,7 +99,7 @@
if (empty())
return !Contents;
return Contents && isa<IntegerType>(Contents->getType()) &&
- getBitWidth() == Contents->getType()->getPrimitiveSizeInBits();
+ getBitWidth() == Contents->getType()->getPrimitiveSizeInBits();
}
/// ExtendRange - Extend the slice to a wider range. All added bits are zero.
@@ -125,24 +125,18 @@
/// BitSlice - Constructor for the range of bits ['first', 'last').
BitSlice(int first, int last, Constant *contents)
- : R(first, last), Contents(contents) {
+ : R(first, last), Contents(contents) {
assert(contentsValid() && "Contents do not match range");
}
/// empty - Return whether the bit range is empty.
- bool empty() const {
- return R.empty();
- }
+ bool empty() const { return R.empty(); }
/// getBitWidth - Return the number of bits in the range.
- unsigned getBitWidth() const {
- return (unsigned)R.getWidth();
- }
+ unsigned getBitWidth() const { return (unsigned) R.getWidth(); }
/// getRange - Return the range of bits in this slice.
- SignedRange getRange() const {
- return R;
- }
+ SignedRange getRange() const { return R; }
/// Displace - Return the result of sliding all bits by the given offset.
BitSlice Displace(int Offset) const {
@@ -173,7 +167,7 @@
if (R == r)
return *this;
assert(!r.empty() && "Empty ranges did not evaluate as equal?");
- Type *ExtTy = IntegerType::get(Context, (unsigned)r.getWidth());
+ Type *ExtTy = IntegerType::get(Context, (unsigned) r.getWidth());
// If the slice contains no bits then every bit of the extension is zero.
if (empty())
return BitSlice(r, Constant::getNullValue(ExtTy));
@@ -183,11 +177,11 @@
unsigned deltaFirst = (unsigned)(R.getFirst() - r.getFirst());
unsigned deltaLast = (unsigned)(r.getLast() - R.getLast());
if (BYTES_BIG_ENDIAN && deltaLast) {
- (void)deltaFirst; // Avoid unused variable warning.
+ (void) deltaFirst; // Avoid unused variable warning.
Constant *ShiftAmt = ConstantInt::get(C->getType(), deltaLast);
C = Folder.CreateShl(C, ShiftAmt);
} else if (!BYTES_BIG_ENDIAN && deltaFirst) {
- (void)deltaLast; // Avoid unused variable warning.
+ (void) deltaLast; // Avoid unused variable warning.
Constant *ShiftAmt = ConstantInt::get(C->getType(), deltaFirst);
C = Folder.CreateShl(C, ShiftAmt);
}
@@ -206,7 +200,7 @@
// Quick exit if the desired range matches that of the slice.
if (R == r)
return Contents;
- Type *RetTy = IntegerType::get(Context, (unsigned)r.getWidth());
+ Type *RetTy = IntegerType::get(Context, (unsigned) r.getWidth());
// If the slice contains no bits then every returned bit is undefined.
if (empty())
return UndefValue::get(RetTy);
@@ -258,16 +252,16 @@
unsigned deltaFirst = (unsigned)(r.getFirst() - R.getFirst());
unsigned deltaLast = (unsigned)(R.getLast() - r.getLast());
if (BYTES_BIG_ENDIAN && deltaLast) {
- (void)deltaFirst; // Avoid unused variable warning.
+ (void) deltaFirst; // Avoid unused variable warning.
Constant *ShiftAmt = ConstantInt::get(C->getType(), deltaLast);
C = Folder.CreateLShr(C, ShiftAmt);
} else if (!BYTES_BIG_ENDIAN && deltaFirst) {
- (void)deltaLast; // Avoid unused variable warning.
+ (void) deltaLast; // Avoid unused variable warning.
Constant *ShiftAmt = ConstantInt::get(C->getType(), deltaFirst);
C = Folder.CreateLShr(C, ShiftAmt);
}
// Truncate to the new type.
- Type *RedTy = IntegerType::get(Context, (unsigned)r.getWidth());
+ Type *RedTy = IntegerType::get(Context, (unsigned) r.getWidth());
C = Folder.CreateTruncOrBitCast(C, RedTy);
return BitSlice(r, C);
}
@@ -314,8 +308,8 @@
// nothing to worry about: the bits occupy the range [0, StoreSize). But
// if not then endianness matters: on big-endian machines there are padding
// bits at the start, while on little-endian machines they are at the end.
- return BYTES_BIG_ENDIAN ?
- BitSlice(StoreSize - BitWidth, StoreSize, C) : BitSlice(0, BitWidth, C);
+ return BYTES_BIG_ENDIAN ? BitSlice(StoreSize - BitWidth, StoreSize, C) :
+ BitSlice(0, BitWidth, C);
}
case Type::ArrayTyID: {
@@ -349,8 +343,9 @@
StructType *STy = cast<StructType>(Ty);
const StructLayout *SL = getDataLayout().getStructLayout(STy);
// Fields with indices in [FirstIdx, LastIdx) overlap the range.
- unsigned FirstIdx = SL->getElementContainingOffset(R.getFirst()/8);
- unsigned LastIdx = 1 + SL->getElementContainingOffset((R.getLast()-1)/8);
+ unsigned FirstIdx = SL->getElementContainingOffset(R.getFirst() / 8);
+ unsigned LastIdx = 1 +
+ SL->getElementContainingOffset((R.getLast() - 1) / 8);
// Visit all fields that overlap the requested range, accumulating their
// bits in Bits.
BitSlice Bits;
@@ -381,7 +376,8 @@
// Elements with indices in [FirstElt, LastElt) overlap the range.
unsigned FirstElt = R.getFirst() / Stride;
unsigned LastElt = (R.getLast() + Stride - 1) / Stride;
- assert(LastElt <= VTy->getNumElements() && "Store size bigger than vector?");
+ assert(LastElt <= VTy->getNumElements() &&
+ "Store size bigger than vector?");
// Visit all elements that overlap the requested range, accumulating their
// bits in Bits.
BitSlice Bits;
@@ -408,7 +404,7 @@
/// same constant as you would get by storing the bits of 'C' to memory (with
/// the first bit stored being 'StartingBit') and then loading out a (constant)
/// value of type 'Ty' from the stored to memory location.
-static Constant *InterpretAsType(Constant *C, Type* Ty, int StartingBit,
+static Constant *InterpretAsType(Constant *C, Type *Ty, int StartingBit,
TargetFolder &Folder) {
// Efficient handling for some common cases.
if (C->getType() == Ty)
@@ -438,8 +434,8 @@
// the end on little-endian machines.
Bits = Bits.Displace(-StartingBit);
return BYTES_BIG_ENDIAN ?
- Bits.getBits(SignedRange(StoreSize - BitWidth, StoreSize), Folder) :
- Bits.getBits(SignedRange(0, BitWidth), Folder);
+ Bits.getBits(SignedRange(StoreSize - BitWidth, StoreSize), Folder) :
+ Bits.getBits(SignedRange(0, BitWidth), Folder);
}
case Type::PointerTyID: {
@@ -469,9 +465,9 @@
Type *EltTy = ATy->getElementType();
const unsigned Stride = getDataLayout().getTypeAllocSizeInBits(EltTy);
const unsigned NumElts = ATy->getNumElements();
- std::vector<Constant*> Vals(NumElts);
+ std::vector<Constant *> Vals(NumElts);
for (unsigned i = 0; i != NumElts; ++i)
- Vals[i] = InterpretAsType(C, EltTy, StartingBit + i*Stride, Folder);
+ Vals[i] = InterpretAsType(C, EltTy, StartingBit + i * Stride, Folder);
return ConstantArray::get(ATy, Vals); // TODO: Use ArrayRef constructor.
}
@@ -480,11 +476,10 @@
StructType *STy = cast<StructType>(Ty);
const StructLayout *SL = getDataLayout().getStructLayout(STy);
unsigned NumElts = STy->getNumElements();
- std::vector<Constant*> Vals(NumElts);
+ std::vector<Constant *> Vals(NumElts);
for (unsigned i = 0; i != NumElts; ++i)
- Vals[i] = InterpretAsType(C, STy->getElementType(i),
- StartingBit + SL->getElementOffsetInBits(i),
- Folder);
+ Vals[i] = InterpretAsType(C, STy->getElementType(i), StartingBit +
+ SL->getElementOffsetInBits(i), Folder);
return ConstantStruct::get(STy, Vals); // TODO: Use ArrayRef constructor.
}
@@ -494,24 +489,22 @@
Type *EltTy = VTy->getElementType();
const unsigned Stride = getDataLayout().getTypeAllocSizeInBits(EltTy);
const unsigned NumElts = VTy->getNumElements();
- SmallVector<Constant*, 16> Vals(NumElts);
+ SmallVector<Constant *, 16> Vals(NumElts);
for (unsigned i = 0; i != NumElts; ++i)
- Vals[i] = InterpretAsType(C, EltTy, StartingBit + i*Stride, Folder);
+ Vals[i] = InterpretAsType(C, EltTy, StartingBit + i * Stride, Folder);
return ConstantVector::get(Vals);
}
}
}
-
//===----------------------------------------------------------------------===//
// ... ExtractRegisterFromConstant ...
//===----------------------------------------------------------------------===//
/// ExtractRegisterFromConstantImpl - Implementation of
/// ExtractRegisterFromConstant.
-static Constant *ExtractRegisterFromConstantImpl(Constant *C, tree type,
- int StartingByte,
- TargetFolder &Folder) {
+static Constant *ExtractRegisterFromConstantImpl(
+ Constant *C, tree type, int StartingByte, TargetFolder &Folder) {
// NOTE: Needs to be kept in sync with getRegType and RepresentAsMemory.
int StartingBit = StartingByte * BITS_PER_UNIT;
switch (TREE_CODE(type)) {
@@ -561,10 +554,10 @@
tree elt_type = main_type(type);
unsigned NumElts = TYPE_VECTOR_SUBPARTS(type);
unsigned Stride = GET_MODE_BITSIZE(TYPE_MODE(elt_type));
- SmallVector<Constant*, 16> Vals(NumElts);
+ SmallVector<Constant *, 16> Vals(NumElts);
for (unsigned i = 0; i != NumElts; ++i)
- Vals[i] = ExtractRegisterFromConstantImpl(C, elt_type,
- StartingBit+i*Stride, Folder);
+ Vals[i] = ExtractRegisterFromConstantImpl(C, elt_type, StartingBit +
+ i * Stride, Folder);
return ConstantVector::get(Vals);
}
@@ -576,12 +569,12 @@
/// getRegType, and is what you would get by storing the constant to memory and
/// using LoadRegisterFromMemory to load a register value back out starting from
/// byte StartingByte.
-Constant *ExtractRegisterFromConstant(Constant *C, tree type, int StartingByte) {
+Constant *ExtractRegisterFromConstant(Constant *C, tree type,
+ int StartingByte) {
TargetFolder Folder(&getDataLayout());
return ExtractRegisterFromConstantImpl(C, type, StartingByte, Folder);
}
-
//===----------------------------------------------------------------------===//
// ... ConvertInitializer ...
//===----------------------------------------------------------------------===//
@@ -620,7 +613,7 @@
Type *MemTy = IntegerType::get(Context, Size);
bool isSigned = !TYPE_UNSIGNED(type);
Result = isSigned ? Folder.CreateSExtOrBitCast(C, MemTy) :
- Folder.CreateZExtOrBitCast(C, MemTy);
+ Folder.CreateZExtOrBitCast(C, MemTy);
break;
}
@@ -655,7 +648,7 @@
case VECTOR_TYPE: {
tree elt_type = main_type(type);
unsigned NumElts = TYPE_VECTOR_SUBPARTS(type);
- std::vector<Constant*> Vals(NumElts);
+ std::vector<Constant *> Vals(NumElts);
for (unsigned i = 0; i != NumElts; ++i) {
ConstantInt *Idx = ConstantInt::get(Type::getInt32Ty(Context), i);
Vals[i] = Folder.CreateExtractElement(C, Idx);
@@ -728,13 +721,14 @@
static Constant *ConvertCST(tree exp, TargetFolder &) {
const tree type = main_type(exp);
unsigned SizeInChars = (TREE_INT_CST_LOW(TYPE_SIZE(type)) + CHAR_BIT - 1) /
- CHAR_BIT;
+ CHAR_BIT;
// Encode the constant in Buffer in target format.
SmallVector<uint8_t, 16> Buffer(SizeInChars);
unsigned CharsWritten = native_encode_expr(exp, &Buffer[0], SizeInChars);
assert(CharsWritten == SizeInChars && "Failed to fully encode expression!");
- (void)CharsWritten; // Avoid unused variable warning when assertions disabled.
- // Turn it into an LLVM byte array.
+ (void)
+ CharsWritten; // Avoid unused variable warning when assertions disabled.
+ // Turn it into an LLVM byte array.
return ConstantDataArray::get(Context, Buffer);
}
@@ -744,19 +738,19 @@
ArrayType *StrTy = cast<ArrayType>(ConvertType(TREE_TYPE(exp)));
Type *ElTy = StrTy->getElementType();
- unsigned Len = (unsigned)TREE_STRING_LENGTH(exp);
+ unsigned Len = (unsigned) TREE_STRING_LENGTH(exp);
- std::vector<Constant*> Elts;
+ std::vector<Constant *> Elts;
if (ElTy->isIntegerTy(8)) {
- const unsigned char *InStr =(const unsigned char *)TREE_STRING_POINTER(exp);
+ const unsigned char *InStr = (const unsigned char *)TREE_STRING_POINTER(
+ exp);
for (unsigned i = 0; i != Len; ++i)
Elts.push_back(ConstantInt::get(Type::getInt8Ty(Context), InStr[i]));
} else if (ElTy->isIntegerTy(16)) {
- assert((Len&1) == 0 &&
+ assert((Len & 1) == 0 &&
"Length in bytes should be a multiple of element size");
- const uint16_t *InStr =
- (const unsigned short *)TREE_STRING_POINTER(exp);
- for (unsigned i = 0; i != Len/2; ++i) {
+ const uint16_t *InStr = (const unsigned short *)TREE_STRING_POINTER(exp);
+ for (unsigned i = 0; i != Len / 2; ++i) {
// gcc has constructed the initializer elements in the target endianness,
// but we're going to treat them as ordinary shorts from here, with
// host endianness. Adjust if necessary.
@@ -767,10 +761,10 @@
ByteSwap_16(InStr[i])));
}
} else if (ElTy->isIntegerTy(32)) {
- assert((Len&3) == 0 &&
+ assert((Len & 3) == 0 &&
"Length in bytes should be a multiple of element size");
const uint32_t *InStr = (const uint32_t *)TREE_STRING_POINTER(exp);
- for (unsigned i = 0; i != Len/4; ++i) {
+ for (unsigned i = 0; i != Len / 4; ++i) {
// gcc has constructed the initializer elements in the target endianness,
// but we're going to treat them as ordinary ints from here, with
// host endianness. Adjust if necessary.
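
The byte-swapping in these hunks compensates for GCC encoding the string elements in target byte order while the plugin reads them back as host integers. A self-contained sketch of the adjustment, assuming a big-endian target for the sake of the example:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Standard one-byte probe for the host's byte order.
    static bool hostIsLittleEndian() {
      const std::uint16_t Probe = 1;
      return *reinterpret_cast<const unsigned char *>(&Probe) == 1;
    }

    static std::uint16_t byteSwap16(std::uint16_t V) {
      return static_cast<std::uint16_t>((V << 8) | (V >> 8));
    }

    int main() {
      // The element 0x1234 as a (hypothetical) big-endian target encodes it.
      const unsigned char Bytes[2] = { 0x12, 0x34 };
      std::uint16_t Raw;
      std::memcpy(&Raw, Bytes, sizeof(Raw));
      // Swap only when host and target byte orders differ.
      std::uint16_t Element = hostIsLittleEndian() ? byteSwap16(Raw) : Raw;
      assert(Element == 0x1234);
      return 0;
    }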
@@ -784,8 +778,8 @@
llvm_unreachable("Unknown character type!");
}
- unsigned LenInElts = Len /
- TREE_INT_CST_LOW(TYPE_SIZE_UNIT(main_type(main_type(exp))));
+ unsigned LenInElts = Len / TREE_INT_CST_LOW(
+ TYPE_SIZE_UNIT(main_type(main_type(exp))));
unsigned ConstantSize = StrTy->getNumElements();
if (LenInElts != ConstantSize) {
@@ -831,13 +825,13 @@
/// Elts - The initial values to use for the array elements. A null entry
/// means that the corresponding array element should be default initialized.
- std::vector<Constant*> Elts;
+ std::vector<Constant *> Elts;
// Resize to the number of array elements if known. This ensures that every
// element will be at least default initialized even if no initial value is
// given for it.
- uint64_t TypeElts = isa<ARRAY_TYPE>(init_type) ?
- ArrayLengthOf(init_type) : TYPE_VECTOR_SUBPARTS(init_type);
+ uint64_t TypeElts = isa<ARRAY_TYPE>(init_type) ? ArrayLengthOf(init_type) :
+ TYPE_VECTOR_SUBPARTS(init_type);
if (TypeElts != NO_LENGTH)
Elts.resize(TypeElts);
@@ -862,9 +856,8 @@
unsigned PadBits = EltSize - ValSize;
assert(PadBits % BITS_PER_UNIT == 0 && "Non-unit type size?");
unsigned Units = PadBits / BITS_PER_UNIT;
- Constant *PaddedElt[] = {
- Val, getDefaultValue(GetUnitType(Context, Units))
- };
+ Constant *PaddedElt[] = { Val,
+ getDefaultValue(GetUnitType(Context, Units)) };
Val = ConstantStruct::getAnon(PaddedElt);
}
@@ -879,7 +872,7 @@
LastIndex = FirstIndex = NextIndex;
} else if (isa<RANGE_EXPR>(index)) {
tree first = TREE_OPERAND(index, 0);
- tree last = TREE_OPERAND(index, 1);
+ tree last = TREE_OPERAND(index, 1);
// Subtract off the lower bound if any to ensure indices start from zero.
if (lower_bnd != NULL_TREE) {
@@ -949,11 +942,11 @@
// packed struct. This can happen if the user forced a small alignment on the
// array type.
if (MaxAlign * 8 > TYPE_ALIGN(main_type(exp)))
- return ConstantStruct::getAnon(Context, Elts, /*Packed*/true);
+ return ConstantStruct::getAnon(Context, Elts, /*Packed*/ true);
// Return as a struct if the contents are not homogeneous.
if (!isHomogeneous) {
- std::vector<Constant*> StructElts;
+ std::vector<Constant *> StructElts;
unsigned First = 0, E = Elts.size();
while (First < E) {
// Find the maximal value of Last s.t. all elements in the range
@@ -969,8 +962,8 @@
StructElt = Elts[First];
else
StructElt = ConstantArray::get(ArrayType::get(Ty, NumSameType),
- ArrayRef<Constant*>(&Elts[First],
- NumSameType));
+ ArrayRef<Constant *>(&Elts[First],
+ NumSameType));
StructElts.push_back(StructElt);
First = Last;
}
@@ -997,7 +990,7 @@
int Starts; // The first bit of the constant is positioned at this offset.
FieldContents(SignedRange r, Constant *c, int starts, TargetFolder &folder)
- : Folder(folder), R(r), C(c), Starts(starts) {
+ : Folder(folder), R(r), C(c), Starts(starts) {
assert((R.empty() || C) && "Need constant when range not empty!");
}
@@ -1030,7 +1023,7 @@
// If the constant is wider than the range then it needs to be truncated
// before being passed to the user.
unsigned AllocBits = DL.getTypeAllocSizeInBits(Ty);
- return AllocBits <= (unsigned)R.getWidth();
+ return AllocBits <= (unsigned) R.getWidth();
}
public:
@@ -1041,8 +1034,11 @@
}
// Copy assignment operator.
- FieldContents &operator=(const FieldContents &other) {
- R = other.R; C = other.C; Starts = other.Starts; Folder = other.Folder;
+ FieldContents &operator=(const FieldContents & other) {
+ R = other.R;
+ C = other.C;
+ Starts = other.Starts;
+ Folder = other.Folder;
return *this;
}
@@ -1118,7 +1114,7 @@
// together. This can result in a nasty integer constant expression, but as
// we only get here for bitfields that's mostly harmless.
BitSlice Bits(R, getAsBits());
- Bits.Merge (BitSlice(S.R, S.getAsBits()), Folder);
+ Bits.Merge(BitSlice(S.R, S.getAsBits()), Folder);
R = Bits.getRange();
C = Bits.getBits(R, Folder);
Starts = R.empty() ? 0 : R.getFirst();
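
The Merge call above ORs two bitfield pieces into a single constant. The core arithmetic in miniature, for constants that fit in 64 bits and little-endian bit numbering (the real BitSlice also handles wider constants and big-endian layout):

    #include <cassert>
    #include <cstdint>

    // Merge a W-bit value V into Bits at bit offset O.
    static std::uint64_t mergeBits(std::uint64_t Bits, std::uint64_t V,
                                   unsigned O, unsigned W) {
      std::uint64_t Mask = (W == 64 ? ~0ULL : ((1ULL << W) - 1)) << O;
      return (Bits & ~Mask) | ((V << O) & Mask);
    }

    int main() {
      // struct { unsigned a : 3; unsigned b : 5; } with a = 5, b = 17.
      std::uint64_t Bits = 0;
      Bits = mergeBits(Bits, 5, 0, 3);
      Bits = mergeBits(Bits, 17, 3, 5);
      assert(Bits == ((17u << 3) | 5u));
      return 0;
    }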
@@ -1141,7 +1137,8 @@
// Record all interesting fields so they can easily be visited backwards.
SmallVector<tree, 16> Fields;
for (tree field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) {
- if (!isa<FIELD_DECL>(field)) continue;
+ if (!isa<FIELD_DECL>(field))
+ continue;
// Ignore fields with variable or unknown position since they cannot be
// default initialized.
if (!OffsetIsLLVMCompatible(field))
@@ -1153,7 +1150,8 @@
// types for which the first field must be default initialized (iterating
// in forward order would default initialize the last field).
for (SmallVector<tree, 16>::reverse_iterator I = Fields.rbegin(),
- E = Fields.rend(); I != E; ++I) {
+ E = Fields.rend();
+ I != E; ++I) {
tree field = *I;
uint64_t FirstBit = getFieldOffsetInBits(field);
assert(FirstBit <= TypeSize && "Field off end of type!");
@@ -1198,7 +1196,8 @@
field = next_field;
while (1) {
assert(field && "Fell off end of record!");
- if (isa<FIELD_DECL>(field)) break;
+ if (isa<FIELD_DECL>(field))
+ break;
field = TREE_CHAIN(field);
}
}
@@ -1215,8 +1214,8 @@
// If a size was specified for the field then use it. Otherwise take the
// size from the initial value.
uint64_t BitWidth = isInt64(DECL_SIZE(field), true) ?
- getInt64(DECL_SIZE(field), true) :
- DL.getTypeAllocSizeInBits(Init->getType());
+ getInt64(DECL_SIZE(field), true) :
+ DL.getTypeAllocSizeInBits(Init->getType());
uint64_t LastBit = FirstBit + BitWidth;
// Set the bits occupied by the field to the initial value.
@@ -1246,7 +1245,7 @@
// Create the elements that will make up the struct. As well as the fields
// themselves there may also be padding elements.
- std::vector<Constant*> Elts;
+ std::vector<Constant *> Elts;
Elts.reserve(Layout.getNumIntervals());
unsigned EndOfPrevious = 0; // Offset of first bit after previous element.
for (unsigned i = 0, e = Layout.getNumIntervals(); i != e; ++i) {
@@ -1333,10 +1332,12 @@
debug_tree(exp);
llvm_unreachable("Unknown constructor!");
case VECTOR_TYPE:
- case ARRAY_TYPE: return ConvertArrayCONSTRUCTOR(exp, Folder);
+ case ARRAY_TYPE:
+ return ConvertArrayCONSTRUCTOR(exp, Folder);
case QUAL_UNION_TYPE:
case RECORD_TYPE:
- case UNION_TYPE: return ConvertRecordCONSTRUCTOR(exp, Folder);
+ case UNION_TYPE:
+ return ConvertRecordCONSTRUCTOR(exp, Folder);
}
}
@@ -1364,8 +1365,8 @@
// Convert the pointer into an i8* and add the offset to it.
Ptr = Folder.CreateBitCast(Ptr, GetUnitPointerType(Context));
Constant *Result = POINTER_TYPE_OVERFLOW_UNDEFINED ?
- Folder.CreateInBoundsGetElementPtr(Ptr, Idx) :
- Folder.CreateGetElementPtr(Ptr, Idx);
+ Folder.CreateInBoundsGetElementPtr(Ptr, Idx) :
+ Folder.CreateGetElementPtr(Ptr, Idx);
// The result may be of a different pointer type.
Result = Folder.CreateBitCast(Result, getRegType(TREE_TYPE(exp)));
@@ -1473,7 +1474,6 @@
return ConvertInitializerImpl(exp, Folder);
}
-
//===----------------------------------------------------------------------===//
// ... AddressOf ...
//===----------------------------------------------------------------------===//
@@ -1485,7 +1485,7 @@
// Cache the constants to avoid making obvious duplicates that have to be
// folded by the optimizer.
- static DenseMap<Constant*, GlobalVariable*> CSTCache;
+ static DenseMap<Constant *, GlobalVariable *> CSTCache;
GlobalVariable *&Slot = CSTCache[Init];
if (Slot)
return Slot;
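
The reference-into-the-map idiom above costs a single hash lookup: operator[] default-constructs a null slot, which is filled on first use and returned directly thereafter. The same pattern with std::map and a dummy type standing in for DenseMap and GlobalVariable:

    #include <cassert>
    #include <map>
    #include <string>

    struct GlobalVar { std::string Name; };

    static GlobalVar *addressOfConstant(int Init) {
      static std::map<int, GlobalVar *> Cache;
      GlobalVar *&Slot = Cache[Init];
      if (Slot)
        return Slot; // Already emitted a global for this constant.
      Slot = new GlobalVar{ "const." + std::to_string(Init) };
      return Slot;
    }

    int main() {
      assert(addressOfConstant(42) == addressOfConstant(42));
      return 0;
    }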
@@ -1536,8 +1536,8 @@
ArrayAddr = Folder.CreateBitCast(ArrayAddr, EltTy->getPointerTo());
return POINTER_TYPE_OVERFLOW_UNDEFINED ?
- Folder.CreateInBoundsGetElementPtr(ArrayAddr, IndexVal) :
- Folder.CreateGetElementPtr(ArrayAddr, IndexVal);
+ Folder.CreateInBoundsGetElementPtr(ArrayAddr, IndexVal) :
+ Folder.CreateGetElementPtr(ArrayAddr, IndexVal);
}
/// AddressOfCOMPONENT_REF - Return the address of a field in a record.
@@ -1552,8 +1552,8 @@
// (DECL_OFFSET_ALIGN / BITS_PER_UNIT). Convert to units.
unsigned factor = DECL_OFFSET_ALIGN(field_decl) / BITS_PER_UNIT;
if (factor != 1)
- Offset = Folder.CreateMul(Offset, ConstantInt::get(Offset->getType(),
- factor));
+ Offset = Folder.CreateMul(Offset,
+ ConstantInt::get(Offset->getType(), factor));
} else {
assert(DECL_FIELD_OFFSET(field_decl) && "Field offset not available!");
Offset = getAsRegister(DECL_FIELD_OFFSET(field_decl), Folder);
@@ -1564,10 +1564,10 @@
// Incorporate as much of it as possible into the pointer computation.
uint64_t Units = BitStart / BITS_PER_UNIT;
if (Units > 0) {
- Offset = Folder.CreateAdd(Offset, ConstantInt::get(Offset->getType(),
- Units));
+ Offset = Folder.CreateAdd(Offset,
+ ConstantInt::get(Offset->getType(), Units));
BitStart -= Units * BITS_PER_UNIT;
- (void)BitStart;
+ (void) BitStart;
}
assert(BitStart == 0 &&
"It's a bitfield reference or we didn't get to the field!");
@@ -1581,7 +1581,8 @@
}
/// AddressOfCOMPOUND_LITERAL_EXPR - Return the address of a compound literal.
-static Constant *AddressOfCOMPOUND_LITERAL_EXPR(tree exp, TargetFolder &Folder){
+static Constant *AddressOfCOMPOUND_LITERAL_EXPR(tree exp,
+ TargetFolder &Folder) {
tree decl = DECL_EXPR_DECL(COMPOUND_LITERAL_EXPR_DECL_EXPR(exp));
return AddressOfImpl(decl, Folder);
}
Modified: dragonegg/trunk/src/Convert.cpp
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/src/Convert.cpp?rev=173245&r1=173244&r2=173245&view=diff
==============================================================================
--- dragonegg/trunk/src/Convert.cpp (original)
+++ dragonegg/trunk/src/Convert.cpp Wed Jan 23 03:54:28 2013
@@ -96,17 +96,17 @@
static LLVMContext &Context = getGlobalContext();
STATISTIC(NumBasicBlocks, "Number of basic blocks converted");
-STATISTIC(NumStatements, "Number of gimple statements converted");
+STATISTIC(NumStatements, "Number of gimple statements converted");
/// getPointerAlignment - Return the alignment in bytes of exp, a pointer valued
/// expression, or 1 if the alignment is not known.
static unsigned int getPointerAlignment(tree exp) {
- assert(isa<ACCESS_TYPE>(TREE_TYPE (exp)) && "Expected a pointer type!");
+ assert(isa<ACCESS_TYPE>(TREE_TYPE(exp)) && "Expected a pointer type!");
unsigned int align =
#if (GCC_MINOR < 7)
- get_pointer_alignment(exp, BIGGEST_ALIGNMENT);
+ get_pointer_alignment(exp, BIGGEST_ALIGNMENT);
#else
- get_pointer_alignment(exp);
+ get_pointer_alignment(exp);
#endif
return align >= 8 ? align / 8 : 1;
}
@@ -192,7 +192,7 @@
Type *Ty = V->getType();
unsigned AddrSpace = Loc.Ptr->getType()->getPointerAddressSpace();
Value *Ptr = Builder.CreateBitCast(Loc.Ptr, Ty->getPointerTo(AddrSpace));
- StoreInst *SI = Builder.CreateAlignedStore(V, Ptr, Loc.getAlignment(),
+ StoreInst *SI = Builder.CreateAlignedStore(V, Ptr, Loc.getAlignment(),
Loc.Volatile);
if (AliasTag)
SI->setMetadata(LLVMContext::MD_tbaa, AliasTag);
@@ -214,7 +214,7 @@
if (RegTy->isIntegerTy()) {
assert(MemTy->isIntegerTy() && "Type mismatch!");
- return Builder.CreateIntCast(V, RegTy, /*isSigned*/!TYPE_UNSIGNED(type));
+ return Builder.CreateIntCast(V, RegTy, /*isSigned*/ !TYPE_UNSIGNED(type));
}
if (RegTy->isPointerTy()) {
@@ -239,7 +239,7 @@
assert(isa<VECTOR_TYPE>(type) && "Expected a vector type!");
assert(MemTy->isVectorTy() && "Type mismatch!");
Value *Res = UndefValue::get(RegTy);
- unsigned NumElts = (unsigned)TYPE_VECTOR_SUBPARTS(type);
+ unsigned NumElts = (unsigned) TYPE_VECTOR_SUBPARTS(type);
for (unsigned i = 0; i != NumElts; ++i) {
Value *Idx = Builder.getInt32(i);
Value *Val = Builder.CreateExtractElement(V, Idx);
@@ -268,7 +268,7 @@
if (MemTy->isIntegerTy()) {
assert(RegTy->isIntegerTy() && "Type mismatch!");
- return Builder.CreateIntCast(V, MemTy, /*isSigned*/!TYPE_UNSIGNED(type));
+ return Builder.CreateIntCast(V, MemTy, /*isSigned*/ !TYPE_UNSIGNED(type));
}
if (MemTy->isPointerTy()) {
@@ -293,7 +293,7 @@
assert(isa<VECTOR_TYPE>(type) && "Expected a vector type!");
assert(RegTy->isVectorTy() && "Type mismatch!");
Value *Res = UndefValue::get(MemTy);
- unsigned NumElts = (unsigned)TYPE_VECTOR_SUBPARTS(type);
+ unsigned NumElts = (unsigned) TYPE_VECTOR_SUBPARTS(type);
for (unsigned i = 0; i != NumElts; ++i) {
Value *Idx = Builder.getInt32(i);
Value *Val = Builder.CreateExtractElement(V, Idx);
@@ -310,7 +310,8 @@
/// describeTypeRange - Return metadata describing the set of possible values
/// that an in-memory variable of the given GCC type can take on.
static MDNode *describeTypeRange(tree type) {
- if (!isa<INTEGRAL_TYPE>(type)) return 0; // Only discrete types have ranges.
+ if (!isa<INTEGRAL_TYPE>(type))
+ return 0; // Only discrete types have ranges.
// The range of possible values is TYPE_MIN_VALUE .. TYPE_MAX_VALUE.
tree min = TYPE_MIN_VALUE(type);
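
Worth noting when reading this function: LLVM's value-range metadata expresses intervals in half-open form, so TYPE_MAX_VALUE needs a +1 on its way out. Worked through for a signed 8-bit type (illustrative numbers only):

    #include <cassert>
    #include <cstdint>

    int main() {
      std::int64_t Min = -128; // TYPE_MIN_VALUE for signed char
      std::int64_t Max = 127;  // TYPE_MAX_VALUE for signed char
      std::int64_t Lo = Min, Hi = Max + 1; // half-open [Lo, Hi)
      assert(Hi - Lo == 256);
      return 0;
    }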
@@ -505,7 +506,7 @@
// and big endian machines.
unsigned Size = GET_MODE_BITSIZE(TYPE_MODE(type));
Type *MemTy = IntegerType::get(Context, Size);
- V = Builder.CreateIntCast(V, MemTy, /*isSigned*/!TYPE_UNSIGNED(type));
+ V = Builder.CreateIntCast(V, MemTy, /*isSigned*/ !TYPE_UNSIGNED(type));
StoreToLocation(V, Loc, AliasTag, Builder);
break;
}
@@ -538,7 +539,7 @@
// store it to memory.
Type *MemVecTy = VectorType::get(MemTy, NumElts);
V = Builder.CreateIntCast(V, MemVecTy,
- /*isSigned*/!TYPE_UNSIGNED(elt_type));
+ /*isSigned*/ !TYPE_UNSIGNED(elt_type));
StoreToLocation(V, Loc, AliasTag, Builder);
break;
}
@@ -557,7 +558,6 @@
}
}
-
//===----------------------------------------------------------------------===//
// ... High-Level Methods ...
//===----------------------------------------------------------------------===//
@@ -565,9 +565,7 @@
/// TheTreeToLLVM - Keep track of the current function being compiled.
TreeToLLVM *TheTreeToLLVM = 0;
-const DataLayout &getDataLayout() {
- return *TheTarget->getDataLayout();
-}
+const DataLayout &getDataLayout() { return *TheTarget->getDataLayout(); }
/// EmitDebugInfo - Return true if debug info is to be emitted for current
/// function.
@@ -577,8 +575,8 @@
return false;
}
-TreeToLLVM::TreeToLLVM(tree fndecl) :
- DL(getDataLayout()), Builder(Context, *TheFolder) {
+TreeToLLVM::TreeToLLVM(tree fndecl)
+ : DL(getDataLayout()), Builder(Context, *TheFolder) {
FnDecl = fndecl;
AllocaInsertionPoint = 0;
Fn = 0;
@@ -586,7 +584,7 @@
ReturnOffset = 0;
if (EmitDebugInfo()) {
- expanded_location Location = expand_location(DECL_SOURCE_LOCATION (fndecl));
+ expanded_location Location = expand_location(DECL_SOURCE_LOCATION(fndecl));
if (Location.file) {
TheDebugInfo->setLocationFile(Location.file);
@@ -601,9 +599,7 @@
TheTreeToLLVM = this;
}
-TreeToLLVM::~TreeToLLVM() {
- TheTreeToLLVM = 0;
-}
+TreeToLLVM::~TreeToLLVM() { TheTreeToLLVM = 0; }
//===----------------------------------------------------------------------===//
// ... Local declarations ...
@@ -611,16 +607,17 @@
/// isLocalDecl - Whether this declaration is local to the current function.
static bool isLocalDecl(tree decl) {
- if (isa<CONST_DECL>(decl)) return false;
+ if (isa<CONST_DECL>(decl))
+ return false;
assert(HAS_RTL_P(decl) && "Expected a declaration with RTL!");
return
- // GCC bug workaround: RESULT_DECL may not have DECL_CONTEXT set in thunks.
- (!DECL_CONTEXT(decl) && isa<RESULT_DECL>(decl)) ||
- // Usual case.
- (DECL_CONTEXT(decl) == current_function_decl &&
- !DECL_EXTERNAL(decl) && // External variables are not local.
- !TREE_STATIC(decl) && // Static variables not considered local.
- !isa<FUNCTION_DECL>(decl)); // Nested functions not considered local.
+ // GCC bug workaround: RESULT_DECL may not have DECL_CONTEXT set in thunks.
+ (!DECL_CONTEXT(decl) && isa<RESULT_DECL>(decl)) ||
+ // Usual case.
+ (DECL_CONTEXT(decl) == current_function_decl &&
+ !DECL_EXTERNAL(decl) && // External variables are not local.
+ !TREE_STATIC(decl) && // Static variables not considered local.
+ !isa<FUNCTION_DECL>(decl)); // Nested functions not considered local.
}
/// set_decl_local - Remember the LLVM value for a GCC declaration.
@@ -678,8 +675,7 @@
/// llvm_store_scalar_argument - Store scalar argument ARGVAL of type
/// LLVMTY at location LOC.
static void llvm_store_scalar_argument(Value *Loc, Value *ArgVal,
- llvm::Type *LLVMTy,
- unsigned RealSize,
+ llvm::Type *LLVMTy, unsigned RealSize,
LLVMBuilder &Builder) {
if (RealSize) {
// Not clear what this is supposed to do on big endian machines...
@@ -703,203 +699,188 @@
}
#ifndef LLVM_STORE_SCALAR_ARGUMENT
-#define LLVM_STORE_SCALAR_ARGUMENT(LOC,ARG,TYPE,SIZE,BUILDER) \
- llvm_store_scalar_argument((LOC),(ARG),(TYPE),(SIZE),(BUILDER))
+#define LLVM_STORE_SCALAR_ARGUMENT(LOC, ARG, TYPE, SIZE, BUILDER) \
+ llvm_store_scalar_argument((LOC), (ARG), (TYPE), (SIZE), (BUILDER))
#endif
// This is true for types whose alignment when passed on the stack is less
// than the alignment of the type.
-#define LLVM_BYVAL_ALIGNMENT_TOO_SMALL(T) \
- (LLVM_BYVAL_ALIGNMENT(T) && LLVM_BYVAL_ALIGNMENT(T) < TYPE_ALIGN_UNIT(T))
+#define LLVM_BYVAL_ALIGNMENT_TOO_SMALL(T) \
+ (LLVM_BYVAL_ALIGNMENT(T) && LLVM_BYVAL_ALIGNMENT(T) < TYPE_ALIGN_UNIT(T))
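
The macro reads: the target imposes a byval stack alignment that is nonzero yet smaller than the type's natural alignment, in which case the prologue below must make a realigned copy. A trivial standalone rendering with illustrative numbers:

    #include <cassert>

    // Standalone analogue of LLVM_BYVAL_ALIGNMENT_TOO_SMALL; zero means the
    // target imposes no byval alignment at all.
    static bool byvalAlignmentTooSmall(unsigned ByvalAlign, unsigned TypeAlign) {
      return ByvalAlign && ByvalAlign < TypeAlign;
    }

    int main() {
      // E.g. a 16-byte-aligned vector type passed at 4-byte stack alignment.
      assert(byvalAlignmentTooSmall(4, 16));
      assert(!byvalAlignmentTooSmall(0, 16));
      return 0;
    }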
namespace {
- /// FunctionPrologArgumentConversion - This helper class is driven by the ABI
- /// definition for this target to figure out how to retrieve arguments from
- /// the stack/regs coming into a function and store them into an appropriate
- /// alloca for the argument.
- struct FunctionPrologArgumentConversion : public DefaultABIClient {
- tree FunctionDecl;
- Function::arg_iterator &AI;
- LLVMBuilder Builder;
- std::vector<Value*> LocStack;
- std::vector<std::string> NameStack;
- CallingConv::ID &CallingConv;
- unsigned Offset;
- bool isShadowRet;
- FunctionPrologArgumentConversion(tree FnDecl,
- Function::arg_iterator &ai,
- const LLVMBuilder &B, CallingConv::ID &CC)
+/// FunctionPrologArgumentConversion - This helper class is driven by the ABI
+/// definition for this target to figure out how to retrieve arguments from
+/// the stack/regs coming into a function and store them into an appropriate
+/// alloca for the argument.
+struct FunctionPrologArgumentConversion : public DefaultABIClient {
+ tree FunctionDecl;
+ Function::arg_iterator &AI;
+ LLVMBuilder Builder;
+ std::vector<Value *> LocStack;
+ std::vector<std::string> NameStack;
+ CallingConv::ID &CallingConv;
+ unsigned Offset;
+ bool isShadowRet;
+ FunctionPrologArgumentConversion(tree FnDecl, Function::arg_iterator &ai,
+ const LLVMBuilder &B, CallingConv::ID &CC)
: FunctionDecl(FnDecl), AI(ai), Builder(B), CallingConv(CC), Offset(0),
- isShadowRet(false) {}
-
- /// getCallingConv - This provides the desired CallingConv for the function.
- CallingConv::ID getCallingConv(void) { return CallingConv; }
-
- void HandlePad(llvm::Type * /*LLVMTy*/) {
- ++AI;
- }
+ isShadowRet(false) {
+ }
- bool isShadowReturn() const {
- return isShadowRet;
- }
- void setName(const std::string &Name) {
- NameStack.push_back(Name);
- }
- void setLocation(Value *Loc) {
- LocStack.push_back(Loc);
- }
- void clear() {
- assert(NameStack.size() == 1 && LocStack.size() == 1 && "Imbalance!");
- NameStack.clear();
- LocStack.clear();
- }
-
- void HandleAggregateShadowResult(PointerType * /*PtrArgTy*/,
- bool /*RetPtr*/) {
- // If the function returns a structure by value, we transform the function
- // to take a pointer to the result as the first argument of the function
- // instead.
- assert(AI != Builder.GetInsertBlock()->getParent()->arg_end() &&
- "No explicit return value?");
- AI->setName("agg.result");
-
- isShadowRet = true;
- tree ResultDecl = DECL_RESULT(FunctionDecl);
- tree RetTy = TREE_TYPE(TREE_TYPE(FunctionDecl));
- if (TREE_CODE(RetTy) == TREE_CODE(TREE_TYPE(ResultDecl))) {
- TheTreeToLLVM->set_decl_local(ResultDecl, AI);
- ++AI;
- return;
- }
+ /// getCallingConv - This provides the desired CallingConv for the function.
+ CallingConv::ID getCallingConv(void) { return CallingConv; }
- // Otherwise, this must be something returned with NRVO.
- assert(isa<REFERENCE_TYPE>(TREE_TYPE(ResultDecl)) &&
- "Not type match and not passing by reference?");
- // Create an alloca for the ResultDecl.
- Value *Tmp = TheTreeToLLVM->CreateTemporary(AI->getType());
- Builder.CreateStore(AI, Tmp);
-
- TheTreeToLLVM->set_decl_local(ResultDecl, Tmp);
- if (TheDebugInfo && !DECL_IGNORED_P(FunctionDecl)) {
- TheDebugInfo->EmitDeclare(ResultDecl,
- dwarf::DW_TAG_auto_variable,
- "agg.result", RetTy, Tmp,
- Builder);
- }
- ++AI;
- }
+ void HandlePad(llvm::Type */*LLVMTy*/) { ++AI; }
- void HandleScalarShadowResult(PointerType * /*PtrArgTy*/,
- bool /*RetPtr*/) {
- assert(AI != Builder.GetInsertBlock()->getParent()->arg_end() &&
- "No explicit return value?");
- AI->setName("scalar.result");
- isShadowRet = true;
- TheTreeToLLVM->set_decl_local(DECL_RESULT(FunctionDecl), AI);
+ bool isShadowReturn() const { return isShadowRet; }
+ void setName(const std::string &Name) { NameStack.push_back(Name); }
+ void setLocation(Value *Loc) { LocStack.push_back(Loc); }
+ void clear() {
+ assert(NameStack.size() == 1 && LocStack.size() == 1 && "Imbalance!");
+ NameStack.clear();
+ LocStack.clear();
+ }
+
+ void HandleAggregateShadowResult(PointerType */*PtrArgTy*/, bool /*RetPtr*/) {
+ // If the function returns a structure by value, we transform the function
+ // to take a pointer to the result as the first argument of the function
+ // instead.
+ assert(AI != Builder.GetInsertBlock()->getParent()->arg_end() &&
+ "No explicit return value?");
+ AI->setName("agg.result");
+
+ isShadowRet = true;
+ tree ResultDecl = DECL_RESULT(FunctionDecl);
+ tree RetTy = TREE_TYPE(TREE_TYPE(FunctionDecl));
+ if (TREE_CODE(RetTy) == TREE_CODE(TREE_TYPE(ResultDecl))) {
+ TheTreeToLLVM->set_decl_local(ResultDecl, AI);
++AI;
+ return;
}
- void HandleScalarArgument(llvm::Type *LLVMTy, tree /*type*/,
- unsigned RealSize = 0) {
- Value *ArgVal = AI;
- if (ArgVal->getType() != LLVMTy) {
- if (ArgVal->getType()->isPointerTy() && LLVMTy->isPointerTy()) {
- // If this is GCC being sloppy about pointer types, insert a bitcast.
- // See PR1083 for an example.
- ArgVal = Builder.CreateBitCast(ArgVal, LLVMTy);
- } else if (ArgVal->getType()->isDoubleTy()) {
- // If this is a K&R float parameter, it got promoted to double. Insert
- // the truncation to float now.
- ArgVal = Builder.CreateFPTrunc(ArgVal, LLVMTy, NameStack.back());
- } else {
- // If this is just a mismatch between integer types, this is due
- // to K&R prototypes, where the forward proto defines the arg as int
- // and the actual impls is a short or char.
- assert(ArgVal->getType()->isIntegerTy(32) && LLVMTy->isIntegerTy() &&
- "Lowerings don't match?");
- ArgVal = Builder.CreateTrunc(ArgVal, LLVMTy,NameStack.back());
- }
- }
+ // Otherwise, this must be something returned with NRVO.
+ assert(isa<REFERENCE_TYPE>(TREE_TYPE(ResultDecl)) &&
+ "Not type match and not passing by reference?");
+ // Create an alloca for the ResultDecl.
+ Value *Tmp = TheTreeToLLVM->CreateTemporary(AI->getType());
+ Builder.CreateStore(AI, Tmp);
+
+ TheTreeToLLVM->set_decl_local(ResultDecl, Tmp);
+ if (TheDebugInfo && !DECL_IGNORED_P(FunctionDecl)) {
+ TheDebugInfo->EmitDeclare(ResultDecl, dwarf::DW_TAG_auto_variable,
+ "agg.result", RetTy, Tmp, Builder);
+ }
+ ++AI;
+ }
+
+ void HandleScalarShadowResult(PointerType */*PtrArgTy*/, bool /*RetPtr*/) {
+ assert(AI != Builder.GetInsertBlock()->getParent()->arg_end() &&
+ "No explicit return value?");
+ AI->setName("scalar.result");
+ isShadowRet = true;
+ TheTreeToLLVM->set_decl_local(DECL_RESULT(FunctionDecl), AI);
+ ++AI;
+ }
+
+ void HandleScalarArgument(llvm::Type *LLVMTy, tree /*type*/,
+ unsigned RealSize = 0) {
+ Value *ArgVal = AI;
+ if (ArgVal->getType() != LLVMTy) {
+ if (ArgVal->getType()->isPointerTy() && LLVMTy->isPointerTy()) {
+ // If this is GCC being sloppy about pointer types, insert a bitcast.
+ // See PR1083 for an example.
+ ArgVal = Builder.CreateBitCast(ArgVal, LLVMTy);
+ } else if (ArgVal->getType()->isDoubleTy()) {
+ // If this is a K&R float parameter, it got promoted to double. Insert
+ // the truncation to float now.
+ ArgVal = Builder.CreateFPTrunc(ArgVal, LLVMTy, NameStack.back());
+ } else {
+ // If this is just a mismatch between integer types, this is due
+ // to K&R prototypes, where the forward proto defines the arg as int
+ // and the actual impls is a short or char.
+ assert(ArgVal->getType()->isIntegerTy(32) && LLVMTy->isIntegerTy() &&
+ "Lowerings don't match?");
+ ArgVal = Builder.CreateTrunc(ArgVal, LLVMTy, NameStack.back());
+ }
+ }
+ assert(!LocStack.empty());
+ Value *Loc = LocStack.back();
+ LLVM_STORE_SCALAR_ARGUMENT(Loc, ArgVal, LLVMTy, RealSize, Builder);
+ AI->setName(NameStack.back());
+ ++AI;
+ }
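
The two truncations in this method undo C's default argument promotions for unprototyped (K&R) calls: float parameters travel as double and char/short parameters travel as int, so the callee has to narrow them back to the declared type. The promotions and fix-ups in miniature:

    #include <cassert>

    // The FPTrunc in the code above, at the source level.
    static float truncateBack(double Promoted) {
      return static_cast<float>(Promoted);
    }
    // The integer Trunc, likewise.
    static short truncateBackInt(int Promoted) {
      return static_cast<short>(Promoted);
    }

    int main() {
      assert(truncateBack(1.5) == 1.5f);
      assert(truncateBackInt(42) == 42);
      return 0;
    }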
+
+ void HandleByValArgument(llvm::Type */*LLVMTy*/, tree type) {
+ if (LLVM_BYVAL_ALIGNMENT_TOO_SMALL(type)) {
+ // Incoming object on stack is insufficiently aligned for the type.
+ // Make a correctly aligned copy.
assert(!LocStack.empty());
Value *Loc = LocStack.back();
- LLVM_STORE_SCALAR_ARGUMENT(Loc,ArgVal,LLVMTy,RealSize,Builder);
- AI->setName(NameStack.back());
- ++AI;
- }
-
- void HandleByValArgument(llvm::Type * /*LLVMTy*/, tree type) {
- if (LLVM_BYVAL_ALIGNMENT_TOO_SMALL(type)) {
- // Incoming object on stack is insufficiently aligned for the type.
- // Make a correctly aligned copy.
- assert(!LocStack.empty());
- Value *Loc = LocStack.back();
- // We cannot use field-by-field copy here; x86 long double is 16
- // bytes, but only 10 are copied. If the object is really a union
- // we might need the other bytes. We must also be careful to use
- // the smaller alignment.
- Type *SBP = Type::getInt8PtrTy(Context);
- Type *IntPtr = getDataLayout().getIntPtrType(Context, 0);
- Value *Ops[5] = {
- Builder.CreateCast(Instruction::BitCast, Loc, SBP),
- Builder.CreateCast(Instruction::BitCast, AI, SBP),
- ConstantInt::get(IntPtr,
- TREE_INT_CST_LOW(TYPE_SIZE_UNIT(type))),
- Builder.getInt32(LLVM_BYVAL_ALIGNMENT(type)),
- Builder.getFalse()
- };
- Type *ArgTypes[3] = {SBP, SBP, IntPtr };
- Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
- Intrinsic::memcpy,
- ArgTypes), Ops);
-
- AI->setName(NameStack.back());
- }
- ++AI;
- }
+ // We cannot use field-by-field copy here; x86 long double is 16
+ // bytes, but only 10 are copied. If the object is really a union
+ // we might need the other bytes. We must also be careful to use
+ // the smaller alignment.
+ Type *SBP = Type::getInt8PtrTy(Context);
+ Type *IntPtr = getDataLayout().getIntPtrType(Context, 0);
+ Value *Ops[5] = { Builder.CreateCast(Instruction::BitCast, Loc, SBP),
+ Builder.CreateCast(Instruction::BitCast, AI, SBP),
+ ConstantInt::get(
+ IntPtr, TREE_INT_CST_LOW(TYPE_SIZE_UNIT(type))),
+ Builder.getInt32(LLVM_BYVAL_ALIGNMENT(type)),
+ Builder.getFalse() };
+ Type *ArgTypes[3] = { SBP, SBP, IntPtr };
+ Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::memcpy,
+ ArgTypes), Ops);
- void HandleFCAArgument(llvm::Type * /*LLVMTy*/, tree /*type*/) {
- // Store the FCA argument into alloca.
- assert(!LocStack.empty());
- Value *Loc = LocStack.back();
- Builder.CreateStore(AI, Loc);
AI->setName(NameStack.back());
- ++AI;
}
+ ++AI;
+ }
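
The comment about x86 long double is the crux of this method: a member-wise copy may move only the 10 value bytes, while a union view of the same object can observe the padding, hence the whole-allocation memcpy. A standalone illustration (x86-specific sizes assumed):

    #include <cassert>
    #include <cstring>

    union U {
      long double LD; // on x86: 10 value bytes inside a 12/16-byte slot
      unsigned char Raw[sizeof(long double)];
    };

    // Dst.LD = Src.LD may legally copy only the value bytes; copying the
    // full allocation also preserves the padding the union can see.
    static void copyWhole(U &Dst, const U &Src) {
      std::memcpy(&Dst, &Src, sizeof(U));
    }

    int main() {
      U Src, Dst;
      std::memset(&Src, 0, sizeof(U));
      std::memset(&Dst, 0, sizeof(U));
      Src.Raw[sizeof(long double) - 1] = 0xAB; // sits in the padding on x86
      copyWhole(Dst, Src);
      assert(Dst.Raw[sizeof(long double) - 1] == 0xAB);
      return 0;
    }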
- void HandleAggregateResultAsScalar(Type * /*ScalarTy*/,
- unsigned Off = 0) {
- this->Offset = Off;
- }
+ void HandleFCAArgument(llvm::Type */*LLVMTy*/, tree /*type*/) {
+ // Store the FCA argument into alloca.
+ assert(!LocStack.empty());
+ Value *Loc = LocStack.back();
+ Builder.CreateStore(AI, Loc);
+ AI->setName(NameStack.back());
+ ++AI;
+ }
- void EnterField(unsigned FieldNo, llvm::Type *StructTy) {
- NameStack.push_back(NameStack.back()+"."+utostr(FieldNo));
+ void HandleAggregateResultAsScalar(Type */*ScalarTy*/, unsigned Off = 0) {
+ this->Offset = Off;
+ }
- Value *Loc = LocStack.back();
- // This cast only involves pointers, therefore BitCast.
- Loc = Builder.CreateBitCast(Loc, StructTy->getPointerTo());
+ void EnterField(unsigned FieldNo, llvm::Type *StructTy) {
+ NameStack.push_back(NameStack.back() + "." + utostr(FieldNo));
- Loc = Builder.CreateStructGEP(Loc, FieldNo,
- flag_verbose_asm ? "ntr" : "");
- LocStack.push_back(Loc);
- }
- void ExitField() {
- NameStack.pop_back();
- LocStack.pop_back();
- }
- };
+ Value *Loc = LocStack.back();
+ // This cast only involves pointers, therefore BitCast.
+ Loc = Builder.CreateBitCast(Loc, StructTy->getPointerTo());
+
+ Loc = Builder.CreateStructGEP(Loc, FieldNo, flag_verbose_asm ? "ntr" : "");
+ LocStack.push_back(Loc);
+ }
+ void ExitField() {
+ NameStack.pop_back();
+ LocStack.pop_back();
+ }
+};
}
// isPassedByVal - Return true if an aggregate of the specified type will be
// passed in memory byval.
-static bool isPassedByVal(tree type, Type *Ty,
- std::vector<Type*> &ScalarArgs,
+static bool isPassedByVal(tree type, Type *Ty, std::vector<Type *> &ScalarArgs,
bool isShadowRet, CallingConv::ID CC) {
- (void)type; (void)Ty; (void)ScalarArgs; (void)isShadowRet;
- (void)CC; // Not used by all ABI macros.
+ (void) type;
+ (void) Ty;
+ (void) ScalarArgs;
+ (void) isShadowRet;
+ (void) CC; // Not used by all ABI macros.
if (LLVM_SHOULD_PASS_AGGREGATE_USING_BYVAL_ATTR(type, Ty))
return true;
- std::vector<Type*> Args;
+ std::vector<Type *> Args;
if (LLVM_SHOULD_PASS_AGGREGATE_IN_MIXED_REGS(type, Ty, CC, Args) &&
LLVM_AGGREGATE_PARTIALLY_PASSED_IN_REGS(Args, ScalarArgs, isShadowRet,
CC))
@@ -923,8 +904,8 @@
// If this is a K&R-style function: with a type that takes no arguments but
// with arguments none the less, then calculate the LLVM type from the list
// of arguments.
- if (flag_functions_from_args || (TYPE_ARG_TYPES(TREE_TYPE(FnDecl)) == 0 &&
- DECL_ARGUMENTS(FnDecl))) {
+ if (flag_functions_from_args ||
+ (TYPE_ARG_TYPES(TREE_TYPE(FnDecl)) == 0 && DECL_ARGUMENTS(FnDecl))) {
SmallVector<tree, 8> Args;
for (tree Arg = DECL_ARGUMENTS(FnDecl); Arg; Arg = TREE_CHAIN(Arg))
Args.push_back(Arg);
@@ -939,7 +920,8 @@
// If we've already seen this function and created a prototype, and if the
// proto has the right LLVM type, just use it.
if (DECL_LOCAL_SET_P(FnDecl) &&
- cast<PointerType>(DECL_LOCAL(FnDecl)->getType())->getElementType()==FTy) {
+ cast<PointerType>(DECL_LOCAL(FnDecl)->getType())->getElementType() ==
+ FTy) {
Fn = cast<Function>(DECL_LOCAL(FnDecl));
assert(Fn->getCallingConv() == CallingConv &&
"Calling convention disagreement between prototype and impl!");
@@ -954,7 +936,7 @@
assert((FnEntry->isDeclaration() ||
FnEntry->getLinkage() == Function::AvailableExternallyLinkage) &&
"Multiple fns with same name and neither are external!");
- FnEntry->setName(""); // Clear name to avoid conflicts.
+ FnEntry->setName(""); // Clear name to avoid conflicts.
assert(FnEntry->getCallingConv() == CallingConv &&
"Calling convention disagreement between prototype and impl!");
}
@@ -969,8 +951,8 @@
// If a previous proto existed with the wrong type, replace any uses of it
// with the actual function and delete the proto.
if (FnEntry) {
- FnEntry->replaceAllUsesWith
- (TheFolder->CreateBitCast(Fn, FnEntry->getType()));
+ FnEntry->replaceAllUsesWith(TheFolder->CreateBitCast(Fn,
+ FnEntry->getType()));
changeLLVMConstant(FnEntry, Fn);
FnEntry->eraseFromParent();
}
@@ -981,9 +963,9 @@
assert(Fn->empty() && "Function expanded multiple times!");
// Compute the linkage that the function should get.
- if (false) {//FIXME DECL_LLVM_PRIVATE(FnDecl)) {
+ if (false) { //FIXME DECL_LLVM_PRIVATE(FnDecl)) {
Fn->setLinkage(Function::PrivateLinkage);
- } else if (false) {//FIXME DECL_LLVM_LINKER_PRIVATE(FnDecl)) {
+ } else if (false) { //FIXME DECL_LLVM_LINKER_PRIVATE(FnDecl)) {
Fn->setLinkage(Function::LinkerPrivateLinkage);
} else if (!TREE_PUBLIC(FnDecl) /*|| lang_hooks.llvm_is_in_anon(subr)*/) {
Fn->setLinkage(Function::InternalLinkage);
@@ -999,7 +981,7 @@
}
#ifdef TARGET_ADJUST_LLVM_LINKAGE
- TARGET_ADJUST_LLVM_LINKAGE(Fn,FnDecl);
+ TARGET_ADJUST_LLVM_LINKAGE(Fn, FnDecl);
#endif /* TARGET_ADJUST_LLVM_LINKAGE */
Fn->setUnnamedAddr(!TREE_ADDRESSABLE(FnDecl));
@@ -1022,15 +1004,15 @@
Fn->setSection(TREE_STRING_POINTER(DECL_SECTION_NAME(FnDecl)));
// Handle used Functions
- if (lookup_attribute ("used", DECL_ATTRIBUTES (FnDecl)))
+ if (lookup_attribute("used", DECL_ATTRIBUTES(FnDecl)))
AttributeUsedGlobals.insert(Fn);
// Handle noinline Functions
- if (lookup_attribute ("noinline", DECL_ATTRIBUTES (FnDecl)))
+ if (lookup_attribute("noinline", DECL_ATTRIBUTES(FnDecl)))
Fn->addFnAttr(Attribute::NoInline);
// Handle always_inline attribute
- if (lookup_attribute ("always_inline", DECL_ATTRIBUTES (FnDecl)))
+ if (lookup_attribute("always_inline", DECL_ATTRIBUTES(FnDecl)))
Fn->addFnAttr(Attribute::AlwaysInline);
// Pass inline keyword to optimizer.
@@ -1047,7 +1029,7 @@
Fn->addFnAttr(Attribute::StackProtectReq);
// Handle naked attribute
- if (lookup_attribute ("naked", DECL_ATTRIBUTES (FnDecl)))
+ if (lookup_attribute("naked", DECL_ATTRIBUTES(FnDecl)))
Fn->addFnAttr(Attribute::Naked);
// Handle annotate attributes
@@ -1058,8 +1040,8 @@
if (!flag_exceptions)
Fn->setDoesNotThrow();
- if (flag_unwind_tables)
- Fn->setHasUWTable();
+ if (flag_unwind_tables)
+ Fn->setHasUWTable();
// Create a new basic block for the function.
BasicBlock *EntryBlock = BasicBlock::Create(Context, "entry", Fn);
@@ -1088,10 +1070,11 @@
tree Args = static_chain ? static_chain : DECL_ARGUMENTS(FnDecl);
// Scalar arguments processed so far.
- std::vector<Type*> ScalarArgs;
+ std::vector<Type *> ScalarArgs;
while (Args) {
const char *Name = "unnamed_arg";
- if (DECL_NAME(Args)) Name = IDENTIFIER_POINTER(DECL_NAME(Args));
+ if (DECL_NAME(Args))
+ Name = IDENTIFIER_POINTER(DECL_NAME(Args));
Type *ArgTy = ConvertType(TREE_TYPE(Args));
bool isInvRef = isPassedByInvisibleReference(TREE_TYPE(Args));
@@ -1111,21 +1094,19 @@
AI->setName(Name);
SET_DECL_LOCAL(Args, AI);
if (!isInvRef && EmitDebugInfo())
- TheDebugInfo->EmitDeclare(Args, dwarf::DW_TAG_arg_variable,
- Name, TREE_TYPE(Args),
- AI, Builder);
+ TheDebugInfo->EmitDeclare(Args, dwarf::DW_TAG_arg_variable, Name,
+ TREE_TYPE(Args), AI, Builder);
ABIConverter.HandleArgument(TREE_TYPE(Args), ScalarArgs);
} else {
// Otherwise, we create an alloca to hold the argument value and provide
// an l-value. On entry to the function, we copy formal argument values
// into the alloca.
Value *Tmp = CreateTemporary(ArgTy, TYPE_ALIGN_UNIT(TREE_TYPE(Args)));
- Tmp->setName(std::string(Name)+"_addr");
+ Tmp->setName(std::string(Name) + "_addr");
SET_DECL_LOCAL(Args, Tmp);
if (EmitDebugInfo()) {
- TheDebugInfo->EmitDeclare(Args, dwarf::DW_TAG_arg_variable,
- Name, TREE_TYPE(Args), Tmp,
- Builder);
+ TheDebugInfo->EmitDeclare(Args, dwarf::DW_TAG_arg_variable, Name,
+ TREE_TYPE(Args), Tmp, Builder);
}
// Emit annotate intrinsic if arg has annotate attr
@@ -1133,8 +1114,8 @@
EmitAnnotateIntrinsic(Tmp, Args);
// Emit gcroot intrinsic if arg has attribute
- if (isa<ACCESS_TYPE>(TREE_TYPE(Args))
- && lookup_attribute ("gcroot", TYPE_ATTRIBUTES(TREE_TYPE(Args))))
+ if (isa<ACCESS_TYPE>(TREE_TYPE(Args)) &&
+ lookup_attribute("gcroot", TYPE_ATTRIBUTES(TREE_TYPE(Args))))
EmitTypeGcroot(Tmp);
Client.setName(Name);
@@ -1149,9 +1130,10 @@
// Loading the value of a PARM_DECL at this point yields its initial value.
// Remember this for use when materializing the reads implied by SSA default
// definitions.
- SSAInsertionPoint = Builder.Insert(CastInst::Create(Instruction::BitCast,
- Constant::getNullValue(Type::getInt32Ty(Context)),
- Type::getInt32Ty(Context)), "ssa point");
+ SSAInsertionPoint = Builder.Insert(CastInst::Create(
+ Instruction::BitCast,
+ Constant::getNullValue(Type::getInt32Ty(Context)),
+ Type::getInt32Ty(Context)), "ssa point");
// If this function has nested functions, we should handle a potential
// nonlocal_goto_save_area.
@@ -1171,14 +1153,14 @@
/// EmitVariablesInScope - Output a declaration for every variable in the
/// given scope.
void TreeToLLVM::EmitVariablesInScope(tree scope) {
- for (tree t = BLOCK_VARS(scope); t; t = TREE_CHAIN (t))
+ for (tree t = BLOCK_VARS(scope); t; t = TREE_CHAIN(t))
if (isa<VAR_DECL>(t))
// If this is just the rotten husk of a variable that the gimplifier
// eliminated all uses of, but is preserving for debug info, ignore it.
if (!DECL_HAS_VALUE_EXPR_P(t))
make_decl_local(t);
// Declare variables in contained scopes.
- for (tree t = BLOCK_SUBBLOCKS (scope); t ; t = BLOCK_CHAIN (t))
+ for (tree t = BLOCK_SUBBLOCKS(scope); t; t = BLOCK_CHAIN(t))
EmitVariablesInScope(t);
}
@@ -1199,9 +1181,9 @@
return SSANames[reg] = Val;
}
-typedef SmallVector<std::pair<BasicBlock*, unsigned>, 8> PredVector;
-typedef SmallVector<std::pair<BasicBlock*, tree>, 8> TreeVector;
-typedef SmallVector<std::pair<BasicBlock*, Value*>, 8> ValueVector;
+typedef SmallVector<std::pair<BasicBlock *, unsigned>, 8> PredVector;
+typedef SmallVector<std::pair<BasicBlock *, tree>, 8> TreeVector;
+typedef SmallVector<std::pair<BasicBlock *, Value *>, 8> ValueVector;
/// PopulatePhiNodes - Populate generated phi nodes with their operands.
void TreeToLLVM::PopulatePhiNodes() {
@@ -1209,7 +1191,7 @@
TreeVector IncomingValues;
ValueVector PhiArguments;
- for (unsigned Idx = 0, EIdx = (unsigned)PendingPhis.size(); Idx < EIdx;
+ for (unsigned Idx = 0, EIdx = (unsigned) PendingPhis.size(); Idx < EIdx;
++Idx) {
// The phi node to process.
PhiRecord &P = PendingPhis[Idx];
@@ -1220,7 +1202,7 @@
basic_block bb = gimple_phi_arg_edge(P.gcc_phi, i)->src;
// The corresponding LLVM basic block.
- DenseMap<basic_block, BasicBlock*>::iterator BI = BasicBlocks.find(bb);
+ DenseMap<basic_block, BasicBlock *>::iterator BI = BasicBlocks.find(bb);
assert(BI != BasicBlocks.end() && "GCC basic block not output?");
// The incoming GCC expression.
@@ -1276,13 +1258,14 @@
// Now iterate over the predecessors, setting phi operands as we go.
TreeVector::iterator VI = IncomingValues.begin(), VE = IncomingValues.end();
PredVector::iterator PI = Predecessors.begin(), PE = Predecessors.end();
- PhiArguments.resize((unsigned)Predecessors.size());
+ PhiArguments.resize((unsigned) Predecessors.size());
while (PI != PE) {
// The predecessor basic block.
BasicBlock *BB = PI->first;
// Find the incoming value for this predecessor.
- while (VI != VE && VI->first != BB) ++VI;
+ while (VI != VE && VI->first != BB)
+ ++VI;
assert(VI != VE && "No value for predecessor!");
Value *Val = EmitRegister(VI->second);
@@ -1319,7 +1302,7 @@
// Insert the return block at the end of the function.
BeginBlock(ReturnBB);
- SmallVector <Value *, 4> RetVals;
+ SmallVector<Value *, 4> RetVals;
// If the function returns a value, get it into a register and return it now.
if (!Fn->getReturnType()->isVoidTy()) {
@@ -1335,10 +1318,10 @@
ResultLV.getAlignment());
RetVals.push_back(Builder.CreateBitCast(Load, Fn->getReturnType()));
} else {
- uint64_t ResultSize =
- getDataLayout().getTypeAllocSize(ConvertType(TREE_TYPE(TreeRetVal)));
- uint64_t ReturnSize =
- getDataLayout().getTypeAllocSize(Fn->getReturnType());
+ uint64_t ResultSize = getDataLayout().getTypeAllocSize(
+ ConvertType(TREE_TYPE(TreeRetVal)));
+ uint64_t ReturnSize = getDataLayout().getTypeAllocSize(
+ Fn->getReturnType());
// The load does not necessarily start at the beginning of the aggregate
// (x86-64).
@@ -1348,14 +1331,15 @@
} else {
// Advance to the point we want to load from.
if (ReturnOffset) {
- ResultLV.Ptr =
- Builder.CreateBitCast(ResultLV.Ptr, Type::getInt8PtrTy(Context));
- ResultLV.Ptr =
- Builder.CreateGEP(ResultLV.Ptr,
- ConstantInt::get(DL.getIntPtrType(Context, 0),
- ReturnOffset),
- flag_verbose_asm ? "rtvl" : "");
- ResultLV.setAlignment(MinAlign(ResultLV.getAlignment(), ReturnOffset));
+ ResultLV.Ptr = Builder.CreateBitCast(ResultLV.Ptr,
+ Type::getInt8PtrTy(Context));
+ ResultLV.Ptr = Builder.CreateGEP(
+ ResultLV.Ptr,
+ ConstantInt::get(DL.getIntPtrType(Context, 0),
+ ReturnOffset),
+ flag_verbose_asm ? "rtvl" : "");
+ ResultLV.setAlignment(MinAlign(ResultLV.getAlignment(),
+ ReturnOffset));
ResultSize -= ReturnOffset;
}
@@ -1365,8 +1349,9 @@
// Copy out DECL_RESULT while being careful to not overrun the source or
// destination buffers.
uint64_t OctetsToCopy = std::min(ResultSize, ReturnSize);
- EmitMemCpy(ReturnLoc.Ptr, ResultLV.Ptr, Builder.getInt64(OctetsToCopy),
- std::min(ReturnLoc.getAlignment(), ResultLV.getAlignment()));
+ EmitMemCpy(
+ ReturnLoc.Ptr, ResultLV.Ptr, Builder.getInt64(OctetsToCopy),
+ std::min(ReturnLoc.getAlignment(), ResultLV.getAlignment()));
if (StructType *STy = dyn_cast<StructType>(Fn->getReturnType())) {
llvm::Value *Idxs[2];
@@ -1376,8 +1361,9 @@
Idxs[1] = Builder.getInt32(ri);
Value *GEP = Builder.CreateGEP(ReturnLoc.Ptr, Idxs,
flag_verbose_asm ? "mrv_gep" : "");
- Value *E = Builder.CreateAlignedLoad(GEP, /*Align*/Packed,
- flag_verbose_asm ? "mrv":"");
+ Value *E = Builder.CreateAlignedLoad(GEP, /*Align*/ Packed,
+ flag_verbose_asm ? "mrv" :
+ "");
RetVals.push_back(E);
}
// If the return type specifies an empty struct then return one.
@@ -1395,11 +1381,12 @@
}
if (RetVals.empty())
Builder.CreateRetVoid();
- else if (RetVals.size() == 1 && RetVals[0]->getType() == Fn->getReturnType()){
+ else if (RetVals.size() == 1 &&
+ RetVals[0]->getType() == Fn->getReturnType()) {
Builder.CreateRet(RetVals[0]);
} else {
assert(Fn->getReturnType()->isAggregateType() && "Return type mismatch!");
- Builder.CreateAggregateRet(RetVals.data(), (unsigned)RetVals.size());
+ Builder.CreateAggregateRet(RetVals.data(), (unsigned) RetVals.size());
}
} else { // !ReturnBB
BasicBlock *CurBB = Builder.GetInsertBlock();
@@ -1417,7 +1404,6 @@
}
}
-
// Populate phi nodes with their operands now that all ssa names have been
// defined and all basic blocks output.
PopulatePhiNodes();
@@ -1445,10 +1431,11 @@
// not matter because the compiler is going to exit with an error anyway.
if (errorcount || sorrycount)
#else
- // When checks are enabled, complain if an SSA name was used but not defined.
+// When checks are enabled, complain if an SSA name was used but not defined.
#endif
- for (DenseMap<tree,TrackingVH<Value> >::const_iterator I = SSANames.begin(),
- E = SSANames.end(); I != E; ++I) {
+ for (DenseMap<tree, TrackingVH<Value> >::const_iterator I = SSANames
+ .begin(), E = SSANames.end();
+ I != E; ++I) {
Value *NameDef = I->second;
// If this is not a placeholder then the SSA name was defined.
if (!isSSAPlaceholder(NameDef))
@@ -1472,7 +1459,7 @@
/// getBasicBlock - Find or create the LLVM basic block corresponding to BB.
BasicBlock *TreeToLLVM::getBasicBlock(basic_block bb) {
// If we already associated an LLVM basic block with BB, then return it.
- DenseMap<basic_block, BasicBlock*>::iterator I = BasicBlocks.find(bb);
+ DenseMap<basic_block, BasicBlock *>::iterator I = BasicBlocks.find(bb);
if (I != BasicBlocks.end())
return I->second;
@@ -1591,8 +1578,8 @@
break;
case GIMPLE_CALL:
- RenderGIMPLE_CALL(stmt);
- break;
+ RenderGIMPLE_CALL(stmt);
+ break;
case GIMPLE_COND:
RenderGIMPLE_COND(stmt);
@@ -1638,13 +1625,12 @@
// Add a branch to the fallthru block.
edge e;
edge_iterator ei;
- FOR_EACH_EDGE (e, ei, bb->succs)
- if (e->flags & EDGE_FALLTHRU) {
- input_location = e->goto_locus;
- // TODO: set the debug info location.
- Builder.CreateBr(getBasicBlock(e->dest));
- break;
- }
+ FOR_EACH_EDGE(e, ei, bb->succs) if (e->flags & EDGE_FALLTHRU) {
+ input_location = e->goto_locus;
+ // TODO: set the debug info location.
+ Builder.CreateBr(getBasicBlock(e->dest));
+ break;
+ }
input_location = saved_loc;
}
@@ -1655,8 +1641,7 @@
// Output the basic blocks.
basic_block bb;
- FOR_EACH_BB(bb)
- EmitBasicBlock(bb);
+ FOR_EACH_BB(bb) EmitBasicBlock(bb);
// Wrap things up.
return FinishFunctionBody();
@@ -1672,18 +1657,17 @@
}
LValue LV = EmitLV(exp);
assert(!LV.isBitfield() && "Bitfields containing aggregates not supported!");
- EmitAggregateCopy(DestLoc, MemRef(LV.Ptr, LV.getAlignment(),
- TREE_THIS_VOLATILE(exp)), TREE_TYPE(exp));
+ EmitAggregateCopy(DestLoc,
+ MemRef(LV.Ptr, LV.getAlignment(), TREE_THIS_VOLATILE(exp)),
+ TREE_TYPE(exp));
}
/// get_constant_alignment - Return the alignment of constant EXP in bits.
///
-static unsigned int
-get_constant_alignment (tree exp)
-{
- unsigned int align = TYPE_ALIGN (TREE_TYPE (exp));
+static unsigned int get_constant_alignment(tree exp) {
+ unsigned int align = TYPE_ALIGN(TREE_TYPE(exp));
#ifdef CONSTANT_ALIGNMENT
- align = CONSTANT_ALIGNMENT (exp, align);
+ align = CONSTANT_ALIGNMENT(exp, align);
#endif
return align;
}
@@ -1733,7 +1717,7 @@
LV = EmitLV_TARGET_MEM_REF(exp);
break;
- // Constants.
+ // Constants.
case LABEL_DECL: {
LV = LValue(AddressOfLABEL_DECL(exp), 1);
break;
@@ -1750,12 +1734,12 @@
break;
}
- // Type Conversion.
+ // Type Conversion.
case VIEW_CONVERT_EXPR:
LV = EmitLV_VIEW_CONVERT_EXPR(exp);
break;
- // Trivial Cases.
+ // Trivial Cases.
case WITH_SIZE_EXPR:
LV = EmitLV_WITH_SIZE_EXPR(exp);
break;
@@ -1786,8 +1770,8 @@
/// CastToAnyType - Cast the specified value to the specified type making no
/// assumptions about the types of the arguments. This creates an inferred cast.
-Value *TreeToLLVM::CastToAnyType(Value *Src, bool SrcIsSigned,
- Type* DestTy, bool DestIsSigned) {
+Value *TreeToLLVM::CastToAnyType(Value *Src, bool SrcIsSigned, Type *DestTy,
+ bool DestIsSigned) {
Type *SrcTy = Src->getType();
// Eliminate useless casts of a type to itself.
@@ -1815,14 +1799,14 @@
// The types are different so we must cast. Use getCastOpcode to create an
// inferred cast opcode.
- Instruction::CastOps opc =
- CastInst::getCastOpcode(Src, SrcIsSigned, DestTy, DestIsSigned);
+ Instruction::CastOps opc = CastInst::getCastOpcode(Src, SrcIsSigned, DestTy,
+ DestIsSigned);
// Generate the cast and return it.
return Builder.CreateCast(opc, Src, DestTy);
}
Constant *TreeToLLVM::CastToAnyType(Constant *Src, bool SrcIsSigned,
- Type* DestTy, bool DestIsSigned) {
+ Type *DestTy, bool DestIsSigned) {
Type *SrcTy = Src->getType();
// Eliminate useless casts of a type to itself.
@@ -1850,8 +1834,8 @@
// The types are different so we must cast. Use getCastOpcode to create an
// inferred cast opcode.
- Instruction::CastOps opc =
- CastInst::getCastOpcode(Src, SrcIsSigned, DestTy, DestIsSigned);
+ Instruction::CastOps opc = CastInst::getCastOpcode(Src, SrcIsSigned, DestTy,
+ DestIsSigned);
// Generate the cast and return it.
return TheFolder->CreateCast(opc, Src, DestTy);
@@ -1870,9 +1854,10 @@
}
if (EltTy->isPointerTy()) {
// A pointer/vector of pointer - use inttoptr.
- assert(V->getType()->getScalarType()->getPrimitiveSizeInBits() ==
- DL.getPointerSizeInBits(cast<PointerType>(EltTy)->getAddressSpace())
- && "Pointer type not same size!");
+ assert(
+ V->getType()->getScalarType()->getPrimitiveSizeInBits() ==
+ DL.getPointerSizeInBits(cast<PointerType>(EltTy)->getAddressSpace()) &&
+ "Pointer type not same size!");
return Builder.CreateIntToPtr(V, Ty);
}
// Everything else.
@@ -1905,13 +1890,13 @@
/// CastToFPType - Cast the specified value to the specified type assuming
/// that V's type and Ty are floating point types. This arbitrates between
/// BitCast, FPTrunc and FPExt.
-Value *TreeToLLVM::CastToFPType(Value *V, Type* Ty) {
+Value *TreeToLLVM::CastToFPType(Value *V, Type *Ty) {
unsigned SrcBits = V->getType()->getPrimitiveSizeInBits();
unsigned DstBits = Ty->getPrimitiveSizeInBits();
if (SrcBits == DstBits)
return V;
- Instruction::CastOps opcode = (SrcBits > DstBits ?
- Instruction::FPTrunc : Instruction::FPExt);
+ Instruction::CastOps opcode = (SrcBits > DstBits ? Instruction::FPTrunc :
+ Instruction::FPExt);
return Builder.CreateCast(opcode, V, Ty);
}
@@ -1945,8 +1930,8 @@
if (isa<FLOAT_TYPE>(type))
return Builder.CreateFSub(LHS, RHS);
return Builder.CreateSub(CastToSameSizeInteger(LHS),
- CastToSameSizeInteger(RHS), "",
- hasNUW(type), hasNSW(type));
+ CastToSameSizeInteger(RHS), "", hasNUW(type),
+ hasNSW(type));
}
/// CreateTemporary - Create a new alloca instruction of the specified type,
@@ -1958,12 +1943,13 @@
// alloc instructions before. It doesn't matter what this instruction is,
// it is dead. This allows us to insert allocas in order without having to
// scan for an insertion point. Use BitCast for int -> int
- AllocaInsertionPoint = CastInst::Create(Instruction::BitCast,
- Constant::getNullValue(Type::getInt32Ty(Context)),
- Type::getInt32Ty(Context), "alloca point");
+ AllocaInsertionPoint =
+ CastInst::Create(Instruction::BitCast,
+ Constant::getNullValue(Type::getInt32Ty(Context)),
+ Type::getInt32Ty(Context), "alloca point");
// Insert it as the first instruction in the entry block.
- Fn->begin()->getInstList().insert(Fn->begin()->begin(),
- AllocaInsertionPoint);
+ Fn->begin()->getInstList()
+ .insert(Fn->begin()->begin(), AllocaInsertionPoint);
}
return new AllocaInst(Ty, 0, align, "", AllocaInsertionPoint);
}
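
The dead bitcast named "alloca point" is a sentinel: every alloca is inserted in front of it, so allocas stay grouped at the top of the entry block in creation order without rescanning for an insertion point. The pattern in miniature with std::list standing in for the instruction list:

    #include <list>
    #include <string>

    int main() {
      std::list<std::string> Block = { "body1", "body2" };
      // Lazily plant a sentinel at the top, then insert in front of it.
      auto Sentinel = Block.insert(Block.begin(), "<alloca point>");
      Block.insert(Sentinel, "alloca a");
      Block.insert(Sentinel, "alloca b"); // lands after "alloca a"
      // Block now reads: alloca a, alloca b, <alloca point>, body1, body2.
      return 0;
    }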
@@ -1994,7 +1980,7 @@
// Add this block.
Fn->getBasicBlockList().push_back(BB);
- Builder.SetInsertPoint(BB); // It is now the current block.
+ Builder.SetInsertPoint(BB); // It is now the current block.
}
static const unsigned TooCostly = 8;
@@ -2018,7 +2004,8 @@
Type *Ty = ConvertType(type);
unsigned TotalCost = 0;
for (tree Field = TYPE_FIELDS(type); Field; Field = TREE_CHAIN(Field)) {
- if (!isa<FIELD_DECL>(Field)) continue;
+ if (!isa<FIELD_DECL>(Field))
+ continue;
// If the field has no size, for example because it is a C-style variable
// length array, then just give up.
if (!DECL_SIZE(Field))
@@ -2080,7 +2067,8 @@
// Copy each field in turn.
for (tree Field = TYPE_FIELDS(type); Field; Field = TREE_CHAIN(Field)) {
- if (!isa<FIELD_DECL>(Field)) continue;
+ if (!isa<FIELD_DECL>(Field))
+ continue;
// Ignore fields of size zero.
if (integer_zerop(DECL_SIZE(Field)))
continue;
@@ -2088,9 +2076,11 @@
int FieldIdx = GetFieldIndex(Field, Ty);
assert(FieldIdx != INT_MAX && "Should not be copying if no LLVM field!");
Value *DestFieldPtr = Builder.CreateStructGEP(DestLoc.Ptr, FieldIdx,
- flag_verbose_asm ? "df":"");
+ flag_verbose_asm ? "df" :
+ "");
Value *SrcFieldPtr = Builder.CreateStructGEP(SrcLoc.Ptr, FieldIdx,
- flag_verbose_asm ? "sf":"");
+ flag_verbose_asm ? "sf" :
+ "");
// Compute the field's alignment.
unsigned DestFieldAlign = DestLoc.getAlignment();
@@ -2122,10 +2112,10 @@
// Get the address of the component.
Value *DestCompPtr = DestLoc.Ptr, *SrcCompPtr = SrcLoc.Ptr;
if (i) {
- DestCompPtr = Builder.CreateConstInBoundsGEP1_32(DestCompPtr, i,
- flag_verbose_asm ? "da" : "");
- SrcCompPtr = Builder.CreateConstInBoundsGEP1_32(SrcCompPtr, i,
- flag_verbose_asm ? "sa" : "");
+ DestCompPtr = Builder.CreateConstInBoundsGEP1_32(
+ DestCompPtr, i, flag_verbose_asm ? "da" : "");
+ SrcCompPtr = Builder.CreateConstInBoundsGEP1_32(
+ SrcCompPtr, i, flag_verbose_asm ? "sa" : "");
}
// Compute the component's alignment.
@@ -2151,7 +2141,7 @@
/// GCC type specified by GCCType to know which elements to copy.
void TreeToLLVM::EmitAggregateCopy(MemRef DestLoc, MemRef SrcLoc, tree type) {
if (DestLoc.Ptr == SrcLoc.Ptr && !DestLoc.Volatile && !SrcLoc.Volatile)
- return; // noop copy.
+ return; // noop copy.
// If the type is small, copy element by element instead of using memcpy.
unsigned Cost = CostOfAccessingAllElements(type);
@@ -2182,7 +2172,8 @@
// Zero each field in turn.
for (tree Field = TYPE_FIELDS(type); Field; Field = TREE_CHAIN(Field)) {
- if (!isa<FIELD_DECL>(Field)) continue;
+ if (!isa<FIELD_DECL>(Field))
+ continue;
// Ignore fields of size zero.
if (integer_zerop(DECL_SIZE(Field)))
continue;
@@ -2217,8 +2208,8 @@
// Get the address of the component.
Value *CompPtr = DestLoc.Ptr;
if (i)
- CompPtr = Builder.CreateConstInBoundsGEP1_32(CompPtr, i,
- flag_verbose_asm ? "za":"");
+ CompPtr = Builder.CreateConstInBoundsGEP1_32(
+ CompPtr, i, flag_verbose_asm ? "za" : "");
// Compute the component's alignment.
unsigned CompAlign = DestLoc.getAlignment();
@@ -2253,17 +2244,14 @@
Type *SBP = Type::getInt8PtrTy(Context);
Type *IntPtr = DL.getIntPtrType(DestPtr->getType());
- Value *Ops[5] = {
- Builder.CreateBitCast(DestPtr, SBP),
- Builder.CreateBitCast(SrcPtr, SBP),
- Builder.CreateIntCast(Size, IntPtr, /*isSigned*/true),
- Builder.getInt32(Align),
- Builder.getFalse()
- };
+ Value *Ops[5] = { Builder.CreateBitCast(DestPtr, SBP),
+ Builder.CreateBitCast(SrcPtr, SBP),
+ Builder.CreateIntCast(Size, IntPtr, /*isSigned*/ true),
+ Builder.getInt32(Align), Builder.getFalse() };
Type *ArgTypes[3] = { SBP, SBP, IntPtr };
- Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::memcpy,
- ArgTypes), Ops);
+ Builder.CreateCall(
+ Intrinsic::getDeclaration(TheModule, Intrinsic::memcpy, ArgTypes), Ops);
return Ops[0];
}
@@ -2271,17 +2259,14 @@
unsigned Align) {
Type *SBP = Type::getInt8PtrTy(Context);
Type *IntPtr = DL.getIntPtrType(DestPtr->getType());
- Value *Ops[5] = {
- Builder.CreateBitCast(DestPtr, SBP),
- Builder.CreateBitCast(SrcPtr, SBP),
- Builder.CreateIntCast(Size, IntPtr, /*isSigned*/true),
- Builder.getInt32(Align),
- Builder.getFalse()
- };
+ Value *Ops[5] = { Builder.CreateBitCast(DestPtr, SBP),
+ Builder.CreateBitCast(SrcPtr, SBP),
+ Builder.CreateIntCast(Size, IntPtr, /*isSigned*/ true),
+ Builder.getInt32(Align), Builder.getFalse() };
Type *ArgTypes[3] = { SBP, SBP, IntPtr };
- Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::memmove,
- ArgTypes), Ops);
+ Builder.CreateCall(
+ Intrinsic::getDeclaration(TheModule, Intrinsic::memmove, ArgTypes), Ops);
return Ops[0];
}
@@ -2289,38 +2274,31 @@
unsigned Align) {
Type *SBP = Type::getInt8PtrTy(Context);
Type *IntPtr = DL.getIntPtrType(DestPtr->getType());
- Value *Ops[5] = {
- Builder.CreateBitCast(DestPtr, SBP),
- Builder.CreateIntCast(SrcVal, Type::getInt8Ty(Context), /*isSigned*/true),
- Builder.CreateIntCast(Size, IntPtr, /*isSigned*/true),
- Builder.getInt32(Align),
- Builder.getFalse()
- };
+ Value *Ops[5] = { Builder.CreateBitCast(DestPtr, SBP),
+ Builder.CreateIntCast(SrcVal, Type::getInt8Ty(Context),
+ /*isSigned*/ true),
+ Builder.CreateIntCast(Size, IntPtr, /*isSigned*/ true),
+ Builder.getInt32(Align), Builder.getFalse() };
Type *ArgTypes[2] = { SBP, IntPtr };
- Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::memset,
- ArgTypes), Ops);
+ Builder.CreateCall(
+ Intrinsic::getDeclaration(TheModule, Intrinsic::memset, ArgTypes), Ops);
return Ops[0];
}
-
// Emits code to do something for a type attribute
void TreeToLLVM::EmitTypeGcroot(Value *V) {
// GC intrinsics can only be used in functions which specify a collector.
Fn->setGC("shadow-stack");
- Function *gcrootFun = Intrinsic::getDeclaration(TheModule,
- Intrinsic::gcroot);
+ Function *gcrootFun = Intrinsic::getDeclaration(TheModule, Intrinsic::gcroot);
// The idea is that it's a pointer to type "Value"
// which is opaque* but the routine expects i8** and i8*.
PointerType *Ty = Type::getInt8PtrTy(Context);
V = Builder.CreateBitCast(V, Ty->getPointerTo());
- Value *Ops[2] = {
- V,
- ConstantPointerNull::get(Ty)
- };
+ Value *Ops[2] = { V, ConstantPointerNull::get(Ty) };
Builder.CreateCall(gcrootFun, Ops);
}
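EmitTypeGcroot is the one place the plugin touches the shadow-stack collector. In isolation the pattern is the following minimal sketch (llvm.gcroot's signature is fixed at void(i8**, i8*); the helper name is illustrative):

// Sketch: register the alloca Root as a GC root, with null metadata.
static void MarkGcRootSketch(Module *M, IRBuilder<> &B, Value *Root) {
  PointerType *I8Ptr = B.getInt8PtrTy();
  Value *Slot = B.CreateBitCast(Root, I8Ptr->getPointerTo()); // i8**
  Value *Ops[2] = { Slot, ConstantPointerNull::get(I8Ptr) };
  B.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::gcroot), Ops);
}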
@@ -2329,7 +2307,7 @@
void TreeToLLVM::EmitAnnotateIntrinsic(Value *V, tree decl) {
// Handle annotate attribute on global.
- tree annotateAttr = lookup_attribute("annotate", DECL_ATTRIBUTES (decl));
+ tree annotateAttr = lookup_attribute("annotate", DECL_ATTRIBUTES(decl));
if (!annotateAttr)
return;
@@ -2338,8 +2316,8 @@
Intrinsic::var_annotation);
// Get file and line number
- Constant *lineNo =
- ConstantInt::get(Type::getInt32Ty(Context), DECL_SOURCE_LINE(decl));
+ Constant *lineNo = ConstantInt::get(Type::getInt32Ty(Context),
+ DECL_SOURCE_LINE(decl));
Constant *file = ConvertMetadataStringToGV(DECL_SOURCE_FILE(decl));
Type *SBP = Type::getInt8PtrTy(Context);
file = TheFolder->CreateBitCast(file, SBP);
@@ -2362,12 +2340,8 @@
assert(isa<STRING_CST>(val) &&
"Annotate attribute arg should always be a string");
Constant *strGV = AddressOf(val);
- Value *Ops[4] = {
- Builder.CreateBitCast(V, SBP),
- Builder.CreateBitCast(strGV, SBP),
- file,
- lineNo
- };
+ Value *Ops[4] = { Builder.CreateBitCast(V, SBP),
+ Builder.CreateBitCast(strGV, SBP), file, lineNo };
Builder.CreateCall(annotateFun, Ops);
}
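The annotate lowering drives llvm.var.annotation, whose operands are (i8* value, i8* annotation, i8* filename, i32 line). A reduced sketch of the call sequence above, with all names illustrative:

// Sketch: emit one llvm.var.annotation call for a single annotation string.
static void AnnotateSketch(Module *M, IRBuilder<> &B, Value *V,
                           Constant *Str, Constant *File, unsigned Line) {
  Type *SBP = B.getInt8PtrTy();
  Value *Ops[4] = { B.CreateBitCast(V, SBP),
                    ConstantExpr::getBitCast(Str, SBP),
                    ConstantExpr::getBitCast(File, SBP), B.getInt32(Line) };
  B.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::var_annotation), Ops);
}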
@@ -2392,10 +2366,10 @@
return;
tree type = TREE_TYPE(decl);
- Type *Ty; // Type to allocate
+ Type *Ty; // Type to allocate
Value *Size = 0; // Amount to alloca (null for 1)
- if (DECL_SIZE(decl) == 0) { // Variable with incomplete type.
+ if (DECL_SIZE(decl) == 0) { // Variable with incomplete type.
if (DECL_INITIAL(decl) == 0)
return; // Error message was already done; now avoid a crash.
debug_tree(decl);
@@ -2419,7 +2393,7 @@
// Insert an alloca for this variable.
AllocaInst *AI;
- if (!Size) { // Fixed size alloca -> entry block.
+ if (!Size) { // Fixed size alloca -> entry block.
AI = CreateTemporary(Ty);
} else {
AI = Builder.CreateAlloca(Ty, Size);
@@ -2435,15 +2409,14 @@
EmitAnnotateIntrinsic(AI, decl);
// Handle gcroot attribute
- if (isa<ACCESS_TYPE>(TREE_TYPE (decl))
- && lookup_attribute("gcroot", TYPE_ATTRIBUTES(TREE_TYPE (decl))))
- {
- // We should null out local variables so that a stack crawl
- // before initialization doesn't get garbage results to follow.
- Type *T = cast<PointerType>(AI->getType())->getElementType();
- EmitTypeGcroot(AI);
- Builder.CreateStore(Constant::getNullValue(T), AI);
- }
+ if (isa<ACCESS_TYPE>(TREE_TYPE(decl)) &&
+ lookup_attribute("gcroot", TYPE_ATTRIBUTES(TREE_TYPE(decl)))) {
+ // We should null out local variables so that a stack crawl
+ // before initialization doesn't get garbage results to follow.
+ Type *T = cast<PointerType>(AI->getType())->getElementType();
+ EmitTypeGcroot(AI);
+ Builder.CreateStore(Constant::getNullValue(T), AI);
+ }
if (EmitDebugInfo()) {
if (DECL_NAME(decl) || isa<RESULT_DECL>(decl)) {
@@ -2453,7 +2426,6 @@
}
}
-
//===----------------------------------------------------------------------===//
// ... Control Flow ...
//===----------------------------------------------------------------------===//
@@ -2464,7 +2436,7 @@
// TODO: Once pass_ipa_free_lang is made a default pass, remove the call to
// lookup_type_for_runtime below.
if (isa<TYPE>(type))
- type = lookup_type_for_runtime (type);
+ type = lookup_type_for_runtime(type);
STRIP_NOPS(type);
if (isa<ADDR_EXPR>(type))
type = TREE_OPERAND(type, 0);
@@ -2476,7 +2448,7 @@
AllocaInst *TreeToLLVM::getExceptionPtr(int RegionNo) {
assert(RegionNo >= 0 && "Invalid exception handling region!");
- if ((unsigned)RegionNo >= ExceptionPtrs.size())
+ if ((unsigned) RegionNo >= ExceptionPtrs.size())
ExceptionPtrs.resize(RegionNo + 1, 0);
AllocaInst *&ExceptionPtr = ExceptionPtrs[RegionNo];
@@ -2494,7 +2466,7 @@
AllocaInst *TreeToLLVM::getExceptionFilter(int RegionNo) {
assert(RegionNo >= 0 && "Invalid exception handling region!");
- if ((unsigned)RegionNo >= ExceptionFilters.size())
+ if ((unsigned) RegionNo >= ExceptionFilters.size())
ExceptionFilters.resize(RegionNo + 1, 0);
AllocaInst *&ExceptionFilter = ExceptionFilters[RegionNo];
@@ -2512,7 +2484,7 @@
BasicBlock *TreeToLLVM::getFailureBlock(int RegionNo) {
assert(RegionNo >= 0 && "Invalid exception handling region!");
- if ((unsigned)RegionNo >= FailureBlocks.size())
+ if ((unsigned) RegionNo >= FailureBlocks.size())
FailureBlocks.resize(RegionNo + 1, 0);
BasicBlock *&FailureBlock = FailureBlocks[RegionNo];
@@ -2548,7 +2520,7 @@
// invokes that unwind to this post landing pad, and also that no normal
// edges land at this post pad. In this case there is no need to create
// an LLVM specific landing pad.
- if ((unsigned)std::distance(pred_begin(PostPad), pred_end(PostPad)) ==
+ if ((unsigned) std::distance(pred_begin(PostPad), pred_end(PostPad)) ==
InvokesForPad.size())
continue;
@@ -2579,7 +2551,7 @@
// Different unwind edges have different values. Create a new PHI node
// in LPad.
PHINode *NewPN = PHINode::Create(PN->getType(), std::distance(PB, PE),
- PN->getName()+".lpad", LPad);
+ PN->getName() + ".lpad", LPad);
// Add an entry for each unwind edge, using the value from the old PHI.
for (pred_iterator PI = PB; PI != PE; ++PI)
NewPN->addIncoming(PN->getIncomingValueForBlock(*PI), *PI);
@@ -2655,7 +2627,7 @@
case ERT_ALLOWED_EXCEPTIONS: {
// Filter. Compute the list of type infos.
AllCaught = true;
- std::vector<Constant*> TypeInfos;
+ std::vector<Constant *> TypeInfos;
for (tree type = region->u.allowed.type_list; type;
type = TREE_CHAIN(type)) {
Constant *TypeInfo = ConvertTypeInfo(TREE_VALUE(type));
@@ -2681,13 +2653,13 @@
// Same as a zero-length filter: add an empty filter clause.
ArrayType *FilterTy = ArrayType::get(Builder.getInt8PtrTy(), 0);
LPadInst->addClause(ConstantArray::get(FilterTy,
- ArrayRef<Constant*>()));
+ ArrayRef<Constant *>()));
AllCaught = true;
break;
}
case ERT_TRY:
// Catches.
- for (eh_catch c = region->u.eh_try.first_catch; c ; c = c->next_catch)
+ for (eh_catch c = region->u.eh_try.first_catch; c; c = c->next_catch)
if (!c->type_list) {
// Catch-all - add a null pointer as a catch clause.
LPadInst->addClause(Constant::getNullValue(Builder.getInt8PtrTy()));
@@ -2760,16 +2732,17 @@
Builder.getInt32Ty(), NULL);
tree personality = DECL_FUNCTION_PERSONALITY(FnDecl);
assert(personality && "No-throw region but no personality function!");
- LandingPadInst *LPadInst =
- Builder.CreateLandingPad(UnwindDataTy, DECL_LLVM(personality), 1,
- "exc");
+ LandingPadInst *LPadInst = Builder.CreateLandingPad(
+ UnwindDataTy, DECL_LLVM(personality), 1,
+ "exc");
ArrayType *FilterTy = ArrayType::get(Builder.getInt8PtrTy(), 0);
- LPadInst->addClause(ConstantArray::get(FilterTy, ArrayRef<Constant*>()));
+ LPadInst->addClause(ConstantArray::get(FilterTy, ArrayRef<Constant *>()));
if (LandingPad != FailureBlock) {
// Make sure all invokes unwind to the new landing pad.
for (pred_iterator I = pred_begin(FailureBlock),
- E = pred_end(FailureBlock); I != E; ) {
+ E = pred_end(FailureBlock);
+ I != E;) {
TerminatorInst *T = (*I++)->getTerminator();
if (isa<InvokeInst>(T))
T->setSuccessor(1, LandingPad);
@@ -2801,7 +2774,6 @@
}
}
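All of the exception handling here targets the LLVM 3.x landingpad form, in which the personality function and the catch/filter clauses hang off the landingpad instruction itself. A reduced sketch of the clause construction used in the code above (UnwindDataTy is the usual { i8*, i32 } pair; names illustrative):

// Sketch: a landing pad carrying a catch-all clause plus the empty filter
// ("throws nothing") clause used for no-throw regions.
static LandingPadInst *MakePadSketch(IRBuilder<> &B, Value *Personality,
                                     Type *UnwindDataTy) {
  LandingPadInst *LPad =
      B.CreateLandingPad(UnwindDataTy, Personality, 2, "exc");
  LPad->addClause(Constant::getNullValue(B.getInt8PtrTy())); // catch-all
  ArrayType *FilterTy = ArrayType::get(B.getInt8PtrTy(), 0);
  LPad->addClause(ConstantArray::get(FilterTy, ArrayRef<Constant *>()));
  return LPad;
}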
-
//===----------------------------------------------------------------------===//
// ... Expressions ...
//===----------------------------------------------------------------------===//
@@ -2862,8 +2834,8 @@
// Shift the sign bit of the bitfield to the sign bit position in the loaded
// type. This zaps any extra bits occurring after the end of the bitfield.
- unsigned FirstBitInVal = BYTES_BIG_ENDIAN ?
- LoadSizeInBits - LV.BitStart - LV.BitSize : LV.BitStart;
+ unsigned FirstBitInVal = BYTES_BIG_ENDIAN ? LoadSizeInBits - LV.BitStart -
+ LV.BitSize : LV.BitStart;
if (FirstBitInVal + LV.BitSize != LoadSizeInBits) {
Value *ShAmt = ConstantInt::get(LoadType, LoadSizeInBits -
(FirstBitInVal + LV.BitSize));
@@ -2874,8 +2846,8 @@
// this also duplicates the sign bit, giving a sign extended value.
bool isSigned = !TYPE_UNSIGNED(TREE_TYPE(exp));
Value *ShAmt = ConstantInt::get(LoadType, LoadSizeInBits - LV.BitSize);
- Val = isSigned ?
- Builder.CreateAShr(Val, ShAmt) : Builder.CreateLShr(Val, ShAmt);
+ Val = isSigned ? Builder.CreateAShr(Val, ShAmt) :
+ Builder.CreateLShr(Val, ShAmt);
// Get the bits as a value of the correct type.
// FIXME: This assumes the result is an integer.
@@ -2895,9 +2867,8 @@
#if (GCC_MINOR < 7)
Value *TreeToLLVM::EmitCondExpr(tree exp) {
- return TriviallyTypeConvert(EmitReg_CondExpr(TREE_OPERAND(exp, 0),
- TREE_OPERAND(exp, 1),
- TREE_OPERAND(exp, 2)),
+ return TriviallyTypeConvert(EmitReg_CondExpr(
+ TREE_OPERAND(exp, 0), TREE_OPERAND(exp, 1), TREE_OPERAND(exp, 2)),
getRegType(TREE_TYPE(exp)));
}
#endif
@@ -2920,7 +2891,7 @@
// Insert all of the elements here.
unsigned HOST_WIDE_INT idx;
tree value;
- FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), idx, value) {
+ FOR_EACH_CONSTRUCTOR_VALUE(CONSTRUCTOR_ELTS(exp), idx, value) {
Value *Elt = EmitRegister(value);
if (VectorType *EltTy = dyn_cast<VectorType>(Elt->getType())) {
@@ -2951,7 +2922,7 @@
// Start out with the value zero'd out.
EmitAggregateZero(*DestLoc, type);
- VEC(constructor_elt, gc) *elt = CONSTRUCTOR_ELTS(exp);
+ VEC(constructor_elt, gc) * elt = CONSTRUCTOR_ELTS(exp);
switch (TREE_CODE(TREE_TYPE(exp))) {
case ARRAY_TYPE:
case RECORD_TYPE:
@@ -2964,13 +2935,14 @@
case UNION_TYPE:
// Store each element of the constructor into the corresponding field of
// DEST.
- if (!elt || VEC_empty(constructor_elt, elt)) return 0; // no elements
- assert(VEC_length(constructor_elt, elt) == 1
- && "Union CONSTRUCTOR should have one element!");
+ if (!elt || VEC_empty(constructor_elt, elt))
+ return 0; // no elements
+ assert(VEC_length(constructor_elt, elt) == 1 &&
+ "Union CONSTRUCTOR should have one element!");
tree tree_purpose = VEC_index(constructor_elt, elt, 0)->index;
- tree tree_value = VEC_index(constructor_elt, elt, 0)->value;
+ tree tree_value = VEC_index(constructor_elt, elt, 0)->value;
if (!tree_purpose)
- return 0; // Not actually initialized?
+ return 0; // Not actually initialized?
if (isa<AGGREGATE_TYPE>(TREE_TYPE(tree_purpose))) {
EmitAggregate(tree_value, *DestLoc);
@@ -2985,10 +2957,8 @@
}
/// llvm_load_scalar_argument - Load value located at LOC.
-static Value *llvm_load_scalar_argument(Value *L,
- llvm::Type *LLVMTy,
- unsigned RealSize,
- LLVMBuilder &Builder) {
+static Value *llvm_load_scalar_argument(
+ Value *L, llvm::Type *LLVMTy, unsigned RealSize, LLVMBuilder &Builder) {
if (!RealSize)
return UndefValue::get(LLVMTy);
@@ -3006,254 +2976,249 @@
}
#ifndef LLVM_LOAD_SCALAR_ARGUMENT
-#define LLVM_LOAD_SCALAR_ARGUMENT(LOC,TY,SIZE,BUILDER) \
- llvm_load_scalar_argument((LOC),(TY),(SIZE),(BUILDER))
+#define LLVM_LOAD_SCALAR_ARGUMENT(LOC, TY, SIZE, BUILDER) \
+ llvm_load_scalar_argument((LOC), (TY), (SIZE), (BUILDER))
#endif
namespace {
- /// FunctionCallArgumentConversion - This helper class is driven by the ABI
- /// definition for this target to figure out how to pass arguments into the
- /// stack/regs for a function call.
- struct FunctionCallArgumentConversion : public DefaultABIClient {
- SmallVector<Value*, 16> &CallOperands;
- SmallVector<Value*, 2> LocStack;
- FunctionType *FTy;
- const MemRef *DestLoc;
- LLVMBuilder &Builder;
- Value *TheValue;
- MemRef RetBuf;
- CallingConv::ID &CallingConv;
- unsigned Offset;
- bool isShadowRet;
- bool isAggrRet;
- bool useReturnSlot;
-
- FunctionCallArgumentConversion(SmallVector<Value*, 16> &ops,
- FunctionType *FnTy,
- const MemRef *destloc,
- bool ReturnSlotOpt,
- LLVMBuilder &b,
- CallingConv::ID &CC)
+/// FunctionCallArgumentConversion - This helper class is driven by the ABI
+/// definition for this target to figure out how to pass arguments into the
+/// stack/regs for a function call.
+struct FunctionCallArgumentConversion : public DefaultABIClient {
+ SmallVector<Value *, 16> &CallOperands;
+ SmallVector<Value *, 2> LocStack;
+ FunctionType *FTy;
+ const MemRef *DestLoc;
+ LLVMBuilder &Builder;
+ Value *TheValue;
+ MemRef RetBuf;
+ CallingConv::ID &CallingConv;
+ unsigned Offset;
+ bool isShadowRet;
+ bool isAggrRet;
+ bool useReturnSlot;
+
+ FunctionCallArgumentConversion(
+ SmallVector<Value *, 16> &ops, FunctionType *FnTy, const MemRef *destloc,
+ bool ReturnSlotOpt, LLVMBuilder &b, CallingConv::ID &CC)
: CallOperands(ops), FTy(FnTy), DestLoc(destloc), Builder(b),
CallingConv(CC), Offset(0), isShadowRet(false), isAggrRet(false),
- useReturnSlot(ReturnSlotOpt) { }
-
- /// getCallingConv - This provides the desired CallingConv for the function.
- CallingConv::ID getCallingConv(void) { return CallingConv; }
-
- // Push the address of an argument.
- void pushAddress(Value *Loc) {
- assert(Loc && "Invalid location!");
- LocStack.push_back(Loc);
- }
-
- // Push the value of an argument.
- void pushValue(Value *V) {
- assert(LocStack.empty() && "Value only allowed at top level!");
- LocStack.push_back(NULL);
- TheValue = V;
- }
+ useReturnSlot(ReturnSlotOpt) {
+ }
- // Get the address of the current location.
- Value *getAddress(void) {
- assert(!LocStack.empty());
- Value *&Loc = LocStack.back();
- if (!Loc) {
- // A value. Store to a temporary, and return the temporary's address.
- // Any future access to this argument will reuse the same address.
- Loc = TheTreeToLLVM->CreateTemporary(TheValue->getType());
- Builder.CreateStore(TheValue, Loc);
- }
- return Loc;
- }
+ /// getCallingConv - This provides the desired CallingConv for the function.
+ CallingConv::ID getCallingConv(void) { return CallingConv; }
- // Get the value of the current location (of type Ty).
- Value *getValue(Type *Ty) {
- assert(!LocStack.empty());
- Value *Loc = LocStack.back();
- if (Loc) {
- // An address. Convert to the right type and load the value out.
- Loc = Builder.CreateBitCast(Loc, Ty->getPointerTo());
- // FIXME: Pass alignment information down rather than just using 1 here.
- return Builder.CreateAlignedLoad(Loc, 1, "val");
- } else {
- // A value - just return it.
- assert(TheValue->getType() == Ty && "Value not of expected type!");
- return TheValue;
- }
+ // Push the address of an argument.
+ void pushAddress(Value *Loc) {
+ assert(Loc && "Invalid location!");
+ LocStack.push_back(Loc);
+ }
+
+ // Push the value of an argument.
+ void pushValue(Value *V) {
+ assert(LocStack.empty() && "Value only allowed at top level!");
+ LocStack.push_back(NULL);
+ TheValue = V;
+ }
+
+ // Get the address of the current location.
+ Value *getAddress(void) {
+ assert(!LocStack.empty());
+ Value *&Loc = LocStack.back();
+ if (!Loc) {
+ // A value. Store to a temporary, and return the temporary's address.
+ // Any future access to this argument will reuse the same address.
+ Loc = TheTreeToLLVM->CreateTemporary(TheValue->getType());
+ Builder.CreateStore(TheValue, Loc);
+ }
+ return Loc;
+ }
+
+ // Get the value of the current location (of type Ty).
+ Value *getValue(Type *Ty) {
+ assert(!LocStack.empty());
+ Value *Loc = LocStack.back();
+ if (Loc) {
+ // An address. Convert to the right type and load the value out.
+ Loc = Builder.CreateBitCast(Loc, Ty->getPointerTo());
+ // FIXME: Pass alignment information down rather than just using 1 here.
+ return Builder.CreateAlignedLoad(Loc, 1, "val");
+ } else {
+ // A value - just return it.
+ assert(TheValue->getType() == Ty && "Value not of expected type!");
+ return TheValue;
}
+ }
- void clear() {
- assert(LocStack.size() == 1 && "Imbalance!");
- LocStack.clear();
- }
+ void clear() {
+ assert(LocStack.size() == 1 && "Imbalance!");
+ LocStack.clear();
+ }
- bool isShadowReturn() const { return isShadowRet; }
- bool isAggrReturn() { return isAggrRet; }
+ bool isShadowReturn() const { return isShadowRet; }
+ bool isAggrReturn() { return isAggrRet; }
- // EmitShadowResult - If the return result was redirected to a buffer,
- // emit it now.
- Value *EmitShadowResult(tree type, const MemRef *DstLoc) {
- if (!RetBuf.Ptr)
- return 0;
+ // EmitShadowResult - If the return result was redirected to a buffer,
+ // emit it now.
+ Value *EmitShadowResult(tree type, const MemRef *DstLoc) {
+ if (!RetBuf.Ptr)
+ return 0;
- if (DstLoc) {
- // Copy out the aggregate return value now.
- assert(ConvertType(type) ==
- cast<PointerType>(RetBuf.Ptr->getType())->getElementType() &&
- "Inconsistent result types!");
- TheTreeToLLVM->EmitAggregateCopy(*DstLoc, RetBuf, type);
- return 0;
- } else {
- // Read out the scalar return value now.
- return Builder.CreateLoad(RetBuf.Ptr, "result");
- }
+ if (DstLoc) {
+ // Copy out the aggregate return value now.
+ assert(ConvertType(type) ==
+ cast<PointerType>(RetBuf.Ptr->getType())->getElementType() &&
+ "Inconsistent result types!");
+ TheTreeToLLVM->EmitAggregateCopy(*DstLoc, RetBuf, type);
+ return 0;
+ } else {
+ // Read out the scalar return value now.
+ return Builder.CreateLoad(RetBuf.Ptr, "result");
}
+ }
- /// HandleScalarResult - This callback is invoked if the function returns a
- /// simple scalar result value.
- void HandleScalarResult(Type * /*RetTy*/) {
- // There is nothing to do here if we return a scalar or void.
- assert(DestLoc == 0 &&
- "Call returns a scalar but caller expects aggregate!");
- }
-
- /// HandleAggregateResultAsScalar - This callback is invoked if the function
- /// returns an aggregate value by bit converting it to the specified scalar
- /// type and returning that.
- void HandleAggregateResultAsScalar(Type * /*ScalarTy*/,
- unsigned Off = 0) {
- this->Offset = Off;
- }
-
- /// HandleAggregateResultAsAggregate - This callback is invoked if the
- /// function returns an aggregate value using multiple return values.
- void HandleAggregateResultAsAggregate(Type * /*AggrTy*/) {
- // There is nothing to do here.
- isAggrRet = true;
- }
-
- /// HandleAggregateShadowResult - This callback is invoked if the function
- /// returns an aggregate value by using a "shadow" first parameter. If
- /// RetPtr is set to true, the pointer argument itself is returned from the
- /// function.
- void HandleAggregateShadowResult(PointerType *PtrArgTy, bool /*RetPtr*/) {
- // We need to pass memory to write the return value into.
- // FIXME: alignment and volatility are being ignored!
- assert(!DestLoc || PtrArgTy == DestLoc->Ptr->getType());
-
- if (DestLoc == 0) {
- // The result is unused, but still needs to be stored somewhere.
- Value *Buf = TheTreeToLLVM->CreateTemporary(PtrArgTy->getElementType());
- CallOperands.push_back(Buf);
- } else if (useReturnSlot) {
- // Letting the call write directly to the final destination is safe and
- // may be required. Do not use a buffer.
- CallOperands.push_back(DestLoc->Ptr);
- } else {
- // Letting the call write directly to the final destination may not be
- // safe (eg: if DestLoc aliases a parameter) and is not required - pass
- // a buffer and copy it to DestLoc after the call.
- RetBuf = TheTreeToLLVM->CreateTempLoc(PtrArgTy->getElementType());
- CallOperands.push_back(RetBuf.Ptr);
- }
-
- // Note the use of a shadow argument.
- isShadowRet = true;
- }
-
- void HandlePad(llvm::Type *LLVMTy) {
- CallOperands.push_back(UndefValue::get(LLVMTy));
- }
-
- /// HandleScalarShadowResult - This callback is invoked if the function
- /// returns a scalar value by using a "shadow" first parameter, which is a
- /// pointer to the scalar, of type PtrArgTy. If RetPtr is set to true,
- /// the pointer argument itself is returned from the function.
- void HandleScalarShadowResult(PointerType *PtrArgTy,
- bool /*RetPtr*/) {
- assert(DestLoc == 0 &&
- "Call returns a scalar but caller expects aggregate!");
- // Create a buffer to hold the result. The result will be loaded out of
- // it after the call.
+ /// HandleScalarResult - This callback is invoked if the function returns a
+ /// simple scalar result value.
+ void HandleScalarResult(Type */*RetTy*/) {
+ // There is nothing to do here if we return a scalar or void.
+ assert(DestLoc == 0 &&
+ "Call returns a scalar but caller expects aggregate!");
+ }
+
+ /// HandleAggregateResultAsScalar - This callback is invoked if the function
+ /// returns an aggregate value by bit converting it to the specified scalar
+ /// type and returning that.
+ void HandleAggregateResultAsScalar(Type */*ScalarTy*/, unsigned Off = 0) {
+ this->Offset = Off;
+ }
+
+ /// HandleAggregateResultAsAggregate - This callback is invoked if the
+ /// function returns an aggregate value using multiple return values.
+ void HandleAggregateResultAsAggregate(Type */*AggrTy*/) {
+ // There is nothing to do here.
+ isAggrRet = true;
+ }
+
+ /// HandleAggregateShadowResult - This callback is invoked if the function
+ /// returns an aggregate value by using a "shadow" first parameter. If
+ /// RetPtr is set to true, the pointer argument itself is returned from the
+ /// function.
+ void HandleAggregateShadowResult(PointerType *PtrArgTy, bool /*RetPtr*/) {
+ // We need to pass memory to write the return value into.
+ // FIXME: alignment and volatility are being ignored!
+ assert(!DestLoc || PtrArgTy == DestLoc->Ptr->getType());
+
+ if (DestLoc == 0) {
+ // The result is unused, but still needs to be stored somewhere.
+ Value *Buf = TheTreeToLLVM->CreateTemporary(PtrArgTy->getElementType());
+ CallOperands.push_back(Buf);
+ } else if (useReturnSlot) {
+ // Letting the call write directly to the final destination is safe and
+ // may be required. Do not use a buffer.
+ CallOperands.push_back(DestLoc->Ptr);
+ } else {
+ // Letting the call write directly to the final destination may not be
+ // safe (eg: if DestLoc aliases a parameter) and is not required - pass
+ // a buffer and copy it to DestLoc after the call.
RetBuf = TheTreeToLLVM->CreateTempLoc(PtrArgTy->getElementType());
CallOperands.push_back(RetBuf.Ptr);
-
- // Note the use of a shadow argument.
- isShadowRet = true;
}
- /// HandleScalarArgument - This is the primary callback that specifies an
- /// LLVM argument to pass. It is only used for first class types.
- void HandleScalarArgument(llvm::Type *LLVMTy, tree type,
- unsigned RealSize = 0) {
- Value *Loc = NULL;
- if (RealSize) {
- Value *L = getAddress();
- Loc = LLVM_LOAD_SCALAR_ARGUMENT(L,LLVMTy,RealSize,Builder);
- } else
- Loc = getValue(LLVMTy);
-
- // Perform any implicit type conversions.
- if (CallOperands.size() < FTy->getNumParams()) {
- Type *CalledTy= FTy->getParamType(CallOperands.size());
- if (Loc->getType() != CalledTy) {
- if (type) {
- bool isSigned = !TYPE_UNSIGNED(type);
- Loc = TheTreeToLLVM->CastToAnyType(Loc, isSigned, CalledTy, false);
- } else {
- // Only trivial type conversions should get here.
- Loc = Builder.CreateBitCast(Loc, CalledTy);
- }
+ // Note the use of a shadow argument.
+ isShadowRet = true;
+ }
+
+ void HandlePad(llvm::Type *LLVMTy) {
+ CallOperands.push_back(UndefValue::get(LLVMTy));
+ }
+
+ /// HandleScalarShadowResult - This callback is invoked if the function
+ /// returns a scalar value by using a "shadow" first parameter, which is a
+ /// pointer to the scalar, of type PtrArgTy. If RetPtr is set to true,
+ /// the pointer argument itself is returned from the function.
+ void HandleScalarShadowResult(PointerType *PtrArgTy, bool /*RetPtr*/) {
+ assert(DestLoc == 0 &&
+ "Call returns a scalar but caller expects aggregate!");
+ // Create a buffer to hold the result. The result will be loaded out of
+ // it after the call.
+ RetBuf = TheTreeToLLVM->CreateTempLoc(PtrArgTy->getElementType());
+ CallOperands.push_back(RetBuf.Ptr);
+
+ // Note the use of a shadow argument.
+ isShadowRet = true;
+ }
+
+ /// HandleScalarArgument - This is the primary callback that specifies an
+ /// LLVM argument to pass. It is only used for first class types.
+ void HandleScalarArgument(llvm::Type *LLVMTy, tree type,
+ unsigned RealSize = 0) {
+ Value *Loc = NULL;
+ if (RealSize) {
+ Value *L = getAddress();
+ Loc = LLVM_LOAD_SCALAR_ARGUMENT(L, LLVMTy, RealSize, Builder);
+ } else
+ Loc = getValue(LLVMTy);
+
+ // Perform any implicit type conversions.
+ if (CallOperands.size() < FTy->getNumParams()) {
+ Type *CalledTy = FTy->getParamType(CallOperands.size());
+ if (Loc->getType() != CalledTy) {
+ if (type) {
+ bool isSigned = !TYPE_UNSIGNED(type);
+ Loc = TheTreeToLLVM->CastToAnyType(Loc, isSigned, CalledTy, false);
+ } else {
+ // Only trivial type conversions should get here.
+ Loc = Builder.CreateBitCast(Loc, CalledTy);
}
}
-
- CallOperands.push_back(Loc);
}
- /// HandleByInvisibleReferenceArgument - This callback is invoked if a
- /// pointer (of type PtrTy) to the argument is passed rather than the
- /// argument itself.
- void HandleByInvisibleReferenceArgument(llvm::Type *PtrTy,
- tree /*type*/) {
- Value *Loc = getAddress();
- Loc = Builder.CreateBitCast(Loc, PtrTy);
- CallOperands.push_back(Loc);
- }
-
- /// HandleByValArgument - This callback is invoked if the aggregate function
- /// argument is passed by value. It is lowered to a parameter passed by
- /// reference with an additional parameter attribute "ByVal".
- void HandleByValArgument(llvm::Type *LLVMTy, tree /*type*/) {
- Value *Loc = getAddress();
- assert(LLVMTy->getPointerTo() == Loc->getType());
- (void)LLVMTy; // Otherwise unused if asserts off - avoid compiler warning.
- CallOperands.push_back(Loc);
- }
-
- /// HandleFCAArgument - This callback is invoked if the aggregate function
- /// argument is passed as a first class aggregate.
- void HandleFCAArgument(llvm::Type *LLVMTy, tree /*type*/) {
- Value *Loc = getAddress();
- assert(LLVMTy->getPointerTo() == Loc->getType());
- (void)LLVMTy; // Otherwise unused if asserts off - avoid compiler warning.
- CallOperands.push_back(Builder.CreateLoad(Loc));
- }
-
- /// EnterField - Called when we're about the enter the field of a struct
- /// or union. FieldNo is the number of the element we are entering in the
- /// LLVM Struct, StructTy is the LLVM type of the struct we are entering.
- void EnterField(unsigned FieldNo, llvm::Type *StructTy) {
- Value *Loc = getAddress();
- Loc = Builder.CreateBitCast(Loc, StructTy->getPointerTo());
- pushAddress(Builder.CreateStructGEP(Loc, FieldNo,
- flag_verbose_asm ? "elt" : ""));
- }
- void ExitField() {
- assert(!LocStack.empty());
- LocStack.pop_back();
- }
- };
+ CallOperands.push_back(Loc);
+ }
+
+ /// HandleByInvisibleReferenceArgument - This callback is invoked if a
+ /// pointer (of type PtrTy) to the argument is passed rather than the
+ /// argument itself.
+ void HandleByInvisibleReferenceArgument(llvm::Type *PtrTy, tree /*type*/) {
+ Value *Loc = getAddress();
+ Loc = Builder.CreateBitCast(Loc, PtrTy);
+ CallOperands.push_back(Loc);
+ }
+
+ /// HandleByValArgument - This callback is invoked if the aggregate function
+ /// argument is passed by value. It is lowered to a parameter passed by
+ /// reference with an additional parameter attribute "ByVal".
+ void HandleByValArgument(llvm::Type *LLVMTy, tree /*type*/) {
+ Value *Loc = getAddress();
+ assert(LLVMTy->getPointerTo() == Loc->getType());
+ (void) LLVMTy; // Otherwise unused if asserts off - avoid compiler warning.
+ CallOperands.push_back(Loc);
+ }
+
+ /// HandleFCAArgument - This callback is invoked if the aggregate function
+ /// argument is passed as a first class aggregate.
+ void HandleFCAArgument(llvm::Type *LLVMTy, tree /*type*/) {
+ Value *Loc = getAddress();
+ assert(LLVMTy->getPointerTo() == Loc->getType());
+ (void) LLVMTy; // Otherwise unused if asserts off - avoid compiler warning.
+ CallOperands.push_back(Builder.CreateLoad(Loc));
+ }
+
+ /// EnterField - Called when we're about the enter the field of a struct
+ /// or union. FieldNo is the number of the element we are entering in the
+ /// LLVM Struct, StructTy is the LLVM type of the struct we are entering.
+ void EnterField(unsigned FieldNo, llvm::Type *StructTy) {
+ Value *Loc = getAddress();
+ Loc = Builder.CreateBitCast(Loc, StructTy->getPointerTo());
+ pushAddress(Builder.CreateStructGEP(Loc, FieldNo,
+ flag_verbose_asm ? "elt" : ""));
+ }
+ void ExitField() {
+ assert(!LocStack.empty());
+ LocStack.pop_back();
+ }
+};
}
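FunctionCallArgumentConversion is one client of the ABI-visitor protocol used throughout dragonegg: target ABI code walks a GCC type and fires a typed callback per piece, and each client decides what passing that piece means in its own context (here, appending to a call's operand list). Schematically, and much reduced from the real DefaultABIClient interface:

// Reduced sketch of the callback protocol; not the real interface.
struct ABIClientSketch {
  virtual void HandleScalarArgument(llvm::Type *Ty) = 0; // pass a value
  virtual void HandleByValArgument(llvm::Type *Ty) = 0;  // pass an address
  virtual void EnterField(unsigned FieldNo, llvm::Type *STy) = 0;
  virtual void ExitField() = 0;
  virtual ~ABIClientSketch() {}
};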
/// EmitCallOf - Emit a call to the specified callee with the operands specified
@@ -3274,8 +3239,7 @@
PAL = PAL.addAttribute(Callee->getContext(), AttributeSet::FunctionIndex,
Attribute::NoUnwind);
- if (!PAL.hasAttribute(AttributeSet::FunctionIndex,
- Attribute::NoUnwind)) {
+ if (!PAL.hasAttribute(AttributeSet::FunctionIndex, Attribute::NoUnwind)) {
// This call may throw. Determine if we need to generate
// an invoke rather than a simple call.
LPadNo = lookup_stmt_eh_lp(stmt);
@@ -3301,8 +3265,8 @@
}
tree fndecl = gimple_call_fndecl(stmt);
- tree fntype = fndecl ?
- TREE_TYPE(fndecl) : TREE_TYPE (TREE_TYPE(gimple_call_fn(stmt)));
+ tree fntype = fndecl ? TREE_TYPE(fndecl) :
+ TREE_TYPE(TREE_TYPE(gimple_call_fn(stmt)));
// Determine the calling convention.
CallingConv::ID CallingConvention = CallingConv::C;
@@ -3310,7 +3274,7 @@
TARGET_ADJUST_LLVM_CC(CallingConvention, fntype);
#endif
- SmallVector<Value*, 16> CallOperands;
+ SmallVector<Value *, 16> CallOperands;
PointerType *PFTy = cast<PointerType>(Callee->getType());
FunctionType *FTy = cast<FunctionType>(PFTy->getElementType());
FunctionCallArgumentConversion Client(CallOperands, FTy, DestLoc,
@@ -3328,7 +3292,7 @@
CallOperands.push_back(EmitMemory(gimple_call_chain(stmt)));
// Loop over the arguments, expanding them and adding them to the op list.
- std::vector<Type*> ScalarArgs;
+ std::vector<Type *> ScalarArgs;
for (unsigned i = 0, e = gimple_call_num_args(stmt); i != e; ++i) {
tree arg = gimple_call_arg(stmt, i);
tree type = TREE_TYPE(arg);
@@ -3381,8 +3345,8 @@
// If the caller and callee disagree about a parameter type but the difference
// is trivial, correct the type used by the caller.
- for (unsigned i = 0, e = std::min((unsigned)CallOperands.size(),
- FTy->getNumParams());
+ for (unsigned i = 0, e = std::min((unsigned) CallOperands.size(),
+ FTy->getNumParams());
i != e; ++i) {
Type *ExpectedTy = FTy->getParamType(i);
Type *ActualTy = CallOperands[i]->getType();
@@ -3416,7 +3380,7 @@
// precedes the GCC one, after phi nodes have been populated (doing things
// this way simplifies the generation of phi nodes). Record the invoke as
// well as the GCC exception handling region.
- if ((unsigned)LPadNo >= NormalInvokes.size())
+ if ((unsigned) LPadNo >= NormalInvokes.size())
NormalInvokes.resize(LPadNo + 1);
NormalInvokes[LPadNo].push_back(cast<InvokeInst>(Call));
}
@@ -3442,12 +3406,11 @@
Target = CreateTempLoc(ConvertType(gimple_call_return_type(stmt)));
if (DL.getTypeAllocSize(Call->getType()) <=
- DL.getTypeAllocSize(cast<PointerType>(Target.Ptr->getType())
- ->getElementType())) {
+ DL.getTypeAllocSize(
+ cast<PointerType>(Target.Ptr->getType())->getElementType())) {
Value *Dest = Builder.CreateBitCast(Target.Ptr,
Call->getType()->getPointerTo());
- LLVM_EXTRACT_MULTIPLE_RETURN_VALUE(Call, Dest, Target.Volatile,
- Builder);
+ LLVM_EXTRACT_MULTIPLE_RETURN_VALUE(Call, Dest, Target.Volatile, Builder);
} else {
// The call will return an aggregate value in registers, but
// those registers are bigger than Target. Allocate a
@@ -3456,12 +3419,11 @@
// the correct type, copy the value into Target. Assume the
// optimizer will delete the temporary and clean this up.
AllocaInst *biggerTmp = CreateTemporary(Call->getType());
- LLVM_EXTRACT_MULTIPLE_RETURN_VALUE(Call,biggerTmp,/*Volatile=*/false,
- Builder);
- EmitAggregateCopy(Target,
- MemRef(Builder.CreateBitCast(biggerTmp,Call->getType()->
- getPointerTo()),
- Target.getAlignment(), Target.Volatile),
+ LLVM_EXTRACT_MULTIPLE_RETURN_VALUE(Call, biggerTmp, /*Volatile=*/ false,
+ Builder);
+ EmitAggregateCopy(Target, MemRef(Builder.CreateBitCast(
+ biggerTmp, Call->getType()->getPointerTo()),
+ Target.getAlignment(), Target.Volatile),
gimple_call_return_type(stmt));
}
@@ -3471,7 +3433,7 @@
if (!DestLoc) {
Type *RetTy = ConvertType(gimple_call_return_type(stmt));
if (Call->getType() == RetTy)
- return Call; // Normal scalar return.
+ return Call; // Normal scalar return.
// May be something as simple as a float being returned as an integer, or
// something trickier like a complex int type { i32, i32 } being returned
@@ -3483,7 +3445,8 @@
"Size mismatch in scalar to scalar conversion!");
Value *Tmp = CreateTemporary(Call->getType());
Builder.CreateStore(Call, Tmp);
- return Builder.CreateLoad(Builder.CreateBitCast(Tmp,RetTy->getPointerTo()));
+ return Builder.CreateLoad(Builder.CreateBitCast(Tmp,
+ RetTy->getPointerTo()));
}
// If the caller expects an aggregate, we have a situation where the ABI for
@@ -3511,7 +3474,7 @@
assert(MaxStoreSize > 0 && "Storing off end of aggregate?");
Value *Val = Call;
// Check whether storing the scalar directly would overflow the aggregate.
- if (DL.getTypeStoreSize(Call->getType()) > (uint64_t)MaxStoreSize) {
+ if (DL.getTypeStoreSize(Call->getType()) > (uint64_t) MaxStoreSize) {
// Chop down the size of the scalar to the maximum number of bytes that can
// be stored without overflowing the destination.
// TODO: Check whether this works correctly on big-endian machines.
@@ -3520,7 +3483,7 @@
Builder.CreateStore(Call, Tmp);
// Load the desired number of bytes back out again as an integer of the
// appropriate size.
- Type *SmallTy = IntegerType::get(Context, MaxStoreSize*8);
+ Type *SmallTy = IntegerType::get(Context, MaxStoreSize * 8);
Tmp = Builder.CreateBitCast(Tmp, PointerType::getUnqual(SmallTy));
Val = Builder.CreateLoad(Tmp);
// Store the integer rather than the call result to the aggregate.
@@ -3540,7 +3503,7 @@
va_start(ops, ret_type);
// Build the list of arguments.
- std::vector<Value*> Args;
+ std::vector<Value *> Args;
#ifdef TARGET_ADJUST_LLVM_CC
// Build the list of GCC argument types.
tree arg_types;
@@ -3559,11 +3522,11 @@
#endif
va_end(ops);
- Type *RetTy = isa<VOID_TYPE>(ret_type) ?
- Type::getVoidTy(Context) : getRegType(ret_type);
+ Type *RetTy = isa<VOID_TYPE>(ret_type) ? Type::getVoidTy(Context) :
+ getRegType(ret_type);
// The LLVM argument types.
- std::vector<Type*> ArgTys;
+ std::vector<Type *> ArgTys;
ArgTys.reserve(Args.size());
for (unsigned i = 0, e = Args.size(); i != e; ++i)
ArgTys.push_back(Args[i]->getType());
@@ -3577,7 +3540,7 @@
#endif
// Get the function declaration for the callee.
- FunctionType *FTy = FunctionType::get(RetTy, ArgTys, /*isVarArg*/false);
+ FunctionType *FTy = FunctionType::get(RetTy, ArgTys, /*isVarArg*/ false);
Constant *Func = TheModule->getOrInsertFunction(CalleeName, FTy);
// If the function already existed with the wrong prototype then don't try to
@@ -3591,7 +3554,6 @@
return CI;
}
-
//===----------------------------------------------------------------------===//
// ... Inline Assembly and Register Variables ...
//===----------------------------------------------------------------------===//
@@ -3623,13 +3585,12 @@
return UndefValue::get(RegTy);
// Turn this into a 'tmp = call Ty asm "", "={reg}"()'.
- FunctionType *FTy = FunctionType::get(MemTy, std::vector<Type*>(),
- false);
+ FunctionType *FTy = FunctionType::get(MemTy, std::vector<Type *>(), false);
const char *Name = extractRegisterName(decl);
Name = LLVM_GET_REG_NAME(Name, decode_reg_name(Name));
- InlineAsm *IA = InlineAsm::get(FTy, "", "={"+std::string(Name)+"}", true);
+ InlineAsm *IA = InlineAsm::get(FTy, "", "={" + std::string(Name) + "}", true);
CallInst *Call = Builder.CreateCall(IA);
Call->setDoesNotThrow();
@@ -3648,7 +3609,7 @@
RHS = Reg2Mem(RHS, TREE_TYPE(decl), Builder);
// Turn this into a 'call void asm sideeffect "", "{reg}"(Ty %RHS)'.
- std::vector<Type*> ArgTys;
+ std::vector<Type *> ArgTys;
ArgTys.push_back(RHS->getType());
FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context), ArgTys,
false);
@@ -3656,7 +3617,7 @@
const char *Name = extractRegisterName(decl);
Name = LLVM_GET_REG_NAME(Name, decode_reg_name(Name));
- InlineAsm *IA = InlineAsm::get(FTy, "", "{"+std::string(Name)+"}", true);
+ InlineAsm *IA = InlineAsm::get(FTy, "", "{" + std::string(Name) + "}", true);
CallInst *Call = Builder.CreateCall(IA, RHS);
Call->setDoesNotThrow();
}
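A register-variable write is thus a degenerate side-effecting asm call. The same pattern in isolation (a minimal sketch; the constraint string is built from the decoded register name):

// Sketch: 'call void asm sideeffect "", "{regname}"(Ty %RHS)'.
static void WriteRegSketch(IRBuilder<> &B, Value *RHS, const char *RegName) {
  std::vector<Type *> ArgTys(1, RHS->getType());
  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(RHS->getContext()), ArgTys, false);
  InlineAsm *IA = InlineAsm::get(FTy, "", "{" + std::string(RegName) + "}",
                                 /*hasSideEffects*/ true);
  CallInst *Call = B.CreateCall(IA, RHS);
  Call->setDoesNotThrow();
}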
@@ -3688,9 +3649,14 @@
std::string Result;
while (1) {
switch (*InStr++) {
- case 0: return Result; // End of string.
- default: Result += InStr[-1]; break; // Normal character.
- case '$': Result += "$$"; break; // Escape '$' characters.
+ case 0:
+ return Result; // End of string.
+ default:
+ Result += InStr[-1];
+ break; // Normal character.
+ case '$':
+ Result += "$$";
+ break; // Escape '$' characters.
}
}
}
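The small escaper above only protects literal '$' characters; the full converter that follows also maps GCC's '%'-based operand syntax onto LLVM's '$'-based syntax. Concretely, for AT&T-style x86 templates (illustrative examples):

  GCC "movl %%eax, %0"  becomes  LLVM "movl %eax, $0"
  GCC "addl $4, %1"     becomes  LLVM "addl $$4, $1"
  GCC "L%=: jmp L%="    becomes  LLVM "L${:uid}: jmp L${:uid}"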
@@ -3698,26 +3664,37 @@
std::string Result;
while (1) {
switch (*AsmStr++) {
- case 0: return Result; // End of string.
- default: Result += AsmStr[-1]; break; // Normal character.
- case '$': Result += "$$"; break; // Escape '$' characters.
+ case 0:
+ return Result; // End of string.
+ default:
+ Result += AsmStr[-1];
+ break; // Normal character.
+ case '$':
+ Result += "$$";
+ break; // Escape '$' characters.
#ifdef ASSEMBLER_DIALECT
- // Note that we can't escape to ${, because that is the syntax for vars.
- case '{': Result += "$("; break; // Escape '{' character.
- case '}': Result += "$)"; break; // Escape '}' character.
- case '|': Result += "$|"; break; // Escape '|' character.
+ // Note that we can't escape to ${, because that is the syntax for vars.
+ case '{':
+ Result += "$(";
+ break; // Escape '{' character.
+ case '}':
+ Result += "$)";
+ break; // Escape '}' character.
+ case '|':
+ Result += "$|";
+ break; // Escape '|' character.
#endif
- case '%': // GCC escape character.
+ case '%': // GCC escape character.
char EscapedChar = *AsmStr++;
- if (EscapedChar == '%') { // Escaped '%' character
+ if (EscapedChar == '%') { // Escaped '%' character
Result += '%';
- } else if (EscapedChar == '=') { // Unique ID for the asm instance.
+ } else if (EscapedChar == '=') { // Unique ID for the asm instance.
Result += "${:uid}";
}
#ifdef LLVM_ASM_EXTENSIONS
LLVM_ASM_EXTENSIONS(EscapedChar, AsmStr, Result)
#endif
- else if (ISALPHA(EscapedChar)) {
+ else if (ISALPHA(EscapedChar)) {
// % followed by a letter and some digits. This outputs an operand in a
// special way depending on the letter. We turn this into LLVM ${N:o}
// syntax.
@@ -3733,18 +3710,21 @@
}
Result += "${" + utostr(OpNum) + ":" + EscapedChar + "}";
AsmStr = EndPtr;
- } else if (ISDIGIT(EscapedChar)) {
+ }
+ else if (ISDIGIT(EscapedChar)) {
char *EndPtr;
- unsigned long OpNum = strtoul(AsmStr-1, &EndPtr, 10);
+ unsigned long OpNum = strtoul(AsmStr - 1, &EndPtr, 10);
AsmStr = EndPtr;
Result += "$" + utostr(OpNum);
#ifdef PRINT_OPERAND_PUNCT_VALID_P
- } else if (PRINT_OPERAND_PUNCT_VALID_P((unsigned char)EscapedChar)) {
+ }
+ else if (PRINT_OPERAND_PUNCT_VALID_P((unsigned char) EscapedChar)) {
Result += "${:";
Result += EscapedChar;
Result += "}";
#endif
- } else {
+ }
+ else {
output_operand_lossage("invalid %%-code");
}
break;
@@ -3761,7 +3741,7 @@
if (gimple_asm_input_p(stmt))
return false;
// Search for a non-escaped '%' character followed by OpNum.
- for (const char *AsmStr = gimple_asm_string(stmt); *AsmStr; ++AsmStr) {
+ for (const char *AsmStr = gimple_asm_string(stmt); * AsmStr; ++AsmStr) {
if (*AsmStr != '%')
// Not a '%', move on to next character.
continue;
@@ -3777,7 +3757,7 @@
continue;
char *EndPtr;
// If this is an explicit reference to OpNum then we are done.
- if (OpNum == strtoul(AsmStr+1, &EndPtr, 10))
+ if (OpNum == strtoul(AsmStr + 1, &EndPtr, 10))
return true;
// Otherwise, skip over the number and keep scanning.
AsmStr = EndPtr - 1;
@@ -3796,7 +3776,9 @@
bool DoneModifiers = false;
while (!DoneModifiers) {
switch (*Constraint) {
- default: DoneModifiers = true; break;
+ default:
+ DoneModifiers = true;
+ break;
case '=':
llvm_unreachable("Should be after '='s");
case '+':
@@ -3806,11 +3788,11 @@
case '!':
++Constraint;
break;
- case '&': // Pass earlyclobber to LLVM.
- case '%': // Pass commutative to LLVM.
+ case '&': // Pass earlyclobber to LLVM.
+ case '%': // Pass commutative to LLVM.
Result += *Constraint++;
break;
- case '#': // No constraint letters left.
+ case '#': // No constraint letters left.
return Result;
}
}
@@ -3843,9 +3825,9 @@
// REG_CLASS_FROM_CONSTRAINT doesn't support 'r' for some reason.
RegClass = GENERAL_REGS;
else
- RegClass = REG_CLASS_FROM_CONSTRAINT(Constraint[-1], Constraint-1);
+ RegClass = REG_CLASS_FROM_CONSTRAINT(Constraint[-1], Constraint - 1);
- if (RegClass == NO_REGS) { // not a reg class.
+ if (RegClass == NO_REGS) { // not a reg class.
Result += ConstraintChar;
continue;
}
@@ -3854,8 +3836,8 @@
// what it is. Cache this information in AnalyzedRegClasses once computed.
static std::map<unsigned, int> AnalyzedRegClasses;
- std::map<unsigned, int>::iterator I =
- AnalyzedRegClasses.lower_bound(RegClass);
+ std::map<unsigned, int>::iterator I = AnalyzedRegClasses.lower_bound(
+ RegClass);
int RegMember;
if (I != AnalyzedRegClasses.end() && I->first == RegClass) {
@@ -3863,7 +3845,7 @@
RegMember = I->second;
} else {
// Otherwise, scan the regclass, looking for exactly one member.
- RegMember = -1; // -1 => not a single-register class.
+ RegMember = -1; // -1 => not a single-register class.
for (unsigned j = 0; j != FIRST_PSEUDO_REGISTER; ++j)
if (TEST_HARD_REG_BIT(reg_class_contents[RegClass], j)) {
if (RegMember == -1) {
@@ -3924,11 +3906,11 @@
if (isa<INTEGER_CST>(Operand)) {
do {
RetVal = -1;
- if (*p == 'i' || *p == 'n') { // integer constant
+ if (*p == 'i' || *p == 'n') { // integer constant
RetVal = 1;
break;
}
- if (*p != 'm' && *p != 'o' && *p != 'V') // not memory
+ if (*p != 'm' && *p != 'o' && *p != 'V') // not memory
RetVal = 0;
++p;
} while (*p != ',' && *p != 0);
@@ -3964,19 +3946,19 @@
// RunningConstraints is pointers into the Constraints strings which
// are incremented as we go to point to the beginning of each
// comma-separated alternative.
- const char** RunningConstraints =
- (const char**)alloca((NumInputs+NumOutputs)*sizeof(const char*));
+ const char **RunningConstraints =
+ (const char * *) alloca((NumInputs + NumOutputs) * sizeof(const char *));
memcpy(RunningConstraints, Constraints,
- (NumInputs+NumOutputs) * sizeof(const char*));
+ (NumInputs + NumOutputs) * sizeof(const char *));
// The entire point of this loop is to compute CommasToSkip.
for (unsigned i = 0; i != NumChoices; ++i) {
Weights[i] = 0;
for (unsigned j = 0; j != NumOutputs; ++j) {
tree Output = gimple_asm_output_op(stmt, j);
- if (i==0)
- RunningConstraints[j]++; // skip leading =
- const char* p = RunningConstraints[j];
- while (*p=='*' || *p=='&' || *p=='%') // skip modifiers
+ if (i == 0)
+ RunningConstraints[j]++; // skip leading =
+ const char *p = RunningConstraints[j];
+ while (*p == '*' || *p == '&' || *p == '%') // skip modifiers
p++;
if (Weights[i] != -1) {
int w = MatchWeight(p, TREE_VALUE(Output));
@@ -3988,32 +3970,32 @@
else
Weights[i] += w;
}
- while (*p!=0 && *p!=',')
+ while (*p != 0 && *p != ',')
p++;
- if (*p!=0) {
- p++; // skip comma
- while (*p=='*' || *p=='&' || *p=='%')
- p++; // skip modifiers
+ if (*p != 0) {
+ p++; // skip comma
+ while (*p == '*' || *p == '&' || *p == '%')
+ p++; // skip modifiers
}
RunningConstraints[j] = p;
}
for (unsigned j = 0; j != NumInputs; ++j) {
tree Input = gimple_asm_input_op(stmt, j);
- const char* p = RunningConstraints[NumOutputs + j];
+ const char *p = RunningConstraints[NumOutputs + j];
if (Weights[i] != -1) {
int w = MatchWeight(p, TREE_VALUE(Input));
if (w < 0)
- Weights[i] = -1; // As above.
+ Weights[i] = -1; // As above.
else
Weights[i] += w;
}
- while (*p!=0 && *p!=',')
+ while (*p != 0 && *p != ',')
p++;
- if (*p!=0)
+ if (*p != 0)
p++;
RunningConstraints[NumOutputs + j] = p;
}
- if (Weights[i]>MaxWeight) {
+ if (Weights[i] > MaxWeight) {
CommasToSkip = i;
MaxWeight = Weights[i];
}
@@ -4021,16 +4003,16 @@
// We have picked an alternative (the CommasToSkip'th one).
// Change Constraints to point to malloc'd copies of the appropriate
// constraints picked out of the original strings.
- for (unsigned int i=0; i<NumInputs+NumOutputs; i++) {
- assert(*(RunningConstraints[i])==0); // sanity check
- const char* start = Constraints[i];
- if (i<NumOutputs)
- start++; // skip '=' or '+'
- const char* end = start;
+ for (unsigned int i = 0; i < NumInputs + NumOutputs; i++) {
+ assert(*(RunningConstraints[i]) == 0); // sanity check
+ const char *start = Constraints[i];
+ if (i < NumOutputs)
+ start++; // skip '=' or '+'
+ const char *end = start;
while (*end != ',' && *end != 0)
end++;
- for (unsigned int j=0; j<CommasToSkip; j++) {
- start = end+1;
+ for (unsigned int j = 0; j < CommasToSkip; j++) {
+ start = end + 1;
end = start;
while (*end != ',' && *end != 0)
end++;
@@ -4038,27 +4020,26 @@
// String we want is at start..end-1 inclusive.
// For outputs, copy the leading = or +.
char *newstring;
- if (i<NumOutputs) {
- newstring = StringStorage.Allocate<char>(end-start+1+1);
+ if (i < NumOutputs) {
+ newstring = StringStorage.Allocate<char>(end - start + 1 + 1);
newstring[0] = *(Constraints[i]);
- strncpy(newstring+1, start, end-start);
- newstring[end-start+1] = 0;
+ strncpy(newstring + 1, start, end - start);
+ newstring[end - start + 1] = 0;
} else {
- newstring = StringStorage.Allocate<char>(end-start+1);
- strncpy(newstring, start, end-start);
- newstring[end-start] = 0;
+ newstring = StringStorage.Allocate<char>(end - start + 1);
+ strncpy(newstring, start, end - start);
+ newstring[end - start] = 0;
}
Constraints[i] = (const char *)newstring;
}
}
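In short, the alternative-selection logic above scores each comma-separated constraint column across all operands, then rewrites every constraint string down to the single winning alternative. For example, with an output constrained "=r,m" and an input constrained "ri,m", the columns are { "=r", "ri" } and { "=m", "m" }; if the register column scores best, the stored constraints become "=r" and "ri", with the output's leading '=' or '+' re-attached to the copy.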
-
//===----------------------------------------------------------------------===//
// ... Helpers for Builtin Function Expansion ...
//===----------------------------------------------------------------------===//
-Value *TreeToLLVM::BuildVector(const std::vector<Value*> &Ops) {
- assert((Ops.size() & (Ops.size()-1)) == 0 &&
+Value *TreeToLLVM::BuildVector(const std::vector<Value *> &Ops) {
+ assert((Ops.size() & (Ops.size() - 1)) == 0 &&
"Not a power-of-two sized vector!");
bool AllConstants = true;
for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i)
@@ -4066,15 +4047,15 @@
// If this is a constant vector, create a ConstantVector.
if (AllConstants) {
- SmallVector<Constant*, 16> CstOps;
+ SmallVector<Constant *, 16> CstOps;
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
CstOps.push_back(cast<Constant>(Ops[i]));
return ConstantVector::get(CstOps);
}
// Otherwise, insertelement the values to build the vector.
- Value *Result =
- UndefValue::get(VectorType::get(Ops[0]->getType(), Ops.size()));
+ Value *Result = UndefValue::get(VectorType::get(Ops[0]->getType(),
+ Ops.size()));
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i));
@@ -4086,7 +4067,7 @@
/// the specified null-terminated list of elements. The elements must be all
/// the same element type and there must be a power of two of them.
Value *TreeToLLVM::BuildVector(Value *Elt, ...) {
- std::vector<Value*> Ops;
+ std::vector<Value *> Ops;
va_list VA;
va_start(VA, Elt);
@@ -4111,7 +4092,7 @@
unsigned NumElements = cast<VectorType>(InVec1->getType())->getNumElements();
// Get all the indexes from varargs.
- SmallVector<Constant*, 16> Idxs;
+ SmallVector<Constant *, 16> Idxs;
va_list VA;
va_start(VA, InVec2);
for (unsigned i = 0; i != NumElements; ++i) {
@@ -4119,15 +4100,14 @@
if (idx == -1)
Idxs.push_back(UndefValue::get(Type::getInt32Ty(Context)));
else {
- assert((unsigned)idx < 2*NumElements && "Element index out of range!");
+ assert((unsigned) idx < 2 * NumElements && "Element index out of range!");
Idxs.push_back(Builder.getInt32(idx));
}
}
va_end(VA);
// Turn this into the appropriate shuffle operation.
- return Builder.CreateShuffleVector(InVec1, InVec2,
- ConstantVector::get(Idxs));
+ return Builder.CreateShuffleVector(InVec1, InVec2, ConstantVector::get(Idxs));
}
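BuildVectorShuffle is thus a thin wrapper over shufflevector with a constant mask. For instance, swapping the two halves of a 4-element vector (a minimal sketch; helper name illustrative):

// Sketch: select lanes <2,3,0,1> of V; the second operand is unused.
static Value *SwapHalvesSketch(IRBuilder<> &B, Value *V) {
  Constant *Idxs[4] = { B.getInt32(2), B.getInt32(3),
                        B.getInt32(0), B.getInt32(1) };
  Value *Undef = UndefValue::get(V->getType());
  return B.CreateShuffleVector(V, Undef, ConstantVector::get(Idxs));
}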
//===----------------------------------------------------------------------===//
@@ -4140,13 +4120,12 @@
///
/// This method returns true if the builtin is handled, otherwise false.
///
-bool TreeToLLVM::EmitFrontendExpandedBuiltinCall(gimple stmt, tree fndecl,
- const MemRef *DestLoc,
- Value *&Result) {
+bool TreeToLLVM::EmitFrontendExpandedBuiltinCall(
+ gimple stmt, tree fndecl, const MemRef *DestLoc, Value *&Result) {
#ifdef LLVM_TARGET_INTRINSIC_LOWER
// Get the result type and operand line in an easy to consume format.
Type *ResultType = ConvertType(TREE_TYPE(TREE_TYPE(fndecl)));
- std::vector<Value*> Operands;
+ std::vector<Value *> Operands;
for (unsigned i = 0, e = gimple_call_num_args(stmt); i != e; ++i) {
tree OpVal = gimple_call_arg(stmt, i);
if (isa<AGGREGATE_TYPE>(TREE_TYPE(OpVal))) {
@@ -4162,30 +4141,30 @@
Operands);
#else
// Avoid compiler warnings about unused parameters.
- (void)stmt; (void)fndecl; (void)DestLoc; (void)Result;
+ (void) stmt;
+ (void) fndecl;
+ (void) DestLoc;
+ (void) Result;
return false;
#endif
}
/// TargetBuiltinCache - A cache of builtin intrinsics indexed by the GCC
/// builtin number.
-static std::vector<Constant*> TargetBuiltinCache;
+static std::vector<Constant *> TargetBuiltinCache;
Value *TreeToLLVM::BuildBinaryAtomic(gimple stmt, AtomicRMWInst::BinOp Kind,
unsigned PostOp) {
tree return_type = gimple_call_return_type(stmt);
Type *ResultTy = ConvertType(return_type);
- Value* C[2] = {
- EmitMemory(gimple_call_arg(stmt, 0)),
- EmitMemory(gimple_call_arg(stmt, 1))
- };
- Type* Ty[2];
+ Value *C[2] = { EmitMemory(gimple_call_arg(stmt, 0)),
+ EmitMemory(gimple_call_arg(stmt, 1)) };
+ Type *Ty[2];
Ty[0] = ResultTy;
Ty[1] = ResultTy->getPointerTo();
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
- C[1] = Builder.CreateIntCast(C[1], Ty[0],
- /*isSigned*/!TYPE_UNSIGNED(return_type),
- "cast");
+ C[1] = Builder.CreateIntCast(
+ C[1], Ty[0], /*isSigned*/ !TYPE_UNSIGNED(return_type), "cast");
Value *Result = Builder.CreateAtomicRMW(Kind, C[0], C[1],
SequentiallyConsistent);
if (PostOp)
@@ -4195,8 +4174,8 @@
return Result;
}
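BuildBinaryAtomic wraps the __sync_fetch_and_* family around a single atomicrmw; stripped of the pointer and integer casts it is just the following (sketch):

// Sketch: fetch-and-add with sequentially consistent ordering. It returns
// the value the location held before the addition; the PostOp path above
// re-applies the operation to build the op-and-fetch variants instead.
static Value *FetchAndAddSketch(IRBuilder<> &B, Value *Ptr, Value *Amt) {
  return B.CreateAtomicRMW(AtomicRMWInst::Add, Ptr, Amt,
                           SequentiallyConsistent);
}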
-Value *
-TreeToLLVM::BuildCmpAndSwapAtomic(gimple stmt, unsigned Bits, bool isBool) {
+Value *TreeToLLVM::BuildCmpAndSwapAtomic(gimple stmt, unsigned Bits,
+ bool isBool) {
tree ptr = gimple_call_arg(stmt, 0);
tree old_val = gimple_call_arg(stmt, 1);
tree new_val = gimple_call_arg(stmt, 2);
@@ -4233,7 +4212,7 @@
if (DECL_BUILT_IN_CLASS(fndecl) == BUILT_IN_MD) {
unsigned FnCode = DECL_FUNCTION_CODE(fndecl);
if (TargetBuiltinCache.size() <= FnCode)
- TargetBuiltinCache.resize(FnCode+1);
+ TargetBuiltinCache.resize(FnCode + 1);
// If we haven't converted this intrinsic over yet, do so now.
if (TargetBuiltinCache[FnCode] == 0) {
@@ -4249,8 +4228,8 @@
// If this builtin directly corresponds to an LLVM intrinsic, get the
// IntrinsicID now.
const char *BuiltinName = IDENTIFIER_POINTER(DECL_NAME(fndecl));
- Intrinsic::ID IntrinsicID =
- Intrinsic::getIntrinsicForGCCBuiltin(TargetPrefix, BuiltinName);
+ Intrinsic::ID IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(
+ TargetPrefix, BuiltinName);
if (IntrinsicID == Intrinsic::not_intrinsic) {
error("unsupported target builtin %<%s%> used", BuiltinName);
Type *ResTy = ConvertType(gimple_call_return_type(stmt));
@@ -4260,8 +4239,8 @@
}
// Finally, map the intrinsic ID back to a name.
- TargetBuiltinCache[FnCode] =
- Intrinsic::getDeclaration(TheModule, IntrinsicID);
+ TargetBuiltinCache[FnCode] = Intrinsic::getDeclaration(TheModule,
+ IntrinsicID);
}
Result = EmitCallOf(TargetBuiltinCache[FnCode], stmt, DestLoc,
@@ -4271,52 +4250,68 @@
enum built_in_function fcode = DECL_FUNCTION_CODE(fndecl);
switch (fcode) {
- default: return false;
- // Varargs builtins.
- case BUILT_IN_VA_START: return EmitBuiltinVAStart(stmt);
- case BUILT_IN_VA_END: return EmitBuiltinVAEnd(stmt);
- case BUILT_IN_VA_COPY: return EmitBuiltinVACopy(stmt);
+ default:
+ return false;
+ // Varargs builtins.
+ case BUILT_IN_VA_START:
+ return EmitBuiltinVAStart(stmt);
+ case BUILT_IN_VA_END:
+ return EmitBuiltinVAEnd(stmt);
+ case BUILT_IN_VA_COPY:
+ return EmitBuiltinVACopy(stmt);
case BUILT_IN_ADJUST_TRAMPOLINE:
return EmitBuiltinAdjustTrampoline(stmt, Result);
- case BUILT_IN_ALLOCA: return EmitBuiltinAlloca(stmt, Result);
+ case BUILT_IN_ALLOCA:
+ return EmitBuiltinAlloca(stmt, Result);
#if (GCC_MINOR > 6)
case BUILT_IN_ALLOCA_WITH_ALIGN:
- return EmitBuiltinAllocaWithAlign(stmt, Result);
+ return EmitBuiltinAllocaWithAlign(stmt, Result);
#endif
#if (GCC_MINOR > 6)
case BUILT_IN_ASSUME_ALIGNED:
- return EmitBuiltinAssumeAligned(stmt, Result);
+ return EmitBuiltinAssumeAligned(stmt, Result);
#endif
- case BUILT_IN_BZERO: return EmitBuiltinBZero(stmt, Result);
- case BUILT_IN_CONSTANT_P: return EmitBuiltinConstantP(stmt, Result);
- case BUILT_IN_EXPECT: return EmitBuiltinExpect(stmt, Result);
- case BUILT_IN_EXTEND_POINTER: return EmitBuiltinExtendPointer(stmt, Result);
+ case BUILT_IN_BZERO:
+ return EmitBuiltinBZero(stmt, Result);
+ case BUILT_IN_CONSTANT_P:
+ return EmitBuiltinConstantP(stmt, Result);
+ case BUILT_IN_EXPECT:
+ return EmitBuiltinExpect(stmt, Result);
+ case BUILT_IN_EXTEND_POINTER:
+ return EmitBuiltinExtendPointer(stmt, Result);
case BUILT_IN_EXTRACT_RETURN_ADDR:
- return EmitBuiltinExtractReturnAddr(stmt, Result);
- case BUILT_IN_FRAME_ADDRESS: return EmitBuiltinReturnAddr(stmt, Result,true);
+ return EmitBuiltinExtractReturnAddr(stmt, Result);
+ case BUILT_IN_FRAME_ADDRESS:
+ return EmitBuiltinReturnAddr(stmt, Result, true);
case BUILT_IN_FROB_RETURN_ADDR:
- return EmitBuiltinFrobReturnAddr(stmt, Result);
+ return EmitBuiltinFrobReturnAddr(stmt, Result);
case BUILT_IN_INIT_TRAMPOLINE:
return EmitBuiltinInitTrampoline(stmt, Result);
- case BUILT_IN_MEMCPY: return EmitBuiltinMemCopy(stmt, Result,
- false, false);
- case BUILT_IN_MEMCPY_CHK: return EmitBuiltinMemCopy(stmt, Result,
- false, true);
- case BUILT_IN_MEMMOVE: return EmitBuiltinMemCopy(stmt, Result,
- true, false);
- case BUILT_IN_MEMMOVE_CHK: return EmitBuiltinMemCopy(stmt, Result,
- true, true);
- case BUILT_IN_MEMSET: return EmitBuiltinMemSet(stmt, Result, false);
- case BUILT_IN_MEMSET_CHK: return EmitBuiltinMemSet(stmt, Result, true);
- case BUILT_IN_PREFETCH: return EmitBuiltinPrefetch(stmt);
+ case BUILT_IN_MEMCPY:
+ return EmitBuiltinMemCopy(stmt, Result, false, false);
+ case BUILT_IN_MEMCPY_CHK:
+ return EmitBuiltinMemCopy(stmt, Result, false, true);
+ case BUILT_IN_MEMMOVE:
+ return EmitBuiltinMemCopy(stmt, Result, true, false);
+ case BUILT_IN_MEMMOVE_CHK:
+ return EmitBuiltinMemCopy(stmt, Result, true, true);
+ case BUILT_IN_MEMSET:
+ return EmitBuiltinMemSet(stmt, Result, false);
+ case BUILT_IN_MEMSET_CHK:
+ return EmitBuiltinMemSet(stmt, Result, true);
+ case BUILT_IN_PREFETCH:
+ return EmitBuiltinPrefetch(stmt);
case BUILT_IN_RETURN_ADDRESS:
- return EmitBuiltinReturnAddr(stmt, Result,false);
- case BUILT_IN_STACK_RESTORE: return EmitBuiltinStackRestore(stmt);
- case BUILT_IN_STACK_SAVE: return EmitBuiltinStackSave(stmt, Result);
- case BUILT_IN_UNREACHABLE: return EmitBuiltinUnreachable();
+ return EmitBuiltinReturnAddr(stmt, Result, false);
+ case BUILT_IN_STACK_RESTORE:
+ return EmitBuiltinStackRestore(stmt);
+ case BUILT_IN_STACK_SAVE:
+ return EmitBuiltinStackSave(stmt, Result);
+ case BUILT_IN_UNREACHABLE:
+ return EmitBuiltinUnreachable();
- // Exception handling builtins.
+ // Exception handling builtins.
case BUILT_IN_EH_COPY_VALUES:
return EmitBuiltinEHCopyValues(stmt);
case BUILT_IN_EH_FILTER:
@@ -4324,7 +4319,7 @@
case BUILT_IN_EH_POINTER:
return EmitBuiltinEHPointer(stmt, Result);
- // Builtins used by the exception handling runtime.
+ // Builtins used by the exception handling runtime.
case BUILT_IN_DWARF_CFA:
return EmitBuiltinDwarfCFA(stmt, Result);
#ifdef DWARF2_UNWIND_INFO
@@ -4348,10 +4343,9 @@
return false;
}
tree ObjSizeTree = gimple_call_arg(stmt, 1);
- STRIP_NOPS (ObjSizeTree);
- if (!isa<INTEGER_CST>(ObjSizeTree)
- || tree_int_cst_sgn (ObjSizeTree) < 0
- || compare_tree_int (ObjSizeTree, 3) > 0) {
+ STRIP_NOPS(ObjSizeTree);
+ if (!isa<INTEGER_CST>(ObjSizeTree) || tree_int_cst_sgn(ObjSizeTree) < 0 ||
+ compare_tree_int(ObjSizeTree, 3) > 0) {
error("Invalid second builtin_object_size argument");
return false;
}
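
For reference, the [0, 3] range validated above is the builtin's mode
argument: __builtin_object_size(ptr, mode) takes a constant from 0 to 3
selecting how the object size is estimated. A small GCC/Clang sketch (for
a pointer the compiler cannot analyse, modes 0/1 yield (size_t)-1 and
modes 2/3 yield 0; a known global is shown here):

#include <cstdio>

char Buffer[16];

int main() {
  // The mode must be a constant in [0, 3]; anything else is rejected,
  // which is exactly the sgn/compare_tree_int check above.
  std::printf("%zu\n", __builtin_object_size(Buffer, 0));      // 16
  std::printf("%zu\n", __builtin_object_size(Buffer + 4, 0));  // 12
  return 0;
}
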
@@ -4367,34 +4361,29 @@
Value *NewTy = ConstantInt::get(Tmp->getType(), val);
- Value* Args[] = {
- EmitMemory(gimple_call_arg(stmt, 0)),
- NewTy
- };
+ Value *Args[] = { EmitMemory(gimple_call_arg(stmt, 0)), NewTy };
// Grab the current return type.
- Type* Ty = ConvertType(gimple_call_return_type(stmt));
+ Type *Ty = ConvertType(gimple_call_return_type(stmt));
// Manually coerce the arg to the correct pointer type.
Args[0] = Builder.CreateBitCast(Args[0], Type::getInt8PtrTy(Context));
Args[1] = Builder.CreateIntCast(Args[1], Type::getInt1Ty(Context),
- /*isSigned*/false);
+ /*isSigned*/ false);
- Result = Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
- Intrinsic::objectsize,
- Ty),
- Args);
+ Result = Builder.CreateCall(Intrinsic::getDeclaration(
+ TheModule, Intrinsic::objectsize, Ty), Args);
return true;
}
- // Unary bit counting intrinsics.
- // NOTE: do not merge these case statements. That will cause the memoized
- // Function* to be incorrectly shared across the different typed functions.
- case BUILT_IN_CLZ: // These GCC builtins always return int.
+ // Unary bit counting intrinsics.
+ // NOTE: do not merge these case statements. That will cause the memoized
+ // Function* to be incorrectly shared across the differently typed functions.
+ case BUILT_IN_CLZ: // These GCC builtins always return int.
case BUILT_IN_CLZL:
case BUILT_IN_CLZLL:
Result = EmitBuiltinBitCountIntrinsic(stmt, Intrinsic::ctlz);
return true;
- case BUILT_IN_CTZ: // These GCC builtins always return int.
+ case BUILT_IN_CTZ: // These GCC builtins always return int.
case BUILT_IN_CTZL:
case BUILT_IN_CTZLL:
Result = EmitBuiltinBitCountIntrinsic(stmt, Intrinsic::cttz);
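
The NOTE above is worth spelling out: __builtin_clz, __builtin_clzl and
__builtin_clzll all lower to the same intrinsic but at different operand
widths, so any memoized declaration must be keyed on the overload type as
well as the intrinsic. A standalone sketch of the hazard, using
hypothetical names rather than the dragonegg code:

#include <cstdio>
#include <map>
#include <string>
#include <utility>

// If the type were left out of the key (as merging the case labels would
// effectively do), the i32 declaration created for __builtin_clz would be
// handed back when __builtin_clzll asks for the i64 overload.
static std::map<std::pair<int, std::string>, std::string> DeclCache;

static const std::string &getDecl(int ID, const std::string &Ty) {
  std::pair<int, std::string> Key(ID, Ty);
  std::map<std::pair<int, std::string>, std::string>::iterator It =
      DeclCache.find(Key);
  if (It == DeclCache.end())
    It = DeclCache.insert(std::make_pair(Key, "llvm.ctlz." + Ty)).first;
  return It->second;
}

int main() {
  std::printf("%s\n", getDecl(0, "i32").c_str());  // llvm.ctlz.i32
  std::printf("%s\n", getDecl(0, "i64").c_str());  // llvm.ctlz.i64
  return 0;
}
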
@@ -4409,11 +4398,11 @@
tree return_type = gimple_call_return_type(stmt);
Type *DestTy = ConvertType(return_type);
Result = Builder.CreateIntCast(Result, DestTy,
- /*isSigned*/!TYPE_UNSIGNED(return_type),
+ /*isSigned*/ !TYPE_UNSIGNED(return_type),
"cast");
return true;
}
- case BUILT_IN_POPCOUNT: // These GCC builtins always return int.
+ case BUILT_IN_POPCOUNT: // These GCC builtins always return int.
case BUILT_IN_POPCOUNTL:
case BUILT_IN_POPCOUNTLL: {
Value *Amt = EmitMemory(gimple_call_arg(stmt, 0));
@@ -4421,7 +4410,7 @@
tree return_type = gimple_call_return_type(stmt);
Type *DestTy = ConvertType(return_type);
Result = Builder.CreateIntCast(Result, DestTy,
- /*isSigned*/!TYPE_UNSIGNED(return_type),
+ /*isSigned*/ !TYPE_UNSIGNED(return_type),
"cast");
return true;
}
@@ -4432,7 +4421,7 @@
tree return_type = gimple_call_return_type(stmt);
Type *DestTy = ConvertType(return_type);
Result = Builder.CreateIntCast(Result, DestTy,
- /*isSigned*/!TYPE_UNSIGNED(return_type),
+ /*isSigned*/ !TYPE_UNSIGNED(return_type),
"cast");
return true;
}
@@ -4513,26 +4502,23 @@
return true;
}
break;
- case BUILT_IN_FFS: // These GCC builtins always return int.
+ case BUILT_IN_FFS: // These GCC builtins always return int.
case BUILT_IN_FFSL:
- case BUILT_IN_FFSLL: { // FFS(X) -> (x == 0 ? 0 : CTTZ(x)+1)
+ case BUILT_IN_FFSLL: { // FFS(X) -> (x == 0 ? 0 : CTTZ(x)+1)
// The argument and return type of cttz should match the argument type of
// the ffs, but should ignore the return type of ffs.
Value *Amt = EmitMemory(gimple_call_arg(stmt, 0));
- Result = Builder.CreateCall2(Intrinsic::getDeclaration(TheModule,
- Intrinsic::cttz,
- Amt->getType()),
+ Result = Builder.CreateCall2(Intrinsic::getDeclaration(
+ TheModule, Intrinsic::cttz, Amt->getType()),
Amt, Builder.getTrue());
Result = Builder.CreateAdd(Result, ConstantInt::get(Result->getType(), 1));
Result = Builder.CreateIntCast(Result,
ConvertType(gimple_call_return_type(stmt)),
- /*isSigned*/false);
- Value *Cond =
- Builder.CreateICmpEQ(Amt,
- Constant::getNullValue(Amt->getType()));
- Result = Builder.CreateSelect(Cond,
- Constant::getNullValue(Result->getType()),
- Result);
+ /*isSigned*/ false);
+ Value *Cond = Builder.CreateICmpEQ(Amt,
+ Constant::getNullValue(Amt->getType()));
+ Result = Builder.CreateSelect(
+ Cond, Constant::getNullValue(Result->getType()), Result);
return true;
}
#if (GCC_MINOR > 6)
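
The FFS lowering above implements the identity given in its comment,
ffs(x) = (x == 0 ? 0 : cttz(x) + 1): llvm.cttz is called with its
zero-is-undefined flag set to true, and the final select supplies the
x == 0 case. A minimal self-contained model of the same arithmetic in
plain C++ (not the emitted IR):

#include <cassert>

static int ffs_model(unsigned long long x) {
  if (x == 0)
    return 0;
  int tz = 0;                // cttz by hand: count trailing zero bits
  while ((x & 1ULL) == 0) {
    x >>= 1;
    ++tz;
  }
  return tz + 1;             // ffs numbers bits from 1
}

int main() {
  assert(ffs_model(0) == 0);
  assert(ffs_model(1) == 1);
  assert(ffs_model(8) == 4);  // 0b1000: three trailing zeros
  return 0;
}
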
@@ -4566,13 +4552,13 @@
case BUILT_IN_CEXPIL:
Result = EmitBuiltinCEXPI(stmt);
return true;
-//TODO case BUILT_IN_FLT_ROUNDS: {
-//TODO Result =
-//TODO Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
-//TODO Intrinsic::flt_rounds));
-//TODO Result = Builder.CreateBitCast(Result, ConvertType(gimple_call_return_type(stmt)));
-//TODO return true;
-//TODO }
+ //TODO case BUILT_IN_FLT_ROUNDS: {
+ //TODO Result =
+ //TODO Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
+ //TODO Intrinsic::flt_rounds));
+ //TODO Result = Builder.CreateBitCast(Result, ConvertType(gimple_call_return_type(stmt)));
+ //TODO return true;
+ //TODO }
case BUILT_IN_TRAP:
Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::trap));
// Emit an explicit unreachable instruction.
@@ -4619,17 +4605,17 @@
  // We assume, like gcc appears to, that this only applies to cached memory.
Builder.CreateFence(llvm::SequentiallyConsistent);
return true;
-#if defined(TARGET_ALPHA) || defined(TARGET_386) || defined(TARGET_POWERPC) \
- || defined(TARGET_ARM)
- // gcc uses many names for the sync intrinsics
- // The type of the first argument is not reliable for choosing the
- // right llvm function; if the original type is not volatile, gcc has
- // helpfully changed it to "volatile void *" at this point. The
- // original type can be recovered from the function type in most cases.
- // For lock_release and bool_compare_and_swap even that is not good
- // enough, we have to key off the opcode.
- // Note that Intrinsic::getDeclaration expects the type list in reversed
- // order, while CreateCall expects the parameter list in normal order.
+#if defined(TARGET_ALPHA) || defined(TARGET_386) || defined(TARGET_POWERPC) || \
+ defined(TARGET_ARM)
+// gcc uses many names for the sync intrinsics.
+// The type of the first argument is not reliable for choosing the
+// right llvm function; if the original type is not volatile, gcc has
+// helpfully changed it to "volatile void *" at this point. The
+// original type can be recovered from the function type in most cases.
+// For lock_release and bool_compare_and_swap even that is not good
+// enough, we have to key off the opcode.
+// Note that Intrinsic::getDeclaration expects the type list in reversed
+// order, while CreateCall expects the parameter list in normal order.
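
As the comment notes, even the function type is not enough for
bool_compare_and_swap, so the cases below key off the opcode and pass
BuildCmpAndSwapAtomic the operand width in bits (hence the
N * BITS_PER_UNIT arguments) plus a flag picking the flavour. The
difference between GCC's two compare-and-swap flavours, modelled with
std::atomic (a sketch of the semantics, not the dragonegg lowering):

#include <atomic>
#include <cassert>
#include <cstdint>

static std::atomic<uint32_t> Word(5);

// __sync_bool_compare_and_swap: did the swap happen?
static bool bool_cas(uint32_t Expected, uint32_t Desired) {
  return Word.compare_exchange_strong(Expected, Desired);
}

// __sync_val_compare_and_swap: what value was in memory?
static uint32_t val_cas(uint32_t Expected, uint32_t Desired) {
  Word.compare_exchange_strong(Expected, Desired);
  return Expected;  // updated to the value seen in memory on failure
}

int main() {
  assert(bool_cas(5, 6));      // 5 -> 6, the swap succeeds
  assert(val_cas(9, 7) == 6);  // mismatch: no store, old value returned
  assert(Word.load() == 6);
  return 0;
}
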
#if (GCC_MINOR < 7)
case BUILT_IN_BOOL_COMPARE_AND_SWAP_1:
#else
@@ -4642,14 +4628,14 @@
#else
case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_2:
#endif
- Result = BuildCmpAndSwapAtomic(stmt, 2*BITS_PER_UNIT, true);
+ Result = BuildCmpAndSwapAtomic(stmt, 2 * BITS_PER_UNIT, true);
return true;
#if (GCC_MINOR < 7)
case BUILT_IN_BOOL_COMPARE_AND_SWAP_4:
#else
case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_4:
#endif
- Result = BuildCmpAndSwapAtomic(stmt, 4*BITS_PER_UNIT, true);
+ Result = BuildCmpAndSwapAtomic(stmt, 4 * BITS_PER_UNIT, true);
return true;
#if (GCC_MINOR < 7)
case BUILT_IN_BOOL_COMPARE_AND_SWAP_8:
@@ -4660,10 +4646,10 @@
if (!TARGET_64BIT)
return false;
#endif
- Result = BuildCmpAndSwapAtomic(stmt, 8*BITS_PER_UNIT, true);
+ Result = BuildCmpAndSwapAtomic(stmt, 8 * BITS_PER_UNIT, true);
return true;
- // Fall through.
+// Fall through.
#if (GCC_MINOR < 7)
case BUILT_IN_VAL_COMPARE_AND_SWAP_1:
#else
@@ -4676,14 +4662,14 @@
#else
case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_2:
#endif
- Result = BuildCmpAndSwapAtomic(stmt, 2*BITS_PER_UNIT, false);
+ Result = BuildCmpAndSwapAtomic(stmt, 2 * BITS_PER_UNIT, false);
return true;
#if (GCC_MINOR < 7)
case BUILT_IN_VAL_COMPARE_AND_SWAP_4:
#else
case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_4:
#endif
- Result = BuildCmpAndSwapAtomic(stmt, 4*BITS_PER_UNIT, false);
+ Result = BuildCmpAndSwapAtomic(stmt, 4 * BITS_PER_UNIT, false);
return true;
#if (GCC_MINOR < 7)
case BUILT_IN_VAL_COMPARE_AND_SWAP_8:
@@ -4694,7 +4680,7 @@
if (!TARGET_64BIT)
return false;
#endif
- Result = BuildCmpAndSwapAtomic(stmt, 8*BITS_PER_UNIT, false);
+ Result = BuildCmpAndSwapAtomic(stmt, 8 * BITS_PER_UNIT, false);
return true;
#if (GCC_MINOR < 7)
@@ -4965,13 +4951,11 @@
#endif
tree return_type = gimple_call_return_type(stmt);
Type *ResultTy = ConvertType(return_type);
- Value* C[2] = {
- EmitMemory(gimple_call_arg(stmt, 0)),
- EmitMemory(gimple_call_arg(stmt, 1))
- };
+ Value *C[2] = { EmitMemory(gimple_call_arg(stmt, 0)),
+ EmitMemory(gimple_call_arg(stmt, 1)) };
C[0] = Builder.CreateBitCast(C[0], ResultTy->getPointerTo());
C[1] = Builder.CreateIntCast(C[1], ResultTy,
- /*isSigned*/!TYPE_UNSIGNED(return_type),
+ /*isSigned*/ !TYPE_UNSIGNED(return_type),
"cast");
Result = Builder.CreateAtomicRMW(AtomicRMWInst::Nand, C[0], C[1],
SequentiallyConsistent);
@@ -5002,38 +4986,42 @@
// doesn't get too clever, and is horribly broken anywhere else. It needs
// to use "store atomic [...] release".
Type *Ty;
- switch(DECL_FUNCTION_CODE(fndecl)) {
+ switch (DECL_FUNCTION_CODE(fndecl)) {
#if (GCC_MINOR < 7)
- case BUILT_IN_LOCK_RELEASE_16: // not handled; should use SSE on x86
+ case BUILT_IN_LOCK_RELEASE_16: // not handled; should use SSE on x86
#else
- case BUILT_IN_SYNC_LOCK_RELEASE_16: // not handled; should use SSE on x86
+ case BUILT_IN_SYNC_LOCK_RELEASE_16: // not handled; should use SSE on x86
#endif
- default:
- llvm_unreachable("Not handled; should use SSE on x86!");
+ default:
+ llvm_unreachable("Not handled; should use SSE on x86!");
#if (GCC_MINOR < 7)
- case BUILT_IN_LOCK_RELEASE_1:
+ case BUILT_IN_LOCK_RELEASE_1:
#else
- case BUILT_IN_SYNC_LOCK_RELEASE_1:
+ case BUILT_IN_SYNC_LOCK_RELEASE_1:
#endif
- Ty = Type::getInt8Ty(Context); break;
+ Ty = Type::getInt8Ty(Context);
+ break;
#if (GCC_MINOR < 7)
- case BUILT_IN_LOCK_RELEASE_2:
+ case BUILT_IN_LOCK_RELEASE_2:
#else
- case BUILT_IN_SYNC_LOCK_RELEASE_2:
+ case BUILT_IN_SYNC_LOCK_RELEASE_2:
#endif
- Ty = Type::getInt16Ty(Context); break;
+ Ty = Type::getInt16Ty(Context);
+ break;
#if (GCC_MINOR < 7)
- case BUILT_IN_LOCK_RELEASE_4:
+ case BUILT_IN_LOCK_RELEASE_4:
#else
- case BUILT_IN_SYNC_LOCK_RELEASE_4:
+ case BUILT_IN_SYNC_LOCK_RELEASE_4:
#endif
- Ty = Type::getInt32Ty(Context); break;
+ Ty = Type::getInt32Ty(Context);
+ break;
#if (GCC_MINOR < 7)
- case BUILT_IN_LOCK_RELEASE_8:
+ case BUILT_IN_LOCK_RELEASE_8:
#else
- case BUILT_IN_SYNC_LOCK_RELEASE_8:
+ case BUILT_IN_SYNC_LOCK_RELEASE_8:
#endif
- Ty = Type::getInt64Ty(Context); break;
+ Ty = Type::getInt64Ty(Context);
+ break;
}
Value *Ptr = EmitMemory(gimple_call_arg(stmt, 0));
Ptr = Builder.CreateBitCast(Ptr, Ty->getPointerTo());
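
The switch above only chooses the integer type matching the builtin's
_1/_2/_4/_8 suffix; the operation itself is a store of zero. GCC documents
__sync_lock_release as a release-ordered zero store, which the comment at
the top of this case says should really be emitted as
"store atomic [...] release". A portable model of the _4 variant with
std::atomic (a sketch, not the dragonegg output):

#include <atomic>
#include <cstdint>

static std::atomic<uint32_t> Lock(1);  // stands in for the _4 variant

static void lock_release_model() {
  Lock.store(0, std::memory_order_release);  // zero store, release ordering
}

int main() {
  lock_release_model();
  return (int) Lock.load(std::memory_order_acquire);  // 0 once released
}
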
@@ -5044,24 +5032,22 @@
#endif //FIXME: these break the build for backends that haven't implemented them
-
-#if 1 // FIXME: Should handle these GCC extensions eventually.
+#if 1 // FIXME: Should handle these GCC extensions eventually.
case BUILT_IN_LONGJMP: {
if (validate_gimple_arglist(stmt, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) {
tree value = gimple_call_arg(stmt, 1);
if (!isa<INTEGER_CST>(value) ||
cast<ConstantInt>(EmitMemory(value))->getValue() != 1) {
- error ("%<__builtin_longjmp%> second argument must be 1");
+ error("%<__builtin_longjmp%> second argument must be 1");
return false;
}
}
#if defined(TARGET_ARM) && defined(CONFIG_DARWIN_H)
Value *Buf = Emit(TREE_VALUE(arglist), 0);
Buf = Builder.CreateBitCast(Buf, Type::getInt8Ty(Context)->getPointerTo());
- Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
- Intrinsic::eh_sjlj_longjmp),
- Buf);
+ Builder.CreateCall(
+ Intrinsic::getDeclaration(TheModule, Intrinsic::eh_sjlj_longjmp), Buf);
Result = 0;
return true;
#endif
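
The check above enforces the documented contract: __builtin_longjmp's
second argument must be the literal 1, since unlike C longjmp no return
value is carried. A minimal GCC/Clang sketch of the pair (these builtins
are really intended for the compiler's own SJLJ machinery; the buffer is
five words, as GCC documents):

// five-word jump buffer required by __builtin_setjmp
static void *Buf[5];

int main() {
  if (__builtin_setjmp(Buf) == 0) {
    __builtin_longjmp(Buf, 1);  // any value other than 1 is rejected
  }
  return 0;  // reached when the longjmp lands back at the setjmp
}
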
@@ -5081,568 +5067,575 @@
case BUILT_IN_SETJMP_DISPATCHER:
case BUILT_IN_SETJMP_RECEIVER:
case BUILT_IN_UPDATE_SETJMP_BUF:
+ // FIXME: HACK: Just ignore these.
+ {
+ Type *Ty = ConvertType(gimple_call_return_type(stmt));
+ if (!Ty->isVoidTy())
+ Result = Constant::getNullValue(Ty);
+ return true;
+ }
+#endif // FIXME: Should handle these GCC extensions eventually.
+ }
+ return false;
+ }
- // FIXME: HACK: Just ignore these.
- {
- Type *Ty = ConvertType(gimple_call_return_type(stmt));
- if (!Ty->isVoidTy())
- Result = Constant::getNullValue(Ty);
+ bool TreeToLLVM::EmitBuiltinUnaryOp(Value *InVal, Value *&Result,
+ Intrinsic::ID Id) {
+ // The intrinsic might be overloaded in which case the argument is of
+ // varying type. Make sure that we specify the actual type for "iAny"
+ // by passing it as the 3rd and 4th parameters. This isn't needed for
+ // most intrinsics, but is needed for ctpop, cttz, ctlz.
+ Type *Ty = InVal->getType();
+ Result = Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Id, Ty),
+ InVal);
return true;
}
-#endif // FIXME: Should handle these GCC extensions eventually.
- }
- return false;
-}
-
-bool TreeToLLVM::EmitBuiltinUnaryOp(Value *InVal, Value *&Result,
- Intrinsic::ID Id) {
- // The intrinsic might be overloaded in which case the argument is of
- // varying type. Make sure that we specify the actual type for "iAny"
- // by passing it as the 3rd and 4th parameters. This isn't needed for
- // most intrinsics, but is needed for ctpop, cttz, ctlz.
- Type *Ty = InVal->getType();
- Result = Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Id, Ty),
- InVal);
- return true;
-}
-
-Value *TreeToLLVM::EmitBuiltinBitCountIntrinsic(gimple stmt, Intrinsic::ID Id) {
- Value *Amt = EmitMemory(gimple_call_arg(stmt, 0));
- Value *Result = Builder.CreateCall2(Intrinsic::getDeclaration(TheModule, Id,
- Amt->getType()),
- Amt, Builder.getTrue());
- tree return_type = gimple_call_return_type(stmt);
- Type *DestTy = ConvertType(return_type);
- return Builder.CreateIntCast(Result, DestTy,
- /*isSigned*/!TYPE_UNSIGNED(return_type),
- "cast");
-}
-
-Value *TreeToLLVM::EmitBuiltinSQRT(gimple stmt) {
- Value *Amt = EmitMemory(gimple_call_arg(stmt, 0));
- Type* Ty = Amt->getType();
- return Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
- Intrinsic::sqrt, Ty),
- Amt);
-}
+ Value *TreeToLLVM::EmitBuiltinBitCountIntrinsic(gimple stmt,
+ Intrinsic::ID Id) {
+ Value *Amt = EmitMemory(gimple_call_arg(stmt, 0));
+ Value *Result = Builder.CreateCall2(Intrinsic::getDeclaration(
+ TheModule, Id, Amt->getType()),
+ Amt, Builder.getTrue());
+ tree return_type = gimple_call_return_type(stmt);
+ Type *DestTy = ConvertType(return_type);
+ return Builder.CreateIntCast(
+ Result, DestTy, /*isSigned*/ !TYPE_UNSIGNED(return_type), "cast");
+ }
-Value *TreeToLLVM::EmitBuiltinPOWI(gimple stmt) {
- if (!validate_gimple_arglist(stmt, REAL_TYPE, INTEGER_TYPE, VOID_TYPE))
- return 0;
+ Value *TreeToLLVM::EmitBuiltinSQRT(gimple stmt) {
+ Value *Amt = EmitMemory(gimple_call_arg(stmt, 0));
+ Type *Ty = Amt->getType();
- Value *Val = EmitMemory(gimple_call_arg(stmt, 0));
- Value *Pow = EmitMemory(gimple_call_arg(stmt, 1));
- Type *Ty = Val->getType();
- Pow = Builder.CreateIntCast(Pow, Type::getInt32Ty(Context), /*isSigned*/true);
-
- SmallVector<Value *,2> Args;
- Args.push_back(Val);
- Args.push_back(Pow);
- return Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
- Intrinsic::powi, Ty),
- Args);
-}
+ return Builder.CreateCall(
+ Intrinsic::getDeclaration(TheModule, Intrinsic::sqrt, Ty), Amt);
+ }
-Value *TreeToLLVM::EmitBuiltinPOW(gimple stmt) {
- if (!validate_gimple_arglist(stmt, REAL_TYPE, REAL_TYPE, VOID_TYPE))
- return 0;
+ Value *TreeToLLVM::EmitBuiltinPOWI(gimple stmt) {
+ if (!validate_gimple_arglist(stmt, REAL_TYPE, INTEGER_TYPE, VOID_TYPE))
+ return 0;
- Value *Val = EmitMemory(gimple_call_arg(stmt, 0));
- Value *Pow = EmitMemory(gimple_call_arg(stmt, 1));
- Type *Ty = Val->getType();
-
- SmallVector<Value *,2> Args;
- Args.push_back(Val);
- Args.push_back(Pow);
- return Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
- Intrinsic::pow, Ty),
- Args);
-}
+ Value *Val = EmitMemory(gimple_call_arg(stmt, 0));
+ Value *Pow = EmitMemory(gimple_call_arg(stmt, 1));
+ Type *Ty = Val->getType();
+ Pow = Builder.CreateIntCast(Pow, Type::getInt32Ty(Context),
+ /*isSigned*/ true);
+
+ SmallVector<Value *, 2> Args;
+ Args.push_back(Val);
+ Args.push_back(Pow);
+ return Builder.CreateCall(
+ Intrinsic::getDeclaration(TheModule, Intrinsic::powi, Ty), Args);
+ }
-Value *TreeToLLVM::EmitBuiltinLCEIL(gimple stmt) {
- if (!validate_gimple_arglist(stmt, REAL_TYPE, VOID_TYPE))
- return 0;
+ Value *TreeToLLVM::EmitBuiltinPOW(gimple stmt) {
+ if (!validate_gimple_arglist(stmt, REAL_TYPE, REAL_TYPE, VOID_TYPE))
+ return 0;
- // Cast the result of "ceil" to the appropriate integer type.
- // First call the appropriate version of "ceil".
- tree op = gimple_call_arg(stmt, 0);
- StringRef Name = SelectFPName(TREE_TYPE(op), "ceilf", "ceil", "ceill");
- assert(!Name.empty() && "Unsupported floating point type!");
- CallInst *Call = EmitSimpleCall(Name, TREE_TYPE(op), op, NULL);
- Call->setDoesNotThrow();
- Call->setDoesNotAccessMemory();
+ Value *Val = EmitMemory(gimple_call_arg(stmt, 0));
+ Value *Pow = EmitMemory(gimple_call_arg(stmt, 1));
+ Type *Ty = Val->getType();
+
+ SmallVector<Value *, 2> Args;
+ Args.push_back(Val);
+ Args.push_back(Pow);
+ return Builder.CreateCall(
+ Intrinsic::getDeclaration(TheModule, Intrinsic::pow, Ty), Args);
+ }
- // Then type cast the result of the "ceil" call.
- tree type = gimple_call_return_type(stmt);
- Type *RetTy = getRegType(type);
- return TYPE_UNSIGNED(type) ? Builder.CreateFPToUI(Call, RetTy) :
- Builder.CreateFPToSI(Call, RetTy);
-}
+ Value *TreeToLLVM::EmitBuiltinLCEIL(gimple stmt) {
+ if (!validate_gimple_arglist(stmt, REAL_TYPE, VOID_TYPE))
+ return 0;
-Value *TreeToLLVM::EmitBuiltinLFLOOR(gimple stmt) {
- if (!validate_gimple_arglist(stmt, REAL_TYPE, VOID_TYPE))
- return 0;
+ // Cast the result of "ceil" to the appropriate integer type.
+ // First call the appropriate version of "ceil".
+ tree op = gimple_call_arg(stmt, 0);
+ StringRef Name = SelectFPName(TREE_TYPE(op), "ceilf", "ceil", "ceill");
+ assert(!Name.empty() && "Unsupported floating point type!");
+ CallInst *Call = EmitSimpleCall(Name, TREE_TYPE(op), op, NULL);
+ Call->setDoesNotThrow();
+ Call->setDoesNotAccessMemory();
+
+ // Then type cast the result of the "ceil" call.
+ tree type = gimple_call_return_type(stmt);
+ Type *RetTy = getRegType(type);
+ return TYPE_UNSIGNED(type) ? Builder.CreateFPToUI(Call, RetTy) :
+ Builder.CreateFPToSI(Call, RetTy);
+ }
- // Cast the result of "floor" to the appropriate integer type.
- // First call the appropriate version of "floor".
- tree op = gimple_call_arg(stmt, 0);
- StringRef Name = SelectFPName(TREE_TYPE(op), "floorf", "floor", "floorl");
- assert(!Name.empty() && "Unsupported floating point type!");
- CallInst *Call = EmitSimpleCall(Name, TREE_TYPE(op), op, NULL);
- Call->setDoesNotThrow();
- Call->setDoesNotAccessMemory();
+ Value *TreeToLLVM::EmitBuiltinLFLOOR(gimple stmt) {
+ if (!validate_gimple_arglist(stmt, REAL_TYPE, VOID_TYPE))
+ return 0;
- // Then type cast the result of the "floor" call.
- tree type = gimple_call_return_type(stmt);
- Type *RetTy = getRegType(type);
- return TYPE_UNSIGNED(type) ? Builder.CreateFPToUI(Call, RetTy) :
- Builder.CreateFPToSI(Call, RetTy);
-}
+ // Cast the result of "floor" to the appropriate integer type.
+ // First call the appropriate version of "floor".
+ tree op = gimple_call_arg(stmt, 0);
+ StringRef Name = SelectFPName(TREE_TYPE(op), "floorf", "floor", "floorl");
+ assert(!Name.empty() && "Unsupported floating point type!");
+ CallInst *Call = EmitSimpleCall(Name, TREE_TYPE(op), op, NULL);
+ Call->setDoesNotThrow();
+ Call->setDoesNotAccessMemory();
+
+ // Then type cast the result of the "floor" call.
+ tree type = gimple_call_return_type(stmt);
+ Type *RetTy = getRegType(type);
+ return TYPE_UNSIGNED(type) ? Builder.CreateFPToUI(Call, RetTy) :
+ Builder.CreateFPToSI(Call, RetTy);
+ }
-Value *TreeToLLVM::EmitBuiltinCEXPI(gimple stmt) {
- if (!validate_gimple_arglist(stmt, REAL_TYPE, VOID_TYPE))
- return 0;
+ Value *TreeToLLVM::EmitBuiltinCEXPI(gimple stmt) {
+ if (!validate_gimple_arglist(stmt, REAL_TYPE, VOID_TYPE))
+ return 0;
- if (TARGET_HAS_SINCOS) {
- // exp(i*arg) = cos(arg) + i*sin(arg). Emit a call to sincos. First
- // determine which version of sincos to call.
- tree arg = gimple_call_arg(stmt, 0);
- tree arg_type = TREE_TYPE(arg);
- StringRef Name = SelectFPName(arg_type, "sincosf", "sincos", "sincosl");
- assert(!Name.empty() && "Unsupported floating point type!");
-
- // Create stack slots to store the real (cos) and imaginary (sin) parts in.
- Value *Val = EmitRegister(arg);
- Value *SinPtr = CreateTemporary(Val->getType());
- Value *CosPtr = CreateTemporary(Val->getType());
-
- // Get the LLVM function declaration for sincos.
- Type *ArgTys[3] =
- { Val->getType(), SinPtr->getType(), CosPtr->getType() };
- FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context),
- ArgTys, /*isVarArg*/false);
- Constant *Func = TheModule->getOrInsertFunction(Name, FTy);
+ if (TARGET_HAS_SINCOS) {
+ // exp(i*arg) = cos(arg) + i*sin(arg). Emit a call to sincos. First
+ // determine which version of sincos to call.
+ tree arg = gimple_call_arg(stmt, 0);
+ tree arg_type = TREE_TYPE(arg);
+ StringRef Name = SelectFPName(arg_type, "sincosf", "sincos", "sincosl");
+ assert(!Name.empty() && "Unsupported floating point type!");
+
+ // Create stack slots to store the real (cos) and imaginary (sin) parts in.
+ Value *Val = EmitRegister(arg);
+ Value *SinPtr = CreateTemporary(Val->getType());
+ Value *CosPtr = CreateTemporary(Val->getType());
+
+ // Get the LLVM function declaration for sincos.
+ Type *ArgTys[3] = { Val->getType(), SinPtr->getType(),
+ CosPtr->getType() };
+ FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context), ArgTys,
+ /*isVarArg*/ false);
+ Constant *Func = TheModule->getOrInsertFunction(Name, FTy);
- // Determine the calling convention.
- CallingConv::ID CC = CallingConv::C;
+ // Determine the calling convention.
+ CallingConv::ID CC = CallingConv::C;
#ifdef TARGET_ADJUST_LLVM_CC
- // Query the target for the calling convention to use.
- tree fntype = build_function_type_list(void_type_node, arg_type,
- TYPE_POINTER_TO(arg_type),
- TYPE_POINTER_TO(arg_type),
- NULL_TREE);
- TARGET_ADJUST_LLVM_CC(CC, fntype);
-#endif
-
- // If the function already existed with the wrong prototype then don't try to
- // muck with its calling convention. Otherwise, set the calling convention.
- if (Function *F = dyn_cast<Function>(Func))
- F->setCallingConv(CC);
-
- // Call sincos.
- Value *Args[3] = { Val, SinPtr, CosPtr };
- CallInst *CI = Builder.CreateCall(Func, Args);
- CI->setCallingConv(CC);
- CI->setDoesNotThrow();
-
- // Load out the real (cos) and imaginary (sin) parts.
- Value *Sin = Builder.CreateLoad(SinPtr);
- Value *Cos = Builder.CreateLoad(CosPtr);
+ // Query the target for the calling convention to use.
+ tree fntype = build_function_type_list(
+ void_type_node, arg_type, TYPE_POINTER_TO(arg_type),
+ TYPE_POINTER_TO(arg_type), NULL_TREE);
+ TARGET_ADJUST_LLVM_CC(CC, fntype);
+#endif
+
+ // If the function already existed with the wrong prototype then don't try to
+ // muck with its calling convention. Otherwise, set the calling convention.
+ if (Function *F = dyn_cast<Function>(Func))
+ F->setCallingConv(CC);
+
+ // Call sincos.
+ Value *Args[3] = { Val, SinPtr, CosPtr };
+ CallInst *CI = Builder.CreateCall(Func, Args);
+ CI->setCallingConv(CC);
+ CI->setDoesNotThrow();
+
+ // Load out the real (cos) and imaginary (sin) parts.
+ Value *Sin = Builder.CreateLoad(SinPtr);
+ Value *Cos = Builder.CreateLoad(CosPtr);
- // Return the complex number "cos(arg) + i*sin(arg)".
- return CreateComplex(Cos, Sin);
- } else {
- // Emit a call to cexp. First determine which version of cexp to call.
- tree arg = gimple_call_arg(stmt, 0);
- tree arg_type = TREE_TYPE(arg);
- StringRef Name = SelectFPName(arg_type, "cexpf", "cexp", "cexpl");
- assert(!Name.empty() && "Unsupported floating point type!");
-
- // Get the GCC and LLVM function types for cexp.
- tree cplx_type = gimple_call_return_type(stmt);
- tree fntype = build_function_type_list(cplx_type, cplx_type, NULL_TREE);
- FunctionType *FTy = cast<FunctionType>(ConvertType(fntype));
+ // Return the complex number "cos(arg) + i*sin(arg)".
+ return CreateComplex(Cos, Sin);
+ } else {
+ // Emit a call to cexp. First determine which version of cexp to call.
+ tree arg = gimple_call_arg(stmt, 0);
+ tree arg_type = TREE_TYPE(arg);
+ StringRef Name = SelectFPName(arg_type, "cexpf", "cexp", "cexpl");
+ assert(!Name.empty() && "Unsupported floating point type!");
+
+ // Get the GCC and LLVM function types for cexp.
+ tree cplx_type = gimple_call_return_type(stmt);
+ tree fntype = build_function_type_list(cplx_type, cplx_type, NULL_TREE);
+ FunctionType *FTy = cast<FunctionType>(ConvertType(fntype));
- // Get the LLVM function declaration for cexp.
- Constant *Func = TheModule->getOrInsertFunction(Name, FTy);
+ // Get the LLVM function declaration for cexp.
+ Constant *Func = TheModule->getOrInsertFunction(Name, FTy);
- // Determine the calling convention.
- CallingConv::ID CC = CallingConv::C;
+ // Determine the calling convention.
+ CallingConv::ID CC = CallingConv::C;
#ifdef TARGET_ADJUST_LLVM_CC
- // Query the target for the calling convention to use.
- TARGET_ADJUST_LLVM_CC(CC, fntype);
+ // Query the target for the calling convention to use.
+ TARGET_ADJUST_LLVM_CC(CC, fntype);
#endif
- // If the function already existed with the wrong prototype then don't try to
- // muck with its calling convention. Otherwise, set the calling convention.
- if (Function *F = dyn_cast<Function>(Func))
- F->setCallingConv(CC);
-
- // Form the complex number "0 + i*arg".
- Value *Arg = EmitRegister(arg);
- Value *CplxArg = CreateComplex(Constant::getNullValue(Arg->getType()), Arg);
-
- // Call cexp and return the result. This is rather painful because complex
- // numbers may be passed in funky ways and we don't have a proper interface
- // for marshalling call parameters.
- SmallVector<Value*, 16> CallOperands;
- FunctionCallArgumentConversion Client(CallOperands, FTy, /*destloc*/0,
- /*ReturnSlotOpt*/false, Builder, CC);
- DefaultABI ABIConverter(Client);
+ // If the function already existed with the wrong prototype then don't try to
+ // muck with its calling convention. Otherwise, set the calling convention.
+ if (Function *F = dyn_cast<Function>(Func))
+ F->setCallingConv(CC);
+
+ // Form the complex number "0 + i*arg".
+ Value *Arg = EmitRegister(arg);
+ Value *CplxArg = CreateComplex(Constant::getNullValue(Arg->getType()),
+ Arg);
+
+ // Call cexp and return the result. This is rather painful because complex
+ // numbers may be passed in funky ways and we don't have a proper interface
+ // for marshalling call parameters.
+ SmallVector<Value *, 16> CallOperands;
+ FunctionCallArgumentConversion Client(CallOperands, FTy, /*destloc*/ 0,
+ /*ReturnSlotOpt*/ false, Builder,
+ CC);
+ DefaultABI ABIConverter(Client);
+
+ // Handle the result.
+ ABIConverter.HandleReturnType(cplx_type, fntype, false);
+
+ // Push the argument.
+ bool PassedInMemory;
+ Type *CplxTy = CplxArg->getType();
+ if (LLVM_SHOULD_PASS_AGGREGATE_AS_FCA(cplx_type, CplxTy)) {
+ Client.pushValue(CplxArg);
+ PassedInMemory = false;
+ } else {
+ // Push the address of a temporary copy.
+ MemRef Copy = CreateTempLoc(CplxTy);
+ StoreRegisterToMemory(CplxArg, Copy, cplx_type, 0, Builder);
+ Client.pushAddress(Copy.Ptr);
+ PassedInMemory = true;
+ }
+
+ AttrBuilder AttrBuilder;
+ std::vector<Type *> ScalarArgs;
+ ABIConverter.HandleArgument(cplx_type, ScalarArgs, &AttrBuilder);
+ assert(!AttrBuilder.hasAttributes() &&
+ "Got attributes but none given!");
+ Client.clear();
+
+ // Create the call.
+ CallInst *CI = Builder.CreateCall(Func, CallOperands);
+ CI->setCallingConv(CC);
+ CI->setDoesNotThrow();
+ if (!PassedInMemory)
+ CI->setDoesNotAccessMemory();
+
+ // Extract and return the result.
+ if (Client.isShadowReturn())
+ return Client.EmitShadowResult(cplx_type, 0);
+
+ if (Client.isAggrReturn()) {
+ // Extract to a temporary then load the value out later.
+ MemRef Target = CreateTempLoc(CplxTy);
+
+ assert(DL.getTypeAllocSize(CI->getType()) <=
+ DL.getTypeAllocSize(CplxTy) &&
+ "Complex number returned in too large registers!");
+ Value *Dest = Builder.CreateBitCast(Target.Ptr,
+ CI->getType()->getPointerTo());
+ LLVM_EXTRACT_MULTIPLE_RETURN_VALUE(CI, Dest, Target.Volatile,
+ Builder);
+ return Builder.CreateLoad(Target.Ptr);
+ }
- // Handle the result.
- ABIConverter.HandleReturnType(cplx_type, fntype, false);
+ if (CI->getType() == CplxTy)
+ return CI; // Normal scalar return.
- // Push the argument.
- bool PassedInMemory;
- Type *CplxTy = CplxArg->getType();
- if (LLVM_SHOULD_PASS_AGGREGATE_AS_FCA(cplx_type, CplxTy)) {
- Client.pushValue(CplxArg);
- PassedInMemory = false;
- } else {
- // Push the address of a temporary copy.
- MemRef Copy = CreateTempLoc(CplxTy);
- StoreRegisterToMemory(CplxArg, Copy, cplx_type, 0, Builder);
- Client.pushAddress(Copy.Ptr);
- PassedInMemory = true;
+ // Probably { float, float } being returned as a double.
+ assert(DL.getTypeAllocSize(CI->getType()) ==
+ DL.getTypeAllocSize(CplxTy) &&
+ "Size mismatch in scalar to scalar conversion!");
+ Value *Tmp = CreateTemporary(CI->getType());
+ Builder.CreateStore(CI, Tmp);
+ Type *CplxPtrTy = CplxTy->getPointerTo();
+ return Builder.CreateLoad(Builder.CreateBitCast(Tmp, CplxPtrTy));
+ }
}
- AttrBuilder AttrBuilder;
- std::vector<Type*> ScalarArgs;
- ABIConverter.HandleArgument(cplx_type, ScalarArgs, &AttrBuilder);
- assert(!AttrBuilder.hasAttributes() && "Got attributes but none given!");
- Client.clear();
+ bool TreeToLLVM::EmitBuiltinConstantP(gimple stmt, Value *&Result) {
+ Result = Constant::getNullValue(
+ ConvertType(gimple_call_return_type(stmt)));
+ return true;
+ }
- // Create the call.
- CallInst *CI = Builder.CreateCall(Func, CallOperands);
- CI->setCallingConv(CC);
- CI->setDoesNotThrow();
- if (!PassedInMemory)
- CI->setDoesNotAccessMemory();
-
- // Extract and return the result.
- if (Client.isShadowReturn())
- return Client.EmitShadowResult(cplx_type, 0);
-
- if (Client.isAggrReturn()) {
- // Extract to a temporary then load the value out later.
- MemRef Target = CreateTempLoc(CplxTy);
+ bool TreeToLLVM::EmitBuiltinExtendPointer(gimple stmt, Value *&Result) {
+ tree arg0 = gimple_call_arg(stmt, 0);
+ Value *Amt = EmitMemory(arg0);
+ bool AmtIsSigned = !TYPE_UNSIGNED(TREE_TYPE(arg0));
+ bool ExpIsSigned = !TYPE_UNSIGNED(gimple_call_return_type(stmt));
+ Result = CastToAnyType(Amt, AmtIsSigned,
+ ConvertType(gimple_call_return_type(stmt)),
+ ExpIsSigned);
+ return true;
+ }
- assert(DL.getTypeAllocSize(CI->getType()) <= DL.getTypeAllocSize(CplxTy)
- && "Complex number returned in too large registers!");
- Value *Dest = Builder.CreateBitCast(Target.Ptr,
- CI->getType()->getPointerTo());
- LLVM_EXTRACT_MULTIPLE_RETURN_VALUE(CI, Dest, Target.Volatile, Builder);
- return Builder.CreateLoad(Target.Ptr);
+ /// OptimizeIntoPlainBuiltIn - Return true if it's safe to lower the object
+ /// size checking builtin calls (e.g. __builtin___memcpy_chk) into the
+ /// plain non-checking calls. If the size argument is either -1 (unknown)
+ /// or large enough to ensure no overflow (>= len), then it's safe to do so.
+ static bool OptimizeIntoPlainBuiltIn(gimple stmt, Value *Len, Value *Size) {
+ if (BitCastInst *SizeBC = dyn_cast<BitCastInst>(Size))
+ Size = SizeBC->getOperand(0);
+ ConstantInt *SizeCI = dyn_cast<ConstantInt>(Size);
+ if (!SizeCI)
+ return false;
+ if (SizeCI->isAllOnesValue())
+ // If size is -1, convert to plain memcpy, etc.
+ return true;
+
+ if (BitCastInst *LenBC = dyn_cast<BitCastInst>(Len))
+ Len = LenBC->getOperand(0);
+ ConstantInt *LenCI = dyn_cast<ConstantInt>(Len);
+ if (!LenCI)
+ return false;
+ if (SizeCI->getValue().ult(LenCI->getValue())) {
+ warning(0, "call to %D will always overflow destination buffer",
+ gimple_call_fndecl(stmt));
+ return false;
+ }
+ return true;
}
- if (CI->getType() == CplxTy)
- return CI; // Normal scalar return.
+ /// EmitBuiltinMemCopy - Emit an llvm.memcpy or llvm.memmove intrinsic,
+ /// depending on the value of isMemMove.
+ bool TreeToLLVM::EmitBuiltinMemCopy(gimple stmt, Value *&Result,
+ bool isMemMove, bool SizeCheck) {
+ if (SizeCheck) {
+ if (!validate_gimple_arglist(stmt, POINTER_TYPE, POINTER_TYPE,
+ INTEGER_TYPE, INTEGER_TYPE, VOID_TYPE))
+ return false;
+ } else {
+ if (!validate_gimple_arglist(stmt, POINTER_TYPE, POINTER_TYPE,
+ INTEGER_TYPE, VOID_TYPE))
+ return false;
+ }
+
+ tree Dst = gimple_call_arg(stmt, 0);
+ tree Src = gimple_call_arg(stmt, 1);
+ unsigned SrcAlign = getPointerAlignment(Src);
+ unsigned DstAlign = getPointerAlignment(Dst);
+
+ Value *DstV = EmitMemory(Dst);
+ Value *SrcV = EmitMemory(Src);
+ Value *Len = EmitMemory(gimple_call_arg(stmt, 2));
+ if (SizeCheck) {
+ tree SizeArg = gimple_call_arg(stmt, 3);
+ Value *Size = EmitMemory(SizeArg);
+ if (!OptimizeIntoPlainBuiltIn(stmt, Len, Size))
+ return false;
+ }
+
+ Result = isMemMove ?
+ EmitMemMove(DstV, SrcV, Len, std::min(SrcAlign, DstAlign)) :
+ EmitMemCpy(DstV, SrcV, Len, std::min(SrcAlign, DstAlign));
+ return true;
+ }
- // Probably { float, float } being returned as a double.
- assert(DL.getTypeAllocSize(CI->getType()) == DL.getTypeAllocSize(CplxTy) &&
- "Size mismatch in scalar to scalar conversion!");
- Value *Tmp = CreateTemporary(CI->getType());
- Builder.CreateStore(CI, Tmp);
- Type *CplxPtrTy = CplxTy->getPointerTo();
- return Builder.CreateLoad(Builder.CreateBitCast(Tmp, CplxPtrTy));
- }
-}
+ bool TreeToLLVM::EmitBuiltinMemSet(gimple stmt, Value *&Result,
+ bool SizeCheck) {
+ if (SizeCheck) {
+ if (!validate_gimple_arglist(stmt, POINTER_TYPE, INTEGER_TYPE,
+ INTEGER_TYPE, INTEGER_TYPE, VOID_TYPE))
+ return false;
+ } else {
+ if (!validate_gimple_arglist(stmt, POINTER_TYPE, INTEGER_TYPE,
+ INTEGER_TYPE, VOID_TYPE))
+ return false;
+ }
+
+ tree Dst = gimple_call_arg(stmt, 0);
+ unsigned DstAlign = getPointerAlignment(Dst);
+
+ Value *DstV = EmitMemory(Dst);
+ Value *Val = EmitMemory(gimple_call_arg(stmt, 1));
+ Value *Len = EmitMemory(gimple_call_arg(stmt, 2));
+ if (SizeCheck) {
+ tree SizeArg = gimple_call_arg(stmt, 3);
+ Value *Size = EmitMemory(SizeArg);
+ if (!OptimizeIntoPlainBuiltIn(stmt, Len, Size))
+ return false;
+ }
+ Result = EmitMemSet(DstV, Val, Len, DstAlign);
+ return true;
+ }
-bool TreeToLLVM::EmitBuiltinConstantP(gimple stmt, Value *&Result) {
- Result = Constant::getNullValue(ConvertType(gimple_call_return_type(stmt)));
- return true;
-}
-
-bool TreeToLLVM::EmitBuiltinExtendPointer(gimple stmt, Value *&Result) {
- tree arg0 = gimple_call_arg(stmt, 0);
- Value *Amt = EmitMemory(arg0);
- bool AmtIsSigned = !TYPE_UNSIGNED(TREE_TYPE(arg0));
- bool ExpIsSigned = !TYPE_UNSIGNED(gimple_call_return_type(stmt));
- Result = CastToAnyType(Amt, AmtIsSigned,
- ConvertType(gimple_call_return_type(stmt)),
- ExpIsSigned);
- return true;
-}
-
-/// OptimizeIntoPlainBuiltIn - Return true if it's safe to lower the object
-/// size checking builtin calls (e.g. __builtin___memcpy_chk into the
-/// plain non-checking calls. If the size of the argument is either -1 (unknown)
-/// or large enough to ensure no overflow (> len), then it's safe to do so.
-static bool OptimizeIntoPlainBuiltIn(gimple stmt, Value *Len, Value *Size) {
- if (BitCastInst *SizeBC = dyn_cast<BitCastInst>(Size))
- Size = SizeBC->getOperand(0);
- ConstantInt *SizeCI = dyn_cast<ConstantInt>(Size);
- if (!SizeCI)
- return false;
- if (SizeCI->isAllOnesValue())
- // If size is -1, convert to plain memcpy, etc.
- return true;
+ bool TreeToLLVM::EmitBuiltinBZero(gimple stmt, Value *&/*Result*/) {
+ if (!validate_gimple_arglist(stmt, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE))
+ return false;
- if (BitCastInst *LenBC = dyn_cast<BitCastInst>(Len))
- Len = LenBC->getOperand(0);
- ConstantInt *LenCI = dyn_cast<ConstantInt>(Len);
- if (!LenCI)
- return false;
- if (SizeCI->getValue().ult(LenCI->getValue())) {
- warning(0, "call to %D will always overflow destination buffer",
- gimple_call_fndecl(stmt));
- return false;
- }
- return true;
-}
+ tree Dst = gimple_call_arg(stmt, 0);
+ unsigned DstAlign = getPointerAlignment(Dst);
-/// EmitBuiltinMemCopy - Emit an llvm.memcpy or llvm.memmove intrinsic,
-/// depending on the value of isMemMove.
-bool TreeToLLVM::EmitBuiltinMemCopy(gimple stmt, Value *&Result, bool isMemMove,
- bool SizeCheck) {
- if (SizeCheck) {
- if (!validate_gimple_arglist(stmt, POINTER_TYPE, POINTER_TYPE,
- INTEGER_TYPE, INTEGER_TYPE, VOID_TYPE))
- return false;
- } else {
- if (!validate_gimple_arglist(stmt, POINTER_TYPE, POINTER_TYPE,
- INTEGER_TYPE, VOID_TYPE))
- return false;
- }
+ Value *DstV = EmitMemory(Dst);
+ Value *Val = Constant::getNullValue(Type::getInt32Ty(Context));
+ Value *Len = EmitMemory(gimple_call_arg(stmt, 1));
+ EmitMemSet(DstV, Val, Len, DstAlign);
+ return true;
+ }
- tree Dst = gimple_call_arg(stmt, 0);
- tree Src = gimple_call_arg(stmt, 1);
- unsigned SrcAlign = getPointerAlignment(Src);
- unsigned DstAlign = getPointerAlignment(Dst);
-
- Value *DstV = EmitMemory(Dst);
- Value *SrcV = EmitMemory(Src);
- Value *Len = EmitMemory(gimple_call_arg(stmt, 2));
- if (SizeCheck) {
- tree SizeArg = gimple_call_arg(stmt, 3);
- Value *Size = EmitMemory(SizeArg);
- if (!OptimizeIntoPlainBuiltIn(stmt, Len, Size))
- return false;
- }
+ bool TreeToLLVM::EmitBuiltinPrefetch(gimple stmt) {
+ if (!validate_gimple_arglist(stmt, POINTER_TYPE, 0))
+ return false;
- Result = isMemMove ?
- EmitMemMove(DstV, SrcV, Len, std::min(SrcAlign, DstAlign)) :
- EmitMemCpy(DstV, SrcV, Len, std::min(SrcAlign, DstAlign));
- return true;
-}
+ Value *Ptr = EmitMemory(gimple_call_arg(stmt, 0));
+ Value *ReadWrite = 0;
+ Value *Locality = 0;
+ Value *Data = 0;
+
+ if (gimple_call_num_args(stmt) > 1) { // Args 1/2 are optional
+ ReadWrite = EmitMemory(gimple_call_arg(stmt, 1));
+ if (!isa<ConstantInt>(ReadWrite)) {
+ error("second argument to %<__builtin_prefetch%> must be a constant");
+ ReadWrite = 0;
+ } else if (cast<ConstantInt>(ReadWrite)->getZExtValue() > 1) {
+ warning(0, "invalid second argument to %<__builtin_prefetch%>;"
+ " using zero");
+ ReadWrite = 0;
+ } else {
+ ReadWrite = TheFolder->CreateIntCast(cast<Constant>(ReadWrite),
+ Type::getInt32Ty(Context),
+ /*isSigned*/ false);
+ }
-bool TreeToLLVM::EmitBuiltinMemSet(gimple stmt, Value *&Result, bool SizeCheck){
- if (SizeCheck) {
- if (!validate_gimple_arglist(stmt, POINTER_TYPE, INTEGER_TYPE,
- INTEGER_TYPE, INTEGER_TYPE, VOID_TYPE))
- return false;
- } else {
- if (!validate_gimple_arglist(stmt, POINTER_TYPE, INTEGER_TYPE,
- INTEGER_TYPE, VOID_TYPE))
- return false;
- }
+ if (gimple_call_num_args(stmt) > 2) {
+ Locality = EmitMemory(gimple_call_arg(stmt, 2));
+ if (!isa<ConstantInt>(Locality)) {
+ error(
+ "third argument to %<__builtin_prefetch%> must be a constant");
+ Locality = 0;
+ } else if (cast<ConstantInt>(Locality)->getZExtValue() > 3) {
+ warning(
+ 0, "invalid third argument to %<__builtin_prefetch%>; using 3");
+ Locality = 0;
+ } else {
+ Locality = TheFolder->CreateIntCast(cast<Constant>(Locality),
+ Type::getInt32Ty(Context),
+ /*isSigned*/ false);
+ }
+ }
+ }
- tree Dst = gimple_call_arg(stmt, 0);
- unsigned DstAlign = getPointerAlignment(Dst);
+ // Default to highly local read.
+ if (ReadWrite == 0)
+ ReadWrite = Builder.getInt32(0);
+ if (Locality == 0)
+ Locality = Builder.getInt32(3);
+ if (Data == 0)
+ Data = Builder.getInt32(1);
+
+ Ptr = Builder.CreateBitCast(Ptr, Type::getInt8PtrTy(Context));
+
+ Builder.CreateCall4(Intrinsic::getDeclaration(TheModule,
+ Intrinsic::prefetch),
+ Ptr, ReadWrite, Locality, Data);
+ return true;
+ }
- Value *DstV = EmitMemory(Dst);
- Value *Val = EmitMemory(gimple_call_arg(stmt, 1));
- Value *Len = EmitMemory(gimple_call_arg(stmt, 2));
- if (SizeCheck) {
- tree SizeArg = gimple_call_arg(stmt, 3);
- Value *Size = EmitMemory(SizeArg);
- if (!OptimizeIntoPlainBuiltIn(stmt, Len, Size))
- return false;
- }
- Result = EmitMemSet(DstV, Val, Len, DstAlign);
- return true;
-}
+ /// EmitBuiltinReturnAddr - Emit an llvm.returnaddress or llvm.frameaddress
+ /// instruction, depending on whether isFrame is true or not.
+ bool TreeToLLVM::EmitBuiltinReturnAddr(gimple stmt, Value *&Result,
+ bool isFrame) {
+ if (!validate_gimple_arglist(stmt, INTEGER_TYPE, VOID_TYPE))
+ return false;
-bool TreeToLLVM::EmitBuiltinBZero(gimple stmt, Value *&/*Result*/) {
- if (!validate_gimple_arglist(stmt, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE))
- return false;
+ ConstantInt *Level = dyn_cast<ConstantInt>(
+ EmitMemory(gimple_call_arg(stmt, 0)));
+ if (!Level) {
+ if (isFrame)
+ error("invalid argument to %<__builtin_frame_address%>");
+ else
+ error("invalid argument to %<__builtin_return_address%>");
+ return false;
+ }
- tree Dst = gimple_call_arg(stmt, 0);
- unsigned DstAlign = getPointerAlignment(Dst);
+ Intrinsic::ID IID = !isFrame ? Intrinsic::returnaddress :
+ Intrinsic::frameaddress;
+ Result = Builder.CreateCall(Intrinsic::getDeclaration(TheModule, IID),
+ Level);
+ Result = Builder.CreateBitCast(
+ Result, ConvertType(gimple_call_return_type(stmt)));
+ return true;
+ }
- Value *DstV = EmitMemory(Dst);
- Value *Val = Constant::getNullValue(Type::getInt32Ty(Context));
- Value *Len = EmitMemory(gimple_call_arg(stmt, 1));
- EmitMemSet(DstV, Val, Len, DstAlign);
- return true;
-}
+ bool TreeToLLVM::EmitBuiltinExtractReturnAddr(gimple stmt, Value *&Result) {
+ Value *Ptr = EmitMemory(gimple_call_arg(stmt, 0));
-bool TreeToLLVM::EmitBuiltinPrefetch(gimple stmt) {
- if (!validate_gimple_arglist(stmt, POINTER_TYPE, 0))
- return false;
+ // FIXME: Actually we should do something like this:
+ //
+ // Result = (Ptr & MASK_RETURN_ADDR) + RETURN_ADDR_OFFSET, if mask and
+ // offset are defined. This seems to be needed for: ARM, MIPS, Sparc.
+ // Unfortunately, these constants are defined as RTL expressions and
+ // should be handled separately.
- Value *Ptr = EmitMemory(gimple_call_arg(stmt, 0));
- Value *ReadWrite = 0;
- Value *Locality = 0;
- Value *Data = 0;
-
- if (gimple_call_num_args(stmt) > 1) { // Args 1/2 are optional
- ReadWrite = EmitMemory(gimple_call_arg(stmt, 1));
- if (!isa<ConstantInt>(ReadWrite)) {
- error("second argument to %<__builtin_prefetch%> must be a constant");
- ReadWrite = 0;
- } else if (cast<ConstantInt>(ReadWrite)->getZExtValue() > 1) {
- warning (0, "invalid second argument to %<__builtin_prefetch%>;"
- " using zero");
- ReadWrite = 0;
- } else {
- ReadWrite = TheFolder->CreateIntCast(cast<Constant>(ReadWrite),
- Type::getInt32Ty(Context),
- /*isSigned*/false);
- }
+ Result = Builder.CreateBitCast(Ptr, Type::getInt8PtrTy(Context));
- if (gimple_call_num_args(stmt) > 2) {
- Locality = EmitMemory(gimple_call_arg(stmt, 2));
- if (!isa<ConstantInt>(Locality)) {
- error("third argument to %<__builtin_prefetch%> must be a constant");
- Locality = 0;
- } else if (cast<ConstantInt>(Locality)->getZExtValue() > 3) {
- warning(0, "invalid third argument to %<__builtin_prefetch%>; using 3");
- Locality = 0;
- } else {
- Locality = TheFolder->CreateIntCast(cast<Constant>(Locality),
- Type::getInt32Ty(Context),
- /*isSigned*/false);
- }
+ return true;
}
- }
- // Default to highly local read.
- if (ReadWrite == 0)
- ReadWrite = Builder.getInt32(0);
- if (Locality == 0)
- Locality = Builder.getInt32(3);
- if (Data == 0)
- Data = Builder.getInt32(1);
+ bool TreeToLLVM::EmitBuiltinFrobReturnAddr(gimple stmt, Value *&Result) {
+ Value *Ptr = EmitMemory(gimple_call_arg(stmt, 0));
- Ptr = Builder.CreateBitCast(Ptr, Type::getInt8PtrTy(Context));
+ // FIXME: Actually we should do something like this:
+ //
+ // Result = Ptr - RETURN_ADDR_OFFSET, if offset is defined. This seems to be
+ // needed for: MIPS, Sparc. Unfortunately, these constants are defined
+ // as RTL expressions and should be handled separately.
- Builder.CreateCall4(Intrinsic::getDeclaration(TheModule, Intrinsic::prefetch),
- Ptr, ReadWrite, Locality, Data);
- return true;
-}
+ Result = Builder.CreateBitCast(Ptr, Type::getInt8PtrTy(Context));
-/// EmitBuiltinReturnAddr - Emit an llvm.returnaddress or llvm.frameaddress
-/// instruction, depending on whether isFrame is true or not.
-bool TreeToLLVM::EmitBuiltinReturnAddr(gimple stmt, Value *&Result,
- bool isFrame) {
- if (!validate_gimple_arglist(stmt, INTEGER_TYPE, VOID_TYPE))
- return false;
+ return true;
+ }
- ConstantInt *Level =
- dyn_cast<ConstantInt>(EmitMemory(gimple_call_arg(stmt, 0)));
- if (!Level) {
- if (isFrame)
- error("invalid argument to %<__builtin_frame_address%>");
- else
- error("invalid argument to %<__builtin_return_address%>");
- return false;
- }
+ bool TreeToLLVM::EmitBuiltinStackSave(gimple stmt, Value *&Result) {
+ if (!validate_gimple_arglist(stmt, VOID_TYPE))
+ return false;
- Intrinsic::ID IID =
- !isFrame ? Intrinsic::returnaddress : Intrinsic::frameaddress;
- Result = Builder.CreateCall(Intrinsic::getDeclaration(TheModule, IID), Level);
- Result = Builder.CreateBitCast(Result,
- ConvertType(gimple_call_return_type(stmt)));
- return true;
-}
+ Result = Builder.CreateCall(
+ Intrinsic::getDeclaration(TheModule, Intrinsic::stacksave));
+ return true;
+ }
-bool TreeToLLVM::EmitBuiltinExtractReturnAddr(gimple stmt, Value *&Result) {
- Value *Ptr = EmitMemory(gimple_call_arg(stmt, 0));
+ bool TreeToLLVM::EmitBuiltinUnreachable() {
+ Builder.CreateUnreachable();
+ return true;
+ }
- // FIXME: Actually we should do something like this:
- //
- // Result = (Ptr & MASK_RETURN_ADDR) + RETURN_ADDR_OFFSET, if mask and
- // offset are defined. This seems to be needed for: ARM, MIPS, Sparc.
- // Unfortunately, these constants are defined as RTL expressions and
- // should be handled separately.
+ // Exception handling builtins.
- Result = Builder.CreateBitCast(Ptr, Type::getInt8PtrTy(Context));
-
- return true;
-}
-
-bool TreeToLLVM::EmitBuiltinFrobReturnAddr(gimple stmt, Value *&Result) {
- Value *Ptr = EmitMemory(gimple_call_arg(stmt, 0));
-
- // FIXME: Actually we should do something like this:
- //
- // Result = Ptr - RETURN_ADDR_OFFSET, if offset is defined. This seems to be
- // needed for: MIPS, Sparc. Unfortunately, these constants are defined
- // as RTL expressions and should be handled separately.
-
- Result = Builder.CreateBitCast(Ptr, Type::getInt8PtrTy(Context));
-
- return true;
-}
-
-bool TreeToLLVM::EmitBuiltinStackSave(gimple stmt, Value *&Result) {
- if (!validate_gimple_arglist(stmt, VOID_TYPE))
- return false;
+ bool TreeToLLVM::EmitBuiltinEHCopyValues(gimple stmt) {
+ unsigned DstRegionNo = tree_low_cst(gimple_call_arg(stmt, 0), 0);
+ unsigned SrcRegionNo = tree_low_cst(gimple_call_arg(stmt, 1), 0);
+ // Copy the exception pointer.
+ Value *ExcPtr = Builder.CreateLoad(getExceptionPtr(SrcRegionNo));
+ Builder.CreateStore(ExcPtr, getExceptionPtr(DstRegionNo));
+ // Copy the selector value.
+ Value *Filter = Builder.CreateLoad(getExceptionFilter(SrcRegionNo));
+ Builder.CreateStore(Filter, getExceptionFilter(DstRegionNo));
+ return true;
+ }
- Result = Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
- Intrinsic::stacksave));
- return true;
-}
-
-bool TreeToLLVM::EmitBuiltinUnreachable() {
- Builder.CreateUnreachable();
- return true;
-}
-
-// Exception handling builtins.
-
-bool TreeToLLVM::EmitBuiltinEHCopyValues(gimple stmt) {
- unsigned DstRegionNo = tree_low_cst(gimple_call_arg(stmt, 0), 0);
- unsigned SrcRegionNo = tree_low_cst(gimple_call_arg(stmt, 1), 0);
- // Copy the exception pointer.
- Value *ExcPtr = Builder.CreateLoad(getExceptionPtr(SrcRegionNo));
- Builder.CreateStore(ExcPtr, getExceptionPtr(DstRegionNo));
- // Copy the selector value.
- Value *Filter = Builder.CreateLoad(getExceptionFilter(SrcRegionNo));
- Builder.CreateStore(Filter, getExceptionFilter(DstRegionNo));
- return true;
-}
-
-bool TreeToLLVM::EmitBuiltinEHFilter(gimple stmt, Value *&Result) {
- // Lookup the local that holds the selector value for this region.
- unsigned RegionNo = tree_low_cst(gimple_call_arg(stmt, 0), 0);
- AllocaInst *Filter = getExceptionFilter(RegionNo);
- // Load the selector value out.
- Result = Builder.CreateLoad(Filter);
- // Ensure the returned value has the right integer type.
- tree type = gimple_call_return_type(stmt);
- Result = CastToAnyType(Result, /*isSigned*/true, getRegType(type),
- /*isSigned*/!TYPE_UNSIGNED(type));
- return true;
-}
-
-bool TreeToLLVM::EmitBuiltinEHPointer(gimple stmt, Value *&Result) {
- // Lookup the local that holds the exception pointer for this region.
- unsigned RegionNo = tree_low_cst(gimple_call_arg(stmt, 0), 0);
- AllocaInst *ExcPtr = getExceptionPtr(RegionNo);
- // Load the exception pointer out.
- Result = Builder.CreateLoad(ExcPtr);
- // Ensure the returned value has the right pointer type.
- tree type = gimple_call_return_type(stmt);
- Result = Builder.CreateBitCast(Result, getRegType(type));
- return true;
-}
+ bool TreeToLLVM::EmitBuiltinEHFilter(gimple stmt, Value *&Result) {
+ // Lookup the local that holds the selector value for this region.
+ unsigned RegionNo = tree_low_cst(gimple_call_arg(stmt, 0), 0);
+ AllocaInst *Filter = getExceptionFilter(RegionNo);
+ // Load the selector value out.
+ Result = Builder.CreateLoad(Filter);
+ // Ensure the returned value has the right integer type.
+ tree type = gimple_call_return_type(stmt);
+ Result = CastToAnyType(Result, /*isSigned*/ true, getRegType(type),
+ /*isSigned*/ !TYPE_UNSIGNED(type));
+ return true;
+ }
+ bool TreeToLLVM::EmitBuiltinEHPointer(gimple stmt, Value *&Result) {
+ // Lookup the local that holds the exception pointer for this region.
+ unsigned RegionNo = tree_low_cst(gimple_call_arg(stmt, 0), 0);
+ AllocaInst *ExcPtr = getExceptionPtr(RegionNo);
+ // Load the exception pointer out.
+ Result = Builder.CreateLoad(ExcPtr);
+ // Ensure the returned value has the right pointer type.
+ tree type = gimple_call_return_type(stmt);
+ Result = Builder.CreateBitCast(Result, getRegType(type));
+ return true;
+ }
// Builtins used by the exception handling runtime.
// On most machines, the CFA coincides with the first incoming parm.
#ifndef ARG_POINTER_CFA_OFFSET
-#define ARG_POINTER_CFA_OFFSET(FNDECL) FIRST_PARM_OFFSET (FNDECL)
+#define ARG_POINTER_CFA_OFFSET(FNDECL) FIRST_PARM_OFFSET(FNDECL)
#endif
// The mapping from gcc register number to DWARF 2 CFA column number. By
// default, we just provide columns for all registers.
#ifndef DWARF_FRAME_REGNUM
-#define DWARF_FRAME_REGNUM(REG) DBX_REGISTER_NUMBER (REG)
+#define DWARF_FRAME_REGNUM(REG) DBX_REGISTER_NUMBER(REG)
#endif
// Map register numbers held in the call frame info that gcc has
@@ -5658,3460 +5651,3593 @@
#define HARD_REGNO_CALL_PART_CLOBBERED(REGNO, MODE) 0
#endif
-bool TreeToLLVM::EmitBuiltinDwarfCFA(gimple stmt, Value *&Result) {
- if (!validate_gimple_arglist(stmt, VOID_TYPE))
- return false;
+ bool TreeToLLVM::EmitBuiltinDwarfCFA(gimple stmt, Value *&Result) {
+ if (!validate_gimple_arglist(stmt, VOID_TYPE))
+ return false;
- int cfa_offset = ARG_POINTER_CFA_OFFSET(exp);
+ int cfa_offset = ARG_POINTER_CFA_OFFSET(exp);
- // FIXME: is i32 always enough here?
- Result =
- Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
- Intrinsic::eh_dwarf_cfa),
- Builder.getInt32(cfa_offset));
+ // FIXME: is i32 always enough here?
+ Result = Builder.CreateCall(Intrinsic::getDeclaration(
+ TheModule, Intrinsic::eh_dwarf_cfa),
+ Builder.getInt32(cfa_offset));
- return true;
-}
+ return true;
+ }
-bool TreeToLLVM::EmitBuiltinDwarfSPColumn(gimple stmt, Value *&Result) {
- if (!validate_gimple_arglist(stmt, VOID_TYPE))
- return false;
+ bool TreeToLLVM::EmitBuiltinDwarfSPColumn(gimple stmt, Value *&Result) {
+ if (!validate_gimple_arglist(stmt, VOID_TYPE))
+ return false;
- unsigned int dwarf_regnum = DWARF_FRAME_REGNUM(STACK_POINTER_REGNUM);
- Result = ConstantInt::get(ConvertType(gimple_call_return_type(stmt)),
- dwarf_regnum);
+ unsigned int dwarf_regnum = DWARF_FRAME_REGNUM(STACK_POINTER_REGNUM);
+ Result = ConstantInt::get(ConvertType(gimple_call_return_type(stmt)),
+ dwarf_regnum);
- return true;
-}
+ return true;
+ }
-bool TreeToLLVM::EmitBuiltinEHReturnDataRegno(gimple stmt, Value *&Result) {
+ bool TreeToLLVM::EmitBuiltinEHReturnDataRegno(gimple stmt, Value *&Result) {
#ifdef EH_RETURN_DATA_REGNO
- if (!validate_gimple_arglist(stmt, INTEGER_TYPE, VOID_TYPE))
- return false;
+ if (!validate_gimple_arglist(stmt, INTEGER_TYPE, VOID_TYPE))
+ return false;
- tree which = gimple_call_arg(stmt, 0);
- unsigned HOST_WIDE_INT iwhich;
+ tree which = gimple_call_arg(stmt, 0);
+ unsigned HOST_WIDE_INT iwhich;
- if (!isa<INTEGER_CST>(which)) {
- error ("argument of %<__builtin_eh_return_regno%> must be constant");
- return false;
- }
+ if (!isa<INTEGER_CST>(which)) {
+ error("argument of %<__builtin_eh_return_regno%> must be constant");
+ return false;
+ }
- iwhich = tree_low_cst (which, 1);
- iwhich = EH_RETURN_DATA_REGNO (iwhich);
- if (iwhich == INVALID_REGNUM)
- return false;
+ iwhich = tree_low_cst(which, 1);
+ iwhich = EH_RETURN_DATA_REGNO(iwhich);
+ if (iwhich == INVALID_REGNUM)
+ return false;
- iwhich = DWARF_FRAME_REGNUM (iwhich);
+ iwhich = DWARF_FRAME_REGNUM(iwhich);
- Result = ConstantInt::get(ConvertType(gimple_call_return_type(stmt)), iwhich);
+ Result = ConstantInt::get(ConvertType(gimple_call_return_type(stmt)),
+ iwhich);
#endif
- return true;
-}
-
-bool TreeToLLVM::EmitBuiltinEHReturn(gimple stmt, Value *&/*Result*/) {
- if (!validate_gimple_arglist(stmt, INTEGER_TYPE, POINTER_TYPE, VOID_TYPE))
- return false;
-
- Type *IntPtr = DL.getIntPtrType(Context, 0);
- Value *Offset = EmitMemory(gimple_call_arg(stmt, 0));
- Value *Handler = EmitMemory(gimple_call_arg(stmt, 1));
-
- Intrinsic::ID IID = IntPtr->isIntegerTy(32) ?
- Intrinsic::eh_return_i32 : Intrinsic::eh_return_i64;
+ return true;
+ }
- Offset = Builder.CreateIntCast(Offset, IntPtr, /*isSigned*/true);
- Handler = Builder.CreateBitCast(Handler, Type::getInt8PtrTy(Context));
+ bool TreeToLLVM::EmitBuiltinEHReturn(gimple stmt, Value *&/*Result*/) {
+ if (!validate_gimple_arglist(stmt, INTEGER_TYPE, POINTER_TYPE, VOID_TYPE))
+ return false;
- Value *Args[2] = { Offset, Handler };
- Builder.CreateCall(Intrinsic::getDeclaration(TheModule, IID), Args);
- Builder.CreateUnreachable();
- BeginBlock(BasicBlock::Create(Context));
+ Type *IntPtr = DL.getIntPtrType(Context, 0);
+ Value *Offset = EmitMemory(gimple_call_arg(stmt, 0));
+ Value *Handler = EmitMemory(gimple_call_arg(stmt, 1));
+
+ Intrinsic::ID IID = IntPtr->isIntegerTy(32) ? Intrinsic::eh_return_i32 :
+ Intrinsic::eh_return_i64;
+
+ Offset = Builder.CreateIntCast(Offset, IntPtr, /*isSigned*/ true);
+ Handler = Builder.CreateBitCast(Handler, Type::getInt8PtrTy(Context));
+
+ Value *Args[2] = { Offset, Handler };
+ Builder.CreateCall(Intrinsic::getDeclaration(TheModule, IID), Args);
+ Builder.CreateUnreachable();
+ BeginBlock(BasicBlock::Create(Context));
- return true;
-}
+ return true;
+ }
-bool TreeToLLVM::EmitBuiltinInitDwarfRegSizes(gimple stmt, Value *&/*Result*/) {
+ bool TreeToLLVM::EmitBuiltinInitDwarfRegSizes(gimple stmt,
+ Value *&/*Result*/) {
#ifdef DWARF2_UNWIND_INFO
- unsigned int i;
- bool wrote_return_column = false;
- static bool reg_modes_initialized = false;
+ unsigned int i;
+ bool wrote_return_column = false;
+ static bool reg_modes_initialized = false;
- if (!validate_gimple_arglist(stmt, POINTER_TYPE, VOID_TYPE))
- return false;
+ if (!validate_gimple_arglist(stmt, POINTER_TYPE, VOID_TYPE))
+ return false;
- if (!reg_modes_initialized) {
- init_reg_modes_target();
- reg_modes_initialized = true;
- }
-
- Value *Addr =
- Builder.CreateBitCast(EmitMemory(gimple_call_arg(stmt, 0)),
- Type::getInt8PtrTy(Context));
- Constant *Size, *Idx;
-
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) {
- int rnum = DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), 1);
-
- if (rnum < DWARF_FRAME_REGISTERS) {
- enum machine_mode save_mode = reg_raw_mode[i];
- HOST_WIDE_INT size;
-
- if (HARD_REGNO_CALL_PART_CLOBBERED (i, save_mode))
- save_mode = choose_hard_reg_mode (i, 1, true);
- if (DWARF_FRAME_REGNUM (i) == DWARF_FRAME_RETURN_COLUMN) {
- if (save_mode == VOIDmode)
- continue;
- wrote_return_column = true;
- }
- size = GET_MODE_SIZE (save_mode);
- if (rnum < 0)
- continue;
+ if (!reg_modes_initialized) {
+ init_reg_modes_target();
+ reg_modes_initialized = true;
+ }
+
+ Value *Addr = Builder.CreateBitCast(EmitMemory(gimple_call_arg(stmt, 0)),
+ Type::getInt8PtrTy(Context));
+ Constant *Size, *Idx;
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) {
+ int rnum = DWARF2_FRAME_REG_OUT(DWARF_FRAME_REGNUM(i), 1);
+
+ if (rnum < DWARF_FRAME_REGISTERS) {
+ enum machine_mode save_mode = reg_raw_mode[i];
+ HOST_WIDE_INT size;
+
+ if (HARD_REGNO_CALL_PART_CLOBBERED(i, save_mode))
+ save_mode = choose_hard_reg_mode(i, 1, true);
+ if (DWARF_FRAME_REGNUM(i) == DWARF_FRAME_RETURN_COLUMN) {
+ if (save_mode == VOIDmode)
+ continue;
+ wrote_return_column = true;
+ }
+ size = GET_MODE_SIZE(save_mode);
+ if (rnum < 0)
+ continue;
- Size = Builder.getInt8(size);
- Idx = Builder.getInt32(rnum);
- Builder.CreateStore(Size, Builder.CreateGEP(Addr, Idx, flag_verbose_asm ?
- "rnum" : ""), false);
- }
- }
+ Size = Builder.getInt8(size);
+ Idx = Builder.getInt32(rnum);
+ Builder.CreateStore(Size,
+ Builder.CreateGEP(Addr, Idx, flag_verbose_asm ?
+ "rnum" : ""), false);
+ }
+ }
- if (!wrote_return_column) {
- Size = Builder.getInt8(GET_MODE_SIZE (Pmode));
- Idx = Builder.getInt32(DWARF_FRAME_RETURN_COLUMN);
- Builder.CreateStore(Size, Builder.CreateGEP(Addr, Idx, flag_verbose_asm ?
- "rcol" : ""), false);
- }
+ if (!wrote_return_column) {
+ Size = Builder.getInt8(GET_MODE_SIZE(Pmode));
+ Idx = Builder.getInt32(DWARF_FRAME_RETURN_COLUMN);
+ Builder.CreateStore(
+ Size, Builder.CreateGEP(Addr, Idx, flag_verbose_asm ? "rcol" : ""),
+ false);
+ }
#ifdef DWARF_ALT_FRAME_RETURN_COLUMN
- Size = Builder.getInt8(GET_MODE_SIZE (Pmode));
- Idx = Builder.getInt32(DWARF_ALT_FRAME_RETURN_COLUMN);
- Builder.CreateStore(Size, Builder.CreateGEP(Addr, Idx, flag_verbose_asm ?
- "acol" : ""), false);
+ Size = Builder.getInt8(GET_MODE_SIZE(Pmode));
+ Idx = Builder.getInt32(DWARF_ALT_FRAME_RETURN_COLUMN);
+ Builder.CreateStore(Size, Builder.CreateGEP(Addr, Idx, flag_verbose_asm ?
+ "acol" : ""), false);
#endif
#endif /* DWARF2_UNWIND_INFO */
- // TODO: the RS6000 target needs extra initialization [gcc changeset 122468].
-
- return true;
-}
+ // TODO: the RS6000 target needs extra initialization [gcc changeset 122468].
-bool TreeToLLVM::EmitBuiltinUnwindInit(gimple stmt, Value *&/*Result*/) {
- if (!validate_gimple_arglist(stmt, VOID_TYPE))
- return false;
+ return true;
+ }
- Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
- Intrinsic::eh_unwind_init));
+ bool TreeToLLVM::EmitBuiltinUnwindInit(gimple stmt, Value *&/*Result*/) {
+ if (!validate_gimple_arglist(stmt, VOID_TYPE))
+ return false;
- return true;
-}
+ Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
+ Intrinsic::eh_unwind_init));
-bool TreeToLLVM::EmitBuiltinStackRestore(gimple stmt) {
- if (!validate_gimple_arglist(stmt, POINTER_TYPE, VOID_TYPE))
- return false;
+ return true;
+ }
- Value *Ptr = EmitMemory(gimple_call_arg(stmt, 0));
- Ptr = Builder.CreateBitCast(Ptr, Type::getInt8PtrTy(Context));
+ bool TreeToLLVM::EmitBuiltinStackRestore(gimple stmt) {
+ if (!validate_gimple_arglist(stmt, POINTER_TYPE, VOID_TYPE))
+ return false;
- Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
- Intrinsic::stackrestore), Ptr);
- return true;
-}
+ Value *Ptr = EmitMemory(gimple_call_arg(stmt, 0));
+ Ptr = Builder.CreateBitCast(Ptr, Type::getInt8PtrTy(Context));
+ Builder.CreateCall(
+ Intrinsic::getDeclaration(TheModule, Intrinsic::stackrestore), Ptr);
+ return true;
+ }
-bool TreeToLLVM::EmitBuiltinAlloca(gimple stmt, Value *&Result) {
- if (!validate_gimple_arglist(stmt, INTEGER_TYPE, VOID_TYPE))
- return false;
- Value *Amt = EmitMemory(gimple_call_arg(stmt, 0));
- AllocaInst *Alloca = Builder.CreateAlloca(Type::getInt8Ty(Context), Amt);
- Alloca->setAlignment(BIGGEST_ALIGNMENT / 8);
- Result = Alloca;
- return true;
-}
+ bool TreeToLLVM::EmitBuiltinAlloca(gimple stmt, Value *&Result) {
+ if (!validate_gimple_arglist(stmt, INTEGER_TYPE, VOID_TYPE))
+ return false;
+ Value *Amt = EmitMemory(gimple_call_arg(stmt, 0));
+ AllocaInst *Alloca = Builder.CreateAlloca(Type::getInt8Ty(Context), Amt);
+ Alloca->setAlignment(BIGGEST_ALIGNMENT / 8);
+ Result = Alloca;
+ return true;
+ }
-bool TreeToLLVM::EmitBuiltinAllocaWithAlign(gimple stmt, Value *&Result) {
- if (!validate_gimple_arglist(stmt, INTEGER_TYPE, INTEGER_TYPE, VOID_TYPE))
- return false;
- Value *Amt = EmitMemory(gimple_call_arg(stmt, 0));
- uint64_t Align = getInt64(gimple_call_arg(stmt, 1), true);
- AllocaInst *Alloca = Builder.CreateAlloca(Type::getInt8Ty(Context), Amt);
- Alloca->setAlignment(Align / 8);
- Result = Alloca;
- return true;
-}
+ bool TreeToLLVM::EmitBuiltinAllocaWithAlign(gimple stmt, Value *&Result) {
+ if (!validate_gimple_arglist(stmt, INTEGER_TYPE, INTEGER_TYPE, VOID_TYPE))
+ return false;
+ Value *Amt = EmitMemory(gimple_call_arg(stmt, 0));
+ uint64_t Align = getInt64(gimple_call_arg(stmt, 1), true);
+ AllocaInst *Alloca = Builder.CreateAlloca(Type::getInt8Ty(Context), Amt);
+ Alloca->setAlignment(Align / 8);
+ Result = Alloca;
+ return true;
+ }
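A note on the divisions by 8 in both alloca lowerings: GCC expresses alignment in bits, while AllocaInst::setAlignment takes bytes. A sketch using __builtin_alloca_with_align (available in newer GCCs), where the 128 is in bits:

  // Sketch only: asks for n bytes of stack aligned to 128 bits; the
  // lowering above sets the i8 alloca's alignment to 128 / 8 = 16 bytes.
  void clear_scratch(unsigned n) {
    char *p = (char *) __builtin_alloca_with_align(n, 128);
    __builtin_memset(p, 0, n);
  }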
#if (GCC_MINOR > 6)
-bool TreeToLLVM::EmitBuiltinAssumeAligned(gimple stmt, Value *&Result) {
- if (!validate_gimple_arglist(stmt, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE))
- return false;
- // Return the pointer argument. TODO: Pass the alignment information on to
- // the optimizers.
- Value *Ptr = EmitRegister(gimple_call_arg(stmt, 0));
- // Bitcast it to the return type.
- Ptr = TriviallyTypeConvert(Ptr, getRegType(gimple_call_return_type(stmt)));
- Result = Reg2Mem(Ptr, gimple_call_return_type(stmt), Builder);
- return true;
-}
+ bool TreeToLLVM::EmitBuiltinAssumeAligned(gimple stmt, Value *&Result) {
+ if (!validate_gimple_arglist(stmt, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE))
+ return false;
+ // Return the pointer argument. TODO: Pass the alignment information on to
+ // the optimizers.
+ Value *Ptr = EmitRegister(gimple_call_arg(stmt, 0));
+ // Bitcast it to the return type.
+ Ptr = TriviallyTypeConvert(Ptr,
+ getRegType(gimple_call_return_type(stmt)));
+ Result = Reg2Mem(Ptr, gimple_call_return_type(stmt), Builder);
+ return true;
+ }
#endif
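As the TODO notes, only the pointer makes it through; the alignment promise is dropped rather than forwarded to the optimizers. A sketch of the builtin handled here, which exists from GCC 4.7 onwards (hence the GCC_MINOR > 6 guard):

  // Sketch only: promises the compiler that p is 32-byte aligned;
  // DragonEgg currently returns p unchanged.
  double *assume32(double *p) {
    return (double *) __builtin_assume_aligned(p, 32);
  }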
-bool TreeToLLVM::EmitBuiltinExpect(gimple stmt, Value *&Result) {
- tree type = gimple_call_return_type(stmt);
- if (gimple_call_num_args(stmt) < 2) {
- Result = Constant::getNullValue(ConvertType(type));
- return true;
- }
- Type *ArgTy = getRegType(type);
- Value *ExpectIntr = Intrinsic::getDeclaration(TheModule, Intrinsic::expect,
- ArgTy);
- Value *ArgValue = EmitRegister(gimple_call_arg(stmt, 0));
- Value *ExpectedValue = EmitRegister(gimple_call_arg(stmt, 1));
- Result = Builder.CreateCall2(ExpectIntr, ArgValue, ExpectedValue);
- Result = Reg2Mem(Result, type, Builder);
- return true;
-}
-
-bool TreeToLLVM::EmitBuiltinVAStart(gimple stmt) {
- if (gimple_call_num_args(stmt) < 2) {
- error("too few arguments to function %<va_start%>");
- return true;
- }
-
- tree fntype = TREE_TYPE(current_function_decl);
- if (TYPE_ARG_TYPES(fntype) == 0 ||
- (tree_last(TYPE_ARG_TYPES(fntype)) == void_type_node)) {
- error("%<va_start%> used in function with fixed args");
- return true;
- }
-
- Constant *va_start = Intrinsic::getDeclaration(TheModule, Intrinsic::vastart);
- Value *ArgVal = EmitMemory(gimple_call_arg(stmt, 0));
- ArgVal = Builder.CreateBitCast(ArgVal, Type::getInt8PtrTy(Context));
- Builder.CreateCall(va_start, ArgVal);
- return true;
-}
-
-bool TreeToLLVM::EmitBuiltinVAEnd(gimple stmt) {
- Value *Arg = EmitMemory(gimple_call_arg(stmt, 0));
- Arg = Builder.CreateBitCast(Arg, Type::getInt8PtrTy(Context));
- Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::vaend),
- Arg);
- return true;
-}
-
-bool TreeToLLVM::EmitBuiltinVACopy(gimple stmt) {
- tree Arg1T = gimple_call_arg(stmt, 0);
- tree Arg2T = gimple_call_arg(stmt, 1);
-
- Value *Arg1 = EmitMemory(Arg1T); // Emit the address of the destination.
- // The second arg of llvm.va_copy is a pointer to a valist.
- Value *Arg2;
- if (!isa<AGGREGATE_TYPE>(va_list_type_node)) {
- // Emit it as a value, then store it to a temporary slot.
- Value *V2 = EmitMemory(Arg2T);
- Arg2 = CreateTemporary(V2->getType());
- Builder.CreateStore(V2, Arg2);
- } else {
- // If the target has aggregate valists, then the second argument
- // from GCC is the address of the source valist and we don't
- // need to do anything special.
- Arg2 = EmitMemory(Arg2T);
- }
-
- static Type *VPTy = Type::getInt8PtrTy(Context);
-
- // FIXME: This ignores alignment and volatility of the arguments.
- SmallVector<Value *, 2> Args;
- Args.push_back(Builder.CreateBitCast(Arg1, VPTy));
- Args.push_back(Builder.CreateBitCast(Arg2, VPTy));
-
- Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::vacopy),
- Args);
- return true;
-}
-
-bool TreeToLLVM::EmitBuiltinAdjustTrampoline(gimple stmt, Value *&Result) {
- if (!validate_gimple_arglist(stmt, POINTER_TYPE, VOID_TYPE))
- return false;
-
- Function *Intr = Intrinsic::getDeclaration(TheModule,
- Intrinsic::adjust_trampoline);
- Value *Arg = Builder.CreateBitCast(EmitRegister(gimple_call_arg(stmt, 0)),
- Builder.getInt8PtrTy());
- Result = Builder.CreateCall(Intr, Arg);
- return true;
-}
-
-bool TreeToLLVM::EmitBuiltinInitTrampoline(gimple stmt, Value *&/*Result*/) {
- if (!validate_gimple_arglist(stmt, POINTER_TYPE, POINTER_TYPE, POINTER_TYPE,
- VOID_TYPE))
- return false;
-
- Value *Tramp = EmitRegister(gimple_call_arg(stmt, 0));
- Value *Func = EmitRegister(gimple_call_arg(stmt, 1));
- Value *Chain = EmitRegister(gimple_call_arg(stmt, 2));
-
- Type *VPTy = Builder.getInt8PtrTy();
- Value *Ops[3] = {
- Builder.CreateBitCast(Tramp, VPTy),
- Builder.CreateBitCast(Func, VPTy),
- Builder.CreateBitCast(Chain, VPTy)
- };
-
- Function *Intr = Intrinsic::getDeclaration(TheModule,
- Intrinsic::init_trampoline);
- Builder.CreateCall(Intr, Ops);
- return true;
-}
-
-//===----------------------------------------------------------------------===//
-// ... Complex Math Expressions ...
-//===----------------------------------------------------------------------===//
-
-Value *TreeToLLVM::CreateComplex(Value *Real, Value *Imag) {
- assert(Real->getType() == Imag->getType() && "Component type mismatch!");
- Type *EltTy = Real->getType();
- Value *Result = UndefValue::get(StructType::get(EltTy, EltTy, NULL));
- Result = Builder.CreateInsertValue(Result, Real, 0);
- Result = Builder.CreateInsertValue(Result, Imag, 1);
- return Result;
-}
-
-void TreeToLLVM::SplitComplex(Value *Complex, Value *&Real, Value *&Imag) {
- Real = Builder.CreateExtractValue(Complex, 0);
- Imag = Builder.CreateExtractValue(Complex, 1);
-}
-
+ bool TreeToLLVM::EmitBuiltinExpect(gimple stmt, Value *&Result) {
+ tree type = gimple_call_return_type(stmt);
+ if (gimple_call_num_args(stmt) < 2) {
+ Result = Constant::getNullValue(ConvertType(type));
+ return true;
+ }
+ Type *ArgTy = getRegType(type);
+ Value *ExpectIntr = Intrinsic::getDeclaration(TheModule,
+ Intrinsic::expect, ArgTy);
+ Value *ArgValue = EmitRegister(gimple_call_arg(stmt, 0));
+ Value *ExpectedValue = EmitRegister(gimple_call_arg(stmt, 1));
+ Result = Builder.CreateCall2(ExpectIntr, ArgValue, ExpectedValue);
+ Result = Reg2Mem(Result, type, Builder);
+ return true;
+ }
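The under-two-arguments case folds to zero; otherwise the value and the expected constant become a call to llvm.expect at the argument's register type. Typical use, sketched:

  // Sketch only: hints that rc is expected to be 0; the lowering above
  // emits llvm.expect at the width of `long`, since __builtin_expect
  // takes and returns long.
  int check(int rc) {
    if (__builtin_expect(rc, 0))
      return -1; // unlikely path
    return 0;
  }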
-//===----------------------------------------------------------------------===//
-// ... L-Value Expressions ...
-//===----------------------------------------------------------------------===//
+ bool TreeToLLVM::EmitBuiltinVAStart(gimple stmt) {
+ if (gimple_call_num_args(stmt) < 2) {
+ error("too few arguments to function %<va_start%>");
+ return true;
+ }
-Value *TreeToLLVM::EmitFieldAnnotation(Value *FieldPtr, tree FieldDecl) {
- tree AnnotateAttr = lookup_attribute("annotate", DECL_ATTRIBUTES(FieldDecl));
+ tree fntype = TREE_TYPE(current_function_decl);
+ if (TYPE_ARG_TYPES(fntype) == 0 ||
+ (tree_last(TYPE_ARG_TYPES(fntype)) == void_type_node)) {
+ error("%<va_start%> used in function with fixed args");
+ return true;
+ }
- Type *SBP = Type::getInt8PtrTy(Context);
+ Constant *va_start = Intrinsic::getDeclaration(TheModule,
+ Intrinsic::vastart);
+ Value *ArgVal = EmitMemory(gimple_call_arg(stmt, 0));
+ ArgVal = Builder.CreateBitCast(ArgVal, Type::getInt8PtrTy(Context));
+ Builder.CreateCall(va_start, ArgVal);
+ return true;
+ }
- Function *An = Intrinsic::getDeclaration(TheModule,
- Intrinsic::ptr_annotation,
- SBP);
-
- // Get file and line number. FIXME: Should this be for the decl or the
- // use? Is there location info for the use?
- Constant *LineNo = ConstantInt::get(Type::getInt32Ty(Context),
- DECL_SOURCE_LINE(FieldDecl));
- Constant *File = ConvertMetadataStringToGV(DECL_SOURCE_FILE(FieldDecl));
+ bool TreeToLLVM::EmitBuiltinVAEnd(gimple stmt) {
+ Value *Arg = EmitMemory(gimple_call_arg(stmt, 0));
+ Arg = Builder.CreateBitCast(Arg, Type::getInt8PtrTy(Context));
+ Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::vaend),
+ Arg);
+ return true;
+ }
- File = TheFolder->CreateBitCast(File, SBP);
+ bool TreeToLLVM::EmitBuiltinVACopy(gimple stmt) {
+ tree Arg1T = gimple_call_arg(stmt, 0);
+ tree Arg2T = gimple_call_arg(stmt, 1);
+
+ Value *Arg1 = EmitMemory(Arg1T); // Emit the address of the destination.
+ // The second arg of llvm.va_copy is a pointer to a valist.
+ Value *Arg2;
+ if (!isa<AGGREGATE_TYPE>(va_list_type_node)) {
+ // Emit it as a value, then store it to a temporary slot.
+ Value *V2 = EmitMemory(Arg2T);
+ Arg2 = CreateTemporary(V2->getType());
+ Builder.CreateStore(V2, Arg2);
+ } else {
+ // If the target has aggregate valists, then the second argument
+ // from GCC is the address of the source valist and we don't
+ // need to do anything special.
+ Arg2 = EmitMemory(Arg2T);
+ }
- // There may be multiple annotate attributes. Pass return of lookup_attr
- // to successive lookups.
- while (AnnotateAttr) {
- // Each annotate attribute is a tree list.
- // Get the value of the list, which is our linked list of args.
- tree args = TREE_VALUE(AnnotateAttr);
+ static Type *VPTy = Type::getInt8PtrTy(Context);
- // Each annotate attribute may have multiple args.
- // Treat each arg as if it were a separate annotate attribute.
- for (tree a = args; a; a = TREE_CHAIN(a)) {
- // Each element of the arg list is a tree list, so get its value.
- tree val = TREE_VALUE(a);
+ // FIXME: This ignores alignment and volatility of the arguments.
+ SmallVector<Value *, 2> Args;
+ Args.push_back(Builder.CreateBitCast(Arg1, VPTy));
+ Args.push_back(Builder.CreateBitCast(Arg2, VPTy));
- // Assert it's a string, and then get that string.
- assert(isa<STRING_CST>(val) &&
- "Annotate attribute arg should always be a string");
+ Builder.CreateCall(
+ Intrinsic::getDeclaration(TheModule, Intrinsic::vacopy), Args);
+ return true;
+ }
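The isa<AGGREGATE_TYPE>(va_list_type_node) test covers the two shapes va_list takes across targets: when it is an aggregate (for instance the x86-64 one-element struct array) GCC already passes an address, while a scalar va_list must first be spilled to a temporary, since llvm.va_copy wants pointers for both operands. The source pattern being lowered, sketched:

  #include <stdarg.h>
  // Sketch only: the va_copy below is what reaches EmitBuiltinVACopy.
  int sum_both(int n, ...) {
    va_list ap, aq;
    va_start(ap, n);
    va_copy(aq, ap); // lowered through llvm.va_copy
    int s = 0;
    for (int i = 0; i < n; ++i)
      s += va_arg(ap, int) + va_arg(aq, int);
    va_end(aq);
    va_end(ap);
    return s;
  }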
- Constant *strGV = AddressOf(val);
+ bool TreeToLLVM::EmitBuiltinAdjustTrampoline(gimple stmt, Value *&Result) {
+ if (!validate_gimple_arglist(stmt, POINTER_TYPE, VOID_TYPE))
+ return false;
- // We cannot use the IRBuilder because it will constant fold away
- // the GEP that is critical to distinguish an annotate attribute on
- // a whole struct from one on the first element of the struct.
- BitCastInst *CastFieldPtr = new BitCastInst(FieldPtr, SBP,
- FieldPtr->getName());
- Builder.Insert(CastFieldPtr);
-
- Value *Ops[4] = {
- CastFieldPtr, Builder.CreateBitCast(strGV, SBP),
- File, LineNo
- };
-
- Type* FieldPtrType = FieldPtr->getType();
- FieldPtr = Builder.CreateCall(An, Ops);
- FieldPtr = Builder.CreateBitCast(FieldPtr, FieldPtrType);
+ Function *Intr = Intrinsic::getDeclaration(TheModule,
+ Intrinsic::adjust_trampoline);
+ Value *Arg = Builder.CreateBitCast(EmitRegister(gimple_call_arg(stmt, 0)),
+ Builder.getInt8PtrTy());
+ Result = Builder.CreateCall(Intr, Arg);
+ return true;
}
- // Get next annotate attribute.
- AnnotateAttr = TREE_CHAIN(AnnotateAttr);
- if (AnnotateAttr)
- AnnotateAttr = lookup_attribute("annotate", AnnotateAttr);
- }
- return FieldPtr;
-}
-
-LValue TreeToLLVM::EmitLV_ARRAY_REF(tree exp) {
- // The result type is an ElementTy* in the case of an ARRAY_REF, an array
- // of ElementTy in the case of ARRAY_RANGE_REF.
-
- tree Array = TREE_OPERAND(exp, 0);
- tree ArrayTreeType = TREE_TYPE(Array);
- tree Index = TREE_OPERAND(exp, 1);
- tree IndexType = TREE_TYPE(Index);
- tree ElementType = TREE_TYPE(ArrayTreeType);
-
- assert(isa<ARRAY_TYPE>(ArrayTreeType) && "Unknown ARRAY_REF!");
-
- Value *ArrayAddr;
- unsigned ArrayAlign;
-
- // First subtract the lower bound, if any, in the type of the index.
- Value *IndexVal = EmitRegister(Index);
- tree LowerBound = array_ref_low_bound(exp);
- if (!integer_zerop(LowerBound))
- IndexVal = Builder.CreateSub(IndexVal,
- EmitRegisterWithCast(LowerBound, IndexType),
- "", hasNUW(TREE_TYPE(Index)),
- hasNSW(TREE_TYPE(Index)));
-
- LValue ArrayAddrLV = EmitLV(Array);
- assert(!ArrayAddrLV.isBitfield() && "Arrays cannot be bitfields!");
- ArrayAddr = ArrayAddrLV.Ptr;
- ArrayAlign = ArrayAddrLV.getAlignment();
-
- Type *IntPtrTy = getDataLayout().getIntPtrType(ArrayAddr->getType());
- IndexVal = Builder.CreateIntCast(IndexVal, IntPtrTy,
- /*isSigned*/!TYPE_UNSIGNED(IndexType));
-
- // If we are indexing over a fixed-size type, just use a GEP.
- if (isSizeCompatible(ElementType)) {
- // Avoid any assumptions about how the array type is represented in LLVM by
- // doing the GEP on a pointer to the first array element.
- Type *EltTy = ConvertType(ElementType);
- ArrayAddr = Builder.CreateBitCast(ArrayAddr, EltTy->getPointerTo());
- StringRef GEPName = flag_verbose_asm ? "ar" : "";
- Value *Ptr = POINTER_TYPE_OVERFLOW_UNDEFINED ?
- Builder.CreateInBoundsGEP(ArrayAddr, IndexVal, GEPName) :
- Builder.CreateGEP(ArrayAddr, IndexVal, GEPName);
- unsigned Alignment = MinAlign(ArrayAlign, DL.getABITypeAlignment(EltTy));
- return LValue(Builder.CreateBitCast(Ptr,
- PointerType::getUnqual(ConvertType(TREE_TYPE(exp)))),
- Alignment);
- }
-
- // Otherwise, just do raw, low-level pointer arithmetic. FIXME: this could be
- // much nicer in cases like:
- // float foo(int w, float A[][w], int g) { return A[g][0]; }
-
- if (isa<VOID_TYPE>(TREE_TYPE(ArrayTreeType))) {
- ArrayAddr = Builder.CreateBitCast(ArrayAddr, Type::getInt8PtrTy(Context));
- StringRef GEPName = flag_verbose_asm ? "va" : "";
- ArrayAddr = POINTER_TYPE_OVERFLOW_UNDEFINED ?
- Builder.CreateInBoundsGEP(ArrayAddr, IndexVal, GEPName) :
- Builder.CreateGEP(ArrayAddr, IndexVal, GEPName);
- return LValue(ArrayAddr, 1);
- }
-
- // FIXME: Might also get here if the element type has constant size, but is
- // humongous. Add support for this case.
- assert(TREE_OPERAND(exp, 3) && "Size missing for variable sized element!");
- // ScaleFactor is the size of the element type in units divided by (exactly)
- // TYPE_ALIGN_UNIT(ElementType).
- Value *ScaleFactor = Builder.CreateIntCast(EmitRegister(TREE_OPERAND(exp, 3)),
- IntPtrTy, /*isSigned*/false);
- assert(isPowerOf2_32(TYPE_ALIGN(ElementType)) &&
- "Alignment not a power of two!");
- assert(TYPE_ALIGN(ElementType) >= 8 && "Unit size not a multiple of 8 bits!");
- // ScaleType is chosen to correct for the division in ScaleFactor.
- Type *ScaleType = IntegerType::get(Context, TYPE_ALIGN(ElementType));
- ArrayAddr = Builder.CreateBitCast(ArrayAddr, ScaleType->getPointerTo());
-
- IndexVal = Builder.CreateMul(IndexVal, ScaleFactor);
- unsigned Alignment = MinAlign(ArrayAlign, TYPE_ALIGN(ElementType) / 8);
- StringRef GEPName = flag_verbose_asm ? "ra" : "";
- Value *Ptr = POINTER_TYPE_OVERFLOW_UNDEFINED ?
- Builder.CreateInBoundsGEP(ArrayAddr, IndexVal, GEPName) :
- Builder.CreateGEP(ArrayAddr, IndexVal, GEPName);
- return LValue(Builder.CreateBitCast(Ptr,
- PointerType::getUnqual(ConvertType(TREE_TYPE(exp)))),
- Alignment);
-}
-
-LValue TreeToLLVM::EmitLV_BIT_FIELD_REF(tree exp) {
- LValue Ptr = EmitLV(TREE_OPERAND(exp, 0));
- assert(!Ptr.isBitfield() && "BIT_FIELD_REF operands cannot be bitfields!");
-
- unsigned BitStart = (unsigned)TREE_INT_CST_LOW(TREE_OPERAND(exp, 2));
- unsigned BitSize = (unsigned)TREE_INT_CST_LOW(TREE_OPERAND(exp, 1));
- Type *ValTy = ConvertType(TREE_TYPE(exp));
-
- unsigned ValueSizeInBits = DL.getTypeSizeInBits(ValTy);
- assert(BitSize <= ValueSizeInBits &&
- "ValTy isn't large enough to hold the value loaded!");
-
- assert(ValueSizeInBits == DL.getTypeAllocSizeInBits(ValTy) &&
- "FIXME: BIT_FIELD_REF logic is broken for non-round types");
-
- // BIT_FIELD_REF values can have BitStart values that are quite large. We
- // know that the thing we are loading is ValueSizeInBits large. If BitStart
- // is larger than ValueSizeInBits, bump the pointer over to where it should
- // be.
- if (unsigned UnitOffset = BitStart / ValueSizeInBits) {
- // TODO: If Ptr.Ptr is a struct type or something, we can do much better
- // than this; e.g. see what happens when compiling unwind-dw2-fde-darwin.c.
- Ptr.Ptr = Builder.CreateBitCast(Ptr.Ptr, ValTy->getPointerTo());
- Ptr.Ptr = Builder.CreateGEP(Ptr.Ptr, Builder.getInt32(UnitOffset),
- flag_verbose_asm ? "bfr" : "");
- unsigned OctetOffset = (UnitOffset * ValueSizeInBits) / 8;
- Ptr.setAlignment(MinAlign(Ptr.getAlignment(), OctetOffset));
- BitStart -= UnitOffset*ValueSizeInBits;
- }
-
- // If this is referring to the whole field, return the whole thing.
- if (BitStart == 0 && BitSize == ValueSizeInBits) {
- return LValue(Builder.CreateBitCast(Ptr.Ptr, ValTy->getPointerTo()),
- Ptr.getAlignment());
- }
-
- return LValue(Builder.CreateBitCast(Ptr.Ptr, ValTy->getPointerTo()),
- 1, BitStart, BitSize);
-}
-
-LValue TreeToLLVM::EmitLV_COMPONENT_REF(tree exp) {
- LValue StructAddrLV = EmitLV(TREE_OPERAND(exp, 0));
- tree FieldDecl = TREE_OPERAND(exp, 1);
- unsigned LVAlign = StructAddrLV.getAlignment();
-
- assert(isa<RECORD_OR_UNION_TYPE>(DECL_CONTEXT(FieldDecl)));
-
- Type *StructTy = ConvertType(DECL_CONTEXT(FieldDecl));
-
- assert((!StructAddrLV.isBitfield() ||
- StructAddrLV.BitStart == 0) && "structs cannot be bitfields!");
-
- StructAddrLV.Ptr = Builder.CreateBitCast(StructAddrLV.Ptr,
- StructTy->getPointerTo());
- Type *FieldTy = ConvertType(TREE_TYPE(FieldDecl));
-
- // BitStart - This is the actual offset of the field from the start of the
- // struct, in bits. For bitfields this may be on a non-byte boundary.
- uint64_t FieldBitOffset = getInt64(DECL_FIELD_BIT_OFFSET(FieldDecl), true);
- unsigned BitStart;
- Value *FieldPtr;
-
- // If the GCC field directly corresponds to an LLVM field, handle it.
- unsigned MemberIndex = GetFieldIndex(FieldDecl, StructTy);
- if (MemberIndex < INT_MAX) {
- assert(!TREE_OPERAND(exp, 2) && "Constant not gimple min invariant?");
- // Get a pointer to the byte in which the GCC field starts.
- FieldPtr = Builder.CreateStructGEP(StructAddrLV.Ptr, MemberIndex,
- flag_verbose_asm ? "cr" : "");
- // Within that byte, the bit at which the GCC field starts.
- BitStart = FieldBitOffset & 7;
- } else {
- // Offset will hold the field offset in octets.
- Value *Offset;
+ bool TreeToLLVM::EmitBuiltinInitTrampoline(gimple stmt,
+ Value *&/*Result*/) {
+ if (!validate_gimple_arglist(stmt, POINTER_TYPE, POINTER_TYPE,
+ POINTER_TYPE, VOID_TYPE))
+ return false;
- if (TREE_OPERAND(exp, 2)) {
- Offset = EmitRegister(TREE_OPERAND(exp, 2));
- // At this point the offset is measured in units divided by (exactly)
- // (DECL_OFFSET_ALIGN / BITS_PER_UNIT). Convert to octets.
- unsigned factor = DECL_OFFSET_ALIGN(FieldDecl) / 8;
- if (factor != 1)
- Offset = Builder.CreateMul(Offset,
- ConstantInt::get(Offset->getType(), factor));
- } else {
- assert(DECL_FIELD_OFFSET(FieldDecl) && "Field offset not available!");
- Offset = EmitRegister(DECL_FIELD_OFFSET(FieldDecl));
- // At this point the offset is measured in units. Convert to octets.
- unsigned factor = BITS_PER_UNIT / 8;
- if (factor != 1)
- Offset = Builder.CreateMul(Offset,
- ConstantInt::get(Offset->getType(), factor));
+ Value *Tramp = EmitRegister(gimple_call_arg(stmt, 0));
+ Value *Func = EmitRegister(gimple_call_arg(stmt, 1));
+ Value *Chain = EmitRegister(gimple_call_arg(stmt, 2));
+
+ Type *VPTy = Builder.getInt8PtrTy();
+ Value *Ops[3] = { Builder.CreateBitCast(Tramp, VPTy),
+ Builder.CreateBitCast(Func, VPTy),
+ Builder.CreateBitCast(Chain, VPTy) };
+
+ Function *Intr = Intrinsic::getDeclaration(TheModule,
+ Intrinsic::init_trampoline);
+ Builder.CreateCall(Intr, Ops);
+ return true;
}
- // Here BitStart gives the offset of the field in bits from Offset.
- BitStart = FieldBitOffset;
-
- // Incorporate as much of it as possible into the pointer computation.
- unsigned ByteOffset = BitStart / 8;
- if (ByteOffset > 0) {
- Offset = Builder.CreateAdd(Offset,
- ConstantInt::get(Offset->getType(), ByteOffset));
- BitStart -= ByteOffset*8;
+ //===----------------------------------------------------------------------===//
+ // ... Complex Math Expressions ...
+ //===----------------------------------------------------------------------===//
+
+ Value *TreeToLLVM::CreateComplex(Value *Real, Value *Imag) {
+ assert(Real->getType() == Imag->getType() && "Component type mismatch!");
+ Type *EltTy = Real->getType();
+ Value *Result = UndefValue::get(StructType::get(EltTy, EltTy, NULL));
+ Result = Builder.CreateInsertValue(Result, Real, 0);
+ Result = Builder.CreateInsertValue(Result, Imag, 1);
+ return Result;
}
- Type *BytePtrTy = Type::getInt8PtrTy(Context);
- FieldPtr = Builder.CreateBitCast(StructAddrLV.Ptr, BytePtrTy);
- FieldPtr = Builder.CreateInBoundsGEP(FieldPtr, Offset, flag_verbose_asm ?
- "rc" : "");
- FieldPtr = Builder.CreateBitCast(FieldPtr, FieldTy->getPointerTo());
- }
+ void TreeToLLVM::SplitComplex(Value *Complex, Value *&Real, Value *&Imag) {
+ Real = Builder.CreateExtractValue(Complex, 0);
+ Imag = Builder.CreateExtractValue(Complex, 1);
+ }
+
+ //===----------------------------------------------------------------------===//
+ // ... L-Value Expressions ...
+ //===----------------------------------------------------------------------===//
+
+ Value *TreeToLLVM::EmitFieldAnnotation(Value *FieldPtr, tree FieldDecl) {
+ tree AnnotateAttr = lookup_attribute("annotate",
+ DECL_ATTRIBUTES(FieldDecl));
+
+ Type *SBP = Type::getInt8PtrTy(Context);
+
+ Function *An = Intrinsic::getDeclaration(TheModule,
+ Intrinsic::ptr_annotation, SBP);
+
+ // Get file and line number. FIXME: Should this be for the decl or the
+ // use? Is there location info for the use?
+ Constant *LineNo = ConstantInt::get(Type::getInt32Ty(Context),
+ DECL_SOURCE_LINE(FieldDecl));
+ Constant *File = ConvertMetadataStringToGV(DECL_SOURCE_FILE(FieldDecl));
+
+ File = TheFolder->CreateBitCast(File, SBP);
+
+ // There may be multiple annotate attributes. Pass return of lookup_attr
+ // to successive lookups.
+ while (AnnotateAttr) {
+ // Each annotate attribute is a tree list.
+ // Get the value of the list, which is our linked list of args.
+ tree args = TREE_VALUE(AnnotateAttr);
+
+ // Each annotate attribute may have multiple args.
+ // Treat each arg as if it were a separate annotate attribute.
+ for (tree a = args; a; a = TREE_CHAIN(a)) {
+ // Each element of the arg list is a tree list, so get its value.
+ tree val = TREE_VALUE(a);
+
+ // Assert it's a string, and then get that string.
+ assert(isa<STRING_CST>(val) &&
+ "Annotate attribute arg should always be a string");
+
+ Constant *strGV = AddressOf(val);
+
+ // We cannot use the IRBuilder because it will constant fold away
+ // the GEP that is critical to distinguish an annotate attribute on
+ // a whole struct from one on the first element of the struct.
+ BitCastInst *CastFieldPtr = new BitCastInst(FieldPtr, SBP,
+ FieldPtr->getName());
+ Builder.Insert(CastFieldPtr);
+
+ Value *Ops[4] = { CastFieldPtr, Builder.CreateBitCast(strGV, SBP),
+ File, LineNo };
+
+ Type *FieldPtrType = FieldPtr->getType();
+ FieldPtr = Builder.CreateCall(An, Ops);
+ FieldPtr = Builder.CreateBitCast(FieldPtr, FieldPtrType);
+ }
- // Compute the alignment of the octet containing the first bit of the field,
- // without assuming that the containing struct itself is properly aligned.
- LVAlign = MinAlign(LVAlign, getFieldAlignment(FieldDecl));
+ // Get next annotate attribute.
+ AnnotateAttr = TREE_CHAIN(AnnotateAttr);
+ if (AnnotateAttr)
+ AnnotateAttr = lookup_attribute("annotate", AnnotateAttr);
+ }
+ return FieldPtr;
+ }
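The nested loop is there because a field may carry several annotate attributes, each listing several strings; every string becomes one llvm.ptr.annotation call, and the hand-inserted BitCastInst keeps the folder from collapsing the GEP that tells a whole-struct annotation apart from one on the first field. A sketch of the source construct that triggers this path:

  // Sketch only: accesses to s.hits are routed through
  // llvm.ptr.annotation with the string "counter" and file/line data.
  struct S {
    int hits __attribute__((annotate("counter")));
  };
  int read_hits(S &s) { return s.hits; }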
+
+ LValue TreeToLLVM::EmitLV_ARRAY_REF(tree exp) {
+ // The result type is an ElementTy* in the case of an ARRAY_REF, an array
+ // of ElementTy in the case of ARRAY_RANGE_REF.
+
+ tree Array = TREE_OPERAND(exp, 0);
+ tree ArrayTreeType = TREE_TYPE(Array);
+ tree Index = TREE_OPERAND(exp, 1);
+ tree IndexType = TREE_TYPE(Index);
+ tree ElementType = TREE_TYPE(ArrayTreeType);
+
+ assert(isa<ARRAY_TYPE>(ArrayTreeType) && "Unknown ARRAY_REF!");
+
+ Value *ArrayAddr;
+ unsigned ArrayAlign;
+
+ // First subtract the lower bound, if any, in the type of the index.
+ Value *IndexVal = EmitRegister(Index);
+ tree LowerBound = array_ref_low_bound(exp);
+ if (!integer_zerop(LowerBound))
+ IndexVal = Builder.CreateSub(
+ IndexVal, EmitRegisterWithCast(LowerBound, IndexType),
+ "", hasNUW(TREE_TYPE(Index)), hasNSW(TREE_TYPE(Index)));
+
+ LValue ArrayAddrLV = EmitLV(Array);
+ assert(!ArrayAddrLV.isBitfield() && "Arrays cannot be bitfields!");
+ ArrayAddr = ArrayAddrLV.Ptr;
+ ArrayAlign = ArrayAddrLV.getAlignment();
+
+ Type *IntPtrTy = getDataLayout().getIntPtrType(ArrayAddr->getType());
+ IndexVal = Builder.CreateIntCast(IndexVal, IntPtrTy,
+ /*isSigned*/ !TYPE_UNSIGNED(IndexType));
+
+ // If we are indexing over a fixed-size type, just use a GEP.
+ if (isSizeCompatible(ElementType)) {
+ // Avoid any assumptions about how the array type is represented in LLVM by
+ // doing the GEP on a pointer to the first array element.
+ Type *EltTy = ConvertType(ElementType);
+ ArrayAddr = Builder.CreateBitCast(ArrayAddr, EltTy->getPointerTo());
+ StringRef GEPName = flag_verbose_asm ? "ar" : "";
+ Value *Ptr = POINTER_TYPE_OVERFLOW_UNDEFINED ?
+ Builder.CreateInBoundsGEP(ArrayAddr, IndexVal, GEPName) :
+ Builder.CreateGEP(ArrayAddr, IndexVal, GEPName);
+ unsigned Alignment = MinAlign(ArrayAlign,
+ DL.getABITypeAlignment(EltTy));
+ return LValue(Builder.CreateBitCast(
+ Ptr, PointerType::getUnqual(ConvertType(TREE_TYPE(exp)))),
+ Alignment);
+ }
+
+ // Otherwise, just do raw, low-level pointer arithmetic. FIXME: this could be
+ // much nicer in cases like:
+ // float foo(int w, float A[][w], int g) { return A[g][0]; }
+
+ if (isa<VOID_TYPE>(TREE_TYPE(ArrayTreeType))) {
+ ArrayAddr = Builder.CreateBitCast(ArrayAddr,
+ Type::getInt8PtrTy(Context));
+ StringRef GEPName = flag_verbose_asm ? "va" : "";
+ ArrayAddr = POINTER_TYPE_OVERFLOW_UNDEFINED ?
+ Builder.CreateInBoundsGEP(ArrayAddr, IndexVal, GEPName) :
+ Builder.CreateGEP(ArrayAddr, IndexVal, GEPName);
+ return LValue(ArrayAddr, 1);
+ }
+
+ // FIXME: Might also get here if the element type has constant size, but is
+ // humongous. Add support for this case.
+ assert(TREE_OPERAND(exp, 3) &&
+ "Size missing for variable sized element!");
+ // ScaleFactor is the size of the element type in units divided by (exactly)
+ // TYPE_ALIGN_UNIT(ElementType).
+ Value *ScaleFactor = Builder.CreateIntCast(
+ EmitRegister(TREE_OPERAND(exp, 3)), IntPtrTy,
+ /*isSigned*/ false);
+ assert(isPowerOf2_32(TYPE_ALIGN(ElementType)) &&
+ "Alignment not a power of two!");
+ assert(TYPE_ALIGN(ElementType) >= 8 &&
+ "Unit size not a multiple of 8 bits!");
+ // ScaleType is chosen to correct for the division in ScaleFactor.
+ Type *ScaleType = IntegerType::get(Context, TYPE_ALIGN(ElementType));
+ ArrayAddr = Builder.CreateBitCast(ArrayAddr, ScaleType->getPointerTo());
+
+ IndexVal = Builder.CreateMul(IndexVal, ScaleFactor);
+ unsigned Alignment = MinAlign(ArrayAlign, TYPE_ALIGN(ElementType) / 8);
+ StringRef GEPName = flag_verbose_asm ? "ra" : "";
+ Value *Ptr = POINTER_TYPE_OVERFLOW_UNDEFINED ?
+ Builder.CreateInBoundsGEP(ArrayAddr, IndexVal, GEPName) :
+ Builder.CreateGEP(ArrayAddr, IndexVal, GEPName);
+ return LValue(Builder.CreateBitCast(
+ Ptr, PointerType::getUnqual(ConvertType(TREE_TYPE(exp)))), Alignment);
+ }
+
+ LValue TreeToLLVM::EmitLV_BIT_FIELD_REF(tree exp) {
+ LValue Ptr = EmitLV(TREE_OPERAND(exp, 0));
+ assert(!Ptr.isBitfield() &&
+ "BIT_FIELD_REF operands cannot be bitfields!");
+
+ unsigned BitStart = (unsigned) TREE_INT_CST_LOW(TREE_OPERAND(exp, 2));
+ unsigned BitSize = (unsigned) TREE_INT_CST_LOW(TREE_OPERAND(exp, 1));
+ Type *ValTy = ConvertType(TREE_TYPE(exp));
+
+ unsigned ValueSizeInBits = DL.getTypeSizeInBits(ValTy);
+ assert(BitSize <= ValueSizeInBits &&
+ "ValTy isn't large enough to hold the value loaded!");
+
+ assert(ValueSizeInBits == DL.getTypeAllocSizeInBits(ValTy) &&
+ "FIXME: BIT_FIELD_REF logic is broken for non-round types");
+
+ // BIT_FIELD_REF values can have BitStart values that are quite large. We
+ // know that the thing we are loading is ValueSizeInBits large. If BitStart
+ // is larger than ValueSizeInBits, bump the pointer over to where it should
+ // be.
+ if (unsigned UnitOffset = BitStart / ValueSizeInBits) {
+ // TODO: If Ptr.Ptr is a struct type or something, we can do much better
+ // than this; e.g. see what happens when compiling unwind-dw2-fde-darwin.c.
+ Ptr.Ptr = Builder.CreateBitCast(Ptr.Ptr, ValTy->getPointerTo());
+ Ptr.Ptr = Builder.CreateGEP(Ptr.Ptr, Builder.getInt32(UnitOffset),
+ flag_verbose_asm ? "bfr" : "");
+ unsigned OctetOffset = (UnitOffset * ValueSizeInBits) / 8;
+ Ptr.setAlignment(MinAlign(Ptr.getAlignment(), OctetOffset));
+ BitStart -= UnitOffset * ValueSizeInBits;
+ }
+
+ // If this is referring to the whole field, return the whole thing.
+ if (BitStart == 0 && BitSize == ValueSizeInBits) {
+ return LValue(Builder.CreateBitCast(Ptr.Ptr, ValTy->getPointerTo()),
+ Ptr.getAlignment());
+ }
+
+ return LValue(Builder.CreateBitCast(Ptr.Ptr, ValTy->getPointerTo()), 1,
+ BitStart, BitSize);
+ }
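Illustrative numbers for the pointer bump above: with ValTy = i32 (so ValueSizeInBits = 32) and BitStart = 70, UnitOffset = 70 / 32 = 2, the pointer advances by two i32 units (8 octets), the alignment becomes MinAlign(old, 8), and BitStart is reduced to 70 - 2 * 32 = 6.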
+
+ LValue TreeToLLVM::EmitLV_COMPONENT_REF(tree exp) {
+ LValue StructAddrLV = EmitLV(TREE_OPERAND(exp, 0));
+ tree FieldDecl = TREE_OPERAND(exp, 1);
+ unsigned LVAlign = StructAddrLV.getAlignment();
+
+ assert(isa<RECORD_OR_UNION_TYPE>(DECL_CONTEXT(FieldDecl)));
+
+ Type *StructTy = ConvertType(DECL_CONTEXT(FieldDecl));
+
+ assert((!StructAddrLV.isBitfield() || StructAddrLV.BitStart == 0) &&
+ "structs cannot be bitfields!");
+
+ StructAddrLV.Ptr = Builder.CreateBitCast(StructAddrLV.Ptr,
+ StructTy->getPointerTo());
+ Type *FieldTy = ConvertType(TREE_TYPE(FieldDecl));
+
+ // BitStart - This is the actual offset of the field from the start of the
+ // struct, in bits. For bitfields this may be on a non-byte boundary.
+ uint64_t FieldBitOffset = getInt64(DECL_FIELD_BIT_OFFSET(FieldDecl),
+ true);
+ unsigned BitStart;
+ Value *FieldPtr;
+
+ // If the GCC field directly corresponds to an LLVM field, handle it.
+ unsigned MemberIndex = GetFieldIndex(FieldDecl, StructTy);
+ if (MemberIndex < INT_MAX) {
+ assert(!TREE_OPERAND(exp, 2) && "Constant not gimple min invariant?");
+ // Get a pointer to the byte in which the GCC field starts.
+ FieldPtr = Builder.CreateStructGEP(StructAddrLV.Ptr, MemberIndex,
+ flag_verbose_asm ? "cr" : "");
+ // Within that byte, the bit at which the GCC field starts.
+ BitStart = FieldBitOffset & 7;
+ } else {
+ // Offset will hold the field offset in octets.
+ Value *Offset;
- // If the FIELD_DECL has an annotate attribute on it, emit it.
- if (lookup_attribute("annotate", DECL_ATTRIBUTES(FieldDecl)))
- FieldPtr = EmitFieldAnnotation(FieldPtr, FieldDecl);
+ if (TREE_OPERAND(exp, 2)) {
+ Offset = EmitRegister(TREE_OPERAND(exp, 2));
+ // At this point the offset is measured in units divided by (exactly)
+ // (DECL_OFFSET_ALIGN / BITS_PER_UNIT). Convert to octets.
+ unsigned factor = DECL_OFFSET_ALIGN(FieldDecl) / 8;
+ if (factor != 1)
+ Offset = Builder.CreateMul(
+ Offset, ConstantInt::get(Offset->getType(), factor));
+ } else {
+ assert(DECL_FIELD_OFFSET(FieldDecl) && "Field offset not available!");
+ Offset = EmitRegister(DECL_FIELD_OFFSET(FieldDecl));
+ // At this point the offset is measured in units. Convert to octets.
+ unsigned factor = BITS_PER_UNIT / 8;
+ if (factor != 1)
+ Offset = Builder.CreateMul(
+ Offset, ConstantInt::get(Offset->getType(), factor));
+ }
- // Make sure we return a pointer to the right type.
- Type *EltTy = ConvertType(TREE_TYPE(exp));
- FieldPtr = Builder.CreateBitCast(FieldPtr, EltTy->getPointerTo());
+ // Here BitStart gives the offset of the field in bits from Offset.
+ BitStart = FieldBitOffset;
- if (!isBitfield(FieldDecl)) {
- assert(BitStart == 0 && "Not a bitfield but not at a byte offset!");
- return LValue(FieldPtr, LVAlign);
- }
+ // Incorporate as much of it as possible into the pointer computation.
+ unsigned ByteOffset = BitStart / 8;
+ if (ByteOffset > 0) {
+ Offset = Builder.CreateAdd(Offset, ConstantInt::get(Offset->getType(),
+ ByteOffset));
+ BitStart -= ByteOffset * 8;
+ }
- assert(BitStart < 8 && "Bit offset not properly incorporated in the pointer");
- assert(DECL_SIZE(FieldDecl) && isa<INTEGER_CST>(DECL_SIZE(FieldDecl)) &&
- "Variable sized bitfield?");
- unsigned BitfieldSize = TREE_INT_CST_LOW(DECL_SIZE(FieldDecl));
- return LValue(FieldPtr, LVAlign, BitStart, BitfieldSize);
-}
+ Type *BytePtrTy = Type::getInt8PtrTy(Context);
+ FieldPtr = Builder.CreateBitCast(StructAddrLV.Ptr, BytePtrTy);
+ FieldPtr = Builder.CreateInBoundsGEP(FieldPtr, Offset,
+ flag_verbose_asm ? "rc" : "");
+ FieldPtr = Builder.CreateBitCast(FieldPtr, FieldTy->getPointerTo());
+ }
+
+ // Compute the alignment of the octet containing the first bit of the field,
+ // without assuming that the containing struct itself is properly aligned.
+ LVAlign = MinAlign(LVAlign, getFieldAlignment(FieldDecl));
+
+ // If the FIELD_DECL has an annotate attribute on it, emit it.
+ if (lookup_attribute("annotate", DECL_ATTRIBUTES(FieldDecl)))
+ FieldPtr = EmitFieldAnnotation(FieldPtr, FieldDecl);
+
+ // Make sure we return a pointer to the right type.
+ Type *EltTy = ConvertType(TREE_TYPE(exp));
+ FieldPtr = Builder.CreateBitCast(FieldPtr, EltTy->getPointerTo());
+
+ if (!isBitfield(FieldDecl)) {
+ assert(BitStart == 0 && "Not a bitfield but not at a byte offset!");
+ return LValue(FieldPtr, LVAlign);
+ }
+
+ assert(BitStart < 8 &&
+ "Bit offset not properly incorporated in the pointer");
+ assert(DECL_SIZE(FieldDecl) && isa<INTEGER_CST>(DECL_SIZE(FieldDecl)) &&
+ "Variable sized bitfield?");
+ unsigned BitfieldSize = TREE_INT_CST_LOW(DECL_SIZE(FieldDecl));
+ return LValue(FieldPtr, LVAlign, BitStart, BitfieldSize);
+ }
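Illustrative numbers for the variable-offset path: with DECL_OFFSET_ALIGN = 64 the gimple offset counts 64 / 8 = 8-octet units, so factor = 8 and an offset of 3 rescales to 24 octets; a FieldBitOffset of 11 then folds ByteOffset = 1 into the pointer and leaves BitStart = 11 - 8 = 3 for the bitfield machinery.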
+
+ LValue TreeToLLVM::EmitLV_DECL(tree exp) {
+ Value *Decl = DEFINITION_LOCAL(exp);
+ if (Decl == 0) {
+ if (errorcount || sorrycount) {
+ Type *Ty = ConvertType(TREE_TYPE(exp));
+ PointerType *PTy = Ty->getPointerTo();
+ LValue LV(ConstantPointerNull::get(PTy), 1);
+ return LV;
+ }
+ debug_tree(exp);
+ llvm_unreachable("Referencing decl that hasn't been laid out!");
+ }
-LValue TreeToLLVM::EmitLV_DECL(tree exp) {
- Value *Decl = DEFINITION_LOCAL(exp);
- if (Decl == 0) {
- if (errorcount || sorrycount) {
Type *Ty = ConvertType(TREE_TYPE(exp));
+ // If we have "extern void foo", make the global have type {} instead of
+ // type void.
+ if (Ty->isVoidTy())
+ Ty = StructType::get(Context);
PointerType *PTy = Ty->getPointerTo();
- LValue LV(ConstantPointerNull::get(PTy), 1);
- return LV;
+ unsigned Alignment = DECL_ALIGN(exp) / 8;
+ if (!Alignment)
+ Alignment = 1;
+
+ return LValue(Builder.CreateBitCast(Decl, PTy), Alignment);
}
- debug_tree(exp);
- llvm_unreachable("Referencing decl that hasn't been laid out!");
- }
- Type *Ty = ConvertType(TREE_TYPE(exp));
- // If we have "extern void foo", make the global have type {} instead of
- // type void.
- if (Ty->isVoidTy()) Ty = StructType::get(Context);
- PointerType *PTy = Ty->getPointerTo();
- unsigned Alignment = DECL_ALIGN(exp) / 8;
- if (!Alignment)
- Alignment = 1;
-
- return LValue(Builder.CreateBitCast(Decl, PTy), Alignment);
-}
-
-LValue TreeToLLVM::EmitLV_INDIRECT_REF(tree exp) {
- // The lvalue is just the address.
- LValue LV = LValue(EmitRegister(TREE_OPERAND(exp, 0)), expr_align(exp) / 8);
- // May need to change pointer type, for example when INDIRECT_REF is applied
- // to a void*, resulting in a non-void type.
- LV.Ptr = Builder.CreateBitCast(LV.Ptr,
- ConvertType(TREE_TYPE(exp))->getPointerTo());
- return LV;
-}
+ LValue TreeToLLVM::EmitLV_INDIRECT_REF(tree exp) {
+ // The lvalue is just the address.
+ LValue LV = LValue(EmitRegister(TREE_OPERAND(exp, 0)),
+ expr_align(exp) / 8);
+ // May need to change pointer type, for example when INDIRECT_REF is applied
+ // to a void*, resulting in a non-void type.
+ LV.Ptr = Builder.CreateBitCast(LV.Ptr, ConvertType(TREE_TYPE(exp))
+ ->getPointerTo());
+ return LV;
+ }
#if (GCC_MINOR > 5)
-LValue TreeToLLVM::EmitLV_MEM_REF(tree exp) {
- // The address is the first operand offset in bytes by the second.
- Value *Addr = EmitRegister(TREE_OPERAND(exp, 0));
- if (!integer_zerop(TREE_OPERAND(exp, 1))) {
- // Convert to a byte pointer and displace by the offset.
- Addr = Builder.CreateBitCast(Addr, GetUnitPointerType(Context));
- APInt Offset = getAPIntValue(TREE_OPERAND(exp, 1));
- // The address is always inside the referenced object, so "inbounds".
- Addr = Builder.CreateInBoundsGEP(Addr, ConstantInt::get(Context, Offset),
- flag_verbose_asm ? "mrf" : "");
- }
+ LValue TreeToLLVM::EmitLV_MEM_REF(tree exp) {
+ // The address is the first operand offset in bytes by the second.
+ Value *Addr = EmitRegister(TREE_OPERAND(exp, 0));
+ if (!integer_zerop(TREE_OPERAND(exp, 1))) {
+ // Convert to a byte pointer and displace by the offset.
+ Addr = Builder.CreateBitCast(Addr, GetUnitPointerType(Context));
+ APInt Offset = getAPIntValue(TREE_OPERAND(exp, 1));
+ // The address is always inside the referenced object, so "inbounds".
+ Addr = Builder.CreateInBoundsGEP(Addr,
+ ConstantInt::get(Context, Offset),
+ flag_verbose_asm ? "mrf" : "");
+ }
- // Ensure the pointer has the right type.
- Addr = Builder.CreateBitCast(Addr, getPointerToType(TREE_TYPE(exp)));
+ // Ensure the pointer has the right type.
+ Addr = Builder.CreateBitCast(Addr, getPointerToType(TREE_TYPE(exp)));
- unsigned Alignment =
+ unsigned Alignment =
#if (GCC_MINOR < 6)
- get_object_alignment(exp, TYPE_ALIGN(TREE_TYPE (exp)), BIGGEST_ALIGNMENT);
-#elif (GCC_MINOR < 7)
- std::max(get_object_alignment(exp, BIGGEST_ALIGNMENT),
- TYPE_ALIGN(TREE_TYPE (exp)));
+ get_object_alignment(exp, TYPE_ALIGN(TREE_TYPE(exp)),
+ BIGGEST_ALIGNMENT);
+#elif(GCC_MINOR < 7)
+ std::max(get_object_alignment(exp, BIGGEST_ALIGNMENT),
+ TYPE_ALIGN(TREE_TYPE(exp)));
#else
- get_object_or_type_alignment(exp);
+ get_object_or_type_alignment(exp);
#endif
- bool Volatile = TREE_THIS_VOLATILE(exp);
+ bool Volatile = TREE_THIS_VOLATILE(exp);
- return LValue(Addr, Alignment / 8, Volatile);
-}
+ return LValue(Addr, Alignment / 8, Volatile);
+ }
#endif
#if (GCC_MINOR < 6)
-LValue TreeToLLVM::EmitLV_MISALIGNED_INDIRECT_REF(tree exp) {
- // The lvalue is just the address. The alignment is given by operand 1.
- unsigned Alignment = tree_low_cst(TREE_OPERAND(exp, 1), true);
- // The alignment need not be a power of two, so replace it with the largest
- // power of two that divides it.
- Alignment &= -Alignment;
- if (!Alignment) Alignment = 8;
- assert(!(Alignment & 7) && "Alignment not in octets!");
- LValue LV = LValue(EmitRegister(TREE_OPERAND(exp, 0)), Alignment / 8);
- // May need to change pointer type, for example when MISALIGNED_INDIRECT_REF
- // is applied to a void*, resulting in a non-void type.
- LV.Ptr = Builder.CreateBitCast(LV.Ptr,
- ConvertType(TREE_TYPE(exp))->getPointerTo());
- return LV;
-}
+ LValue TreeToLLVM::EmitLV_MISALIGNED_INDIRECT_REF(tree exp) {
+ // The lvalue is just the address. The alignment is given by operand 1.
+ unsigned Alignment = tree_low_cst(TREE_OPERAND(exp, 1), true);
+ // The alignment need not be a power of two, so replace it with the largest
+ // power of two that divides it.
+ Alignment &= -Alignment;
+ if (!Alignment)
+ Alignment = 8;
+ assert(!(Alignment & 7) && "Alignment not in octets!");
+ LValue LV = LValue(EmitRegister(TREE_OPERAND(exp, 0)), Alignment / 8);
+ // May need to change pointer type, for example when MISALIGNED_INDIRECT_REF
+ // is applied to a void*, resulting in a non-void type.
+ LV.Ptr = Builder.CreateBitCast(LV.Ptr, ConvertType(TREE_TYPE(exp))
+ ->getPointerTo());
+ return LV;
+ }
#endif
-LValue TreeToLLVM::EmitLV_VIEW_CONVERT_EXPR(tree exp) {
- // The address is the address of the operand.
- LValue LV = EmitLV(TREE_OPERAND(exp, 0));
- // The type is the type of the expression.
- LV.Ptr = Builder.CreateBitCast(LV.Ptr,
- ConvertType(TREE_TYPE(exp))->getPointerTo());
- return LV;
-}
+ LValue TreeToLLVM::EmitLV_VIEW_CONVERT_EXPR(tree exp) {
+ // The address is the address of the operand.
+ LValue LV = EmitLV(TREE_OPERAND(exp, 0));
+ // The type is the type of the expression.
+ LV.Ptr = Builder.CreateBitCast(LV.Ptr, ConvertType(TREE_TYPE(exp))
+ ->getPointerTo());
+ return LV;
+ }
-LValue TreeToLLVM::EmitLV_WITH_SIZE_EXPR(tree exp) {
- // The address is the address of the operand.
- return EmitLV(TREE_OPERAND(exp, 0));
-}
+ LValue TreeToLLVM::EmitLV_WITH_SIZE_EXPR(tree exp) {
+ // The address is the address of the operand.
+ return EmitLV(TREE_OPERAND(exp, 0));
+ }
-LValue TreeToLLVM::EmitLV_XXXXPART_EXPR(tree exp, unsigned Idx) {
- LValue Ptr = EmitLV(TREE_OPERAND(exp, 0));
- assert(!Ptr.isBitfield() &&
- "REALPART_EXPR / IMAGPART_EXPR operands cannot be bitfields!");
- unsigned Alignment;
- if (Idx == 0)
- // REALPART alignment is same as the complex operand.
- Alignment = Ptr.getAlignment();
- else
- // IMAGPART alignment = MinAlign(Ptr.Alignment, sizeof field);
- Alignment = MinAlign(Ptr.getAlignment(),
- DL.getTypeAllocSize(Ptr.Ptr->getType()));
- return LValue(Builder.CreateStructGEP(Ptr.Ptr, Idx, flag_verbose_asm ?
- "prtxpr" : ""), Alignment);
-}
+ LValue TreeToLLVM::EmitLV_XXXXPART_EXPR(tree exp, unsigned Idx) {
+ LValue Ptr = EmitLV(TREE_OPERAND(exp, 0));
+ assert(!Ptr.isBitfield() &&
+ "REALPART_EXPR / IMAGPART_EXPR operands cannot be bitfields!");
+ unsigned Alignment;
+ if (Idx == 0)
+ // REALPART alignment is same as the complex operand.
+ Alignment = Ptr.getAlignment();
+ else
+ // IMAGPART alignment = MinAlign(Ptr.Alignment, sizeof field);
+ Alignment = MinAlign(Ptr.getAlignment(),
+ DL.getTypeAllocSize(Ptr.Ptr->getType()));
+ return LValue(Builder.CreateStructGEP(Ptr.Ptr, Idx, flag_verbose_asm ?
+ "prtxpr" : ""), Alignment);
+ }
-LValue TreeToLLVM::EmitLV_SSA_NAME(tree exp) {
- // TODO: Check the ssa name is being used as an rvalue, see EmitLoadOfLValue.
- Value *Temp = CreateTemporary(ConvertType(TREE_TYPE(exp)));
- Builder.CreateStore(EmitReg_SSA_NAME(exp), Temp);
- return LValue(Temp, 1);
-}
+ LValue TreeToLLVM::EmitLV_SSA_NAME(tree exp) {
+ // TODO: Check the ssa name is being used as an rvalue, see EmitLoadOfLValue.
+ Value *Temp = CreateTemporary(ConvertType(TREE_TYPE(exp)));
+ Builder.CreateStore(EmitReg_SSA_NAME(exp), Temp);
+ return LValue(Temp, 1);
+ }
-LValue TreeToLLVM::EmitLV_TARGET_MEM_REF(tree exp) {
- // TODO: Take the address space into account.
+ LValue TreeToLLVM::EmitLV_TARGET_MEM_REF(tree exp) {
+ // TODO: Take the address space into account.
- Value *Addr;
- Value *Delta = 0; // Offset from base pointer in units
+ Value *Addr;
+ Value *Delta = 0; // Offset from base pointer in units
#if (GCC_MINOR > 5)
- // Starting with gcc 4.6 the address is base + index * step + index2 + offset.
- Addr = EmitRegister(TMR_BASE(exp));
- if (TMR_INDEX2(exp) && !integer_zerop (TMR_INDEX2(exp)))
- Delta = EmitRegister(TMR_INDEX2(exp));
-#else
- // In gcc 4.5 the address is &symbol + base + index * step + offset.
- if (TMR_SYMBOL(exp)) {
- Addr = EmitLV(TMR_SYMBOL(exp)).Ptr;
- if (TMR_BASE(exp) && !integer_zerop (TMR_BASE(exp)))
- Delta = EmitRegister(TMR_BASE(exp));
- } else {
- assert(TMR_BASE(exp) && "TARGET_MEM_REF has neither base nor symbol!");
- Addr = EmitRegister(TMR_BASE(exp));
- // The type of BASE is sizetype or a pointer type. Convert sizetype to i8*.
- if (!isa<PointerType>(Addr->getType()))
- Addr = Builder.CreateIntToPtr(Addr, GetUnitPointerType(Context));
- }
+ // Starting with gcc 4.6 the address is base + index * step + index2 + offset.
+ Addr = EmitRegister(TMR_BASE(exp));
+ if (TMR_INDEX2(exp) && !integer_zerop(TMR_INDEX2(exp)))
+ Delta = EmitRegister(TMR_INDEX2(exp));
+#else
+ // In gcc 4.5 the address is &symbol + base + index * step + offset.
+ if (TMR_SYMBOL(exp)) {
+ Addr = EmitLV(TMR_SYMBOL(exp)).Ptr;
+ if (TMR_BASE(exp) && !integer_zerop(TMR_BASE(exp)))
+ Delta = EmitRegister(TMR_BASE(exp));
+ } else {
+ assert(TMR_BASE(exp) && "TARGET_MEM_REF has neither base nor symbol!");
+ Addr = EmitRegister(TMR_BASE(exp));
+ // The type of BASE is sizetype or a pointer type. Convert sizetype to i8*.
+ if (!isa<PointerType>(Addr->getType()))
+ Addr = Builder.CreateIntToPtr(Addr, GetUnitPointerType(Context));
+ }
#endif
- if (TMR_INDEX(exp)) {
- Value *Index = EmitRegister(TMR_INDEX(exp));
- if (TMR_STEP(exp) && !integer_onep (TMR_STEP(exp)))
- Index = Builder.CreateMul(Index, EmitRegisterConstant(TMR_STEP(exp)));
- Delta = Delta ? Builder.CreateAdd(Delta, Index) : Index;
- }
+ if (TMR_INDEX(exp)) {
+ Value *Index = EmitRegister(TMR_INDEX(exp));
+ if (TMR_STEP(exp) && !integer_onep(TMR_STEP(exp)))
+ Index = Builder.CreateMul(Index, EmitRegisterConstant(TMR_STEP(exp)));
+ Delta = Delta ? Builder.CreateAdd(Delta, Index) : Index;
+ }
- if (TMR_OFFSET(exp) && !integer_zerop (TMR_OFFSET(exp))) {
- Constant *Off = ConstantInt::get(Context, getAPIntValue(TMR_OFFSET(exp)));
- Delta = Delta ? Builder.CreateAdd(Delta, Off) : Off;
- }
+ if (TMR_OFFSET(exp) && !integer_zerop(TMR_OFFSET(exp))) {
+ Constant *Off = ConstantInt::get(Context,
+ getAPIntValue(TMR_OFFSET(exp)));
+ Delta = Delta ? Builder.CreateAdd(Delta, Off) : Off;
+ }
- if (Delta) {
- // Advance the base pointer by the given number of units.
- Addr = Builder.CreateBitCast(Addr, GetUnitPointerType(Context));
- StringRef GEPName = flag_verbose_asm ? "tmrf" : "";
- Addr = POINTER_TYPE_OVERFLOW_UNDEFINED ?
- Builder.CreateInBoundsGEP(Addr, Delta, GEPName)
- : Builder.CreateGEP(Addr, Delta, GEPName);
- }
+ if (Delta) {
+ // Advance the base pointer by the given number of units.
+ Addr = Builder.CreateBitCast(Addr, GetUnitPointerType(Context));
+ StringRef GEPName = flag_verbose_asm ? "tmrf" : "";
+ Addr = POINTER_TYPE_OVERFLOW_UNDEFINED ?
+ Builder.CreateInBoundsGEP(Addr, Delta, GEPName) :
+ Builder.CreateGEP(Addr, Delta, GEPName);
+ }
- // The result can be of a different pointer type even if we didn't advance it.
- Addr = Builder.CreateBitCast(Addr, getPointerToType(TREE_TYPE(exp)));
- unsigned Alignment =
+ // The result can be of a different pointer type even if we didn't advance it.
+ Addr = Builder.CreateBitCast(Addr, getPointerToType(TREE_TYPE(exp)));
+ unsigned Alignment =
#if (GCC_MINOR < 6)
- get_object_alignment(exp, TYPE_ALIGN(TREE_TYPE (exp)), BIGGEST_ALIGNMENT);
-#elif (GCC_MINOR < 7)
- std::max(get_object_alignment(exp, BIGGEST_ALIGNMENT),
- TYPE_ALIGN(TREE_TYPE (exp)));
+ get_object_alignment(exp, TYPE_ALIGN(TREE_TYPE(exp)),
+ BIGGEST_ALIGNMENT);
+#elif(GCC_MINOR < 7)
+ std::max(get_object_alignment(exp, BIGGEST_ALIGNMENT),
+ TYPE_ALIGN(TREE_TYPE(exp)));
#else
- get_object_or_type_alignment(exp);
+ get_object_or_type_alignment(exp);
#endif
- bool Volatile = TREE_THIS_VOLATILE(exp);
+ bool Volatile = TREE_THIS_VOLATILE(exp);
- return LValue(Addr, Alignment / 8, Volatile);
-}
+ return LValue(Addr, Alignment / 8, Volatile);
+ }
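Illustrative reading of the gcc 4.6+ form: with base p, index i, step 4, no index2 and offset 8, Delta comes out as i * 4 + 8, and the address is p viewed as i8* advanced by Delta units (inbounds when pointer overflow is undefined), then bitcast to a pointer to the access type.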
-Constant *TreeToLLVM::AddressOfLABEL_DECL(tree exp) {
- return BlockAddress::get(Fn, getLabelDeclBlock(exp));
-}
+ Constant *TreeToLLVM::AddressOfLABEL_DECL(tree exp) {
+ return BlockAddress::get(Fn, getLabelDeclBlock(exp));
+ }
+ //===----------------------------------------------------------------------===//
+ // ... Emit helpers ...
+ //===----------------------------------------------------------------------===//
-//===----------------------------------------------------------------------===//
-// ... Emit helpers ...
-//===----------------------------------------------------------------------===//
+ /// EmitMinInvariant - The given value is constant in this function. Return the
+ /// corresponding LLVM value. Only creates code in the entry block.
+ Value *TreeToLLVM::EmitMinInvariant(tree reg) {
+ Value *V = isa<ADDR_EXPR>(reg) ? EmitInvariantAddress(reg) :
+ EmitRegisterConstant(reg);
+ assert(V->getType() == getRegType(TREE_TYPE(reg)) &&
+ "Gimple min invariant has wrong type!");
+ return V;
+ }
-/// EmitMinInvariant - The given value is constant in this function. Return the
-/// corresponding LLVM value. Only creates code in the entry block.
-Value *TreeToLLVM::EmitMinInvariant(tree reg) {
- Value *V = isa<ADDR_EXPR>(reg) ?
- EmitInvariantAddress(reg) : EmitRegisterConstant(reg);
- assert(V->getType() == getRegType(TREE_TYPE(reg)) &&
- "Gimple min invariant has wrong type!");
- return V;
-}
-
-/// EmitInvariantAddress - The given address is constant in this function.
-/// Return the corresponding LLVM value. Only creates code in the entry block.
-Value *TreeToLLVM::EmitInvariantAddress(tree addr) {
- assert(is_gimple_invariant_address(addr) &&
- "Expected a locally constant address!");
- assert(is_gimple_reg_type(TREE_TYPE(addr)) && "Not of register type!");
-
- // Any generated code goes in the entry block.
- BasicBlock *EntryBlock = Fn->begin();
-
- // Note the current builder position.
- BasicBlock *SavedInsertBB = Builder.GetInsertBlock();
- BasicBlock::iterator SavedInsertPoint = Builder.GetInsertPoint();
-
- // Pop the entry block terminator. There may not be a terminator if we are
- // recursing or if the entry block was not yet finished.
- Instruction *Terminator = EntryBlock->getTerminator();
- assert(((SavedInsertBB != EntryBlock && Terminator) ||
- (SavedInsertPoint == EntryBlock->end() && !Terminator)) &&
- "Insertion point doesn't make sense!");
- if (Terminator)
- Terminator->removeFromParent();
+ /// EmitInvariantAddress - The given address is constant in this function.
+ /// Return the corresponding LLVM value. Only creates code in the entry block.
+ Value *TreeToLLVM::EmitInvariantAddress(tree addr) {
+ assert(is_gimple_invariant_address(addr) &&
+ "Expected a locally constant address!");
+ assert(is_gimple_reg_type(TREE_TYPE(addr)) && "Not of register type!");
- // Point the builder at the end of the entry block.
- Builder.SetInsertPoint(EntryBlock);
+ // Any generated code goes in the entry block.
+ BasicBlock *EntryBlock = Fn->begin();
- // Calculate the address.
- assert(isa<ADDR_EXPR>(addr) && "Invariant address not ADDR_EXPR!");
- Value *Address = EmitADDR_EXPR(addr);
-
- // Restore the entry block terminator.
- if (Terminator)
- EntryBlock->getInstList().push_back(Terminator);
-
- // Restore the builder insertion point.
- if (SavedInsertBB != EntryBlock)
- Builder.SetInsertPoint(SavedInsertBB, SavedInsertPoint);
-
- assert(Address->getType() == getRegType(TREE_TYPE(addr)) &&
- "Invariant address has wrong type!");
- return Address;
-}
-
-/// EmitRegisterConstant - Convert the given global constant of register type to
-/// an LLVM constant. Creates no code, only constants.
-Constant *TreeToLLVM::EmitRegisterConstant(tree reg) {
-#ifndef NDEBUG
- if (!is_gimple_constant(reg)) {
- debug_tree(reg);
- llvm_unreachable("Unsupported gimple!");
- }
-#endif
- assert(is_gimple_reg_type(TREE_TYPE(reg)) && "Not of register type!");
+ // Note the current builder position.
+ BasicBlock *SavedInsertBB = Builder.GetInsertBlock();
+ BasicBlock::iterator SavedInsertPoint = Builder.GetInsertPoint();
- switch (TREE_CODE(reg)) {
- default:
- debug_tree(reg);
- llvm_unreachable("Unhandled GIMPLE constant!");
+ // Pop the entry block terminator. There may not be a terminator if we are
+ // recursing or if the entry block was not yet finished.
+ Instruction *Terminator = EntryBlock->getTerminator();
+ assert(((SavedInsertBB != EntryBlock && Terminator) ||
+ (SavedInsertPoint == EntryBlock->end() && !Terminator)) &&
+ "Insertion point doesn't make sense!");
+ if (Terminator)
+ Terminator->removeFromParent();
- case INTEGER_CST:
- return EmitIntegerRegisterConstant(reg);
- case REAL_CST:
- return EmitRealRegisterConstant(reg);
- //case FIXED_CST: // Fixed point constant - not yet supported.
- //case STRING_CST: // Allowed by is_gimple_constant, but no known examples.
- case COMPLEX_CST:
- return EmitComplexRegisterConstant(reg);
- case VECTOR_CST:
- return EmitVectorRegisterConstant(reg);
- case CONSTRUCTOR:
- // Vector constant constructors are gimple invariant. See GCC testcase
- // pr34856.c for an example.
- return EmitConstantVectorConstructor(reg);
- }
-}
-
-/// EmitRegisterConstantWithCast - Utility that casts the value returned by
-/// EmitRegisterConstant to the given register type.
-Constant *TreeToLLVM::EmitRegisterConstantWithCast(tree reg, tree type) {
- Constant *C = EmitRegisterConstant(reg);
- if (TREE_TYPE(reg) == type)
- return C;
- // For vector types, TYPE_UNSIGNED returns the unsignedness of the element.
- bool SrcIsSigned = !TYPE_UNSIGNED(TREE_TYPE(reg));
- bool DstIsSigned = !TYPE_UNSIGNED(type);
- return CastToAnyType(C, SrcIsSigned, getRegType(type), DstIsSigned);
-}
-
-/// EncodeExpr - Write the given expression into Buffer as it would appear in
-/// memory on the target (the buffer is resized to contain exactly the bytes
-/// written). Return the number of bytes written; this can also be obtained
-/// by querying the buffer's size.
-/// The following kinds of expressions are currently supported: INTEGER_CST,
-/// REAL_CST, COMPLEX_CST, VECTOR_CST, STRING_CST.
-static unsigned EncodeExpr(tree exp, SmallVectorImpl<unsigned char> &Buffer) {
- const tree type = TREE_TYPE(exp);
- unsigned SizeInBytes = (TREE_INT_CST_LOW(TYPE_SIZE(type)) + 7) / 8;
- Buffer.resize(SizeInBytes);
- unsigned BytesWritten = native_encode_expr(exp, &Buffer[0], SizeInBytes);
- assert(BytesWritten == SizeInBytes && "Failed to fully encode expression!");
- return BytesWritten;
-}
-
-/// EmitComplexRegisterConstant - Turn the given COMPLEX_CST into an LLVM
-/// constant of the corresponding register type.
-Constant *TreeToLLVM::EmitComplexRegisterConstant(tree reg) {
- tree elt_type = TREE_TYPE(TREE_TYPE(reg));
- Constant *Elts[2] = {
- EmitRegisterConstantWithCast(TREE_REALPART(reg), elt_type),
- EmitRegisterConstantWithCast(TREE_IMAGPART(reg), elt_type)
- };
- return ConstantStruct::getAnon(Elts);
-}
-
-/// EmitIntegerRegisterConstant - Turn the given INTEGER_CST into an LLVM
-/// constant of the corresponding register type.
-Constant *TreeToLLVM::EmitIntegerRegisterConstant(tree reg) {
- ConstantInt *CI = ConstantInt::get(Context, getAPIntValue(reg));
- // The destination can be a pointer, integer or floating point type, so we
- // need a generalized cast here.
- Type *Ty = getRegType(TREE_TYPE(reg));
- Instruction::CastOps opcode = CastInst::getCastOpcode(CI, false, Ty,
- !TYPE_UNSIGNED(TREE_TYPE(reg)));
- return TheFolder->CreateCast(opcode, CI, Ty);
-}
-
-/// EmitRealRegisterConstant - Turn the given REAL_CST into an LLVM constant
-/// of the corresponding register type.
-Constant *TreeToLLVM::EmitRealRegisterConstant(tree reg) {
- // TODO: Rather than going through memory, construct the APFloat directly from
- // the real_value. This works fine for zero, infinity and NaN values, but
- // APFloat has no constructor for normal numbers, i.e. no way to construct a
- // normal number from its exponent and significand.
- // TODO: Test implementation on a big-endian machine.
-
- // Encode the constant in Buffer in target format.
- SmallVector<unsigned char, 16> Buffer;
- EncodeExpr(reg, Buffer);
-
- // Discard any alignment padding, which we assume comes at the end.
- unsigned Precision = TYPE_PRECISION(TREE_TYPE(reg));
- assert((Precision & 7) == 0 && "Unsupported real number precision!");
- Buffer.resize(Precision / 8);
-
- // We are going to view the buffer as an array of APInt words. Ensure that
- // the buffer contains a whole number of words by extending it if necessary.
- unsigned Words = (Precision + integerPartWidth - 1) / integerPartWidth;
- // On a little-endian machine extend the buffer by adding bytes to the end.
- Buffer.resize(Words * (integerPartWidth / 8));
- // On a big-endian machine extend the buffer by adding bytes to the beginning.
- if (BYTES_BIG_ENDIAN)
- std::copy_backward(Buffer.begin(), Buffer.begin() + Precision / 8,
- Buffer.end());
-
- // Ensure that the least significant word comes first: we are going to make an
- // APInt, and the APInt constructor wants the least significant word first.
- integerPart *Parts = (integerPart *)&Buffer[0];
- if (BYTES_BIG_ENDIAN)
- std::reverse(Parts, Parts + Words);
-
- Type *Ty = getRegType(TREE_TYPE(reg));
- if (Ty->isPPC_FP128Ty()) {
- // This type is actually a pair of doubles in disguise. They turn up the
- // wrong way round here, so flip them.
- assert(FLOAT_WORDS_BIG_ENDIAN && "PPC not big endian!");
- assert(Words == 2 && Precision == 128 && "Strange size for PPC_FP128!");
- std::swap(Parts[0], Parts[1]);
- }
-
- // Form an APInt from the buffer, an APFloat from the APInt, and the desired
- // floating point constant from the APFloat, phew!
- const APInt &I = APInt(Precision, Words, Parts);
- return ConstantFP::get(Context, APFloat(Ty->getFltSemantics(), I));
-}
-
-/// EmitConstantVectorConstructor - Turn the given constant CONSTRUCTOR into
-/// an LLVM constant of the corresponding vector register type.
-Constant *TreeToLLVM::EmitConstantVectorConstructor(tree reg) {
- // Get the constructor as an LLVM constant.
- Constant *C = ConvertInitializer(reg);
- // Load the vector register out of it.
- return ExtractRegisterFromConstant(C, TREE_TYPE(reg));
-}
-
-/// EmitVectorRegisterConstant - Turn the given VECTOR_CST into an LLVM constant
-/// of the corresponding register type.
-Constant *TreeToLLVM::EmitVectorRegisterConstant(tree reg) {
- // If there are no elements then immediately return the default value for a
- // small speedup.
- if (!TREE_VECTOR_CST_ELTS(reg))
- return getDefaultValue(getRegType(TREE_TYPE(reg)));
-
- // Convert the elements.
- SmallVector<Constant*, 16> Elts;
- tree elt_type = TREE_TYPE(TREE_TYPE(reg));
- for (tree elt = TREE_VECTOR_CST_ELTS(reg); elt; elt = TREE_CHAIN(elt))
- Elts.push_back(EmitRegisterConstantWithCast(TREE_VALUE(elt), elt_type));
-
- // If there weren't enough elements then set the rest of the vector to the
- // default value.
- if (Elts.size() < TYPE_VECTOR_SUBPARTS(TREE_TYPE(reg))) {
- Constant *Default = getDefaultValue(getRegType(elt_type));
- Elts.append(TYPE_VECTOR_SUBPARTS(TREE_TYPE(reg)) - Elts.size(), Default);
- }
-
- return ConstantVector::get(Elts);
-}
-
-/// VectorHighElements - Return a vector of half the length, consisting of the
-/// elements of the given vector with indices in the top half.
-Value *TreeToLLVM::VectorHighElements(Value *Vec) {
- VectorType *Ty = cast<VectorType>(Vec->getType());
- assert(!(Ty->getNumElements() & 1) && "Vector has odd number of elements!");
- unsigned NumElts = Ty->getNumElements() / 2;
- SmallVector<Constant*, 8> Mask;
- Mask.reserve(NumElts);
- for (unsigned i = 0; i != NumElts; ++i)
- Mask.push_back(Builder.getInt32(NumElts + i));
- return Builder.CreateShuffleVector(Vec, UndefValue::get(Ty),
- ConstantVector::get(Mask));
-}
-
-/// VectorLowElements - Return a vector of half the length, consisting of the
-/// elements of the given vector with indices in the bottom half.
-Value *TreeToLLVM::VectorLowElements(Value *Vec) {
- VectorType *Ty = cast<VectorType>(Vec->getType());
- assert(!(Ty->getNumElements() & 1) && "Vector has odd number of elements!");
- unsigned NumElts = Ty->getNumElements() / 2;
- SmallVector<Constant*, 8> Mask;
- Mask.reserve(NumElts);
- for (unsigned i = 0; i != NumElts; ++i)
- Mask.push_back(Builder.getInt32(i));
- return Builder.CreateShuffleVector(Vec, UndefValue::get(Ty),
- ConstantVector::get(Mask));
-}
+ // Point the builder at the end of the entry block.
+ Builder.SetInsertPoint(EntryBlock);
+ // Calculate the address.
+ assert(isa<ADDR_EXPR>(addr) && "Invariant address not ADDR_EXPR!");
+ Value *Address = EmitADDR_EXPR(addr);
-//===----------------------------------------------------------------------===//
-// ... EmitReg* - Convert register expression to LLVM...
-//===----------------------------------------------------------------------===//
+ // Restore the entry block terminator.
+ if (Terminator)
+ EntryBlock->getInstList().push_back(Terminator);
-/// EmitMemory - Convert the specified gimple register or local constant of
-/// register type to an LLVM value with in-memory type (given by ConvertType).
-Value *TreeToLLVM::EmitMemory(tree reg) {
- return Reg2Mem(EmitRegister(reg), TREE_TYPE(reg), Builder);
-}
-
-/// EmitRegister - Convert the specified gimple register or local constant of
-/// register type to an LLVM value. Only creates code in the entry block.
-Value *TreeToLLVM::EmitRegister(tree reg) {
- while (isa<OBJ_TYPE_REF>(reg)) reg = OBJ_TYPE_REF_EXPR(reg);
- return isa<SSA_NAME>(reg) ? EmitReg_SSA_NAME(reg) : EmitMinInvariant(reg);
-}
-
-/// EmitRegisterWithCast - Utility method that calls EmitRegister, then casts
-/// the returned value to the given register type.
-Value *TreeToLLVM::EmitRegisterWithCast(tree reg, tree type) {
- Value *V = EmitRegister(reg);
- if (TREE_TYPE(reg) == type)
- return V;
- // For vector types, TYPE_UNSIGNED returns the unsignedness of the element.
- bool SrcIsSigned = !TYPE_UNSIGNED(TREE_TYPE(reg));
- bool DstIsSigned = !TYPE_UNSIGNED(type);
- return CastToAnyType(V, SrcIsSigned, getRegType(type), DstIsSigned);
-}
+ // Restore the builder insertion point.
+ if (SavedInsertBB != EntryBlock)
+ Builder.SetInsertPoint(SavedInsertBB, SavedInsertPoint);
-/// EmitReg_SSA_NAME - Return the defining value of the given SSA_NAME.
-/// Only creates code in the entry block.
-Value *TreeToLLVM::EmitReg_SSA_NAME(tree reg) {
- assert(is_gimple_reg_type(TREE_TYPE(reg)) && "Not of register type!");
+ assert(Address->getType() == getRegType(TREE_TYPE(addr)) &&
+ "Invariant address has wrong type!");
+ return Address;
+ }
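
For illustration, the save/emit/restore dance above follows a generic pattern:
park the builder at the end of the entry block (popping the terminator if
there is one), emit, then put everything back. A minimal standalone sketch of
that pattern, assuming LLVM's IRBuilder; emitInEntryBlock is a hypothetical
helper, not dragonegg code:

  #include "llvm/IR/Function.h"
  #include "llvm/IR/IRBuilder.h"
  #include <functional>

  // Sketch only: run Emit with the builder pointed at the end of the entry
  // block, then restore the previous insertion point.
  static llvm::Value *
  emitInEntryBlock(llvm::Function &Fn, llvm::IRBuilder<> &Builder,
                   const std::function<llvm::Value *()> &Emit) {
    llvm::BasicBlock *Entry = &Fn.getEntryBlock();
    // Remember where the builder was pointing.
    llvm::BasicBlock *SavedBB = Builder.GetInsertBlock();
    llvm::BasicBlock::iterator SavedPt = Builder.GetInsertPoint();
    // Pop the terminator, if any, so emitted code lands at the block's end.
    llvm::Instruction *Term = Entry->getTerminator();
    if (Term)
      Term->removeFromParent();
    Builder.SetInsertPoint(Entry);
    llvm::Value *V = Emit();
    // Put the terminator back and restore the saved position.
    if (Term)
      Entry->getInstList().push_back(Term);
    if (SavedBB != Entry)
      Builder.SetInsertPoint(SavedBB, SavedPt);
    return V;
  }
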
- // If we already found the definition of the SSA name, return it.
- if (Value *ExistingValue = SSANames[reg]) {
- assert(ExistingValue->getType() == getRegType(TREE_TYPE(reg)) &&
- "SSA name has wrong type!");
- if (!isSSAPlaceholder(ExistingValue))
- return ExistingValue;
- }
+ /// EmitRegisterConstant - Convert the given global constant of register type to
+ /// an LLVM constant. Creates no code, only constants.
+ Constant *TreeToLLVM::EmitRegisterConstant(tree reg) {
+#ifndef NDEBUG
+ if (!is_gimple_constant(reg)) {
+ debug_tree(reg);
+ llvm_unreachable("Unsupported gimple!");
+ }
+#endif
+ assert(is_gimple_reg_type(TREE_TYPE(reg)) && "Not of register type!");
- // If this is not the definition of the SSA name, return a placeholder value.
- if (!SSA_NAME_IS_DEFAULT_DEF(reg)) {
- if (Value *ExistingValue = SSANames[reg])
- return ExistingValue; // The type was sanity checked above.
- return SSANames[reg] = GetSSAPlaceholder(getRegType(TREE_TYPE(reg)));
- }
+ switch (TREE_CODE(reg)) {
+ default:
+ debug_tree(reg);
+ llvm_unreachable("Unhandled GIMPLE constant!");
- // This SSA name is the default definition for the underlying symbol.
+ case INTEGER_CST:
+ return EmitIntegerRegisterConstant(reg);
+ case REAL_CST:
+ return EmitRealRegisterConstant(reg);
+ //case FIXED_CST: // Fixed point constant - not yet supported.
+ //case STRING_CST: // Allowed by is_gimple_constant, but no known examples.
+ case COMPLEX_CST:
+ return EmitComplexRegisterConstant(reg);
+ case VECTOR_CST:
+ return EmitVectorRegisterConstant(reg);
+ case CONSTRUCTOR:
+ // Vector constant constructors are gimple invariant. See GCC testcase
+ // pr34856.c for an example.
+ return EmitConstantVectorConstructor(reg);
+ }
+ }
+
+ /// EmitRegisterConstantWithCast - Utility that casts the value returned by
+ /// EmitRegisterConstant to the given register type.
+ Constant *TreeToLLVM::EmitRegisterConstantWithCast(tree reg, tree type) {
+ Constant *C = EmitRegisterConstant(reg);
+ if (TREE_TYPE(reg) == type)
+ return C;
+ // For vector types, TYPE_UNSIGNED returns the unsignedness of the element.
+ bool SrcIsSigned = !TYPE_UNSIGNED(TREE_TYPE(reg));
+ bool DstIsSigned = !TYPE_UNSIGNED(type);
+ return CastToAnyType(C, SrcIsSigned, getRegType(type), DstIsSigned);
+ }
+
+ /// EncodeExpr - Write the given expression into Buffer as it would appear in
+ /// memory on the target (the buffer is resized to contain exactly the bytes
+ /// written). Return the number of bytes written; this can also be obtained
+ /// by querying the buffer's size.
+ /// The following kinds of expressions are currently supported: INTEGER_CST,
+ /// REAL_CST, COMPLEX_CST, VECTOR_CST, STRING_CST.
+ static unsigned EncodeExpr(tree exp,
+ SmallVectorImpl<unsigned char> &Buffer) {
+ const tree type = TREE_TYPE(exp);
+ unsigned SizeInBytes = (TREE_INT_CST_LOW(TYPE_SIZE(type)) + 7) / 8;
+ Buffer.resize(SizeInBytes);
+ unsigned BytesWritten = native_encode_expr(exp, &Buffer[0], SizeInBytes);
+ assert(BytesWritten == SizeInBytes &&
+ "Failed to fully encode expression!");
+ return BytesWritten;
+ }
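
The size computation in EncodeExpr is the usual round-bits-up-to-bytes; for a
hypothetical type whose TYPE_SIZE is 20 bits:

  SizeInBytes = (20 + 7) / 8 = 3

so a partially used final byte is still allocated and written.
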
+
+ /// EmitComplexRegisterConstant - Turn the given COMPLEX_CST into an LLVM
+ /// constant of the corresponding register type.
+ Constant *TreeToLLVM::EmitComplexRegisterConstant(tree reg) {
+ tree elt_type = TREE_TYPE(TREE_TYPE(reg));
+ Constant *Elts[2] = { EmitRegisterConstantWithCast(TREE_REALPART(reg),
+ elt_type),
+ EmitRegisterConstantWithCast(TREE_IMAGPART(reg),
+ elt_type) };
+ return ConstantStruct::getAnon(Elts);
+ }
+
+ /// EmitIntegerRegisterConstant - Turn the given INTEGER_CST into an LLVM
+ /// constant of the corresponding register type.
+ Constant *TreeToLLVM::EmitIntegerRegisterConstant(tree reg) {
+ ConstantInt *CI = ConstantInt::get(Context, getAPIntValue(reg));
+ // The destination can be a pointer, integer or floating point type, so we
+ // need a generalized cast here.
+ Type *Ty = getRegType(TREE_TYPE(reg));
+ Instruction::CastOps opcode = CastInst::getCastOpcode(
+ CI, false, Ty,
+ !TYPE_UNSIGNED(TREE_TYPE(reg)));
+ return TheFolder->CreateCast(opcode, CI, Ty);
+ }
+
+ /// EmitRealRegisterConstant - Turn the given REAL_CST into an LLVM constant
+ /// of the corresponding register type.
+ Constant *TreeToLLVM::EmitRealRegisterConstant(tree reg) {
+ // TODO: Rather than going through memory, construct the APFloat directly from
+ // the real_value. This works fine for zero, infinity and NaN values, but
+ // APFloat has no constructor for normal numbers, i.e. no way to construct a
+ // normal number from its exponent and significand.
+ // TODO: Test implementation on a big-endian machine.
+
+ // Encode the constant in Buffer in target format.
+ SmallVector<unsigned char, 16> Buffer;
+ EncodeExpr(reg, Buffer);
+
+ // Discard any alignment padding, which we assume comes at the end.
+ unsigned Precision = TYPE_PRECISION(TREE_TYPE(reg));
+ assert((Precision & 7) == 0 && "Unsupported real number precision!");
+ Buffer.resize(Precision / 8);
+
+ // We are going to view the buffer as an array of APInt words. Ensure that
+ // the buffer contains a whole number of words by extending it if necessary.
+ unsigned Words = (Precision + integerPartWidth - 1) / integerPartWidth;
+ // On a little-endian machine extend the buffer by adding bytes to the end.
+ Buffer.resize(Words * (integerPartWidth / 8));
+ // On a big-endian machine extend the buffer by adding bytes to the beginning.
+ if (BYTES_BIG_ENDIAN)
+ std::copy_backward(Buffer.begin(), Buffer.begin() + Precision / 8,
+ Buffer.end());
+
+ // Ensure that the least significant word comes first: we are going to make an
+ // APInt, and the APInt constructor wants the least significant word first.
+ integerPart *Parts = (integerPart *)&Buffer[0];
+ if (BYTES_BIG_ENDIAN)
+ std::reverse(Parts, Parts + Words);
+
+ Type *Ty = getRegType(TREE_TYPE(reg));
+ if (Ty->isPPC_FP128Ty()) {
+ // This type is actually a pair of doubles in disguise. They turn up the
+ // wrong way round here, so flip them.
+ assert(FLOAT_WORDS_BIG_ENDIAN && "PPC not big endian!");
+ assert(Words == 2 && Precision == 128 && "Strange size for PPC_FP128!");
+ std::swap(Parts[0], Parts[1]);
+ }
+
+ // Form an APInt from the buffer, an APFloat from the APInt, and the desired
+ // floating point constant from the APFloat, phew!
+ const APInt &I = APInt(Precision, Words, Parts);
+ return ConstantFP::get(Context, APFloat(Ty->getFltSemantics(), I));
+ }
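
A worked example of the buffer arithmetic, assuming APInt words of 64 bits
(the typical integerPartWidth), a little-endian target and x86's 80-bit
extended precision type:

  Precision = 80  ->  buffer trimmed to 80 / 8 = 10 bytes
  Words = (80 + 63) / 64 = 2  ->  buffer re-extended to 2 * 8 = 16 bytes

The six pad bytes appended at the end land in the high bits of the second
word, beyond the 80 bits retained by the APInt.
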
+
+ /// EmitConstantVectorConstructor - Turn the given constant CONSTRUCTOR into
+ /// an LLVM constant of the corresponding vector register type.
+ Constant *TreeToLLVM::EmitConstantVectorConstructor(tree reg) {
+ // Get the constructor as an LLVM constant.
+ Constant *C = ConvertInitializer(reg);
+ // Load the vector register out of it.
+ return ExtractRegisterFromConstant(C, TREE_TYPE(reg));
+ }
+
+ /// EmitVectorRegisterConstant - Turn the given VECTOR_CST into an LLVM constant
+ /// of the corresponding register type.
+ Constant *TreeToLLVM::EmitVectorRegisterConstant(tree reg) {
+ // If there are no elements then immediately return the default value for a
+ // small speedup.
+ if (!TREE_VECTOR_CST_ELTS(reg))
+ return getDefaultValue(getRegType(TREE_TYPE(reg)));
+
+ // Convert the elements.
+ SmallVector<Constant *, 16> Elts;
+ tree elt_type = TREE_TYPE(TREE_TYPE(reg));
+ for (tree elt = TREE_VECTOR_CST_ELTS(reg); elt; elt = TREE_CHAIN(elt))
+ Elts.push_back(EmitRegisterConstantWithCast(TREE_VALUE(elt), elt_type));
+
+ // If there weren't enough elements then set the rest of the vector to the
+ // default value.
+ if (Elts.size() < TYPE_VECTOR_SUBPARTS(TREE_TYPE(reg))) {
+ Constant *Default = getDefaultValue(getRegType(elt_type));
+ Elts.append(TYPE_VECTOR_SUBPARTS(TREE_TYPE(reg)) - Elts.size(),
+ Default);
+ }
+
+ return ConstantVector::get(Elts);
+ }
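
As a hypothetical example of the padding step: a VECTOR_CST for a 4-lane
integer vector that lists only the elements 1 and 2 becomes

  <1, 2, D, D>

where D is the element type's default value as returned by getDefaultValue.
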
+
+ /// VectorHighElements - Return a vector of half the length, consisting of the
+ /// elements of the given vector with indices in the top half.
+ Value *TreeToLLVM::VectorHighElements(Value *Vec) {
+ VectorType *Ty = cast<VectorType>(Vec->getType());
+ assert(!(Ty->getNumElements() & 1) &&
+ "Vector has odd number of elements!");
+ unsigned NumElts = Ty->getNumElements() / 2;
+ SmallVector<Constant *, 8> Mask;
+ Mask.reserve(NumElts);
+ for (unsigned i = 0; i != NumElts; ++i)
+ Mask.push_back(Builder.getInt32(NumElts + i));
+ return Builder.CreateShuffleVector(Vec, UndefValue::get(Ty),
+ ConstantVector::get(Mask));
+ }
- // The underlying symbol is an SSA variable.
- tree var = SSA_NAME_VAR(reg);
- assert(SSA_VAR_P(var) && "Not an SSA variable!");
+ /// VectorLowElements - Return a vector of half the length, consisting of the
+ /// elements of the given vector with indices in the bottom half.
+ Value *TreeToLLVM::VectorLowElements(Value *Vec) {
+ VectorType *Ty = cast<VectorType>(Vec->getType());
+ assert(!(Ty->getNumElements() & 1) &&
+ "Vector has odd number of elements!");
+ unsigned NumElts = Ty->getNumElements() / 2;
+ SmallVector<Constant *, 8> Mask;
+ Mask.reserve(NumElts);
+ for (unsigned i = 0; i != NumElts; ++i)
+ Mask.push_back(Builder.getInt32(i));
+ return Builder.CreateShuffleVector(Vec, UndefValue::get(Ty),
+ ConstantVector::get(Mask));
+ }
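
In other words, the two helpers select the half-vectors with complementary
shuffle masks; for an 8-lane input the masks are, respectively,

  high half: <4, 5, 6, 7>
  low half:  <0, 1, 2, 3>

and the result vector has half as many lanes as the input.
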
- // If the variable is itself an ssa name, use its LLVM value.
- if (isa<SSA_NAME>(var)) {
- Value *Val = EmitReg_SSA_NAME(var);
- assert(Val->getType() == getRegType(TREE_TYPE(reg)) &&
- "SSA name has wrong type!");
- return DefineSSAName(reg, Val);
- }
+ //===----------------------------------------------------------------------===//
+ // ... EmitReg* - Convert register expression to LLVM...
+ //===----------------------------------------------------------------------===//
- // Otherwise the symbol is a VAR_DECL, PARM_DECL or RESULT_DECL. A default
- // definition is only created when the very first reference to the variable
- // in the function is a read, and it stands for the value read. For a
- // VAR_DECL that value is undefined (a RESULT_DECL can have an initial
- // value if the function returns a class by value).
- assert((isa<PARM_DECL>(var) || isa<RESULT_DECL>(var) ||
- isa<VAR_DECL>(var)) && "Unsupported SSA name definition!");
- if (isa<VAR_DECL>(var))
- return DefineSSAName(reg, UndefValue::get(getRegType(TREE_TYPE(reg))));
+ /// EmitMemory - Convert the specified gimple register or local constant of
+ /// register type to an LLVM value with in-memory type (given by ConvertType).
+ Value *TreeToLLVM::EmitMemory(tree reg) {
+ return Reg2Mem(EmitRegister(reg), TREE_TYPE(reg), Builder);
+ }
- // Read the initial value of the parameter and associate it with the ssa name.
- assert(DECL_LOCAL_IF_SET(var) != 0 && "Parameter not laid out?");
+ /// EmitRegister - Convert the specified gimple register or local constant of
+ /// register type to an LLVM value. Only creates code in the entry block.
+ Value *TreeToLLVM::EmitRegister(tree reg) {
+ while (isa<OBJ_TYPE_REF>(reg))
+ reg = OBJ_TYPE_REF_EXPR(reg);
+ return isa<SSA_NAME>(reg) ? EmitReg_SSA_NAME(reg) : EmitMinInvariant(reg);
+ }
- unsigned Alignment = DECL_ALIGN(var) / 8;
- assert(Alignment != 0 && "Parameter with unknown alignment!");
+ /// EmitRegisterWithCast - Utility method that calls EmitRegister, then casts
+ /// the returned value to the given register type.
+ Value *TreeToLLVM::EmitRegisterWithCast(tree reg, tree type) {
+ Value *V = EmitRegister(reg);
+ if (TREE_TYPE(reg) == type)
+ return V;
+ // For vector types, TYPE_UNSIGNED returns the unsignedness of the element.
+ bool SrcIsSigned = !TYPE_UNSIGNED(TREE_TYPE(reg));
+ bool DstIsSigned = !TYPE_UNSIGNED(type);
+ return CastToAnyType(V, SrcIsSigned, getRegType(type), DstIsSigned);
+ }
- // Perform the load in the entry block, after all parameters have been set up
- // with their initial values, and before any modifications to their values.
+ /// EmitReg_SSA_NAME - Return the defining value of the given SSA_NAME.
+ /// Only creates code in the entry block.
+ Value *TreeToLLVM::EmitReg_SSA_NAME(tree reg) {
+ assert(is_gimple_reg_type(TREE_TYPE(reg)) && "Not of register type!");
- // Create a builder that inserts code before the SSAInsertionPoint marker.
- LLVMBuilder SSABuilder(Context, Builder.getFolder());
- SSABuilder.SetInsertPoint(SSAInsertionPoint->getParent(), SSAInsertionPoint);
+ // If we already found the definition of the SSA name, return it.
+ if (Value *ExistingValue = SSANames[reg]) {
+ assert(ExistingValue->getType() == getRegType(TREE_TYPE(reg)) &&
+ "SSA name has wrong type!");
+ if (!isSSAPlaceholder(ExistingValue))
+ return ExistingValue;
+ }
- // Use it to load the parameter value.
- MemRef ParamLoc(DECL_LOCAL_IF_SET(var), Alignment, false);
- Value *Def = LoadRegisterFromMemory(ParamLoc, TREE_TYPE(reg), 0, SSABuilder);
+ // If this is not the definition of the SSA name, return a placeholder value.
+ if (!SSA_NAME_IS_DEFAULT_DEF(reg)) {
+ if (Value *ExistingValue = SSANames[reg])
+ return ExistingValue; // The type was sanity checked above.
+ return SSANames[reg] = GetSSAPlaceholder(getRegType(TREE_TYPE(reg)));
+ }
- if (flag_verbose_asm)
- NameValue(Def, reg);
- return DefineSSAName(reg, Def);
-}
+ // This SSA name is the default definition for the underlying symbol.
-// Unary expressions.
-Value *TreeToLLVM::EmitReg_ABS_EXPR(tree op) {
- if (!isa<FLOAT_TYPE>(TREE_TYPE(op))) {
- Value *Op = EmitRegister(op);
- Value *OpN = Builder.CreateNeg(Op, Op->getName()+"neg");
- ICmpInst::Predicate pred = TYPE_UNSIGNED(TREE_TYPE(op)) ?
- ICmpInst::ICMP_UGE : ICmpInst::ICMP_SGE;
- Value *Cmp = Builder.CreateICmp(pred, Op,
- Constant::getNullValue(Op->getType()), "abscond");
- return Builder.CreateSelect(Cmp, Op, OpN, Op->getName()+"abs");
- }
+ // The underlying symbol is an SSA variable.
+ tree var = SSA_NAME_VAR(reg);
+ assert(SSA_VAR_P(var) && "Not an SSA variable!");
- if (isa<VECTOR_TYPE>(TREE_TYPE(op))) {
- // Clear the sign bits.
- Value *Op = EmitRegister(op);
- VectorType *VecTy = cast<VectorType>(Op->getType());
+ // If the variable is itself an ssa name, use its LLVM value.
+ if (isa<SSA_NAME>(var)) {
+ Value *Val = EmitReg_SSA_NAME(var);
+ assert(Val->getType() == getRegType(TREE_TYPE(reg)) &&
+ "SSA name has wrong type!");
+ return DefineSSAName(reg, Val);
+ }
- // Mask = ~(1 << (Bits-1)).
- unsigned Bits = VecTy->getElementType()->getPrimitiveSizeInBits();
- Type *IntTy = IntegerType::get(Context, Bits);
- Type *IntVecTy = VectorType::get(IntTy, VecTy->getNumElements());
- APInt API = APInt::getAllOnesValue(Bits);
- API.clearBit(Bits-1);
- Constant *Mask = ConstantInt::get(IntVecTy, API);
+ // Otherwise the symbol is a VAR_DECL, PARM_DECL or RESULT_DECL. A default
+ // definition is only created when the very first reference to the variable
+ // in the function is a read, and it stands for the value read. For a
+ // VAR_DECL that value is undefined (a RESULT_DECL can have an initial
+ // value if the function returns a class by value).
+ assert((isa<PARM_DECL>(var) || isa<RESULT_DECL>(var) ||
+ isa<VAR_DECL>(var)) && "Unsupported SSA name definition!");
+ if (isa<VAR_DECL>(var))
+ return DefineSSAName(reg, UndefValue::get(getRegType(TREE_TYPE(reg))));
- // Zap the sign bits.
- Op = Builder.CreateBitCast(Op, IntVecTy);
- Op = Builder.CreateAnd(Op, Mask);
- Op = Builder.CreateBitCast(Op, VecTy);
- return Op;
- }
+ // Read the initial value of the parameter and associate it with the ssa name.
+ assert(DECL_LOCAL_IF_SET(var) != 0 && "Parameter not laid out?");
- // Turn FP abs into fabs/fabsf.
- StringRef Name = SelectFPName(TREE_TYPE(op), "fabsf", "fabs", "fabsl");
- if (!Name.empty()) {
- CallInst *Call = EmitSimpleCall(Name, TREE_TYPE(op), op, NULL);
- Call->setDoesNotThrow();
- Call->setDoesNotAccessMemory();
- return Call;
- }
+ unsigned Alignment = DECL_ALIGN(var) / 8;
+ assert(Alignment != 0 && "Parameter with unknown alignment!");
- // Otherwise clear the sign bit.
- Value *Op = EmitRegister(op);
- Type *Ty = Op->getType();
+ // Perform the load in the entry block, after all parameters have been set up
+ // with their initial values, and before any modifications to their values.
- // Mask = ~(1 << (Bits-1)).
- unsigned Bits = Ty->getPrimitiveSizeInBits();
- Type *IntTy = IntegerType::get(Context, Bits);
- APInt API = APInt::getAllOnesValue(Bits);
- API.clearBit(Bits-1);
- Constant *Mask = ConstantInt::get(IntTy, API);
+ // Create a builder that inserts code before the SSAInsertionPoint marker.
+ LLVMBuilder SSABuilder(Context, Builder.getFolder());
+ SSABuilder.SetInsertPoint(SSAInsertionPoint->getParent(),
+ SSAInsertionPoint);
- // Zap the sign bit.
- Op = Builder.CreateBitCast(Op, IntTy);
- Op = Builder.CreateAnd(Op, Mask);
- Op = Builder.CreateBitCast(Op, Ty);
- return Op;
-}
+ // Use it to load the parameter value.
+ MemRef ParamLoc(DECL_LOCAL_IF_SET(var), Alignment, false);
+ Value *Def = LoadRegisterFromMemory(ParamLoc, TREE_TYPE(reg), 0,
+ SSABuilder);
-Value *TreeToLLVM::EmitReg_BIT_NOT_EXPR(tree op) {
- Value *Op = EmitRegister(op);
- return Builder.CreateNot(Op, Op->getName()+"not");
-}
+ if (flag_verbose_asm)
+ NameValue(Def, reg);
+ return DefineSSAName(reg, Def);
+ }
-Value *TreeToLLVM::EmitReg_CONJ_EXPR(tree op) {
- tree elt_type = TREE_TYPE(TREE_TYPE(op));
- Value *R, *I;
- SplitComplex(EmitRegister(op), R, I);
+ // Unary expressions.
+ Value *TreeToLLVM::EmitReg_ABS_EXPR(tree op) {
+ if (!isa<FLOAT_TYPE>(TREE_TYPE(op))) {
+ Value *Op = EmitRegister(op);
+ Value *OpN = Builder.CreateNeg(Op, Op->getName() + "neg");
+ ICmpInst::Predicate pred = TYPE_UNSIGNED(TREE_TYPE(op)) ?
+ ICmpInst::ICMP_UGE : ICmpInst::ICMP_SGE;
+ Value *Cmp = Builder.CreateICmp(pred, Op,
+ Constant::getNullValue(Op->getType()),
+ "abscond");
+ return Builder.CreateSelect(Cmp, Op, OpN, Op->getName() + "abs");
+ }
- // ~(a+ib) = a + i*-b
- I = CreateAnyNeg(I, elt_type);
+ if (isa<VECTOR_TYPE>(TREE_TYPE(op))) {
+ // Clear the sign bits.
+ Value *Op = EmitRegister(op);
+ VectorType *VecTy = cast<VectorType>(Op->getType());
- return CreateComplex(R, I);
-}
+ // Mask = ~(1 << (Bits-1)).
+ unsigned Bits = VecTy->getElementType()->getPrimitiveSizeInBits();
+ Type *IntTy = IntegerType::get(Context, Bits);
+ Type *IntVecTy = VectorType::get(IntTy, VecTy->getNumElements());
+ APInt API = APInt::getAllOnesValue(Bits);
+ API.clearBit(Bits - 1);
+ Constant *Mask = ConstantInt::get(IntVecTy, API);
-Value *TreeToLLVM::EmitReg_CONVERT_EXPR(tree type, tree op) {
- return EmitRegisterWithCast(op, type);
-}
+ // Zap the sign bits.
+ Op = Builder.CreateBitCast(Op, IntVecTy);
+ Op = Builder.CreateAnd(Op, Mask);
+ Op = Builder.CreateBitCast(Op, VecTy);
+ return Op;
+ }
-Value *TreeToLLVM::EmitReg_NEGATE_EXPR(tree op) {
- Value *V = EmitRegister(op);
- tree type = TREE_TYPE(op);
+ // Turn FP abs into fabs/fabsf.
+ StringRef Name = SelectFPName(TREE_TYPE(op), "fabsf", "fabs", "fabsl");
+ if (!Name.empty()) {
+ CallInst *Call = EmitSimpleCall(Name, TREE_TYPE(op), op, NULL);
+ Call->setDoesNotThrow();
+ Call->setDoesNotAccessMemory();
+ return Call;
+ }
- if (isa<COMPLEX_TYPE>(type)) {
- tree elt_type = TREE_TYPE(type);
- Value *R, *I; SplitComplex(V, R, I);
+ // Otherwise clear the sign bit.
+ Value *Op = EmitRegister(op);
+ Type *Ty = Op->getType();
- // -(a+ib) = -a + i*-b
- R = CreateAnyNeg(R, elt_type);
- I = CreateAnyNeg(I, elt_type);
+ // Mask = ~(1 << (Bits-1)).
+ unsigned Bits = Ty->getPrimitiveSizeInBits();
+ Type *IntTy = IntegerType::get(Context, Bits);
+ APInt API = APInt::getAllOnesValue(Bits);
+ API.clearBit(Bits - 1);
+ Constant *Mask = ConstantInt::get(IntTy, API);
- return CreateComplex(R, I);
- }
+ // Zap the sign bit.
+ Op = Builder.CreateBitCast(Op, IntTy);
+ Op = Builder.CreateAnd(Op, Mask);
+ Op = Builder.CreateBitCast(Op, Ty);
+ return Op;
+ }
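
The mask construction above is the usual clear-the-sign-bit trick; as a worked
example for a 32-bit float:

  Bits = 32,  Mask = ~(1 << 31) = 0x7fffffff

ANDing an IEEE-754 bit pattern with this mask zeroes the sign bit and leaves
the exponent and significand alone, which is exactly the effect of fabs.
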
- return CreateAnyNeg(V, type);
-}
+ Value *TreeToLLVM::EmitReg_BIT_NOT_EXPR(tree op) {
+ Value *Op = EmitRegister(op);
+ return Builder.CreateNot(Op, Op->getName() + "not");
+ }
-Value *TreeToLLVM::EmitReg_PAREN_EXPR(tree op) {
- // TODO: Understand and correctly deal with this subtle expression.
- return EmitRegister(op);
-}
+ Value *TreeToLLVM::EmitReg_CONJ_EXPR(tree op) {
+ tree elt_type = TREE_TYPE(TREE_TYPE(op));
+ Value *R, *I;
+ SplitComplex(EmitRegister(op), R, I);
-Value *TreeToLLVM::EmitReg_TRUTH_NOT_EXPR(tree type, tree op) {
- Value *V = EmitRegister(op);
- if (!V->getType()->isIntegerTy(1))
- V = Builder.CreateICmpNE(V,
- Constant::getNullValue(V->getType()), "toBool");
- V = Builder.CreateNot(V, V->getName()+"not");
- return Builder.CreateIntCast(V, getRegType(type), /*isSigned*/false);
-}
+ // ~(a+ib) = a + i*-b
+ I = CreateAnyNeg(I, elt_type);
-// Comparisons.
+ return CreateComplex(R, I);
+ }
-/// EmitCompare - Compare LHS with RHS using the appropriate comparison code.
-/// The result is an i1 boolean.
-Value *TreeToLLVM::EmitCompare(tree lhs, tree rhs, unsigned code) {
- Value *LHS = EmitRegister(lhs);
- Value *RHS = TriviallyTypeConvert(EmitRegister(rhs), LHS->getType());
+ Value *TreeToLLVM::EmitReg_CONVERT_EXPR(tree type, tree op) {
+ return EmitRegisterWithCast(op, type);
+ }
- // Compute the LLVM opcodes corresponding to the GCC comparison.
- CmpInst::Predicate UIPred = CmpInst::BAD_ICMP_PREDICATE;
- CmpInst::Predicate SIPred = CmpInst::BAD_ICMP_PREDICATE;
- CmpInst::Predicate FPPred = CmpInst::BAD_FCMP_PREDICATE;
+ Value *TreeToLLVM::EmitReg_NEGATE_EXPR(tree op) {
+ Value *V = EmitRegister(op);
+ tree type = TREE_TYPE(op);
- switch (code) {
- default:
- llvm_unreachable("Unhandled condition code!");
- case LT_EXPR:
- UIPred = CmpInst::ICMP_ULT;
- SIPred = CmpInst::ICMP_SLT;
- FPPred = CmpInst::FCMP_OLT;
- break;
- case LE_EXPR:
- UIPred = CmpInst::ICMP_ULE;
- SIPred = CmpInst::ICMP_SLE;
- FPPred = CmpInst::FCMP_OLE;
- break;
- case GT_EXPR:
- UIPred = CmpInst::ICMP_UGT;
- SIPred = CmpInst::ICMP_SGT;
- FPPred = CmpInst::FCMP_OGT;
- break;
- case GE_EXPR:
- UIPred = CmpInst::ICMP_UGE;
- SIPred = CmpInst::ICMP_SGE;
- FPPred = CmpInst::FCMP_OGE;
- break;
- case EQ_EXPR:
- UIPred = SIPred = CmpInst::ICMP_EQ;
- FPPred = CmpInst::FCMP_OEQ;
- break;
- case NE_EXPR:
- UIPred = SIPred = CmpInst::ICMP_NE;
- FPPred = CmpInst::FCMP_UNE;
- break;
- case UNORDERED_EXPR: FPPred = CmpInst::FCMP_UNO; break;
- case ORDERED_EXPR: FPPred = CmpInst::FCMP_ORD; break;
- case UNLT_EXPR: FPPred = CmpInst::FCMP_ULT; break;
- case UNLE_EXPR: FPPred = CmpInst::FCMP_ULE; break;
- case UNGT_EXPR: FPPred = CmpInst::FCMP_UGT; break;
- case UNGE_EXPR: FPPred = CmpInst::FCMP_UGE; break;
- case UNEQ_EXPR: FPPred = CmpInst::FCMP_UEQ; break;
- case LTGT_EXPR: FPPred = CmpInst::FCMP_ONE; break;
- }
-
- if (isa<COMPLEX_TYPE>(TREE_TYPE(lhs))) {
- Value *LHSr, *LHSi;
- SplitComplex(LHS, LHSr, LHSi);
- Value *RHSr, *RHSi;
- SplitComplex(RHS, RHSr, RHSi);
-
- Value *DSTr, *DSTi;
- if (LHSr->getType()->isFloatingPointTy()) {
- DSTr = Builder.CreateFCmp(FPPred, LHSr, RHSr);
- DSTi = Builder.CreateFCmp(FPPred, LHSi, RHSi);
- if (FPPred == CmpInst::FCMP_OEQ)
- return Builder.CreateAnd(DSTr, DSTi);
- assert(FPPred == CmpInst::FCMP_UNE && "Unhandled complex comparison!");
- return Builder.CreateOr(DSTr, DSTi);
- }
-
- assert(SIPred == UIPred && "(In)equality comparison depends on sign!");
- DSTr = Builder.CreateICmp(UIPred, LHSr, RHSr);
- DSTi = Builder.CreateICmp(UIPred, LHSi, RHSi);
- if (UIPred == CmpInst::ICMP_EQ)
- return Builder.CreateAnd(DSTr, DSTi);
- assert(UIPred == CmpInst::ICMP_NE && "Unhandled complex comparison!");
- return Builder.CreateOr(DSTr, DSTi);
- }
-
- if (LHS->getType()->isFPOrFPVectorTy())
- return Builder.CreateFCmp(FPPred, LHS, RHS);
-
- // Determine which predicate to use based on signedness.
- CmpInst::Predicate pred = TYPE_UNSIGNED(TREE_TYPE(lhs)) ? UIPred : SIPred;
- return Builder.CreateICmp(pred, LHS, RHS);
-}
-
-Value *TreeToLLVM::EmitReg_MinMaxExpr(tree op0, tree op1, unsigned UIPred,
- unsigned SIPred, unsigned FPPred) {
- Value *LHS = EmitRegister(op0);
- Value *RHS = EmitRegister(op1);
-
- Value *Compare;
- if (isa<FLOAT_TYPE>(TREE_TYPE(op0)))
- Compare = Builder.CreateFCmp(FCmpInst::Predicate(FPPred), LHS, RHS);
- else if (TYPE_UNSIGNED(TREE_TYPE(op0)))
- Compare = Builder.CreateICmp(ICmpInst::Predicate(UIPred), LHS, RHS);
- else
- Compare = Builder.CreateICmp(ICmpInst::Predicate(SIPred), LHS, RHS);
+ if (isa<COMPLEX_TYPE>(type)) {
+ tree elt_type = TREE_TYPE(type);
+ Value *R, *I;
+ SplitComplex(V, R, I);
- return Builder.CreateSelect(Compare, LHS, RHS);
-}
+ // -(a+ib) = -a + i*-b
+ R = CreateAnyNeg(R, elt_type);
+ I = CreateAnyNeg(I, elt_type);
-Value *TreeToLLVM::EmitReg_ReducMinMaxExpr(tree op, unsigned UIPred,
- unsigned SIPred, unsigned FPPred) {
- // In the bottom half of the vector, form the max/min of the bottom and top
- // halves of the vector. Rinse and repeat on the just computed bottom half:
- // in the bottom quarter of the vector, form the max/min of the bottom and
- // top halves of the bottom half. Continue until only the first element of
- // the vector is computed. For example, reduc-max <x0, x1, x2, x3> becomes
- // v = max <x0, x1, undef, undef>, <x2, x3, undef, undef>
- // w = max <v0, undef, undef, undef>, <v1, undef, undef, undef>
- // where v = <v0, v1, undef, undef>. The first element of w is the max/min
- // of x0,x1,x2,x3.
- Value *Val = EmitRegister(op);
- Type *Ty = Val->getType();
-
- CmpInst::Predicate Pred =
- CmpInst::Predicate(isa<FLOAT_TYPE>(TREE_TYPE(op)) ?
- FPPred : TYPE_UNSIGNED(TREE_TYPE(op)) ? UIPred : SIPred);
-
- unsigned Length = (unsigned)TYPE_VECTOR_SUBPARTS(TREE_TYPE(op));
- assert(Length > 1 && !(Length & (Length - 1)) && "Length not a power of 2!");
- SmallVector<Constant*, 8> Mask(Length);
- Constant *UndefIndex = UndefValue::get(Type::getInt32Ty(Context));
- for (unsigned Elts = Length >> 1; Elts; Elts >>= 1) {
- // In the extracted vectors, elements with index Elts and on are undefined.
- for (unsigned i = Elts; i != Length; ++i)
- Mask[i] = UndefIndex;
- // Extract elements [0, Elts) from Val.
- for (unsigned i = 0; i != Elts; ++i)
- Mask[i] = Builder.getInt32(i);
- Value *LHS = Builder.CreateShuffleVector(Val, UndefValue::get(Ty),
- ConstantVector::get(Mask));
- // Extract elements [Elts, 2*Elts) from Val.
- for (unsigned i = 0; i != Elts; ++i)
- Mask[i] = Builder.getInt32(Elts + i);
- Value *RHS = Builder.CreateShuffleVector(Val, UndefValue::get(Ty),
- ConstantVector::get(Mask));
+ return CreateComplex(R, I);
+ }
- // Replace Val with the max/min of the extracted elements.
- Value *Compare = isa<FLOAT_TYPE>(TREE_TYPE(op)) ?
- Builder.CreateFCmp(Pred, LHS, RHS) : Builder.CreateICmp(Pred, LHS, RHS);
- Val = Builder.CreateSelect(Compare, LHS, RHS);
+ return CreateAnyNeg(V, type);
+ }
- // Repeat, using half as many elements.
- }
+ Value *TreeToLLVM::EmitReg_PAREN_EXPR(tree op) {
+ // TODO: Understand and correctly deal with this subtle expression.
+ return EmitRegister(op);
+ }
- return Val;
-}
+ Value *TreeToLLVM::EmitReg_TRUTH_NOT_EXPR(tree type, tree op) {
+ Value *V = EmitRegister(op);
+ if (!V->getType()->isIntegerTy(1))
+ V = Builder.CreateICmpNE(V, Constant::getNullValue(V->getType()),
+ "toBool");
+ V = Builder.CreateNot(V, V->getName() + "not");
+ return Builder.CreateIntCast(V, getRegType(type), /*isSigned*/ false);
+ }
-Value *TreeToLLVM::EmitReg_REDUC_PLUS_EXPR(tree op) {
- // In the bottom half of the vector, form the sum of the bottom and top halves
- // of the vector. Rinse and repeat on the just computed bottom half: in the
- // bottom quarter of the vector, form the sum of the bottom and top halves of
- // the bottom half. Continue until only the first element of the vector is
- // computed. For example, reduc-plus <x0, x1, x2, x3> becomes
- // v = <x0, x1, undef, undef> + <x2, x3, undef, undef>
- // w = <v0, undef, undef, undef> + <v1, undef, undef, undef>
- // where v = <v0, v1, undef, undef>. The first element of w is x0+x1+x2+x3.
- Value *Val = EmitRegister(op);
- Type *Ty = Val->getType();
-
- unsigned Length = (unsigned)TYPE_VECTOR_SUBPARTS(TREE_TYPE(op));
- assert(Length > 1 && !(Length & (Length - 1)) && "Length not a power of 2!");
- SmallVector<Constant*, 8> Mask(Length);
- Constant *UndefIndex = UndefValue::get(Type::getInt32Ty(Context));
- for (unsigned Elts = Length >> 1; Elts; Elts >>= 1) {
- // In the extracted vectors, elements with index Elts and on are undefined.
- for (unsigned i = Elts; i != Length; ++i)
- Mask[i] = UndefIndex;
- // Extract elements [0, Elts) from Val.
- for (unsigned i = 0; i != Elts; ++i)
- Mask[i] = Builder.getInt32(i);
- Value *LHS = Builder.CreateShuffleVector(Val, UndefValue::get(Ty),
- ConstantVector::get(Mask));
- // Extract elements [Elts, 2*Elts) from Val.
- for (unsigned i = 0; i != Elts; ++i)
- Mask[i] = Builder.getInt32(Elts + i);
- Value *RHS = Builder.CreateShuffleVector(Val, UndefValue::get(Ty),
- ConstantVector::get(Mask));
+ // Comparisons.
- // Replace Val with the sum of the extracted elements.
- // TODO: Are nsw/nuw flags valid here?
- Val = CreateAnyAdd(LHS, RHS, TREE_TYPE(TREE_TYPE(op)));
+ /// EmitCompare - Compare LHS with RHS using the appropriate comparison code.
+ /// The result is an i1 boolean.
+ Value *TreeToLLVM::EmitCompare(tree lhs, tree rhs, unsigned code) {
+ Value *LHS = EmitRegister(lhs);
+ Value *RHS = TriviallyTypeConvert(EmitRegister(rhs), LHS->getType());
- // Repeat, using half as many elements.
- }
+ // Compute the LLVM opcodes corresponding to the GCC comparison.
+ CmpInst::Predicate UIPred = CmpInst::BAD_ICMP_PREDICATE;
+ CmpInst::Predicate SIPred = CmpInst::BAD_ICMP_PREDICATE;
+ CmpInst::Predicate FPPred = CmpInst::BAD_FCMP_PREDICATE;
- return Val;
-}
+ switch (code) {
+ default:
+ llvm_unreachable("Unhandled condition code!");
+ case LT_EXPR:
+ UIPred = CmpInst::ICMP_ULT;
+ SIPred = CmpInst::ICMP_SLT;
+ FPPred = CmpInst::FCMP_OLT;
+ break;
+ case LE_EXPR:
+ UIPred = CmpInst::ICMP_ULE;
+ SIPred = CmpInst::ICMP_SLE;
+ FPPred = CmpInst::FCMP_OLE;
+ break;
+ case GT_EXPR:
+ UIPred = CmpInst::ICMP_UGT;
+ SIPred = CmpInst::ICMP_SGT;
+ FPPred = CmpInst::FCMP_OGT;
+ break;
+ case GE_EXPR:
+ UIPred = CmpInst::ICMP_UGE;
+ SIPred = CmpInst::ICMP_SGE;
+ FPPred = CmpInst::FCMP_OGE;
+ break;
+ case EQ_EXPR:
+ UIPred = SIPred = CmpInst::ICMP_EQ;
+ FPPred = CmpInst::FCMP_OEQ;
+ break;
+ case NE_EXPR:
+ UIPred = SIPred = CmpInst::ICMP_NE;
+ FPPred = CmpInst::FCMP_UNE;
+ break;
+ case UNORDERED_EXPR:
+ FPPred = CmpInst::FCMP_UNO;
+ break;
+ case ORDERED_EXPR:
+ FPPred = CmpInst::FCMP_ORD;
+ break;
+ case UNLT_EXPR:
+ FPPred = CmpInst::FCMP_ULT;
+ break;
+ case UNLE_EXPR:
+ FPPred = CmpInst::FCMP_ULE;
+ break;
+ case UNGT_EXPR:
+ FPPred = CmpInst::FCMP_UGT;
+ break;
+ case UNGE_EXPR:
+ FPPred = CmpInst::FCMP_UGE;
+ break;
+ case UNEQ_EXPR:
+ FPPred = CmpInst::FCMP_UEQ;
+ break;
+ case LTGT_EXPR:
+ FPPred = CmpInst::FCMP_ONE;
+ break;
+ }
-Value *TreeToLLVM::EmitReg_RotateOp(tree type, tree op0, tree op1,
- unsigned Opc1, unsigned Opc2) {
- Value *In = EmitRegister(op0);
- Value *Amt = EmitRegister(op1);
-
- if (Amt->getType() != In->getType())
- Amt = Builder.CreateIntCast(Amt, In->getType(), /*isSigned*/false,
- Amt->getName()+".cast");
-
- Value *TypeSize =
- ConstantInt::get(In->getType(),
- In->getType()->getPrimitiveSizeInBits());
-
- // Do the two shifts.
- Value *V1 = Builder.CreateBinOp((Instruction::BinaryOps)Opc1, In, Amt);
- Value *OtherShift = Builder.CreateSub(TypeSize, Amt);
- Value *V2 = Builder.CreateBinOp((Instruction::BinaryOps)Opc2, In, OtherShift);
-
- // Or the two together to return them.
- Value *Merge = Builder.CreateOr(V1, V2);
- return Builder.CreateIntCast(Merge, getRegType(type), /*isSigned*/false);
-}
-
-Value *TreeToLLVM::EmitReg_ShiftOp(tree op0, tree op1, unsigned Opc) {
- Value *LHS = EmitRegister(op0);
- Value *RHS = EmitRegister(op1);
- // Ensure that the shift amount has the same type as the shiftee.
- if (RHS->getType() != LHS->getType()) {
- if (LHS->getType()->isVectorTy() == RHS->getType()->isVectorTy()) {
- // Scalar shifted by a scalar amount, or a vector shifted by a vector
- // amount.
- assert((!LHS->getType()->isVectorTy() ||
- cast<VectorType>(LHS->getType())->getNumElements() ==
- cast<VectorType>(RHS->getType())->getNumElements()) &&
- "Vector length mismatch!");
- RHS = CastToAnyType(RHS, /*isSigned*/false, LHS->getType(),
- /*isSigned*/false);
- } else {
- // Vector shifted by a scalar amount. Turn the shift amount into a vector
- // with all elements equal.
- assert(LHS->getType()->isVectorTy() &&
- "Shifting a scalar by a vector amount!");
- VectorType *VecTy = cast<VectorType>(LHS->getType());
- RHS = CastToAnyType(RHS, /*isSigned*/false, VecTy->getElementType(),
- /*isSigned*/false);
- RHS = Builder.CreateInsertElement(UndefValue::get(VecTy), RHS,
- Builder.getInt32(0));
- Type *MaskTy = VectorType::get(Type::getInt32Ty(Context),
- VecTy->getNumElements());
- RHS = Builder.CreateShuffleVector(RHS, UndefValue::get(VecTy),
- ConstantInt::get(MaskTy, 0));
- }
- }
- return Builder.CreateBinOp((Instruction::BinaryOps)Opc, LHS, RHS);
-}
-
-Value *TreeToLLVM::EmitReg_VecShiftOp(tree op0, tree op1, bool isLeftShift) {
- Value *LHS = EmitRegister(op0); // A vector.
- Value *Amt = EmitRegister(op1); // An integer.
- VectorType *VecTy = cast<VectorType>(LHS->getType());
- unsigned Bits = VecTy->getPrimitiveSizeInBits();
-
- // If the shift is by a multiple of the element size then emit a shuffle.
- if (ConstantInt *CI = dyn_cast<ConstantInt>(Amt)) {
- // The GCC docs are not clear whether the bits shifted in must be zero or if
- // they can be anything. Since these expressions are currently only used in
- // situations which make no assumptions about the shifted in bits, we choose
- // to consider them to be undefined since this results in better code.
- unsigned ShiftAmt = (unsigned)CI->getLimitedValue(Bits);
- if (ShiftAmt >= Bits)
- // Shifting by more than the width of the vector is documented as giving
- // an undefined result.
- return UndefValue::get(VecTy);
- unsigned EltBits = VecTy->getElementType()->getPrimitiveSizeInBits();
- if (!(ShiftAmt % EltBits)) {
- // A shift by an integral number of elements.
- unsigned EltOffset = ShiftAmt / EltBits; // Shift by this many elements.
- // Shuffle the elements sideways by the appropriate number of elements.
- unsigned Length = VecTy->getNumElements();
- SmallVector<Constant*, 8> Mask;
- Mask.reserve(Length);
- if (isLeftShift) {
- // shl <4 x i32> %v, 32 ->
- // shufflevector <4 x i32> %v, <4 x i32> undef, <undef, 0, 1, 2>
- Mask.append(Length - EltOffset,
- UndefValue::get(Type::getInt32Ty(Context)));
- for (unsigned i = 0; i != EltOffset; ++i)
- Mask.push_back(Builder.getInt32(i));
- } else {
- // shr <4 x i32> %v, 32 ->
- // shufflevector <4 x i32> %v, <4 x i32> undef, <1, 2, 3, undef>
- for (unsigned i = EltOffset; i != Length; ++i)
- Mask.push_back(Builder.getInt32(i));
- Mask.append(EltOffset, UndefValue::get(Type::getInt32Ty(Context)));
+ if (isa<COMPLEX_TYPE>(TREE_TYPE(lhs))) {
+ Value *LHSr, *LHSi;
+ SplitComplex(LHS, LHSr, LHSi);
+ Value *RHSr, *RHSi;
+ SplitComplex(RHS, RHSr, RHSi);
+
+ Value *DSTr, *DSTi;
+ if (LHSr->getType()->isFloatingPointTy()) {
+ DSTr = Builder.CreateFCmp(FPPred, LHSr, RHSr);
+ DSTi = Builder.CreateFCmp(FPPred, LHSi, RHSi);
+ if (FPPred == CmpInst::FCMP_OEQ)
+ return Builder.CreateAnd(DSTr, DSTi);
+ assert(FPPred == CmpInst::FCMP_UNE &&
+ "Unhandled complex comparison!");
+ return Builder.CreateOr(DSTr, DSTi);
+ }
+
+ assert(SIPred == UIPred && "(In)equality comparison depends on sign!");
+ DSTr = Builder.CreateICmp(UIPred, LHSr, RHSr);
+ DSTi = Builder.CreateICmp(UIPred, LHSi, RHSi);
+ if (UIPred == CmpInst::ICMP_EQ)
+ return Builder.CreateAnd(DSTr, DSTi);
+ assert(UIPred == CmpInst::ICMP_NE && "Unhandled complex comparison!");
+ return Builder.CreateOr(DSTr, DSTi);
+ }
+
+ if (LHS->getType()->isFPOrFPVectorTy())
+ return Builder.CreateFCmp(FPPred, LHS, RHS);
+
+ // Determine which predicate to use based on signedness.
+ CmpInst::Predicate pred = TYPE_UNSIGNED(TREE_TYPE(lhs)) ? UIPred : SIPred;
+ return Builder.CreateICmp(pred, LHS, RHS);
+ }
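
The complex case expands (in)equality componentwise, which is just the
textbook identity:

  (a + ib) == (c + id)  <=>  a == c  AND  b == d
  (a + ib) != (c + id)  <=>  a != c  OR   b != d

hence the AND for the OEQ/EQ predicates and the OR for UNE/NE.
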
+
+ Value *TreeToLLVM::EmitReg_MinMaxExpr(tree op0, tree op1, unsigned UIPred,
+ unsigned SIPred, unsigned FPPred) {
+ Value *LHS = EmitRegister(op0);
+ Value *RHS = EmitRegister(op1);
+
+ Value *Compare;
+ if (isa<FLOAT_TYPE>(TREE_TYPE(op0)))
+ Compare = Builder.CreateFCmp(FCmpInst::Predicate(FPPred), LHS, RHS);
+ else if (TYPE_UNSIGNED(TREE_TYPE(op0)))
+ Compare = Builder.CreateICmp(ICmpInst::Predicate(UIPred), LHS, RHS);
+ else
+ Compare = Builder.CreateICmp(ICmpInst::Predicate(SIPred), LHS, RHS);
+
+ return Builder.CreateSelect(Compare, LHS, RHS);
+ }
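
The compare-plus-select pair above is the standard branch-free min/max idiom.
A self-contained C++ analogue of one case, a signed max (illustrative only,
not dragonegg code):

  #include <cstdint>

  // What an icmp followed by a select computes for a signed max.
  int32_t smax(int32_t a, int32_t b) {
    bool cmp = a > b;   // e.g. ICmpInst::ICMP_SGT
    return cmp ? a : b; // Builder.CreateSelect(Compare, LHS, RHS)
  }
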
+
+ Value *TreeToLLVM::EmitReg_ReducMinMaxExpr(
+ tree op, unsigned UIPred, unsigned SIPred, unsigned FPPred) {
+ // In the bottom half of the vector, form the max/min of the bottom and top
+ // halves of the vector. Rinse and repeat on the just computed bottom half:
+ // in the bottom quarter of the vector, form the max/min of the bottom and
+ // top halves of the bottom half. Continue until only the first element of
+ // the vector is computed. For example, reduc-max <x0, x1, x2, x3> becomes
+ // v = max <x0, x1, undef, undef>, <x2, x3, undef, undef>
+ // w = max <v0, undef, undef, undef>, <v1, undef, undef, undef>
+ // where v = <v0, v1, undef, undef>. The first element of w is the max/min
+ // of x0,x1,x2,x3.
+ Value *Val = EmitRegister(op);
+ Type *Ty = Val->getType();
+
+ CmpInst::Predicate Pred = CmpInst::Predicate(
+ isa<FLOAT_TYPE>(TREE_TYPE(op)) ? FPPred :
+ TYPE_UNSIGNED(TREE_TYPE(op)) ? UIPred :
+ SIPred);
+
+ unsigned Length = (unsigned) TYPE_VECTOR_SUBPARTS(TREE_TYPE(op));
+ assert(Length > 1 && !(Length & (Length - 1)) &&
+ "Length not a power of 2!");
+ SmallVector<Constant *, 8> Mask(Length);
+ Constant *UndefIndex = UndefValue::get(Type::getInt32Ty(Context));
+ for (unsigned Elts = Length >> 1; Elts; Elts >>= 1) {
+ // In the extracted vectors, elements with index Elts and on are undefined.
+ for (unsigned i = Elts; i != Length; ++i)
+ Mask[i] = UndefIndex;
+ // Extract elements [0, Elts) from Val.
+ for (unsigned i = 0; i != Elts; ++i)
+ Mask[i] = Builder.getInt32(i);
+ Value *LHS = Builder.CreateShuffleVector(Val, UndefValue::get(Ty),
+ ConstantVector::get(Mask));
+ // Extract elements [Elts, 2*Elts) from Val.
+ for (unsigned i = 0; i != Elts; ++i)
+ Mask[i] = Builder.getInt32(Elts + i);
+ Value *RHS = Builder.CreateShuffleVector(Val, UndefValue::get(Ty),
+ ConstantVector::get(Mask));
+
+ // Replace Val with the max/min of the extracted elements.
+ Value *Compare = isa<FLOAT_TYPE>(TREE_TYPE(op)) ?
+ Builder.CreateFCmp(Pred, LHS, RHS) :
+ Builder.CreateICmp(Pred, LHS, RHS);
+ Val = Builder.CreateSelect(Compare, LHS, RHS);
+
+ // Repeat, using half as many elements.
+ }
+
+ return Val;
+ }
+
+ Value *TreeToLLVM::EmitReg_REDUC_PLUS_EXPR(tree op) {
+ // In the bottom half of the vector, form the sum of the bottom and top halves
+ // of the vector. Rinse and repeat on the just computed bottom half: in the
+ // bottom quarter of the vector, form the sum of the bottom and top halves of
+ // the bottom half. Continue until only the first element of the vector is
+ // computed. For example, reduc-plus <x0, x1, x2, x3> becomes
+ // v = <x0, x1, undef, undef> + <x2, x3, undef, undef>
+ // w = <v0, undef, undef, undef> + <v1, undef, undef, undef>
+ // where v = <v0, v1, undef, undef>. The first element of w is x0+x1+x2+x3.
+ Value *Val = EmitRegister(op);
+ Type *Ty = Val->getType();
+
+ unsigned Length = (unsigned) TYPE_VECTOR_SUBPARTS(TREE_TYPE(op));
+ assert(Length > 1 && !(Length & (Length - 1)) &&
+ "Length not a power of 2!");
+ SmallVector<Constant *, 8> Mask(Length);
+ Constant *UndefIndex = UndefValue::get(Type::getInt32Ty(Context));
+ for (unsigned Elts = Length >> 1; Elts; Elts >>= 1) {
+ // In the extracted vectors, elements with index Elts and on are undefined.
+ for (unsigned i = Elts; i != Length; ++i)
+ Mask[i] = UndefIndex;
+ // Extract elements [0, Elts) from Val.
+ for (unsigned i = 0; i != Elts; ++i)
+ Mask[i] = Builder.getInt32(i);
+ Value *LHS = Builder.CreateShuffleVector(Val, UndefValue::get(Ty),
+ ConstantVector::get(Mask));
+ // Extract elements [Elts, 2*Elts) from Val.
+ for (unsigned i = 0; i != Elts; ++i)
+ Mask[i] = Builder.getInt32(Elts + i);
+ Value *RHS = Builder.CreateShuffleVector(Val, UndefValue::get(Ty),
+ ConstantVector::get(Mask));
+
+ // Replace Val with the sum of the extracted elements.
+ // TODO: Are nsw/nuw flags valid here?
+ Val = CreateAnyAdd(LHS, RHS, TREE_TYPE(TREE_TYPE(op)));
+
+ // Repeat, using half as many elements.
+ }
+
+ return Val;
+ }
+
+ Value *TreeToLLVM::EmitReg_RotateOp(tree type, tree op0, tree op1,
+ unsigned Opc1, unsigned Opc2) {
+ Value *In = EmitRegister(op0);
+ Value *Amt = EmitRegister(op1);
+
+ if (Amt->getType() != In->getType())
+ Amt = Builder.CreateIntCast(Amt, In->getType(), /*isSigned*/ false,
+ Amt->getName() + ".cast");
+
+ Value *TypeSize = ConstantInt::get(
+ In->getType(),
+ In->getType()->getPrimitiveSizeInBits());
+
+ // Do the two shifts.
+ Value *V1 = Builder.CreateBinOp((Instruction::BinaryOps) Opc1, In, Amt);
+ Value *OtherShift = Builder.CreateSub(TypeSize, Amt);
+ Value *V2 = Builder.CreateBinOp((Instruction::BinaryOps) Opc2, In,
+ OtherShift);
+
+ // Or the two together to return them.
+ Value *Merge = Builder.CreateOr(V1, V2);
+ return Builder.CreateIntCast(Merge, getRegType(type), /*isSigned*/ false);
+ }
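
The two shifts merged with an 'or' implement the textbook rotate identity. A
C++ sketch of the left-rotate case (illustrative; the C version requires
0 < n < 32, since shifting by the full width is undefined in C):

  #include <cstdint>

  // rotl(x, n) = (x << n) | (x >> (32 - n)), valid for 0 < n < 32.
  uint32_t rotl32(uint32_t x, uint32_t n) {
    return (x << n) | (x >> (32u - n));
  }
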
+
+ Value *TreeToLLVM::EmitReg_ShiftOp(tree op0, tree op1, unsigned Opc) {
+ Value *LHS = EmitRegister(op0);
+ Value *RHS = EmitRegister(op1);
+ // Ensure that the shift amount has the same type as the shiftee.
+ if (RHS->getType() != LHS->getType()) {
+ if (LHS->getType()->isVectorTy() == RHS->getType()->isVectorTy()) {
+ // Scalar shifted by a scalar amount, or a vector shifted by a vector
+ // amount.
+ assert((!LHS->getType()->isVectorTy() ||
+ cast<VectorType>(LHS->getType())->getNumElements() ==
+ cast<VectorType>(RHS->getType())->getNumElements()) &&
+ "Vector length mismatch!");
+ RHS = CastToAnyType(RHS, /*isSigned*/ false, LHS->getType(),
+ /*isSigned*/ false);
+ } else {
+ // Vector shifted by a scalar amount. Turn the shift amount into a vector
+ // with all elements equal.
+ assert(LHS->getType()->isVectorTy() &&
+ "Shifting a scalar by a vector amount!");
+ VectorType *VecTy = cast<VectorType>(LHS->getType());
+ RHS = CastToAnyType(RHS, /*isSigned*/ false, VecTy->getElementType(),
+ /*isSigned*/ false);
+ RHS = Builder.CreateInsertElement(UndefValue::get(VecTy), RHS,
+ Builder.getInt32(0));
+ Type *MaskTy = VectorType::get(Type::getInt32Ty(Context),
+ VecTy->getNumElements());
+ RHS = Builder.CreateShuffleVector(RHS, UndefValue::get(VecTy),
+ ConstantInt::get(MaskTy, 0));
+ }
}
- return Builder.CreateShuffleVector(LHS, UndefValue::get(VecTy),
- ConstantVector::get(Mask));
+ return Builder.CreateBinOp((Instruction::BinaryOps) Opc, LHS, RHS);
}
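
The scalar-to-vector branch above uses the standard splat idiom: insert the
scalar into lane 0 of an undef vector, then shuffle with an all-zero mask so
every lane copies lane 0. For a 4-lane shift amount n this produces

  <n, n, n, n>

after which the element-wise shift applies as usual.
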
- }
- // Turn the vector into a mighty integer of the same size.
- LHS = Builder.CreateBitCast(LHS, IntegerType::get(Context, Bits));
+ Value *TreeToLLVM::EmitReg_VecShiftOp(tree op0, tree op1,
+ bool isLeftShift) {
+ Value *LHS = EmitRegister(op0); // A vector.
+ Value *Amt = EmitRegister(op1); // An integer.
+ VectorType *VecTy = cast<VectorType>(LHS->getType());
+ unsigned Bits = VecTy->getPrimitiveSizeInBits();
- // Ensure the shift amount has the same type.
- if (Amt->getType() != LHS->getType())
- Amt = Builder.CreateIntCast(Amt, LHS->getType(), /*isSigned*/false,
- Amt->getName()+".cast");
-
- // Perform the shift.
- LHS = Builder.CreateBinOp(isLeftShift ? Instruction::Shl : Instruction::LShr,
- LHS, Amt);
-
- // Turn the result back into a vector.
- return Builder.CreateBitCast(LHS, VecTy);
-}
-
-Value *TreeToLLVM::EmitReg_TruthOp(tree type, tree op0, tree op1, unsigned Opc){
- Value *LHS = EmitRegister(op0);
- Value *RHS = EmitRegister(op1);
-
- // This is a truth operation like the strict &&, || and ^^. Convert the
- // operands to bool with a test against zero.
- LHS = Builder.CreateICmpNE(LHS,
- Constant::getNullValue(LHS->getType()),
- "toBool");
- RHS = Builder.CreateICmpNE(RHS,
- Constant::getNullValue(RHS->getType()),
- "toBool");
-
- Value *Res = Builder.CreateBinOp((Instruction::BinaryOps)Opc, LHS, RHS);
- return Builder.CreateZExt(Res, getRegType(type));
-}
-
-Value *TreeToLLVM::EmitReg_CEIL_DIV_EXPR(tree op0, tree op1) {
- // Notation: CEIL_DIV_EXPR <-> CDiv, TRUNC_DIV_EXPR <-> Div.
-
- // CDiv calculates LHS/RHS by rounding up to the nearest integer. In terms
- // of Div this means if the values of LHS and RHS have opposite signs or if
- // LHS is zero, then CDiv necessarily equals Div; and
- // LHS CDiv RHS = (LHS - Sign(RHS)) Div RHS + 1
- // otherwise.
-
- Type *Ty = getRegType(TREE_TYPE(op0));
- Constant *Zero = ConstantInt::get(Ty, 0);
- Constant *One = ConstantInt::get(Ty, 1);
- Constant *MinusOne = Constant::getAllOnesValue(Ty);
-
- Value *LHS = EmitRegister(op0);
- Value *RHS = EmitRegister(op1);
-
- if (!TYPE_UNSIGNED(TREE_TYPE(op0))) {
- // In the case of signed arithmetic, we calculate CDiv as follows:
- // LHS CDiv RHS = (LHS - Sign(RHS) * Offset) Div RHS + Offset,
- // where Offset is 1 if LHS and RHS have the same sign and LHS is
- // not zero, and 0 otherwise.
-
- // On some machines INT_MIN Div -1 traps. You might expect a trap for
- // INT_MIN CDiv -1 too, but this implementation will not generate one.
- // Quick quiz question: what value is returned for INT_MIN CDiv -1?
-
- // Determine the signs of LHS and RHS, and whether they have the same sign.
- Value *LHSIsPositive = Builder.CreateICmpSGE(LHS, Zero);
- Value *RHSIsPositive = Builder.CreateICmpSGE(RHS, Zero);
- Value *HaveSameSign = Builder.CreateICmpEQ(LHSIsPositive, RHSIsPositive);
-
- // Offset equals 1 if LHS and RHS have the same sign and LHS is not zero.
- Value *LHSNotZero = Builder.CreateICmpNE(LHS, Zero);
- Value *OffsetOne = Builder.CreateAnd(HaveSameSign, LHSNotZero);
- // ... otherwise it is 0.
- Value *Offset = Builder.CreateSelect(OffsetOne, One, Zero);
-
- // Calculate Sign(RHS) ...
- Value *SignRHS = Builder.CreateSelect(RHSIsPositive, One, MinusOne);
- // ... and Sign(RHS) * Offset
- Value *SignedOffset = Builder.CreateSExt(OffsetOne, Ty);
- SignedOffset = Builder.CreateAnd(SignRHS, SignedOffset);
-
- // Return CDiv = (LHS - Sign(RHS) * Offset) Div RHS + Offset.
- Value *CDiv = Builder.CreateSub(LHS, SignedOffset);
- CDiv = Builder.CreateSDiv(CDiv, RHS);
- return Builder.CreateAdd(CDiv, Offset, "cdiv");
- }
-
- // In the case of unsigned arithmetic, LHS and RHS necessarily have the
- // same sign, so we can use
- // LHS CDiv RHS = (LHS - 1) Div RHS + 1
- // as long as LHS is non-zero.
-
- // Offset is 1 if LHS is non-zero, 0 otherwise.
- Value *LHSNotZero = Builder.CreateICmpNE(LHS, Zero);
- Value *Offset = Builder.CreateSelect(LHSNotZero, One, Zero);
-
- // Return CDiv = (LHS - Offset) Div RHS + Offset.
- Value *CDiv = Builder.CreateSub(LHS, Offset);
- CDiv = Builder.CreateUDiv(CDiv, RHS);
- return Builder.CreateAdd(CDiv, Offset, "cdiv");
-}
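
Two worked examples of the signed formula (32-bit operands; Div truncates
toward zero):

   7 CDiv 2: same signs and LHS != 0, so Offset = 1 and Sign(RHS) = 1:
             (7 - 1) Div 2 + 1 = 3 + 1 = 4 = ceil(3.5)
  -7 CDiv 2: opposite signs, so Offset = 0:
             -7 Div 2 + 0 = -3 = ceil(-3.5)
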
-
-Value *TreeToLLVM::EmitReg_BIT_AND_EXPR(tree op0, tree op1) {
- Value *LHS = CastToSameSizeInteger(EmitRegister(op0));
- Value *RHS = CastToSameSizeInteger(EmitRegister(op1));
- Value *Res = Builder.CreateAnd(LHS, RHS);
- return CastFromSameSizeInteger(Res, getRegType(TREE_TYPE(op0)));
-}
-
-Value *TreeToLLVM::EmitReg_BIT_IOR_EXPR(tree op0, tree op1) {
- Value *LHS = CastToSameSizeInteger(EmitRegister(op0));
- Value *RHS = CastToSameSizeInteger(EmitRegister(op1));
- Value *Res = Builder.CreateOr(LHS, RHS);
- return CastFromSameSizeInteger(Res, getRegType(TREE_TYPE(op0)));
-}
-
-Value *TreeToLLVM::EmitReg_BIT_XOR_EXPR(tree op0, tree op1) {
- Value *LHS = CastToSameSizeInteger(EmitRegister(op0));
- Value *RHS = CastToSameSizeInteger(EmitRegister(op1));
- Value *Res = Builder.CreateXor(LHS, RHS);
- return CastFromSameSizeInteger(Res, getRegType(TREE_TYPE(op0)));
-}
-
-/// EmitReg_CondExpr - Handle COND_EXPR and VEC_COND_EXPR gimple assign right-
-/// hand sides.
-Value *TreeToLLVM::EmitReg_CondExpr(tree op0, tree op1, tree op2) {
- // The condition is either a comparison or an SSA register. Note that the
- // reason for accessing tree operands directly rather than taking advantage
- // of COND_EXPR_COND and friends is that the latter fail for VEC_COND_EXPR,
- // which is also handled here.
- Value *CondVal = COMPARISON_CLASS_P(op0) ?
- EmitCompare(TREE_OPERAND(op0, 0), TREE_OPERAND(op0, 1), TREE_CODE(op0)) :
- EmitRegister(op0);
-
- // Ensure the condition has i1 type.
- if (!CondVal->getType()->getScalarType()->isIntegerTy(1))
- CondVal = Builder.CreateICmpNE(CondVal,
- Constant::getNullValue(CondVal->getType()));
-
- // Emit the true and false values.
- Value *TrueVal = EmitRegister(op1);
- Value *FalseVal = EmitRegister(op2);
- FalseVal = TriviallyTypeConvert(FalseVal, TrueVal->getType());
-
- // Select the value to use based on the condition.
- return Builder.CreateSelect(CondVal, TrueVal, FalseVal);
-}
-
-Value *TreeToLLVM::EmitReg_COMPLEX_EXPR(tree op0, tree op1) {
- return CreateComplex(EmitRegister(op0), EmitRegister(op1));
-}
-
-Value *TreeToLLVM::EmitReg_FLOOR_DIV_EXPR(tree op0, tree op1) {
- // Notation: FLOOR_DIV_EXPR <-> FDiv, TRUNC_DIV_EXPR <-> Div.
- Value *LHS = EmitRegister(op0);
- Value *RHS = EmitRegister(op1);
-
- // FDiv calculates LHS/RHS by rounding down to the nearest integer. In terms
- // of Div this means if the values of LHS and RHS have the same sign or if LHS
- // is zero, then FDiv necessarily equals Div; and
- // LHS FDiv RHS = (LHS + Sign(RHS)) Div RHS - 1
- // otherwise.
-
- if (TYPE_UNSIGNED(TREE_TYPE(op0)))
- // In the case of unsigned arithmetic, LHS and RHS necessarily have the
- // same sign, so FDiv is the same as Div.
- return Builder.CreateUDiv(LHS, RHS, "fdiv");
-
- Type *Ty = getRegType(TREE_TYPE(op0));
- Constant *Zero = ConstantInt::get(Ty, 0);
- Constant *One = ConstantInt::get(Ty, 1);
- Constant *MinusOne = Constant::getAllOnesValue(Ty);
-
- // In the case of signed arithmetic, we calculate FDiv as follows:
- // LHS FDiv RHS = (LHS + Sign(RHS) * Offset) Div RHS - Offset,
- // where Offset is 1 if LHS and RHS have opposite signs and LHS is
- // not zero, and 0 otherwise.
-
- // Determine the signs of LHS and RHS, and whether they have the same sign.
- Value *LHSIsPositive = Builder.CreateICmpSGE(LHS, Zero);
- Value *RHSIsPositive = Builder.CreateICmpSGE(RHS, Zero);
- Value *SignsDiffer = Builder.CreateICmpNE(LHSIsPositive, RHSIsPositive);
-
- // Offset equals 1 if LHS and RHS have opposite signs and LHS is not zero.
- Value *LHSNotZero = Builder.CreateICmpNE(LHS, Zero);
- Value *OffsetOne = Builder.CreateAnd(SignsDiffer, LHSNotZero);
- // ... otherwise it is 0.
- Value *Offset = Builder.CreateSelect(OffsetOne, One, Zero);
-
- // Calculate Sign(RHS) ...
- Value *SignRHS = Builder.CreateSelect(RHSIsPositive, One, MinusOne);
- // ... and Sign(RHS) * Offset
- Value *SignedOffset = Builder.CreateSExt(OffsetOne, Ty);
- SignedOffset = Builder.CreateAnd(SignRHS, SignedOffset);
-
- // Return FDiv = (LHS + Sign(RHS) * Offset) Div RHS - Offset.
- Value *FDiv = Builder.CreateAdd(LHS, SignedOffset);
- FDiv = Builder.CreateSDiv(FDiv, RHS);
- return Builder.CreateSub(FDiv, Offset, "fdiv");
-}
-
-Value *TreeToLLVM::EmitReg_FLOOR_MOD_EXPR(tree op0, tree op1) {
- // Notation: FLOOR_MOD_EXPR <-> Mod, TRUNC_MOD_EXPR <-> Rem.
-
- Value *LHS = EmitRegister(op0);
- Value *RHS = EmitRegister(op1);
-
- // We express Mod in terms of Rem as follows: if RHS exactly divides LHS,
- // or the values of LHS and RHS have the same sign, then Mod equals Rem.
- // Otherwise Mod equals Rem + RHS. This means that LHS Mod RHS traps iff
- // LHS Rem RHS traps.
- if (TYPE_UNSIGNED(TREE_TYPE(op0)))
- // LHS and RHS values must have the same sign if their type is unsigned.
- return Builder.CreateURem(LHS, RHS);
-
- Type *Ty = getRegType(TREE_TYPE(op0));
- Constant *Zero = ConstantInt::get(Ty, 0);
-
- // The two possible values for Mod.
- Value *Rem = Builder.CreateSRem(LHS, RHS, "rem");
- Value *RemPlusRHS = Builder.CreateAdd(Rem, RHS);
-
- // HaveSameSign: (LHS >= 0) == (RHS >= 0).
- Value *LHSIsPositive = Builder.CreateICmpSGE(LHS, Zero);
- Value *RHSIsPositive = Builder.CreateICmpSGE(RHS, Zero);
- Value *HaveSameSign = Builder.CreateICmpEQ(LHSIsPositive,RHSIsPositive);
-
- // RHS exactly divides LHS iff Rem is zero.
- Value *RemIsZero = Builder.CreateICmpEQ(Rem, Zero);
-
- Value *SameAsRem = Builder.CreateOr(HaveSameSign, RemIsZero);
- return Builder.CreateSelect(SameAsRem, Rem, RemPlusRHS, "mod");
-}
-
-Value *TreeToLLVM::EmitReg_MINUS_EXPR(tree op0, tree op1) {
- Value *LHS = EmitRegister(op0);
- Value *RHS = EmitRegister(op1);
- tree type = TREE_TYPE(op0);
-
- if (isa<COMPLEX_TYPE>(type)) {
- tree elt_type = TREE_TYPE(type);
- Value *LHSr, *LHSi; SplitComplex(LHS, LHSr, LHSi);
- Value *RHSr, *RHSi; SplitComplex(RHS, RHSr, RHSi);
-
- // (a+ib) - (c+id) = (a-c) + i(b-d)
- LHSr = CreateAnySub(LHSr, RHSr, elt_type);
- LHSi = CreateAnySub(LHSi, RHSi, elt_type);
-
- return CreateComplex(LHSr, LHSi);
- }
-
- return CreateAnySub(LHS, RHS, type);
-}
-
-Value *TreeToLLVM::EmitReg_MULT_EXPR(tree op0, tree op1) {
- Value *LHS = EmitRegister(op0);
- Value *RHS = EmitRegister(op1);
- tree type = TREE_TYPE(op0);
-
- if (isa<COMPLEX_TYPE>(type)) {
- tree elt_type = TREE_TYPE(type);
- Value *LHSr, *LHSi; SplitComplex(LHS, LHSr, LHSi);
- Value *RHSr, *RHSi; SplitComplex(RHS, RHSr, RHSi);
- Value *DSTr, *DSTi;
-
- // (a+ib) * (c+id) = (ac-bd) + i(ad+cb)
- if (isa<REAL_TYPE>(elt_type)) {
- Value *Tmp1 = Builder.CreateFMul(LHSr, RHSr); // a*c
- Value *Tmp2 = Builder.CreateFMul(LHSi, RHSi); // b*d
- DSTr = Builder.CreateFSub(Tmp1, Tmp2); // ac-bd
-
- Value *Tmp3 = Builder.CreateFMul(LHSr, RHSi); // a*d
- Value *Tmp4 = Builder.CreateFMul(RHSr, LHSi); // c*b
- DSTi = Builder.CreateFAdd(Tmp3, Tmp4); // ad+cb
- } else {
- // If overflow does not wrap in the element type then it is tempting to
- // use NSW operations here. However that would be wrong since overflow
- // of an intermediate value calculated here does not necessarily imply
- // that the final result overflows.
- Value *Tmp1 = Builder.CreateMul(LHSr, RHSr); // a*c
- Value *Tmp2 = Builder.CreateMul(LHSi, RHSi); // b*d
- DSTr = Builder.CreateSub(Tmp1, Tmp2); // ac-bd
-
- Value *Tmp3 = Builder.CreateMul(LHSr, RHSi); // a*d
- Value *Tmp4 = Builder.CreateMul(RHSr, LHSi); // c*b
- DSTi = Builder.CreateAdd(Tmp3, Tmp4); // ad+cb
- }
-
- return CreateComplex(DSTr, DSTi);
- }
-
- return CreateAnyMul(LHS, RHS, type);
-}
-
-Value *TreeToLLVM::EmitReg_PLUS_EXPR(tree op0, tree op1) {
- Value *LHS = EmitRegister(op0);
- Value *RHS = EmitRegister(op1);
- tree type = TREE_TYPE(op0);
-
- if (isa<COMPLEX_TYPE>(type)) {
- tree elt_type = TREE_TYPE(type);
- Value *LHSr, *LHSi; SplitComplex(LHS, LHSr, LHSi);
- Value *RHSr, *RHSi; SplitComplex(RHS, RHSr, RHSi);
-
- // (a+ib) + (c+id) = (a+c) + i(b+d)
- LHSr = CreateAnyAdd(LHSr, RHSr, elt_type);
- LHSi = CreateAnyAdd(LHSi, RHSi, elt_type);
-
- return CreateComplex(LHSr, LHSi);
- }
-
- return CreateAnyAdd(LHS, RHS, type);
-}
-
-Value *TreeToLLVM::EmitReg_POINTER_PLUS_EXPR(tree op0, tree op1) {
- Value *Ptr = EmitRegister(op0); // The pointer.
- Value *Idx = EmitRegister(op1); // The offset in units.
-
- // Convert the pointer into an i8* and add the offset to it.
- Ptr = Builder.CreateBitCast(Ptr, GetUnitPointerType(Context));
- StringRef GEPName = flag_verbose_asm ? "pp" : "";
- return POINTER_TYPE_OVERFLOW_UNDEFINED ?
- Builder.CreateInBoundsGEP(Ptr, Idx, GEPName) :
- Builder.CreateGEP(Ptr, Idx, GEPName);
-}
-
-Value *TreeToLLVM::EmitReg_RDIV_EXPR(tree op0, tree op1) {
- Value *LHS = EmitRegister(op0);
- Value *RHS = EmitRegister(op1);
- tree type = TREE_TYPE(op0);
-
- if (isa<COMPLEX_TYPE>(type)) {
- Value *LHSr, *LHSi; SplitComplex(LHS, LHSr, LHSi);
- Value *RHSr, *RHSi; SplitComplex(RHS, RHSr, RHSi);
- Value *DSTr, *DSTi;
-
- // (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd))
- assert (isa<REAL_TYPE>(TREE_TYPE(type)) &&
- "RDIV_EXPR not floating point!");
- Value *Tmp1 = Builder.CreateFMul(LHSr, RHSr); // a*c
- Value *Tmp2 = Builder.CreateFMul(LHSi, RHSi); // b*d
- Value *Tmp3 = Builder.CreateFAdd(Tmp1, Tmp2); // ac+bd
-
- Value *Tmp4 = Builder.CreateFMul(RHSr, RHSr); // c*c
- Value *Tmp5 = Builder.CreateFMul(RHSi, RHSi); // d*d
- Value *Tmp6 = Builder.CreateFAdd(Tmp4, Tmp5); // cc+dd
- DSTr = Builder.CreateFDiv(Tmp3, Tmp6);
-
- Value *Tmp7 = Builder.CreateFMul(LHSi, RHSr); // b*c
- Value *Tmp8 = Builder.CreateFMul(LHSr, RHSi); // a*d
- Value *Tmp9 = Builder.CreateFSub(Tmp7, Tmp8); // bc-ad
- DSTi = Builder.CreateFDiv(Tmp9, Tmp6);
-
- return CreateComplex(DSTr, DSTi);
- }
-
- assert(isa<FLOAT_TYPE>(type) && "RDIV_EXPR not floating point!");
- return Builder.CreateFDiv(LHS, RHS);
-}
-
-Value *TreeToLLVM::EmitReg_ROUND_DIV_EXPR(tree op0, tree op1) {
- // Notation: ROUND_DIV_EXPR <-> RDiv, TRUNC_DIV_EXPR <-> Div.
-
- // RDiv calculates LHS/RHS by rounding to the nearest integer. Ties
- // are broken by rounding away from zero. In terms of Div this means:
- // LHS RDiv RHS = (LHS + (RHS Div 2)) Div RHS
- // if the values of LHS and RHS have the same sign; and
- // LHS RDiv RHS = (LHS - (RHS Div 2)) Div RHS
- // if the values of LHS and RHS differ in sign. The intermediate
- // expressions in these formulae can overflow, so some tweaking is
- // required to ensure correct results. The details depend on whether
- // we are doing signed or unsigned arithmetic.
-
- Type *Ty = getRegType(TREE_TYPE(op0));
- Constant *Zero = ConstantInt::get(Ty, 0);
- Constant *Two = ConstantInt::get(Ty, 2);
-
- Value *LHS = EmitRegister(op0);
- Value *RHS = EmitRegister(op1);
-
- if (!TYPE_UNSIGNED(TREE_TYPE(op0))) {
- // In the case of signed arithmetic, we calculate RDiv as follows:
- // LHS RDiv RHS = (sign) ( (|LHS| + (|RHS| UDiv 2)) UDiv |RHS| ),
- // where sign is +1 if LHS and RHS have the same sign, -1 if their
- // signs differ. Doing the computation unsigned ensures that there
- // is no overflow.
-
- // On some machines INT_MIN Div -1 traps. You might expect a trap for
- // INT_MIN RDiv -1 too, but this implementation will not generate one.
- // Quick quiz question: what value is returned for INT_MIN RDiv -1?
-
- // Determine the signs of LHS and RHS, and whether they have the same sign.
- Value *LHSIsPositive = Builder.CreateICmpSGE(LHS, Zero);
- Value *RHSIsPositive = Builder.CreateICmpSGE(RHS, Zero);
- Value *HaveSameSign = Builder.CreateICmpEQ(LHSIsPositive, RHSIsPositive);
-
- // Calculate |LHS| ...
- Value *MinusLHS = Builder.CreateNeg(LHS);
- Value *AbsLHS = Builder.CreateSelect(LHSIsPositive, LHS, MinusLHS,
- LHS->getName()+".abs");
- // ... and |RHS|
- Value *MinusRHS = Builder.CreateNeg(RHS);
- Value *AbsRHS = Builder.CreateSelect(RHSIsPositive, RHS, MinusRHS,
- RHS->getName()+".abs");
-
- // Calculate AbsRDiv = (|LHS| + (|RHS| UDiv 2)) UDiv |RHS|.
- Value *HalfAbsRHS = Builder.CreateUDiv(AbsRHS, Two);
- Value *Numerator = Builder.CreateAdd(AbsLHS, HalfAbsRHS);
- Value *AbsRDiv = Builder.CreateUDiv(Numerator, AbsRHS);
-
- // Return AbsRDiv or -AbsRDiv according to whether LHS and RHS have the
- // same sign or not.
- Value *MinusAbsRDiv = Builder.CreateNeg(AbsRDiv);
- return Builder.CreateSelect(HaveSameSign, AbsRDiv, MinusAbsRDiv, "rdiv");
- }
-
- // In the case of unsigned arithmetic, LHS and RHS necessarily have the
- // same sign, however overflow is a problem. We want to use the formula
- // LHS RDiv RHS = (LHS + (RHS Div 2)) Div RHS,
- // but if LHS + (RHS Div 2) overflows then we get the wrong result. Since
- // the use of a conditional branch seems to be unavoidable, we choose the
- // simple solution of explicitly checking for overflow, and using
- // LHS RDiv RHS = ((LHS + (RHS Div 2)) - RHS) Div RHS + 1
- // if it occurred.
-
- // Usually the numerator is LHS + (RHS Div 2); calculate this.
- Value *HalfRHS = Builder.CreateUDiv(RHS, Two);
- Value *Numerator = Builder.CreateAdd(LHS, HalfRHS);
-
- // Did the calculation overflow?
- Value *Overflowed = Builder.CreateICmpULT(Numerator, HalfRHS);
-
- // If so, use (LHS + (RHS Div 2)) - RHS for the numerator instead.
- Value *AltNumerator = Builder.CreateSub(Numerator, RHS);
- Numerator = Builder.CreateSelect(Overflowed, AltNumerator, Numerator);
-
- // Quotient = Numerator / RHS.
- Value *Quotient = Builder.CreateUDiv(Numerator, RHS);
-
- // Return Quotient unless we overflowed, in which case return Quotient + 1.
- return Builder.CreateAdd(Quotient, Builder.CreateIntCast(Overflowed, Ty,
- /*isSigned*/false),
- "rdiv");
-}
-
-Value *TreeToLLVM::EmitReg_TRUNC_DIV_EXPR(tree op0, tree op1, bool isExact) {
- Value *LHS = EmitRegister(op0);
- Value *RHS = EmitRegister(op1);
- tree type = TREE_TYPE(op0);
-
- if (isa<COMPLEX_TYPE>(type)) {
- tree elt_type = TREE_TYPE(type);
- Value *LHSr, *LHSi; SplitComplex(LHS, LHSr, LHSi);
- Value *RHSr, *RHSi; SplitComplex(RHS, RHSr, RHSi);
- Value *DSTr, *DSTi;
-
- // (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd))
- assert (LHSr->getType()->isIntegerTy() && "TRUNC_DIV_EXPR not integer!");
- // If overflow does not wrap in the element type then it is tempting to
- // use NSW operations here. However that would be wrong since overflow
- // of an intermediate value calculated here does not necessarily imply
- // that the final result overflows.
- Value *Tmp1 = Builder.CreateMul(LHSr, RHSr); // a*c
- Value *Tmp2 = Builder.CreateMul(LHSi, RHSi); // b*d
- Value *Tmp3 = Builder.CreateAdd(Tmp1, Tmp2); // ac+bd
-
- Value *Tmp4 = Builder.CreateMul(RHSr, RHSr); // c*c
- Value *Tmp5 = Builder.CreateMul(RHSi, RHSi); // d*d
- Value *Tmp6 = Builder.CreateAdd(Tmp4, Tmp5); // cc+dd
- DSTr = TYPE_UNSIGNED(elt_type) ?
- Builder.CreateUDiv(Tmp3, Tmp6) : Builder.CreateSDiv(Tmp3, Tmp6);
-
- Value *Tmp7 = Builder.CreateMul(LHSi, RHSr); // b*c
- Value *Tmp8 = Builder.CreateMul(LHSr, RHSi); // a*d
- Value *Tmp9 = Builder.CreateSub(Tmp7, Tmp8); // bc-ad
- DSTi = TYPE_UNSIGNED(elt_type) ?
- Builder.CreateUDiv(Tmp9, Tmp6) : Builder.CreateSDiv(Tmp9, Tmp6);
-
- return CreateComplex(DSTr, DSTi);
- }
-
- assert(LHS->getType()->isIntOrIntVectorTy() && "TRUNC_DIV_EXPR not integer!");
- if (TYPE_UNSIGNED(type))
- return Builder.CreateUDiv(LHS, RHS, "", isExact);
- else
- return Builder.CreateSDiv(LHS, RHS, "", isExact);
-}
+ // If the shift is by a multiple of the element size then emit a shuffle.
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(Amt)) {
+ // The GCC docs are not clear whether the bits shifted in must be zero or if
+ // they can be anything. Since these expressions are currently only used in
+ // situations which make no assumptions about the shifted in bits, we choose
+ // to consider them to be undefined since this results in better code.
+ unsigned ShiftAmt = (unsigned) CI->getLimitedValue(Bits);
+ if (ShiftAmt >= Bits)
+ // Shifting by more than the width of the vector is documented as giving
+ // an undefined result.
+ return UndefValue::get(VecTy);
+ unsigned EltBits = VecTy->getElementType()->getPrimitiveSizeInBits();
+ if (!(ShiftAmt % EltBits)) {
+ // A shift by an integral number of elements.
+      unsigned EltOffset = ShiftAmt / EltBits; // Shift by this many elements.
+ // Shuffle the elements sideways by the appropriate number of elements.
+ unsigned Length = VecTy->getNumElements();
+ SmallVector<Constant *, 8> Mask;
+ Mask.reserve(Length);
+ if (isLeftShift) {
+ // shl <4 x i32> %v, 32 ->
+ // shufflevector <4 x i32> %v, <4 x i32> undef, <undef, 0, 1, 2>
+        Mask.append(EltOffset, UndefValue::get(Type::getInt32Ty(Context)));
+        for (unsigned i = 0; i != Length - EltOffset; ++i)
+          Mask.push_back(Builder.getInt32(i));
+ } else {
+ // shr <4 x i32> %v, 32 ->
+ // shufflevector <4 x i32> %v, <4 x i32> undef, <1, 2, 3, undef>
+ for (unsigned i = EltOffset; i != Length; ++i)
+ Mask.push_back(Builder.getInt32(i));
+ Mask.append(EltOffset, UndefValue::get(Type::getInt32Ty(Context)));
+ }
+ return Builder.CreateShuffleVector(LHS, UndefValue::get(VecTy),
+ ConstantVector::get(Mask));
+ }
+ }
-Value *TreeToLLVM::EmitReg_TRUNC_MOD_EXPR(tree op0, tree op1) {
- Value *LHS = EmitRegister(op0);
- Value *RHS = EmitRegister(op1);
- return TYPE_UNSIGNED(TREE_TYPE(op0)) ?
- Builder.CreateURem(LHS, RHS) : Builder.CreateSRem(LHS, RHS);
-}
+ // Turn the vector into a mighty integer of the same size.
+ LHS = Builder.CreateBitCast(LHS, IntegerType::get(Context, Bits));
+
+ // Ensure the shift amount has the same type.
+ if (Amt->getType() != LHS->getType())
+ Amt = Builder.CreateIntCast(Amt, LHS->getType(), /*isSigned*/ false,
+ Amt->getName() + ".cast");
+
+ // Perform the shift.
+ LHS = Builder.CreateBinOp(isLeftShift ? Instruction::Shl :
+ Instruction::LShr, LHS, Amt);
+
+ // Turn the result back into a vector.
+ return Builder.CreateBitCast(LHS, VecTy);
+ }
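
To make the mask construction above concrete, here is a minimal standalone
model (plain C++, not dragonegg code; shiftMask is an illustrative name) of
the lane movement the shuffle encodes when element 0 sits in the integer's
low bits, with -1 standing for an undef lane:

    #include <cassert>
    #include <vector>

    // A left shift of the underlying integer moves lanes towards higher
    // indices, a right shift towards lower ones; shifted-in lanes are undef.
    std::vector<int> shiftMask(unsigned Length, unsigned EltOffset, bool Left) {
      std::vector<int> Mask;
      if (Left) {
        Mask.assign(EltOffset, -1); // undef lanes shifted in at the bottom
        for (unsigned i = 0; i != Length - EltOffset; ++i)
          Mask.push_back((int)i);   // surviving low lanes
      } else {
        for (unsigned i = EltOffset; i != Length; ++i)
          Mask.push_back((int)i);   // surviving high lanes
        Mask.insert(Mask.end(), EltOffset, -1); // undef lanes at the top
      }
      return Mask;
    }

    int main() {
      // shl <4 x i32> %v, 32 (one element) gives <undef, 0, 1, 2>.
      assert((shiftMask(4, 1, true) == std::vector<int>{ -1, 0, 1, 2 }));
      // lshr <4 x i32> %v, 32 gives <1, 2, 3, undef>.
      assert((shiftMask(4, 1, false) == std::vector<int>{ 1, 2, 3, -1 }));
    }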
+
+ Value *TreeToLLVM::EmitReg_TruthOp(tree type, tree op0, tree op1,
+ unsigned Opc) {
+ Value *LHS = EmitRegister(op0);
+ Value *RHS = EmitRegister(op1);
+
+ // This is a truth operation like the strict &&,||,^^. Convert to bool as
+ // a test against zero
+ LHS = Builder.CreateICmpNE(LHS, Constant::getNullValue(LHS->getType()),
+ "toBool");
+ RHS = Builder.CreateICmpNE(RHS, Constant::getNullValue(RHS->getType()),
+ "toBool");
+
+ Value *Res = Builder.CreateBinOp((Instruction::BinaryOps) Opc, LHS, RHS);
+ return Builder.CreateZExt(Res, getRegType(type));
+ }
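
A scalar model of this lowering (illustrative C++; truth_and is a made-up
name, and TRUTH_AND_EXPR is assumed as the opcode): each operand is tested
against zero, the one-bit results are combined, and the result is widened
back to the register type.

    // Models EmitReg_TruthOp for an AND opcode; illustrative only.
    int truth_and(int a, int b) {
      bool lhs = (a != 0);     // the "toBool" test of the left operand
      bool rhs = (b != 0);     // the "toBool" test of the right operand
      return (int)(lhs & rhs); // the final zext back to the register type
    }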
+
+ Value *TreeToLLVM::EmitReg_CEIL_DIV_EXPR(tree op0, tree op1) {
+ // Notation: CEIL_DIV_EXPR <-> CDiv, TRUNC_DIV_EXPR <-> Div.
+
+ // CDiv calculates LHS/RHS by rounding up to the nearest integer. In terms
+ // of Div this means if the values of LHS and RHS have opposite signs or if
+ // LHS is zero, then CDiv necessarily equals Div; and
+ // LHS CDiv RHS = (LHS - Sign(RHS)) Div RHS + 1
+ // otherwise.
+
+ Type *Ty = getRegType(TREE_TYPE(op0));
+ Constant *Zero = ConstantInt::get(Ty, 0);
+ Constant *One = ConstantInt::get(Ty, 1);
+ Constant *MinusOne = Constant::getAllOnesValue(Ty);
+
+ Value *LHS = EmitRegister(op0);
+ Value *RHS = EmitRegister(op1);
+
+ if (!TYPE_UNSIGNED(TREE_TYPE(op0))) {
+ // In the case of signed arithmetic, we calculate CDiv as follows:
+ // LHS CDiv RHS = (LHS - Sign(RHS) * Offset) Div RHS + Offset,
+ // where Offset is 1 if LHS and RHS have the same sign and LHS is
+ // not zero, and 0 otherwise.
+
+ // On some machines INT_MIN Div -1 traps. You might expect a trap for
+ // INT_MIN CDiv -1 too, but this implementation will not generate one.
+ // Quick quiz question: what value is returned for INT_MIN CDiv -1?
+
+ // Determine the signs of LHS and RHS, and whether they have the same sign.
+ Value *LHSIsPositive = Builder.CreateICmpSGE(LHS, Zero);
+ Value *RHSIsPositive = Builder.CreateICmpSGE(RHS, Zero);
+ Value *HaveSameSign = Builder.CreateICmpEQ(LHSIsPositive,
+ RHSIsPositive);
+
+ // Offset equals 1 if LHS and RHS have the same sign and LHS is not zero.
+ Value *LHSNotZero = Builder.CreateICmpNE(LHS, Zero);
+ Value *OffsetOne = Builder.CreateAnd(HaveSameSign, LHSNotZero);
+ // ... otherwise it is 0.
+ Value *Offset = Builder.CreateSelect(OffsetOne, One, Zero);
+
+ // Calculate Sign(RHS) ...
+ Value *SignRHS = Builder.CreateSelect(RHSIsPositive, One, MinusOne);
+ // ... and Sign(RHS) * Offset
+ Value *SignedOffset = Builder.CreateSExt(OffsetOne, Ty);
+ SignedOffset = Builder.CreateAnd(SignRHS, SignedOffset);
+
+ // Return CDiv = (LHS - Sign(RHS) * Offset) Div RHS + Offset.
+ Value *CDiv = Builder.CreateSub(LHS, SignedOffset);
+ CDiv = Builder.CreateSDiv(CDiv, RHS);
+ return Builder.CreateAdd(CDiv, Offset, "cdiv");
+ }
+
+ // In the case of unsigned arithmetic, LHS and RHS necessarily have the
+ // same sign, so we can use
+ // LHS CDiv RHS = (LHS - 1) Div RHS + 1
+ // as long as LHS is non-zero.
+
+ // Offset is 1 if LHS is non-zero, 0 otherwise.
+ Value *LHSNotZero = Builder.CreateICmpNE(LHS, Zero);
+ Value *Offset = Builder.CreateSelect(LHSNotZero, One, Zero);
+
+ // Return CDiv = (LHS - Offset) Div RHS + Offset.
+ Value *CDiv = Builder.CreateSub(LHS, Offset);
+ CDiv = Builder.CreateUDiv(CDiv, RHS);
+ return Builder.CreateAdd(CDiv, Offset, "cdiv");
+ }
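
The signed branch can be sanity-checked with a small reference model
(illustrative C++; cdiv is a made-up name and C's truncating / plays the
role of Div):

    #include <cassert>

    // Offset is 1 when LHS and RHS have the same sign and LHS != 0.
    int cdiv(int lhs, int rhs) {
      int offset = ((lhs >= 0) == (rhs >= 0)) && lhs != 0;
      int sign_rhs = rhs >= 0 ? 1 : -1;
      return (lhs - sign_rhs * offset) / rhs + offset;
    }

    int main() {
      assert(cdiv(7, 2) == 4);   // 3.5 rounds up to 4
      assert(cdiv(-7, 2) == -3); // -3.5 rounds up to -3
      assert(cdiv(7, -2) == -3);
      assert(cdiv(-7, -2) == 4);
      assert(cdiv(6, 3) == 2);   // exact division is unchanged
    }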
+
+ Value *TreeToLLVM::EmitReg_BIT_AND_EXPR(tree op0, tree op1) {
+ Value *LHS = CastToSameSizeInteger(EmitRegister(op0));
+ Value *RHS = CastToSameSizeInteger(EmitRegister(op1));
+ Value *Res = Builder.CreateAnd(LHS, RHS);
+ return CastFromSameSizeInteger(Res, getRegType(TREE_TYPE(op0)));
+ }
+
+ Value *TreeToLLVM::EmitReg_BIT_IOR_EXPR(tree op0, tree op1) {
+ Value *LHS = CastToSameSizeInteger(EmitRegister(op0));
+ Value *RHS = CastToSameSizeInteger(EmitRegister(op1));
+ Value *Res = Builder.CreateOr(LHS, RHS);
+ return CastFromSameSizeInteger(Res, getRegType(TREE_TYPE(op0)));
+ }
+
+ Value *TreeToLLVM::EmitReg_BIT_XOR_EXPR(tree op0, tree op1) {
+ Value *LHS = CastToSameSizeInteger(EmitRegister(op0));
+ Value *RHS = CastToSameSizeInteger(EmitRegister(op1));
+ Value *Res = Builder.CreateXor(LHS, RHS);
+ return CastFromSameSizeInteger(Res, getRegType(TREE_TYPE(op0)));
+ }
+
+ /// EmitReg_CondExpr - Handle COND_EXPR and VEC_COND_EXPR gimple assign right-
+ /// hand sides.
+ Value *TreeToLLVM::EmitReg_CondExpr(tree op0, tree op1, tree op2) {
+ // The condition is either a comparison or an SSA register. Note that the
+ // reason for accessing tree operands directly rather than taking advantage
+ // of COND_EXPR_COND and friends is that the latter fail for VEC_COND_EXPR,
+ // which is also handled here.
+ Value *CondVal = COMPARISON_CLASS_P(op0) ?
+ EmitCompare(TREE_OPERAND(op0, 0), TREE_OPERAND(op0, 1),
+ TREE_CODE(op0)) :
+ EmitRegister(op0);
+
+ // Ensure the condition has i1 type.
+ if (!CondVal->getType()->getScalarType()->isIntegerTy(1))
+ CondVal = Builder.CreateICmpNE(
+ CondVal, Constant::getNullValue(CondVal->getType()));
+
+ // Emit the true and false values.
+ Value *TrueVal = EmitRegister(op1);
+ Value *FalseVal = EmitRegister(op2);
+ FalseVal = TriviallyTypeConvert(FalseVal, TrueVal->getType());
+
+ // Select the value to use based on the condition.
+ return Builder.CreateSelect(CondVal, TrueVal, FalseVal);
+ }
+
+ Value *TreeToLLVM::EmitReg_COMPLEX_EXPR(tree op0, tree op1) {
+ return CreateComplex(EmitRegister(op0), EmitRegister(op1));
+ }
+
+ Value *TreeToLLVM::EmitReg_FLOOR_DIV_EXPR(tree op0, tree op1) {
+ // Notation: FLOOR_DIV_EXPR <-> FDiv, TRUNC_DIV_EXPR <-> Div.
+ Value *LHS = EmitRegister(op0);
+ Value *RHS = EmitRegister(op1);
+
+ // FDiv calculates LHS/RHS by rounding down to the nearest integer. In terms
+ // of Div this means if the values of LHS and RHS have the same sign or if LHS
+ // is zero, then FDiv necessarily equals Div; and
+ // LHS FDiv RHS = (LHS + Sign(RHS)) Div RHS - 1
+ // otherwise.
+
+ if (TYPE_UNSIGNED(TREE_TYPE(op0)))
+ // In the case of unsigned arithmetic, LHS and RHS necessarily have the
+ // same sign, so FDiv is the same as Div.
+ return Builder.CreateUDiv(LHS, RHS, "fdiv");
+
+ Type *Ty = getRegType(TREE_TYPE(op0));
+ Constant *Zero = ConstantInt::get(Ty, 0);
+ Constant *One = ConstantInt::get(Ty, 1);
+ Constant *MinusOne = Constant::getAllOnesValue(Ty);
+
+ // In the case of signed arithmetic, we calculate FDiv as follows:
+ // LHS FDiv RHS = (LHS + Sign(RHS) * Offset) Div RHS - Offset,
+ // where Offset is 1 if LHS and RHS have opposite signs and LHS is
+ // not zero, and 0 otherwise.
+
+ // Determine the signs of LHS and RHS, and whether they have the same sign.
+ Value *LHSIsPositive = Builder.CreateICmpSGE(LHS, Zero);
+ Value *RHSIsPositive = Builder.CreateICmpSGE(RHS, Zero);
+ Value *SignsDiffer = Builder.CreateICmpNE(LHSIsPositive, RHSIsPositive);
+
+ // Offset equals 1 if LHS and RHS have opposite signs and LHS is not zero.
+ Value *LHSNotZero = Builder.CreateICmpNE(LHS, Zero);
+ Value *OffsetOne = Builder.CreateAnd(SignsDiffer, LHSNotZero);
+ // ... otherwise it is 0.
+ Value *Offset = Builder.CreateSelect(OffsetOne, One, Zero);
+
+ // Calculate Sign(RHS) ...
+ Value *SignRHS = Builder.CreateSelect(RHSIsPositive, One, MinusOne);
+ // ... and Sign(RHS) * Offset
+ Value *SignedOffset = Builder.CreateSExt(OffsetOne, Ty);
+ SignedOffset = Builder.CreateAnd(SignRHS, SignedOffset);
+
+ // Return FDiv = (LHS + Sign(RHS) * Offset) Div RHS - Offset.
+ Value *FDiv = Builder.CreateAdd(LHS, SignedOffset);
+ FDiv = Builder.CreateSDiv(FDiv, RHS);
+ return Builder.CreateSub(FDiv, Offset, "fdiv");
+ }
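
The same kind of reference model works for the signed case here
(illustrative C++; floor_div is a made-up name):

    #include <cassert>

    // Offset is 1 when LHS and RHS have opposite signs and LHS != 0.
    int floor_div(int lhs, int rhs) {
      int offset = ((lhs >= 0) != (rhs >= 0)) && lhs != 0;
      int sign_rhs = rhs >= 0 ? 1 : -1;
      return (lhs + sign_rhs * offset) / rhs - offset;
    }

    int main() {
      assert(floor_div(7, 2) == 3);   // 3.5 rounds down to 3
      assert(floor_div(-7, 2) == -4); // -3.5 rounds down to -4
      assert(floor_div(7, -2) == -4);
      assert(floor_div(-7, -2) == 3);
    }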
+
+ Value *TreeToLLVM::EmitReg_FLOOR_MOD_EXPR(tree op0, tree op1) {
+ // Notation: FLOOR_MOD_EXPR <-> Mod, TRUNC_MOD_EXPR <-> Rem.
+
+ Value *LHS = EmitRegister(op0);
+ Value *RHS = EmitRegister(op1);
+
+ // We express Mod in terms of Rem as follows: if RHS exactly divides LHS,
+ // or the values of LHS and RHS have the same sign, then Mod equals Rem.
+ // Otherwise Mod equals Rem + RHS. This means that LHS Mod RHS traps iff
+ // LHS Rem RHS traps.
+ if (TYPE_UNSIGNED(TREE_TYPE(op0)))
+ // LHS and RHS values must have the same sign if their type is unsigned.
+ return Builder.CreateURem(LHS, RHS);
+
+ Type *Ty = getRegType(TREE_TYPE(op0));
+ Constant *Zero = ConstantInt::get(Ty, 0);
+
+ // The two possible values for Mod.
+ Value *Rem = Builder.CreateSRem(LHS, RHS, "rem");
+ Value *RemPlusRHS = Builder.CreateAdd(Rem, RHS);
+
+ // HaveSameSign: (LHS >= 0) == (RHS >= 0).
+ Value *LHSIsPositive = Builder.CreateICmpSGE(LHS, Zero);
+ Value *RHSIsPositive = Builder.CreateICmpSGE(RHS, Zero);
+ Value *HaveSameSign = Builder.CreateICmpEQ(LHSIsPositive, RHSIsPositive);
+
+ // RHS exactly divides LHS iff Rem is zero.
+ Value *RemIsZero = Builder.CreateICmpEQ(Rem, Zero);
+
+ Value *SameAsRem = Builder.CreateOr(HaveSameSign, RemIsZero);
+ return Builder.CreateSelect(SameAsRem, Rem, RemPlusRHS, "mod");
+ }
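
And likewise for Mod in terms of C's truncating % (illustrative C++;
floor_mod is a made-up name):

    #include <cassert>

    // Mod equals Rem when RHS divides LHS exactly or the signs agree;
    // otherwise Mod equals Rem + RHS.
    int floor_mod(int lhs, int rhs) {
      int rem = lhs % rhs;
      bool same_sign = (lhs >= 0) == (rhs >= 0);
      return (same_sign || rem == 0) ? rem : rem + rhs;
    }

    int main() {
      assert(floor_mod(7, 3) == 1);
      assert(floor_mod(-7, 3) == 2);  // rem is -1, so mod is -1 + 3
      assert(floor_mod(7, -3) == -2); // rem is 1, so mod is 1 + (-3)
      assert(floor_mod(-7, -3) == -1);
    }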
+
+ Value *TreeToLLVM::EmitReg_MINUS_EXPR(tree op0, tree op1) {
+ Value *LHS = EmitRegister(op0);
+ Value *RHS = EmitRegister(op1);
+ tree type = TREE_TYPE(op0);
+
+ if (isa<COMPLEX_TYPE>(type)) {
+ tree elt_type = TREE_TYPE(type);
+ Value *LHSr, *LHSi;
+ SplitComplex(LHS, LHSr, LHSi);
+ Value *RHSr, *RHSi;
+ SplitComplex(RHS, RHSr, RHSi);
+
+ // (a+ib) - (c+id) = (a-c) + i(b-d)
+ LHSr = CreateAnySub(LHSr, RHSr, elt_type);
+ LHSi = CreateAnySub(LHSi, RHSi, elt_type);
+
+ return CreateComplex(LHSr, LHSi);
+ }
+
+ return CreateAnySub(LHS, RHS, type);
+ }
+
+ Value *TreeToLLVM::EmitReg_MULT_EXPR(tree op0, tree op1) {
+ Value *LHS = EmitRegister(op0);
+ Value *RHS = EmitRegister(op1);
+ tree type = TREE_TYPE(op0);
+
+ if (isa<COMPLEX_TYPE>(type)) {
+ tree elt_type = TREE_TYPE(type);
+ Value *LHSr, *LHSi;
+ SplitComplex(LHS, LHSr, LHSi);
+ Value *RHSr, *RHSi;
+ SplitComplex(RHS, RHSr, RHSi);
+ Value *DSTr, *DSTi;
+
+ // (a+ib) * (c+id) = (ac-bd) + i(ad+cb)
+ if (isa<REAL_TYPE>(elt_type)) {
+ Value *Tmp1 = Builder.CreateFMul(LHSr, RHSr); // a*c
+ Value *Tmp2 = Builder.CreateFMul(LHSi, RHSi); // b*d
+ DSTr = Builder.CreateFSub(Tmp1, Tmp2); // ac-bd
+
+ Value *Tmp3 = Builder.CreateFMul(LHSr, RHSi); // a*d
+ Value *Tmp4 = Builder.CreateFMul(RHSr, LHSi); // c*b
+ DSTi = Builder.CreateFAdd(Tmp3, Tmp4); // ad+cb
+ } else {
+ // If overflow does not wrap in the element type then it is tempting to
+ // use NSW operations here. However that would be wrong since overflow
+ // of an intermediate value calculated here does not necessarily imply
+ // that the final result overflows.
+ Value *Tmp1 = Builder.CreateMul(LHSr, RHSr); // a*c
+ Value *Tmp2 = Builder.CreateMul(LHSi, RHSi); // b*d
+ DSTr = Builder.CreateSub(Tmp1, Tmp2); // ac-bd
+
+ Value *Tmp3 = Builder.CreateMul(LHSr, RHSi); // a*d
+ Value *Tmp4 = Builder.CreateMul(RHSr, LHSi); // c*b
+ DSTi = Builder.CreateAdd(Tmp3, Tmp4); // ad+cb
+ }
+
+ return CreateComplex(DSTr, DSTi);
+ }
+
+ return CreateAnyMul(LHS, RHS, type);
+ }
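
The identity in the comment is the textbook one; a quick cross-check with
std::complex (illustrative only; the chosen values keep every intermediate
product exact in double):

    #include <cassert>
    #include <complex>

    int main() {
      // (a+ib) * (c+id) = (ac-bd) + i(ad+cb)
      double a = 1, b = 2, c = 3, d = 4;
      std::complex<double> ref = std::complex<double>(a, b) *
                                 std::complex<double>(c, d);
      assert(ref.real() == a * c - b * d); // -5
      assert(ref.imag() == a * d + c * b); // 10
    }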
+
+ Value *TreeToLLVM::EmitReg_PLUS_EXPR(tree op0, tree op1) {
+ Value *LHS = EmitRegister(op0);
+ Value *RHS = EmitRegister(op1);
+ tree type = TREE_TYPE(op0);
+
+ if (isa<COMPLEX_TYPE>(type)) {
+ tree elt_type = TREE_TYPE(type);
+ Value *LHSr, *LHSi;
+ SplitComplex(LHS, LHSr, LHSi);
+ Value *RHSr, *RHSi;
+ SplitComplex(RHS, RHSr, RHSi);
+
+ // (a+ib) + (c+id) = (a+c) + i(b+d)
+ LHSr = CreateAnyAdd(LHSr, RHSr, elt_type);
+ LHSi = CreateAnyAdd(LHSi, RHSi, elt_type);
+
+ return CreateComplex(LHSr, LHSi);
+ }
+
+ return CreateAnyAdd(LHS, RHS, type);
+ }
+
+ Value *TreeToLLVM::EmitReg_POINTER_PLUS_EXPR(tree op0, tree op1) {
+ Value *Ptr = EmitRegister(op0); // The pointer.
+ Value *Idx = EmitRegister(op1); // The offset in units.
+
+ // Convert the pointer into an i8* and add the offset to it.
+ Ptr = Builder.CreateBitCast(Ptr, GetUnitPointerType(Context));
+ StringRef GEPName = flag_verbose_asm ? "pp" : "";
+ return POINTER_TYPE_OVERFLOW_UNDEFINED ?
+ Builder.CreateInBoundsGEP(Ptr, Idx, GEPName) :
+ Builder.CreateGEP(Ptr, Idx, GEPName);
+ }
+
+ Value *TreeToLLVM::EmitReg_RDIV_EXPR(tree op0, tree op1) {
+ Value *LHS = EmitRegister(op0);
+ Value *RHS = EmitRegister(op1);
+ tree type = TREE_TYPE(op0);
+
+ if (isa<COMPLEX_TYPE>(type)) {
+ Value *LHSr, *LHSi;
+ SplitComplex(LHS, LHSr, LHSi);
+ Value *RHSr, *RHSi;
+ SplitComplex(RHS, RHSr, RHSi);
+ Value *DSTr, *DSTi;
+
+ // (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd))
+ assert(isa<REAL_TYPE>(TREE_TYPE(type)) &&
+ "RDIV_EXPR not floating point!");
+ Value *Tmp1 = Builder.CreateFMul(LHSr, RHSr); // a*c
+ Value *Tmp2 = Builder.CreateFMul(LHSi, RHSi); // b*d
+ Value *Tmp3 = Builder.CreateFAdd(Tmp1, Tmp2); // ac+bd
+
+ Value *Tmp4 = Builder.CreateFMul(RHSr, RHSr); // c*c
+ Value *Tmp5 = Builder.CreateFMul(RHSi, RHSi); // d*d
+ Value *Tmp6 = Builder.CreateFAdd(Tmp4, Tmp5); // cc+dd
+ DSTr = Builder.CreateFDiv(Tmp3, Tmp6);
+
+ Value *Tmp7 = Builder.CreateFMul(LHSi, RHSr); // b*c
+ Value *Tmp8 = Builder.CreateFMul(LHSr, RHSi); // a*d
+ Value *Tmp9 = Builder.CreateFSub(Tmp7, Tmp8); // bc-ad
+ DSTi = Builder.CreateFDiv(Tmp9, Tmp6);
+
+ return CreateComplex(DSTr, DSTi);
+ }
+
+ assert(isa<FLOAT_TYPE>(type) && "RDIV_EXPR not floating point!");
+ return Builder.CreateFDiv(LHS, RHS);
+ }
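
The division identity can be checked the same way (illustrative; taking
c == d == 1 makes cc+dd a power of two, so the divisions are exact):

    #include <cassert>
    #include <complex>

    int main() {
      // (a+ib) / (c+id) = ((ac+bd) + i(bc-ad)) / (cc+dd)
      double a = 1, b = 2, c = 1, d = 1;
      double den = c * c + d * d; // cc+dd = 2
      std::complex<double> q((a * c + b * d) / den, (b * c - a * d) / den);
      // Multiplying back by the divisor recovers the dividend exactly here.
      assert(q * std::complex<double>(c, d) == std::complex<double>(a, b));
    }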
+
+ Value *TreeToLLVM::EmitReg_ROUND_DIV_EXPR(tree op0, tree op1) {
+ // Notation: ROUND_DIV_EXPR <-> RDiv, TRUNC_DIV_EXPR <-> Div.
+
+ // RDiv calculates LHS/RHS by rounding to the nearest integer. Ties
+ // are broken by rounding away from zero. In terms of Div this means:
+ // LHS RDiv RHS = (LHS + (RHS Div 2)) Div RHS
+ // if the values of LHS and RHS have the same sign; and
+ // LHS RDiv RHS = (LHS - (RHS Div 2)) Div RHS
+ // if the values of LHS and RHS differ in sign. The intermediate
+ // expressions in these formulae can overflow, so some tweaking is
+ // required to ensure correct results. The details depend on whether
+ // we are doing signed or unsigned arithmetic.
+
+ Type *Ty = getRegType(TREE_TYPE(op0));
+ Constant *Zero = ConstantInt::get(Ty, 0);
+ Constant *Two = ConstantInt::get(Ty, 2);
+
+ Value *LHS = EmitRegister(op0);
+ Value *RHS = EmitRegister(op1);
+
+ if (!TYPE_UNSIGNED(TREE_TYPE(op0))) {
+ // In the case of signed arithmetic, we calculate RDiv as follows:
+ // LHS RDiv RHS = (sign) ( (|LHS| + (|RHS| UDiv 2)) UDiv |RHS| ),
+ // where sign is +1 if LHS and RHS have the same sign, -1 if their
+ // signs differ. Doing the computation unsigned ensures that there
+ // is no overflow.
+
+ // On some machines INT_MIN Div -1 traps. You might expect a trap for
+ // INT_MIN RDiv -1 too, but this implementation will not generate one.
+ // Quick quiz question: what value is returned for INT_MIN RDiv -1?
+
+ // Determine the signs of LHS and RHS, and whether they have the same sign.
+ Value *LHSIsPositive = Builder.CreateICmpSGE(LHS, Zero);
+ Value *RHSIsPositive = Builder.CreateICmpSGE(RHS, Zero);
+ Value *HaveSameSign = Builder.CreateICmpEQ(LHSIsPositive,
+ RHSIsPositive);
+
+ // Calculate |LHS| ...
+ Value *MinusLHS = Builder.CreateNeg(LHS);
+ Value *AbsLHS = Builder.CreateSelect(LHSIsPositive, LHS, MinusLHS,
+ LHS->getName() + ".abs");
+ // ... and |RHS|
+ Value *MinusRHS = Builder.CreateNeg(RHS);
+ Value *AbsRHS = Builder.CreateSelect(RHSIsPositive, RHS, MinusRHS,
+ RHS->getName() + ".abs");
+
+ // Calculate AbsRDiv = (|LHS| + (|RHS| UDiv 2)) UDiv |RHS|.
+ Value *HalfAbsRHS = Builder.CreateUDiv(AbsRHS, Two);
+ Value *Numerator = Builder.CreateAdd(AbsLHS, HalfAbsRHS);
+ Value *AbsRDiv = Builder.CreateUDiv(Numerator, AbsRHS);
+
+ // Return AbsRDiv or -AbsRDiv according to whether LHS and RHS have the
+ // same sign or not.
+ Value *MinusAbsRDiv = Builder.CreateNeg(AbsRDiv);
+ return Builder.CreateSelect(HaveSameSign, AbsRDiv, MinusAbsRDiv,
+ "rdiv");
+ }
+
+ // In the case of unsigned arithmetic, LHS and RHS necessarily have the
+ // same sign, however overflow is a problem. We want to use the formula
+ // LHS RDiv RHS = (LHS + (RHS Div 2)) Div RHS,
+ // but if LHS + (RHS Div 2) overflows then we get the wrong result. Since
+ // the use of a conditional branch seems to be unavoidable, we choose the
+ // simple solution of explicitly checking for overflow, and using
+ // LHS RDiv RHS = ((LHS + (RHS Div 2)) - RHS) Div RHS + 1
+ // if it occurred.
+
+ // Usually the numerator is LHS + (RHS Div 2); calculate this.
+ Value *HalfRHS = Builder.CreateUDiv(RHS, Two);
+ Value *Numerator = Builder.CreateAdd(LHS, HalfRHS);
+
+ // Did the calculation overflow?
+ Value *Overflowed = Builder.CreateICmpULT(Numerator, HalfRHS);
+
+ // If so, use (LHS + (RHS Div 2)) - RHS for the numerator instead.
+ Value *AltNumerator = Builder.CreateSub(Numerator, RHS);
+ Numerator = Builder.CreateSelect(Overflowed, AltNumerator, Numerator);
+
+ // Quotient = Numerator / RHS.
+ Value *Quotient = Builder.CreateUDiv(Numerator, RHS);
+
+ // Return Quotient unless we overflowed, in which case return Quotient + 1.
+ return Builder.CreateAdd(
+ Quotient, Builder.CreateIntCast(Overflowed, Ty, /*isSigned*/ false),
+ "rdiv");
+ }
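
A reference model for the unsigned branch, using uint8_t so the overflow
case is easy to hit (illustrative C++; rdivu8 is a made-up name):

    #include <cassert>
    #include <cstdint>

    uint8_t rdivu8(uint8_t lhs, uint8_t rhs) {
      uint8_t half = rhs / 2;
      uint8_t num = (uint8_t)(lhs + half); // may wrap around
      bool overflowed = num < half;
      if (overflowed) // fall back to ((LHS + RHS/2) - RHS) Div RHS + 1
        num = (uint8_t)(num - rhs);
      return (uint8_t)(num / rhs + (overflowed ? 1 : 0));
    }

    int main() {
      assert(rdivu8(7, 2) == 4);     // ties round away from zero
      assert(rdivu8(250, 100) == 3); // 250 + 50 wraps, yet 2.5 rounds to 3
    }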
+
+ Value *TreeToLLVM::EmitReg_TRUNC_DIV_EXPR(tree op0, tree op1,
+ bool isExact) {
+ Value *LHS = EmitRegister(op0);
+ Value *RHS = EmitRegister(op1);
+ tree type = TREE_TYPE(op0);
+
+ if (isa<COMPLEX_TYPE>(type)) {
+ tree elt_type = TREE_TYPE(type);
+ Value *LHSr, *LHSi;
+ SplitComplex(LHS, LHSr, LHSi);
+ Value *RHSr, *RHSi;
+ SplitComplex(RHS, RHSr, RHSi);
+ Value *DSTr, *DSTi;
+
+ // (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd))
+ assert(LHSr->getType()->isIntegerTy() && "TRUNC_DIV_EXPR not integer!");
+ // If overflow does not wrap in the element type then it is tempting to
+ // use NSW operations here. However that would be wrong since overflow
+ // of an intermediate value calculated here does not necessarily imply
+ // that the final result overflows.
+ Value *Tmp1 = Builder.CreateMul(LHSr, RHSr); // a*c
+ Value *Tmp2 = Builder.CreateMul(LHSi, RHSi); // b*d
+ Value *Tmp3 = Builder.CreateAdd(Tmp1, Tmp2); // ac+bd
+
+ Value *Tmp4 = Builder.CreateMul(RHSr, RHSr); // c*c
+ Value *Tmp5 = Builder.CreateMul(RHSi, RHSi); // d*d
+ Value *Tmp6 = Builder.CreateAdd(Tmp4, Tmp5); // cc+dd
+ DSTr = TYPE_UNSIGNED(elt_type) ? Builder.CreateUDiv(Tmp3, Tmp6) :
+ Builder.CreateSDiv(Tmp3, Tmp6);
+
+ Value *Tmp7 = Builder.CreateMul(LHSi, RHSr); // b*c
+ Value *Tmp8 = Builder.CreateMul(LHSr, RHSi); // a*d
+ Value *Tmp9 = Builder.CreateSub(Tmp7, Tmp8); // bc-ad
+ DSTi = TYPE_UNSIGNED(elt_type) ? Builder.CreateUDiv(Tmp9, Tmp6) :
+ Builder.CreateSDiv(Tmp9, Tmp6);
+
+ return CreateComplex(DSTr, DSTi);
+ }
+
+ assert(LHS->getType()->isIntOrIntVectorTy() &&
+ "TRUNC_DIV_EXPR not integer!");
+ if (TYPE_UNSIGNED(type))
+ return Builder.CreateUDiv(LHS, RHS, "", isExact);
+ else
+ return Builder.CreateSDiv(LHS, RHS, "", isExact);
+ }
+
+ Value *TreeToLLVM::EmitReg_TRUNC_MOD_EXPR(tree op0, tree op1) {
+ Value *LHS = EmitRegister(op0);
+ Value *RHS = EmitRegister(op1);
+ return TYPE_UNSIGNED(TREE_TYPE(op0)) ? Builder.CreateURem(LHS, RHS) :
+ Builder.CreateSRem(LHS, RHS);
+ }
#if (GCC_MINOR < 7)
-Value *TreeToLLVM::EmitReg_VEC_EXTRACT_EVEN_EXPR(tree op0, tree op1) {
- Value *LHS = EmitRegister(op0);
- Value *RHS = EmitRegister(op1);
- unsigned Length = (unsigned)TYPE_VECTOR_SUBPARTS(TREE_TYPE(op0));
- SmallVector<Constant*, 16> Mask;
- Mask.reserve(Length);
- for (unsigned i = 0; i != Length; ++i)
- Mask.push_back(Builder.getInt32(2*i));
- return Builder.CreateShuffleVector(LHS, RHS, ConstantVector::get(Mask));
-}
+ Value *TreeToLLVM::EmitReg_VEC_EXTRACT_EVEN_EXPR(tree op0, tree op1) {
+ Value *LHS = EmitRegister(op0);
+ Value *RHS = EmitRegister(op1);
+ unsigned Length = (unsigned) TYPE_VECTOR_SUBPARTS(TREE_TYPE(op0));
+ SmallVector<Constant *, 16> Mask;
+ Mask.reserve(Length);
+ for (unsigned i = 0; i != Length; ++i)
+ Mask.push_back(Builder.getInt32(2 * i));
+ return Builder.CreateShuffleVector(LHS, RHS, ConstantVector::get(Mask));
+ }
#endif
#if (GCC_MINOR < 7)
-Value *TreeToLLVM::EmitReg_VEC_EXTRACT_ODD_EXPR(tree op0, tree op1) {
- Value *LHS = EmitRegister(op0);
- Value *RHS = EmitRegister(op1);
- unsigned Length = (unsigned)TYPE_VECTOR_SUBPARTS(TREE_TYPE(op0));
- SmallVector<Constant*, 16> Mask;
- Mask.reserve(Length);
- for (unsigned i = 0; i != Length; ++i)
- Mask.push_back(Builder.getInt32(2*i+1));
- return Builder.CreateShuffleVector(LHS, RHS, ConstantVector::get(Mask));
-}
+ Value *TreeToLLVM::EmitReg_VEC_EXTRACT_ODD_EXPR(tree op0, tree op1) {
+ Value *LHS = EmitRegister(op0);
+ Value *RHS = EmitRegister(op1);
+ unsigned Length = (unsigned) TYPE_VECTOR_SUBPARTS(TREE_TYPE(op0));
+ SmallVector<Constant *, 16> Mask;
+ Mask.reserve(Length);
+ for (unsigned i = 0; i != Length; ++i)
+ Mask.push_back(Builder.getInt32(2 * i + 1));
+ return Builder.CreateShuffleVector(LHS, RHS, ConstantVector::get(Mask));
+ }
#endif
#if (GCC_MINOR < 7)
-Value *TreeToLLVM::EmitReg_VEC_INTERLEAVE_HIGH_EXPR(tree op0, tree op1) {
- Value *LHS = EmitRegister(op0);
- Value *RHS = EmitRegister(op1);
- unsigned Length = (unsigned)TYPE_VECTOR_SUBPARTS(TREE_TYPE(op0));
- assert(!(Length & 1) && "Expected an even number of vector elements!");
- SmallVector<Constant*, 16> Mask;
- Mask.reserve(Length);
- for (unsigned i = Length/2; i != Length; ++i) {
- Mask.push_back(Builder.getInt32(i));
- Mask.push_back(Builder.getInt32(Length + i));
- }
- return Builder.CreateShuffleVector(LHS, RHS, ConstantVector::get(Mask));
-}
+ Value *TreeToLLVM::EmitReg_VEC_INTERLEAVE_HIGH_EXPR(tree op0, tree op1) {
+ Value *LHS = EmitRegister(op0);
+ Value *RHS = EmitRegister(op1);
+ unsigned Length = (unsigned) TYPE_VECTOR_SUBPARTS(TREE_TYPE(op0));
+ assert(!(Length & 1) && "Expected an even number of vector elements!");
+ SmallVector<Constant *, 16> Mask;
+ Mask.reserve(Length);
+ for (unsigned i = Length / 2; i != Length; ++i) {
+ Mask.push_back(Builder.getInt32(i));
+ Mask.push_back(Builder.getInt32(Length + i));
+ }
+ return Builder.CreateShuffleVector(LHS, RHS, ConstantVector::get(Mask));
+ }
#endif
#if (GCC_MINOR < 7)
-Value *TreeToLLVM::EmitReg_VEC_INTERLEAVE_LOW_EXPR(tree op0, tree op1) {
- Value *LHS = EmitRegister(op0);
- Value *RHS = EmitRegister(op1);
- unsigned Length = (unsigned)TYPE_VECTOR_SUBPARTS(TREE_TYPE(op0));
- assert(!(Length & 1) && "Expected an even number of vector elements!");
- SmallVector<Constant*, 16> Mask;
- Mask.reserve(Length);
- for (unsigned i = 0, e = Length/2; i != e; ++i) {
- Mask.push_back(Builder.getInt32(i));
- Mask.push_back(Builder.getInt32(Length + i));
- }
- return Builder.CreateShuffleVector(LHS, RHS, ConstantVector::get(Mask));
-}
+ Value *TreeToLLVM::EmitReg_VEC_INTERLEAVE_LOW_EXPR(tree op0, tree op1) {
+ Value *LHS = EmitRegister(op0);
+ Value *RHS = EmitRegister(op1);
+ unsigned Length = (unsigned) TYPE_VECTOR_SUBPARTS(TREE_TYPE(op0));
+ assert(!(Length & 1) && "Expected an even number of vector elements!");
+ SmallVector<Constant *, 16> Mask;
+ Mask.reserve(Length);
+ for (unsigned i = 0, e = Length / 2; i != e; ++i) {
+ Mask.push_back(Builder.getInt32(i));
+ Mask.push_back(Builder.getInt32(Length + i));
+ }
+ return Builder.CreateShuffleVector(LHS, RHS, ConstantVector::get(Mask));
+ }
#endif
-Value *TreeToLLVM::EmitReg_VEC_PACK_TRUNC_EXPR(tree type, tree op0, tree op1) {
- // Eg: <4 x float> = VEC_PACK_TRUNC_EXPR(<2 x double>, <2 x double>)
- // or <4 x i32> = VEC_PACK_FIX_TRUNC_EXPR(<2 x double>, <2 x double>).
- Value *LHS = EmitRegister(op0);
- Value *RHS = EmitRegister(op1);
-
- // Truncate the input elements to the output element type, eg: <2 x double>
- // -> <2 x float>.
- unsigned Length = (unsigned)TYPE_VECTOR_SUBPARTS(TREE_TYPE(op0));
- Type *DestTy = VectorType::get(getRegType(TREE_TYPE(type)), Length);
- LHS = CastToAnyType(LHS, !TYPE_UNSIGNED(TREE_TYPE(TREE_TYPE(op0))), DestTy,
- !TYPE_UNSIGNED(TREE_TYPE(type)));
- RHS = CastToAnyType(RHS, !TYPE_UNSIGNED(TREE_TYPE(TREE_TYPE(op0))), DestTy,
- !TYPE_UNSIGNED(TREE_TYPE(type)));
-
- // Concatenate the truncated inputs into one vector of twice the length,
- // eg: <2 x float>, <2 x float> -> <4 x float>.
- SmallVector<Constant*, 16> Mask;
- Mask.reserve(2*Length);
- for (unsigned i = 0, e = 2*Length; i != e; ++i)
- Mask.push_back(Builder.getInt32(i));
- return Builder.CreateShuffleVector(LHS, RHS, ConstantVector::get(Mask));
-}
+ Value *TreeToLLVM::EmitReg_VEC_PACK_TRUNC_EXPR(tree type, tree op0,
+ tree op1) {
+ // Eg: <4 x float> = VEC_PACK_TRUNC_EXPR(<2 x double>, <2 x double>)
+ // or <4 x i32> = VEC_PACK_FIX_TRUNC_EXPR(<2 x double>, <2 x double>).
+ Value *LHS = EmitRegister(op0);
+ Value *RHS = EmitRegister(op1);
+
+ // Truncate the input elements to the output element type, eg: <2 x double>
+ // -> <2 x float>.
+ unsigned Length = (unsigned) TYPE_VECTOR_SUBPARTS(TREE_TYPE(op0));
+ Type *DestTy = VectorType::get(getRegType(TREE_TYPE(type)), Length);
+ LHS = CastToAnyType(LHS, !TYPE_UNSIGNED(TREE_TYPE(TREE_TYPE(op0))),
+ DestTy, !TYPE_UNSIGNED(TREE_TYPE(type)));
+ RHS = CastToAnyType(RHS, !TYPE_UNSIGNED(TREE_TYPE(TREE_TYPE(op0))),
+ DestTy, !TYPE_UNSIGNED(TREE_TYPE(type)));
+
+ // Concatenate the truncated inputs into one vector of twice the length,
+ // eg: <2 x float>, <2 x float> -> <4 x float>.
+ SmallVector<Constant *, 16> Mask;
+ Mask.reserve(2 * Length);
+ for (unsigned i = 0, e = 2 * Length; i != e; ++i)
+ Mask.push_back(Builder.getInt32(i));
+ return Builder.CreateShuffleVector(LHS, RHS, ConstantVector::get(Mask));
+ }
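
The pack step itself is plain concatenation once both inputs have been
truncated, since the mask <0, 1, ..., 2*Length-1> selects every element of
both operands in order. A scalar model (illustrative C++; packTrunc is a
made-up name):

    #include <cassert>
    #include <vector>

    std::vector<float> packTrunc(const std::vector<double> &lhs,
                                 const std::vector<double> &rhs) {
      std::vector<float> out;
      for (double x : lhs)
        out.push_back((float)x); // truncate the LHS elements
      for (double x : rhs)
        out.push_back((float)x); // truncated RHS elements follow
      return out;
    }

    int main() {
      std::vector<float> v = packTrunc({ 1.0, 2.0 }, { 3.0, 4.0 });
      assert((v == std::vector<float>{ 1.f, 2.f, 3.f, 4.f }));
    }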
#if (GCC_MINOR > 6)
-Value *TreeToLLVM::EmitReg_VEC_PERM_EXPR(tree op0, tree op1, tree op2) {
- unsigned Length = (unsigned)TYPE_VECTOR_SUBPARTS(TREE_TYPE(op0));
+ Value *TreeToLLVM::EmitReg_VEC_PERM_EXPR(tree op0, tree op1, tree op2) {
+ unsigned Length = (unsigned) TYPE_VECTOR_SUBPARTS(TREE_TYPE(op0));
- // The vectors to shuffle.
- Value *V0 = EmitRegister(op0);
- Value *V1 = EmitRegister(op1);
-
- // The shuffle mask.
- Value *Mask = EmitRegister(op2);
-
- // The GCC semantics are that mask indices off the end are wrapped back into
- // range, so reduce the mask modulo 2*Length.
- assert(!(Length & (Length - 1)) && "Vector length not a power of two!");
- Mask = Builder.CreateAnd(Mask, ConstantInt::get(Mask->getType(), 2*Length-1));
-
- // Convert to a vector of i32, as required by the shufflevector instruction.
- Type *MaskTy = VectorType::get(Builder.getInt32Ty(), Length);
- tree mask_elt_type = TREE_TYPE(TREE_TYPE(op2));
- Value *Mask32 = Builder.CreateIntCast(Mask, MaskTy,
- !TYPE_UNSIGNED(mask_elt_type));
-
- // Use a shufflevector instruction if this directly corresponds to one, i.e.
- // if the mask is a vector of constant integers or undef.
- if (ShuffleVectorInst::isValidOperands(V0, V1, Mask32))
- return Builder.CreateShuffleVector(V0, V1, Mask32);
-
- // Store the vectors to successive memory locations in a temporary.
- tree elt_type = TREE_TYPE(TREE_TYPE(op0));
- Type *EltTy = ConvertType(elt_type);
- unsigned Align = DL.getABITypeAlignment(EltTy);
- // The temporary is a struct containing the pair of input vectors.
- Type *TmpTy = StructType::get(ConvertType(TREE_TYPE(op0)),
- ConvertType(TREE_TYPE(op1)), NULL);
- AllocaInst *Tmp = CreateTemporary(TmpTy, Align);
- // Store the first vector to the first element of the pair.
- Value *Tmp0 = Builder.CreateStructGEP(Tmp, 0, flag_verbose_asm ?
- "vp1s" : "");
- StoreRegisterToMemory(V0, MemRef(Tmp0, Align, /*Volatile*/false),
- TREE_TYPE(op0), 0, Builder);
- // Store the second vector to the second element of the pair.
- Value *Tmp1 = Builder.CreateStructGEP(Tmp, 1, flag_verbose_asm ?
- "vp2s" : "");
- StoreRegisterToMemory(V1, MemRef(Tmp1, Align, /*Volatile*/false),
- TREE_TYPE(op1), 0, Builder);
-
- // Load out the components according to the mask.
- Value *Result = UndefValue::get(V0->getType());
- Value *BaseAddr = Builder.CreateBitCast(Tmp, EltTy->getPointerTo());
- for (unsigned i = 0; i != Length; ++i) {
- // Extract from the mask the index of the element to load.
- Value *MaskIdx = Builder.getInt32(i);
- Value *Idx = Builder.CreateExtractElement(Mask, MaskIdx);
- // Advance that many elements from the start of the temporary and load it.
- Value *Ptr = Builder.CreateInBoundsGEP(BaseAddr, Idx, flag_verbose_asm ?
- "vpl" : "");
- Value *Elt = LoadRegisterFromMemory(MemRef(Ptr, Align, false), elt_type, 0,
- Builder);
- // Insert it into the result.
- Result = Builder.CreateInsertElement(Result, Elt, MaskIdx);
- }
- return Result;
-}
-#endif
-
-Value *TreeToLLVM::EmitReg_VecUnpackHiExpr(tree type, tree op0) {
- // Eg: <2 x double> = VEC_UNPACK_HI_EXPR(<4 x float>)
- Value *Op = EmitRegister(op0);
-
- // Extract the high elements, eg: <4 x float> -> <2 x float>.
- Op = VectorHighElements(Op);
-
- // Extend the input elements to the output element type, eg: <2 x float>
- // -> <2 x double>.
- Type *DestTy = getRegType(type);
- return CastToAnyType(Op, !TYPE_UNSIGNED(TREE_TYPE(TREE_TYPE(op0))), DestTy,
- !TYPE_UNSIGNED(TREE_TYPE(type)));
-}
-
-Value *TreeToLLVM::EmitReg_VecUnpackLoExpr(tree type, tree op0) {
- // Eg: <2 x double> = VEC_UNPACK_LO_EXPR(<4 x float>)
- Value *Op = EmitRegister(op0);
-
- // Extract the low elements, eg: <4 x float> -> <2 x float>.
- Op = VectorLowElements(Op);
-
- // Extend the input elements to the output element type, eg: <2 x float>
- // -> <2 x double>.
- Type *DestTy = getRegType(type);
- return CastToAnyType(Op, !TYPE_UNSIGNED(TREE_TYPE(TREE_TYPE(op0))), DestTy,
- !TYPE_UNSIGNED(TREE_TYPE(type)));
-}
-
-Value *TreeToLLVM::EmitReg_VEC_WIDEN_MULT_HI_EXPR(tree type, tree op0,
- tree op1) {
- Value *Hi0 = EmitReg_VecUnpackHiExpr(type, op0);
- Value *Hi1 = EmitReg_VecUnpackHiExpr(type, op1);
- return Builder.CreateMul(Hi0, Hi1);
-}
-
-Value *TreeToLLVM::EmitReg_VEC_WIDEN_MULT_LO_EXPR(tree type, tree op0,
- tree op1) {
- Value *Lo0 = EmitReg_VecUnpackLoExpr(type, op0);
- Value *Lo1 = EmitReg_VecUnpackLoExpr(type, op1);
- return Builder.CreateMul(Lo0, Lo1);
-}
-
-Value *TreeToLLVM::EmitReg_WIDEN_MULT_EXPR(tree type, tree op0, tree op1) {
- Value *LHS = EmitRegisterWithCast(op0, type);
- Value *RHS = EmitRegisterWithCast(op1, type);
- return Builder.CreateMul(LHS, RHS);
-}
-
-
-//===----------------------------------------------------------------------===//
-// ... Exception Handling ...
-//===----------------------------------------------------------------------===//
-
-
-
-//===----------------------------------------------------------------------===//
-// ... Render* - Convert GIMPLE to LLVM ...
-//===----------------------------------------------------------------------===//
-
-void TreeToLLVM::RenderGIMPLE_ASM(gimple stmt) {
- // A gimple asm statement consists of an asm string, a list of outputs, a list
- // of inputs, a list of clobbers, a list of labels and a "volatile" flag.
- // These correspond directly to the elements of an asm statement. For example
- // asm ("combine %2,%0" : "=r" (x) : "0" (x), "g" (y));
- // Here the asm string is "combine %2,%0" and can be obtained as a const char*
- // by calling gimple_asm_string. The only output is "=r" (x). The number of
- // outputs is given by gimple_asm_noutputs, 1 in this case, and the outputs
- // themselves can be obtained by calling gimple_asm_output_op. This returns a
- // TREE_LIST node with an SSA name for "x" as the TREE_VALUE; the TREE_PURPOSE
- // is also a TREE_LIST with TREE_VALUE a string constant holding "=r". There
- // are two inputs, "0" (x) and "g" (y), so gimple_asm_ninputs returns 2. The
- // routine gimple_asm_input_op returns them in the same format as for outputs.
- // The number of clobbers is returned by gimple_asm_nclobbers, 0 in this case.
- // To get the clobbers use gimple_asm_clobber_op. This returns a TREE_LIST
- // node with TREE_VALUE a string constant holding the clobber. To find out if
- // the asm is volatile call gimple_asm_volatile_p, which returns true if so.
- // See below for labels (this example does not have any).
-
- // Note that symbolic names have been substituted before getting here. For
- // example this
- // asm ("cmoveq %1,%2,%[result]" : [result] "=r"(result)
- // : "r"(test), "r"(new), "[result]"(old));
- // turns up as
- // asm ("cmoveq %1,%2,%0" : "=r"(result) : "r"(test), "r"(new), "0"(old));
-
- // Note that clobbers may not turn up in the same order as in the original, eg
- // asm volatile ("movc3 %0,%1,%2" : /* no outputs */
- // : "g" (from), "g" (to), "g" (count)
- // : "r0", "r1", "r2", "r3", "r4", "r5");
- // The clobbers turn up as "r5", "r4", "r3", "r2", "r1", "r0".
-
- // Here is an example of the "asm goto" construct (not yet supported by LLVM):
- // int frob(int x) {
- // int y;
- // asm goto ("frob %%r5, %1; jc %l[error]; mov (%2), %%r5"
- // : : "r"(x), "r"(&y) : "r5", "memory" : error);
- // return y;
- // error:
- // return -1;
- // }
- // The number of labels, one in this case, is returned by gimple_asm_nlabels.
- // The labels themselves are returned by gimple_asm_label_op as a TREE_LIST
- // node with TREE_PURPOSE a string constant holding the label name ("error")
- // and TREE_VALUE holding the appropriate LABEL_DECL.
-
- // TODO: Add support for labels.
- if (gimple_asm_nlabels(stmt) > 0) {
- sorry("'asm goto' not supported");
- return;
- }
-
- const unsigned NumOutputs = gimple_asm_noutputs (stmt);
- const unsigned NumInputs = gimple_asm_ninputs(stmt);
- const unsigned NumClobbers = gimple_asm_nclobbers (stmt);
-
- /// Constraints - The output/input constraints, concatenated together in array
- /// form instead of list form. This way of doing things is forced on us by
- /// GCC routines like parse_output_constraint which rummage around inside the
- /// array.
- const char **Constraints =
- (const char **)alloca((NumOutputs + NumInputs) * sizeof(const char *));
-
- // Initialize the Constraints array.
- for (unsigned i = 0; i != NumOutputs; ++i) {
- tree Output = gimple_asm_output_op(stmt, i);
- // If there's an erroneous arg then bail out.
- if (TREE_TYPE(TREE_VALUE(Output)) == error_mark_node) return;
- // Record the output constraint.
- const char *Constraint =
- TREE_STRING_POINTER(TREE_VALUE(TREE_PURPOSE(Output)));
- Constraints[i] = Constraint;
- }
- for (unsigned i = 0; i != NumInputs; ++i) {
- tree Input = gimple_asm_input_op(stmt, i);
- // If there's an erroneous arg then bail out.
- if (TREE_TYPE(TREE_VALUE(Input)) == error_mark_node) return;
- // Record the input constraint.
- const char *Constraint =
- TREE_STRING_POINTER(TREE_VALUE(TREE_PURPOSE(Input)));
- Constraints[NumOutputs+i] = Constraint;
- }
-
- // Look for multiple alternative constraints: multiple alternatives separated
- // by commas.
-  unsigned NumChoices = 0; // sentinel; real value is always at least 1.
- for (unsigned i = 0; i != NumInputs; ++i) {
- tree Input = gimple_asm_input_op(stmt, i);
- unsigned NumInputChoices = 1;
- for (const char *p = TREE_STRING_POINTER(TREE_VALUE(TREE_PURPOSE(Input)));
- *p; ++p)
- if (*p == ',')
- ++NumInputChoices;
- if (NumChoices && (NumInputChoices != NumChoices)) {
- error("operand constraints for %<asm%> differ in number of alternatives");
- return;
- }
- if (NumChoices == 0)
- NumChoices = NumInputChoices;
- }
- for (unsigned i = 0; i != NumOutputs; ++i) {
- tree Output = gimple_asm_output_op(stmt, i);
- unsigned NumOutputChoices = 1;
- for (const char *p = TREE_STRING_POINTER(TREE_VALUE(TREE_PURPOSE(Output)));
- *p; ++p)
- if (*p == ',')
- ++NumOutputChoices;
- if (NumChoices && (NumOutputChoices != NumChoices)) {
- error("operand constraints for %<asm%> differ in number of alternatives");
- return;
+ // The vectors to shuffle.
+ Value *V0 = EmitRegister(op0);
+ Value *V1 = EmitRegister(op1);
+
+ // The shuffle mask.
+ Value *Mask = EmitRegister(op2);
+
+ // The GCC semantics are that mask indices off the end are wrapped back into
+ // range, so reduce the mask modulo 2*Length.
+ assert(!(Length & (Length - 1)) && "Vector length not a power of two!");
+ Mask = Builder.CreateAnd(Mask, ConstantInt::get(Mask->getType(),
+ 2 * Length - 1));
+
+ // Convert to a vector of i32, as required by the shufflevector instruction.
+ Type *MaskTy = VectorType::get(Builder.getInt32Ty(), Length);
+ tree mask_elt_type = TREE_TYPE(TREE_TYPE(op2));
+ Value *Mask32 = Builder.CreateIntCast(Mask, MaskTy,
+ !TYPE_UNSIGNED(mask_elt_type));
+
+ // Use a shufflevector instruction if this directly corresponds to one, i.e.
+ // if the mask is a vector of constant integers or undef.
+ if (ShuffleVectorInst::isValidOperands(V0, V1, Mask32))
+ return Builder.CreateShuffleVector(V0, V1, Mask32);
+
+ // Store the vectors to successive memory locations in a temporary.
+ tree elt_type = TREE_TYPE(TREE_TYPE(op0));
+ Type *EltTy = ConvertType(elt_type);
+ unsigned Align = DL.getABITypeAlignment(EltTy);
+ // The temporary is a struct containing the pair of input vectors.
+ Type *TmpTy = StructType::get(ConvertType(TREE_TYPE(op0)),
+ ConvertType(TREE_TYPE(op1)), NULL);
+ AllocaInst *Tmp = CreateTemporary(TmpTy, Align);
+ // Store the first vector to the first element of the pair.
+ Value *Tmp0 = Builder.CreateStructGEP(Tmp, 0,
+ flag_verbose_asm ? "vp1s" : "");
+ StoreRegisterToMemory(V0, MemRef(Tmp0, Align, /*Volatile*/ false),
+ TREE_TYPE(op0), 0, Builder);
+ // Store the second vector to the second element of the pair.
+ Value *Tmp1 = Builder.CreateStructGEP(Tmp, 1,
+ flag_verbose_asm ? "vp2s" : "");
+ StoreRegisterToMemory(V1, MemRef(Tmp1, Align, /*Volatile*/ false),
+ TREE_TYPE(op1), 0, Builder);
+
+ // Load out the components according to the mask.
+ Value *Result = UndefValue::get(V0->getType());
+ Value *BaseAddr = Builder.CreateBitCast(Tmp, EltTy->getPointerTo());
+ for (unsigned i = 0; i != Length; ++i) {
+ // Extract from the mask the index of the element to load.
+ Value *MaskIdx = Builder.getInt32(i);
+ Value *Idx = Builder.CreateExtractElement(Mask, MaskIdx);
+ // Advance that many elements from the start of the temporary and load it.
+ Value *Ptr = Builder.CreateInBoundsGEP(BaseAddr, Idx,
+ flag_verbose_asm ? "vpl" : "");
+ Value *Elt = LoadRegisterFromMemory(MemRef(Ptr, Align, false), elt_type,
+ 0, Builder);
+ // Insert it into the result.
+ Result = Builder.CreateInsertElement(Result, Elt, MaskIdx);
+ }
+ return Result;
}
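
A model of the GCC semantics being implemented (illustrative C++; vecPerm
is a made-up name): mask entries index into the concatenation of the two
inputs, and out-of-range indices wrap modulo 2*Length, which the AND above
implements for power-of-two lengths.

    #include <cassert>
    #include <vector>

    std::vector<int> vecPerm(const std::vector<int> &v0,
                             const std::vector<int> &v1,
                             const std::vector<unsigned> &mask) {
      unsigned Length = (unsigned)v0.size(); // power of two, as asserted above
      std::vector<int> out;
      for (unsigned idx : mask) {
        idx &= 2 * Length - 1; // wrap out-of-range indices
        out.push_back(idx < Length ? v0[idx] : v1[idx - Length]);
      }
      return out;
    }

    int main() {
      std::vector<int> r = vecPerm({ 10, 11 }, { 20, 21 }, { 1, 6 });
      assert((r == std::vector<int>{ 11, 20 })); // 6 wraps to 2, i.e. v1[0]
    }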
- if (NumChoices == 0)
- NumChoices = NumOutputChoices;
- }
-
- // If there are multiple constraint tuples, pick one. Constraints is
- // altered to point to shorter strings (which are malloc'ed), and everything
- // below Just Works as in the NumChoices==1 case.
- BumpPtrAllocator StringStorage(256, 256);
- if (NumChoices > 1)
- ChooseConstraintTuple(stmt, Constraints, NumChoices, StringStorage);
-
- // HasSideEffects - Whether the LLVM inline asm should be marked as having
- // side effects.
- bool HasSideEffects = gimple_asm_volatile_p(stmt) || (NumOutputs == 0);
-
- // CallResultTypes - The inline asm call may return one or more results. The
- // types of the results are recorded here along with a flag indicating whether
- // the corresponding GCC type is signed.
- SmallVector<std::pair<Type *, bool>, 4> CallResultTypes;
-
- // CallResultDests - Each result returned by the inline asm call is stored in
- // a memory location. These are listed here along with a flag indicating if
- // the GCC type corresponding to the memory location is signed. The type of
- // the memory location is allowed to differ from the type of the call result,
- // in which case the result is converted before being stored.
- SmallVector<std::pair<Value *, bool>, 4> CallResultDests;
-
-  // CallOps - The operands passed to the inline asm call.
- SmallVector<Value*, 16> CallOps;
-
- // OutputLocations - For each output holds an index into CallOps (if the flag
- // is false) or into CallResultTypes (if the flag is true). Outputs returned
- // in memory are passed to the asm as an operand and thus appear in CallOps.
- // Those returned in registers are obtained as one of the results of the asm
- // call and thus correspond to an entry in CallResultTypes.
- SmallVector<std::pair<bool, unsigned>, 4> OutputLocations;
-
- // SSADefinitions - If the asm defines an SSA name then the SSA name and a
- // memory location are recorded here. The asm result defining the SSA name
-  // will be stored to the memory location, and loaded out afterwards
- // to define the SSA name.
- SmallVector<std::pair<tree, MemRef>, 4> SSADefinitions;
-
- // ConstraintStr - The string of constraints in LLVM format.
- std::string ConstraintStr;
-
- // Process outputs.
- for (unsigned i = 0; i != NumOutputs; ++i) {
- tree Output = gimple_asm_output_op(stmt, i);
- tree Operand = TREE_VALUE(Output);
-
- // Parse the output constraint.
- const char *Constraint = Constraints[i];
- bool IsInOut, AllowsReg, AllowsMem;
- if (!parse_output_constraint(&Constraint, i, NumInputs, NumOutputs,
- &AllowsMem, &AllowsReg, &IsInOut))
- return;
- assert(Constraint[0] == '=' && "Not an output constraint?");
- assert(!IsInOut && "asm expression not gimplified?");
+#endif
- std::string SimplifiedConstraint;
- // If this output register is pinned to a machine register, use that machine
- // register instead of the specified constraint.
- if (isa<VAR_DECL>(Operand) && DECL_HARD_REGISTER(Operand)) {
- const char* RegName = extractRegisterName(Operand);
- int RegNum = decode_reg_name(RegName);
- if (RegNum >= 0) {
- RegName = LLVM_GET_REG_NAME(RegName, RegNum);
- size_t RegNameLen = strlen(RegName);
- char *NewConstraint = (char*)alloca(RegNameLen+3);
- NewConstraint[0] = '{';
- memcpy(NewConstraint+1, RegName, RegNameLen);
- NewConstraint[RegNameLen+1] = '}';
- NewConstraint[RegNameLen+2] = 0;
- SimplifiedConstraint = NewConstraint;
- // This output will now be implicit; set the side-effect flag on the asm.
- HasSideEffects = true;
- // We should no longer consider mem constraints.
- AllowsMem = false;
- } else {
- // If we can simplify the constraint into something else, do so now.
- // This avoids LLVM having to know about all the (redundant) GCC
- // constraints.
- SimplifiedConstraint = CanonicalizeConstraint(Constraint+1);
+ Value *TreeToLLVM::EmitReg_VecUnpackHiExpr(tree type, tree op0) {
+ // Eg: <2 x double> = VEC_UNPACK_HI_EXPR(<4 x float>)
+ Value *Op = EmitRegister(op0);
+
+ // Extract the high elements, eg: <4 x float> -> <2 x float>.
+ Op = VectorHighElements(Op);
+
+ // Extend the input elements to the output element type, eg: <2 x float>
+ // -> <2 x double>.
+ Type *DestTy = getRegType(type);
+ return CastToAnyType(Op, !TYPE_UNSIGNED(TREE_TYPE(TREE_TYPE(op0))),
+ DestTy, !TYPE_UNSIGNED(TREE_TYPE(type)));
+ }
+
+ Value *TreeToLLVM::EmitReg_VecUnpackLoExpr(tree type, tree op0) {
+ // Eg: <2 x double> = VEC_UNPACK_LO_EXPR(<4 x float>)
+ Value *Op = EmitRegister(op0);
+
+ // Extract the low elements, eg: <4 x float> -> <2 x float>.
+ Op = VectorLowElements(Op);
+
+ // Extend the input elements to the output element type, eg: <2 x float>
+ // -> <2 x double>.
+ Type *DestTy = getRegType(type);
+ return CastToAnyType(Op, !TYPE_UNSIGNED(TREE_TYPE(TREE_TYPE(op0))),
+ DestTy, !TYPE_UNSIGNED(TREE_TYPE(type)));
+ }
+
+ Value *TreeToLLVM::EmitReg_VEC_WIDEN_MULT_HI_EXPR(tree type, tree op0,
+ tree op1) {
+ Value *Hi0 = EmitReg_VecUnpackHiExpr(type, op0);
+ Value *Hi1 = EmitReg_VecUnpackHiExpr(type, op1);
+ return Builder.CreateMul(Hi0, Hi1);
+ }
+
+ Value *TreeToLLVM::EmitReg_VEC_WIDEN_MULT_LO_EXPR(tree type, tree op0,
+ tree op1) {
+ Value *Lo0 = EmitReg_VecUnpackLoExpr(type, op0);
+ Value *Lo1 = EmitReg_VecUnpackLoExpr(type, op1);
+ return Builder.CreateMul(Lo0, Lo1);
+ }
+
+ Value *TreeToLLVM::EmitReg_WIDEN_MULT_EXPR(tree type, tree op0, tree op1) {
+ Value *LHS = EmitRegisterWithCast(op0, type);
+ Value *RHS = EmitRegisterWithCast(op1, type);
+ return Builder.CreateMul(LHS, RHS);
+ }
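+ // A worked example for the three routines above (illustrative only): with
+ // <4 x i16> operands and a <2 x i32> result, the HI variant extracts each
+ // operand's two high i16 elements, extends them to i32 according to the
+ // operand and result signedness, and multiplies the extended values; the
+ // LO variant does the same with the two low elements.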
+
+ //===----------------------------------------------------------------------===//
+ // ... Exception Handling ...
+ //===----------------------------------------------------------------------===//
+
+ //===----------------------------------------------------------------------===//
+ // ... Render* - Convert GIMPLE to LLVM ...
+ //===----------------------------------------------------------------------===//
+
+ void TreeToLLVM::RenderGIMPLE_ASM(gimple stmt) {
+ // A gimple asm statement consists of an asm string, a list of outputs, a list
+ // of inputs, a list of clobbers, a list of labels and a "volatile" flag.
+ // These correspond directly to the elements of an asm statement. For example
+ // asm ("combine %2,%0" : "=r" (x) : "0" (x), "g" (y));
+ // Here the asm string is "combine %2,%0" and can be obtained as a const char*
+ // by calling gimple_asm_string. The only output is "=r" (x). The number of
+ // outputs is given by gimple_asm_noutputs, 1 in this case, and the outputs
+ // themselves can be obtained by calling gimple_asm_output_op. This returns a
+ // TREE_LIST node with an SSA name for "x" as the TREE_VALUE; the TREE_PURPOSE
+ // is also a TREE_LIST with TREE_VALUE a string constant holding "=r". There
+ // are two inputs, "0" (x) and "g" (y), so gimple_asm_ninputs returns 2. The
+ // routine gimple_asm_input_op returns them in the same format as for outputs.
+ // The number of clobbers is returned by gimple_asm_nclobbers, 0 in this case.
+ // To get the clobbers use gimple_asm_clobber_op. This returns a TREE_LIST
+ // node with TREE_VALUE a string constant holding the clobber. To find out if
+ // the asm is volatile call gimple_asm_volatile_p, which returns true if so.
+ // See below for labels (this example does not have any).
+
+ // Note that symbolic names have been substituted before getting here. For
+ // example this
+ // asm ("cmoveq %1,%2,%[result]" : [result] "=r"(result)
+ // : "r"(test), "r"(new), "[result]"(old));
+ // turns up as
+ // asm ("cmoveq %1,%2,%0" : "=r"(result) : "r"(test), "r"(new), "0"(old));
+
+ // Note that clobbers may not turn up in the same order as in the original, eg
+ // asm volatile ("movc3 %0,%1,%2" : /* no outputs */
+ // : "g" (from), "g" (to), "g" (count)
+ // : "r0", "r1", "r2", "r3", "r4", "r5");
+ // The clobbers turn up as "r5", "r4", "r3", "r2", "r1", "r0".
+
+ // Here is an example of the "asm goto" construct (not yet supported by LLVM):
+ // int frob(int x) {
+ // int y;
+ // asm goto ("frob %%r5, %1; jc %l[error]; mov (%2), %%r5"
+ // : : "r"(x), "r"(&y) : "r5", "memory" : error);
+ // return y;
+ // error:
+ // return -1;
+ // }
+ // The number of labels, one in this case, is returned by gimple_asm_nlabels.
+ // The labels themselves are returned by gimple_asm_label_op as a TREE_LIST
+ // node with TREE_PURPOSE a string constant holding the label name ("error")
+ // and TREE_VALUE holding the appropriate LABEL_DECL.
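+ // As a minimal sketch of the accessors described above (assumed usage,
+ // mirroring the initialization loops further down), the outputs can be
+ // walked like this:
+ //   for (unsigned i = 0; i != gimple_asm_noutputs(stmt); ++i) {
+ //     tree op = gimple_asm_output_op(stmt, i);  // TREE_LIST node.
+ //     tree value = TREE_VALUE(op);              // The operand, eg "x".
+ //     const char *constraint =
+ //         TREE_STRING_POINTER(TREE_VALUE(TREE_PURPOSE(op)));  // "=r".
+ //   }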
+
+ // TODO: Add support for labels.
+ if (gimple_asm_nlabels(stmt) > 0) {
+ sorry("'asm goto' not supported");
+ return;
}
- } else {
- SimplifiedConstraint = CanonicalizeConstraint(Constraint+1);
- }
- LValue Dest;
- Type *DestValTy = ConvertType(TREE_TYPE(Operand));
- if (isa<SSA_NAME>(Operand)) {
- // The ASM is defining an ssa name. Store the output to a temporary, then
- // load it out again later as the ssa name.
- MemRef TmpLoc = CreateTempLoc(DestValTy);
- SSADefinitions.push_back(std::make_pair(Operand, TmpLoc));
- Dest = LValue(TmpLoc);
- } else {
- Dest = EmitLV(Operand);
- assert(cast<PointerType>(Dest.Ptr->getType())->getElementType() ==
- DestValTy && "LValue has wrong type!");
- }
+ const unsigned NumOutputs = gimple_asm_noutputs(stmt);
+ const unsigned NumInputs = gimple_asm_ninputs(stmt);
+ const unsigned NumClobbers = gimple_asm_nclobbers(stmt);
+
+ /// Constraints - The output/input constraints, concatenated together in array
+ /// form instead of list form. This way of doing things is forced on us by
+ /// GCC routines like parse_output_constraint which rummage around inside the
+ /// array.
+ const char **Constraints =
+ (const char **) alloca((NumOutputs + NumInputs) * sizeof(const char *));
+
+ // Initialize the Constraints array.
+ for (unsigned i = 0; i != NumOutputs; ++i) {
+ tree Output = gimple_asm_output_op(stmt, i);
+ // If there's an erroneous arg then bail out.
+ if (TREE_TYPE(TREE_VALUE(Output)) == error_mark_node)
+ return;
+ // Record the output constraint.
+ const char *Constraint = TREE_STRING_POINTER(
+ TREE_VALUE(TREE_PURPOSE(Output)));
+ Constraints[i] = Constraint;
+ }
+ for (unsigned i = 0; i != NumInputs; ++i) {
+ tree Input = gimple_asm_input_op(stmt, i);
+ // If there's an erroneous arg then bail out.
+ if (TREE_TYPE(TREE_VALUE(Input)) == error_mark_node)
+ return;
+ // Record the input constraint.
+ const char *Constraint = TREE_STRING_POINTER(
+ TREE_VALUE(TREE_PURPOSE(Input)));
+ Constraints[NumOutputs + i] = Constraint;
+ }
+
+ // Look for multiple alternative constraints: multiple alternatives separated
+ // by commas.
+ unsigned NumChoices = 0; // sentinel; real value is always at least 1.
+ for (unsigned i = 0; i != NumInputs; ++i) {
+ tree Input = gimple_asm_input_op(stmt, i);
+ unsigned NumInputChoices = 1;
+ for (const char *p = TREE_STRING_POINTER(TREE_VALUE(TREE_PURPOSE(Input)));
+ *p; ++p)
+ if (*p == ',')
+ ++NumInputChoices;
+ if (NumChoices && (NumInputChoices != NumChoices)) {
+ error(
+ "operand constraints for %<asm%> differ in number of alternatives");
+ return;
+ }
+ if (NumChoices == 0)
+ NumChoices = NumInputChoices;
+ }
+ for (unsigned i = 0; i != NumOutputs; ++i) {
+ tree Output = gimple_asm_output_op(stmt, i);
+ unsigned NumOutputChoices = 1;
+ for (const char *p = TREE_STRING_POINTER(TREE_VALUE(TREE_PURPOSE(Output)));
+ *p; ++p)
+ if (*p == ',')
+ ++NumOutputChoices;
+ if (NumChoices && (NumOutputChoices != NumChoices)) {
+ error(
+ "operand constraints for %<asm%> differ in number of alternatives");
+ return;
+ }
+ if (NumChoices == 0)
+ NumChoices = NumOutputChoices;
+ }
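+ // For example, an output constraint "=r,m" and an input constraint "r,m"
+ // each contain two comma-separated alternatives, so NumChoices becomes 2;
+ // mixing "=r,m" with a one-alternative "g" triggers the error above.
+ // ChooseConstraintTuple below then narrows each string to a single tuple,
+ // eg "=r,m" to "=r" or "=m".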
- assert(!Dest.isBitfield() && "Cannot assign into a bitfield!");
- if (!AllowsMem && DestValTy->isSingleValueType()) {// Reg dest -> asm return
- ConstraintStr += ",=";
- ConstraintStr += SimplifiedConstraint;
- bool IsSigned = !TYPE_UNSIGNED(TREE_TYPE(Operand));
- CallResultDests.push_back(std::make_pair(Dest.Ptr, IsSigned));
- CallResultTypes.push_back(std::make_pair(DestValTy, IsSigned));
- OutputLocations.push_back(std::make_pair(true, CallResultTypes.size()-1));
- } else {
- ConstraintStr += ",=*";
- ConstraintStr += SimplifiedConstraint;
- CallOps.push_back(Dest.Ptr);
- OutputLocations.push_back(std::make_pair(false, CallOps.size()-1));
- }
- }
+ // If there are multiple constraint tuples, pick one. Constraints is
+ // altered to point to shorter strings (which are malloc'ed), and everything
+ // below Just Works as in the NumChoices==1 case.
+ BumpPtrAllocator StringStorage(256, 256);
+ if (NumChoices > 1)
+ ChooseConstraintTuple(stmt, Constraints, NumChoices, StringStorage);
+
+ // HasSideEffects - Whether the LLVM inline asm should be marked as having
+ // side effects.
+ bool HasSideEffects = gimple_asm_volatile_p(stmt) || (NumOutputs == 0);
+
+ // CallResultTypes - The inline asm call may return one or more results. The
+ // types of the results are recorded here along with a flag indicating whether
+ // the corresponding GCC type is signed.
+ SmallVector<std::pair<Type *, bool>, 4> CallResultTypes;
+
+ // CallResultDests - Each result returned by the inline asm call is stored in
+ // a memory location. These are listed here along with a flag indicating if
+ // the GCC type corresponding to the memory location is signed. The type of
+ // the memory location is allowed to differ from the type of the call result,
+ // in which case the result is converted before being stored.
+ SmallVector<std::pair<Value *, bool>, 4> CallResultDests;
+
+ // CallOps - The operands passed to the inline asm call.
+ SmallVector<Value *, 16> CallOps;
+
+ // OutputLocations - For each output holds an index into CallOps (if the flag
+ // is false) or into CallResultTypes (if the flag is true). Outputs returned
+ // in memory are passed to the asm as an operand and thus appear in CallOps.
+ // Those returned in registers are obtained as one of the results of the asm
+ // call and thus correspond to an entry in CallResultTypes.
+ SmallVector<std::pair<bool, unsigned>, 4> OutputLocations;
+
+ // SSADefinitions - If the asm defines an SSA name then the SSA name and a
+ // memory location are recorded here. The asm result defining the SSA name
+ // will be stored to the memory location, and loaded out afterwards
+ // to define the SSA name.
+ SmallVector<std::pair<tree, MemRef>, 4> SSADefinitions;
+
+ // ConstraintStr - The string of constraints in LLVM format.
+ std::string ConstraintStr;
+
+ // Process outputs.
+ for (unsigned i = 0; i != NumOutputs; ++i) {
+ tree Output = gimple_asm_output_op(stmt, i);
+ tree Operand = TREE_VALUE(Output);
+
+ // Parse the output constraint.
+ const char *Constraint = Constraints[i];
+ bool IsInOut, AllowsReg, AllowsMem;
+ if (!parse_output_constraint(&Constraint, i, NumInputs, NumOutputs,
+ &AllowsMem, &AllowsReg, &IsInOut))
+ return;
+ assert(Constraint[0] == '=' && "Not an output constraint?");
+ assert(!IsInOut && "asm expression not gimplified?");
+
+ std::string SimplifiedConstraint;
+ // If this output register is pinned to a machine register, use that machine
+ // register instead of the specified constraint.
+ if (isa<VAR_DECL>(Operand) && DECL_HARD_REGISTER(Operand)) {
+ const char *RegName = extractRegisterName(Operand);
+ int RegNum = decode_reg_name(RegName);
+ if (RegNum >= 0) {
+ RegName = LLVM_GET_REG_NAME(RegName, RegNum);
+ size_t RegNameLen = strlen(RegName);
+ char *NewConstraint = (char *)alloca(RegNameLen + 3);
+ NewConstraint[0] = '{';
+ memcpy(NewConstraint + 1, RegName, RegNameLen);
+ NewConstraint[RegNameLen + 1] = '}';
+ NewConstraint[RegNameLen + 2] = 0;
+ SimplifiedConstraint = NewConstraint;
+ // This output will now be implicit; set the side-effect flag on the asm.
+ HasSideEffects = true;
+ // We should no longer consider mem constraints.
+ AllowsMem = false;
+ } else {
+ // If we can simplify the constraint into something else, do so now.
+ // This avoids LLVM having to know about all the (redundant) GCC
+ // constraints.
+ SimplifiedConstraint = CanonicalizeConstraint(Constraint + 1);
+ }
+ } else {
+ SimplifiedConstraint = CanonicalizeConstraint(Constraint + 1);
+ }
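+ // For instance (an illustrative sketch; the register name is made up):
+ //   register int counter asm("r13");
+ //   asm("..." : "=r"(counter));
+ // Here the "r" constraint is replaced by "{r13}" (modulo LLVM_GET_REG_NAME
+ // renaming) so LLVM pins the output to that machine register.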
- // Process inputs.
- for (unsigned i = 0; i != NumInputs; ++i) {
- tree Input = gimple_asm_input_op(stmt, i);
- tree Val = TREE_VALUE(Input);
- tree type = TREE_TYPE(Val);
- bool IsSigned = !TYPE_UNSIGNED(type);
-
- const char *Constraint = Constraints[NumOutputs+i];
-
- bool AllowsReg, AllowsMem;
- if (!parse_input_constraint(Constraints+NumOutputs+i, i,
- NumInputs, NumOutputs, 0,
- Constraints, &AllowsMem, &AllowsReg))
- return;
- bool isIndirect = false;
- if (AllowsReg || !AllowsMem) { // Register operand.
- Type *LLVMTy = ConvertType(type);
-
- Value *Op = 0;
- Type *OpTy = LLVMTy;
- if (LLVMTy->isSingleValueType()) {
- if (isa<ADDR_EXPR>(Val) && isa<LABEL_DECL>(TREE_OPERAND(Val,0))) {
- // Emit the label, but do not assume it is going to be the target
- // of an indirect branch. Having this logic here is a hack; there
- // should be a bit in the label identifying it as in an asm.
- Op = getLabelDeclBlock(TREE_OPERAND(Val, 0));
- } else if (isa<VAR_DECL>(Val) && DECL_HARD_REGISTER(Val)) {
- // GCC special cases hard registers used as inputs to asm statements.
- // Emit an inline asm node that copies the value out of the specified
- // register.
- assert(canEmitRegisterVariable(Val) && "Cannot read hard register!");
- Op = EmitReadOfRegisterVariable(Val);
+ LValue Dest;
+ Type *DestValTy = ConvertType(TREE_TYPE(Operand));
+ if (isa<SSA_NAME>(Operand)) {
+ // The ASM is defining an ssa name. Store the output to a temporary, then
+ // load it out again later as the ssa name.
+ MemRef TmpLoc = CreateTempLoc(DestValTy);
+ SSADefinitions.push_back(std::make_pair(Operand, TmpLoc));
+ Dest = LValue(TmpLoc);
} else {
- Op = EmitMemory(Val);
+ Dest = EmitLV(Operand);
+ assert(cast<PointerType>(Dest.Ptr->getType())->getElementType() ==
+ DestValTy && "LValue has wrong type!");
}
- } else {
- LValue LV = EmitLV(Val);
- assert(!LV.isBitfield() && "Inline asm can't have bitfield operand");
- // Small structs and unions can be treated as integers.
- uint64_t TySize = DL.getTypeSizeInBits(LLVMTy);
- if (TySize == 1 || TySize == 8 || TySize == 16 ||
- TySize == 32 || TySize == 64 || (TySize == 128 && !AllowsMem)) {
- LLVMTy = IntegerType::get(Context, (unsigned)TySize);
- Op =
- Builder.CreateLoad(Builder.CreateBitCast(LV.Ptr,
- LLVMTy->getPointerTo()));
+ assert(!Dest.isBitfield() && "Cannot assign into a bitfield!");
+ if (!AllowsMem &&
+ DestValTy->isSingleValueType()) { // Reg dest -> asm return
+ ConstraintStr += ",=";
+ ConstraintStr += SimplifiedConstraint;
+ bool IsSigned = !TYPE_UNSIGNED(TREE_TYPE(Operand));
+ CallResultDests.push_back(std::make_pair(Dest.Ptr, IsSigned));
+ CallResultTypes.push_back(std::make_pair(DestValTy, IsSigned));
+ OutputLocations.push_back(std::make_pair(true,
+ CallResultTypes.size() - 1));
} else {
- // Codegen only supports indirect operands with mem constraints.
- if (!AllowsMem)
- error("aggregate does not match inline asm register constraint");
- // Otherwise, emit our value as an lvalue.
- isIndirect = true;
- Op = LV.Ptr;
- OpTy = Op->getType();
+ ConstraintStr += ",=*";
+ ConstraintStr += SimplifiedConstraint;
+ CallOps.push_back(Dest.Ptr);
+ OutputLocations.push_back(std::make_pair(false, CallOps.size() - 1));
}
}
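+ // Concretely, the two paths above work like this (a sketch): a scalar
+ // "=r"(x) output appends ",=r" and the value comes back as a result of
+ // the asm call itself, while a memory output such as "=m"(s) appends
+ // ",=*m" and the destination address is passed in as a call operand.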
-    // If this input operand is matching an output operand, e.g. '0', check if
-    // this is something that llvm supports. If the operand types are
-    // different, then emit an error if 1) one of the types is not integer or
-    // pointer, or 2) the size of the input type is larger than the output
-    // type. If the size of the integer input type is smaller than the integer
-    // output type, then cast it to the larger type and shift the value if the
-    // target is big endian.
- if (ISDIGIT(Constraint[0])) {
- unsigned Match = (unsigned)atoi(Constraint); // Unsigned - no minus sign
- // This output might have gotten put in either CallResult or CallArg
- // depending whether it's a register or not. Find its type.
- Type *OTy = 0;
- unsigned OutputIndex = ~0U;
- if (Match < OutputLocations.size()) {
- // Indices here known to be within range.
- OutputIndex = OutputLocations[Match].second;
- if (OutputLocations[Match].first)
- OTy = CallResultTypes[OutputIndex].first;
- else {
- OTy = CallOps[OutputIndex]->getType();
- assert(OTy->isPointerTy() && "Expected pointer type!");
- OTy = cast<PointerType>(OTy)->getElementType();
- }
- }
- if (OTy && OTy != OpTy) {
- if (!OTy->isSingleValueType() || !OpTy->isSingleValueType()) {
- error("unsupported inline asm: input constraint with a matching "
- "output constraint of incompatible type!");
- return;
- }
- uint64_t OTyBits = DL.getTypeSizeInBits(OTy);
- uint64_t OpTyBits = DL.getTypeSizeInBits(OpTy);
- if (OTyBits == 0 || OpTyBits == 0) {
- error("unsupported inline asm: input constraint with a matching "
- "output constraint of incompatible type!");
- return;
- } else if (OTyBits < OpTyBits) {
- // The output is smaller than the input.
- if (OutputLocations[Match].first &&
- !isOperandMentioned(stmt, Match)) {
- // The output is a register and is not explicitly mentioned in the
- // asm string. Use the input type for the output, and arrange for
- // the result to be truncated to the original output type after
- // the asm call.
- CallResultTypes[OutputIndex] = std::make_pair(OpTy, IsSigned);
- } else if (isa<Constant>(Op) &&
- !isOperandMentioned(stmt, NumOutputs+i)) {
- // The input is a constant that is not explicitly mentioned in the
- // asm string. Convert to the output type like in an assignment.
- Op = CastToAnyType(Op, IsSigned, OTy,
- CallResultTypes[OutputIndex].second);
+ // Process inputs.
+ for (unsigned i = 0; i != NumInputs; ++i) {
+ tree Input = gimple_asm_input_op(stmt, i);
+ tree Val = TREE_VALUE(Input);
+ tree type = TREE_TYPE(Val);
+ bool IsSigned = !TYPE_UNSIGNED(type);
+
+ const char *Constraint = Constraints[NumOutputs + i];
+
+ bool AllowsReg, AllowsMem;
+ if (!parse_input_constraint(Constraints + NumOutputs + i, i, NumInputs,
+ NumOutputs, 0, Constraints, &AllowsMem,
+ &AllowsReg))
+ return;
+ bool isIndirect = false;
+ if (AllowsReg || !AllowsMem) { // Register operand.
+ Type *LLVMTy = ConvertType(type);
+
+ Value *Op = 0;
+ Type *OpTy = LLVMTy;
+ if (LLVMTy->isSingleValueType()) {
+ if (isa<ADDR_EXPR>(Val) && isa<LABEL_DECL>(TREE_OPERAND(Val, 0))) {
+ // Emit the label, but do not assume it is going to be the target
+ // of an indirect branch. Having this logic here is a hack; there
+ // should be a bit in the label identifying it as in an asm.
+ Op = getLabelDeclBlock(TREE_OPERAND(Val, 0));
+ } else if (isa<VAR_DECL>(Val) && DECL_HARD_REGISTER(Val)) {
+ // GCC special cases hard registers used as inputs to asm statements.
+ // Emit an inline asm node that copies the value out of the specified
+ // register.
+ assert(canEmitRegisterVariable(Val) &&
+ "Cannot read hard register!");
+ Op = EmitReadOfRegisterVariable(Val);
} else {
- error("unsupported inline asm: input constraint with a matching "
- "output constraint of incompatible type!");
- return;
+ Op = EmitMemory(Val);
+ }
+ } else {
+ LValue LV = EmitLV(Val);
+ assert(!LV.isBitfield() &&
+ "Inline asm can't have bitfield operand");
+
+ // Small structs and unions can be treated as integers.
+ uint64_t TySize = DL.getTypeSizeInBits(LLVMTy);
+ if (TySize == 1 || TySize == 8 || TySize == 16 || TySize == 32 ||
+ TySize == 64 || (TySize == 128 && !AllowsMem)) {
+ LLVMTy = IntegerType::get(Context, (unsigned) TySize);
+ Op = Builder.CreateLoad(
+ Builder.CreateBitCast(LV.Ptr, LLVMTy->getPointerTo()));
+ } else {
+ // Codegen only supports indirect operands with mem constraints.
+ if (!AllowsMem)
+ error(
+ "aggregate does not match inline asm register constraint");
+ // Otherwise, emit our value as an lvalue.
+ isIndirect = true;
+ Op = LV.Ptr;
+ OpTy = Op->getType();
}
- } else if (OTyBits > OpTyBits) {
- // The input is smaller than the output. If the input is explicitly
- // mentioned in the asm string then we cannot safely promote it, so
- // bail out.
- if (isOperandMentioned(stmt, NumOutputs + i)) {
- error("unsupported inline asm: input constraint with a matching "
+ }
+
+        // If this input operand is matching an output operand, e.g. '0', check if
+        // this is something that llvm supports. If the operand types are
+        // different, then emit an error if 1) one of the types is not integer or
+        // pointer, or 2) the size of the input type is larger than the output
+        // type. If the size of the integer input type is smaller than the integer
+        // output type, then cast it to the larger type and shift the value if the
+        // target is big endian.
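+        // A sketch of the promotion case (illustrative; the variables are
+        // assumptions):
+        //   asm("..." : "=r"(long_out) : "0"(int_in));
+        // Here the input matches output 0 but is narrower, so when the
+        // operand is not mentioned explicitly in the asm string the input
+        // is widened to the output type with CastToAnyType before the call
+        // is emitted.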
+ if (ISDIGIT(Constraint[0])) {
+ unsigned Match = (unsigned) atoi(Constraint); // Unsigned - no minus sign
+ // This output might have gotten put in either CallResult or CallArg
+ // depending whether it's a register or not. Find its type.
+ Type *OTy = 0;
+ unsigned OutputIndex = ~0U;
+ if (Match < OutputLocations.size()) {
+ // Indices here known to be within range.
+ OutputIndex = OutputLocations[Match].second;
+ if (OutputLocations[Match].first)
+ OTy = CallResultTypes[OutputIndex].first;
+ else {
+ OTy = CallOps[OutputIndex]->getType();
+ assert(OTy->isPointerTy() && "Expected pointer type!");
+ OTy = cast<PointerType>(OTy)->getElementType();
+ }
+ }
+ if (OTy && OTy != OpTy) {
+ if (!OTy->isSingleValueType() || !OpTy->isSingleValueType()) {
+ error(
+ "unsupported inline asm: input constraint with a matching "
+ "output constraint of incompatible type!");
+ return;
+ }
+ uint64_t OTyBits = DL.getTypeSizeInBits(OTy);
+ uint64_t OpTyBits = DL.getTypeSizeInBits(OpTy);
+ if (OTyBits == 0 || OpTyBits == 0) {
+ error(
+ "unsupported inline asm: input constraint with a matching "
"output constraint of incompatible type!");
- return;
+ return;
+ } else if (OTyBits < OpTyBits) {
+ // The output is smaller than the input.
+ if (OutputLocations[Match].first &&
+ !isOperandMentioned(stmt, Match)) {
+ // The output is a register and is not explicitly mentioned in the
+ // asm string. Use the input type for the output, and arrange for
+ // the result to be truncated to the original output type after
+ // the asm call.
+ CallResultTypes[OutputIndex] = std::make_pair(OpTy, IsSigned);
+ } else if (isa<Constant>(Op) &&
+ !isOperandMentioned(stmt, NumOutputs + i)) {
+ // The input is a constant that is not explicitly mentioned in the
+ // asm string. Convert to the output type like in an assignment.
+ Op = CastToAnyType(Op, IsSigned, OTy,
+ CallResultTypes[OutputIndex].second);
+ } else {
+ error(
+ "unsupported inline asm: input constraint with a matching "
+ "output constraint of incompatible type!");
+ return;
+ }
+ } else if (OTyBits > OpTyBits) {
+ // The input is smaller than the output. If the input is explicitly
+ // mentioned in the asm string then we cannot safely promote it, so
+ // bail out.
+ if (isOperandMentioned(stmt, NumOutputs + i)) {
+ error(
+ "unsupported inline asm: input constraint with a matching "
+ "output constraint of incompatible type!");
+ return;
+ }
+ Op = CastToAnyType(Op, IsSigned, OTy,
+ CallResultTypes[OutputIndex].second);
+ }
}
- Op = CastToAnyType(Op, IsSigned, OTy,
- CallResultTypes[OutputIndex].second);
}
+
+ CallOps.push_back(Op);
+ } else { // Memory operand.
+ mark_addressable(TREE_VALUE(Input));
+ isIndirect = true;
+ LValue Src = EmitLV(Val);
+ assert(!Src.isBitfield() && "Cannot read from a bitfield!");
+ CallOps.push_back(Src.Ptr);
}
- }
- CallOps.push_back(Op);
- } else { // Memory operand.
- mark_addressable(TREE_VALUE(Input));
- isIndirect = true;
- LValue Src = EmitLV(Val);
- assert(!Src.isBitfield() && "Cannot read from a bitfield!");
- CallOps.push_back(Src.Ptr);
- }
-
- ConstraintStr += ',';
- if (isIndirect)
- ConstraintStr += '*';
-
- // If this input register is pinned to a machine register, use that machine
- // register instead of the specified constraint.
- if (isa<VAR_DECL>(Val) && DECL_HARD_REGISTER(Val)) {
- const char *RegName = extractRegisterName(Val);
- int RegNum = decode_reg_name(RegName);
- if (RegNum >= 0) {
- RegName = LLVM_GET_REG_NAME(RegName, RegNum);
- ConstraintStr += '{';
- ConstraintStr += RegName;
- ConstraintStr += '}';
- continue;
- }
- }
+ ConstraintStr += ',';
+ if (isIndirect)
+ ConstraintStr += '*';
+
+ // If this input register is pinned to a machine register, use that machine
+ // register instead of the specified constraint.
+ if (isa<VAR_DECL>(Val) && DECL_HARD_REGISTER(Val)) {
+ const char *RegName = extractRegisterName(Val);
+ int RegNum = decode_reg_name(RegName);
+ if (RegNum >= 0) {
+ RegName = LLVM_GET_REG_NAME(RegName, RegNum);
+ ConstraintStr += '{';
+ ConstraintStr += RegName;
+ ConstraintStr += '}';
+ continue;
+ }
+ }
- // If there is a simpler form for the register constraint, use it.
- std::string Simplified = CanonicalizeConstraint(Constraint);
- ConstraintStr += Simplified;
- }
+ // If there is a simpler form for the register constraint, use it.
+ std::string Simplified = CanonicalizeConstraint(Constraint);
+ ConstraintStr += Simplified;
+ }
+
+ // Process clobbers.
+
+ // Some targets automatically clobber registers across an asm.
+ tree Clobbers;
+ {
+ // Create input, output & clobber lists for the benefit of md_asm_clobbers.
+ tree outputs = NULL_TREE;
+ if (NumOutputs) {
+ tree t = outputs = gimple_asm_output_op(stmt, 0);
+ for (unsigned i = 1; i < NumOutputs; i++) {
+ TREE_CHAIN(t) = gimple_asm_output_op(stmt, i);
+ t = gimple_asm_output_op(stmt, i);
+ }
+ }
- // Process clobbers.
+ tree inputs = NULL_TREE;
+ if (NumInputs) {
+ tree t = inputs = gimple_asm_input_op(stmt, 0);
+ for (unsigned i = 1; i < NumInputs; i++) {
+ TREE_CHAIN(t) = gimple_asm_input_op(stmt, i);
+ t = gimple_asm_input_op(stmt, i);
+ }
+ }
- // Some targets automatically clobber registers across an asm.
- tree Clobbers;
- {
- // Create input, output & clobber lists for the benefit of md_asm_clobbers.
- tree outputs = NULL_TREE;
- if (NumOutputs) {
- tree t = outputs = gimple_asm_output_op (stmt, 0);
- for (unsigned i = 1; i < NumOutputs; i++) {
- TREE_CHAIN (t) = gimple_asm_output_op (stmt, i);
- t = gimple_asm_output_op (stmt, i);
- }
- }
+ tree clobbers = NULL_TREE;
+ if (NumClobbers) {
+ tree t = clobbers = gimple_asm_clobber_op(stmt, 0);
+ for (unsigned i = 1; i < NumClobbers; i++) {
+ TREE_CHAIN(t) = gimple_asm_clobber_op(stmt, i);
+ t = gimple_asm_clobber_op(stmt, i);
+ }
+ }
- tree inputs = NULL_TREE;
- if (NumInputs) {
- tree t = inputs = gimple_asm_input_op (stmt, 0);
- for (unsigned i = 1; i < NumInputs; i++) {
- TREE_CHAIN (t) = gimple_asm_input_op (stmt, i);
- t = gimple_asm_input_op (stmt, i);
+ Clobbers = targetm.md_asm_clobbers(outputs, inputs, clobbers);
}
- }
- tree clobbers = NULL_TREE;
- if (NumClobbers) {
- tree t = clobbers = gimple_asm_clobber_op (stmt, 0);
- for (unsigned i = 1; i < NumClobbers; i++) {
- TREE_CHAIN (t) = gimple_asm_clobber_op (stmt, i);
- t = gimple_asm_clobber_op (stmt, i);
+ for (; Clobbers; Clobbers = TREE_CHAIN(Clobbers)) {
+ const char *RegName = TREE_STRING_POINTER(TREE_VALUE(Clobbers));
+ int RegCode = decode_reg_name(RegName);
+
+ switch (RegCode) {
+ case -1: // Nothing specified?
+ case -2: // Invalid.
+ error("unknown register name %qs in %<asm%>", RegName);
+ return;
+ case -3: // cc
+ ConstraintStr += ",~{cc}";
+ break;
+ case -4: // memory
+ ConstraintStr += ",~{memory}";
+ break;
+ default: // Normal register name.
+ assert(RegName && "Null register name successfully decoded!");
+ RegName = LLVM_GET_REG_NAME(RegName, RegCode);
+ ConstraintStr += ",~{";
+ ConstraintStr += RegName;
+ ConstraintStr += "}";
+ break;
+ }
}
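+ // For example, the clobber list ("r0", ..., "r5") of the movc3 example
+ // earlier appends ",~{r5}" through ",~{r0}" (in whatever order GCC hands
+ // them back), while "cc" and "memory" clobbers append ",~{cc}" and
+ // ",~{memory}" respectively.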
- }
- Clobbers = targetm.md_asm_clobbers(outputs, inputs, clobbers);
- }
+ // Compute the return type to use for the asm call.
+ Type *CallResultType;
+ switch (CallResultTypes.size()) {
+ // If there are no results then the return type is void!
+ case 0:
+ CallResultType = Type::getVoidTy(Context);
+ break;
+ // If there is one result then use the result's type as the return type.
+ case 1:
+ CallResultType = CallResultTypes[0].first;
+ break;
+ // If the asm returns multiple results then create a struct type with the
+ // result types as its fields, and use it for the return type.
+ default:
+ SmallVector<Type *, 4> Fields((unsigned) CallResultTypes.size());
+ for (unsigned i = 0, e = (unsigned) CallResultTypes.size(); i != e; ++i)
+ Fields[i] = CallResultTypes[i].first;
+ CallResultType = StructType::get(Context, Fields);
+ break;
+ }
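+ // For instance (a sketch): two register outputs of types i32 and i8*
+ // give the asm call the return type { i32, i8* }, and each member is
+ // extracted below with an extractvalue named "asmresult".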
- for (; Clobbers; Clobbers = TREE_CHAIN(Clobbers)) {
- const char *RegName = TREE_STRING_POINTER(TREE_VALUE(Clobbers));
- int RegCode = decode_reg_name(RegName);
+ // Compute the types of the arguments to the asm call.
+ SmallVector<Type *, 16> CallArgTypes((unsigned) CallOps.size());
+ for (unsigned i = 0, e = (unsigned) CallOps.size(); i != e; ++i)
+ CallArgTypes[i] = CallOps[i]->getType();
+
+ // Get the type of the called asm "function".
+ FunctionType *FTy = FunctionType::get(CallResultType, CallArgTypes,
+ false);
+
+ // Remove the leading comma if we have operands.
+ if (!ConstraintStr.empty())
+ ConstraintStr.erase(ConstraintStr.begin());
+
+ // Make sure we've created a valid inline asm expression.
+ if (!InlineAsm::Verify(FTy, ConstraintStr)) {
+ error("Invalid or unsupported inline assembly!");
+ return;
+ }
- switch (RegCode) {
- case -1: // Nothing specified?
- case -2: // Invalid.
- error("unknown register name %qs in %<asm%>", RegName);
- return;
- case -3: // cc
- ConstraintStr += ",~{cc}";
- break;
- case -4: // memory
- ConstraintStr += ",~{memory}";
- break;
- default: // Normal register name.
- assert(RegName && "Null register name successfully decoded!");
- RegName = LLVM_GET_REG_NAME(RegName, RegCode);
- ConstraintStr += ",~{";
- ConstraintStr += RegName;
- ConstraintStr += "}";
- break;
+ std::string NewAsmStr = ConvertInlineAsmStr(stmt, NumOutputs + NumInputs);
+ Value *Asm = InlineAsm::get(FTy, NewAsmStr, ConstraintStr,
+ HasSideEffects);
+ CallInst *CV = Builder.CreateCall(Asm, CallOps, CallResultTypes.empty() ?
+ "" : "asmtmp");
+ CV->setDoesNotThrow();
+ if (gimple_has_location(stmt)) {
+ // Pass the location of the asm using a !srcloc metadata.
+ Constant *LocationCookie = Builder.getInt64(gimple_location(stmt));
+ CV->setMetadata("srcloc", MDNode::get(Context, LocationCookie));
+ }
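+ // In the emitted IR this shows up roughly as (assumed shape):
+ //   %asmtmp = call i32 asm sideeffect "...", "..."(...), !srcloc !0
+ // where !0 wraps the location cookie so that backend diagnostics for the
+ // asm can point back at the original source line.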
+
+ // If the call produces a value, store it into the destination.
+ for (unsigned i = 0, NumResults = (unsigned) CallResultTypes.size();
+ i != NumResults; ++i) {
+ Value *Val = NumResults == 1 ? CV :
+ Builder.CreateExtractValue(CV, i, "asmresult");
+ bool ValIsSigned = CallResultTypes[i].second;
+
+ Value *Dest = CallResultDests[i].first;
+ Type *DestTy = cast<PointerType>(Dest->getType())->getElementType();
+ bool DestIsSigned = CallResultDests[i].second;
+ Val = CastToAnyType(Val, ValIsSigned, DestTy, DestIsSigned);
+ Builder.CreateStore(Val, Dest);
+ }
+
+ // If the call defined any ssa names, associate them with their value.
+ for (unsigned i = 0, e = (unsigned) SSADefinitions.size(); i != e; ++i) {
+ tree Name = SSADefinitions[i].first;
+ MemRef Loc = SSADefinitions[i].second;
+ Value *Val = LoadRegisterFromMemory(Loc, TREE_TYPE(Name), 0, Builder);
+ DefineSSAName(Name, Val);
+ }
+
+ // Give the backend a chance to upgrade the inline asm to LLVM code. This
+ // handles some common cases that LLVM has intrinsics for, e.g. x86 bswap ->
+ // llvm.bswap.
+ if (const TargetLowering *TLI = TheTarget->getTargetLowering())
+ TLI->ExpandInlineAsm(CV);
}
- }
-
- // Compute the return type to use for the asm call.
- Type *CallResultType;
- switch (CallResultTypes.size()) {
- // If there are no results then the return type is void!
- case 0: CallResultType = Type::getVoidTy(Context); break;
- // If there is one result then use the result's type as the return type.
- case 1: CallResultType = CallResultTypes[0].first; break;
- // If the asm returns multiple results then create a struct type with the
- // result types as its fields, and use it for the return type.
- default:
- SmallVector<Type*, 4> Fields((unsigned)CallResultTypes.size());
- for (unsigned i = 0, e = (unsigned)CallResultTypes.size(); i != e; ++i)
- Fields[i] = CallResultTypes[i].first;
- CallResultType = StructType::get(Context, Fields);
- break;
- }
- // Compute the types of the arguments to the asm call.
- SmallVector<Type*, 16> CallArgTypes((unsigned)CallOps.size());
- for (unsigned i = 0, e = (unsigned)CallOps.size(); i != e; ++i)
- CallArgTypes[i] = CallOps[i]->getType();
-
- // Get the type of the called asm "function".
- FunctionType *FTy =
- FunctionType::get(CallResultType, CallArgTypes, false);
-
- // Remove the leading comma if we have operands.
- if (!ConstraintStr.empty())
- ConstraintStr.erase(ConstraintStr.begin());
-
- // Make sure we've created a valid inline asm expression.
- if (!InlineAsm::Verify(FTy, ConstraintStr)) {
- error("Invalid or unsupported inline assembly!");
- return;
- }
-
- std::string NewAsmStr = ConvertInlineAsmStr(stmt, NumOutputs+NumInputs);
- Value *Asm = InlineAsm::get(FTy, NewAsmStr, ConstraintStr, HasSideEffects);
- CallInst *CV = Builder.CreateCall(Asm, CallOps,
- CallResultTypes.empty() ? "" : "asmtmp");
- CV->setDoesNotThrow();
- if (gimple_has_location(stmt)) {
- // Pass the location of the asm using a !srcloc metadata.
- Constant *LocationCookie = Builder.getInt64(gimple_location(stmt));
- CV->setMetadata("srcloc", MDNode::get(Context, LocationCookie));
- }
-
- // If the call produces a value, store it into the destination.
- for (unsigned i = 0, NumResults = (unsigned)CallResultTypes.size();
- i != NumResults; ++i) {
- Value *Val = NumResults == 1 ?
- CV : Builder.CreateExtractValue(CV, i, "asmresult");
- bool ValIsSigned = CallResultTypes[i].second;
-
- Value *Dest = CallResultDests[i].first;
- Type *DestTy = cast<PointerType>(Dest->getType())->getElementType();
- bool DestIsSigned = CallResultDests[i].second;
- Val = CastToAnyType(Val, ValIsSigned, DestTy, DestIsSigned);
- Builder.CreateStore(Val, Dest);
- }
-
- // If the call defined any ssa names, associate them with their value.
- for (unsigned i = 0, e = (unsigned)SSADefinitions.size(); i != e; ++i) {
- tree Name = SSADefinitions[i].first;
- MemRef Loc = SSADefinitions[i].second;
- Value *Val = LoadRegisterFromMemory(Loc, TREE_TYPE(Name), 0, Builder);
- DefineSSAName(Name, Val);
- }
-
- // Give the backend a chance to upgrade the inline asm to LLVM code. This
- // handles some common cases that LLVM has intrinsics for, e.g. x86 bswap ->
- // llvm.bswap.
- if (const TargetLowering *TLI = TheTarget->getTargetLowering())
- TLI->ExpandInlineAsm(CV);
-}
-
-void TreeToLLVM::RenderGIMPLE_ASSIGN(gimple stmt) {
- tree lhs = gimple_assign_lhs(stmt);
+ void TreeToLLVM::RenderGIMPLE_ASSIGN(gimple stmt) {
+ tree lhs = gimple_assign_lhs(stmt);
#if (GCC_MINOR > 6)
- // Assigning a right-hand side with TREE_CLOBBER_P says that the left-hand
- // side is dead from this point on. Output an llvm.lifetime.end intrinsic.
- if (get_gimple_rhs_class(gimple_expr_code(stmt)) == GIMPLE_SINGLE_RHS &&
- TREE_CLOBBER_P(gimple_assign_rhs1(stmt))) {
- // Be conservative and only output the intrinsic if the left-hand side
- // corresponds to some kind of concrete object. Note that we generate
- // code to read from RESULT_DECLs before returning from the function, so
- // saying that a RESULT_DECL is dead means we are dead - which is why we
- // don't even consider it.
- if (isa<PARM_DECL>(lhs) || isa<VAR_DECL>(lhs)) {
- Value *LHSAddr = Builder.CreateBitCast(DECL_LOCAL(lhs),
- Builder.getInt8PtrTy());
- uint64_t LHSSize = isInt64(DECL_SIZE(lhs), true) ?
- getInt64(DECL_SIZE(lhs), true) / 8 : ~0UL;
- Function *EndIntr = Intrinsic::getDeclaration(TheModule,
- Intrinsic::lifetime_end);
- Builder.CreateCall2(EndIntr, Builder.getInt64(LHSSize), LHSAddr);
- }
- return;
- }
+ // Assigning a right-hand side with TREE_CLOBBER_P says that the left-hand
+ // side is dead from this point on. Output an llvm.lifetime.end intrinsic.
+ if (get_gimple_rhs_class(gimple_expr_code(stmt)) == GIMPLE_SINGLE_RHS &&
+ TREE_CLOBBER_P(gimple_assign_rhs1(stmt))) {
+ // Be conservative and only output the intrinsic if the left-hand side
+ // corresponds to some kind of concrete object. Note that we generate
+ // code to read from RESULT_DECLs before returning from the function, so
+ // saying that a RESULT_DECL is dead means we are dead - which is why we
+ // don't even consider it.
+ if (isa<PARM_DECL>(lhs) || isa<VAR_DECL>(lhs)) {
+ Value *LHSAddr = Builder.CreateBitCast(DECL_LOCAL(lhs),
+ Builder.getInt8PtrTy());
+ uint64_t LHSSize = isInt64(DECL_SIZE(lhs), true) ?
+ getInt64(DECL_SIZE(lhs), true) / 8 : ~0UL;
+ Function *EndIntr = Intrinsic::getDeclaration(
+ TheModule, Intrinsic::lifetime_end);
+ Builder.CreateCall2(EndIntr, Builder.getInt64(LHSSize), LHSAddr);
+ }
+ return;
+ }
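+ // The net effect (illustrative, for an assumed 16 byte local) is:
+ //   call void @llvm.lifetime.end(i64 16, i8* %local.addr)
+ // which tells the optimizers the object is dead from this point on.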
#endif
- if (isa<AGGREGATE_TYPE>(TREE_TYPE(lhs))) {
- assert(get_gimple_rhs_class(gimple_expr_code(stmt)) == GIMPLE_SINGLE_RHS &&
- "Aggregate type but rhs not simple!");
- LValue LV = EmitLV(lhs);
- MemRef NewLoc(LV.Ptr, LV.getAlignment(), TREE_THIS_VOLATILE(lhs));
- EmitAggregate(gimple_assign_rhs1 (stmt), NewLoc);
- return;
- }
- WriteScalarToLHS(lhs, EmitAssignRHS(stmt));
-}
-
-void TreeToLLVM::RenderGIMPLE_CALL(gimple stmt) {
- tree lhs = gimple_call_lhs(stmt);
- if (!lhs) {
- // The returned value is not used.
- if (!isa<AGGREGATE_TYPE>(gimple_call_return_type(stmt))) {
- OutputCallRHS(stmt, 0);
- return;
+ if (isa<AGGREGATE_TYPE>(TREE_TYPE(lhs))) {
+ assert(get_gimple_rhs_class(gimple_expr_code(stmt)) ==
+ GIMPLE_SINGLE_RHS && "Aggregate type but rhs not simple!");
+ LValue LV = EmitLV(lhs);
+ MemRef NewLoc(LV.Ptr, LV.getAlignment(), TREE_THIS_VOLATILE(lhs));
+ EmitAggregate(gimple_assign_rhs1(stmt), NewLoc);
+ return;
+ }
+ WriteScalarToLHS(lhs, EmitAssignRHS(stmt));
}
- // Create a temporary to hold the returned value.
- // TODO: Figure out how to avoid creating this temporary and the
- // associated useless code that stores the returned value into it.
- MemRef Loc = CreateTempLoc(ConvertType(gimple_call_return_type(stmt)));
- OutputCallRHS(stmt, &Loc);
- return;
- }
-
- if (isa<AGGREGATE_TYPE>(TREE_TYPE(lhs))) {
- LValue LV = EmitLV(lhs);
- MemRef NewLoc(LV.Ptr, LV.getAlignment(), TREE_THIS_VOLATILE(lhs));
- OutputCallRHS(stmt, &NewLoc);
- return;
- }
- WriteScalarToLHS(lhs, OutputCallRHS(stmt, 0));
-}
-
-void TreeToLLVM::RenderGIMPLE_COND(gimple stmt) {
- // Emit the comparison.
- Value *Cond = EmitCompare(gimple_cond_lhs(stmt), gimple_cond_rhs(stmt),
- gimple_cond_code(stmt));
- // Extract the target basic blocks.
- edge true_edge, false_edge;
- extract_true_false_edges_from_block(gimple_bb(stmt), &true_edge, &false_edge);
- BasicBlock *IfTrue = getBasicBlock(true_edge->dest);
- BasicBlock *IfFalse = getBasicBlock(false_edge->dest);
-
- // Branch based on the condition.
- Builder.CreateCondBr(Cond, IfTrue, IfFalse);
-}
-
-void TreeToLLVM::RenderGIMPLE_EH_DISPATCH(gimple stmt) {
- int RegionNo = gimple_eh_dispatch_region(stmt);
- eh_region region = get_eh_region_from_number(RegionNo);
+ void TreeToLLVM::RenderGIMPLE_CALL(gimple stmt) {
+ tree lhs = gimple_call_lhs(stmt);
+ if (!lhs) {
+ // The returned value is not used.
+ if (!isa<AGGREGATE_TYPE>(gimple_call_return_type(stmt))) {
+ OutputCallRHS(stmt, 0);
+ return;
+ }
+ // Create a temporary to hold the returned value.
+ // TODO: Figure out how to avoid creating this temporary and the
+ // associated useless code that stores the returned value into it.
+ MemRef Loc = CreateTempLoc(ConvertType(gimple_call_return_type(stmt)));
+ OutputCallRHS(stmt, &Loc);
+ return;
+ }
- switch (region->type) {
- default:
- llvm_unreachable("Unexpected region type!");
- case ERT_ALLOWED_EXCEPTIONS: {
- // Filter.
- BasicBlock *Dest = getLabelDeclBlock(region->u.allowed.label);
-
- if (!region->u.allowed.type_list) {
- // Not allowed to throw. Branch directly to the post landing pad.
- Builder.CreateBr(Dest);
- BeginBlock(BasicBlock::Create(Context));
- break;
+ if (isa<AGGREGATE_TYPE>(TREE_TYPE(lhs))) {
+ LValue LV = EmitLV(lhs);
+ MemRef NewLoc(LV.Ptr, LV.getAlignment(), TREE_THIS_VOLATILE(lhs));
+ OutputCallRHS(stmt, &NewLoc);
+ return;
+ }
+ WriteScalarToLHS(lhs, OutputCallRHS(stmt, 0));
}
- // The result of a filter selection will be a negative index if there is a
- // match.
- // FIXME: It looks like you have to compare against a specific value,
- // checking for any old negative number is not enough! This should not
- // matter if the failure code branched to on a filter match is always the
- // same (as in C++), but might cause problems with other languages.
- Value *Filter = Builder.CreateLoad(getExceptionFilter(RegionNo));
-
- // Compare with the filter action value.
- Value *Zero = ConstantInt::get(Filter->getType(), 0);
- Value *Compare = Builder.CreateICmpSLT(Filter, Zero);
-
- // Branch on the compare.
- BasicBlock *NoMatchBB = BasicBlock::Create(Context);
- Builder.CreateCondBr(Compare, Dest, NoMatchBB);
- BeginBlock(NoMatchBB);
- break;
- }
- case ERT_TRY:
- // Catches.
- Value *Filter = NULL;
- SmallSet<Value *, 8> AlreadyCaught; // Typeinfos known caught.
- Function *TypeIDIntr = Intrinsic::getDeclaration(TheModule,
- Intrinsic::eh_typeid_for);
- for (eh_catch c = region->u.eh_try.first_catch; c ; c = c->next_catch) {
- BasicBlock *Dest = getLabelDeclBlock(c->label);
- if (!c->type_list) {
- // Catch-all. Branch directly to the post landing pad.
- Builder.CreateBr(Dest);
- break;
- }
+ void TreeToLLVM::RenderGIMPLE_COND(gimple stmt) {
+ // Emit the comparison.
+ Value *Cond = EmitCompare(gimple_cond_lhs(stmt), gimple_cond_rhs(stmt),
+ gimple_cond_code(stmt));
- Value *Cond = NULL;
- for (tree type = c->type_list; type; type = TREE_CHAIN (type)) {
- Value *TypeInfo = ConvertTypeInfo(TREE_VALUE(type));
- // No point in trying to catch a typeinfo that was already caught.
- if (!AlreadyCaught.insert(TypeInfo))
- continue;
+ // Extract the target basic blocks.
+ edge true_edge, false_edge;
+ extract_true_false_edges_from_block(gimple_bb(stmt), &true_edge,
+ &false_edge);
+ BasicBlock *IfTrue = getBasicBlock(true_edge->dest);
+ BasicBlock *IfFalse = getBasicBlock(false_edge->dest);
- TypeInfo = Builder.CreateBitCast(TypeInfo, Builder.getInt8PtrTy());
+ // Branch based on the condition.
+ Builder.CreateCondBr(Cond, IfTrue, IfFalse);
+ }
- // Call get eh type id.
- Value *TypeID = Builder.CreateCall(TypeIDIntr, TypeInfo, "typeid");
+ void TreeToLLVM::RenderGIMPLE_EH_DISPATCH(gimple stmt) {
+ int RegionNo = gimple_eh_dispatch_region(stmt);
+ eh_region region = get_eh_region_from_number(RegionNo);
- if (!Filter)
- Filter = Builder.CreateLoad(getExceptionFilter(RegionNo));
+ switch (region->type) {
+ default:
+ llvm_unreachable("Unexpected region type!");
+ case ERT_ALLOWED_EXCEPTIONS: {
+ // Filter.
+ BasicBlock *Dest = getLabelDeclBlock(region->u.allowed.label);
- // Compare with the exception selector.
- Value *Compare = Builder.CreateICmpEQ(Filter, TypeID);
+ if (!region->u.allowed.type_list) {
+ // Not allowed to throw. Branch directly to the post landing pad.
+ Builder.CreateBr(Dest);
+ BeginBlock(BasicBlock::Create(Context));
+ break;
+ }
- Cond = Cond ? Builder.CreateOr(Cond, Compare) : Compare;
- }
+ // The result of a filter selection will be a negative index if there is a
+ // match.
+ // FIXME: It looks like you have to compare against a specific value,
+ // checking for any old negative number is not enough! This should not
+ // matter if the failure code branched to on a filter match is always the
+ // same (as in C++), but might cause problems with other languages.
+ Value *Filter = Builder.CreateLoad(getExceptionFilter(RegionNo));
+
+ // Compare with the filter action value.
+ Value *Zero = ConstantInt::get(Filter->getType(), 0);
+ Value *Compare = Builder.CreateICmpSLT(Filter, Zero);
- if (Cond) {
+ // Branch on the compare.
BasicBlock *NoMatchBB = BasicBlock::Create(Context);
- Builder.CreateCondBr(Cond, Dest, NoMatchBB);
+ Builder.CreateCondBr(Compare, Dest, NoMatchBB);
BeginBlock(NoMatchBB);
+ break;
}
- }
- break;
- }
-}
-
-void TreeToLLVM::RenderGIMPLE_GOTO(gimple stmt) {
- tree dest = gimple_goto_dest(stmt);
-
- if (isa<LABEL_DECL>(dest)) {
- // Direct branch.
- Builder.CreateBr(getLabelDeclBlock(dest));
- return;
- }
-
- // Indirect branch.
- basic_block source = gimple_bb(stmt);
- IndirectBrInst *Br = Builder.CreateIndirectBr(EmitRegister(dest),
- EDGE_COUNT(source->succs));
-
- // Add the list of possible destinations.
- edge e;
- edge_iterator ei;
- FOR_EACH_EDGE (e, ei, source->succs)
- Br->addDestination(getBasicBlock(e->dest));
-}
+ case ERT_TRY:
+ // Catches.
+ Value *Filter = NULL;
+ SmallSet<Value *, 8> AlreadyCaught; // Typeinfos known caught.
+ Function *TypeIDIntr = Intrinsic::getDeclaration(
+ TheModule, Intrinsic::eh_typeid_for);
+ for (eh_catch c = region->u.eh_try.first_catch; c; c = c->next_catch) {
+ BasicBlock *Dest = getLabelDeclBlock(c->label);
+ if (!c->type_list) {
+ // Catch-all. Branch directly to the post landing pad.
+ Builder.CreateBr(Dest);
+ break;
+ }
-void TreeToLLVM::RenderGIMPLE_RESX(gimple stmt) {
- // Reraise an exception. If this statement is inside an exception handling
- // region then the reraised exception may be caught by the current function,
- // in which case it can be simplified into a branch.
- int DstLPadNo = lookup_stmt_eh_lp(stmt);
- eh_region dst_rgn =
- DstLPadNo ? get_eh_region_from_lp_number(DstLPadNo) : NULL;
- eh_region src_rgn = get_eh_region_from_number(gimple_resx_region(stmt));
+ Value *Cond = NULL;
+ for (tree type = c->type_list; type; type = TREE_CHAIN(type)) {
+ Value *TypeInfo = ConvertTypeInfo(TREE_VALUE(type));
+ // No point in trying to catch a typeinfo that was already caught.
+ if (!AlreadyCaught.insert(TypeInfo))
+ continue;
- if (!src_rgn) {
- // Unreachable block?
- Builder.CreateUnreachable();
- return;
- }
+ TypeInfo = Builder.CreateBitCast(TypeInfo, Builder.getInt8PtrTy());
- if (dst_rgn) {
- if (DstLPadNo < 0) {
- // The reraise is inside a must-not-throw region. Turn the reraise into a
- // call to the failure routine (eg: std::terminate).
- assert(dst_rgn->type == ERT_MUST_NOT_THROW && "Unexpected region type!");
+ // Call get eh type id.
+ Value *TypeID = Builder.CreateCall(TypeIDIntr, TypeInfo, "typeid");
- // Branch to the block containing the failure code.
- Builder.CreateBr(getFailureBlock(dst_rgn->index));
- return;
- }
+ if (!Filter)
+ Filter = Builder.CreateLoad(getExceptionFilter(RegionNo));
- // Use the exception pointer and filter value for the source region as the
- // values for the destination region.
- Value *ExcPtr = Builder.CreateLoad(getExceptionPtr(src_rgn->index));
- Builder.CreateStore(ExcPtr, getExceptionPtr(dst_rgn->index));
- Value *Filter = Builder.CreateLoad(getExceptionFilter(src_rgn->index));
- Builder.CreateStore(Filter, getExceptionFilter(dst_rgn->index));
-
- // Branch to the post landing pad for the destination region.
- eh_landing_pad lp = get_eh_landing_pad_from_number(DstLPadNo);
- assert(lp && "Post landing pad not found!");
- Builder.CreateBr(getLabelDeclBlock(lp->post_landing_pad));
- return;
- }
+ // Compare with the exception selector.
+ Value *Compare = Builder.CreateICmpEQ(Filter, TypeID);
- // Unwind the exception out of the function using a resume instruction.
- Value *ExcPtr = Builder.CreateLoad(getExceptionPtr(src_rgn->index));
- Value *Filter = Builder.CreateLoad(getExceptionFilter(src_rgn->index));
- Type *UnwindDataTy = StructType::get(Builder.getInt8PtrTy(),
- Builder.getInt32Ty(), NULL);
- Value *UnwindData = UndefValue::get(UnwindDataTy);
- UnwindData = Builder.CreateInsertValue(UnwindData, ExcPtr, 0, "exc_ptr");
- UnwindData = Builder.CreateInsertValue(UnwindData, Filter, 1, "filter");
- Builder.CreateResume(UnwindData);
-}
+ Cond = Cond ? Builder.CreateOr(Cond, Compare) : Compare;
+ }
-void TreeToLLVM::RenderGIMPLE_RETURN(gimple stmt) {
- tree retval = gimple_return_retval(stmt);
- tree result = DECL_RESULT(current_function_decl);
-
- if (retval && retval != error_mark_node && retval != result) {
- // Store the return value to the function's DECL_RESULT.
- MemRef DestLoc(DECL_LOCAL(result), 1, false); // FIXME: What alignment?
- if (isa<AGGREGATE_TYPE>(TREE_TYPE(result))) {
- EmitAggregate(retval, DestLoc);
- } else {
- Value *Val = Builder.CreateBitCast(EmitRegister(retval),
- getRegType(TREE_TYPE(result)));
- StoreRegisterToMemory(Val, DestLoc, TREE_TYPE(result), 0, Builder);
+ if (Cond) {
+ BasicBlock *NoMatchBB = BasicBlock::Create(Context);
+ Builder.CreateCondBr(Cond, Dest, NoMatchBB);
+ BeginBlock(NoMatchBB);
+ }
+ }
+ break;
+ }
}
- }
- // Emit a branch to the exit label.
- if (!ReturnBB)
- // Create a new block for the return node, but don't insert it yet.
- ReturnBB = BasicBlock::Create(Context, "return");
+ void TreeToLLVM::RenderGIMPLE_GOTO(gimple stmt) {
+ tree dest = gimple_goto_dest(stmt);
- Builder.CreateBr(ReturnBB);
-}
+ if (isa<LABEL_DECL>(dest)) {
+ // Direct branch.
+ Builder.CreateBr(getLabelDeclBlock(dest));
+ return;
+ }
-void TreeToLLVM::RenderGIMPLE_SWITCH(gimple stmt) {
- // Emit the condition.
- Value *Index = EmitRegister(gimple_switch_index(stmt));
- tree index_type = TREE_TYPE(gimple_switch_index(stmt));
+ // Indirect branch.
+ basic_block source = gimple_bb(stmt);
+ IndirectBrInst *Br = Builder.CreateIndirectBr(EmitRegister(dest),
+ EDGE_COUNT(source->succs));
+
+ // Add the list of possible destinations.
+ edge e;
+ edge_iterator ei;
+ FOR_EACH_EDGE(e, ei, source->succs)
+ Br->addDestination(getBasicBlock(e->dest));
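+ // For a computed goto this produces IR along the lines of (a sketch):
+ //   indirectbr i8* %dest.addr, [label %bb1, label %bb2]
+ // with one destination listed per successor edge of the source block.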
+ }
+
+ void TreeToLLVM::RenderGIMPLE_RESX(gimple stmt) {
+ // Reraise an exception. If this statement is inside an exception handling
+ // region then the reraised exception may be caught by the current function,
+ // in which case it can be simplified into a branch.
+ int DstLPadNo = lookup_stmt_eh_lp(stmt);
+ eh_region dst_rgn = DstLPadNo ? get_eh_region_from_lp_number(DstLPadNo) :
+ NULL;
+ eh_region src_rgn = get_eh_region_from_number(gimple_resx_region(stmt));
- // Create the switch instruction.
- tree default_label = CASE_LABEL(gimple_switch_label(stmt, 0));
- SwitchInst *SI = Builder.CreateSwitch(Index, getLabelDeclBlock(default_label),
- gimple_switch_num_labels(stmt));
+ if (!src_rgn) {
+ // Unreachable block?
+ Builder.CreateUnreachable();
+ return;
+ }
- // Add the switch cases.
- BasicBlock *IfBlock = 0; // Set if a range was output as an "if".
- for (unsigned i = 1, e = gimple_switch_num_labels(stmt); i != e; ++i) {
- tree label = gimple_switch_label(stmt, i);
- BasicBlock *Dest = getLabelDeclBlock(CASE_LABEL(label));
+ if (dst_rgn) {
+ if (DstLPadNo < 0) {
+ // The reraise is inside a must-not-throw region. Turn the reraise into a
+ // call to the failure routine (eg: std::terminate).
+ assert(dst_rgn->type == ERT_MUST_NOT_THROW &&
+ "Unexpected region type!");
+
+ // Branch to the block containing the failure code.
+ Builder.CreateBr(getFailureBlock(dst_rgn->index));
+ return;
+ }
- // Convert the integer to the right type.
- Value *Val = EmitRegisterWithCast(CASE_LOW(label), index_type);
- ConstantInt *LowC = cast<ConstantInt>(Val);
+ // Use the exception pointer and filter value for the source region as the
+ // values for the destination region.
+ Value *ExcPtr = Builder.CreateLoad(getExceptionPtr(src_rgn->index));
+ Builder.CreateStore(ExcPtr, getExceptionPtr(dst_rgn->index));
+ Value *Filter = Builder.CreateLoad(getExceptionFilter(src_rgn->index));
+ Builder.CreateStore(Filter, getExceptionFilter(dst_rgn->index));
+
+ // Branch to the post landing pad for the destination region.
+ eh_landing_pad lp = get_eh_landing_pad_from_number(DstLPadNo);
+ assert(lp && "Post landing pad not found!");
+ Builder.CreateBr(getLabelDeclBlock(lp->post_landing_pad));
+ return;
+ }
- if (!CASE_HIGH(label)) {
- SI->addCase(LowC, Dest); // Single destination.
- continue;
+ // Unwind the exception out of the function using a resume instruction.
+ Value *ExcPtr = Builder.CreateLoad(getExceptionPtr(src_rgn->index));
+ Value *Filter = Builder.CreateLoad(getExceptionFilter(src_rgn->index));
+ Type *UnwindDataTy = StructType::get(Builder.getInt8PtrTy(),
+ Builder.getInt32Ty(), NULL);
+ Value *UnwindData = UndefValue::get(UnwindDataTy);
+ UnwindData = Builder.CreateInsertValue(UnwindData, ExcPtr, 0, "exc_ptr");
+ UnwindData = Builder.CreateInsertValue(UnwindData, Filter, 1, "filter");
+ Builder.CreateResume(UnwindData);
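+ // The resume above materializes as (assumed IR shape):
+ //   resume { i8*, i32 } %unwind_data
+ // propagating the exception pointer and selector value out of the
+ // function to the enclosing unwinder.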
}
- // Otherwise, we have a range, like 'case 1 ... 17'.
- // Make sure the case value is the same type as the switch expression
- Val = EmitRegisterWithCast(CASE_HIGH(label), index_type);
- ConstantInt *HighC = cast<ConstantInt>(Val);
-
- APInt Range = HighC->getValue() - LowC->getValue();
- if (Range.ult(APInt(Range.getBitWidth(), 64))) {
- // Add all of the necessary successors to the switch.
- APInt CurrentValue = LowC->getValue();
- while (1) {
- SI->addCase(LowC, Dest);
- if (LowC == HighC) break; // Emitted the last one.
- CurrentValue++;
- LowC = ConstantInt::get(Context, CurrentValue);
+ void TreeToLLVM::RenderGIMPLE_RETURN(gimple stmt) {
+ tree retval = gimple_return_retval(stmt);
+ tree result = DECL_RESULT(current_function_decl);
+
+ if (retval && retval != error_mark_node && retval != result) {
+ // Store the return value to the function's DECL_RESULT.
+ MemRef DestLoc(DECL_LOCAL(result), 1, false); // FIXME: What alignment?
+ if (isa<AGGREGATE_TYPE>(TREE_TYPE(result))) {
+ EmitAggregate(retval, DestLoc);
+ } else {
+ Value *Val = Builder.CreateBitCast(EmitRegister(retval),
+ getRegType(TREE_TYPE(result)));
+ StoreRegisterToMemory(Val, DestLoc, TREE_TYPE(result), 0, Builder);
+ }
}
- } else {
- // The range is too big to add to the switch - emit an "if".
- if (!IfBlock) {
- IfBlock = BasicBlock::Create(Context);
- BeginBlock(IfBlock);
- }
- Value *Diff = Builder.CreateSub(Index, LowC);
- Value *Cond = Builder.CreateICmpULE(Diff,
- ConstantInt::get(Context, Range));
- BasicBlock *False_Block = BasicBlock::Create(Context);
- Builder.CreateCondBr(Cond, Dest, False_Block);
- BeginBlock(False_Block);
- }
- }
- if (IfBlock) {
- Builder.CreateBr(SI->getDefaultDest());
- SI->setDefaultDest(IfBlock);
- }
-}
+ // Emit a branch to the exit label.
+ if (!ReturnBB)
+ // Create a new block for the return node, but don't insert it yet.
+ ReturnBB = BasicBlock::Create(Context, "return");
+
+ Builder.CreateBr(ReturnBB);
+ }
+
+ void TreeToLLVM::RenderGIMPLE_SWITCH(gimple stmt) {
+ // Emit the condition.
+ Value *Index = EmitRegister(gimple_switch_index(stmt));
+ tree index_type = TREE_TYPE(gimple_switch_index(stmt));
+
+ // Create the switch instruction.
+ tree default_label = CASE_LABEL(gimple_switch_label(stmt, 0));
+ SwitchInst *SI = Builder.CreateSwitch(Index,
+ getLabelDeclBlock(default_label),
+ gimple_switch_num_labels(stmt));
+
+ // Add the switch cases.
+ BasicBlock *IfBlock = 0; // Set if a range was output as an "if".
+ for (unsigned i = 1, e = gimple_switch_num_labels(stmt); i != e; ++i) {
+ tree label = gimple_switch_label(stmt, i);
+ BasicBlock *Dest = getLabelDeclBlock(CASE_LABEL(label));
+
+ // Convert the integer to the right type.
+ Value *Val = EmitRegisterWithCast(CASE_LOW(label), index_type);
+ ConstantInt *LowC = cast<ConstantInt>(Val);
+ if (!CASE_HIGH(label)) {
+ SI->addCase(LowC, Dest); // Single destination.
+ continue;
+ }
-//===----------------------------------------------------------------------===//
-// ... Render helpers ...
-//===----------------------------------------------------------------------===//
+ // Otherwise, we have a range, like 'case 1 ... 17'.
+ // Make sure the case value is the same type as the switch expression
+ Val = EmitRegisterWithCast(CASE_HIGH(label), index_type);
+ ConstantInt *HighC = cast<ConstantInt>(Val);
+
+ APInt Range = HighC->getValue() - LowC->getValue();
+ if (Range.ult(APInt(Range.getBitWidth(), 64))) {
+ // Add all of the necessary successors to the switch.
+ APInt CurrentValue = LowC->getValue();
+ while (1) {
+ SI->addCase(LowC, Dest);
+ if (LowC == HighC)
+ break; // Emitted the last one.
+ CurrentValue++;
+ LowC = ConstantInt::get(Context, CurrentValue);
+ }
+ } else {
+ // The range is too big to add to the switch - emit an "if".
+ if (!IfBlock) {
+ IfBlock = BasicBlock::Create(Context);
+ BeginBlock(IfBlock);
+ }
+ Value *Diff = Builder.CreateSub(Index, LowC);
+ Value *Cond = Builder.CreateICmpULE(Diff,
+ ConstantInt::get(Context, Range));
+ BasicBlock *False_Block = BasicBlock::Create(Context);
+ Builder.CreateCondBr(Cond, Dest, False_Block);
+ BeginBlock(False_Block);
+ }
+ }
+
+ if (IfBlock) {
+ Builder.CreateBr(SI->getDefaultDest());
+ SI->setDefaultDest(IfBlock);
+ }
+ }
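The "emit an \"if\"" branch above uses a standard trick: for 'case lo ... hi' the two signed bound checks collapse into a single unsigned comparison, because x - lo wraps around to a huge unsigned value whenever x < lo. A self-contained sketch of the test being emitted (assuming 32-bit case values and lo <= hi; the names are invented):

    #include <cassert>
    #include <cstdint>

    // One unsigned compare covers both 'x >= lo' and 'x <= hi'.
    static bool inCaseRange(int32_t x, int32_t lo, int32_t hi) {
      return uint32_t(x) - uint32_t(lo) <= uint32_t(hi) - uint32_t(lo);
    }

    int main() {
      assert(inCaseRange(1, 1, 17) && inCaseRange(17, 1, 17));
      assert(!inCaseRange(0, 1, 17) && !inCaseRange(18, 1, 17));
      return 0;
    }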
-/// EmitAssignRHS - Convert the RHS of a scalar GIMPLE_ASSIGN to LLVM.
-Value *TreeToLLVM::EmitAssignRHS(gimple stmt) {
- // Loads from memory and other non-register expressions are handled by
- // EmitAssignSingleRHS.
- if (get_gimple_rhs_class(gimple_expr_code(stmt)) == GIMPLE_SINGLE_RHS) {
- Value *RHS = EmitAssignSingleRHS(gimple_assign_rhs1(stmt));
- assert(RHS->getType() == getRegType(TREE_TYPE(gimple_assign_rhs1(stmt))) &&
- "RHS has wrong type!");
- return RHS;
- }
-
- // The RHS is a register expression. Emit it now.
- tree type = TREE_TYPE(gimple_assign_lhs(stmt));
- tree_code code = gimple_assign_rhs_code(stmt);
- tree rhs1 = gimple_assign_rhs1(stmt);
- tree rhs2 = gimple_assign_rhs2(stmt);
+ //===----------------------------------------------------------------------===//
+ // ... Render helpers ...
+ //===----------------------------------------------------------------------===//
+
+ /// EmitAssignRHS - Convert the RHS of a scalar GIMPLE_ASSIGN to LLVM.
+ Value *TreeToLLVM::EmitAssignRHS(gimple stmt) {
+ // Loads from memory and other non-register expressions are handled by
+ // EmitAssignSingleRHS.
+ if (get_gimple_rhs_class(gimple_expr_code(stmt)) == GIMPLE_SINGLE_RHS) {
+ Value *RHS = EmitAssignSingleRHS(gimple_assign_rhs1(stmt));
+ assert(RHS->getType() ==
+ getRegType(TREE_TYPE(gimple_assign_rhs1(stmt))) &&
+ "RHS has wrong type!");
+ return RHS;
+ }
+
+ // The RHS is a register expression. Emit it now.
+ tree type = TREE_TYPE(gimple_assign_lhs(stmt));
+ tree_code code = gimple_assign_rhs_code(stmt);
+ tree rhs1 = gimple_assign_rhs1(stmt);
+ tree rhs2 = gimple_assign_rhs2(stmt);
#if (GCC_MINOR > 6)
- tree rhs3 = gimple_assign_rhs3(stmt);
+ tree rhs3 = gimple_assign_rhs3(stmt);
#endif
- Value *RHS = 0;
- switch (code) {
- default:
- debug_gimple_stmt(stmt);
- llvm_unreachable("Unsupported GIMPLE assignment!");
+ Value *RHS = 0;
+ switch (code) {
+ default:
+ debug_gimple_stmt(stmt);
+ llvm_unreachable("Unsupported GIMPLE assignment!");
- // Unary expressions.
- case ABS_EXPR:
- RHS = EmitReg_ABS_EXPR(rhs1); break;
- case BIT_NOT_EXPR:
- RHS = EmitReg_BIT_NOT_EXPR(rhs1); break;
- case CONJ_EXPR:
- RHS = EmitReg_CONJ_EXPR(rhs1); break;
- case CONVERT_EXPR:
- case FIX_TRUNC_EXPR:
- case FLOAT_EXPR:
- case NOP_EXPR:
- RHS = EmitReg_CONVERT_EXPR(type, rhs1); break;
- case NEGATE_EXPR:
- RHS = EmitReg_NEGATE_EXPR(rhs1); break;
- case PAREN_EXPR:
- RHS = EmitReg_PAREN_EXPR(rhs1); break;
- case TRUTH_NOT_EXPR:
- RHS = EmitReg_TRUTH_NOT_EXPR(type, rhs1); break;
-
- // Comparisons.
- case EQ_EXPR:
- case GE_EXPR:
- case GT_EXPR:
- case LE_EXPR:
- case LT_EXPR:
- case LTGT_EXPR:
- case NE_EXPR:
- case ORDERED_EXPR:
- case UNEQ_EXPR:
- case UNGE_EXPR:
- case UNGT_EXPR:
- case UNLE_EXPR:
- case UNLT_EXPR:
- case UNORDERED_EXPR:
- // The GCC result may be of any integer type.
- RHS = Builder.CreateZExt(EmitCompare(rhs1, rhs2, code), getRegType(type));
- break;
+ // Unary expressions.
+ case ABS_EXPR:
+ RHS = EmitReg_ABS_EXPR(rhs1);
+ break;
+ case BIT_NOT_EXPR:
+ RHS = EmitReg_BIT_NOT_EXPR(rhs1);
+ break;
+ case CONJ_EXPR:
+ RHS = EmitReg_CONJ_EXPR(rhs1);
+ break;
+ case CONVERT_EXPR:
+ case FIX_TRUNC_EXPR:
+ case FLOAT_EXPR:
+ case NOP_EXPR:
+ RHS = EmitReg_CONVERT_EXPR(type, rhs1);
+ break;
+ case NEGATE_EXPR:
+ RHS = EmitReg_NEGATE_EXPR(rhs1);
+ break;
+ case PAREN_EXPR:
+ RHS = EmitReg_PAREN_EXPR(rhs1);
+ break;
+ case TRUTH_NOT_EXPR:
+ RHS = EmitReg_TRUTH_NOT_EXPR(type, rhs1);
+ break;
- // Binary expressions.
- case BIT_AND_EXPR:
- RHS = EmitReg_BIT_AND_EXPR(rhs1, rhs2); break;
- case BIT_IOR_EXPR:
- RHS = EmitReg_BIT_IOR_EXPR(rhs1, rhs2); break;
- case BIT_XOR_EXPR:
- RHS = EmitReg_BIT_XOR_EXPR(rhs1, rhs2); break;
- case CEIL_DIV_EXPR:
- RHS = EmitReg_CEIL_DIV_EXPR(rhs1, rhs2); break;
- case COMPLEX_EXPR:
- RHS = EmitReg_COMPLEX_EXPR(rhs1, rhs2); break;
- case EXACT_DIV_EXPR:
- RHS = EmitReg_TRUNC_DIV_EXPR(rhs1, rhs2, /*isExact*/true); break;
- case FLOOR_DIV_EXPR:
- RHS = EmitReg_FLOOR_DIV_EXPR(rhs1, rhs2); break;
- case FLOOR_MOD_EXPR:
- RHS = EmitReg_FLOOR_MOD_EXPR(rhs1, rhs2); break;
- case LROTATE_EXPR:
- RHS = EmitReg_RotateOp(type, rhs1, rhs2, Instruction::Shl,
- Instruction::LShr);
- break;
- case LSHIFT_EXPR:
- RHS = EmitReg_ShiftOp(rhs1, rhs2, Instruction::Shl); break;
- case MAX_EXPR:
- RHS = EmitReg_MinMaxExpr(rhs1, rhs2, ICmpInst::ICMP_UGE, ICmpInst::ICMP_SGE,
- FCmpInst::FCMP_OGE);
- break;
- case MIN_EXPR:
- RHS = EmitReg_MinMaxExpr(rhs1, rhs2, ICmpInst::ICMP_ULE, ICmpInst::ICMP_SLE,
- FCmpInst::FCMP_OLE);
- break;
- case MINUS_EXPR:
- RHS = EmitReg_MINUS_EXPR(rhs1, rhs2); break;
- case MULT_EXPR:
- RHS = EmitReg_MULT_EXPR(rhs1, rhs2); break;
- case PLUS_EXPR:
- RHS = EmitReg_PLUS_EXPR(rhs1, rhs2); break;
- case POINTER_PLUS_EXPR:
- RHS = EmitReg_POINTER_PLUS_EXPR(rhs1, rhs2); break;
- case RDIV_EXPR:
- RHS = EmitReg_RDIV_EXPR(rhs1, rhs2); break;
- case REDUC_MAX_EXPR:
- RHS = EmitReg_ReducMinMaxExpr(rhs1, ICmpInst::ICMP_UGE, ICmpInst::ICMP_SGE,
- FCmpInst::FCMP_OGE);
- break;
- case REDUC_MIN_EXPR:
- RHS = EmitReg_ReducMinMaxExpr(rhs1, ICmpInst::ICMP_ULE, ICmpInst::ICMP_SLE,
- FCmpInst::FCMP_OLE);
- break;
- case REDUC_PLUS_EXPR:
- RHS = EmitReg_REDUC_PLUS_EXPR(rhs1);
- break;
- case ROUND_DIV_EXPR:
- RHS = EmitReg_ROUND_DIV_EXPR(rhs1, rhs2); break;
- case RROTATE_EXPR:
- RHS = EmitReg_RotateOp(type, rhs1, rhs2, Instruction::LShr,
- Instruction::Shl);
- break;
- case RSHIFT_EXPR:
- RHS = EmitReg_ShiftOp(rhs1, rhs2, TYPE_UNSIGNED(type) ?
- Instruction::LShr : Instruction::AShr);
- break;
- case TRUNC_DIV_EXPR:
- RHS = EmitReg_TRUNC_DIV_EXPR(rhs1, rhs2, /*isExact*/false); break;
- case TRUNC_MOD_EXPR:
- RHS = EmitReg_TRUNC_MOD_EXPR(rhs1, rhs2); break;
- case TRUTH_AND_EXPR:
- RHS = EmitReg_TruthOp(type, rhs1, rhs2, Instruction::And); break;
- case TRUTH_OR_EXPR:
- RHS = EmitReg_TruthOp(type, rhs1, rhs2, Instruction::Or); break;
- case TRUTH_XOR_EXPR:
- RHS = EmitReg_TruthOp(type, rhs1, rhs2, Instruction::Xor); break;
-#if (GCC_MINOR < 7)
- case VEC_EXTRACT_EVEN_EXPR:
- RHS = EmitReg_VEC_EXTRACT_EVEN_EXPR(rhs1, rhs2); break;
- case VEC_EXTRACT_ODD_EXPR:
- RHS = EmitReg_VEC_EXTRACT_ODD_EXPR(rhs1, rhs2); break;
- case VEC_INTERLEAVE_HIGH_EXPR:
- RHS = EmitReg_VEC_INTERLEAVE_HIGH_EXPR(rhs1, rhs2); break;
- case VEC_INTERLEAVE_LOW_EXPR:
- RHS = EmitReg_VEC_INTERLEAVE_LOW_EXPR(rhs1, rhs2); break;
-#endif
- case VEC_LSHIFT_EXPR:
- RHS = EmitReg_VecShiftOp(rhs1, rhs2, /*isLeftShift*/true); break;
- case VEC_PACK_FIX_TRUNC_EXPR:
- case VEC_PACK_TRUNC_EXPR:
- RHS = EmitReg_VEC_PACK_TRUNC_EXPR(type, rhs1, rhs2); break;
- case VEC_RSHIFT_EXPR:
- RHS = EmitReg_VecShiftOp(rhs1, rhs2, /*isLeftShift*/false); break;
- case VEC_UNPACK_FLOAT_HI_EXPR:
- case VEC_UNPACK_HI_EXPR:
- RHS = EmitReg_VecUnpackHiExpr(type, rhs1); break;
- case VEC_UNPACK_FLOAT_LO_EXPR:
- case VEC_UNPACK_LO_EXPR:
- RHS = EmitReg_VecUnpackLoExpr(type, rhs1); break;
- case VEC_WIDEN_MULT_HI_EXPR:
- RHS = EmitReg_VEC_WIDEN_MULT_HI_EXPR(type, rhs1, rhs2); break;
- case VEC_WIDEN_MULT_LO_EXPR:
- RHS = EmitReg_VEC_WIDEN_MULT_LO_EXPR(type, rhs1, rhs2); break;
- case WIDEN_MULT_EXPR:
- RHS = EmitReg_WIDEN_MULT_EXPR(type, rhs1, rhs2); break;
+ // Comparisons.
+ case EQ_EXPR:
+ case GE_EXPR:
+ case GT_EXPR:
+ case LE_EXPR:
+ case LT_EXPR:
+ case LTGT_EXPR:
+ case NE_EXPR:
+ case ORDERED_EXPR:
+ case UNEQ_EXPR:
+ case UNGE_EXPR:
+ case UNGT_EXPR:
+ case UNLE_EXPR:
+ case UNLT_EXPR:
+ case UNORDERED_EXPR:
+ // The GCC result may be of any integer type.
+ RHS = Builder.CreateZExt(EmitCompare(rhs1, rhs2, code),
+ getRegType(type));
+ break;
- // Ternary expressions.
-#if (GCC_MINOR > 6)
- case COND_EXPR:
- case VEC_COND_EXPR:
- RHS = EmitReg_CondExpr(rhs1, rhs2, rhs3); break;
- case VEC_PERM_EXPR:
- RHS = EmitReg_VEC_PERM_EXPR(rhs1, rhs2, rhs3); break;
+ // Binary expressions.
+ case BIT_AND_EXPR:
+ RHS = EmitReg_BIT_AND_EXPR(rhs1, rhs2);
+ break;
+ case BIT_IOR_EXPR:
+ RHS = EmitReg_BIT_IOR_EXPR(rhs1, rhs2);
+ break;
+ case BIT_XOR_EXPR:
+ RHS = EmitReg_BIT_XOR_EXPR(rhs1, rhs2);
+ break;
+ case CEIL_DIV_EXPR:
+ RHS = EmitReg_CEIL_DIV_EXPR(rhs1, rhs2);
+ break;
+ case COMPLEX_EXPR:
+ RHS = EmitReg_COMPLEX_EXPR(rhs1, rhs2);
+ break;
+ case EXACT_DIV_EXPR:
+ RHS = EmitReg_TRUNC_DIV_EXPR(rhs1, rhs2, /*isExact*/ true);
+ break;
+ case FLOOR_DIV_EXPR:
+ RHS = EmitReg_FLOOR_DIV_EXPR(rhs1, rhs2);
+ break;
+ case FLOOR_MOD_EXPR:
+ RHS = EmitReg_FLOOR_MOD_EXPR(rhs1, rhs2);
+ break;
+ case LROTATE_EXPR:
+ RHS = EmitReg_RotateOp(type, rhs1, rhs2, Instruction::Shl,
+ Instruction::LShr);
+ break;
+ case LSHIFT_EXPR:
+ RHS = EmitReg_ShiftOp(rhs1, rhs2, Instruction::Shl);
+ break;
+ case MAX_EXPR:
+ RHS = EmitReg_MinMaxExpr(rhs1, rhs2, ICmpInst::ICMP_UGE,
+ ICmpInst::ICMP_SGE, FCmpInst::FCMP_OGE);
+ break;
+ case MIN_EXPR:
+ RHS = EmitReg_MinMaxExpr(rhs1, rhs2, ICmpInst::ICMP_ULE,
+ ICmpInst::ICMP_SLE, FCmpInst::FCMP_OLE);
+ break;
+ case MINUS_EXPR:
+ RHS = EmitReg_MINUS_EXPR(rhs1, rhs2);
+ break;
+ case MULT_EXPR:
+ RHS = EmitReg_MULT_EXPR(rhs1, rhs2);
+ break;
+ case PLUS_EXPR:
+ RHS = EmitReg_PLUS_EXPR(rhs1, rhs2);
+ break;
+ case POINTER_PLUS_EXPR:
+ RHS = EmitReg_POINTER_PLUS_EXPR(rhs1, rhs2);
+ break;
+ case RDIV_EXPR:
+ RHS = EmitReg_RDIV_EXPR(rhs1, rhs2);
+ break;
+ case REDUC_MAX_EXPR:
+ RHS = EmitReg_ReducMinMaxExpr(rhs1, ICmpInst::ICMP_UGE,
+ ICmpInst::ICMP_SGE, FCmpInst::FCMP_OGE);
+ break;
+ case REDUC_MIN_EXPR:
+ RHS = EmitReg_ReducMinMaxExpr(rhs1, ICmpInst::ICMP_ULE,
+ ICmpInst::ICMP_SLE, FCmpInst::FCMP_OLE);
+ break;
+ case REDUC_PLUS_EXPR:
+ RHS = EmitReg_REDUC_PLUS_EXPR(rhs1);
+ break;
+ case ROUND_DIV_EXPR:
+ RHS = EmitReg_ROUND_DIV_EXPR(rhs1, rhs2);
+ break;
+ case RROTATE_EXPR:
+ RHS = EmitReg_RotateOp(type, rhs1, rhs2, Instruction::LShr,
+ Instruction::Shl);
+ break;
+ case RSHIFT_EXPR:
+ RHS = EmitReg_ShiftOp(rhs1, rhs2, TYPE_UNSIGNED(type) ?
+ Instruction::LShr : Instruction::AShr);
+ break;
+ case TRUNC_DIV_EXPR:
+ RHS = EmitReg_TRUNC_DIV_EXPR(rhs1, rhs2, /*isExact*/ false);
+ break;
+ case TRUNC_MOD_EXPR:
+ RHS = EmitReg_TRUNC_MOD_EXPR(rhs1, rhs2);
+ break;
+ case TRUTH_AND_EXPR:
+ RHS = EmitReg_TruthOp(type, rhs1, rhs2, Instruction::And);
+ break;
+ case TRUTH_OR_EXPR:
+ RHS = EmitReg_TruthOp(type, rhs1, rhs2, Instruction::Or);
+ break;
+ case TRUTH_XOR_EXPR:
+ RHS = EmitReg_TruthOp(type, rhs1, rhs2, Instruction::Xor);
+ break;
+#if (GCC_MINOR < 7)
+ case VEC_EXTRACT_EVEN_EXPR:
+ RHS = EmitReg_VEC_EXTRACT_EVEN_EXPR(rhs1, rhs2);
+ break;
+ case VEC_EXTRACT_ODD_EXPR:
+ RHS = EmitReg_VEC_EXTRACT_ODD_EXPR(rhs1, rhs2);
+ break;
+ case VEC_INTERLEAVE_HIGH_EXPR:
+ RHS = EmitReg_VEC_INTERLEAVE_HIGH_EXPR(rhs1, rhs2);
+ break;
+ case VEC_INTERLEAVE_LOW_EXPR:
+ RHS = EmitReg_VEC_INTERLEAVE_LOW_EXPR(rhs1, rhs2);
+ break;
#endif
- }
-
- return TriviallyTypeConvert(RHS, getRegType(type));
-}
+ case VEC_LSHIFT_EXPR:
+ RHS = EmitReg_VecShiftOp(rhs1, rhs2, /*isLeftShift*/ true);
+ break;
+ case VEC_PACK_FIX_TRUNC_EXPR:
+ case VEC_PACK_TRUNC_EXPR:
+ RHS = EmitReg_VEC_PACK_TRUNC_EXPR(type, rhs1, rhs2);
+ break;
+ case VEC_RSHIFT_EXPR:
+ RHS = EmitReg_VecShiftOp(rhs1, rhs2, /*isLeftShift*/ false);
+ break;
+ case VEC_UNPACK_FLOAT_HI_EXPR:
+ case VEC_UNPACK_HI_EXPR:
+ RHS = EmitReg_VecUnpackHiExpr(type, rhs1);
+ break;
+ case VEC_UNPACK_FLOAT_LO_EXPR:
+ case VEC_UNPACK_LO_EXPR:
+ RHS = EmitReg_VecUnpackLoExpr(type, rhs1);
+ break;
+ case VEC_WIDEN_MULT_HI_EXPR:
+ RHS = EmitReg_VEC_WIDEN_MULT_HI_EXPR(type, rhs1, rhs2);
+ break;
+ case VEC_WIDEN_MULT_LO_EXPR:
+ RHS = EmitReg_VEC_WIDEN_MULT_LO_EXPR(type, rhs1, rhs2);
+ break;
+ case WIDEN_MULT_EXPR:
+ RHS = EmitReg_WIDEN_MULT_EXPR(type, rhs1, rhs2);
+ break;
-/// EmitAssignSingleRHS - Helper for EmitAssignRHS. Handles those RHS that are
-/// not register expressions.
-Value *TreeToLLVM::EmitAssignSingleRHS(tree rhs) {
- assert(!isa<AGGREGATE_TYPE>(TREE_TYPE(rhs)) && "Expected a scalar type!");
+// Ternary expressions.
+#if (GCC_MINOR > 6)
+ case COND_EXPR:
+ case VEC_COND_EXPR:
+ RHS = EmitReg_CondExpr(rhs1, rhs2, rhs3);
+ break;
+ case VEC_PERM_EXPR:
+ RHS = EmitReg_VEC_PERM_EXPR(rhs1, rhs2, rhs3);
+ break;
+#endif
+ }
- switch (TREE_CODE(rhs)) {
- // Catch-all for SSA names, constants etc.
- default: return EmitRegister(rhs);
+ return TriviallyTypeConvert(RHS, getRegType(type));
+ }
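As the comment in the comparison arm notes, GCC gives comparison results an ordinary integer type, while LLVM's icmp/fcmp always yield an i1; hence the CreateZExt. A sketch of the shape of the emitted code, not from the patch (the function name is invented and the header path varies across LLVM versions):

    #include "llvm/IR/IRBuilder.h" // "llvm/IRBuilder.h" in 3.2-era trees

    // Widen the i1 from the comparison to the GIMPLE result type (i32 here).
    llvm::Value *emitLessThanAsInt32(llvm::IRBuilder<> &Builder,
                                     llvm::Value *A, llvm::Value *B) {
      llvm::Value *Cmp = Builder.CreateICmpSLT(A, B, "cmp");        // i1
      return Builder.CreateZExt(Cmp, Builder.getInt32Ty(), "conv"); // i32
    }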
- // Expressions (tcc_expression).
- case ADDR_EXPR: return EmitADDR_EXPR(rhs);
-#if (GCC_MINOR < 7)
- case COND_EXPR:
- case VEC_COND_EXPR: return EmitCondExpr(rhs);
-#endif
- case OBJ_TYPE_REF: return EmitOBJ_TYPE_REF(rhs);
+ /// EmitAssignSingleRHS - Helper for EmitAssignRHS. Handles those RHS that are
+ /// not register expressions.
+ Value *TreeToLLVM::EmitAssignSingleRHS(tree rhs) {
+ assert(!isa<AGGREGATE_TYPE>(TREE_TYPE(rhs)) && "Expected a scalar type!");
- // Exceptional (tcc_exceptional).
- case CONSTRUCTOR:
- // Vector constant constructors are gimple invariant.
- return is_gimple_constant(rhs) ?
- EmitRegisterConstant(rhs) : EmitCONSTRUCTOR(rhs, 0);
+ switch (TREE_CODE(rhs)) {
+ // Catch-all for SSA names, constants etc.
+ default:
+ return EmitRegister(rhs);
- // References (tcc_reference).
- case ARRAY_REF:
- case ARRAY_RANGE_REF:
- case BIT_FIELD_REF:
- case COMPONENT_REF:
- case IMAGPART_EXPR:
- case INDIRECT_REF:
+ // Expressions (tcc_expression).
+ case ADDR_EXPR:
+ return EmitADDR_EXPR(rhs);
+#if (GCC_MINOR < 7)
+ case COND_EXPR:
+ case VEC_COND_EXPR:
+ return EmitCondExpr(rhs);
+#endif
+ case OBJ_TYPE_REF:
+ return EmitOBJ_TYPE_REF(rhs);
+
+ // Exceptional (tcc_exceptional).
+ case CONSTRUCTOR:
+ // Vector constant constructors are gimple invariant.
+ return is_gimple_constant(rhs) ? EmitRegisterConstant(rhs) :
+ EmitCONSTRUCTOR(rhs, 0);
+
+ // References (tcc_reference).
+ case ARRAY_REF:
+ case ARRAY_RANGE_REF:
+ case BIT_FIELD_REF:
+ case COMPONENT_REF:
+ case IMAGPART_EXPR:
+ case INDIRECT_REF:
#if (GCC_MINOR > 5)
- case MEM_REF:
+ case MEM_REF:
#endif
#if (GCC_MINOR < 6)
- case MISALIGNED_INDIRECT_REF:
+ case MISALIGNED_INDIRECT_REF:
#endif
- case REALPART_EXPR:
- case TARGET_MEM_REF:
- case VIEW_CONVERT_EXPR:
- return EmitLoadOfLValue(rhs); // Load from memory.
-
- // Declarations (tcc_declaration).
- case PARM_DECL:
- case RESULT_DECL:
- case VAR_DECL:
- return EmitLoadOfLValue(rhs); // Load from memory.
-
- // Constants (tcc_constant).
- case STRING_CST:
- return EmitLoadOfLValue(rhs); // Load from memory.
- }
-}
-
-/// OutputCallRHS - Convert the RHS of a GIMPLE_CALL.
-Value *TreeToLLVM::OutputCallRHS(gimple stmt, const MemRef *DestLoc) {
- // Check for a built-in function call. If we can lower it directly, do so
- // now.
- tree fndecl = gimple_call_fndecl(stmt);
- if (fndecl && DECL_BUILT_IN(fndecl) &&
- DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_FRONTEND) {
- Value *Res = 0;
- if (EmitBuiltinCall(stmt, fndecl, DestLoc, Res))
- return Res ? Mem2Reg(Res, gimple_call_return_type(stmt), Builder) : 0;
- }
-
- tree call_expr = gimple_call_fn(stmt);
- assert(TREE_TYPE (call_expr) &&
- (isa<POINTER_TYPE>(TREE_TYPE (call_expr)) ||
- isa<REFERENCE_TYPE>(TREE_TYPE (call_expr)))
- && "Not calling a function pointer?");
-
- tree function_type = TREE_TYPE(TREE_TYPE (call_expr));
- Value *Callee = EmitRegister(call_expr);
- CallingConv::ID CallingConv;
- AttributeSet PAL;
+ case REALPART_EXPR:
+ case TARGET_MEM_REF:
+ case VIEW_CONVERT_EXPR:
+ return EmitLoadOfLValue(rhs); // Load from memory.
+
+ // Declarations (tcc_declaration).
+ case PARM_DECL:
+ case RESULT_DECL:
+ case VAR_DECL:
+ return EmitLoadOfLValue(rhs); // Load from memory.
+
+ // Constants (tcc_constant).
+ case STRING_CST:
+ return EmitLoadOfLValue(rhs); // Load from memory.
+ }
+ }
- Type *Ty;
- // If this is a K&R-style function: with a type that takes no arguments but
- // with arguments none the less, then calculate the LLVM type from the list
- // of arguments.
- if (flag_functions_from_args) {
- tree *FirstArgAddr = gimple_call_num_args(stmt) > 0 ?
- gimple_call_arg_ptr(stmt, 0) : NULL;
- Ty = ConvertArgListToFnType(function_type,
- ArrayRef<tree>(FirstArgAddr,
- gimple_call_num_args(stmt)),
- gimple_call_chain(stmt),
- !flag_functions_from_args, CallingConv, PAL);
- } else {
- Ty = ConvertFunctionType(function_type, fndecl, gimple_call_chain(stmt),
- CallingConv, PAL);
- }
+ /// OutputCallRHS - Convert the RHS of a GIMPLE_CALL.
+ Value *TreeToLLVM::OutputCallRHS(gimple stmt, const MemRef *DestLoc) {
+ // Check for a built-in function call. If we can lower it directly, do so
+ // now.
+ tree fndecl = gimple_call_fndecl(stmt);
+ if (fndecl && DECL_BUILT_IN(fndecl) &&
+ DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_FRONTEND) {
+ Value *Res = 0;
+ if (EmitBuiltinCall(stmt, fndecl, DestLoc, Res))
+ return Res ? Mem2Reg(Res, gimple_call_return_type(stmt), Builder) : 0;
+ }
+
+ tree call_expr = gimple_call_fn(stmt);
+ assert(TREE_TYPE(call_expr) &&
+ (isa<POINTER_TYPE>(TREE_TYPE(call_expr)) ||
+ isa<REFERENCE_TYPE>(TREE_TYPE(call_expr))) &&
+ "Not calling a function pointer?");
+
+ tree function_type = TREE_TYPE(TREE_TYPE(call_expr));
+ Value *Callee = EmitRegister(call_expr);
+ CallingConv::ID CallingConv;
+ AttributeSet PAL;
+
+ Type *Ty;
+ // If this is a K&R-style function: with a type that takes no arguments but
+ // with arguments none the less, then calculate the LLVM type from the list
+ // of arguments.
+ if (flag_functions_from_args) {
+ tree *FirstArgAddr = gimple_call_num_args(stmt) > 0 ?
+ gimple_call_arg_ptr(stmt, 0) : NULL;
+ Ty = ConvertArgListToFnType(
+ function_type,
+ ArrayRef<tree>(FirstArgAddr, gimple_call_num_args(stmt)),
+ gimple_call_chain(stmt), !flag_functions_from_args,
+ CallingConv, PAL);
+ } else {
+ Ty = ConvertFunctionType(function_type, fndecl, gimple_call_chain(stmt),
+ CallingConv, PAL);
+ }
- // If this is a direct call to a function using a static chain then we need
- // to ensure the function type is the one just calculated: it has an extra
- // parameter for the chain.
- Callee = Builder.CreateBitCast(Callee, Ty->getPointerTo());
+ // If this is a direct call to a function using a static chain then we need
+ // to ensure the function type is the one just calculated: it has an extra
+ // parameter for the chain.
+ Callee = Builder.CreateBitCast(Callee, Ty->getPointerTo());
- Value *Result = EmitCallOf(Callee, stmt, DestLoc, PAL);
+ Value *Result = EmitCallOf(Callee, stmt, DestLoc, PAL);
- // When calling a "noreturn" function output an unreachable instruction right
- // after the function to prevent LLVM from thinking that control flow will
- // fall into the subsequent block.
- if (gimple_call_flags(stmt) & ECF_NORETURN) {
- Builder.CreateUnreachable();
- BeginBlock(BasicBlock::Create(Context));
- }
+ // When calling a "noreturn" function output an unreachable instruction right
+ // after the function to prevent LLVM from thinking that control flow will
+ // fall into the subsequent block.
+ if (gimple_call_flags(stmt) & ECF_NORETURN) {
+ Builder.CreateUnreachable();
+ BeginBlock(BasicBlock::Create(Context));
+ }
- return Result ? Mem2Reg(Result, gimple_call_return_type(stmt), Builder) : 0;
-}
+ return Result ? Mem2Reg(Result, gimple_call_return_type(stmt), Builder) :
+ 0;
+ }
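The ECF_NORETURN handling above pairs every call to a noreturn function with an unreachable terminator, then opens a fresh block so any statements that textually follow the call still have an insertion point. In miniature, not from the patch (NoReturnFn is a hypothetical callee already declared noreturn; the call uses the single-operand CreateCall overload of the time):

    Builder.CreateCall(NoReturnFn); // e.g. a call to abort()
    Builder.CreateUnreachable();    // control cannot fall through
    llvm::BasicBlock *Dead = llvm::BasicBlock::Create(
        Context, "", Builder.GetInsertBlock()->getParent());
    Builder.SetInsertPoint(Dead);   // following code lands in a dead block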
-/// WriteScalarToLHS - Store RHS, a non-aggregate value, into the given LHS.
-void TreeToLLVM::WriteScalarToLHS(tree lhs, Value *RHS) {
- // May need a useless type conversion (useless_type_conversion_p).
- RHS = TriviallyTypeConvert(RHS, getRegType(TREE_TYPE(lhs)));
+ /// WriteScalarToLHS - Store RHS, a non-aggregate value, into the given LHS.
+ void TreeToLLVM::WriteScalarToLHS(tree lhs, Value *RHS) {
+ // May need a useless type conversion (useless_type_conversion_p).
+ RHS = TriviallyTypeConvert(RHS, getRegType(TREE_TYPE(lhs)));
- // If this is the definition of an ssa name, record it in the SSANames map.
- if (isa<SSA_NAME>(lhs)) {
- if (flag_verbose_asm)
- NameValue(RHS, lhs);
- DefineSSAName(lhs, RHS);
- return;
- }
+ // If this is the definition of an ssa name, record it in the SSANames map.
+ if (isa<SSA_NAME>(lhs)) {
+ if (flag_verbose_asm)
+ NameValue(RHS, lhs);
+ DefineSSAName(lhs, RHS);
+ return;
+ }
- if (canEmitRegisterVariable(lhs)) {
- // If this is a store to a register variable, EmitLV can't handle the dest
- // (there is no l-value of a register variable). Emit an inline asm node
- // that copies the value into the specified register.
- EmitModifyOfRegisterVariable(lhs, RHS);
- return;
- }
+ if (canEmitRegisterVariable(lhs)) {
+ // If this is a store to a register variable, EmitLV can't handle the dest
+ // (there is no l-value of a register variable). Emit an inline asm node
+ // that copies the value into the specified register.
+ EmitModifyOfRegisterVariable(lhs, RHS);
+ return;
+ }
- LValue LV = EmitLV(lhs);
- LV.Volatile = TREE_THIS_VOLATILE(lhs);
- // TODO: Arrange for Volatile to already be set in the LValue.
- if (!LV.isBitfield()) {
- // Non-bitfield, scalar value. Just emit a store.
- StoreRegisterToMemory(RHS, LV, TREE_TYPE(lhs), describeAliasSet(lhs),
- Builder);
- return;
- }
+ LValue LV = EmitLV(lhs);
+ LV.Volatile = TREE_THIS_VOLATILE(lhs);
+ // TODO: Arrange for Volatile to already be set in the LValue.
+ if (!LV.isBitfield()) {
+ // Non-bitfield, scalar value. Just emit a store.
+ StoreRegisterToMemory(RHS, LV, TREE_TYPE(lhs), describeAliasSet(lhs),
+ Builder);
+ return;
+ }
- // Last case, this is a store to a bitfield, so we have to emit a
- // read/modify/write sequence.
- if (!LV.BitSize)
- return;
+ // Last case, this is a store to a bitfield, so we have to emit a
+ // read/modify/write sequence.
+ if (!LV.BitSize)
+ return;
- // Load and store the minimum number of bytes that covers the field.
- unsigned LoadSizeInBits = LV.BitStart + LV.BitSize;
- LoadSizeInBits = (unsigned)RoundUpToAlignment(LoadSizeInBits, BITS_PER_UNIT);
- Type *LoadType = IntegerType::get(Context, LoadSizeInBits);
-
- // Load the bits.
- Value *Ptr = Builder.CreateBitCast(LV.Ptr, LoadType->getPointerTo());
- Value *Val = Builder.CreateAlignedLoad(Ptr, LV.getAlignment(), LV.Volatile);
-
- // Get the right-hand side as a value of the same type.
- // FIXME: This assumes the right-hand side is an integer.
- bool isSigned = !TYPE_UNSIGNED(TREE_TYPE(lhs));
- RHS = CastToAnyType(RHS, isSigned, LoadType, isSigned);
-
- // Shift the right-hand side so that its bits are in the right position.
- unsigned FirstBitInVal = BYTES_BIG_ENDIAN ?
- LoadSizeInBits - LV.BitStart - LV.BitSize : LV.BitStart;
- if (FirstBitInVal) {
- Value *ShAmt = ConstantInt::get(LoadType, FirstBitInVal);
- RHS = Builder.CreateShl(RHS, ShAmt);
- }
- // Mask out any bits in the right-hand side that shouldn't be in the result.
- // The lower bits are zero already, so this only changes bits off the end.
- APInt Mask = APInt::getBitsSet(LoadSizeInBits, FirstBitInVal,
- FirstBitInVal + LV.BitSize);
- if (FirstBitInVal + LV.BitSize != LoadSizeInBits)
- RHS = Builder.CreateAnd(RHS, ConstantInt::get(Context, Mask));
-
- // Mask out those bits in the original value that are being replaced by the
- // right-hand side.
- Val = Builder.CreateAnd(Val, ConstantInt::get(Context, ~Mask));
-
- // Finally, merge the two together and store it.
- Val = Builder.CreateOr(Val, RHS);
- Builder.CreateAlignedStore(Val, Ptr, LV.getAlignment(), LV.Volatile);
-}
+ // Load and store the minimum number of bytes that covers the field.
+ unsigned LoadSizeInBits = LV.BitStart + LV.BitSize;
+ LoadSizeInBits = (unsigned)
+ RoundUpToAlignment(LoadSizeInBits, BITS_PER_UNIT);
+ Type *LoadType = IntegerType::get(Context, LoadSizeInBits);
+
+ // Load the bits.
+ Value *Ptr = Builder.CreateBitCast(LV.Ptr, LoadType->getPointerTo());
+ Value *Val = Builder.CreateAlignedLoad(Ptr, LV.getAlignment(),
+ LV.Volatile);
+
+ // Get the right-hand side as a value of the same type.
+ // FIXME: This assumes the right-hand side is an integer.
+ bool isSigned = !TYPE_UNSIGNED(TREE_TYPE(lhs));
+ RHS = CastToAnyType(RHS, isSigned, LoadType, isSigned);
+
+ // Shift the right-hand side so that its bits are in the right position.
+ unsigned FirstBitInVal = BYTES_BIG_ENDIAN ? LoadSizeInBits - LV.BitStart -
+ LV.BitSize : LV.BitStart;
+ if (FirstBitInVal) {
+ Value *ShAmt = ConstantInt::get(LoadType, FirstBitInVal);
+ RHS = Builder.CreateShl(RHS, ShAmt);
+ }
+ // Mask out any bits in the right-hand side that shouldn't be in the result.
+ // The lower bits are zero already, so this only changes bits off the end.
+ APInt Mask = APInt::getBitsSet(LoadSizeInBits, FirstBitInVal,
+ FirstBitInVal + LV.BitSize);
+ if (FirstBitInVal + LV.BitSize != LoadSizeInBits)
+ RHS = Builder.CreateAnd(RHS, ConstantInt::get(Context, Mask));
+
+ // Mask out those bits in the original value that are being replaced by the
+ // right-hand side.
+ Val = Builder.CreateAnd(Val, ConstantInt::get(Context, ~Mask));
+
+ // Finally, merge the two together and store it.
+ Val = Builder.CreateOr(Val, RHS);
+ Builder.CreateAlignedStore(Val, Ptr, LV.getAlignment(), LV.Volatile);
+ }
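The bitfield store above is a textbook read/modify/write: load enough bytes to cover the field, shift the new value into position, mask both sides, OR them together, and store the result back. The same dance with plain integers, as a sketch rather than the patch's code (assumes little-endian bit numbering and a field that fits in 32 bits):

    #include <cassert>
    #include <cstdint>

    // Replace the BitSize bits of Val starting at FirstBit with RHS,
    // leaving every other bit untouched.
    static uint32_t storeBitfield(uint32_t Val, uint32_t RHS,
                                  unsigned FirstBit, unsigned BitSize) {
      uint32_t Mask = (BitSize < 32 ? (1u << BitSize) - 1u : ~0u) << FirstBit;
      return (Val & ~Mask) | ((RHS << FirstBit) & Mask);
    }

    int main() {
      // Write 0x5 into bits [4,8) of an all-ones word.
      assert(storeBitfield(0xFFFFFFFFu, 0x5u, 4, 4) == 0xFFFFFF5Fu);
      return 0;
    }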
Modified: dragonegg/trunk/src/Debug.cpp
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/src/Debug.cpp?rev=173245&r1=173244&r2=173245&view=diff
==============================================================================
--- dragonegg/trunk/src/Debug.cpp (original)
+++ dragonegg/trunk/src/Debug.cpp Wed Jan 23 03:54:28 2013
@@ -58,12 +58,11 @@
using namespace llvm::dwarf;
#ifndef LLVMTESTDEBUG
-#define DEBUGASSERT(S) ((void)0)
+#define DEBUGASSERT(S) ((void) 0)
#else
#define DEBUGASSERT(S) assert(S)
#endif
-
/// DirectoryAndFile - Extract the directory and file name from a path. If no
/// directory is specified, then use the source working directory.
static void DirectoryAndFile(const std::string &FullPath,
@@ -115,18 +114,22 @@
/// NodeAlignInBits - Returns the alignment in bits stored in a tree node
/// regardless of whether the node is a TYPE or DECL.
static uint64_t NodeAlignInBits(tree Node) {
- if (isa<ERROR_MARK>(Node)) return BITS_PER_WORD;
- if (isa<TYPE>(Node)) return TYPE_ALIGN(Node);
- if (DECL_P(Node)) return DECL_ALIGN(Node);
+ if (isa<ERROR_MARK>(Node))
+ return BITS_PER_WORD;
+ if (isa<TYPE>(Node))
+ return TYPE_ALIGN(Node);
+ if (DECL_P(Node))
+ return DECL_ALIGN(Node);
return BITS_PER_WORD;
}
/// FieldType - Returns the type node of a structure member field.
///
static tree FieldType(tree Field) {
- if (isa<ERROR_MARK>(Field)) return integer_type_node;
- return DECL_BIT_FIELD_TYPE(Field) ?
- DECL_BIT_FIELD_TYPE(Field) : TREE_TYPE (Field);
+ if (isa<ERROR_MARK>(Field))
+ return integer_type_node;
+ return DECL_BIT_FIELD_TYPE(Field) ? DECL_BIT_FIELD_TYPE(Field) :
+ TREE_TYPE(Field);
}
/// GetNodeName - Returns the name stored in a node regardless of whether the
@@ -199,9 +202,8 @@
}
tree decl_name = DECL_NAME(Node);
- if (decl_name != NULL && IDENTIFIER_POINTER (decl_name) != NULL) {
- if (TREE_PUBLIC(Node) &&
- DECL_ASSEMBLER_NAME(Node) != DECL_NAME(Node) &&
+ if (decl_name != NULL && IDENTIFIER_POINTER(decl_name) != NULL) {
+ if (TREE_PUBLIC(Node) && DECL_ASSEMBLER_NAME(Node) != DECL_NAME(Node) &&
!DECL_ABSTRACT(Node)) {
return StringRef(IDENTIFIER_POINTER(DECL_ASSEMBLER_NAME(Node)));
}
@@ -210,13 +212,9 @@
}
DebugInfo::DebugInfo(Module *m)
-: DebugFactory(*m)
-, CurFullPath("")
-, PrevFullPath("")
-, CurLineNo(0)
-, PrevLineNo(0)
-, PrevBB(NULL)
-{}
+ : DebugFactory(*m), CurFullPath(""), PrevFullPath(""), CurLineNo(0),
+ PrevLineNo(0), PrevBB(NULL) {
+}
/// getFunctionName - Get function name for the given FnDecl. If the
/// name is constructed on demand (e.g. C++ destructor) then the name

@@ -241,11 +239,10 @@
void DebugInfo::EmitFunctionStart(tree FnDecl, Function *Fn) {
DIType FNType = getOrCreateType(TREE_TYPE(FnDecl));
- std::map<tree_node *, WeakVH >::iterator I = SPCache.find(FnDecl);
+ std::map<tree_node *, WeakVH>::iterator I = SPCache.find(FnDecl);
if (I != SPCache.end()) {
DISubprogram SPDecl(cast<MDNode>(I->second));
- DISubprogram SP =
- DebugFactory.CreateSubprogramDefinition(SPDecl);
+ DISubprogram SP = DebugFactory.CreateSubprogramDefinition(SPDecl);
SPDecl->replaceAllUsesWith(SP);
// Push function on region stack.
@@ -257,22 +254,20 @@
bool ArtificialFnWithAbstractOrigin = false;
// If this artificial function has abstract origin then put this function
// at module scope. The abstract copy will be placed in appropriate region.
- if (DECL_ARTIFICIAL (FnDecl)
- && DECL_ABSTRACT_ORIGIN (FnDecl)
- && DECL_ABSTRACT_ORIGIN (FnDecl) != FnDecl)
+ if (DECL_ARTIFICIAL(FnDecl) && DECL_ABSTRACT_ORIGIN(FnDecl) &&
+ DECL_ABSTRACT_ORIGIN(FnDecl) != FnDecl)
ArtificialFnWithAbstractOrigin = true;
DIDescriptor SPContext = ArtificialFnWithAbstractOrigin ?
- getOrCreateFile(main_input_filename) :
- findRegion (DECL_CONTEXT(FnDecl));
+ getOrCreateFile(main_input_filename) :
+ findRegion(DECL_CONTEXT(FnDecl));
// Creating context may have triggered creation of this SP descriptor. So
// check the cache again.
I = SPCache.find(FnDecl);
if (I != SPCache.end()) {
DISubprogram SPDecl(cast<MDNode>(I->second));
- DISubprogram SP =
- DebugFactory.CreateSubprogramDefinition(SPDecl);
+ DISubprogram SP = DebugFactory.CreateSubprogramDefinition(SPDecl);
SPDecl->replaceAllUsesWith(SP);
// Push function on region stack.
@@ -290,26 +285,22 @@
unsigned Virtuality = 0;
unsigned VIndex = 0;
DIType ContainingType;
- if (DECL_VINDEX (FnDecl) &&
- DECL_CONTEXT (FnDecl) && isa<TYPE>((DECL_CONTEXT (FnDecl)))) { // Workaround GCC PR42653
- if (host_integerp (DECL_VINDEX (FnDecl), 0))
- VIndex = tree_low_cst (DECL_VINDEX (FnDecl), 0);
+ if (DECL_VINDEX(FnDecl) && DECL_CONTEXT(FnDecl) &&
+ isa<TYPE>((DECL_CONTEXT(FnDecl)))) { // Workaround GCC PR42653
+ if (host_integerp(DECL_VINDEX(FnDecl), 0))
+ VIndex = tree_low_cst(DECL_VINDEX(FnDecl), 0);
Virtuality = dwarf::DW_VIRTUALITY_virtual;
- ContainingType = getOrCreateType(DECL_CONTEXT (FnDecl));
+ ContainingType = getOrCreateType(DECL_CONTEXT(FnDecl));
}
StringRef FnName = getFunctionName(FnDecl);
- DISubprogram SP =
- DebugFactory.CreateSubprogram(SPContext,
- FnName, FnName,
- LinkageName,
- getOrCreateFile(Loc.file), lineno,
- FNType,
- Fn->hasInternalLinkage(),
- true /*definition*/,
- Virtuality, VIndex, ContainingType,
- DECL_ARTIFICIAL (FnDecl), optimize);
+ DISubprogram SP = DebugFactory.CreateSubprogram(
+ SPContext, FnName, FnName, LinkageName,
+ getOrCreateFile(Loc.file), lineno, FNType,
+ Fn->hasInternalLinkage(), true /*definition*/,
+ Virtuality, VIndex, ContainingType,
+ DECL_ARTIFICIAL(FnDecl), optimize);
SPCache[FnDecl] = WeakVH(SP);
@@ -320,15 +311,14 @@
/// getOrCreateNameSpace - Get name space descriptor for the tree node.
DINameSpace DebugInfo::getOrCreateNameSpace(tree Node, DIDescriptor Context) {
- std::map<tree_node *, WeakVH >::iterator I =
- NameSpaceCache.find(Node);
+ std::map<tree_node *, WeakVH>::iterator I = NameSpaceCache.find(Node);
if (I != NameSpaceCache.end())
return DINameSpace(cast<MDNode>(I->second));
expanded_location Loc = GetNodeLocation(Node, false);
- DINameSpace DNS =
- DebugFactory.CreateNameSpace(Context, GetNodeName(Node),
- getOrCreateFile(Loc.file), Loc.line);
+ DINameSpace DNS = DebugFactory.CreateNameSpace(Context, GetNodeName(Node),
+ getOrCreateFile(Loc.file),
+ Loc.line);
NameSpaceCache[Node] = WeakVH(DNS);
return DNS;
@@ -347,13 +337,13 @@
if (isa<TYPE>(Node)) {
DIType Ty = getOrCreateType(Node);
return DIDescriptor(Ty);
- } else if (DECL_P (Node)) {
+ } else if (DECL_P(Node)) {
if (isa<NAMESPACE_DECL>(Node)) {
DIDescriptor NSContext = findRegion(DECL_CONTEXT(Node));
DINameSpace NS = getOrCreateNameSpace(Node, NSContext);
return DIDescriptor(NS);
}
- return findRegion (DECL_CONTEXT (Node));
+ return findRegion(DECL_CONTEXT(Node));
}
// Otherwise main compile unit covers everything.
@@ -374,8 +364,8 @@
}
/// EmitDeclare - Constructs the debug code for allocation of a new variable.
-void DebugInfo::EmitDeclare(tree decl, unsigned Tag, StringRef Name,
- tree type, Value *AI, LLVMBuilder &Builder) {
+void DebugInfo::EmitDeclare(tree decl, unsigned Tag, StringRef Name, tree type,
+ Value *AI, LLVMBuilder &Builder) {
// Ignore compiler generated temporaries.
if (DECL_IGNORED_P(decl))
@@ -390,18 +380,17 @@
DIType Ty = getOrCreateType(type);
if (!Ty && isa<OFFSET_TYPE>(type))
Ty = createPointerType(TREE_TYPE(type));
- if (Ty && DECL_ARTIFICIAL (decl))
- Ty = DebugFactory.CreateArtificialType(Ty);
+ if (Ty && DECL_ARTIFICIAL(decl))
+ Ty = DebugFactory.CreateArtificialType(Ty);
// If type info is not available then do not emit debug info for this var.
if (!Ty)
return;
- llvm::DIVariable D =
- DebugFactory.CreateVariable(Tag, VarScope,
- Name, getOrCreateFile(Loc.file),
- Loc.line, Ty, optimize);
+ llvm::DIVariable D = DebugFactory.CreateVariable(Tag, VarScope, Name,
+ getOrCreateFile(Loc.file),
+ Loc.line, Ty, optimize);
- Instruction *Call =
- DebugFactory.InsertDeclare(AI, D, Builder.GetInsertBlock());
+ Instruction *Call = DebugFactory.InsertDeclare(AI, D,
+ Builder.GetInsertBlock());
Call->setDebugLoc(DebugLoc::get(Loc.line, 0, VarScope));
}
@@ -409,21 +398,21 @@
/// EmitStopPoint - Set current source location.
void DebugInfo::EmitStopPoint(BasicBlock *CurBB, LLVMBuilder &Builder) {
// Don't bother if things are the same as last time.
- if (PrevLineNo == CurLineNo &&
- PrevBB == CurBB &&
- (PrevFullPath == CurFullPath ||
- !strcmp(PrevFullPath, CurFullPath))) return;
- if (!CurFullPath[0] || CurLineNo == 0) return;
+ if (PrevLineNo == CurLineNo && PrevBB == CurBB &&
+ (PrevFullPath == CurFullPath || !strcmp(PrevFullPath, CurFullPath)))
+ return;
+ if (!CurFullPath[0] || CurLineNo == 0)
+ return;
// Update last state.
PrevFullPath = CurFullPath;
PrevLineNo = CurLineNo;
PrevBB = CurBB;
- if (RegionStack.empty())
- return;
- MDNode *Scope = cast<MDNode>(RegionStack.back());
- Builder.SetCurrentDebugLocation(DebugLoc::get(CurLineNo,0/*col*/,Scope));
+ if (RegionStack.empty())
+ return;
+ MDNode *Scope = cast<MDNode>(RegionStack.back());
+ Builder.SetCurrentDebugLocation(DebugLoc::get(CurLineNo, 0 /*col*/, Scope));
}
/// EmitGlobalVariable - Emit information about a global variable.
@@ -441,14 +430,13 @@
}
StringRef LinkageName;
// The gdb does not expect linkage names for function local statics.
- if (DECL_CONTEXT (decl))
- if (!isa<FUNCTION_DECL>(DECL_CONTEXT (decl)))
+ if (DECL_CONTEXT(decl))
+ if (!isa<FUNCTION_DECL>(DECL_CONTEXT(decl)))
LinkageName = GV->getName();
- DebugFactory.CreateGlobalVariable(findRegion(DECL_CONTEXT(decl)),
- DispName, DispName, LinkageName,
- getOrCreateFile(Loc.file), Loc.line,
- TyD, GV->hasInternalLinkage(),
- true/*definition*/, GV);
+ DebugFactory.CreateGlobalVariable(
+ findRegion(DECL_CONTEXT(decl)), DispName, DispName, LinkageName,
+ getOrCreateFile(Loc.file), Loc.line, TyD, GV->hasInternalLinkage(),
+ true /*definition*/, GV);
}
/// createBasicType - Create BasicType.
@@ -462,13 +450,12 @@
switch (TREE_CODE(type)) {
case INTEGER_TYPE:
- if (TYPE_STRING_FLAG (type)) {
- if (TYPE_UNSIGNED (type))
+ if (TYPE_STRING_FLAG(type)) {
+ if (TYPE_UNSIGNED(type))
Encoding = DW_ATE_unsigned_char;
else
Encoding = DW_ATE_signed_char;
- }
- else if (TYPE_UNSIGNED (type))
+ } else if (TYPE_UNSIGNED(type))
Encoding = DW_ATE_unsigned;
else
Encoding = DW_ATE_signed;
@@ -477,13 +464,13 @@
Encoding = DW_ATE_float;
break;
case COMPLEX_TYPE:
- Encoding = isa<REAL_TYPE>(TREE_TYPE(type)) ?
- DW_ATE_complex_float : DW_ATE_lo_user;
+ Encoding = isa<REAL_TYPE>(TREE_TYPE(type)) ? DW_ATE_complex_float :
+ DW_ATE_lo_user;
break;
case BOOLEAN_TYPE:
Encoding = DW_ATE_boolean;
break;
- default: {
+ default : {
DEBUGASSERT(0 && "Basic type case missing");
Encoding = DW_ATE_signed;
Size = BITS_PER_WORD;
@@ -492,23 +479,22 @@
}
}
- return
- DebugFactory.CreateBasicType(getOrCreateFile(main_input_filename),
- TypeName,
- getOrCreateFile(main_input_filename),
- 0, Size, Align,
- 0, 0, Encoding);
+ return DebugFactory.CreateBasicType(
+ getOrCreateFile(main_input_filename), TypeName,
+ getOrCreateFile(main_input_filename), 0, Size, Align, 0, 0, Encoding);
}
/// isArtificialArgumentType - Return true if arg_type represents artificial,
/// i.e. "this" in c++, argument.
static bool isArtificialArgumentType(tree arg_type, tree method_type) {
- if (!isa<METHOD_TYPE>(method_type)) return false;
- if (!isa<POINTER_TYPE>(arg_type)) return false;
- if (TREE_TYPE (arg_type) == TYPE_METHOD_BASETYPE (method_type))
+ if (!isa<METHOD_TYPE>(method_type))
+ return false;
+ if (!isa<POINTER_TYPE>(arg_type))
+ return false;
+ if (TREE_TYPE(arg_type) == TYPE_METHOD_BASETYPE(method_type))
return true;
- if (main_type (arg_type) && main_type (arg_type) != TREE_TYPE (arg_type)
- && (main_type (arg_type) == TYPE_METHOD_BASETYPE (method_type)))
+ if (main_type(arg_type) && main_type(arg_type) != TREE_TYPE(arg_type) &&
+ (main_type(arg_type) == TYPE_METHOD_BASETYPE(method_type)))
return true;
return false;
}
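Concretely, the "artificial argument" being detected above is the implicit this of a C++ member function, for example (illustrative only):

    struct Foo {
      void bar(int n); // effectively void bar(Foo *this, int n);
    };                 // the Foo* is the artificial argument debuggers hide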
@@ -535,7 +521,8 @@
bool ProcessedFirstArg = false;
for (tree arg = TYPE_ARG_TYPES(type); arg; arg = TREE_CHAIN(arg)) {
tree formal_type = TREE_VALUE(arg);
- if (formal_type == void_type_node) break;
+ if (formal_type == void_type_node)
+ break;
llvm::DIType FormalType = getOrCreateType(formal_type);
if (!ProcessedFirstArg && isArtificialArgumentType(formal_type, type)) {
DIType AFormalType = DebugFactory.CreateArtificialType(FormalType);
@@ -546,21 +533,19 @@
ProcessedFirstArg = true;
}
- llvm::DIArray EltTypeArray =
- DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+ llvm::DIArray EltTypeArray = DebugFactory.GetOrCreateArray(EltTys.data(),
+ EltTys.size());
RegionStack.pop_back();
std::map<tree_node *, WeakVH>::iterator RI = RegionMap.find(type);
if (RI != RegionMap.end())
RegionMap.erase(RI);
- llvm::DIType RealType =
- DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_subroutine_type,
- findRegion(TYPE_CONTEXT(type)),
- StringRef(),
- getOrCreateFile(main_input_filename),
- 0, 0, 0, 0, 0,
- llvm::DIType(), EltTypeArray);
+ llvm::DIType RealType = DebugFactory.CreateCompositeType(
+ llvm::dwarf::DW_TAG_subroutine_type,
+ findRegion(TYPE_CONTEXT(type)), StringRef(),
+ getOrCreateFile(main_input_filename), 0, 0, 0, 0,
+ 0, llvm::DIType(), EltTypeArray);
// Now that we have a real decl for the struct, replace anything using the
// old decl with the new one. This will recursively update the debug info.
@@ -575,40 +560,30 @@
DIType FromTy = getOrCreateType(TREE_TYPE(type));
// type* and type&
// FIXME: Should BLOCK_POINTER_TYP have its own DW_TAG?
- unsigned Tag = isa<REFERENCE_TYPE>(type) ?
- DW_TAG_reference_type: DW_TAG_pointer_type;
+ unsigned Tag = isa<REFERENCE_TYPE>(type) ? DW_TAG_reference_type :
+ DW_TAG_pointer_type;
unsigned Flags = 0;
// Check if this pointer type has a name.
if (tree TyName = TYPE_NAME(type))
if (isa<TYPE_DECL>(TyName) && !DECL_ORIGINAL_TYPE(TyName)) {
expanded_location TypeNameLoc = GetNodeLocation(TyName);
- DIType Ty =
- DebugFactory.CreateDerivedType(Tag, findRegion(DECL_CONTEXT(TyName)),
- GetNodeName(TyName),
- getOrCreateFile(TypeNameLoc.file),
- TypeNameLoc.line,
- 0 /*size*/,
- 0 /*align*/,
- 0 /*offset */,
- 0 /*flags*/,
- FromTy);
+ DIType Ty = DebugFactory.CreateDerivedType(
+ Tag, findRegion(DECL_CONTEXT(TyName)),
+ GetNodeName(TyName), getOrCreateFile(TypeNameLoc.file),
+ TypeNameLoc.line, 0 /*size*/, 0 /*align*/, 0 /*offset */,
+ 0 /*flags*/, FromTy);
TypeCache[TyName] = WeakVH(Ty);
return Ty;
}
StringRef PName = FromTy.getName();
- DIType PTy =
- DebugFactory.CreateDerivedType(Tag, findRegion(TYPE_CONTEXT(type)),
- Tag == DW_TAG_pointer_type ?
- StringRef() : PName,
- getOrCreateFile(main_input_filename),
- 0 /*line no*/,
- NodeSizeInBits(type),
- NodeAlignInBits(type),
- 0 /*offset */,
- Flags,
- FromTy);
+ DIType PTy = DebugFactory.CreateDerivedType(
+ Tag, findRegion(TYPE_CONTEXT(type)),
+ Tag == DW_TAG_pointer_type ? StringRef() : PName,
+ getOrCreateFile(main_input_filename), 0 /*line no*/,
+ NodeSizeInBits(type), NodeAlignInBits(type), 0 /*offset */,
+ Flags, FromTy);
return PTy;
}
@@ -616,8 +591,8 @@
DIType DebugInfo::createArrayType(tree type) {
// type[n][m]...[p]
- if (isa<ARRAY_TYPE>(type)
- && TYPE_STRING_FLAG(type) && isa<INTEGER_TYPE>(TREE_TYPE(type))) {
+ if (isa<ARRAY_TYPE>(type) && TYPE_STRING_FLAG(type) &&
+ isa<INTEGER_TYPE>(TREE_TYPE(type))) {
DEBUGASSERT(0 && "Don't support pascal strings");
return DIType();
}
@@ -654,17 +629,13 @@
Subscripts.push_back(DebugFactory.GetOrCreateSubrange(0, Length));
}
- llvm::DIArray SubscriptArray =
- DebugFactory.GetOrCreateArray(Subscripts.data(), Subscripts.size());
+ llvm::DIArray SubscriptArray = DebugFactory.GetOrCreateArray(
+ Subscripts.data(), Subscripts.size());
expanded_location Loc = GetNodeLocation(type);
- return DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_array_type,
- findRegion(TYPE_CONTEXT(type)),
- StringRef(),
- getOrCreateFile(Loc.file), 0,
- NodeSizeInBits(type),
- NodeAlignInBits(type), 0, 0,
- getOrCreateType(EltTy),
- SubscriptArray);
+ return DebugFactory.CreateCompositeType(
+ llvm::dwarf::DW_TAG_array_type, findRegion(TYPE_CONTEXT(type)),
+ StringRef(), getOrCreateFile(Loc.file), 0, NodeSizeInBits(type),
+ NodeAlignInBits(type), 0, 0, getOrCreateType(EltTy), SubscriptArray);
}
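For reference, a multidimensional array contributes one subrange descriptor per dimension, so a declaration like the following is described by two GetOrCreateSubrange results, [0,3] and [0,7] (illustrative only):

    int m[4][8]; // DW_TAG_array_type with subranges 0..3 and 0..7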
/// createEnumType - Create EnumType.
@@ -683,22 +654,19 @@
}
}
- llvm::DIArray EltArray =
- DebugFactory.GetOrCreateArray(Elements.data(), Elements.size());
+ llvm::DIArray EltArray = DebugFactory.GetOrCreateArray(Elements.data(),
+ Elements.size());
expanded_location Loc = { NULL, 0, 0, false };
if (TYPE_SIZE(type))
// Incomplete enums do not have any location info.
Loc = GetNodeLocation(TREE_CHAIN(type), false);
- return DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_enumeration_type,
- findRegion(TYPE_CONTEXT(type)),
- GetNodeName(type),
- getOrCreateFile(Loc.file),
- Loc.line,
- NodeSizeInBits(type),
- NodeAlignInBits(type), 0, 0,
- llvm::DIType(), EltArray);
+ return DebugFactory.CreateCompositeType(
+ llvm::dwarf::DW_TAG_enumeration_type, findRegion(TYPE_CONTEXT(type)),
+ GetNodeName(type), getOrCreateFile(Loc.file), Loc.line,
+ NodeSizeInBits(type), NodeAlignInBits(type), 0, 0, llvm::DIType(),
+ EltArray);
}
/// createStructType - Create StructType for struct or union or class.
@@ -706,27 +674,27 @@
// struct { a; b; ... z; }; | union { a; b; ... z; };
unsigned Tag = isa<RECORD_TYPE>(type) ? DW_TAG_structure_type :
- DW_TAG_union_type;
+ DW_TAG_union_type;
unsigned RunTimeLang = 0;
-//TODO if (TYPE_LANG_SPECIFIC (type)
-//TODO && lang_hooks.types.is_runtime_specific_type (type))
-//TODO {
-//TODO unsigned CULang = TheCU.getLanguage();
-//TODO switch (CULang) {
-//TODO case DW_LANG_ObjC_plus_plus :
-//TODO RunTimeLang = DW_LANG_ObjC_plus_plus;
-//TODO break;
-//TODO case DW_LANG_ObjC :
-//TODO RunTimeLang = DW_LANG_ObjC;
-//TODO break;
-//TODO case DW_LANG_C_plus_plus :
-//TODO RunTimeLang = DW_LANG_C_plus_plus;
-//TODO break;
-//TODO default:
-//TODO break;
-//TODO }
-//TODO }
+ //TODO if (TYPE_LANG_SPECIFIC (type)
+ //TODO && lang_hooks.types.is_runtime_specific_type (type))
+ //TODO {
+ //TODO unsigned CULang = TheCU.getLanguage();
+ //TODO switch (CULang) {
+ //TODO case DW_LANG_ObjC_plus_plus :
+ //TODO RunTimeLang = DW_LANG_ObjC_plus_plus;
+ //TODO break;
+ //TODO case DW_LANG_ObjC :
+ //TODO RunTimeLang = DW_LANG_ObjC;
+ //TODO break;
+ //TODO case DW_LANG_C_plus_plus :
+ //TODO RunTimeLang = DW_LANG_C_plus_plus;
+ //TODO break;
+ //TODO default:
+ //TODO break;
+ //TODO }
+ //TODO }
// Records and classes and unions can all be recursive. To handle them,
// we first generate a debug descriptor for the struct as a forward
@@ -737,12 +705,12 @@
// final definition.
expanded_location Loc = GetNodeLocation(TREE_CHAIN(type), false);
unsigned SFlags = 0;
- DIDescriptor TyContext = findRegion(TYPE_CONTEXT(type));
+ DIDescriptor TyContext = findRegion(TYPE_CONTEXT(type));
// Check if this type is created while creating context information
// descriptor.
{
- std::map<tree_node *, WeakVH >::iterator I = TypeCache.find(type);
+ std::map<tree_node *, WeakVH>::iterator I = TypeCache.find(type);
if (I != TypeCache.end())
if (MDNode *TN = dyn_cast_or_null<MDNode>(&*I->second))
return DIType(TN);
@@ -751,15 +719,10 @@
// forward declaration,
if (TYPE_SIZE(type) == 0) {
llvm::DICompositeType FwdDecl =
- DebugFactory.CreateCompositeType(Tag,
- TyContext,
- GetNodeName(type),
- getOrCreateFile(Loc.file),
- Loc.line,
- 0, 0, 0,
- SFlags | llvm::DIType::FlagFwdDecl,
- llvm::DIType(), llvm::DIArray(),
- RunTimeLang);
+ DebugFactory.CreateCompositeType(
+ Tag, TyContext, GetNodeName(type), getOrCreateFile(Loc.file),
+ Loc.line, 0, 0, 0, SFlags | llvm::DIType::FlagFwdDecl,
+ llvm::DIType(), llvm::DIArray(), RunTimeLang);
return FwdDecl;
}
@@ -778,17 +741,17 @@
llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;
if (tree binfo = TYPE_BINFO(type)) {
- VEC(tree,gc) *accesses = BINFO_BASE_ACCESSES (binfo);
+ VEC(tree, gc) * accesses = BINFO_BASE_ACCESSES(binfo);
for (unsigned i = 0, e = BINFO_N_BASE_BINFOS(binfo); i != e; ++i) {
tree BInfo = BINFO_BASE_BINFO(binfo, i);
- tree BInfoType = BINFO_TYPE (BInfo);
+ tree BInfoType = BINFO_TYPE(BInfo);
DIType BaseClass = getOrCreateType(BInfoType);
unsigned BFlags = 0;
- if (BINFO_VIRTUAL_P (BInfo))
+ if (BINFO_VIRTUAL_P(BInfo))
BFlags = llvm::DIType::FlagVirtual;
if (accesses) {
- tree access = VEC_index (tree, accesses, i);
+ tree access = VEC_index(tree, accesses, i);
if (access == access_protected_node)
BFlags |= llvm::DIType::FlagProtected;
else if (access == access_private_node)
@@ -798,26 +761,24 @@
// Check for zero BINFO_OFFSET.
// FIXME : Is this correct ?
unsigned Offset = BINFO_OFFSET(BInfo) ?
- getInt64(BINFO_OFFSET(BInfo), true)*8 : 0;
+ getInt64(BINFO_OFFSET(BInfo), true) * 8 : 0;
- if (BINFO_VIRTUAL_P (BInfo))
- Offset = 0 - getInt64(BINFO_VPTR_FIELD (BInfo), false);
+ if (BINFO_VIRTUAL_P(BInfo))
+ Offset = 0 - getInt64(BINFO_VPTR_FIELD(BInfo), false);
// FIXME : name, size, align etc...
- DIType DTy =
- DebugFactory.CreateDerivedType(DW_TAG_inheritance,
- findRegion(TYPE_CONTEXT(type)), StringRef(),
- llvm::DIFile(), 0,0,0,
- Offset,
- BFlags, BaseClass);
+ DIType DTy = DebugFactory.CreateDerivedType(
+ DW_TAG_inheritance, findRegion(TYPE_CONTEXT(type)),
+ StringRef(), llvm::DIFile(), 0, 0, 0, Offset, BFlags,
+ BaseClass);
EltTys.push_back(DTy);
}
}
// Now add members of this class.
- for (tree Member = TYPE_FIELDS(type); Member;
- Member = TREE_CHAIN(Member)) {
+ for (tree Member = TYPE_FIELDS(type); Member; Member = TREE_CHAIN(Member)) {
// Should we skip.
- if (DECL_P(Member) && DECL_IGNORED_P(Member)) continue;
+ if (DECL_P(Member) && DECL_IGNORED_P(Member))
+ continue;
// Get the location of the member.
expanded_location MemLoc = GetNodeLocation(Member, false);
@@ -833,7 +794,7 @@
continue;
/* Ignore nameless fields. */
- if (DECL_NAME (Member) == NULL_TREE &&
+ if (DECL_NAME(Member) == NULL_TREE &&
!isa<RECORD_OR_UNION_TYPE>(TREE_TYPE(Member)))
continue;
@@ -847,28 +808,26 @@
else if (TREE_PRIVATE(Member))
MFlags = llvm::DIType::FlagPrivate;
- DIType DTy =
- DebugFactory.CreateDerivedType(DW_TAG_member,
- findRegion(DECL_CONTEXT(Member)),
- MemberName,
- getOrCreateFile(MemLoc.file),
- MemLoc.line, NodeSizeInBits(Member),
- NodeAlignInBits(FieldNodeType),
- int_bit_position(Member),
- MFlags, MemberType);
+ DIType DTy = DebugFactory.CreateDerivedType(
+ DW_TAG_member, findRegion(DECL_CONTEXT(Member)),
+ MemberName, getOrCreateFile(MemLoc.file), MemLoc.line,
+ NodeSizeInBits(Member), NodeAlignInBits(FieldNodeType),
+ int_bit_position(Member), MFlags, MemberType);
EltTys.push_back(DTy);
}
- for (tree Member = TYPE_METHODS(type); Member;
- Member = TREE_CHAIN(Member)) {
+ for (tree Member = TYPE_METHODS(type); Member; Member = TREE_CHAIN(Member)) {
- if (DECL_ABSTRACT_ORIGIN (Member)) continue;
+ if (DECL_ABSTRACT_ORIGIN(Member))
+ continue;
// Ignore unused artificial members.
- if (DECL_ARTIFICIAL (Member) && !TREE_USED (Member)) continue;
+ if (DECL_ARTIFICIAL(Member) && !TREE_USED(Member))
+ continue;
// In C++, TEMPLATE_DECLs are marked Ignored, and should be.
- if (DECL_P (Member) && DECL_IGNORED_P (Member)) continue;
+ if (DECL_P(Member) && DECL_IGNORED_P(Member))
+ continue;
- std::map<tree_node *, WeakVH >::iterator I = SPCache.find(Member);
+ std::map<tree_node *, WeakVH>::iterator I = SPCache.find(Member);
if (I != SPCache.end())
EltTys.push_back(DISubprogram(cast<MDNode>(I->second)));
else {
@@ -880,27 +839,25 @@
unsigned Virtuality = 0;
unsigned VIndex = 0;
DIType ContainingType;
- if (DECL_VINDEX (Member)) {
- if (host_integerp (DECL_VINDEX (Member), 0))
- VIndex = tree_low_cst (DECL_VINDEX (Member), 0);
+ if (DECL_VINDEX(Member)) {
+ if (host_integerp(DECL_VINDEX(Member), 0))
+ VIndex = tree_low_cst(DECL_VINDEX(Member), 0);
Virtuality = dwarf::DW_VIRTUALITY_virtual;
ContainingType = getOrCreateType(DECL_CONTEXT(Member));
}
- DISubprogram SP =
- DebugFactory.CreateSubprogram(findRegion(DECL_CONTEXT(Member)),
- MemberName, MemberName,
- LinkageName,
- getOrCreateFile(MemLoc.file),
- MemLoc.line, SPTy, false, false,
- Virtuality, VIndex, ContainingType,
- DECL_ARTIFICIAL (Member), optimize);
+ DISubprogram SP = DebugFactory.CreateSubprogram(
+ findRegion(DECL_CONTEXT(Member)), MemberName,
+ MemberName, LinkageName,
+ getOrCreateFile(MemLoc.file), MemLoc.line, SPTy,
+ false, false, Virtuality, VIndex, ContainingType,
+ DECL_ARTIFICIAL(Member), optimize);
EltTys.push_back(SP);
SPCache[Member] = WeakVH(SP);
}
}
- llvm::DIArray Elements =
- DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+ llvm::DIArray Elements = DebugFactory.GetOrCreateArray(EltTys.data(),
+ EltTys.size());
RegionStack.pop_back();
std::map<tree_node *, WeakVH>::iterator RI = RegionMap.find(type);
@@ -908,18 +865,16 @@
RegionMap.erase(RI);
llvm::DIType ContainingType;
- if (TYPE_VFIELD (type)) {
- tree vtype = DECL_FCONTEXT (TYPE_VFIELD (type));
+ if (TYPE_VFIELD(type)) {
+ tree vtype = DECL_FCONTEXT(TYPE_VFIELD(type));
ContainingType = getOrCreateType(vtype);
}
llvm::DICompositeType RealDecl =
- DebugFactory.CreateCompositeType(Tag, findRegion(TYPE_CONTEXT(type)),
- GetNodeName(type),
- getOrCreateFile(Loc.file),
- Loc.line,
- NodeSizeInBits(type), NodeAlignInBits(type),
- 0, SFlags, llvm::DIType(), Elements,
- RunTimeLang, ContainingType);
+ DebugFactory.CreateCompositeType(
+ Tag, findRegion(TYPE_CONTEXT(type)), GetNodeName(type),
+ getOrCreateFile(Loc.file), Loc.line, NodeSizeInBits(type),
+ NodeAlignInBits(type), 0, SFlags, llvm::DIType(), Elements,
+ RunTimeLang, ContainingType);
RegionMap[type] = WeakVH(RealDecl);
// Now that we have a real decl for the struct, replace anything using the
@@ -934,52 +889,37 @@
DIType Ty;
if (tree TyDef = TYPE_NAME(type)) {
- std::map<tree_node *, WeakVH >::iterator I = TypeCache.find(TyDef);
+ std::map<tree_node *, WeakVH>::iterator I = TypeCache.find(TyDef);
if (I != TypeCache.end())
if (I->second)
return DIType(cast<MDNode>(I->second));
- if (isa<TYPE_DECL>(TyDef) && DECL_ORIGINAL_TYPE(TyDef)) {
+ if (isa<TYPE_DECL>(TyDef) && DECL_ORIGINAL_TYPE(TyDef)) {
expanded_location TypeDefLoc = GetNodeLocation(TyDef);
- Ty = DebugFactory.CreateDerivedType(DW_TAG_typedef,
- findRegion(DECL_CONTEXT(TyDef)),
- GetNodeName(TyDef),
- getOrCreateFile(TypeDefLoc.file),
- TypeDefLoc.line,
- 0 /*size*/,
- 0 /*align*/,
- 0 /*offset */,
- 0 /*flags*/,
- MainTy);
+ Ty = DebugFactory.CreateDerivedType(
+ DW_TAG_typedef, findRegion(DECL_CONTEXT(TyDef)),
+ GetNodeName(TyDef), getOrCreateFile(TypeDefLoc.file),
+ TypeDefLoc.line, 0 /*size*/, 0 /*align*/, 0 /*offset */,
+ 0 /*flags*/, MainTy);
TypeCache[TyDef] = WeakVH(Ty);
return Ty;
}
}
if (TYPE_VOLATILE(type)) {
- Ty = DebugFactory.CreateDerivedType(DW_TAG_volatile_type,
- findRegion(TYPE_CONTEXT(type)),
- StringRef(),
- getOrCreateFile(main_input_filename),
- 0 /*line no*/,
- NodeSizeInBits(type),
- NodeAlignInBits(type),
- 0 /*offset */,
- 0 /* flags */,
- MainTy);
+ Ty = DebugFactory.CreateDerivedType(
+ DW_TAG_volatile_type, findRegion(TYPE_CONTEXT(type)), StringRef(),
+ getOrCreateFile(main_input_filename), 0 /*line no*/,
+ NodeSizeInBits(type), NodeAlignInBits(type), 0 /*offset */,
+ 0 /* flags */, MainTy);
MainTy = Ty;
}
if (TYPE_READONLY(type))
- Ty = DebugFactory.CreateDerivedType(DW_TAG_const_type,
- findRegion(TYPE_CONTEXT(type)),
- StringRef(),
- getOrCreateFile(main_input_filename),
- 0 /*line no*/,
- NodeSizeInBits(type),
- NodeAlignInBits(type),
- 0 /*offset */,
- 0 /* flags */,
- MainTy);
+ Ty = DebugFactory.CreateDerivedType(
+ DW_TAG_const_type, findRegion(TYPE_CONTEXT(type)), StringRef(),
+ getOrCreateFile(main_input_filename), 0 /*line no*/,
+ NodeSizeInBits(type), NodeAlignInBits(type), 0 /*offset */,
+ 0 /* flags */, MainTy);
if (TYPE_VOLATILE(type) || TYPE_READONLY(type)) {
TypeCache[type] = WeakVH(Ty);
@@ -993,16 +933,17 @@
/// getOrCreateType - Get the type from the cache or create a new type if
/// necessary.
DIType DebugInfo::getOrCreateType(tree type) {
- DEBUGASSERT(type != NULL_TREE && type != error_mark_node &&
- "Not a type.");
- if (type == NULL_TREE || type == error_mark_node) return DIType();
+ DEBUGASSERT(type != NULL_TREE && type != error_mark_node && "Not a type.");
+ if (type == NULL_TREE || type == error_mark_node)
+ return DIType();
// Should only be void if a pointer/reference/return type. Returning NULL
// allows the caller to produce a non-derived type.
- if (isa<VOID_TYPE>(type)) return DIType();
+ if (isa<VOID_TYPE>(type))
+ return DIType();
// Check to see if the compile unit already has created this type.
- std::map<tree_node *, WeakVH >::iterator I = TypeCache.find(type);
+ std::map<tree_node *, WeakVH>::iterator I = TypeCache.find(type);
if (I != TypeCache.end())
if (I->second)
return DIType(cast<MDNode>(I->second));
@@ -1017,56 +958,56 @@
// Work out details of type.
switch (TREE_CODE(type)) {
- case ERROR_MARK:
- case LANG_TYPE:
- case TRANSLATION_UNIT_DECL:
- default: {
- DEBUGASSERT(0 && "Unsupported type");
- return DIType();
- }
+ case ERROR_MARK:
+ case LANG_TYPE:
+ case TRANSLATION_UNIT_DECL:
+ default : {
+ DEBUGASSERT(0 && "Unsupported type");
+ return DIType();
+ }
#if (GCC_MINOR > 5)
- case NULLPTR_TYPE:
+ case NULLPTR_TYPE:
#endif
- case POINTER_TYPE:
- case REFERENCE_TYPE:
- // Do not cache pointer type. The pointer may point to forward declared
- // struct.
- return createPointerType(type);
-
- case OFFSET_TYPE: {
- // gen_type_die(TYPE_OFFSET_BASETYPE(type), context_die);
- // gen_type_die(TREE_TYPE(type), context_die);
- // gen_ptr_to_mbr_type_die(type, context_die);
- // PR 7104
- break;
- }
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ // Do not cache pointer type. The pointer may point to forward declared
+ // struct.
+ return createPointerType(type);
+
+ case OFFSET_TYPE: {
+ // gen_type_die(TYPE_OFFSET_BASETYPE(type), context_die);
+ // gen_type_die(TREE_TYPE(type), context_die);
+ // gen_ptr_to_mbr_type_die(type, context_die);
+ // PR 7104
+ break;
+ }
- case FUNCTION_TYPE:
- case METHOD_TYPE:
- Ty = createMethodType(type);
- break;
+ case FUNCTION_TYPE:
+ case METHOD_TYPE:
+ Ty = createMethodType(type);
+ break;
- case VECTOR_TYPE:
- case ARRAY_TYPE:
- Ty = createArrayType(type);
- break;
+ case VECTOR_TYPE:
+ case ARRAY_TYPE:
+ Ty = createArrayType(type);
+ break;
- case ENUMERAL_TYPE:
- Ty = createEnumType(type);
- break;
+ case ENUMERAL_TYPE:
+ Ty = createEnumType(type);
+ break;
- case RECORD_TYPE:
- case QUAL_UNION_TYPE:
- case UNION_TYPE:
- return createStructType(type);
-
- case INTEGER_TYPE:
- case REAL_TYPE:
- case COMPLEX_TYPE:
- case BOOLEAN_TYPE:
- Ty = createBasicType(type);
- break;
+ case RECORD_TYPE:
+ case QUAL_UNION_TYPE:
+ case UNION_TYPE:
+ return createStructType(type);
+
+ case INTEGER_TYPE:
+ case REAL_TYPE:
+ case COMPLEX_TYPE:
+ case BOOLEAN_TYPE:
+ Ty = createBasicType(type);
+ break;
}
TypeCache[type] = WeakVH(Ty);
return Ty;
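The "do not cache pointer type" rule in the switch above exists because a pointer can be formed while its pointee is still only forward declared; once the definition appears, the pointee's debug description must be regenerated. For instance (illustrative only):

    struct S;            // incomplete: only a forward declaration exists
    S *p;                // 'S *' is already usable here
    struct S { int x; }; // now 'S' has a real layout for the debug info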
@@ -1085,7 +1026,7 @@
// code generator accepts maximum one main compile unit per module. If a
// module does not contain any main compile unit then the code generator
// will emit multiple compile units in the output object file.
- if (!strcmp (main_input_filename, ""))
+ if (!strcmp(main_input_filename, ""))
TheCU = getOrCreateCompileUnit("<stdin>", true);
else
TheCU = getOrCreateCompileUnit(main_input_filename, true);
@@ -1096,7 +1037,7 @@
DICompileUnit DebugInfo::getOrCreateCompileUnit(const char *FullPath,
bool isMain) {
if (!FullPath) {
- if (!strcmp (main_input_filename, ""))
+ if (!strcmp(main_input_filename, ""))
FullPath = "<stdin>";
else
FullPath = main_input_filename;
@@ -1134,18 +1075,17 @@
// flag_objc_abi represents the Objective-C runtime version number. It is
// zero for all other languages.
unsigned ObjcRunTimeVer = 0;
-// if (flag_objc_abi != 0 && flag_objc_abi != -1)
-// ObjcRunTimeVer = flag_objc_abi;
+ // if (flag_objc_abi != 0 && flag_objc_abi != -1)
+ // ObjcRunTimeVer = flag_objc_abi;
return DebugFactory.CreateCompileUnit(LangTag, FileName, Directory,
- version_string, isMain,
- optimize, Flags,
+ version_string, isMain, optimize, Flags,
ObjcRunTimeVer);
}
/// getOrCreateFile - Get DIFile descriptor.
DIFile DebugInfo::getOrCreateFile(const char *FullPath) {
if (!FullPath) {
- if (!strcmp (main_input_filename, ""))
+ if (!strcmp(main_input_filename, ""))
FullPath = "<stdin>";
else
FullPath = main_input_filename;
@@ -1163,10 +1103,11 @@
//===----------------------------------------------------------------------===//
DIFactory::DIFactory(Module &m)
- : M(m), VMContext(M.getContext()), DeclareFn(0), ValueFn(0) {}
+ : M(m), VMContext(M.getContext()), DeclareFn(0), ValueFn(0) {
+}
Constant *DIFactory::GetTagConstant(unsigned TAG) {
- assert((TAG & LLVMDebugVersionMask) == 0 &&
+ assert((TAG &LLVMDebugVersionMask) == 0 &&
"Tag too large for debug encoding!");
// llvm has moved forward. DIFactory does not emit debug info in the updated form.
// Use LLVMDebugVersion10 directly here.
@@ -1186,18 +1127,16 @@
return DIArray(MDNode::get(VMContext, Null));
}
- SmallVector<Value *, 16> Elts(Tys, Tys+NumTys);
+ SmallVector<Value *, 16> Elts(Tys, Tys + NumTys);
return DIArray(MDNode::get(VMContext, Elts));
}
/// GetOrCreateSubrange - Create a descriptor for a value range. This
/// implicitly uniques the values returned.
DISubrange DIFactory::GetOrCreateSubrange(int64_t Lo, int64_t Hi) {
- Value *Elts[] = {
- GetTagConstant(dwarf::DW_TAG_subrange_type),
- ConstantInt::get(Type::getInt64Ty(VMContext), Lo),
- ConstantInt::get(Type::getInt64Ty(VMContext), Hi)
- };
+ Value *Elts[] = { GetTagConstant(dwarf::DW_TAG_subrange_type),
+ ConstantInt::get(Type::getInt64Ty(VMContext), Lo),
+ ConstantInt::get(Type::getInt64Ty(VMContext), Hi) };
return DISubrange(MDNode::get(VMContext, Elts));
}
@@ -1205,109 +1144,75 @@
/// CreateUnspecifiedParameter - Create an unspecified type descriptor
/// for the subroutine type.
DIDescriptor DIFactory::CreateUnspecifiedParameter() {
- Value *Elts[] = {
- GetTagConstant(dwarf::DW_TAG_unspecified_parameters)
- };
+ Value *Elts[] = { GetTagConstant(dwarf::DW_TAG_unspecified_parameters) };
return DIDescriptor(MDNode::get(VMContext, Elts));
}
/// CreateCompileUnit - Create a new descriptor for the specified compile
/// unit. Note that this does not unique compile units within the module.
-DICompileUnit DIFactory::CreateCompileUnit(unsigned LangID,
- StringRef Filename,
- StringRef Directory,
- StringRef Producer,
- bool isMain,
- bool isOptimized,
- StringRef Flags,
- unsigned RunTimeVer) {
- Value *Elts[] = {
- GetTagConstant(dwarf::DW_TAG_compile_unit),
- llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
- ConstantInt::get(Type::getInt32Ty(VMContext), LangID),
- MDString::get(VMContext, Filename),
- MDString::get(VMContext, Directory),
- MDString::get(VMContext, Producer),
- ConstantInt::get(Type::getInt1Ty(VMContext), isMain),
- ConstantInt::get(Type::getInt1Ty(VMContext), isOptimized),
- MDString::get(VMContext, Flags),
- ConstantInt::get(Type::getInt32Ty(VMContext), RunTimeVer)
- };
+DICompileUnit DIFactory::CreateCompileUnit(
+ unsigned LangID, StringRef Filename, StringRef Directory,
+ StringRef Producer, bool isMain, bool isOptimized, StringRef Flags,
+ unsigned RunTimeVer) {
+ Value *Elts[] = { GetTagConstant(dwarf::DW_TAG_compile_unit),
+ llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ ConstantInt::get(Type::getInt32Ty(VMContext), LangID),
+ MDString::get(VMContext, Filename),
+ MDString::get(VMContext, Directory),
+ MDString::get(VMContext, Producer),
+ ConstantInt::get(Type::getInt1Ty(VMContext), isMain),
+ ConstantInt::get(Type::getInt1Ty(VMContext), isOptimized),
+ MDString::get(VMContext, Flags),
+ ConstantInt::get(Type::getInt32Ty(VMContext), RunTimeVer) };
return DICompileUnit(MDNode::get(VMContext, Elts));
}
/// CreateFile - Create a new descriptor for the specified file.
-DIFile DIFactory::CreateFile(StringRef Filename,
- StringRef Directory,
+DIFile DIFactory::CreateFile(StringRef Filename, StringRef Directory,
DICompileUnit CU) {
- Value *Elts[] = {
- GetTagConstant(dwarf::DW_TAG_file_type),
- MDString::get(VMContext, Filename),
- MDString::get(VMContext, Directory),
- CU
- };
+ Value *Elts[] = { GetTagConstant(dwarf::DW_TAG_file_type),
+ MDString::get(VMContext, Filename),
+ MDString::get(VMContext, Directory), CU };
return DIFile(MDNode::get(VMContext, Elts));
}
/// CreateEnumerator - Create a single enumerator value.
-DIEnumerator DIFactory::CreateEnumerator(StringRef Name, uint64_t Val){
- Value *Elts[] = {
- GetTagConstant(dwarf::DW_TAG_enumerator),
- MDString::get(VMContext, Name),
- ConstantInt::get(Type::getInt64Ty(VMContext), Val)
- };
+DIEnumerator DIFactory::CreateEnumerator(StringRef Name, uint64_t Val) {
+ Value *Elts[] = { GetTagConstant(dwarf::DW_TAG_enumerator),
+ MDString::get(VMContext, Name),
+ ConstantInt::get(Type::getInt64Ty(VMContext), Val) };
return DIEnumerator(MDNode::get(VMContext, Elts));
}
-
/// CreateBasicType - Create a basic type like int, float, etc.
-DIBasicType DIFactory::CreateBasicType(DIDescriptor Context,
- StringRef Name,
- DIFile F,
- unsigned LineNumber,
- uint64_t SizeInBits,
- uint64_t AlignInBits,
- uint64_t OffsetInBits, unsigned Flags,
- unsigned Encoding) {
- Value *Elts[] = {
- GetTagConstant(dwarf::DW_TAG_base_type),
- Context,
- MDString::get(VMContext, Name),
- F,
- ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber),
- ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits),
- ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits),
- ConstantInt::get(Type::getInt64Ty(VMContext), OffsetInBits),
- ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
- ConstantInt::get(Type::getInt32Ty(VMContext), Encoding)
- };
+DIBasicType DIFactory::CreateBasicType(
+ DIDescriptor Context, StringRef Name, DIFile F, unsigned LineNumber,
+ uint64_t SizeInBits, uint64_t AlignInBits, uint64_t OffsetInBits,
+ unsigned Flags, unsigned Encoding) {
+ Value *Elts[] = { GetTagConstant(dwarf::DW_TAG_base_type), Context,
+ MDString::get(VMContext, Name), F,
+ ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber),
+ ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits),
+ ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits),
+ ConstantInt::get(Type::getInt64Ty(VMContext), OffsetInBits),
+ ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
+ ConstantInt::get(Type::getInt32Ty(VMContext), Encoding) };
return DIBasicType(MDNode::get(VMContext, Elts));
}
-
/// CreateBasicType - Create a basic type like int, float, etc.
-DIBasicType DIFactory::CreateBasicTypeEx(DIDescriptor Context,
- StringRef Name,
- DIFile F,
- unsigned LineNumber,
- Constant *SizeInBits,
- Constant *AlignInBits,
- Constant *OffsetInBits, unsigned Flags,
- unsigned Encoding) {
- Value *Elts[] = {
- GetTagConstant(dwarf::DW_TAG_base_type),
- Context,
- MDString::get(VMContext, Name),
- F,
- ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber),
- SizeInBits,
- AlignInBits,
- OffsetInBits,
- ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
- ConstantInt::get(Type::getInt32Ty(VMContext), Encoding)
- };
+DIBasicType DIFactory::CreateBasicTypeEx(
+ DIDescriptor Context, StringRef Name, DIFile F, unsigned LineNumber,
+ Constant *SizeInBits, Constant *AlignInBits, Constant *OffsetInBits,
+ unsigned Flags, unsigned Encoding) {
+ Value *Elts[] = { GetTagConstant(dwarf::DW_TAG_base_type), Context,
+ MDString::get(VMContext, Name), F,
+ ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber),
+ SizeInBits, AlignInBits, OffsetInBits,
+ ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
+ ConstantInt::get(Type::getInt32Ty(VMContext), Encoding) };
return DIBasicType(MDNode::get(VMContext, Elts));
}
@@ -1318,7 +1223,7 @@
SmallVector<Value *, 9> Elts;
MDNode *N = Ty;
- assert (N && "Unexpected input DIType!");
+ assert(N && "Unexpected input DIType!");
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
if (Value *V = N->getOperand(i))
Elts.push_back(V);
@@ -1330,97 +1235,60 @@
CurFlags = CurFlags | DIType::FlagArtificial;
// Flags are stored at this slot.
- Elts[8] = ConstantInt::get(Type::getInt32Ty(VMContext), CurFlags);
+ Elts[8] = ConstantInt::get(Type::getInt32Ty(VMContext), CurFlags);
return DIType(MDNode::get(VMContext, Elts));
}
/// CreateDerivedType - Create a derived type like const qualified type,
/// pointer, typedef, etc.
-DIDerivedType DIFactory::CreateDerivedType(unsigned Tag,
- DIDescriptor Context,
- StringRef Name,
- DIFile F,
- unsigned LineNumber,
- uint64_t SizeInBits,
- uint64_t AlignInBits,
- uint64_t OffsetInBits,
- unsigned Flags,
- DIType DerivedFrom) {
- Value *Elts[] = {
- GetTagConstant(Tag),
- Context,
- MDString::get(VMContext, Name),
- F,
- ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber),
- ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits),
- ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits),
- ConstantInt::get(Type::getInt64Ty(VMContext), OffsetInBits),
- ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
- DerivedFrom,
- };
+DIDerivedType DIFactory::CreateDerivedType(
+ unsigned Tag, DIDescriptor Context, StringRef Name, DIFile F,
+ unsigned LineNumber, uint64_t SizeInBits, uint64_t AlignInBits,
+ uint64_t OffsetInBits, unsigned Flags, DIType DerivedFrom) {
+ Value *Elts[] = { GetTagConstant(Tag), Context,
+ MDString::get(VMContext, Name), F,
+ ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber),
+ ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits),
+ ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits),
+ ConstantInt::get(Type::getInt64Ty(VMContext), OffsetInBits),
+ ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
+ DerivedFrom, };
return DIDerivedType(MDNode::get(VMContext, Elts));
}
-
/// CreateDerivedType - Create a derived type like const qualified type,
/// pointer, typedef, etc.
-DIDerivedType DIFactory::CreateDerivedTypeEx(unsigned Tag,
- DIDescriptor Context,
- StringRef Name,
- DIFile F,
- unsigned LineNumber,
- Constant *SizeInBits,
- Constant *AlignInBits,
- Constant *OffsetInBits,
- unsigned Flags,
- DIType DerivedFrom) {
- Value *Elts[] = {
- GetTagConstant(Tag),
- Context,
- MDString::get(VMContext, Name),
- F,
- ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber),
- SizeInBits,
- AlignInBits,
- OffsetInBits,
- ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
- DerivedFrom,
- };
+DIDerivedType DIFactory::CreateDerivedTypeEx(
+ unsigned Tag, DIDescriptor Context, StringRef Name, DIFile F,
+ unsigned LineNumber, Constant *SizeInBits, Constant *AlignInBits,
+ Constant *OffsetInBits, unsigned Flags, DIType DerivedFrom) {
+ Value *Elts[] = { GetTagConstant(Tag), Context,
+ MDString::get(VMContext, Name), F,
+ ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber),
+ SizeInBits, AlignInBits, OffsetInBits,
+ ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
+ DerivedFrom, };
return DIDerivedType(MDNode::get(VMContext, Elts));
}
-
/// CreateCompositeType - Create a composite type like array, struct, etc.
-DICompositeType DIFactory::CreateCompositeType(unsigned Tag,
- DIDescriptor Context,
- StringRef Name,
- DIFile F,
- unsigned LineNumber,
- uint64_t SizeInBits,
- uint64_t AlignInBits,
- uint64_t OffsetInBits,
- unsigned Flags,
- DIType DerivedFrom,
- DIArray Elements,
- unsigned RuntimeLang,
- MDNode *ContainingType) {
-
- Value *Elts[] = {
- GetTagConstant(Tag),
- Context,
- MDString::get(VMContext, Name),
- F,
- ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber),
- ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits),
- ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits),
- ConstantInt::get(Type::getInt64Ty(VMContext), OffsetInBits),
- ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
- DerivedFrom,
- Elements,
- ConstantInt::get(Type::getInt32Ty(VMContext), RuntimeLang),
- ContainingType
- };
+DICompositeType DIFactory::CreateCompositeType(
+ unsigned Tag, DIDescriptor Context, StringRef Name, DIFile F,
+ unsigned LineNumber, uint64_t SizeInBits, uint64_t AlignInBits,
+ uint64_t OffsetInBits, unsigned Flags, DIType DerivedFrom, DIArray Elements,
+ unsigned RuntimeLang, MDNode *ContainingType) {
+
+ Value *Elts[] = { GetTagConstant(Tag), Context,
+ MDString::get(VMContext, Name), F,
+ ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber),
+ ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits),
+ ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits),
+ ConstantInt::get(Type::getInt64Ty(VMContext), OffsetInBits),
+ ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
+ DerivedFrom, Elements,
+ ConstantInt::get(Type::getInt32Ty(VMContext), RuntimeLang),
+ ContainingType };
MDNode *Node = MDNode::get(VMContext, Elts);
// Create a named metadata so that we do not lose this enum info.
@@ -1435,9 +1303,7 @@
DIType DIFactory::CreateTemporaryType() {
// Give the temporary MDNode a tag. It doesn't matter what tag we
// use here as long as DIType accepts it.
- Value *Elts[] = {
- GetTagConstant(DW_TAG_base_type)
- };
+ Value *Elts[] = { GetTagConstant(DW_TAG_base_type) };
MDNode *Node = MDNode::getTemporary(VMContext, Elts);
return DIType(Node);
}
@@ -1446,45 +1312,26 @@
DIType DIFactory::CreateTemporaryType(DIFile F) {
// Give the temporary MDNode a tag. It doesn't matter what tag we
// use here as long as DIType accepts it.
- Value *Elts[] = {
- GetTagConstant(DW_TAG_base_type),
- F.getCompileUnit(),
- NULL,
- F
- };
+ Value *Elts[] = { GetTagConstant(DW_TAG_base_type), F.getCompileUnit(), NULL,
+ F };
MDNode *Node = MDNode::getTemporary(VMContext, Elts);
return DIType(Node);
}
/// CreateCompositeType - Create a composite type like array, struct, etc.
-DICompositeType DIFactory::CreateCompositeTypeEx(unsigned Tag,
- DIDescriptor Context,
- StringRef Name,
- DIFile F,
- unsigned LineNumber,
- Constant *SizeInBits,
- Constant *AlignInBits,
- Constant *OffsetInBits,
- unsigned Flags,
- DIType DerivedFrom,
- DIArray Elements,
- unsigned RuntimeLang,
- MDNode *ContainingType) {
- Value *Elts[] = {
- GetTagConstant(Tag),
- Context,
- MDString::get(VMContext, Name),
- F,
- ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber),
- SizeInBits,
- AlignInBits,
- OffsetInBits,
- ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
- DerivedFrom,
- Elements,
- ConstantInt::get(Type::getInt32Ty(VMContext), RuntimeLang),
- ContainingType
- };
+DICompositeType DIFactory::CreateCompositeTypeEx(
+ unsigned Tag, DIDescriptor Context, StringRef Name, DIFile F,
+ unsigned LineNumber, Constant *SizeInBits, Constant *AlignInBits,
+ Constant *OffsetInBits, unsigned Flags, DIType DerivedFrom,
+ DIArray Elements, unsigned RuntimeLang, MDNode *ContainingType) {
+ Value *Elts[] = { GetTagConstant(Tag), Context,
+ MDString::get(VMContext, Name), F,
+ ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber),
+ SizeInBits, AlignInBits, OffsetInBits,
+ ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
+ DerivedFrom, Elements,
+ ConstantInt::get(Type::getInt32Ty(VMContext), RuntimeLang),
+ ContainingType };
MDNode *Node = MDNode::get(VMContext, Elts);
// Create a named metadata so that we do not lose this enum info.
if (Tag == dwarf::DW_TAG_enumeration_type) {
@@ -1494,42 +1341,27 @@
return DICompositeType(Node);
}
-
/// CreateSubprogram - Create a new descriptor for the specified subprogram.
/// See comments in DISubprogram for descriptions of these fields. This
/// method does not unique the generated descriptors.
-DISubprogram DIFactory::CreateSubprogram(DIDescriptor Context,
- StringRef Name,
- StringRef DisplayName,
- StringRef LinkageName,
- DIFile F,
- unsigned LineNo, DIType Ty,
- bool isLocalToUnit,
- bool isDefinition,
- unsigned VK, unsigned VIndex,
- DIType ContainingType,
- unsigned Flags,
- bool isOptimized,
- Function *Fn) {
+DISubprogram DIFactory::CreateSubprogram(
+ DIDescriptor Context, StringRef Name, StringRef DisplayName,
+ StringRef LinkageName, DIFile F, unsigned LineNo, DIType Ty,
+ bool isLocalToUnit, bool isDefinition, unsigned VK, unsigned VIndex,
+ DIType ContainingType, unsigned Flags, bool isOptimized, Function *Fn) {
Value *Elts[] = {
GetTagConstant(dwarf::DW_TAG_subprogram),
- llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
- Context,
- MDString::get(VMContext, Name),
- MDString::get(VMContext, DisplayName),
- MDString::get(VMContext, LinkageName),
- F,
- ConstantInt::get(Type::getInt32Ty(VMContext), LineNo),
- Ty,
+ llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)), Context,
+ MDString::get(VMContext, Name), MDString::get(VMContext, DisplayName),
+ MDString::get(VMContext, LinkageName), F,
+ ConstantInt::get(Type::getInt32Ty(VMContext), LineNo), Ty,
ConstantInt::get(Type::getInt1Ty(VMContext), isLocalToUnit),
ConstantInt::get(Type::getInt1Ty(VMContext), isDefinition),
- ConstantInt::get(Type::getInt32Ty(VMContext), (unsigned)VK),
- ConstantInt::get(Type::getInt32Ty(VMContext), VIndex),
- ContainingType,
+ ConstantInt::get(Type::getInt32Ty(VMContext), (unsigned) VK),
+ ConstantInt::get(Type::getInt32Ty(VMContext), VIndex), ContainingType,
ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
- ConstantInt::get(Type::getInt1Ty(VMContext), isOptimized),
- Fn
+ ConstantInt::get(Type::getInt1Ty(VMContext), isOptimized), Fn
};
MDNode *Node = MDNode::get(VMContext, Elts);
@@ -1541,31 +1373,30 @@
/// CreateSubprogramDefinition - Create a new subprogram descriptor for the
/// given declaration.
-DISubprogram DIFactory::CreateSubprogramDefinition(DISubprogram &SPDeclaration){
+DISubprogram DIFactory::CreateSubprogramDefinition(
+ DISubprogram &SPDeclaration) {
if (SPDeclaration.isDefinition())
return DISubprogram(SPDeclaration);
MDNode *DeclNode = SPDeclaration;
- Value *Elts[] = {
- GetTagConstant(dwarf::DW_TAG_subprogram),
- llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
- DeclNode->getOperand(2), // Context
- DeclNode->getOperand(3), // Name
- DeclNode->getOperand(4), // DisplayName
- DeclNode->getOperand(5), // LinkageName
- DeclNode->getOperand(6), // CompileUnit
- DeclNode->getOperand(7), // LineNo
- DeclNode->getOperand(8), // Type
- DeclNode->getOperand(9), // isLocalToUnit
- ConstantInt::get(Type::getInt1Ty(VMContext), true),
- DeclNode->getOperand(11), // Virtuality
- DeclNode->getOperand(12), // VIndex
-    DeclNode->getOperand(13), // Containing Type
- DeclNode->getOperand(14), // Flags
- DeclNode->getOperand(15), // isOptimized
- SPDeclaration.getFunction()
- };
- MDNode *Node =MDNode::get(VMContext, Elts);
+ Value *Elts[] = { GetTagConstant(dwarf::DW_TAG_subprogram),
+ llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ DeclNode->getOperand(2), // Context
+ DeclNode->getOperand(3), // Name
+ DeclNode->getOperand(4), // DisplayName
+ DeclNode->getOperand(5), // LinkageName
+ DeclNode->getOperand(6), // CompileUnit
+ DeclNode->getOperand(7), // LineNo
+ DeclNode->getOperand(8), // Type
+ DeclNode->getOperand(9), // isLocalToUnit
+ ConstantInt::get(Type::getInt1Ty(VMContext), true),
+ DeclNode->getOperand(11), // Virtuality
+ DeclNode->getOperand(12), // VIndex
+                    DeclNode->getOperand(13), // Containing Type
+ DeclNode->getOperand(14), // Flags
+ DeclNode->getOperand(15), // isOptimized
+ SPDeclaration.getFunction() };
+ MDNode *Node = MDNode::get(VMContext, Elts);
// Create a named metadata so that we do not lose this mdnode.
NamedMDNode *NMD = M.getOrInsertNamedMetadata("llvm.dbg.sp");
@@ -1574,27 +1405,19 @@
}
/// CreateGlobalVariable - Create a new descriptor for the specified global.
-DIGlobalVariable
-DIFactory::CreateGlobalVariable(DIDescriptor Context, StringRef Name,
- StringRef DisplayName,
- StringRef LinkageName,
- DIFile F,
- unsigned LineNo, DIType Ty,bool isLocalToUnit,
- bool isDefinition, llvm::GlobalVariable *Val) {
- Value *Elts[] = {
- GetTagConstant(dwarf::DW_TAG_variable),
- llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
- Context,
- MDString::get(VMContext, Name),
- MDString::get(VMContext, DisplayName),
- MDString::get(VMContext, LinkageName),
- F,
- ConstantInt::get(Type::getInt32Ty(VMContext), LineNo),
- Ty,
- ConstantInt::get(Type::getInt1Ty(VMContext), isLocalToUnit),
- ConstantInt::get(Type::getInt1Ty(VMContext), isDefinition),
- Val
- };
+DIGlobalVariable DIFactory::CreateGlobalVariable(
+ DIDescriptor Context, StringRef Name, StringRef DisplayName,
+ StringRef LinkageName, DIFile F, unsigned LineNo, DIType Ty,
+ bool isLocalToUnit, bool isDefinition, llvm::GlobalVariable *Val) {
+ Value *Elts[] = { GetTagConstant(dwarf::DW_TAG_variable),
+ llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ Context, MDString::get(VMContext, Name),
+ MDString::get(VMContext, DisplayName),
+ MDString::get(VMContext, LinkageName), F,
+ ConstantInt::get(Type::getInt32Ty(VMContext), LineNo), Ty,
+ ConstantInt::get(Type::getInt1Ty(VMContext), isLocalToUnit),
+ ConstantInt::get(Type::getInt1Ty(VMContext), isDefinition),
+ Val };
MDNode *Node = MDNode::get(VMContext, Elts);
@@ -1606,27 +1429,19 @@
}
/// CreateGlobalVariable - Create a new descriptor for the specified constant.
-DIGlobalVariable
-DIFactory::CreateGlobalVariable(DIDescriptor Context, StringRef Name,
- StringRef DisplayName,
- StringRef LinkageName,
- DIFile F,
- unsigned LineNo, DIType Ty,bool isLocalToUnit,
- bool isDefinition, llvm::Constant *Val) {
- Value *Elts[] = {
- GetTagConstant(dwarf::DW_TAG_variable),
- llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
- Context,
- MDString::get(VMContext, Name),
- MDString::get(VMContext, DisplayName),
- MDString::get(VMContext, LinkageName),
- F,
- ConstantInt::get(Type::getInt32Ty(VMContext), LineNo),
- Ty,
- ConstantInt::get(Type::getInt1Ty(VMContext), isLocalToUnit),
- ConstantInt::get(Type::getInt1Ty(VMContext), isDefinition),
- Val
- };
+DIGlobalVariable DIFactory::CreateGlobalVariable(
+ DIDescriptor Context, StringRef Name, StringRef DisplayName,
+ StringRef LinkageName, DIFile F, unsigned LineNo, DIType Ty,
+ bool isLocalToUnit, bool isDefinition, llvm::Constant *Val) {
+ Value *Elts[] = { GetTagConstant(dwarf::DW_TAG_variable),
+ llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ Context, MDString::get(VMContext, Name),
+ MDString::get(VMContext, DisplayName),
+ MDString::get(VMContext, LinkageName), F,
+ ConstantInt::get(Type::getInt32Ty(VMContext), LineNo), Ty,
+ ConstantInt::get(Type::getInt1Ty(VMContext), isLocalToUnit),
+ ConstantInt::get(Type::getInt1Ty(VMContext), isDefinition),
+ Val };
MDNode *Node = MDNode::get(VMContext, Elts);
@@ -1638,22 +1453,14 @@
}
/// CreateVariable - Create a new descriptor for the specified variable.
-DIVariable DIFactory::CreateVariable(unsigned Tag, DIDescriptor Context,
- StringRef Name,
- DIFile F,
- unsigned LineNo,
- DIType Ty, bool AlwaysPreserve,
- unsigned Flags) {
- Value *Elts[] = {
- GetTagConstant(Tag),
- Context,
- MDString::get(VMContext, Name),
- F,
- ConstantInt::get(Type::getInt32Ty(VMContext), LineNo),
- Ty,
- ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
- Constant::getNullValue(Type::getInt32Ty(VMContext))
- };
+DIVariable DIFactory::CreateVariable(
+ unsigned Tag, DIDescriptor Context, StringRef Name, DIFile F,
+ unsigned LineNo, DIType Ty, bool AlwaysPreserve, unsigned Flags) {
+ Value *Elts[] = { GetTagConstant(Tag), Context,
+ MDString::get(VMContext, Name), F,
+ ConstantInt::get(Type::getInt32Ty(VMContext), LineNo), Ty,
+ ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
+ Constant::getNullValue(Type::getInt32Ty(VMContext)) };
MDNode *Node = MDNode::get(VMContext, Elts);
if (AlwaysPreserve) {
// The optimizer may remove local variables. If there is an interest
@@ -1667,21 +1474,17 @@
if (FName.startswith(StringRef(&One, 1)))
FName = FName.substr(1);
-
NamedMDNode *FnLocals = getOrInsertFnSpecificMDNode(M, Fn); /* FName);*/
FnLocals->addOperand(Node);
}
return DIVariable(Node);
}
-
/// CreateComplexVariable - Create a new descriptor for the specified variable
/// which has a complex address expression for its address.
-DIVariable DIFactory::CreateComplexVariable(unsigned Tag, DIDescriptor Context,
- StringRef Name, DIFile F,
- unsigned LineNo,
- DIType Ty, Value *const *Addr,
- unsigned NumAddr) {
+DIVariable DIFactory::CreateComplexVariable(
+ unsigned Tag, DIDescriptor Context, StringRef Name, DIFile F,
+ unsigned LineNo, DIType Ty, Value *const *Addr, unsigned NumAddr) {
SmallVector<Value *, 15> Elts;
Elts.push_back(GetTagConstant(Tag));
Elts.push_back(Context);
@@ -1691,54 +1494,41 @@
Elts.push_back(Ty);
Elts.push_back(llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)));
Elts.push_back(llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)));
- Elts.append(Addr, Addr+NumAddr);
+ Elts.append(Addr, Addr + NumAddr);
return DIVariable(MDNode::get(VMContext, Elts));
}
-
/// CreateLexicalBlock - This creates a descriptor for a lexical block with
/// the specified parent VMContext.
-DILexicalBlock DIFactory::CreateLexicalBlock(DIDescriptor Context,
- DIFile F, unsigned LineNo,
- unsigned Col) {
+DILexicalBlock DIFactory::CreateLexicalBlock(DIDescriptor Context, DIFile F,
+ unsigned LineNo, unsigned Col) {
// Defeat MDNode uniquing for lexical blocks.
static unsigned int unique_id = 0;
- Value *Elts[] = {
- GetTagConstant(dwarf::DW_TAG_lexical_block),
- Context,
- ConstantInt::get(Type::getInt32Ty(VMContext), LineNo),
- ConstantInt::get(Type::getInt32Ty(VMContext), Col),
- F,
- ConstantInt::get(Type::getInt32Ty(VMContext), unique_id++)
- };
+ Value *Elts[] = { GetTagConstant(dwarf::DW_TAG_lexical_block), Context,
+ ConstantInt::get(Type::getInt32Ty(VMContext), LineNo),
+ ConstantInt::get(Type::getInt32Ty(VMContext), Col), F,
+ ConstantInt::get(Type::getInt32Ty(VMContext),
+ unique_id++) };
return DILexicalBlock(MDNode::get(VMContext, Elts));
}
/// CreateNameSpace - This creates new descriptor for a namespace
/// with the specified parent context.
DINameSpace DIFactory::CreateNameSpace(DIDescriptor Context, StringRef Name,
- DIFile F,
- unsigned LineNo) {
- Value *Elts[] = {
- GetTagConstant(dwarf::DW_TAG_namespace),
- Context,
- MDString::get(VMContext, Name),
- F,
- ConstantInt::get(Type::getInt32Ty(VMContext), LineNo)
- };
+ DIFile F, unsigned LineNo) {
+ Value *Elts[] = { GetTagConstant(dwarf::DW_TAG_namespace), Context,
+ MDString::get(VMContext, Name), F,
+ ConstantInt::get(Type::getInt32Ty(VMContext), LineNo) };
return DINameSpace(MDNode::get(VMContext, Elts));
}
/// CreateLocation - Creates a debug info location.
DILocation DIFactory::CreateLocation(unsigned LineNo, unsigned ColumnNo,
DIScope S, DILocation OrigLoc) {
- Value *Elts[] = {
- ConstantInt::get(Type::getInt32Ty(VMContext), LineNo),
- ConstantInt::get(Type::getInt32Ty(VMContext), ColumnNo),
- S,
- OrigLoc,
- };
+ Value *Elts[] = { ConstantInt::get(Type::getInt32Ty(VMContext), LineNo),
+ ConstantInt::get(Type::getInt32Ty(VMContext), ColumnNo), S,
+ OrigLoc, };
return DILocation(MDNode::get(VMContext, Elts));
}
@@ -1754,8 +1544,7 @@
if (!DeclareFn)
DeclareFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_declare);
- Value *Args[] = { MDNode::get(Storage->getContext(), Storage),
- D };
+ Value *Args[] = { MDNode::get(Storage->getContext(), Storage), D };
return CallInst::Create(DeclareFn, Args, "", InsertBefore);
}
@@ -1767,20 +1556,19 @@
if (!DeclareFn)
DeclareFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_declare);
- Value *Args[] = { MDNode::get(Storage->getContext(), Storage),
- D };
+ Value *Args[] = { MDNode::get(Storage->getContext(), Storage), D };
// If this block already has a terminator then insert this intrinsic
// before the terminator.
if (TerminatorInst *T = InsertAtEnd->getTerminator())
return CallInst::Create(DeclareFn, Args, "", T);
else
- return CallInst::Create(DeclareFn, Args, "", InsertAtEnd);}
+ return CallInst::Create(DeclareFn, Args, "", InsertAtEnd);
+}
/// InsertDbgValueIntrinsic - Insert a new llvm.dbg.value intrinsic call.
-Instruction *DIFactory::InsertDbgValueIntrinsic(Value *V, uint64_t Offset,
- DIVariable D,
- Instruction *InsertBefore) {
+Instruction *DIFactory::InsertDbgValueIntrinsic(
+ Value *V, uint64_t Offset, DIVariable D, Instruction *InsertBefore) {
assert(V && "no value passed to dbg.value");
assert(D.Verify() && "invalid DIVariable passed to dbg.value");
if (!ValueFn)
@@ -1793,9 +1581,8 @@
}
/// InsertDbgValueIntrinsic - Insert a new llvm.dbg.value intrinsic call.
-Instruction *DIFactory::InsertDbgValueIntrinsic(Value *V, uint64_t Offset,
- DIVariable D,
- BasicBlock *InsertAtEnd) {
+Instruction *DIFactory::InsertDbgValueIntrinsic(
+ Value *V, uint64_t Offset, DIVariable D, BasicBlock *InsertAtEnd) {
assert(V && "no value passed to dbg.value");
assert(D.Verify() && "invalid DIVariable passed to dbg.value");
if (!ValueFn)
Modified: dragonegg/trunk/src/DefaultABI.cpp
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/src/DefaultABI.cpp?rev=173245&r1=173244&r2=173245&view=diff
==============================================================================
--- dragonegg/trunk/src/DefaultABI.cpp (original)
+++ dragonegg/trunk/src/DefaultABI.cpp Wed Jan 23 03:54:28 2013
@@ -51,7 +51,7 @@
// doNotUseShadowReturn - Return true if the specified GCC type
// should not be returned using a pointer to struct parameter.
bool doNotUseShadowReturn(tree type, tree fndecl, CallingConv::ID CC) {
- (void)CC; // Not used by all ABI macros.
+ (void) CC; // Not used by all ABI macros.
if (!TYPE_SIZE(type))
return false;
if (!isa<INTEGER_CST>(TYPE_SIZE(type)))
@@ -75,15 +75,17 @@
tree isSingleElementStructOrArray(tree type, bool ignoreZeroLength,
bool rejectFatBitfield) {
// Complex numbers have two fields.
- if (isa<COMPLEX_TYPE>(type)) return 0;
+ if (isa<COMPLEX_TYPE>(type))
+ return 0;
// All other scalars are good.
- if (!isa<AGGREGATE_TYPE>(type)) return type;
+ if (!isa<AGGREGATE_TYPE>(type))
+ return type;
tree FoundField = 0;
switch (TREE_CODE(type)) {
case QUAL_UNION_TYPE:
- case UNION_TYPE: // Single element unions don't count.
- case COMPLEX_TYPE: // Complex values are like 2-element records.
+ case UNION_TYPE: // Single element unions don't count.
+ case COMPLEX_TYPE: // Complex values are like 2-element records.
default:
return 0;
case RECORD_TYPE:
@@ -99,19 +101,18 @@
continue;
}
if (!FoundField) {
- if (rejectFatBitfield &&
- isa<INTEGER_CST>(TYPE_SIZE(type)) &&
+ if (rejectFatBitfield && isa<INTEGER_CST>(TYPE_SIZE(type)) &&
TREE_INT_CST_LOW(TYPE_SIZE(TREE_TYPE(Field))) >
TREE_INT_CST_LOW(TYPE_SIZE(type)))
return 0;
FoundField = TREE_TYPE(Field);
} else {
- return 0; // More than one field.
+ return 0; // More than one field.
}
}
- return FoundField ? isSingleElementStructOrArray(FoundField,
- ignoreZeroLength, false)
- : 0;
+ return FoundField ?
+ isSingleElementStructOrArray(FoundField, ignoreZeroLength, false) :
+ 0;
case ARRAY_TYPE:
ArrayType *Ty = dyn_cast<ArrayType>(ConvertType(type));
if (!Ty || Ty->getNumElements() != 1)
@@ -137,7 +138,7 @@
/// on the client that indicate how its pieces should be handled. This
/// handles things like returning structures via hidden parameters.
void DefaultABI::HandleReturnType(tree type, tree fn, bool isBuiltin) {
- (void)isBuiltin; // Not used by all ABI macros.
+ (void) isBuiltin; // Not used by all ABI macros.
unsigned Offset = 0;
Type *Ty = ConvertType(type);
if (Ty->isVectorTy()) {
@@ -165,10 +166,10 @@
// Otherwise return as an integer value large enough to hold the entire
// aggregate.
if (Type *AggrTy = LLVM_AGGR_TYPE_FOR_STRUCT_RETURN(type,
- C.getCallingConv()))
+ C.getCallingConv()))
C.HandleAggregateResultAsAggregate(AggrTy);
- else if (Type* ScalarTy =
- LLVM_SCALAR_TYPE_FOR_STRUCT_RETURN(type, &Offset))
+ else if (Type *ScalarTy = LLVM_SCALAR_TYPE_FOR_STRUCT_RETURN(type,
+ &Offset))
C.HandleAggregateResultAsScalar(ScalarTy, Offset);
else
llvm_unreachable("Unable to determine how to return this aggregate!");
@@ -191,14 +192,14 @@
/// argument and invokes methods on the client that indicate how its pieces
/// should be handled. This handles things like decimating structures into
/// their fields.
-void DefaultABI::HandleArgument(tree type, std::vector<Type*> &ScalarElts,
+void DefaultABI::HandleArgument(tree type, std::vector<Type *> &ScalarElts,
AttrBuilder *AttrBuilder) {
unsigned Size = 0;
bool DontCheckAlignment = false;
Type *Ty = ConvertType(type);
// Figure out if this field is zero bits wide, e.g. {} or [0 x int]. Do
// not include variable sized fields here.
- std::vector<Type*> Elts;
+ std::vector<Type *> Elts;
if (Ty->isVoidTy()) {
// Handle void explicitly as a {} type.
Type *OpTy = StructType::get(getGlobalContext());
@@ -229,12 +230,10 @@
ScalarElts.push_back(Ty);
} else if (LLVM_SHOULD_PASS_AGGREGATE_AS_FCA(type, Ty)) {
C.HandleFCAArgument(Ty, type);
- } else if (LLVM_SHOULD_PASS_AGGREGATE_IN_MIXED_REGS(type, Ty,
- C.getCallingConv(),
- Elts)) {
- if (!LLVM_AGGREGATE_PARTIALLY_PASSED_IN_REGS(Elts, ScalarElts,
- C.isShadowReturn(),
- C.getCallingConv()))
+ } else if (LLVM_SHOULD_PASS_AGGREGATE_IN_MIXED_REGS(
+ type, Ty, C.getCallingConv(), Elts)) {
+ if (!LLVM_AGGREGATE_PARTIALLY_PASSED_IN_REGS(
+ Elts, ScalarElts, C.isShadowReturn(), C.getCallingConv()))
PassInMixedRegisters(Ty, Elts, ScalarElts);
else {
C.HandleByValArgument(Ty, type);
@@ -269,7 +268,7 @@
// they would hit the assert in FunctionPrologArgumentConversion::
// HandleByValArgument.)
Type *FTy = ConvertType(Ftype);
- (void)FTy; // Not used by all ABI macros.
+ (void) FTy; // Not used by all ABI macros.
if (!LLVM_SHOULD_PASS_AGGREGATE_USING_BYVAL_ATTR(Ftype, FTy)) {
C.EnterField(FNo, Ty);
HandleArgument(TREE_TYPE(Field), ScalarElts);
@@ -283,8 +282,7 @@
C.EnterField(1, Ty);
HandleArgument(TREE_TYPE(type), ScalarElts);
C.ExitField();
- } else if ((isa<UNION_TYPE>(type)) ||
- (isa<QUAL_UNION_TYPE>(type))) {
+ } else if ((isa<UNION_TYPE>(type)) || (isa<QUAL_UNION_TYPE>(type))) {
HandleUnion(type, ScalarElts);
} else if (isa<ARRAY_TYPE>(type)) {
// Array with padding?
@@ -302,7 +300,7 @@
}
/// HandleUnion - Handle a UNION_TYPE or QUAL_UNION_TYPE tree.
-void DefaultABI::HandleUnion(tree type, std::vector<Type*> &ScalarElts) {
+void DefaultABI::HandleUnion(tree type, std::vector<Type *> &ScalarElts) {
if (TYPE_TRANSPARENT_AGGR(type)) {
tree Field = TYPE_FIELDS(type);
assert(Field && "Transparent union must have some elements!");
@@ -319,7 +317,7 @@
for (tree Field = TYPE_FIELDS(type); Field; Field = TREE_CHAIN(Field)) {
if (isa<FIELD_DECL>(Field)) {
tree SizeTree = TYPE_SIZE(TREE_TYPE(Field));
- unsigned Size = ((unsigned)TREE_INT_CST_LOW(SizeTree)+7)/8;
+ unsigned Size = ((unsigned) TREE_INT_CST_LOW(SizeTree) + 7) / 8;
if (Size > MaxSize) {
MaxSize = Size;
MaxElt = Field;
@@ -335,26 +333,25 @@
/// PassInIntegerRegisters - Given an aggregate value that should be passed in
/// integer registers, convert it to a structure containing ints and pass all
/// of the struct elements in. If Size is set we pass only that many bytes.
-void DefaultABI::PassInIntegerRegisters(tree type,
- std::vector<Type*> &ScalarElts,
- unsigned origSize,
- bool DontCheckAlignment) {
+void DefaultABI::PassInIntegerRegisters(
+ tree type, std::vector<Type *> &ScalarElts, unsigned origSize,
+ bool DontCheckAlignment) {
unsigned Size;
if (origSize)
Size = origSize;
else
- Size = TREE_INT_CST_LOW(TYPE_SIZE(type))/8;
+ Size = TREE_INT_CST_LOW(TYPE_SIZE(type)) / 8;
// FIXME: We should preserve all aggregate value alignment information.
// Work around to preserve some aggregate value alignment information:
// don't bitcast aggregate value to Int64 if its alignment is different
// from Int64 alignment. ARM backend needs this.
- unsigned Align = TYPE_ALIGN(type)/8;
- unsigned Int64Align =
- getDataLayout().getABITypeAlignment(Type::getInt64Ty(getGlobalContext()));
+ unsigned Align = TYPE_ALIGN(type) / 8;
+ unsigned Int64Align = getDataLayout().getABITypeAlignment(
+ Type::getInt64Ty(getGlobalContext()));
bool UseInt64 = (DontCheckAlignment || Align >= Int64Align);
- unsigned ElementSize = UseInt64 ? 8:4;
+ unsigned ElementSize = UseInt64 ? 8 : 4;
unsigned ArraySize = Size / ElementSize;
// Put as much of the aggregate as possible into an array.
@@ -362,9 +359,8 @@
Type *ArrayElementType = NULL;
if (ArraySize) {
Size = Size % ElementSize;
- ArrayElementType = (UseInt64 ?
- Type::getInt64Ty(getGlobalContext()) :
- Type::getInt32Ty(getGlobalContext()));
+ ArrayElementType = (UseInt64 ? Type::getInt64Ty(getGlobalContext()) :
+ Type::getInt32Ty(getGlobalContext()));
ATy = ArrayType::get(ArrayElementType, ArraySize);
}
@@ -385,7 +381,7 @@
LastEltRealSize = Size;
}
- std::vector<Type*> Elts;
+ std::vector<Type *> Elts;
if (ATy)
Elts.push_back(ATy);
if (LastEltTy)
@@ -415,16 +411,16 @@
/// PassInMixedRegisters - Given an aggregate value that should be passed in
/// mixed integer, floating point, and vector registers, convert it to a
/// structure containing the specified struct elements.
-void DefaultABI::PassInMixedRegisters(Type *Ty,
- std::vector<Type*> &OrigElts,
- std::vector<Type*> &ScalarElts) {
+void DefaultABI::PassInMixedRegisters(Type *Ty, std::vector<Type *> &OrigElts,
+ std::vector<Type *> &ScalarElts) {
// We use VoidTy in OrigElts to mean "this is a word in the aggregate
// that occupies storage but has no useful information, and is not passed
// anywhere". Happens on x86-64.
- std::vector<Type*> Elts(OrigElts);
- Type* wordType = getDataLayout().getPointerSize(0) == 4 ?
- Type::getInt32Ty(getGlobalContext()) : Type::getInt64Ty(getGlobalContext());
- for (unsigned i=0, e=Elts.size(); i!=e; ++i)
+ std::vector<Type *> Elts(OrigElts);
+ Type *wordType = getDataLayout().getPointerSize(0) == 4 ?
+ Type::getInt32Ty(getGlobalContext()) :
+ Type::getInt64Ty(getGlobalContext());
+ for (unsigned i = 0, e = Elts.size(); i != e; ++i)
if (OrigElts[i]->isVoidTy())
Elts[i] = wordType;
@@ -439,10 +435,10 @@
InSize = getDataLayout().getTypeAllocSize(Ty);
if (InSize < Size) {
unsigned N = STy->getNumElements();
- llvm::Type *LastEltTy = STy->getElementType(N-1);
+ llvm::Type *LastEltTy = STy->getElementType(N - 1);
if (LastEltTy->isIntegerTy())
- LastEltSizeDiff =
- getDataLayout().getTypeAllocSize(LastEltTy) - (Size - InSize);
+ LastEltSizeDiff = getDataLayout().getTypeAllocSize(LastEltTy) -
+ (Size - InSize);
}
}
for (unsigned i = 0, e = Elts.size(); i != e; ++i) {
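
To make the splitting arithmetic in PassInIntegerRegisters above concrete,
here is a tiny standalone sketch in plain C++; the values are illustrative
and not taken from DragonEgg.

    #include <cstdio>

    int main() {
      // A 20-byte aggregate whose alignment satisfies i64 alignment,
      // mirroring the Size/ElementSize computation above.
      unsigned Size = 20;
      bool UseInt64 = true; // DontCheckAlignment || Align >= Int64Align
      unsigned ElementSize = UseInt64 ? 8 : 4; // bytes per register chunk
      unsigned ArraySize = Size / ElementSize; // 2, i.e. [2 x i64]
      unsigned Tail = Size % ElementSize;      // 4 bytes in a smaller tail
      std::printf("[%u x i%u] plus %u tail byte(s)\n", ArraySize,
                  ElementSize * 8, Tail);
      return 0;
    }
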
Modified: dragonegg/trunk/src/Trees.cpp
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/src/Trees.cpp?rev=173245&r1=173244&r2=173245&view=diff
==============================================================================
--- dragonegg/trunk/src/Trees.cpp (original)
+++ dragonegg/trunk/src/Trees.cpp Wed Jan 23 03:54:28 2013
@@ -64,7 +64,8 @@
/// string if no sensible name was found. These names are used to make the IR
/// more readable, and have no official status.
std::string getDescriptiveName(const_tree t) {
- if (!t) return std::string(); // Occurs when recursing.
+ if (!t)
+ return std::string(); // Occurs when recursing.
// Name identifier nodes after their contents. This gives the desired effect
// when called recursively.
@@ -151,13 +152,14 @@
APInt DefaultValue;
if (integerPartWidth == HOST_BITS_PER_WIDE_INT) {
- DefaultValue = APInt(DefaultWidth, /*numWords*/2, (integerPart*)&val);
+ DefaultValue = APInt(DefaultWidth, /*numWords*/ 2, (integerPart *)&val);
} else {
assert(integerPartWidth == 2 * HOST_BITS_PER_WIDE_INT &&
"Unsupported host integer width!");
unsigned ShiftAmt = HOST_BITS_PER_WIDE_INT;
- integerPart Part = integerPart((unsigned HOST_WIDE_INT)val.low) +
- (integerPart((unsigned HOST_WIDE_INT)val.high) << ShiftAmt);
+ integerPart Part = integerPart((unsigned HOST_WIDE_INT) val.low) +
+ (integerPart((unsigned HOST_WIDE_INT) val.high)
+ << ShiftAmt);
DefaultValue = APInt(DefaultWidth, Part);
}
@@ -165,8 +167,8 @@
return DefaultValue;
if (Bitwidth > DefaultWidth)
- return TYPE_UNSIGNED(TREE_TYPE(exp)) ?
- DefaultValue.zext(Bitwidth) : DefaultValue.sext(Bitwidth);
+ return TYPE_UNSIGNED(TREE_TYPE(exp)) ? DefaultValue.zext(Bitwidth) :
+ DefaultValue.sext(Bitwidth);
assert((TYPE_UNSIGNED(TREE_TYPE(exp)) ||
DefaultValue.trunc(Bitwidth).sext(DefaultWidth) == DefaultValue) &&
@@ -185,16 +187,15 @@
if (!t)
return false;
if (HOST_BITS_PER_WIDE_INT == 64)
- return host_integerp(t, Unsigned) && !TREE_OVERFLOW (t);
+ return host_integerp(t, Unsigned) && !TREE_OVERFLOW(t);
assert(HOST_BITS_PER_WIDE_INT == 32 &&
"Only 32- and 64-bit hosts supported!");
- return
- (isa<INTEGER_CST>(t) && !TREE_OVERFLOW (t))
- && ((TYPE_UNSIGNED(TREE_TYPE(t)) == Unsigned) ||
- // If the constant is signed and we want an unsigned result, check
- // that the value is non-negative. If the constant is unsigned and
- // we want a signed result, check it fits in 63 bits.
- (HOST_WIDE_INT)TREE_INT_CST_HIGH(t) >= 0);
+ return (isa<INTEGER_CST>(t) && !TREE_OVERFLOW(t)) &&
+ ((TYPE_UNSIGNED(TREE_TYPE(t)) == Unsigned) ||
+ // If the constant is signed and we want an unsigned result, check
+ // that the value is non-negative. If the constant is unsigned and
+ // we want a signed result, check it fits in 63 bits.
+ (HOST_WIDE_INT) TREE_INT_CST_HIGH(t) >= 0);
}
/// getInt64 - Extract the value of an INTEGER_CST as a 64 bit integer. If
@@ -203,15 +204,15 @@
/// overflowed constants. These conditions can be checked by calling isInt64.
uint64_t getInt64(const_tree t, bool Unsigned) {
assert(isInt64(t, Unsigned) && "invalid constant!");
- (void)Unsigned; // Otherwise unused if asserts off - avoid compiler warning.
- unsigned HOST_WIDE_INT LO = (unsigned HOST_WIDE_INT)TREE_INT_CST_LOW(t);
+ (void) Unsigned; // Otherwise unused if asserts off - avoid compiler warning.
+ unsigned HOST_WIDE_INT LO = (unsigned HOST_WIDE_INT) TREE_INT_CST_LOW(t);
if (HOST_BITS_PER_WIDE_INT == 64) {
- return (uint64_t)LO;
+ return (uint64_t) LO;
} else {
assert(HOST_BITS_PER_WIDE_INT == 32 &&
"Only 32- and 64-bit hosts supported!");
- unsigned HOST_WIDE_INT HI = (unsigned HOST_WIDE_INT)TREE_INT_CST_HIGH(t);
- return ((uint64_t)HI << 32) | (uint64_t)LO;
+ unsigned HOST_WIDE_INT HI = (unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH(t);
+ return ((uint64_t) HI << 32) | (uint64_t) LO;
}
}
@@ -247,11 +248,11 @@
// Does not start on a byte boundary - must treat as a bitfield.
return true;
- if (!isInt64(TYPE_SIZE (TREE_TYPE(field_decl)), true))
+ if (!isInt64(TYPE_SIZE(TREE_TYPE(field_decl)), true))
// No size or variable sized - play safe, treat as a bitfield.
return true;
- uint64_t TypeSizeInBits = getInt64(TYPE_SIZE (TREE_TYPE(field_decl)), true);
+ uint64_t TypeSizeInBits = getInt64(TYPE_SIZE(TREE_TYPE(field_decl)), true);
assert(!(TypeSizeInBits & 7) && "A type with a non-byte size!");
assert(DECL_SIZE(field_decl) && "Bitfield with no bit size!");
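
As a quick sanity check of the 32-bit-host branch of getInt64 above, the two
host words of an INTEGER_CST recombine into a single 64-bit value; this is a
standalone sketch, not DragonEgg code.

    #include <cassert>
    #include <cstdint>

    // Mirrors the HOST_BITS_PER_WIDE_INT == 32 branch above.
    static uint64_t combineWords(uint32_t HI, uint32_t LO) {
      return ((uint64_t) HI << 32) | (uint64_t) LO;
    }

    int main() {
      assert(combineWords(0x1, 0x2) == 0x100000002ULL);
      assert(combineWords(0, 0xFFFFFFFFu) == 0xFFFFFFFFULL);
      return 0;
    }
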
Modified: dragonegg/trunk/src/TypeConversion.cpp
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/src/TypeConversion.cpp?rev=173245&r1=173244&r2=173245&view=diff
==============================================================================
--- dragonegg/trunk/src/TypeConversion.cpp (original)
+++ dragonegg/trunk/src/TypeConversion.cpp Wed Jan 23 03:54:28 2013
@@ -59,7 +59,7 @@
static LLVMContext &Context = getGlobalContext();
/// SCCInProgress - Set of mutually dependent types currently being converted.
-static const std::vector<tree_node*> *SCCInProgress;
+static const std::vector<tree_node *> *SCCInProgress;
//===----------------------------------------------------------------------===//
// ... ContainedTypeIterator ...
@@ -73,138 +73,136 @@
/// over all of the types contained in a given type.
namespace {
- class ContainedTypeIterator {
- /// type_ref - Either a TREE_LIST node, in which case TREE_VALUE gives the
- /// contained type, or some other kind of tree node and TREE_TYPE gives the
- /// contained type. A null value indicates the end iterator.
- tree type_ref;
-
- /// ContainedTypeIterator - Convenience constructor for internal use.
- explicit ContainedTypeIterator(const tree& t) : type_ref(t) {}
-
- public:
- /// Dereference operator.
- tree operator*() {
- return isa<TREE_LIST>(type_ref) ?
- TREE_VALUE(type_ref) : TREE_TYPE(type_ref);
- };
-
- /// Comparison operators.
- bool operator==(const ContainedTypeIterator &other) const {
- return other.type_ref == this->type_ref;
- }
- bool operator!=(const ContainedTypeIterator &other) const {
- return !(*this == other);
- }
+class ContainedTypeIterator {
+ /// type_ref - Either a TREE_LIST node, in which case TREE_VALUE gives the
+ /// contained type, or some other kind of tree node and TREE_TYPE gives the
+ /// contained type. A null value indicates the end iterator.
+ tree type_ref;
- /// Prefix increment operator.
- ContainedTypeIterator& operator++() {
- assert(type_ref && "Incrementing end iterator!");
-
- switch (TREE_CODE(type_ref)) {
- default:
- debug_tree(type_ref);
- llvm_unreachable("Unexpected tree kind!");
- case ARRAY_TYPE:
- case COMPLEX_TYPE:
- case POINTER_TYPE:
- case REFERENCE_TYPE:
- case VECTOR_TYPE:
- // Here type_ref is the type being iterated over. These types all have
- // only one contained type, so incrementing returns the end iterator.
- type_ref = 0;
- break;
+ /// ContainedTypeIterator - Convenience constructor for internal use.
+ explicit ContainedTypeIterator(const tree &t) : type_ref(t) {}
+
+public:
+ /// Dereference operator.
+ tree operator*() {
+ return isa<TREE_LIST>(type_ref) ? TREE_VALUE(type_ref) :
+ TREE_TYPE(type_ref);
+ }
+ ;
+
+ /// Comparison operators.
+ bool operator==(const ContainedTypeIterator &other) const {
+ return other.type_ref == this->type_ref;
+ }
+ bool operator!=(const ContainedTypeIterator &other) const {
+ return !(*this == other);
+ }
+
+ /// Prefix increment operator.
+ ContainedTypeIterator &operator++() {
+ assert(type_ref && "Incrementing end iterator!");
+
+ switch (TREE_CODE(type_ref)) {
+ default:
+ debug_tree(type_ref);
+ llvm_unreachable("Unexpected tree kind!");
+ case ARRAY_TYPE:
+ case COMPLEX_TYPE:
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ case VECTOR_TYPE:
+ // Here type_ref is the type being iterated over. These types all have
+ // only one contained type, so incrementing returns the end iterator.
+ type_ref = 0;
+ break;
- case FIELD_DECL:
- // Here type_ref is a field of the record or union type being iterated
- // over. Move on to the next field.
- do
- type_ref = TREE_CHAIN(type_ref);
- while (type_ref && !isa<FIELD_DECL>(type_ref));
- break;
-
- case FUNCTION_TYPE:
- case METHOD_TYPE:
- // Here type_ref is the type being iterated over and the iterator refers
- // to the function return type. Move on to the first function argument
- // (a TREE_LIST node).
- type_ref = TYPE_ARG_TYPES(type_ref);
- break;
-
- case TREE_LIST:
- // Here type_ref belongs to the argument list of the function or method
- // being iterated over. Move on to the next function argument.
+ case FIELD_DECL:
+ // Here type_ref is a field of the record or union type being iterated
+ // over. Move on to the next field.
+ do
type_ref = TREE_CHAIN(type_ref);
- // If the function takes a fixed number of arguments then the argument
- // list is terminated by void_list_node. This is not a real argument.
- if (type_ref == void_list_node)
- type_ref = 0;
- break;
- }
+ while (type_ref && !isa<FIELD_DECL>(type_ref));
+ break;
+
+ case FUNCTION_TYPE:
+ case METHOD_TYPE:
+ // Here type_ref is the type being iterated over and the iterator refers
+ // to the function return type. Move on to the first function argument
+ // (a TREE_LIST node).
+ type_ref = TYPE_ARG_TYPES(type_ref);
+ break;
- return *this;
+ case TREE_LIST:
+ // Here type_ref belongs to the argument list of the function or method
+ // being iterated over. Move on to the next function argument.
+ type_ref = TREE_CHAIN(type_ref);
+ // If the function takes a fixed number of arguments then the argument
+ // list is terminated by void_list_node. This is not a real argument.
+ if (type_ref == void_list_node)
+ type_ref = 0;
+ break;
}
- /// begin - Return an iterator referring to the first type contained in the
- /// given type.
- static ContainedTypeIterator begin(tree type) {
- switch (TREE_CODE(type)) {
- default:
- llvm_unreachable("Unknown type!");
-
- case BOOLEAN_TYPE:
- case ENUMERAL_TYPE:
- case FIXED_POINT_TYPE:
- case INTEGER_TYPE:
+ return *this;
+ }
+
+ /// begin - Return an iterator referring to the first type contained in the
+ /// given type.
+ static ContainedTypeIterator begin(tree type) {
+ switch (TREE_CODE(type)) {
+ default:
+ llvm_unreachable("Unknown type!");
+
+ case BOOLEAN_TYPE:
+ case ENUMERAL_TYPE:
+ case FIXED_POINT_TYPE:
+ case INTEGER_TYPE:
#if (GCC_MINOR > 5)
- case NULLPTR_TYPE:
+ case NULLPTR_TYPE:
#endif
- case OFFSET_TYPE:
- case REAL_TYPE:
- case VOID_TYPE:
- // No contained types.
- return end();
-
- case ARRAY_TYPE:
- case COMPLEX_TYPE:
- case POINTER_TYPE:
- case REFERENCE_TYPE:
- case VECTOR_TYPE:
- // Use the type itself as the "pointer" to the contained type.
- return ContainedTypeIterator(type);
-
- case QUAL_UNION_TYPE:
- case RECORD_TYPE:
- case UNION_TYPE:
- // The contained types are the types of the record's fields. Use the
- // first FIELD_DECL as the "pointer" to the first contained type.
- for (tree field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field))
- if (isa<FIELD_DECL>(field))
- return ContainedTypeIterator(field);
- return end();
-
- case FUNCTION_TYPE:
- case METHOD_TYPE:
- // The contained types are the return type and the argument types (in
- // the case of METHOD_TYPE nothing special needs to be done for 'this'
- // since it occurs explicitly in the argument list). Return the type
- // itself as the "pointer" to the return type; incrementing will move
- // the iterator on to the argument types.
- // Note that static chains for nested functions cannot be obtained from
- // the function type which is why there is no attempt to handle them.
- return ContainedTypeIterator(type);
- }
+ case OFFSET_TYPE:
+ case REAL_TYPE:
+ case VOID_TYPE:
+ // No contained types.
+ return end();
+
+ case ARRAY_TYPE:
+ case COMPLEX_TYPE:
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ case VECTOR_TYPE:
+ // Use the type itself as the "pointer" to the contained type.
+ return ContainedTypeIterator(type);
+
+ case QUAL_UNION_TYPE:
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ // The contained types are the types of the record's fields. Use the
+ // first FIELD_DECL as the "pointer" to the first contained type.
+ for (tree field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field))
+ if (isa<FIELD_DECL>(field))
+ return ContainedTypeIterator(field);
+ return end();
+
+ case FUNCTION_TYPE:
+ case METHOD_TYPE:
+ // The contained types are the return type and the argument types (in
+ // the case of METHOD_TYPE nothing special needs to be done for 'this'
+ // since it occurs explicitly in the argument list). Return the type
+ // itself as the "pointer" to the return type; incrementing will move
+ // the iterator on to the argument types.
+ // Note that static chains for nested functions cannot be obtained from
+ // the function type which is why there is no attempt to handle them.
+ return ContainedTypeIterator(type);
}
+ }
- /// end - Return the end iterator for contained type iteration.
- static ContainedTypeIterator end() {
- return ContainedTypeIterator(0);
- }
- };
+ /// end - Return the end iterator for contained type iteration.
+ static ContainedTypeIterator end() { return ContainedTypeIterator(0); }
+};
} // Unnamed namespace.
-
//===----------------------------------------------------------------------===//
// Utilities
//===----------------------------------------------------------------------===//
@@ -221,8 +219,8 @@
// Bail out if the array has variable or unknown length.
if (!isInt64(range, false))
return NO_LENGTH;
- int64_t Range = (int64_t)getInt64(range, false);
- return Range < 0 ? 0 : 1 + (uint64_t)Range;
+ int64_t Range = (int64_t) getInt64(range, false);
+ return Range < 0 ? 0 : 1 + (uint64_t) Range;
}
/// set_decl_index - Associate a non-negative number with the given GCC
@@ -251,10 +249,10 @@
assert(isa<FIELD_DECL>(decl) && "Expected a FIELD_DECL!");
// FIXME: The following test sometimes fails when compiling Fortran90 because
// DECL_CONTEXT does not point to the containing type, but some other type!
-// assert(Ty == ConvertType(DECL_CONTEXT(decl)) && "Field not for this type!");
+ // assert(Ty == ConvertType(DECL_CONTEXT(decl)) && "Field not for this type!");
// If we previously cached the field index, return the cached value.
- unsigned Index = (unsigned)get_decl_index(decl);
+ unsigned Index = (unsigned) get_decl_index(decl);
if (Index <= INT_MAX)
return Index;
@@ -342,7 +340,6 @@
return isInt64(TYPE_SIZE(type), true);
}
-
//===----------------------------------------------------------------------===//
// Matching LLVM types with GCC trees
//===----------------------------------------------------------------------===//
@@ -364,8 +361,7 @@
uint64_t LLVMSize = getDataLayout().getTypeAllocSizeInBits(Ty);
if (LLVMSize != GCCSize) {
Mismatch = true;
- errs() << "GCC size: " << GCCSize << "; LLVM size: " << LLVMSize
- << "!\n";
+ errs() << "GCC size: " << GCCSize << "; LLVM size: " << LLVMSize << "!\n";
}
}
// Check that the LLVM type has the same alignment or less than the GCC type.
@@ -375,7 +371,7 @@
if (LLVMAlign > GCCAlign) {
Mismatch = true;
errs() << "GCC align: " << GCCAlign << "; LLVM align: " << LLVMAlign
- << "\n";
+ << "\n";
}
}
if (Mismatch) {
@@ -387,7 +383,7 @@
}
#endif
- (void)type;
+ (void) type;
return Ty;
}
@@ -399,7 +395,6 @@
return Ty;
}
-
//===----------------------------------------------------------------------===//
// Type Conversion Utilities
//===----------------------------------------------------------------------===//
@@ -418,7 +413,6 @@
!isa<INTEGER_CST>(TYPE_SIZE(Type));
}
-
//===----------------------------------------------------------------------===//
// ... getRegType ...
//===----------------------------------------------------------------------===//
@@ -429,8 +423,8 @@
Type *getRegType(tree type) {
// Check that the type mode doesn't depend on the type variant (various bits
// of the plugin rely on this).
- assert(TYPE_MODE(type) == TYPE_MODE(TYPE_MAIN_VARIANT(type))
- && "Type mode differs between variants!");
+ assert(TYPE_MODE(type) == TYPE_MODE(TYPE_MAIN_VARIANT(type)) &&
+ "Type mode differs between variants!");
// LLVM doesn't care about variants such as const, volatile, or restrict.
type = TYPE_MAIN_VARIANT(type);
@@ -471,8 +465,8 @@
case REFERENCE_TYPE: {
// void* -> byte*
unsigned AS = TYPE_ADDR_SPACE(type);
- return isa<VOID_TYPE>(TREE_TYPE(type)) ? GetUnitPointerType(Context, AS) :
- ConvertType(TREE_TYPE(type))->getPointerTo(AS);
+ return isa<VOID_TYPE>(TREE_TYPE(type)) ? GetUnitPointerType(Context, AS) :
+ ConvertType(TREE_TYPE(type))->getPointerTo(AS);
}
case REAL_TYPE:
@@ -486,19 +480,19 @@
#ifdef TARGET_POWERPC
return Type::getPPC_FP128Ty(Context);
#else
- // IEEE quad precision.
- return Type::getFP128Ty(Context);
+ // IEEE quad precision.
+ return Type::getFP128Ty(Context);
#endif
debug_tree(type);
llvm_unreachable("Unknown FP type!");
case VECTOR_TYPE:
- return VectorType::get(getRegType(TREE_TYPE(type)), TYPE_VECTOR_SUBPARTS(type));
+ return VectorType::get(getRegType(TREE_TYPE(type)),
+ TYPE_VECTOR_SUBPARTS(type));
}
}
-
//===----------------------------------------------------------------------===//
// ... ConvertType ...
//===----------------------------------------------------------------------===//
@@ -525,7 +519,7 @@
// gives a constant size.
if (isInt64(TYPE_SIZE(type), true)) {
uint64_t PadBits = getInt64(TYPE_SIZE(type), true) -
- getDataLayout().getTypeAllocSizeInBits(Ty);
+ getDataLayout().getTypeAllocSizeInBits(Ty);
if (PadBits) {
Type *Padding = ArrayType::get(Type::getInt8Ty(Context), PadBits / 8);
Ty = StructType::get(Ty, Padding, NULL);
@@ -536,113 +530,105 @@
}
namespace {
- class FunctionTypeConversion : public DefaultABIClient {
- Type *&RetTy;
- SmallVectorImpl<Type*> &ArgTypes;
- CallingConv::ID &CallingConv;
- unsigned Offset;
- bool isShadowRet;
- bool KNRPromotion;
- public:
- FunctionTypeConversion(Type *&retty, SmallVectorImpl<Type*> &AT,
- CallingConv::ID &CC, bool KNR)
+class FunctionTypeConversion : public DefaultABIClient {
+ Type *&RetTy;
+ SmallVectorImpl<Type *> &ArgTypes;
+ CallingConv::ID &CallingConv;
+ unsigned Offset;
+ bool isShadowRet;
+ bool KNRPromotion;
+public:
+ FunctionTypeConversion(Type *&retty, SmallVectorImpl<Type *> &AT,
+ CallingConv::ID &CC, bool KNR)
: RetTy(retty), ArgTypes(AT), CallingConv(CC), Offset(0),
KNRPromotion(KNR) {
- CallingConv = CallingConv::C;
- isShadowRet = false;
- }
+ CallingConv = CallingConv::C;
+ isShadowRet = false;
+ }
- /// getCallingConv - This provides the desired CallingConv for the function.
- CallingConv::ID getCallingConv(void) { return CallingConv; }
+ /// getCallingConv - This provides the desired CallingConv for the function.
+ CallingConv::ID getCallingConv(void) { return CallingConv; }
- bool isShadowReturn() const { return isShadowRet; }
+ bool isShadowReturn() const { return isShadowRet; }
- /// HandleScalarResult - This callback is invoked if the function returns a
- /// simple scalar result value.
- void HandleScalarResult(Type *RTy) {
- this->RetTy = RTy;
- }
+ /// HandleScalarResult - This callback is invoked if the function returns a
+ /// simple scalar result value.
+ void HandleScalarResult(Type *RTy) { this->RetTy = RTy; }
- /// HandleAggregateResultAsScalar - This callback is invoked if the function
- /// returns an aggregate value by bit converting it to the specified scalar
- /// type and returning that.
- void HandleAggregateResultAsScalar(Type *ScalarTy, unsigned Off=0) {
- RetTy = ScalarTy;
- this->Offset = Off;
- }
+ /// HandleAggregateResultAsScalar - This callback is invoked if the function
+ /// returns an aggregate value by bit converting it to the specified scalar
+ /// type and returning that.
+ void HandleAggregateResultAsScalar(Type *ScalarTy, unsigned Off = 0) {
+ RetTy = ScalarTy;
+ this->Offset = Off;
+ }
- /// HandleAggregateResultAsAggregate - This callback is invoked if the function
- /// returns an aggregate value using multiple return values.
- void HandleAggregateResultAsAggregate(Type *AggrTy) {
- RetTy = AggrTy;
- }
+ /// HandleAggregateResultAsAggregate - This callback is invoked if the function
+ /// returns an aggregate value using multiple return values.
+ void HandleAggregateResultAsAggregate(Type *AggrTy) { RetTy = AggrTy; }
- /// HandleShadowResult - Handle an aggregate or scalar shadow argument.
- void HandleShadowResult(PointerType *PtrArgTy, bool RetPtr) {
- // This function either returns void or the shadow argument,
- // depending on the target.
- RetTy = RetPtr ? PtrArgTy : Type::getVoidTy(Context);
+ /// HandleShadowResult - Handle an aggregate or scalar shadow argument.
+ void HandleShadowResult(PointerType *PtrArgTy, bool RetPtr) {
+ // This function either returns void or the shadow argument,
+ // depending on the target.
+ RetTy = RetPtr ? PtrArgTy : Type::getVoidTy(Context);
- // In any case, there is a dummy shadow argument though!
- ArgTypes.push_back(PtrArgTy);
+ // In any case, there is a dummy shadow argument though!
+ ArgTypes.push_back(PtrArgTy);
- // Also, note the use of a shadow argument.
- isShadowRet = true;
- }
+ // Also, note the use of a shadow argument.
+ isShadowRet = true;
+ }
- /// HandleAggregateShadowResult - This callback is invoked if the function
- /// returns an aggregate value by using a "shadow" first parameter, which is
- /// a pointer to the aggregate, of type PtrArgTy. If RetPtr is set to true,
- /// the pointer argument itself is returned from the function.
- void HandleAggregateShadowResult(PointerType *PtrArgTy,
- bool RetPtr) {
- HandleShadowResult(PtrArgTy, RetPtr);
- }
+ /// HandleAggregateShadowResult - This callback is invoked if the function
+ /// returns an aggregate value by using a "shadow" first parameter, which is
+ /// a pointer to the aggregate, of type PtrArgTy. If RetPtr is set to true,
+ /// the pointer argument itself is returned from the function.
+ void HandleAggregateShadowResult(PointerType *PtrArgTy, bool RetPtr) {
+ HandleShadowResult(PtrArgTy, RetPtr);
+ }
- /// HandleScalarShadowResult - This callback is invoked if the function
- /// returns a scalar value by using a "shadow" first parameter, which is a
- /// pointer to the scalar, of type PtrArgTy. If RetPtr is set to true,
- /// the pointer argument itself is returned from the function.
- void HandleScalarShadowResult(PointerType *PtrArgTy, bool RetPtr) {
- HandleShadowResult(PtrArgTy, RetPtr);
- }
+ /// HandleScalarShadowResult - This callback is invoked if the function
+ /// returns a scalar value by using a "shadow" first parameter, which is a
+ /// pointer to the scalar, of type PtrArgTy. If RetPtr is set to true,
+ /// the pointer argument itself is returned from the function.
+ void HandleScalarShadowResult(PointerType *PtrArgTy, bool RetPtr) {
+ HandleShadowResult(PtrArgTy, RetPtr);
+ }
- void HandlePad(llvm::Type *LLVMTy) {
- HandleScalarArgument(LLVMTy, 0, 0);
- }
+ void HandlePad(llvm::Type *LLVMTy) { HandleScalarArgument(LLVMTy, 0, 0); }
- void HandleScalarArgument(llvm::Type *LLVMTy, tree type,
- unsigned /*RealSize*/ = 0) {
- if (KNRPromotion) {
- if (type == float_type_node)
- LLVMTy = ConvertType(double_type_node);
- else if (LLVMTy->isIntegerTy(16) || LLVMTy->isIntegerTy(8) ||
- LLVMTy->isIntegerTy(1))
- LLVMTy = Type::getInt32Ty(Context);
- }
- ArgTypes.push_back(LLVMTy);
+ void HandleScalarArgument(llvm::Type *LLVMTy, tree type,
+ unsigned /*RealSize*/ = 0) {
+ if (KNRPromotion) {
+ if (type == float_type_node)
+ LLVMTy = ConvertType(double_type_node);
+ else if (LLVMTy->isIntegerTy(16) || LLVMTy->isIntegerTy(8) ||
+ LLVMTy->isIntegerTy(1))
+ LLVMTy = Type::getInt32Ty(Context);
}
+ ArgTypes.push_back(LLVMTy);
+ }
- /// HandleByInvisibleReferenceArgument - This callback is invoked if a pointer
- /// (of type PtrTy) to the argument is passed rather than the argument itself.
- void HandleByInvisibleReferenceArgument(llvm::Type *PtrTy,
- tree /*type*/) {
- ArgTypes.push_back(PtrTy);
- }
+ /// HandleByInvisibleReferenceArgument - This callback is invoked if a pointer
+ /// (of type PtrTy) to the argument is passed rather than the argument itself.
+ void HandleByInvisibleReferenceArgument(llvm::Type *PtrTy, tree /*type*/) {
+ ArgTypes.push_back(PtrTy);
+ }
- /// HandleByValArgument - This callback is invoked if the aggregate function
- /// argument is passed by value. It is lowered to a parameter passed by
- /// reference with an additional parameter attribute "ByVal".
- void HandleByValArgument(llvm::Type *LLVMTy, tree type) {
- HandleScalarArgument(LLVMTy->getPointerTo(), type);
- }
+ /// HandleByValArgument - This callback is invoked if the aggregate function
+ /// argument is passed by value. It is lowered to a parameter passed by
+ /// reference with an additional parameter attribute "ByVal".
+ void HandleByValArgument(llvm::Type *LLVMTy, tree type) {
+ HandleScalarArgument(LLVMTy->getPointerTo(), type);
+ }
- /// HandleFCAArgument - This callback is invoked if the aggregate function
- /// argument is a first class aggregate passed by value.
- void HandleFCAArgument(llvm::Type *LLVMTy, tree /*type*/) {
- ArgTypes.push_back(LLVMTy);
- }
- };
+ /// HandleFCAArgument - This callback is invoked if the aggregate function
+ /// argument is a first class aggregate passed by value.
+ void HandleFCAArgument(llvm::Type *LLVMTy, tree /*type*/) {
+ ArgTypes.push_back(LLVMTy);
+ }
+};
}
static void HandleArgumentExtension(tree ArgTy, AttrBuilder &AttrBuilder) {
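The FunctionTypeConversion callbacks reformatted above implement the usual shadow-return lowering. As a hedged illustration (not from this patch) of the shape HandleShadowResult produces for a C function returning a large aggregate:

  // Source-level signature:
  struct Big { int a[8]; };
  struct Big get_big(void);

  // Conceptual lowered shape when RetPtr is false: void return plus a
  // hidden first pointer parameter, later marked sret/noalias.
  void get_big_lowered(struct Big *sret_arg);

  // When the target sets RetPtr, the pointer itself is returned instead:
  struct Big *get_big_lowered_retptr(struct Big *sret_arg);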
@@ -664,12 +650,11 @@
/// for the function. This method takes the DECL_ARGUMENTS list (Args), and
/// fills in Result with the argument types for the function. It returns the
/// specified result type for the function.
-FunctionType *ConvertArgListToFnType(tree type, ArrayRef<tree> Args,
- tree static_chain, bool KNRPromotion,
- CallingConv::ID &CallingConv,
- AttributeSet &PAL) {
+FunctionType *ConvertArgListToFnType(
+ tree type, ArrayRef<tree> Args, tree static_chain, bool KNRPromotion,
+ CallingConv::ID &CallingConv, AttributeSet &PAL) {
tree ReturnType = TREE_TYPE(type);
- SmallVector<Type*, 8> ArgTys;
+ SmallVector<Type *, 8> ArgTys;
Type *RetTy(Type::getVoidTy(Context));
FunctionTypeConversion Client(RetTy, ArgTys, CallingConv, KNRPromotion);
@@ -689,26 +674,25 @@
AttrBuilder RAttrBuilder;
HandleArgumentExtension(ReturnType, RAttrBuilder);
- // Allow the target to change the attributes.
+// Allow the target to change the attributes.
#ifdef TARGET_ADJUST_LLVM_RETATTR
TARGET_ADJUST_LLVM_RETATTR(type, RAttrBuilder);
#endif
if (RAttrBuilder.hasAttributes())
- Attrs.push_back(AttributeWithIndex::get(0, Attribute::get(Context,
- RAttrBuilder)));
+ Attrs.push_back(
+ AttributeWithIndex::get(0, Attribute::get(Context, RAttrBuilder)));
// If this function returns via a shadow argument, the dest loc is passed
// in as a pointer. Mark that pointer as struct-ret and noalias.
if (ABIConverter.isShadowReturn()) {
AttrBuilder B;
- B.addAttribute(Attribute::StructRet)
- .addAttribute(Attribute::NoAlias);
+ B.addAttribute(Attribute::StructRet).addAttribute(Attribute::NoAlias);
Attrs.push_back(AttributeWithIndex::get(ArgTys.size(),
Attribute::get(Context, B)));
}
- std::vector<Type*> ScalarArgs;
+ std::vector<Type *> ScalarArgs;
if (static_chain) {
// Pass the static chain as the first parameter.
ABIConverter.HandleArgument(TREE_TYPE(static_chain), ScalarArgs);
@@ -733,9 +717,8 @@
PAttrBuilder.addAttribute(Attribute::NoAlias);
if (PAttrBuilder.hasAttributes())
- Attrs.push_back(AttributeWithIndex::get(ArgTys.size(),
- Attribute::get(Context,
- PAttrBuilder)));
+ Attrs.push_back(AttributeWithIndex::get(
+ ArgTys.size(), Attribute::get(Context, PAttrBuilder)));
}
PAL = AttributeSet::get(Context, Attrs);
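A note on the AttributeWithIndex indices used throughout these hunks, as inferred from the calls themselves: index 0 carries return-value attributes, index i (1-based) carries the i-th parameter's attributes, and ~0 carries function-level attributes.

  // Mnemonic only; these constants are illustrative, not part of the API.
  unsigned RetIdx = 0, FirstParamIdx = 1, FnIdx = ~0U;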
@@ -746,11 +729,12 @@
CallingConv::ID &CallingConv,
AttributeSet &PAL) {
Type *RetTy = Type::getVoidTy(Context);
- SmallVector<Type*, 8> ArgTypes;
- FunctionTypeConversion Client(RetTy, ArgTypes, CallingConv, false/*not K&R*/);
+ SmallVector<Type *, 8> ArgTypes;
+ FunctionTypeConversion Client(RetTy, ArgTypes, CallingConv,
+ false /*not K&R*/);
DefaultABI ABIConverter(Client);
- // Allow the target to set the CC for things like fastcall etc.
+// Allow the target to set the CC for things like fastcall etc.
#ifdef TARGET_ADJUST_LLVM_CC
TARGET_ADJUST_LLVM_CC(CallingConv, type);
#endif
@@ -788,7 +772,7 @@
// 'sret' functions cannot be 'readnone' or 'readonly'.
if (ABIConverter.isShadowReturn()) {
FnAttrBuilder.removeAttribute(Attribute::ReadNone)
- .removeAttribute(Attribute::ReadOnly);
+ .removeAttribute(Attribute::ReadOnly);
}
// Demote 'readnone' nested functions to 'readonly' since
@@ -803,7 +787,7 @@
AttrBuilder RAttrBuilder;
HandleArgumentExtension(TREE_TYPE(type), RAttrBuilder);
- // Allow the target to change the attributes.
+// Allow the target to change the attributes.
#ifdef TARGET_ADJUST_LLVM_RETATTR
TARGET_ADJUST_LLVM_RETATTR(type, RAttrBuilder);
#endif
@@ -813,20 +797,19 @@
RAttrBuilder.addAttribute(Attribute::NoAlias);
if (RAttrBuilder.hasAttributes())
- Attrs.push_back(AttributeWithIndex::get(0, Attribute::get(Context,
- RAttrBuilder)));
+ Attrs.push_back(
+ AttributeWithIndex::get(0, Attribute::get(Context, RAttrBuilder)));
// If this function returns via a shadow argument, the dest loc is passed
// in as a pointer. Mark that pointer as struct-ret and noalias.
if (ABIConverter.isShadowReturn()) {
AttrBuilder B;
- B.addAttribute(Attribute::StructRet)
- .addAttribute(Attribute::NoAlias);
+ B.addAttribute(Attribute::StructRet).addAttribute(Attribute::NoAlias);
Attrs.push_back(AttributeWithIndex::get(ArgTypes.size(),
Attribute::get(Context, B)));
}
- std::vector<Type*> ScalarArgs;
+ std::vector<Type *> ScalarArgs;
if (static_chain) {
// Pass the static chain as the first parameter.
ABIConverter.HandleArgument(TREE_TYPE(static_chain), ScalarArgs);
@@ -850,7 +833,7 @@
tree DeclArgs = (decl) ? DECL_ARGUMENTS(decl) : NULL;
// Loop over all of the arguments, adding them as we go.
tree Args = TYPE_ARG_TYPES(type);
- for (; Args && TREE_VALUE(Args) != void_type_node; Args = TREE_CHAIN(Args)){
+ for (; Args && TREE_VALUE(Args) != void_type_node; Args = TREE_CHAIN(Args)) {
tree ArgTy = TREE_VALUE(Args);
if (!isPassedByInvisibleReference(ArgTy))
if (const StructType *STy = dyn_cast<StructType>(ConvertType(ArgTy)))
@@ -862,7 +845,7 @@
ArgTypes.clear();
else
// Don't nuke last argument.
- ArgTypes.erase(ArgTypes.begin()+1, ArgTypes.end());
+ ArgTypes.erase(ArgTypes.begin() + 1, ArgTypes.end());
Args = 0;
break;
}
@@ -899,8 +882,8 @@
// If the argument is split into multiple scalars, assign the
// attributes to all scalars of the aggregate.
for (unsigned i = OldSize + 1; i <= ArgTypes.size(); ++i)
- Attrs.push_back(AttributeWithIndex::get
- (i, Attribute::get(Context, PAttrBuilder)));
+ Attrs.push_back(
+ AttributeWithIndex::get(i, Attribute::get(Context, PAttrBuilder)));
}
if (DeclArgs)
@@ -914,14 +897,13 @@
// readonly/readnone functions.
if (HasByVal)
FnAttrBuilder.removeAttribute(Attribute::ReadNone)
- .removeAttribute(Attribute::ReadOnly);
+ .removeAttribute(Attribute::ReadOnly);
assert(RetTy && "Return type not specified!");
if (FnAttrBuilder.hasAttributes())
- Attrs.push_back(AttributeWithIndex::get(~0,
- Attribute::get(Context,
- FnAttrBuilder)));
+ Attrs.push_back(
+ AttributeWithIndex::get(~0, Attribute::get(Context, FnAttrBuilder)));
// Finally, make the function type and result attributes.
PAL = AttributeSet::get(Context, Attrs);
@@ -992,8 +974,8 @@
Type *Ty; // The type. May be null if the range is empty.
uint64_t Starts; // The first bit of the type is positioned at this offset.
- TypedRange(BitRange r, Type *t, uint64_t starts) :
- R(r), Ty(t), Starts(starts) {
+ TypedRange(BitRange r, Type *t, uint64_t starts)
+ : R(r), Ty(t), Starts(starts) {
assert((R.empty() || Ty) && "Need type when range not empty!");
}
@@ -1026,8 +1008,10 @@
}
// Copy assignment operator.
- TypedRange &operator=(const TypedRange &other) {
- R = other.R; Ty = other.Ty; Starts = other.Starts;
+ TypedRange &operator=(const TypedRange & other) {
+ R = other.R;
+ Ty = other.Ty;
+ Starts = other.Starts;
return *this;
}
@@ -1114,12 +1098,13 @@
// Get the size of the type in bits. If the type has variable or ginormous
// size then it is convenient to pretend it is "infinitely" big.
uint64_t TypeSize = isInt64(TYPE_SIZE(type), true) ?
- getInt64(TYPE_SIZE(type), true) : ~0UL;
+ getInt64(TYPE_SIZE(type), true) : ~0UL;
// Record all interesting fields so they can easily be visited backwards.
SmallVector<tree, 16> Fields;
for (tree field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) {
- if (!isa<FIELD_DECL>(field)) continue;
+ if (!isa<FIELD_DECL>(field))
+ continue;
// Ignore fields with variable or unknown position since they cannot be
// represented by the LLVM type system.
if (!OffsetIsLLVMCompatible(field))
@@ -1132,7 +1117,8 @@
// initialize the first union member, which is needed if the zero constant
// is to be used as the default value for the union type.
for (SmallVector<tree, 16>::reverse_iterator I = Fields.rbegin(),
- E = Fields.rend(); I != E; ++I) {
+ E = Fields.rend();
+ I != E; ++I) {
tree field = *I;
uint64_t FirstBit = getFieldOffsetInBits(field);
assert(FirstBit <= TypeSize && "Field off end of type!");
@@ -1194,7 +1180,7 @@
// Create the elements that will make up the struct type. As well as the
// fields themselves there may also be padding elements.
- std::vector<Type*> Elts;
+ std::vector<Type *> Elts;
Elts.reserve(Layout.getNumIntervals());
uint64_t EndOfPrevious = 0; // Offset of first bit after previous element.
for (unsigned i = 0, e = Layout.getNumIntervals(); i != e; ++i) {
@@ -1413,9 +1399,8 @@
// Return an opaque struct for an incomplete record type.
assert(isa<RECORD_OR_UNION_TYPE>(type) && "Unexpected incomplete type!");
- return RememberTypeConversion(type,
- StructType::create(Context,
- getDescriptiveName(type)));
+ return RememberTypeConversion(
+ type, StructType::create(Context, getDescriptiveName(type)));
}
// From here on we are only dealing with straightforward types.
@@ -1508,94 +1493,94 @@
/// for more information about the type graph and self-referential types.
namespace {
- class RecursiveTypeIterator {
- // This class wraps an iterator that visits all contained types, and just
- // increments the iterator over any contained types that will not recurse.
- ContainedTypeIterator I;
-
- /// SkipNonRecursiveTypes - Increment the wrapped iterator over any types
- /// that mayRecurse says can be converted directly without having to worry
- /// about self-recursion.
- void SkipNonRecursiveTypes() {
- while (I != ContainedTypeIterator::end() &&
- !(isa<TYPE>(*I) && mayRecurse(TYPE_MAIN_VARIANT(*I))))
- ++I;
- }
+class RecursiveTypeIterator {
+ // This class wraps an iterator that visits all contained types, and just
+ // increments the iterator over any contained types that will not recurse.
+ ContainedTypeIterator I;
+
+ /// SkipNonRecursiveTypes - Increment the wrapped iterator over any types
+ /// that mayRecurse says can be converted directly without having to worry
+ /// about self-recursion.
+ void SkipNonRecursiveTypes() {
+ while (I != ContainedTypeIterator::end() &&
+ !(isa<TYPE>(*I) && mayRecurse(TYPE_MAIN_VARIANT(*I))))
+ ++I;
+ }
- /// RecursiveTypeIterator - Convenience constructor for internal use.
- explicit RecursiveTypeIterator(const ContainedTypeIterator& i) : I(i) {}
+ /// RecursiveTypeIterator - Convenience constructor for internal use.
+ explicit RecursiveTypeIterator(const ContainedTypeIterator &i) : I(i) {}
- public:
+public:
- /// Dereference operator returning the main variant of the contained type.
- tree operator*() {
- return TYPE_MAIN_VARIANT(*I);
- };
-
- /// Comparison operators.
- bool operator==(const RecursiveTypeIterator &other) const {
- return other.I == this->I;
- }
- bool operator!=(const RecursiveTypeIterator &other) const {
- return !(*this == other);
- }
+ /// Dereference operator returning the main variant of the contained type.
+ tree operator*() { return TYPE_MAIN_VARIANT(*I); }
+ ;
- /// Postfix increment operator.
- RecursiveTypeIterator operator++(int) {
- RecursiveTypeIterator Result(*this);
- ++(*this);
- return Result;
- }
+ /// Comparison operators.
+ bool operator==(const RecursiveTypeIterator &other) const {
+ return other.I == this->I;
+ }
+ bool operator!=(const RecursiveTypeIterator &other) const {
+ return !(*this == other);
+ }
- /// Prefix increment operator.
- RecursiveTypeIterator& operator++() {
- ++I;
- SkipNonRecursiveTypes();
- return *this;
- }
+ /// Postfix increment operator.
+ RecursiveTypeIterator operator++(int) {
+ RecursiveTypeIterator Result(*this);
+ ++(*this);
+ return Result;
+ }
- /// begin - Return an iterator referring to the first type contained in the
- /// given type.
- static RecursiveTypeIterator begin(tree type) {
- RecursiveTypeIterator R(ContainedTypeIterator::begin(type));
- R.SkipNonRecursiveTypes();
- return R;
- }
+ /// Prefix increment operator.
+ RecursiveTypeIterator &operator++() {
+ ++I;
+ SkipNonRecursiveTypes();
+ return *this;
+ }
- /// end - Return the end iterator for contained type iteration.
- static RecursiveTypeIterator end() {
- return RecursiveTypeIterator(ContainedTypeIterator::end());
- }
- };
+ /// begin - Return an iterator referring to the first type contained in the
+ /// given type.
+ static RecursiveTypeIterator begin(tree type) {
+ RecursiveTypeIterator R(ContainedTypeIterator::begin(type));
+ R.SkipNonRecursiveTypes();
+ return R;
+ }
+
+ /// end - Return the end iterator for contained type iteration.
+ static RecursiveTypeIterator end() {
+ return RecursiveTypeIterator(ContainedTypeIterator::end());
+ }
+};
} // Unnamed namespace.
// Traits for working with the graph of possibly self-referential type nodes,
// see RecursiveTypeIterator.
namespace llvm {
- template <> struct GraphTraits<tree> {
- typedef tree_node NodeType;
- typedef RecursiveTypeIterator ChildIteratorType;
- static inline NodeType *getEntryNode(tree t) {
- assert(TYPE_P(t) && "Expected a type!");
- return t;
- }
- static inline ChildIteratorType child_begin(tree type) {
- return ChildIteratorType::begin(type);
- }
- static inline ChildIteratorType child_end(tree) {
- return ChildIteratorType::end();
- }
- };
+template <> struct GraphTraits<tree> {
+ typedef tree_node NodeType;
+ typedef RecursiveTypeIterator ChildIteratorType;
+ static inline NodeType *getEntryNode(tree t) {
+ assert(TYPE_P(t) && "Expected a type!");
+ return t;
+ }
+ static inline ChildIteratorType child_begin(tree type) {
+ return ChildIteratorType::begin(type);
+ }
+ static inline ChildIteratorType child_end(tree) {
+ return ChildIteratorType::end();
+ }
+};
}
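The RecursiveTypeIterator/GraphTraits machinery above exists because GCC types can refer back to themselves, so contained types cannot simply be converted depth-first. The classic case (illustrative):

  // Converting List naively would recurse forever: the contained pointer
  // type refers back to List itself, hence the SCC-style traversal.
  struct List {
    int Value;
    struct List *Next;
  };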
Type *ConvertType(tree type) {
- if (type == error_mark_node) return Type::getInt32Ty(Context);
+ if (type == error_mark_node)
+ return Type::getInt32Ty(Context);
// Check that the type mode doesn't depend on the type variant (various bits
// of the plugin rely on this).
- assert(TYPE_MODE(type) == TYPE_MODE(TYPE_MAIN_VARIANT(type))
- && "Type mode differs between variants!");
+ assert(TYPE_MODE(type) == TYPE_MODE(TYPE_MAIN_VARIANT(type)) &&
+ "Type mode differs between variants!");
// LLVM doesn't care about variants such as const, volatile, or restrict.
type = TYPE_MAIN_VARIANT(type);
Modified: dragonegg/trunk/src/arm/Target.cpp
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/src/arm/Target.cpp?rev=173245&r1=173244&r2=173245&view=diff
==============================================================================
--- dragonegg/trunk/src/arm/Target.cpp (original)
+++ dragonegg/trunk/src/arm/Target.cpp Wed Jan 23 03:54:28 2013
@@ -75,16 +75,13 @@
// Classify type according to the number of fundamental data types contained
// among its members. Returns true if type is a homogeneous aggregate.
-static bool
-vfp_arg_homogeneous_aggregate_p(enum machine_mode mode, tree type,
- int *fdt_counts)
-{
+static bool vfp_arg_homogeneous_aggregate_p(enum machine_mode mode, tree type,
+ int *fdt_counts) {
bool result = false;
- HOST_WIDE_INT bytes =
- (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
+ HOST_WIDE_INT bytes = (mode == BLKmode) ? int_size_in_bytes(type) :
+ (int) GET_MODE_SIZE(mode);
- if (type && isa<AGGREGATE_TYPE>(type))
- {
+ if (type && isa<AGGREGATE_TYPE>(type)) {
int i;
int cnt = 0;
tree field;
@@ -94,28 +91,27 @@
return 0;
// Classify each field of records.
- switch (TREE_CODE (type))
- {
- case RECORD_TYPE:
+ switch (TREE_CODE(type)) {
+ case RECORD_TYPE:
// For classes first merge in the field of the subclasses.
- if (TYPE_BINFO (type)) {
+ if (TYPE_BINFO(type)) {
tree binfo, base_binfo;
int basenum;
- for (binfo = TYPE_BINFO (type), basenum = 0;
- BINFO_BASE_ITERATE (binfo, basenum, base_binfo); basenum++) {
- tree type = BINFO_TYPE (base_binfo);
+ for (binfo = TYPE_BINFO(type), basenum = 0;
+ BINFO_BASE_ITERATE(binfo, basenum, base_binfo); basenum++) {
+ tree type = BINFO_TYPE(base_binfo);
- result = vfp_arg_homogeneous_aggregate_p(TYPE_MODE (type), type,
+ result = vfp_arg_homogeneous_aggregate_p(TYPE_MODE(type), type,
fdt_counts);
if (!result)
return false;
}
}
// And now merge the fields of structure.
- for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field)) {
+ for (field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) {
if (isa<FIELD_DECL>(field)) {
- if (TREE_TYPE (field) == error_mark_node)
+ if (TREE_TYPE(field) == error_mark_node)
continue;
result = vfp_arg_homogeneous_aggregate_p(TYPE_MODE(TREE_TYPE(field)),
@@ -127,74 +123,69 @@
}
break;
- case ARRAY_TYPE:
+ case ARRAY_TYPE:
// Arrays are handled as small records.
{
- int array_fdt_counts[ARM_FDT_MAX] = { 0 };
+ int array_fdt_counts[ARM_FDT_MAX] = { 0 };
- result = vfp_arg_homogeneous_aggregate_p(TYPE_MODE(TREE_TYPE(type)),
- TREE_TYPE(type),
- array_fdt_counts);
-
- cnt = bytes / int_size_in_bytes(TREE_TYPE(type));
- for (i = 0; i < ARM_FDT_MAX; ++i)
- fdt_counts[i] += array_fdt_counts[i] * cnt;
+ result = vfp_arg_homogeneous_aggregate_p(TYPE_MODE(TREE_TYPE(type)),
+ TREE_TYPE(type),
+ array_fdt_counts);
- if (!result)
- return false;
- }
- break;
+ cnt = bytes / int_size_in_bytes(TREE_TYPE(type));
+ for (i = 0; i < ARM_FDT_MAX; ++i)
+ fdt_counts[i] += array_fdt_counts[i] * cnt;
- case UNION_TYPE:
- case QUAL_UNION_TYPE:
- {
- // Unions are similar to RECORD_TYPE.
- int union_fdt_counts[ARM_FDT_MAX] = { 0 };
+ if (!result)
+ return false;
+ } break;
- // Unions are not derived.
- gcc_assert (!TYPE_BINFO (type)
- || !BINFO_N_BASE_BINFOS (TYPE_BINFO (type)));
- for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field)) {
- int union_field_fdt_counts[ARM_FDT_MAX] = { 0 };
-
- if (isa<FIELD_DECL>(field)) {
- if (TREE_TYPE (field) == error_mark_node)
- continue;
-
- result = vfp_arg_homogeneous_aggregate_p(
- TYPE_MODE(TREE_TYPE(field)),
- TREE_TYPE(field),
- union_field_fdt_counts);
- if (!result)
- return false;
-
- // track largest union field
- for (i = 0; i < ARM_FDT_MAX; ++i) {
- if (union_field_fdt_counts[i] > 4) // bail early if we can
- return false;
-
- union_fdt_counts[i] = MAX(union_fdt_counts[i],
- union_field_fdt_counts[i]);
- union_field_fdt_counts[i] = 0; // clear it out for next iter
- }
- }
- }
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE: {
+ // Unions are similar to RECORD_TYPE.
+ int union_fdt_counts[ARM_FDT_MAX] = { 0 };
+
+ // Unions are not derived.
+ gcc_assert(!TYPE_BINFO(type) || !BINFO_N_BASE_BINFOS(TYPE_BINFO(type)));
+ for (field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) {
+ int union_field_fdt_counts[ARM_FDT_MAX] = { 0 };
- // check for only one type across all union fields
- cnt = 0;
- for (i = 0; i < ARM_FDT_MAX; ++i) {
- if (union_fdt_counts[i])
- ++cnt;
+ if (isa<FIELD_DECL>(field)) {
+ if (TREE_TYPE(field) == error_mark_node)
+ continue;
- if (cnt > 1)
+ result = vfp_arg_homogeneous_aggregate_p(TYPE_MODE(TREE_TYPE(field)),
+ TREE_TYPE(field),
+ union_field_fdt_counts);
+ if (!result)
+ return false;
+
+ // track largest union field
+ for (i = 0; i < ARM_FDT_MAX; ++i) {
+ if (union_field_fdt_counts[i] > 4) // bail early if we can
return false;
- fdt_counts[i] += union_fdt_counts[i];
+ union_fdt_counts[i] = MAX(union_fdt_counts[i],
+ union_field_fdt_counts[i]);
+ union_field_fdt_counts[i] = 0; // clear it out for next iter
}
}
- break;
+ }
- default:
+ // check for only one type across all union fields
+ cnt = 0;
+ for (i = 0; i < ARM_FDT_MAX; ++i) {
+ if (union_fdt_counts[i])
+ ++cnt;
+
+ if (cnt > 1)
+ return false;
+
+ fdt_counts[i] += union_fdt_counts[i];
+ }
+ } break;
+
+ default:
llvm_unreachable("What type is this?");
}
@@ -219,40 +210,28 @@
return true;
}
- if (type)
- {
+ if (type) {
int idx = 0;
int cnt = 0;
- switch (TREE_CODE(type))
- {
+ switch (TREE_CODE(type)) {
case REAL_TYPE:
- idx = (TYPE_PRECISION(type) == 32) ?
- ARM_FDT_FLOAT :
- ((TYPE_PRECISION(type) == 64) ?
- ARM_FDT_DOUBLE :
- ARM_FDT_INVALID);
+ idx = (TYPE_PRECISION(type) == 32) ? ARM_FDT_FLOAT :
+ ((TYPE_PRECISION(type) == 64) ? ARM_FDT_DOUBLE : ARM_FDT_INVALID);
cnt = 1;
break;
- case COMPLEX_TYPE:
- {
- tree subtype = TREE_TYPE(type);
- idx = (TYPE_PRECISION(subtype) == 32) ?
- ARM_FDT_FLOAT :
- ((TYPE_PRECISION(subtype) == 64) ?
- ARM_FDT_DOUBLE :
- ARM_FDT_INVALID);
- cnt = 2;
- }
- break;
+ case COMPLEX_TYPE: {
+ tree subtype = TREE_TYPE(type);
+ idx = (TYPE_PRECISION(subtype) == 32) ? ARM_FDT_FLOAT :
+ ((TYPE_PRECISION(subtype) == 64) ? ARM_FDT_DOUBLE :
+ ARM_FDT_INVALID);
+ cnt = 2;
+ } break;
case VECTOR_TYPE:
- idx = (bytes == 8) ?
- ARM_FDT_VECTOR_64 :
- (bytes == 16) ?
- ARM_FDT_VECTOR_128 :
- ARM_FDT_INVALID;
+ idx = (bytes == 8) ? ARM_FDT_VECTOR_64 : (bytes == 16) ?
+ ARM_FDT_VECTOR_128 : ARM_FDT_INVALID;
cnt = 1;
break;
@@ -264,13 +243,12 @@
case FUNCTION_TYPE:
case METHOD_TYPE:
default:
- return false; // All of these disqualify.
+ return false; // All of these disqualify.
}
fdt_counts[idx] += cnt;
return true;
- }
- else
+ } else
llvm_unreachable("what type was this?");
return false;
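To make the classification above concrete, a few hedged examples of what the fdt_counts bookkeeping accepts and rejects (the aggregate-wide limit of four elements is the AAPCS homogeneous-aggregate rule; within these hunks only the union path shows an explicit count > 4 bail-out):

  struct HFA   { double x, y; };         // one fundamental type, count 2: OK.
  struct Mixed { float x; double y; };   // two fundamental types: rejected.
  struct Wide  { float v[5]; };          // five floats, count > 4: rejected.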
@@ -279,22 +257,20 @@
// Walk over an LLVM Type that we know is a homogeneous aggregate and
// push the proper LLVM Types that represent the register types to pass
// that struct member in.
-static void push_elts(Type *Ty, std::vector<Type*> &Elts)
-{
+static void push_elts(Type *Ty, std::vector<Type *> &Elts) {
for (Type::subtype_iterator I = Ty->subtype_begin(), E = Ty->subtype_end();
I != E; ++I) {
Type *STy = *I;
if (const VectorType *VTy = dyn_cast<VectorType>(STy)) {
- switch (VTy->getBitWidth())
- {
- case 64: // v2f32
- Elts.push_back(VectorType::get(Type::getFloatTy(Context), 2));
- break;
- case 128: // v2f64
- Elts.push_back(VectorType::get(Type::getDoubleTy(Context), 2));
- break;
- default:
- assert (0 && "invalid vector type");
+ switch (VTy->getBitWidth()) {
+ case 64: // v2f32
+ Elts.push_back(VectorType::get(Type::getFloatTy(Context), 2));
+ break;
+ case 128: // v2f64
+ Elts.push_back(VectorType::get(Type::getDoubleTy(Context), 2));
+ break;
+ default:
+ assert(0 && "invalid vector type");
}
} else if (ArrayType *ATy = dyn_cast<ArrayType>(STy)) {
Type *ETy = ATy->getElementType();
@@ -308,7 +284,7 @@
}
}
-static unsigned count_num_words(std::vector<Type*> &ScalarElts) {
+static unsigned count_num_words(std::vector<Type *> &ScalarElts) {
unsigned NumWords = 0;
for (unsigned i = 0, e = ScalarElts.size(); i != e; ++i) {
Type *Ty = ScalarElts[i];
@@ -320,7 +296,7 @@
NumWords += NumWordsForType;
} else {
- assert (0 && "Unexpected type.");
+ assert(0 && "Unexpected type.");
}
}
return NumWords;
@@ -330,11 +306,9 @@
// handling of arguments is that arguments larger than 32 bits are split
// and padding arguments are added as necessary for alignment. This makes
// the IL a bit more explicit about how arguments are handled.
-extern bool
-llvm_arm_try_pass_aggregate_custom(tree type,
- std::vector<Type*>& ScalarElts,
- CallingConv::ID CC,
- struct DefaultABIClient* C) {
+extern bool llvm_arm_try_pass_aggregate_custom(
+ tree type, std::vector<Type *> &ScalarElts, CallingConv::ID CC,
+ struct DefaultABIClient *C) {
if (CC != CallingConv::ARM_AAPCS && CC != CallingConv::C)
return false;
@@ -347,22 +321,22 @@
if (Ty->isPointerTy())
return false;
- const unsigned Size = TREE_INT_CST_LOW(TYPE_SIZE(type))/8;
- const unsigned Alignment = TYPE_ALIGN(type)/8;
+ const unsigned Size = TREE_INT_CST_LOW(TYPE_SIZE(type)) / 8;
+ const unsigned Alignment = TYPE_ALIGN(type) / 8;
const unsigned NumWords = count_num_words(ScalarElts);
const bool AddPad = Alignment >= 8 && (NumWords % 2);
// First, build a type that will be bitcast to the original one and
// from where elements will be extracted.
- std::vector<Type*> Elts;
- Type* Int32Ty = Type::getInt32Ty(getGlobalContext());
+ std::vector<Type *> Elts;
+ Type *Int32Ty = Type::getInt32Ty(getGlobalContext());
const unsigned NumRegularArgs = Size / 4;
for (unsigned i = 0; i < NumRegularArgs; ++i) {
Elts.push_back(Int32Ty);
}
const unsigned RestSize = Size % 4;
llvm::Type *RestType = NULL;
- if (RestSize> 2) {
+ if (RestSize > 2) {
RestType = Type::getInt32Ty(getGlobalContext());
} else if (RestSize > 1) {
RestType = Type::getInt16Ty(getGlobalContext());
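The splitting logic above reduces an aggregate to i32 words plus at most one narrower tail. A standalone sketch of that arithmetic (illustrative; the one-byte case presumably falls through to i8 in code outside this hunk):

  #include <vector>

  enum Piece { I8, I16, I32 };

  static std::vector<Piece> SplitBytes(unsigned Size) {
    std::vector<Piece> Pieces(Size / 4, I32); // whole 32-bit words first
    switch (Size % 4) {
    case 3: Pieces.push_back(I32); break;     // RestSize > 2 -> i32
    case 2: Pieces.push_back(I16); break;     // RestSize > 1 -> i16
    case 1: Pieces.push_back(I8);  break;     // presumed i8 tail
    default: break;                           // exact multiple of 4 words
    }
    return Pieces;
  }

So a 10-byte struct becomes two i32 pieces plus an i16 tail.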
@@ -398,10 +372,8 @@
// It also returns a vector of types that correspond to the registers used
// for parameter passing. This only applies to AAPCS-VFP "homogeneous
// aggregates" as specified in 4.3.5 of the AAPCS spec.
-bool
-llvm_arm_should_pass_aggregate_in_mixed_regs(tree TreeType, Type *Ty,
- CallingConv::ID CC,
- std::vector<Type*> &Elts) {
+bool llvm_arm_should_pass_aggregate_in_mixed_regs(
+ tree TreeType, Type *Ty, CallingConv::ID CC, std::vector<Type *> &Elts) {
if (!llvm_arm_should_pass_or_return_aggregate_in_regs(TreeType, CC))
return false;
@@ -412,8 +384,7 @@
return true;
}
-static bool alloc_next_spr(bool *SPRs)
-{
+static bool alloc_next_spr(bool *SPRs) {
for (int i = 0; i < 16; ++i)
if (!SPRs[i]) {
SPRs[i] = true;
@@ -422,11 +393,10 @@
return false;
}
-static bool alloc_next_dpr(bool *SPRs)
-{
+static bool alloc_next_dpr(bool *SPRs) {
for (int i = 0; i < 16; i += 2)
if (!SPRs[i]) {
- SPRs[i] = SPRs[i+1] = true;
+ SPRs[i] = SPRs[i + 1] = true;
return true;
}
return false;
@@ -435,7 +405,7 @@
static bool alloc_next_qpr(bool *SPRs) {
for (int i = 0; i < 16; i += 4)
if (!SPRs[i]) {
- SPRs[i] = SPRs[i+1] = SPRs[i+2] = SPRs[i+3] = true;
+ SPRs[i] = SPRs[i + 1] = SPRs[i + 2] = SPRs[i + 3] = true;
return true;
}
return false;
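These allocators model the VFP register file as 16 single-precision slots S0..S15, with D registers occupying two consecutive slots and Q registers four. A usage sketch (illustrative):

  bool SPRs[16] = { false };
  // alloc_next_spr(SPRs); -> takes S0
  // alloc_next_dpr(SPRs); -> S1 is skipped (D regs start at even slots),
  //                          takes S2+S3, i.e. D1
  // alloc_next_qpr(SPRs); -> takes S4..S7, i.e. Q1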
@@ -444,43 +414,41 @@
// count_num_registers_uses - Simulate argument passing reg allocation in SPRs.
// Caller is expected to zero out SPRs. Returns true if all of ScalarElts fit
// in registers.
-static bool count_num_registers_uses(std::vector<Type*> &ScalarElts,
+static bool count_num_registers_uses(std::vector<Type *> &ScalarElts,
bool *SPRs) {
for (unsigned i = 0, e = ScalarElts.size(); i != e; ++i) {
Type *Ty = ScalarElts[i];
if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
- switch (VTy->getBitWidth())
- {
- case 64:
- if (!alloc_next_dpr(SPRs))
- return false;
- break;
- case 128:
- if (!alloc_next_qpr(SPRs))
- return false;
- break;
- default:
- assert(0);
+ switch (VTy->getBitWidth()) {
+ case 64:
+ if (!alloc_next_dpr(SPRs))
+ return false;
+ break;
+ case 128:
+ if (!alloc_next_qpr(SPRs))
+ return false;
+ break;
+ default:
+ assert(0);
}
} else if (Ty->isIntegerTy() || Ty->isPointerTy() ||
- Ty==Type::getVoidTy(Context)) {
+ Ty == Type::getVoidTy(Context)) {
;
} else {
// Floating point scalar argument.
assert(Ty->isFloatingPointTy() && Ty->isPrimitiveType() &&
"Expecting a floating point primitive type!");
- switch (Ty->getTypeID())
- {
- case Type::FloatTyID:
- if (!alloc_next_spr(SPRs))
- return false;
- break;
- case Type::DoubleTyID:
- if (!alloc_next_spr(SPRs))
- return false;
- break;
- default:
- assert(0);
+ switch (Ty->getTypeID()) {
+ case Type::FloatTyID:
+ if (!alloc_next_spr(SPRs))
+ return false;
+ break;
+ case Type::DoubleTyID:
+ if (!alloc_next_spr(SPRs))
+ return false;
+ break;
+ default:
+ assert(0);
}
}
}
@@ -491,16 +459,15 @@
// in registers. If there are only enough available parameter registers to pass
// part of the aggregate, return true. That means the aggregate should instead
// be passed in memory.
-bool
-llvm_arm_aggregate_partially_passed_in_regs(std::vector<Type*> &Elts,
- std::vector<Type*> &ScalarElts,
- CallingConv::ID CC) {
+bool llvm_arm_aggregate_partially_passed_in_regs(
+ std::vector<Type *> &Elts, std::vector<Type *> &ScalarElts,
+ CallingConv::ID CC) {
// Homogeneous aggregates are an AAPCS-VFP feature.
if ((CC != CallingConv::ARM_AAPCS_VFP) ||
!(TARGET_AAPCS_BASED && TARGET_VFP && TARGET_HARD_FLOAT_ABI))
return true;
- bool SPRs[16] = { 0 }; // represents S0-S16
+ bool SPRs[16] = { 0 }; // represents S0-S16
// Figure out which SPRs are available.
if (!count_num_registers_uses(ScalarElts, SPRs))
@@ -509,19 +476,18 @@
if (!count_num_registers_uses(Elts, SPRs))
return true;
- return false; // it all fit in registers!
+ return false; // it all fit in registers!
}
// Return LLVM Type if TYPE can be returned as an aggregate,
// otherwise return NULL.
-Type *llvm_arm_aggr_type_for_struct_return(tree TreeType,
- CallingConv::ID CC) {
+Type *llvm_arm_aggr_type_for_struct_return(tree TreeType, CallingConv::ID CC) {
if (!llvm_arm_should_pass_or_return_aggregate_in_regs(TreeType, CC))
return NULL;
// Walk Ty and push LLVM types corresponding to register types onto
// Elts.
- std::vector<Type*> Elts;
+ std::vector<Type *> Elts;
Type *Ty = ConvertType(TreeType);
push_elts(Ty, Elts);
@@ -535,13 +501,10 @@
// Extract SRCFIELDNO's ELEMENO value and store it in DEST's FIELDNO field's
// ELEMENTNO.
//
-static void llvm_arm_extract_mrv_array_element(Value *Src, Value *Dest,
- unsigned SrcFieldNo,
- unsigned SrcElemNo,
- unsigned DestFieldNo,
- unsigned DestElemNo,
- LLVMBuilder &Builder,
- bool isVolatile) {
+static void llvm_arm_extract_mrv_array_element(
+ Value *Src, Value *Dest, unsigned SrcFieldNo, unsigned SrcElemNo,
+ unsigned DestFieldNo, unsigned DestElemNo, LLVMBuilder &Builder,
+ bool isVolatile) {
Value *EVI = Builder.CreateExtractValue(Src, SrcFieldNo, "mrv_gr");
const StructType *STy = cast<StructType>(Src->getType());
llvm::Value *Idxs[3];
@@ -561,9 +524,8 @@
// llvm_arm_extract_multiple_return_value - Extract multiple values returned
// by SRC and store them in DEST. It is expected that SRC and
// DEST types are StructType, but they may not match.
-void llvm_arm_extract_multiple_return_value(Value *Src, Value *Dest,
- bool isVolatile,
- LLVMBuilder &Builder) {
+void llvm_arm_extract_multiple_return_value(
+ Value *Src, Value *Dest, bool isVolatile, LLVMBuilder &Builder) {
const StructType *STy = cast<StructType>(Src->getType());
unsigned NumElements = STy->getNumElements();
@@ -582,7 +544,8 @@
Value *GEP = Builder.CreateStructGEP(Dest, DNO, "mrv_gep");
Value *EVI = Builder.CreateExtractValue(Src, SNO, "mrv_gr");
Builder.CreateAlignedStore(EVI, GEP, 1, isVolatile);
- ++DNO; ++SNO;
+ ++DNO;
+ ++SNO;
continue;
}
@@ -595,13 +558,12 @@
unsigned i = 0;
unsigned Size = 1;
- if (const VectorType *SElemTy =
- dyn_cast<VectorType>(STy->getElementType(SNO))) {
+ if (const VectorType *SElemTy = dyn_cast<VectorType>(
+ STy->getElementType(SNO))) {
Size = SElemTy->getNumElements();
}
while (i < Size) {
- llvm_arm_extract_mrv_array_element(Src, Dest, SNO, i++,
- DNO, DElemNo++,
+ llvm_arm_extract_mrv_array_element(Src, Dest, SNO, i++, DNO, DElemNo++,
Builder, isVolatile);
}
// Consumed this src field. Try next one.
Modified: dragonegg/trunk/src/x86/Target.cpp
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/src/x86/Target.cpp?rev=173245&r1=173244&r2=173245&view=diff
==============================================================================
--- dragonegg/trunk/src/x86/Target.cpp (original)
+++ dragonegg/trunk/src/x86/Target.cpp Wed Jan 23 03:54:28 2013
@@ -70,24 +70,26 @@
static Value *BitCastToIntVector(Value *Op, LLVMBuilder &Builder) {
VectorType *VecTy = cast<VectorType>(Op->getType());
Type *EltTy = VecTy->getElementType();
- Type *IntTy = IntegerType::get(Context,EltTy->getPrimitiveSizeInBits());
- return Builder.CreateBitCast(Op, VectorType::get(IntTy,
- VecTy->getNumElements()));
+ Type *IntTy = IntegerType::get(Context, EltTy->getPrimitiveSizeInBits());
+ return Builder.CreateBitCast(Op,
+ VectorType::get(IntTy, VecTy->getNumElements()));
}
/// BuiltinCode - An enumerated type with one value for each supported builtin.
enum BuiltinCode {
SearchForHandler, // Builtin not seen before - search for a handler.
- clzs, // Builtin with exceptional name.
- ctzs, // Builtin with exceptional name.
+ clzs, // Builtin with exceptional name.
+ ctzs, // Builtin with exceptional name.
#define DEFINE_BUILTIN(x) x
#include "x86_builtins"
#undef DEFINE_BUILTIN
- , UnsupportedBuiltin // There is no handler for this builtin.
+ ,
+ UnsupportedBuiltin // There is no handler for this builtin.
};
struct HandlerEntry {
- const char *Name; BuiltinCode Handler;
+ const char *Name;
+ BuiltinCode Handler;
};
static bool HandlerLT(const HandlerEntry &E, const HandlerEntry &F) {
@@ -98,12 +100,9 @@
* code, emit the code now. If we can handle the code, this macro should emit
* the code and return true.
*/
-bool TreeToLLVM::TargetIntrinsicLower(gimple stmt,
- tree fndecl,
- const MemRef * /*DestLoc*/,
- Value *&Result,
- Type *ResultType,
- std::vector<Value*> &Ops) {
+bool TreeToLLVM::TargetIntrinsicLower(
+ gimple stmt, tree fndecl, const MemRef */*DestLoc*/, Value *&Result,
+ Type *ResultType, std::vector<Value *> &Ops) {
// DECL_FUNCTION_CODE contains a value of the enumerated type ix86_builtins,
// declared in i386.c. If this type was visible to us then we could simply
// use a switch statement on DECL_FUNCTION_CODE to jump to the right code for
@@ -115,7 +114,7 @@
// The map from DECL_FUNCTION_CODE values to BuiltinCode.
static std::vector<BuiltinCode> FunctionCodeMap;
if (FunctionCodeMap.size() <= DECL_FUNCTION_CODE(fndecl))
- FunctionCodeMap.resize(DECL_FUNCTION_CODE(fndecl) + 1);
+ FunctionCodeMap.resize(DECL_FUNCTION_CODE(fndecl) + 1);
// See if we already associated a BuiltinCode with this DECL_FUNCTION_CODE.
BuiltinCode &Handler = FunctionCodeMap[DECL_FUNCTION_CODE(fndecl)];
@@ -125,9 +124,10 @@
// List of builtin names and associated BuiltinCode.
static const HandlerEntry Handlers[] = {
- {"__builtin_clzs", clzs}, // Builtin with exceptional name.
- {"__builtin_ctzs", ctzs}, // Builtin with exceptional name.
-#define DEFINE_BUILTIN(x) {"__builtin_ia32_" #x, x}
+ { "__builtin_clzs", clzs }, // Builtin with exceptional name.
+ { "__builtin_ctzs", ctzs }, // Builtin with exceptional name.
+#define DEFINE_BUILTIN(x) \
+ { "__builtin_ia32_" #x, x }
#include "x86_builtins"
#undef DEFINE_BUILTIN
};
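For reference, the macro above expands each x86_builtins entry into a table row, e.g. { "__builtin_ia32_addps", addps }. The table must stay sorted by name, which the assertion loop just below checks, presumably because the lookup code (outside this hunk) binary-searches it:

  // Hedged sketch of the kind of lookup a sorted table like this enables;
  // the actual lookup is not part of this hunk.
  // const HandlerEntry *E =
  //     std::lower_bound(Handlers, Handlers + N, Key, HandlerLT);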
@@ -137,7 +137,8 @@
static bool Checked = false;
if (!Checked) {
for (unsigned i = 1; i < N; ++i)
- assert(HandlerLT(Handlers[i-1], Handlers[i]) && "Handlers not sorted!");
+ assert(HandlerLT(Handlers[i - 1], Handlers[i]) &&
+ "Handlers not sorted!");
Checked = true;
}
#endif
@@ -158,7 +159,8 @@
case SearchForHandler:
debug_gimple_stmt(stmt);
llvm_unreachable("Unexpected builtin code!");
- case UnsupportedBuiltin: return false;
+ case UnsupportedBuiltin:
+ return false;
case addps:
case addps256:
case addpd:
@@ -288,9 +290,9 @@
case shufps:
if (ConstantInt *Elt = dyn_cast<ConstantInt>(Ops[2])) {
int EV = Elt->getZExtValue();
- Result = BuildVectorShuffle(Ops[0], Ops[1],
- ((EV & 0x03) >> 0), ((EV & 0x0c) >> 2),
- ((EV & 0x30) >> 4)+4, ((EV & 0xc0) >> 6)+4);
+ Result = BuildVectorShuffle(Ops[0], Ops[1], ((EV & 0x03) >> 0),
+ ((EV & 0x0c) >> 2), ((EV & 0x30) >> 4) +
+ 4, ((EV & 0xc0) >> 6) + 4);
} else {
error_at(gimple_location(stmt), "mask must be an immediate");
Result = Ops[0];
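A worked example of the shufps mask decoding above: the low two result lanes come from Ops[0] and the high two from Ops[1], which is why the last two indices get +4.

  // EV = 0xE4 = 0b11'10'01'00
  //   ((EV & 0x03) >> 0)     == 0
  //   ((EV & 0x0c) >> 2)     == 1
  //   ((EV & 0x30) >> 4) + 4 == 6
  //   ((EV & 0xc0) >> 6) + 4 == 7
  // -> BuildVectorShuffle(Ops[0], Ops[1], 0, 1, 6, 7)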
@@ -299,8 +301,8 @@
case shufpd:
if (ConstantInt *Elt = dyn_cast<ConstantInt>(Ops[2])) {
int EV = Elt->getZExtValue();
- Result = BuildVectorShuffle(Ops[0], Ops[1],
- ((EV & 0x01) >> 0), ((EV & 0x02) >> 1)+2);
+ Result = BuildVectorShuffle(Ops[0], Ops[1], ((EV & 0x01) >> 0),
+ ((EV & 0x02) >> 1) + 2);
} else {
error_at(gimple_location(stmt), "mask must be an immediate");
Result = Ops[0];
@@ -310,9 +312,9 @@
case pshufd:
if (ConstantInt *Elt = dyn_cast<ConstantInt>(Ops[1])) {
int EV = Elt->getZExtValue();
- Result = BuildVectorShuffle(Ops[0], Ops[0],
- ((EV & 0x03) >> 0), ((EV & 0x0c) >> 2),
- ((EV & 0x30) >> 4), ((EV & 0xc0) >> 6));
+ Result = BuildVectorShuffle(Ops[0], Ops[0], ((EV & 0x03) >> 0),
+ ((EV & 0x0c) >> 2), ((EV & 0x30) >> 4),
+ ((EV & 0xc0) >> 6));
} else {
error_at(gimple_location(stmt), "mask must be an immediate");
Result = Ops[0];
@@ -321,20 +323,19 @@
case pshufhw:
if (ConstantInt *Elt = dyn_cast<ConstantInt>(Ops[1])) {
int EV = Elt->getZExtValue();
- Result = BuildVectorShuffle(Ops[0], Ops[0],
- 0, 1, 2, 3,
- ((EV & 0x03) >> 0)+4, ((EV & 0x0c) >> 2)+4,
- ((EV & 0x30) >> 4)+4, ((EV & 0xc0) >> 6)+4);
+ Result = BuildVectorShuffle(
+ Ops[0], Ops[0], 0, 1, 2, 3, ((EV & 0x03) >> 0) + 4,
+ ((EV & 0x0c) >> 2) + 4, ((EV & 0x30) >> 4) + 4,
+ ((EV & 0xc0) >> 6) + 4);
return true;
}
return false;
case pshuflw:
if (ConstantInt *Elt = dyn_cast<ConstantInt>(Ops[1])) {
int EV = Elt->getZExtValue();
- Result = BuildVectorShuffle(Ops[0], Ops[0],
- ((EV & 0x03) >> 0), ((EV & 0x0c) >> 2),
- ((EV & 0x30) >> 4), ((EV & 0xc0) >> 6),
- 4, 5, 6, 7);
+ Result = BuildVectorShuffle(Ops[0], Ops[0], ((EV & 0x03) >> 0),
+ ((EV & 0x0c) >> 2), ((EV & 0x30) >> 4),
+ ((EV & 0xc0) >> 6), 4, 5, 6, 7);
} else {
error_at(gimple_location(stmt), "mask must be an immediate");
Result = Ops[0];
@@ -342,8 +343,7 @@
return true;
case punpckhbw:
- Result = BuildVectorShuffle(Ops[0], Ops[1], 4, 12, 5, 13,
- 6, 14, 7, 15);
+ Result = BuildVectorShuffle(Ops[0], Ops[1], 4, 12, 5, 13, 6, 14, 7, 15);
return true;
case punpckhwd:
Result = BuildVectorShuffle(Ops[0], Ops[1], 2, 6, 3, 7);
@@ -352,8 +352,7 @@
Result = BuildVectorShuffle(Ops[0], Ops[1], 1, 3);
return true;
case punpcklbw:
- Result = BuildVectorShuffle(Ops[0], Ops[1], 0, 8, 1, 9,
- 2, 10, 3, 11);
+ Result = BuildVectorShuffle(Ops[0], Ops[1], 0, 8, 1, 9, 2, 10, 3, 11);
return true;
case punpcklwd:
Result = BuildVectorShuffle(Ops[0], Ops[1], 0, 4, 1, 5);
@@ -362,10 +361,8 @@
Result = BuildVectorShuffle(Ops[0], Ops[1], 0, 2);
return true;
case punpckhbw128:
- Result = BuildVectorShuffle(Ops[0], Ops[1], 8, 24, 9, 25,
- 10, 26, 11, 27,
- 12, 28, 13, 29,
- 14, 30, 15, 31);
+ Result = BuildVectorShuffle(Ops[0], Ops[1], 8, 24, 9, 25, 10, 26, 11, 27,
+ 12, 28, 13, 29, 14, 30, 15, 31);
return true;
case punpckhwd128:
Result = BuildVectorShuffle(Ops[0], Ops[1], 4, 12, 5, 13, 6, 14, 7, 15);
@@ -377,10 +374,8 @@
Result = BuildVectorShuffle(Ops[0], Ops[1], 1, 3);
return true;
case punpcklbw128:
- Result = BuildVectorShuffle(Ops[0], Ops[1], 0, 16, 1, 17,
- 2, 18, 3, 19,
- 4, 20, 5, 21,
- 6, 22, 7, 23);
+ Result = BuildVectorShuffle(Ops[0], Ops[1], 0, 16, 1, 17, 2, 18, 3, 19, 4,
+ 20, 5, 21, 6, 22, 7, 23);
return true;
case punpcklwd128:
Result = BuildVectorShuffle(Ops[0], Ops[1], 0, 8, 1, 9, 2, 10, 3, 11);
@@ -420,17 +415,17 @@
Result = BuildVectorShuffle(Zero, Ops[0], 2, 1);
return true;
}
-//TODO IX86_BUILTIN_LOADQ: {
-//TODO PointerType *i64Ptr = Type::getInt64PtrTy(Context);
-//TODO Ops[0] = Builder.CreateBitCast(Ops[0], i64Ptr);
-//TODO Ops[0] = Builder.CreateLoad(Ops[0]);
-//TODO Value *Zero = ConstantInt::get(Type::getInt64Ty(Context), 0);
-//TODO Result = BuildVector(Zero, Zero, NULL);
-//TODO Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), 0);
-//TODO Result = Builder.CreateInsertElement(Result, Ops[0], Idx);
-//TODO Result = Builder.CreateBitCast(Result, ResultType);
-//TODO return true;
-//TODO }
+ //TODO IX86_BUILTIN_LOADQ: {
+ //TODO PointerType *i64Ptr = Type::getInt64PtrTy(Context);
+ //TODO Ops[0] = Builder.CreateBitCast(Ops[0], i64Ptr);
+ //TODO Ops[0] = Builder.CreateLoad(Ops[0]);
+ //TODO Value *Zero = ConstantInt::get(Type::getInt64Ty(Context), 0);
+ //TODO Result = BuildVector(Zero, Zero, NULL);
+ //TODO Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), 0);
+ //TODO Result = Builder.CreateInsertElement(Result, Ops[0], Idx);
+ //TODO Result = Builder.CreateBitCast(Result, ResultType);
+ //TODO return true;
+ //TODO }
case loadups: {
VectorType *v4f32 = VectorType::get(Type::getFloatTy(Context), 4);
PointerType *v4f32Ptr = v4f32->getPointerTo();
@@ -477,7 +472,8 @@
PointerType *f64Ptr = Type::getDoublePtrTy(Context);
Ops[1] = Builder.CreateBitCast(Ops[1], f64Ptr);
Value *Load = Builder.CreateLoad(Ops[1]);
- Ops[1] = BuildVector(Load, UndefValue::get(Type::getDoubleTy(Context)), NULL);
+ Ops[1] = BuildVector(Load, UndefValue::get(Type::getDoubleTy(Context)),
+ NULL);
Ops[1] = Builder.CreateBitCast(Ops[1], ResultType);
Result = BuildVectorShuffle(Ops[0], Ops[1], 0, 1, 4, 5);
Result = Builder.CreateBitCast(Result, ResultType);
@@ -487,7 +483,8 @@
PointerType *f64Ptr = Type::getDoublePtrTy(Context);
Ops[1] = Builder.CreateBitCast(Ops[1], f64Ptr);
Value *Load = Builder.CreateLoad(Ops[1]);
- Ops[1] = BuildVector(Load, UndefValue::get(Type::getDoubleTy(Context)), NULL);
+ Ops[1] = BuildVector(Load, UndefValue::get(Type::getDoubleTy(Context)),
+ NULL);
Ops[1] = Builder.CreateBitCast(Ops[1], ResultType);
Result = BuildVectorShuffle(Ops[0], Ops[1], 4, 5, 2, 3);
Result = Builder.CreateBitCast(Result, ResultType);
@@ -497,7 +494,8 @@
PointerType *f64Ptr = Type::getDoublePtrTy(Context);
Ops[1] = Builder.CreateBitCast(Ops[1], f64Ptr);
Value *Load = Builder.CreateLoad(Ops[1]);
- Ops[1] = BuildVector(Load, UndefValue::get(Type::getDoubleTy(Context)), NULL);
+ Ops[1] = BuildVector(Load, UndefValue::get(Type::getDoubleTy(Context)),
+ NULL);
Ops[1] = Builder.CreateBitCast(Ops[1], ResultType);
Result = BuildVectorShuffle(Ops[0], Ops[1], 0, 2);
Result = Builder.CreateBitCast(Result, ResultType);
@@ -507,7 +505,8 @@
PointerType *f64Ptr = Type::getDoublePtrTy(Context);
Ops[1] = Builder.CreateBitCast(Ops[1], f64Ptr);
Value *Load = Builder.CreateLoad(Ops[1]);
- Ops[1] = BuildVector(Load, UndefValue::get(Type::getDoubleTy(Context)), NULL);
+ Ops[1] = BuildVector(Load, UndefValue::get(Type::getDoubleTy(Context)),
+ NULL);
Ops[1] = Builder.CreateBitCast(Ops[1], ResultType);
Result = BuildVectorShuffle(Ops[0], Ops[1], 2, 1);
Result = Builder.CreateBitCast(Result, ResultType);
@@ -546,16 +545,16 @@
// Sometimes G++ promotes arguments to int.
for (unsigned i = 0; i != 4; ++i)
Ops[i] = Builder.CreateIntCast(Ops[i], Type::getInt16Ty(Context),
- /*isSigned*/false);
+ /*isSigned*/ false);
Result = BuildVector(Ops[0], Ops[1], Ops[2], Ops[3], NULL);
return true;
case vec_init_v8qi:
// Sometimes G++ promotes arguments to int.
for (unsigned i = 0; i != 8; ++i)
Ops[i] = Builder.CreateIntCast(Ops[i], Type::getInt8Ty(Context),
- /*isSigned*/false);
- Result = BuildVector(Ops[0], Ops[1], Ops[2], Ops[3],
- Ops[4], Ops[5], Ops[6], Ops[7], NULL);
+ /*isSigned*/ false);
+ Result = BuildVector(Ops[0], Ops[1], Ops[2], Ops[3], Ops[4], Ops[5], Ops[6],
+ Ops[7], NULL);
return true;
case vec_ext_v2si:
case vec_ext_v4hi:
@@ -570,14 +569,14 @@
case vec_set_v16qi:
// Sometimes G++ promotes arguments to int.
Ops[1] = Builder.CreateIntCast(Ops[1], Type::getInt8Ty(Context),
- /*isSigned*/false);
+ /*isSigned*/ false);
Result = Builder.CreateInsertElement(Ops[0], Ops[1], Ops[2]);
return true;
case vec_set_v4hi:
case vec_set_v8hi:
// GCC sometimes doesn't produce the right element type.
Ops[1] = Builder.CreateIntCast(Ops[1], Type::getInt16Ty(Context),
- /*isSigned*/false);
+ /*isSigned*/ false);
Result = Builder.CreateInsertElement(Ops[0], Ops[1], Ops[2]);
return true;
case vec_set_v4si:
@@ -587,83 +586,173 @@
Result = Builder.CreateInsertElement(Ops[0], Ops[1], Ops[2]);
return true;
- case cmpeqps: PredCode = 0; goto CMPXXPS;
- case cmpltps: PredCode = 1; goto CMPXXPS;
- case cmpgtps: PredCode = 1; flip = true; goto CMPXXPS;
- case cmpleps: PredCode = 2; goto CMPXXPS;
- case cmpgeps: PredCode = 2; flip = true; goto CMPXXPS;
- case cmpunordps: PredCode = 3; goto CMPXXPS;
- case cmpneqps: PredCode = 4; goto CMPXXPS;
- case cmpnltps: PredCode = 5; goto CMPXXPS;
- case cmpngtps: PredCode = 5; flip = true; goto CMPXXPS;
- case cmpnleps: PredCode = 6; goto CMPXXPS;
- case cmpngeps: PredCode = 6; flip = true; goto CMPXXPS;
- case cmpordps: PredCode = 7; goto CMPXXPS;
- CMPXXPS: {
- Function *cmpps =
- Intrinsic::getDeclaration(TheModule, Intrinsic::x86_sse_cmp_ps);
+ case cmpeqps:
+ PredCode = 0;
+ goto CMPXXPS;
+ case cmpltps:
+ PredCode = 1;
+ goto CMPXXPS;
+ case cmpgtps:
+ PredCode = 1;
+ flip = true;
+ goto CMPXXPS;
+ case cmpleps:
+ PredCode = 2;
+ goto CMPXXPS;
+ case cmpgeps:
+ PredCode = 2;
+ flip = true;
+ goto CMPXXPS;
+ case cmpunordps:
+ PredCode = 3;
+ goto CMPXXPS;
+ case cmpneqps:
+ PredCode = 4;
+ goto CMPXXPS;
+ case cmpnltps:
+ PredCode = 5;
+ goto CMPXXPS;
+ case cmpngtps:
+ PredCode = 5;
+ flip = true;
+ goto CMPXXPS;
+ case cmpnleps:
+ PredCode = 6;
+ goto CMPXXPS;
+ case cmpngeps:
+ PredCode = 6;
+ flip = true;
+ goto CMPXXPS;
+ case cmpordps:
+ PredCode = 7;
+ goto CMPXXPS;
+ CMPXXPS : {
+ Function *cmpps = Intrinsic::getDeclaration(TheModule,
+ Intrinsic::x86_sse_cmp_ps);
Value *Pred = ConstantInt::get(Type::getInt8Ty(Context), PredCode);
Value *Arg0 = Ops[0];
Value *Arg1 = Ops[1];
- if (flip) std::swap(Arg0, Arg1);
+ if (flip)
+ std::swap(Arg0, Arg1);
Value *CallOps[3] = { Arg0, Arg1, Pred };
Result = Builder.CreateCall(cmpps, CallOps);
Result = Builder.CreateBitCast(Result, ResultType);
return true;
}
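On the flip flag above: SSE cmpps only encodes lt/le (predicate codes 1 and 2), not gt/ge, so the greater-than forms are synthesized by swapping the operands.

  // cmpgtps(a, b) == cmpltps(b, a)  // PredCode 1, flip = true
  // cmpgeps(a, b) == cmpleps(b, a)  // PredCode 2, flip = true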
- case cmpeqss: PredCode = 0; goto CMPXXSS;
- case cmpltss: PredCode = 1; goto CMPXXSS;
- case cmpless: PredCode = 2; goto CMPXXSS;
- case cmpunordss: PredCode = 3; goto CMPXXSS;
- case cmpneqss: PredCode = 4; goto CMPXXSS;
- case cmpnltss: PredCode = 5; goto CMPXXSS;
- case cmpnless: PredCode = 6; goto CMPXXSS;
- case cmpordss: PredCode = 7; goto CMPXXSS;
- CMPXXSS: {
- Function *cmpss =
- Intrinsic::getDeclaration(TheModule, Intrinsic::x86_sse_cmp_ss);
+ case cmpeqss:
+ PredCode = 0;
+ goto CMPXXSS;
+ case cmpltss:
+ PredCode = 1;
+ goto CMPXXSS;
+ case cmpless:
+ PredCode = 2;
+ goto CMPXXSS;
+ case cmpunordss:
+ PredCode = 3;
+ goto CMPXXSS;
+ case cmpneqss:
+ PredCode = 4;
+ goto CMPXXSS;
+ case cmpnltss:
+ PredCode = 5;
+ goto CMPXXSS;
+ case cmpnless:
+ PredCode = 6;
+ goto CMPXXSS;
+ case cmpordss:
+ PredCode = 7;
+ goto CMPXXSS;
+ CMPXXSS : {
+ Function *cmpss = Intrinsic::getDeclaration(TheModule,
+ Intrinsic::x86_sse_cmp_ss);
Value *Pred = ConstantInt::get(Type::getInt8Ty(Context), PredCode);
Value *CallOps[3] = { Ops[0], Ops[1], Pred };
Result = Builder.CreateCall(cmpss, CallOps);
Result = Builder.CreateBitCast(Result, ResultType);
return true;
}
- case cmpeqpd: PredCode = 0; goto CMPXXPD;
- case cmpltpd: PredCode = 1; goto CMPXXPD;
- case cmpgtpd: PredCode = 1; flip = true; goto CMPXXPD;
- case cmplepd: PredCode = 2; goto CMPXXPD;
- case cmpgepd: PredCode = 2; flip = true; goto CMPXXPD;
- case cmpunordpd: PredCode = 3; goto CMPXXPD;
- case cmpneqpd: PredCode = 4; goto CMPXXPD;
- case cmpnltpd: PredCode = 5; goto CMPXXPD;
- case cmpngtpd: PredCode = 5; flip = true; goto CMPXXPD;
- case cmpnlepd: PredCode = 6; goto CMPXXPD;
- case cmpngepd: PredCode = 6; flip = true; goto CMPXXPD;
- case cmpordpd: PredCode = 7; goto CMPXXPD;
- CMPXXPD: {
- Function *cmppd =
- Intrinsic::getDeclaration(TheModule, Intrinsic::x86_sse2_cmp_pd);
+ case cmpeqpd:
+ PredCode = 0;
+ goto CMPXXPD;
+ case cmpltpd:
+ PredCode = 1;
+ goto CMPXXPD;
+ case cmpgtpd:
+ PredCode = 1;
+ flip = true;
+ goto CMPXXPD;
+ case cmplepd:
+ PredCode = 2;
+ goto CMPXXPD;
+ case cmpgepd:
+ PredCode = 2;
+ flip = true;
+ goto CMPXXPD;
+ case cmpunordpd:
+ PredCode = 3;
+ goto CMPXXPD;
+ case cmpneqpd:
+ PredCode = 4;
+ goto CMPXXPD;
+ case cmpnltpd:
+ PredCode = 5;
+ goto CMPXXPD;
+ case cmpngtpd:
+ PredCode = 5;
+ flip = true;
+ goto CMPXXPD;
+ case cmpnlepd:
+ PredCode = 6;
+ goto CMPXXPD;
+ case cmpngepd:
+ PredCode = 6;
+ flip = true;
+ goto CMPXXPD;
+ case cmpordpd:
+ PredCode = 7;
+ goto CMPXXPD;
+ CMPXXPD : {
+ Function *cmppd = Intrinsic::getDeclaration(TheModule,
+ Intrinsic::x86_sse2_cmp_pd);
Value *Pred = ConstantInt::get(Type::getInt8Ty(Context), PredCode);
Value *Arg0 = Ops[0];
Value *Arg1 = Ops[1];
- if (flip) std::swap(Arg0, Arg1);
+ if (flip)
+ std::swap(Arg0, Arg1);
Value *CallOps[3] = { Arg0, Arg1, Pred };
Result = Builder.CreateCall(cmppd, CallOps);
Result = Builder.CreateBitCast(Result, ResultType);
return true;
}
- case cmpeqsd: PredCode = 0; goto CMPXXSD;
- case cmpltsd: PredCode = 1; goto CMPXXSD;
- case cmplesd: PredCode = 2; goto CMPXXSD;
- case cmpunordsd: PredCode = 3; goto CMPXXSD;
- case cmpneqsd: PredCode = 4; goto CMPXXSD;
- case cmpnltsd: PredCode = 5; goto CMPXXSD;
- case cmpnlesd: PredCode = 6; goto CMPXXSD;
- case cmpordsd: PredCode = 7; goto CMPXXSD;
- CMPXXSD: {
- Function *cmpsd =
- Intrinsic::getDeclaration(TheModule, Intrinsic::x86_sse2_cmp_sd);
+ case cmpeqsd:
+ PredCode = 0;
+ goto CMPXXSD;
+ case cmpltsd:
+ PredCode = 1;
+ goto CMPXXSD;
+ case cmplesd:
+ PredCode = 2;
+ goto CMPXXSD;
+ case cmpunordsd:
+ PredCode = 3;
+ goto CMPXXSD;
+ case cmpneqsd:
+ PredCode = 4;
+ goto CMPXXSD;
+ case cmpnltsd:
+ PredCode = 5;
+ goto CMPXXSD;
+ case cmpnlesd:
+ PredCode = 6;
+ goto CMPXXSD;
+ case cmpordsd:
+ PredCode = 7;
+ goto CMPXXSD;
+ CMPXXSD : {
+ Function *cmpsd = Intrinsic::getDeclaration(TheModule,
+ Intrinsic::x86_sse2_cmp_sd);
Value *Pred = ConstantInt::get(Type::getInt8Ty(Context), PredCode);
Value *CallOps[3] = { Ops[0], Ops[1], Pred };
Result = Builder.CreateCall(cmpsd, CallOps);
@@ -671,8 +760,8 @@
return true;
}
case ldmxcsr: {
- Function *ldmxcsr =
- Intrinsic::getDeclaration(TheModule, Intrinsic::x86_sse_ldmxcsr);
+ Function *ldmxcsr = Intrinsic::getDeclaration(TheModule,
+ Intrinsic::x86_sse_ldmxcsr);
Value *Ptr = CreateTemporary(Type::getInt32Ty(Context));
Builder.CreateStore(Ops[0], Ptr);
Ptr = Builder.CreateBitCast(Ptr, Type::getInt8PtrTy(Context));
@@ -680,9 +769,9 @@
return true;
}
case stmxcsr: {
- Function *stmxcsr =
- Intrinsic::getDeclaration(TheModule, Intrinsic::x86_sse_stmxcsr);
- Value *Ptr = CreateTemporary(Type::getInt32Ty(Context));
+ Function *stmxcsr = Intrinsic::getDeclaration(TheModule,
+ Intrinsic::x86_sse_stmxcsr);
+ Value *Ptr = CreateTemporary(Type::getInt32Ty(Context));
Value *BPtr = Builder.CreateBitCast(Ptr, Type::getInt8PtrTy(Context));
Builder.CreateCall(stmxcsr, BPtr);
@@ -693,7 +782,7 @@
if (isa<ConstantInt>(Ops[2])) {
// In the header we multiply by 8, correct that back now.
- unsigned shiftVal = (cast<ConstantInt>(Ops[2])->getZExtValue())/8;
+ unsigned shiftVal = (cast<ConstantInt>(Ops[2])->getZExtValue()) / 8;
// If palignr is shifting the pair of input vectors less than 9 bytes,
// emit a shuffle instruction.
@@ -705,11 +794,11 @@
Ops[1] = Builder.CreateBitCast(Ops[1], VecTy);
Ops[0] = Builder.CreateBitCast(Ops[0], VecTy);
- SmallVector<Constant*, 8> Indices;
+ SmallVector<Constant *, 8> Indices;
for (unsigned i = 0; i != 8; ++i)
Indices.push_back(ConstantInt::get(IntTy, shiftVal + i));
- Value* SV = ConstantVector::get(Indices);
+ Value *SV = ConstantVector::get(Indices);
Result = Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
Result = Builder.CreateBitCast(Result, ResultType);
return true;
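When the shift amount is a constant under 9 bytes, palignr on the 64-bit
vectors reduces to a byte shuffle of the concatenated pair: shuffle indices
0-7 address the low operand and 8-15 the high one, so index ShiftBytes + i
selects result byte i. A sketch under the assumption that both operands are
already <8 x i8> (names hypothetical):

  Value *EmitPalignr64(Value *Hi, Value *Lo, unsigned ShiftBytes) {
    Type *IntTy = Type::getInt32Ty(Context);
    SmallVector<Constant *, 8> Indices;
    for (unsigned i = 0; i != 8; ++i)       // byte i of the result
      Indices.push_back(ConstantInt::get(IntTy, ShiftBytes + i));
    Value *Mask = ConstantVector::get(Indices);
    // Lo supplies indices 0-7, Hi supplies indices 8-15.
    return Builder.CreateShuffleVector(Lo, Hi, Mask, "palignr");
  }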
@@ -724,7 +813,7 @@
Type *MMXTy = Type::getX86_MMXTy(Context);
Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy);
- Ops[1] = ConstantInt::get(VecTy, (shiftVal-8) * 8);
+ Ops[1] = ConstantInt::get(VecTy, (shiftVal - 8) * 8);
Ops[1] = Builder.CreateBitCast(Ops[1], MMXTy);
// create i32 constant
@@ -750,7 +839,7 @@
if (isa<ConstantInt>(Ops[2])) {
 // In the header we multiply by 8; correct that back now.
- unsigned shiftVal = (cast<ConstantInt>(Ops[2])->getZExtValue())/8;
+ unsigned shiftVal = (cast<ConstantInt>(Ops[2])->getZExtValue()) / 8;
// If palignr is shifting the pair of input vectors less than 17 bytes,
// emit a shuffle instruction.
@@ -762,11 +851,11 @@
Ops[1] = Builder.CreateBitCast(Ops[1], VecTy);
Ops[0] = Builder.CreateBitCast(Ops[0], VecTy);
- SmallVector<Constant*, 16> Indices;
+ SmallVector<Constant *, 16> Indices;
for (unsigned i = 0; i != 16; ++i)
Indices.push_back(ConstantInt::get(IntTy, shiftVal + i));
- Value* SV = ConstantVector::get(Indices);
+ Value *SV = ConstantVector::get(Indices);
Result = Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
Result = Builder.CreateBitCast(Result, ResultType);
return true;
@@ -780,7 +869,7 @@
Type *IntTy = Type::getInt32Ty(Context);
Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
- Ops[1] = ConstantInt::get(IntTy, (shiftVal-16) * 8);
+ Ops[1] = ConstantInt::get(IntTy, (shiftVal - 16) * 8);
// create i32 constant
Function *F = Intrinsic::getDeclaration(TheModule,
@@ -814,9 +903,8 @@
// Convert the type of the pointer to a pointer to the stored type.
unsigned AS = Ops[0]->getType()->getPointerAddressSpace();
- Value *Ptr = Builder.CreateBitCast(Ops[0],
- PointerType::get(Ops[1]->getType(), AS),
- "cast");
+ Value *Ptr = Builder.CreateBitCast(
+ Ops[0], PointerType::get(Ops[1]->getType(), AS), "cast");
StoreInst *SI = Builder.CreateAlignedStore(Ops[1], Ptr, 16);
SI->setMetadata(TheModule->getMDKindID("nontemporal"), Node);
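The movnt builtins become plain aligned stores tagged with !nontemporal
metadata; the x86 backend selects the MOVNT* streaming forms from the tag.
A sketch, assuming Builder, TheModule and Context in scope:

  void EmitNontemporalStore(Value *Val, Value *Ptr) {
    Value *One = ConstantInt::get(Type::getInt32Ty(Context), 1);
    MDNode *Node = MDNode::get(Context, One); // the i32 1 payload
    StoreInst *SI = Builder.CreateAlignedStore(Val, Ptr, 16);
    SI->setMetadata(TheModule->getMDKindID("nontemporal"), Node);
  }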
@@ -854,7 +942,7 @@
// rsqrtps_nr(x) = rsqrtps(x) * -0.5 * (rsqrtps(x) * x * rsqrtps(x) - 3.0)
Function *rsqrtps = Intrinsic::getDeclaration(TheModule,
Intrinsic::x86_sse_rsqrt_ps);
- Value *X = Ops[0]; // x
+ Value *X = Ops[0]; // x
Value *R = Builder.CreateCall(rsqrtps, X); // rsqrtps(x)
Value *RHS = Builder.CreateFAdd(Builder.CreateFMul(Builder.CreateFMul(R, X),
R),
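The formula in the comment above is one Newton-Raphson step; written out for
a scalar, with r the hardware's roughly 12-bit estimate of 1/sqrt(x):

  // r' = r * -0.5 * (r*x*r - 3.0), i.e. the classic r/2 * (3 - x*r^2)
  static float RefineRsqrt(float x, float r /* ~= 1/sqrtf(x) */) {
    return r * -0.5f * (r * x * r - 3.0f);
  }

One step roughly doubles the precision of the RSQRTPS estimate, which is
what the _nr ("Newton-Raphson") builtins promise.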
@@ -895,9 +983,8 @@
!MaskTy->getElementType()->isIntegerTy())
return false;
if (!MaskTy->getElementType()->isIntegerTy(32))
- Mask = ConstantExpr::getIntegerCast(Mask,
- VectorType::get(Builder.getInt32Ty(),
- NElts), false);
+ Mask = ConstantExpr::getIntegerCast(
+ Mask, VectorType::get(Builder.getInt32Ty(), NElts), false);
Result = Builder.CreateShuffleVector(Ops[0], Ops[1], Mask);
return true;
}
@@ -983,14 +1070,14 @@
/* Target hook for llvm-abi.h. It returns true if an aggregate of the
specified type should be passed in memory. This is only called for
x86-64. */
-static bool llvm_x86_64_should_pass_aggregate_in_memory(tree TreeType,
- enum machine_mode Mode){
+static bool llvm_x86_64_should_pass_aggregate_in_memory(
+ tree TreeType, enum machine_mode Mode) {
int IntRegs, SSERegs;
 /* If examine_argument returns 0, then it's passed byval in memory. */
int ret = examine_argument(Mode, TreeType, 0, &IntRegs, &SSERegs);
- if (ret==0)
+ if (ret == 0)
return true;
- if (ret==1 && IntRegs==0 && SSERegs==0) // zero-sized struct
+ if (ret == 1 && IntRegs == 0 && SSERegs == 0) // zero-sized struct
return true;
return false;
}
@@ -1010,9 +1097,8 @@
specified type should be passed in a number of registers of mixed types.
It also returns a vector of types that correspond to the registers used
for parameter passing. This is only called for x86-32. */
-bool
-llvm_x86_32_should_pass_aggregate_in_mixed_regs(tree TreeType, Type *Ty,
- std::vector<Type*> &Elts){
+bool llvm_x86_32_should_pass_aggregate_in_mixed_regs(
+ tree TreeType, Type *Ty, std::vector<Type *> &Elts) {
// If this is a small fixed size type, investigate it.
HOST_WIDE_INT SrcSize = int_size_in_bytes(TreeType);
if (SrcSize <= 0 || SrcSize > 16)
@@ -1025,18 +1111,17 @@
 // {i16, i16} should be passed in one 32-bit unit, which is not how "i16, i16"
// would be passed as stand-alone arguments.
StructType *STy = dyn_cast<StructType>(Ty);
- if (!STy || STy->isPacked()) return false;
+ if (!STy || STy->isPacked())
+ return false;
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
Type *EltTy = STy->getElementType(i);
// 32 and 64-bit integers are fine, as are float and double. Long double
// (which can be picked as the type for a union of 16 bytes) is not fine,
// as loads and stores of it get only 10 bytes.
- if (EltTy == Type::getInt32Ty(Context) ||
- EltTy == Type::getInt64Ty(Context) ||
- EltTy == Type::getFloatTy(Context) ||
- EltTy == Type::getDoubleTy(Context) ||
- EltTy->isPointerTy()) {
+ if (EltTy == Type::getInt32Ty(Context) || EltTy ==
+ Type::getInt64Ty(Context) || EltTy == Type::getFloatTy(Context) ||
+ EltTy == Type::getDoubleTy(Context) || EltTy->isPointerTy()) {
Elts.push_back(EltTy);
continue;
}
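Usage-wise, a 16-byte unpacked struct of the allowed element types splits
cleanly. A hedged illustration (the caller shape is hypothetical):

  // struct { int a; float b; double c; } on x86-32:
  std::vector<Type *> Elts;
  if (llvm_x86_32_should_pass_aggregate_in_mixed_regs(TreeType, Ty, Elts)) {
    // Elts now holds { i32, float, double }, one scalar per field,
    // each passed as if it were a stand-alone argument.
  }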
@@ -1057,15 +1142,16 @@
if (!isa<COMPLEX_TYPE>(type))
return false;
StructType *STy = dyn_cast<StructType>(Ty);
- if (!STy || STy->isPacked()) return false;
+ if (!STy || STy->isPacked())
+ return false;
// FIXME: Currently codegen isn't lowering most _Complex types in a way that
// makes it ABI compatible for x86-64. Same for _Complex char and _Complex
// short in 32-bit.
Type *EltTy = STy->getElementType(0);
- return !((TARGET_64BIT && (EltTy->isIntegerTy() ||
- EltTy == Type::getFloatTy(Context) ||
- EltTy == Type::getDoubleTy(Context))) ||
+ return !((TARGET_64BIT &&
+ (EltTy->isIntegerTy() || EltTy == Type::getFloatTy(Context) ||
+ EltTy == Type::getDoubleTy(Context))) ||
EltTy->isIntegerTy(16) || EltTy->isIntegerTy(8));
}
@@ -1076,15 +1162,15 @@
return false;
enum machine_mode Mode = type_natural_mode(TreeType, NULL);
- HOST_WIDE_INT Bytes =
- (Mode == BLKmode) ? int_size_in_bytes(TreeType) : (int) GET_MODE_SIZE(Mode);
+ HOST_WIDE_INT Bytes = (Mode == BLKmode) ? int_size_in_bytes(TreeType) :
+ (int) GET_MODE_SIZE(Mode);
// Zero sized array, struct, or class, not passed in memory.
if (Bytes == 0)
return false;
if (!TARGET_64BIT) {
- std::vector<Type*> Elts;
+ std::vector<Type *> Elts;
return !llvm_x86_32_should_pass_aggregate_in_mixed_regs(TreeType, Ty, Elts);
}
return llvm_x86_64_should_pass_aggregate_in_memory(TreeType, Mode);
@@ -1092,7 +1178,7 @@
 /* count_num_registers_uses - Return the number of GPR and XMM parameter
    registers used so far. Caller is responsible for initializing outputs. */
-static void count_num_registers_uses(std::vector<Type*> &ScalarElts,
+static void count_num_registers_uses(std::vector<Type *> &ScalarElts,
unsigned &NumGPRs, unsigned &NumXMMs) {
for (size_t i = 0, e = ScalarElts.size(); i != e; ++i) {
Type *Ty = ScalarElts[i];
@@ -1107,15 +1193,15 @@
++NumXMMs;
} else if (Ty->isIntegerTy() || Ty->isPointerTy()) {
++NumGPRs;
- } else if (Ty==Type::getVoidTy(Context)) {
+ } else if (Ty == Type::getVoidTy(Context)) {
// Padding bytes that are not passed anywhere
;
} else {
// Floating point scalar argument.
assert(Ty->isFloatingPointTy() && Ty->isPrimitiveType() &&
"Expecting a floating point primitive type!");
- if (Ty->getTypeID() == Type::FloatTyID
- || Ty->getTypeID() == Type::DoubleTyID)
+ if (Ty->getTypeID() == Type::FloatTyID ||
+ Ty->getTypeID() == Type::DoubleTyID)
++NumXMMs;
}
}
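A usage sketch with hypothetical contents: after lowering an argument list to
the scalar types { i64, <4 x float>, double, void }, the counters come back
as NumGPRs == 1 (the i64) and NumXMMs == 2 (the 128-bit vector and the
double); the void entry is padding and is counted nowhere.

  unsigned NumGPRs = 0, NumXMMs = 0; // caller initializes
  count_num_registers_uses(ScalarElts, NumGPRs, NumXMMs);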
@@ -1125,10 +1211,9 @@
in registers. If there are only enough available parameter registers to pass
part of the aggregate, return true. That means the aggregate should instead
be passed in memory. */
-bool
-llvm_x86_64_aggregate_partially_passed_in_regs(std::vector<Type*> &Elts,
- std::vector<Type*> &ScalarElts,
- bool isShadowReturn) {
+bool llvm_x86_64_aggregate_partially_passed_in_regs(
+ std::vector<Type *> &Elts, std::vector<Type *> &ScalarElts,
+ bool isShadowReturn) {
 // Count the number of GPRs and XMMs used so far. According to the AMD64 ABI
// document: "If there are no registers available for any eightbyte of an
// argument, the whole argument is passed on the stack." X86-64 uses 6
@@ -1172,17 +1257,16 @@
specified type should be passed in a number of registers of mixed types.
It also returns a vector of types that correspond to the registers used
for parameter passing. This is only called for x86-64. */
-bool
-llvm_x86_64_should_pass_aggregate_in_mixed_regs(tree TreeType, Type *Ty,
- std::vector<Type*> &Elts){
+bool llvm_x86_64_should_pass_aggregate_in_mixed_regs(
+ tree TreeType, Type *Ty, std::vector<Type *> &Elts) {
if (llvm_x86_should_pass_aggregate_as_fca(TreeType, Ty))
return false;
enum x86_64_reg_class Class[MAX_CLASSES];
enum machine_mode Mode = type_natural_mode(TreeType, NULL);
bool totallyEmpty = true;
- HOST_WIDE_INT Bytes =
- (Mode == BLKmode) ? int_size_in_bytes(TreeType) : (int) GET_MODE_SIZE(Mode);
+ HOST_WIDE_INT Bytes = (Mode == BLKmode) ? int_size_in_bytes(TreeType) :
+ (int) GET_MODE_SIZE(Mode);
int NumClasses = classify_argument(Mode, TreeType, Class, 0);
if (!NumClasses)
return false;
@@ -1208,17 +1292,17 @@
// <2 x i64>, or <2 x f64>.
// 4. 1 x SSE + 1 x SSESF, size is 12: 1 x Double, 1 x Float.
// 5. 2 x SSE, size is 16: 2 x Double.
- if ((NumClasses-i) == 1) {
+ if ((NumClasses - i) == 1) {
if (Bytes == 8) {
Elts.push_back(Type::getDoubleTy(Context));
Bytes -= 8;
} else if (Bytes == 4) {
- Elts.push_back (Type::getFloatTy(Context));
+ Elts.push_back(Type::getFloatTy(Context));
Bytes -= 4;
} else
llvm_unreachable("Not yet handled!");
- } else if ((NumClasses-i) == 2) {
- if (Class[i+1] == X86_64_SSEUP_CLASS) {
+ } else if ((NumClasses - i) == 2) {
+ if (Class[i + 1] == X86_64_SSEUP_CLASS) {
Type *LLVMTy = ConvertType(TreeType);
if (StructType *STy = dyn_cast<StructType>(LLVMTy))
 // Look past the struct wrapper.
@@ -1248,22 +1332,22 @@
Elts.push_back(VectorType::get(Type::getFloatTy(Context), 4));
Bytes -= 4;
}
- } else if (Class[i+1] == X86_64_SSESF_CLASS) {
+ } else if (Class[i + 1] == X86_64_SSESF_CLASS) {
assert(Bytes == 12 && "Not yet handled!");
Elts.push_back(Type::getDoubleTy(Context));
Elts.push_back(Type::getFloatTy(Context));
Bytes -= 12;
- } else if (Class[i+1] == X86_64_SSE_CLASS) {
+ } else if (Class[i + 1] == X86_64_SSE_CLASS) {
Elts.push_back(Type::getDoubleTy(Context));
Elts.push_back(Type::getDoubleTy(Context));
Bytes -= 16;
- } else if (Class[i+1] == X86_64_SSEDF_CLASS && Bytes == 16) {
+ } else if (Class[i + 1] == X86_64_SSEDF_CLASS && Bytes == 16) {
Elts.push_back(VectorType::get(Type::getFloatTy(Context), 2));
Elts.push_back(Type::getDoubleTy(Context));
- } else if (Class[i+1] == X86_64_INTEGER_CLASS) {
+ } else if (Class[i + 1] == X86_64_INTEGER_CLASS) {
Elts.push_back(VectorType::get(Type::getFloatTy(Context), 2));
Elts.push_back(Type::getInt64Ty(Context));
- } else if (Class[i+1] == X86_64_NO_CLASS) {
+ } else if (Class[i + 1] == X86_64_NO_CLASS) {
// padding bytes, don't pass
Elts.push_back(Type::getDoubleTy(Context));
Elts.push_back(Type::getVoidTy(Context));
@@ -1294,7 +1378,8 @@
Elts.push_back(Type::getVoidTy(Context));
Bytes -= 8;
break;
- default: llvm_unreachable("Unexpected register class!");
+ default:
+ llvm_unreachable("Unexpected register class!");
}
}
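A worked instance of case 4 from the numbered comment earlier in this
function (hedged, since the exact classes come from GCC's classify_argument):
for the 12-byte aggregate struct S { float a, b, c; }, the classes should
come out as X86_64_SSE_CLASS followed by X86_64_SSESF_CLASS, so the loop
emits

  Elts.push_back(Type::getDoubleTy(Context)); // eightbyte 0: a and b
  Elts.push_back(Type::getFloatTy(Context));  // eightbyte 1: c

matching "size is 12: 1 x Double, 1 x Float".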
@@ -1307,12 +1392,11 @@
bool llvm_x86_should_pass_vector_in_integer_regs(tree type) {
if (!TARGET_MACHO)
return false;
- if (isa<VECTOR_TYPE>(type) &&
- TYPE_SIZE(type) &&
+ if (isa<VECTOR_TYPE>(type) && TYPE_SIZE(type) &&
isa<INTEGER_CST>(TYPE_SIZE(type))) {
- if (TREE_INT_CST_LOW(TYPE_SIZE(type))==64 && TARGET_MMX)
+ if (TREE_INT_CST_LOW(TYPE_SIZE(type)) == 64 && TARGET_MMX)
return false;
- if (TREE_INT_CST_LOW(TYPE_SIZE(type))==128 && TARGET_SSE)
+ if (TREE_INT_CST_LOW(TYPE_SIZE(type)) == 128 && TARGET_SSE)
return false;
if (TARGET_64BIT && TREE_INT_CST_LOW(TYPE_SIZE(type)) > 128)
return false;
@@ -1327,10 +1411,9 @@
return false;
if (!TARGET_64BIT)
return false;
- if (isa<VECTOR_TYPE>(type) &&
- TYPE_SIZE(type) &&
+ if (isa<VECTOR_TYPE>(type) && TYPE_SIZE(type) &&
isa<INTEGER_CST>(TYPE_SIZE(type))) {
- if (TREE_INT_CST_LOW(TYPE_SIZE(type))<=128)
+ if (TREE_INT_CST_LOW(TYPE_SIZE(type)) <= 128)
return false;
}
return true;
@@ -1343,17 +1426,14 @@
returned in XMM0. Judging from comments, this would not be right for
Win64. Don't know about Linux. */
tree llvm_x86_should_return_vector_as_scalar(tree type, bool isBuiltin) {
- if (TARGET_MACHO &&
- !isBuiltin &&
- isa<VECTOR_TYPE>(type) &&
- TYPE_SIZE(type) &&
+ if (TARGET_MACHO && !isBuiltin && isa<VECTOR_TYPE>(type) && TYPE_SIZE(type) &&
isa<INTEGER_CST>(TYPE_SIZE(type))) {
- if (TREE_INT_CST_LOW(TYPE_SIZE(type))==64 &&
- TYPE_VECTOR_SUBPARTS(type)==1)
+ if (TREE_INT_CST_LOW(TYPE_SIZE(type)) == 64 &&
+ TYPE_VECTOR_SUBPARTS(type) == 1)
return uint64_type_node;
- if (TARGET_64BIT && TREE_INT_CST_LOW(TYPE_SIZE(type))==64)
+ if (TARGET_64BIT && TREE_INT_CST_LOW(TYPE_SIZE(type)) == 64)
return double_type_node;
- if (TREE_INT_CST_LOW(TYPE_SIZE(type))==32)
+ if (TREE_INT_CST_LOW(TYPE_SIZE(type)) == 32)
return uint32_type_node;
}
return 0;
@@ -1367,10 +1447,9 @@
tree retType = isSingleElementStructOrArray(type, true, false);
if (!retType || !TARGET_64BIT || !TARGET_MACHO)
return retType;
- if (isa<VECTOR_TYPE>(retType) &&
- TYPE_SIZE(retType) &&
+ if (isa<VECTOR_TYPE>(retType) && TYPE_SIZE(retType) &&
isa<INTEGER_CST>(TYPE_SIZE(retType)) &&
- TREE_INT_CST_LOW(TYPE_SIZE(retType))==64)
+ TREE_INT_CST_LOW(TYPE_SIZE(retType)) == 64)
return double_type_node;
return retType;
}
@@ -1378,17 +1457,13 @@
/* MMX vectors v2i32, v4i16, v8i8, v2f32 are returned using sret on Darwin
 32-bit. Vectors bigger than 128 bits are returned using sret. */
bool llvm_x86_should_return_vector_as_shadow(tree type, bool isBuiltin) {
- if (TARGET_MACHO &&
- !isBuiltin &&
- !TARGET_64BIT &&
- isa<VECTOR_TYPE>(type) &&
- TYPE_SIZE(type) &&
- isa<INTEGER_CST>(TYPE_SIZE(type))) {
- if (TREE_INT_CST_LOW(TYPE_SIZE(type))==64 &&
- TYPE_VECTOR_SUBPARTS(type)>1)
+ if (TARGET_MACHO && !isBuiltin && !TARGET_64BIT && isa<VECTOR_TYPE>(type) &&
+ TYPE_SIZE(type) && isa<INTEGER_CST>(TYPE_SIZE(type))) {
+ if (TREE_INT_CST_LOW(TYPE_SIZE(type)) == 64 &&
+ TYPE_VECTOR_SUBPARTS(type) > 1)
return true;
}
- if (TREE_INT_CST_LOW(TYPE_SIZE(type))>128)
+ if (TREE_INT_CST_LOW(TYPE_SIZE(type)) > 128)
return true;
return false;
}
@@ -1400,8 +1475,7 @@
if (!TARGET_64BIT)
return false;
- if (isa<COMPLEX_TYPE>(type) &&
- TREE_INT_CST_LOW(TYPE_SIZE_UNIT(type)) == 32)
+ if (isa<COMPLEX_TYPE>(type) && TREE_INT_CST_LOW(TYPE_SIZE_UNIT(type)) == 32)
return true;
return false;
@@ -1409,8 +1483,7 @@
// llvm_suitable_multiple_ret_value_type - Return TRUE if return value
// of type TY should be returned using multiple value return instruction.
-static bool llvm_suitable_multiple_ret_value_type(Type *Ty,
- tree TreeType) {
+static bool llvm_suitable_multiple_ret_value_type(Type *Ty, tree TreeType) {
if (!TARGET_64BIT)
return false;
@@ -1476,14 +1549,13 @@
if (Class[0] == X86_64_INTEGERSI_CLASS ||
Class[0] == X86_64_INTEGER_CLASS) {
// one int register
- HOST_WIDE_INT Bytes =
- (Mode == BLKmode) ? int_size_in_bytes(type) :
+ HOST_WIDE_INT Bytes = (Mode == BLKmode) ? int_size_in_bytes(type) :
(int) GET_MODE_SIZE(Mode);
- if (Bytes>4)
+ if (Bytes > 4)
return Type::getInt64Ty(Context);
- else if (Bytes>2)
+ else if (Bytes > 2)
return Type::getInt32Ty(Context);
- else if (Bytes>1)
+ else if (Bytes > 1)
return Type::getInt16Ty(Context);
else
return Type::getInt8Ty(Context);
@@ -1492,8 +1564,7 @@
}
if (NumClasses == 2) {
if (Class[1] == X86_64_NO_CLASS) {
- if (Class[0] == X86_64_INTEGER_CLASS ||
- Class[0] == X86_64_NO_CLASS ||
+ if (Class[0] == X86_64_INTEGER_CLASS || Class[0] == X86_64_NO_CLASS ||
Class[0] == X86_64_INTEGERSI_CLASS)
return Type::getInt64Ty(Context);
else if (Class[0] == X86_64_SSE_CLASS || Class[0] == X86_64_SSEDF_CLASS)
@@ -1532,13 +1603,12 @@
/// This routine uses GCC implementation to find required register classes.
/// The original implementation of this routine is based on
/// llvm_x86_64_should_pass_aggregate_in_mixed_regs code.
-static void
-llvm_x86_64_get_multiple_return_reg_classes(tree TreeType, Type * /*Ty*/,
- std::vector<Type*> &Elts) {
+static void llvm_x86_64_get_multiple_return_reg_classes(
+ tree TreeType, Type */*Ty*/, std::vector<Type *> &Elts) {
enum x86_64_reg_class Class[MAX_CLASSES];
enum machine_mode Mode = type_natural_mode(TreeType, NULL);
- HOST_WIDE_INT Bytes =
- (Mode == BLKmode) ? int_size_in_bytes(TreeType) : (int) GET_MODE_SIZE(Mode);
+ HOST_WIDE_INT Bytes = (Mode == BLKmode) ? int_size_in_bytes(TreeType) :
+ (int) GET_MODE_SIZE(Mode);
int NumClasses = classify_argument(Mode, TreeType, Class, 0);
assert(NumClasses && "This type does not need multiple return registers!");
@@ -1552,7 +1622,7 @@
// empty structs. Recognize it and don't add any return values in that
// case.
if (NumClasses == 1 && Class[0] == X86_64_NO_CLASS)
- return;
+ return;
for (int i = 0; i < NumClasses; ++i) {
switch (Class[i]) {
@@ -1570,7 +1640,7 @@
// 4. 1 x SSE + 1 x SSESF, size is 12: 1 x Double, 1 x Float.
// 5. 2 x SSE, size is 16: 2 x Double.
// 6. 1 x SSE, 1 x NO: Second is padding, pass as double.
- if ((NumClasses-i) == 1) {
+ if ((NumClasses - i) == 1) {
if (Bytes == 8) {
Elts.push_back(Type::getDoubleTy(Context));
Bytes -= 8;
@@ -1579,8 +1649,8 @@
Bytes -= 4;
} else
llvm_unreachable("Not yet handled!");
- } else if ((NumClasses-i) == 2) {
- if (Class[i+1] == X86_64_SSEUP_CLASS) {
+ } else if ((NumClasses - i) == 2) {
+ if (Class[i + 1] == X86_64_SSEUP_CLASS) {
Type *Ty = ConvertType(TreeType);
if (StructType *STy = dyn_cast<StructType>(Ty))
 // Look past the struct wrapper.
@@ -1608,22 +1678,22 @@
Elts.push_back(VectorType::get(Type::getFloatTy(Context), 4));
Bytes -= 4;
}
- } else if (Class[i+1] == X86_64_SSESF_CLASS) {
+ } else if (Class[i + 1] == X86_64_SSESF_CLASS) {
assert(Bytes == 12 && "Not yet handled!");
Elts.push_back(Type::getDoubleTy(Context));
Elts.push_back(Type::getFloatTy(Context));
Bytes -= 12;
- } else if (Class[i+1] == X86_64_SSE_CLASS) {
+ } else if (Class[i + 1] == X86_64_SSE_CLASS) {
Elts.push_back(Type::getDoubleTy(Context));
Elts.push_back(Type::getDoubleTy(Context));
Bytes -= 16;
- } else if (Class[i+1] == X86_64_SSEDF_CLASS && Bytes == 16) {
+ } else if (Class[i + 1] == X86_64_SSEDF_CLASS && Bytes == 16) {
Elts.push_back(VectorType::get(Type::getFloatTy(Context), 2));
Elts.push_back(Type::getDoubleTy(Context));
- } else if (Class[i+1] == X86_64_INTEGER_CLASS) {
+ } else if (Class[i + 1] == X86_64_INTEGER_CLASS) {
Elts.push_back(VectorType::get(Type::getFloatTy(Context), 2));
Elts.push_back(Type::getInt64Ty(Context));
- } else if (Class[i+1] == X86_64_NO_CLASS) {
+ } else if (Class[i + 1] == X86_64_NO_CLASS) {
Elts.push_back(Type::getDoubleTy(Context));
Bytes -= 16;
} else {
@@ -1650,7 +1720,8 @@
// padding bytes.
Elts.push_back(Type::getInt64Ty(Context));
break;
- default: llvm_unreachable("Unexpected register class!");
+ default:
+ llvm_unreachable("Unexpected register class!");
}
}
}
@@ -1672,7 +1743,7 @@
return StructType::get(Context, ElementTypes, STy->isPacked());
}
- std::vector<Type*> GCCElts;
+ std::vector<Type *> GCCElts;
llvm_x86_64_get_multiple_return_reg_classes(type, Ty, GCCElts);
return StructType::get(Context, GCCElts, false);
}
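As an illustration (hypothetical input): for a _Complex double return value
the register classes are two SSE doubles, so the synthesized multiple-value
return type is the unpacked two-field struct { double, double }:

  std::vector<Type *> GCCElts;
  GCCElts.push_back(Type::getDoubleTy(Context)); // real part
  GCCElts.push_back(Type::getDoubleTy(Context)); // imaginary part
  StructType *RetTy = StructType::get(Context, GCCElts, /*isPacked=*/false);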
@@ -1684,13 +1755,10 @@
// Extract SRCFIELDNO's ELEMENO value and store it in DEST's FIELDNO field's
// ELEMENTNO.
//
-static void llvm_x86_extract_mrv_array_element(Value *Src, Value *Dest,
- unsigned SrcFieldNo,
- unsigned SrcElemNo,
- unsigned DestFieldNo,
- unsigned DestElemNo,
- LLVMBuilder &Builder,
- bool isVolatile) {
+static void llvm_x86_extract_mrv_array_element(
+ Value *Src, Value *Dest, unsigned SrcFieldNo, unsigned SrcElemNo,
+ unsigned DestFieldNo, unsigned DestElemNo, LLVMBuilder &Builder,
+ bool isVolatile) {
Value *EVI = Builder.CreateExtractValue(Src, SrcFieldNo, "mrv_gr");
StructType *STy = cast<StructType>(Src->getType());
Value *Idxs[3];
@@ -1710,9 +1778,8 @@
// llvm_x86_extract_multiple_return_value - Extract multiple values returned
// by SRC and store them in DEST. It is expected thaty SRC and
// DEST types are StructType, but they may not match.
-void llvm_x86_extract_multiple_return_value(Value *Src, Value *Dest,
- bool isVolatile,
- LLVMBuilder &Builder) {
+void llvm_x86_extract_multiple_return_value(
+ Value *Src, Value *Dest, bool isVolatile, LLVMBuilder &Builder) {
StructType *STy = cast<StructType>(Src->getType());
unsigned NumElements = STy->getNumElements();
@@ -1723,10 +1790,10 @@
unsigned SNO = 0;
unsigned DNO = 0;
- if (DestTy->getNumElements() == 3
- && DestTy->getElementType(0)->getTypeID() == Type::FloatTyID
- && DestTy->getElementType(1)->getTypeID() == Type::FloatTyID
- && DestTy->getElementType(2)->getTypeID() == Type::FloatTyID) {
+ if (DestTy->getNumElements() == 3 &&
+ DestTy->getElementType(0)->getTypeID() == Type::FloatTyID &&
+ DestTy->getElementType(1)->getTypeID() == Type::FloatTyID &&
+ DestTy->getElementType(2)->getTypeID() == Type::FloatTyID) {
// DestTy is { float, float, float }
// STy is { <4 x float>, float > }
@@ -1757,7 +1824,8 @@
Value *GEP = Builder.CreateStructGEP(Dest, DNO, "mrv_gep");
Value *EVI = Builder.CreateExtractValue(Src, SNO, "mrv_gr");
Builder.CreateAlignedStore(EVI, GEP, 1, isVolatile);
- ++DNO; ++SNO;
+ ++DNO;
+ ++SNO;
continue;
}
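One element move in the special case above, sketched in isolation (Idx and
the in-scope values are hypothetical): pull lane Idx out of the <4 x float>
field 0 of Src and store it into scalar field Idx of Dest.

  Value *Vec = Builder.CreateExtractValue(Src, 0, "mrv_gr");
  Value *Lane = Builder.CreateExtractElement(
      Vec, ConstantInt::get(Type::getInt32Ty(Context), Idx));
  Value *GEP = Builder.CreateStructGEP(Dest, Idx, "mrv_gep");
  Builder.CreateAlignedStore(Lane, GEP, 1, isVolatile);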
@@ -1777,7 +1845,8 @@
GEP = Builder.CreateGEP(Dest, Idxs, "mrv_gep");
EVI = Builder.CreateExtractValue(Src, 1, "mrv_gr");
Builder.CreateAlignedStore(EVI, GEP, 1, isVolatile);
- ++DNO; ++SNO;
+ ++DNO;
+ ++SNO;
continue;
}
@@ -1790,17 +1859,16 @@
unsigned i = 0;
unsigned Size = 1;
- if (VectorType *SElemTy =
- dyn_cast<VectorType>(STy->getElementType(SNO))) {
+ if (VectorType *SElemTy = dyn_cast<
+ VectorType>(STy->getElementType(SNO))) {
Size = SElemTy->getNumElements();
- if (SElemTy->getElementType()->getTypeID() == Type::FloatTyID
- && Size == 4)
+ if (SElemTy->getElementType()->getTypeID() == Type::FloatTyID &&
+ Size == 4)
// Ignore last two <4 x float> elements.
Size = 2;
}
while (i < Size) {
- llvm_x86_extract_mrv_array_element(Src, Dest, SNO, i++,
- DNO, DElemNo++,
+ llvm_x86_extract_mrv_array_element(Src, Dest, SNO, i++, DNO, DElemNo++,
Builder, isVolatile);
}
// Consumed this src field. Try next one.
@@ -1824,15 +1892,15 @@
enum x86_64_reg_class Class[MAX_CLASSES];
enum machine_mode Mode = type_natural_mode(type, NULL);
int NumClasses = classify_argument(Mode, type, Class, 0);
- *DontCheckAlignment= true;
+ *DontCheckAlignment = true;
if (NumClasses == 1 && (Class[0] == X86_64_INTEGER_CLASS ||
Class[0] == X86_64_INTEGERSI_CLASS)) {
// one int register
- HOST_WIDE_INT Bytes =
- (Mode == BLKmode) ? int_size_in_bytes(type) : (int) GET_MODE_SIZE(Mode);
- if (Bytes>4)
+ HOST_WIDE_INT Bytes = (Mode == BLKmode) ? int_size_in_bytes(type) :
+ (int) GET_MODE_SIZE(Mode);
+ if (Bytes > 4)
*size = 8;
- else if (Bytes>2)
+ else if (Bytes > 2)
*size = 4;
else
*size = Bytes;
@@ -1853,8 +1921,7 @@
}
}
return false;
- }
- else
+ } else
return !isSingleElementStructOrArray(type, false, true);
}
@@ -1867,8 +1934,8 @@
}
void llvm_x86_set_subtarget_features(std::string &C,
- llvm::SubtargetFeatures &F) {
- if (TARGET_MACHO && ! strcmp (ix86_arch_string, "apple"))
+ llvm::SubtargetFeatures &F) {
+ if (TARGET_MACHO && !strcmp(ix86_arch_string, "apple"))
C = TARGET_64BIT ? "core2" : "yonah";
else
C = ix86_arch_string;
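A usage sketch (flag handling hedged): on Darwin, -march=apple acts as a
wildcard that dragonegg resolves to a concrete CPU before LLVM sees it;
every other -march string passes through unchanged.

  std::string CPU;
  llvm::SubtargetFeatures Features;
  llvm_x86_set_subtarget_features(CPU, Features);
  // 64-bit Darwin: CPU == "core2"; 32-bit Darwin: CPU == "yonah".
  // Otherwise CPU == ix86_arch_string verbatim.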