[llvm-commits] [llvm] r53163 [4/7] - in /llvm/branches/non-call-eh: ./ autoconf/ bindings/ocaml/llvm/ docs/ docs/CommandGuide/ docs/tutorial/ examples/BrainF/ examples/Fibonacci/ examples/HowToUseJIT/ examples/ModuleMaker/ examples/ParallelJIT/ include/llvm-c/ include/llvm/ include/llvm/ADT/ include/llvm/Analysis/ include/llvm/Bitcode/ include/llvm/CodeGen/ include/llvm/Debugger/ include/llvm/ExecutionEngine/ include/llvm/Support/ include/llvm/System/ include/llvm/Target/ include/llvm/Transforms/ include/llvm/Transform...
Nick Lewycky
nicholas at mxc.ca
Sun Jul 6 13:45:51 PDT 2008
Modified: llvm/branches/non-call-eh/lib/Linker/LinkModules.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Linker/LinkModules.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Linker/LinkModules.cpp (original)
+++ llvm/branches/non-call-eh/lib/Linker/LinkModules.cpp Sun Jul 6 15:45:41 2008
@@ -26,6 +26,7 @@
#include "llvm/Assembly/Writer.h"
#include "llvm/Support/Streams.h"
#include "llvm/System/Path.h"
+#include "llvm/ADT/DenseMap.h"
#include <sstream>
using namespace llvm;
@@ -52,7 +53,6 @@
// Inputs:
// DestTy - The type to which we wish to resolve.
// SrcTy - The original type which we want to resolve.
-// Name - The name of the type.
//
// Outputs:
// DestST - The symbol table in which the new type should be placed.
@@ -61,121 +61,203 @@
// true - There is an error and the types cannot yet be linked.
// false - No errors.
//
-static bool ResolveTypes(const Type *DestTy, const Type *SrcTy,
- TypeSymbolTable *DestST, const std::string &Name) {
+static bool ResolveTypes(const Type *DestTy, const Type *SrcTy) {
if (DestTy == SrcTy) return false; // If already equal, noop
+ assert(DestTy && SrcTy && "Can't handle null types");
- // Does the type already exist in the module?
- if (DestTy && !isa<OpaqueType>(DestTy)) { // Yup, the type already exists...
- if (const OpaqueType *OT = dyn_cast<OpaqueType>(SrcTy)) {
- const_cast<OpaqueType*>(OT)->refineAbstractTypeTo(DestTy);
- } else {
- return true; // Cannot link types... neither is opaque and not-equal
- }
- } else { // Type not in dest module. Add it now.
- if (DestTy) // Type _is_ in module, just opaque...
- const_cast<OpaqueType*>(cast<OpaqueType>(DestTy))
- ->refineAbstractTypeTo(SrcTy);
- else if (!Name.empty())
- DestST->insert(Name, const_cast<Type*>(SrcTy));
+ if (const OpaqueType *OT = dyn_cast<OpaqueType>(DestTy)) {
+ // Type _is_ in module, just opaque...
+ const_cast<OpaqueType*>(OT)->refineAbstractTypeTo(SrcTy);
+ } else if (const OpaqueType *OT = dyn_cast<OpaqueType>(SrcTy)) {
+ const_cast<OpaqueType*>(OT)->refineAbstractTypeTo(DestTy);
+ } else {
+ return true; // Cannot link types... not-equal and neither is opaque.
}
return false;
}
-static const FunctionType *getFT(const PATypeHolder &TH) {
- return cast<FunctionType>(TH.get());
-}
-static const StructType *getST(const PATypeHolder &TH) {
- return cast<StructType>(TH.get());
+/// LinkerTypeMap - This implements a map of types that is stable
+/// even if types are resolved/refined to other types. This is not a general
+/// purpose map; it is specific to the linker's use.
+namespace {
+class LinkerTypeMap : public AbstractTypeUser {
+ typedef DenseMap<const Type*, PATypeHolder> TheMapTy;
+ TheMapTy TheMap;
+
+ LinkerTypeMap(const LinkerTypeMap&); // DO NOT IMPLEMENT
+ void operator=(const LinkerTypeMap&); // DO NOT IMPLEMENT
+public:
+ LinkerTypeMap() {}
+ ~LinkerTypeMap() {
+ for (DenseMap<const Type*, PATypeHolder>::iterator I = TheMap.begin(),
+ E = TheMap.end(); I != E; ++I)
+ I->first->removeAbstractTypeUser(this);
+ }
+
+ /// lookup - Return the value for the specified type or null if it doesn't
+ /// exist.
+ const Type *lookup(const Type *Ty) const {
+ TheMapTy::const_iterator I = TheMap.find(Ty);
+ if (I != TheMap.end()) return I->second;
+ return 0;
+ }
+
+ /// erase - Remove the specified type, returning true if it was in the set.
+ bool erase(const Type *Ty) {
+ if (!TheMap.erase(Ty))
+ return false;
+ if (Ty->isAbstract())
+ Ty->removeAbstractTypeUser(this);
+ return true;
+ }
+
+ /// insert - This returns true if the pointer was new to the set, false if it
+ /// was already in the set.
+ bool insert(const Type *Src, const Type *Dst) {
+ if (!TheMap.insert(std::make_pair(Src, PATypeHolder(Dst))))
+ return false; // Already in map.
+ if (Src->isAbstract())
+ Src->addAbstractTypeUser(this);
+ return true;
+ }
+
+protected:
+ /// refineAbstractType - The callback method invoked when an abstract type is
+ /// resolved to another type. An object must override this method to update
+ /// its internal state to reference NewType instead of OldType.
+ ///
+ virtual void refineAbstractType(const DerivedType *OldTy,
+ const Type *NewTy) {
+ TheMapTy::iterator I = TheMap.find(OldTy);
+ const Type *DstTy = I->second;
+
+ TheMap.erase(I);
+ if (OldTy->isAbstract())
+ OldTy->removeAbstractTypeUser(this);
+
+ // Don't reinsert into the map if the key is concrete now.
+ if (NewTy->isAbstract())
+ insert(NewTy, DstTy);
+ }
+
+ /// The other case which AbstractTypeUsers must be aware of is when a type
+ /// makes the transition from being abstract (where it has clients on its
+ /// AbstractTypeUsers list) to concrete (where it does not). This method
+ /// notifies ATU's when this occurs for a type.
+ virtual void typeBecameConcrete(const DerivedType *AbsTy) {
+ TheMap.erase(AbsTy);
+ AbsTy->removeAbstractTypeUser(this);
+ }
+
+ // for debugging...
+ virtual void dump() const {
+ cerr << "AbstractTypeSet!\n";
+ }
+};
}
+
// RecursiveResolveTypes - This is just like ResolveTypes, except that it
// recurses down into derived types, merging the used types if the parent types
// are compatible.
-static bool RecursiveResolveTypesI(const PATypeHolder &DestTy,
- const PATypeHolder &SrcTy,
- TypeSymbolTable *DestST,
- const std::string &Name,
- std::vector<std::pair<PATypeHolder, PATypeHolder> > &Pointers) {
- const Type *SrcTyT = SrcTy.get();
- const Type *DestTyT = DestTy.get();
- if (DestTyT == SrcTyT) return false; // If already equal, noop
+static bool RecursiveResolveTypesI(const Type *DstTy, const Type *SrcTy,
+ LinkerTypeMap &Pointers) {
+ if (DstTy == SrcTy) return false; // If already equal, noop
// If we found our opaque type, resolve it now!
- if (isa<OpaqueType>(DestTyT) || isa<OpaqueType>(SrcTyT))
- return ResolveTypes(DestTyT, SrcTyT, DestST, Name);
+ if (isa<OpaqueType>(DstTy) || isa<OpaqueType>(SrcTy))
+ return ResolveTypes(DstTy, SrcTy);
// Two types cannot be resolved together if they are of different primitive
// type. For example, we cannot resolve an int to a float.
- if (DestTyT->getTypeID() != SrcTyT->getTypeID()) return true;
+ if (DstTy->getTypeID() != SrcTy->getTypeID()) return true;
+ // If neither type is abstract, then they really are just different types.
+ if (!DstTy->isAbstract() && !SrcTy->isAbstract())
+ return true;
+
// Otherwise, resolve the used type used by this derived type...
- switch (DestTyT->getTypeID()) {
- case Type::IntegerTyID: {
- if (cast<IntegerType>(DestTyT)->getBitWidth() !=
- cast<IntegerType>(SrcTyT)->getBitWidth())
- return true;
- return false;
- }
+ switch (DstTy->getTypeID()) {
+ default:
+ return true;
case Type::FunctionTyID: {
- if (cast<FunctionType>(DestTyT)->isVarArg() !=
- cast<FunctionType>(SrcTyT)->isVarArg() ||
- cast<FunctionType>(DestTyT)->getNumContainedTypes() !=
- cast<FunctionType>(SrcTyT)->getNumContainedTypes())
+ const FunctionType *DstFT = cast<FunctionType>(DstTy);
+ const FunctionType *SrcFT = cast<FunctionType>(SrcTy);
+ if (DstFT->isVarArg() != SrcFT->isVarArg() ||
+ DstFT->getNumContainedTypes() != SrcFT->getNumContainedTypes())
return true;
- for (unsigned i = 0, e = getFT(DestTy)->getNumContainedTypes(); i != e; ++i)
- if (RecursiveResolveTypesI(getFT(DestTy)->getContainedType(i),
- getFT(SrcTy)->getContainedType(i), DestST, "",
- Pointers))
+
+ // Use TypeHolders so recursive resolution won't break us.
+ PATypeHolder ST(SrcFT), DT(DstFT);
+ for (unsigned i = 0, e = DstFT->getNumContainedTypes(); i != e; ++i) {
+ const Type *SE = ST->getContainedType(i), *DE = DT->getContainedType(i);
+ if (SE != DE && RecursiveResolveTypesI(DE, SE, Pointers))
return true;
+ }
return false;
}
case Type::StructTyID: {
- if (getST(DestTy)->getNumContainedTypes() !=
- getST(SrcTy)->getNumContainedTypes()) return 1;
- for (unsigned i = 0, e = getST(DestTy)->getNumContainedTypes(); i != e; ++i)
- if (RecursiveResolveTypesI(getST(DestTy)->getContainedType(i),
- getST(SrcTy)->getContainedType(i), DestST, "",
- Pointers))
+ const StructType *DstST = cast<StructType>(DstTy);
+ const StructType *SrcST = cast<StructType>(SrcTy);
+ if (DstST->getNumContainedTypes() != SrcST->getNumContainedTypes())
+ return true;
+
+ PATypeHolder ST(SrcST), DT(DstST);
+ for (unsigned i = 0, e = DstST->getNumContainedTypes(); i != e; ++i) {
+ const Type *SE = ST->getContainedType(i), *DE = DT->getContainedType(i);
+ if (SE != DE && RecursiveResolveTypesI(DE, SE, Pointers))
return true;
+ }
return false;
}
case Type::ArrayTyID: {
- const ArrayType *DAT = cast<ArrayType>(DestTy.get());
- const ArrayType *SAT = cast<ArrayType>(SrcTy.get());
+ const ArrayType *DAT = cast<ArrayType>(DstTy);
+ const ArrayType *SAT = cast<ArrayType>(SrcTy);
if (DAT->getNumElements() != SAT->getNumElements()) return true;
return RecursiveResolveTypesI(DAT->getElementType(), SAT->getElementType(),
- DestST, "", Pointers);
+ Pointers);
+ }
+ case Type::VectorTyID: {
+ const VectorType *DVT = cast<VectorType>(DstTy);
+ const VectorType *SVT = cast<VectorType>(SrcTy);
+ if (DVT->getNumElements() != SVT->getNumElements()) return true;
+ return RecursiveResolveTypesI(DVT->getElementType(), SVT->getElementType(),
+ Pointers);
}
case Type::PointerTyID: {
+ const PointerType *DstPT = cast<PointerType>(DstTy);
+ const PointerType *SrcPT = cast<PointerType>(SrcTy);
+
+ if (DstPT->getAddressSpace() != SrcPT->getAddressSpace())
+ return true;
+
// If this is a pointer type, check to see if we have already seen it. If
// so, we are in a recursive branch. Cut off the search now. We cannot use
// an associative container for this search, because the type pointers (keys
- // in the container) change whenever types get resolved...
- for (unsigned i = 0, e = Pointers.size(); i != e; ++i)
- if (Pointers[i].first == DestTy)
- return Pointers[i].second != SrcTy;
-
+ // in the container) change whenever types get resolved.
+ if (SrcPT->isAbstract())
+ if (const Type *ExistingDestTy = Pointers.lookup(SrcPT))
+ return ExistingDestTy != DstPT;
+
+ if (DstPT->isAbstract())
+ if (const Type *ExistingSrcTy = Pointers.lookup(DstPT))
+ return ExistingSrcTy != SrcPT;
// Otherwise, add the current pointers to the vector to stop recursion on
// this pair.
- Pointers.push_back(std::make_pair(DestTyT, SrcTyT));
- bool Result =
- RecursiveResolveTypesI(cast<PointerType>(DestTy.get())->getElementType(),
- cast<PointerType>(SrcTy.get())->getElementType(),
- DestST, "", Pointers);
- Pointers.pop_back();
- return Result;
+ if (DstPT->isAbstract())
+ Pointers.insert(DstPT, SrcPT);
+ if (SrcPT->isAbstract())
+ Pointers.insert(SrcPT, DstPT);
+
+ return RecursiveResolveTypesI(DstPT->getElementType(),
+ SrcPT->getElementType(), Pointers);
}
- default: assert(0 && "Unexpected type!"); return true;
}
}
-static bool RecursiveResolveTypes(const PATypeHolder &DestTy,
- const PATypeHolder &SrcTy,
- TypeSymbolTable *DestST,
- const std::string &Name){
- std::vector<std::pair<PATypeHolder, PATypeHolder> > PointerTypes;
- return RecursiveResolveTypesI(DestTy, SrcTy, DestST, Name, PointerTypes);
+static bool RecursiveResolveTypes(const Type *DestTy, const Type *SrcTy) {
+ LinkerTypeMap PointerTypes;
+ return RecursiveResolveTypesI(DestTy, SrcTy, PointerTypes);
}
@@ -200,10 +282,14 @@
const std::string &Name = TI->first;
const Type *RHS = TI->second;
- // Check to see if this type name is already in the dest module...
+ // Check to see if this type name is already in the dest module.
Type *Entry = DestST->lookup(Name);
- if (ResolveTypes(Entry, RHS, DestST, Name)) {
+ // If the name is just in the source module, bring it over to the dest.
+ if (Entry == 0) {
+ if (!Name.empty())
+ DestST->insert(Name, const_cast<Type*>(RHS));
+ } else if (ResolveTypes(Entry, RHS)) {
// They look different, save the types 'till later to resolve.
DelayedTypesToResolve.push_back(Name);
}
@@ -219,7 +305,7 @@
const std::string &Name = DelayedTypesToResolve[i];
Type *T1 = SrcST->lookup(Name);
Type *T2 = DestST->lookup(Name);
- if (!ResolveTypes(T2, T1, DestST, Name)) {
+ if (!ResolveTypes(T2, T1)) {
// We are making progress!
DelayedTypesToResolve.erase(DelayedTypesToResolve.begin()+i);
--i;
@@ -232,10 +318,7 @@
// two types: { int* } and { opaque* }
for (unsigned i = 0, e = DelayedTypesToResolve.size(); i != e; ++i) {
const std::string &Name = DelayedTypesToResolve[i];
- PATypeHolder T1(SrcST->lookup(Name));
- PATypeHolder T2(DestST->lookup(Name));
-
- if (!RecursiveResolveTypes(T2, T1, DestST, Name)) {
+ if (!RecursiveResolveTypes(SrcST->lookup(Name), DestST->lookup(Name))) {
// We are making progress!
DelayedTypesToResolve.erase(DelayedTypesToResolve.begin()+i);
@@ -351,20 +434,10 @@
/// CopyGVAttributes - copy additional attributes (those not needed to construct
/// a GlobalValue) from the SrcGV to the DestGV.
static void CopyGVAttributes(GlobalValue *DestGV, const GlobalValue *SrcGV) {
- // Propagate alignment, visibility and section info.
- DestGV->setAlignment(std::max(DestGV->getAlignment(), SrcGV->getAlignment()));
- DestGV->setSection(SrcGV->getSection());
- DestGV->setVisibility(SrcGV->getVisibility());
- if (const Function *SrcF = dyn_cast<Function>(SrcGV)) {
- Function *DestF = cast<Function>(DestGV);
- DestF->setCallingConv(SrcF->getCallingConv());
- DestF->setParamAttrs(SrcF->getParamAttrs());
- if (SrcF->hasCollector())
- DestF->setCollector(SrcF->getCollector());
- } else if (const GlobalVariable *SrcVar = dyn_cast<GlobalVariable>(SrcGV)) {
- GlobalVariable *DestVar = cast<GlobalVariable>(DestGV);
- DestVar->setThreadLocal(SrcVar->isThreadLocal());
- }
+ // Use the maximum alignment, rather than just copying the alignment of SrcGV.
+ unsigned Alignment = std::max(DestGV->getAlignment(), SrcGV->getAlignment());
+ DestGV->copyAttributesFrom(SrcGV);
+ DestGV->setAlignment(Alignment);
}
/// GetLinkageResult - This analyzes the two global values and determines what
@@ -410,10 +483,12 @@
"': can only link appending global with another appending global!");
LinkFromSrc = true; // Special cased.
LT = Src->getLinkage();
- } else if (Src->hasWeakLinkage() || Src->hasLinkOnceLinkage()) {
- // At this point we know that Dest has LinkOnce, External*, Weak, or
- // DLL* linkage.
- if ((Dest->hasLinkOnceLinkage() && Src->hasWeakLinkage()) ||
+ } else if (Src->hasWeakLinkage() || Src->hasLinkOnceLinkage() ||
+ Src->hasCommonLinkage()) {
+ // At this point we know that Dest has LinkOnce, External*, Weak, Common,
+ // or DLL* linkage.
+ if ((Dest->hasLinkOnceLinkage() &&
+ (Src->hasWeakLinkage() || Src->hasCommonLinkage())) ||
Dest->hasExternalWeakLinkage()) {
LinkFromSrc = true;
LT = Src->getLinkage();
@@ -421,7 +496,8 @@
LinkFromSrc = false;
LT = Dest->getLinkage();
}
- } else if (Dest->hasWeakLinkage() || Dest->hasLinkOnceLinkage()) {
+ } else if (Dest->hasWeakLinkage() || Dest->hasLinkOnceLinkage() ||
+ Dest->hasCommonLinkage()) {
// At this point we know that Src has External* or DLL* linkage.
if (Src->hasExternalWeakLinkage()) {
LinkFromSrc = false;
@@ -469,8 +545,7 @@
DGV = Dest->getGlobalVariable(SGV->getName());
if (DGV && DGV->getType() != SGV->getType())
// If types don't agree due to opaque types, try to resolve them.
- RecursiveResolveTypes(SGV->getType(), DGV->getType(),
- &Dest->getTypeSymbolTable(), "");
+ RecursiveResolveTypes(SGV->getType(), DGV->getType());
}
// Check to see if may have to link the global with the alias
@@ -478,8 +553,7 @@
DGV = Dest->getNamedAlias(SGV->getName());
if (DGV && DGV->getType() != SGV->getType())
// If types don't agree due to opaque types, try to resolve them.
- RecursiveResolveTypes(SGV->getType(), DGV->getType(),
- &Dest->getTypeSymbolTable(), "");
+ RecursiveResolveTypes(SGV->getType(), DGV->getType());
}
if (DGV && DGV->hasInternalLinkage())
@@ -501,7 +575,8 @@
GlobalVariable *NewDGV =
new GlobalVariable(SGV->getType()->getElementType(),
SGV->isConstant(), SGV->getLinkage(), /*init*/0,
- SGV->getName(), Dest);
+ SGV->getName(), Dest, false,
+ SGV->getType()->getAddressSpace());
// Propagate alignment, visibility and section info.
CopyGVAttributes(NewDGV, SGV);
@@ -525,7 +600,8 @@
GlobalVariable *NewDGV =
new GlobalVariable(SGV->getType()->getElementType(),
SGV->isConstant(), SGV->getLinkage(), /*init*/0,
- "", Dest);
+ "", Dest, false,
+ SGV->getType()->getAddressSpace());
// Set alignment allowing CopyGVAttributes merge it with alignment of SGV.
NewDGV->setAlignment(DGV->getAlignment());
@@ -560,7 +636,8 @@
GlobalVariable *NewDGV =
new GlobalVariable(SGV->getType()->getElementType(),
DGVar->isConstant(), DGVar->getLinkage(),
- /*init*/0, DGVar->getName(), Dest);
+ /*init*/0, DGVar->getName(), Dest, false,
+ SGV->getType()->getAddressSpace());
CopyGVAttributes(NewDGV, DGVar);
DGV->replaceAllUsesWith(ConstantExpr::getBitCast(NewDGV,
DGVar->getType()));
@@ -627,20 +704,49 @@
GlobalAlias *NewGA = NULL;
// Globals were already linked, thus we can just query ValueMap for variant
- // of SAliasee in Dest
+ // of SAliasee in Dest.
std::map<const Value*,Value*>::const_iterator VMI = ValueMap.find(SAliasee);
assert(VMI != ValueMap.end() && "Aliasee not linked");
GlobalValue* DAliasee = cast<GlobalValue>(VMI->second);
+ GlobalValue* DGV = NULL;
// Try to find something 'similar' to SGA in destination module.
- if (GlobalAlias *DGA = Dest->getNamedAlias(SGA->getName())) {
+ if (!DGV && !SGA->hasInternalLinkage()) {
+ DGV = Dest->getNamedAlias(SGA->getName());
+
+ // If types don't agree due to opaque types, try to resolve them.
+ if (DGV && DGV->getType() != SGA->getType())
+ if (RecursiveResolveTypes(SGA->getType(), DGV->getType()))
+ return Error(Err, "Alias Collision on '" + SGA->getName()+
+ "': aliases have different types");
+ }
+
+ if (!DGV && !SGA->hasInternalLinkage()) {
+ DGV = Dest->getGlobalVariable(SGA->getName());
+
+ // If types don't agree due to opaque types, try to resolve them.
+ if (DGV && DGV->getType() != SGA->getType())
+ if (RecursiveResolveTypes(SGA->getType(), DGV->getType()))
+ return Error(Err, "Alias Collision on '" + SGA->getName()+
+ "': aliases have different types");
+ }
+
+ if (!DGV && !SGA->hasInternalLinkage()) {
+ DGV = Dest->getFunction(SGA->getName());
+
// If types don't agree due to opaque types, try to resolve them.
- if (RecursiveResolveTypes(SGA->getType(), DGA->getType(),
- &Dest->getTypeSymbolTable(), ""))
- return Error(Err, "Alias Collision on '" + SGA->getName()+
- "': aliases have different types");
+ if (DGV && DGV->getType() != SGA->getType())
+ if (RecursiveResolveTypes(SGA->getType(), DGV->getType()))
+ return Error(Err, "Alias Collision on '" + SGA->getName()+
+ "': aliases have different types");
+ }
- // Now types are known to be the same, check whether aliasees equal. As
+ // No linking to be performed on internal stuff.
+ if (DGV && DGV->hasInternalLinkage())
+ DGV = NULL;
+
+ if (GlobalAlias *DGA = dyn_cast_or_null<GlobalAlias>(DGV)) {
+ // Types are known to be the same, check whether aliasees equal. As
// globals are already linked we just need query ValueMap to find the
// mapping.
if (DAliasee == DGA->getAliasedGlobal()) {
@@ -653,47 +759,41 @@
} else
return Error(Err, "Alias Collision on '" + SGA->getName()+
"': aliases have different aliasees");
- } else if (GlobalVariable *DGV = Dest->getGlobalVariable(SGA->getName())) {
- RecursiveResolveTypes(SGA->getType(), DGV->getType(),
- &Dest->getTypeSymbolTable(), "");
-
+ } else if (GlobalVariable *DGVar = dyn_cast_or_null<GlobalVariable>(DGV)) {
// The only allowed way is to link alias with external declaration.
- if (DGV->isDeclaration()) {
+ if (DGVar->isDeclaration()) {
// But only if aliasee is global too...
if (!isa<GlobalVariable>(DAliasee))
- return Error(Err, "Global-Alias Collision on '" + SGA->getName() +
- "': aliasee is not global variable");
+ return Error(Err, "Global-Alias Collision on '" + SGA->getName() +
+ "': aliasee is not global variable");
NewGA = new GlobalAlias(SGA->getType(), SGA->getLinkage(),
SGA->getName(), DAliasee, Dest);
CopyGVAttributes(NewGA, SGA);
// Any uses of DGV need to change to NewGA, with cast, if needed.
- if (SGA->getType() != DGV->getType())
- DGV->replaceAllUsesWith(ConstantExpr::getBitCast(NewGA,
- DGV->getType()));
+ if (SGA->getType() != DGVar->getType())
+ DGVar->replaceAllUsesWith(ConstantExpr::getBitCast(NewGA,
+ DGVar->getType()));
else
- DGV->replaceAllUsesWith(NewGA);
+ DGVar->replaceAllUsesWith(NewGA);
- // DGV will conflict with NewGA because they both had the same
+ // DGVar will conflict with NewGA because they both had the same
// name. We must erase this now so ForceRenaming doesn't assert
// because DGV might not have internal linkage.
- DGV->eraseFromParent();
+ DGVar->eraseFromParent();
// Proceed to 'common' steps
} else
return Error(Err, "Global-Alias Collision on '" + SGA->getName() +
"': symbol multiple defined");
- } else if (Function *DF = Dest->getFunction(SGA->getName())) {
- RecursiveResolveTypes(SGA->getType(), DF->getType(),
- &Dest->getTypeSymbolTable(), "");
-
+ } else if (Function *DF = dyn_cast_or_null<Function>(DGV)) {
// The only allowed way is to link alias with external declaration.
if (DF->isDeclaration()) {
// But only if aliasee is function too...
if (!isa<Function>(DAliasee))
- return Error(Err, "Function-Alias Collision on '" + SGA->getName() +
- "': aliasee is not function");
+ return Error(Err, "Function-Alias Collision on '" + SGA->getName() +
+ "': aliasee is not function");
NewGA = new GlobalAlias(SGA->getType(), SGA->getLinkage(),
SGA->getName(), DAliasee, Dest);
@@ -716,7 +816,8 @@
return Error(Err, "Function-Alias Collision on '" + SGA->getName() +
"': symbol multiple defined");
} else {
- // Nothing similar found, just copy alias into destination module.
+ // No linking to be performed, simply create an identical version of the
+ // alias over in the dest module...
NewGA = new GlobalAlias(SGA->getType(), SGA->getLinkage(),
SGA->getName(), DAliasee, Dest);
@@ -728,7 +829,7 @@
assert(NewGA && "No alias was created in destination module!");
// If the symbol table renamed the alias, but it is an externally visible
- // symbol, DGV must be an global value with internal linkage. Rename it.
+ // symbol, DGA must be a global value with internal linkage. Rename it.
if (NewGA->getName() != SGA->getName() &&
!NewGA->hasInternalLinkage())
ForceRenaming(NewGA, SGA->getName());
@@ -758,16 +859,19 @@
Constant *SInit =
cast<Constant>(RemapOperand(SGV->getInitializer(), ValueMap));
- GlobalVariable *DGV = cast<GlobalVariable>(ValueMap[SGV]);
+ GlobalVariable *DGV =
+ cast<GlobalVariable>(ValueMap[SGV]->stripPointerCasts());
if (DGV->hasInitializer()) {
if (SGV->hasExternalLinkage()) {
if (DGV->getInitializer() != SInit)
return Error(Err, "Global Variable Collision on '" + SGV->getName() +
"': global variables have different initializers");
- } else if (DGV->hasLinkOnceLinkage() || DGV->hasWeakLinkage()) {
+ } else if (DGV->hasLinkOnceLinkage() || DGV->hasWeakLinkage() ||
+ DGV->hasCommonLinkage()) {
// Nothing is required, mapped values will take the new global
// automatically.
- } else if (SGV->hasLinkOnceLinkage() || SGV->hasWeakLinkage()) {
+ } else if (SGV->hasLinkOnceLinkage() || SGV->hasWeakLinkage() ||
+ SGV->hasCommonLinkage()) {
// Nothing is required, mapped values will take the new global
// automatically.
} else if (DGV->hasAppendingLinkage()) {
@@ -794,35 +898,65 @@
// Loop over all of the functions in the src module, mapping them over
for (Module::const_iterator I = Src->begin(), E = Src->end(); I != E; ++I) {
const Function *SF = I; // SrcFunction
+
Function *DF = 0;
+ Value *MappedDF;
+
+ // If this function is internal or has no name, it doesn't participate in
+ // linkage.
if (SF->hasName() && !SF->hasInternalLinkage()) {
// Check to see if may have to link the function.
DF = Dest->getFunction(SF->getName());
- if (DF && SF->getType() != DF->getType())
- // If types don't agree because of opaque, try to resolve them
- RecursiveResolveTypes(SF->getType(), DF->getType(),
- &Dest->getTypeSymbolTable(), "");
+ if (DF && DF->hasInternalLinkage())
+ DF = 0;
}
-
- // Check visibility
- if (DF && !DF->hasInternalLinkage() &&
- SF->getVisibility() != DF->getVisibility()) {
+
+ // If there is no linkage to be performed, just bring over SF without
+ // modifying it.
+ if (DF == 0) {
+ // Function does not already exist, simply insert a function signature
+ // identical to SF into the dest module.
+ Function *NewDF = Function::Create(SF->getFunctionType(),
+ SF->getLinkage(),
+ SF->getName(), Dest);
+ CopyGVAttributes(NewDF, SF);
+
+ // If the LLVM runtime renamed the function, but it is an externally
+ // visible symbol, DF must be an existing function with internal linkage.
+ // Rename it.
+ if (!NewDF->hasInternalLinkage() && NewDF->getName() != SF->getName())
+ ForceRenaming(NewDF, SF->getName());
+
+ // ... and remember this mapping...
+ ValueMap[SF] = NewDF;
+ continue;
+ }
+
+
+ // If types don't agree because of opaque, try to resolve them.
+ if (SF->getType() != DF->getType())
+ RecursiveResolveTypes(SF->getType(), DF->getType());
+
+ // Check visibility, merging if a definition overrides a prototype.
+ if (SF->getVisibility() != DF->getVisibility()) {
// If one is a prototype, ignore its visibility. Prototypes are always
// overridden by the definition.
if (!SF->isDeclaration() && !DF->isDeclaration())
return Error(Err, "Linking functions named '" + SF->getName() +
"': symbols have different visibilities!");
+
+ // Otherwise, replace the visibility of DF if DF is a prototype.
+ if (DF->isDeclaration())
+ DF->setVisibility(SF->getVisibility());
}
- if (DF && DF->hasInternalLinkage())
- DF = NULL;
-
- if (DF && DF->getType() != SF->getType()) {
+ if (DF->getType() != SF->getType()) {
if (DF->isDeclaration() && !SF->isDeclaration()) {
// We have a definition of the same name but different type in the
// source module. Copy the prototype to the destination and replace
// uses of the destination's prototype with the new prototype.
- Function *NewDF = Function::Create(SF->getFunctionType(), SF->getLinkage(),
+ Function *NewDF = Function::Create(SF->getFunctionType(),
+ SF->getLinkage(),
SF->getName(), Dest);
CopyGVAttributes(NewDF, SF);
@@ -843,77 +977,77 @@
// Remember this mapping so uses in the source module get remapped
// later by RemapOperand.
ValueMap[SF] = NewDF;
- } else if (SF->isDeclaration()) {
- // We have two functions of the same name but different type and the
- // source is a declaration while the destination is not. Any use of
- // the source must be mapped to the destination, with a cast.
- ValueMap[SF] = ConstantExpr::getBitCast(DF, SF->getType());
+ continue;
} else {
- // We have two functions of the same name but different types and they
- // are both definitions. This is an error.
- return Error(Err, "Function '" + DF->getName() + "' defined as both '" +
- ToStr(SF->getFunctionType(), Src) + "' and '" +
- ToStr(DF->getFunctionType(), Dest) + "'");
+ // We have two functions of the same name but different type. Any use
+ // of the source must be mapped to the destination, with a cast.
+ MappedDF = ConstantExpr::getBitCast(DF, SF->getType());
}
- } else if (!DF || SF->hasInternalLinkage() || DF->hasInternalLinkage()) {
- // Function does not already exist, simply insert an function signature
- // identical to SF into the dest module.
- Function *NewDF = Function::Create(SF->getFunctionType(), SF->getLinkage(),
- SF->getName(), Dest);
- CopyGVAttributes(NewDF, SF);
-
- // If the LLVM runtime renamed the function, but it is an externally
- // visible symbol, DF must be an existing function with internal linkage.
- // Rename it.
- if (NewDF->getName() != SF->getName() && !NewDF->hasInternalLinkage())
- ForceRenaming(NewDF, SF->getName());
-
- // ... and remember this mapping...
- ValueMap[SF] = NewDF;
- } else if (SF->isDeclaration()) {
+ } else {
+ MappedDF = DF;
+ }
+
+ if (SF->isDeclaration()) {
// If SF is a declaration or if both SF & DF are declarations, just link
// the declarations, we aren't adding anything.
if (SF->hasDLLImportLinkage()) {
if (DF->isDeclaration()) {
- ValueMap.insert(std::make_pair(SF, DF));
+ ValueMap[SF] = MappedDF;
DF->setLinkage(SF->getLinkage());
- }
+ }
} else {
- ValueMap[SF] = DF;
- }
- } else if (DF->isDeclaration() && !DF->hasDLLImportLinkage()) {
- // If DF is external but SF is not...
- // Link the external functions, update linkage qualifiers
- ValueMap.insert(std::make_pair(SF, DF));
+ ValueMap[SF] = MappedDF;
+ }
+ continue;
+ }
+
+ // If DF is external but SF is not, link the external functions, update
+ // linkage qualifiers.
+ if (DF->isDeclaration() && !DF->hasDLLImportLinkage()) {
+ ValueMap.insert(std::make_pair(SF, MappedDF));
DF->setLinkage(SF->getLinkage());
- // Visibility of prototype is overridden by vis of definition.
- DF->setVisibility(SF->getVisibility());
- } else if (SF->hasWeakLinkage() || SF->hasLinkOnceLinkage()) {
- // At this point we know that DF has LinkOnce, Weak, or External* linkage.
- ValueMap[SF] = DF;
+ continue;
+ }
+
+ // At this point we know that DF has LinkOnce, Weak, or External* linkage.
+ if (SF->hasWeakLinkage() || SF->hasLinkOnceLinkage() ||
+ SF->hasCommonLinkage()) {
+ ValueMap[SF] = MappedDF;
// Linkonce+Weak = Weak
// *+External Weak = *
- if ((DF->hasLinkOnceLinkage() && SF->hasWeakLinkage()) ||
+ if ((DF->hasLinkOnceLinkage() &&
+ (SF->hasWeakLinkage() || SF->hasCommonLinkage())) ||
DF->hasExternalWeakLinkage())
DF->setLinkage(SF->getLinkage());
- } else if (DF->hasWeakLinkage() || DF->hasLinkOnceLinkage()) {
+ continue;
+ }
+
+ if (DF->hasWeakLinkage() || DF->hasLinkOnceLinkage() ||
+ DF->hasCommonLinkage()) {
// At this point we know that SF has LinkOnce or External* linkage.
- ValueMap[SF] = DF;
- if (!SF->hasLinkOnceLinkage() && !SF->hasExternalWeakLinkage())
- // Don't inherit linkonce & external weak linkage
+ ValueMap[SF] = MappedDF;
+
+ // If the source function has stronger linkage than the destination,
+ // its body and linkage should override ours.
+ if (!SF->hasLinkOnceLinkage() && !SF->hasExternalWeakLinkage()) {
+ // Don't inherit linkonce & external weak linkage.
DF->setLinkage(SF->getLinkage());
- } else if (SF->getLinkage() != DF->getLinkage()) {
- return Error(Err, "Functions named '" + SF->getName() +
- "' have different linkage specifiers!");
- } else if (SF->hasExternalLinkage()) {
- // The function is defined identically in both modules!!
+ DF->deleteBody();
+ }
+ continue;
+ }
+
+ if (SF->getLinkage() != DF->getLinkage())
+ return Error(Err, "Functions named '" + SF->getName() +
+ "' have different linkage specifiers!");
+
+ // The function is defined identically in both modules!
+ if (SF->hasExternalLinkage())
return Error(Err, "Function '" +
ToStr(SF->getFunctionType(), Src) + "':\"" +
SF->getName() + "\" - Function is already defined!");
- } else {
- assert(0 && "Unknown linkage configuration found!");
- }
+ assert(0 && "Unknown linkage configuration found!");
}
return false;
}
@@ -971,10 +1105,10 @@
// go
for (Module::iterator SF = Src->begin(), E = Src->end(); SF != E; ++SF) {
if (!SF->isDeclaration()) { // No body if function is external
- Function *DF = cast<Function>(ValueMap[SF]); // Destination function
+ Function *DF = dyn_cast<Function>(ValueMap[SF]); // Destination function
// DF not external SF external?
- if (DF->isDeclaration())
+ if (DF && DF->isDeclaration())
// Only provide the function body if there isn't one already.
if (LinkFunctionBody(DF, SF, ValueMap, Err))
return true;
@@ -1035,7 +1169,8 @@
// Create the new global variable...
GlobalVariable *NG =
new GlobalVariable(NewType, G1->isConstant(), G1->getLinkage(),
- /*init*/0, First->first, M, G1->isThreadLocal());
+ /*init*/0, First->first, M, G1->isThreadLocal(),
+ G1->getType()->getAddressSpace());
// Propagate alignment, visibility and section info.
CopyGVAttributes(NG, G1);
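One detail of the RecursiveResolveTypesI/LinkerTypeMap change above that is easy to miss is the cycle cutoff: when the same (dest, src) pointer pair is encountered a second time, the recursion stops and the pair is treated as compatible, which is what lets mutually recursive types terminate. A minimal standalone sketch of just that idea, using a made-up Node type rather than the LLVM type system or the real LinkerTypeMap:

#include <cassert>
#include <set>
#include <utility>

// Hypothetical "type" node: a leaf with an id, or a pointer to another node
// (possibly forming a cycle, like a recursive struct type).
struct Node {
  int leafId;            // compared when pointee is null
  const Node *pointee;   // non-null for "pointer" nodes
};

// Structural comparison in the spirit of RecursiveResolveTypesI: when the
// same (dst, src) pointer pair shows up again we cut the recursion off and
// treat the pair as compatible, which is what makes cyclic types terminate.
static bool structurallyEqual(const Node *D, const Node *S,
                              std::set<std::pair<const Node*, const Node*> > &Seen) {
  if (D == S) return true;
  if (!D->pointee || !S->pointee)
    return !D->pointee && !S->pointee && D->leafId == S->leafId;
  if (!Seen.insert(std::make_pair(D, S)).second)
    return true;                       // already comparing this pair
  return structurallyEqual(D->pointee, S->pointee, Seen);
}

int main() {
  Node A = {0, 0}, B = {0, 0};
  A.pointee = &A;                      // self-referential, like %T = type { %T* }
  B.pointee = &B;
  std::set<std::pair<const Node*, const Node*> > Seen;
  assert(structurallyEqual(&A, &B, Seen));
  return 0;
}

The real code keys the map on only the abstract side so that entries can be rewritten when a type is refined; the sketch only shows the termination argument.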
Modified: llvm/branches/non-call-eh/lib/Support/APFloat.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Support/APFloat.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Support/APFloat.cpp (original)
+++ llvm/branches/non-call-eh/lib/Support/APFloat.cpp Sun Jul 6 15:45:41 2008
@@ -163,9 +163,9 @@
static int
totalExponent(const char *p, int exponentAdjustment)
{
- integerPart unsignedExponent;
+ int unsignedExponent;
bool negative, overflow;
- long exponent;
+ int exponent;
/* Move past the exponent letter and sign to the digits. */
p++;
@@ -280,9 +280,10 @@
while (*p == '.');
/* Adjust the exponents for any decimal point. */
- D->exponent += (dot - p) - (dot > p);
- D->normalizedExponent = (D->exponent + (p - D->firstSigDigit)
- - (dot > D->firstSigDigit && dot < p));
+ D->exponent += static_cast<exponent_t>((dot - p) - (dot > p));
+ D->normalizedExponent = (D->exponent +
+ static_cast<exponent_t>((p - D->firstSigDigit)
+ - (dot > D->firstSigDigit && dot < p)));
}
D->lastSigDigit = p;
@@ -437,8 +438,8 @@
static unsigned int
powerOf5(integerPart *dst, unsigned int power)
{
- static integerPart firstEightPowers[] = { 1, 5, 25, 125, 625, 3125,
- 15625, 78125 };
+ static const integerPart firstEightPowers[] = { 1, 5, 25, 125, 625, 3125,
+ 15625, 78125 };
static integerPart pow5s[maxPowerOfFiveParts * 2 + 5] = { 78125 * 5 };
static unsigned int partsCount[16] = { 1 };
@@ -2043,7 +2044,7 @@
/* Calculate the exponent adjustment implicit in the number of
significant digits. */
- expAdjustment = dot - firstSignificantDigit;
+ expAdjustment = static_cast<int>(dot - firstSignificantDigit);
if(expAdjustment < 0)
expAdjustment++;
expAdjustment = expAdjustment * 4 - 1;
@@ -2097,7 +2098,8 @@
decSig.exponent += exp;
lostFraction calcLostFraction;
- integerPart HUerr, HUdistance, powHUerr;
+ integerPart HUerr, HUdistance;
+ unsigned int powHUerr;
if (exp >= 0) {
/* multiplySignificand leaves the precision-th bit set to 1. */
@@ -2113,7 +2115,7 @@
excessPrecision = calcSemantics.precision;
}
/* Extra half-ulp lost in reciprocal of exponent. */
- powHUerr = (powStatus == opOK && calcLostFraction == lfExactlyZero) ? 0: 2;
+ powHUerr = (powStatus == opOK && calcLostFraction == lfExactlyZero) ? 0:2;
}
/* Both multiplySignificand and divideSignificand return the
@@ -2190,7 +2192,7 @@
N-digit decimal integer is N * 196 / 59. Allocate enough space
to hold the full significand, and an extra part required by
tcMultiplyPart. */
- partCount = (D.lastSigDigit - D.firstSigDigit) + 1;
+ partCount = static_cast<unsigned int>(D.lastSigDigit - D.firstSigDigit) + 1;
partCount = partCountForBits(1 + 196 * partCount / 59);
decSignificand = new integerPart[partCount + 1];
partCount = 0;
@@ -2320,7 +2322,7 @@
*dst = 0;
- return dst - p;
+ return static_cast<unsigned int>(dst - p);
}
/* Does the hard work of outputting the correctly rounded hexadecimal
@@ -2443,7 +2445,7 @@
uint32_t hash = sign<<11 | semantics->precision | exponent<<12;
const integerPart* p = significandParts();
for (int i=partCount(); i>0; i--, p++)
- hash ^= ((uint32_t)*p) ^ (*p)>>32;
+ hash ^= ((uint32_t)*p) ^ (uint32_t)((*p)>>32);
return hash;
}
}
@@ -2483,8 +2485,8 @@
}
uint64_t words[2];
- words[0] = (((uint64_t)sign & 1) << 63) |
- ((myexponent & 0x7fff) << 48) |
+ words[0] = ((uint64_t)(sign & 1) << 63) |
+ ((myexponent & 0x7fffLL) << 48) |
((mysignificand >>16) & 0xffffffffffffLL);
words[1] = mysignificand & 0xffff;
return APInt(80, 2, words);
@@ -2526,10 +2528,10 @@
}
uint64_t words[2];
- words[0] = (((uint64_t)sign & 1) << 63) |
+ words[0] = ((uint64_t)(sign & 1) << 63) |
((myexponent & 0x7ff) << 52) |
(mysignificand & 0xfffffffffffffLL);
- words[1] = (((uint64_t)sign2 & 1) << 63) |
+ words[1] = ((uint64_t)(sign2 & 1) << 63) |
((myexponent2 & 0x7ff) << 52) |
(mysignificand2 & 0xfffffffffffffLL);
return APInt(128, 2, words);
@@ -2560,7 +2562,7 @@
mysignificand = *significandParts();
}
- return APInt(64, (((((uint64_t)sign & 1) << 63) |
+ return APInt(64, ((((uint64_t)(sign & 1) << 63) |
((myexponent & 0x7ff) << 52) |
(mysignificand & 0xfffffffffffffLL))));
}
@@ -2575,7 +2577,7 @@
if (category==fcNormal) {
myexponent = exponent+127; //bias
- mysignificand = *significandParts();
+ mysignificand = (uint32_t)*significandParts();
if (myexponent == 1 && !(mysignificand & 0x800000))
myexponent = 0; // denormal
} else if (category==fcZero) {
@@ -2587,7 +2589,7 @@
} else {
assert(category == fcNaN && "Unknown category!");
myexponent = 0xff;
- mysignificand = *significandParts();
+ mysignificand = (uint32_t)*significandParts();
}
return APInt(32, (((sign&1) << 31) | ((myexponent&0xff) << 23) |
@@ -2649,7 +2651,7 @@
initialize(&APFloat::x87DoubleExtended);
assert(partCount()==2);
- sign = i1>>63;
+ sign = static_cast<unsigned int>(i1>>63);
if (myexponent==0 && mysignificand==0) {
// exponent, significand meaningless
category = fcZero;
@@ -2685,8 +2687,8 @@
initialize(&APFloat::PPCDoubleDouble);
assert(partCount()==2);
- sign = i1>>63;
- sign2 = i2>>63;
+ sign = static_cast<unsigned int>(i1>>63);
+ sign2 = static_cast<unsigned int>(i2>>63);
if (myexponent==0 && mysignificand==0) {
// exponent, significand meaningless
// exponent2 and significand2 are required to be 0; we don't check
@@ -2732,7 +2734,7 @@
initialize(&APFloat::IEEEdouble);
assert(partCount()==1);
- sign = i>>63;
+ sign = static_cast<unsigned int>(i>>63);
if (myexponent==0 && mysignificand==0) {
// exponent, significand meaningless
category = fcZero;
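The hashing change above makes the narrowing of the high half explicit before it is xor'ed into the 32-bit hash. A tiny self-contained illustration of folding one 64-bit significand word into 32 bits that way; the helper name is made up for the example:

#include <cassert>
#include <stdint.h>

// Fold a 64-bit word into 32 bits the way the fixed hash line does:
// cast each half explicitly so no implicit narrowing is left to chance.
static uint32_t fold64(uint64_t p) {
  return (uint32_t)p ^ (uint32_t)(p >> 32);
}

int main() {
  assert(fold64(0x0000000100000001ULL) == 0);           // identical halves cancel
  assert(fold64(0xDEADBEEF00000000ULL) == 0xDEADBEEFu); // only the high half set
  return 0;
}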
Modified: llvm/branches/non-call-eh/lib/Support/APInt.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Support/APInt.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Support/APInt.cpp (original)
+++ llvm/branches/non-call-eh/lib/Support/APInt.cpp Sun Jul 6 15:45:41 2008
@@ -99,7 +99,7 @@
assert(BitWidth >= MIN_INT_BITS && "bitwidth too small");
assert(BitWidth <= MAX_INT_BITS && "bitwidth too large");
assert(!Val.empty() && "String empty?");
- fromString(numbits, Val.c_str(), Val.size(), radix);
+ fromString(numbits, Val.c_str(), (uint32_t)Val.size(), radix);
}
APInt::APInt(const APInt& that)
@@ -905,7 +905,7 @@
// Otherwise, we have to shift the mantissa bits up to the right location
APInt Tmp(width, mantissa);
- Tmp = Tmp.shl(exp - 52);
+ Tmp = Tmp.shl((uint32_t)exp - 52);
return isNeg ? -Tmp : Tmp;
}
@@ -1086,7 +1086,7 @@
/// Arithmetic right-shift this APInt by shiftAmt.
/// @brief Arithmetic right-shift function.
APInt APInt::ashr(const APInt &shiftAmt) const {
- return ashr(shiftAmt.getLimitedValue(BitWidth));
+ return ashr((uint32_t)shiftAmt.getLimitedValue(BitWidth));
}
/// Arithmetic right-shift this APInt by shiftAmt.
@@ -1113,7 +1113,7 @@
// issues in the algorithm below.
if (shiftAmt == BitWidth) {
if (isNegative())
- return APInt(BitWidth, -1ULL);
+ return APInt(BitWidth, -1ULL, true);
else
return APInt(BitWidth, 0);
}
@@ -1175,7 +1175,7 @@
/// Logical right-shift this APInt by shiftAmt.
/// @brief Logical right-shift function.
APInt APInt::lshr(const APInt &shiftAmt) const {
- return lshr(shiftAmt.getLimitedValue(BitWidth));
+ return lshr((uint32_t)shiftAmt.getLimitedValue(BitWidth));
}
/// Logical right-shift this APInt by shiftAmt.
@@ -1244,7 +1244,7 @@
/// @brief Left-shift function.
APInt APInt::shl(const APInt &shiftAmt) const {
// It's undefined behavior in C to shift by BitWidth or greater, but
- return shl(shiftAmt.getLimitedValue(BitWidth));
+ return shl((uint32_t)shiftAmt.getLimitedValue(BitWidth));
}
/// Left-shift this APInt by shiftAmt.
@@ -1307,7 +1307,7 @@
}
APInt APInt::rotl(const APInt &rotateAmt) const {
- return rotl(rotateAmt.getLimitedValue(BitWidth));
+ return rotl((uint32_t)rotateAmt.getLimitedValue(BitWidth));
}
APInt APInt::rotl(uint32_t rotateAmt) const {
@@ -1322,7 +1322,7 @@
}
APInt APInt::rotr(const APInt &rotateAmt) const {
- return rotr(rotateAmt.getLimitedValue(BitWidth));
+ return rotr((uint32_t)rotateAmt.getLimitedValue(BitWidth));
}
APInt APInt::rotr(uint32_t rotateAmt) const {
@@ -1426,6 +1426,50 @@
return x_old + 1;
}
+/// Computes the multiplicative inverse of this APInt for a given modulo. The
+/// iterative extended Euclidean algorithm is used to solve for this value,
+/// however we simplify it to speed up calculating only the inverse, and take
+/// advantage of div+rem calculations. We also use some tricks to avoid copying
+/// (potentially large) APInts around.
+APInt APInt::multiplicativeInverse(const APInt& modulo) const {
+ assert(ult(modulo) && "This APInt must be smaller than the modulo");
+
+ // Using the properties listed at the following web page (accessed 06/21/08):
+ // http://www.numbertheory.org/php/euclid.html
+ // (especially the properties numbered 3, 4 and 9) it can be proved that
+ // BitWidth bits suffice for all the computations in the algorithm implemented
+ // below. More precisely, this number of bits suffices if the multiplicative
+ // inverse exists, but may not suffice for the general extended Euclidean
+ // algorithm.
+
+ APInt r[2] = { modulo, *this };
+ APInt t[2] = { APInt(BitWidth, 0), APInt(BitWidth, 1) };
+ APInt q(BitWidth, 0);
+
+ unsigned i;
+ for (i = 0; r[i^1] != 0; i ^= 1) {
+ // An overview of the math without the confusing bit-flipping:
+ // q = r[i-2] / r[i-1]
+ // r[i] = r[i-2] % r[i-1]
+ // t[i] = t[i-2] - t[i-1] * q
+ udivrem(r[i], r[i^1], q, r[i]);
+ t[i] -= t[i^1] * q;
+ }
+
+ // If this APInt and the modulo are not coprime, there is no multiplicative
+ // inverse, so return 0. We check this by looking at the next-to-last
+ // remainder, which is the gcd(*this,modulo) as calculated by the Euclidean
+ // algorithm.
+ if (r[i] != 1)
+ return APInt(BitWidth, 0);
+
+ // The next-to-last t is the multiplicative inverse. However, we are
+ // interested in a positive inverse. Calculate a positive one from a negative
+ // one if necessary. A simple addition of the modulo suffices because
+ // abs(t[i]) is known to be less than *this/2 (see the link above).
+ return t[i].isNegative() ? t[i] + modulo : t[i];
+}
+
/// Implementation of Knuth's Algorithm D (Division of nonnegative integers)
/// from "Art of Computer Programming, Volume 2", section 4.3.1, p. 272. The
/// variables here have the same names as in the algorithm. Comments explain
@@ -1517,8 +1561,8 @@
uint64_t result = u_tmp - subtrahend;
uint32_t k = j + i;
- u[k++] = result & (b-1); // subtract low word
- u[k++] = result >> 32; // subtract high word
+ u[k++] = (uint32_t)(result & (b-1)); // subtract low word
+ u[k++] = (uint32_t)(result >> 32); // subtract high word
while (borrow && k <= m+n) { // deal with borrow to the left
borrow = u[k] == 0;
u[k]--;
@@ -1549,7 +1593,7 @@
// D5. [Test remainder.] Set q[j] = qp. If the result of step D4 was
// negative, go to step D6; otherwise go on to step D7.
- q[j] = qp;
+ q[j] = (uint32_t)qp;
if (isNeg) {
// D6. [Add back]. The probability that this step is necessary is very
// small, on the order of only 2/b. Make sure that test data accounts for
@@ -1645,8 +1689,8 @@
memset(U, 0, (m+n+1)*sizeof(uint32_t));
for (unsigned i = 0; i < lhsWords; ++i) {
uint64_t tmp = (LHS.getNumWords() == 1 ? LHS.VAL : LHS.pVal[i]);
- U[i * 2] = tmp & mask;
- U[i * 2 + 1] = tmp >> (sizeof(uint32_t)*8);
+ U[i * 2] = (uint32_t)(tmp & mask);
+ U[i * 2 + 1] = (uint32_t)(tmp >> (sizeof(uint32_t)*8));
}
U[m+n] = 0; // this extra word is for "spill" in the Knuth algorithm.
@@ -1654,8 +1698,8 @@
memset(V, 0, (n)*sizeof(uint32_t));
for (unsigned i = 0; i < rhsWords; ++i) {
uint64_t tmp = (RHS.getNumWords() == 1 ? RHS.VAL : RHS.pVal[i]);
- V[i * 2] = tmp & mask;
- V[i * 2 + 1] = tmp >> (sizeof(uint32_t)*8);
+ V[i * 2] = (uint32_t)(tmp & mask);
+ V[i * 2 + 1] = (uint32_t)(tmp >> (sizeof(uint32_t)*8));
}
// initialize the quotient and remainder
@@ -1691,13 +1735,13 @@
remainder = 0;
} else if (partial_dividend < divisor) {
Q[i] = 0;
- remainder = partial_dividend;
+ remainder = (uint32_t)partial_dividend;
} else if (partial_dividend == divisor) {
Q[i] = 1;
remainder = 0;
} else {
- Q[i] = partial_dividend / divisor;
- remainder = partial_dividend - (Q[i] * divisor);
+ Q[i] = (uint32_t)(partial_dividend / divisor);
+ remainder = (uint32_t)(partial_dividend - (Q[i] * divisor));
}
}
if (R)
@@ -1882,13 +1926,10 @@
if (lhsWords == 1 && rhsWords == 1) {
// There is only one word to consider so use the native versions.
- if (LHS.isSingleWord()) {
- Quotient = APInt(LHS.getBitWidth(), LHS.VAL / RHS.VAL);
- Remainder = APInt(LHS.getBitWidth(), LHS.VAL % RHS.VAL);
- } else {
- Quotient = APInt(LHS.getBitWidth(), LHS.pVal[0] / RHS.pVal[0]);
- Remainder = APInt(LHS.getBitWidth(), LHS.pVal[0] % RHS.pVal[0]);
- }
+ uint64_t lhsValue = LHS.isSingleWord() ? LHS.VAL : LHS.pVal[0];
+ uint64_t rhsValue = RHS.isSingleWord() ? RHS.VAL : RHS.pVal[0];
+ Quotient = APInt(LHS.getBitWidth(), lhsValue / rhsValue);
+ Remainder = APInt(LHS.getBitWidth(), lhsValue % rhsValue);
return;
}
@@ -1991,7 +2032,7 @@
memset(buf, 0, 65);
uint64_t v = VAL;
while (bits_used) {
- uint32_t bit = v & 1;
+ uint32_t bit = (uint32_t)v & 1;
bits_used--;
buf[bits_used] = digits[bit][0];
v >>=1;
@@ -2026,7 +2067,8 @@
uint64_t mask = radix - 1;
APInt zero(tmp.getBitWidth(), 0);
while (tmp.ne(zero)) {
- unsigned digit = (tmp.isSingleWord() ? tmp.VAL : tmp.pVal[0]) & mask;
+ unsigned digit =
+ (unsigned)((tmp.isSingleWord() ? tmp.VAL : tmp.pVal[0]) & mask);
result.insert(insert_at, digits[digit]);
tmp = tmp.lshr(shift);
}
@@ -2047,14 +2089,14 @@
result = "-";
insert_at = 1;
}
- if (tmp == APInt(tmp.getBitWidth(), 0))
+ if (tmp == zero)
result = "0";
else while (tmp.ne(zero)) {
APInt APdigit(1,0);
APInt tmp2(tmp.getBitWidth(), 0);
divide(tmp, tmp.getNumWords(), divisor, divisor.getNumWords(), &tmp2,
&APdigit);
- uint32_t digit = APdigit.getZExtValue();
+ uint32_t digit = (uint32_t)APdigit.getZExtValue();
assert(digit < radix && "divide failed");
result.insert(insert_at,digits[digit]);
tmp = tmp2;
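The new APInt::multiplicativeInverse above runs a simplified iterative extended Euclidean algorithm that tracks only the remainder and t sequences and fixes up a negative result by adding the modulus once. A rough sketch of the same procedure on plain 64-bit integers, assuming the operand is already reduced modulo m (illustration only, not APInt):

#include <cassert>
#include <iostream>
#include <stdint.h>

// Returns x in [0, m) with (a * x) % m == 1, or 0 when gcd(a, m) != 1,
// mirroring the structure of APInt::multiplicativeInverse.
static uint64_t modInverse(uint64_t a, uint64_t m) {
  assert(a < m && "a must already be reduced modulo m");
  uint64_t r = m, newR = a;
  int64_t t = 0, newT = 1;
  while (newR != 0) {
    uint64_t q = r / newR;
    uint64_t nextR = r % newR;
    int64_t nextT = t - (int64_t)q * newT;
    r = newR; newR = nextR;
    t = newT; newT = nextT;
  }
  if (r != 1)
    return 0;                                   // not coprime: no inverse exists
  return t < 0 ? (uint64_t)(t + (int64_t)m) : (uint64_t)t;
}

int main() {
  std::cout << modInverse(3, 7) << "\n";        // 5, since 3 * 5 == 1 (mod 7)
  std::cout << modInverse(4, 8) << "\n";        // 0, since gcd(4, 8) != 1
  return 0;
}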
Modified: llvm/branches/non-call-eh/lib/Support/Allocator.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Support/Allocator.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Support/Allocator.cpp (original)
+++ llvm/branches/non-call-eh/lib/Support/Allocator.cpp Sun Jul 6 15:45:41 2008
@@ -45,14 +45,17 @@
/// Allocate - Allocate and return at least the specified number of bytes.
///
- void *Allocate(unsigned AllocSize, unsigned Alignment, MemRegion **RegPtr) {
- // Round size up to an even multiple of the alignment.
- AllocSize = (AllocSize+Alignment-1) & ~(Alignment-1);
+ void *Allocate(size_t AllocSize, size_t Alignment, MemRegion **RegPtr) {
- // If there is space in this region, return it.
- if (unsigned(NextPtr+AllocSize-(char*)this) <= RegionSize) {
- void *Result = NextPtr;
- NextPtr += AllocSize;
+ char* Result = (char*) (((uintptr_t) (NextPtr+Alignment-1))
+ & ~((uintptr_t) Alignment-1));
+
+ // Speculate the new value of NextPtr.
+ char* NextPtrTmp = Result + AllocSize;
+
+ // If we are still within the current region, return Result.
+ if (unsigned (NextPtrTmp - (char*) this) <= RegionSize) {
+ NextPtr = NextPtrTmp;
return Result;
}
@@ -110,7 +113,7 @@
TheMemory = MRP;
}
-void *BumpPtrAllocator::Allocate(unsigned Size, unsigned Align) {
+void *BumpPtrAllocator::Allocate(size_t Size, size_t Align) {
MemRegion *MRP = (MemRegion*)TheMemory;
void *Ptr = MRP->Allocate(Size, Align, &MRP);
TheMemory = MRP;
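The rewritten MemRegion::Allocate aligns the bump pointer itself with a (p + Alignment - 1) & ~(Alignment - 1) mask instead of rounding the allocation size. A small sketch of just that alignment step on a raw buffer; alignPtr is an illustrative name, not part of the BumpPtrAllocator interface:

#include <cassert>
#include <stdint.h>

// Round a raw address up to the next multiple of Alignment (a power of two)
// with a single mask, the same step the new Allocate performs before bumping
// NextPtr.
static char *alignPtr(char *P, uintptr_t Alignment) {
  uintptr_t Addr = (uintptr_t)P;
  Addr = (Addr + Alignment - 1) & ~(Alignment - 1);
  return (char *)Addr;
}

int main() {
  char Buffer[64];
  char *P = alignPtr(Buffer + 1, 16);
  assert(((uintptr_t)P & 15) == 0);              // 16-byte aligned
  assert(P >= Buffer + 1 && P <= Buffer + 17);   // rounded up by at most 15 bytes
  return 0;
}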
Modified: llvm/branches/non-call-eh/lib/Support/CommandLine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Support/CommandLine.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Support/CommandLine.cpp (original)
+++ llvm/branches/non-call-eh/lib/Support/CommandLine.cpp Sun Jul 6 15:45:41 2008
@@ -17,7 +17,9 @@
//===----------------------------------------------------------------------===//
#include "llvm/Config/config.h"
+#include "llvm/ADT/OwningPtr.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/Streams.h"
#include "llvm/System/Path.h"
@@ -87,7 +89,7 @@
void Option::addArgument() {
assert(NextRegistered == 0 && "argument multiply registered!");
-
+
NextRegistered = RegisteredOptionList;
RegisteredOptionList = this;
MarkOptionsChanged();
@@ -111,19 +113,19 @@
O->getExtraOptionNames(OptionNames);
if (O->ArgStr[0])
OptionNames.push_back(O->ArgStr);
-
+
// Handle named options.
- for (unsigned i = 0, e = OptionNames.size(); i != e; ++i) {
+ for (size_t i = 0, e = OptionNames.size(); i != e; ++i) {
// Add argument to the argument map!
if (!OptionsMap.insert(std::pair<std::string,Option*>(OptionNames[i],
O)).second) {
cerr << ProgramName << ": CommandLine Error: Argument '"
- << OptionNames[0] << "' defined more than once!\n";
+ << OptionNames[i] << "' defined more than once!\n";
}
}
-
+
OptionNames.clear();
-
+
// Remember information about positional options.
if (O->getFormattingFlag() == cl::Positional)
PositionalOpts.push_back(O);
@@ -135,10 +137,10 @@
CAOpt = O;
}
}
-
+
if (CAOpt)
PositionalOpts.push_back(CAOpt);
-
+
// Make sure that they are in order of registration not backwards.
std::reverse(PositionalOpts.begin(), PositionalOpts.end());
}
@@ -150,17 +152,17 @@
static Option *LookupOption(const char *&Arg, const char *&Value,
std::map<std::string, Option*> &OptionsMap) {
while (*Arg == '-') ++Arg; // Eat leading dashes
-
+
const char *ArgEnd = Arg;
while (*ArgEnd && *ArgEnd != '=')
++ArgEnd; // Scan till end of argument name.
-
+
if (*ArgEnd == '=') // If we have an equals sign...
Value = ArgEnd+1; // Get the value, not the equals
-
-
+
+
if (*Arg == 0) return 0;
-
+
// Look up the option.
std::map<std::string, Option*>::iterator I =
OptionsMap.find(std::string(Arg, ArgEnd));
@@ -221,7 +223,7 @@
// see if there options that satisfy the predicate. If we find one, return it,
// otherwise return null.
//
-static Option *getOptionPred(std::string Name, unsigned &Length,
+static Option *getOptionPred(std::string Name, size_t &Length,
bool (*Pred)(const Option*),
std::map<std::string, Option*> &OptionsMap) {
@@ -309,7 +311,7 @@
/// an environment variable (whose name is given in ENVVAR).
///
void cl::ParseEnvironmentOptions(const char *progName, const char *envVar,
- const char *Overview) {
+ const char *Overview, bool ReadResponseFiles) {
// Check args.
assert(progName && "Program name not specified");
assert(envVar && "Environment variable name missing");
@@ -327,8 +329,8 @@
// Parse the value of the environment variable into a "command line"
// and hand it off to ParseCommandLineOptions().
ParseCStringVector(newArgv, envValue);
- int newArgc = newArgv.size();
- ParseCommandLineOptions(newArgc, &newArgv[0], Overview);
+ int newArgc = static_cast<int>(newArgv.size());
+ ParseCommandLineOptions(newArgc, &newArgv[0], Overview, ReadResponseFiles);
// Free all the strdup()ed strings.
for (std::vector<char*>::iterator i = newArgv.begin(), e = newArgv.end();
@@ -336,32 +338,78 @@
free (*i);
}
+
+/// ExpandResponseFiles - Copy the contents of argv into newArgv,
+/// substituting the contents of the response files for the arguments
+/// of type @file.
+static void ExpandResponseFiles(int argc, char** argv,
+ std::vector<char*>& newArgv) {
+ for (int i = 1; i != argc; ++i) {
+ char* arg = argv[i];
+
+ if (arg[0] == '@') {
+
+ sys::PathWithStatus respFile(++arg);
+
+ // Check that the response file is not empty (mmap'ing empty
+ // files can be problematic).
+ const sys::FileStatus *FileStat = respFile.getFileStatus();
+ if (!FileStat)
+ continue;
+ if (FileStat->getSize() == 0)
+ continue;
+
+ // Mmap the response file into memory.
+ OwningPtr<MemoryBuffer>
+ respFilePtr(MemoryBuffer::getFile(respFile.c_str()));
+
+ if (respFilePtr == 0)
+ continue;
+
+ ParseCStringVector(newArgv, respFilePtr->getBufferStart());
+ }
+ else {
+ newArgv.push_back(strdup(arg));
+ }
+ }
+}
+
void cl::ParseCommandLineOptions(int argc, char **argv,
- const char *Overview) {
+ const char *Overview, bool ReadResponseFiles) {
// Process all registered options.
std::vector<Option*> PositionalOpts;
std::vector<Option*> SinkOpts;
std::map<std::string, Option*> Opts;
GetOptionInfo(PositionalOpts, SinkOpts, Opts);
-
+
assert((!Opts.empty() || !PositionalOpts.empty()) &&
"No options specified!");
+
+ // Expand response files.
+ std::vector<char*> newArgv;
+ if (ReadResponseFiles) {
+ newArgv.push_back(strdup(argv[0]));
+ ExpandResponseFiles(argc, argv, newArgv);
+ argv = &newArgv[0];
+ argc = static_cast<int>(newArgv.size());
+ }
+
sys::Path progname(argv[0]);
// Copy the program name into ProgName, making sure not to overflow it.
std::string ProgName = sys::Path(argv[0]).getLast();
if (ProgName.size() > 79) ProgName.resize(79);
strcpy(ProgramName, ProgName.c_str());
-
+
ProgramOverview = Overview;
bool ErrorParsing = false;
// Check out the positional arguments to collect information about them.
unsigned NumPositionalRequired = 0;
-
+
// Determine whether or not there are an unlimited number of positionals
bool HasUnlimitedPositionals = false;
-
+
Option *ConsumeAfterOpt = 0;
if (!PositionalOpts.empty()) {
if (PositionalOpts[0]->getNumOccurrencesFlag() == cl::ConsumeAfter) {
@@ -372,7 +420,7 @@
// Calculate how many positional values are _required_.
bool UnboundedFound = false;
- for (unsigned i = ConsumeAfterOpt != 0, e = PositionalOpts.size();
+ for (size_t i = ConsumeAfterOpt != 0, e = PositionalOpts.size();
i != e; ++i) {
Option *Opt = PositionalOpts[i];
if (RequiresValue(Opt))
@@ -427,7 +475,7 @@
GetOptionInfo(PositionalOpts, SinkOpts, Opts);
OptionListChanged = false;
}
-
+
// Check to see if this is a positional argument. This argument is
// considered to be positional if it doesn't start with '-', if it is "-"
// itself, or if we have seen "--" already.
@@ -477,7 +525,7 @@
if (Handler == 0) {
std::string RealName(ArgName);
if (RealName.size() > 1) {
- unsigned Length = 0;
+ size_t Length = 0;
Option *PGOpt = getOptionPred(RealName, Length, isPrefixedOrGrouping,
Opts);
@@ -567,7 +615,7 @@
<< ": Not enough positional command line arguments specified!\n"
<< "Must specify at least " << NumPositionalRequired
<< " positional arguments: See: " << argv[0] << " --help\n";
-
+
ErrorParsing = true;
} else if (!HasUnlimitedPositionals
&& PositionalVals.size() > PositionalOpts.size()) {
@@ -579,8 +627,8 @@
} else if (ConsumeAfterOpt == 0) {
// Positional args have already been handled if ConsumeAfter is specified...
- unsigned ValNo = 0, NumVals = PositionalVals.size();
- for (unsigned i = 0, e = PositionalOpts.size(); i != e; ++i) {
+ unsigned ValNo = 0, NumVals = static_cast<unsigned>(PositionalVals.size());
+ for (size_t i = 0, e = PositionalOpts.size(); i != e; ++i) {
if (RequiresValue(PositionalOpts[i])) {
ProvidePositionalOption(PositionalOpts[i], PositionalVals[ValNo].first,
PositionalVals[ValNo].second);
@@ -614,7 +662,7 @@
} else {
assert(ConsumeAfterOpt && NumPositionalRequired <= PositionalVals.size());
unsigned ValNo = 0;
- for (unsigned j = 1, e = PositionalOpts.size(); j != e; ++j)
+ for (size_t j = 1, e = PositionalOpts.size(); j != e; ++j)
if (RequiresValue(PositionalOpts[j])) {
ErrorParsing |= ProvidePositionalOption(PositionalOpts[j],
PositionalVals[ValNo].first,
@@ -664,6 +712,14 @@
PositionalOpts.clear();
MoreHelp->clear();
+ // Free the memory allocated by ExpandResponseFiles.
+ if (ReadResponseFiles) {
+ // Free all the strdup()ed strings.
+ for (std::vector<char*>::iterator i = newArgv.begin(), e = newArgv.end();
+ i != e; ++i)
+ free (*i);
+ }
+
// If we had an error processing our arguments, don't let the program execute
if (ErrorParsing) exit(1);
}
@@ -678,7 +734,7 @@
cerr << HelpStr; // Be nice for positional arguments
else
cerr << ProgramName << ": for the -" << ArgName;
-
+
cerr << " option: " << Message << "\n";
return true;
}
@@ -719,13 +775,13 @@
//
// Return the width of the option tag for printing...
-unsigned alias::getOptionWidth() const {
+size_t alias::getOptionWidth() const {
return std::strlen(ArgStr)+6;
}
// Print out the option for the alias.
-void alias::printOptionInfo(unsigned GlobalWidth) const {
- unsigned L = std::strlen(ArgStr);
+void alias::printOptionInfo(size_t GlobalWidth) const {
+ size_t L = std::strlen(ArgStr);
cout << " -" << ArgStr << std::string(GlobalWidth-L-6, ' ') << " - "
<< HelpStr << "\n";
}
@@ -740,8 +796,8 @@
//
// Return the width of the option tag for printing...
-unsigned basic_parser_impl::getOptionWidth(const Option &O) const {
- unsigned Len = std::strlen(O.ArgStr);
+size_t basic_parser_impl::getOptionWidth(const Option &O) const {
+ size_t Len = std::strlen(O.ArgStr);
if (const char *ValName = getValueName())
Len += std::strlen(getValueStr(O, ValName))+3;
@@ -752,7 +808,7 @@
// to-be-maintained width is specified.
//
void basic_parser_impl::printOptionInfo(const Option &O,
- unsigned GlobalWidth) const {
+ size_t GlobalWidth) const {
cout << " -" << O.ArgStr;
if (const char *ValName = getValueName())
@@ -870,16 +926,16 @@
// Return the width of the option tag for printing...
-unsigned generic_parser_base::getOptionWidth(const Option &O) const {
+size_t generic_parser_base::getOptionWidth(const Option &O) const {
if (O.hasArgStr()) {
- unsigned Size = std::strlen(O.ArgStr)+6;
+ size_t Size = std::strlen(O.ArgStr)+6;
for (unsigned i = 0, e = getNumOptions(); i != e; ++i)
- Size = std::max(Size, (unsigned)std::strlen(getOption(i))+8);
+ Size = std::max(Size, std::strlen(getOption(i))+8);
return Size;
} else {
- unsigned BaseSize = 0;
+ size_t BaseSize = 0;
for (unsigned i = 0, e = getNumOptions(); i != e; ++i)
- BaseSize = std::max(BaseSize, (unsigned)std::strlen(getOption(i))+8);
+ BaseSize = std::max(BaseSize, std::strlen(getOption(i))+8);
return BaseSize;
}
}
@@ -888,14 +944,14 @@
// to-be-maintained width is specified.
//
void generic_parser_base::printOptionInfo(const Option &O,
- unsigned GlobalWidth) const {
+ size_t GlobalWidth) const {
if (O.hasArgStr()) {
- unsigned L = std::strlen(O.ArgStr);
+ size_t L = std::strlen(O.ArgStr);
cout << " -" << O.ArgStr << std::string(GlobalWidth-L-6, ' ')
<< " - " << O.HelpStr << "\n";
for (unsigned i = 0, e = getNumOptions(); i != e; ++i) {
- unsigned NumSpaces = GlobalWidth-strlen(getOption(i))-8;
+ size_t NumSpaces = GlobalWidth-strlen(getOption(i))-8;
cout << " =" << getOption(i) << std::string(NumSpaces, ' ')
<< " - " << getDescription(i) << "\n";
}
@@ -903,7 +959,7 @@
if (O.HelpStr[0])
cout << " " << O.HelpStr << "\n";
for (unsigned i = 0, e = getNumOptions(); i != e; ++i) {
- unsigned L = std::strlen(getOption(i));
+ size_t L = std::strlen(getOption(i));
cout << " -" << getOption(i) << std::string(GlobalWidth-L-8, ' ')
<< " - " << getDescription(i) << "\n";
}
@@ -918,7 +974,7 @@
namespace {
class HelpPrinter {
- unsigned MaxArgLen;
+ size_t MaxArgLen;
const Option *EmptyArg;
const bool ShowHidden;
@@ -943,7 +999,7 @@
std::vector<Option*> SinkOpts;
std::map<std::string, Option*> OptMap;
GetOptionInfo(PositionalOpts, SinkOpts, OptMap);
-
+
// Copy Options into a vector so we can sort them as we like...
std::vector<std::pair<std::string, Option*> > Opts;
copy(OptMap.begin(), OptMap.end(), std::back_inserter(Opts));
@@ -970,11 +1026,11 @@
// Print out the positional options.
Option *CAOpt = 0; // The cl::ConsumeAfter option, if it exists...
- if (!PositionalOpts.empty() &&
+ if (!PositionalOpts.empty() &&
PositionalOpts[0]->getNumOccurrencesFlag() == ConsumeAfter)
CAOpt = PositionalOpts[0];
- for (unsigned i = CAOpt != 0, e = PositionalOpts.size(); i != e; ++i) {
+ for (size_t i = CAOpt != 0, e = PositionalOpts.size(); i != e; ++i) {
if (PositionalOpts[i]->ArgStr[0])
cout << " --" << PositionalOpts[i]->ArgStr;
cout << " " << PositionalOpts[i]->HelpStr;
@@ -987,11 +1043,11 @@
// Compute the maximum argument length...
MaxArgLen = 0;
- for (unsigned i = 0, e = Opts.size(); i != e; ++i)
+ for (size_t i = 0, e = Opts.size(); i != e; ++i)
MaxArgLen = std::max(MaxArgLen, Opts[i].second->getOptionWidth());
cout << "OPTIONS:\n";
- for (unsigned i = 0, e = Opts.size(); i != e; ++i)
+ for (size_t i = 0, e = Opts.size(); i != e; ++i)
Opts[i].second->printOptionInfo(MaxArgLen);
// Print any extra help the user has declared.
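For reference, a tiny standalone restatement (not part of this patch; the helper name is made up) of the positional-argument rule described in the hunks above: an argument is treated as positional if "--" was already seen, if it does not start with '-', or if it is exactly "-".

  #include <cassert>
  #include <cstring>

  // Hypothetical helper restating the rule; the real parsing lives in
  // CommandLine.cpp and is not reproduced here.
  static bool looksPositional(const char *arg, bool sawDashDash) {
    return sawDashDash || arg[0] != '-' || std::strcmp(arg, "-") == 0;
  }

  int main() {
    assert(looksPositional("input.bc", false));
    assert(looksPositional("-", false));    // a bare "-" is positional
    assert(!looksPositional("-o", false));
    assert(looksPositional("-o", true));    // after "--" everything is positional
    return 0;
  }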
Modified: llvm/branches/non-call-eh/lib/Support/ConstantRange.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Support/ConstantRange.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Support/ConstantRange.cpp (original)
+++ llvm/branches/non-call-eh/lib/Support/ConstantRange.cpp Sun Jul 6 15:45:41 2008
@@ -464,7 +464,7 @@
///
void ConstantRange::print(std::ostream &OS) const {
OS << "[" << Lower.toStringSigned(10) << ","
- << Upper.toStringSigned(10) << " )";
+ << Upper.toStringSigned(10) << ")";
}
/// dump - Allow printing from a debugger easily...
Modified: llvm/branches/non-call-eh/lib/Support/Debug.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Support/Debug.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Support/Debug.cpp (original)
+++ llvm/branches/non-call-eh/lib/Support/Debug.cpp Sun Jul 6 15:45:41 2008
@@ -33,19 +33,19 @@
#ifndef NDEBUG
// -debug - Command line option to enable the DEBUG statements in the passes.
// This flag may only be enabled in debug builds.
- cl::opt<bool, true>
+ static cl::opt<bool, true>
Debug("debug", cl::desc("Enable debug output"), cl::Hidden,
cl::location(DebugFlag));
- std::string CurrentDebugType;
- struct DebugOnlyOpt {
+ static std::string CurrentDebugType;
+ static struct DebugOnlyOpt {
void operator=(const std::string &Val) const {
DebugFlag |= !Val.empty();
CurrentDebugType = Val;
}
} DebugOnlyOptLoc;
- cl::opt<DebugOnlyOpt, true, cl::parser<std::string> >
+ static cl::opt<DebugOnlyOpt, true, cl::parser<std::string> >
DebugOnly("debug-only", cl::desc("Enable a specific type of debug output"),
cl::Hidden, cl::value_desc("debug string"),
cl::location(DebugOnlyOptLoc), cl::ValueRequired);
Modified: llvm/branches/non-call-eh/lib/Support/Dwarf.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Support/Dwarf.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Support/Dwarf.cpp (original)
+++ llvm/branches/non-call-eh/lib/Support/Dwarf.cpp Sun Jul 6 15:45:41 2008
@@ -12,7 +12,6 @@
//===----------------------------------------------------------------------===//
#include "llvm/Support/Dwarf.h"
-#include "llvm/System/IncludeFile.h"
#include <cassert>
@@ -582,5 +581,3 @@
} // End of namespace dwarf.
} // End of namespace llvm.
-
-DEFINING_FILE_FOR(SupportDwarf)
Modified: llvm/branches/non-call-eh/lib/Support/FileUtilities.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Support/FileUtilities.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Support/FileUtilities.cpp (original)
+++ llvm/branches/non-call-eh/lib/Support/FileUtilities.cpp Sun Jul 6 15:45:41 2008
@@ -98,7 +98,8 @@
if (*F1NumEnd == 'D' || *F1NumEnd == 'd') {
// Copy string into tmp buffer to replace the 'D' with an 'e'.
SmallString<200> StrTmp(F1P, EndOfNumber(F1NumEnd)+1);
- StrTmp[F1NumEnd-F1P] = 'e'; // Strange exponential notation!
+ // Strange exponential notation!
+ StrTmp[static_cast<unsigned>(F1NumEnd-F1P)] = 'e';
V1 = strtod(&StrTmp[0], const_cast<char**>(&F1NumEnd));
F1NumEnd = F1P + (F1NumEnd-&StrTmp[0]);
@@ -107,7 +108,8 @@
if (*F2NumEnd == 'D' || *F2NumEnd == 'd') {
// Copy string into tmp buffer to replace the 'D' with an 'e'.
SmallString<200> StrTmp(F2P, EndOfNumber(F2NumEnd)+1);
- StrTmp[F2NumEnd-F2P] = 'e'; // Strange exponential notation!
+ // Strange exponential notation!
+ StrTmp[static_cast<unsigned>(F2NumEnd-F2P)] = 'e';
V2 = strtod(&StrTmp[0], const_cast<char**>(&F2NumEnd));
F2NumEnd = F2P + (F2NumEnd-&StrTmp[0]);
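For reference, a standalone sketch (not part of this patch; the buffer contents are made up) of the Fortran-style exponent fixup these hunks perform: strtod() does not accept "1.5D+03", so the 'D' is rewritten to 'e' in a temporary copy before conversion.

  #include <cstdlib>
  #include <cstring>

  int main() {
    char tmp[] = "1.5D+03";           // Fortran-style exponent
    *std::strchr(tmp, 'D') = 'e';     // tmp is now "1.5e+03"
    double v = std::strtod(tmp, 0);   // v == 1500.0
    return v == 1500.0 ? 0 : 1;
  }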
Modified: llvm/branches/non-call-eh/lib/Support/FoldingSet.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Support/FoldingSet.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Support/FoldingSet.cpp (original)
+++ llvm/branches/non-call-eh/lib/Support/FoldingSet.cpp Sun Jul 6 15:45:41 2008
@@ -57,8 +57,46 @@
void FoldingSetNodeID::AddDouble(double D) {
AddInteger(DoubleToBits(D));
}
+
+void FoldingSetNodeID::AddString(const char *String) {
+ unsigned Size = static_cast<unsigned>(strlen(String));
+ Bits.push_back(Size);
+ if (!Size) return;
+
+ unsigned Units = Size / 4;
+ unsigned Pos = 0;
+ const unsigned *Base = (const unsigned *)String;
+
+ // If the string is aligned do a bulk transfer.
+ if (!((intptr_t)Base & 3)) {
+ Bits.append(Base, Base + Units);
+ Pos = (Units + 1) * 4;
+ } else {
+ // Otherwise do it the hard way.
+ for ( Pos += 4; Pos <= Size; Pos += 4) {
+ unsigned V = ((unsigned char)String[Pos - 4] << 24) |
+ ((unsigned char)String[Pos - 3] << 16) |
+ ((unsigned char)String[Pos - 2] << 8) |
+ (unsigned char)String[Pos - 1];
+ Bits.push_back(V);
+ }
+ }
+
+ // With the leftover bits.
+ unsigned V = 0;
+ // Pos will have overshot size by 4 - #bytes left over.
+ switch (Pos - Size) {
+ case 1: V = (V << 8) | (unsigned char)String[Size - 3]; // Fall thru.
+ case 2: V = (V << 8) | (unsigned char)String[Size - 2]; // Fall thru.
+ case 3: V = (V << 8) | (unsigned char)String[Size - 1]; break;
+ default: return; // Nothing left.
+ }
+
+ Bits.push_back(V);
+}
+
void FoldingSetNodeID::AddString(const std::string &String) {
- unsigned Size = String.size();
+ unsigned Size = static_cast<unsigned>(String.size());
Bits.push_back(Size);
if (!Size) return;
@@ -98,7 +136,7 @@
/// lookup the node in the FoldingSetImpl.
unsigned FoldingSetNodeID::ComputeHash() const {
// This is adapted from SuperFastHash by Paul Hsieh.
- unsigned Hash = Bits.size();
+ unsigned Hash = static_cast<unsigned>(Bits.size());
for (const unsigned *BP = &Bits[0], *E = BP+Bits.size(); BP != E; ++BP) {
unsigned Data = *BP;
Hash += Data & 0xFFFF;
@@ -170,7 +208,7 @@
memset(Buckets, 0, NumBuckets*sizeof(void*));
// Set the very last bucket to be a non-null "pointer".
- Buckets[NumBuckets] = reinterpret_cast<void*>(-2);
+ Buckets[NumBuckets] = reinterpret_cast<void*>(-1);
}
FoldingSetImpl::~FoldingSetImpl() {
delete [] Buckets;
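For reference, a minimal standalone sketch (not LLVM code; the names are hypothetical) of the word-packing scheme used by the new AddString(const char*) overload above. It follows the unaligned "hard way" path only: four bytes go into each 32-bit word, the loop index overshoots the string length by 4 minus the number of leftover bytes, and the trailing switch keys on that overshoot to fold in the remainder.

  #include <cstring>
  #include <vector>

  static void packString(const char *Str, std::vector<unsigned> &Bits) {
    unsigned Size = static_cast<unsigned>(std::strlen(Str));
    Bits.push_back(Size);
    if (!Size) return;

    unsigned Pos = 0;
    // Consume the string four bytes at a time, most significant byte first.
    for (Pos += 4; Pos <= Size; Pos += 4) {
      unsigned V = ((unsigned char)Str[Pos - 4] << 24) |
                   ((unsigned char)Str[Pos - 3] << 16) |
                   ((unsigned char)Str[Pos - 2] << 8) |
                    (unsigned char)Str[Pos - 1];
      Bits.push_back(V);
    }

    // Pos now overshoots Size by 4 - (leftover bytes); e.g. for a 10-byte
    // string Pos ends at 12, so Pos - Size == 2 and two bytes remain.
    unsigned V = 0;
    switch (Pos - Size) {
    case 1: V = (V << 8) | (unsigned char)Str[Size - 3]; // fall through
    case 2: V = (V << 8) | (unsigned char)Str[Size - 2]; // fall through
    case 3: V = (V << 8) | (unsigned char)Str[Size - 1]; break;
    default: return; // length was a multiple of four; nothing left over
    }
    Bits.push_back(V);
  }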
Modified: llvm/branches/non-call-eh/lib/Support/MemoryBuffer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Support/MemoryBuffer.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Support/MemoryBuffer.cpp (original)
+++ llvm/branches/non-call-eh/lib/Support/MemoryBuffer.cpp Sun Jul 6 15:45:41 2008
@@ -26,11 +26,10 @@
#if !defined(_MSC_VER) && !defined(__MINGW32__)
#include <unistd.h>
#include <sys/uio.h>
-#include <sys/fcntl.h>
#else
#include <io.h>
-#include <fcntl.h>
#endif
+#include <fcntl.h>
using namespace llvm;
//===----------------------------------------------------------------------===//
@@ -107,7 +106,7 @@
/// that is completely initialized to zeros. Note that the caller should
/// initialize the memory allocated by this method. The memory is owned by
/// the MemoryBuffer object.
-MemoryBuffer *MemoryBuffer::getNewUninitMemBuffer(unsigned Size,
+MemoryBuffer *MemoryBuffer::getNewUninitMemBuffer(size_t Size,
const char *BufferName) {
char *Buf = new char[Size+1];
Buf[Size] = 0;
@@ -121,7 +120,7 @@
/// is completely initialized to zeros. Note that the caller should
/// initialize the memory allocated by this method. The memory is owned by
/// the MemoryBuffer object.
-MemoryBuffer *MemoryBuffer::getNewMemBuffer(unsigned Size,
+MemoryBuffer *MemoryBuffer::getNewMemBuffer(size_t Size,
const char *BufferName) {
MemoryBuffer *SB = getNewUninitMemBuffer(Size, BufferName);
memset(const_cast<char*>(SB->getBufferStart()), 0, Size+1);
@@ -215,7 +214,7 @@
SB.reset(MemoryBuffer::getNewUninitMemBuffer(FileSize, Filename));
char *BufPtr = const_cast<char*>(SB->getBufferStart());
- unsigned BytesLeft = FileSize;
+ size_t BytesLeft = FileSize;
while (BytesLeft) {
ssize_t NumRead = ::read(FD, BufPtr, BytesLeft);
if (NumRead != -1) {
Modified: llvm/branches/non-call-eh/lib/Support/Statistic.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Support/Statistic.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Support/Statistic.cpp (original)
+++ llvm/branches/non-call-eh/lib/Support/Statistic.cpp Sun Jul 6 15:45:41 2008
@@ -70,6 +70,8 @@
Initialized = true;
}
+namespace {
+
struct NameCompare {
bool operator()(const Statistic *LHS, const Statistic *RHS) const {
int Cmp = std::strcmp(LHS->getName(), RHS->getName());
@@ -80,6 +82,8 @@
}
};
+}
+
// Print information when destroyed, iff command line option is specified.
StatisticInfo::~StatisticInfo() {
// Statistics not enabled?
@@ -90,7 +94,7 @@
// Figure out how long the biggest Value and Name fields are.
unsigned MaxNameLen = 0, MaxValLen = 0;
- for (unsigned i = 0, e = Stats.size(); i != e; ++i) {
+ for (size_t i = 0, e = Stats.size(); i != e; ++i) {
MaxValLen = std::max(MaxValLen,
(unsigned)utostr(Stats[i]->getValue()).size());
MaxNameLen = std::max(MaxNameLen,
@@ -106,7 +110,7 @@
<< "===" << std::string(73, '-') << "===\n\n";
// Print all of the statistics.
- for (unsigned i = 0, e = Stats.size(); i != e; ++i) {
+ for (size_t i = 0, e = Stats.size(); i != e; ++i) {
std::string CountStr = utostr(Stats[i]->getValue());
OutStream << std::string(MaxValLen-CountStr.size(), ' ')
<< CountStr << " " << Stats[i]->getName()
Modified: llvm/branches/non-call-eh/lib/Support/Streams.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Support/Streams.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Support/Streams.cpp (original)
+++ llvm/branches/non-call-eh/lib/Support/Streams.cpp Sun Jul 6 15:45:41 2008
@@ -19,3 +19,12 @@
OStream llvm::cout(std::cout);
OStream llvm::cerr(std::cerr);
IStream llvm::cin(std::cin);
+
+namespace llvm {
+
+/// FlushStream - Function called by BaseStream to flush an ostream.
+void FlushStream(std::ostream &S) {
+ S << std::flush;
+}
+
+} // end llvm namespace

Modified: llvm/branches/non-call-eh/lib/Support/StringExtras.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Support/StringExtras.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Support/StringExtras.cpp (original)
+++ llvm/branches/non-call-eh/lib/Support/StringExtras.cpp Sun Jul 6 15:45:41 2008
@@ -22,7 +22,7 @@
/// The Source source string is updated in place to remove the returned string
/// and any delimiter prefix from it.
std::string llvm::getToken(std::string &Source, const char *Delimiters) {
- unsigned NumDelimiters = std::strlen(Delimiters);
+ size_t NumDelimiters = std::strlen(Delimiters);
// Figure out where the token starts.
std::string::size_type Start =
Modified: llvm/branches/non-call-eh/lib/Support/Timer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Support/Timer.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Support/Timer.cpp (original)
+++ llvm/branches/non-call-eh/lib/Support/Timer.cpp Sun Jul 6 15:45:41 2008
@@ -39,12 +39,12 @@
}
namespace {
- cl::opt<bool>
+ static cl::opt<bool>
TrackSpace("track-memory", cl::desc("Enable -time-passes memory "
"tracking (this may be slow)"),
cl::Hidden);
- cl::opt<std::string, true>
+ static cl::opt<std::string, true>
InfoOutputFilename("info-output-file", cl::value_desc("filename"),
cl::desc("File to append -stats and -timer output to"),
cl::Hidden, cl::location(getLibSupportInfoOutputFilename()));
@@ -132,13 +132,13 @@
void Timer::startTimer() {
Started = true;
+ ActiveTimers->push_back(this);
TimeRecord TR = getTimeRecord(true);
Elapsed -= TR.Elapsed;
UserTime -= TR.UserTime;
SystemTime -= TR.SystemTime;
MemUsed -= TR.MemUsed;
PeakMemBase = TR.MemUsed;
- ActiveTimers->push_back(this);
}
void Timer::stopTimer() {
Modified: llvm/branches/non-call-eh/lib/System/Alarm.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/System/Alarm.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/System/Alarm.cpp (original)
+++ llvm/branches/non-call-eh/lib/System/Alarm.cpp Sun Jul 6 15:45:41 2008
@@ -31,5 +31,3 @@
#ifdef LLVM_ON_WIN32
#include "Win32/Alarm.inc"
#endif
-
-DEFINING_FILE_FOR(SystemAlarm)
Modified: llvm/branches/non-call-eh/lib/System/DynamicLibrary.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/System/DynamicLibrary.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/System/DynamicLibrary.cpp (original)
+++ llvm/branches/non-call-eh/lib/System/DynamicLibrary.cpp Sun Jul 6 15:45:41 2008
@@ -159,5 +159,3 @@
}
#endif // LLVM_ON_WIN32
-
-DEFINING_FILE_FOR(SystemDynamicLibrary)
Modified: llvm/branches/non-call-eh/lib/System/Memory.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/System/Memory.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/System/Memory.cpp (original)
+++ llvm/branches/non-call-eh/lib/System/Memory.cpp Sun Jul 6 15:45:41 2008
@@ -14,16 +14,9 @@
#include "llvm/System/Memory.h"
#include "llvm/Config/config.h"
-#include "llvm/System/IncludeFile.h"
namespace llvm {
using namespace sys;
-
-//===----------------------------------------------------------------------===//
-//=== WARNING: Implementation here must contain only TRULY operating system
-//=== independent code.
-//===----------------------------------------------------------------------===//
-
}
// Include the platform-specific parts of this class.
@@ -34,4 +27,34 @@
#include "Win32/Memory.inc"
#endif
-DEFINING_FILE_FOR(SystemMemory)
+extern "C" void sys_icache_invalidate(const void *Addr, size_t len);
+
+/// InvalidateInstructionCache - Before the JIT can run a block of code
+/// that has been emitted it must invalidate the instruction cache on some
+/// platforms.
+void llvm::sys::Memory::InvalidateInstructionCache(const void *Addr,
+ size_t Len) {
+
+// icache invalidation for PPC.
+#if (defined(__POWERPC__) || defined (__ppc__) || \
+ defined(_POWER) || defined(_ARCH_PPC))
+ #if defined(__APPLE__)
+ sys_icache_invalidate(Addr, Len);
+ #elif defined(__GNUC__)
+ const size_t LineSize = 32;
+
+ const intptr_t Mask = ~(LineSize - 1);
+ const intptr_t StartLine = ((intptr_t) Addr) & Mask;
+ const intptr_t EndLine = ((intptr_t) Addr + Len + LineSize - 1) & Mask;
+
+ for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
+ asm volatile("dcbf 0, %0" : : "r"(Line));
+ asm volatile("sync");
+
+ for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
+ asm volatile("icbi 0, %0" : : "r"(Line));
+ asm volatile("isync");
+ #endif
+#endif // end PPC
+
+}
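For reference, a small self-contained sketch (not part of the patch; the addresses and the 32-byte line size are assumed for illustration) of the cache-line rounding done above before the dcbf/icbi loops:

  #include <cstdio>

  int main() {
    const long LineSize = 32;                  // assumed PPC cache-line size
    const long Mask = ~(LineSize - 1);

    long Addr = 0x1005;                        // hypothetical start of emitted code
    long Len  = 100;                           // hypothetical length in bytes

    long StartLine = Addr & Mask;                         // 0x1000
    long EndLine   = (Addr + Len + LineSize - 1) & Mask;  // 0x1080

    // Each 32-byte line in [StartLine, EndLine) is what the dcbf/icbi loops
    // above would flush and then invalidate.
    for (long Line = StartLine; Line < EndLine; Line += LineSize)
      std::printf("line 0x%lx\n", Line);
    return 0;
  }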
Modified: llvm/branches/non-call-eh/lib/System/Mutex.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/System/Mutex.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/System/Mutex.cpp (original)
+++ llvm/branches/non-call-eh/lib/System/Mutex.cpp Sun Jul 6 15:45:41 2008
@@ -13,7 +13,6 @@
#include "llvm/Config/config.h"
#include "llvm/System/Mutex.h"
-#include "llvm/System/IncludeFile.h"
//===----------------------------------------------------------------------===//
//=== WARNING: Implementation here must contain only TRULY operating system
@@ -76,7 +75,7 @@
errorcode = pthread_mutexattr_settype(&attr, kind);
assert(errorcode == 0);
-#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
+#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__) && !defined(__DragonFly__)
// Make it a process local mutex
errorcode = pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_PRIVATE);
#endif
@@ -99,7 +98,7 @@
{
if (pthread_enabled)
{
- pthread_mutex_t* mutex = reinterpret_cast<pthread_mutex_t*>(data_);
+ pthread_mutex_t* mutex = static_cast<pthread_mutex_t*>(data_);
assert(mutex != 0);
pthread_mutex_destroy(mutex);
assert(mutex != 0);
@@ -111,7 +110,7 @@
{
if (pthread_enabled)
{
- pthread_mutex_t* mutex = reinterpret_cast<pthread_mutex_t*>(data_);
+ pthread_mutex_t* mutex = static_cast<pthread_mutex_t*>(data_);
assert(mutex != 0);
int errorcode = pthread_mutex_lock(mutex);
@@ -125,7 +124,7 @@
{
if (pthread_enabled)
{
- pthread_mutex_t* mutex = reinterpret_cast<pthread_mutex_t*>(data_);
+ pthread_mutex_t* mutex = static_cast<pthread_mutex_t*>(data_);
assert(mutex != 0);
int errorcode = pthread_mutex_unlock(mutex);
@@ -139,7 +138,7 @@
{
if (pthread_enabled)
{
- pthread_mutex_t* mutex = reinterpret_cast<pthread_mutex_t*>(data_);
+ pthread_mutex_t* mutex = static_cast<pthread_mutex_t*>(data_);
assert(mutex != 0);
int errorcode = pthread_mutex_trylock(mutex);
Modified: llvm/branches/non-call-eh/lib/System/Path.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/System/Path.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/System/Path.cpp (original)
+++ llvm/branches/non-call-eh/lib/System/Path.cpp Sun Jul 6 15:45:41 2008
@@ -24,6 +24,18 @@
//=== independent code.
//===----------------------------------------------------------------------===//
+bool Path::operator==(const Path &that) const {
+ return path == that.path;
+}
+
+bool Path::operator!=(const Path &that) const {
+ return path != that.path;
+}
+
+bool Path::operator<(const Path& that) const {
+ return path < that.path;
+}
+
std::ostream& llvm::operator<<(std::ostream &strm, const sys::Path &aPath) {
strm << aPath.toString();
return strm;
@@ -68,29 +80,43 @@
break;
case 0xCA:
- // This is complicated by an overlap with Java class files.
- // See the Mach-O section in /usr/share/file/magic for details.
if (magic[1] == char(0xFE) && magic[2] == char(0xBA) &&
magic[3] == char(0xBE)) {
- return Mach_O_DynamicallyLinkedSharedLib_FileType;
-
- // FIXME: How does this work?
- if (length >= 14 && magic[13] == 0)
- switch (magic[12]) {
- default: break;
- case 1: return Mach_O_Object_FileType;
- case 2: return Mach_O_Executable_FileType;
- case 3: return Mach_O_FixedVirtualMemorySharedLib_FileType;
- case 4: return Mach_O_Core_FileType;
- case 5: return Mach_O_PreloadExectuable_FileType;
- case 6: return Mach_O_DynamicallyLinkedSharedLib_FileType;
- case 7: return Mach_O_DynamicLinker_FileType;
- case 8: return Mach_O_Bundle_FileType;
- case 9: return Mach_O_DynamicallyLinkedSharedLibStub_FileType;
- }
+ // This is complicated by an overlap with Java class files.
+ // See the Mach-O section in /usr/share/file/magic for details.
+ if (length >= 8 && magic[7] < 43)
+ // FIXME: Universal Binary of any type.
+ return Mach_O_DynamicallyLinkedSharedLib_FileType;
}
break;
+ case 0xFE:
+ case 0xCE: {
+ uint16_t type = 0;
+ if (magic[0] == char(0xFE) && magic[1] == char(0xED) &&
+ magic[2] == char(0xFA) && magic[3] == char(0xCE)) {
+ /* Native endian */
+ if (length >= 16) type = magic[14] << 8 | magic[15];
+ } else if (magic[0] == char(0xCE) && magic[1] == char(0xFA) &&
+ magic[2] == char(0xED) && magic[3] == char(0xFE)) {
+ /* Reverse endian */
+ if (length >= 14) type = magic[13] << 8 | magic[12];
+ }
+ switch (type) {
+ default: break;
+ case 1: return Mach_O_Object_FileType;
+ case 2: return Mach_O_Executable_FileType;
+ case 3: return Mach_O_FixedVirtualMemorySharedLib_FileType;
+ case 4: return Mach_O_Core_FileType;
+ case 5: return Mach_O_PreloadExectuable_FileType;
+ case 6: return Mach_O_DynamicallyLinkedSharedLib_FileType;
+ case 7: return Mach_O_DynamicLinker_FileType;
+ case 8: return Mach_O_Bundle_FileType;
+ case 9: return Mach_O_DynamicallyLinkedSharedLibStub_FileType;
+ case 10: break; // FIXME: MH_DSYM companion file with only debug.
+ }
+ break;
+ }
case 0xF0: // PowerPC Windows
case 0x83: // Alpha 32-bit
case 0x84: // Alpha 64-bit
@@ -124,7 +150,8 @@
if (canRead()) {
std::string Magic;
if (getMagicNumber(Magic, 64))
- switch (IdentifyFileType(Magic.c_str(), Magic.length())) {
+ switch (IdentifyFileType(Magic.c_str(),
+ static_cast<unsigned>(Magic.length()))) {
default: return false;
case Mach_O_FixedVirtualMemorySharedLib_FileType:
case Mach_O_DynamicallyLinkedSharedLib_FileType:
@@ -167,16 +194,11 @@
bool Path::hasMagicNumber(const std::string &Magic) const {
std::string actualMagic;
- if (getMagicNumber(actualMagic, Magic.size()))
+ if (getMagicNumber(actualMagic, static_cast<unsigned>(Magic.size())))
return Magic == actualMagic;
return false;
}
-std::string
-Path::getSuffix() const {
- return path.substr(path.rfind('.') + 1);
-}
-
static void getPathList(const char*path, std::vector<Path>& Paths) {
const char* at = path;
const char* delim = strchr(at, PathSeparator);
@@ -204,7 +226,7 @@
// If the path is all slashes, return a single slash.
// Otherwise, remove all trailing slashes.
- signed pos = path.size() - 1;
+ signed pos = static_cast<signed>(path.size()) - 1;
while (pos >= 0 && path[pos] == Sep)
--pos;
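For reference, an illustrative-only sketch of the filetype decoding the new 0xFE/0xCE cases perform: the Mach-O header may be stored in the same byte order as the host or byte-swapped, so the filetype halfword is read from different offsets depending on which magic sequence matched. The helper and the sample header below are hypothetical; they only mirror the byte arithmetic of the hunk above.

  // Hypothetical helper; not the patch's code.
  static unsigned machOFileType(const unsigned char *magic, unsigned length) {
    unsigned type = 0;
    if (magic[0] == 0xFE && magic[1] == 0xED &&
        magic[2] == 0xFA && magic[3] == 0xCE) {
      if (length >= 16) type = (magic[14] << 8) | magic[15];  // header in file order
    } else if (magic[0] == 0xCE && magic[1] == 0xFA &&
               magic[2] == 0xED && magic[3] == 0xFE) {
      if (length >= 14) type = (magic[13] << 8) | magic[12];  // byte-swapped header
    }
    return type; // 2 maps to Mach_O_Executable_FileType in the switch above
  }

  int main() {
    // First 16 bytes of a hypothetical big-endian executable header.
    const unsigned char hdr[16] = { 0xFE, 0xED, 0xFA, 0xCE, 0, 0, 0, 18,
                                    0, 0, 0, 0, 0, 0, 0, 2 };
    return machOFileType(hdr, 16) == 2 ? 0 : 1;
  }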
Modified: llvm/branches/non-call-eh/lib/System/Process.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/System/Process.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/System/Process.cpp (original)
+++ llvm/branches/non-call-eh/lib/System/Process.cpp Sun Jul 6 15:45:41 2008
@@ -31,5 +31,3 @@
#ifdef LLVM_ON_WIN32
#include "Win32/Process.inc"
#endif
-
-DEFINING_FILE_FOR(SystemProcess)
Modified: llvm/branches/non-call-eh/lib/System/Program.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/System/Program.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/System/Program.cpp (original)
+++ llvm/branches/non-call-eh/lib/System/Program.cpp Sun Jul 6 15:45:41 2008
@@ -31,5 +31,3 @@
#ifdef LLVM_ON_WIN32
#include "Win32/Program.inc"
#endif
-
-DEFINING_FILE_FOR(SystemProgram)
Modified: llvm/branches/non-call-eh/lib/System/Signals.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/System/Signals.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/System/Signals.cpp (original)
+++ llvm/branches/non-call-eh/lib/System/Signals.cpp Sun Jul 6 15:45:41 2008
@@ -32,5 +32,3 @@
#ifdef LLVM_ON_WIN32
#include "Win32/Signals.inc"
#endif
-
-DEFINING_FILE_FOR(SystemSignals)
Modified: llvm/branches/non-call-eh/lib/System/Unix/Memory.inc
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/System/Unix/Memory.inc?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/System/Unix/Memory.inc (original)
+++ llvm/branches/non-call-eh/lib/System/Unix/Memory.inc Sun Jul 6 15:45:41 2008
@@ -28,7 +28,7 @@
std::string *ErrMsg) {
if (NumBytes == 0) return MemoryBlock();
- long pageSize = Process::GetPageSize();
+ unsigned pageSize = Process::GetPageSize();
unsigned NumPages = (NumBytes+pageSize-1)/pageSize;
int fd = -1;
Modified: llvm/branches/non-call-eh/lib/System/Unix/Path.inc
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/System/Unix/Path.inc?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/System/Unix/Path.inc (original)
+++ llvm/branches/non-call-eh/lib/System/Unix/Path.inc Sun Jul 6 15:45:41 2008
@@ -75,6 +75,12 @@
extern const char sys::PathSeparator = ':';
+Path::Path(const std::string& p)
+ : path(p) {}
+
+Path::Path(const char *StrStart, unsigned StrLen)
+ : path(StrStart, StrLen) {}
+
bool
Path::isValid() const {
// Check some obvious things
@@ -257,11 +263,9 @@
/// GetMainExecutable - Return the path to the main executable, given the
/// value of argv[0] from program startup.
Path Path::GetMainExecutable(const char *argv0, void *MainAddr) {
-#if defined(__CYGWIN__)
- char exe_link[64];
- snprintf(exe_link, sizeof(exe_link), "/proc/%d/exe", getpid());
+#if defined(__linux__) || defined(__CYGWIN__)
char exe_path[MAXPATHLEN];
- ssize_t len = readlink(exe_link, exe_path, sizeof(exe_path));
+ ssize_t len = readlink("/proc/self/exe", exe_path, sizeof(exe_path));
if (len > 0 && len < MAXPATHLEN - 1) {
exe_path[len] = '\0';
return Path(std::string(exe_path));
@@ -297,6 +301,22 @@
return path.substr(slash, dot - slash);
}
+std::string
+Path::getSuffix() const {
+ // Find the last slash
+ std::string::size_type slash = path.rfind('/');
+ if (slash == std::string::npos)
+ slash = 0;
+ else
+ slash++;
+
+ std::string::size_type dot = path.rfind('.');
+ if (dot == std::string::npos || dot < slash)
+ return std::string();
+ else
+ return path.substr(dot + 1);
+}
+
bool Path::getMagicNumber(std::string& Magic, unsigned len) const {
assert(len < 1024 && "Request for magic string too long");
char* buf = (char*) alloca(1 + len);
@@ -566,7 +586,7 @@
path.copy(pathname,MAXPATHLEN);
// Null-terminate the last component
- int lastchar = path.length() - 1 ;
+ size_t lastchar = path.length() - 1 ;
if (pathname[lastchar] != '/')
++lastchar;
@@ -639,7 +659,7 @@
// Otherwise, try to just remove the one directory.
char pathname[MAXPATHLEN];
path.copy(pathname, MAXPATHLEN);
- int lastchar = path.length() - 1 ;
+ size_t lastchar = path.length() - 1;
if (pathname[lastchar] == '/')
pathname[lastchar] = 0;
else
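For reference, a small standalone check (not part of the patch; the helper name is made up) of the behaviour of the new getSuffix() above: a dot that appears only in a directory component no longer yields a suffix, and a path with no dot yields an empty string instead of the whole path.

  #include <cassert>
  #include <string>

  static std::string suffixOf(const std::string &path) {
    std::string::size_type slash = path.rfind('/');
    slash = (slash == std::string::npos) ? 0 : slash + 1;
    std::string::size_type dot = path.rfind('.');
    if (dot == std::string::npos || dot < slash)
      return std::string();
    return path.substr(dot + 1);
  }

  int main() {
    assert(suffixOf("lib/foo.bar/baz") == "");  // dot only in a directory name
    assert(suffixOf("lib/foo/baz.o") == "o");
    assert(suffixOf("noext") == "");            // no dot at all
    return 0;
  }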
Modified: llvm/branches/non-call-eh/lib/System/Unix/Program.inc
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/System/Unix/Program.inc?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/System/Unix/Program.inc (original)
+++ llvm/branches/non-call-eh/lib/System/Unix/Program.inc Sun Jul 6 15:45:41 2008
@@ -58,7 +58,7 @@
return Path();
// Now we have a colon separated list of directories to search; try them.
- unsigned PathLen = strlen(PathStr);
+ size_t PathLen = strlen(PathStr);
while (PathLen) {
// Find the first colon...
const char *Colon = std::find(PathStr, PathStr+PathLen, ':');
@@ -84,8 +84,16 @@
return Path();
}
-static bool RedirectFD(const std::string &File, int FD, std::string* ErrMsg) {
- if (File.empty()) return false; // Noop
+static bool RedirectIO(const Path *Path, int FD, std::string* ErrMsg) {
+ if (Path == 0)
+ // Noop
+ return false;
+ std::string File;
+ if (Path->isEmpty())
+ // Redirect empty paths to /dev/null
+ File = "/dev/null";
+ else
+ File = Path->toString();
// Open the file
int InFD = open(File.c_str(), FD == 0 ? O_RDONLY : O_WRONLY|O_CREAT, 0666);
@@ -162,30 +170,21 @@
case 0: {
// Redirect file descriptors...
if (redirects) {
- if (redirects[0]) {
- if (redirects[0]->isEmpty()) {
- if (RedirectFD("/dev/null",0,ErrMsg)) { return -1; }
- } else {
- if (RedirectFD(redirects[0]->toString(), 0,ErrMsg)) { return -1; }
- }
- }
- if (redirects[1]) {
- if (redirects[1]->isEmpty()) {
- if (RedirectFD("/dev/null",1,ErrMsg)) { return -1; }
- } else {
- if (RedirectFD(redirects[1]->toString(),1,ErrMsg)) { return -1; }
- }
- }
+ // Redirect stdin
+ if (RedirectIO(redirects[0], 0, ErrMsg)) { return -1; }
+ // Redirect stdout
+ if (RedirectIO(redirects[1], 1, ErrMsg)) { return -1; }
if (redirects[1] && redirects[2] &&
- *(redirects[1]) != *(redirects[2])) {
- if (redirects[2]->isEmpty()) {
- if (RedirectFD("/dev/null",2,ErrMsg)) { return -1; }
- } else {
- if (RedirectFD(redirects[2]->toString(), 2,ErrMsg)) { return -1; }
+ *(redirects[1]) == *(redirects[2])) {
+ // If stdout and stderr should go to the same place, redirect stderr
+ // to the FD already open for stdout.
+ if (-1 == dup2(1,2)) {
+ MakeErrMsg(ErrMsg, "Can't redirect stderr to stdout");
+ return -1;
}
- } else if (-1 == dup2(1,2)) {
- MakeErrMsg(ErrMsg, "Can't redirect");
- return -1;
+ } else {
+ // Just redirect stderr
+ if (RedirectIO(redirects[2], 2, ErrMsg)) { return -1; }
}
}
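For reference, a minimal POSIX sketch (not LLVM code) of the stderr-to-stdout merge the new branch performs when both redirect targets compare equal: dup2(1, 2) makes file descriptor 2 refer to the same open file description as descriptor 1.

  #include <cstdio>
  #include <unistd.h>

  int main() {
    // After dup2, anything written to stderr lands wherever stdout already goes.
    if (dup2(1, 2) == -1) {
      std::perror("dup2");
      return 1;
    }
    std::fprintf(stderr, "this follows stdout\n");
    return 0;
  }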
Modified: llvm/branches/non-call-eh/lib/System/Unix/Signals.inc
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/System/Unix/Signals.inc?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/System/Unix/Signals.inc (original)
+++ llvm/branches/non-call-eh/lib/System/Unix/Signals.inc Sun Jul 6 15:45:41 2008
@@ -41,7 +41,8 @@
static const int IntSigs[] = {
SIGHUP, SIGINT, SIGQUIT, SIGPIPE, SIGTERM, SIGUSR1, SIGUSR2
};
-static const int *IntSigsEnd = IntSigs + sizeof(IntSigs) / sizeof(IntSigs[0]);
+static const int *const IntSigsEnd =
+ IntSigs + sizeof(IntSigs) / sizeof(IntSigs[0]);
// KillSigs - Signals that are synchronous with the program that will cause it
// to die.
@@ -51,7 +52,8 @@
, SIGEMT
#endif
};
-static const int *KillSigsEnd = KillSigs + sizeof(KillSigs) / sizeof(KillSigs[0]);
+static const int *const KillSigsEnd =
+ KillSigs + sizeof(KillSigs) / sizeof(KillSigs[0]);
#ifdef HAVE_BACKTRACE
static void* StackTrace[256];
@@ -65,7 +67,8 @@
static void PrintStackTrace() {
#ifdef HAVE_BACKTRACE
// Use backtrace() to output a backtrace on Linux systems with glibc.
- int depth = backtrace(StackTrace, array_lengthof(StackTrace));
+ int depth = backtrace(StackTrace,
+ static_cast<int>(array_lengthof(StackTrace)));
backtrace_symbols_fd(StackTrace, depth, STDERR_FILENO);
#endif
}
Modified: llvm/branches/non-call-eh/lib/System/Unix/Unix.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/System/Unix/Unix.h?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/System/Unix/Unix.h (original)
+++ llvm/branches/non-call-eh/lib/System/Unix/Unix.h Sun Jul 6 15:45:41 2008
@@ -70,7 +70,7 @@
/// string and the Unix error number given by \p errnum. If errnum is -1, the
/// default then the value of errno is used.
/// @brief Make an error message
-inline bool MakeErrMsg(
+static inline bool MakeErrMsg(
std::string* ErrMsg, const std::string& prefix, int errnum = -1) {
if (!ErrMsg)
return true;
Modified: llvm/branches/non-call-eh/lib/System/Win32/DynamicLibrary.inc
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/System/Win32/DynamicLibrary.inc?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/System/Win32/DynamicLibrary.inc (original)
+++ llvm/branches/non-call-eh/lib/System/Win32/DynamicLibrary.inc Sun Jul 6 15:45:41 2008
@@ -134,6 +134,21 @@
#if defined(__MINGW32__)
EXPLICIT_SYMBOL_DEF(_alloca);
EXPLICIT_SYMBOL_DEF(__main);
+ EXPLICIT_SYMBOL_DEF(__ashldi3);
+ EXPLICIT_SYMBOL_DEF(__ashrdi3);
+ EXPLICIT_SYMBOL_DEF(__cmpdi2);
+ EXPLICIT_SYMBOL_DEF(__divdi3);
+ EXPLICIT_SYMBOL_DEF(__eprintf);
+ EXPLICIT_SYMBOL_DEF(__fixdfdi);
+ EXPLICIT_SYMBOL_DEF(__fixsfdi);
+ EXPLICIT_SYMBOL_DEF(__fixunsdfdi);
+ EXPLICIT_SYMBOL_DEF(__fixunssfdi);
+ EXPLICIT_SYMBOL_DEF(__floatdidf);
+ EXPLICIT_SYMBOL_DEF(__floatdisf);
+ EXPLICIT_SYMBOL_DEF(__lshrdi3);
+ EXPLICIT_SYMBOL_DEF(__moddi3);
+ EXPLICIT_SYMBOL_DEF(__udivdi3);
+ EXPLICIT_SYMBOL_DEF(__umoddi3);
#elif defined(_MSC_VER)
EXPLICIT_SYMBOL_DEF(_alloca_probe);
#endif
@@ -157,6 +172,21 @@
{
EXPLICIT_SYMBOL(_alloca);
EXPLICIT_SYMBOL(__main);
+ EXPLICIT_SYMBOL(__ashldi3);
+ EXPLICIT_SYMBOL(__ashrdi3);
+ EXPLICIT_SYMBOL(__cmpdi2);
+ EXPLICIT_SYMBOL(__divdi3);
+ EXPLICIT_SYMBOL(__eprintf);
+ EXPLICIT_SYMBOL(__fixdfdi);
+ EXPLICIT_SYMBOL(__fixsfdi);
+ EXPLICIT_SYMBOL(__fixunsdfdi);
+ EXPLICIT_SYMBOL(__fixunssfdi);
+ EXPLICIT_SYMBOL(__floatdidf);
+ EXPLICIT_SYMBOL(__floatdisf);
+ EXPLICIT_SYMBOL(__lshrdi3);
+ EXPLICIT_SYMBOL(__moddi3);
+ EXPLICIT_SYMBOL(__udivdi3);
+ EXPLICIT_SYMBOL(__umoddi3);
EXPLICIT_SYMBOL2(alloca, _alloca);
#undef EXPLICIT_SYMBOL
Modified: llvm/branches/non-call-eh/lib/System/Win32/Path.inc
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/System/Win32/Path.inc?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/System/Win32/Path.inc (original)
+++ llvm/branches/non-call-eh/lib/System/Win32/Path.inc Sun Jul 6 15:45:41 2008
@@ -1,4 +1,4 @@
-//===- llvm/System/Linux/Path.cpp - Linux Path Implementation ---*- C++ -*-===//
+//===- llvm/System/Win32/Path.cpp - Win32 Path Implementation ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -46,6 +46,16 @@
namespace sys {
const char PathSeparator = ';';
+Path::Path(const std::string& p)
+ : path(p) {
+ FlipBackSlashes(path);
+}
+
+Path::Path(const char *StrStart, unsigned StrLen)
+ : path(StrStart, StrLen) {
+ FlipBackSlashes(path);
+}
+
bool
Path::isValid() const {
if (path.empty())
@@ -230,7 +240,7 @@
}
std::string Path::getDirname() const {
- return getDirnameCharSep(path, '\\');
+ return getDirnameCharSep(path, '/');
}
std::string
@@ -249,6 +259,22 @@
return path.substr(slash, dot - slash);
}
+std::string
+Path::getSuffix() const {
+ // Find the last slash
+ size_t slash = path.rfind('/');
+ if (slash == std::string::npos)
+ slash = 0;
+ else
+ slash++;
+
+ size_t dot = path.rfind('.');
+ if (dot == std::string::npos || dot < slash)
+ return std::string();
+ else
+ return path.substr(dot + 1);
+}
+
bool
Path::exists() const {
DWORD attr = GetFileAttributes(path.c_str());
@@ -672,7 +698,7 @@
if (!MoveFileEx(path.c_str(), newName.c_str(), MOVEFILE_REPLACE_EXISTING))
return MakeErrMsg(ErrMsg, "Can't move '" + path + "' to '" + newName.path
+ "': ");
- return true;
+ return false;
}
bool
Modified: llvm/branches/non-call-eh/lib/System/Win32/Program.inc
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/System/Win32/Program.inc?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/System/Win32/Program.inc (original)
+++ llvm/branches/non-call-eh/lib/System/Win32/Program.inc Sun Jul 6 15:45:41 2008
@@ -77,10 +77,12 @@
0, TRUE, DUPLICATE_SAME_ACCESS);
return h;
}
-
- const char *fname = path->toString().c_str();
- if (*fname == 0)
+
+ const char *fname;
+ if (path->isEmpty())
fname = "NUL";
+ else
+ fname = path->toString().c_str();
SECURITY_ATTRIBUTES sa;
sa.nLength = sizeof(sa);
@@ -152,6 +154,33 @@
*p = 0;
+ // The pointer to the environment block for the new process.
+ char *envblock = 0;
+
+ if (envp) {
+ // An environment block consists of a null-terminated block of
+ // null-terminated strings. Convert the array of environment variables to
+ // an environment block by concatenating them.
+
+ // First, determine the length of the environment block.
+ len = 0;
+ for (unsigned i = 0; envp[i]; i++)
+ len += strlen(envp[i]) + 1;
+
+ // Now build the environment block.
+ envblock = reinterpret_cast<char *>(_alloca(len+1));
+ p = envblock;
+
+ for (unsigned i = 0; envp[i]; i++) {
+ const char *ev = envp[i];
+ size_t len = strlen(ev) + 1;
+ memcpy(p, ev, len);
+ p += len;
+ }
+
+ *p = 0;
+ }
+
// Create a child process.
STARTUPINFO si;
memset(&si, 0, sizeof(si));
@@ -174,7 +203,14 @@
MakeErrMsg(ErrMsg, "can't redirect stdout");
return -1;
}
- if (redirects[1] && redirects[2] && *(redirects[1]) != *(redirects[2])) {
+ if (redirects[1] && redirects[2] && *(redirects[1]) == *(redirects[2])) {
+ // If stdout and stderr should go to the same place, redirect stderr
+ // to the handle already open for stdout.
+ DuplicateHandle(GetCurrentProcess(), si.hStdOutput,
+ GetCurrentProcess(), &si.hStdError,
+ 0, TRUE, DUPLICATE_SAME_ACCESS);
+ } else {
+ // Just redirect stderr
si.hStdError = RedirectIO(redirects[2], 2, ErrMsg);
if (si.hStdError == INVALID_HANDLE_VALUE) {
CloseHandle(si.hStdInput);
@@ -182,10 +218,6 @@
MakeErrMsg(ErrMsg, "can't redirect stderr");
return -1;
}
- } else {
- DuplicateHandle(GetCurrentProcess(), si.hStdOutput,
- GetCurrentProcess(), &si.hStdError,
- 0, TRUE, DUPLICATE_SAME_ACCESS);
}
}
@@ -195,7 +227,7 @@
fflush(stdout);
fflush(stderr);
BOOL rc = CreateProcess(path.c_str(), command, NULL, NULL, FALSE, 0,
- envp, NULL, &si, &pi);
+ envblock, NULL, &si, &pi);
DWORD err = GetLastError();
// Regardless of whether the process got created or not, we are done with
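For reference, a portable sketch (not the patch's code; the helper name and sample variables are made up, and no Windows API is called) of the environment-block layout the new code builds for CreateProcess: each "NAME=value" string is copied with its terminating NUL, and one extra NUL ends the block.

  #include <cstring>
  #include <vector>

  static std::vector<char> makeEnvBlock(const char *const *envp) {
    std::vector<char> block;
    for (unsigned i = 0; envp[i]; ++i) {
      const char *ev = envp[i];
      block.insert(block.end(), ev, ev + std::strlen(ev) + 1); // keep the '\0'
    }
    block.push_back('\0'); // an extra terminator ends the whole block
    return block;
  }

  int main() {
    const char *env[] = { "PATH=C:\\bin", "TMP=C:\\tmp", 0 };
    std::vector<char> block = makeEnvBlock(env);
    // block now holds "PATH=C:\bin\0TMP=C:\tmp\0\0".
    return block.empty() ? 1 : 0;
  }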
Modified: llvm/branches/non-call-eh/lib/Target/ARM/ARMAsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/ARM/ARMAsmPrinter.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/ARM/ARMAsmPrinter.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/ARM/ARMAsmPrinter.cpp Sun Jul 6 15:45:41 2008
@@ -127,10 +127,10 @@
Name += ACPV->getSymbol();
if (ACPV->isNonLazyPointer()) {
GVNonLazyPtrs.insert(Name);
- O << TAI->getPrivateGlobalPrefix() << Name << "$non_lazy_ptr";
+ printSuffixedName(Name, "$non_lazy_ptr");
} else if (ACPV->isStub()) {
FnStubs.insert(Name);
- O << TAI->getPrivateGlobalPrefix() << Name << "$stub";
+ printSuffixedName(Name, "$stub");
} else
O << Name;
if (ACPV->hasModifier()) O << "(" << ACPV->getModifier() << ")";
@@ -295,7 +295,7 @@
GV->hasLinkOnceLinkage());
if (isExt && isCallOp && Subtarget->isTargetDarwin() &&
TM.getRelocationModel() != Reloc::Static) {
- O << TAI->getPrivateGlobalPrefix() << Name << "$stub";
+ printSuffixedName(Name, "$stub");
FnStubs.insert(Name);
} else
O << Name;
@@ -318,7 +318,7 @@
Name += MO.getSymbolName();
if (isCallOp && Subtarget->isTargetDarwin() &&
TM.getRelocationModel() != Reloc::Static) {
- O << TAI->getPrivateGlobalPrefix() << Name << "$stub";
+ printSuffixedName(Name, "$stub");
FnStubs.insert(Name);
} else
O << Name;
@@ -1004,21 +1004,32 @@
EmitAlignment(2);
O << "\t.code\t32\n";
- O << "L" << *i << "$stub:\n";
+ std::string p = *i;
+ printSuffixedName(p, "$stub");
+ O << ":\n";
O << "\t.indirect_symbol " << *i << "\n";
- O << "\tldr ip, L" << *i << "$slp\n";
+ O << "\tldr ip, ";
+ printSuffixedName(p, "$slp");
+ O << "\n";
if (TM.getRelocationModel() == Reloc::PIC_) {
- O << "L" << *i << "$scv:\n";
+ printSuffixedName(p, "$scv");
+ O << ":\n";
O << "\tadd ip, pc, ip\n";
}
O << "\tldr pc, [ip, #0]\n";
- O << "L" << *i << "$slp:\n";
- if (TM.getRelocationModel() == Reloc::PIC_)
- O << "\t.long\tL" << *i << "$lazy_ptr-(L" << *i << "$scv+8)\n";
- else
- O << "\t.long\tL" << *i << "$lazy_ptr\n";
+ printSuffixedName(p, "$slp");
+ O << ":\n";
+ O << "\t.long\t";
+ printSuffixedName(p, "$lazy_ptr");
+ if (TM.getRelocationModel() == Reloc::PIC_) {
+ O << "-(";
+ printSuffixedName(p, "$scv");
+ O << "+8)\n";
+ } else
+ O << "\n";
SwitchToDataSection(".lazy_symbol_pointer", 0);
- O << "L" << *i << "$lazy_ptr:\n";
+ printSuffixedName(p, "$lazy_ptr");
+ O << ":\n";
O << "\t.indirect_symbol " << *i << "\n";
O << "\t.long\tdyld_stub_binding_helper\n";
}
@@ -1029,7 +1040,9 @@
SwitchToDataSection(".non_lazy_symbol_pointer", 0);
for (std::set<std::string>::iterator i = GVNonLazyPtrs.begin(),
e = GVNonLazyPtrs.end(); i != e; ++i) {
- O << "L" << *i << "$non_lazy_ptr:\n";
+ std::string p = *i;
+ printSuffixedName(p, "$non_lazy_ptr");
+ O << ":\n";
O << "\t.indirect_symbol " << *i << "\n";
O << "\t.long\t0\n";
}
Modified: llvm/branches/non-call-eh/lib/Target/ARM/ARMISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/ARM/ARMISelDAGToDAG.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/ARM/ARMISelDAGToDAG.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/ARM/ARMISelDAGToDAG.cpp Sun Jul 6 15:45:41 2008
@@ -54,7 +54,7 @@
}
SDNode *Select(SDOperand Op);
- virtual void InstructionSelectBasicBlock(SelectionDAG &DAG);
+ virtual void InstructionSelect(SelectionDAG &DAG);
bool SelectAddrMode2(SDOperand Op, SDOperand N, SDOperand &Base,
SDOperand &Offset, SDOperand &Opc);
bool SelectAddrMode2Offset(SDOperand Op, SDOperand N,
@@ -91,13 +91,11 @@
};
}
-void ARMDAGToDAGISel::InstructionSelectBasicBlock(SelectionDAG &DAG) {
+void ARMDAGToDAGISel::InstructionSelect(SelectionDAG &DAG) {
DEBUG(BB->dump());
DAG.setRoot(SelectRoot(DAG.getRoot()));
DAG.RemoveDeadNodes();
-
- ScheduleAndEmitDAG(DAG);
}
bool ARMDAGToDAGISel::SelectAddrMode2(SDOperand Op, SDOperand N,
@@ -660,7 +658,7 @@
case ISD::LOAD: {
LoadSDNode *LD = cast<LoadSDNode>(Op);
ISD::MemIndexedMode AM = LD->getAddressingMode();
- MVT::ValueType LoadedVT = LD->getMemoryVT();
+ MVT LoadedVT = LD->getMemoryVT();
if (AM != ISD::UNINDEXED) {
SDOperand Offset, AMOpc;
bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
@@ -741,7 +739,7 @@
}
case ARMISD::CMOV: {
bool isThumb = Subtarget->isThumb();
- MVT::ValueType VT = Op.getValueType();
+ MVT VT = Op.getValueType();
SDOperand N0 = Op.getOperand(0);
SDOperand N1 = Op.getOperand(1);
SDOperand N2 = Op.getOperand(2);
@@ -805,7 +803,7 @@
cast<ConstantSDNode>(N2)->getValue()), MVT::i32);
SDOperand Ops[] = { N0, N1, Tmp2, N3, InFlag };
unsigned Opc = 0;
- switch (VT) {
+ switch (VT.getSimpleVT()) {
default: assert(false && "Illegal conditional move type!");
break;
case MVT::i32:
@@ -821,7 +819,7 @@
return CurDAG->SelectNodeTo(Op.Val, Opc, VT, Ops, 5);
}
case ARMISD::CNEG: {
- MVT::ValueType VT = Op.getValueType();
+ MVT VT = Op.getValueType();
SDOperand N0 = Op.getOperand(0);
SDOperand N1 = Op.getOperand(1);
SDOperand N2 = Op.getOperand(2);
@@ -837,7 +835,7 @@
cast<ConstantSDNode>(N2)->getValue()), MVT::i32);
SDOperand Ops[] = { N0, N1, Tmp2, N3, InFlag };
unsigned Opc = 0;
- switch (VT) {
+ switch (VT.getSimpleVT()) {
default: assert(false && "Illegal conditional move type!");
break;
case MVT::f32:
Modified: llvm/branches/non-call-eh/lib/Target/ARM/ARMISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/ARM/ARMISelLowering.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/ARM/ARMISelLowering.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/ARM/ARMISelLowering.cpp Sun Jul 6 15:45:41 2008
@@ -188,7 +188,7 @@
setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
// Support label based line numbers.
- setOperationAction(ISD::LOCATION, MVT::Other, Expand);
+ setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
setOperationAction(ISD::RET, MVT::Other, Custom);
@@ -363,7 +363,7 @@
}
static void
-HowToPassArgument(MVT::ValueType ObjectVT, unsigned NumGPRs,
+HowToPassArgument(MVT ObjectVT, unsigned NumGPRs,
unsigned StackOffset, unsigned &NeededGPRs,
unsigned &NeededStackSize, unsigned &GPRPad,
unsigned &StackPad, ISD::ArgFlagsTy Flags) {
@@ -375,7 +375,7 @@
GPRPad = NumGPRs % ((align + 3)/4);
StackPad = StackOffset % align;
unsigned firstGPR = NumGPRs + GPRPad;
- switch (ObjectVT) {
+ switch (ObjectVT.getSimpleVT()) {
default: assert(0 && "Unhandled argument type!");
case MVT::i32:
case MVT::f32:
@@ -400,7 +400,7 @@
/// ARMISD:CALL <- callseq_end chain. Also add input and output parameter
/// nodes.
SDOperand ARMTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) {
- MVT::ValueType RetVT= Op.Val->getValueType(0);
+ MVT RetVT= Op.Val->getValueType(0);
SDOperand Chain = Op.getOperand(0);
unsigned CallConv = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
assert((CallConv == CallingConv::C ||
@@ -419,7 +419,7 @@
unsigned ObjGPRs;
unsigned StackPad;
unsigned GPRPad;
- MVT::ValueType ObjectVT = Op.getOperand(5+2*i).getValueType();
+ MVT ObjectVT = Op.getOperand(5+2*i).getValueType();
ISD::ArgFlagsTy Flags =
cast<ARG_FLAGSSDNode>(Op.getOperand(5+2*i+1))->getArgFlags();
HowToPassArgument(ObjectVT, NumGPRs, NumBytes, ObjGPRs, ObjSize,
@@ -446,7 +446,7 @@
SDOperand Arg = Op.getOperand(5+2*i);
ISD::ArgFlagsTy Flags =
cast<ARG_FLAGSSDNode>(Op.getOperand(5+2*i+1))->getArgFlags();
- MVT::ValueType ArgVT = Arg.getValueType();
+ MVT ArgVT = Arg.getValueType();
unsigned ObjSize;
unsigned ObjGPRs;
@@ -457,7 +457,7 @@
NumGPRs += GPRPad;
ArgOffset += StackPad;
if (ObjGPRs > 0) {
- switch (ArgVT) {
+ switch (ArgVT.getSimpleVT()) {
default: assert(0 && "Unexpected ValueType for argument!");
case MVT::i32:
RegsToPass.push_back(std::make_pair(GPRArgRegs[NumGPRs], Arg));
@@ -587,10 +587,6 @@
InFlag = Chain.getValue(1);
}
- std::vector<MVT::ValueType> NodeTys;
- NodeTys.push_back(MVT::Other); // Returns a chain
- NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
-
std::vector<SDOperand> Ops;
Ops.push_back(Chain);
Ops.push_back(Callee);
@@ -603,7 +599,9 @@
if (InFlag.Val)
Ops.push_back(InFlag);
- Chain = DAG.getNode(CallOpc, NodeTys, &Ops[0], Ops.size());
+ // Returns a chain and a flag for retval copy to use.
+ Chain = DAG.getNode(CallOpc, DAG.getVTList(MVT::Other, MVT::Flag),
+ &Ops[0], Ops.size());
InFlag = Chain.getValue(1);
Chain = DAG.getCALLSEQ_END(Chain,
@@ -614,10 +612,9 @@
InFlag = Chain.getValue(1);
std::vector<SDOperand> ResultVals;
- NodeTys.clear();
// If the call has results, copy the values out of the ret val registers.
- switch (RetVT) {
+ switch (RetVT.getSimpleVT()) {
default: assert(0 && "Unexpected ret value!");
case MVT::Other:
break;
@@ -629,33 +626,26 @@
Chain = DAG.getCopyFromReg(Chain, ARM::R1, MVT::i32,
Chain.getValue(2)).getValue(1);
ResultVals.push_back(Chain.getValue(0));
- NodeTys.push_back(MVT::i32);
}
- NodeTys.push_back(MVT::i32);
break;
case MVT::f32:
Chain = DAG.getCopyFromReg(Chain, ARM::R0, MVT::i32, InFlag).getValue(1);
ResultVals.push_back(DAG.getNode(ISD::BIT_CONVERT, MVT::f32,
Chain.getValue(0)));
- NodeTys.push_back(MVT::f32);
break;
case MVT::f64: {
SDOperand Lo = DAG.getCopyFromReg(Chain, ARM::R0, MVT::i32, InFlag);
SDOperand Hi = DAG.getCopyFromReg(Lo, ARM::R1, MVT::i32, Lo.getValue(2));
ResultVals.push_back(DAG.getNode(ARMISD::FMDRR, MVT::f64, Lo, Hi));
- NodeTys.push_back(MVT::f64);
break;
}
}
- NodeTys.push_back(MVT::Other);
-
if (ResultVals.empty())
return Chain;
ResultVals.push_back(Chain);
- SDOperand Res = DAG.getNode(ISD::MERGE_VALUES, NodeTys, &ResultVals[0],
- ResultVals.size());
+ SDOperand Res = DAG.getMergeValues(&ResultVals[0], ResultVals.size());
return Res.getValue(Op.ResNo);
}
@@ -708,7 +698,7 @@
// be used to form addressing mode. These wrapped nodes will be selected
// into MOVi.
static SDOperand LowerConstantPool(SDOperand Op, SelectionDAG &DAG) {
- MVT::ValueType PtrVT = Op.getValueType();
+ MVT PtrVT = Op.getValueType();
ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
SDOperand Res;
if (CP->isMachineConstantPoolEntry())
@@ -724,7 +714,7 @@
SDOperand
ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
SelectionDAG &DAG) {
- MVT::ValueType PtrVT = getPointerTy();
+ MVT PtrVT = getPointerTy();
unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
ARMConstantPoolValue *CPV =
new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex, ARMCP::CPValue,
@@ -758,7 +748,7 @@
GlobalValue *GV = GA->getGlobal();
SDOperand Offset;
SDOperand Chain = DAG.getEntryNode();
- MVT::ValueType PtrVT = getPointerTy();
+ MVT PtrVT = getPointerTy();
// Get the Thread Pointer
SDOperand ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, PtrVT);
@@ -807,7 +797,7 @@
SDOperand ARMTargetLowering::LowerGlobalAddressELF(SDOperand Op,
SelectionDAG &DAG) {
- MVT::ValueType PtrVT = getPointerTy();
+ MVT PtrVT = getPointerTy();
GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
Reloc::Model RelocM = getTargetMachine().getRelocationModel();
if (RelocM == Reloc::PIC_) {
@@ -840,7 +830,7 @@
SDOperand ARMTargetLowering::LowerGlobalAddressDarwin(SDOperand Op,
SelectionDAG &DAG) {
- MVT::ValueType PtrVT = getPointerTy();
+ MVT PtrVT = getPointerTy();
GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
Reloc::Model RelocM = getTargetMachine().getRelocationModel();
bool IsIndirect = GVIsIndirectSymbol(GV, RelocM);
@@ -875,7 +865,7 @@
SelectionDAG &DAG){
assert(Subtarget->isTargetELF() &&
"GLOBAL OFFSET TABLE not implemented for non-ELF targets");
- MVT::ValueType PtrVT = getPointerTy();
+ MVT PtrVT = getPointerTy();
unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
ARMConstantPoolValue *CPV = new ARMConstantPoolValue("_GLOBAL_OFFSET_TABLE_",
ARMPCLabelIndex,
@@ -888,7 +878,7 @@
}
static SDOperand LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) {
- MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+ MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getValue();
switch (IntNo) {
default: return SDOperand(); // Don't custom lower most intrinsics.
@@ -901,7 +891,7 @@
unsigned VarArgsFrameIndex) {
// vastart just stores the address of the VarArgsFrameIndex slot into the
// memory location argument.
- MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+ MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
return DAG.getStore(Op.getOperand(0), FR, Op.getOperand(1), SV, 0);
@@ -911,7 +901,7 @@
unsigned ArgNo, unsigned &NumGPRs,
unsigned &ArgOffset) {
MachineFunction &MF = DAG.getMachineFunction();
- MVT::ValueType ObjectVT = Op.getValue(ArgNo).getValueType();
+ MVT ObjectVT = Op.getValue(ArgNo).getValueType();
SDOperand Root = Op.getOperand(0);
std::vector<SDOperand> ArgValues;
MachineRegisterInfo &RegInfo = MF.getRegInfo();
@@ -1025,9 +1015,8 @@
ArgValues.push_back(Root);
// Return the new list of results.
- std::vector<MVT::ValueType> RetVT(Op.Val->value_begin(),
- Op.Val->value_end());
- return DAG.getNode(ISD::MERGE_VALUES, RetVT, &ArgValues[0], ArgValues.size());
+ return DAG.getMergeValues(Op.Val->getVTList(), &ArgValues[0],
+ ArgValues.size());
}
/// isFloatingPointZero - Return true if this is +0.0.
@@ -1123,7 +1112,7 @@
static SDOperand LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG,
const ARMSubtarget *ST) {
- MVT::ValueType VT = Op.getValueType();
+ MVT VT = Op.getValueType();
SDOperand LHS = Op.getOperand(0);
SDOperand RHS = Op.getOperand(1);
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
@@ -1195,7 +1184,7 @@
SDOperand Table = Op.getOperand(1);
SDOperand Index = Op.getOperand(2);
- MVT::ValueType PTy = getPointerTy();
+ MVT PTy = getPointerTy();
JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>();
SDOperand UId = DAG.getConstant(AFI->createJumpTableUId(), PTy);
@@ -1204,7 +1193,7 @@
Index = DAG.getNode(ISD::MUL, PTy, Index, DAG.getConstant(4, PTy));
SDOperand Addr = DAG.getNode(ISD::ADD, PTy, Index, Table);
bool isPIC = getTargetMachine().getRelocationModel() == Reloc::PIC_;
- Addr = DAG.getLoad(isPIC ? (MVT::ValueType)MVT::i32 : PTy,
+ Addr = DAG.getLoad(isPIC ? (MVT)MVT::i32 : PTy,
Chain, Addr, NULL, 0);
Chain = Addr.getValue(1);
if (isPIC)
@@ -1220,7 +1209,7 @@
}
static SDOperand LowerINT_TO_FP(SDOperand Op, SelectionDAG &DAG) {
- MVT::ValueType VT = Op.getValueType();
+ MVT VT = Op.getValueType();
unsigned Opc =
Op.getOpcode() == ISD::SINT_TO_FP ? ARMISD::SITOF : ARMISD::UITOF;
@@ -1232,8 +1221,8 @@
// Implement fcopysign with a fabs and a conditional fneg.
SDOperand Tmp0 = Op.getOperand(0);
SDOperand Tmp1 = Op.getOperand(1);
- MVT::ValueType VT = Op.getValueType();
- MVT::ValueType SrcVT = Tmp1.getValueType();
+ MVT VT = Op.getValueType();
+ MVT SrcVT = Tmp1.getValueType();
SDOperand AbsVal = DAG.getNode(ISD::FABS, VT, Tmp0);
SDOperand Cmp = getVFPCmp(Tmp1, DAG.getConstantFP(0.0, SrcVT), DAG);
SDOperand ARMCC = DAG.getConstant(ARMCC::LT, MVT::i32);
@@ -1247,8 +1236,8 @@
SDOperand Dst, SDOperand Src,
SDOperand Size, unsigned Align,
bool AlwaysInline,
- const Value *DstSV, uint64_t DstOff,
- const Value *SrcSV, uint64_t SrcOff){
+ const Value *DstSV, uint64_t DstSVOff,
+ const Value *SrcSV, uint64_t SrcSVOff){
// Do repeated 4-byte loads and stores. To be improved.
// This requires 4-byte alignment.
if ((Align & 3) != 0)
@@ -1265,12 +1254,13 @@
unsigned BytesLeft = SizeVal & 3;
unsigned NumMemOps = SizeVal >> 2;
unsigned EmittedNumMemOps = 0;
- MVT::ValueType VT = MVT::i32;
+ MVT VT = MVT::i32;
unsigned VTSize = 4;
unsigned i = 0;
const unsigned MAX_LOADS_IN_LDM = 6;
SDOperand TFOps[MAX_LOADS_IN_LDM];
SDOperand Loads[MAX_LOADS_IN_LDM];
+ uint64_t SrcOff = 0, DstOff = 0;
// Emit up to MAX_LOADS_IN_LDM loads, then a TokenFactor barrier, then the
// same number of stores. The loads and stores will get combined into
@@ -1281,7 +1271,7 @@
Loads[i] = DAG.getLoad(VT, Chain,
DAG.getNode(ISD::ADD, MVT::i32, Src,
DAG.getConstant(SrcOff, MVT::i32)),
- SrcSV, SrcOff);
+ SrcSV, SrcSVOff + SrcOff);
TFOps[i] = Loads[i].getValue(1);
SrcOff += VTSize;
}
@@ -1292,7 +1282,7 @@
TFOps[i] = DAG.getStore(Chain, Loads[i],
DAG.getNode(ISD::ADD, MVT::i32, Dst,
DAG.getConstant(DstOff, MVT::i32)),
- DstSV, DstOff);
+ DstSV, DstSVOff + DstOff);
DstOff += VTSize;
}
Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, &TFOps[0], i);
@@ -1318,7 +1308,7 @@
Loads[i] = DAG.getLoad(VT, Chain,
DAG.getNode(ISD::ADD, MVT::i32, Src,
DAG.getConstant(SrcOff, MVT::i32)),
- SrcSV, SrcOff);
+ SrcSV, SrcSVOff + SrcOff);
TFOps[i] = Loads[i].getValue(1);
++i;
SrcOff += VTSize;
@@ -1340,7 +1330,7 @@
TFOps[i] = DAG.getStore(Chain, Loads[i],
DAG.getNode(ISD::ADD, MVT::i32, Dst,
DAG.getConstant(DstOff, MVT::i32)),
- DstSV, DstOff);
+ DstSV, DstSVOff + DstOff);
++i;
DstOff += VTSize;
BytesLeft -= VTSize;
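[Editor's sketch: a hypothetical host-side illustration of the batching scheme this hunk adjusts. Up to MAX_LOADS_IN_LDM word loads are issued, then the matching stores, with the new local SrcOff/DstOff counters advancing independently of the fixed SrcSVOff/DstSVOff source-value offsets; the real code emits DAG load/store nodes instead.]

#include <algorithm>
#include <cstdint>
#include <cstring>

// Sketch under the stated assumptions, not the DAG-building code itself.
void copy_in_word_batches(uint8_t *Dst, const uint8_t *Src, unsigned SizeVal) {
  const unsigned MaxLoads = 6;           // mirrors MAX_LOADS_IN_LDM
  unsigned NumWords = SizeVal >> 2;      // whole 4-byte words
  uint64_t SrcOff = 0, DstOff = 0;       // the local offsets added above
  uint32_t Buf[MaxLoads];

  while (NumWords) {
    unsigned N = std::min(NumWords, MaxLoads);
    for (unsigned i = 0; i != N; ++i, SrcOff += 4)   // batch of loads
      std::memcpy(&Buf[i], Src + SrcOff, 4);
    for (unsigned i = 0; i != N; ++i, DstOff += 4)   // matching batch of stores
      std::memcpy(Dst + DstOff, &Buf[i], 4);
    NumWords -= N;
  }
  // The 1-3 trailing bytes (SizeVal & 3) are handled separately, as above.
}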
@@ -1428,9 +1418,9 @@
}
-/// ExpandOperationResult - Provide custom lowering hooks for expanding
-/// operations.
-SDNode *ARMTargetLowering::ExpandOperationResult(SDNode *N, SelectionDAG &DAG) {
+/// ReplaceNodeResults - Provide custom lowering hooks for nodes with illegal
+/// result types.
+SDNode *ARMTargetLowering::ReplaceNodeResults(SDNode *N, SelectionDAG &DAG) {
switch (N->getOpcode()) {
default: assert(0 && "Don't know how to custom expand this!"); abort();
case ISD::BIT_CONVERT: return ExpandBIT_CONVERT(N, DAG);
@@ -1535,7 +1525,7 @@
/// isLegalAddressImmediate - Return true if the integer value can be used
/// as the offset of the target addressing mode for load / store of the
/// given type.
-static bool isLegalAddressImmediate(int64_t V, MVT::ValueType VT,
+static bool isLegalAddressImmediate(int64_t V, MVT VT,
const ARMSubtarget *Subtarget) {
if (V == 0)
return true;
@@ -1545,7 +1535,7 @@
return false;
unsigned Scale = 1;
- switch (VT) {
+ switch (VT.getSimpleVT()) {
default: return false;
case MVT::i1:
case MVT::i8:
@@ -1569,7 +1559,7 @@
if (V < 0)
V = - V;
- switch (VT) {
+ switch (VT.getSimpleVT()) {
default: return false;
case MVT::i1:
case MVT::i8:
@@ -1614,7 +1604,7 @@
return false;
int Scale = AM.Scale;
- switch (getValueType(Ty)) {
+ switch (getValueType(Ty).getSimpleVT()) {
default: return false;
case MVT::i1:
case MVT::i8:
@@ -1649,7 +1639,7 @@
}
-static bool getIndexedAddressParts(SDNode *Ptr, MVT::ValueType VT,
+static bool getIndexedAddressParts(SDNode *Ptr, MVT VT,
bool isSEXTLoad, SDOperand &Base,
SDOperand &Offset, bool &isInc,
SelectionDAG &DAG) {
@@ -1716,7 +1706,7 @@
if (Subtarget->isThumb())
return false;
- MVT::ValueType VT;
+ MVT VT;
SDOperand Ptr;
bool isSEXTLoad = false;
if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
@@ -1750,7 +1740,7 @@
if (Subtarget->isThumb())
return false;
- MVT::ValueType VT;
+ MVT VT;
SDOperand Ptr;
bool isSEXTLoad = false;
if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
@@ -1815,7 +1805,7 @@
std::pair<unsigned, const TargetRegisterClass*>
ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
- MVT::ValueType VT) const {
+ MVT VT) const {
if (Constraint.size() == 1) {
// GCC RS6000 Constraint Letters
switch (Constraint[0]) {
@@ -1837,7 +1827,7 @@
std::vector<unsigned> ARMTargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
- MVT::ValueType VT) const {
+ MVT VT) const {
if (Constraint.size() != 1)
return std::vector<unsigned>();
Modified: llvm/branches/non-call-eh/lib/Target/ARM/ARMISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/ARM/ARMISelLowering.h?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/ARM/ARMISelLowering.h (original)
+++ llvm/branches/non-call-eh/lib/Target/ARM/ARMISelLowering.h Sun Jul 6 15:45:41 2008
@@ -76,7 +76,7 @@
explicit ARMTargetLowering(TargetMachine &TM);
virtual SDOperand LowerOperation(SDOperand Op, SelectionDAG &DAG);
- virtual SDNode *ExpandOperationResult(SDNode *N, SelectionDAG &DAG);
+ virtual SDNode *ReplaceNodeResults(SDNode *N, SelectionDAG &DAG);
virtual SDOperand PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
@@ -114,10 +114,10 @@
ConstraintType getConstraintType(const std::string &Constraint) const;
std::pair<unsigned, const TargetRegisterClass*>
getRegForInlineAsmConstraint(const std::string &Constraint,
- MVT::ValueType VT) const;
+ MVT VT) const;
std::vector<unsigned>
getRegClassForInlineAsmConstraint(const std::string &Constraint,
- MVT::ValueType VT) const;
+ MVT VT) const;
virtual const ARMSubtarget* getSubtarget() {
return Subtarget;
@@ -149,8 +149,8 @@
SDOperand Dst, SDOperand Src,
SDOperand Size, unsigned Align,
bool AlwaysInline,
- const Value *DstSV, uint64_t DstOff,
- const Value *SrcSV, uint64_t SrcOff);
+ const Value *DstSV, uint64_t DstSVOff,
+ const Value *SrcSV, uint64_t SrcSVOff);
};
}
Modified: llvm/branches/non-call-eh/lib/Target/ARM/ARMInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/ARM/ARMInstrInfo.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/ARM/ARMInstrInfo.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/ARM/ARMInstrInfo.cpp Sun Jul 6 15:45:41 2008
@@ -191,7 +191,7 @@
MachineInstr *
ARMInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
MachineBasicBlock::iterator &MBBI,
- LiveVariables &LV) const {
+ LiveVariables *LV) const {
if (!EnableARM3Addr)
return NULL;
@@ -300,22 +300,25 @@
if (MO.isRegister() && MO.getReg() &&
TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
unsigned Reg = MO.getReg();
- LiveVariables::VarInfo &VI = LV.getVarInfo(Reg);
- if (MO.isDef()) {
- MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
- if (MO.isDead())
- LV.addVirtualRegisterDead(Reg, NewMI);
- }
- if (MO.isUse() && MO.isKill()) {
- for (unsigned j = 0; j < 2; ++j) {
- // Look at the two new MI's in reverse order.
- MachineInstr *NewMI = NewMIs[j];
- if (!NewMI->readsRegister(Reg))
- continue;
- LV.addVirtualRegisterKilled(Reg, NewMI);
- if (VI.removeKill(MI))
- VI.Kills.push_back(NewMI);
- break;
+
+ if (LV) {
+ LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
+ if (MO.isDef()) {
+ MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
+ if (MO.isDead())
+ LV->addVirtualRegisterDead(Reg, NewMI);
+ }
+ if (MO.isUse() && MO.isKill()) {
+ for (unsigned j = 0; j < 2; ++j) {
+ // Look at the two new MI's in reverse order.
+ MachineInstr *NewMI = NewMIs[j];
+ if (!NewMI->readsRegister(Reg))
+ continue;
+ LV->addVirtualRegisterKilled(Reg, NewMI);
+ if (VI.removeKill(MI))
+ VI.Kills.push_back(NewMI);
+ break;
+ }
}
}
}
@@ -674,30 +677,35 @@
unsigned PredReg = MI->getOperand(3).getReg();
if (OpNum == 0) { // move -> store
unsigned SrcReg = MI->getOperand(1).getReg();
- NewMI = BuildMI(get(ARM::STR)).addReg(SrcReg).addFrameIndex(FI)
- .addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
+ bool isKill = MI->getOperand(1).isKill();
+ NewMI = BuildMI(get(ARM::STR)).addReg(SrcReg, false, false, isKill)
+ .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
} else { // move -> load
unsigned DstReg = MI->getOperand(0).getReg();
- NewMI = BuildMI(get(ARM::LDR), DstReg).addFrameIndex(FI).addReg(0)
- .addImm(0).addImm(Pred).addReg(PredReg);
+ bool isDead = MI->getOperand(0).isDead();
+ NewMI = BuildMI(get(ARM::LDR)).addReg(DstReg, true, false, false, isDead)
+ .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
}
break;
}
case ARM::tMOVr: {
if (OpNum == 0) { // move -> store
unsigned SrcReg = MI->getOperand(1).getReg();
+ bool isKill = MI->getOperand(1).isKill();
if (RI.isPhysicalRegister(SrcReg) && !RI.isLowRegister(SrcReg))
// tSpill cannot take a high register operand.
break;
- NewMI = BuildMI(get(ARM::tSpill)).addReg(SrcReg).addFrameIndex(FI)
- .addImm(0);
+ NewMI = BuildMI(get(ARM::tSpill)).addReg(SrcReg, false, false, isKill)
+ .addFrameIndex(FI).addImm(0);
} else { // move -> load
unsigned DstReg = MI->getOperand(0).getReg();
if (RI.isPhysicalRegister(DstReg) && !RI.isLowRegister(DstReg))
// tRestore cannot target a high register operand.
break;
- NewMI = BuildMI(get(ARM::tRestore), DstReg).addFrameIndex(FI)
- .addImm(0);
+ bool isDead = MI->getOperand(0).isDead();
+ NewMI = BuildMI(get(ARM::tRestore))
+ .addReg(DstReg, true, false, false, isDead)
+ .addFrameIndex(FI).addImm(0);
}
break;
}
@@ -720,19 +728,19 @@
unsigned PredReg = MI->getOperand(3).getReg();
if (OpNum == 0) { // move -> store
unsigned SrcReg = MI->getOperand(1).getReg();
- NewMI = BuildMI(get(ARM::FSTD)).addReg(SrcReg).addFrameIndex(FI)
- .addImm(0).addImm(Pred).addReg(PredReg);
+ bool isKill = MI->getOperand(1).isKill();
+ NewMI = BuildMI(get(ARM::FSTD)).addReg(SrcReg, false, false, isKill)
+ .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
} else { // move -> load
unsigned DstReg = MI->getOperand(0).getReg();
- NewMI = BuildMI(get(ARM::FLDD), DstReg).addFrameIndex(FI)
- .addImm(0).addImm(Pred).addReg(PredReg);
+ bool isDead = MI->getOperand(0).isDead();
+ NewMI = BuildMI(get(ARM::FLDD)).addReg(DstReg, true, false, false, isDead)
+ .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
}
break;
}
}
- if (NewMI)
- NewMI->copyKillDeadInfo(MI);
return NewMI;
}
@@ -891,7 +899,7 @@
// If this machine instr is an inline asm, measure it.
if (MI->getOpcode() == ARM::INLINEASM)
return TAI->getInlineAsmLength(MI->getOperand(0).getSymbolName());
- if (MI->getOpcode() == ARM::LABEL)
+ if (MI->isLabel())
return 0;
if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
return 0;
Modified: llvm/branches/non-call-eh/lib/Target/ARM/ARMInstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/ARM/ARMInstrInfo.h?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/ARM/ARMInstrInfo.h (original)
+++ llvm/branches/non-call-eh/lib/Target/ARM/ARMInstrInfo.h Sun Jul 6 15:45:41 2008
@@ -134,7 +134,7 @@
/// such, whenever a client has an instance of instruction info, it should
/// always be able to get register info as well (through this method).
///
- virtual const TargetRegisterInfo &getRegisterInfo() const { return RI; }
+ virtual const ARMRegisterInfo &getRegisterInfo() const { return RI; }
/// getPointerRegClass - Return the register class to use to hold pointers.
/// This is used for addressing modes.
@@ -153,7 +153,7 @@
virtual MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
MachineBasicBlock::iterator &MBBI,
- LiveVariables &LV) const;
+ LiveVariables *LV) const;
// Branch analysis.
virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
Modified: llvm/branches/non-call-eh/lib/Target/ARM/ARMTargetAsmInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/ARM/ARMTargetAsmInfo.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/ARM/ARMTargetAsmInfo.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/ARM/ARMTargetAsmInfo.cpp Sun Jul 6 15:45:41 2008
@@ -48,6 +48,7 @@
if (Subtarget->isTargetDarwin()) {
GlobalPrefix = "_";
PrivateGlobalPrefix = "L";
+ StringConstantPrefix = "\1LC";
BSSSection = 0; // no BSS section.
ZeroFillDirective = "\t.zerofill\t"; // Uses .zerofill
SetDirective = "\t.set\t";
Modified: llvm/branches/non-call-eh/lib/Target/ARM/ARMTargetMachine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/ARM/ARMTargetMachine.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/ARM/ARMTargetMachine.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/ARM/ARMTargetMachine.cpp Sun Jul 6 15:45:41 2008
@@ -27,11 +27,9 @@
static cl::opt<bool> DisableIfConversion("disable-arm-if-conversion",cl::Hidden,
cl::desc("Disable if-conversion pass"));
-namespace {
- // Register the target.
- RegisterTarget<ARMTargetMachine> X("arm", " ARM");
- RegisterTarget<ThumbTargetMachine> Y("thumb", " Thumb");
-}
+// Register the target.
+static RegisterTarget<ARMTargetMachine> X("arm", " ARM");
+static RegisterTarget<ThumbTargetMachine> Y("thumb", " Thumb");
/// ThumbTargetMachine - Create a Thumb architecture model.

///
@@ -94,7 +92,8 @@
unsigned ARMTargetMachine::getModuleMatchQuality(const Module &M) {
std::string TT = M.getTargetTriple();
- if (TT.size() >= 4 && std::string(TT.begin(), TT.begin()+4) == "arm-")
+ if (TT.size() >= 4 && // Match arm-foo-bar, as well as things like armv5blah-*
+ (TT.substr(0, 4) == "arm-" || TT.substr(0, 4) == "armv"))
return 20;
// If the target triple is something non-arm, we don't match.
if (!TT.empty()) return 0;
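[Editor's sketch of the new prefix test; the helper name is made up. It now accepts both plain "arm-" triples and "armv5"-style triples.]

#include <string>
// Sketch of the predicate embedded in getModuleMatchQuality above.
static bool looksLikeARM(const std::string &TT) {
  return TT.size() >= 4 &&
         (TT.substr(0, 4) == "arm-" || TT.substr(0, 4) == "armv");
  // "arm-apple-darwin"   -> true
  // "armv5-unknown-eabi" -> true
  // "powerpc-..."        -> false
}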
Modified: llvm/branches/non-call-eh/lib/Target/ARM/ARMTargetMachine.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/ARM/ARMTargetMachine.h?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/ARM/ARMTargetMachine.h (original)
+++ llvm/branches/non-call-eh/lib/Target/ARM/ARMTargetMachine.h Sun Jul 6 15:45:41 2008
@@ -38,10 +38,10 @@
public:
ARMTargetMachine(const Module &M, const std::string &FS, bool isThumb = false);
- virtual const ARMInstrInfo *getInstrInfo() const { return &InstrInfo; }
- virtual const TargetFrameInfo *getFrameInfo() const { return &FrameInfo; }
- virtual TargetJITInfo *getJITInfo() { return &JITInfo; }
- virtual const TargetRegisterInfo *getRegisterInfo() const {
+ virtual const ARMInstrInfo *getInstrInfo() const { return &InstrInfo; }
+ virtual const ARMFrameInfo *getFrameInfo() const { return &FrameInfo; }
+ virtual ARMJITInfo *getJITInfo() { return &JITInfo; }
+ virtual const ARMRegisterInfo *getRegisterInfo() const {
return &InstrInfo.getRegisterInfo();
}
virtual const TargetData *getTargetData() const { return &DataLayout; }
Modified: llvm/branches/non-call-eh/lib/Target/Alpha/Alpha.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/Alpha/Alpha.h?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/Alpha/Alpha.h (original)
+++ llvm/branches/non-call-eh/lib/Target/Alpha/Alpha.h Sun Jul 6 15:45:41 2008
@@ -24,8 +24,7 @@
class TargetMachine;
class MachineCodeEmitter;
- FunctionPass *createAlphaSimpleInstructionSelector(TargetMachine &TM);
- FunctionPass *createAlphaISelDag(TargetMachine &TM);
+ FunctionPass *createAlphaISelDag(AlphaTargetMachine &TM);
FunctionPass *createAlphaCodePrinterPass(std::ostream &OS,
TargetMachine &TM);
FunctionPass *createAlphaPatternInstructionSelector(TargetMachine &TM);
Modified: llvm/branches/non-call-eh/lib/Target/Alpha/AlphaAsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/Alpha/AlphaAsmPrinter.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/Alpha/AlphaAsmPrinter.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/Alpha/AlphaAsmPrinter.cpp Sun Jul 6 15:45:41 2008
@@ -225,6 +225,7 @@
switch (I->getLinkage()) {
case GlobalValue::LinkOnceLinkage:
case GlobalValue::WeakLinkage:
+ case GlobalValue::CommonLinkage:
O << TAI->getWeakRefDirective() << name << '\n';
break;
case GlobalValue::AppendingLinkage:
Modified: llvm/branches/non-call-eh/lib/Target/Alpha/AlphaISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/Alpha/AlphaISelDAGToDAG.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/Alpha/AlphaISelDAGToDAG.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/Alpha/AlphaISelDAGToDAG.cpp Sun Jul 6 15:45:41 2008
@@ -146,9 +146,9 @@
}
public:
- AlphaDAGToDAGISel(TargetMachine &TM)
+ explicit AlphaDAGToDAGISel(AlphaTargetMachine &TM)
: SelectionDAGISel(AlphaLowering),
- AlphaLowering(*(AlphaTargetLowering*)(TM.getTargetLowering()))
+ AlphaLowering(*TM.getTargetLowering())
{}
/// getI64Imm - Return a target constant with the specified value, of type
@@ -161,9 +161,9 @@
// target-specific node if it hasn't already been changed.
SDNode *Select(SDOperand Op);
- /// InstructionSelectBasicBlock - This callback is invoked by
+ /// InstructionSelect - This callback is invoked by
/// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
- virtual void InstructionSelectBasicBlock(SelectionDAG &DAG);
+ virtual void InstructionSelect(SelectionDAG &DAG);
virtual const char *getPassName() const {
return "Alpha DAG->DAG Pattern Instruction Selection";
@@ -230,17 +230,14 @@
RA, MVT::i64);
}
-/// InstructionSelectBasicBlock - This callback is invoked by
+/// InstructionSelect - This callback is invoked by
/// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
-void AlphaDAGToDAGISel::InstructionSelectBasicBlock(SelectionDAG &DAG) {
+void AlphaDAGToDAGISel::InstructionSelect(SelectionDAG &DAG) {
DEBUG(BB->dump());
// Select target instructions for the DAG.
DAG.setRoot(SelectRoot(DAG.getRoot()));
DAG.RemoveDeadNodes();
-
- // Emit machine code to BB.
- ScheduleAndEmitDAG(DAG);
}
// Select - Convert the specified operand from a target-independent to a
@@ -334,7 +331,7 @@
case ISD::TargetConstantFP: {
ConstantFPSDNode *CN = cast<ConstantFPSDNode>(N);
bool isDouble = N->getValueType(0) == MVT::f64;
- MVT::ValueType T = isDouble ? MVT::f64 : MVT::f32;
+ MVT T = isDouble ? MVT::f64 : MVT::f32;
if (CN->getValueAPF().isPosZero()) {
return CurDAG->SelectNodeTo(N, isDouble ? Alpha::CPYST : Alpha::CPYSS,
T, CurDAG->getRegister(Alpha::F31, T),
@@ -350,7 +347,7 @@
}
case ISD::SETCC:
- if (MVT::isFloatingPoint(N->getOperand(0).Val->getValueType(0))) {
+ if (N->getOperand(0).Val->getValueType(0).isFloatingPoint()) {
ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
unsigned Opc = Alpha::WTF;
@@ -404,9 +401,9 @@
break;
case ISD::SELECT:
- if (MVT::isFloatingPoint(N->getValueType(0)) &&
+ if (N->getValueType(0).isFloatingPoint() &&
(N->getOperand(0).getOpcode() != ISD::SETCC ||
- !MVT::isFloatingPoint(N->getOperand(0).getOperand(1).getValueType()))) {
+ !N->getOperand(0).getOperand(1).getValueType().isFloatingPoint())) {
//This should be the condition not covered by the Patterns
//FIXME: Don't have SelectCode die, but rather return something testable
// so that things like this can be caught in fall through code
@@ -472,7 +469,7 @@
AddToISelQueue(Chain);
std::vector<SDOperand> CallOperands;
- std::vector<MVT::ValueType> TypeOperands;
+ std::vector<MVT> TypeOperands;
//grab the arguments
for(int i = 2, e = N->getNumOperands(); i < e; ++i) {
@@ -489,7 +486,7 @@
for (int i = 6; i < count; ++i) {
unsigned Opc = Alpha::WTF;
- if (MVT::isInteger(TypeOperands[i])) {
+ if (TypeOperands[i].isInteger()) {
Opc = Alpha::STQ;
} else if (TypeOperands[i] == MVT::f32) {
Opc = Alpha::STS;
@@ -504,7 +501,7 @@
Chain = SDOperand(CurDAG->getTargetNode(Opc, MVT::Other, Ops, 4), 0);
}
for (int i = 0; i < std::min(6, count); ++i) {
- if (MVT::isInteger(TypeOperands[i])) {
+ if (TypeOperands[i].isInteger()) {
Chain = CurDAG->getCopyToReg(Chain, args_int[i], CallOperands[i], InFlag);
InFlag = Chain.getValue(1);
} else if (TypeOperands[i] == MVT::f32 || TypeOperands[i] == MVT::f64) {
@@ -533,7 +530,7 @@
std::vector<SDOperand> CallResults;
- switch (N->getValueType(0)) {
+ switch (N->getValueType(0).getSimpleVT()) {
default: assert(0 && "Unexpected ret value!");
case MVT::Other: break;
case MVT::i64:
@@ -559,6 +556,6 @@
/// createAlphaISelDag - This pass converts a legalized DAG into a
/// Alpha-specific DAG, ready for instruction scheduling.
///
-FunctionPass *llvm::createAlphaISelDag(TargetMachine &TM) {
+FunctionPass *llvm::createAlphaISelDag(AlphaTargetMachine &TM) {
return new AlphaDAGToDAGISel(TM);
}
Modified: llvm/branches/non-call-eh/lib/Target/Alpha/AlphaISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/Alpha/AlphaISelLowering.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/Alpha/AlphaISelLowering.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/Alpha/AlphaISelLowering.cpp Sun Jul 6 15:45:41 2008
@@ -104,9 +104,10 @@
setOperationAction(ISD::BIT_CONVERT, MVT::f32, Promote);
// We don't have line number support yet.
- setOperationAction(ISD::LOCATION, MVT::Other, Expand);
+ setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
- setOperationAction(ISD::LABEL, MVT::Other, Expand);
+ setOperationAction(ISD::DBG_LABEL, MVT::Other, Expand);
+ setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
// Not implemented yet.
setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
@@ -145,8 +146,7 @@
computeRegisterProperties();
}
-MVT::ValueType
-AlphaTargetLowering::getSetCCResultType(const SDOperand &) const {
+MVT AlphaTargetLowering::getSetCCResultType(const SDOperand &) const {
return MVT::i64;
}
@@ -169,7 +169,7 @@
}
static SDOperand LowerJumpTable(SDOperand Op, SelectionDAG &DAG) {
- MVT::ValueType PtrVT = Op.getValueType();
+ MVT PtrVT = Op.getValueType();
JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
SDOperand JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
SDOperand Zero = DAG.getConstant(0, PtrVT);
@@ -217,14 +217,13 @@
for (unsigned ArgNo = 0, e = Op.Val->getNumValues()-1; ArgNo != e; ++ArgNo) {
SDOperand argt;
- MVT::ValueType ObjectVT = Op.getValue(ArgNo).getValueType();
+ MVT ObjectVT = Op.getValue(ArgNo).getValueType();
SDOperand ArgVal;
if (ArgNo < 6) {
- switch (ObjectVT) {
+ switch (ObjectVT.getSimpleVT()) {
default:
- cerr << "Unknown Type " << ObjectVT << "\n";
- abort();
+ assert(false && "Invalid value type!");
case MVT::f64:
args_float[ArgNo] = AddLiveIn(MF, args_float[ArgNo],
&Alpha::F8RCRegClass);
@@ -282,9 +281,8 @@
ArgValues.push_back(Root);
// Return the new list of results.
- std::vector<MVT::ValueType> RetVT(Op.Val->value_begin(),
- Op.Val->value_end());
- return DAG.getNode(ISD::MERGE_VALUES, RetVT, &ArgValues[0], ArgValues.size());
+ return DAG.getMergeValues(Op.Val->getVTList(), &ArgValues[0],
+ ArgValues.size());
}
static SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG) {
@@ -300,12 +298,12 @@
break;
//return SDOperand(); // ret void is legal
case 3: {
- MVT::ValueType ArgVT = Op.getOperand(1).getValueType();
+ MVT ArgVT = Op.getOperand(1).getValueType();
unsigned ArgReg;
- if (MVT::isInteger(ArgVT))
+ if (ArgVT.isInteger())
ArgReg = Alpha::R0;
else {
- assert(MVT::isFloatingPoint(ArgVT));
+ assert(ArgVT.isFloatingPoint());
ArgReg = Alpha::F0;
}
Copy = DAG.getCopyToReg(Copy, ArgReg, Op.getOperand(1), Copy.getValue(1));
@@ -332,7 +330,7 @@
std::vector<SDOperand> args_to_use;
for (unsigned i = 0, e = Args.size(); i != e; ++i)
{
- switch (getValueType(Args[i].Ty)) {
+ switch (getValueType(Args[i].Ty).getSimpleVT()) {
default: assert(0 && "Unexpected ValueType for argument!");
case MVT::i1:
case MVT::i8:
@@ -355,10 +353,10 @@
args_to_use.push_back(Args[i].Node);
}
- std::vector<MVT::ValueType> RetVals;
- MVT::ValueType RetTyVT = getValueType(RetTy);
- MVT::ValueType ActualRetTyVT = RetTyVT;
- if (RetTyVT >= MVT::i1 && RetTyVT <= MVT::i32)
+ std::vector<MVT> RetVals;
+ MVT RetTyVT = getValueType(RetTy);
+ MVT ActualRetTyVT = RetTyVT;
+ if (RetTyVT.getSimpleVT() >= MVT::i1 && RetTyVT.getSimpleVT() <= MVT::i32)
ActualRetTyVT = MVT::i64;
if (RetTyVT != MVT::isVoid)
@@ -394,6 +392,34 @@
return std::make_pair(RetVal, Chain);
}
+void AlphaTargetLowering::LowerVAARG(SDNode *N, SDOperand &Chain,
+ SDOperand &DataPtr, SelectionDAG &DAG) {
+ Chain = N->getOperand(0);
+ SDOperand VAListP = N->getOperand(1);
+ const Value *VAListS = cast<SrcValueSDNode>(N->getOperand(2))->getValue();
+
+ SDOperand Base = DAG.getLoad(MVT::i64, Chain, VAListP, VAListS, 0);
+ SDOperand Tmp = DAG.getNode(ISD::ADD, MVT::i64, VAListP,
+ DAG.getConstant(8, MVT::i64));
+ SDOperand Offset = DAG.getExtLoad(ISD::SEXTLOAD, MVT::i64, Base.getValue(1),
+ Tmp, NULL, 0, MVT::i32);
+ DataPtr = DAG.getNode(ISD::ADD, MVT::i64, Base, Offset);
+ if (N->getValueType(0).isFloatingPoint())
+ {
+ //if fp && Offset < 6*8, then subtract 6*8 from DataPtr
+ SDOperand FPDataPtr = DAG.getNode(ISD::SUB, MVT::i64, DataPtr,
+ DAG.getConstant(8*6, MVT::i64));
+ SDOperand CC = DAG.getSetCC(MVT::i64, Offset,
+ DAG.getConstant(8*6, MVT::i64), ISD::SETLT);
+ DataPtr = DAG.getNode(ISD::SELECT, MVT::i64, CC, FPDataPtr, DataPtr);
+ }
+
+ SDOperand NewOffset = DAG.getNode(ISD::ADD, MVT::i64, Offset,
+ DAG.getConstant(8, MVT::i64));
+ Chain = DAG.getTruncStore(Offset.getValue(1), NewOffset, Tmp, NULL, 0,
+ MVT::i32);
+}
+
/// LowerOperation - Provide custom lowering hooks for some operations.
///
SDOperand AlphaTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
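[Editor's sketch: a hypothetical C equivalent of the DAG built by the new LowerVAARG above, assuming the Alpha va_list is laid out as a base pointer followed by a 32-bit offset stored 8 bytes later, as the loads and stores above imply.]

// Illustration only; the real code builds loads, a select, and a trunc-store.
struct alpha_va_list_sketch { char *base; int offset; };

static char *va_arg_slot_sketch(alpha_va_list_sketch *ap, bool is_fp) {
  char *data = ap->base + ap->offset;
  if (is_fp && ap->offset < 6 * 8)   // FP args live 6*8 bytes lower
    data -= 6 * 8;
  ap->offset += 8;                   // consume one 8-byte slot
  return data;                       // caller then loads the value from here
}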
@@ -407,17 +433,17 @@
case ISD::JumpTable: return LowerJumpTable(Op, DAG);
case ISD::SINT_TO_FP: {
- assert(MVT::i64 == Op.getOperand(0).getValueType() &&
+ assert(Op.getOperand(0).getValueType() == MVT::i64 &&
"Unhandled SINT_TO_FP type in custom expander!");
SDOperand LD;
- bool isDouble = MVT::f64 == Op.getValueType();
+ bool isDouble = Op.getValueType() == MVT::f64;
LD = DAG.getNode(ISD::BIT_CONVERT, MVT::f64, Op.getOperand(0));
SDOperand FP = DAG.getNode(isDouble?AlphaISD::CVTQT_:AlphaISD::CVTQS_,
isDouble?MVT::f64:MVT::f32, LD);
return FP;
}
case ISD::FP_TO_SINT: {
- bool isDouble = MVT::f64 == Op.getOperand(0).getValueType();
+ bool isDouble = Op.getOperand(0).getValueType() == MVT::f64;
SDOperand src = Op.getOperand(0);
if (!isDouble) //Promote
@@ -465,7 +491,7 @@
case ISD::SREM:
//Expand only on constant case
if (Op.getOperand(1).getOpcode() == ISD::Constant) {
- MVT::ValueType VT = Op.Val->getValueType(0);
+ MVT VT = Op.Val->getValueType(0);
SDOperand Tmp1 = Op.Val->getOpcode() == ISD::UREM ?
BuildUDIV(Op.Val, DAG, NULL) :
BuildSDIV(Op.Val, DAG, NULL);
@@ -476,7 +502,7 @@
//fall through
case ISD::SDIV:
case ISD::UDIV:
- if (MVT::isInteger(Op.getValueType())) {
+ if (Op.getValueType().isInteger()) {
if (Op.getOperand(1).getOpcode() == ISD::Constant)
return Op.getOpcode() == ISD::SDIV ? BuildSDIV(Op.Val, DAG, NULL)
: BuildUDIV(Op.Val, DAG, NULL);
@@ -495,37 +521,15 @@
break;
case ISD::VAARG: {
- SDOperand Chain = Op.getOperand(0);
- SDOperand VAListP = Op.getOperand(1);
- const Value *VAListS = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
-
- SDOperand Base = DAG.getLoad(MVT::i64, Chain, VAListP, VAListS, 0);
- SDOperand Tmp = DAG.getNode(ISD::ADD, MVT::i64, VAListP,
- DAG.getConstant(8, MVT::i64));
- SDOperand Offset = DAG.getExtLoad(ISD::SEXTLOAD, MVT::i64, Base.getValue(1),
- Tmp, NULL, 0, MVT::i32);
- SDOperand DataPtr = DAG.getNode(ISD::ADD, MVT::i64, Base, Offset);
- if (MVT::isFloatingPoint(Op.getValueType()))
- {
- //if fp && Offset < 6*8, then subtract 6*8 from DataPtr
- SDOperand FPDataPtr = DAG.getNode(ISD::SUB, MVT::i64, DataPtr,
- DAG.getConstant(8*6, MVT::i64));
- SDOperand CC = DAG.getSetCC(MVT::i64, Offset,
- DAG.getConstant(8*6, MVT::i64), ISD::SETLT);
- DataPtr = DAG.getNode(ISD::SELECT, MVT::i64, CC, FPDataPtr, DataPtr);
- }
+ SDOperand Chain, DataPtr;
+ LowerVAARG(Op.Val, Chain, DataPtr, DAG);
- SDOperand NewOffset = DAG.getNode(ISD::ADD, MVT::i64, Offset,
- DAG.getConstant(8, MVT::i64));
- SDOperand Update = DAG.getTruncStore(Offset.getValue(1), NewOffset,
- Tmp, NULL, 0, MVT::i32);
-
SDOperand Result;
if (Op.getValueType() == MVT::i32)
- Result = DAG.getExtLoad(ISD::SEXTLOAD, MVT::i64, Update, DataPtr,
+ Result = DAG.getExtLoad(ISD::SEXTLOAD, MVT::i64, Chain, DataPtr,
NULL, 0, MVT::i32);
else
- Result = DAG.getLoad(Op.getValueType(), Update, DataPtr, NULL, 0);
+ Result = DAG.getLoad(Op.getValueType(), Chain, DataPtr, NULL, 0);
return Result;
}
case ISD::VACOPY: {
@@ -566,14 +570,15 @@
return SDOperand();
}
-SDOperand AlphaTargetLowering::CustomPromoteOperation(SDOperand Op,
- SelectionDAG &DAG) {
- assert(Op.getValueType() == MVT::i32 &&
- Op.getOpcode() == ISD::VAARG &&
+SDNode *AlphaTargetLowering::ReplaceNodeResults(SDNode *N,
+ SelectionDAG &DAG) {
+ assert(N->getValueType(0) == MVT::i32 &&
+ N->getOpcode() == ISD::VAARG &&
"Unknown node to custom promote!");
-
- // The code in LowerOperation already handles i32 vaarg
- return LowerOperation(Op, DAG);
+
+ SDOperand Chain, DataPtr;
+ LowerVAARG(N, Chain, DataPtr, DAG);
+ return DAG.getLoad(N->getValueType(0), Chain, DataPtr, NULL, 0).Val;
}
@@ -596,7 +601,7 @@
std::vector<unsigned> AlphaTargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
- MVT::ValueType VT) const {
+ MVT VT) const {
if (Constraint.size() == 1) {
switch (Constraint[0]) {
default: break; // Unknown constraint letter
@@ -664,11 +669,7 @@
MachineBasicBlock *llscMBB = new MachineBasicBlock(LLVM_BB);
MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
- for(MachineBasicBlock::succ_iterator i = thisMBB->succ_begin(),
- e = thisMBB->succ_end(); i != e; ++i)
- sinkMBB->addSuccessor(*i);
- while(!thisMBB->succ_empty())
- thisMBB->removeSuccessor(thisMBB->succ_begin());
+ sinkMBB->transferSuccessors(thisMBB);
MachineFunction *F = BB->getParent();
F->getBasicBlockList().insert(It, llscMBB);
Modified: llvm/branches/non-call-eh/lib/Target/Alpha/AlphaISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/Alpha/AlphaISelLowering.h?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/Alpha/AlphaISelLowering.h (original)
+++ llvm/branches/non-call-eh/lib/Target/Alpha/AlphaISelLowering.h Sun Jul 6 15:45:41 2008
@@ -67,14 +67,14 @@
explicit AlphaTargetLowering(TargetMachine &TM);
/// getSetCCResultType - Get the SETCC result ValueType
- virtual MVT::ValueType getSetCCResultType(const SDOperand &) const;
+ virtual MVT getSetCCResultType(const SDOperand &) const;
/// LowerOperation - Provide custom lowering hooks for some operations.
///
virtual SDOperand LowerOperation(SDOperand Op, SelectionDAG &DAG);
- virtual SDOperand CustomPromoteOperation(SDOperand Op, SelectionDAG &DAG);
+ virtual SDNode *ReplaceNodeResults(SDNode *N, SelectionDAG &DAG);
- //Friendly names for dumps
+ // Friendly names for dumps
const char *getTargetNodeName(unsigned Opcode) const;
/// LowerCallTo - This hook lowers an abstract call to a function into an
@@ -88,12 +88,18 @@
std::vector<unsigned>
getRegClassForInlineAsmConstraint(const std::string &Constraint,
- MVT::ValueType VT) const;
+ MVT VT) const;
bool hasITOF() { return useITOF; }
MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *BB);
+
+ private:
+ // Helpers for custom lowering.
+ void LowerVAARG(SDNode *N, SDOperand &Chain, SDOperand &DataPtr,
+ SelectionDAG &DAG);
+
};
}
Modified: llvm/branches/non-call-eh/lib/Target/Alpha/AlphaInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/Alpha/AlphaInstrInfo.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/Alpha/AlphaInstrInfo.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/Alpha/AlphaInstrInfo.cpp Sun Jul 6 15:45:41 2008
@@ -269,23 +269,25 @@
if (MI->getOperand(1).getReg() == MI->getOperand(2).getReg()) {
if (Ops[0] == 0) { // move -> store
unsigned InReg = MI->getOperand(1).getReg();
+ bool isKill = MI->getOperand(1).isKill();
Opc = (Opc == Alpha::BISr) ? Alpha::STQ :
((Opc == Alpha::CPYSS) ? Alpha::STS : Alpha::STT);
- NewMI = BuildMI(get(Opc)).addReg(InReg).addFrameIndex(FrameIndex)
+ NewMI = BuildMI(get(Opc)).addReg(InReg, false, false, isKill)
+ .addFrameIndex(FrameIndex)
.addReg(Alpha::F31);
} else { // load -> move
unsigned OutReg = MI->getOperand(0).getReg();
+ bool isDead = MI->getOperand(0).isDead();
Opc = (Opc == Alpha::BISr) ? Alpha::LDQ :
((Opc == Alpha::CPYSS) ? Alpha::LDS : Alpha::LDT);
- NewMI = BuildMI(get(Opc), OutReg).addFrameIndex(FrameIndex)
+ NewMI = BuildMI(get(Opc)).addReg(OutReg, true, false, false, isDead)
+ .addFrameIndex(FrameIndex)
.addReg(Alpha::F31);
}
}
break;
}
- if (NewMI)
- NewMI->copyKillDeadInfo(MI);
- return 0;
+ return NewMI;
}
static unsigned AlphaRevCondCode(unsigned Opcode) {
Modified: llvm/branches/non-call-eh/lib/Target/Alpha/AlphaInstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/Alpha/AlphaInstrInfo.h?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/Alpha/AlphaInstrInfo.h (original)
+++ llvm/branches/non-call-eh/lib/Target/Alpha/AlphaInstrInfo.h Sun Jul 6 15:45:41 2008
@@ -28,7 +28,7 @@
/// such, whenever a client has an instance of instruction info, it should
/// always be able to get register info as well (through this method).
///
- virtual const TargetRegisterInfo &getRegisterInfo() const { return RI; }
+ virtual const AlphaRegisterInfo &getRegisterInfo() const { return RI; }
/// Return true if the instruction is a register to register move and
/// leave the source and dest operands in the passed parameters.
Modified: llvm/branches/non-call-eh/lib/Target/Alpha/AlphaInstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/Alpha/AlphaInstrInfo.td?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/Alpha/AlphaInstrInfo.td (original)
+++ llvm/branches/non-call-eh/lib/Target/Alpha/AlphaInstrInfo.td Sun Jul 6 15:45:41 2008
@@ -160,14 +160,14 @@
let usesCustomDAGSchedInserter = 1 in { // Expanded by the scheduler.
def CAS32 : PseudoInstAlpha<(outs GPRC:$dst), (ins GPRC:$ptr, GPRC:$cmp, GPRC:$swp), "",
- [(set GPRC:$dst, (atomic_lcs_32 GPRC:$ptr, GPRC:$cmp, GPRC:$swp))], s_pseudo>;
+ [(set GPRC:$dst, (atomic_cmp_swap_32 GPRC:$ptr, GPRC:$cmp, GPRC:$swp))], s_pseudo>;
def CAS64 : PseudoInstAlpha<(outs GPRC:$dst), (ins GPRC:$ptr, GPRC:$cmp, GPRC:$swp), "",
- [(set GPRC:$dst, (atomic_lcs_64 GPRC:$ptr, GPRC:$cmp, GPRC:$swp))], s_pseudo>;
+ [(set GPRC:$dst, (atomic_cmp_swap_64 GPRC:$ptr, GPRC:$cmp, GPRC:$swp))], s_pseudo>;
def LAS32 : PseudoInstAlpha<(outs GPRC:$dst), (ins GPRC:$ptr, GPRC:$swp), "",
- [(set GPRC:$dst, (atomic_las_32 GPRC:$ptr, GPRC:$swp))], s_pseudo>;
+ [(set GPRC:$dst, (atomic_load_add_32 GPRC:$ptr, GPRC:$swp))], s_pseudo>;
def LAS64 :PseudoInstAlpha<(outs GPRC:$dst), (ins GPRC:$ptr, GPRC:$swp), "",
- [(set GPRC:$dst, (atomic_las_64 GPRC:$ptr, GPRC:$swp))], s_pseudo>;
+ [(set GPRC:$dst, (atomic_load_add_64 GPRC:$ptr, GPRC:$swp))], s_pseudo>;
def SWAP32 : PseudoInstAlpha<(outs GPRC:$dst), (ins GPRC:$ptr, GPRC:$swp), "",
[(set GPRC:$dst, (atomic_swap_32 GPRC:$ptr, GPRC:$swp))], s_pseudo>;
Modified: llvm/branches/non-call-eh/lib/Target/Alpha/AlphaTargetMachine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/Alpha/AlphaTargetMachine.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/Alpha/AlphaTargetMachine.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/Alpha/AlphaTargetMachine.cpp Sun Jul 6 15:45:41 2008
@@ -20,10 +20,8 @@
using namespace llvm;
-namespace {
- // Register the targets
- RegisterTarget<AlphaTargetMachine> X("alpha", " Alpha (incomplete)");
-}
+// Register the targets
+static RegisterTarget<AlphaTargetMachine> X("alpha", " Alpha (incomplete)");
const TargetAsmInfo *AlphaTargetMachine::createTargetAsmInfo() const {
return new AlphaTargetAsmInfo(*this);
Modified: llvm/branches/non-call-eh/lib/Target/Alpha/AlphaTargetMachine.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/Alpha/AlphaTargetMachine.h?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/Alpha/AlphaTargetMachine.h (original)
+++ llvm/branches/non-call-eh/lib/Target/Alpha/AlphaTargetMachine.h Sun Jul 6 15:45:41 2008
@@ -42,15 +42,15 @@
virtual const AlphaInstrInfo *getInstrInfo() const { return &InstrInfo; }
virtual const TargetFrameInfo *getFrameInfo() const { return &FrameInfo; }
- virtual const TargetSubtarget *getSubtargetImpl() const{ return &Subtarget; }
- virtual const TargetRegisterInfo *getRegisterInfo() const {
+ virtual const AlphaSubtarget *getSubtargetImpl() const{ return &Subtarget; }
+ virtual const AlphaRegisterInfo *getRegisterInfo() const {
return &InstrInfo.getRegisterInfo();
}
- virtual TargetLowering* getTargetLowering() const {
+ virtual AlphaTargetLowering* getTargetLowering() const {
return const_cast<AlphaTargetLowering*>(&TLInfo);
}
virtual const TargetData *getTargetData() const { return &DataLayout; }
- virtual TargetJITInfo* getJITInfo() {
+ virtual AlphaJITInfo* getJITInfo() {
return &JITInfo;
}
Modified: llvm/branches/non-call-eh/lib/Target/CBackend/CBackend.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/CBackend/CBackend.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/CBackend/CBackend.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/CBackend/CBackend.cpp Sun Jul 6 15:45:41 2008
@@ -47,10 +47,10 @@
#include <sstream>
using namespace llvm;
-namespace {
- // Register the target.
- RegisterTarget<CTargetMachine> X("c", " C backend");
+// Register the target.
+static RegisterTarget<CTargetMachine> X("c", " C backend");
+namespace {
/// CBackendNameAllUsedStructsAndMergeFunctions - This pass inserts names for
/// any unnamed structure types that are used by the program, and merges
/// external functions with the same name.
@@ -155,6 +155,7 @@
void writeOperand(Value *Operand);
void writeOperandRaw(Value *Operand);
+ void writeInstComputationInline(Instruction &I);
void writeOperandInternal(Value *Operand);
void writeOperandWithCast(Value* Operand, unsigned Opcode);
void writeOperandWithCast(Value* Operand, const ICmpInst &I);
@@ -170,7 +171,7 @@
void printModule(Module *M);
void printModuleTypes(const TypeSymbolTable &ST);
- void printContainedStructs(const Type *Ty, std::set<const StructType *> &);
+ void printContainedStructs(const Type *Ty, std::set<const Type *> &);
void printFloatingPointConstants(Function &F);
void printFunctionSignature(const Function *F, bool Prototype);
@@ -209,7 +210,8 @@
// emit it inline where it would go.
if (I.getType() == Type::VoidTy || !I.hasOneUse() ||
isa<TerminatorInst>(I) || isa<CallInst>(I) || isa<PHINode>(I) ||
- isa<LoadInst>(I) || isa<VAArgInst>(I) || isa<InsertElementInst>(I))
+ isa<LoadInst>(I) || isa<VAArgInst>(I) || isa<InsertElementInst>(I) ||
+ isa<InsertValueInst>(I))
// Don't inline a load across a store or other bad things!
return false;
@@ -283,6 +285,10 @@
void visitInsertElementInst(InsertElementInst &I);
void visitExtractElementInst(ExtractElementInst &I);
void visitShuffleVectorInst(ShuffleVectorInst &SVI);
+ void visitGetResultInst(GetResultInst &GRI);
+
+ void visitInsertValueInst(InsertValueInst &I);
+ void visitExtractValueInst(ExtractValueInst &I);
void visitInstruction(Instruction &I) {
cerr << "C Writer does not know about " << I;
@@ -323,9 +329,10 @@
TI != TE; ) {
TypeSymbolTable::iterator I = TI++;
- // If this isn't a struct type, remove it from our set of types to name.
- // This simplifies emission later.
- if (!isa<StructType>(I->second) && !isa<OpaqueType>(I->second)) {
+ // If this isn't a struct or array type, remove it from our set of types
+ // to name. This simplifies emission later.
+ if (!isa<StructType>(I->second) && !isa<OpaqueType>(I->second) &&
+ !isa<ArrayType>(I->second)) {
TST.remove(I);
} else {
// If this is not used, remove it from the symbol table.
@@ -344,8 +351,8 @@
unsigned RenameCounter = 0;
for (std::set<const Type *>::const_iterator I = UT.begin(), E = UT.end();
I != E; ++I)
- if (const StructType *ST = dyn_cast<StructType>(*I)) {
- while (M.addTypeName("unnamed"+utostr(RenameCounter), ST))
+ if (isa<StructType>(*I) || isa<ArrayType>(*I)) {
+ while (M.addTypeName("unnamed"+utostr(RenameCounter), *I))
++RenameCounter;
Changed = true;
}
@@ -555,8 +562,12 @@
const ArrayType *ATy = cast<ArrayType>(Ty);
unsigned NumElements = ATy->getNumElements();
if (NumElements == 0) NumElements = 1;
- return printType(Out, ATy->getElementType(), false,
- NameSoFar + "[" + utostr(NumElements) + "]");
+ // Arrays are wrapped in structs to allow them to have normal
+ // value semantics (avoiding the array "decay").
+ Out << NameSoFar << " { ";
+ printType(Out, ATy->getElementType(), false,
+ "array[" + utostr(NumElements) + "]");
+ return Out << "; }";
}
case Type::OpaqueTyID: {
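[Editor's sketch of the C this change would emit for an LLVM [4 x i32]; the struct tag is made up, but the inner field really is named "array", which the GEP and initializer changes later in this file rely on.]

// Illustration only; generated names differ.
struct l_array_4_u32 { unsigned int array[4]; };

l_array_4_u32 copy_whole_array(l_array_4_u32 a) {
  l_array_4_u32 b = a;   // copies by value; no pointer decay
  b.array[2] = 7;        // element access goes through the .array field
  return b;
}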
@@ -912,7 +923,7 @@
<< *CE << "\n";
abort();
}
- } else if (isa<UndefValue>(CPV) && CPV->getType()->isFirstClassType()) {
+ } else if (isa<UndefValue>(CPV) && CPV->getType()->isSingleValueType()) {
Out << "((";
printType(Out, CPV->getType()); // sign doesn't matter
Out << ")/*UNDEF*/";
@@ -1011,6 +1022,7 @@
}
case Type::ArrayTyID:
+ Out << "{ "; // Arrays are wrapped in struct types.
if (ConstantArray *CA = dyn_cast<ConstantArray>(CPV)) {
printConstantArray(CA);
} else {
@@ -1028,6 +1040,7 @@
}
Out << " }";
}
+ Out << " }"; // Arrays are wrapped in struct types.
break;
case Type::VectorTyID:
@@ -1216,12 +1229,32 @@
return Name;
}
+/// writeInstComputationInline - Emit the computation for the specified
+/// instruction inline, with no destination provided.
+void CWriter::writeInstComputationInline(Instruction &I) {
+ // If this is a non-trivial bool computation, make sure to truncate down to
+ // a 1 bit value. This is important because we want "add i1 x, y" to return
+ // "0" when x and y are true, not "2" for example.
+ bool NeedBoolTrunc = false;
+ if (I.getType() == Type::Int1Ty && !isa<ICmpInst>(I) && !isa<FCmpInst>(I))
+ NeedBoolTrunc = true;
+
+ if (NeedBoolTrunc)
+ Out << "((";
+
+ visit(I);
+
+ if (NeedBoolTrunc)
+ Out << ")&1)";
+}
+
+
void CWriter::writeOperandInternal(Value *Operand) {
if (Instruction *I = dyn_cast<Instruction>(Operand))
+ // Should we inline this instruction to build a tree?
if (isInlinableInst(*I) && !isDirectAlloca(I)) {
- // Should we inline this instruction to build a tree?
Out << '(';
- visit(*I);
+ writeInstComputationInline(*I);
Out << ')';
return;
}
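[Editor's sketch of why writeInstComputationInline above wraps non-trivial i1 expressions in ((...)&1): with i1 values stored in a wider C integer, "add i1 true, true" would otherwise print as 2 instead of 0.]

// Sketch of the emitted pattern, not verbatim CBackend output.
static unsigned char add_i1_sketch(unsigned char x, unsigned char y) {
  // naive:   x + y        -> 2 when both are true (not a valid i1 value)
  // emitted: ((x + y)&1)  -> 0, matching LLVM's wrap-around i1 add
  return ((x + y) & 1);
}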
@@ -1382,7 +1415,7 @@
<< "extern void *__builtin_alloca(unsigned int);\n"
<< "#endif\n"
<< "#define alloca(x) __builtin_alloca(x)\n"
- << "#elif defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)\n"
+ << "#elif defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__)\n"
<< "#define alloca(x) __builtin_alloca(x)\n"
<< "#elif defined(_MSC_VER)\n"
<< "#define inline _inline\n"
@@ -1484,12 +1517,10 @@
// Output typedefs for 128-bit integers. If these are needed with a
// 32-bit target or with a C compiler that doesn't support mode(TI),
// more drastic measures will be needed.
- if (TD->getPointerSize() >= 8) {
- Out << "#ifdef __GNUC__ /* 128-bit integer types */\n"
- << "typedef int __attribute__((mode(TI))) llvmInt128;\n"
- << "typedef unsigned __attribute__((mode(TI))) llvmUInt128;\n"
- << "#endif\n\n";
- }
+ Out << "#if __GNUC__ && __LP64__ /* 128-bit integer types */\n"
+ << "typedef int __attribute__((mode(TI))) llvmInt128;\n"
+ << "typedef unsigned __attribute__((mode(TI))) llvmUInt128;\n"
+ << "#endif\n\n";
// Output target-specific code that should be inserted into main.
Out << "#define CODE_FOR_MAIN() /* Any target-specific code for main()*/\n";
@@ -1603,7 +1634,8 @@
for (Module::global_iterator I = M.global_begin(), E = M.global_end();
I != E; ++I) {
- if (I->hasExternalLinkage() || I->hasExternalWeakLinkage())
+ if (I->hasExternalLinkage() || I->hasExternalWeakLinkage() ||
+ I->hasCommonLinkage())
Out << "extern ";
else if (I->hasDLLImportLinkage())
Out << "__declspec(dllimport) ";
@@ -1677,6 +1709,8 @@
if (I->hasLinkOnceLinkage())
Out << " __attribute__((common))";
+ else if (I->hasCommonLinkage()) // FIXME is this right?
+ Out << " __ATTRIBUTE_WEAK__";
else if (I->hasWeakLinkage())
Out << " __ATTRIBUTE_WEAK__";
else if (I->hasExternalWeakLinkage())
@@ -1714,6 +1748,8 @@
Out << " __attribute__((common))";
else if (I->hasWeakLinkage())
Out << " __ATTRIBUTE_WEAK__";
+ else if (I->hasCommonLinkage())
+ Out << " __ATTRIBUTE_WEAK__";
if (I->hasHiddenVisibility())
Out << " __HIDDEN__";
@@ -1723,6 +1759,7 @@
// this, however, occurs when the variable has weak linkage. In this
// case, the assembler will complain about the variable being both weak
// and common, so we disable this optimization.
+ // FIXME common linkage should avoid this problem.
if (!I->getInitializer()->isNullValue()) {
Out << " = " ;
writeOperand(I->getInitializer());
@@ -1732,9 +1769,12 @@
// the compiler figure out the rest of the zeros.
Out << " = " ;
if (isa<StructType>(I->getInitializer()->getType()) ||
- isa<ArrayType>(I->getInitializer()->getType()) ||
isa<VectorType>(I->getInitializer()->getType())) {
Out << "{ 0 }";
+ } else if (isa<ArrayType>(I->getInitializer()->getType())) {
+ // As with structs and vectors, but with an extra set of braces
+ // because arrays are wrapped in structs.
+ Out << "{ { 0 } }";
} else {
// Just print it out normally.
writeOperand(I->getInitializer());
@@ -1816,7 +1856,7 @@
Out << "static const ConstantFP80Ty FPConstant" << FPCounter++
<< " = { 0x" << std::hex
<< ((uint16_t)p[1] | (p[0] & 0xffffffffffffLL)<<16)
- << ", 0x" << (uint16_t)(p[0] >> 48) << ",0,0,0"
+ << "ULL, 0x" << (uint16_t)(p[0] >> 48) << ",{0,0,0}"
<< "}; /* Long double constant */\n" << std::dec;
} else if (FPC->getType() == Type::PPC_FP128Ty) {
APInt api = FPC->getValueAPF().convertToAPInt();
@@ -1876,16 +1916,16 @@
Out << '\n';
// Keep track of which structures have been printed so far...
- std::set<const StructType *> StructPrinted;
+ std::set<const Type *> StructPrinted;
// Loop over all structures then push them into the stack so they are
// printed in the correct order.
//
Out << "/* Structure contents */\n";
for (I = TST.begin(); I != End; ++I)
- if (const StructType *STy = dyn_cast<StructType>(I->second))
+ if (isa<StructType>(I->second) || isa<ArrayType>(I->second))
// Only print out used types!
- printContainedStructs(STy, StructPrinted);
+ printContainedStructs(I->second, StructPrinted);
}
// Push the struct onto the stack and recursively push all structs
@@ -1894,7 +1934,7 @@
// TODO: Make this work properly with vector types
//
void CWriter::printContainedStructs(const Type *Ty,
- std::set<const StructType*> &StructPrinted){
+ std::set<const Type*> &StructPrinted) {
// Don't walk through pointers.
if (isa<PointerType>(Ty) || Ty->isPrimitiveType() || Ty->isInteger()) return;
@@ -1903,12 +1943,12 @@
E = Ty->subtype_end(); I != E; ++I)
printContainedStructs(*I, StructPrinted);
- if (const StructType *STy = dyn_cast<StructType>(Ty)) {
+ if (isa<StructType>(Ty) || isa<ArrayType>(Ty)) {
// Check to see if we have already printed this struct.
- if (StructPrinted.insert(STy).second) {
+ if (StructPrinted.insert(Ty).second) {
// Print structure type out.
- std::string Name = TypeNames[STy];
- printType(Out, STy, false, Name, true);
+ std::string Name = TypeNames[Ty];
+ printType(Out, Ty, false, Name, true);
Out << ";\n\n";
}
}
@@ -2145,12 +2185,12 @@
outputLValue(II);
else
Out << " ";
- visit(*II);
+ writeInstComputationInline(*II);
Out << ";\n";
}
}
- // Don't emit prefix or suffix for the terminator...
+ // Don't emit prefix or suffix for the terminator.
visit(*BB->getTerminator());
}
@@ -2174,6 +2214,24 @@
return;
}
+ if (I.getNumOperands() > 1) {
+ Out << " {\n";
+ Out << " ";
+ printType(Out, I.getParent()->getParent()->getReturnType());
+ Out << " llvm_cbe_mrv_temp = {\n";
+ for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
+ Out << " ";
+ writeOperand(I.getOperand(i));
+ if (i != e - 1)
+ Out << ",";
+ Out << "\n";
+ }
+ Out << " };\n";
+ Out << " return llvm_cbe_mrv_temp;\n";
+ Out << " }\n";
+ return;
+ }
+
Out << " return";
if (I.getNumOperands()) {
Out << ' ';
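[Editor's sketch: roughly the C the new multiple-return-value branch above would print for a two-value return. The struct tag and argument names are illustrative; only llvm_cbe_mrv_temp comes from the code.]

// Hypothetical output shape for "ret" of two i32 values.
struct l_mrv_sketch { unsigned int field0; unsigned int field1; };

l_mrv_sketch two_results(unsigned int a, unsigned int b) {
  {
    l_mrv_sketch llvm_cbe_mrv_temp = {
      a,
      b
    };
    return llvm_cbe_mrv_temp;
  }
}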
@@ -2456,29 +2514,34 @@
void CWriter::visitCastInst(CastInst &I) {
const Type *DstTy = I.getType();
const Type *SrcTy = I.getOperand(0)->getType();
- Out << '(';
if (isFPIntBitCast(I)) {
+ Out << '(';
// These int<->float and long<->double casts need to be handled specially
Out << GetValueName(&I) << "__BITCAST_TEMPORARY."
<< getFloatBitCastField(I.getOperand(0)->getType()) << " = ";
writeOperand(I.getOperand(0));
Out << ", " << GetValueName(&I) << "__BITCAST_TEMPORARY."
<< getFloatBitCastField(I.getType());
- } else {
- printCast(I.getOpcode(), SrcTy, DstTy);
- if (I.getOpcode() == Instruction::SExt && SrcTy == Type::Int1Ty) {
- // Make sure we really get a sext from bool by subtracing the bool from 0
- Out << "0-";
- }
- writeOperand(I.getOperand(0));
- if (DstTy == Type::Int1Ty &&
- (I.getOpcode() == Instruction::Trunc ||
- I.getOpcode() == Instruction::FPToUI ||
- I.getOpcode() == Instruction::FPToSI ||
- I.getOpcode() == Instruction::PtrToInt)) {
- // Make sure we really get a trunc to bool by anding the operand with 1
- Out << "&1u";
- }
+ Out << ')';
+ return;
+ }
+
+ Out << '(';
+ printCast(I.getOpcode(), SrcTy, DstTy);
+
+ // Make a sext from i1 work by subtracting the i1 from 0 (an int).
+ if (SrcTy == Type::Int1Ty && I.getOpcode() == Instruction::SExt)
+ Out << "0-";
+
+ writeOperand(I.getOperand(0));
+
+ if (DstTy == Type::Int1Ty &&
+ (I.getOpcode() == Instruction::Trunc ||
+ I.getOpcode() == Instruction::FPToUI ||
+ I.getOpcode() == Instruction::FPToSI ||
+ I.getOpcode() == Instruction::PtrToInt)) {
+ // Make sure we really get a trunc to bool by anding the operand with 1
+ Out << "&1u";
}
Out << ')';
}
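[Editor's sketch of the two special i1 cast cases handled above: sign-extension from bool via "0-", and truncation to bool via "&1u".]

// Sketches of the emitted expressions, not verbatim output.
static int sext_from_i1(unsigned char b) {
  return 0 - (int)b;                  // 1 -> -1, 0 -> 0 (sext from i1)
}
static unsigned char trunc_to_i1(int x) {
  return ((unsigned char)x) & 1u;     // keep only the low bit
}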
@@ -2573,11 +2636,8 @@
}
void CWriter::visitCallInst(CallInst &I) {
- //check if we have inline asm
- if (isInlineAsm(I)) {
- visitInlineAsm(I);
- return;
- }
+ if (isa<InlineAsm>(I.getOperand(0)))
+ return visitInlineAsm(I);
bool WroteCallee = false;
@@ -2894,66 +2954,114 @@
void CWriter::visitInlineAsm(CallInst &CI) {
InlineAsm* as = cast<InlineAsm>(CI.getOperand(0));
std::vector<InlineAsm::ConstraintInfo> Constraints = as->ParseConstraints();
- std::vector<std::pair<std::string, Value*> > Input;
- std::vector<std::pair<std::string, Value*> > Output;
- std::string Clobber;
- int count = CI.getType() == Type::VoidTy ? 1 : 0;
+
+ std::vector<std::pair<Value*, int> > ResultVals;
+ if (CI.getType() == Type::VoidTy)
+ ;
+ else if (const StructType *ST = dyn_cast<StructType>(CI.getType())) {
+ for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i)
+ ResultVals.push_back(std::make_pair(&CI, (int)i));
+ } else {
+ ResultVals.push_back(std::make_pair(&CI, -1));
+ }
+
+ // Fix up the asm string for gcc and emit it.
+ Out << "__asm__ volatile (\"" << gccifyAsm(as->getAsmString()) << "\"\n";
+ Out << " :";
+
+ unsigned ValueCount = 0;
+ bool IsFirst = true;
+
+ // Convert over all the output constraints.
for (std::vector<InlineAsm::ConstraintInfo>::iterator I = Constraints.begin(),
- E = Constraints.end(); I != E; ++I) {
- assert(I->Codes.size() == 1 && "Too many asm constraint codes to handle");
- std::string c =
- InterpretASMConstraint(*I);
- switch(I->Type) {
- default:
- assert(0 && "Unknown asm constraint");
- break;
- case InlineAsm::isInput: {
- if (c.size()) {
- Input.push_back(std::make_pair(c, count ? CI.getOperand(count) : &CI));
- ++count; //consume arg
- }
- break;
- }
- case InlineAsm::isOutput: {
- if (c.size()) {
- Output.push_back(std::make_pair("="+((I->isEarlyClobber ? "&" : "")+c),
- count ? CI.getOperand(count) : &CI));
- ++count; //consume arg
- }
- break;
- }
- case InlineAsm::isClobber: {
- if (c.size())
- Clobber += ",\"" + c + "\"";
- break;
+ E = Constraints.end(); I != E; ++I) {
+
+ if (I->Type != InlineAsm::isOutput) {
+ ++ValueCount;
+ continue; // Ignore non-output constraints.
}
+
+ assert(I->Codes.size() == 1 && "Too many asm constraint codes to handle");
+ std::string C = InterpretASMConstraint(*I);
+ if (C.empty()) continue;
+
+ if (!IsFirst) {
+ Out << ", ";
+ IsFirst = false;
}
+
+ // Unpack the dest.
+ Value *DestVal;
+ int DestValNo = -1;
+
+ if (ValueCount < ResultVals.size()) {
+ DestVal = ResultVals[ValueCount].first;
+ DestValNo = ResultVals[ValueCount].second;
+ } else
+ DestVal = CI.getOperand(ValueCount-ResultVals.size()+1);
+
+ if (I->isEarlyClobber)
+ C = "&"+C;
+
+ Out << "\"=" << C << "\"(" << GetValueName(DestVal);
+ if (DestValNo != -1)
+ Out << ".field" << DestValNo; // Multiple retvals.
+ Out << ")";
+ ++ValueCount;
}
- //fix up the asm string for gcc
- std::string asmstr = gccifyAsm(as->getAsmString());
- Out << "__asm__ volatile (\"" << asmstr << "\"\n";
- Out << " :";
- for (std::vector<std::pair<std::string, Value*> >::iterator I =Output.begin(),
- E = Output.end(); I != E; ++I) {
- Out << "\"" << I->first << "\"(";
- writeOperandRaw(I->second);
- Out << ")";
- if (I + 1 != E)
- Out << ",";
- }
+ // Convert over all the input constraints.
Out << "\n :";
- for (std::vector<std::pair<std::string, Value*> >::iterator I = Input.begin(),
- E = Input.end(); I != E; ++I) {
- Out << "\"" << I->first << "\"(";
- writeOperandRaw(I->second);
+ IsFirst = true;
+ ValueCount = 0;
+ for (std::vector<InlineAsm::ConstraintInfo>::iterator I = Constraints.begin(),
+ E = Constraints.end(); I != E; ++I) {
+ if (I->Type != InlineAsm::isInput) {
+ ++ValueCount;
+ continue; // Ignore non-input constraints.
+ }
+
+ assert(I->Codes.size() == 1 && "Too many asm constraint codes to handle");
+ std::string C = InterpretASMConstraint(*I);
+ if (C.empty()) continue;
+
+ if (!IsFirst) {
+ Out << ", ";
+ IsFirst = false;
+ }
+
+ assert(ValueCount >= ResultVals.size() && "Input can't refer to result");
+ Value *SrcVal = CI.getOperand(ValueCount-ResultVals.size()+1);
+
+ Out << "\"" << C << "\"(";
+ if (!I->isIndirect)
+ writeOperand(SrcVal);
+ else
+ writeOperandDeref(SrcVal);
Out << ")";
- if (I + 1 != E)
- Out << ",";
}
- if (Clobber.size())
- Out << "\n :" << Clobber.substr(1);
+
+ // Convert over the clobber constraints.
+ IsFirst = true;
+ ValueCount = 0;
+ for (std::vector<InlineAsm::ConstraintInfo>::iterator I = Constraints.begin(),
+ E = Constraints.end(); I != E; ++I) {
+ if (I->Type != InlineAsm::isClobber)
+ continue; // Ignore non-clobber constraints.
+
+ assert(I->Codes.size() == 1 && "Too many asm constraint codes to handle");
+ std::string C = InterpretASMConstraint(*I);
+ if (C.empty()) continue;
+
+ if (!IsFirst) {
+ Out << ", ";
+ IsFirst = false;
+ }
+
+ Out << '\"' << C << '"';
+ }
+
Out << ")";
}
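[Editor's sketch: the three constraint loops above produce GCC extended asm of roughly this shape. The operand names and the asm template string are made up for illustration.]

static void inline_asm_shape_sketch() {
  int llvm_cbe_src = 42, llvm_cbe_result;
  __asm__ volatile ("mov %0, %1"              // gccified template string
                    : "=r"(llvm_cbe_result)   // outputs (first loop)
                    : "r"(llvm_cbe_src)       // inputs  (second loop)
                    : "memory");              // clobbers (third loop)
}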
@@ -3039,6 +3147,10 @@
for (; I != E; ++I) {
if (isa<StructType>(*I)) {
Out << ".field" << cast<ConstantInt>(I.getOperand())->getZExtValue();
+ } else if (isa<ArrayType>(*I)) {
+ Out << ".array[";
+ writeOperandWithCast(I.getOperand(), Instruction::GetElementPtr);
+ Out << ']';
} else if (!isa<VectorType>(*I)) {
Out << '[';
writeOperandWithCast(I.getOperand(), Instruction::GetElementPtr);
@@ -3173,17 +3285,70 @@
Out << "((";
printType(Out, PointerType::getUnqual(EltTy));
Out << ")(&" << GetValueName(Op)
- << "))[" << (SrcVal & NumElts-1) << "]";
+ << "))[" << (SrcVal & (NumElts-1)) << "]";
} else if (isa<ConstantAggregateZero>(Op) || isa<UndefValue>(Op)) {
Out << "0";
} else {
- printConstant(cast<ConstantVector>(Op)->getOperand(SrcVal & NumElts-1));
+ printConstant(cast<ConstantVector>(Op)->getOperand(SrcVal &
+ (NumElts-1)));
}
}
}
Out << "}";
}
+void CWriter::visitGetResultInst(GetResultInst &GRI) {
+ Out << "(";
+ if (isa<UndefValue>(GRI.getOperand(0))) {
+ Out << "(";
+ printType(Out, GRI.getType());
+ Out << ") 0/*UNDEF*/";
+ } else {
+ Out << GetValueName(GRI.getOperand(0)) << ".field" << GRI.getIndex();
+ }
+ Out << ")";
+}
+
+void CWriter::visitInsertValueInst(InsertValueInst &IVI) {
+ // Start by copying the entire aggregate value into the result variable.
+ writeOperand(IVI.getOperand(0));
+ Out << ";\n ";
+
+ // Then do the insert to update the field.
+ Out << GetValueName(&IVI);
+ for (const unsigned *b = IVI.idx_begin(), *i = b, *e = IVI.idx_end();
+ i != e; ++i) {
+ const Type *IndexedTy =
+ ExtractValueInst::getIndexedType(IVI.getOperand(0)->getType(), b, i+1);
+ if (isa<ArrayType>(IndexedTy))
+ Out << ".array[" << *i << "]";
+ else
+ Out << ".field" << *i;
+ }
+ Out << " = ";
+ writeOperand(IVI.getOperand(1));
+}
+
+void CWriter::visitExtractValueInst(ExtractValueInst &EVI) {
+ Out << "(";
+ if (isa<UndefValue>(EVI.getOperand(0))) {
+ Out << "(";
+ printType(Out, EVI.getType());
+ Out << ") 0/*UNDEF*/";
+ } else {
+ Out << GetValueName(EVI.getOperand(0));
+ for (const unsigned *b = EVI.idx_begin(), *i = b, *e = EVI.idx_end();
+ i != e; ++i) {
+ const Type *IndexedTy =
+ ExtractValueInst::getIndexedType(EVI.getOperand(0)->getType(), b, i+1);
+ if (isa<ArrayType>(IndexedTy))
+ Out << ".array[" << *i << "]";
+ else
+ Out << ".field" << *i;
+ }
+ }
+ Out << ")";
+}
//===----------------------------------------------------------------------===//
// External Interface declaration
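
The new visitInsertValueInst/visitExtractValueInst handlers translate aggregate updates and reads into plain struct member accesses. A hedged sketch of what the generated C looks like for a two-field struct (the llvm_cbe_ prefix and field numbering follow the backend's usual naming, but the exact output is illustrative):

  #include <cstdio>

  struct l_pair { unsigned field0; float field1; };

  int main() {
    l_pair llvm_cbe_agg = {1u, 2.0f};
    l_pair llvm_cbe_r;

    // %r = insertvalue {i32, float} %agg, i32 7, 0   becomes roughly:
    llvm_cbe_r = llvm_cbe_agg;
    llvm_cbe_r.field0 = 7u;

    // %x = extractvalue {i32, float} %r, 1           becomes roughly:
    float llvm_cbe_x = (llvm_cbe_r.field1);

    std::printf("%u %f\n", llvm_cbe_r.field0, llvm_cbe_x);
    return 0;
  }
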
Modified: llvm/branches/non-call-eh/lib/Target/CellSPU/SPUAsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/CellSPU/SPUAsmPrinter.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/CellSPU/SPUAsmPrinter.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/CellSPU/SPUAsmPrinter.cpp Sun Jul 6 15:45:41 2008
@@ -340,7 +340,7 @@
// stubs
if (TM.getRelocationModel() != Reloc::Static) {
if (((GV->isDeclaration() || GV->hasWeakLinkage() ||
- GV->hasLinkOnceLinkage()))) {
+ GV->hasLinkOnceLinkage() || GV->hasCommonLinkage()))) {
GVStubs.insert(Name);
O << "L" << Name << "$non_lazy_ptr";
return;
@@ -510,7 +510,7 @@
if (C->isNullValue() && /* FIXME: Verify correct */
(I->hasInternalLinkage() || I->hasWeakLinkage() ||
- I->hasLinkOnceLinkage() ||
+ I->hasLinkOnceLinkage() || I->hasCommonLinkage() ||
(I->hasExternalLinkage() && !I->hasSection()))) {
if (Size == 0) Size = 1; // .comm Foo, 0 is undefined, avoid it.
if (I->hasExternalLinkage()) {
@@ -537,6 +537,7 @@
switch (I->getLinkage()) {
case GlobalValue::LinkOnceLinkage:
case GlobalValue::WeakLinkage:
+ case GlobalValue::CommonLinkage:
O << "\t.global " << name << '\n'
<< "\t.weak_definition " << name << '\n';
SwitchToDataSection(".section __DATA,__datacoal_nt,coalesced", I);
Modified: llvm/branches/non-call-eh/lib/Target/CellSPU/SPUFrameInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/CellSPU/SPUFrameInfo.h?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/CellSPU/SPUFrameInfo.h (original)
+++ llvm/branches/non-call-eh/lib/Target/CellSPU/SPUFrameInfo.h Sun Jul 6 15:45:41 2008
@@ -57,6 +57,10 @@
static int fullSpillSize() {
return (SPURegisterInfo::getNumArgRegs() * stackSlotSize());
}
+ //! Convert frame index to stack offset
+ static int FItoStackOffset(int frame_index) {
+ return frame_index * stackSlotSize();
+ }
//! Number of instructions required to overcome hint-for-branch latency
/*!
HBR (hint-for-branch) instructions can be inserted when, for example,
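
FItoStackOffset simply scales the abstract frame index by the stack slot size, so later address-selection code can compare byte offsets instead of raw indices. A standalone model (the 16-byte slot size matches SPU's quadword registers, but treat it as an assumption here):

  #include <cassert>

  static int stackSlotSize()            { return 16; }                    // assumed
  static int FItoStackOffset(int Index) { return Index * stackSlotSize(); }

  int main() {
    assert(FItoStackOffset(0) == 0);
    assert(FItoStackOffset(3) == 48);   // frame index 3 -> byte offset 48
    return 0;
  }
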
Modified: llvm/branches/non-call-eh/lib/Target/CellSPU/SPUISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/CellSPU/SPUISelDAGToDAG.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/CellSPU/SPUISelDAGToDAG.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/CellSPU/SPUISelDAGToDAG.cpp Sun Jul 6 15:45:41 2008
@@ -17,6 +17,7 @@
#include "SPUISelLowering.h"
#include "SPUHazardRecognizers.h"
#include "SPUFrameInfo.h"
+#include "SPURegisterNames.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -30,7 +31,6 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Compiler.h"
-#include <iostream>
#include <queue>
#include <set>
@@ -110,9 +110,9 @@
bool
isIntS16Immediate(ConstantSDNode *CN, short &Imm)
{
- MVT::ValueType vt = CN->getValueType(0);
+ MVT vt = CN->getValueType(0);
Imm = (short) CN->getValue();
- if (vt >= MVT::i1 && vt <= MVT::i16) {
+ if (vt.getSimpleVT() >= MVT::i1 && vt.getSimpleVT() <= MVT::i16) {
return true;
} else if (vt == MVT::i32) {
int32_t i_val = (int32_t) CN->getValue();
@@ -139,7 +139,7 @@
static bool
isFPS16Immediate(ConstantFPSDNode *FPN, short &Imm)
{
- MVT::ValueType vt = FPN->getValueType(0);
+ MVT vt = FPN->getValueType(0);
if (vt == MVT::f32) {
int val = FloatToBits(FPN->getValueAPF().convertToFloat());
int sval = (int) ((val << 16) >> 16);
@@ -161,10 +161,10 @@
}
//===------------------------------------------------------------------===//
- //! MVT::ValueType to "useful stuff" mapping structure:
+ //! MVT to "useful stuff" mapping structure:
struct valtype_map_s {
- MVT::ValueType VT;
+ MVT VT;
unsigned ldresult_ins; /// LDRESULT instruction (0 = undefined)
bool ldresult_imm; /// LDRESULT instruction requires immediate?
int prefslot_byte; /// Byte offset of the "preferred" slot
@@ -189,7 +189,7 @@
const size_t n_valtype_map = sizeof(valtype_map) / sizeof(valtype_map[0]);
- const valtype_map_s *getValueTypeMapEntry(MVT::ValueType VT)
+ const valtype_map_s *getValueTypeMapEntry(MVT VT)
{
const valtype_map_s *retval = 0;
for (size_t i = 0; i < n_valtype_map; ++i) {
@@ -203,7 +203,7 @@
#ifndef NDEBUG
if (retval == 0) {
cerr << "SPUISelDAGToDAG.cpp: getValueTypeMapEntry returns NULL for "
- << MVT::getValueTypeString(VT)
+ << VT.getMVTString()
<< "\n";
abort();
}
@@ -213,6 +213,8 @@
}
}
+namespace {
+
//===--------------------------------------------------------------------===//
/// SPUDAGToDAGISel - Cell SPU-specific code to select SPU machine
/// instructions for SelectionDAG operations.
@@ -315,9 +317,9 @@
return false;
}
- /// InstructionSelectBasicBlock - This callback is invoked by
+ /// InstructionSelect - This callback is invoked by
/// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
- virtual void InstructionSelectBasicBlock(SelectionDAG &DAG);
+ virtual void InstructionSelect(SelectionDAG &DAG);
virtual const char *getPassName() const {
return "Cell SPU DAG->DAG Pattern Instruction Selection";
@@ -335,19 +337,18 @@
#include "SPUGenDAGISel.inc"
};
-/// InstructionSelectBasicBlock - This callback is invoked by
+}
+
+/// InstructionSelect - This callback is invoked by
/// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
void
-SPUDAGToDAGISel::InstructionSelectBasicBlock(SelectionDAG &DAG)
+SPUDAGToDAGISel::InstructionSelect(SelectionDAG &DAG)
{
DEBUG(BB->dump());
// Select target instructions for the DAG.
DAG.setRoot(SelectRoot(DAG.getRoot()));
DAG.RemoveDeadNodes();
-
- // Emit machine code to BB.
- ScheduleAndEmitDAG(DAG);
}
/*!
@@ -360,7 +361,7 @@
SPUDAGToDAGISel::SelectAFormAddr(SDOperand Op, SDOperand N, SDOperand &Base,
SDOperand &Index) {
// These match the addr256k operand type:
- MVT::ValueType OffsVT = MVT::i16;
+ MVT OffsVT = MVT::i16;
SDOperand Zero = CurDAG->getTargetConstant(0, OffsVT);
switch (N.getOpcode()) {
@@ -411,7 +412,10 @@
bool
SPUDAGToDAGISel::SelectDForm2Addr(SDOperand Op, SDOperand N, SDOperand &Disp,
SDOperand &Base) {
- return DFormAddressPredicate(Op, N, Disp, Base, -(1 << 7), (1 << 7) - 1);
+ const int minDForm2Offset = -(1 << 7);
+ const int maxDForm2Offset = (1 << 7) - 1;
+ return DFormAddressPredicate(Op, N, Disp, Base, minDForm2Offset,
+ maxDForm2Offset);
}
/*!
@@ -439,16 +443,17 @@
SDOperand &Index, int minOffset,
int maxOffset) {
unsigned Opc = N.getOpcode();
- unsigned PtrTy = SPUtli.getPointerTy();
+ MVT PtrTy = SPUtli.getPointerTy();
if (Opc == ISD::FrameIndex) {
// Stack frame index must be less than 512 (divided by 16):
- FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N);
+ FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(N);
+ int FI = int(FIN->getIndex());
DEBUG(cerr << "SelectDFormAddr: ISD::FrameIndex = "
- << FI->getIndex() << "\n");
- if (FI->getIndex() < maxOffset) {
+ << FI << "\n");
+ if (SPUFrameInfo::FItoStackOffset(FI) < maxOffset) {
Base = CurDAG->getTargetConstant(0, PtrTy);
- Index = CurDAG->getTargetFrameIndex(FI->getIndex(), PtrTy);
+ Index = CurDAG->getTargetFrameIndex(FI, PtrTy);
return true;
}
} else if (Opc == ISD::ADD) {
@@ -467,13 +472,14 @@
int32_t offset = int32_t(CN->getSignExtended());
if (Op0.getOpcode() == ISD::FrameIndex) {
- FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op0);
+ FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Op0);
+ int FI = int(FIN->getIndex());
DEBUG(cerr << "SelectDFormAddr: ISD::ADD offset = " << offset
- << " frame index = " << FI->getIndex() << "\n");
+ << " frame index = " << FI << "\n");
- if (FI->getIndex() < maxOffset) {
+ if (SPUFrameInfo::FItoStackOffset(FI) < maxOffset) {
Base = CurDAG->getTargetConstant(offset, PtrTy);
- Index = CurDAG->getTargetFrameIndex(FI->getIndex(), PtrTy);
+ Index = CurDAG->getTargetFrameIndex(FI, PtrTy);
return true;
}
} else if (offset > minOffset && offset < maxOffset) {
@@ -487,13 +493,14 @@
int32_t offset = int32_t(CN->getSignExtended());
if (Op1.getOpcode() == ISD::FrameIndex) {
- FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op1);
+ FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Op1);
+ int FI = int(FIN->getIndex());
DEBUG(cerr << "SelectDFormAddr: ISD::ADD offset = " << offset
- << " frame index = " << FI->getIndex() << "\n");
+ << " frame index = " << FI << "\n");
- if (FI->getIndex() < maxOffset) {
+ if (SPUFrameInfo::FItoStackOffset(FI) < maxOffset) {
Base = CurDAG->getTargetConstant(offset, PtrTy);
- Index = CurDAG->getTargetFrameIndex(FI->getIndex(), PtrTy);
+ Index = CurDAG->getTargetFrameIndex(FI, PtrTy);
return true;
}
} else if (offset > minOffset && offset < maxOffset) {
@@ -577,23 +584,37 @@
unsigned Opc = N->getOpcode();
int n_ops = -1;
unsigned NewOpc;
- MVT::ValueType OpVT = Op.getValueType();
+ MVT OpVT = Op.getValueType();
SDOperand Ops[8];
if (Opc >= ISD::BUILTIN_OP_END && Opc < SPUISD::FIRST_NUMBER) {
return NULL; // Already selected.
} else if (Opc == ISD::FrameIndex) {
- // Selects to AIr32 FI, 0 which in turn will become AIr32 SP, imm.
- int FI = cast<FrameIndexSDNode>(N)->getIndex();
- MVT::ValueType PtrVT = SPUtli.getPointerTy();
- SDOperand Zero = CurDAG->getTargetConstant(0, PtrVT);
- SDOperand TFI = CurDAG->getTargetFrameIndex(FI, PtrVT);
-
- DEBUG(cerr << "SPUDAGToDAGISel: Replacing FrameIndex with AI32 <FI>, 0\n");
- NewOpc = SPU::AIr32;
- Ops[0] = TFI;
- Ops[1] = Zero;
- n_ops = 2;
+ // Selects to (add $sp, FI * stackSlotSize)
+ int FI =
+ SPUFrameInfo::FItoStackOffset(cast<FrameIndexSDNode>(N)->getIndex());
+ MVT PtrVT = SPUtli.getPointerTy();
+
+ // Adjust stack slot to actual offset in frame:
+ if (isS10Constant(FI)) {
+ DEBUG(cerr << "SPUDAGToDAGISel: Replacing FrameIndex with AIr32 $sp, "
+ << FI
+ << "\n");
+ NewOpc = SPU::AIr32;
+ Ops[0] = CurDAG->getRegister(SPU::R1, PtrVT);
+ Ops[1] = CurDAG->getTargetConstant(FI, PtrVT);
+ n_ops = 2;
+ } else {
+ DEBUG(cerr << "SPUDAGToDAGISel: Replacing FrameIndex with Ar32 $sp, "
+ << FI
+ << "\n");
+ NewOpc = SPU::Ar32;
+ Ops[0] = CurDAG->getRegister(SPU::R1, PtrVT);
+ Ops[1] = CurDAG->getConstant(FI, PtrVT);
+ n_ops = 2;
+
+ AddToISelQueue(Ops[1]);
+ }
} else if (Opc == ISD::ZERO_EXTEND) {
// (zero_extend:i16 (and:i8 <arg>, <const>))
const SDOperand &Op1 = N->getOperand(0);
@@ -612,7 +633,7 @@
}
} else if (Opc == SPUISD::LDRESULT) {
// Custom select instructions for LDRESULT
- unsigned VT = N->getValueType(0);
+ MVT VT = N->getValueType(0);
SDOperand Arg = N->getOperand(0);
SDOperand Chain = N->getOperand(1);
SDNode *Result;
@@ -620,7 +641,7 @@
if (vtm->ldresult_ins == 0) {
cerr << "LDRESULT for unsupported type: "
- << MVT::getValueTypeString(VT)
+ << VT.getMVTString()
<< "\n";
abort();
}
@@ -646,7 +667,7 @@
/* || Op0.getOpcode() == SPUISD::AFormAddr) */
// (IndirectAddr (LDRESULT, imm))
SDOperand Op1 = Op.getOperand(1);
- MVT::ValueType VT = Op.getValueType();
+ MVT VT = Op.getValueType();
DEBUG(cerr << "CellSPU: IndirectAddr(LDRESULT, imm):\nOp0 = ");
DEBUG(Op.getOperand(0).Val->dump(CurDAG));
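
With the frame index now converted to a byte offset up front, the selector picks AIr32 (add immediate to $sp) when the offset fits the signed 10-bit immediate field, and otherwise falls back to Ar32 with the offset materialized in a register. A rough standalone model of that decision (the 16-byte slot size and the s10 range are assumptions mirroring the code above):

  #include <cstdint>
  #include <cstdio>

  static bool isS10Constant(int64_t v) { return v >= -512 && v <= 511; }

  int main() {
    const int FIs[] = {2, 100};
    for (int FI : FIs) {
      int Off = FI * 16;                       // FItoStackOffset
      if (isS10Constant(Off))
        std::printf("FI %d -> AIr32 $sp, %d\n", FI, Off);
      else
        std::printf("FI %d -> Ar32 $sp, <reg holding %d>\n", FI, Off);
    }
    return 0;
  }
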
Modified: llvm/branches/non-call-eh/lib/Target/CellSPU/SPUISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/CellSPU/SPUISelLowering.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/CellSPU/SPUISelLowering.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/CellSPU/SPUISelLowering.cpp Sun Jul 6 15:45:41 2008
@@ -14,8 +14,8 @@
#include "SPURegisterNames.h"
#include "SPUISelLowering.h"
#include "SPUTargetMachine.h"
+#include "SPUFrameInfo.h"
#include "llvm/ADT/VectorExtras.h"
-#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -37,9 +37,9 @@
namespace {
std::map<unsigned, const char *> node_names;
- //! MVT::ValueType mapping to useful data for Cell SPU
+ //! MVT mapping to useful data for Cell SPU
struct valtype_map_s {
- const MVT::ValueType valtype;
+ const MVT valtype;
const int prefslot_byte;
};
@@ -56,7 +56,7 @@
const size_t n_valtype_map = sizeof(valtype_map) / sizeof(valtype_map[0]);
- const valtype_map_s *getValueTypeMapEntry(MVT::ValueType VT) {
+ const valtype_map_s *getValueTypeMapEntry(MVT VT) {
const valtype_map_s *retval = 0;
for (size_t i = 0; i < n_valtype_map; ++i) {
@@ -69,7 +69,7 @@
#ifndef NDEBUG
if (retval == 0) {
cerr << "getValueTypeMapEntry returns NULL for "
- << MVT::getValueTypeString(VT)
+ << VT.getMVTString()
<< "\n";
abort();
}
@@ -161,8 +161,10 @@
// SPU's loads and stores have to be custom lowered:
for (unsigned sctype = (unsigned) MVT::i1; sctype < (unsigned) MVT::f128;
++sctype) {
- setOperationAction(ISD::LOAD, sctype, Custom);
- setOperationAction(ISD::STORE, sctype, Custom);
+ MVT VT = (MVT::SimpleValueType)sctype;
+
+ setOperationAction(ISD::LOAD, VT, Custom);
+ setOperationAction(ISD::STORE, VT, Custom);
}
// Custom lower BRCOND for i1, i8 to "promote" the result to
@@ -218,8 +220,10 @@
// Custom lower i32 multiplications
setOperationAction(ISD::MUL, MVT::i32, Custom);
- // Need to custom handle (some) common i8 math ops
+ // Need to custom handle (some) common i8, i64 math ops
+ setOperationAction(ISD::ADD, MVT::i64, Custom);
setOperationAction(ISD::SUB, MVT::i8, Custom);
+ setOperationAction(ISD::SUB, MVT::i64, Custom);
setOperationAction(ISD::MUL, MVT::i8, Custom);
// SPU does not have BSWAP. It does have i32 support CTLZ.
@@ -237,7 +241,7 @@
setOperationAction(ISD::CTLZ , MVT::i32, Legal);
- // SPU has a version of select that implements (a&~c)|(b|c), just like
+ // SPU has a version of select that implements (a&~c)|(b&c), just like
// select ought to work:
setOperationAction(ISD::SELECT, MVT::i1, Promote);
setOperationAction(ISD::SELECT, MVT::i8, Legal);
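
The corrected comment describes what SELB actually computes: each result bit comes from the second operand where the mask bit is set and from the first operand where it is clear. A tiny scalar model of that select:

  #include <cassert>
  #include <cstdint>

  static uint32_t selb(uint32_t a, uint32_t b, uint32_t c) {
    return (a & ~c) | (b & c);   // mask c picks bits of b, ~c picks bits of a
  }

  int main() {
    assert(selb(0x00000000u, 0xffffffffu, 0x0f0f0f0fu) == 0x0f0f0f0fu);
    assert(selb(0xaaaaaaaau, 0x55555555u, 0xffff0000u) == 0x5555aaaau);
    return 0;
  }
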
@@ -286,16 +290,18 @@
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
// Support label based line numbers.
- setOperationAction(ISD::LOCATION, MVT::Other, Expand);
+ setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
// We want to legalize GlobalAddress and ConstantPool nodes into the
// appropriate instructions to materialize the address.
for (unsigned sctype = (unsigned) MVT::i1; sctype < (unsigned) MVT::f128;
++sctype) {
- setOperationAction(ISD::GlobalAddress, sctype, Custom);
- setOperationAction(ISD::ConstantPool, sctype, Custom);
- setOperationAction(ISD::JumpTable, sctype, Custom);
+ MVT VT = (MVT::SimpleValueType)sctype;
+
+ setOperationAction(ISD::GlobalAddress, VT, Custom);
+ setOperationAction(ISD::ConstantPool, VT, Custom);
+ setOperationAction(ISD::JumpTable, VT, Custom);
}
// RET must be custom lowered, to meet ABI requirements
@@ -332,36 +338,38 @@
addRegisterClass(MVT::v4f32, SPU::VECREGRegisterClass);
addRegisterClass(MVT::v2f64, SPU::VECREGRegisterClass);
- for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
- VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
+ for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
+ i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
+ MVT VT = (MVT::SimpleValueType)i;
+
// add/sub are legal for all supported vector VT's.
- setOperationAction(ISD::ADD , (MVT::ValueType)VT, Legal);
- setOperationAction(ISD::SUB , (MVT::ValueType)VT, Legal);
+ setOperationAction(ISD::ADD , VT, Legal);
+ setOperationAction(ISD::SUB , VT, Legal);
// mul has to be custom lowered.
- setOperationAction(ISD::MUL , (MVT::ValueType)VT, Custom);
+ setOperationAction(ISD::MUL , VT, Custom);
- setOperationAction(ISD::AND , (MVT::ValueType)VT, Legal);
- setOperationAction(ISD::OR , (MVT::ValueType)VT, Legal);
- setOperationAction(ISD::XOR , (MVT::ValueType)VT, Legal);
- setOperationAction(ISD::LOAD , (MVT::ValueType)VT, Legal);
- setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Legal);
- setOperationAction(ISD::STORE, (MVT::ValueType)VT, Legal);
+ setOperationAction(ISD::AND , VT, Legal);
+ setOperationAction(ISD::OR , VT, Legal);
+ setOperationAction(ISD::XOR , VT, Legal);
+ setOperationAction(ISD::LOAD , VT, Legal);
+ setOperationAction(ISD::SELECT, VT, Legal);
+ setOperationAction(ISD::STORE, VT, Legal);
// These operations need to be expanded:
- setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Custom);
+ setOperationAction(ISD::SDIV, VT, Expand);
+ setOperationAction(ISD::SREM, VT, Expand);
+ setOperationAction(ISD::UDIV, VT, Expand);
+ setOperationAction(ISD::UREM, VT, Expand);
+ setOperationAction(ISD::FDIV, VT, Custom);
// Custom lower build_vector, constant pool spills, insert and
// extract vector elements:
- setOperationAction(ISD::BUILD_VECTOR, (MVT::ValueType)VT, Custom);
- setOperationAction(ISD::ConstantPool, (MVT::ValueType)VT, Custom);
- setOperationAction(ISD::SCALAR_TO_VECTOR, (MVT::ValueType)VT, Custom);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Custom);
- setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Custom);
- setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Custom);
+ setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
+ setOperationAction(ISD::ConstantPool, VT, Custom);
+ setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
+ setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
+ setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
}
setOperationAction(ISD::MUL, MVT::v16i8, Custom);
@@ -426,8 +434,14 @@
node_names[(unsigned) SPUISD::ROTBYTES_LEFT] = "SPUISD::ROTBYTES_LEFT";
node_names[(unsigned) SPUISD::ROTBYTES_LEFT_CHAINED] =
"SPUISD::ROTBYTES_LEFT_CHAINED";
- node_names[(unsigned) SPUISD::FSMBI] = "SPUISD::FSMBI";
+ node_names[(unsigned) SPUISD::ROTBYTES_LEFT_BITS] =
+ "SPUISD::ROTBYTES_LEFT_BITS";
+ node_names[(unsigned) SPUISD::SELECT_MASK] = "SPUISD::SELECT_MASK";
node_names[(unsigned) SPUISD::SELB] = "SPUISD::SELB";
+ node_names[(unsigned) SPUISD::ADD_EXTENDED] = "SPUISD::ADD_EXTENDED";
+ node_names[(unsigned) SPUISD::CARRY_GENERATE] = "SPUISD::CARRY_GENERATE";
+ node_names[(unsigned) SPUISD::SUB_EXTENDED] = "SPUISD::SUB_EXTENDED";
+ node_names[(unsigned) SPUISD::BORROW_GENERATE] = "SPUISD::BORROW_GENERATE";
node_names[(unsigned) SPUISD::FPInterp] = "SPUISD::FPInterp";
node_names[(unsigned) SPUISD::FPRecipEst] = "SPUISD::FPRecipEst";
node_names[(unsigned) SPUISD::SEXT32TO64] = "SPUISD::SEXT32TO64";
@@ -438,10 +452,9 @@
return ((i != node_names.end()) ? i->second : 0);
}
-MVT::ValueType
-SPUTargetLowering::getSetCCResultType(const SDOperand &Op) const {
- MVT::ValueType VT = Op.getValueType();
- if (MVT::isInteger(VT))
+MVT SPUTargetLowering::getSetCCResultType(const SDOperand &Op) const {
+ MVT VT = Op.getValueType();
+ if (VT.isInteger())
return VT;
else
return MVT::i32;
@@ -481,9 +494,9 @@
AlignedLoad(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST,
LSBaseSDNode *LSN,
unsigned &alignment, int &alignOffs, int &prefSlotOffs,
- MVT::ValueType &VT, bool &was16aligned)
+ MVT &VT, bool &was16aligned)
{
- MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+ MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
const valtype_map_s *vtm = getValueTypeMapEntry(VT);
SDOperand basePtr = LSN->getBasePtr();
SDOperand chain = LSN->getChain();
@@ -514,6 +527,12 @@
alignOffs = 0;
prefSlotOffs = -vtm->prefslot_byte;
}
+ } else if (basePtr.getOpcode() == ISD::FrameIndex) {
+ FrameIndexSDNode *FIN = cast<FrameIndexSDNode>(basePtr);
+ alignOffs = int(FIN->getIndex() * SPUFrameInfo::stackSlotSize());
+ prefSlotOffs = (int) (alignOffs & 0xf);
+ prefSlotOffs -= vtm->prefslot_byte;
+ basePtr = DAG.getRegister(SPU::R1, VT);
} else {
alignOffs = 0;
prefSlotOffs = -vtm->prefslot_byte;
@@ -558,8 +577,8 @@
LowerLOAD(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
LoadSDNode *LN = cast<LoadSDNode>(Op);
SDOperand the_chain = LN->getChain();
- MVT::ValueType VT = LN->getMemoryVT();
- MVT::ValueType OpVT = Op.Val->getValueType(0);
+ MVT VT = LN->getMemoryVT();
+ MVT OpVT = Op.Val->getValueType(0);
ISD::LoadExtType ExtType = LN->getExtensionType();
unsigned alignment = LN->getAlignment();
SDOperand Ops[8];
@@ -586,7 +605,7 @@
if (was16aligned) {
Ops[2] = DAG.getConstant(rotamt, MVT::i16);
} else {
- MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+ MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
LoadSDNode *LN1 = cast<LoadSDNode>(result);
Ops[2] = DAG.getNode(ISD::ADD, PtrVT, LN1->getBasePtr(),
DAG.getConstant(rotamt, PtrVT));
@@ -598,15 +617,15 @@
if (VT == OpVT || ExtType == ISD::EXTLOAD) {
SDVTList scalarvts;
- MVT::ValueType vecVT = MVT::v16i8;
+ MVT vecVT = MVT::v16i8;
// Convert the loaded v16i8 vector to the appropriate vector type
// specified by the operand:
if (OpVT == VT) {
if (VT != MVT::i1)
- vecVT = MVT::getVectorType(VT, (128 / MVT::getSizeInBits(VT)));
+ vecVT = MVT::getVectorVT(VT, (128 / VT.getSizeInBits()));
} else
- vecVT = MVT::getVectorType(OpVT, (128 / MVT::getSizeInBits(OpVT)));
+ vecVT = MVT::getVectorVT(OpVT, (128 / OpVT.getSizeInBits()));
Ops[0] = the_chain;
Ops[1] = DAG.getNode(ISD::BIT_CONVERT, vecVT, result);
@@ -666,9 +685,9 @@
LowerSTORE(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
StoreSDNode *SN = cast<StoreSDNode>(Op);
SDOperand Value = SN->getValue();
- MVT::ValueType VT = Value.getValueType();
- MVT::ValueType StVT = (!SN->isTruncatingStore() ? VT : SN->getMemoryVT());
- MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+ MVT VT = Value.getValueType();
+ MVT StVT = (!SN->isTruncatingStore() ? VT : SN->getMemoryVT());
+ MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
unsigned alignment = SN->getAlignment();
switch (SN->getAddressingMode()) {
@@ -678,11 +697,11 @@
// The vector type we really want to load from the 16-byte chunk, except
// in the case of MVT::i1, which has to be v16i8.
- unsigned vecVT, stVecVT = MVT::v16i8;
+ MVT vecVT, stVecVT = MVT::v16i8;
if (StVT != MVT::i1)
- stVecVT = MVT::getVectorType(StVT, (128 / MVT::getSizeInBits(StVT)));
- vecVT = MVT::getVectorType(VT, (128 / MVT::getSizeInBits(VT)));
+ stVecVT = MVT::getVectorVT(StVT, (128 / StVT.getSizeInBits()));
+ vecVT = MVT::getVectorVT(VT, (128 / VT.getSizeInBits()));
SDOperand alignLoadVec =
AlignedLoad(Op, DAG, ST, SN, alignment,
@@ -758,7 +777,7 @@
/// Generate the address of a constant pool entry.
static SDOperand
LowerConstantPool(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
- MVT::ValueType PtrVT = Op.getValueType();
+ MVT PtrVT = Op.getValueType();
ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
Constant *C = CP->getConstVal();
SDOperand CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment());
@@ -783,7 +802,7 @@
static SDOperand
LowerJumpTable(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
- MVT::ValueType PtrVT = Op.getValueType();
+ MVT PtrVT = Op.getValueType();
JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
SDOperand JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
SDOperand Zero = DAG.getConstant(0, PtrVT);
@@ -806,7 +825,7 @@
static SDOperand
LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
- MVT::ValueType PtrVT = Op.getValueType();
+ MVT PtrVT = Op.getValueType();
GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
GlobalValue *GV = GSDN->getGlobal();
SDOperand GA = DAG.getTargetGlobalAddress(GV, PtrVT, GSDN->getOffset());
@@ -838,7 +857,7 @@
*/
static SDOperand
LowerConstant(SDOperand Op, SelectionDAG &DAG) {
- unsigned VT = Op.getValueType();
+ MVT VT = Op.getValueType();
ConstantSDNode *CN = cast<ConstantSDNode>(Op.Val);
if (VT == MVT::i64) {
@@ -847,7 +866,7 @@
DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i64, T, T));
} else {
cerr << "LowerConstant: unhandled constant type "
- << MVT::getValueTypeString(VT)
+ << VT.getMVTString()
<< "\n";
abort();
/*NOTREACHED*/
@@ -859,7 +878,7 @@
//! Custom lower double precision floating point constants
static SDOperand
LowerConstantFP(SDOperand Op, SelectionDAG &DAG) {
- unsigned VT = Op.getValueType();
+ MVT VT = Op.getValueType();
ConstantFPSDNode *FP = cast<ConstantFPSDNode>(Op.Val);
assert((FP != 0) &&
@@ -879,8 +898,8 @@
LowerBRCOND(SDOperand Op, SelectionDAG &DAG)
{
SDOperand Cond = Op.getOperand(1);
- MVT::ValueType CondVT = Cond.getValueType();
- MVT::ValueType CondNVT;
+ MVT CondVT = Cond.getValueType();
+ MVT CondNVT;
if (CondVT == MVT::i1 || CondVT == MVT::i8) {
CondNVT = (CondVT == MVT::i1 ? MVT::i32 : MVT::i16);
@@ -909,19 +928,19 @@
unsigned ArgRegIdx = 0;
unsigned StackSlotSize = SPUFrameInfo::stackSlotSize();
- MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+ MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
// Add DAG nodes to load the arguments or copy them out of registers.
for (unsigned ArgNo = 0, e = Op.Val->getNumValues()-1; ArgNo != e; ++ArgNo) {
SDOperand ArgVal;
bool needsLoad = false;
- MVT::ValueType ObjectVT = Op.getValue(ArgNo).getValueType();
- unsigned ObjSize = MVT::getSizeInBits(ObjectVT)/8;
+ MVT ObjectVT = Op.getValue(ArgNo).getValueType();
+ unsigned ObjSize = ObjectVT.getSizeInBits()/8;
- switch (ObjectVT) {
+ switch (ObjectVT.getSimpleVT()) {
default: {
cerr << "LowerFORMAL_ARGUMENTS Unhandled argument type: "
- << MVT::getValueTypeString(ObjectVT)
+ << ObjectVT.getMVTString()
<< "\n";
abort();
}
@@ -1017,7 +1036,7 @@
// If the function takes variable number of arguments, make a frame index for
// the start of the first vararg value... for expansion of llvm.va_start.
if (isVarArg) {
- VarArgsFrameIndex = MFI->CreateFixedObject(MVT::getSizeInBits(PtrVT)/8,
+ VarArgsFrameIndex = MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
ArgOffset);
SDOperand FIN = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
// If this function is vararg, store any remaining integer argument regs to
@@ -1031,7 +1050,7 @@
SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
MemOps.push_back(Store);
// Increment the address by four for the next argument to store
- SDOperand PtrOff = DAG.getConstant(MVT::getSizeInBits(PtrVT)/8, PtrVT);
+ SDOperand PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT);
FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff);
}
if (!MemOps.empty())
@@ -1041,9 +1060,8 @@
ArgValues.push_back(Root);
// Return the new list of results.
- std::vector<MVT::ValueType> RetVT(Op.Val->value_begin(),
- Op.Val->value_end());
- return DAG.getNode(ISD::MERGE_VALUES, RetVT, &ArgValues[0], ArgValues.size());
+ return DAG.getMergeValues(Op.Val->getVTList(), &ArgValues[0],
+ ArgValues.size());
}
/// isLSAAddress - Return the immediate to use if the specified
@@ -1075,7 +1093,7 @@
const unsigned NumArgRegs = SPURegisterInfo::getNumArgRegs();
// Handy pointer type
- MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+ MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
// Accumulate how many bytes are to be pushed on the stack, including the
// linkage area, and parameter passing area. According to the SPU ABI,
@@ -1105,7 +1123,7 @@
SDOperand PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
PtrOff = DAG.getNode(ISD::ADD, PtrVT, StackPtr, PtrOff);
- switch (Arg.getValueType()) {
+ switch (Arg.getValueType().getSimpleVT()) {
default: assert(0 && "Unexpected ValueType for argument!");
case MVT::i32:
case MVT::i64:
@@ -1159,10 +1177,6 @@
InFlag = Chain.getValue(1);
}
- std::vector<MVT::ValueType> NodeTys;
- NodeTys.push_back(MVT::Other); // Returns a chain
- NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
-
SmallVector<SDOperand, 8> Ops;
unsigned CallOpc = SPUISD::CALL;
@@ -1171,7 +1185,7 @@
// node so that legalize doesn't hack it.
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
GlobalValue *GV = G->getGlobal();
- unsigned CalleeVT = Callee.getValueType();
+ MVT CalleeVT = Callee.getValueType();
SDOperand Zero = DAG.getConstant(0, PtrVT);
SDOperand GA = DAG.getTargetGlobalAddress(GV, CalleeVT);
@@ -1213,7 +1227,9 @@
if (InFlag.Val)
Ops.push_back(InFlag);
- Chain = DAG.getNode(CallOpc, NodeTys, &Ops[0], Ops.size());
+ // Returns a chain and a flag for retval copy to use.
+ Chain = DAG.getNode(CallOpc, DAG.getVTList(MVT::Other, MVT::Flag),
+ &Ops[0], Ops.size());
InFlag = Chain.getValue(1);
Chain = DAG.getCALLSEQ_END(Chain,
@@ -1225,10 +1241,9 @@
SDOperand ResultVals[3];
unsigned NumResults = 0;
- NodeTys.clear();
// If the call has results, copy the values out of the ret val registers.
- switch (Op.Val->getValueType(0)) {
+ switch (Op.Val->getValueType(0).getSimpleVT()) {
default: assert(0 && "Unexpected ret value!");
case MVT::Other: break;
case MVT::i32:
@@ -1239,19 +1254,16 @@
Chain.getValue(2)).getValue(1);
ResultVals[1] = Chain.getValue(0);
NumResults = 2;
- NodeTys.push_back(MVT::i32);
} else {
Chain = DAG.getCopyFromReg(Chain, SPU::R3, MVT::i32, InFlag).getValue(1);
ResultVals[0] = Chain.getValue(0);
NumResults = 1;
}
- NodeTys.push_back(MVT::i32);
break;
case MVT::i64:
Chain = DAG.getCopyFromReg(Chain, SPU::R3, MVT::i64, InFlag).getValue(1);
ResultVals[0] = Chain.getValue(0);
NumResults = 1;
- NodeTys.push_back(MVT::i64);
break;
case MVT::f32:
case MVT::f64:
@@ -1259,7 +1271,6 @@
InFlag).getValue(1);
ResultVals[0] = Chain.getValue(0);
NumResults = 1;
- NodeTys.push_back(Op.Val->getValueType(0));
break;
case MVT::v2f64:
case MVT::v4f32:
@@ -1270,20 +1281,16 @@
InFlag).getValue(1);
ResultVals[0] = Chain.getValue(0);
NumResults = 1;
- NodeTys.push_back(Op.Val->getValueType(0));
break;
}
-
- NodeTys.push_back(MVT::Other);
-
+
// If the function returns void, just return the chain.
if (NumResults == 0)
return Chain;
// Otherwise, merge everything together with a MERGE_VALUES node.
ResultVals[NumResults++] = Chain;
- SDOperand Res = DAG.getNode(ISD::MERGE_VALUES, NodeTys,
- ResultVals, NumResults);
+ SDOperand Res = DAG.getMergeValues(ResultVals, NumResults);
return Res.getValue(Op.ResNo);
}
@@ -1350,7 +1357,7 @@
/// and the value fits into an unsigned 18-bit constant, and if so, return the
/// constant
SDOperand SPU::get_vec_u18imm(SDNode *N, SelectionDAG &DAG,
- MVT::ValueType ValueType) {
+ MVT ValueType) {
if (ConstantSDNode *CN = getVecImm(N)) {
uint64_t Value = CN->getValue();
if (ValueType == MVT::i64) {
@@ -1372,7 +1379,7 @@
/// and the value fits into a signed 16-bit constant, and if so, return the
/// constant
SDOperand SPU::get_vec_i16imm(SDNode *N, SelectionDAG &DAG,
- MVT::ValueType ValueType) {
+ MVT ValueType) {
if (ConstantSDNode *CN = getVecImm(N)) {
int64_t Value = CN->getSignExtended();
if (ValueType == MVT::i64) {
@@ -1395,7 +1402,7 @@
/// and the value fits into a signed 10-bit constant, and if so, return the
/// constant
SDOperand SPU::get_vec_i10imm(SDNode *N, SelectionDAG &DAG,
- MVT::ValueType ValueType) {
+ MVT ValueType) {
if (ConstantSDNode *CN = getVecImm(N)) {
int64_t Value = CN->getSignExtended();
if (ValueType == MVT::i64) {
@@ -1421,7 +1428,7 @@
/// constant vectors. Thus, we test to see if the upper and lower bytes are the
/// same value.
SDOperand SPU::get_vec_i8imm(SDNode *N, SelectionDAG &DAG,
- MVT::ValueType ValueType) {
+ MVT ValueType) {
if (ConstantSDNode *CN = getVecImm(N)) {
int Value = (int) CN->getValue();
if (ValueType == MVT::i16
@@ -1440,7 +1447,7 @@
/// and the value fits into a signed 16-bit constant, and if so, return the
/// constant
SDOperand SPU::get_ILHUvec_imm(SDNode *N, SelectionDAG &DAG,
- MVT::ValueType ValueType) {
+ MVT ValueType) {
if (ConstantSDNode *CN = getVecImm(N)) {
uint64_t Value = CN->getValue();
if ((ValueType == MVT::i32
@@ -1480,7 +1487,7 @@
// Start with zero'd results.
VectorBits[0] = VectorBits[1] = UndefBits[0] = UndefBits[1] = 0;
- unsigned EltBitSize = MVT::getSizeInBits(BV->getOperand(0).getValueType());
+ unsigned EltBitSize = BV->getOperand(0).getValueType().getSizeInBits();
for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
SDOperand OpVal = BV->getOperand(i);
@@ -1582,7 +1589,7 @@
// this case more efficiently than a constant pool load, lower it to the
// sequence of ops that should be used.
static SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) {
- MVT::ValueType VT = Op.getValueType();
+ MVT VT = Op.getValueType();
// If this is a vector of constants or undefs, get the bits. A bit in
// UndefBits is set if the corresponding element of the vector is an
// ISD::UNDEF value. For undefs, the corresponding VectorBits values are
@@ -1593,11 +1600,11 @@
int SplatSize;
if (GetConstantBuildVectorBits(Op.Val, VectorBits, UndefBits)
|| !isConstantSplat(VectorBits, UndefBits,
- MVT::getSizeInBits(MVT::getVectorElementType(VT)),
+ VT.getVectorElementType().getSizeInBits(),
SplatBits, SplatUndef, SplatSize))
return SDOperand(); // Not a constant vector, not a splat.
- switch (VT) {
+ switch (VT.getSimpleVT()) {
default:
case MVT::v4f32: {
uint32_t Value32 = SplatBits;
@@ -1634,14 +1641,14 @@
Value16 = (unsigned short) (SplatBits & 0xffff);
else
Value16 = (unsigned short) (SplatBits | (SplatBits << 8));
- SDOperand T = DAG.getConstant(Value16, MVT::getVectorElementType(VT));
+ SDOperand T = DAG.getConstant(Value16, VT.getVectorElementType());
SDOperand Ops[8];
for (int i = 0; i < 8; ++i) Ops[i] = T;
return DAG.getNode(ISD::BUILD_VECTOR, VT, Ops, 8);
}
case MVT::v4i32: {
unsigned int Value = SplatBits;
- SDOperand T = DAG.getConstant(Value, MVT::getVectorElementType(VT));
+ SDOperand T = DAG.getConstant(Value, VT.getVectorElementType());
return DAG.getNode(ISD::BUILD_VECTOR, VT, T, T, T, T);
}
case MVT::v2i64: {
@@ -1699,33 +1706,33 @@
}
for (int i = 0; i < 4; ++i) {
+ uint64_t val = 0;
for (int j = 0; j < 4; ++j) {
SDOperand V;
bool process_upper, process_lower;
- uint64_t val = 0;
-
+ val <<= 8;
process_upper = (upper_special && (i & 1) == 0);
process_lower = (lower_special && (i & 1) == 1);
if (process_upper || process_lower) {
if ((process_upper && upper == 0)
|| (process_lower && lower == 0))
- val = 0x80;
+ val |= 0x80;
else if ((process_upper && upper == 0xffffffff)
|| (process_lower && lower == 0xffffffff))
- val = 0xc0;
+ val |= 0xc0;
else if ((process_upper && upper == 0x80000000)
|| (process_lower && lower == 0x80000000))
- val = (j == 0 ? 0xe0 : 0x80);
+ val |= (j == 0 ? 0xe0 : 0x80);
} else
- val = i * 4 + j + ((i & 1) * 16);
-
- ShufBytes.push_back(DAG.getConstant(val, MVT::i8));
+ val |= i * 4 + j + ((i & 1) * 16);
}
+
+ ShufBytes.push_back(DAG.getConstant(val, MVT::i32));
}
return DAG.getNode(SPUISD::SHUFB, VT, HI32, LO32,
- DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8,
+ DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32,
&ShufBytes[0], ShufBytes.size()));
}
}
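
The reworked loop accumulates four SHUFB control bytes into one 32-bit value (val is shifted left by eight and OR'd each iteration), so the mask can be built as a v4i32 instead of sixteen i8 constants. The packing itself, in isolation (byte values chosen for illustration):

  #include <cstdint>
  #include <cstdio>

  int main() {
    const uint8_t bytes[4] = {0x04, 0x05, 0x06, 0x07};
    uint32_t val = 0;
    for (int j = 0; j < 4; ++j) {
      val <<= 8;        // make room for the next control byte
      val |= bytes[j];
    }
    std::printf("0x%08x\n", val);   // prints 0x04050607
    return 0;
  }
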
@@ -1757,7 +1764,7 @@
// If we have a single element being moved from V1 to V2, this can be handled
// using the C*[DX] compute mask instructions, but the vector elements have
// to be monotonically increasing with one exception element.
- MVT::ValueType EltVT = MVT::getVectorElementType(V1.getValueType());
+ MVT EltVT = V1.getValueType().getVectorElementType();
unsigned EltsFromV2 = 0;
unsigned V2Elt = 0;
unsigned V2EltIdx0 = 0;
@@ -1796,7 +1803,7 @@
MachineFunction &MF = DAG.getMachineFunction();
MachineRegisterInfo &RegInfo = MF.getRegInfo();
unsigned VReg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
- MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+ MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
// Initialize temporary register to 0
SDOperand InitTempReg =
DAG.getCopyToReg(DAG.getEntryNode(), VReg, DAG.getConstant(0, PtrVT));
@@ -1809,7 +1816,7 @@
return DAG.getNode(SPUISD::SHUFB, V1.getValueType(), V2, V1, ShufMaskOp);
} else {
// Convert the SHUFFLE_VECTOR mask's input element units to the actual bytes.
- unsigned BytesPerElement = MVT::getSizeInBits(EltVT)/8;
+ unsigned BytesPerElement = EltVT.getSizeInBits()/8;
SmallVector<SDOperand, 16> ResultMask;
for (unsigned i = 0, e = PermMask.getNumOperands(); i != e; ++i) {
@@ -1840,11 +1847,11 @@
ConstantSDNode *CN = cast<ConstantSDNode>(Op0.Val);
SmallVector<SDOperand, 16> ConstVecValues;
- MVT::ValueType VT;
+ MVT VT;
size_t n_copies;
// Create a constant vector:
- switch (Op.getValueType()) {
+ switch (Op.getValueType().getSimpleVT()) {
default: assert(0 && "Unexpected constant value type in "
"LowerSCALAR_TO_VECTOR");
case MVT::v16i8: n_copies = 16; VT = MVT::i8; break;
@@ -1863,7 +1870,7 @@
&ConstVecValues[0], ConstVecValues.size());
} else {
// Otherwise, copy the value from one register to another:
- switch (Op0.getValueType()) {
+ switch (Op0.getValueType().getSimpleVT()) {
default: assert(0 && "Unexpected value type in LowerSCALAR_TO_VECTOR");
case MVT::i8:
case MVT::i16:
@@ -1879,7 +1886,14 @@
}
static SDOperand LowerVectorMUL(SDOperand Op, SelectionDAG &DAG) {
- switch (Op.getValueType()) {
+ switch (Op.getValueType().getSimpleVT()) {
+ default:
+ cerr << "CellSPU: Unknown vector multiplication, got "
+ << Op.getValueType().getMVTString()
+ << "\n";
+ abort();
+ /*NOTREACHED*/
+
case MVT::v4i32: {
SDOperand rA = Op.getOperand(0);
SDOperand rB = Op.getOperand(1);
@@ -1897,7 +1911,7 @@
// b) multiply upper halves, rotate left by 16 bits (inserts 16 lower zeroes)
// c) Use SELB to select upper and lower halves from the intermediate results
//
- // NOTE: We really want to move the FSMBI to earlier to actually get the
+ // NOTE: We really want to move the SELECT_MASK to earlier to actually get the
// dual-issue. This code does manage to do this, even if it's a little on
// the wacky side
case MVT::v8i16: {
@@ -1911,8 +1925,8 @@
SDOperand FSMBOp =
DAG.getCopyToReg(Chain, FSMBIreg,
- DAG.getNode(SPUISD::FSMBI, MVT::v8i16,
- DAG.getConstant(0xcccc, MVT::i32)));
+ DAG.getNode(SPUISD::SELECT_MASK, MVT::v8i16,
+ DAG.getConstant(0xcccc, MVT::i16)));
SDOperand HHProd =
DAG.getCopyToReg(FSMBOp, HiProdReg,
@@ -1955,8 +1969,8 @@
DAG.getNode(SPUISD::VEC_SHL, MVT::v8i16,
DAG.getNode(SPUISD::MPY, MVT::v8i16, rALH, rBLH), c8);
- SDOperand FSMBmask = DAG.getNode(SPUISD::FSMBI, MVT::v8i16,
- DAG.getConstant(0x2222, MVT::i32));
+ SDOperand FSMBmask = DAG.getNode(SPUISD::SELECT_MASK, MVT::v8i16,
+ DAG.getConstant(0x2222, MVT::i16));
SDOperand LoProdParts =
DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32,
@@ -2005,13 +2019,6 @@
DAG.getNode(ISD::OR, MVT::v4i32,
LoProd, HiProd));
}
-
- default:
- cerr << "CellSPU: Unknown vector multiplication, got "
- << MVT::getValueTypeString(Op.getValueType())
- << "\n";
- abort();
- /*NOTREACHED*/
}
return SDOperand();
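
SELECT_MASK with a 16-bit immediate corresponds to FSMB, which expands each bit of the immediate into a whole byte of the mask; that is why the constants above are now built as MVT::i16 rather than MVT::i32. A sketch of the expansion, assuming the usual left-to-right bit-to-byte correspondence:

  #include <cstdint>
  #include <cstdio>

  int main() {
    const uint16_t imm = 0x2222;
    uint8_t mask[16];
    for (int i = 0; i < 16; ++i)
      mask[i] = ((imm >> (15 - i)) & 1) ? 0xff : 0x00;   // bit i -> byte i
    for (int i = 0; i < 16; ++i)
      std::printf("%02x%c", mask[i], i == 15 ? '\n' : ' ');
    return 0;
  }
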
@@ -2023,7 +2030,7 @@
SDOperand A = Op.getOperand(0);
SDOperand B = Op.getOperand(1);
- unsigned VT = Op.getValueType();
+ MVT VT = Op.getValueType();
unsigned VRegBR, VRegC;
@@ -2062,7 +2069,7 @@
}
static SDOperand LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
- unsigned VT = Op.getValueType();
+ MVT VT = Op.getValueType();
SDOperand N = Op.getOperand(0);
SDOperand Elt = Op.getOperand(1);
SDOperand ShufMask[16];
@@ -2089,9 +2096,11 @@
// Need to generate shuffle mask and extract:
int prefslot_begin = -1, prefslot_end = -1;
- int elt_byte = EltNo * MVT::getSizeInBits(VT) / 8;
+ int elt_byte = EltNo * VT.getSizeInBits() / 8;
- switch (VT) {
+ switch (VT.getSimpleVT()) {
+ default:
+ assert(false && "Invalid value type!");
case MVT::i8: {
prefslot_begin = prefslot_end = 3;
break;
@@ -2144,12 +2153,12 @@
SDOperand VecOp = Op.getOperand(0);
SDOperand ValOp = Op.getOperand(1);
SDOperand IdxOp = Op.getOperand(2);
- MVT::ValueType VT = Op.getValueType();
+ MVT VT = Op.getValueType();
ConstantSDNode *CN = cast<ConstantSDNode>(IdxOp);
assert(CN != 0 && "LowerINSERT_VECTOR_ELT: Index is not constant!");
- MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+ MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
// Use $2 because it's always 16-byte aligned and it's available:
SDOperand PtrBase = DAG.getRegister(SPU::R2, PtrVT);
@@ -2196,7 +2205,7 @@
N0 = (N0.getOpcode() != ISD::Constant
? DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, N0)
: DAG.getConstant(cast<ConstantSDNode>(N0)->getValue(), MVT::i16));
- N1Opc = (N1.getValueType() < MVT::i16 ? ISD::ZERO_EXTEND : ISD::TRUNCATE);
+ N1Opc = N1.getValueType().bitsLT(MVT::i16) ? ISD::ZERO_EXTEND : ISD::TRUNCATE;
N1 = (N1.getOpcode() != ISD::Constant
? DAG.getNode(N1Opc, MVT::i16, N1)
: DAG.getConstant(cast<ConstantSDNode>(N1)->getValue(), MVT::i16));
@@ -2214,7 +2223,7 @@
N0 = (N0.getOpcode() != ISD::Constant
? DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, N0)
: DAG.getConstant(cast<ConstantSDNode>(N0)->getValue(), MVT::i16));
- N1Opc = (N1.getValueType() < MVT::i16 ? ISD::ZERO_EXTEND : ISD::TRUNCATE);
+ N1Opc = N1.getValueType().bitsLT(MVT::i16) ? ISD::ZERO_EXTEND : ISD::TRUNCATE;
N1 = (N1.getOpcode() != ISD::Constant
? DAG.getNode(N1Opc, MVT::i16, N1)
: DAG.getConstant(cast<ConstantSDNode>(N1)->getValue(), MVT::i16));
@@ -2227,7 +2236,7 @@
N0 = (N0.getOpcode() != ISD::Constant
? DAG.getNode(ISD::SIGN_EXTEND, MVT::i16, N0)
: DAG.getConstant(cast<ConstantSDNode>(N0)->getValue(), MVT::i16));
- N1Opc = (N1.getValueType() < MVT::i16 ? ISD::SIGN_EXTEND : ISD::TRUNCATE);
+ N1Opc = N1.getValueType().bitsLT(MVT::i16) ? ISD::SIGN_EXTEND : ISD::TRUNCATE;
N1 = (N1.getOpcode() != ISD::Constant
? DAG.getNode(N1Opc, MVT::i16, N1)
: DAG.getConstant(cast<ConstantSDNode>(N1)->getValue(), MVT::i16));
@@ -2240,7 +2249,7 @@
N0 = (N0.getOpcode() != ISD::Constant
? DAG.getNode(ISD::SIGN_EXTEND, MVT::i16, N0)
: DAG.getConstant(cast<ConstantSDNode>(N0)->getValue(), MVT::i16));
- N1Opc = (N1.getValueType() < MVT::i16 ? ISD::SIGN_EXTEND : ISD::TRUNCATE);
+ N1Opc = N1.getValueType().bitsLT(MVT::i16) ? ISD::SIGN_EXTEND : ISD::TRUNCATE;
N1 = (N1.getOpcode() != ISD::Constant
? DAG.getNode(N1Opc, MVT::i16, N1)
: DAG.getConstant(cast<ConstantSDNode>(N1)->getValue(), MVT::i16));
@@ -2255,9 +2264,8 @@
static SDOperand LowerI64Math(SDOperand Op, SelectionDAG &DAG, unsigned Opc)
{
- MVT::ValueType VT = Op.getValueType();
- unsigned VecVT =
- MVT::getVectorType(VT, (128 / MVT::getSizeInBits(VT)));
+ MVT VT = Op.getValueType();
+ MVT VecVT = MVT::getVectorVT(VT, (128 / VT.getSizeInBits()));
SDOperand Op0 = Op.getOperand(0);
@@ -2265,12 +2273,12 @@
case ISD::ZERO_EXTEND:
case ISD::SIGN_EXTEND:
case ISD::ANY_EXTEND: {
- MVT::ValueType Op0VT = Op0.getValueType();
- unsigned Op0VecVT =
- MVT::getVectorType(Op0VT, (128 / MVT::getSizeInBits(Op0VT)));
+ MVT Op0VT = Op0.getValueType();
+ MVT Op0VecVT = MVT::getVectorVT(Op0VT, (128 / Op0VT.getSizeInBits()));
assert(Op0VT == MVT::i32
&& "CellSPU: Zero/sign extending something other than i32");
+ DEBUG(cerr << "CellSPU: LowerI64Math custom lowering zero/sign/any extend\n");
unsigned NewOpc = (Opc == ISD::SIGN_EXTEND
? SPUISD::ROTBYTES_RIGHT_S
@@ -2285,15 +2293,73 @@
DAG.getConstant(4, MVT::i32))));
}
+ case ISD::ADD: {
+ // Turn operands into vectors to satisfy type checking (shufb works on
+ // vectors)
+ SDOperand Op0 =
+ DAG.getNode(SPUISD::PROMOTE_SCALAR, MVT::v2i64, Op.getOperand(0));
+ SDOperand Op1 =
+ DAG.getNode(SPUISD::PROMOTE_SCALAR, MVT::v2i64, Op.getOperand(1));
+ SmallVector<SDOperand, 16> ShufBytes;
+
+    // Create the shuffle mask for "rotating" the carry up one register slot
+    // once the carry is generated.
+ ShufBytes.push_back(DAG.getConstant(0x04050607, MVT::i32));
+ ShufBytes.push_back(DAG.getConstant(0x80808080, MVT::i32));
+ ShufBytes.push_back(DAG.getConstant(0x0c0d0e0f, MVT::i32));
+ ShufBytes.push_back(DAG.getConstant(0x80808080, MVT::i32));
+
+ SDOperand CarryGen =
+ DAG.getNode(SPUISD::CARRY_GENERATE, MVT::v2i64, Op0, Op1);
+ SDOperand ShiftedCarry =
+ DAG.getNode(SPUISD::SHUFB, MVT::v2i64,
+ CarryGen, CarryGen,
+ DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32,
+ &ShufBytes[0], ShufBytes.size()));
+
+ return DAG.getNode(SPUISD::EXTRACT_ELT0, MVT::i64,
+ DAG.getNode(SPUISD::ADD_EXTENDED, MVT::v2i64,
+ Op0, Op1, ShiftedCarry));
+ }
+
+ case ISD::SUB: {
+ // Turn operands into vectors to satisfy type checking (shufb works on
+ // vectors)
+ SDOperand Op0 =
+ DAG.getNode(SPUISD::PROMOTE_SCALAR, MVT::v2i64, Op.getOperand(0));
+ SDOperand Op1 =
+ DAG.getNode(SPUISD::PROMOTE_SCALAR, MVT::v2i64, Op.getOperand(1));
+ SmallVector<SDOperand, 16> ShufBytes;
+
+ // Create the shuffle mask for "rotating" the borrow up one register slot
+ // once the borrow is generated.
+ ShufBytes.push_back(DAG.getConstant(0x04050607, MVT::i32));
+ ShufBytes.push_back(DAG.getConstant(0xc0c0c0c0, MVT::i32));
+ ShufBytes.push_back(DAG.getConstant(0x0c0d0e0f, MVT::i32));
+ ShufBytes.push_back(DAG.getConstant(0xc0c0c0c0, MVT::i32));
+
+ SDOperand BorrowGen =
+ DAG.getNode(SPUISD::BORROW_GENERATE, MVT::v2i64, Op0, Op1);
+ SDOperand ShiftedBorrow =
+ DAG.getNode(SPUISD::SHUFB, MVT::v2i64,
+ BorrowGen, BorrowGen,
+ DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32,
+ &ShufBytes[0], ShufBytes.size()));
+
+ return DAG.getNode(SPUISD::EXTRACT_ELT0, MVT::i64,
+ DAG.getNode(SPUISD::SUB_EXTENDED, MVT::v2i64,
+ Op0, Op1, ShiftedBorrow));
+ }
+
case ISD::SHL: {
SDOperand ShiftAmt = Op.getOperand(1);
- unsigned ShiftAmtVT = unsigned(ShiftAmt.getValueType());
+ MVT ShiftAmtVT = ShiftAmt.getValueType();
SDOperand Op0Vec = DAG.getNode(SPUISD::PROMOTE_SCALAR, VecVT, Op0);
SDOperand MaskLower =
DAG.getNode(SPUISD::SELB, VecVT,
Op0Vec,
DAG.getConstant(0, VecVT),
- DAG.getNode(SPUISD::FSMBI, VecVT,
+ DAG.getNode(SPUISD::SELECT_MASK, VecVT,
DAG.getConstant(0xff00ULL, MVT::i16)));
SDOperand ShiftAmtBytes =
DAG.getNode(ISD::SRL, ShiftAmtVT,
@@ -2312,9 +2378,9 @@
}
case ISD::SRL: {
- unsigned VT = unsigned(Op.getValueType());
+ MVT VT = Op.getValueType();
SDOperand ShiftAmt = Op.getOperand(1);
- unsigned ShiftAmtVT = unsigned(ShiftAmt.getValueType());
+ MVT ShiftAmtVT = ShiftAmt.getValueType();
SDOperand ShiftAmtBytes =
DAG.getNode(ISD::SRL, ShiftAmtVT,
ShiftAmt,
@@ -2329,6 +2395,43 @@
Op0, ShiftAmtBytes),
ShiftAmtBits);
}
+
+ case ISD::SRA: {
+ // Promote Op0 to vector
+ SDOperand Op0 =
+ DAG.getNode(SPUISD::PROMOTE_SCALAR, MVT::v2i64, Op.getOperand(0));
+ SDOperand ShiftAmt = Op.getOperand(1);
+ MVT ShiftVT = ShiftAmt.getValueType();
+
+ // Negate variable shift amounts
+ if (!isa<ConstantSDNode>(ShiftAmt)) {
+ ShiftAmt = DAG.getNode(ISD::SUB, ShiftVT,
+ DAG.getConstant(0, ShiftVT), ShiftAmt);
+ }
+
+ SDOperand UpperHalfSign =
+ DAG.getNode(SPUISD::EXTRACT_ELT0, MVT::i32,
+ DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32,
+ DAG.getNode(SPUISD::VEC_SRA, MVT::v2i64,
+ Op0, DAG.getConstant(31, MVT::i32))));
+ SDOperand UpperHalfSignMask =
+ DAG.getNode(SPUISD::SELECT_MASK, MVT::v2i64, UpperHalfSign);
+ SDOperand UpperLowerMask =
+ DAG.getNode(SPUISD::SELECT_MASK, MVT::v2i64,
+ DAG.getConstant(0xff00, MVT::i16));
+ SDOperand UpperLowerSelect =
+ DAG.getNode(SPUISD::SELB, MVT::v2i64,
+ UpperHalfSignMask, Op0, UpperLowerMask);
+ SDOperand RotateLeftBytes =
+ DAG.getNode(SPUISD::ROTBYTES_LEFT_BITS, MVT::v2i64,
+ UpperLowerSelect, ShiftAmt);
+ SDOperand RotateLeftBits =
+ DAG.getNode(SPUISD::ROTBYTES_LEFT, MVT::v2i64,
+ RotateLeftBytes, ShiftAmt);
+
+ return DAG.getNode(SPUISD::EXTRACT_ELT0, MVT::i64,
+ RotateLeftBits);
+ }
}
return SDOperand();
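
The new i64 ADD path splits the operation into 32-bit word adds: CARRY_GENERATE produces a per-word carry, SHUFB rotates the low word's carry into the high slot, and ADD_EXTENDED adds it back in. A scalar model of that arithmetic (the word layout is simplified to two 32-bit halves):

  #include <cassert>
  #include <cstdint>

  int main() {
    const uint64_t a = 0x00000001ffffffffULL, b = 0x0000000000000001ULL;

    const uint32_t alo = uint32_t(a), ahi = uint32_t(a >> 32);
    const uint32_t blo = uint32_t(b), bhi = uint32_t(b >> 32);

    const uint32_t carry = uint32_t((uint64_t(alo) + blo) >> 32); // CARRY_GENERATE
    const uint32_t lo = alo + blo;                                // low word add, carry-in 0
    const uint32_t hi = ahi + bhi + carry;                        // ADD_EXTENDED with rotated carry

    assert(((uint64_t(hi) << 32) | lo) == a + b);
    return 0;
  }
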
@@ -2339,7 +2442,7 @@
LowerByteImmed(SDOperand Op, SelectionDAG &DAG) {
SDOperand ConstVec;
SDOperand Arg;
- MVT::ValueType VT = Op.getValueType();
+ MVT VT = Op.getValueType();
ConstVec = Op.getOperand(0);
Arg = Op.getOperand(1);
@@ -2363,7 +2466,7 @@
if (!GetConstantBuildVectorBits(ConstVec.Val, VectorBits, UndefBits)
&& isConstantSplat(VectorBits, UndefBits,
- MVT::getSizeInBits(MVT::getVectorElementType(VT)),
+ VT.getVectorElementType().getSizeInBits(),
SplatBits, SplatUndef, SplatSize)) {
SDOperand tcVec[16];
SDOperand tc = DAG.getTargetConstant(SplatBits & 0xff, MVT::i8);
@@ -2382,12 +2485,12 @@
}
//! Lower i32 multiplication
-static SDOperand LowerMUL(SDOperand Op, SelectionDAG &DAG, unsigned VT,
+static SDOperand LowerMUL(SDOperand Op, SelectionDAG &DAG, MVT VT,
unsigned Opc) {
- switch (VT) {
+ switch (VT.getSimpleVT()) {
default:
cerr << "CellSPU: Unknown LowerMUL value type, got "
- << MVT::getValueTypeString(Op.getValueType())
+ << Op.getValueType().getMVTString()
<< "\n";
abort();
/*NOTREACHED*/
@@ -2414,10 +2517,12 @@
ones per byte, which then have to be accumulated.
*/
static SDOperand LowerCTPOP(SDOperand Op, SelectionDAG &DAG) {
- unsigned VT = Op.getValueType();
- unsigned vecVT = MVT::getVectorType(VT, (128 / MVT::getSizeInBits(VT)));
+ MVT VT = Op.getValueType();
+ MVT vecVT = MVT::getVectorVT(VT, (128 / VT.getSizeInBits()));
- switch (VT) {
+ switch (VT.getSimpleVT()) {
+ default:
+ assert(false && "Invalid value type!");
case MVT::i8: {
SDOperand N = Op.getOperand(0);
SDOperand Elt0 = DAG.getConstant(0, MVT::i32);
@@ -2519,7 +2624,7 @@
SPUTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG)
{
unsigned Opc = (unsigned) Op.getOpcode();
- unsigned VT = (unsigned) Op.getValueType();
+ MVT VT = Op.getValueType();
switch (Opc) {
default: {
@@ -2559,17 +2664,19 @@
case ISD::ZERO_EXTEND:
case ISD::SIGN_EXTEND:
case ISD::ANY_EXTEND:
+ case ISD::ADD:
case ISD::SUB:
case ISD::ROTR:
case ISD::ROTL:
case ISD::SRL:
case ISD::SHL:
- case ISD::SRA:
+ case ISD::SRA: {
if (VT == MVT::i8)
return LowerI8Math(Op, DAG, Opc);
else if (VT == MVT::i64)
return LowerI64Math(Op, DAG, Opc);
break;
+ }
// Vector-related lowering.
case ISD::BUILD_VECTOR:
@@ -2591,7 +2698,7 @@
// Vector and i8 multiply:
case ISD::MUL:
- if (MVT::isVector(VT))
+ if (VT.isVector())
return LowerVectorMUL(Op, DAG);
else if (VT == MVT::i8)
return LowerI8Math(Op, DAG, Opc);
@@ -2633,9 +2740,7 @@
case ISD::ADD: {
SDOperand Op1 = N->getOperand(1);
- if ((Op1.getOpcode() == ISD::Constant
- || Op1.getOpcode() == ISD::TargetConstant)
- && Op0.getOpcode() == SPUISD::IndirectAddr) {
+ if (isa<ConstantSDNode>(Op1) && Op0.getOpcode() == SPUISD::IndirectAddr) {
SDOperand Op01 = Op0.getOperand(1);
if (Op01.getOpcode() == ISD::Constant
|| Op01.getOpcode() == ISD::TargetConstant) {
@@ -2654,8 +2759,7 @@
return DAG.getNode(SPUISD::IndirectAddr, Op0.getValueType(),
Op0.getOperand(0), combinedConst);
}
- } else if ((Op0.getOpcode() == ISD::Constant
- || Op0.getOpcode() == ISD::TargetConstant)
+ } else if (isa<ConstantSDNode>(Op0)
&& Op1.getOpcode() == SPUISD::IndirectAddr) {
SDOperand Op11 = Op1.getOperand(1);
if (Op11.getOpcode() == ISD::Constant
@@ -2764,7 +2868,7 @@
}
}
// Otherwise, return unchanged.
-#if 0
+#if 1
if (Result.Val) {
DEBUG(cerr << "\nReplace.SPU: ");
DEBUG(N->dump(&DAG));
@@ -2801,7 +2905,7 @@
std::pair<unsigned, const TargetRegisterClass*>
SPUTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
- MVT::ValueType VT) const
+ MVT VT) const
{
if (Constraint.size() == 1) {
// GCC RS6000 Constraint Letters
@@ -2833,7 +2937,9 @@
APInt &KnownOne,
const SelectionDAG &DAG,
unsigned Depth ) const {
+#if 0
const uint64_t uint64_sizebits = sizeof(uint64_t) * 8;
+#endif
switch (Op.getOpcode()) {
default:
@@ -2849,18 +2955,22 @@
case SPUISD::PROMOTE_SCALAR: {
SDOperand Op0 = Op.getOperand(0);
- uint64_t InMask = MVT::getIntVTBitMask(Op0.getValueType());
- KnownZero |= APInt(uint64_sizebits, ~InMask, false);
- KnownOne |= APInt(uint64_sizebits, InMask, false);
+ MVT Op0VT = Op0.getValueType();
+ unsigned Op0VTBits = Op0VT.getSizeInBits();
+ uint64_t InMask = Op0VT.getIntegerVTBitMask();
+ KnownZero |= APInt(Op0VTBits, ~InMask, false);
+ KnownOne |= APInt(Op0VTBits, InMask, false);
break;
}
case SPUISD::LDRESULT:
case SPUISD::EXTRACT_ELT0:
case SPUISD::EXTRACT_ELT0_CHAINED: {
- uint64_t InMask = MVT::getIntVTBitMask(Op.getValueType());
- KnownZero |= APInt(uint64_sizebits, ~InMask, false);
- KnownOne |= APInt(uint64_sizebits, InMask, false);
+ MVT OpVT = Op.getValueType();
+ unsigned OpVTBits = OpVT.getSizeInBits();
+ uint64_t InMask = OpVT.getIntegerVTBitMask();
+ KnownZero |= APInt(OpVTBits, ~InMask, false);
+ KnownOne |= APInt(OpVTBits, InMask, false);
break;
}
@@ -2873,24 +2983,23 @@
case MPYU:
case MPYH:
case MPYHH:
- case SHLQUAD_L_BITS:
- case SHLQUAD_L_BYTES:
- case VEC_SHL:
- case VEC_SRL:
- case VEC_SRA:
- case VEC_ROTL:
- case VEC_ROTR:
- case ROTQUAD_RZ_BYTES:
- case ROTQUAD_RZ_BITS:
- case ROTBYTES_RIGHT_S:
- case ROTBYTES_LEFT:
- case ROTBYTES_LEFT_CHAINED:
- case FSMBI:
- case SELB:
- case SFPConstant:
- case FPInterp:
- case FPRecipEst:
- case SEXT32TO64:
+ case SPUISD::SHLQUAD_L_BITS:
+ case SPUISD::SHLQUAD_L_BYTES:
+ case SPUISD::VEC_SHL:
+ case SPUISD::VEC_SRL:
+ case SPUISD::VEC_SRA:
+ case SPUISD::VEC_ROTL:
+ case SPUISD::VEC_ROTR:
+ case SPUISD::ROTQUAD_RZ_BYTES:
+ case SPUISD::ROTQUAD_RZ_BITS:
+ case SPUISD::ROTBYTES_RIGHT_S:
+ case SPUISD::ROTBYTES_LEFT:
+ case SPUISD::ROTBYTES_LEFT_CHAINED:
+ case SPUISD::SELECT_MASK:
+ case SPUISD::SELB:
+ case SPUISD::FPInterp:
+ case SPUISD::FPRecipEst:
+ case SPUISD::SEXT32TO64:
#endif
}
}
@@ -2900,7 +3009,7 @@
SPUTargetLowering::LowerAsmOperandForConstraint(SDOperand Op,
char ConstraintLetter,
std::vector<SDOperand> &Ops,
- SelectionDAG &DAG) {
+ SelectionDAG &DAG) const {
// Default, for the time being, to the base class handler
TargetLowering::LowerAsmOperandForConstraint(Op, ConstraintLetter, Ops, DAG);
}
Modified: llvm/branches/non-call-eh/lib/Target/CellSPU/SPUISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/CellSPU/SPUISelLowering.h?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/CellSPU/SPUISelLowering.h (original)
+++ llvm/branches/non-call-eh/lib/Target/CellSPU/SPUISelLowering.h Sun Jul 6 15:45:41 2008
@@ -62,8 +62,13 @@
ROTBYTES_RIGHT_S, ///< Vector rotate right, by bytes, sign fill
ROTBYTES_LEFT, ///< Rotate bytes (loads -> ROTQBYI)
ROTBYTES_LEFT_CHAINED, ///< Rotate bytes (loads -> ROTQBYI), with chain
- FSMBI, ///< Form Select Mask for Bytes, Immediate
+ ROTBYTES_LEFT_BITS, ///< Rotate bytes left by bit shift count
+ SELECT_MASK, ///< Select Mask (FSM, FSMB, FSMH, FSMBI)
SELB, ///< Select bits -> (b & mask) | (a & ~mask)
+ ADD_EXTENDED, ///< Add extended, with carry
+ CARRY_GENERATE, ///< Carry generate for ADD_EXTENDED
+ SUB_EXTENDED, ///< Subtract extended, with borrow
+ BORROW_GENERATE, ///< Borrow generate for SUB_EXTENDED
FPInterp, ///< Floating point interpolate
FPRecipEst, ///< Floating point reciprocal estimate
SEXT32TO64, ///< Sign-extended 32-bit const -> 64-bits
@@ -74,15 +79,15 @@
/// Predicates that are used for node matching:
namespace SPU {
SDOperand get_vec_u18imm(SDNode *N, SelectionDAG &DAG,
- MVT::ValueType ValueType);
+ MVT ValueType);
SDOperand get_vec_i16imm(SDNode *N, SelectionDAG &DAG,
- MVT::ValueType ValueType);
+ MVT ValueType);
SDOperand get_vec_i10imm(SDNode *N, SelectionDAG &DAG,
- MVT::ValueType ValueType);
+ MVT ValueType);
SDOperand get_vec_i8imm(SDNode *N, SelectionDAG &DAG,
- MVT::ValueType ValueType);
+ MVT ValueType);
SDOperand get_ILHUvec_imm(SDNode *N, SelectionDAG &DAG,
- MVT::ValueType ValueType);
+ MVT ValueType);
SDOperand get_v4i32_imm(SDNode *N, SelectionDAG &DAG);
SDOperand get_v2i64_imm(SDNode *N, SelectionDAG &DAG);
}
@@ -104,7 +109,7 @@
virtual const char *getTargetNodeName(unsigned Opcode) const;
/// getSetCCResultType - Return the ValueType for ISD::SETCC
- virtual MVT::ValueType getSetCCResultType(const SDOperand &) const;
+ virtual MVT getSetCCResultType(const SDOperand &) const;
/// LowerOperation - Provide custom lowering hooks for some operations.
///
@@ -123,11 +128,11 @@
std::pair<unsigned, const TargetRegisterClass*>
getRegForInlineAsmConstraint(const std::string &Constraint,
- MVT::ValueType VT) const;
+ MVT VT) const;
void LowerAsmOperandForConstraint(SDOperand Op, char ConstraintLetter,
std::vector<SDOperand> &Ops,
- SelectionDAG &DAG);
+ SelectionDAG &DAG) const;
/// isLegalAddressImmediate - Return true if the integer value can be used
/// as the offset of the target addressing mode.
Modified: llvm/branches/non-call-eh/lib/Target/CellSPU/SPUInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/CellSPU/SPUInstrInfo.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/CellSPU/SPUInstrInfo.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/CellSPU/SPUInstrInfo.cpp Sun Jul 6 15:45:41 2008
@@ -17,7 +17,7 @@
#include "SPUTargetMachine.h"
#include "SPUGenInstrInfo.inc"
#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include <iostream>
+#include "llvm/Support/Streams.h"
using namespace llvm;
@@ -218,7 +218,7 @@
BuildMI(MBB, MI, get(SPU::ORv4i32), DestReg).addReg(SrcReg)
.addReg(SrcReg);
} else {
- std::cerr << "Attempt to copy unknown/unsupported register class!\n";
+ cerr << "Attempt to copy unknown/unsupported register class!\n";
abort();
}
}
@@ -412,20 +412,22 @@
&& MI->getOperand(1).getReg() == MI->getOperand(2).getReg()) {
if (OpNum == 0) { // move -> store
unsigned InReg = MI->getOperand(1).getReg();
+ bool isKill = MI->getOperand(1).isKill();
if (FrameIndex < SPUFrameInfo::maxFrameOffset()) {
- NewMI = addFrameReference(BuildMI(TII.get(SPU::STQDr32)).addReg(InReg),
+ NewMI = addFrameReference(BuildMI(TII.get(SPU::STQDr32))
+ .addReg(InReg, false, false, isKill),
FrameIndex);
}
} else { // move -> load
unsigned OutReg = MI->getOperand(0).getReg();
- Opc = (FrameIndex < SPUFrameInfo::maxFrameOffset()) ? SPU::STQDr32 : SPU::STQXr32;
- NewMI = addFrameReference(BuildMI(TII.get(Opc), OutReg), FrameIndex);
+ bool isDead = MI->getOperand(0).isDead();
+ Opc = (FrameIndex < SPUFrameInfo::maxFrameOffset())
+ ? SPU::STQDr32 : SPU::STQXr32;
+ NewMI = addFrameReference(BuildMI(TII.get(Opc))
+ .addReg(OutReg, true, false, false, isDead), FrameIndex);
}
}
- if (NewMI)
- NewMI->copyKillDeadInfo(MI);
-
return NewMI;
#else
return 0;
Modified: llvm/branches/non-call-eh/lib/Target/CellSPU/SPUInstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/CellSPU/SPUInstrInfo.h?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/CellSPU/SPUInstrInfo.h (original)
+++ llvm/branches/non-call-eh/lib/Target/CellSPU/SPUInstrInfo.h Sun Jul 6 15:45:41 2008
@@ -30,7 +30,7 @@
/// such, whenever a client has an instance of instruction info, it should
/// always be able to get register info as well (through this method).
///
- virtual const TargetRegisterInfo &getRegisterInfo() const { return RI; }
+ virtual const SPURegisterInfo &getRegisterInfo() const { return RI; }
/// getPointerRegClass - Return the register class to use to hold pointers.
/// This is used for addressing modes.
Modified: llvm/branches/non-call-eh/lib/Target/CellSPU/SPUInstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/CellSPU/SPUInstrInfo.td?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/CellSPU/SPUInstrInfo.td (original)
+++ llvm/branches/non-call-eh/lib/Target/CellSPU/SPUInstrInfo.td Sun Jul 6 15:45:41 2008
@@ -22,10 +22,10 @@
//===----------------------------------------------------------------------===//
let hasCtrlDep = 1, Defs = [R1], Uses = [R1] in {
- def ADJCALLSTACKDOWN : Pseudo<(outs), (ins u16imm:$amt),
+ def ADJCALLSTACKDOWN : Pseudo<(outs), (ins u16imm_i32:$amt),
"${:comment} ADJCALLSTACKDOWN",
[(callseq_start imm:$amt)]>;
- def ADJCALLSTACKUP : Pseudo<(outs), (ins u16imm:$amt),
+ def ADJCALLSTACKUP : Pseudo<(outs), (ins u16imm_i32:$amt),
"${:comment} ADJCALLSTACKUP",
[(callseq_end imm:$amt)]>;
}
@@ -290,18 +290,34 @@
"cwd\t$rT, $src", ShuffleOp,
[(set (v4i32 VECREG:$rT), (SPUvecinsmask dform2_addr:$src))]>;
+def CWDf32 : RI7Form<0b01101111100, (outs VECREG:$rT), (ins memri7:$src),
+ "cwd\t$rT, $src", ShuffleOp,
+ [(set (v4f32 VECREG:$rT), (SPUvecinsmask dform2_addr:$src))]>;
+
def CWX : RRForm<0b01101011100, (outs VECREG:$rT), (ins memrr:$src),
"cwx\t$rT, $src", ShuffleOp,
[(set (v4i32 VECREG:$rT), (SPUvecinsmask xform_addr:$src))]>;
+def CWXf32 : RRForm<0b01101011100, (outs VECREG:$rT), (ins memrr:$src),
+ "cwx\t$rT, $src", ShuffleOp,
+ [(set (v4f32 VECREG:$rT), (SPUvecinsmask xform_addr:$src))]>;
+
def CDD : RI7Form<0b11101111100, (outs VECREG:$rT), (ins memri7:$src),
"cdd\t$rT, $src", ShuffleOp,
[(set (v2i64 VECREG:$rT), (SPUvecinsmask dform2_addr:$src))]>;
+def CDDf64 : RI7Form<0b11101111100, (outs VECREG:$rT), (ins memri7:$src),
+ "cdd\t$rT, $src", ShuffleOp,
+ [(set (v2f64 VECREG:$rT), (SPUvecinsmask dform2_addr:$src))]>;
+
def CDX : RRForm<0b11101011100, (outs VECREG:$rT), (ins memrr:$src),
"cdx\t$rT, $src", ShuffleOp,
[(set (v2i64 VECREG:$rT), (SPUvecinsmask xform_addr:$src))]>;
+def CDXf64 : RRForm<0b11101011100, (outs VECREG:$rT), (ins memrr:$src),
+ "cdx\t$rT, $src", ShuffleOp,
+ [(set (v2f64 VECREG:$rT), (SPUvecinsmask xform_addr:$src))]>;
+
//===----------------------------------------------------------------------===//
// Constant formation:
//===----------------------------------------------------------------------===//
@@ -367,10 +383,10 @@
multiclass ImmLoadHalfwordUpper
{
def v2i64: ILHUVecInst<v2i64, u16imm_i64, immILHUvec_i64>;
- def v4i32: ILHUVecInst<v4i32, u16imm, immILHUvec>;
+ def v4i32: ILHUVecInst<v4i32, u16imm_i32, immILHUvec>;
def r64: ILHURegInst<R64C, u16imm_i64, hi16>;
- def r32: ILHURegInst<R32C, u16imm, hi16>;
+ def r32: ILHURegInst<R32C, u16imm_i32, hi16>;
// Loads the high portion of an address
def hi: ILHURegInst<R32C, symbolHi, hi16>;
@@ -436,7 +452,7 @@
multiclass ImmOrHalfwordLower
{
def v2i64: IOHLVecInst<v2i64, u16imm_i64>;
- def v4i32: IOHLVecInst<v4i32, u16imm>;
+ def v4i32: IOHLVecInst<v4i32, u16imm_i32>;
def r32: IOHLRegInst<R32C, i32imm>;
def f32: IOHLRegInst<R32FP, f32imm>;
@@ -453,7 +469,7 @@
RI16Form<0b101001100, (outs VECREG:$rT), (ins u16imm:$val),
"fsmbi\t$rT, $val",
SelectOp,
- [(set (vectype VECREG:$rT), (SPUfsmbi (i32 immU16:$val)))]>;
+ [(set (vectype VECREG:$rT), (SPUselmask (i16 immU16:$val)))]>;
multiclass FormSelectMaskBytesImm
{
@@ -469,21 +485,37 @@
def FSMB:
RRForm_1<0b01101101100, (outs VECREG:$rT), (ins R16C:$rA),
"fsmb\t$rT, $rA", SelectOp,
- [(set (v16i8 VECREG:$rT), (SPUfsmbi R16C:$rA))]>;
+ [(set (v16i8 VECREG:$rT), (SPUselmask R16C:$rA))]>;
// fsmh: Form select mask for halfwords. N.B., Input operand, $rA, is
// only 8-bits wide (even though it's input as 16-bits here)
def FSMH:
RRForm_1<0b10101101100, (outs VECREG:$rT), (ins R16C:$rA),
"fsmh\t$rT, $rA", SelectOp,
- [(set (v8i16 VECREG:$rT), (SPUfsmbi R16C:$rA))]>;
+ [(set (v8i16 VECREG:$rT), (SPUselmask R16C:$rA))]>;
// fsm: Form select mask for words. Like the other fsm* instructions,
// only the lower 4 bits of $rA are significant.
-def FSM:
- RRForm_1<0b00101101100, (outs VECREG:$rT), (ins R16C:$rA),
- "fsm\t$rT, $rA", SelectOp,
- [(set (v4i32 VECREG:$rT), (SPUfsmbi R16C:$rA))]>;
+class FSMInst<ValueType vectype, RegisterClass rclass>:
+ RRForm_1<0b00101101100, (outs VECREG:$rT), (ins rclass:$rA),
+ "fsm\t$rT, $rA",
+ SelectOp,
+ [(set (vectype VECREG:$rT), (SPUselmask rclass:$rA))]>;
+
+multiclass FormSelectMaskWord {
+ def r32 : FSMInst<v4i32, R32C>;
+ def r16 : FSMInst<v4i32, R16C>;
+}
+
+defm FSM : FormSelectMaskWord;
+
+// Special case when used for i64 math operations
+multiclass FormSelectMaskWord64 {
+ def r32 : FSMInst<v2i64, R32C>;
+ def r16 : FSMInst<v2i64, R16C>;
+}
+
+defm FSM64 : FormSelectMaskWord64;
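
The FSM/FSMB/FSMH/FSMBI family now all feed the single SPUselmask node, and SELB consumes the resulting mask. A minimal scalar sketch of the word form, assuming each of the low four bits of $rA fans out to a full 32-bit lane (the bit-to-lane ordering here is an assumption, not taken from the patch):

  #include <array>
  #include <cstdint>

  // FSM-style mask: only the low 4 bits of 'bits' matter; each selected bit
  // becomes a lane of all ones.
  static std::array<uint32_t, 4> formSelectMaskWord(uint32_t bits) {
    std::array<uint32_t, 4> mask{};
    for (int lane = 0; lane < 4; ++lane)
      mask[lane] = ((bits >> (3 - lane)) & 1) ? 0xFFFFFFFFu : 0u; // assumed MSB-first lane order
    return mask;
  }

  // SELB per the SPUISelLowering.h comment: (b & mask) | (a & ~mask), per lane.
  static uint32_t selb(uint32_t a, uint32_t b, uint32_t mask) {
    return (b & mask) | (a & ~mask);
  }
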
//===----------------------------------------------------------------------===//
// Integer and Logical Operations:
@@ -529,7 +561,7 @@
def Ar8:
RRForm<0b00000011000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
"a\t$rT, $rA, $rB", IntegerOp,
- [(set R8C:$rT, (add R8C:$rA, R8C:$rB))]>;
+ [/* no pattern */]>;
def AIvec:
RI10Form<0b00111000, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
@@ -584,42 +616,125 @@
[(set R32C:$rT, (sub i32ImmSExt10:$val, R32C:$rA))]>;
// ADDX: only available in vector form, doesn't match a pattern.
-def ADDXvec:
- RRForm<0b00000010110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB,
- VECREG:$rCarry),
- "addx\t$rT, $rA, $rB", IntegerOp,
- []>,
+class ADDXInst<dag OOL, dag IOL, list<dag> pattern>:
+ RRForm<0b00000010110, OOL, IOL,
+ "addx\t$rT, $rA, $rB",
+ IntegerOp, pattern>;
+
+class ADDXVecInst<ValueType vectype>:
+ ADDXInst<(outs VECREG:$rT),
+ (ins VECREG:$rA, VECREG:$rB, VECREG:$rCarry),
+ [(set (vectype VECREG:$rT),
+ (SPUaddx (vectype VECREG:$rA), (vectype VECREG:$rB),
+ (vectype VECREG:$rCarry)))]>,
RegConstraint<"$rCarry = $rT">,
NoEncode<"$rCarry">;
-// CG: only available in vector form, doesn't match a pattern.
-def CGvec:
- RRForm<0b01000011000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB,
- VECREG:$rCarry),
- "cg\t$rT, $rA, $rB", IntegerOp,
- []>,
+class ADDXRegInst<RegisterClass rclass>:
+ ADDXInst<(outs rclass:$rT),
+ (ins rclass:$rA, rclass:$rB, rclass:$rCarry),
+ [(set rclass:$rT,
+ (SPUaddx rclass:$rA, rclass:$rB, rclass:$rCarry))]>,
RegConstraint<"$rCarry = $rT">,
NoEncode<"$rCarry">;
-// SFX: only available in vector form, doesn't match a pattern
-def SFXvec:
- RRForm<0b10000010110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB,
- VECREG:$rCarry),
- "sfx\t$rT, $rA, $rB", IntegerOp,
- []>,
+multiclass AddExtended {
+ def v2i64 : ADDXVecInst<v2i64>;
+ def v4i32 : ADDXVecInst<v4i32>;
+ def r64 : ADDXRegInst<R64C>;
+ def r32 : ADDXRegInst<R32C>;
+}
+
+defm ADDX : AddExtended;
+
+// CG: Generate carry for add
+class CGInst<dag OOL, dag IOL, list<dag> pattern>:
+ RRForm<0b01000011000, OOL, IOL,
+ "cg\t$rT, $rA, $rB",
+ IntegerOp, pattern>;
+
+class CGVecInst<ValueType vectype>:
+ CGInst<(outs VECREG:$rT),
+ (ins VECREG:$rA, VECREG:$rB),
+ [(set (vectype VECREG:$rT),
+ (SPUcarry_gen (vectype VECREG:$rA), (vectype VECREG:$rB)))]>;
+
+class CGRegInst<RegisterClass rclass>:
+ CGInst<(outs rclass:$rT),
+ (ins rclass:$rA, rclass:$rB),
+ [(set rclass:$rT,
+ (SPUcarry_gen rclass:$rA, rclass:$rB))]>;
+
+multiclass CarryGenerate {
+ def v2i64 : CGVecInst<v2i64>;
+ def v4i32 : CGVecInst<v4i32>;
+ def r64 : CGRegInst<R64C>;
+ def r32 : CGRegInst<R32C>;
+}
+
+defm CG : CarryGenerate;
+
+// SFX: Subtract from, extended. This is used in conjunction with BG to subtract
+// with carry (borrow, in this case)
+class SFXInst<dag OOL, dag IOL, list<dag> pattern>:
+ RRForm<0b10000010110, OOL, IOL,
+ "sfx\t$rT, $rA, $rB",
+ IntegerOp, pattern>;
+
+class SFXVecInst<ValueType vectype>:
+ SFXInst<(outs VECREG:$rT),
+ (ins VECREG:$rA, VECREG:$rB, VECREG:$rCarry),
+ [(set (vectype VECREG:$rT),
+ (SPUsubx (vectype VECREG:$rA), (vectype VECREG:$rB),
+ (vectype VECREG:$rCarry)))]>,
RegConstraint<"$rCarry = $rT">,
NoEncode<"$rCarry">;
-// BG: only available in vector form, doesn't match a pattern.
-def BGvec:
- RRForm<0b01000010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB,
- VECREG:$rCarry),
- "bg\t$rT, $rA, $rB", IntegerOp,
- []>,
+class SFXRegInst<RegisterClass rclass>:
+ SFXInst<(outs rclass:$rT),
+ (ins rclass:$rA, rclass:$rB, rclass:$rCarry),
+ [(set rclass:$rT,
+ (SPUsubx rclass:$rA, rclass:$rB, rclass:$rCarry))]>,
RegConstraint<"$rCarry = $rT">,
NoEncode<"$rCarry">;
-// BGX: only available in vector form, doesn't match a pattern.
+multiclass SubtractExtended {
+ def v2i64 : SFXVecInst<v2i64>;
+ def v4i32 : SFXVecInst<v4i32>;
+ def r64 : SFXRegInst<R64C>;
+ def r32 : SFXRegInst<R32C>;
+}
+
+defm SFX : SubtractExtended;
+
+// BG: Borrow generate, used in conjunction with SFX for extended subtraction.
+class BGInst<dag OOL, dag IOL, list<dag> pattern>:
+ RRForm<0b01000010000, OOL, IOL,
+ "bg\t$rT, $rA, $rB",
+ IntegerOp, pattern>;
+
+class BGVecInst<ValueType vectype>:
+ BGInst<(outs VECREG:$rT),
+ (ins VECREG:$rA, VECREG:$rB),
+ [(set (vectype VECREG:$rT),
+ (SPUborrow_gen (vectype VECREG:$rA), (vectype VECREG:$rB)))]>;
+
+class BGRegInst<RegisterClass rclass>:
+ BGInst<(outs rclass:$rT),
+ (ins rclass:$rA, rclass:$rB),
+ [(set rclass:$rT,
+ (SPUborrow_gen rclass:$rA, rclass:$rB))]>;
+
+multiclass BorrowGenerate {
+ def v4i32 : BGVecInst<v4i32>;
+ def v2i64 : BGVecInst<v2i64>;
+ def r64 : BGRegInst<R64C>;
+ def r32 : BGRegInst<R32C>;
+}
+
+defm BG : BorrowGenerate;
+
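ADDX/CG (and their SFX/BG counterparts for subtraction) are no longer vector-only and now carry patterns for the new SPUaddx, SPUcarry_gen, SPUsubx and SPUborrow_gen nodes. A minimal scalar model of the add side, assuming the usual split of a wide add into 32-bit pieces (the split is illustrative; the patch itself only defines the nodes and instructions):

  #include <cstdint>

  // CARRY_GENERATE: 1 if adding the two 32-bit halves would carry out.
  static uint32_t carryGenerate(uint32_t a, uint32_t b) {
    return (a + b < a) ? 1u : 0u;
  }

  // ADD_EXTENDED: add with the carry produced by carryGenerate().
  static uint32_t addExtended(uint32_t a, uint32_t b, uint32_t carryIn) {
    return a + b + (carryIn & 1u);
  }

  // Example: a 64-bit add assembled from two 32-bit halves.
  static uint64_t add64(uint64_t a, uint64_t b) {
    uint32_t aLo = (uint32_t)a, aHi = (uint32_t)(a >> 32);
    uint32_t bLo = (uint32_t)b, bHi = (uint32_t)(b >> 32);
    uint32_t c   = carryGenerate(aLo, bLo);
    uint32_t lo  = aLo + bLo;
    uint32_t hi  = addExtended(aHi, bHi, c);
    return ((uint64_t)hi << 32) | lo;
  }

The subtract side follows the same shape, with BG producing the borrow that SFX then consumes.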
+// BGX: Borrow generate, extended.
def BGXvec:
RRForm<0b11000010110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB,
VECREG:$rCarry),
@@ -801,17 +916,17 @@
def CNTBv16i8:
RRForm_1<0b00101101010, (outs VECREG:$rT), (ins VECREG:$rA),
"cntb\t$rT, $rA", IntegerOp,
- [(set (v16i8 VECREG:$rT), (SPUcntb_v16i8 (v16i8 VECREG:$rA)))]>;
+ [(set (v16i8 VECREG:$rT), (SPUcntb (v16i8 VECREG:$rA)))]>;
def CNTBv8i16 :
RRForm_1<0b00101101010, (outs VECREG:$rT), (ins VECREG:$rA),
"cntb\t$rT, $rA", IntegerOp,
- [(set (v8i16 VECREG:$rT), (SPUcntb_v8i16 (v8i16 VECREG:$rA)))]>;
+ [(set (v8i16 VECREG:$rT), (SPUcntb (v8i16 VECREG:$rA)))]>;
def CNTBv4i32 :
RRForm_1<0b00101101010, (outs VECREG:$rT), (ins VECREG:$rA),
"cntb\t$rT, $rA", IntegerOp,
- [(set (v4i32 VECREG:$rT), (SPUcntb_v4i32 (v4i32 VECREG:$rA)))]>;
+ [(set (v4i32 VECREG:$rT), (SPUcntb (v4i32 VECREG:$rA)))]>;
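
CNTB collapses its three per-type nodes into the single SPUcntb unary node. A minimal sketch of the operation on one quadword, assuming cntb counts the set bits in each byte independently (those per-byte semantics come from the SPU ISA; the patch only renames the node):

  #include <array>
  #include <cstdint>

  // Per-byte population count over a 16-byte quadword.
  static std::array<uint8_t, 16> countBitsPerByte(const std::array<uint8_t, 16> &v) {
    std::array<uint8_t, 16> r{};
    for (unsigned i = 0; i < 16; ++i) {
      uint8_t b = v[i], n = 0;
      while (b) { n += b & 1; b >>= 1; }
      r[i] = n;
    }
    return r;
  }
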
// gbb: Gather all low order bits from each byte in $rA into a single 16-bit
// quantity stored into $rT
@@ -853,31 +968,38 @@
[]>;
// Sign extension operations:
-def XSBHvec:
- RRForm_1<0b01101101010, (outs VECREG:$rDst), (ins VECREG:$rSrc),
- "xsbh\t$rDst, $rSrc", IntegerOp,
- [(set (v8i16 VECREG:$rDst), (sext (v16i8 VECREG:$rSrc)))]>;
-
-// Ordinary form for XSBH
-def XSBHr16:
- RRForm_1<0b01101101010, (outs R16C:$rDst), (ins R16C:$rSrc),
- "xsbh\t$rDst, $rSrc", IntegerOp,
- [(set R16C:$rDst, (sext_inreg R16C:$rSrc, i8))]>;
+class XSBHInst<dag OOL, dag IOL, list<dag> pattern>:
+ RRForm_1<0b01101101010, OOL, IOL,
+ "xsbh\t$rDst, $rSrc",
+ IntegerOp, pattern>;
+
+class XSBHVecInst<ValueType vectype>:
+ XSBHInst<(outs VECREG:$rDst), (ins VECREG:$rSrc),
+ [(set (v8i16 VECREG:$rDst), (sext (vectype VECREG:$rSrc)))]>;
+
+class XSBHRegInst<RegisterClass rclass>:
+ XSBHInst<(outs rclass:$rDst), (ins rclass:$rSrc),
+ [(set rclass:$rDst, (sext_inreg rclass:$rSrc, i8))]>;
+
+multiclass ExtendByteHalfword {
+ def v16i8: XSBHVecInst<v8i16>;
+ def r16: XSBHRegInst<R16C>;
+
+ // 32-bit form for XSBH: used to sign extend 8-bit quantities to 16-bit
+ // quantities to 32-bit quantities via a 32-bit register (see the sext 8->32
+ // pattern below). Intentionally doesn't match a pattern because we want the
+ // sext 8->32 pattern to do the work for us, namely because we need the extra
+ // XSHWr32.
+ def r32: XSBHRegInst<R32C>;
+}
+defm XSBH : ExtendByteHalfword;
+
+// Sign-extend, but take an 8-bit register to a 16-bit register (not done as
+// sext_inreg)
def XSBHr8:
- RRForm_1<0b01101101010, (outs R16C:$rDst), (ins R8C:$rSrc),
- "xsbh\t$rDst, $rSrc", IntegerOp,
- [(set R16C:$rDst, (sext R8C:$rSrc))]>;
-
-// 32-bit form for XSBH: used to sign extend 8-bit quantities to 16-bit
-// quantities to 32-bit quantities via a 32-bit register (see the sext 8->32
-// pattern below). Intentionally doesn't match a pattern because we want the
-// sext 8->32 pattern to do the work for us, namely because we need the extra
-// XSHWr32.
-def XSBHr32:
- RRForm_1<0b01101101010, (outs R32C:$rDst), (ins R32C:$rSrc),
- "xsbh\t$rDst, $rSrc", IntegerOp,
- [(set R32C:$rDst, (sext_inreg R32C:$rSrc, i8))]>;
+ XSBHInst<(outs R16C:$rDst), (ins R8C:$rSrc),
+ [(set R16C:$rDst, (sext R8C:$rSrc))]>;
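
XSBH is now generated from a small class hierarchy, with the r32 form deliberately left pattern-less so the sext 8->32 combination can use it. A minimal sketch of what the sext_inreg i8 patterns compute on a plain register value (scalar model only):

  #include <cstdint>

  // sext_inreg i8: sign-extend the low byte in place, full-width result.
  static int32_t signExtendByteInReg(int32_t x) {
    return (int32_t)(int8_t)(x & 0xFF);
  }

  // The XSBHr8 form: an i8 register widened to i16.
  static int16_t signExtendByteTo16(int8_t x) {
    return (int16_t)x;
  }
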
// Sign extend halfwords to words:
def XSHWvec:
@@ -1642,9 +1764,9 @@
// It's this pattern that's probably the most useful, since SPUISelLowering
// methods create a v16i8 vector for $rC:
-class SHUFBVecPat1<ValueType vectype, SPUInstr inst>:
+class SHUFBVecPat1<ValueType vectype, ValueType masktype, SPUInstr inst>:
Pat<(SPUshuffle (vectype VECREG:$rA), (vectype VECREG:$rB),
- (v16i8 VECREG:$rC)),
+ (masktype VECREG:$rC)),
(inst VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
multiclass ShuffleBytes
@@ -1660,11 +1782,19 @@
defm SHUFB : ShuffleBytes;
-def : SHUFBVecPat1<v8i16, SHUFBv16i8>;
-def : SHUFBVecPat1<v4i32, SHUFBv16i8>;
-def : SHUFBVecPat1<v2i64, SHUFBv16i8>;
-def : SHUFBVecPat1<v4f32, SHUFBv16i8>;
-def : SHUFBVecPat1<v2f64, SHUFBv16i8>;
+// Shuffle mask is a v16i8 vector
+def : SHUFBVecPat1<v8i16, v16i8, SHUFBv16i8>;
+def : SHUFBVecPat1<v4i32, v16i8, SHUFBv16i8>;
+def : SHUFBVecPat1<v2i64, v16i8, SHUFBv16i8>;
+def : SHUFBVecPat1<v4f32, v16i8, SHUFBv16i8>;
+def : SHUFBVecPat1<v2f64, v16i8, SHUFBv16i8>;
+
+// Shuffle mask is a v4i32 vector:
+def : SHUFBVecPat1<v8i16, v4i32, SHUFBv4i32>;
+def : SHUFBVecPat1<v4i32, v4i32, SHUFBv4i32>;
+def : SHUFBVecPat1<v2i64, v4i32, SHUFBv4i32>;
+def : SHUFBVecPat1<v4f32, v4i32, SHUFBv4i32>;
+def : SHUFBVecPat1<v2f64, v4i32, SHUFBv4i32>;
//===----------------------------------------------------------------------===//
// Shift and rotate group:
@@ -2063,10 +2193,24 @@
(ROTQBYIv2i64 VECREG:$rA, uimm7:$val)>;
// See ROTQBY note above.
-def ROTQBYBIvec:
- RI7Form<0b00110011100, (outs VECREG:$rT), (ins VECREG:$rA, u7imm:$val),
- "rotqbybi\t$rT, $rA, $val", RotateShift,
- [/* intrinsic */]>;
+class ROTQBYBIInst<dag OOL, dag IOL, list<dag> pattern>:
+ RI7Form<0b00110011100, OOL, IOL,
+ "rotqbybi\t$rT, $rA, $shift",
+ RotateShift, pattern>;
+
+class ROTQBYBIVecInst<ValueType vectype, RegisterClass rclass>:
+ ROTQBYBIInst<(outs VECREG:$rT), (ins VECREG:$rA, rclass:$shift),
+ [(set (vectype VECREG:$rT),
+ (SPUrotbytes_left_bits (vectype VECREG:$rA), rclass:$shift))]>;
+
+multiclass RotateQuadByBytesByBitshift {
+ def v16i8_r32: ROTQBYBIVecInst<v16i8, R32C>;
+ def v8i16_r32: ROTQBYBIVecInst<v8i16, R32C>;
+ def v4i32_r32: ROTQBYBIVecInst<v4i32, R32C>;
+ def v2i64_r32: ROTQBYBIVecInst<v2i64, R32C>;
+}
+
+defm ROTQBYBI : RotateQuadByBytesByBitshift;
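
ROTQBYBI now matches the new SPUrotbytes_left_bits node; per the SPUNodes.td comment further down, the shift count arrives in bits and the hardware only applies whole bytes of it, ignoring the low three bits. A minimal sketch of that behaviour on a 16-byte quadword (the wrap-around at 16 bytes is an assumption of this model):

  #include <array>
  #include <cstdint>

  // Rotate a quadword left by (shiftInBits / 8) bytes.
  static std::array<uint8_t, 16> rotQuadBytesLeftByBits(const std::array<uint8_t, 16> &v,
                                                        uint32_t shiftInBits) {
    unsigned bytes = (shiftInBits >> 3) & 0xF;   // bits -> bytes, low three bits dropped
    std::array<uint8_t, 16> r{};
    for (unsigned i = 0; i < 16; ++i)
      r[i] = v[(i + bytes) & 0xF];
    return r;
  }
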
//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
// See ROTQBY note above.
@@ -2342,7 +2486,6 @@
defm ROTQMBYI : RotateQuadBytesImm;
-
//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
// Rotate right and mask by bit count
//-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
@@ -2529,25 +2672,28 @@
(ROTMAr32 R32C:$rA,
(SFIr32 (XSHWr16 (XSBHr8 R8C:$rB)), 0))>;
-def ROTMAIv4i32:
- RRForm<0b01011110000, (outs VECREG:$rT), (ins VECREG:$rA, rotNeg7imm:$val),
- "rotmai\t$rT, $rA, $val", RotateShift,
- [(set (v4i32 VECREG:$rT),
- (SPUvec_sra VECREG:$rA, (i32 uimm7:$val)))]>;
-
-def : Pat<(SPUvec_sra VECREG:$rA, (i16 uimm7:$val)),
- (ROTMAIv4i32 VECREG:$rA, uimm7:$val)>;
-
-def ROTMAIr32:
- RRForm<0b01011110000, (outs R32C:$rT), (ins R32C:$rA, rotNeg7imm:$val),
- "rotmai\t$rT, $rA, $val", RotateShift,
- [(set R32C:$rT, (sra R32C:$rA, (i32 uimm7:$val)))]>;
-
-def : Pat<(sra R32C:$rA, (i16 uimm7:$val)),
- (ROTMAIr32 R32C:$rA, uimm7:$val)>;
+class ROTMAIInst<dag OOL, dag IOL, list<dag> pattern>:
+ RRForm<0b01011110000, OOL, IOL,
+ "rotmai\t$rT, $rA, $val",
+ RotateShift, pattern>;
+
+class ROTMAIVecInst<ValueType vectype, Operand intop, ValueType inttype>:
+ ROTMAIInst<(outs VECREG:$rT), (ins VECREG:$rA, intop:$val),
+ [(set (vectype VECREG:$rT),
+ (SPUvec_sra VECREG:$rA, (inttype uimm7:$val)))]>;
+
+class ROTMAIRegInst<RegisterClass rclass, Operand intop, ValueType inttype>:
+ ROTMAIInst<(outs rclass:$rT), (ins rclass:$rA, intop:$val),
+ [(set rclass:$rT, (sra rclass:$rA, (inttype uimm7:$val)))]>;
+
+multiclass RotateMaskAlgebraicImm {
+ def v2i64_i32 : ROTMAIVecInst<v2i64, rotNeg7imm, i32>;
+ def v4i32_i32 : ROTMAIVecInst<v4i32, rotNeg7imm, i32>;
+ def r64_i32 : ROTMAIRegInst<R64C, rotNeg7imm, i32>;
+ def r32_i32 : ROTMAIRegInst<R32C, rotNeg7imm, i32>;
+}
-def : Pat<(sra R32C:$rA, (i8 uimm7:$val)),
- (ROTMAIr32 R32C:$rA, uimm7:$val)>;
+defm ROTMAI : RotateMaskAlgebraicImm;
//===----------------------------------------------------------------------===//
// Branch and conditionals:
@@ -3873,6 +4019,13 @@
(SPUlo tconstpool:$in, 0)),
(IOHLlo (ILHUhi tconstpool:$in), tconstpool:$in)>;
+def : Pat<(SPUindirect R32C:$sp, i32ImmSExt10:$imm),
+ (AIr32 R32C:$sp, i32ImmSExt10:$imm)>;
+
+def : Pat<(SPUindirect R32C:$sp, imm:$imm),
+ (Ar32 R32C:$sp,
+ (IOHLr32 (ILHUr32 (HI16 imm:$imm)), (LO16 imm:$imm)))>;
+
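The two new SPUindirect patterns pick AIr32 when the offset fits a signed 10-bit immediate and otherwise materialize the full 32-bit offset with ILHU/IOHL before the add. A minimal sketch of the ILHU/IOHL half, where HI16 and LO16 are the upper and lower halfwords of the immediate:

  #include <cstdint>

  // ilhu: load the upper halfword; iohl: OR in the lower halfword.
  static uint32_t materializeImm32(uint32_t imm) {
    uint32_t r = (imm >> 16) << 16;  // ILHUr32 (HI16 imm)
    r |= imm & 0xFFFF;               // IOHLr32 (LO16 imm)
    return r;
  }

  // The second pattern then adds this to $sp with an ordinary 'a' (Ar32).
  static uint32_t indirectAddr(uint32_t sp, uint32_t imm) {
    return sp + materializeImm32(imm);
  }
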
def : Pat<(add (SPUhi tglobaladdr:$in, 0), (SPUlo tglobaladdr:$in, 0)),
(IOHLlo (ILHUhi tglobaladdr:$in), tglobaladdr:$in)>;
Modified: llvm/branches/non-call-eh/lib/Target/CellSPU/SPUNodes.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/CellSPU/SPUNodes.td?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/CellSPU/SPUNodes.td (original)
+++ llvm/branches/non-call-eh/lib/Target/CellSPU/SPUNodes.td Sun Jul 6 15:45:41 2008
@@ -36,30 +36,27 @@
]>;
// Unary, binary v16i8 operator type constraints:
-def SPUv16i8_unop: SDTypeProfile<1, 1, [
- SDTCisVT<0, v16i8>, SDTCisSameAs<0, 1>]>;
-
def SPUv16i8_binop: SDTypeProfile<1, 2, [
SDTCisVT<0, v16i8>, SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>]>;
// Binary v8i16 operator type constraints:
-def SPUv8i16_unop: SDTypeProfile<1, 1, [
- SDTCisVT<0, v8i16>, SDTCisSameAs<0, 1>]>;
-
def SPUv8i16_binop: SDTypeProfile<1, 2, [
SDTCisVT<0, v8i16>, SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>]>;
// Binary v4i32 operator type constraints:
-def SPUv4i32_unop: SDTypeProfile<1, 1, [
- SDTCisVT<0, v4i32>, SDTCisSameAs<0, 1>]>;
-
def SPUv4i32_binop: SDTypeProfile<1, 2, [
SDTCisVT<0, v4i32>, SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>]>;
-// FSMBI type constraints: There are several variations for the various
+// Trinary operators, e.g., addx, carry generate
+def SPUIntTrinaryOp : SDTypeProfile<1, 3, [
+ SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>, SDTCisInt<0>
+]>;
+
+// SELECT_MASK type constraints: There are several variations for the various
// vector types (this avoids having to bit_convert all over the place.)
-def SPUfsmbi_type: SDTypeProfile<1, 1, [
- /* SDTCisVT<1, i32> */ SDTCisInt<1>]>;
+def SPUselmask_type: SDTypeProfile<1, 1, [
+ SDTCisInt<1>
+]>;
// SELB type constraints:
def SPUselb_type: SDTypeProfile<1, 3, [
@@ -73,10 +70,16 @@
// Synthetic/pseudo-instructions
//===----------------------------------------------------------------------===//
+/// Add extended, carry generate:
+def SPUaddx : SDNode<"SPUISD::ADD_EXTENDED", SPUIntTrinaryOp, []>;
+def SPUcarry_gen : SDNode<"SPUISD::CARRY_GENERATE", SDTIntBinOp, []>;
+
+// Subtract extended, borrow generate
+def SPUsubx : SDNode<"SPUISD::SUB_EXTENDED", SPUIntTrinaryOp, []>;
+def SPUborrow_gen : SDNode<"SPUISD::BORROW_GENERATE", SDTIntBinOp, []>;
+
// SPU CNTB:
-def SPUcntb_v16i8: SDNode<"SPUISD::CNTB", SPUv16i8_unop, []>;
-def SPUcntb_v8i16: SDNode<"SPUISD::CNTB", SPUv8i16_unop, []>;
-def SPUcntb_v4i32: SDNode<"SPUISD::CNTB", SPUv4i32_unop, []>;
+def SPUcntb : SDNode<"SPUISD::CNTB", SDTIntUnaryOp>;
// SPU vector shuffle node, matched by the SPUISD::SHUFB enum (see
// SPUISelLowering.h):
@@ -121,14 +124,23 @@
def SPUrotbytes_right_sfill: SDNode<"SPUISD::ROTBYTES_RIGHT_S",
SPUvecshift_type, []>;
+// Vector rotate left, bits shifted out of the left are rotated in on the right
def SPUrotbytes_left: SDNode<"SPUISD::ROTBYTES_LEFT",
SPUvecshift_type, []>;
+// Same as above, but the node also has a chain associated (used in loads and
+// stores)
def SPUrotbytes_left_chained : SDNode<"SPUISD::ROTBYTES_LEFT_CHAINED",
SPUvecshift_type, [SDNPHasChain]>;
+// Vector rotate left by bytes, but the count is given in bits and the SPU
+// internally converts it to bytes (saves an instruction to mask off lower
+// three bits)
+def SPUrotbytes_left_bits : SDNode<"SPUISD::ROTBYTES_LEFT_BITS",
+ SPUvecshift_type>;
+
// SPU form select mask for bytes, immediate
-def SPUfsmbi: SDNode<"SPUISD::FSMBI", SPUfsmbi_type, []>;
+def SPUselmask: SDNode<"SPUISD::SELECT_MASK", SPUselmask_type, []>;
// SPU select bits instruction
def SPUselb: SDNode<"SPUISD::SELB", SPUselb_type, []>;
Modified: llvm/branches/non-call-eh/lib/Target/CellSPU/SPUOperands.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/CellSPU/SPUOperands.td?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/CellSPU/SPUOperands.td (original)
+++ llvm/branches/non-call-eh/lib/Target/CellSPU/SPUOperands.td Sun Jul 6 15:45:41 2008
@@ -509,7 +509,11 @@
let PrintMethod = "printU16ImmOperand";
}
-def u16imm : Operand<i32> {
+def u16imm_i32 : Operand<i32> {
+ let PrintMethod = "printU16ImmOperand";
+}
+
+def u16imm : Operand<i16> {
let PrintMethod = "printU16ImmOperand";
}
@@ -555,6 +559,10 @@
let PrintMethod = "printROTNeg7Imm";
}
+def rotNeg7imm_i8 : Operand<i8> {
+ let PrintMethod = "printROTNeg7Imm";
+}
+
def target : Operand<OtherVT> {
let PrintMethod = "printBranchOperand";
}
Modified: llvm/branches/non-call-eh/lib/Target/CellSPU/SPURegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/CellSPU/SPURegisterInfo.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/CellSPU/SPURegisterInfo.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/CellSPU/SPURegisterInfo.cpp Sun Jul 6 15:45:41 2008
@@ -40,7 +40,6 @@
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include <cstdlib>
-#include <iostream>
using namespace llvm;
@@ -178,7 +177,7 @@
case SPU::R126: return 126;
case SPU::R127: return 127;
default:
- std::cerr << "Unhandled reg in SPURegisterInfo::getRegisterNumbering!\n";
+ cerr << "Unhandled reg in SPURegisterInfo::getRegisterNumbering!\n";
abort();
}
}
@@ -445,7 +444,7 @@
if (hasDebugInfo) {
// Mark effective beginning of when frame pointer becomes valid.
FrameLabelId = MMI->NextLabelID();
- BuildMI(MBB, MBBI, TII.get(ISD::LABEL)).addImm(FrameLabelId).addImm(0);
+ BuildMI(MBB, MBBI, TII.get(SPU::DBG_LABEL)).addImm(FrameLabelId);
}
// Adjust stack pointer, spilling $lr -> 16($sp) and $sp -> -FrameSize($sp)
@@ -505,7 +504,7 @@
// Mark effective beginning of when frame pointer is ready.
unsigned ReadyLabelId = MMI->NextLabelID();
- BuildMI(MBB, MBBI, TII.get(ISD::LABEL)).addImm(ReadyLabelId).addImm(0);
+ BuildMI(MBB, MBBI, TII.get(SPU::DBG_LABEL)).addImm(ReadyLabelId);
MachineLocation FPDst(SPU::R1);
MachineLocation FPSrc(MachineLocation::VirtualFP);
@@ -519,7 +518,7 @@
MachineBasicBlock::iterator MBBI = prior(MBB.end());
// Insert terminator label
unsigned BranchLabelId = MMI->NextLabelID();
- BuildMI(MBB, MBBI, TII.get(SPU::LABEL)).addImm(BranchLabelId).addImm(0);
+ BuildMI(MBB, MBBI, TII.get(SPU::DBG_LABEL)).addImm(BranchLabelId);
}
}
}
Modified: llvm/branches/non-call-eh/lib/Target/CellSPU/SPUTargetMachine.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/CellSPU/SPUTargetMachine.h?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/CellSPU/SPUTargetMachine.h (original)
+++ llvm/branches/non-call-eh/lib/Target/CellSPU/SPUTargetMachine.h Sun Jul 6 15:45:41 2008
@@ -49,7 +49,7 @@
virtual const SPUInstrInfo *getInstrInfo() const {
return &InstrInfo;
}
- virtual const TargetFrameInfo *getFrameInfo() const {
+ virtual const SPUFrameInfo *getFrameInfo() const {
return &FrameInfo;
}
/*!
@@ -70,7 +70,7 @@
return const_cast<SPUTargetLowering*>(&TLInfo);
}
- virtual const TargetRegisterInfo *getRegisterInfo() const {
+ virtual const SPURegisterInfo *getRegisterInfo() const {
return &InstrInfo.getRegisterInfo();
}
Propchange: llvm/branches/non-call-eh/lib/Target/CppBackend/
------------------------------------------------------------------------------
--- svn:ignore (added)
+++ svn:ignore Sun Jul 6 15:45:41 2008
@@ -0,0 +1,3 @@
+Release
+Debug
+
Added: llvm/branches/non-call-eh/lib/Target/CppBackend/CPPBackend.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/CppBackend/CPPBackend.cpp?rev=53163&view=auto
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/CppBackend/CPPBackend.cpp (added)
+++ llvm/branches/non-call-eh/lib/Target/CppBackend/CPPBackend.cpp Sun Jul 6 15:45:41 2008
@@ -0,0 +1,1990 @@
+//===-- CPPBackend.cpp - Library for converting LLVM code to C++ code -----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the writing of the LLVM IR as a set of C++ calls to the
+// LLVM IR interface. The input module is assumed to be verified.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CPPTargetMachine.h"
+#include "llvm/CallingConv.h"
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/InlineAsm.h"
+#include "llvm/Instruction.h"
+#include "llvm/Instructions.h"
+#include "llvm/Module.h"
+#include "llvm/Pass.h"
+#include "llvm/PassManager.h"
+#include "llvm/TypeSymbolTable.h"
+#include "llvm/Target/TargetMachineRegistry.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Config/config.h"
+#include <algorithm>
+#include <iostream>
+#include <set>
+
+using namespace llvm;
+
+static cl::opt<std::string>
+FuncName("cppfname", cl::desc("Specify the name of the generated function"),
+ cl::value_desc("function name"));
+
+enum WhatToGenerate {
+ GenProgram,
+ GenModule,
+ GenContents,
+ GenFunction,
+ GenFunctions,
+ GenInline,
+ GenVariable,
+ GenType
+};
+
+static cl::opt<WhatToGenerate> GenerationType("cppgen", cl::Optional,
+ cl::desc("Choose what kind of output to generate"),
+ cl::init(GenProgram),
+ cl::values(
+ clEnumValN(GenProgram, "program", "Generate a complete program"),
+ clEnumValN(GenModule, "module", "Generate a module definition"),
+ clEnumValN(GenContents, "contents", "Generate contents of a module"),
+ clEnumValN(GenFunction, "function", "Generate a function definition"),
+ clEnumValN(GenFunctions,"functions", "Generate all function definitions"),
+ clEnumValN(GenInline, "inline", "Generate an inline function"),
+ clEnumValN(GenVariable, "variable", "Generate a variable definition"),
+ clEnumValN(GenType, "type", "Generate a type definition"),
+ clEnumValEnd
+ )
+);
+
+static cl::opt<std::string> NameToGenerate("cppfor", cl::Optional,
+ cl::desc("Specify the name of the thing to generate"),
+ cl::init("!bad!"));
+
+// Register the target.
+static RegisterTarget<CPPTargetMachine> X("cpp", " C++ backend");
+
+namespace {
+ typedef std::vector<const Type*> TypeList;
+ typedef std::map<const Type*,std::string> TypeMap;
+ typedef std::map<const Value*,std::string> ValueMap;
+ typedef std::set<std::string> NameSet;
+ typedef std::set<const Type*> TypeSet;
+ typedef std::set<const Value*> ValueSet;
+ typedef std::map<const Value*,std::string> ForwardRefMap;
+
+ /// CppWriter - This class is the main chunk of code that converts an LLVM
+ /// module to a C++ translation unit.
+ class CppWriter : public ModulePass {
+ const char* progname;
+ std::ostream &Out;
+ const Module *TheModule;
+ uint64_t uniqueNum;
+ TypeMap TypeNames;
+ ValueMap ValueNames;
+ TypeMap UnresolvedTypes;
+ TypeList TypeStack;
+ NameSet UsedNames;
+ TypeSet DefinedTypes;
+ ValueSet DefinedValues;
+ ForwardRefMap ForwardRefs;
+ bool is_inline;
+
+ public:
+ static char ID;
+ explicit CppWriter(std::ostream &o) :
+ ModulePass((intptr_t)&ID), Out(o), uniqueNum(0), is_inline(false) {}
+
+ virtual const char *getPassName() const { return "C++ backend"; }
+
+ bool runOnModule(Module &M);
+
+ void printProgram(const std::string& fname, const std::string& modName );
+ void printModule(const std::string& fname, const std::string& modName );
+ void printContents(const std::string& fname, const std::string& modName );
+ void printFunction(const std::string& fname, const std::string& funcName );
+ void printFunctions();
+ void printInline(const std::string& fname, const std::string& funcName );
+ void printVariable(const std::string& fname, const std::string& varName );
+ void printType(const std::string& fname, const std::string& typeName );
+
+ void error(const std::string& msg);
+
+ private:
+ void printLinkageType(GlobalValue::LinkageTypes LT);
+ void printVisibilityType(GlobalValue::VisibilityTypes VisTypes);
+ void printCallingConv(unsigned cc);
+ void printEscapedString(const std::string& str);
+ void printCFP(const ConstantFP* CFP);
+
+ std::string getCppName(const Type* val);
+ inline void printCppName(const Type* val);
+
+ std::string getCppName(const Value* val);
+ inline void printCppName(const Value* val);
+
+ void printParamAttrs(const PAListPtr &PAL, const std::string &name);
+ bool printTypeInternal(const Type* Ty);
+ inline void printType(const Type* Ty);
+ void printTypes(const Module* M);
+
+ void printConstant(const Constant *CPV);
+ void printConstants(const Module* M);
+
+ void printVariableUses(const GlobalVariable *GV);
+ void printVariableHead(const GlobalVariable *GV);
+ void printVariableBody(const GlobalVariable *GV);
+
+ void printFunctionUses(const Function *F);
+ void printFunctionHead(const Function *F);
+ void printFunctionBody(const Function *F);
+ void printInstruction(const Instruction *I, const std::string& bbname);
+ std::string getOpName(Value*);
+
+ void printModuleBody();
+ };
+
+ static unsigned indent_level = 0;
+ inline std::ostream& nl(std::ostream& Out, int delta = 0) {
+ Out << "\n";
+ if (delta >= 0 || indent_level >= unsigned(-delta))
+ indent_level += delta;
+ for (unsigned i = 0; i < indent_level; ++i)
+ Out << " ";
+ return Out;
+ }
+
+ inline void in() { indent_level++; }
+ inline void out() { if (indent_level >0) indent_level--; }
+
+ inline void
+ sanitize(std::string& str) {
+ for (size_t i = 0; i < str.length(); ++i)
+ if (!isalnum(str[i]) && str[i] != '_')
+ str[i] = '_';
+ }
+
+ inline std::string
+ getTypePrefix(const Type* Ty ) {
+ switch (Ty->getTypeID()) {
+ case Type::VoidTyID: return "void_";
+ case Type::IntegerTyID:
+ return std::string("int") + utostr(cast<IntegerType>(Ty)->getBitWidth()) +
+ "_";
+ case Type::FloatTyID: return "float_";
+ case Type::DoubleTyID: return "double_";
+ case Type::LabelTyID: return "label_";
+ case Type::FunctionTyID: return "func_";
+ case Type::StructTyID: return "struct_";
+ case Type::ArrayTyID: return "array_";
+ case Type::PointerTyID: return "ptr_";
+ case Type::VectorTyID: return "packed_";
+ case Type::OpaqueTyID: return "opaque_";
+ default: return "other_";
+ }
+ return "unknown_";
+ }
+
+ // Looks up the type in the symbol table and returns a pointer to its name or
+ // a null pointer if it wasn't found. Note that this isn't the same as the
+ // Module::getTypeName function which will return an empty string, not a null
+ // pointer if the name is not found.
+ inline const std::string*
+ findTypeName(const TypeSymbolTable& ST, const Type* Ty) {
+ TypeSymbolTable::const_iterator TI = ST.begin();
+ TypeSymbolTable::const_iterator TE = ST.end();
+ for (;TI != TE; ++TI)
+ if (TI->second == Ty)
+ return &(TI->first);
+ return 0;
+ }
+
+ void CppWriter::error(const std::string& msg) {
+ std::cerr << progname << ": " << msg << "\n";
+ exit(2);
+ }
+
+ // printCFP - Print a floating point constant .. very carefully :)
+ // This makes sure that conversion to/from floating yields the same binary
+ // result so that we don't lose precision.
+ void CppWriter::printCFP(const ConstantFP *CFP) {
+ APFloat APF = APFloat(CFP->getValueAPF()); // copy
+ if (CFP->getType() == Type::FloatTy)
+ APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven);
+ Out << "ConstantFP::get(";
+ Out << "APFloat(";
+#if HAVE_PRINTF_A
+ char Buffer[100];
+ sprintf(Buffer, "%A", APF.convertToDouble());
+ if ((!strncmp(Buffer, "0x", 2) ||
+ !strncmp(Buffer, "-0x", 3) ||
+ !strncmp(Buffer, "+0x", 3)) &&
+ APF.bitwiseIsEqual(APFloat(atof(Buffer)))) {
+ if (CFP->getType() == Type::DoubleTy)
+ Out << "BitsToDouble(" << Buffer << ")";
+ else
+ Out << "BitsToFloat((float)" << Buffer << ")";
+ Out << ")";
+ } else {
+#endif
+ std::string StrVal = ftostr(CFP->getValueAPF());
+
+ while (StrVal[0] == ' ')
+ StrVal.erase(StrVal.begin());
+
+ // Check to make sure that the stringized number is not some string like
+ // "Inf" or NaN. Check that the string matches the "[-+]?[0-9]" regex.
+ if (((StrVal[0] >= '0' && StrVal[0] <= '9') ||
+ ((StrVal[0] == '-' || StrVal[0] == '+') &&
+ (StrVal[1] >= '0' && StrVal[1] <= '9'))) &&
+ (CFP->isExactlyValue(atof(StrVal.c_str())))) {
+ if (CFP->getType() == Type::DoubleTy)
+ Out << StrVal;
+ else
+ Out << StrVal << "f";
+ } else if (CFP->getType() == Type::DoubleTy)
+ Out << "BitsToDouble(0x" << std::hex
+ << CFP->getValueAPF().convertToAPInt().getZExtValue()
+ << std::dec << "ULL) /* " << StrVal << " */";
+ else
+ Out << "BitsToFloat(0x" << std::hex
+ << (uint32_t)CFP->getValueAPF().convertToAPInt().getZExtValue()
+ << std::dec << "U) /* " << StrVal << " */";
+ Out << ")";
+#if HAVE_PRINTF_A
+ }
+#endif
+ Out << ")";
+ }
+
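printCFP only emits a decimal literal when re-parsing that literal reproduces the exact constant; otherwise it falls back to BitsToDouble/BitsToFloat with the raw bits. A minimal sketch of the round-trip test it relies on (the %g formatting is a stand-in for ftostr, and the bitwise comparison is only illustrative):

  #include <cstdio>
  #include <cstdlib>
  #include <cstring>

  // True if printing the value and parsing it back preserves it bit for bit.
  static bool decimalRoundTrips(double v) {
    char buf[64];
    std::snprintf(buf, sizeof(buf), "%g", v);  // stand-in for ftostr()
    double back = std::atof(buf);
    return std::memcmp(&back, &v, sizeof(double)) == 0;
  }
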
+ void CppWriter::printCallingConv(unsigned cc){
+ // Print the calling convention.
+ switch (cc) {
+ case CallingConv::C: Out << "CallingConv::C"; break;
+ case CallingConv::Fast: Out << "CallingConv::Fast"; break;
+ case CallingConv::Cold: Out << "CallingConv::Cold"; break;
+ case CallingConv::FirstTargetCC: Out << "CallingConv::FirstTargetCC"; break;
+ default: Out << cc; break;
+ }
+ }
+
+ void CppWriter::printLinkageType(GlobalValue::LinkageTypes LT) {
+ switch (LT) {
+ case GlobalValue::InternalLinkage:
+ Out << "GlobalValue::InternalLinkage"; break;
+ case GlobalValue::LinkOnceLinkage:
+ Out << "GlobalValue::LinkOnceLinkage "; break;
+ case GlobalValue::WeakLinkage:
+ Out << "GlobalValue::WeakLinkage"; break;
+ case GlobalValue::AppendingLinkage:
+ Out << "GlobalValue::AppendingLinkage"; break;
+ case GlobalValue::ExternalLinkage:
+ Out << "GlobalValue::ExternalLinkage"; break;
+ case GlobalValue::DLLImportLinkage:
+ Out << "GlobalValue::DLLImportLinkage"; break;
+ case GlobalValue::DLLExportLinkage:
+ Out << "GlobalValue::DLLExportLinkage"; break;
+ case GlobalValue::ExternalWeakLinkage:
+ Out << "GlobalValue::ExternalWeakLinkage"; break;
+ case GlobalValue::GhostLinkage:
+ Out << "GlobalValue::GhostLinkage"; break;
+ case GlobalValue::CommonLinkage:
+ Out << "GlobalValue::CommonLinkage"; break;
+ }
+ }
+
+ void CppWriter::printVisibilityType(GlobalValue::VisibilityTypes VisType) {
+ switch (VisType) {
+ default: assert(0 && "Unknown GVar visibility");
+ case GlobalValue::DefaultVisibility:
+ Out << "GlobalValue::DefaultVisibility";
+ break;
+ case GlobalValue::HiddenVisibility:
+ Out << "GlobalValue::HiddenVisibility";
+ break;
+ case GlobalValue::ProtectedVisibility:
+ Out << "GlobalValue::ProtectedVisibility";
+ break;
+ }
+ }
+
+ // printEscapedString - Print each character of the specified string, escaping
+ // it if it is not printable or if it is an escape char.
+ void CppWriter::printEscapedString(const std::string &Str) {
+ for (unsigned i = 0, e = Str.size(); i != e; ++i) {
+ unsigned char C = Str[i];
+ if (isprint(C) && C != '"' && C != '\\') {
+ Out << C;
+ } else {
+ Out << "\\x"
+ << (char) ((C/16 < 10) ? ( C/16 +'0') : ( C/16 -10+'A'))
+ << (char)(((C&15) < 10) ? ((C&15)+'0') : ((C&15)-10+'A'));
+ }
+ }
+ }
+
+ std::string CppWriter::getCppName(const Type* Ty) {
+ // First, handle the primitive types .. easy
+ if (Ty->isPrimitiveType() || Ty->isInteger()) {
+ switch (Ty->getTypeID()) {
+ case Type::VoidTyID: return "Type::VoidTy";
+ case Type::IntegerTyID: {
+ unsigned BitWidth = cast<IntegerType>(Ty)->getBitWidth();
+ return "IntegerType::get(" + utostr(BitWidth) + ")";
+ }
+ case Type::FloatTyID: return "Type::FloatTy";
+ case Type::DoubleTyID: return "Type::DoubleTy";
+ case Type::LabelTyID: return "Type::LabelTy";
+ default:
+ error("Invalid primitive type");
+ break;
+ }
+ return "Type::VoidTy"; // shouldn't be returned, but make it sensible
+ }
+
+ // Now, see if we've seen the type before and return that
+ TypeMap::iterator I = TypeNames.find(Ty);
+ if (I != TypeNames.end())
+ return I->second;
+
+ // Okay, let's build a new name for this type. Start with a prefix
+ const char* prefix = 0;
+ switch (Ty->getTypeID()) {
+ case Type::FunctionTyID: prefix = "FuncTy_"; break;
+ case Type::StructTyID: prefix = "StructTy_"; break;
+ case Type::ArrayTyID: prefix = "ArrayTy_"; break;
+ case Type::PointerTyID: prefix = "PointerTy_"; break;
+ case Type::OpaqueTyID: prefix = "OpaqueTy_"; break;
+ case Type::VectorTyID: prefix = "VectorTy_"; break;
+ default: prefix = "OtherTy_"; break; // prevent breakage
+ }
+
+ // See if the type has a name in the symboltable and build accordingly
+ const std::string* tName = findTypeName(TheModule->getTypeSymbolTable(), Ty);
+ std::string name;
+ if (tName)
+ name = std::string(prefix) + *tName;
+ else
+ name = std::string(prefix) + utostr(uniqueNum++);
+ sanitize(name);
+
+ // Save the name
+ return TypeNames[Ty] = name;
+ }
+
+ void CppWriter::printCppName(const Type* Ty) {
+ printEscapedString(getCppName(Ty));
+ }
+
+ std::string CppWriter::getCppName(const Value* val) {
+ std::string name;
+ ValueMap::iterator I = ValueNames.find(val);
+ if (I != ValueNames.end() && I->first == val)
+ return I->second;
+
+ if (const GlobalVariable* GV = dyn_cast<GlobalVariable>(val)) {
+ name = std::string("gvar_") +
+ getTypePrefix(GV->getType()->getElementType());
+ } else if (isa<Function>(val)) {
+ name = std::string("func_");
+ } else if (const Constant* C = dyn_cast<Constant>(val)) {
+ name = std::string("const_") + getTypePrefix(C->getType());
+ } else if (const Argument* Arg = dyn_cast<Argument>(val)) {
+ if (is_inline) {
+ unsigned argNum = std::distance(Arg->getParent()->arg_begin(),
+ Function::const_arg_iterator(Arg)) + 1;
+ name = std::string("arg_") + utostr(argNum);
+ NameSet::iterator NI = UsedNames.find(name);
+ if (NI != UsedNames.end())
+ name += std::string("_") + utostr(uniqueNum++);
+ UsedNames.insert(name);
+ return ValueNames[val] = name;
+ } else {
+ name = getTypePrefix(val->getType());
+ }
+ } else {
+ name = getTypePrefix(val->getType());
+ }
+ name += (val->hasName() ? val->getName() : utostr(uniqueNum++));
+ sanitize(name);
+ NameSet::iterator NI = UsedNames.find(name);
+ if (NI != UsedNames.end())
+ name += std::string("_") + utostr(uniqueNum++);
+ UsedNames.insert(name);
+ return ValueNames[val] = name;
+ }
+
+ void CppWriter::printCppName(const Value* val) {
+ printEscapedString(getCppName(val));
+ }
+
+ void CppWriter::printParamAttrs(const PAListPtr &PAL,
+ const std::string &name) {
+ Out << "PAListPtr " << name << "_PAL;";
+ nl(Out);
+ if (!PAL.isEmpty()) {
+ Out << '{'; in(); nl(Out);
+ Out << "SmallVector<ParamAttrsWithIndex, 4> Attrs;"; nl(Out);
+ Out << "ParamAttrsWithIndex PAWI;"; nl(Out);
+ for (unsigned i = 0; i < PAL.getNumSlots(); ++i) {
+ uint16_t index = PAL.getSlot(i).Index;
+ ParameterAttributes attrs = PAL.getSlot(i).Attrs;
+ Out << "PAWI.Index = " << index << "; PAWI.Attrs = 0 ";
+ if (attrs & ParamAttr::SExt)
+ Out << " | ParamAttr::SExt";
+ if (attrs & ParamAttr::ZExt)
+ Out << " | ParamAttr::ZExt";
+ if (attrs & ParamAttr::StructRet)
+ Out << " | ParamAttr::StructRet";
+ if (attrs & ParamAttr::InReg)
+ Out << " | ParamAttr::InReg";
+ if (attrs & ParamAttr::NoReturn)
+ Out << " | ParamAttr::NoReturn";
+ if (attrs & ParamAttr::NoUnwind)
+ Out << " | ParamAttr::NoUnwind";
+ if (attrs & ParamAttr::ByVal)
+ Out << " | ParamAttr::ByVal";
+ if (attrs & ParamAttr::NoAlias)
+ Out << " | ParamAttr::NoAlias";
+ if (attrs & ParamAttr::Nest)
+ Out << " | ParamAttr::Nest";
+ if (attrs & ParamAttr::ReadNone)
+ Out << " | ParamAttr::ReadNone";
+ if (attrs & ParamAttr::ReadOnly)
+ Out << " | ParamAttr::ReadOnly";
+ Out << ";";
+ nl(Out);
+ Out << "Attrs.push_back(PAWI);";
+ nl(Out);
+ }
+ Out << name << "_PAL = PAListPtr::get(Attrs.begin(), Attrs.end());";
+ nl(Out);
+ out(); nl(Out);
+ Out << '}'; nl(Out);
+ }
+ }
+
+ bool CppWriter::printTypeInternal(const Type* Ty) {
+ // We don't print definitions for primitive types
+ if (Ty->isPrimitiveType() || Ty->isInteger())
+ return false;
+
+ // If we already defined this type, we don't need to define it again.
+ if (DefinedTypes.find(Ty) != DefinedTypes.end())
+ return false;
+
+ // Everything below needs the name for the type so get it now.
+ std::string typeName(getCppName(Ty));
+
+ // Search the type stack for recursion. If we find it, then generate this
+ // as an OpaqueType, but make sure not to do this multiple times because
+ // the type could appear in multiple places on the stack. Once the opaque
+ // definition is issued, it must not be re-issued. Consequently we have to
+ // check the UnresolvedTypes list as well.
+ TypeList::const_iterator TI = std::find(TypeStack.begin(), TypeStack.end(),
+ Ty);
+ if (TI != TypeStack.end()) {
+ TypeMap::const_iterator I = UnresolvedTypes.find(Ty);
+ if (I == UnresolvedTypes.end()) {
+ Out << "PATypeHolder " << typeName << "_fwd = OpaqueType::get();";
+ nl(Out);
+ UnresolvedTypes[Ty] = typeName;
+ }
+ return true;
+ }
+
+ // We're going to print a derived type which, by definition, contains other
+ // types. So, push this one we're printing onto the type stack to assist with
+ // recursive definitions.
+ TypeStack.push_back(Ty);
+
+ // Print the type definition
+ switch (Ty->getTypeID()) {
+ case Type::FunctionTyID: {
+ const FunctionType* FT = cast<FunctionType>(Ty);
+ Out << "std::vector<const Type*>" << typeName << "_args;";
+ nl(Out);
+ FunctionType::param_iterator PI = FT->param_begin();
+ FunctionType::param_iterator PE = FT->param_end();
+ for (; PI != PE; ++PI) {
+ const Type* argTy = static_cast<const Type*>(*PI);
+ bool isForward = printTypeInternal(argTy);
+ std::string argName(getCppName(argTy));
+ Out << typeName << "_args.push_back(" << argName;
+ if (isForward)
+ Out << "_fwd";
+ Out << ");";
+ nl(Out);
+ }
+ bool isForward = printTypeInternal(FT->getReturnType());
+ std::string retTypeName(getCppName(FT->getReturnType()));
+ Out << "FunctionType* " << typeName << " = FunctionType::get(";
+ in(); nl(Out) << "/*Result=*/" << retTypeName;
+ if (isForward)
+ Out << "_fwd";
+ Out << ",";
+ nl(Out) << "/*Params=*/" << typeName << "_args,";
+ nl(Out) << "/*isVarArg=*/" << (FT->isVarArg() ? "true" : "false") << ");";
+ out();
+ nl(Out);
+ break;
+ }
+ case Type::StructTyID: {
+ const StructType* ST = cast<StructType>(Ty);
+ Out << "std::vector<const Type*>" << typeName << "_fields;";
+ nl(Out);
+ StructType::element_iterator EI = ST->element_begin();
+ StructType::element_iterator EE = ST->element_end();
+ for (; EI != EE; ++EI) {
+ const Type* fieldTy = static_cast<const Type*>(*EI);
+ bool isForward = printTypeInternal(fieldTy);
+ std::string fieldName(getCppName(fieldTy));
+ Out << typeName << "_fields.push_back(" << fieldName;
+ if (isForward)
+ Out << "_fwd";
+ Out << ");";
+ nl(Out);
+ }
+ Out << "StructType* " << typeName << " = StructType::get("
+ << typeName << "_fields, /*isPacked=*/"
+ << (ST->isPacked() ? "true" : "false") << ");";
+ nl(Out);
+ break;
+ }
+ case Type::ArrayTyID: {
+ const ArrayType* AT = cast<ArrayType>(Ty);
+ const Type* ET = AT->getElementType();
+ bool isForward = printTypeInternal(ET);
+ std::string elemName(getCppName(ET));
+ Out << "ArrayType* " << typeName << " = ArrayType::get("
+ << elemName << (isForward ? "_fwd" : "")
+ << ", " << utostr(AT->getNumElements()) << ");";
+ nl(Out);
+ break;
+ }
+ case Type::PointerTyID: {
+ const PointerType* PT = cast<PointerType>(Ty);
+ const Type* ET = PT->getElementType();
+ bool isForward = printTypeInternal(ET);
+ std::string elemName(getCppName(ET));
+ Out << "PointerType* " << typeName << " = PointerType::get("
+ << elemName << (isForward ? "_fwd" : "")
+ << ", " << utostr(PT->getAddressSpace()) << ");";
+ nl(Out);
+ break;
+ }
+ case Type::VectorTyID: {
+ const VectorType* PT = cast<VectorType>(Ty);
+ const Type* ET = PT->getElementType();
+ bool isForward = printTypeInternal(ET);
+ std::string elemName(getCppName(ET));
+ Out << "VectorType* " << typeName << " = VectorType::get("
+ << elemName << (isForward ? "_fwd" : "")
+ << ", " << utostr(PT->getNumElements()) << ");";
+ nl(Out);
+ break;
+ }
+ case Type::OpaqueTyID: {
+ Out << "OpaqueType* " << typeName << " = OpaqueType::get();";
+ nl(Out);
+ break;
+ }
+ default:
+ error("Invalid TypeID");
+ }
+
+ // If the type had a name, make sure we recreate it.
+ const std::string* progTypeName =
+ findTypeName(TheModule->getTypeSymbolTable(),Ty);
+ if (progTypeName) {
+ Out << "mod->addTypeName(\"" << *progTypeName << "\", "
+ << typeName << ");";
+ nl(Out);
+ }
+
+ // Pop us off the type stack
+ TypeStack.pop_back();
+
+ // Indicate that this type is now defined.
+ DefinedTypes.insert(Ty);
+
+ // Early resolve as many unresolved types as possible. Search the unresolved
+ // types map for the type we just printed. Now that its definition is complete
+ // we can resolve any previous references to it. This prevents a cascade of
+ // unresolved types.
+ TypeMap::iterator I = UnresolvedTypes.find(Ty);
+ if (I != UnresolvedTypes.end()) {
+ Out << "cast<OpaqueType>(" << I->second
+ << "_fwd.get())->refineAbstractTypeTo(" << I->second << ");";
+ nl(Out);
+ Out << I->second << " = cast<";
+ switch (Ty->getTypeID()) {
+ case Type::FunctionTyID: Out << "FunctionType"; break;
+ case Type::ArrayTyID: Out << "ArrayType"; break;
+ case Type::StructTyID: Out << "StructType"; break;
+ case Type::VectorTyID: Out << "VectorType"; break;
+ case Type::PointerTyID: Out << "PointerType"; break;
+ case Type::OpaqueTyID: Out << "OpaqueType"; break;
+ default: Out << "NoSuchDerivedType"; break;
+ }
+ Out << ">(" << I->second << "_fwd.get());";
+ nl(Out); nl(Out);
+ UnresolvedTypes.erase(I);
+ }
+
+ // Finally, separate the type definition from other with a newline.
+ nl(Out);
+
+ // We weren't a recursive type
+ return false;
+ }
+
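Applied by hand to a self-referential struct, the forward-reference scheme above produces code along these lines (the names and the example type are hypothetical; the calls mirror the strings printTypeInternal emits):

  #include "llvm/DerivedTypes.h"
  #include <vector>
  using namespace llvm;

  const StructType *buildRecursiveStruct() {
    PATypeHolder Fwd = OpaqueType::get();                   // forward holder
    std::vector<const Type*> Fields;
    Fields.push_back(PointerType::get(Fwd.get(), 0));       // recursive member
    Fields.push_back(IntegerType::get(32));
    StructType *ST = StructType::get(Fields, /*isPacked=*/false);
    cast<OpaqueType>(Fwd.get())->refineAbstractTypeTo(ST);  // resolve the forward ref
    return cast<StructType>(Fwd.get());
  }
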
+ // Prints a type definition via printTypeInternal, which returns true when it
+ // could not resolve a type and had to emit a forward reference for it.
+ void CppWriter::printType(const Type* Ty) {
+ assert(TypeStack.empty());
+ TypeStack.clear();
+ printTypeInternal(Ty);
+ assert(TypeStack.empty());
+ }
+
+ void CppWriter::printTypes(const Module* M) {
+ // Walk the symbol table and print out all its types
+ const TypeSymbolTable& symtab = M->getTypeSymbolTable();
+ for (TypeSymbolTable::const_iterator TI = symtab.begin(), TE = symtab.end();
+ TI != TE; ++TI) {
+
+ // For primitive types and types already defined, just add a name
+ TypeMap::const_iterator TNI = TypeNames.find(TI->second);
+ if (TI->second->isInteger() || TI->second->isPrimitiveType() ||
+ TNI != TypeNames.end()) {
+ Out << "mod->addTypeName(\"";
+ printEscapedString(TI->first);
+ Out << "\", " << getCppName(TI->second) << ");";
+ nl(Out);
+ // For everything else, define the type
+ } else {
+ printType(TI->second);
+ }
+ }
+
+ // Add all of the global variables to the value table...
+ for (Module::const_global_iterator I = TheModule->global_begin(),
+ E = TheModule->global_end(); I != E; ++I) {
+ if (I->hasInitializer())
+ printType(I->getInitializer()->getType());
+ printType(I->getType());
+ }
+
+ // Add all the functions to the table
+ for (Module::const_iterator FI = TheModule->begin(), FE = TheModule->end();
+ FI != FE; ++FI) {
+ printType(FI->getReturnType());
+ printType(FI->getFunctionType());
+ // Add all the function arguments
+ for (Function::const_arg_iterator AI = FI->arg_begin(),
+ AE = FI->arg_end(); AI != AE; ++AI) {
+ printType(AI->getType());
+ }
+
+ // Add all of the basic blocks and instructions
+ for (Function::const_iterator BB = FI->begin(),
+ E = FI->end(); BB != E; ++BB) {
+ printType(BB->getType());
+ for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I!=E;
+ ++I) {
+ printType(I->getType());
+ for (unsigned i = 0; i < I->getNumOperands(); ++i)
+ printType(I->getOperand(i)->getType());
+ }
+ }
+ }
+ }
+
+
+ // printConstant - Print out a constant pool entry...
+ void CppWriter::printConstant(const Constant *CV) {
+ // First, if the constant is actually a GlobalValue (variable or function)
+ // or its already in the constant list then we've printed it already and we
+ // can just return.
+ if (isa<GlobalValue>(CV) || ValueNames.find(CV) != ValueNames.end())
+ return;
+
+ std::string constName(getCppName(CV));
+ std::string typeName(getCppName(CV->getType()));
+ if (CV->isNullValue()) {
+ Out << "Constant* " << constName << " = Constant::getNullValue("
+ << typeName << ");";
+ nl(Out);
+ return;
+ }
+ if (isa<GlobalValue>(CV)) {
+ // Skip variables and functions, we emit them elsewhere
+ return;
+ }
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(CV)) {
+ Out << "ConstantInt* " << constName << " = ConstantInt::get(APInt("
+ << cast<IntegerType>(CI->getType())->getBitWidth() << ", "
+ << " \"" << CI->getValue().toStringSigned(10) << "\", 10));";
+ } else if (isa<ConstantAggregateZero>(CV)) {
+ Out << "ConstantAggregateZero* " << constName
+ << " = ConstantAggregateZero::get(" << typeName << ");";
+ } else if (isa<ConstantPointerNull>(CV)) {
+ Out << "ConstantPointerNull* " << constName
+ << " = ConstanPointerNull::get(" << typeName << ");";
+ } else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CV)) {
+ Out << "ConstantFP* " << constName << " = ";
+ printCFP(CFP);
+ Out << ";";
+ } else if (const ConstantArray *CA = dyn_cast<ConstantArray>(CV)) {
+ if (CA->isString() && CA->getType()->getElementType() == Type::Int8Ty) {
+ Out << "Constant* " << constName << " = ConstantArray::get(\"";
+ std::string tmp = CA->getAsString();
+ bool nullTerminate = false;
+ if (tmp[tmp.length()-1] == 0) {
+ tmp.erase(tmp.length()-1);
+ nullTerminate = true;
+ }
+ printEscapedString(tmp);
+ // Determine if we want null termination or not.
+ if (nullTerminate)
+ Out << "\", true"; // Indicate that the null terminator should be
+ // added.
+ else
+ Out << "\", false";// No null terminator
+ Out << ");";
+ } else {
+ Out << "std::vector<Constant*> " << constName << "_elems;";
+ nl(Out);
+ unsigned N = CA->getNumOperands();
+ for (unsigned i = 0; i < N; ++i) {
+ printConstant(CA->getOperand(i)); // recurse to print operands
+ Out << constName << "_elems.push_back("
+ << getCppName(CA->getOperand(i)) << ");";
+ nl(Out);
+ }
+ Out << "Constant* " << constName << " = ConstantArray::get("
+ << typeName << ", " << constName << "_elems);";
+ }
+ } else if (const ConstantStruct *CS = dyn_cast<ConstantStruct>(CV)) {
+ Out << "std::vector<Constant*> " << constName << "_fields;";
+ nl(Out);
+ unsigned N = CS->getNumOperands();
+ for (unsigned i = 0; i < N; i++) {
+ printConstant(CS->getOperand(i));
+ Out << constName << "_fields.push_back("
+ << getCppName(CS->getOperand(i)) << ");";
+ nl(Out);
+ }
+ Out << "Constant* " << constName << " = ConstantStruct::get("
+ << typeName << ", " << constName << "_fields);";
+ } else if (const ConstantVector *CP = dyn_cast<ConstantVector>(CV)) {
+ Out << "std::vector<Constant*> " << constName << "_elems;";
+ nl(Out);
+ unsigned N = CP->getNumOperands();
+ for (unsigned i = 0; i < N; ++i) {
+ printConstant(CP->getOperand(i));
+ Out << constName << "_elems.push_back("
+ << getCppName(CP->getOperand(i)) << ");";
+ nl(Out);
+ }
+ Out << "Constant* " << constName << " = ConstantVector::get("
+ << typeName << ", " << constName << "_elems);";
+ } else if (isa<UndefValue>(CV)) {
+ Out << "UndefValue* " << constName << " = UndefValue::get("
+ << typeName << ");";
+ } else if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(CV)) {
+ if (CE->getOpcode() == Instruction::GetElementPtr) {
+ Out << "std::vector<Constant*> " << constName << "_indices;";
+ nl(Out);
+ printConstant(CE->getOperand(0));
+ for (unsigned i = 1; i < CE->getNumOperands(); ++i ) {
+ printConstant(CE->getOperand(i));
+ Out << constName << "_indices.push_back("
+ << getCppName(CE->getOperand(i)) << ");";
+ nl(Out);
+ }
+ Out << "Constant* " << constName
+ << " = ConstantExpr::getGetElementPtr("
+ << getCppName(CE->getOperand(0)) << ", "
+ << "&" << constName << "_indices[0], "
+ << constName << "_indices.size()"
+ << " );";
+ } else if (CE->isCast()) {
+ printConstant(CE->getOperand(0));
+ Out << "Constant* " << constName << " = ConstantExpr::getCast(";
+ switch (CE->getOpcode()) {
+ default: assert(0 && "Invalid cast opcode");
+ case Instruction::Trunc: Out << "Instruction::Trunc"; break;
+ case Instruction::ZExt: Out << "Instruction::ZExt"; break;
+ case Instruction::SExt: Out << "Instruction::SExt"; break;
+ case Instruction::FPTrunc: Out << "Instruction::FPTrunc"; break;
+ case Instruction::FPExt: Out << "Instruction::FPExt"; break;
+ case Instruction::FPToUI: Out << "Instruction::FPToUI"; break;
+ case Instruction::FPToSI: Out << "Instruction::FPToSI"; break;
+ case Instruction::UIToFP: Out << "Instruction::UIToFP"; break;
+ case Instruction::SIToFP: Out << "Instruction::SIToFP"; break;
+ case Instruction::PtrToInt: Out << "Instruction::PtrToInt"; break;
+ case Instruction::IntToPtr: Out << "Instruction::IntToPtr"; break;
+ case Instruction::BitCast: Out << "Instruction::BitCast"; break;
+ }
+ Out << ", " << getCppName(CE->getOperand(0)) << ", "
+ << getCppName(CE->getType()) << ");";
+ } else {
+ unsigned N = CE->getNumOperands();
+ for (unsigned i = 0; i < N; ++i ) {
+ printConstant(CE->getOperand(i));
+ }
+ Out << "Constant* " << constName << " = ConstantExpr::";
+ switch (CE->getOpcode()) {
+ case Instruction::Add: Out << "getAdd("; break;
+ case Instruction::Sub: Out << "getSub("; break;
+ case Instruction::Mul: Out << "getMul("; break;
+ case Instruction::UDiv: Out << "getUDiv("; break;
+ case Instruction::SDiv: Out << "getSDiv("; break;
+ case Instruction::FDiv: Out << "getFDiv("; break;
+ case Instruction::URem: Out << "getURem("; break;
+ case Instruction::SRem: Out << "getSRem("; break;
+ case Instruction::FRem: Out << "getFRem("; break;
+ case Instruction::And: Out << "getAnd("; break;
+ case Instruction::Or: Out << "getOr("; break;
+ case Instruction::Xor: Out << "getXor("; break;
+ case Instruction::ICmp:
+ Out << "getICmp(ICmpInst::ICMP_";
+ switch (CE->getPredicate()) {
+ case ICmpInst::ICMP_EQ: Out << "EQ"; break;
+ case ICmpInst::ICMP_NE: Out << "NE"; break;
+ case ICmpInst::ICMP_SLT: Out << "SLT"; break;
+ case ICmpInst::ICMP_ULT: Out << "ULT"; break;
+ case ICmpInst::ICMP_SGT: Out << "SGT"; break;
+ case ICmpInst::ICMP_UGT: Out << "UGT"; break;
+ case ICmpInst::ICMP_SLE: Out << "SLE"; break;
+ case ICmpInst::ICMP_ULE: Out << "ULE"; break;
+ case ICmpInst::ICMP_SGE: Out << "SGE"; break;
+ case ICmpInst::ICMP_UGE: Out << "UGE"; break;
+ default: error("Invalid ICmp Predicate");
+ }
+ break;
+ case Instruction::FCmp:
+ Out << "getFCmp(FCmpInst::FCMP_";
+ switch (CE->getPredicate()) {
+ case FCmpInst::FCMP_FALSE: Out << "FALSE"; break;
+ case FCmpInst::FCMP_ORD: Out << "ORD"; break;
+ case FCmpInst::FCMP_UNO: Out << "UNO"; break;
+ case FCmpInst::FCMP_OEQ: Out << "OEQ"; break;
+ case FCmpInst::FCMP_UEQ: Out << "UEQ"; break;
+ case FCmpInst::FCMP_ONE: Out << "ONE"; break;
+ case FCmpInst::FCMP_UNE: Out << "UNE"; break;
+ case FCmpInst::FCMP_OLT: Out << "OLT"; break;
+ case FCmpInst::FCMP_ULT: Out << "ULT"; break;
+ case FCmpInst::FCMP_OGT: Out << "OGT"; break;
+ case FCmpInst::FCMP_UGT: Out << "UGT"; break;
+ case FCmpInst::FCMP_OLE: Out << "OLE"; break;
+ case FCmpInst::FCMP_ULE: Out << "ULE"; break;
+ case FCmpInst::FCMP_OGE: Out << "OGE"; break;
+ case FCmpInst::FCMP_UGE: Out << "UGE"; break;
+ case FCmpInst::FCMP_TRUE: Out << "TRUE"; break;
+ default: error("Invalid FCmp Predicate");
+ }
+ break;
+ case Instruction::Shl: Out << "getShl("; break;
+ case Instruction::LShr: Out << "getLShr("; break;
+ case Instruction::AShr: Out << "getAShr("; break;
+ case Instruction::Select: Out << "getSelect("; break;
+ case Instruction::ExtractElement: Out << "getExtractElement("; break;
+ case Instruction::InsertElement: Out << "getInsertElement("; break;
+ case Instruction::ShuffleVector: Out << "getShuffleVector("; break;
+ default:
+ error("Invalid constant expression");
+ break;
+ }
+ Out << getCppName(CE->getOperand(0));
+ for (unsigned i = 1; i < CE->getNumOperands(); ++i)
+ Out << ", " << getCppName(CE->getOperand(i));
+ Out << ");";
+ }
+ } else {
+ error("Bad Constant");
+ Out << "Constant* " << constName << " = 0; ";
+ }
+ nl(Out);
+ }
+
+ void CppWriter::printConstants(const Module* M) {
+ // Traverse all the global variables looking for constant initializers
+ for (Module::const_global_iterator I = TheModule->global_begin(),
+ E = TheModule->global_end(); I != E; ++I)
+ if (I->hasInitializer())
+ printConstant(I->getInitializer());
+
+ // Traverse the LLVM functions looking for constants
+ for (Module::const_iterator FI = TheModule->begin(), FE = TheModule->end();
+ FI != FE; ++FI) {
+ // Add all of the basic blocks and instructions
+ for (Function::const_iterator BB = FI->begin(),
+ E = FI->end(); BB != E; ++BB) {
+ for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I!=E;
+ ++I) {
+ for (unsigned i = 0; i < I->getNumOperands(); ++i) {
+ if (Constant* C = dyn_cast<Constant>(I->getOperand(i))) {
+ printConstant(C);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ void CppWriter::printVariableUses(const GlobalVariable *GV) {
+ nl(Out) << "// Type Definitions";
+ nl(Out);
+ printType(GV->getType());
+ if (GV->hasInitializer()) {
+ Constant* Init = GV->getInitializer();
+ printType(Init->getType());
+ if (Function* F = dyn_cast<Function>(Init)) {
+ nl(Out)<< "/ Function Declarations"; nl(Out);
+ printFunctionHead(F);
+ } else if (GlobalVariable* gv = dyn_cast<GlobalVariable>(Init)) {
+ nl(Out) << "// Global Variable Declarations"; nl(Out);
+ printVariableHead(gv);
+ } else {
+ nl(Out) << "// Constant Definitions"; nl(Out);
+ printConstant(Init);
+ }
+ if (GlobalVariable* gv = dyn_cast<GlobalVariable>(Init)) {
+ nl(Out) << "// Global Variable Definitions"; nl(Out);
+ printVariableBody(gv);
+ }
+ }
+ }
+
+ void CppWriter::printVariableHead(const GlobalVariable *GV) {
+ nl(Out) << "GlobalVariable* " << getCppName(GV);
+ if (is_inline) {
+ Out << " = mod->getGlobalVariable(";
+ printEscapedString(GV->getName());
+ Out << ", " << getCppName(GV->getType()->getElementType()) << ",true)";
+ nl(Out) << "if (!" << getCppName(GV) << ") {";
+ in(); nl(Out) << getCppName(GV);
+ }
+ Out << " = new GlobalVariable(";
+ nl(Out) << "/*Type=*/";
+ printCppName(GV->getType()->getElementType());
+ Out << ",";
+ nl(Out) << "/*isConstant=*/" << (GV->isConstant()?"true":"false");
+ Out << ",";
+ nl(Out) << "/*Linkage=*/";
+ printLinkageType(GV->getLinkage());
+ Out << ",";
+ nl(Out) << "/*Initializer=*/0, ";
+ if (GV->hasInitializer()) {
+ Out << "// has initializer, specified below";
+ }
+ nl(Out) << "/*Name=*/\"";
+ printEscapedString(GV->getName());
+ Out << "\",";
+ nl(Out) << "mod);";
+ nl(Out);
+
+ if (GV->hasSection()) {
+ printCppName(GV);
+ Out << "->setSection(\"";
+ printEscapedString(GV->getSection());
+ Out << "\");";
+ nl(Out);
+ }
+ if (GV->getAlignment()) {
+ printCppName(GV);
+ Out << "->setAlignment(" << utostr(GV->getAlignment()) << ");";
+ nl(Out);
+ }
+ if (GV->getVisibility() != GlobalValue::DefaultVisibility) {
+ printCppName(GV);
+ Out << "->setVisibility(";
+ printVisibilityType(GV->getVisibility());
+ Out << ");";
+ nl(Out);
+ }
+ if (is_inline) {
+ out(); Out << "}"; nl(Out);
+ }
+ }
+
+ void CppWriter::printVariableBody(const GlobalVariable *GV) {
+ if (GV->hasInitializer()) {
+ printCppName(GV);
+ Out << "->setInitializer(";
+ Out << getCppName(GV->getInitializer()) << ");";
+ nl(Out);
+ }
+ }
+
+ std::string CppWriter::getOpName(Value* V) {
+ if (!isa<Instruction>(V) || DefinedValues.find(V) != DefinedValues.end())
+ return getCppName(V);
+
+ // See if it's already in the map of forward references; if so, just return
+ // the name we already set up for it.
+ ForwardRefMap::const_iterator I = ForwardRefs.find(V);
+ if (I != ForwardRefs.end())
+ return I->second;
+
+ // This is a new forward reference. Generate a unique name for it
+ std::string result(std::string("fwdref_") + utostr(uniqueNum++));
+
+ // Yes, this is a hack. An Argument is the smallest instantiable value that
+ // we can make as a placeholder for the real value. We'll replace these
+ // Argument instances later.
+ Out << "Argument* " << result << " = new Argument("
+ << getCppName(V->getType()) << ");";
+ nl(Out);
+ ForwardRefs[V] = result;
+ return result;
+ }
+
+ // printInstruction - This member is called for each Instruction in a function.
+ void CppWriter::printInstruction(const Instruction *I,
+ const std::string& bbname) {
+ std::string iName(getCppName(I));
+
+ // Before we emit this instruction, we need to take care of generating any
+ // forward references. So, we get the names of all the operands in advance
+ std::string* opNames = new std::string[I->getNumOperands()];
+ for (unsigned i = 0; i < I->getNumOperands(); i++) {
+ opNames[i] = getOpName(I->getOperand(i));
+ }
+
+ switch (I->getOpcode()) {
+ default:
+ error("Invalid instruction");
+ break;
+
+ case Instruction::Ret: {
+ const ReturnInst* ret = cast<ReturnInst>(I);
+ Out << "ReturnInst::Create("
+ << (ret->getReturnValue() ? opNames[0] + ", " : "") << bbname << ");";
+ break;
+ }
+ case Instruction::Br: {
+ const BranchInst* br = cast<BranchInst>(I);
+ Out << "BranchInst::Create(" ;
+ if (br->getNumOperands() == 3 ) {
+ Out << opNames[0] << ", "
+ << opNames[1] << ", "
+ << opNames[2] << ", ";
+
+ } else if (br->getNumOperands() == 1) {
+ Out << opNames[0] << ", ";
+ } else {
+ error("Branch with 2 operands?");
+ }
+ Out << bbname << ");";
+ break;
+ }
+ case Instruction::Switch: {
+ const SwitchInst* sw = cast<SwitchInst>(I);
+ Out << "SwitchInst* " << iName << " = SwitchInst::Create("
+ << opNames[0] << ", "
+ << opNames[1] << ", "
+ << sw->getNumCases() << ", " << bbname << ");";
+ nl(Out);
+ for (unsigned i = 2; i < sw->getNumOperands(); i += 2 ) {
+ Out << iName << "->addCase("
+ << opNames[i] << ", "
+ << opNames[i+1] << ");";
+ nl(Out);
+ }
+ break;
+ }
+ case Instruction::Invoke: {
+ const InvokeInst* inv = cast<InvokeInst>(I);
+ Out << "std::vector<Value*> " << iName << "_params;";
+ nl(Out);
+ for (unsigned i = 3; i < inv->getNumOperands(); ++i) {
+ Out << iName << "_params.push_back("
+ << opNames[i] << ");";
+ nl(Out);
+ }
+ Out << "InvokeInst *" << iName << " = InvokeInst::Create("
+ << opNames[0] << ", "
+ << opNames[1] << ", "
+ << opNames[2] << ", "
+ << iName << "_params.begin(), " << iName << "_params.end(), \"";
+ printEscapedString(inv->getName());
+ Out << "\", " << bbname << ");";
+ nl(Out) << iName << "->setCallingConv(";
+ printCallingConv(inv->getCallingConv());
+ Out << ");";
+ printParamAttrs(inv->getParamAttrs(), iName);
+ Out << iName << "->setParamAttrs(" << iName << "_PAL);";
+ nl(Out);
+ break;
+ }
+ case Instruction::Unwind: {
+ Out << "new UnwindInst("
+ << bbname << ");";
+ break;
+ }
+ case Instruction::Unreachable:{
+ Out << "new UnreachableInst("
+ << bbname << ");";
+ break;
+ }
+ case Instruction::Add:
+ case Instruction::Sub:
+ case Instruction::Mul:
+ case Instruction::UDiv:
+ case Instruction::SDiv:
+ case Instruction::FDiv:
+ case Instruction::URem:
+ case Instruction::SRem:
+ case Instruction::FRem:
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Xor:
+ case Instruction::Shl:
+ case Instruction::LShr:
+ case Instruction::AShr:{
+ Out << "BinaryOperator* " << iName << " = BinaryOperator::Create(";
+ switch (I->getOpcode()) {
+ case Instruction::Add: Out << "Instruction::Add"; break;
+ case Instruction::Sub: Out << "Instruction::Sub"; break;
+ case Instruction::Mul: Out << "Instruction::Mul"; break;
+ case Instruction::UDiv:Out << "Instruction::UDiv"; break;
+ case Instruction::SDiv:Out << "Instruction::SDiv"; break;
+ case Instruction::FDiv:Out << "Instruction::FDiv"; break;
+ case Instruction::URem:Out << "Instruction::URem"; break;
+ case Instruction::SRem:Out << "Instruction::SRem"; break;
+ case Instruction::FRem:Out << "Instruction::FRem"; break;
+ case Instruction::And: Out << "Instruction::And"; break;
+ case Instruction::Or: Out << "Instruction::Or"; break;
+ case Instruction::Xor: Out << "Instruction::Xor"; break;
+ case Instruction::Shl: Out << "Instruction::Shl"; break;
+ case Instruction::LShr:Out << "Instruction::LShr"; break;
+ case Instruction::AShr:Out << "Instruction::AShr"; break;
+ default: Out << "Instruction::BadOpCode"; break;
+ }
+ Out << ", " << opNames[0] << ", " << opNames[1] << ", \"";
+ printEscapedString(I->getName());
+ Out << "\", " << bbname << ");";
+ break;
+ }
+ case Instruction::FCmp: {
+ Out << "FCmpInst* " << iName << " = new FCmpInst(";
+ switch (cast<FCmpInst>(I)->getPredicate()) {
+ case FCmpInst::FCMP_FALSE: Out << "FCmpInst::FCMP_FALSE"; break;
+ case FCmpInst::FCMP_OEQ : Out << "FCmpInst::FCMP_OEQ"; break;
+ case FCmpInst::FCMP_OGT : Out << "FCmpInst::FCMP_OGT"; break;
+ case FCmpInst::FCMP_OGE : Out << "FCmpInst::FCMP_OGE"; break;
+ case FCmpInst::FCMP_OLT : Out << "FCmpInst::FCMP_OLT"; break;
+ case FCmpInst::FCMP_OLE : Out << "FCmpInst::FCMP_OLE"; break;
+ case FCmpInst::FCMP_ONE : Out << "FCmpInst::FCMP_ONE"; break;
+ case FCmpInst::FCMP_ORD : Out << "FCmpInst::FCMP_ORD"; break;
+ case FCmpInst::FCMP_UNO : Out << "FCmpInst::FCMP_UNO"; break;
+ case FCmpInst::FCMP_UEQ : Out << "FCmpInst::FCMP_UEQ"; break;
+ case FCmpInst::FCMP_UGT : Out << "FCmpInst::FCMP_UGT"; break;
+ case FCmpInst::FCMP_UGE : Out << "FCmpInst::FCMP_UGE"; break;
+ case FCmpInst::FCMP_ULT : Out << "FCmpInst::FCMP_ULT"; break;
+ case FCmpInst::FCMP_ULE : Out << "FCmpInst::FCMP_ULE"; break;
+ case FCmpInst::FCMP_UNE : Out << "FCmpInst::FCMP_UNE"; break;
+ case FCmpInst::FCMP_TRUE : Out << "FCmpInst::FCMP_TRUE"; break;
+ default: Out << "FCmpInst::BAD_ICMP_PREDICATE"; break;
+ }
+ Out << ", " << opNames[0] << ", " << opNames[1] << ", \"";
+ printEscapedString(I->getName());
+ Out << "\", " << bbname << ");";
+ break;
+ }
+ case Instruction::ICmp: {
+ Out << "ICmpInst* " << iName << " = new ICmpInst(";
+ switch (cast<ICmpInst>(I)->getPredicate()) {
+ case ICmpInst::ICMP_EQ: Out << "ICmpInst::ICMP_EQ"; break;
+ case ICmpInst::ICMP_NE: Out << "ICmpInst::ICMP_NE"; break;
+ case ICmpInst::ICMP_ULE: Out << "ICmpInst::ICMP_ULE"; break;
+ case ICmpInst::ICMP_SLE: Out << "ICmpInst::ICMP_SLE"; break;
+ case ICmpInst::ICMP_UGE: Out << "ICmpInst::ICMP_UGE"; break;
+ case ICmpInst::ICMP_SGE: Out << "ICmpInst::ICMP_SGE"; break;
+ case ICmpInst::ICMP_ULT: Out << "ICmpInst::ICMP_ULT"; break;
+ case ICmpInst::ICMP_SLT: Out << "ICmpInst::ICMP_SLT"; break;
+ case ICmpInst::ICMP_UGT: Out << "ICmpInst::ICMP_UGT"; break;
+ case ICmpInst::ICMP_SGT: Out << "ICmpInst::ICMP_SGT"; break;
+ default: Out << "ICmpInst::BAD_ICMP_PREDICATE"; break;
+ }
+ Out << ", " << opNames[0] << ", " << opNames[1] << ", \"";
+ printEscapedString(I->getName());
+ Out << "\", " << bbname << ");";
+ break;
+ }
+ case Instruction::Malloc: {
+ const MallocInst* mallocI = cast<MallocInst>(I);
+ Out << "MallocInst* " << iName << " = new MallocInst("
+ << getCppName(mallocI->getAllocatedType()) << ", ";
+ if (mallocI->isArrayAllocation())
+ Out << opNames[0] << ", " ;
+ Out << "\"";
+ printEscapedString(mallocI->getName());
+ Out << "\", " << bbname << ");";
+ if (mallocI->getAlignment())
+ nl(Out) << iName << "->setAlignment("
+ << mallocI->getAlignment() << ");";
+ break;
+ }
+ case Instruction::Free: {
+ Out << "FreeInst* " << iName << " = new FreeInst("
+ << getCppName(I->getOperand(0)) << ", " << bbname << ");";
+ break;
+ }
+ case Instruction::Alloca: {
+ const AllocaInst* allocaI = cast<AllocaInst>(I);
+ Out << "AllocaInst* " << iName << " = new AllocaInst("
+ << getCppName(allocaI->getAllocatedType()) << ", ";
+ if (allocaI->isArrayAllocation())
+ Out << opNames[0] << ", ";
+ Out << "\"";
+ printEscapedString(allocaI->getName());
+ Out << "\", " << bbname << ");";
+ if (allocaI->getAlignment())
+ nl(Out) << iName << "->setAlignment("
+ << allocaI->getAlignment() << ");";
+ break;
+ }
+ case Instruction::Load:{
+ const LoadInst* load = cast<LoadInst>(I);
+ Out << "LoadInst* " << iName << " = new LoadInst("
+ << opNames[0] << ", \"";
+ printEscapedString(load->getName());
+ Out << "\", " << (load->isVolatile() ? "true" : "false" )
+ << ", " << bbname << ");";
+ break;
+ }
+ case Instruction::Store: {
+ const StoreInst* store = cast<StoreInst>(I);
+ Out << "StoreInst* " << iName << " = new StoreInst("
+ << opNames[0] << ", "
+ << opNames[1] << ", "
+ << (store->isVolatile() ? "true" : "false")
+ << ", " << bbname << ");";
+ break;
+ }
+ case Instruction::GetElementPtr: {
+ const GetElementPtrInst* gep = cast<GetElementPtrInst>(I);
+ if (gep->getNumOperands() <= 2) {
+ Out << "GetElementPtrInst* " << iName << " = GetElementPtrInst::Create("
+ << opNames[0];
+ if (gep->getNumOperands() == 2)
+ Out << ", " << opNames[1];
+ } else {
+ Out << "std::vector<Value*> " << iName << "_indices;";
+ nl(Out);
+ for (unsigned i = 1; i < gep->getNumOperands(); ++i ) {
+ Out << iName << "_indices.push_back("
+ << opNames[i] << ");";
+ nl(Out);
+ }
+ Out << "Instruction* " << iName << " = GetElementPtrInst::Create("
+ << opNames[0] << ", " << iName << "_indices.begin(), "
+ << iName << "_indices.end()";
+ }
+ Out << ", \"";
+ printEscapedString(gep->getName());
+ Out << "\", " << bbname << ");";
+ break;
+ }
+ case Instruction::PHI: {
+ const PHINode* phi = cast<PHINode>(I);
+
+ Out << "PHINode* " << iName << " = PHINode::Create("
+ << getCppName(phi->getType()) << ", \"";
+ printEscapedString(phi->getName());
+ Out << "\", " << bbname << ");";
+ nl(Out) << iName << "->reserveOperandSpace("
+ << phi->getNumIncomingValues()
+ << ");";
+ nl(Out);
+ for (unsigned i = 0; i < phi->getNumOperands(); i+=2) {
+ Out << iName << "->addIncoming("
+ << opNames[i] << ", " << opNames[i+1] << ");";
+ nl(Out);
+ }
+ break;
+ }
+ case Instruction::Trunc:
+ case Instruction::ZExt:
+ case Instruction::SExt:
+ case Instruction::FPTrunc:
+ case Instruction::FPExt:
+ case Instruction::FPToUI:
+ case Instruction::FPToSI:
+ case Instruction::UIToFP:
+ case Instruction::SIToFP:
+ case Instruction::PtrToInt:
+ case Instruction::IntToPtr:
+ case Instruction::BitCast: {
+ const CastInst* cst = cast<CastInst>(I);
+ Out << "CastInst* " << iName << " = new ";
+ switch (I->getOpcode()) {
+ case Instruction::Trunc: Out << "TruncInst"; break;
+ case Instruction::ZExt: Out << "ZExtInst"; break;
+ case Instruction::SExt: Out << "SExtInst"; break;
+ case Instruction::FPTrunc: Out << "FPTruncInst"; break;
+ case Instruction::FPExt: Out << "FPExtInst"; break;
+ case Instruction::FPToUI: Out << "FPToUIInst"; break;
+ case Instruction::FPToSI: Out << "FPToSIInst"; break;
+ case Instruction::UIToFP: Out << "UIToFPInst"; break;
+ case Instruction::SIToFP: Out << "SIToFPInst"; break;
+ case Instruction::PtrToInt: Out << "PtrToIntInst"; break;
+ case Instruction::IntToPtr: Out << "IntToPtrInst"; break;
+ case Instruction::BitCast: Out << "BitCastInst"; break;
+ default: assert(!"Unreachable"); break;
+ }
+ Out << "(" << opNames[0] << ", "
+ << getCppName(cst->getType()) << ", \"";
+ printEscapedString(cst->getName());
+ Out << "\", " << bbname << ");";
+ break;
+ }
+ case Instruction::Call:{
+ const CallInst* call = cast<CallInst>(I);
+ if (InlineAsm* ila = dyn_cast<InlineAsm>(call->getOperand(0))) {
+ Out << "InlineAsm* " << getCppName(ila) << " = InlineAsm::get("
+ << getCppName(ila->getFunctionType()) << ", \""
+ << ila->getAsmString() << "\", \""
+ << ila->getConstraintString() << "\","
+ << (ila->hasSideEffects() ? "true" : "false") << ");";
+ nl(Out);
+ }
+ if (call->getNumOperands() > 2) {
+ Out << "std::vector<Value*> " << iName << "_params;";
+ nl(Out);
+ for (unsigned i = 1; i < call->getNumOperands(); ++i) {
+ Out << iName << "_params.push_back(" << opNames[i] << ");";
+ nl(Out);
+ }
+ Out << "CallInst* " << iName << " = CallInst::Create("
+ << opNames[0] << ", " << iName << "_params.begin(), "
+ << iName << "_params.end(), \"";
+ } else if (call->getNumOperands() == 2) {
+ Out << "CallInst* " << iName << " = CallInst::Create("
+ << opNames[0] << ", " << opNames[1] << ", \"";
+ } else {
+ Out << "CallInst* " << iName << " = CallInst::Create(" << opNames[0]
+ << ", \"";
+ }
+ printEscapedString(call->getName());
+ Out << "\", " << bbname << ");";
+ nl(Out) << iName << "->setCallingConv(";
+ printCallingConv(call->getCallingConv());
+ Out << ");";
+ nl(Out) << iName << "->setTailCall("
+ << (call->isTailCall() ? "true":"false");
+ Out << ");";
+ printParamAttrs(call->getParamAttrs(), iName);
+ Out << iName << "->setParamAttrs(" << iName << "_PAL);";
+ nl(Out);
+ break;
+ }
+ case Instruction::Select: {
+ const SelectInst* sel = cast<SelectInst>(I);
+ Out << "SelectInst* " << getCppName(sel) << " = SelectInst::Create(";
+ Out << opNames[0] << ", " << opNames[1] << ", " << opNames[2] << ", \"";
+ printEscapedString(sel->getName());
+ Out << "\", " << bbname << ");";
+ break;
+ }
+ case Instruction::UserOp1:
+ /// FALL THROUGH
+ case Instruction::UserOp2: {
+ /// FIXME: What should be done here?
+ break;
+ }
+ case Instruction::VAArg: {
+ const VAArgInst* va = cast<VAArgInst>(I);
+ Out << "VAArgInst* " << getCppName(va) << " = new VAArgInst("
+ << opNames[0] << ", " << getCppName(va->getType()) << ", \"";
+ printEscapedString(va->getName());
+ Out << "\", " << bbname << ");";
+ break;
+ }
+ case Instruction::ExtractElement: {
+ const ExtractElementInst* eei = cast<ExtractElementInst>(I);
+ Out << "ExtractElementInst* " << getCppName(eei)
+ << " = new ExtractElementInst(" << opNames[0]
+ << ", " << opNames[1] << ", \"";
+ printEscapedString(eei->getName());
+ Out << "\", " << bbname << ");";
+ break;
+ }
+ case Instruction::InsertElement: {
+ const InsertElementInst* iei = cast<InsertElementInst>(I);
+ Out << "InsertElementInst* " << getCppName(iei)
+ << " = InsertElementInst::Create(" << opNames[0]
+ << ", " << opNames[1] << ", " << opNames[2] << ", \"";
+ printEscapedString(iei->getName());
+ Out << "\", " << bbname << ");";
+ break;
+ }
+ case Instruction::ShuffleVector: {
+ const ShuffleVectorInst* svi = cast<ShuffleVectorInst>(I);
+ Out << "ShuffleVectorInst* " << getCppName(svi)
+ << " = new ShuffleVectorInst(" << opNames[0]
+ << ", " << opNames[1] << ", " << opNames[2] << ", \"";
+ printEscapedString(svi->getName());
+ Out << "\", " << bbname << ");";
+ break;
+ }
+ case Instruction::ExtractValue: {
+ const ExtractValueInst *evi = cast<ExtractValueInst>(I);
+ Out << "std::vector<unsigned> " << iName << "_indices;";
+ nl(Out);
+ for (unsigned i = 0; i < evi->getNumIndices(); ++i) {
+ Out << iName << "_indices.push_back("
+ << evi->idx_begin()[i] << ");";
+ nl(Out);
+ }
+ Out << "ExtractValueInst* " << getCppName(evi)
+ << " = ExtractValueInst::Create(" << opNames[0]
+ << ", "
+ << iName << "_indices.begin(), " << iName << "_indices.end(), \"";
+ printEscapedString(evi->getName());
+ Out << "\", " << bbname << ");";
+ break;
+ }
+ case Instruction::InsertValue: {
+ const InsertValueInst *ivi = cast<InsertValueInst>(I);
+ Out << "std::vector<unsigned> " << iName << "_indices;";
+ nl(Out);
+ for (unsigned i = 0; i < ivi->getNumIndices(); ++i) {
+ Out << iName << "_indices.push_back("
+ << ivi->idx_begin()[i] << ");";
+ nl(Out);
+ }
+ Out << "InsertValueInst* " << getCppName(ivi)
+ << " = InsertValueInst::Create(" << opNames[0]
+ << ", " << opNames[1] << ", "
+ << iName << "_indices.begin(), " << iName << "_indices.end(), \"";
+ printEscapedString(ivi->getName());
+ Out << "\", " << bbname << ");";
+ break;
+ }
+ }
+ DefinedValues.insert(I);
+ nl(Out);
+ delete [] opNames;
+}
+
+ // Print out the types, constants and declarations needed by one function
+ void CppWriter::printFunctionUses(const Function* F) {
+ nl(Out) << "// Type Definitions"; nl(Out);
+ if (!is_inline) {
+ // Print the function's return type
+ printType(F->getReturnType());
+
+ // Print the function's function type
+ printType(F->getFunctionType());
+
+ // Print the types of each of the function's arguments
+ for (Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end();
+ AI != AE; ++AI) {
+ printType(AI->getType());
+ }
+ }
+
+ // Print type definitions for every type referenced by an instruction and
+ // make a note of any global values or constants that are referenced
+ SmallPtrSet<GlobalValue*,64> gvs;
+ SmallPtrSet<Constant*,64> consts;
+ for (Function::const_iterator BB = F->begin(), BE = F->end();
+ BB != BE; ++BB){
+ for (BasicBlock::const_iterator I = BB->begin(), E = BB->end();
+ I != E; ++I) {
+ // Print the type of the instruction itself
+ printType(I->getType());
+
+ // Print the type of each of the instruction's operands
+ for (unsigned i = 0; i < I->getNumOperands(); ++i) {
+ Value* operand = I->getOperand(i);
+ printType(operand->getType());
+
+ // If the operand references a GVal or Constant, make a note of it
+ if (GlobalValue* GV = dyn_cast<GlobalValue>(operand)) {
+ gvs.insert(GV);
+ if (GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
+ if (GVar->hasInitializer())
+ consts.insert(GVar->getInitializer());
+ } else if (Constant* C = dyn_cast<Constant>(operand))
+ consts.insert(C);
+ }
+ }
+ }
+
+ // Print the function declarations for any functions encountered
+ nl(Out) << "// Function Declarations"; nl(Out);
+ for (SmallPtrSet<GlobalValue*,64>::iterator I = gvs.begin(), E = gvs.end();
+ I != E; ++I) {
+ if (Function* Fun = dyn_cast<Function>(*I)) {
+ if (!is_inline || Fun != F)
+ printFunctionHead(Fun);
+ }
+ }
+
+ // Print the global variable declarations for any variables encountered
+ nl(Out) << "// Global Variable Declarations"; nl(Out);
+ for (SmallPtrSet<GlobalValue*,64>::iterator I = gvs.begin(), E = gvs.end();
+ I != E; ++I) {
+ if (GlobalVariable* F = dyn_cast<GlobalVariable>(*I))
+ printVariableHead(F);
+ }
+
+ // Print the constants found
+ nl(Out) << "// Constant Definitions"; nl(Out);
+ for (SmallPtrSet<Constant*,64>::iterator I = consts.begin(),
+ E = consts.end(); I != E; ++I) {
+ printConstant(*I);
+ }
+
+ // Process the global variables definitions now that all the constants have
+ // been emitted. These definitions just couple the gvars with their constant
+ // initializers.
+ nl(Out) << "// Global Variable Definitions"; nl(Out);
+ for (SmallPtrSet<GlobalValue*,64>::iterator I = gvs.begin(), E = gvs.end();
+ I != E; ++I) {
+ if (GlobalVariable* GV = dyn_cast<GlobalVariable>(*I))
+ printVariableBody(GV);
+ }
+ }
+
+ void CppWriter::printFunctionHead(const Function* F) {
+ nl(Out) << "Function* " << getCppName(F);
+ if (is_inline) {
+ Out << " = mod->getFunction(\"";
+ printEscapedString(F->getName());
+ Out << "\", " << getCppName(F->getFunctionType()) << ");";
+ nl(Out) << "if (!" << getCppName(F) << ") {";
+ nl(Out) << getCppName(F);
+ }
+ Out<< " = Function::Create(";
+ nl(Out,1) << "/*Type=*/" << getCppName(F->getFunctionType()) << ",";
+ nl(Out) << "/*Linkage=*/";
+ printLinkageType(F->getLinkage());
+ Out << ",";
+ nl(Out) << "/*Name=*/\"";
+ printEscapedString(F->getName());
+ Out << "\", mod); " << (F->isDeclaration()? "// (external, no body)" : "");
+ nl(Out,-1);
+ printCppName(F);
+ Out << "->setCallingConv(";
+ printCallingConv(F->getCallingConv());
+ Out << ");";
+ nl(Out);
+ if (F->hasSection()) {
+ printCppName(F);
+ Out << "->setSection(\"" << F->getSection() << "\");";
+ nl(Out);
+ }
+ if (F->getAlignment()) {
+ printCppName(F);
+ Out << "->setAlignment(" << F->getAlignment() << ");";
+ nl(Out);
+ }
+ if (F->getVisibility() != GlobalValue::DefaultVisibility) {
+ printCppName(F);
+ Out << "->setVisibility(";
+ printVisibilityType(F->getVisibility());
+ Out << ");";
+ nl(Out);
+ }
+ if (F->hasCollector()) {
+ printCppName(F);
+ Out << "->setCollector(\"" << F->getCollector() << "\");";
+ nl(Out);
+ }
+ if (is_inline) {
+ Out << "}";
+ nl(Out);
+ }
+ printParamAttrs(F->getParamAttrs(), getCppName(F));
+ printCppName(F);
+ Out << "->setParamAttrs(" << getCppName(F) << "_PAL);";
+ nl(Out);
+ }
+
+ void CppWriter::printFunctionBody(const Function *F) {
+ if (F->isDeclaration())
+ return; // external functions have no bodies.
+
+ // Clear the DefinedValues and ForwardRefs maps because we can't have
+ // cross-function forward refs
+ ForwardRefs.clear();
+ DefinedValues.clear();
+
+ // Create all the argument values
+ if (!is_inline) {
+ if (!F->arg_empty()) {
+ Out << "Function::arg_iterator args = " << getCppName(F)
+ << "->arg_begin();";
+ nl(Out);
+ }
+ for (Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end();
+ AI != AE; ++AI) {
+ Out << "Value* " << getCppName(AI) << " = args++;";
+ nl(Out);
+ if (AI->hasName()) {
+ Out << getCppName(AI) << "->setName(\"" << AI->getName() << "\");";
+ nl(Out);
+ }
+ }
+ }
+
+ // Create all the basic blocks
+ nl(Out);
+ for (Function::const_iterator BI = F->begin(), BE = F->end();
+ BI != BE; ++BI) {
+ std::string bbname(getCppName(BI));
+ Out << "BasicBlock* " << bbname << " = BasicBlock::Create(\"";
+ if (BI->hasName())
+ printEscapedString(BI->getName());
+ Out << "\"," << getCppName(BI->getParent()) << ",0);";
+ nl(Out);
+ }
+
+ // Output all of the function's basic blocks.
+ for (Function::const_iterator BI = F->begin(), BE = F->end();
+ BI != BE; ++BI) {
+ std::string bbname(getCppName(BI));
+ nl(Out) << "// Block " << BI->getName() << " (" << bbname << ")";
+ nl(Out);
+
+ // Output all of the instructions in the basic block...
+ for (BasicBlock::const_iterator I = BI->begin(), E = BI->end();
+ I != E; ++I) {
+ printInstruction(I,bbname);
+ }
+ }
+
+ // Loop over the ForwardRefs and resolve them now that all instructions
+ // are generated.
+ if (!ForwardRefs.empty()) {
+ nl(Out) << "// Resolve Forward References";
+ nl(Out);
+ }
+
+ while (!ForwardRefs.empty()) {
+ ForwardRefMap::iterator I = ForwardRefs.begin();
+ Out << I->second << "->replaceAllUsesWith("
+ << getCppName(I->first) << "); delete " << I->second << ";";
+ nl(Out);
+ ForwardRefs.erase(I);
+ }
+ }
+
+ void CppWriter::printInline(const std::string& fname,
+ const std::string& func) {
+ const Function* F = TheModule->getFunction(func);
+ if (!F) {
+ error(std::string("Function '") + func + "' not found in input module");
+ return;
+ }
+ if (F->isDeclaration()) {
+ error(std::string("Function '") + func + "' is external!");
+ return;
+ }
+ nl(Out) << "BasicBlock* " << fname << "(Module* mod, Function *"
+ << getCppName(F);
+ unsigned arg_count = 1;
+ for (Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end();
+ AI != AE; ++AI) {
+ Out << ", Value* arg_" << arg_count;
+ }
+ Out << ") {";
+ nl(Out);
+ is_inline = true;
+ printFunctionUses(F);
+ printFunctionBody(F);
+ is_inline = false;
+ Out << "return " << getCppName(F->begin()) << ";";
+ nl(Out) << "}";
+ nl(Out);
+ }
+
+ void CppWriter::printModuleBody() {
+ // Print out all the type definitions
+ nl(Out) << "// Type Definitions"; nl(Out);
+ printTypes(TheModule);
+
+ // Functions can call each other and global variables can reference them, so
+ // define all the functions first before emitting their bodies.
+ nl(Out) << "// Function Declarations"; nl(Out);
+ for (Module::const_iterator I = TheModule->begin(), E = TheModule->end();
+ I != E; ++I)
+ printFunctionHead(I);
+
+ // Process the global variable declarations. We can't initialize them until
+ // after the constants are printed, so just print a header for each global.
+ nl(Out) << "// Global Variable Declarations\n"; nl(Out);
+ for (Module::const_global_iterator I = TheModule->global_begin(),
+ E = TheModule->global_end(); I != E; ++I) {
+ printVariableHead(I);
+ }
+
+ // Print out all the constant definitions. Constants don't recurse except
+ // through GlobalValues. All GlobalValues have been declared at this point
+ // so we can proceed to generate the constants.
+ nl(Out) << "// Constant Definitions"; nl(Out);
+ printConstants(TheModule);
+
+ // Process the global variables definitions now that all the constants have
+ // been emitted. These definitions just couple the gvars with their constant
+ // initializers.
+ nl(Out) << "// Global Variable Definitions"; nl(Out);
+ for (Module::const_global_iterator I = TheModule->global_begin(),
+ E = TheModule->global_end(); I != E; ++I) {
+ printVariableBody(I);
+ }
+
+ // Finally, we can safely put out all of the function bodies.
+ nl(Out) << "// Function Definitions"; nl(Out);
+ for (Module::const_iterator I = TheModule->begin(), E = TheModule->end();
+ I != E; ++I) {
+ if (!I->isDeclaration()) {
+ nl(Out) << "// Function: " << I->getName() << " (" << getCppName(I)
+ << ")";
+ nl(Out) << "{";
+ nl(Out,1);
+ printFunctionBody(I);
+ nl(Out,-1) << "}";
+ nl(Out);
+ }
+ }
+ }
+
+ void CppWriter::printProgram(const std::string& fname,
+ const std::string& mName) {
+ Out << "#include <llvm/Module.h>\n";
+ Out << "#include <llvm/DerivedTypes.h>\n";
+ Out << "#include <llvm/Constants.h>\n";
+ Out << "#include <llvm/GlobalVariable.h>\n";
+ Out << "#include <llvm/Function.h>\n";
+ Out << "#include <llvm/CallingConv.h>\n";
+ Out << "#include <llvm/BasicBlock.h>\n";
+ Out << "#include <llvm/Instructions.h>\n";
+ Out << "#include <llvm/InlineAsm.h>\n";
+ Out << "#include <llvm/Support/MathExtras.h>\n";
+ Out << "#include <llvm/Pass.h>\n";
+ Out << "#include <llvm/PassManager.h>\n";
+ Out << "#include <llvm/ADT/SmallVector.h>\n";
+ Out << "#include <llvm/Analysis/Verifier.h>\n";
+ Out << "#include <llvm/Assembly/PrintModulePass.h>\n";
+ Out << "#include <algorithm>\n";
+ Out << "#include <iostream>\n\n";
+ Out << "using namespace llvm;\n\n";
+ Out << "Module* " << fname << "();\n\n";
+ Out << "int main(int argc, char**argv) {\n";
+ Out << " Module* Mod = " << fname << "();\n";
+ Out << " verifyModule(*Mod, PrintMessageAction);\n";
+ Out << " std::cerr.flush();\n";
+ Out << " std::cout.flush();\n";
+ Out << " PassManager PM;\n";
+ Out << " PM.add(new PrintModulePass(&llvm::cout));\n";
+ Out << " PM.run(*Mod);\n";
+ Out << " return 0;\n";
+ Out << "}\n\n";
+ printModule(fname,mName);
+ }
+
+ void CppWriter::printModule(const std::string& fname,
+ const std::string& mName) {
+ nl(Out) << "Module* " << fname << "() {";
+ nl(Out,1) << "// Module Construction";
+ nl(Out) << "Module* mod = new Module(\"" << mName << "\");";
+ if (!TheModule->getDataLayout().empty()) {
+ nl(Out) << "mod->setDataLayout(\"" << TheModule->getDataLayout() << "\");";
+ }
+ if (!TheModule->getTargetTriple().empty()) {
+ nl(Out) << "mod->setTargetTriple(\"" << TheModule->getTargetTriple()
+ << "\");";
+ }
+
+ if (!TheModule->getModuleInlineAsm().empty()) {
+ nl(Out) << "mod->setModuleInlineAsm(\"";
+ printEscapedString(TheModule->getModuleInlineAsm());
+ Out << "\");";
+ }
+ nl(Out);
+
+ // Loop over the dependent libraries and emit them.
+ Module::lib_iterator LI = TheModule->lib_begin();
+ Module::lib_iterator LE = TheModule->lib_end();
+ while (LI != LE) {
+ Out << "mod->addLibrary(\"" << *LI << "\");";
+ nl(Out);
+ ++LI;
+ }
+ printModuleBody();
+ nl(Out) << "return mod;";
+ nl(Out,-1) << "}";
+ nl(Out);
+ }
+
+ void CppWriter::printContents(const std::string& fname,
+ const std::string& mName) {
+ Out << "\nModule* " << fname << "(Module *mod) {\n";
+ Out << "\nmod->setModuleIdentifier(\"" << mName << "\");\n";
+ printModuleBody();
+ Out << "\nreturn mod;\n";
+ Out << "\n}\n";
+ }
+
+ void CppWriter::printFunction(const std::string& fname,
+ const std::string& funcName) {
+ const Function* F = TheModule->getFunction(funcName);
+ if (!F) {
+ error(std::string("Function '") + funcName + "' not found in input module");
+ return;
+ }
+ Out << "\nFunction* " << fname << "(Module *mod) {\n";
+ printFunctionUses(F);
+ printFunctionHead(F);
+ printFunctionBody(F);
+ Out << "return " << getCppName(F) << ";\n";
+ Out << "}\n";
+ }
+
+ void CppWriter::printFunctions() {
+ const Module::FunctionListType &funcs = TheModule->getFunctionList();
+ Module::const_iterator I = funcs.begin();
+ Module::const_iterator IE = funcs.end();
+
+ for (; I != IE; ++I) {
+ const Function &func = *I;
+ if (!func.isDeclaration()) {
+ std::string name("define_");
+ name += func.getName();
+ printFunction(name, func.getName());
+ }
+ }
+ }
+
+ void CppWriter::printVariable(const std::string& fname,
+ const std::string& varName) {
+ const GlobalVariable* GV = TheModule->getNamedGlobal(varName);
+
+ if (!GV) {
+ error(std::string("Variable '") + varName + "' not found in input module");
+ return;
+ }
+ Out << "\nGlobalVariable* " << fname << "(Module *mod) {\n";
+ printVariableUses(GV);
+ printVariableHead(GV);
+ printVariableBody(GV);
+ Out << "return " << getCppName(GV) << ";\n";
+ Out << "}\n";
+ }
+
+ void CppWriter::printType(const std::string& fname,
+ const std::string& typeName) {
+ const Type* Ty = TheModule->getTypeByName(typeName);
+ if (!Ty) {
+ error(std::string("Type '") + typeName + "' not found in input module");
+ return;
+ }
+ Out << "\nType* " << fname << "(Module *mod) {\n";
+ printType(Ty);
+ Out << "return " << getCppName(Ty) << ";\n";
+ Out << "}\n";
+ }
+
+ bool CppWriter::runOnModule(Module &M) {
+ TheModule = &M;
+
+ // Emit a header
+ Out << "// Generated by llvm2cpp - DO NOT MODIFY!\n\n";
+
+ // Get the name of the function we're supposed to generate
+ std::string fname = FuncName.getValue();
+
+ // Get the name of the thing we are to generate
+ std::string tgtname = NameToGenerate.getValue();
+ if (GenerationType == GenModule ||
+ GenerationType == GenContents ||
+ GenerationType == GenProgram ||
+ GenerationType == GenFunctions) {
+ if (tgtname == "!bad!") {
+ if (M.getModuleIdentifier() == "-")
+ tgtname = "<stdin>";
+ else
+ tgtname = M.getModuleIdentifier();
+ }
+ } else if (tgtname == "!bad!")
+ error("You must use the -for option with -gen-{function,variable,type}");
+
+ switch (WhatToGenerate(GenerationType)) {
+ case GenProgram:
+ if (fname.empty())
+ fname = "makeLLVMModule";
+ printProgram(fname,tgtname);
+ break;
+ case GenModule:
+ if (fname.empty())
+ fname = "makeLLVMModule";
+ printModule(fname,tgtname);
+ break;
+ case GenContents:
+ if (fname.empty())
+ fname = "makeLLVMModuleContents";
+ printContents(fname,tgtname);
+ break;
+ case GenFunction:
+ if (fname.empty())
+ fname = "makeLLVMFunction";
+ printFunction(fname,tgtname);
+ break;
+ case GenFunctions:
+ printFunctions();
+ break;
+ case GenInline:
+ if (fname.empty())
+ fname = "makeLLVMInline";
+ printInline(fname,tgtname);
+ break;
+ case GenVariable:
+ if (fname.empty())
+ fname = "makeLLVMVariable";
+ printVariable(fname,tgtname);
+ break;
+ case GenType:
+ if (fname.empty())
+ fname = "makeLLVMType";
+ printType(fname,tgtname);
+ break;
+ default:
+ error("Invalid generation option");
+ }
+
+ return false;
+ }
+}
+
+char CppWriter::ID = 0;
+
+//===----------------------------------------------------------------------===//
+// External Interface declaration
+//===----------------------------------------------------------------------===//
+
+bool CPPTargetMachine::addPassesToEmitWholeFile(PassManager &PM,
+ std::ostream &o,
+ CodeGenFileType FileType,
+ bool Fast) {
+ if (FileType != TargetMachine::AssemblyFile) return true;
+ PM.add(new CppWriter(o));
+ return false;
+}
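For readers who have not seen the cpp backend's output before: printProgram() above wraps printModule() in a complete, compilable driver. The following is only a hand-written sketch of what that generated program looks like for a module holding one int32 global. The variable names (gvar_x, const_zero) are illustrative, not what getCppName() would actually produce, and ConstantInt::get(Type, value) is assumed to be the integer-constant form emitted by the parts of printConstant() not shown in this hunk.

    // Sketch of llvm2cpp output for a trivial module; names are illustrative.
    #include <llvm/Module.h>
    #include <llvm/DerivedTypes.h>
    #include <llvm/Constants.h>
    #include <llvm/GlobalVariable.h>
    #include <llvm/PassManager.h>
    #include <llvm/Analysis/Verifier.h>
    #include <llvm/Assembly/PrintModulePass.h>
    #include <llvm/Support/Streams.h>
    using namespace llvm;

    Module* makeLLVMModule();

    int main(int argc, char**argv) {
      Module* Mod = makeLLVMModule();
      verifyModule(*Mod, PrintMessageAction);
      PassManager PM;
      PM.add(new PrintModulePass(&llvm::cout));
      PM.run(*Mod);
      return 0;
    }

    Module* makeLLVMModule() {
      // Module Construction
      Module* mod = new Module("example");

      // Global Variable Declarations
      GlobalVariable* gvar_x = new GlobalVariable(
        /*Type=*/IntegerType::get(32),
        /*isConstant=*/false,
        /*Linkage=*/GlobalValue::ExternalLinkage,
        /*Initializer=*/0, // has initializer, specified below
        /*Name=*/"x",
        mod);

      // Constant Definitions
      Constant* const_zero = ConstantInt::get(IntegerType::get(32), 0);

      // Global Variable Definitions
      gvar_x->setInitializer(const_zero);

      return mod;
    }

The real output additionally reproduces section, alignment, visibility and parameter-attribute settings through the setSection()/setAlignment()/setVisibility()/setParamAttrs() calls emitted above.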
Added: llvm/branches/non-call-eh/lib/Target/CppBackend/CPPTargetMachine.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/CppBackend/CPPTargetMachine.h?rev=53163&view=auto
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/CppBackend/CPPTargetMachine.h (added)
+++ llvm/branches/non-call-eh/lib/Target/CppBackend/CPPTargetMachine.h Sun Jul 6 15:45:41 2008
@@ -0,0 +1,41 @@
+//===-- CPPTargetMachine.h - TargetMachine for the C++ backend --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the TargetMachine that is used by the C++ backend.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CPPTARGETMACHINE_H
+#define CPPTARGETMACHINE_H
+
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetData.h"
+
+namespace llvm {
+
+struct CPPTargetMachine : public TargetMachine {
+ const TargetData DataLayout; // Calculates type size & alignment
+
+ CPPTargetMachine(const Module &M, const std::string &FS)
+ : DataLayout(&M) {}
+
+ virtual bool WantsWholeFile() const { return true; }
+ virtual bool addPassesToEmitWholeFile(PassManager &PM, std::ostream &Out,
+ CodeGenFileType FileType, bool Fast);
+
+ // This class always works, but shouldn't be the default in most cases.
+ static unsigned getModuleMatchQuality(const Module &M) { return 1; }
+
+ virtual const TargetData *getTargetData() const { return &DataLayout; }
+};
+
+} // End llvm namespace
+
+
+#endif
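The new TargetMachine only participates through the whole-file path, so a caller drives it with addPassesToEmitWholeFile() rather than the per-function emission hooks. A minimal sketch, assuming the caller constructs the target directly from the Module as the constructor above allows (in-tree users would normally go through the target registry instead):

    // Sketch only: emitting C++ for a module through CPPTargetMachine.
    #include "CPPTargetMachine.h"
    #include "llvm/Module.h"
    #include "llvm/PassManager.h"
    #include <ostream>

    using namespace llvm;

    static bool emitAsCpp(Module &M, std::ostream &OS) {
      CPPTargetMachine TM(M, /*FS=*/"");
      PassManager PM;
      // Returns true on error, e.g. if asked for anything but AssemblyFile.
      if (TM.addPassesToEmitWholeFile(PM, OS, TargetMachine::AssemblyFile,
                                      /*Fast=*/false))
        return true;
      PM.run(M);
      return false;
    }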
Added: llvm/branches/non-call-eh/lib/Target/CppBackend/Makefile
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/CppBackend/Makefile?rev=53163&view=auto
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/CppBackend/Makefile (added)
+++ llvm/branches/non-call-eh/lib/Target/CppBackend/Makefile Sun Jul 6 15:45:41 2008
@@ -0,0 +1,14 @@
+##===- lib/Target/CppBackend/Makefile --- ------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../../..
+LIBRARYNAME = LLVMCppBackend
+include $(LEVEL)/Makefile.common
+
+CompileCommonOpts += -Wno-format
Modified: llvm/branches/non-call-eh/lib/Target/IA64/IA64AsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/IA64/IA64AsmPrinter.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/IA64/IA64AsmPrinter.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/IA64/IA64AsmPrinter.cpp Sun Jul 6 15:45:41 2008
@@ -275,6 +275,7 @@
if (C->isNullValue() &&
(I->hasLinkOnceLinkage() || I->hasInternalLinkage() ||
+ I->hasCommonLinkage() ||
I->hasWeakLinkage() /* FIXME: Verify correct */)) {
SwitchToDataSection(".data", I);
if (I->hasInternalLinkage()) {
@@ -289,6 +290,7 @@
} else {
switch (I->getLinkage()) {
case GlobalValue::LinkOnceLinkage:
+ case GlobalValue::CommonLinkage:
case GlobalValue::WeakLinkage: // FIXME: Verify correct for weak.
// Nonnull linkonce -> weak
O << "\t.weak " << name << "\n";
Modified: llvm/branches/non-call-eh/lib/Target/IA64/IA64ISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/IA64/IA64ISelDAGToDAG.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/IA64/IA64ISelDAGToDAG.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/IA64/IA64ISelDAGToDAG.cpp Sun Jul 6 15:45:41 2008
@@ -78,9 +78,9 @@
/// operation.
bool SelectAddr(SDOperand Addr, SDOperand &Op1, SDOperand &Op2);
- /// InstructionSelectBasicBlock - This callback is invoked by
+ /// InstructionSelect - This callback is invoked by
/// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
- virtual void InstructionSelectBasicBlock(SelectionDAG &DAG);
+ virtual void InstructionSelect(SelectionDAG &DAG);
virtual const char *getPassName() const {
return "IA64 (Itanium) DAG->DAG Instruction Selector";
@@ -94,17 +94,14 @@
};
}
-/// InstructionSelectBasicBlock - This callback is invoked by
+/// InstructionSelect - This callback is invoked by
/// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
-void IA64DAGToDAGISel::InstructionSelectBasicBlock(SelectionDAG &DAG) {
+void IA64DAGToDAGISel::InstructionSelect(SelectionDAG &DAG) {
DEBUG(BB->dump());
// Select target instructions for the DAG.
DAG.setRoot(SelectRoot(DAG.getRoot()));
DAG.RemoveDeadNodes();
-
- // Emit machine code to BB.
- ScheduleAndEmitDAG(DAG);
}
SDNode *IA64DAGToDAGISel::SelectDIV(SDOperand Op) {
@@ -119,7 +116,7 @@
bool isFP=false;
- if(MVT::isFloatingPoint(Tmp1.getValueType()))
+ if(Tmp1.getValueType().isFloatingPoint())
isFP=true;
bool isModulus=false; // is it a division or a modulus?
@@ -348,7 +345,8 @@
// load the branch target's entry point [mem] and
// GP value [mem+8]
SDOperand targetEntryPoint=
- SDOperand(CurDAG->getTargetNode(IA64::LD8, MVT::i64, FnDescriptor), 0);
+ SDOperand(CurDAG->getTargetNode(IA64::LD8, MVT::i64, MVT::Other,
+ FnDescriptor, CurDAG->getEntryNode()), 0);
Chain = targetEntryPoint.getValue(1);
SDOperand targetGPAddr=
SDOperand(CurDAG->getTargetNode(IA64::ADDS, MVT::i64,
@@ -356,7 +354,8 @@
CurDAG->getConstant(8, MVT::i64)), 0);
Chain = targetGPAddr.getValue(1);
SDOperand targetGP =
- SDOperand(CurDAG->getTargetNode(IA64::LD8, MVT::i64, targetGPAddr), 0);
+ SDOperand(CurDAG->getTargetNode(IA64::LD8, MVT::i64,MVT::Other,
+ targetGPAddr, CurDAG->getEntryNode()), 0);
Chain = targetGP.getValue(1);
Chain = CurDAG->getCopyToReg(Chain, IA64::r1, targetGP, InFlag);
@@ -444,7 +443,7 @@
SDOperand(CurDAG->getTargetNode(IA64::ADDL_GA, MVT::i64,
CurDAG->getRegister(IA64::r1,
MVT::i64), GA), 0);
- return CurDAG->getTargetNode(IA64::LD8, MVT::i64, Tmp);
+ return CurDAG->getTargetNode(IA64::LD8, MVT::i64, MVT::Other, Tmp);
}
/* XXX
@@ -467,9 +466,9 @@
AddToISelQueue(Chain);
AddToISelQueue(Address);
- MVT::ValueType TypeBeingLoaded = LD->getMemoryVT();
+ MVT TypeBeingLoaded = LD->getMemoryVT();
unsigned Opc;
- switch (TypeBeingLoaded) {
+ switch (TypeBeingLoaded.getSimpleVT()) {
default:
#ifndef NDEBUG
N->dump(CurDAG);
@@ -509,7 +508,7 @@
unsigned Opc;
if (ISD::isNON_TRUNCStore(N)) {
- switch (N->getOperand(1).getValueType()) {
+ switch (N->getOperand(1).getValueType().getSimpleVT()) {
default: assert(0 && "unknown type in store");
case MVT::i1: { // this is a bool
Opc = IA64::ST1; // we store either 0 or 1 as a byte
@@ -529,7 +528,7 @@
case MVT::f64: Opc = IA64::STF8; break;
}
} else { // Truncating store
- switch(ST->getMemoryVT()) {
+ switch(ST->getMemoryVT().getSimpleVT()) {
default: assert(0 && "unknown type in truncstore");
case MVT::i8: Opc = IA64::ST1; break;
case MVT::i16: Opc = IA64::ST2; break;
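Two mechanical API migrations run through this file: MVT::ValueType (a plain enum) becomes the MVT class, so dispatching code now calls getSimpleVT() and uses member predicates such as isFloatingPoint(); and target loads built with getTargetNode() now also produce an MVT::Other chain result that the caller picks up with getValue(1). A small self-contained sketch of the first pattern (the function itself is illustrative, not part of the patch):

    // Illustration only: dispatching on the new MVT class, as the hunks above
    // now do with getSimpleVT() and the member predicates.
    #include "llvm/CodeGen/ValueTypes.h"
    using namespace llvm;

    static const char *describeVT(MVT VT) {
      if (VT.isFloatingPoint())
        return "floating point";
      switch (VT.getSimpleVT()) {
      case MVT::i1:  return "predicate/bool";
      case MVT::i8:  return "byte";
      case MVT::i64: return "64-bit integer";
      default:       return "something else";
      }
    }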
Modified: llvm/branches/non-call-eh/lib/Target/IA64/IA64ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/IA64/IA64ISelLowering.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/IA64/IA64ISelLowering.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/IA64/IA64ISelLowering.cpp Sun Jul 6 15:45:41 2008
@@ -26,101 +26,109 @@
IA64TargetLowering::IA64TargetLowering(TargetMachine &TM)
: TargetLowering(TM) {
- // register class for general registers
- addRegisterClass(MVT::i64, IA64::GRRegisterClass);
+ // register class for general registers
+ addRegisterClass(MVT::i64, IA64::GRRegisterClass);
- // register class for FP registers
- addRegisterClass(MVT::f64, IA64::FPRegisterClass);
+ // register class for FP registers
+ addRegisterClass(MVT::f64, IA64::FPRegisterClass);
- // register class for predicate registers
- addRegisterClass(MVT::i1, IA64::PRRegisterClass);
+ // register class for predicate registers
+ addRegisterClass(MVT::i1, IA64::PRRegisterClass);
- setLoadXAction(ISD::EXTLOAD , MVT::i1 , Promote);
+ setLoadXAction(ISD::EXTLOAD , MVT::i1 , Promote);
- setLoadXAction(ISD::ZEXTLOAD , MVT::i1 , Promote);
+ setLoadXAction(ISD::ZEXTLOAD , MVT::i1 , Promote);
- setLoadXAction(ISD::SEXTLOAD , MVT::i1 , Promote);
- setLoadXAction(ISD::SEXTLOAD , MVT::i8 , Expand);
- setLoadXAction(ISD::SEXTLOAD , MVT::i16 , Expand);
- setLoadXAction(ISD::SEXTLOAD , MVT::i32 , Expand);
+ setLoadXAction(ISD::SEXTLOAD , MVT::i1 , Promote);
+ setLoadXAction(ISD::SEXTLOAD , MVT::i8 , Expand);
+ setLoadXAction(ISD::SEXTLOAD , MVT::i16 , Expand);
+ setLoadXAction(ISD::SEXTLOAD , MVT::i32 , Expand);
- setOperationAction(ISD::BRIND , MVT::Other, Expand);
- setOperationAction(ISD::BR_JT , MVT::Other, Expand);
- setOperationAction(ISD::BR_CC , MVT::Other, Expand);
- setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);
+ setOperationAction(ISD::BRIND , MVT::Other, Expand);
+ setOperationAction(ISD::BR_JT , MVT::Other, Expand);
+ setOperationAction(ISD::BR_CC , MVT::Other, Expand);
+ setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);
- // ia64 uses SELECT not SELECT_CC
- setOperationAction(ISD::SELECT_CC , MVT::Other, Expand);
-
- // We need to handle ISD::RET for void functions ourselves,
- // so we get a chance to restore ar.pfs before adding a
- // br.ret insn
- setOperationAction(ISD::RET, MVT::Other, Custom);
-
- setShiftAmountType(MVT::i64);
-
- setOperationAction(ISD::FREM , MVT::f32 , Expand);
- setOperationAction(ISD::FREM , MVT::f64 , Expand);
-
- setOperationAction(ISD::UREM , MVT::f32 , Expand);
- setOperationAction(ISD::UREM , MVT::f64 , Expand);
-
- setOperationAction(ISD::MEMBARRIER , MVT::Other, Expand);
-
- setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote);
- setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote);
-
- // We don't support sin/cos/sqrt/pow
- setOperationAction(ISD::FSIN , MVT::f64, Expand);
- setOperationAction(ISD::FCOS , MVT::f64, Expand);
- setOperationAction(ISD::FSQRT, MVT::f64, Expand);
- setOperationAction(ISD::FPOW , MVT::f64, Expand);
- setOperationAction(ISD::FSIN , MVT::f32, Expand);
- setOperationAction(ISD::FCOS , MVT::f32, Expand);
- setOperationAction(ISD::FSQRT, MVT::f32, Expand);
- setOperationAction(ISD::FPOW , MVT::f32, Expand);
-
- // FIXME: IA64 supports fcopysign natively!
- setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
- setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
-
- // We don't have line number support yet.
- setOperationAction(ISD::LOCATION, MVT::Other, Expand);
- setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
- setOperationAction(ISD::LABEL, MVT::Other, Expand);
-
- //IA64 has these, but they are not implemented
- setOperationAction(ISD::CTTZ , MVT::i64 , Expand);
- setOperationAction(ISD::CTLZ , MVT::i64 , Expand);
- setOperationAction(ISD::ROTL , MVT::i64 , Expand);
- setOperationAction(ISD::ROTR , MVT::i64 , Expand);
- setOperationAction(ISD::BSWAP, MVT::i64 , Expand); // mux @rev
-
- // VASTART needs to be custom lowered to use the VarArgsFrameIndex
- setOperationAction(ISD::VAARG , MVT::Other, Custom);
- setOperationAction(ISD::VASTART , MVT::Other, Custom);
-
- // Use the default implementation.
- setOperationAction(ISD::VACOPY , MVT::Other, Expand);
- setOperationAction(ISD::VAEND , MVT::Other, Expand);
- setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
- setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
- setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
+ // ia64 uses SELECT not SELECT_CC
+ setOperationAction(ISD::SELECT_CC , MVT::Other, Expand);
+
+ // We need to handle ISD::RET for void functions ourselves,
+ // so we get a chance to restore ar.pfs before adding a
+ // br.ret insn
+ setOperationAction(ISD::RET, MVT::Other, Custom);
+
+ setShiftAmountType(MVT::i64);
+
+ setOperationAction(ISD::FREM , MVT::f32 , Expand);
+ setOperationAction(ISD::FREM , MVT::f64 , Expand);
+
+ setOperationAction(ISD::UREM , MVT::f32 , Expand);
+ setOperationAction(ISD::UREM , MVT::f64 , Expand);
+
+ setOperationAction(ISD::MEMBARRIER , MVT::Other, Expand);
+
+ setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote);
+ setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote);
+
+ // We don't support sin/cos/sqrt/pow
+ setOperationAction(ISD::FSIN , MVT::f64, Expand);
+ setOperationAction(ISD::FCOS , MVT::f64, Expand);
+ setOperationAction(ISD::FSQRT, MVT::f64, Expand);
+ setOperationAction(ISD::FPOW , MVT::f64, Expand);
+ setOperationAction(ISD::FSIN , MVT::f32, Expand);
+ setOperationAction(ISD::FCOS , MVT::f32, Expand);
+ setOperationAction(ISD::FSQRT, MVT::f32, Expand);
+ setOperationAction(ISD::FPOW , MVT::f32, Expand);
- // Thread Local Storage
- setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
+
+ // FIXME: IA64 supports fcopysign natively!
+ setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
+ setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
+
+ // We don't have line number support yet.
+ setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
+ setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
+ setOperationAction(ISD::DBG_LABEL, MVT::Other, Expand);
+ setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
+
+ // IA64 has ctlz in the form of the 'fnorm' instruction. The Legalizer
+ // expansion for ctlz/cttz in terms of ctpop is much larger, but lower
+ // latency.
+ // FIXME: Custom lower CTLZ when compiling for size?
+ setOperationAction(ISD::CTLZ , MVT::i64 , Expand);
+ setOperationAction(ISD::CTTZ , MVT::i64 , Expand);
+ setOperationAction(ISD::ROTL , MVT::i64 , Expand);
+ setOperationAction(ISD::ROTR , MVT::i64 , Expand);
+
+ // FIXME: IA64 has this, but is not implemented. should be mux @rev
+ setOperationAction(ISD::BSWAP, MVT::i64 , Expand);
+
+ // VASTART needs to be custom lowered to use the VarArgsFrameIndex
+ setOperationAction(ISD::VAARG , MVT::Other, Custom);
+ setOperationAction(ISD::VASTART , MVT::Other, Custom);
+
+ // Use the default implementation.
+ setOperationAction(ISD::VACOPY , MVT::Other, Expand);
+ setOperationAction(ISD::VAEND , MVT::Other, Expand);
+ setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
+ setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
+ setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
- setStackPointerRegisterToSaveRestore(IA64::r12);
+ // Thread Local Storage
+ setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
- setJumpBufSize(704); // on ia64-linux, jmp_bufs are 704 bytes..
- setJumpBufAlignment(16); // ...and must be 16-byte aligned
-
- computeRegisterProperties();
+ setStackPointerRegisterToSaveRestore(IA64::r12);
- addLegalFPImmediate(APFloat(+0.0));
- addLegalFPImmediate(APFloat(-0.0));
- addLegalFPImmediate(APFloat(+1.0));
- addLegalFPImmediate(APFloat(-1.0));
+ setJumpBufSize(704); // on ia64-linux, jmp_bufs are 704 bytes..
+ setJumpBufAlignment(16); // ...and must be 16-byte aligned
+
+ computeRegisterProperties();
+
+ addLegalFPImmediate(APFloat(+0.0));
+ addLegalFPImmediate(APFloat(-0.0));
+ addLegalFPImmediate(APFloat(+1.0));
+ addLegalFPImmediate(APFloat(-1.0));
}
const char *IA64TargetLowering::getTargetNodeName(unsigned Opcode) const {
@@ -132,14 +140,12 @@
}
}
-MVT::ValueType
-IA64TargetLowering::getSetCCResultType(const SDOperand &) const {
+MVT IA64TargetLowering::getSetCCResultType(const SDOperand &) const {
return MVT::i1;
}
-std::vector<SDOperand>
-IA64TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
- std::vector<SDOperand> ArgValues;
+void IA64TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG,
+ SmallVectorImpl<SDOperand> &ArgValues) {
//
// add beautiful description of IA64 stack frame format
// here (from intel 24535803.pdf most likely)
@@ -174,7 +180,7 @@
SDOperand newroot, argt;
if(count < 8) { // need to fix this logic? maybe.
- switch (getValueType(I->getType())) {
+ switch (getValueType(I->getType()).getSimpleVT()) {
default:
assert(0 && "ERROR in LowerArgs: can't lower this type of arg.\n");
case MVT::f32:
@@ -279,7 +285,7 @@
// Finally, inform the code generator which regs we return values in.
// (see the ISD::RET: case in the instruction selector)
- switch (getValueType(F.getReturnType())) {
+ switch (getValueType(F.getReturnType()).getSimpleVT()) {
default: assert(0 && "i have no idea where to return this type!");
case MVT::isVoid: break;
case MVT::i1:
@@ -294,8 +300,6 @@
MF.getRegInfo().addLiveOut(IA64::F8);
break;
}
-
- return ArgValues;
}
std::pair<SDOperand, SDOperand>
@@ -340,10 +344,10 @@
for (unsigned i = 0, e = Args.size(); i != e; ++i)
{
SDOperand Val = Args[i].Node;
- MVT::ValueType ObjectVT = Val.getValueType();
+ MVT ObjectVT = Val.getValueType();
SDOperand ValToStore(0, 0), ValToConvert(0, 0);
unsigned ObjSize=8;
- switch (ObjectVT) {
+ switch (ObjectVT.getSimpleVT()) {
default: assert(0 && "unexpected argument type!");
case MVT::i1:
case MVT::i8:
@@ -435,7 +439,7 @@
// flagged for now, but shouldn't have to be (TODO)
unsigned seenConverts = 0;
for (unsigned i = 0, e = RegValuesToPass.size(); i != e; ++i) {
- if(MVT::isFloatingPoint(RegValuesToPass[i].getValueType())) {
+ if(RegValuesToPass[i].getValueType().isFloatingPoint()) {
Chain = DAG.getCopyToReg(Chain, IntArgRegs[i], Converts[seenConverts++],
InFlag);
InFlag = Chain.getValue(1);
@@ -446,7 +450,7 @@
unsigned usedFPArgs = 0;
for (unsigned i = 0, e = RegValuesToPass.size(); i != e; ++i) {
Chain = DAG.getCopyToReg(Chain,
- MVT::isInteger(RegValuesToPass[i].getValueType()) ?
+ RegValuesToPass[i].getValueType().isInteger() ?
IntArgRegs[i] : FPArgRegs[usedFPArgs++], RegValuesToPass[i], InFlag);
InFlag = Chain.getValue(1);
}
@@ -459,7 +463,7 @@
}
*/
- std::vector<MVT::ValueType> NodeTys;
+ std::vector<MVT> NodeTys;
std::vector<SDOperand> CallOperands;
NodeTys.push_back(MVT::Other); // Returns a chain
NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
@@ -485,14 +489,14 @@
Chain = DAG.getCopyToReg(Chain, IA64::rp, RPBeforeCall, InFlag);
InFlag = Chain.getValue(1);
- std::vector<MVT::ValueType> RetVals;
+ std::vector<MVT> RetVals;
RetVals.push_back(MVT::Other);
RetVals.push_back(MVT::Flag);
- MVT::ValueType RetTyVT = getValueType(RetTy);
+ MVT RetTyVT = getValueType(RetTy);
SDOperand RetVal;
if (RetTyVT != MVT::isVoid) {
- switch (RetTyVT) {
+ switch (RetTyVT.getSimpleVT()) {
default: assert(0 && "Unknown value type to return!");
case MVT::i1: { // bools are just like other integers (returned in r8)
// we *could* fall through to the truncate below, but this saves a
@@ -528,7 +532,8 @@
case MVT::f32:
RetVal = DAG.getCopyFromReg(Chain, IA64::F8, MVT::f64, InFlag);
Chain = RetVal.getValue(1);
- RetVal = DAG.getNode(ISD::TRUNCATE, MVT::f32, RetVal);
+ RetVal = DAG.getNode(ISD::FP_ROUND, MVT::f32, RetVal,
+ DAG.getIntPtrConstant(0));
break;
case MVT::f64:
RetVal = DAG.getCopyFromReg(Chain, IA64::F8, MVT::f64, InFlag);
@@ -565,8 +570,8 @@
return DAG.getNode(IA64ISD::RET_FLAG, MVT::Other, AR_PFSVal);
case 3: {
// Copy the result into the output register & restore ar.pfs
- MVT::ValueType ArgVT = Op.getOperand(1).getValueType();
- unsigned ArgReg = MVT::isInteger(ArgVT) ? IA64::r8 : IA64::F8;
+ MVT ArgVT = Op.getOperand(1).getValueType();
+ unsigned ArgReg = ArgVT.isInteger() ? IA64::r8 : IA64::F8;
AR_PFSVal = DAG.getCopyFromReg(Op.getOperand(0), VirtGPR, MVT::i64);
Copy = DAG.getCopyToReg(AR_PFSVal.getValue(1), ArgReg, Op.getOperand(1),
@@ -580,13 +585,13 @@
return SDOperand();
}
case ISD::VAARG: {
- MVT::ValueType VT = getPointerTy();
+ MVT VT = getPointerTy();
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
SDOperand VAList = DAG.getLoad(VT, Op.getOperand(0), Op.getOperand(1),
SV, 0);
// Increment the pointer, VAList, to the next vaarg
SDOperand VAIncr = DAG.getNode(ISD::ADD, VT, VAList,
- DAG.getConstant(MVT::getSizeInBits(VT)/8,
+ DAG.getConstant(VT.getSizeInBits()/8,
VT));
// Store the incremented VAList to the legalized pointer
VAIncr = DAG.getStore(VAList.getValue(1), VAIncr,
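The VAARG lowering above is the usual load/bump/store pattern; conceptually
(a sketch only, names hypothetical, slot size taken from VT.getSizeInBits()/8,
i.e. 8 bytes for the IA64 pointer type):

    char *cur = *ap;          /* load the current va_list pointer        */
    *ap = cur + 8;            /* advance it past this argument slot      */
    arg = *(arg_type *)cur;   /* the emitted load then fetches the arg   */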
Modified: llvm/branches/non-call-eh/lib/Target/IA64/IA64ISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/IA64/IA64ISelLowering.h?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/IA64/IA64ISelLowering.h (original)
+++ llvm/branches/non-call-eh/lib/Target/IA64/IA64ISelLowering.h Sun Jul 6 15:45:41 2008
@@ -49,12 +49,12 @@
const char *getTargetNodeName(unsigned Opcode) const;
/// getSetCCResultType: return ISD::SETCC's result type.
- virtual MVT::ValueType getSetCCResultType(const SDOperand &) const;
+ virtual MVT getSetCCResultType(const SDOperand &) const;
/// LowerArguments - This hook must be implemented to indicate how we should
/// lower the arguments for the specified function, into the specified DAG.
- virtual std::vector<SDOperand>
- LowerArguments(Function &F, SelectionDAG &DAG);
+ virtual void LowerArguments(Function &F, SelectionDAG &DAG,
+ SmallVectorImpl<SDOperand> &ArgValues);
/// LowerCallTo - This hook lowers an abstract call to a function into an
/// actual call.
Modified: llvm/branches/non-call-eh/lib/Target/IA64/IA64InstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/IA64/IA64InstrInfo.h?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/IA64/IA64InstrInfo.h (original)
+++ llvm/branches/non-call-eh/lib/Target/IA64/IA64InstrInfo.h Sun Jul 6 15:45:41 2008
@@ -28,7 +28,7 @@
/// such, whenever a client has an instance of instruction info, it should
/// always be able to get register info as well (through this method).
///
- virtual const TargetRegisterInfo &getRegisterInfo() const { return RI; }
+ virtual const IA64RegisterInfo &getRegisterInfo() const { return RI; }
//
// Return true if the instruction is a register to register move and
Modified: llvm/branches/non-call-eh/lib/Target/IA64/IA64TargetMachine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/IA64/IA64TargetMachine.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/IA64/IA64TargetMachine.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/IA64/IA64TargetMachine.cpp Sun Jul 6 15:45:41 2008
@@ -26,9 +26,7 @@
extern "C" int IA64TargetMachineModule;
int IA64TargetMachineModule = 0;
-namespace {
- RegisterTarget<IA64TargetMachine> X("ia64", " IA-64 (Itanium)");
-}
+static RegisterTarget<IA64TargetMachine> X("ia64", " IA-64 (Itanium)");
const TargetAsmInfo *IA64TargetMachine::createTargetAsmInfo() const {
return new IA64TargetAsmInfo(*this);
Modified: llvm/branches/non-call-eh/lib/Target/IA64/IA64TargetMachine.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/IA64/IA64TargetMachine.h?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/IA64/IA64TargetMachine.h (original)
+++ llvm/branches/non-call-eh/lib/Target/IA64/IA64TargetMachine.h Sun Jul 6 15:45:41 2008
@@ -40,7 +40,7 @@
virtual IA64TargetLowering *getTargetLowering() const {
return const_cast<IA64TargetLowering*>(&TLInfo);
}
- virtual const TargetRegisterInfo *getRegisterInfo() const {
+ virtual const IA64RegisterInfo *getRegisterInfo() const {
return &InstrInfo.getRegisterInfo();
}
virtual const TargetData *getTargetData() const { return &DataLayout; }
Modified: llvm/branches/non-call-eh/lib/Target/MSIL/MSILWriter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/MSIL/MSILWriter.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/MSIL/MSILWriter.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/MSIL/MSILWriter.cpp Sun Jul 6 15:45:41 2008
@@ -46,7 +46,7 @@
}
-RegisterTarget<MSILTarget> X("msil", " MSIL backend");
+static RegisterTarget<MSILTarget> X("msil", " MSIL backend");
bool MSILModule::runOnModule(Module &M) {
ModulePtr = &M;
Modified: llvm/branches/non-call-eh/lib/Target/Mips/Mips.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/Mips/Mips.td?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/Mips/Mips.td (original)
+++ llvm/branches/non-call-eh/lib/Target/Mips/Mips.td Sun Jul 6 15:45:41 2008
@@ -16,7 +16,7 @@
include "../Target.td"
//===----------------------------------------------------------------------===//
-// Descriptions
+// Register File, Calling Conv, Instruction Descriptions
//===----------------------------------------------------------------------===//
include "MipsRegisterInfo.td"
@@ -30,22 +30,43 @@
}
//===----------------------------------------------------------------------===//
-// CPU Directives //
+// Mips Subtarget features //
//===----------------------------------------------------------------------===//
-// Not currently supported, but work as SubtargetFeature placeholder.
-def FeatureMipsIII : SubtargetFeature<"mips3", "IsMipsIII", "true",
- "MipsIII ISA Support">;
+def FeatureGP64Bit : SubtargetFeature<"gp64", "IsGP64bit", "true",
+ "General Purpose Registers are 64-bit wide.">;
+def FeatureFP64Bit : SubtargetFeature<"fp64", "IsFP64bit", "true",
+ "Support 64-bit FP registers.">;
+def FeatureSingleFloat : SubtargetFeature<"single-float", "IsSingleFloat",
+ "true", "Only supports single precision float">;
+def FeatureAllegrexVFPU : SubtargetFeature<"allegrex-vfpu", "HasAllegrexVFPU",
+ "true", "Enable Allegrex VFPU instructions.">;
+def FeatureMips2 : SubtargetFeature<"mips2", "MipsArchVersion", "Mips2",
+ "Mips2 ISA Support">;
+def FeatureO32 : SubtargetFeature<"o32", "MipsABI", "O32",
+ "Enable o32 ABI">;
+def FeatureEABI : SubtargetFeature<"eabi", "MipsABI", "EABI",
+ "Enable eabi ABI">;
//===----------------------------------------------------------------------===//
// Mips processors supported.
//===----------------------------------------------------------------------===//
-def : Processor<"mips1", MipsGenericItineraries, []>;
-def : Processor<"r2000", MipsGenericItineraries, []>;
-def : Processor<"r3000", MipsGenericItineraries, []>;
+class Proc<string Name, list<SubtargetFeature> Features>
+ : Processor<Name, MipsGenericItineraries, Features>;
+
+def : Proc<"mips1", []>;
+def : Proc<"r2000", []>;
+def : Proc<"r3000", []>;
+
+def : Proc<"mips2", [FeatureMips2]>;
+def : Proc<"r6000", [FeatureMips2]>;
+
+// Allegrex is a 32-bit subset of r4000, both for integer and fp registers,
+// but much more similar to Mips2 than Mips3.
+def : Proc<"allegrex", [FeatureMips2, FeatureSingleFloat, FeatureAllegrexVFPU,
+ FeatureEABI]>;
def Mips : Target {
let InstructionSet = MipsInstrInfo;
}
-
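These feature and processor definitions become selectable from the command
line; assuming the usual llc flag spellings, something like the following
should exercise them (feature names taken from the definitions above):

    llc -march=mips -mcpu=allegrex foo.bc
    llc -march=mips -mattr=+mips2,+single-float,+eabi foo.bc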
Modified: llvm/branches/non-call-eh/lib/Target/Mips/MipsAsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/Mips/MipsAsmPrinter.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/Mips/MipsAsmPrinter.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/Mips/MipsAsmPrinter.cpp Sun Jul 6 15:45:41 2008
@@ -32,7 +32,6 @@
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Mangler.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/CommandLine.h"
@@ -63,6 +62,8 @@
void printOperand(const MachineInstr *MI, int opNum);
void printMemOperand(const MachineInstr *MI, int opNum,
const char *Modifier = 0);
+ void printFCCOperand(const MachineInstr *MI, int opNum,
+ const char *Modifier = 0);
unsigned int getSavedRegsBitmask(bool isFloat, MachineFunction &MF);
void printHex32(unsigned int Value);
@@ -429,6 +430,13 @@
O << ")";
}
+void MipsAsmPrinter::
+printFCCOperand(const MachineInstr *MI, int opNum, const char *Modifier)
+{
+ const MachineOperand& MO = MI->getOperand(opNum);
+ O << Mips::MipsFCCToString((Mips::CondCode)MO.getImm());
+}
+
bool MipsAsmPrinter::
doInitialization(Module &M)
{
@@ -461,7 +469,8 @@
// Is this correct ?
if (C->isNullValue() && (I->hasLinkOnceLinkage() ||
- I->hasInternalLinkage() || I->hasWeakLinkage()))
+ I->hasInternalLinkage() || I->hasWeakLinkage() ||
+ I->hasCommonLinkage()))
{
if (Size == 0) Size = 1; // .comm Foo, 0 is undefined, avoid it.
@@ -487,7 +496,8 @@
switch (I->getLinkage())
{
case GlobalValue::LinkOnceLinkage:
- case GlobalValue::WeakLinkage:
+ case GlobalValue::CommonLinkage:
+ case GlobalValue::WeakLinkage:
// FIXME: Verify correct for weak.
// Nonnull linkonce -> weak
O << "\t.weak " << name << "\n";
@@ -545,5 +555,7 @@
}
}
+ O << "\n";
+
return AsmPrinter::doFinalization(M);
}
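The new printFCCOperand hook is what lets the FP compare instructions added
later in this patch (MipsInstrFPU.td) print their condition field
symbolically; as a rough illustration, an FCMP with cc = MIPS_FCOND_EQ should
come out along the lines of

    c.eq.s $f12, $f14

with Mips::MipsFCCToString presumably supplying the "eq" part (the exact
operand separators follow the .td asm strings).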
Modified: llvm/branches/non-call-eh/lib/Target/Mips/MipsCallingConv.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/Mips/MipsCallingConv.td?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/Mips/MipsCallingConv.td (original)
+++ llvm/branches/non-call-eh/lib/Target/Mips/MipsCallingConv.td Sun Jul 6 15:45:41 2008
@@ -14,26 +14,76 @@
CCIf<!strconcat("State.getTarget().getSubtarget<MipsSubtarget>().", F), A>;
//===----------------------------------------------------------------------===//
-// Mips Return Value Calling Convention
+// Mips O32 Calling Convention
//===----------------------------------------------------------------------===//
-def RetCC_Mips : CallingConv<[
+def CC_MipsO32 : CallingConv<[
+ // Promote i8/i16 arguments to i32.
+ CCIfType<[i8, i16], CCPromoteToType<i32>>,
+
+ // The first 4 integer arguments are passed in integer registers.
+ CCIfType<[i32], CCAssignToReg<[A0, A1, A2, A3]>>,
+
+ // Integer values get stored in stack slots that are 4 bytes in
+ // size and 4-byte aligned.
+ CCIfType<[i32], CCAssignToStack<4, 4>>
+]>;
+
+def RetCC_MipsO32 : CallingConv<[
// i32 are returned in registers V0, V1
CCIfType<[i32], CCAssignToReg<[V0, V1]>>
]>;
-
//===----------------------------------------------------------------------===//
-// Mips Argument Calling Conventions
+// Mips EABI Calling Convention
//===----------------------------------------------------------------------===//
-def CC_Mips : CallingConv<[
+def CC_MipsEABI : CallingConv<[
// Promote i8/i16 arguments to i32.
CCIfType<[i8, i16], CCPromoteToType<i32>>,
- // The first 4 integer arguments are passed in integer registers.
- CCIfType<[i32], CCAssignToReg<[A0, A1, A2, A3]>>,
+ // Integer arguments are passed in integer registers.
+ CCIfType<[i32], CCAssignToReg<[A0, A1, A2, A3, T0, T1, T2, T3]>>,
+
+ // Single fp arguments are passed in pairs within 32-bit mode
+ CCIfType<[f32], CCIfSubtarget<"isSingleFloat()",
+ CCAssignToReg<[F12, F13, F14, F15, F16, F17, F18, F19]>>>,
+
+ CCIfType<[f32], CCIfSubtarget<"isNotSingleFloat()",
+ CCAssignToReg<[F12, F14, F16, F18]>>>,
+
+  // The first 4 double fp arguments are passed in single fp registers.
+ CCIfType<[f64], CCIfSubtarget<"isNotSingleFloat()",
+ CCAssignToReg<[D6, D7, D8, D9]>>>,
// Integer values get stored in stack slots that are 4 bytes in
// size and 4-byte aligned.
- CCIfType<[i32], CCAssignToStack<4, 4>>
+ CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
+
+  // Double precision fp values get stored in stack slots that are 8 bytes
+  // in size and 8-byte aligned.
+ CCIfType<[f64], CCIfSubtarget<"isNotSingleFloat()", CCAssignToStack<8, 8>>>
+]>;
+
+def RetCC_MipsEABI : CallingConv<[
+ // i32 are returned in registers V0, V1
+ CCIfType<[i32], CCAssignToReg<[V0, V1]>>,
+
+ // f32 are returned in registers F0, F1
+ CCIfType<[f32], CCAssignToReg<[F0, F1]>>,
+
+ // f64 are returned in register D0
+ CCIfType<[f64], CCIfSubtarget<"isNotSingleFloat()", CCAssignToReg<[D0]>>>
]>;
+//===----------------------------------------------------------------------===//
+// Mips Calling Convention Dispatch
+//===----------------------------------------------------------------------===//
+
+def CC_Mips : CallingConv<[
+ CCIfSubtarget<"isABI_EABI()", CCDelegateTo<CC_MipsEABI>>,
+ CCDelegateTo<CC_MipsO32>
+]>;
+
+def RetCC_Mips : CallingConv<[
+ CCIfSubtarget<"isABI_EABI()", CCDelegateTo<RetCC_MipsEABI>>,
+ CCDelegateTo<RetCC_MipsO32>
+]>;
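The dispatch goes through the CCIfSubtarget wrapper defined at the top of
this file; a hypothetical, heavily simplified shape of what TableGen
generates for it (the real generated function also takes the usual
value-number/VT/flags parameters, elided here):

    static bool CC_Mips(/* ValNo, ValVT, LocVT, LocInfo, ArgFlags, */
                        CCState &State) {
      if (State.getTarget().getSubtarget<MipsSubtarget>().isABI_EABI())
        return CC_MipsEABI(/* same arguments */ State);  // EABI rules
      return CC_MipsO32(/* same arguments */ State);     // default: O32
    }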
Modified: llvm/branches/non-call-eh/lib/Target/Mips/MipsISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/Mips/MipsISelDAGToDAG.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/Mips/MipsISelDAGToDAG.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/Mips/MipsISelDAGToDAG.cpp Sun Jul 6 15:45:41 2008
@@ -58,15 +58,14 @@
/// Subtarget - Keep a pointer to the MipsSubtarget around so that we can
/// make the right decision when generating code for different targets.
- //TODO: add initialization on constructor
- //const MipsSubtarget *Subtarget;
+ const MipsSubtarget &Subtarget;
public:
- MipsDAGToDAGISel(MipsTargetMachine &tm) :
- SelectionDAGISel(MipsLowering),
- TM(tm), MipsLowering(*TM.getTargetLowering()) {}
+ MipsDAGToDAGISel(MipsTargetMachine &tm) : SelectionDAGISel(MipsLowering),
+ TM(tm), MipsLowering(*TM.getTargetLowering()),
+ Subtarget(tm.getSubtarget<MipsSubtarget>()) {}
- virtual void InstructionSelectBasicBlock(SelectionDAG &SD);
+ virtual void InstructionSelect(SelectionDAG &SD);
// Pass Name
virtual const char *getPassName() const {
@@ -100,10 +99,10 @@
}
-/// InstructionSelectBasicBlock - This callback is invoked by
+/// InstructionSelect - This callback is invoked by
/// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
void MipsDAGToDAGISel::
-InstructionSelectBasicBlock(SelectionDAG &SD)
+InstructionSelect(SelectionDAG &SD)
{
DEBUG(BB->dump());
// Codegen the basic block.
@@ -120,9 +119,6 @@
#endif
SD.RemoveDeadNodes();
-
- // Emit machine code to BB.
- ScheduleAndEmitDAG(SD);
}
/// getGlobalBaseReg - Output the instructions required to put the
@@ -226,7 +222,74 @@
default: break;
- /// Special Mul operations
+ case ISD::SUBE:
+ case ISD::ADDE: {
+ SDOperand InFlag = Node->getOperand(2), CmpLHS;
+ unsigned Opc = InFlag.getOpcode(), MOp;
+
+ assert(((Opc == ISD::ADDC || Opc == ISD::ADDE) ||
+ (Opc == ISD::SUBC || Opc == ISD::SUBE)) &&
+ "(ADD|SUB)E flag operand must come from (ADD|SUB)C/E insn");
+
+ if (Opcode == ISD::ADDE) {
+ CmpLHS = InFlag.getValue(0);
+ MOp = Mips::ADDu;
+ } else {
+ CmpLHS = InFlag.getOperand(0);
+ MOp = Mips::SUBu;
+ }
+
+ SDOperand Ops[] = { CmpLHS, InFlag.getOperand(1) };
+
+ SDOperand LHS = Node->getOperand(0);
+ SDOperand RHS = Node->getOperand(1);
+ AddToISelQueue(LHS);
+ AddToISelQueue(RHS);
+
+ MVT VT = LHS.getValueType();
+ SDNode *Carry = CurDAG->getTargetNode(Mips::SLTu, VT, Ops, 2);
+ SDNode *AddCarry = CurDAG->getTargetNode(Mips::ADDu, VT,
+ SDOperand(Carry,0), RHS);
+
+ return CurDAG->SelectNodeTo(N.Val, MOp, VT, MVT::Flag,
+ LHS, SDOperand(AddCarry,0));
+ }
+
+ /// Mul/Div with two results
+ case ISD::SDIVREM:
+ case ISD::UDIVREM:
+ case ISD::SMUL_LOHI:
+ case ISD::UMUL_LOHI: {
+ SDOperand Op1 = Node->getOperand(0);
+ SDOperand Op2 = Node->getOperand(1);
+ AddToISelQueue(Op1);
+ AddToISelQueue(Op2);
+
+ unsigned Op;
+ if (Opcode == ISD::UMUL_LOHI || Opcode == ISD::SMUL_LOHI)
+ Op = (Opcode == ISD::UMUL_LOHI ? Mips::MULTu : Mips::MULT);
+ else
+ Op = (Opcode == ISD::UDIVREM ? Mips::DIVu : Mips::DIV);
+
+ SDNode *Node = CurDAG->getTargetNode(Op, MVT::Flag, Op1, Op2);
+
+ SDOperand InFlag = SDOperand(Node, 0);
+ SDNode *Lo = CurDAG->getTargetNode(Mips::MFLO, MVT::i32, MVT::Flag, InFlag);
+
+ InFlag = SDOperand(Lo,1);
+ SDNode *Hi = CurDAG->getTargetNode(Mips::MFHI, MVT::i32, InFlag);
+
+ if (!N.getValue(0).use_empty())
+ ReplaceUses(N.getValue(0), SDOperand(Lo,0));
+
+ if (!N.getValue(1).use_empty())
+ ReplaceUses(N.getValue(1), SDOperand(Hi,0));
+
+ return NULL;
+ }
+
+ /// Special Muls
+ case ISD::MUL:
case ISD::MULHS:
case ISD::MULHU: {
SDOperand MulOp1 = Node->getOperand(0);
@@ -237,38 +300,36 @@
unsigned MulOp = (Opcode == ISD::MULHU ? Mips::MULTu : Mips::MULT);
SDNode *MulNode = CurDAG->getTargetNode(MulOp, MVT::Flag, MulOp1, MulOp2);
- SDOperand MFInFlag = SDOperand(MulNode, 0);
- return CurDAG->getTargetNode(Mips::MFHI, MVT::i32, MFInFlag);
+ SDOperand InFlag = SDOperand(MulNode, 0);
+
+    if (Opcode == ISD::MUL)
+ return CurDAG->getTargetNode(Mips::MFLO, MVT::i32, InFlag);
+ else
+ return CurDAG->getTargetNode(Mips::MFHI, MVT::i32, InFlag);
}
- /// Div operations
+ /// Div/Rem operations
+ case ISD::SREM:
+ case ISD::UREM:
case ISD::SDIV:
case ISD::UDIV: {
- SDOperand DivOp1 = Node->getOperand(0);
- SDOperand DivOp2 = Node->getOperand(1);
- AddToISelQueue(DivOp1);
- AddToISelQueue(DivOp2);
-
- unsigned DivOp = (Opcode == ISD::SDIV ? Mips::DIV : Mips::DIVu);
- SDNode *DivNode = CurDAG->getTargetNode(DivOp, MVT::Flag, DivOp1, DivOp2);
-
- SDOperand MFInFlag = SDOperand(DivNode, 0);
- return CurDAG->getTargetNode(Mips::MFLO, MVT::i32, MFInFlag);
- }
-
- /// Rem operations
- case ISD::SREM:
- case ISD::UREM: {
- SDOperand RemOp1 = Node->getOperand(0);
- SDOperand RemOp2 = Node->getOperand(1);
- AddToISelQueue(RemOp1);
- AddToISelQueue(RemOp2);
-
- unsigned RemOp = (Opcode == ISD::SREM ? Mips::DIV : Mips::DIVu);
- SDNode *RemNode = CurDAG->getTargetNode(RemOp, MVT::Flag, RemOp1, RemOp2);
+ SDOperand Op1 = Node->getOperand(0);
+ SDOperand Op2 = Node->getOperand(1);
+ AddToISelQueue(Op1);
+ AddToISelQueue(Op2);
+
+ unsigned Op, MOp;
+ if (Opcode == ISD::SDIV || Opcode == ISD::UDIV) {
+ Op = (Opcode == ISD::SDIV ? Mips::DIV : Mips::DIVu);
+ MOp = Mips::MFLO;
+ } else {
+ Op = (Opcode == ISD::SREM ? Mips::DIV : Mips::DIVu);
+ MOp = Mips::MFHI;
+ }
+ SDNode *Node = CurDAG->getTargetNode(Op, MVT::Flag, Op1, Op2);
- SDOperand MFInFlag = SDOperand(RemNode, 0);
- return CurDAG->getTargetNode(Mips::MFHI, MVT::i32, MFInFlag);
+ SDOperand InFlag = SDOperand(Node, 0);
+ return CurDAG->getTargetNode(MOp, MVT::i32, InFlag);
}
// Get target GOT address.
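Since MIPS has no flags register, the ADDE/SUBE case above rebuilds the carry
explicitly; roughly, for the add-with-carry half (register names illustrative
only):

    sltu  $t,   $sum_lo, $rhs_lo   # carry out of the low-part ADDC/ADDE
    addu  $t,   $t, $b             # fold the carry into one operand
    addu  $dst, $a, $t             # high-part sum

which matches the SLTu/ADDu/ADDu node sequence built in the selector.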
Modified: llvm/branches/non-call-eh/lib/Target/Mips/MipsISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/Mips/MipsISelLowering.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/Mips/MipsISelLowering.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/Mips/MipsISelLowering.cpp Sun Jul 6 15:45:41 2008
@@ -17,6 +17,7 @@
#include "MipsISelLowering.h"
#include "MipsMachineFunction.h"
#include "MipsTargetMachine.h"
+#include "MipsSubtarget.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
@@ -39,17 +40,22 @@
{
switch (Opcode)
{
- case MipsISD::JmpLink : return "MipsISD::JmpLink";
- case MipsISD::Hi : return "MipsISD::Hi";
- case MipsISD::Lo : return "MipsISD::Lo";
- case MipsISD::Ret : return "MipsISD::Ret";
- default : return NULL;
+ case MipsISD::JmpLink : return "MipsISD::JmpLink";
+ case MipsISD::Hi : return "MipsISD::Hi";
+ case MipsISD::Lo : return "MipsISD::Lo";
+ case MipsISD::Ret : return "MipsISD::Ret";
+ case MipsISD::SelectCC : return "MipsISD::SelectCC";
+ case MipsISD::FPBrcond : return "MipsISD::FPBrcond";
+ case MipsISD::FPCmp : return "MipsISD::FPCmp";
+ default : return NULL;
}
}
MipsTargetLowering::
MipsTargetLowering(MipsTargetMachine &TM): TargetLowering(TM)
{
+ Subtarget = &TM.getSubtarget<MipsSubtarget>();
+
// Mips does not have i1 type, so use i32 for
// setcc operations results (slt, sgt, ...).
setSetCCResultContents(ZeroOrOneSetCCResult);
@@ -60,11 +66,24 @@
// Set up the register classes
addRegisterClass(MVT::i32, Mips::CPURegsRegisterClass);
+ // When dealing with single precision only, use libcalls
+ if (!Subtarget->isSingleFloat()) {
+ addRegisterClass(MVT::f32, Mips::AFGR32RegisterClass);
+ if (!Subtarget->isFP64bit())
+ addRegisterClass(MVT::f64, Mips::AFGR64RegisterClass);
+ } else
+ addRegisterClass(MVT::f32, Mips::FGR32RegisterClass);
+
// Custom
setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
setOperationAction(ISD::RET, MVT::Other, Custom);
setOperationAction(ISD::JumpTable, MVT::i32, Custom);
+ setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
+ setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
+
+ if (Subtarget->isSingleFloat())
+ setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  // Load extended operations for i1 types must be promoted
setLoadXAction(ISD::EXTLOAD, MVT::i1, Promote);
@@ -75,10 +94,14 @@
setOperationAction(ISD::BR_JT, MVT::Other, Expand);
setOperationAction(ISD::BR_CC, MVT::Other, Expand);
setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
- setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
setOperationAction(ISD::SELECT, MVT::i32, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
+ if (!Subtarget->isAllegrex()) {
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
+ }
+
// Mips not supported intrinsics.
setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
@@ -94,9 +117,10 @@
setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
// We don't have line number support yet.
- setOperationAction(ISD::LOCATION, MVT::Other, Expand);
+ setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
- setOperationAction(ISD::LABEL, MVT::Other, Expand);
+ setOperationAction(ISD::DBG_LABEL, MVT::Other, Expand);
+ setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
// Use the default for now
setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
@@ -107,8 +131,7 @@
}
-MVT::ValueType
-MipsTargetLowering::getSetCCResultType(const SDOperand &) const {
+MVT MipsTargetLowering::getSetCCResultType(const SDOperand &) const {
return MVT::i32;
}
@@ -124,10 +147,75 @@
case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
case ISD::JumpTable: return LowerJumpTable(Op, DAG);
+ case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
}
return SDOperand();
}
+MachineBasicBlock *
+MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
+ MachineBasicBlock *BB)
+{
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ switch (MI->getOpcode()) {
+ default: assert(false && "Unexpected instr type to insert");
+ case Mips::Select_CC: {
+ // To "insert" a SELECT_CC instruction, we actually have to insert the
+ // diamond control-flow pattern. The incoming instruction knows the
+ // destination vreg to set, the condition code register to branch on, the
+ // true/false values to select between, and a branch opcode to use.
+ const BasicBlock *LLVM_BB = BB->getBasicBlock();
+ ilist<MachineBasicBlock>::iterator It = BB;
+ ++It;
+
+ // thisMBB:
+ // ...
+ // TrueVal = ...
+ // setcc r1, r2, r3
+ // bNE r1, r0, copy1MBB
+ // fallthrough --> copy0MBB
+ MachineBasicBlock *thisMBB = BB;
+ MachineBasicBlock *copy0MBB = new MachineBasicBlock(LLVM_BB);
+ MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
+ BuildMI(BB, TII->get(Mips::BNE)).addReg(MI->getOperand(1).getReg())
+ .addReg(Mips::ZERO).addMBB(sinkMBB);
+ MachineFunction *F = BB->getParent();
+ F->getBasicBlockList().insert(It, copy0MBB);
+ F->getBasicBlockList().insert(It, sinkMBB);
+ // Update machine-CFG edges by first adding all successors of the current
+ // block to the new block which will contain the Phi node for the select.
+ for(MachineBasicBlock::succ_iterator i = BB->succ_begin(),
+ e = BB->succ_end(); i != e; ++i)
+ sinkMBB->addSuccessor(*i);
+ // Next, remove all successors of the current block, and add the true
+ // and fallthrough blocks as its successors.
+ while(!BB->succ_empty())
+ BB->removeSuccessor(BB->succ_begin());
+ BB->addSuccessor(copy0MBB);
+ BB->addSuccessor(sinkMBB);
+
+ // copy0MBB:
+ // %FalseValue = ...
+ // # fallthrough to sinkMBB
+ BB = copy0MBB;
+
+ // Update machine-CFG edges
+ BB->addSuccessor(sinkMBB);
+
+ // sinkMBB:
+ // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
+ // ...
+ BB = sinkMBB;
+ BuildMI(BB, TII->get(Mips::PHI), MI->getOperand(0).getReg())
+ .addReg(MI->getOperand(2).getReg()).addMBB(copy0MBB)
+ .addReg(MI->getOperand(3).getReg()).addMBB(thisMBB);
+
+ delete MI; // The pseudo instruction is gone now.
+ return BB;
+ }
+ }
+}
+
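A source construct that exercises this path is a plain conditional move,
e.g. (hypothetical example):

    int pick(int a, int b, int x, int y) { return a < b ? x : y; }

the i32 SELECT_CC is custom-lowered to MipsISD::SelectCC, selected to the
Mips::Select_CC pseudo, and finally expanded into the branch diamond built
above.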
//===----------------------------------------------------------------------===//
// Lower helper functions
//===----------------------------------------------------------------------===//
@@ -157,7 +245,7 @@
SDOperand HiPart;
if (!isPIC) {
- const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::i32);
+ const MVT *VTs = DAG.getNodeValueTypes(MVT::i32);
SDOperand Ops[] = { GA };
HiPart = DAG.getNode(MipsISD::Hi, VTs, 1, Ops, 1);
} else // Emit Load from Global Pointer
@@ -182,17 +270,34 @@
}
SDOperand MipsTargetLowering::
+LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG)
+{
+ SDOperand LHS = Op.getOperand(0);
+ SDOperand RHS = Op.getOperand(1);
+ SDOperand True = Op.getOperand(2);
+ SDOperand False = Op.getOperand(3);
+ SDOperand CC = Op.getOperand(4);
+
+ const MVT *VTs = DAG.getNodeValueTypes(MVT::i32);
+ SDOperand Ops[] = { LHS, RHS, CC };
+ SDOperand SetCCRes = DAG.getNode(ISD::SETCC, VTs, 1, Ops, 3);
+
+ return DAG.getNode(MipsISD::SelectCC, True.getValueType(),
+ SetCCRes, True, False);
+}
+
+SDOperand MipsTargetLowering::
LowerJumpTable(SDOperand Op, SelectionDAG &DAG)
{
SDOperand ResNode;
SDOperand HiPart;
- MVT::ValueType PtrVT = Op.getValueType();
+ MVT PtrVT = Op.getValueType();
JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
SDOperand JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
if (getTargetMachine().getRelocationModel() != Reloc::PIC_) {
- const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::i32);
+ const MVT *VTs = DAG.getNodeValueTypes(MVT::i32);
SDOperand Ops[] = { JTI };
HiPart = DAG.getNode(MipsISD::Hi, VTs, 1, Ops, 1);
} else // Emit Load from Global Pointer
@@ -240,7 +345,7 @@
/// LowerCCCCallTo - functions arguments are copied from virtual
/// regs to (physical regs)/(stack frame), CALLSEQ_START and
/// CALLSEQ_END are emitted.
-/// TODO: isVarArg, isTailCall, sret.
+/// TODO: isVarArg, isTailCall.
SDOperand MipsTargetLowering::
LowerCCCCallTo(SDOperand Op, SelectionDAG &DAG, unsigned CC)
{
@@ -258,7 +363,7 @@
// To meet ABI, Mips must always allocate 16 bytes on
// the stack (even if less than 4 are used as arguments)
- int VTsize = MVT::getSizeInBits(MVT::i32)/8;
+ int VTsize = MVT(MVT::i32).getSizeInBits()/8;
MFI->CreateFixedObject(VTsize, (VTsize*3));
CCInfo.AnalyzeCallOperands(Op.Val, CC_Mips);
@@ -268,10 +373,14 @@
Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes,
getPointerTy()));
- SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass;
+  // With EABI it is possible to have 16 args in registers.
+ SmallVector<std::pair<unsigned, SDOperand>, 16> RegsToPass;
SmallVector<SDOperand, 8> MemOpChains;
- int LastStackLoc = 0;
+ // First/LastArgStackLoc contains the first/last
+ // "at stack" argument location.
+ int LastArgStackLoc = 0;
+ unsigned FirstStackArgLoc = (Subtarget->isABI_EABI() ? 0 : 16);
// Walk the register/memloc assignments, inserting copies/loads.
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
@@ -302,14 +411,16 @@
continue;
}
+    // Registers can't get to this point...
assert(VA.isMemLoc());
// Create the frame index object for this incoming parameter
// This guarantees that when allocating Local Area the firsts
- // 16 bytes which are alwayes reserved won't be overwritten.
- LastStackLoc = (16 + VA.getLocMemOffset());
- int FI = MFI->CreateFixedObject(MVT::getSizeInBits(VA.getValVT())/8,
- LastStackLoc);
+      // 16 bytes which are always reserved won't be overwritten
+ // if O32 ABI is used. For EABI the first address is zero.
+ LastArgStackLoc = (FirstStackArgLoc + VA.getLocMemOffset());
+ int FI = MFI->CreateFixedObject(VA.getValVT().getSizeInBits()/8,
+ LastArgStackLoc);
SDOperand PtrOff = DAG.getFrameIndex(FI,getPointerTy());
@@ -318,8 +429,8 @@
MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
}
- // Transform all store nodes into one single node because
- // all store nodes are independent of each other.
+ // Transform all store nodes into one single node because all store
+ // nodes are independent of each other.
if (!MemOpChains.empty())
Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
&MemOpChains[0], MemOpChains.size());
@@ -365,23 +476,30 @@
Chain = DAG.getNode(MipsISD::JmpLink, NodeTys, &Ops[0], Ops.size());
InFlag = Chain.getValue(1);
+ // Create the CALLSEQ_END node.
+ Chain = DAG.getCALLSEQ_END(Chain,
+ DAG.getConstant(NumBytes, getPointerTy()),
+ DAG.getConstant(0, getPointerTy()),
+ InFlag);
+ InFlag = Chain.getValue(1);
+
// Create a stack location to hold GP when PIC is used. This stack
// location is used on function prologue to save GP and also after all
  // emitted CALLs to restore GP.
if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
// Function can have an arbitrary number of calls, so
- // hold the LastStackLoc with the biggest offset.
+ // hold the LastArgStackLoc with the biggest offset.
int FI;
MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
- if (LastStackLoc >= MipsFI->getGPStackOffset()) {
- LastStackLoc = (!LastStackLoc) ? (16) : (LastStackLoc+4);
+ if (LastArgStackLoc >= MipsFI->getGPStackOffset()) {
+ LastArgStackLoc = (!LastArgStackLoc) ? (16) : (LastArgStackLoc+4);
// Create the frame index only once. SPOffset here can be anything
// (this will be fixed on processFunctionBeforeFrameFinalized)
if (MipsFI->getGPStackOffset() == -1) {
FI = MFI->CreateFixedObject(4, 0);
MipsFI->setGPFI(FI);
}
- MipsFI->setGPStackOffset(LastStackLoc);
+ MipsFI->setGPStackOffset(LastArgStackLoc);
}
// Reload GP value.
@@ -391,15 +509,9 @@
Chain = GPLoad.getValue(1);
Chain = DAG.getCopyToReg(Chain, DAG.getRegister(Mips::GP, MVT::i32),
GPLoad, SDOperand(0,0));
+ InFlag = Chain.getValue(1);
}
- // Create the CALLSEQ_END node.
- Chain = DAG.getCALLSEQ_END(Chain,
- DAG.getConstant(NumBytes, getPointerTy()),
- DAG.getConstant(0, getPointerTy()),
- InFlag);
- InFlag = Chain.getValue(1);
-
// Handle result values, copying them out of physregs into vregs that we
// return.
return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo);
@@ -434,8 +546,8 @@
ResultVals.push_back(Chain);
// Merge everything together with a MERGE_VALUES node.
- return DAG.getNode(ISD::MERGE_VALUES, TheCall->getVTList(),
- &ResultVals[0], ResultVals.size()).Val;
+ return DAG.getMergeValues(TheCall->getVTList(), &ResultVals[0],
+ ResultVals.size()).Val;
}
//===----------------------------------------------------------------------===//
@@ -459,7 +571,7 @@
/// LowerCCCArguments - transform physical registers into
/// virtual registers and generate load operations for
/// arguments places on the stack.
-/// TODO: isVarArg, sret
+/// TODO: isVarArg
SDOperand MipsTargetLowering::
LowerCCCArguments(SDOperand Op, SelectionDAG &DAG)
{
@@ -482,22 +594,32 @@
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
CCInfo.AnalyzeFormalArguments(Op.Val, CC_Mips);
- SmallVector<SDOperand, 8> ArgValues;
+ SmallVector<SDOperand, 16> ArgValues;
SDOperand StackPtr;
+ unsigned FirstStackArgLoc = (Subtarget->isABI_EABI() ? 0 : 16);
+
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
// Arguments stored on registers
if (VA.isRegLoc()) {
- MVT::ValueType RegVT = VA.getLocVT();
+ MVT RegVT = VA.getLocVT();
TargetRegisterClass *RC;
if (RegVT == MVT::i32)
- RC = Mips::CPURegsRegisterClass;
- else
- assert(0 && "support only Mips::CPURegsRegisterClass");
+ RC = Mips::CPURegsRegisterClass;
+ else if (RegVT == MVT::f32) {
+ if (Subtarget->isSingleFloat())
+ RC = Mips::FGR32RegisterClass;
+ else
+ RC = Mips::AFGR32RegisterClass;
+ } else if (RegVT == MVT::f64) {
+ if (!Subtarget->isSingleFloat())
+ RC = Mips::AFGR64RegisterClass;
+ } else
+ assert(0 && "RegVT not supported by FORMAL_ARGUMENTS Lowering");
// Transform the arguments stored on
// physical registers into virtual ones
@@ -521,8 +643,7 @@
// To meet ABI, when VARARGS are passed on registers, the registers
// must have their values written to the caller stack frame.
- if (isVarArg) {
-
+ if ((isVarArg) && (Subtarget->isABI_O32())) {
if (StackPtr.Val == 0)
StackPtr = DAG.getRegister(StackReg, getPointerTy());
@@ -543,7 +664,8 @@
ArgValues.push_back(DAG.getStore(Root, ArgValue, PtrOff, NULL, 0));
}
- } else {
+ } else { // VA.isRegLoc()
+
// sanity check
assert(VA.isMemLoc());
@@ -555,19 +677,35 @@
// be used on emitPrologue) to avoid mis-calc of the first stack
// offset on PEI::calculateFrameObjectOffsets.
// Arguments are always 32-bit.
- int FI = MFI->CreateFixedObject(4, 0);
- MipsFI->recordLoadArgsFI(FI, -(4+(16+VA.getLocMemOffset())));
+ unsigned ArgSize = VA.getLocVT().getSizeInBits()/8;
+ int FI = MFI->CreateFixedObject(ArgSize, 0);
+ MipsFI->recordLoadArgsFI(FI, -(ArgSize+
+ (FirstStackArgLoc + VA.getLocMemOffset())));
// Create load nodes to retrieve arguments from the stack
SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
ArgValues.push_back(DAG.getLoad(VA.getValVT(), Root, FIN, NULL, 0));
}
}
+
+ // The mips ABIs for returning structs by value requires that we copy
+  // The Mips ABIs for returning structs by value require that we copy
+ // a virtual register so that we can access it from the return points.
+ if (DAG.getMachineFunction().getFunction()->hasStructRetAttr()) {
+ unsigned Reg = MipsFI->getSRetReturnReg();
+ if (!Reg) {
+ Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i32));
+ MipsFI->setSRetReturnReg(Reg);
+ }
+ SDOperand Copy = DAG.getCopyToReg(DAG.getEntryNode(), Reg, ArgValues[0]);
+ Root = DAG.getNode(ISD::TokenFactor, MVT::Other, Copy, Root);
+ }
+
ArgValues.push_back(Root);
// Return the new list of results.
- return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
- &ArgValues[0], ArgValues.size()).getValue(Op.ResNo);
+ return DAG.getMergeValues(Op.Val->getVTList(), &ArgValues[0],
+ ArgValues.size()).getValue(Op.ResNo);
}
//===----------------------------------------------------------------------===//
@@ -615,6 +753,23 @@
Flag = Chain.getValue(1);
}
+  // The Mips ABIs for returning structs by value require that we copy
+ // the sret argument into $v0 for the return. We saved the argument into
+ // a virtual register in the entry block, so now we copy the value out
+ // and into $v0.
+ if (DAG.getMachineFunction().getFunction()->hasStructRetAttr()) {
+ MachineFunction &MF = DAG.getMachineFunction();
+ MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
+ unsigned Reg = MipsFI->getSRetReturnReg();
+
+ if (!Reg)
+ assert(0 && "sret virtual register not created in the entry block");
+ SDOperand Val = DAG.getCopyFromReg(Chain, Reg, getPointerTy());
+
+ Chain = DAG.getCopyToReg(Chain, Mips::V0, Val, Flag);
+ Flag = Chain.getValue(1);
+ }
+
// Return on Mips is always a "jr $ra"
if (Flag.Val)
return DAG.getNode(MipsISD::Ret, MVT::Other,
@@ -633,19 +788,20 @@
MipsTargetLowering::ConstraintType MipsTargetLowering::
getConstraintType(const std::string &Constraint) const
{
+  // Mips specific constraints
+ // GCC config/mips/constraints.md
+ //
+ // 'd' : An address register. Equivalent to r
+ // unless generating MIPS16 code.
+ // 'y' : Equivalent to r; retained for
+ // backwards compatibility.
+  //  'f' : Floating point registers.
if (Constraint.size() == 1) {
- // Mips specific constrainy
- // GCC config/mips/constraints.md
- //
- // 'd' : An address register. Equivalent to r
- // unless generating MIPS16 code.
- // 'y' : Equivalent to r; retained for
- // backwards compatibility.
- //
switch (Constraint[0]) {
default : break;
case 'd':
case 'y':
+ case 'f':
return C_RegisterClass;
break;
}
@@ -653,23 +809,36 @@
return TargetLowering::getConstraintType(Constraint);
}
+/// getRegForInlineAsmConstraint - Given a register class constraint, like
+/// 'r', if this corresponds directly to an LLVM register class, return a
+/// register of 0 and the register class pointer.
std::pair<unsigned, const TargetRegisterClass*> MipsTargetLowering::
-getRegForInlineAsmConstraint(const std::string &Constraint,
- MVT::ValueType VT) const
+getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const
{
if (Constraint.size() == 1) {
switch (Constraint[0]) {
case 'r':
return std::make_pair(0U, Mips::CPURegsRegisterClass);
- break;
+ case 'f':
+ if (VT == MVT::f32)
+ if (Subtarget->isSingleFloat())
+ return std::make_pair(0U, Mips::FGR32RegisterClass);
+ else
+ return std::make_pair(0U, Mips::AFGR32RegisterClass);
+ if (VT == MVT::f64)
+ if ((!Subtarget->isSingleFloat()) && (!Subtarget->isFP64bit()))
+ return std::make_pair(0U, Mips::AFGR64RegisterClass);
}
}
return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}
+/// getRegClassForInlineAsmConstraint - Given a constraint letter (e.g. "r"),
+/// return a list of registers that can be used to satisfy the constraint.
+/// This should only be used for C_RegisterClass constraints.
std::vector<unsigned> MipsTargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
- MVT::ValueType VT) const
+ MVT VT) const
{
if (Constraint.size() != 1)
return std::vector<unsigned>();
@@ -680,15 +849,29 @@
// GCC Mips Constraint Letters
case 'd':
case 'y':
- return make_vector<unsigned>(Mips::V0, Mips::V1, Mips::A0,
- Mips::A1, Mips::A2, Mips::A3,
- Mips::T0, Mips::T1, Mips::T2,
- Mips::T3, Mips::T4, Mips::T5,
- Mips::T6, Mips::T7, Mips::S0,
- Mips::S1, Mips::S2, Mips::S3,
- Mips::S4, Mips::S5, Mips::S6,
- Mips::S7, Mips::T8, Mips::T9, 0);
- break;
+ return make_vector<unsigned>(Mips::T0, Mips::T1, Mips::T2, Mips::T3,
+ Mips::T4, Mips::T5, Mips::T6, Mips::T7, Mips::S0, Mips::S1,
+ Mips::S2, Mips::S3, Mips::S4, Mips::S5, Mips::S6, Mips::S7,
+ Mips::T8, 0);
+
+ case 'f':
+ if (VT == MVT::f32)
+ if (Subtarget->isSingleFloat())
+ return make_vector<unsigned>(Mips::F2, Mips::F3, Mips::F4, Mips::F5,
+ Mips::F6, Mips::F7, Mips::F8, Mips::F9, Mips::F10, Mips::F11,
+ Mips::F20, Mips::F21, Mips::F22, Mips::F23, Mips::F24,
+ Mips::F25, Mips::F26, Mips::F27, Mips::F28, Mips::F29,
+ Mips::F30, Mips::F31, 0);
+ else
+ return make_vector<unsigned>(Mips::F2, Mips::F4, Mips::F6, Mips::F8,
+ Mips::F10, Mips::F20, Mips::F22, Mips::F24, Mips::F26,
+ Mips::F28, Mips::F30, 0);
+
+ if (VT == MVT::f64)
+ if ((!Subtarget->isSingleFloat()) && (!Subtarget->isFP64bit()))
+ return make_vector<unsigned>(Mips::D1, Mips::D2, Mips::D3, Mips::D4,
+ Mips::D5, Mips::D10, Mips::D11, Mips::D12, Mips::D13,
+ Mips::D14, Mips::D15, 0);
}
return std::vector<unsigned>();
}
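With the new 'f' constraint class, single-precision values can be kept in FP
registers across inline asm; a hypothetical usage sketch (GCC-style asm, the
add.s spelling taken from the FPU .td added below):

    float fadd_s(float a, float b) {
      float r;
      asm ("add.s %0, %1, %2" : "=f" (r) : "f" (a), "f" (b));
      return r;
    }

The constraint resolves to FGR32 or AFGR32 depending on the single-float
subtarget flag, per getRegForInlineAsmConstraint above.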
Modified: llvm/branches/non-call-eh/lib/Target/Mips/MipsISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/Mips/MipsISelLowering.h?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/Mips/MipsISelLowering.h (original)
+++ llvm/branches/non-call-eh/lib/Target/Mips/MipsISelLowering.h Sun Jul 6 15:45:41 2008
@@ -37,6 +37,15 @@
// No relation with Mips Lo register
Lo,
+ // Select CC Pseudo Instruction
+ SelectCC,
+
+    // Floating Point Branch Conditional
+ FPBrcond,
+
+    // Floating Point Compare
+ FPCmp,
+
// Return
Ret
};
@@ -63,9 +72,12 @@
virtual const char *getTargetNodeName(unsigned Opcode) const;
/// getSetCCResultType - get the ISD::SETCC result ValueType
- MVT::ValueType getSetCCResultType(const SDOperand &) const;
+ MVT getSetCCResultType(const SDOperand &) const;
private:
+ // Subtarget Info
+ const MipsSubtarget *Subtarget;
+
// Lower Operand helpers
SDOperand LowerCCCArguments(SDOperand Op, SelectionDAG &DAG);
SDOperand LowerCCCCallTo(SDOperand Op, SelectionDAG &DAG, unsigned CC);
@@ -80,17 +92,21 @@
SDOperand LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG);
SDOperand LowerGlobalTLSAddress(SDOperand Op, SelectionDAG &DAG);
SDOperand LowerJumpTable(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG);
+
+ virtual MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI,
+ MachineBasicBlock *MBB);
// Inline asm support
ConstraintType getConstraintType(const std::string &Constraint) const;
std::pair<unsigned, const TargetRegisterClass*>
getRegForInlineAsmConstraint(const std::string &Constraint,
- MVT::ValueType VT) const;
+ MVT VT) const;
std::vector<unsigned>
getRegClassForInlineAsmConstraint(const std::string &Constraint,
- MVT::ValueType VT) const;
+ MVT VT) const;
};
}
Added: llvm/branches/non-call-eh/lib/Target/Mips/MipsInstrFPU.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/Mips/MipsInstrFPU.td?rev=53163&view=auto
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/Mips/MipsInstrFPU.td (added)
+++ llvm/branches/non-call-eh/lib/Target/Mips/MipsInstrFPU.td Sun Jul 6 15:45:41 2008
@@ -0,0 +1,296 @@
+//===- MipsInstrFPU.td - Mips FPU Instruction Information -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the Mips FPU instructions in TableGen format.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Floating Point Instructions
+// ---------------------------
+// * 64-bit fp:
+//    - 32 64-bit registers (default mode)
+//    - 16 even 32-bit registers (32-bit compatible mode) for
+//      single and double access.
+// * 32-bit fp:
+//    - 16 even 32-bit registers - single and double (aliased)
+//    - 32 32-bit registers (within single-only mode)
+//===----------------------------------------------------------------------===//
+
+// Floating Point Compare and Branch
+def SDT_MipsFPBrcond : SDTypeProfile<0, 3, [SDTCisSameAs<0, 2>, SDTCisInt<0>,
+ SDTCisVT<1, OtherVT>]>;
+def SDT_MipsFPCmp : SDTypeProfile<0, 3, [SDTCisSameAs<0, 1>, SDTCisFP<0>,
+ SDTCisInt<2>]>;
+def MipsFPBrcond : SDNode<"MipsISD::FPBrcond", SDT_MipsFPBrcond,
+ [SDNPHasChain]>;
+def MipsFPCmp : SDNode<"MipsISD::FPCmp", SDT_MipsFPCmp>;
+
+// Operand for printing out a condition code.
+let PrintMethod = "printFCCOperand" in
+ def condcode : Operand<i32>;
+
+//===----------------------------------------------------------------------===//
+// Feature predicates.
+//===----------------------------------------------------------------------===//
+
+def In32BitMode : Predicate<"!Subtarget.isFP64bit()">;
+def In64BitMode : Predicate<"Subtarget.isFP64bit()">;
+def IsSingleFloat : Predicate<"Subtarget.isSingleFloat()">;
+def IsNotSingleFloat : Predicate<"!Subtarget.isSingleFloat()">;
+
+//===----------------------------------------------------------------------===//
+// Instruction Class Templates
+//
+// A set of multiclasses is used to address this in one shot.
+// SO32 - single precision only, uses all 32 32-bit fp registers
+// require FGR32 Register Class and IsSingleFloat
+// AS32 - 16 even fp registers are used for single precision
+// require AFGR32 Register Class and In32BitMode
+// S64 - 32 64-bit registers are used to hold 32-bit single precision values.
+// require FGR64 Register Class and In64BitMode
+// D32 - 16 even fp registers are used for double precision
+// require AFGR64 Register Class and In32BitMode
+// D64 - 32 64-bit registers are used to hold 64-bit double precision values.
+// require FGR64 Register Class and In64BitMode
+//
+// Only SO32, AS32 and D32 are supported right now.
+//
+//===----------------------------------------------------------------------===//
+
+multiclass FFR1_1<bits<6> funct, string asmstr>
+{
+ def _SO32 : FFR<0x11, funct, 0x0, (outs FGR32:$fd), (ins FGR32:$fs),
+ !strconcat(asmstr, ".s $fd, $fs"), []>, Requires<[IsSingleFloat]>;
+
+ def _AS32 : FFR<0x11, funct, 0x0, (outs AFGR32:$fd), (ins AFGR32:$fs),
+ !strconcat(asmstr, ".s $fd, $fs"), []>, Requires<[In32BitMode]>;
+
+ def _D32 : FFR<0x11, funct, 0x1, (outs AFGR64:$fd), (ins AFGR64:$fs),
+ !strconcat(asmstr, ".d $fd, $fs"), []>, Requires<[In32BitMode]>;
+}
+
+multiclass FFR1_2<bits<6> funct, string asmstr, SDNode FOp>
+{
+ def _SO32 : FFR<0x11, funct, 0x0, (outs FGR32:$fd), (ins FGR32:$fs),
+ !strconcat(asmstr, ".s $fd, $fs"),
+ [(set FGR32:$fd, (FOp FGR32:$fs))]>, Requires<[IsSingleFloat]>;
+
+ def _AS32 : FFR<0x11, funct, 0x0, (outs AFGR32:$fd), (ins AFGR32:$fs),
+ !strconcat(asmstr, ".s $fd, $fs"),
+ [(set AFGR32:$fd, (FOp AFGR32:$fs))]>, Requires<[In32BitMode]>;
+
+ def _D32 : FFR<0x11, funct, 0x1, (outs AFGR64:$fd), (ins AFGR64:$fs),
+ !strconcat(asmstr, ".d $fd, $fs"),
+ [(set AFGR64:$fd, (FOp AFGR64:$fs))]>, Requires<[In32BitMode]>;
+}
+
+class FFR1_3<bits<6> funct, bits<5> fmt, RegisterClass RcSrc,
+ RegisterClass RcDst, string asmstr>:
+ FFR<0x11, funct, fmt, (outs RcSrc:$fd), (ins RcDst:$fs),
+ !strconcat(asmstr, " $fd, $fs"), []>;
+
+
+multiclass FFR1_4<bits<6> funct, string asmstr, SDNode FOp> {
+ def _SO32 : FFR<0x11, funct, 0x0, (outs FGR32:$fd), (ins FGR32:$fs, FGR32:$ft),
+ !strconcat(asmstr, ".s $fd, $fs, $ft"),
+ [(set FGR32:$fd, (FOp FGR32:$fs, FGR32:$ft))]>,
+ Requires<[IsSingleFloat]>;
+
+ def _AS32 : FFR<0x11, funct, 0x0, (outs AFGR32:$fd),
+ (ins AFGR32:$fs, AFGR32:$ft),
+ !strconcat(asmstr, ".s $fd, $fs, $ft"),
+ [(set AFGR32:$fd, (FOp AFGR32:$fs, AFGR32:$ft))]>,
+ Requires<[In32BitMode]>;
+
+ def _D32 : FFR<0x11, funct, 0x1, (outs AFGR64:$fd),
+ (ins AFGR64:$fs, AFGR64:$ft),
+ !strconcat(asmstr, ".d $fd, $fs, $ft"),
+ [(set AFGR64:$fd, (FOp AFGR64:$fs, AFGR64:$ft))]>,
+ Requires<[In32BitMode]>;
+}
+
+//===----------------------------------------------------------------------===//
+// Floating Point Instructions
+//===----------------------------------------------------------------------===//
+
+let ft = 0 in {
+ defm FLOOR_W : FFR1_1<0b001111, "floor.w">;
+ defm CEIL_W : FFR1_1<0b001110, "ceil.w">;
+ defm ROUND_W : FFR1_1<0b001100, "round.w">;
+ defm TRUNC_W : FFR1_1<0b001101, "trunc.w">;
+ defm CVTW : FFR1_1<0b100100, "cvt.w">;
+ defm FMOV : FFR1_1<0b000110, "mov">;
+
+ defm FABS : FFR1_2<0b000101, "abs", fabs>;
+ defm FNEG : FFR1_2<0b000111, "neg", fneg>;
+ defm FSQRT : FFR1_2<0b000100, "sqrt", fsqrt>;
+
+ let Predicates = [IsNotSingleFloat] in {
+ /// Ceil to long signed integer
+ def CEIL_LS : FFR1_3<0b001010, 0x0, AFGR32, AFGR32, "ceil.l">;
+ def CEIL_LD : FFR1_3<0b001010, 0x1, AFGR64, AFGR64, "ceil.l">;
+
+ /// Round to long signed integer
+ def ROUND_LS : FFR1_3<0b001000, 0x0, AFGR32, AFGR32, "round.l">;
+ def ROUND_LD : FFR1_3<0b001000, 0x1, AFGR64, AFGR64, "round.l">;
+
+ /// Floor to long signed integer
+ def FLOOR_LS : FFR1_3<0b001011, 0x0, AFGR32, AFGR32, "floor.l">;
+ def FLOOR_LD : FFR1_3<0b001011, 0x1, AFGR64, AFGR64, "floor.l">;
+
+ /// Trunc to long signed integer
+ def TRUNC_LS : FFR1_3<0b001001, 0x0, AFGR32, AFGR32, "trunc.l">;
+ def TRUNC_LD : FFR1_3<0b001001, 0x1, AFGR64, AFGR64, "trunc.l">;
+
+ /// Convert to long signed integer
+ def CVTL_S : FFR1_3<0b100101, 0x0, AFGR32, AFGR32, "cvt.l">;
+ def CVTL_D : FFR1_3<0b100101, 0x1, AFGR64, AFGR64, "cvt.l">;
+
+    /// Convert to Double Precision
+ def CVTD_S32 : FFR1_3<0b100001, 0x0, AFGR64, FGR32, "cvt.d.s">;
+ def CVTD_W32 : FFR1_3<0b100001, 0x2, AFGR64, FGR32, "cvt.d.w">;
+ def CVTD_L32 : FFR1_3<0b100001, 0x3, AFGR64, AFGR64, "cvt.d.l">;
+
+    /// Convert to Single Precision
+ def CVTS_D32 : FFR1_3<0b100000, 0x1, FGR32, AFGR64, "cvt.s.d">;
+ def CVTS_L32 : FFR1_3<0b100000, 0x3, FGR32, AFGR64, "cvt.s.l">;
+ }
+
+  /// Convert to Single Precision
+ def CVTS_W32 : FFR1_3<0b100000, 0x2, FGR32, FGR32, "cvt.s.w">,
+ Requires<[IsSingleFloat]>;
+}
+
+// The odd-numbered registers are only referenced when doing loads,
+// stores, and moves between floating-point and integer registers.
+// When defining instructions, we reference all 32-bit registers,
+// regardless of register aliasing.
+let fd = 0 in {
+ /// Move Control Registers From/To CPU Registers
+ ///def CFC1 : FFR<0x11, 0x0, 0x2, (outs CPURegs:$rt), (ins FGR32:$fs),
+ /// "cfc1 $rt, $fs", []>;
+
+ ///def CTC1 : FFR<0x11, 0x0, 0x6, (outs CPURegs:$rt), (ins FGR32:$fs),
+ /// "ctc1 $rt, $fs", []>;
+ ///
+ ///def CFC1A : FFR<0x11, 0x0, 0x2, (outs CPURegs:$rt), (ins AFGR32:$fs),
+ /// "cfc1 $rt, $fs", []>;
+
+ ///def CTC1A : FFR<0x11, 0x0, 0x6, (outs CPURegs:$rt), (ins AFGR32:$fs),
+ /// "ctc1 $rt, $fs", []>;
+
+ def MFC1 : FFR<0x11, 0x00, 0x00, (outs CPURegs:$rt), (ins FGR32:$fs),
+ "mfc1 $rt, $fs", []>;
+
+ def MTC1 : FFR<0x11, 0x00, 0x04, (outs FGR32:$fs), (ins CPURegs:$rt),
+ "mtc1 $fs, $rt", []>;
+
+ def MFC1A : FFR<0x11, 0x00, 0x00, (outs CPURegs:$rt), (ins AFGR32:$fs),
+ "mfc1 $rt, $fs", []>;
+
+ def MTC1A : FFR<0x11, 0x00, 0x04, (outs AFGR32:$fs), (ins CPURegs:$rt),
+ "mtc1 $fs, $rt", []>;
+}
+
+/// Floating Point Memory Instructions
+let Predicates = [IsNotSingleFloat] in {
+ def LDC1 : FFI<0b110101, (outs AFGR64:$ft), (ins mem:$addr),
+ "ldc1 $ft, $addr", [(set AFGR64:$ft, (load addr:$addr))]>;
+
+ def SDC1 : FFI<0b111101, (outs), (ins AFGR64:$ft, mem:$addr),
+ "sdc1 $ft, $addr", [(store AFGR64:$ft, addr:$addr)]>;
+}
+
+// LWC1 and SWC1 can always be emitted with odd registers.
+def LWC1 : FFI<0b110001, (outs FGR32:$ft), (ins mem:$addr), "lwc1 $ft, $addr",
+ [(set FGR32:$ft, (load addr:$addr))]>;
+def SWC1 : FFI<0b111001, (outs), (ins FGR32:$ft, mem:$addr), "swc1 $ft, $addr",
+ [(store FGR32:$ft, addr:$addr)]>;
+
+def LWC1A : FFI<0b110001, (outs AFGR32:$ft), (ins mem:$addr), "lwc1 $ft, $addr",
+ [(set AFGR32:$ft, (load addr:$addr))]>;
+def SWC1A : FFI<0b111001, (outs), (ins AFGR32:$ft, mem:$addr), "swc1 $ft, $addr",
+ [(store AFGR32:$ft, addr:$addr)]>;
+
+/// Floating-point Arithmetic
+defm FADD : FFR1_4<0x10, "add", fadd>;
+defm FDIV : FFR1_4<0x03, "div", fdiv>;
+defm FMUL : FFR1_4<0x02, "mul", fmul>;
+defm FSUB : FFR1_4<0x01, "sub", fsub>;
+
+//===----------------------------------------------------------------------===//
+// Floating Point Branch Codes
+//===----------------------------------------------------------------------===//
+// Mips branch codes. These correspond to condcode in MipsInstrInfo.h.
+// They must be kept in synch.
+def MIPS_BRANCH_F : PatLeaf<(i32 0)>;
+def MIPS_BRANCH_T : PatLeaf<(i32 1)>;
+def MIPS_BRANCH_FL : PatLeaf<(i32 2)>;
+def MIPS_BRANCH_TL : PatLeaf<(i32 3)>;
+
+/// Floating Point Branch on False/True (Likely)
+let isBranch=1, isTerminator=1, hasDelaySlot=1, base=0x8, Uses=[FCR31] in {
+ class FBRANCH<PatLeaf op, string asmstr> : FFI<0x11, (ops),
+ (ins brtarget:$dst), !strconcat(asmstr, " $dst"),
+ [(MipsFPBrcond op, bb:$dst, FCR31)]>;
+}
+def BC1F : FBRANCH<MIPS_BRANCH_F, "bc1f">;
+def BC1T : FBRANCH<MIPS_BRANCH_T, "bc1t">;
+def BC1FL : FBRANCH<MIPS_BRANCH_FL, "bc1fl">;
+def BC1TL : FBRANCH<MIPS_BRANCH_TL, "bc1tl">;
+
+//===----------------------------------------------------------------------===//
+// Floating Point Flag Conditions
+//===----------------------------------------------------------------------===//
+// Mips condition codes. They must correspond to condcode in MipsInstrInfo.h.
+// They must be kept in synch.
+def MIPS_FCOND_F : PatLeaf<(i32 0)>;
+def MIPS_FCOND_UN : PatLeaf<(i32 1)>;
+def MIPS_FCOND_EQ : PatLeaf<(i32 2)>;
+def MIPS_FCOND_UEQ : PatLeaf<(i32 3)>;
+def MIPS_FCOND_OLT : PatLeaf<(i32 4)>;
+def MIPS_FCOND_ULT : PatLeaf<(i32 5)>;
+def MIPS_FCOND_OLE : PatLeaf<(i32 6)>;
+def MIPS_FCOND_ULE : PatLeaf<(i32 7)>;
+def MIPS_FCOND_SF : PatLeaf<(i32 8)>;
+def MIPS_FCOND_NGLE : PatLeaf<(i32 9)>;
+def MIPS_FCOND_SEQ : PatLeaf<(i32 10)>;
+def MIPS_FCOND_NGL : PatLeaf<(i32 11)>;
+def MIPS_FCOND_LT : PatLeaf<(i32 12)>;
+def MIPS_FCOND_NGE : PatLeaf<(i32 13)>;
+def MIPS_FCOND_LE : PatLeaf<(i32 14)>;
+def MIPS_FCOND_NGT : PatLeaf<(i32 15)>;
+
+/// Floating Point Compare
+let hasDelaySlot = 1, Defs=[FCR31] in {
+
+//multiclass FCC1_1<RegisterClass RC>
+
+ def FCMP_SO32 : FCC<0x0, (outs), (ins FGR32:$fs, FGR32:$ft, condcode:$cc),
+ "c.$cc.s $fs $ft", [(MipsFPCmp FGR32:$fs, FGR32:$ft, imm:$cc),
+ (implicit FCR31)]>, Requires<[IsSingleFloat]>;
+
+ def FCMP_AS32 : FCC<0x0, (outs), (ins AFGR32:$fs, AFGR32:$ft, condcode:$cc),
+ "c.$cc.s $fs $ft", [(MipsFPCmp AFGR32:$fs, AFGR32:$ft, imm:$cc),
+ (implicit FCR31)]>, Requires<[In32BitMode]>;
+
+ def FCMP_D32 : FCC<0x1, (outs), (ins AFGR64:$fs, AFGR64:$ft, condcode:$cc),
+ "c.$cc.d $fs $ft", [(MipsFPCmp AFGR64:$fs, AFGR64:$ft, imm:$cc),
+ (implicit FCR31)]>, Requires<[In32BitMode]>;
+}
+
+//===----------------------------------------------------------------------===//
+// Floating Point Patterns
+//===----------------------------------------------------------------------===//
+def : Pat<(f32 (sint_to_fp CPURegs:$src)), (CVTS_W32 (MTC1 CPURegs:$src))>;
+def : Pat<(f64 (sint_to_fp CPURegs:$src)), (CVTD_W32 (MTC1 CPURegs:$src))>;
+def : Pat<(i32 (fp_to_sint FGR32:$src)), (MFC1 (CVTW_SO32 FGR32:$src))>;
+def : Pat<(i32 (fp_to_sint AFGR32:$src)), (MFC1 (CVTW_AS32 AFGR32:$src))>;
+
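The final Pat lines wire integer/float conversions to the new instructions;
for instance a plain C cast (illustrative only):

    float itof(int x) { return (float)x; }

should now select to roughly mtc1 followed by cvt.s.w, matching the
(CVTS_W32 (MTC1 ...)) pattern above, with mfc1 after cvt.w.* handling the
opposite direction.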
Modified: llvm/branches/non-call-eh/lib/Target/Mips/MipsInstrFormats.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/Mips/MipsInstrFormats.td?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/Mips/MipsInstrFormats.td (original)
+++ llvm/branches/non-call-eh/lib/Target/Mips/MipsInstrFormats.td Sun Jul 6 15:45:41 2008
@@ -10,7 +10,7 @@
//===----------------------------------------------------------------------===//
// Describe MIPS instructions format
//
-// All the possible Mips fields are:
+// CPU INSTRUCTION FORMATS
//
// opcode - operation code.
// rs - src reg.
@@ -43,7 +43,7 @@
}
// Mips Pseudo Instructions Format
-class PseudoInstMips<dag outs, dag ins, string asmstr, list<dag> pattern>:
+class MipsPseudo<dag outs, dag ins, string asmstr, list<dag> pattern>:
MipsInst<outs, ins, asmstr, pattern, IIPseudo>;
//===----------------------------------------------------------------------===//
@@ -102,3 +102,81 @@
let Inst{25-0} = addr;
}
+//===----------------------------------------------------------------------===//
+//
+// FLOATING POINT INSTRUCTION FORMATS
+//
+// opcode - operation code.
+// fs - src reg.
+// ft - dst reg (on a 2 regs instr) or src reg (on a 3 reg instr).
+// fd - dst reg, only used on 3 regs instr.
+// fmt - double or single precision.
+// funct - combined with opcode field give us an operation code.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Format FR instruction class in Mips : <|opcode|fmt|ft|fs|fd|funct|>
+//===----------------------------------------------------------------------===//
+
+class FFR<bits<6> op, bits<6> _funct, bits<5> _fmt, dag outs, dag ins,
+ string asmstr, list<dag> pattern> :
+ MipsInst<outs, ins, asmstr, pattern, NoItinerary>
+{
+ bits<5> fd;
+ bits<5> fs;
+ bits<5> ft;
+ bits<5> fmt;
+ bits<6> funct;
+
+ let opcode = op;
+ let funct = _funct;
+ let fmt = _fmt;
+
+ let Inst{25-21} = fmt;
+ let Inst{20-16} = ft;
+ let Inst{15-11} = fs;
+ let Inst{10-6} = fd;
+ let Inst{5-0} = funct;
+}
+
+//===----------------------------------------------------------------------===//
+// Format FI instruction class in Mips : <|opcode|base|ft|immediate|>
+//===----------------------------------------------------------------------===//
+
+class FFI<bits<6> op, dag outs, dag ins, string asmstr, list<dag> pattern>:
+ MipsInst<outs, ins, asmstr, pattern, NoItinerary>
+{
+ bits<5> ft;
+ bits<5> base;
+ bits<16> imm16;
+
+ let opcode = op;
+
+ let Inst{25-21} = base;
+ let Inst{20-16} = ft;
+ let Inst{15-0} = imm16;
+}
+
+//===----------------------------------------------------------------------===//
+// Compare instruction class in Mips : <|010001|fmt|ft|fs|0000011|condcode|>
+//===----------------------------------------------------------------------===//
+
+class FCC<bits<5> _fmt, dag outs, dag ins, string asmstr, list<dag> pattern> :
+ MipsInst<outs, ins, asmstr, pattern, NoItinerary>
+{
+ bits<5> fs;
+ bits<5> ft;
+ bits<4> cc;
+ bits<5> fmt;
+
+ let opcode = 0x11;
+ let fmt = _fmt;
+
+ let Inst{25-21} = fmt;
+ let Inst{20-16} = ft;
+ let Inst{15-11} = fs;
+ let Inst{10-6} = 0;
+ let Inst{5-4} = 0b11;
+ let Inst{3-0} = cc;
+}
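As a sanity check on the FCC layout comment (<|010001|fmt|ft|fs|0000011|cc|>), a small C++ sketch, not part of the patch, that packs the same fields the Inst{} assignments above describe; the field widths and positions are taken directly from the class.

  #include <cstdint>

  // Packs a c.cond.fmt compare word: opcode 0x11 (COP1), then fmt, ft, fs,
  // five zero bits, the fixed 0b11, and the 4-bit condition code.
  uint32_t encodeFCC(unsigned fmt, unsigned ft, unsigned fs, unsigned cc) {
    uint32_t Inst = 0;
    Inst |= 0x11u << 26;         // opcode, bits 31-26
    Inst |= (fmt & 0x1F) << 21;  // fmt,    bits 25-21
    Inst |= (ft  & 0x1F) << 16;  // ft,     bits 20-16
    Inst |= (fs  & 0x1F) << 11;  // fs,     bits 15-11
                                 // bits 10-6 stay zero
    Inst |= 0x3u << 4;           // fixed 0b11, bits 5-4
    Inst |= (cc & 0xF);          // condition code, bits 3-0
    return Inst;
  }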
Modified: llvm/branches/non-call-eh/lib/Target/Mips/MipsInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/Mips/MipsInstrInfo.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/Mips/MipsInstrInfo.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/Mips/MipsInstrInfo.cpp Sun Jul 6 15:45:41 2008
@@ -19,7 +19,6 @@
using namespace llvm;
-// TODO: Add the subtarget support on this constructor
MipsInstrInfo::MipsInstrInfo(MipsTargetMachine &tm)
: TargetInstrInfoImpl(MipsInsts, array_lengthof(MipsInsts)),
TM(tm), RI(*this) {}
@@ -35,8 +34,7 @@
{
// addu $dst, $src, $zero || addu $dst, $zero, $src
// or $dst, $src, $zero || or $dst, $zero, $src
- if ((MI.getOpcode() == Mips::ADDu) || (MI.getOpcode() == Mips::OR))
- {
+ if ((MI.getOpcode() == Mips::ADDu) || (MI.getOpcode() == Mips::OR)) {
if (MI.getOperand(1).getReg() == Mips::ZERO) {
DstReg = MI.getOperand(0).getReg();
SrcReg = MI.getOperand(2).getReg();
@@ -48,9 +46,20 @@
}
}
+ // mov $fpDst, $fpSrc
+ // mfc $gpDst, $fpSrc
+ // mtc $fpDst, $gpSrc
+ if (MI.getOpcode() == Mips::FMOV_SO32 || MI.getOpcode() == Mips::FMOV_AS32 ||
+ MI.getOpcode() == Mips::FMOV_D32 || MI.getOpcode() == Mips::MFC1A ||
+ MI.getOpcode() == Mips::MFC1 || MI.getOpcode() == Mips::MTC1A ||
+ MI.getOpcode() == Mips::MTC1 ) {
+ DstReg = MI.getOperand(0).getReg();
+ SrcReg = MI.getOperand(1).getReg();
+ return true;
+ }
+
// addiu $dst, $src, 0
- if (MI.getOpcode() == Mips::ADDiu)
- {
+ if (MI.getOpcode() == Mips::ADDiu) {
if ((MI.getOperand(1).isRegister()) && (isZeroImm(MI.getOperand(2)))) {
DstReg = MI.getOperand(0).getReg();
SrcReg = MI.getOperand(1).getReg();
@@ -68,12 +77,11 @@
unsigned MipsInstrInfo::
isLoadFromStackSlot(MachineInstr *MI, int &FrameIndex) const
{
- if (MI->getOpcode() == Mips::LW)
- {
+ if ((MI->getOpcode() == Mips::LW) || (MI->getOpcode() == Mips::LWC1) ||
+ (MI->getOpcode() == Mips::LWC1A) || (MI->getOpcode() == Mips::LDC1)) {
if ((MI->getOperand(2).isFrameIndex()) && // is a stack slot
(MI->getOperand(1).isImmediate()) && // the imm is zero
- (isZeroImm(MI->getOperand(1))))
- {
+ (isZeroImm(MI->getOperand(1)))) {
FrameIndex = MI->getOperand(2).getIndex();
return MI->getOperand(0).getReg();
}
@@ -90,11 +98,11 @@
unsigned MipsInstrInfo::
isStoreToStackSlot(MachineInstr *MI, int &FrameIndex) const
{
- if (MI->getOpcode() == Mips::SW) {
+ if ((MI->getOpcode() == Mips::SW) || (MI->getOpcode() == Mips::SWC1) ||
+ (MI->getOpcode() == Mips::SWC1A) || (MI->getOpcode() == Mips::SDC1)) {
if ((MI->getOperand(0).isFrameIndex()) && // is a stack slot
(MI->getOperand(1).isImmediate()) && // the imm is zero
- (isZeroImm(MI->getOperand(1))))
- {
+ (isZeroImm(MI->getOperand(1)))) {
FrameIndex = MI->getOperand(0).getIndex();
return MI->getOperand(2).getReg();
}
@@ -110,6 +118,208 @@
BuildMI(MBB, MI, get(Mips::NOP));
}
+void MipsInstrInfo::
+copyRegToReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ unsigned DestReg, unsigned SrcReg,
+ const TargetRegisterClass *DestRC,
+ const TargetRegisterClass *SrcRC) const {
+ if (DestRC != SrcRC) {
+ if ((DestRC == Mips::CPURegsRegisterClass) &&
+ (SrcRC == Mips::FGR32RegisterClass))
+ BuildMI(MBB, I, get(Mips::MFC1), DestReg).addReg(SrcReg);
+ else if ((DestRC == Mips::CPURegsRegisterClass) &&
+ (SrcRC == Mips::AFGR32RegisterClass))
+ BuildMI(MBB, I, get(Mips::MFC1A), DestReg).addReg(SrcReg);
+ else if ((DestRC == Mips::FGR32RegisterClass) &&
+ (SrcRC == Mips::CPURegsRegisterClass))
+ BuildMI(MBB, I, get(Mips::MTC1), DestReg).addReg(SrcReg);
+ else if ((DestRC == Mips::AFGR32RegisterClass) &&
+ (SrcRC == Mips::CPURegsRegisterClass))
+ BuildMI(MBB, I, get(Mips::MTC1A), DestReg).addReg(SrcReg);
+ else
+ assert (0 && "DestRC != SrcRC, Can't copy this register");
+ }
+
+ if (DestRC == Mips::CPURegsRegisterClass)
+ BuildMI(MBB, I, get(Mips::ADDu), DestReg).addReg(Mips::ZERO)
+ .addReg(SrcReg);
+ else if (DestRC == Mips::FGR32RegisterClass)
+ BuildMI(MBB, I, get(Mips::FMOV_SO32), DestReg).addReg(SrcReg);
+ else if (DestRC == Mips::AFGR32RegisterClass)
+ BuildMI(MBB, I, get(Mips::FMOV_AS32), DestReg).addReg(SrcReg);
+ else if (DestRC == Mips::AFGR64RegisterClass)
+ BuildMI(MBB, I, get(Mips::FMOV_D32), DestReg).addReg(SrcReg);
+ else
+ assert (0 && "Can't copy this register");
+}
+
+void MipsInstrInfo::
+storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ unsigned SrcReg, bool isKill, int FI,
+ const TargetRegisterClass *RC) const
+{
+ unsigned Opc;
+ if (RC == Mips::CPURegsRegisterClass)
+ Opc = Mips::SW;
+ else if (RC == Mips::FGR32RegisterClass)
+ Opc = Mips::SWC1;
+ else if (RC == Mips::AFGR32RegisterClass)
+ Opc = Mips::SWC1A;
+ else if (RC == Mips::AFGR64RegisterClass)
+ Opc = Mips::SDC1;
+ else
+ assert(0 && "Can't store this register to stack slot");
+
+ BuildMI(MBB, I, get(Opc)).addReg(SrcReg, false, false, isKill)
+ .addImm(0).addFrameIndex(FI);
+}
+
+void MipsInstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
+ bool isKill, SmallVectorImpl<MachineOperand> &Addr,
+ const TargetRegisterClass *RC, SmallVectorImpl<MachineInstr*> &NewMIs) const
+{
+ unsigned Opc;
+ if (RC == Mips::CPURegsRegisterClass)
+ Opc = Mips::SW;
+ else if (RC == Mips::FGR32RegisterClass)
+ Opc = Mips::SWC1;
+ else if (RC == Mips::AFGR32RegisterClass)
+ Opc = Mips::SWC1A;
+ else if (RC == Mips::AFGR64RegisterClass)
+ Opc = Mips::SDC1;
+ else
+ assert(0 && "Can't store this register");
+
+ MachineInstrBuilder MIB = BuildMI(get(Opc))
+ .addReg(SrcReg, false, false, isKill);
+ for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
+ MachineOperand &MO = Addr[i];
+ if (MO.isRegister())
+ MIB.addReg(MO.getReg());
+ else if (MO.isImmediate())
+ MIB.addImm(MO.getImm());
+ else
+ MIB.addFrameIndex(MO.getIndex());
+ }
+ NewMIs.push_back(MIB);
+ return;
+}
+
+void MipsInstrInfo::
+loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ unsigned DestReg, int FI,
+ const TargetRegisterClass *RC) const
+{
+ unsigned Opc;
+ if (RC == Mips::CPURegsRegisterClass)
+ Opc = Mips::LW;
+ else if (RC == Mips::FGR32RegisterClass)
+ Opc = Mips::LWC1;
+ else if (RC == Mips::AFGR32RegisterClass)
+ Opc = Mips::LWC1A;
+ else if (RC == Mips::AFGR64RegisterClass)
+ Opc = Mips::LDC1;
+ else
+ assert(0 && "Can't load this register from stack slot");
+
+ BuildMI(MBB, I, get(Opc), DestReg).addImm(0).addFrameIndex(FI);
+}
+
+void MipsInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
+ SmallVectorImpl<MachineOperand> &Addr,
+ const TargetRegisterClass *RC,
+ SmallVectorImpl<MachineInstr*> &NewMIs) const {
+ unsigned Opc;
+ if (RC == Mips::CPURegsRegisterClass)
+ Opc = Mips::LW;
+ else if (RC == Mips::FGR32RegisterClass)
+ Opc = Mips::LWC1;
+ else if (RC == Mips::AFGR32RegisterClass)
+ Opc = Mips::LWC1A;
+ else if (RC == Mips::AFGR64RegisterClass)
+ Opc = Mips::LDC1;
+ else
+ assert(0 && "Can't load this register");
+
+ MachineInstrBuilder MIB = BuildMI(get(Opc), DestReg);
+ for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
+ MachineOperand &MO = Addr[i];
+ if (MO.isRegister())
+ MIB.addReg(MO.getReg());
+ else if (MO.isImmediate())
+ MIB.addImm(MO.getImm());
+ else
+ MIB.addFrameIndex(MO.getIndex());
+ }
+ NewMIs.push_back(MIB);
+ return;
+}
+
+MachineInstr *MipsInstrInfo::
+foldMemoryOperand(MachineFunction &MF,
+ MachineInstr* MI,
+ SmallVectorImpl<unsigned> &Ops, int FI) const
+{
+ if (Ops.size() != 1) return NULL;
+
+ MachineInstr *NewMI = NULL;
+
+ switch (MI->getOpcode()) {
+ case Mips::ADDu:
+ if ((MI->getOperand(0).isRegister()) &&
+ (MI->getOperand(1).isRegister()) &&
+ (MI->getOperand(1).getReg() == Mips::ZERO) &&
+ (MI->getOperand(2).isRegister())) {
+ if (Ops[0] == 0) { // COPY -> STORE
+ unsigned SrcReg = MI->getOperand(2).getReg();
+ bool isKill = MI->getOperand(2).isKill();
+ NewMI = BuildMI(get(Mips::SW)).addFrameIndex(FI)
+ .addImm(0).addReg(SrcReg, false, false, isKill);
+ } else { // COPY -> LOAD
+ unsigned DstReg = MI->getOperand(0).getReg();
+ bool isDead = MI->getOperand(0).isDead();
+ NewMI = BuildMI(get(Mips::LW))
+ .addReg(DstReg, true, false, false, isDead)
+ .addImm(0).addFrameIndex(FI);
+ }
+ }
+ break;
+ case Mips::FMOV_SO32:
+ case Mips::FMOV_AS32:
+ case Mips::FMOV_D32:
+ if ((MI->getOperand(0).isRegister()) &&
+ (MI->getOperand(1).isRegister())) {
+ const TargetRegisterClass *RC = RI.getRegClass(MI->getOperand(0).getReg());
+ unsigned StoreOpc, LoadOpc;
+
+ if (RC == Mips::FGR32RegisterClass) {
+ LoadOpc = Mips::LWC1; StoreOpc = Mips::SWC1;
+ } else if (RC == Mips::AFGR32RegisterClass) {
+ LoadOpc = Mips::LWC1A; StoreOpc = Mips::SWC1A;
+ } else if (RC == Mips::AFGR64RegisterClass) {
+ LoadOpc = Mips::LDC1; StoreOpc = Mips::SDC1;
+ } else
+ assert(0 && "foldMemoryOperand register unknown");
+
+ if (Ops[0] == 0) { // COPY -> STORE
+ unsigned SrcReg = MI->getOperand(1).getReg();
+ bool isKill = MI->getOperand(1).isKill();
+ NewMI = BuildMI(get(StoreOpc)).addFrameIndex(FI)
+ .addImm(0).addReg(SrcReg, false, false, isKill);
+ } else { // COPY -> LOAD
+ unsigned DstReg = MI->getOperand(0).getReg();
+ bool isDead = MI->getOperand(0).isDead();
+ NewMI = BuildMI(get(LoadOpc))
+ .addReg(DstReg, true, false, false, isDead)
+ .addImm(0).addFrameIndex(FI);
+ }
+ }
+ break;
+ }
+
+ return NewMI;
+}
+
//===----------------------------------------------------------------------===//
// Branch Analysis
//===----------------------------------------------------------------------===//
@@ -120,12 +330,12 @@
{
switch (BrOpc) {
default: return Mips::COND_INVALID;
- case Mips::BEQ : return Mips::COND_E;
- case Mips::BNE : return Mips::COND_NE;
- case Mips::BGTZ : return Mips::COND_GZ;
- case Mips::BGEZ : return Mips::COND_GEZ;
- case Mips::BLTZ : return Mips::COND_LZ;
- case Mips::BLEZ : return Mips::COND_LEZ;
+ case Mips::BEQ : return Mips::COND_E;
+ case Mips::BNE : return Mips::COND_NE;
+ case Mips::BGTZ : return Mips::COND_GZ;
+ case Mips::BGEZ : return Mips::COND_GEZ;
+ case Mips::BLTZ : return Mips::COND_LZ;
+ case Mips::BLEZ : return Mips::COND_LEZ;
}
}
@@ -156,6 +366,22 @@
case Mips::COND_GEZ : return Mips::COND_LZ;
case Mips::COND_LZ : return Mips::COND_GEZ;
case Mips::COND_LEZ : return Mips::COND_GZ;
+ case Mips::FCOND_F : return Mips::FCOND_T;
+ case Mips::FCOND_UN : return Mips::FCOND_OR;
+ case Mips::FCOND_EQ : return Mips::FCOND_NEQ;
+ case Mips::FCOND_UEQ: return Mips::FCOND_OGL;
+ case Mips::FCOND_OLT: return Mips::FCOND_UGE;
+ case Mips::FCOND_ULT: return Mips::FCOND_OGE;
+ case Mips::FCOND_OLE: return Mips::FCOND_UGT;
+ case Mips::FCOND_ULE: return Mips::FCOND_OGT;
+ case Mips::FCOND_SF: return Mips::FCOND_ST;
+ case Mips::FCOND_NGLE:return Mips::FCOND_GLE;
+ case Mips::FCOND_SEQ: return Mips::FCOND_SNE;
+ case Mips::FCOND_NGL: return Mips::FCOND_GL;
+ case Mips::FCOND_LT: return Mips::FCOND_NLT;
+ case Mips::FCOND_NGE: return Mips::FCOND_GE;
+ case Mips::FCOND_LE: return Mips::FCOND_NLE;
+ case Mips::FCOND_NGT: return Mips::FCOND_GT;
}
}
@@ -287,120 +513,6 @@
return 2;
}
-void MipsInstrInfo::
-copyRegToReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC) const {
- if (DestRC != SrcRC) {
- cerr << "Not yet supported!";
- abort();
- }
-
- if (DestRC == Mips::CPURegsRegisterClass)
- BuildMI(MBB, I, get(Mips::ADDu), DestReg).addReg(Mips::ZERO)
- .addReg(SrcReg);
- else
- assert (0 && "Can't copy this register");
-}
-
-void MipsInstrInfo::
-storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
- unsigned SrcReg, bool isKill, int FI,
- const TargetRegisterClass *RC) const
-{
- if (RC == Mips::CPURegsRegisterClass)
- BuildMI(MBB, I, get(Mips::SW)).addReg(SrcReg, false, false, isKill)
- .addImm(0).addFrameIndex(FI);
- else
- assert(0 && "Can't store this register to stack slot");
-}
-
-void MipsInstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
- bool isKill,
- SmallVectorImpl<MachineOperand> &Addr,
- const TargetRegisterClass *RC,
- SmallVectorImpl<MachineInstr*> &NewMIs) const {
- if (RC != Mips::CPURegsRegisterClass)
- assert(0 && "Can't store this register");
- MachineInstrBuilder MIB = BuildMI(get(Mips::SW))
- .addReg(SrcReg, false, false, isKill);
- for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
- MachineOperand &MO = Addr[i];
- if (MO.isRegister())
- MIB.addReg(MO.getReg());
- else if (MO.isImmediate())
- MIB.addImm(MO.getImm());
- else
- MIB.addFrameIndex(MO.getIndex());
- }
- NewMIs.push_back(MIB);
- return;
-}
-
-void MipsInstrInfo::
-loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
- unsigned DestReg, int FI,
- const TargetRegisterClass *RC) const
-{
- if (RC == Mips::CPURegsRegisterClass)
- BuildMI(MBB, I, get(Mips::LW), DestReg).addImm(0).addFrameIndex(FI);
- else
- assert(0 && "Can't load this register from stack slot");
-}
-
-void MipsInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
- SmallVectorImpl<MachineOperand> &Addr,
- const TargetRegisterClass *RC,
- SmallVectorImpl<MachineInstr*> &NewMIs) const {
- if (RC != Mips::CPURegsRegisterClass)
- assert(0 && "Can't load this register");
- MachineInstrBuilder MIB = BuildMI(get(Mips::LW), DestReg);
- for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
- MachineOperand &MO = Addr[i];
- if (MO.isRegister())
- MIB.addReg(MO.getReg());
- else if (MO.isImmediate())
- MIB.addImm(MO.getImm());
- else
- MIB.addFrameIndex(MO.getIndex());
- }
- NewMIs.push_back(MIB);
- return;
-}
-
-MachineInstr *MipsInstrInfo::
-foldMemoryOperand(MachineFunction &MF,
- MachineInstr* MI,
- SmallVectorImpl<unsigned> &Ops, int FI) const
-{
- if (Ops.size() != 1) return NULL;
-
- MachineInstr *NewMI = NULL;
-
- switch (MI->getOpcode())
- {
- case Mips::ADDu:
- if ((MI->getOperand(0).isRegister()) &&
- (MI->getOperand(1).isRegister()) &&
- (MI->getOperand(1).getReg() == Mips::ZERO) &&
- (MI->getOperand(2).isRegister()))
- {
- if (Ops[0] == 0) // COPY -> STORE
- NewMI = BuildMI(get(Mips::SW)).addFrameIndex(FI)
- .addImm(0).addReg(MI->getOperand(2).getReg());
- else // COPY -> LOAD
- NewMI = BuildMI(get(Mips::LW), MI->getOperand(0)
- .getReg()).addImm(0).addFrameIndex(FI);
- }
- break;
- }
-
- if (NewMI)
- NewMI->copyKillDeadInfo(MI);
- return NewMI;
-}
-
unsigned MipsInstrInfo::
RemoveBranch(MachineBasicBlock &MBB) const
{
@@ -452,5 +564,3 @@
Cond[0].setImm(GetOppositeBranchCondition((Mips::CondCode)Cond[0].getImm()));
return false;
}
-
-
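The new spill/reload hooks above repeat the same register-class to opcode choice four times (storeRegToStackSlot, storeRegToAddr, loadRegFromStackSlot, loadRegFromAddr). A sketch of how that choice could be stated once (not part of the patch; it uses only the class and opcode names the patch itself introduces and would live in MipsInstrInfo.cpp):

  // Hypothetical helper; mirrors the if-chains in the hooks above.
  static unsigned getLoadStoreOpcode(const TargetRegisterClass *RC,
                                     bool IsStore) {
    if (RC == Mips::CPURegsRegisterClass)
      return IsStore ? Mips::SW    : Mips::LW;     // 32-bit GPR
    if (RC == Mips::FGR32RegisterClass)
      return IsStore ? Mips::SWC1  : Mips::LWC1;   // single, 32 FP regs
    if (RC == Mips::AFGR32RegisterClass)
      return IsStore ? Mips::SWC1A : Mips::LWC1A;  // single, even FP regs
    if (RC == Mips::AFGR64RegisterClass)
      return IsStore ? Mips::SDC1  : Mips::LDC1;   // double
    assert(0 && "Register class cannot be spilled or reloaded");
    return 0;
  }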
Modified: llvm/branches/non-call-eh/lib/Target/Mips/MipsInstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/Mips/MipsInstrInfo.h?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/Mips/MipsInstrInfo.h (original)
+++ llvm/branches/non-call-eh/lib/Target/Mips/MipsInstrInfo.h Sun Jul 6 15:45:41 2008
@@ -24,6 +24,45 @@
// Mips Condition Codes
enum CondCode {
+  // To be used with a float branch on True
+ FCOND_F,
+ FCOND_UN,
+ FCOND_EQ,
+ FCOND_UEQ,
+ FCOND_OLT,
+ FCOND_ULT,
+ FCOND_OLE,
+ FCOND_ULE,
+ FCOND_SF,
+ FCOND_NGLE,
+ FCOND_SEQ,
+ FCOND_NGL,
+ FCOND_LT,
+ FCOND_NGE,
+ FCOND_LE,
+ FCOND_NGT,
+
+  // To be used with a float branch on False
+  // These conditions have the same mnemonics as the
+  // ones above, but are used with a branch on False.
+ FCOND_T,
+ FCOND_OR,
+ FCOND_NEQ,
+ FCOND_OGL,
+ FCOND_UGE,
+ FCOND_OGE,
+ FCOND_UGT,
+ FCOND_OGT,
+ FCOND_ST,
+ FCOND_GLE,
+ FCOND_SNE,
+ FCOND_GL,
+ FCOND_NLT,
+ FCOND_GE,
+ FCOND_NLE,
+ FCOND_GT,
+
+ // Only integer conditions
COND_E,
COND_GZ,
COND_GEZ,
@@ -40,6 +79,45 @@
/// e.g. turning COND_E to COND_NE.
CondCode GetOppositeBranchCondition(Mips::CondCode CC);
+  /// MipsFCCToString - Map each FP condition code to its string
+ inline static const char *MipsFCCToString(Mips::CondCode CC)
+ {
+ switch (CC) {
+ default: assert(0 && "Unknown condition code");
+ case FCOND_F:
+ case FCOND_T: return "f";
+ case FCOND_UN:
+ case FCOND_OR: return "un";
+ case FCOND_EQ:
+ case FCOND_NEQ: return "eq";
+ case FCOND_UEQ:
+ case FCOND_OGL: return "ueq";
+ case FCOND_OLT:
+ case FCOND_UGE: return "olt";
+ case FCOND_ULT:
+ case FCOND_OGE: return "ult";
+ case FCOND_OLE:
+ case FCOND_UGT: return "ole";
+ case FCOND_ULE:
+ case FCOND_OGT: return "ule";
+ case FCOND_SF:
+ case FCOND_ST: return "sf";
+ case FCOND_NGLE:
+ case FCOND_GLE: return "ngle";
+ case FCOND_SEQ:
+ case FCOND_SNE: return "seq";
+ case FCOND_NGL:
+ case FCOND_GL: return "ngl";
+ case FCOND_LT:
+ case FCOND_NLT: return "lt";
+ case FCOND_NGE:
+ case FCOND_GE: return "ge";
+ case FCOND_LE:
+ case FCOND_NLE: return "nle";
+ case FCOND_NGT:
+ case FCOND_GT: return "gt";
+ }
+ }
}
class MipsInstrInfo : public TargetInstrInfoImpl {
@@ -52,7 +130,7 @@
/// such, whenever a client has an instance of instruction info, it should
/// always be able to get register info as well (through this method).
///
- virtual const TargetRegisterInfo &getRegisterInfo() const { return RI; }
+ virtual const MipsRegisterInfo &getRegisterInfo() const { return RI; }
/// Return true if the instruction is a register to register move and
/// leave the source and dest operands in the passed parameters.
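A short sketch, not part of the patch, of what the second block of FCOND_* values buys: GetOppositeBranchCondition flips a condition into its same-mnemonic twin, so the c.<cond>.fmt compare is unchanged and only the polarity of the following floating point branch has to change. The enum layout assumed here is the one declared just above.

  // Would live next to the CondCode enum in MipsInstrInfo.h.
  static bool branchesOnFalse(Mips::CondCode CC) {
    // FCOND_T..FCOND_GT are the "branch False" twins of FCOND_F..FCOND_NGT;
    // MipsFCCToString returns the same string for both members of a pair.
    return CC >= Mips::FCOND_T && CC <= Mips::FCOND_GT;
  }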
Modified: llvm/branches/non-call-eh/lib/Target/Mips/MipsInstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/Mips/MipsInstrInfo.td?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/Mips/MipsInstrInfo.td (original)
+++ llvm/branches/non-call-eh/lib/Target/Mips/MipsInstrInfo.td Sun Jul 6 15:45:41 2008
@@ -17,10 +17,16 @@
// Mips profiles and nodes
//===----------------------------------------------------------------------===//
+def SDT_MipsRet : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
+def SDT_MipsJmpLink : SDTypeProfile<0, 1, [SDTCisVT<0, iPTR>]>;
+def SDT_MipsSelectCC : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>,
+ SDTCisSameAs<1, 2>, SDTCisInt<3>]>;
+def SDT_MipsCallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>]>;
+def SDT_MipsCallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;
+
// Call
-def SDT_MipsJmpLink : SDTypeProfile<0, 1, [SDTCisVT<0, iPTR>]>;
-def MipsJmpLink : SDNode<"MipsISD::JmpLink",SDT_MipsJmpLink, [SDNPHasChain,
- SDNPOutFlag]>;
+def MipsJmpLink : SDNode<"MipsISD::JmpLink",SDT_MipsJmpLink, [SDNPHasChain,
+ SDNPOutFlag]>;
// Hi and Lo nodes are used to handle global addresses. Used on
// MipsISelLowering to lower stuff like GlobalAddress, ExternalSymbol
@@ -29,24 +35,22 @@
def MipsLo : SDNode<"MipsISD::Lo", SDTIntUnaryOp>;
// Return
-def SDT_MipsRet : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
-def MipsRet : SDNode<"MipsISD::Ret", SDT_MipsRet, [SDNPHasChain,
- SDNPOptInFlag]>;
+def MipsRet : SDNode<"MipsISD::Ret", SDT_MipsRet, [SDNPHasChain,
+ SDNPOptInFlag]>;
// These are target-independent nodes, but have target-specific formats.
-def SDT_MipsCallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>]>;
-def SDT_MipsCallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>,
- SDTCisVT<1, i32>]>;
+def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_MipsCallSeqStart,
+ [SDNPHasChain, SDNPOutFlag]>;
+def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_MipsCallSeqEnd,
+ [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
-def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_MipsCallSeqStart,
- [SDNPHasChain, SDNPOutFlag]>;
-def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_MipsCallSeqEnd,
- [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
+// Select Condition Code
+def MipsSelectCC : SDNode<"MipsISD::SelectCC", SDT_MipsSelectCC>;
//===----------------------------------------------------------------------===//
// Mips Instruction Predicate Definitions.
//===----------------------------------------------------------------------===//
-def IsStatic : Predicate<"TM.getRelocationModel() == Reloc::Static">;
+def IsAllegrex : Predicate<"Subtarget.isAllegrex()">;
//===----------------------------------------------------------------------===//
// Mips Operand, Complex Patterns and Transformations Definitions.
@@ -58,7 +62,6 @@
def uimm16 : Operand<i32>;
def simm16 : Operand<i32>;
def shamt : Operand<i32>;
-def addrlabel : Operand<i32>;
// Address operand
def mem : Operand<i32> {
@@ -135,7 +138,6 @@
[], IIAlu>;
// Arithmetic 2 register operands
-let isCommutable = 1 in
class ArithI<bits<6> op, string instr_asm, SDNode OpNode,
Operand Od, PatLeaf imm_type> :
FI< op,
@@ -168,7 +170,7 @@
(outs CPURegs:$dst),
(ins CPURegs:$b, uimm16:$c),
!strconcat(instr_asm, " $dst, $b, $c"),
- [(set CPURegs:$dst, (OpNode CPURegs:$b, immSExt16:$c))], IIAlu>;
+ [(set CPURegs:$dst, (OpNode CPURegs:$b, immZExt16:$c))], IIAlu>;
class LogicNOR<bits<6> op, bits<6> func, string instr_asm>:
FR< op,
@@ -341,28 +343,46 @@
instr_asm,
[(set CPURegs:$dst, addr:$addr)], IIAlu>;
+class SignExtInReg<bits<6> func, string instr_asm, ValueType vt>:
+ FR< 0x3f, func, (outs CPURegs:$dst), (ins CPURegs:$src),
+ !strconcat(instr_asm, " $dst, $src"),
+ [(set CPURegs:$dst, (sext_inreg CPURegs:$src, vt))], NoItinerary>;
+
+
//===----------------------------------------------------------------------===//
// Pseudo instructions
//===----------------------------------------------------------------------===//
// As stack alignment is always done with addiu, we need a 16-bit immediate
let Defs = [SP], Uses = [SP] in {
-def ADJCALLSTACKDOWN : PseudoInstMips<(outs), (ins uimm16:$amt),
- "!ADJCALLSTACKDOWN $amt",
- [(callseq_start imm:$amt)]>;
-def ADJCALLSTACKUP : PseudoInstMips<(outs), (ins uimm16:$amt1, uimm16:$amt2),
- "!ADJCALLSTACKUP $amt1",
- [(callseq_end imm:$amt1, imm:$amt2)]>;
+def ADJCALLSTACKDOWN : MipsPseudo<(outs), (ins uimm16:$amt),
+ "!ADJCALLSTACKDOWN $amt",
+ [(callseq_start imm:$amt)]>;
+def ADJCALLSTACKUP : MipsPseudo<(outs), (ins uimm16:$amt1, uimm16:$amt2),
+ "!ADJCALLSTACKUP $amt1",
+ [(callseq_end imm:$amt1, imm:$amt2)]>;
}
// When handling PIC code the assembler needs .cpload and .cprestore
// directives. If the real instructions corresponding to these directives
// are used, we have the same behavior, but also get a bunch of warnings
// from the assembler.
-def CPLOAD: PseudoInstMips<(outs), (ins CPURegs:$reg),
- ".set noreorder\n\t.cpload $reg\n\t.set reorder\n", []>;
-def CPRESTORE: PseudoInstMips<(outs), (ins uimm16:$loc),
- ".cprestore $loc\n", []>;
+def CPLOAD : MipsPseudo<(outs), (ins CPURegs:$reg),
+ ".set noreorder\n\t.cpload $reg\n\t.set reorder\n",
+ []>;
+def CPRESTORE : MipsPseudo<(outs), (ins uimm16:$loc),
+ ".cprestore $loc\n", []>;
+
+// The supported Mips ISAs don't have any instruction close to the SELECT_CC
+// operation. The solution is to create a Mips pseudo SELECT_CC instruction
+// (MipsSelectCC), use LowerSELECT_CC to generate this instruction and finally
+// replace it with natively supported nodes in EmitInstrWithCustomInserter.
+let usesCustomDAGSchedInserter = 1 in {
+ def Select_CC : MipsPseudo<(outs CPURegs:$dst),
+ (ins CPURegs:$CmpRes, CPURegs:$T, CPURegs:$F), "# MipsSelect_CC",
+ [(set CPURegs:$dst, (MipsSelectCC CPURegs:$CmpRes,
+ CPURegs:$T, CPURegs:$F))]>;
+}
//===----------------------------------------------------------------------===//
// Instruction definition
@@ -376,9 +396,10 @@
// ADDiu just accepts 16-bit immediates, but we handle this with Pat's.
// immZExt32 is used here so it can match GlobalAddress immediates.
+// MUL is an assembly macro in the currently used ISAs.
def ADDiu : ArithI<0x09, "addiu", add, uimm16, immZExt16>;
def ADDi : ArithI<0x08, "addi", add, simm16, immSExt16>;
-def MUL : ArithR<0x1c, 0x02, "mul", mul, IIImul>;
+//def MUL : ArithR<0x1c, 0x02, "mul", mul, IIImul>;
def ADDu : ArithR<0x00, 0x21, "addu", add, IIAlu>;
def SUBu : ArithR<0x00, 0x23, "subu", sub, IIAlu>;
def ADD : ArithOverflowR<0x00, 0x20, "add">;
@@ -457,19 +478,6 @@
def MTHI : MoveFromTo<0x11, "mthi">;
def MTLO : MoveFromTo<0x13, "mtlo">;
-// Count Leading
-// CLO/CLZ are part of the newer MIPS32(tm) instruction
-// set and not older Mips I keep this for future use
-// though.
-//def CLO : CountLeading<0x21, "clo">;
-//def CLZ : CountLeading<0x20, "clz">;
-
-// MADD*/MSUB* are not part of MipsI either.
-//def MADD : MArithR<0x00, "madd">;
-//def MADDU : MArithR<0x01, "maddu">;
-//def MSUB : MArithR<0x04, "msub">;
-//def MSUBU : MArithR<0x05, "msubu">;
-
// No operation
let addr=0 in
def NOP : FJ<0, (outs), (ins), "nop", [], IIAlu>;
@@ -489,6 +497,27 @@
// can be matched. It's similar to Sparc LEA_ADDRi
def LEA_ADDiu : EffectiveAddress<"addiu $dst, ${addr:stackloc}">;
+// Count Leading
+// CLO/CLZ are part of the newer MIPS32(tm) instruction
+// set and not of the older Mips I; keep this here for
+// future use though.
+//def CLO : CountLeading<0x21, "clo">;
+//def CLZ : CountLeading<0x20, "clz">;
+
+// MADD*/MSUB* are not part of MipsI either.
+//def MADD : MArithR<0x00, "madd">;
+//def MADDU : MArithR<0x01, "maddu">;
+//def MSUB : MArithR<0x04, "msub">;
+//def MSUBU : MArithR<0x05, "msubu">;
+
+let Predicates = [IsAllegrex] in {
+ let shamt = 0x10, rs = 0 in
+ def SEB : SignExtInReg<0x21, "seb", i8>;
+
+ let shamt = 0x18, rs = 0 in
+ def SEH : SignExtInReg<0x20, "seh", i16>;
+}
+
//===----------------------------------------------------------------------===//
// Arbitrary patterns that map to one or more instructions
//===----------------------------------------------------------------------===//
@@ -503,6 +532,14 @@
def : Pat<(i32 imm:$imm),
(ORi (LUi (HI16 imm:$imm)), (LO16 imm:$imm))>;
+// Carry patterns
+def : Pat<(subc CPURegs:$lhs, CPURegs:$rhs),
+ (SUBu CPURegs:$lhs, CPURegs:$rhs)>;
+def : Pat<(addc CPURegs:$lhs, CPURegs:$rhs),
+ (ADDu CPURegs:$lhs, CPURegs:$rhs)>;
+def : Pat<(addc CPURegs:$src, imm:$imm),
+ (ADDiu CPURegs:$src, imm:$imm)>;
+
// Call
def : Pat<(MipsJmpLink (i32 tglobaladdr:$dst)),
(JAL tglobaladdr:$dst)>;
@@ -521,7 +558,7 @@
def : Pat<(add CPURegs:$hi, (MipsLo tjumptable:$lo)),
(ADDiu CPURegs:$hi, tjumptable:$lo)>;
-// Mips does not have not, so we increase the operation
+// Mips does not have a "not" instruction, so we expand it to a NOR with $zero
def : Pat<(not CPURegs:$in),
(NOR CPURegs:$in, ZERO)>;
@@ -530,13 +567,10 @@
def : Pat<(i32 (extloadi8 addr:$src)), (LBu addr:$src)>;
def : Pat<(i32 (extloadi16 addr:$src)), (LHu addr:$src)>;
-// some peepholes
+// peepholes
def : Pat<(store (i32 0), addr:$dst), (SW ZERO, addr:$dst)>;
-///
-/// brcond patterns
-///
-
+// brcond patterns
// direct match equal/notequal zero branches
def : Pat<(brcond (setne CPURegs:$lhs, 0), bb:$dst),
(BNE CPURegs:$lhs, ZERO, bb:$dst)>;
@@ -576,12 +610,8 @@
def : Pat<(brcond CPURegs:$cond, bb:$dst),
(BNE CPURegs:$cond, ZERO, bb:$dst)>;
-///
/// setcc patterns, only matched when there
/// is no brcond following a setcc operation
-///
-
-// setcc 2 register operands
def : Pat<(setle CPURegs:$lhs, CPURegs:$rhs),
(XORi (SLT CPURegs:$rhs, CPURegs:$lhs), 1)>;
def : Pat<(setule CPURegs:$lhs, CPURegs:$rhs),
@@ -605,8 +635,14 @@
(XORi (OR (SLT CPURegs:$lhs, CPURegs:$rhs),
(SLT CPURegs:$rhs, CPURegs:$lhs)), 1)>;
-// setcc reg/imm operands
def : Pat<(setge CPURegs:$lhs, immSExt16:$rhs),
(XORi (SLTi CPURegs:$lhs, immSExt16:$rhs), 1)>;
def : Pat<(setuge CPURegs:$lhs, immZExt16:$rhs),
(XORi (SLTiu CPURegs:$lhs, immZExt16:$rhs), 1)>;
+
+//===----------------------------------------------------------------------===//
+// Floating Point Support
+//===----------------------------------------------------------------------===//
+
+include "MipsInstrFPU.td"
+
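The setcc patterns above lean on an arithmetic identity rather than on a dedicated instruction; for example the setle pattern computes a <= b as (b < a) xor 1. A tiny self-contained C++ check of that identity (illustrative only, not part of the patch):

  #include <cassert>

  // slt mirrors the MIPS SLT result: 1 if a < b, else 0.
  static unsigned slt(int a, int b) { return a < b ? 1u : 0u; }

  // setle mirrors (XORi (SLT $rhs, $lhs), 1) from the pattern above.
  static unsigned setle(int a, int b) { return slt(b, a) ^ 1u; }

  int main() {
    assert(setle(3, 7) == 1);
    assert(setle(7, 7) == 1);
    assert(setle(9, 7) == 0);
    return 0;
  }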
Modified: llvm/branches/non-call-eh/lib/Target/Mips/MipsMachineFunction.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/Mips/MipsMachineFunction.h?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/Mips/MipsMachineFunction.h (original)
+++ llvm/branches/non-call-eh/lib/Target/Mips/MipsMachineFunction.h Sun Jul 6 15:45:41 2008
@@ -25,12 +25,12 @@
class MipsFunctionInfo : public MachineFunctionInfo {
private:
- /// Holds for each function where on the stack
- /// the Frame Pointer must be saved
+ /// Holds for each function where on the stack the Frame Pointer must be
+ /// saved.
int FPStackOffset;
- /// Holds for each function where on the stack
- /// the Return Address must be saved
+ /// Holds for each function where on the stack the Return Address must be
+ /// saved.
int RAStackOffset;
/// MipsFIHolder - Holds a FrameIndex and its Stack Pointer Offset
@@ -43,31 +43,34 @@
: FI(FrameIndex), SPOffset(StackPointerOffset) {}
};
- /// When PIC is used the GP must be saved on the stack
- /// on the function prologue and must be reloaded from this
- /// stack location after every call. A reference to its stack
- /// location and frame index must be kept to be used on
- /// emitPrologue and processFunctionBeforeFrameFinalized.
+ /// When PIC is used the GP must be saved on the stack on the function
+ /// prologue and must be reloaded from this stack location after every
+ /// call. A reference to its stack location and frame index must be kept
+ /// to be used on emitPrologue and processFunctionBeforeFrameFinalized.
MipsFIHolder GPHolder;
- // On LowerFORMAL_ARGUMENTS the stack size is unknown,
- // so the Stack Pointer Offset calculation of "not in
- // register arguments" must be postponed to emitPrologue.
+ // On LowerFORMAL_ARGUMENTS the stack size is unknown, so the Stack
+ // Pointer Offset calculation of "not in register arguments" must be
+ // postponed to emitPrologue.
SmallVector<MipsFIHolder, 16> FnLoadArgs;
bool HasLoadArgs;
- // When VarArgs, we must write registers back to caller
- // stack, preserving on register arguments. Since the
- // stack size is unknown on LowerFORMAL_ARGUMENTS,
- // the Stack Pointer Offset calculation must be
+  // When VarArgs, we must write registers back to the caller stack,
+  // preserving the register arguments. Since the stack size is unknown on
+ // LowerFORMAL_ARGUMENTS, the Stack Pointer Offset calculation must be
// postponed to emitPrologue.
SmallVector<MipsFIHolder, 4> FnStoreVarArgs;
bool HasStoreVarArgs;
+ /// SRetReturnReg - Some subtargets require that sret lowering includes
+ /// returning the value of the returned struct in a register. This field
+ /// holds the virtual register into which the sret argument is passed.
+ unsigned SRetReturnReg;
+
public:
MipsFunctionInfo(MachineFunction& MF)
- : FPStackOffset(0), RAStackOffset(0), GPHolder(-1,-1),
- HasLoadArgs(false), HasStoreVarArgs(false)
+ : FPStackOffset(0), RAStackOffset(0), GPHolder(-1,-1), HasLoadArgs(false),
+ HasStoreVarArgs(false), SRetReturnReg(0)
{}
int getFPStackOffset() const { return FPStackOffset; }
@@ -109,6 +112,8 @@
MFI->setObjectOffset( FnStoreVarArgs[i].FI, FnStoreVarArgs[i].SPOffset );
}
+ unsigned getSRetReturnReg() const { return SRetReturnReg; }
+ void setSRetReturnReg(unsigned Reg) { SRetReturnReg = Reg; }
};
} // end of namespace llvm
Modified: llvm/branches/non-call-eh/lib/Target/Mips/MipsRegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/Mips/MipsRegisterInfo.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/Mips/MipsRegisterInfo.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/Mips/MipsRegisterInfo.cpp Sun Jul 6 15:45:41 2008
@@ -32,14 +32,12 @@
#include "llvm/Support/Debug.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
-//#include "MipsSubtarget.h"
using namespace llvm;
-// TODO: add subtarget support
MipsRegisterInfo::MipsRegisterInfo(const TargetInstrInfo &tii)
: MipsGenRegisterInfo(Mips::ADJCALLSTACKDOWN, Mips::ADJCALLSTACKUP),
- TII(tii) {}
+ TII(tii) {}
/// getRegisterNumbering - Given the enum value for some register, e.g.
/// Mips::RA, return the number that it corresponds to (e.g. 31).
@@ -47,38 +45,38 @@
getRegisterNumbering(unsigned RegEnum)
{
switch (RegEnum) {
- case Mips::ZERO : return 0;
- case Mips::AT : return 1;
- case Mips::V0 : return 2;
- case Mips::V1 : return 3;
- case Mips::A0 : return 4;
- case Mips::A1 : return 5;
- case Mips::A2 : return 6;
- case Mips::A3 : return 7;
- case Mips::T0 : return 8;
- case Mips::T1 : return 9;
- case Mips::T2 : return 10;
- case Mips::T3 : return 11;
- case Mips::T4 : return 12;
- case Mips::T5 : return 13;
- case Mips::T6 : return 14;
- case Mips::T7 : return 15;
- case Mips::T8 : return 16;
- case Mips::T9 : return 17;
- case Mips::S0 : return 18;
- case Mips::S1 : return 19;
- case Mips::S2 : return 20;
- case Mips::S3 : return 21;
- case Mips::S4 : return 22;
- case Mips::S5 : return 23;
- case Mips::S6 : return 24;
- case Mips::S7 : return 25;
- case Mips::K0 : return 26;
- case Mips::K1 : return 27;
- case Mips::GP : return 28;
- case Mips::SP : return 29;
- case Mips::FP : return 30;
- case Mips::RA : return 31;
+ case Mips::ZERO : case Mips::F0 : return 0;
+ case Mips::AT : case Mips::F1 : return 1;
+ case Mips::V0 : case Mips::F2 : return 2;
+ case Mips::V1 : case Mips::F3 : return 3;
+ case Mips::A0 : case Mips::F4 : return 4;
+ case Mips::A1 : case Mips::F5 : return 5;
+ case Mips::A2 : case Mips::F6 : return 6;
+ case Mips::A3 : case Mips::F7 : return 7;
+ case Mips::T0 : case Mips::F8 : return 8;
+ case Mips::T1 : case Mips::F9 : return 9;
+ case Mips::T2 : case Mips::F10: return 10;
+ case Mips::T3 : case Mips::F11: return 11;
+ case Mips::T4 : case Mips::F12: return 12;
+ case Mips::T5 : case Mips::F13: return 13;
+ case Mips::T6 : case Mips::F14: return 14;
+ case Mips::T7 : case Mips::F15: return 15;
+ case Mips::T8 : case Mips::F16: return 16;
+ case Mips::T9 : case Mips::F17: return 17;
+ case Mips::S0 : case Mips::F18: return 18;
+ case Mips::S1 : case Mips::F19: return 19;
+ case Mips::S2 : case Mips::F20: return 20;
+ case Mips::S3 : case Mips::F21: return 21;
+ case Mips::S4 : case Mips::F22: return 22;
+ case Mips::S5 : case Mips::F23: return 23;
+ case Mips::S6 : case Mips::F24: return 24;
+ case Mips::S7 : case Mips::F25: return 25;
+ case Mips::K0 : case Mips::F26: return 26;
+ case Mips::K1 : case Mips::F27: return 27;
+ case Mips::GP : case Mips::F28: return 28;
+ case Mips::SP : case Mips::F29: return 29;
+ case Mips::FP : case Mips::F30: return 30;
+ case Mips::RA : case Mips::F31: return 31;
default: assert(0 && "Unknown register number!");
}
return 0; // Not reached
@@ -94,11 +92,12 @@
const unsigned* MipsRegisterInfo::
getCalleeSavedRegs(const MachineFunction *MF) const
{
- // Mips calle-save register range is $16-$26(s0-s7)
+ // Mips callee-save register range is $16-$23(s0-s7)
static const unsigned CalleeSavedRegs[] = {
Mips::S0, Mips::S1, Mips::S2, Mips::S3,
Mips::S4, Mips::S5, Mips::S6, Mips::S7, 0
};
+
return CalleeSavedRegs;
}
@@ -147,7 +146,7 @@
//
// 0 ----------
// 4 Args to pass
-// . saved $GP (used in PIC - not supported yet)
+// . saved $GP (used in PIC)
// . Local Area
// . saved "Callee Saved" Registers
// . saved FP
@@ -271,6 +270,8 @@
int FPOffset, RAOffset;
// Allocate space for saved RA and FP when needed
+  // FIXME: with 64-bit registers, change the hardcoded
+  // sizes for the RA and FP offsets.
if ((hasFP(MF)) && (MFI->hasCalls())) {
FPOffset = NumBytes;
RAOffset = (NumBytes+4);
@@ -283,8 +284,7 @@
FPOffset = NumBytes;
RAOffset = 0;
NumBytes += 4;
- } else {
- // No calls and no fp.
+ } else { // No calls and no fp.
RAOffset = FPOffset = 0;
}
@@ -369,7 +369,7 @@
// lw $ra, stack_loc($sp)
if (MFI->hasCalls()) {
BuildMI(MBB, MBBI, TII.get(Mips::LW))
- .addReg(Mips::RA).addImm(RAOffset).addReg(Mips::SP);
+ .addReg(Mips::RA).addImm(RAOffset).addReg(Mips::SP);
}
// adjust stack : insert addi sp, sp, (imm)
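getRegisterNumbering above gives every FPR the same 0-31 hardware number as its GPR counterpart, while the register definitions added later in this patch place the FPRs at DWARF numbers 32-63. A sketch of the relation between the two numbering spaces (illustrative only, not part of the patch; it assumes the F0-F31 hardware numbers from the switch above):

  #include <cassert>

  // Converts an FPR hardware number (0-31) to its DWARF register number.
  static unsigned fprHardwareToDwarf(unsigned HwNum) {
    assert(HwNum < 32 && "MIPS has 32 single precision FP registers");
    return 32 + HwNum;   // F0 -> DWARF 32, ..., F31 -> DWARF 63
  }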
Modified: llvm/branches/non-call-eh/lib/Target/Mips/MipsRegisterInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/Mips/MipsRegisterInfo.td?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/Mips/MipsRegisterInfo.td (original)
+++ llvm/branches/non-call-eh/lib/Target/Mips/MipsRegisterInfo.td Sun Jul 6 15:45:41 2008
@@ -22,41 +22,118 @@
let Num = num;
}
-// CPU GPR Registers
-def ZERO : MipsGPRReg< 0, "ZERO">, DwarfRegNum<[0]>;
-def AT : MipsGPRReg< 1, "AT">, DwarfRegNum<[1]>;
-def V0 : MipsGPRReg< 2, "2">, DwarfRegNum<[2]>;
-def V1 : MipsGPRReg< 3, "3">, DwarfRegNum<[3]>;
-def A0 : MipsGPRReg< 4, "4">, DwarfRegNum<[5]>;
-def A1 : MipsGPRReg< 5, "5">, DwarfRegNum<[5]>;
-def A2 : MipsGPRReg< 6, "6">, DwarfRegNum<[6]>;
-def A3 : MipsGPRReg< 7, "7">, DwarfRegNum<[7]>;
-def T0 : MipsGPRReg< 8, "8">, DwarfRegNum<[8]>;
-def T1 : MipsGPRReg< 9, "9">, DwarfRegNum<[9]>;
-def T2 : MipsGPRReg< 10, "10">, DwarfRegNum<[10]>;
-def T3 : MipsGPRReg< 11, "11">, DwarfRegNum<[11]>;
-def T4 : MipsGPRReg< 12, "12">, DwarfRegNum<[12]>;
-def T5 : MipsGPRReg< 13, "13">, DwarfRegNum<[13]>;
-def T6 : MipsGPRReg< 14, "14">, DwarfRegNum<[14]>;
-def T7 : MipsGPRReg< 15, "15">, DwarfRegNum<[15]>;
-def S0 : MipsGPRReg< 16, "16">, DwarfRegNum<[16]>;
-def S1 : MipsGPRReg< 17, "17">, DwarfRegNum<[17]>;
-def S2 : MipsGPRReg< 18, "18">, DwarfRegNum<[18]>;
-def S3 : MipsGPRReg< 19, "19">, DwarfRegNum<[19]>;
-def S4 : MipsGPRReg< 20, "20">, DwarfRegNum<[20]>;
-def S5 : MipsGPRReg< 21, "21">, DwarfRegNum<[21]>;
-def S6 : MipsGPRReg< 22, "22">, DwarfRegNum<[22]>;
-def S7 : MipsGPRReg< 23, "23">, DwarfRegNum<[23]>;
-def T8 : MipsGPRReg< 24, "24">, DwarfRegNum<[24]>;
-def T9 : MipsGPRReg< 25, "25">, DwarfRegNum<[25]>;
-def K0 : MipsGPRReg< 26, "26">, DwarfRegNum<[26]>;
-def K1 : MipsGPRReg< 27, "27">, DwarfRegNum<[27]>;
-def GP : MipsGPRReg< 28, "GP">, DwarfRegNum<[28]>;
-def SP : MipsGPRReg< 29, "SP">, DwarfRegNum<[29]>;
-def FP : MipsGPRReg< 30, "FP">, DwarfRegNum<[30]>;
-def RA : MipsGPRReg< 31, "RA">, DwarfRegNum<[31]>;
+// Mips 32-bit FPU Registers
+class FPR<bits<5> num, string n> : MipsReg<n> {
+ let Num = num;
+}
+
+// Mips 64-bit (aliased) FPU Registers
+class AFPR<bits<5> num, string n, list<Register> aliases> : MipsReg<n> {
+ let Num = num;
+ let Aliases = aliases;
+}
+
+//===----------------------------------------------------------------------===//
+// Registers
+//===----------------------------------------------------------------------===//
+
+let Namespace = "Mips" in {
+
+ // General Purpose Registers
+ def ZERO : MipsGPRReg< 0, "ZERO">, DwarfRegNum<[0]>;
+ def AT : MipsGPRReg< 1, "AT">, DwarfRegNum<[1]>;
+ def V0 : MipsGPRReg< 2, "2">, DwarfRegNum<[2]>;
+ def V1 : MipsGPRReg< 3, "3">, DwarfRegNum<[3]>;
+ def A0 : MipsGPRReg< 4, "4">, DwarfRegNum<[5]>;
+ def A1 : MipsGPRReg< 5, "5">, DwarfRegNum<[5]>;
+ def A2 : MipsGPRReg< 6, "6">, DwarfRegNum<[6]>;
+ def A3 : MipsGPRReg< 7, "7">, DwarfRegNum<[7]>;
+ def T0 : MipsGPRReg< 8, "8">, DwarfRegNum<[8]>;
+ def T1 : MipsGPRReg< 9, "9">, DwarfRegNum<[9]>;
+ def T2 : MipsGPRReg< 10, "10">, DwarfRegNum<[10]>;
+ def T3 : MipsGPRReg< 11, "11">, DwarfRegNum<[11]>;
+ def T4 : MipsGPRReg< 12, "12">, DwarfRegNum<[12]>;
+ def T5 : MipsGPRReg< 13, "13">, DwarfRegNum<[13]>;
+ def T6 : MipsGPRReg< 14, "14">, DwarfRegNum<[14]>;
+ def T7 : MipsGPRReg< 15, "15">, DwarfRegNum<[15]>;
+ def S0 : MipsGPRReg< 16, "16">, DwarfRegNum<[16]>;
+ def S1 : MipsGPRReg< 17, "17">, DwarfRegNum<[17]>;
+ def S2 : MipsGPRReg< 18, "18">, DwarfRegNum<[18]>;
+ def S3 : MipsGPRReg< 19, "19">, DwarfRegNum<[19]>;
+ def S4 : MipsGPRReg< 20, "20">, DwarfRegNum<[20]>;
+ def S5 : MipsGPRReg< 21, "21">, DwarfRegNum<[21]>;
+ def S6 : MipsGPRReg< 22, "22">, DwarfRegNum<[22]>;
+ def S7 : MipsGPRReg< 23, "23">, DwarfRegNum<[23]>;
+ def T8 : MipsGPRReg< 24, "24">, DwarfRegNum<[24]>;
+ def T9 : MipsGPRReg< 25, "25">, DwarfRegNum<[25]>;
+ def K0 : MipsGPRReg< 26, "26">, DwarfRegNum<[26]>;
+ def K1 : MipsGPRReg< 27, "27">, DwarfRegNum<[27]>;
+ def GP : MipsGPRReg< 28, "GP">, DwarfRegNum<[28]>;
+ def SP : MipsGPRReg< 29, "SP">, DwarfRegNum<[29]>;
+ def FP : MipsGPRReg< 30, "FP">, DwarfRegNum<[30]>;
+ def RA : MipsGPRReg< 31, "RA">, DwarfRegNum<[31]>;
+
+  /// Mips single precision FPU Registers
+ def F0 : FPR< 0, "F0">, DwarfRegNum<[32]>;
+ def F1 : FPR< 1, "F1">, DwarfRegNum<[33]>;
+ def F2 : FPR< 2, "F2">, DwarfRegNum<[34]>;
+ def F3 : FPR< 3, "F3">, DwarfRegNum<[35]>;
+ def F4 : FPR< 4, "F4">, DwarfRegNum<[36]>;
+ def F5 : FPR< 5, "F5">, DwarfRegNum<[37]>;
+ def F6 : FPR< 6, "F6">, DwarfRegNum<[38]>;
+ def F7 : FPR< 7, "F7">, DwarfRegNum<[39]>;
+ def F8 : FPR< 8, "F8">, DwarfRegNum<[40]>;
+ def F9 : FPR< 9, "F9">, DwarfRegNum<[41]>;
+ def F10 : FPR<10, "F10">, DwarfRegNum<[42]>;
+ def F11 : FPR<11, "F11">, DwarfRegNum<[43]>;
+ def F12 : FPR<12, "F12">, DwarfRegNum<[44]>;
+ def F13 : FPR<13, "F13">, DwarfRegNum<[45]>;
+ def F14 : FPR<14, "F14">, DwarfRegNum<[46]>;
+ def F15 : FPR<15, "F15">, DwarfRegNum<[47]>;
+ def F16 : FPR<16, "F16">, DwarfRegNum<[48]>;
+ def F17 : FPR<17, "F17">, DwarfRegNum<[49]>;
+ def F18 : FPR<18, "F18">, DwarfRegNum<[50]>;
+ def F19 : FPR<19, "F19">, DwarfRegNum<[51]>;
+ def F20 : FPR<20, "F20">, DwarfRegNum<[52]>;
+ def F21 : FPR<21, "F21">, DwarfRegNum<[53]>;
+ def F22 : FPR<22, "F22">, DwarfRegNum<[54]>;
+ def F23 : FPR<23, "F23">, DwarfRegNum<[55]>;
+ def F24 : FPR<24, "F24">, DwarfRegNum<[56]>;
+ def F25 : FPR<25, "F25">, DwarfRegNum<[57]>;
+ def F26 : FPR<26, "F26">, DwarfRegNum<[58]>;
+ def F27 : FPR<27, "F27">, DwarfRegNum<[59]>;
+ def F28 : FPR<28, "F28">, DwarfRegNum<[60]>;
+ def F29 : FPR<29, "F29">, DwarfRegNum<[61]>;
+ def F30 : FPR<30, "F30">, DwarfRegNum<[62]>;
+ def F31 : FPR<31, "F31">, DwarfRegNum<[63]>;
+
+  /// Mips double precision FPU Registers (aliased
+  /// with the single precision registers to hold 64-bit values)
+ def D0 : AFPR< 0, "F0", [F0, F1]>, DwarfRegNum<[32]>;
+ def D1 : AFPR< 2, "F2", [F2, F3]>, DwarfRegNum<[34]>;
+ def D2 : AFPR< 4, "F4", [F4, F5]>, DwarfRegNum<[36]>;
+ def D3 : AFPR< 6, "F6", [F6, F7]>, DwarfRegNum<[38]>;
+ def D4 : AFPR< 8, "F8", [F8, F9]>, DwarfRegNum<[40]>;
+ def D5 : AFPR<10, "F10", [F10, F11]>, DwarfRegNum<[42]>;
+ def D6 : AFPR<12, "F12", [F12, F13]>, DwarfRegNum<[44]>;
+ def D7 : AFPR<14, "F14", [F14, F15]>, DwarfRegNum<[46]>;
+ def D8 : AFPR<16, "F16", [F16, F17]>, DwarfRegNum<[48]>;
+ def D9 : AFPR<18, "F18", [F18, F19]>, DwarfRegNum<[50]>;
+ def D10 : AFPR<20, "F20", [F20, F21]>, DwarfRegNum<[52]>;
+ def D11 : AFPR<22, "F22", [F22, F23]>, DwarfRegNum<[54]>;
+ def D12 : AFPR<24, "F24", [F24, F25]>, DwarfRegNum<[56]>;
+ def D13 : AFPR<26, "F26", [F26, F27]>, DwarfRegNum<[58]>;
+ def D14 : AFPR<28, "F28", [F28, F29]>, DwarfRegNum<[60]>;
+ def D15 : AFPR<30, "F30", [F30, F31]>, DwarfRegNum<[62]>;
+
+ // Status flags register
+ def FCR31 : Register<"FCR31">;
+}
+
+//===----------------------------------------------------------------------===//
+// Register Classes
+//===----------------------------------------------------------------------===//
-// CPU Registers Class
def CPURegs : RegisterClass<"Mips", [i32], 32,
// Return Values and Arguments
[V0, V1, A0, A1, A2, A3,
@@ -78,3 +155,81 @@
}
}];
}
+
+// * 64bit fp:
+// - FGR64 = 32 64-bit registers (default mode)
+// - AFGR32/AFGR64 = 16 even 32-bit registers (32-bit compatible mode) for
+// single and double access.
+// * 32bit fp:
+// - AFGR32/AFGR64 = 16 even 32-bit registers - single and double
+// - FGR32 = 32 32-bit registers (within single-only mode)
+def FGR32 : RegisterClass<"Mips", [f32], 32,
+ // Return Values and Arguments
+ [F0, F1, F2, F3, F12, F13, F14, F15,
+ // Not preserved across procedure calls
+ F4, F5, F6, F7, F8, F9, F10, F11, F16, F17, F18, F19,
+ // Callee save
+ F20, F21, F22, F23, F24, F25, F26, F27, F28, F29, F30,
+ // Reserved
+ F31]>
+{
+ let MethodProtos = [{
+ iterator allocation_order_end(const MachineFunction &MF) const;
+ }];
+ let MethodBodies = [{
+ FGR32Class::iterator
+ FGR32Class::allocation_order_end(const MachineFunction &MF) const {
+ // The last register on the list above is reserved
+ return end()-1;
+ }
+ }];
+}
+
+def AFGR32 : RegisterClass<"Mips", [f32], 32,
+ // Return Values and Arguments
+ [F0, F2, F12, F14,
+ // Not preserved across procedure calls
+ F4, F6, F8, F10, F16, F18,
+ // Callee save
+ F20, F22, F24, F26, F28, F30,
+ // Reserved
+ F31]>
+{
+ let MethodProtos = [{
+ iterator allocation_order_end(const MachineFunction &MF) const;
+ }];
+ let MethodBodies = [{
+ AFGR32Class::iterator
+ AFGR32Class::allocation_order_end(const MachineFunction &MF) const {
+ // The last register on the list above is reserved
+ return end()-1;
+ }
+ }];
+}
+
+def AFGR64 : RegisterClass<"Mips", [f64], 64,
+ // Return Values and Arguments
+ [D0, D1, D6, D7,
+ // Not preserved across procedure calls
+ D2, D3, D4, D5, D8, D9,
+ // Callee save
+ D10, D11, D12, D13, D14,
+ // Reserved
+ D15]>
+{
+ let MethodProtos = [{
+ iterator allocation_order_end(const MachineFunction &MF) const;
+ }];
+ let MethodBodies = [{
+ AFGR64Class::iterator
+ AFGR64Class::allocation_order_end(const MachineFunction &MF) const {
+ // The last register on the list above is reserved
+ return end()-1;
+ }
+ }];
+}
+
+def CCR : RegisterClass<"Mips", [i32], 32, [FCR31]> {
+ let CopyCost = -1; // Don't allow copying of status registers.
+}
+
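The AFPR definitions above pair each double precision register with two consecutive singles (D0 aliases F0/F1, D5 aliases F10/F11, and so on). A sketch of that pairing rule, purely illustrative and not part of the patch:

  #include <utility>

  // Returns the (even, odd) single precision register indices covered by D<N>.
  static std::pair<unsigned, unsigned> doubleToSinglePair(unsigned N) {
    return std::make_pair(2 * N, 2 * N + 1);   // e.g. D5 -> (F10, F11)
  }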
Modified: llvm/branches/non-call-eh/lib/Target/Mips/MipsSubtarget.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/Mips/MipsSubtarget.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/Mips/MipsSubtarget.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/Mips/MipsSubtarget.cpp Sun Jul 6 15:45:41 2008
@@ -14,14 +14,29 @@
#include "MipsSubtarget.h"
#include "Mips.h"
#include "MipsGenSubtarget.inc"
+#include "llvm/Module.h"
using namespace llvm;
MipsSubtarget::MipsSubtarget(const TargetMachine &TM, const Module &M,
- const std::string &FS) :
- IsMipsIII(false)
+ const std::string &FS, bool little) :
+ MipsArchVersion(Mips1), MipsABI(O32), IsLittle(little), IsSingleFloat(false),
+ IsFP64bit(false), IsGP64bit(false), HasAllegrexVFPU(false), IsAllegrex(false)
{
std::string CPU = "mips1";
// Parse features string.
ParseSubtargetFeatures(FS, CPU);
+
+  // When only the target triple is specified and it is
+  // an allegrex target, set the features. We also match
+  // big and little endian allegrex cores (don't really
+  // know if a big-endian one exists).
+ const std::string& TT = M.getTargetTriple();
+ if (TT.find("mipsallegrex") != std::string::npos) {
+ MipsABI = EABI;
+ IsSingleFloat = true;
+ MipsArchVersion = Mips2;
+ HasAllegrexVFPU = true; // Enables Allegrex Vector FPU (not supported yet)
+ IsAllegrex = true;
+ }
}
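The Allegrex defaults above are keyed off a plain substring test on the module's target triple. A sketch of the test in isolation, not part of the patch; the example triple is an assumption, any triple containing "mipsallegrex" would do.

  #include <string>

  // True for triples such as "mipsallegrex-unknown-elf" (example only).
  static bool isAllegrexTriple(const std::string &TT) {
    return TT.find("mipsallegrex") != std::string::npos;
  }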
Modified: llvm/branches/non-call-eh/lib/Target/Mips/MipsSubtarget.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/Mips/MipsSubtarget.h?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/Mips/MipsSubtarget.h (original)
+++ llvm/branches/non-call-eh/lib/Target/Mips/MipsSubtarget.h Sun Jul 6 15:45:41 2008
@@ -26,23 +26,68 @@
protected:
- bool IsMipsIII;
+ enum MipsArchEnum {
+ Mips1, Mips2, Mips3, Mips4, Mips32, Mips32r2
+ };
+
+ enum MipsABIEnum {
+ O32, EABI
+ };
+
+ // Mips architecture version
+ MipsArchEnum MipsArchVersion;
+
+ // Mips supported ABIs
+ MipsABIEnum MipsABI;
+
+ // IsLittle - The target is Little Endian
+ bool IsLittle;
+
+  // IsSingleFloat - The target only supports single precision floating
+  // point operations. This enables the target to use all 32 32-bit
+  // floating point registers instead of only the even ones.
+  bool IsSingleFloat;
+
+  // IsFP64bit - The target processor has 64-bit floating point registers.
+  bool IsFP64bit;
+
+  // IsGP64bit - General-purpose registers are 64 bits wide.
+  bool IsGP64bit;
+
+  // HasAllegrexVFPU - The Allegrex processor has a vector floating point unit.
+  bool HasAllegrexVFPU;
+
+  // IsAllegrex - The target processor is an Allegrex core.
+ bool IsAllegrex;
+
InstrItineraryData InstrItins;
public:
+
+ /// Only O32 and EABI supported right now.
+ bool isABI_EABI() const { return MipsABI == EABI; }
+ bool isABI_O32() const { return MipsABI == O32; }
+
/// This constructor initializes the data members to match that
/// of the specified module.
- ///
MipsSubtarget(const TargetMachine &TM, const Module &M,
- const std::string &FS);
+ const std::string &FS, bool little);
/// ParseSubtargetFeatures - Parses features string setting specified
/// subtarget options. Definition of function is auto generated by tblgen.
void ParseSubtargetFeatures(const std::string &FS, const std::string &CPU);
- /// isMipsIII - Return true if the selected CPU supports MipsIII ISA
- /// support.
- bool isMipsIII() const { return IsMipsIII; }
+ bool hasMips2Ops() const { return MipsArchVersion >= Mips2; }
+
+ bool isLittle() const { return IsLittle; }
+ bool isFP64bit() const { return IsFP64bit; };
+ bool isGP64bit() const { return IsGP64bit; };
+ bool isGP32bit() const { return !IsGP64bit; };
+ bool isSingleFloat() const { return IsSingleFloat; };
+ bool isNotSingleFloat() const { return !IsSingleFloat; };
+ bool hasAllegrexVFPU() const { return HasAllegrexVFPU; };
+ bool isAllegrex() const { return IsAllegrex; };
+
};
} // End llvm namespace
Modified: llvm/branches/non-call-eh/lib/Target/Mips/MipsTargetMachine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/Mips/MipsTargetMachine.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/Mips/MipsTargetMachine.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/Mips/MipsTargetMachine.cpp Sun Jul 6 15:45:41 2008
@@ -19,10 +19,9 @@
#include "llvm/Target/TargetMachineRegistry.h"
using namespace llvm;
-namespace {
- // Register the target.
- RegisterTarget<MipsTargetMachine> X("mips", " Mips");
-}
+// Register the target.
+static RegisterTarget<MipsTargetMachine> X("mips", " Mips");
+static RegisterTarget<MipselTargetMachine> Y("mipsel", " Mipsel");
const TargetAsmInfo *MipsTargetMachine::
createTargetAsmInfo() const
@@ -35,13 +34,14 @@
// On function prologue, the stack is created by decrementing
// its pointer. Once decremented, all references are done with positive
// offset from the stack/frame pointer, so StackGrowsUp is used.
-// When using CodeModel::Large the behaviour
-//
-//
+// Using CodeModel::Large enables different CALL behavior.
MipsTargetMachine::
-MipsTargetMachine(const Module &M, const std::string &FS):
- Subtarget(*this, M, FS), DataLayout("E-p:32:32:32"),
- InstrInfo(*this), FrameInfo(TargetFrameInfo::StackGrowsUp, 8, 0),
+MipsTargetMachine(const Module &M, const std::string &FS, bool isLittle=false):
+ Subtarget(*this, M, FS, isLittle),
+ DataLayout(isLittle ? std::string("e-p:32:32:32") :
+ std::string("E-p:32:32:32")),
+ InstrInfo(*this),
+ FrameInfo(TargetFrameInfo::StackGrowsUp, 8, 0),
TLInfo(*this)
{
if (getRelocationModel() != Reloc::Static)
@@ -50,15 +50,42 @@
setCodeModel(CodeModel::Small);
}
+MipselTargetMachine::
+MipselTargetMachine(const Module &M, const std::string &FS) :
+ MipsTargetMachine(M, FS, true) {}
+
// Return 0 on no match; -march must then be given to generate MIPS code.
unsigned MipsTargetMachine::
getModuleMatchQuality(const Module &M)
{
- // We strongly match "mips-*".
+ // We strongly match "mips*-*".
std::string TT = M.getTargetTriple();
if (TT.size() >= 5 && std::string(TT.begin(), TT.begin()+5) == "mips-")
return 20;
+ if (TT.size() >= 13 && std::string(TT.begin(),
+ TT.begin()+13) == "mipsallegrex-")
+ return 20;
+
+ return 0;
+}
+
+// Return 0 on no match; -march must then be given to generate MIPSEL code.
+unsigned MipselTargetMachine::
+getModuleMatchQuality(const Module &M)
+{
+ // We strongly match "mips*el-*".
+ std::string TT = M.getTargetTriple();
+ if (TT.size() >= 7 && std::string(TT.begin(), TT.begin()+7) == "mipsel-")
+ return 20;
+
+ if (TT.size() >= 15 && std::string(TT.begin(),
+ TT.begin()+15) == "mipsallegrexel-")
+ return 20;
+
+ if (TT.size() == 3 && std::string(TT.begin(), TT.begin()+3) == "psp")
+ return 20;
+
return 0;
}
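Both getModuleMatchQuality implementations above are plain prefix tests on the triple: the big-endian target claims "mips-" and "mipsallegrex-", the little-endian one claims "mipsel-", "mipsallegrexel-" and the bare "psp" triple. A sketch of the prefix check they hand-roll (illustrative only, not part of the patch):

  #include <string>

  static bool hasPrefix(const std::string &TT, const std::string &Prefix) {
    return TT.size() >= Prefix.size() &&
           std::string(TT.begin(), TT.begin() + Prefix.size()) == Prefix;
  }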
Modified: llvm/branches/non-call-eh/lib/Target/Mips/MipsTargetMachine.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/Mips/MipsTargetMachine.h?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/Mips/MipsTargetMachine.h (original)
+++ llvm/branches/non-call-eh/lib/Target/Mips/MipsTargetMachine.h Sun Jul 6 15:45:41 2008
@@ -33,18 +33,18 @@
virtual const TargetAsmInfo *createTargetAsmInfo() const;
public:
- MipsTargetMachine(const Module &M, const std::string &FS);
+ MipsTargetMachine(const Module &M, const std::string &FS, bool isLittle);
virtual const MipsInstrInfo *getInstrInfo() const
{ return &InstrInfo; }
virtual const TargetFrameInfo *getFrameInfo() const
{ return &FrameInfo; }
- virtual const TargetSubtarget *getSubtargetImpl() const
+ virtual const MipsSubtarget *getSubtargetImpl() const
{ return &Subtarget; }
virtual const TargetData *getTargetData() const
{ return &DataLayout;}
- virtual const TargetRegisterInfo *getRegisterInfo() const {
+ virtual const MipsRegisterInfo *getRegisterInfo() const {
return &InstrInfo.getRegisterInfo();
}
@@ -60,6 +60,16 @@
virtual bool addAssemblyEmitter(PassManagerBase &PM, bool Fast,
std::ostream &Out);
};
+
+/// MipselTargetMachine - Mipsel target machine.
+///
+class MipselTargetMachine : public MipsTargetMachine {
+public:
+ MipselTargetMachine(const Module &M, const std::string &FS);
+
+ static unsigned getModuleMatchQuality(const Module &M);
+};
+
} // End llvm namespace
#endif
Propchange: llvm/branches/non-call-eh/lib/Target/PIC16/
------------------------------------------------------------------------------
--- svn:ignore (added)
+++ svn:ignore Sun Jul 6 15:45:41 2008
@@ -0,0 +1,3 @@
+Release
+Debug
+PIC16Gen*.inc
Added: llvm/branches/non-call-eh/lib/Target/PIC16/Makefile
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PIC16/Makefile?rev=53163&view=auto
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PIC16/Makefile (added)
+++ llvm/branches/non-call-eh/lib/Target/PIC16/Makefile Sun Jul 6 15:45:41 2008
@@ -0,0 +1,21 @@
+##===- lib/Target/PIC16/Makefile ---------------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+LEVEL = ../../..
+LIBRARYNAME = LLVMPIC16
+TARGET = PIC16
+
+# Make sure that tblgen is run, first thing.
+BUILT_SOURCES = PIC16GenRegisterInfo.h.inc PIC16GenRegisterNames.inc \
+ PIC16GenRegisterInfo.inc PIC16GenInstrNames.inc \
+ PIC16GenInstrInfo.inc PIC16GenAsmWriter.inc \
+ PIC16GenDAGISel.inc PIC16GenCallingConv.inc \
+ PIC16GenSubtarget.inc
+
+include $(LEVEL)/Makefile.common
+
Added: llvm/branches/non-call-eh/lib/Target/PIC16/PIC16.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PIC16/PIC16.h?rev=53163&view=auto
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PIC16/PIC16.h (added)
+++ llvm/branches/non-call-eh/lib/Target/PIC16/PIC16.h Sun Jul 6 15:45:41 2008
@@ -0,0 +1,38 @@
+//===-- PIC16.h - Top-level interface for PIC16 representation --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the entry points for global functions defined in
+// the LLVM PIC16 back-end.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef TARGET_PIC16_H
+#define TARGET_PIC16_H
+
+#include <iosfwd>
+
+namespace llvm {
+ class PIC16TargetMachine;
+ class FunctionPassManager;
+ class FunctionPass;
+ class MachineCodeEmitter;
+
+ FunctionPass *createPIC16ISelDag(PIC16TargetMachine &TM);
+ FunctionPass *createPIC16CodePrinterPass(std::ostream &OS,
+ PIC16TargetMachine &TM);
+} // end namespace llvm;
+
+// Defines symbolic names for PIC16 registers. This defines a mapping from
+// register name to register number.
+#include "PIC16GenRegisterNames.inc"
+
+// Defines symbolic names for the PIC16 instructions.
+#include "PIC16GenInstrNames.inc"
+
+#endif
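
A rough sketch, under the assumption that PIC16TargetMachine overrides the
usual LLVMTargetMachine hooks (its .cpp is not shown here), of how these entry
points are typically wired into the pass pipeline:

    bool PIC16TargetMachine::addInstSelector(PassManagerBase &PM, bool /*Fast*/) {
      PM.add(createPIC16ISelDag(*this));               // DAG-to-DAG instruction selection
      return false;
    }

    bool PIC16TargetMachine::addAssemblyEmitter(PassManagerBase &PM, bool /*Fast*/,
                                                std::ostream &Out) {
      PM.add(createPIC16CodePrinterPass(Out, *this));  // PIC16 assembly printer
      return false;
    }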
Added: llvm/branches/non-call-eh/lib/Target/PIC16/PIC16.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PIC16/PIC16.td?rev=53163&view=auto
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PIC16/PIC16.td (added)
+++ llvm/branches/non-call-eh/lib/Target/PIC16/PIC16.td Sun Jul 6 15:45:41 2008
@@ -0,0 +1,46 @@
+//===- PIC16.td - Describe the PIC16 Target Machine -----------*- tblgen -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This is the top level entry point for the PIC16 target.
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Target-independent interfaces
+//===----------------------------------------------------------------------===//
+
+include "../Target.td"
+
+//===----------------------------------------------------------------------===//
+// Descriptions
+//===----------------------------------------------------------------------===//
+
+include "PIC16RegisterInfo.td"
+include "PIC16CallingConv.td"
+include "PIC16InstrInfo.td"
+
+def PIC16InstrInfo : InstrInfo {
+ let TSFlagsFields = [];
+ let TSFlagsShifts = [];
+}
+
+
+
+// Not currently supported, but work as SubtargetFeature placeholder.
+def FeaturePIC16Old : SubtargetFeature<"pic16old", "IsPIC16Old", "true",
+ "PIC16 Old ISA Support">;
+
+//===----------------------------------------------------------------------===//
+// PIC16 processors supported.
+//===----------------------------------------------------------------------===//
+
+def : Processor<"generic", NoItineraries, []>;
+
+def PIC16 : Target {
+ let InstructionSet = PIC16InstrInfo;
+}
+
Added: llvm/branches/non-call-eh/lib/Target/PIC16/PIC16AsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PIC16/PIC16AsmPrinter.cpp?rev=53163&view=auto
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PIC16/PIC16AsmPrinter.cpp (added)
+++ llvm/branches/non-call-eh/lib/Target/PIC16/PIC16AsmPrinter.cpp Sun Jul 6 15:45:41 2008
@@ -0,0 +1,536 @@
+//===-- PIC16AsmPrinter.cpp - PIC16 LLVM assembly writer ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a printer that converts from our internal representation
+// of machine-dependent LLVM code to PIC16 assembly language.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "asm-printer"
+#include "PIC16.h"
+#include "PIC16TargetMachine.h"
+#include "PIC16ConstantPoolValue.h"
+#include "PIC16InstrInfo.h"
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineConstantPool.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Mangler.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Target/TargetAsmInfo.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include <cctype>
+
+using namespace llvm;
+
+STATISTIC(EmittedInsts, "Number of machine instrs printed");
+
+namespace {
+ struct VISIBILITY_HIDDEN PIC16AsmPrinter : public AsmPrinter {
+ PIC16AsmPrinter(std::ostream &O, TargetMachine &TM, const TargetAsmInfo *T)
+ : AsmPrinter(O, TM, T) {
+ }
+
+
+ /// We name each basic block in a Function with a unique number, so
+ /// that we can consistently refer to them later. This is cleared
+ /// at the beginning of each call to runOnMachineFunction().
+ ///
+ typedef std::map<const Value *, unsigned> ValueMapTy;
+ ValueMapTy NumberForBB;
+
+ /// Keeps the set of GlobalValues that require non-lazy-pointers for
+ /// indirect access.
+ std::set<std::string> GVNonLazyPtrs;
+
+ /// Keeps the set of external function GlobalAddresses that the asm
+ /// printer should generate stubs for.
+ std::set<std::string> FnStubs;
+
+ /// True if asm printer is printing a series of CONSTPOOL_ENTRY.
+ bool InCPMode;
+
+ virtual const char *getPassName() const {
+ return "PIC16 Assembly Printer";
+ }
+
+ void printOperand(const MachineInstr *MI, int opNum,
+ const char *Modifier = 0);
+
+ void printSOImmOperand(const MachineInstr *MI, int opNum);
+
+ void printAddrModeOperand(const MachineInstr *MI, int OpNo);
+
+ void printRegisterList(const MachineInstr *MI, int opNum);
+ void printCPInstOperand(const MachineInstr *MI, int opNum,
+ const char *Modifier);
+
+
+ bool printInstruction(const MachineInstr *MI); // autogenerated.
+ void emitFunctionStart(MachineFunction &F);
+ bool runOnMachineFunction(MachineFunction &F);
+ bool doInitialization(Module &M);
+ bool doFinalization(Module &M);
+
+ virtual void EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV);
+
+ void getAnalysisUsage(AnalysisUsage &AU) const;
+
+ public:
+ void SwitchToTextSection(const char *NewSection,
+ const GlobalValue *GV = NULL);
+ void SwitchToDataSection(const char *NewSection,
+ const GlobalValue *GV = NULL);
+ void SwitchToDataOvrSection(const char *NewSection,
+ const GlobalValue *GV = NULL);
+ };
+} // end of anonymous namespace
+
+#include "PIC16GenAsmWriter.inc"
+
+/// createPIC16CodePrinterPass - Returns a pass that prints the PIC16
+/// assembly code for a MachineFunction to the given output stream,
+/// using the given target machine description. This should work
+/// regardless of whether the function is in SSA form.
+///
+FunctionPass *llvm::createPIC16CodePrinterPass(std::ostream &o,
+ PIC16TargetMachine &tm) {
+ return new PIC16AsmPrinter(o, tm, tm.getTargetAsmInfo());
+}
+
+void PIC16AsmPrinter::getAnalysisUsage(AnalysisUsage &AU) const
+{
+ // FIXME: Currently unimplemented.
+}
+
+
+void PIC16AsmPrinter ::
+EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV)
+{
+ printDataDirective(MCPV->getType());
+
+ PIC16ConstantPoolValue *ACPV = (PIC16ConstantPoolValue*)MCPV;
+ GlobalValue *GV = ACPV->getGV();
+ std::string Name = GV ? Mang->getValueName(GV) : TAI->getGlobalPrefix();
+ if (!GV)
+ Name += ACPV->getSymbol();
+ if (ACPV->isNonLazyPointer()) {
+ GVNonLazyPtrs.insert(Name);
+ O << TAI->getPrivateGlobalPrefix() << Name << "$non_lazy_ptr";
+ } else if (ACPV->isStub()) {
+ FnStubs.insert(Name);
+ O << TAI->getPrivateGlobalPrefix() << Name << "$stub";
+ } else {
+ O << Name;
+ }
+
+ if (ACPV->hasModifier()) O << "(" << ACPV->getModifier() << ")";
+
+ if (ACPV->getPCAdjustment() != 0) {
+ O << "-(" << TAI->getPrivateGlobalPrefix() << "PC"
+ << utostr(ACPV->getLabelId())
+ << "+" << (unsigned)ACPV->getPCAdjustment();
+
+ if (ACPV->mustAddCurrentAddress())
+ O << "-.";
+
+ O << ")";
+ }
+ O << "\n";
+
+ // If the constant pool value is an extern weak symbol, remember to emit
+ // the weak reference.
+ if (GV && GV->hasExternalWeakLinkage())
+ ExtWeakSymbols.insert(GV);
+}
+
+/// emitFunctionStart - Emit the directives used by ASM on the start of
+/// functions.
+void PIC16AsmPrinter::emitFunctionStart(MachineFunction &MF)
+{
+ // Print out the label for the function.
+ const Function *F = MF.getFunction();
+ MachineFrameInfo *FrameInfo = MF.getFrameInfo();
+ if (FrameInfo->hasStackObjects()) {
+ int indexBegin = FrameInfo->getObjectIndexBegin();
+ int indexEnd = FrameInfo->getObjectIndexEnd();
+ while (indexBegin < indexEnd) {
+ if (indexBegin == 0)
+ SwitchToDataOvrSection(F->getParent()->getModuleIdentifier().c_str(),
+ F);
+
+ O << "\t\t" << CurrentFnName << "_" << indexBegin << " " << "RES"
+ << " " << FrameInfo->getObjectSize(indexBegin) << "\n" ;
+ indexBegin++;
+ }
+ }
+ SwitchToTextSection(CurrentFnName.c_str(), F);
+ O << "_" << CurrentFnName << ":" ;
+ O << "\n";
+}
+
+
+/// runOnMachineFunction - This uses the printInstruction()
+/// method to print assembly for each instruction.
+///
+bool PIC16AsmPrinter::runOnMachineFunction(MachineFunction &MF)
+{
+ SetupMachineFunction(MF);
+ O << "\n";
+
+ // What's my mangled name?
+ CurrentFnName = Mang->getValueName(MF.getFunction());
+
+ // Emit the function start directives
+ emitFunctionStart(MF);
+
+ // Print out code for the function.
+ for (MachineFunction::const_iterator I = MF.begin(), E = MF.end();
+ I != E; ++I) {
+ // Print a label for the basic block.
+ if (I != MF.begin()) {
+ printBasicBlockLabel(I, true);
+ O << '\n';
+ }
+ for (MachineBasicBlock::const_iterator II = I->begin(), E = I->end();
+ II != E; ++II) {
+ // Print the assembly for the instruction.
+ O << '\t';
+ printInstruction(II);
+ ++EmittedInsts;
+ }
+ }
+
+ // We didn't modify anything.
+ return false;
+}
+
+void PIC16AsmPrinter::
+printOperand(const MachineInstr *MI, int opNum, const char *Modifier)
+{
+ const MachineOperand &MO = MI->getOperand(opNum);
+ const TargetRegisterInfo &RI = *TM.getRegisterInfo();
+
+ switch (MO.getType()) {
+ case MachineOperand::MO_Register:
+ if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
+ O << RI.get(MO.getReg()).Name;
+ else
+ assert(0 && "not implemented");
+ break;
+
+ case MachineOperand::MO_Immediate:
+ if (!Modifier || strcmp(Modifier, "no_hash") != 0)
+ O << "#";
+ O << (int)MO.getImm();
+ break;
+
+ case MachineOperand::MO_MachineBasicBlock:
+ printBasicBlockLabel(MO.getMBB());
+ return;
+
+ case MachineOperand::MO_GlobalAddress:
+ O << Mang->getValueName(MO.getGlobal())<<'+'<<MO.getOffset();
+ break;
+
+ case MachineOperand::MO_ExternalSymbol:
+ O << MO.getSymbolName();
+ break;
+
+ case MachineOperand::MO_ConstantPoolIndex:
+ O << TAI->getPrivateGlobalPrefix() << "CPI" << getFunctionNumber()
+ << '_' << MO.getIndex();
+ break;
+
+ case MachineOperand::MO_FrameIndex:
+ O << "_" << CurrentFnName
+ << '+' << MO.getIndex();
+ break;
+
+ case MachineOperand::MO_JumpTableIndex:
+ O << TAI->getPrivateGlobalPrefix() << "JTI" << getFunctionNumber()
+ << '_' << MO.getIndex();
+ break;
+
+ default:
+ O << "<unknown operand type>"; abort ();
+ break;
+ } // end switch.
+}
+
+static void
+printSOImm(std::ostream &O, int64_t V, const TargetAsmInfo *TAI)
+{
+ assert(V < (1 << 12) && "Not a valid so_imm value!");
+
+ O << (unsigned) V;
+}
+
+/// printSOImmOperand - SOImm is 4-bit rotated amount in bits 8-11 with 8-bit
+/// immediate in bits 0-7.
+void PIC16AsmPrinter::printSOImmOperand(const MachineInstr *MI, int OpNum)
+{
+ const MachineOperand &MO = MI->getOperand(OpNum);
+ assert(MO.isImmediate() && "Not a valid so_imm value!");
+ printSOImm(O, MO.getImm(), TAI);
+}
+
+
+void PIC16AsmPrinter::printAddrModeOperand(const MachineInstr *MI, int Op)
+{
+ const MachineOperand &MO1 = MI->getOperand(Op);
+ const MachineOperand &MO2 = MI->getOperand(Op+1);
+
+ if (MO2.isFrameIndex ()) {
+ printOperand(MI, Op+1);
+ return;
+ }
+
+ if (!MO1.isRegister()) {
+ // FIXME: This is for CP entries, but isn't right.
+ printOperand(MI, Op);
+ return;
+ }
+
+ // If this is a stack slot
+ if (MO1.isRegister()) {
+ if (strcmp(TM.getRegisterInfo()->get(MO1.getReg()).Name, "SP") == 0) {
+ O << CurrentFnName <<"_"<< MO2.getImm();
+ return;
+ }
+ O << "[" << TM.getRegisterInfo()->get(MO1.getReg()).Name;
+ O << "+";
+ O << MO2.getImm();
+ O << "]";
+ return;
+ }
+
+ O << "[" << TM.getRegisterInfo()->get(MO1.getReg()).Name;
+ O << "]";
+}
+
+
+void PIC16AsmPrinter::printRegisterList(const MachineInstr *MI, int opNum)
+{
+ O << "{";
+ for (unsigned i = opNum, e = MI->getNumOperands(); i != e; ++i) {
+ printOperand(MI, i);
+ if (i != e-1) O << ", ";
+ }
+ O << "}";
+}
+
+void PIC16AsmPrinter::
+printCPInstOperand(const MachineInstr *MI, int OpNo, const char *Modifier)
+{
+ assert(Modifier && "This operand only works with a modifier!");
+
+ // There are two aspects to a CONSTANTPOOL_ENTRY operand, the label and the
+ // data itself.
+ if (!strcmp(Modifier, "label")) {
+ unsigned ID = MI->getOperand(OpNo).getImm();
+ O << TAI->getPrivateGlobalPrefix() << "CPI" << getFunctionNumber()
+ << '_' << ID << ":\n";
+ } else {
+ assert(!strcmp(Modifier, "cpentry") && "Unknown modifier for CPE");
+ unsigned CPI = MI->getOperand(OpNo).getIndex();
+
+ const MachineConstantPoolEntry &MCPE = // Chasing pointers is fun?
+ MI->getParent()->getParent()->getConstantPool()->getConstants()[CPI];
+
+ if (MCPE.isMachineConstantPoolEntry())
+ EmitMachineConstantPoolValue(MCPE.Val.MachineCPVal);
+ else {
+ EmitGlobalConstant(MCPE.Val.ConstVal);
+ // remember to emit the weak reference
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(MCPE.Val.ConstVal))
+ if (GV->hasExternalWeakLinkage())
+ ExtWeakSymbols.insert(GV);
+ }
+ }
+}
+
+
+bool PIC16AsmPrinter::doInitialization(Module &M)
+{
+ bool Result = AsmPrinter::doInitialization(M);
+ return Result;
+}
+
+bool PIC16AsmPrinter::doFinalization(Module &M)
+{
+ const TargetData *TD = TM.getTargetData();
+
+ for (Module::const_global_iterator I = M.global_begin(), E = M.global_end();
+ I != E; ++I) {
+ if (!I->hasInitializer()) // External globals require no code
+ continue;
+
+ if (EmitSpecialLLVMGlobal(I)) {
+ continue;
+ }
+
+ std::string name = Mang->getValueName(I);
+ Constant *C = I->getInitializer();
+ const Type *Ty = C->getType();
+ unsigned Size = TD->getABITypeSize(Ty);
+ unsigned Align = TD->getPreferredAlignmentLog(I);
+
+ const char *VisibilityDirective = NULL;
+ if (I->hasHiddenVisibility())
+ VisibilityDirective = TAI->getHiddenDirective();
+ else if (I->hasProtectedVisibility())
+ VisibilityDirective = TAI->getProtectedDirective();
+
+ if (VisibilityDirective)
+ O << VisibilityDirective << name << "\n";
+
+ if (C->isNullValue()) {
+ if (I->hasExternalLinkage()) {
+ if (const char *Directive = TAI->getZeroFillDirective()) {
+ O << "\t.globl\t" << name << "\n";
+ O << Directive << "__DATA__, __common, " << name << ", "
+ << Size << ", " << Align << "\n";
+ continue;
+ }
+ }
+
+ if (!I->hasSection() &&
+ (I->hasInternalLinkage() || I->hasWeakLinkage() ||
+ I->hasLinkOnceLinkage() || I->hasCommonLinkage())) {
+ if (Size == 0) Size = 1; // .comm Foo, 0 is undefined, avoid it.
+ if (!NoZerosInBSS && TAI->getBSSSection())
+ SwitchToDataSection(M.getModuleIdentifier().c_str(), I);
+ else
+ SwitchToDataSection(TAI->getDataSection(), I);
+ if (TAI->getLCOMMDirective() != NULL) {
+ if (I->hasInternalLinkage()) {
+ O << TAI->getLCOMMDirective() << name << "," << Size;
+ } else
+ O << TAI->getCOMMDirective() << name << "," << Size;
+ } else {
+ if (I->hasInternalLinkage())
+ O << "\t.local\t" << name << "\n";
+
+ O << TAI->getCOMMDirective() <<"\t" << name << " " <<"RES"<< " "
+ << Size;
+ O << "\n\t\tGLOBAL" <<" "<< name;
+ if (TAI->getCOMMDirectiveTakesAlignment())
+ O << "," << (TAI->getAlignmentIsInBytes() ? (1 << Align) : Align);
+ }
+ continue;
+ }
+ }
+
+ switch (I->getLinkage()) {
+ case GlobalValue::AppendingLinkage:
+ // FIXME: appending linkage variables should go into a section of
+ // their name or something. For now, just emit them as external.
+ // FALL THROUGH
+
+ case GlobalValue::ExternalLinkage:
+ O << "\t.globl " << name << "\n";
+ // FALL THROUGH
+
+ case GlobalValue::InternalLinkage:
+ if (I->isConstant()) {
+ const ConstantArray *CVA = dyn_cast<ConstantArray>(C);
+ if (TAI->getCStringSection() && CVA && CVA->isCString()) {
+ SwitchToDataSection(TAI->getCStringSection(), I);
+ break;
+ }
+ }
+ break;
+
+ default:
+ assert(0 && "Unknown linkage type!");
+ break;
+ } // end switch.
+
+ EmitAlignment(Align, I);
+ O << name << ":\t\t\t\t" << TAI->getCommentString() << " " << I->getName()
+ << "\n";
+
+ // If the initializer is an extern weak symbol, remember to emit the weak
+ // reference!
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
+ if (GV->hasExternalWeakLinkage())
+ ExtWeakSymbols.insert(GV);
+
+ EmitGlobalConstant(C);
+ O << '\n';
+ } // end for.
+
+ O << "\n "<< "END";
+ return AsmPrinter::doFinalization(M);
+}
+
+void PIC16AsmPrinter::
+SwitchToTextSection(const char *NewSection, const GlobalValue *GV)
+{
+ O << "\n";
+ if (NewSection && *NewSection) {
+ std::string codeSection = "code_";
+ codeSection += NewSection;
+ codeSection += " ";
+ codeSection += "CODE";
+ AsmPrinter::SwitchToTextSection(codeSection.c_str(), GV);
+ }
+ else
+ AsmPrinter::SwitchToTextSection(NewSection, GV);
+}
+
+void PIC16AsmPrinter::
+SwitchToDataSection(const char *NewSection, const GlobalValue *GV)
+{
+ // Need to append index for page.
+ O << "\n";
+ if (NewSection && *NewSection) {
+ std::string dataSection = "udata_";
+ dataSection += NewSection;
+ if (dataSection.substr(dataSection.length() - 2).compare(".o") == 0) {
+ dataSection = dataSection.substr(0, dataSection.length() - 2);
+ }
+ dataSection += " ";
+ dataSection += "UDATA";
+ AsmPrinter::SwitchToDataSection(dataSection.c_str(), GV);
+ }
+ else
+ AsmPrinter::SwitchToDataSection(NewSection, GV);
+}
+
+void PIC16AsmPrinter::
+SwitchToDataOvrSection(const char *NewSection, const GlobalValue *GV)
+{
+ O << "\n";
+ if (NewSection && *NewSection) {
+ std::string dataSection = "frame_";
+ dataSection += NewSection;
+ if (dataSection.substr(dataSection.length() - 2).compare(".o") == 0) {
+ dataSection = dataSection.substr(0, dataSection.length() - 2);
+ }
+ dataSection += "_";
+ dataSection += CurrentFnName;
+ dataSection += " ";
+ dataSection += "UDATA_OVR";
+ AsmPrinter::SwitchToDataSection(dataSection.c_str(), GV);
+ }
+ else
+ AsmPrinter::SwitchToDataSection(NewSection, GV);
+}
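
To summarize the section-naming scheme the three helpers above implement, a
small standalone sketch (the module and function names are assumptions):

    #include <cassert>
    #include <string>

    static void sectionNameSketch() {
      std::string Fn = "bar", Mod = "foo.o";
      std::string Text  = "code_"  + Fn + " CODE";                               // "code_bar CODE"
      std::string Data  = "udata_" + Mod.substr(0, Mod.size()-2) + " UDATA";     // ".o" stripped
      std::string Frame = "frame_" + Mod.substr(0, Mod.size()-2) + "_" + Fn + " UDATA_OVR";
      assert(Data  == "udata_foo UDATA");
      assert(Frame == "frame_foo_bar UDATA_OVR");
      (void)Text;
    }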
Added: llvm/branches/non-call-eh/lib/Target/PIC16/PIC16CallingConv.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PIC16/PIC16CallingConv.td?rev=53163&view=auto
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PIC16/PIC16CallingConv.td (added)
+++ llvm/branches/non-call-eh/lib/Target/PIC16/PIC16CallingConv.td Sun Jul 6 15:45:41 2008
@@ -0,0 +1,16 @@
+//===- PIC16CallingConv.td - Calling Conventions PIC16 -----*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This describes the calling conventions for the PIC16 architectures.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Return Value Calling Conventions
+//===----------------------------------------------------------------------===//
Added: llvm/branches/non-call-eh/lib/Target/PIC16/PIC16ConstantPoolValue.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PIC16/PIC16ConstantPoolValue.cpp?rev=53163&view=auto
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PIC16/PIC16ConstantPoolValue.cpp (added)
+++ llvm/branches/non-call-eh/lib/Target/PIC16/PIC16ConstantPoolValue.cpp Sun Jul 6 15:45:41 2008
@@ -0,0 +1,88 @@
+//===- PIC16ConstantPoolValue.cpp - PIC16 constantpool value --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the PIC16 specific constantpool value class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "PIC16ConstantPoolValue.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/GlobalValue.h"
+#include "llvm/Type.h"
+using namespace llvm;
+
+PIC16ConstantPoolValue::PIC16ConstantPoolValue(GlobalValue *gv, unsigned id,
+ PIC16CP::PIC16CPKind k,
+ unsigned char PCAdj,
+ const char *Modif, bool AddCA)
+ : MachineConstantPoolValue((const Type*)gv->getType()),
+ GV(gv), S(NULL), LabelId(id), Kind(k), PCAdjust(PCAdj),
+ Modifier(Modif), AddCurrentAddress(AddCA) {}
+
+PIC16ConstantPoolValue::PIC16ConstantPoolValue(const char *s, unsigned id,
+ PIC16CP::PIC16CPKind k,
+ unsigned char PCAdj,
+ const char *Modif, bool AddCA)
+ : MachineConstantPoolValue((const Type*)Type::Int32Ty),
+ GV(NULL), S(s), LabelId(id), Kind(k), PCAdjust(PCAdj),
+ Modifier(Modif), AddCurrentAddress(AddCA) {}
+
+PIC16ConstantPoolValue::PIC16ConstantPoolValue(GlobalValue *gv,
+ PIC16CP::PIC16CPKind k,
+ const char *Modif)
+ : MachineConstantPoolValue((const Type*)Type::Int32Ty),
+ GV(gv), S(NULL), LabelId(0), Kind(k), PCAdjust(0),
+ Modifier(Modif) {}
+
+int PIC16ConstantPoolValue::getExistingMachineCPValue(MachineConstantPool *CP,
+ unsigned Alignment) {
+ unsigned AlignMask = (1 << Alignment)-1;
+ const std::vector<MachineConstantPoolEntry> Constants = CP->getConstants();
+ for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
+ if (Constants[i].isMachineConstantPoolEntry() &&
+ (Constants[i].Offset & AlignMask) == 0) {
+ PIC16ConstantPoolValue *CPV =
+ (PIC16ConstantPoolValue *)Constants[i].Val.MachineCPVal;
+ if (CPV->GV == GV &&
+ CPV->S == S &&
+ CPV->LabelId == LabelId &&
+ CPV->Kind == Kind &&
+ CPV->PCAdjust == PCAdjust)
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+void
+PIC16ConstantPoolValue::AddSelectionDAGCSEId(FoldingSetNodeID &ID) {
+ ID.AddPointer(GV);
+ ID.AddPointer(S);
+ ID.AddInteger(LabelId);
+ ID.AddInteger((unsigned)Kind);
+ ID.AddInteger(PCAdjust);
+}
+
+void PIC16ConstantPoolValue::print(std::ostream &O) const {
+ if (GV)
+ O << GV->getName();
+ else
+ O << S;
+ if (isNonLazyPointer()) O << "$non_lazy_ptr";
+ else if (isStub()) O << "$stub";
+ if (Modifier) O << "(" << Modifier << ")";
+ if (PCAdjust != 0) {
+ O << "-(LPIC" << LabelId << "+"
+ << (unsigned)PCAdjust;
+ if (AddCurrentAddress)
+ O << "-.";
+ O << ")";
+ }
+}
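
As an illustration of what print() above emits, a minimal sketch (the symbol
name, label id and PC adjustment are made-up values):

    #include "PIC16ConstantPoolValue.h"
    #include <iostream>

    static void printSketch() {
      using namespace llvm;
      // A stub entry for an external symbol, label id 3, PC adjustment 8.
      PIC16ConstantPoolValue CPV("memcpy", 3, PIC16CP::CPStub, /*PCAdj*/ 8);
      CPV.print(std::cout);       // prints roughly: memcpy$stub-(LPIC3+8)
      std::cout << '\n';
    }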
Added: llvm/branches/non-call-eh/lib/Target/PIC16/PIC16ConstantPoolValue.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PIC16/PIC16ConstantPoolValue.h?rev=53163&view=auto
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PIC16/PIC16ConstantPoolValue.h (added)
+++ llvm/branches/non-call-eh/lib/Target/PIC16/PIC16ConstantPoolValue.h Sun Jul 6 15:45:41 2008
@@ -0,0 +1,75 @@
+//===- PIC16ConstantPoolValue.h - PIC16 constantpool value ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the PIC16 specific constantpool value class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_PIC16_CONSTANTPOOLVALUE_H
+#define LLVM_TARGET_PIC16_CONSTANTPOOLVALUE_H
+
+#include "llvm/CodeGen/MachineConstantPool.h"
+
+namespace llvm {
+
+namespace PIC16CP {
+ enum PIC16CPKind {
+ CPValue,
+ CPNonLazyPtr,
+ CPStub
+ };
+}
+
+/// PIC16ConstantPoolValue - PIC16 specific constantpool value. This is used to
+/// represent PC relative displacement between the address of the load
+/// instruction and the global value being loaded, i.e. (&GV-(LPIC+8)).
+class PIC16ConstantPoolValue : public MachineConstantPoolValue {
+ GlobalValue *GV; // GlobalValue being loaded.
+ const char *S; // ExtSymbol being loaded.
+ unsigned LabelId; // Label id of the load.
+ PIC16CP::PIC16CPKind Kind; // non_lazy_ptr or stub?
+ unsigned char PCAdjust; // Extra adjustment if constantpool is pc relative.
+ // 8 for PIC16
+ const char *Modifier; // GV modifier i.e. (&GV(modifier)-(LPIC+8))
+ bool AddCurrentAddress;
+
+public:
+ PIC16ConstantPoolValue(GlobalValue *gv, unsigned id,
+ PIC16CP::PIC16CPKind Kind = PIC16CP::CPValue,
+ unsigned char PCAdj = 0, const char *Modifier = NULL,
+ bool AddCurrentAddress = false);
+ PIC16ConstantPoolValue(const char *s, unsigned id,
+ PIC16CP::PIC16CPKind Kind = PIC16CP::CPValue,
+ unsigned char PCAdj = 0, const char *Modifier = NULL,
+ bool AddCurrentAddress = false);
+ PIC16ConstantPoolValue(GlobalValue *GV, PIC16CP::PIC16CPKind Kind,
+ const char *Modifier);
+
+
+ GlobalValue *getGV() const { return GV; }
+ const char *getSymbol() const { return S; }
+ const char *getModifier() const { return Modifier; }
+ bool hasModifier() const { return Modifier != NULL; }
+ bool mustAddCurrentAddress() const { return AddCurrentAddress; }
+ unsigned getLabelId() const { return LabelId; }
+ bool isNonLazyPointer() const { return Kind == PIC16CP::CPNonLazyPtr; }
+ bool isStub() const { return Kind == PIC16CP::CPStub; }
+ unsigned char getPCAdjustment() const { return PCAdjust; }
+
+ virtual int getExistingMachineCPValue(MachineConstantPool *CP,
+ unsigned Alignment);
+
+ virtual void AddSelectionDAGCSEId(FoldingSetNodeID &ID);
+
+ virtual void print(std::ostream &O) const;
+};
+
+}
+
+#endif
Added: llvm/branches/non-call-eh/lib/Target/PIC16/PIC16ISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PIC16/PIC16ISelDAGToDAG.cpp?rev=53163&view=auto
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PIC16/PIC16ISelDAGToDAG.cpp (added)
+++ llvm/branches/non-call-eh/lib/Target/PIC16/PIC16ISelDAGToDAG.cpp Sun Jul 6 15:45:41 2008
@@ -0,0 +1,282 @@
+//===-- PIC16ISelDAGToDAG.cpp - A dag to dag inst selector for PIC16 ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an instruction selector for the PIC16 target.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "pic16-isel"
+
+#include "PIC16.h"
+#include "PIC16ISelLowering.h"
+#include "PIC16RegisterInfo.h"
+#include "PIC16Subtarget.h"
+#include "PIC16TargetMachine.h"
+#include "llvm/GlobalValue.h"
+#include "llvm/Instructions.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Type.h"
+#include "llvm/CodeGen/MachineConstantPool.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/SelectionDAGISel.h"
+#include "llvm/CodeGen/SelectionDAGNodes.h"
+#include "llvm/Support/CFG.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Target/TargetMachine.h"
+#include <queue>
+#include <set>
+
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// Instruction Selector Implementation
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// PIC16DAGToDAGISel - PIC16 specific code to select PIC16 machine
+// instructions for SelectionDAG operations.
+//===----------------------------------------------------------------------===//
+namespace {
+
+class VISIBILITY_HIDDEN PIC16DAGToDAGISel : public SelectionDAGISel {
+
+ /// TM - Keep a reference to PIC16TargetMachine.
+ PIC16TargetMachine &TM;
+
+ /// PIC16Lowering - This object fully describes how to lower LLVM code to a
+ /// PIC16-specific SelectionDAG.
+ PIC16TargetLowering PIC16Lowering;
+
+public:
+ PIC16DAGToDAGISel(PIC16TargetMachine &tm) :
+ SelectionDAGISel(PIC16Lowering),
+ TM(tm), PIC16Lowering(*TM.getTargetLowering()) {}
+
+ virtual void InstructionSelect(SelectionDAG &SD);
+
+ // Pass Name
+ virtual const char *getPassName() const {
+ return "PIC16 DAG->DAG Pattern Instruction Selection";
+ }
+
+private:
+ // Include the pieces autogenerated from the target description.
+#include "PIC16GenDAGISel.inc"
+
+ SDNode *Select(SDOperand N);
+
+ // Select addressing mode. Currently assumes a base + offset addressing mode.
+ bool SelectAM(SDOperand Op, SDOperand N, SDOperand &Base, SDOperand &Offset);
+ bool SelectDirectAM(SDOperand Op, SDOperand N, SDOperand &Base,
+ SDOperand &Offset);
+ bool StoreInDirectAM(SDOperand Op, SDOperand N, SDOperand &fsr);
+ bool LoadFSR(SDOperand Op, SDOperand N, SDOperand &Base, SDOperand &Offset);
+ bool LoadNothing(SDOperand Op, SDOperand N, SDOperand &Base,
+ SDOperand &Offset);
+
+ // getI8Imm - Return a target constant with the specified
+ // value, of type i8.
+ inline SDOperand getI8Imm(unsigned Imm) {
+ return CurDAG->getTargetConstant(Imm, MVT::i8);
+ }
+
+
+#ifndef NDEBUG
+ unsigned Indent;
+#endif
+};
+
+}
+
+/// InstructionSelect - This callback is invoked by
+/// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
+void PIC16DAGToDAGISel::InstructionSelect(SelectionDAG &SD)
+{
+ DEBUG(BB->dump());
+ // Codegen the basic block.
+
+ DOUT << "===== Instruction selection begins:\n";
+#ifndef NDEBUG
+ Indent = 0;
+#endif
+
+ // Select target instructions for the DAG.
+ SD.setRoot(SelectRoot(SD.getRoot()));
+
+ DOUT << "===== Instruction selection ends:\n";
+
+ SD.RemoveDeadNodes();
+}
+
+
+bool PIC16DAGToDAGISel::
+SelectDirectAM (SDOperand Op, SDOperand N, SDOperand &Base, SDOperand &Offset)
+{
+ GlobalAddressSDNode *GA;
+ ConstantSDNode *GC;
+
+ // if Address is FI, get the TargetFrameIndex.
+ if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(N)) {
+ DOUT << "--------- its frame Index\n";
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
+ Offset = CurDAG->getTargetConstant(0, MVT::i32);
+ return true;
+ }
+
+ if (N.getOpcode() == ISD::GlobalAddress) {
+ GA = dyn_cast<GlobalAddressSDNode>(N);
+ Offset = CurDAG->getTargetConstant((unsigned char)GA->getOffset(), MVT::i8);
+ Base = CurDAG->getTargetGlobalAddress(GA->getGlobal(), MVT::i16,
+ GA->getOffset());
+ return true;
+ }
+
+ if (N.getOpcode() == ISD::ADD) {
+ GC = dyn_cast<ConstantSDNode>(N.getOperand(1));
+ Offset = CurDAG->getTargetConstant((unsigned char)GC->getValue(), MVT::i8);
+ if ((GA = dyn_cast<GlobalAddressSDNode>(N.getOperand(0)))) {
+ Base = CurDAG->getTargetGlobalAddress(GA->getGlobal(), MVT::i16,
+ GC->getValue());
+ return true;
+ }
+ else if (FrameIndexSDNode *FIN
+ = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
+ Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+
+// FIXME: must also account for preinc/predec/postinc/postdec.
+bool PIC16DAGToDAGISel::
+StoreInDirectAM (SDOperand Op, SDOperand N, SDOperand &fsr)
+{
+ RegisterSDNode *Reg;
+ if (N.getOpcode() == ISD::LOAD) {
+ LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
+ if (LD) {
+ fsr = LD->getBasePtr();
+ }
+ else if (isa<RegisterSDNode>(N.Val)) {
+ // FIXME: An attempt to retrieve the register number,
+ // but it does not work.
+ DOUT << "this is a register\n";
+ Reg = dyn_cast<RegisterSDNode>(N.Val);
+ fsr = CurDAG->getRegister(Reg->getReg(),MVT::i16);
+ }
+ else {
+ DOUT << "this is not a register\n";
+ // FIXME must use whatever load is using
+ fsr = CurDAG->getRegister(1,MVT::i16);
+ }
+ return true;
+ }
+ return false;
+}
+
+bool PIC16DAGToDAGISel::
+LoadFSR (SDOperand Op, SDOperand N, SDOperand &Base, SDOperand &Offset)
+{
+ GlobalAddressSDNode *GA;
+
+ if (N.getOpcode() == ISD::GlobalAddress) {
+ GA = dyn_cast<GlobalAddressSDNode>(N);
+ Offset = CurDAG->getTargetConstant((unsigned char)GA->getOffset(), MVT::i8);
+ Base = CurDAG->getTargetGlobalAddress(GA->getGlobal(), MVT::i16,
+ GA->getOffset());
+ return true;
+ }
+ else if (N.getOpcode() == PIC16ISD::Package) {
+ CurDAG->setGraphColor(Op.Val, "blue");
+ CurDAG->viewGraph();
+ }
+
+ return false;
+}
+
+// LoadNothing - Don't take this seriously, it will change.
+bool PIC16DAGToDAGISel::
+LoadNothing (SDOperand Op, SDOperand N, SDOperand &Base, SDOperand &Offset)
+{
+ GlobalAddressSDNode *GA;
+ if (N.getOpcode() == ISD::GlobalAddress) {
+ GA = dyn_cast<GlobalAddressSDNode>(N);
+ DOUT << "==========" << GA->getOffset() << "\n";
+ Offset = CurDAG->getTargetConstant((unsigned char)GA->getOffset(), MVT::i8);
+ Base = CurDAG->getTargetGlobalAddress(GA->getGlobal(), MVT::i16,
+ GA->getOffset());
+ return true;
+ }
+
+ return false;
+}
+
+
+/// Select - Select instructions not customized! Used for
+/// expanded, promoted and normal instructions.
+SDNode* PIC16DAGToDAGISel::Select(SDOperand N)
+{
+ SDNode *Node = N.Val;
+ unsigned Opcode = Node->getOpcode();
+
+ // Dump information about the Node being selected
+#ifndef NDEBUG
+ DOUT << std::string(Indent, ' ') << "Selecting: ";
+ DEBUG(Node->dump(CurDAG));
+ DOUT << "\n";
+ Indent += 2;
+#endif
+
+ // If we have a custom node, we already have selected!
+ if (Opcode >= ISD::BUILTIN_OP_END && Opcode < PIC16ISD::FIRST_NUMBER) {
+#ifndef NDEBUG
+ DOUT << std::string(Indent-2, ' ') << "== ";
+ DEBUG(Node->dump(CurDAG));
+ DOUT << "\n";
+ Indent -= 2;
+#endif
+ return NULL;
+ }
+
+ ///
+ // FIXME: Instruction Selection not handled by custom or by the
+ // auto-generated tablegen selection should be handled here.
+ ///
+ switch(Opcode) {
+ default: break;
+ }
+
+ // Select the default instruction.
+ SDNode *ResNode = SelectCode(N);
+
+#ifndef NDEBUG
+ DOUT << std::string(Indent-2, ' ') << "=> ";
+ if (ResNode == NULL || ResNode == N.Val)
+ DEBUG(N.Val->dump(CurDAG));
+ else
+ DEBUG(ResNode->dump(CurDAG));
+ DOUT << "\n";
+ Indent -= 2;
+#endif
+
+ return ResNode;
+}
+
+/// createPIC16ISelDag - This pass converts a legalized DAG into a
+/// PIC16-specific DAG, ready for instruction scheduling.
+FunctionPass *llvm::createPIC16ISelDag(PIC16TargetMachine &TM) {
+ return new PIC16DAGToDAGISel(TM);
+}
+
Added: llvm/branches/non-call-eh/lib/Target/PIC16/PIC16ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PIC16/PIC16ISelLowering.cpp?rev=53163&view=auto
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PIC16/PIC16ISelLowering.cpp (added)
+++ llvm/branches/non-call-eh/lib/Target/PIC16/PIC16ISelLowering.cpp Sun Jul 6 15:45:41 2008
@@ -0,0 +1,768 @@
+//===-- PIC16ISelLowering.cpp - PIC16 DAG Lowering Implementation ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interfaces that PIC16 uses to lower LLVM code into a
+// selection DAG.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "pic16-lower"
+
+#include "PIC16ISelLowering.h"
+#include "PIC16TargetMachine.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Function.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/CallingConv.h"
+#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/SelectionDAGISel.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/Support/Debug.h"
+#include <queue>
+#include <set>
+
+using namespace llvm;
+
+const char *PIC16TargetLowering:: getTargetNodeName(unsigned Opcode) const
+{
+ switch (Opcode) {
+ case PIC16ISD::Hi : return "PIC16ISD::Hi";
+ case PIC16ISD::Lo : return "PIC16ISD::Lo";
+ case PIC16ISD::Package : return "PIC16ISD::Package";
+ case PIC16ISD::Wrapper : return "PIC16ISD::Wrapper";
+ case PIC16ISD::SetBank : return "PIC16ISD::SetBank";
+ case PIC16ISD::SetPage : return "PIC16ISD::SetPage";
+ case PIC16ISD::Branch : return "PIC16ISD::Branch";
+ case PIC16ISD::Cmp : return "PIC16ISD::Cmp";
+ case PIC16ISD::BTFSS : return "PIC16ISD::BTFSS";
+ case PIC16ISD::BTFSC : return "PIC16ISD::BTFSC";
+ case PIC16ISD::XORCC : return "PIC16ISD::XORCC";
+ case PIC16ISD::SUBCC : return "PIC16ISD::SUBCC";
+ default : return NULL;
+ }
+}
+
+PIC16TargetLowering::
+PIC16TargetLowering(PIC16TargetMachine &TM): TargetLowering(TM)
+{
+ // Set up the register classes.
+ addRegisterClass(MVT::i8, PIC16::CPURegsRegisterClass);
+ addRegisterClass(MVT::i16, PIC16::PTRRegsRegisterClass);
+
+ // Load extended operations for i1 types must be promoted.
+ setLoadXAction(ISD::EXTLOAD, MVT::i1, Promote);
+ setLoadXAction(ISD::ZEXTLOAD, MVT::i1, Promote);
+ setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);
+
+ setOperationAction(ISD::ADD, MVT::i1, Promote);
+ setOperationAction(ISD::ADD, MVT::i8, Legal);
+ setOperationAction(ISD::ADD, MVT::i16, Custom);
+ setOperationAction(ISD::ADD, MVT::i32, Expand);
+ setOperationAction(ISD::ADD, MVT::i64, Expand);
+
+ setOperationAction(ISD::SUB, MVT::i1, Promote);
+ setOperationAction(ISD::SUB, MVT::i8, Legal);
+ setOperationAction(ISD::SUB, MVT::i16, Custom);
+ setOperationAction(ISD::SUB, MVT::i32, Expand);
+ setOperationAction(ISD::SUB, MVT::i64, Expand);
+
+ setOperationAction(ISD::ADDC, MVT::i1, Promote);
+ setOperationAction(ISD::ADDC, MVT::i8, Legal);
+ setOperationAction(ISD::ADDC, MVT::i16, Custom);
+ setOperationAction(ISD::ADDC, MVT::i32, Expand);
+ setOperationAction(ISD::ADDC, MVT::i64, Expand);
+
+ setOperationAction(ISD::ADDE, MVT::i1, Promote);
+ setOperationAction(ISD::ADDE, MVT::i8, Legal);
+ setOperationAction(ISD::ADDE, MVT::i16, Custom);
+ setOperationAction(ISD::ADDE, MVT::i32, Expand);
+ setOperationAction(ISD::ADDE, MVT::i64, Expand);
+
+ setOperationAction(ISD::SUBC, MVT::i1, Promote);
+ setOperationAction(ISD::SUBC, MVT::i8, Legal);
+ setOperationAction(ISD::SUBC, MVT::i16, Custom);
+ setOperationAction(ISD::SUBC, MVT::i32, Expand);
+ setOperationAction(ISD::SUBC, MVT::i64, Expand);
+
+ setOperationAction(ISD::SUBE, MVT::i1, Promote);
+ setOperationAction(ISD::SUBE, MVT::i8, Legal);
+ setOperationAction(ISD::SUBE, MVT::i16, Custom);
+ setOperationAction(ISD::SUBE, MVT::i32, Expand);
+ setOperationAction(ISD::SUBE, MVT::i64, Expand);
+
+ // PIC16 does not have these NodeTypes below.
+ setOperationAction(ISD::SETCC, MVT::i1, Expand);
+ setOperationAction(ISD::SETCC, MVT::i8, Expand);
+ setOperationAction(ISD::SETCC, MVT::Other, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::i1, Custom);
+ setOperationAction(ISD::SELECT_CC, MVT::i8, Custom);
+
+ setOperationAction(ISD::BRCOND, MVT::i1, Expand);
+ setOperationAction(ISD::BRCOND, MVT::i8, Expand);
+ setOperationAction(ISD::BRCOND, MVT::Other, Expand);
+
+ setOperationAction(ISD::BR_CC, MVT::i1, Custom);
+ setOperationAction(ISD::BR_CC, MVT::i8, Custom);
+
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
+
+
+ // FIXME: Do we really need to Custom lower the GA ??
+ setOperationAction(ISD::GlobalAddress, MVT::i8, Custom);
+ setOperationAction(ISD::RET, MVT::Other, Custom);
+
+ setOperationAction(ISD::CTPOP, MVT::i32, Expand);
+ setOperationAction(ISD::CTTZ, MVT::i32, Expand);
+ setOperationAction(ISD::CTLZ, MVT::i32, Expand);
+ setOperationAction(ISD::ROTL, MVT::i32, Expand);
+ setOperationAction(ISD::ROTR, MVT::i32, Expand);
+ setOperationAction(ISD::BSWAP, MVT::i32, Expand);
+
+ setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
+ setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
+ setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
+
+ // We don't have line number support yet.
+ setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
+ setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
+ setOperationAction(ISD::DBG_LABEL, MVT::Other, Expand);
+ setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
+
+ // Use the default for now.
+ setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
+ setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
+
+ setOperationAction(ISD::LOAD, MVT::i1, Promote);
+ setOperationAction(ISD::LOAD, MVT::i8, Legal);
+
+ setTargetDAGCombine(ISD::LOAD);
+ setTargetDAGCombine(ISD::STORE);
+ setTargetDAGCombine(ISD::ADDE);
+ setTargetDAGCombine(ISD::ADDC);
+ setTargetDAGCombine(ISD::ADD);
+ setTargetDAGCombine(ISD::SUBE);
+ setTargetDAGCombine(ISD::SUBC);
+ setTargetDAGCombine(ISD::SUB);
+
+ setStackPointerRegisterToSaveRestore(PIC16::STKPTR);
+ computeRegisterProperties();
+}
+
+
+SDOperand PIC16TargetLowering:: LowerOperation(SDOperand Op, SelectionDAG &DAG)
+{
+ SDVTList VTList16 = DAG.getVTList(MVT::i16, MVT::i16, MVT::Other);
+ switch (Op.getOpcode()) {
+ case ISD::STORE:
+ DOUT << "reduce store\n";
+ break;
+
+ case ISD::FORMAL_ARGUMENTS:
+ DOUT << "==== lowering formal args\n";
+ return LowerFORMAL_ARGUMENTS(Op, DAG);
+
+ case ISD::GlobalAddress:
+ DOUT << "==== lowering GA\n";
+ return LowerGlobalAddress(Op, DAG);
+
+ case ISD::RET:
+ DOUT << "==== lowering ret\n";
+ return LowerRET(Op, DAG);
+
+ case ISD::FrameIndex:
+ DOUT << "==== lowering frame index\n";
+ return LowerFrameIndex(Op, DAG);
+
+ case ISD::ADDE:
+ DOUT << "==== lowering adde\n";
+ break;
+
+ case ISD::LOAD:
+ case ISD::ADD:
+ break;
+
+ case ISD::BR_CC:
+ DOUT << "==== lowering BR_CC\n";
+ return LowerBR_CC(Op, DAG);
+ } // end switch.
+ return SDOperand();
+}
+
+
+//===----------------------------------------------------------------------===//
+// Lower helper functions
+//===----------------------------------------------------------------------===//
+
+SDOperand PIC16TargetLowering::LowerBR_CC(SDOperand Op, SelectionDAG &DAG)
+{
+ MVT VT = Op.getValueType();
+ SDOperand Chain = Op.getOperand(0);
+ ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
+ SDOperand LHS = Op.getOperand(2);
+ SDOperand RHS = Op.getOperand(3);
+ SDOperand JumpVal = Op.getOperand(4);
+ SDOperand Result;
+ unsigned cmpOpcode;
+ unsigned branchOpcode;
+ SDOperand branchOperand;
+
+ SDOperand StatusReg = DAG.getRegister(PIC16::STATUSREG, MVT::i8);
+ SDOperand CPUReg = DAG.getRegister(PIC16::WREG, MVT::i8);
+ switch(CC) {
+ default:
+ assert(0 && "This condition code is not handled yet!!");
+ abort();
+
+ case ISD::SETNE:
+ DOUT << "setne\n";
+ cmpOpcode = PIC16ISD::XORCC;
+ branchOpcode = PIC16ISD::BTFSS;
+ branchOperand = DAG.getConstant(2, MVT::i8);
+ break;
+
+ case ISD::SETEQ:
+ DOUT << "seteq\n";
+ cmpOpcode = PIC16ISD::XORCC;
+ branchOpcode = PIC16ISD::BTFSC;
+ branchOperand = DAG.getConstant(2, MVT::i8);
+ break;
+
+ case ISD::SETGT:
+ assert(0 && "Greater Than condition code is not handled yet!!");
+ abort();
+ break;
+
+ case ISD::SETGE:
+ DOUT << "setge\n";
+ cmpOpcode = PIC16ISD::SUBCC;
+ branchOpcode = PIC16ISD::BTFSS;
+ branchOperand = DAG.getConstant(1, MVT::i8);
+ break;
+
+ case ISD::SETLT:
+ DOUT << "setlt\n";
+ cmpOpcode = PIC16ISD::SUBCC;
+ branchOpcode = PIC16ISD::BTFSC;
+ branchOperand = DAG.getConstant(1,MVT::i8);
+ break;
+
+ case ISD::SETLE:
+ assert(0 && "Less Than Equal condition code is not handled yet!!");
+ abort();
+ break;
+ } // End of Switch
+
+ SDVTList VTList = DAG.getVTList(MVT::i8, MVT::Flag);
+ SDOperand CmpValue = DAG.getNode(cmpOpcode, VTList, LHS, RHS).getValue(1);
+ Result = DAG.getNode(branchOpcode, VT, Chain, JumpVal, branchOperand,
+ StatusReg, CmpValue);
+ return Result;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Misc Lower Operation implementation
+//===----------------------------------------------------------------------===//
+
+// LowerGlobalAddress - Create a constant pool entry for global value
+// and wrap it in a wrapper node.
+SDOperand
+PIC16TargetLowering::LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG)
+{
+ MVT PtrVT = getPointerTy();
+ GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
+ GlobalValue *GV = GSDN->getGlobal();
+
+ // FIXME: for now only do the ram.
+ SDOperand CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 2);
+ SDOperand CPBank = DAG.getNode(PIC16ISD::SetBank, MVT::i8, CPAddr);
+ CPAddr = DAG.getNode(PIC16ISD::Wrapper, MVT::i8, CPAddr,CPBank);
+
+ return CPAddr;
+}
+
+SDOperand
+PIC16TargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG)
+{
+ switch(Op.getNumOperands()) {
+ default:
+ assert(0 && "Do not know how to return this many arguments!");
+ abort();
+
+ case 1:
+ return SDOperand(); // ret void is legal
+ }
+}
+
+SDOperand
+PIC16TargetLowering::LowerFrameIndex(SDOperand N, SelectionDAG &DAG)
+{
+ if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(N)) {
+ return DAG.getTargetFrameIndex(FIN->getIndex(), MVT::i32);
+ }
+
+ return N;
+}
+
+SDOperand
+PIC16TargetLowering::LowerLOAD(SDNode *N,
+ SelectionDAG &DAG,
+ DAGCombinerInfo &DCI) const
+{
+ SDOperand Outs[2];
+ SDOperand TF; //TokenFactor
+ SDOperand OutChains[2];
+ SDOperand Chain = N->getOperand(0);
+ SDOperand Src = N->getOperand(1);
+ SDOperand retVal;
+ SDVTList VTList;
+
+ // If this load is directly stored, replace the load value with the stored
+ // value.
+ // FIXME: Handle store large -> read small portion.
+ // FIXME: Handle TRUNCSTORE/LOADEXT
+ LoadSDNode *LD = cast<LoadSDNode>(N);
+ SDOperand Ptr = LD->getBasePtr();
+ if (LD->getExtensionType() == ISD::NON_EXTLOAD) {
+ if (ISD::isNON_TRUNCStore(Chain.Val)) {
+ StoreSDNode *PrevST = cast<StoreSDNode>(Chain);
+ if (PrevST->getBasePtr() == Ptr &&
+ PrevST->getValue().getValueType() == N->getValueType(0))
+ return DCI.CombineTo(N, Chain.getOperand(1), Chain);
+ }
+ }
+
+ if (N->getValueType(0) != MVT::i16)
+ return SDOperand();
+
+ SDOperand toWorklist;
+ Outs[0] = DAG.getLoad(MVT::i8, Chain, Src, NULL, 0);
+ toWorklist = DAG.getNode(ISD::ADD, MVT::i16, Src,
+ DAG.getConstant(1, MVT::i16));
+ Outs[1] = DAG.getLoad(MVT::i8, Chain, toWorklist, NULL, 0);
+ // FIXME: Add to worklist may not be needed.
+ // It is meant to merge sequences of add with constant into one.
+ DCI.AddToWorklist(toWorklist.Val);
+
+ // Create the tokenfactors and carry it on to the build_pair node
+ OutChains[0] = Outs[0].getValue(1);
+ OutChains[1] = Outs[1].getValue(1);
+ TF = DAG.getNode(ISD::TokenFactor, MVT::Other, &OutChains[0], 2);
+
+ VTList = DAG.getVTList(MVT::i16, MVT::Flag);
+ retVal = DAG.getNode (PIC16ISD::Package, VTList, &Outs[0], 2);
+
+ DCI.CombineTo (N, retVal, TF);
+
+ return retVal;
+}
+
+SDOperand
+PIC16TargetLowering::LowerADDSUB(SDNode *N, SelectionDAG &DAG,
+ DAGCombinerInfo &DCI) const
+{
+ bool changed = false;
+ int i;
+ SDOperand LoOps[3], HiOps[3];
+ SDOperand OutOps[3]; // [0]:left, [1]:right, [2]:carry
+ SDOperand InOp[2];
+ SDOperand retVal;
+ SDOperand as1,as2;
+ SDVTList VTList;
+ unsigned AS = 0, ASE = 0, ASC=0;
+
+ InOp[0] = N->getOperand(0);
+ InOp[1] = N->getOperand(1);
+
+ switch (N->getOpcode()) {
+ case ISD::ADD:
+ if (InOp[0].getOpcode() == ISD::Constant &&
+ InOp[1].getOpcode() == ISD::Constant) {
+ ConstantSDNode *CST0 = dyn_cast<ConstantSDNode>(InOp[0]);
+ ConstantSDNode *CST1 = dyn_cast<ConstantSDNode>(InOp[1]);
+ return DAG.getConstant(CST0->getValue() + CST1->getValue(), MVT::i16);
+ }
+ break;
+
+ case ISD::ADDE:
+ case ISD::ADDC:
+ AS = ISD::ADD;
+ ASE = ISD::ADDE;
+ ASC = ISD::ADDC;
+ break;
+
+ case ISD::SUB:
+ if (InOp[0].getOpcode() == ISD::Constant &&
+ InOp[1].getOpcode() == ISD::Constant) {
+ ConstantSDNode *CST0 = dyn_cast<ConstantSDNode>(InOp[0]);
+ ConstantSDNode *CST1 = dyn_cast<ConstantSDNode>(InOp[1]);
+ return DAG.getConstant(CST0->getValue() - CST1->getValue(), MVT::i16);
+ }
+ break;
+
+ case ISD::SUBE:
+ case ISD::SUBC:
+ AS = ISD::SUB;
+ ASE = ISD::SUBE;
+ ASC = ISD::SUBC;
+ break;
+ } // end switch.
+
+ assert ((N->getValueType(0) == MVT::i16)
+ && "expecting an MVT::i16 node for lowering");
+ assert ((N->getOperand(0).getValueType() == MVT::i16)
+ && (N->getOperand(1).getValueType() == MVT::i16)
+ && "both inputs to addx/subx:i16 must be i16");
+
+ for (i = 0; i < 2; i++) {
+ if (InOp[i].getOpcode() == ISD::GlobalAddress) {
+ // We don't want to lower subs/adds with global address yet.
+ return SDOperand();
+ }
+ else if (InOp[i].getOpcode() == ISD::Constant) {
+ changed = true;
+ ConstantSDNode *CST = dyn_cast<ConstantSDNode>(InOp[i]);
+ LoOps[i] = DAG.getConstant(CST->getValue() & 0xFF, MVT::i8);
+ HiOps[i] = DAG.getConstant(CST->getValue() >> 8, MVT::i8);
+ }
+ else if (InOp[i].getOpcode() == PIC16ISD::Package) {
+ LoOps[i] = InOp[i].getOperand(0);
+ HiOps[i] = InOp[i].getOperand(1);
+ }
+ else if (InOp[i].getOpcode() == ISD::LOAD) {
+ changed = true;
+ // LowerLOAD returns a Package node or it may combine and return
+ // anything else.
+ SDOperand lowered = LowerLOAD(InOp[i].Val, DAG, DCI);
+
+ // So if LowerLOAD returns something other than Package,
+ // then just call ADD again.
+ if (lowered.getOpcode() != PIC16ISD::Package)
+ return LowerADDSUB(N, DAG, DCI);
+
+ LoOps[i] = lowered.getOperand(0);
+ HiOps[i] = lowered.getOperand(1);
+ }
+ else if ((InOp[i].getOpcode() == ISD::ADD) ||
+ (InOp[i].getOpcode() == ISD::ADDE) ||
+ (InOp[i].getOpcode() == ISD::ADDC) ||
+ (InOp[i].getOpcode() == ISD::SUB) ||
+ (InOp[i].getOpcode() == ISD::SUBE) ||
+ (InOp[i].getOpcode() == ISD::SUBC)) {
+ changed = true;
+ // Must call LowerADDSUB recursively here,
+ // LowerADDSUB returns a Package node.
+ SDOperand lowered = LowerADDSUB(InOp[i].Val, DAG, DCI);
+
+ LoOps[i] = lowered.getOperand(0);
+ HiOps[i] = lowered.getOperand(1);
+ }
+ else if (InOp[i].getOpcode() == ISD::SIGN_EXTEND) {
+ // FIXME: I am just zero extending for now.
+ changed = true;
+ LoOps[i] = InOp[i].getOperand(0);
+ HiOps[i] = DAG.getConstant(0, MVT::i8);
+ }
+ else {
+ DAG.setGraphColor(N, "blue");
+ DAG.viewGraph();
+ assert (0 && "not implemented yet");
+ }
+ } // end for.
+
+ assert (changed && "nothing changed while lowering SUBx/ADDx");
+
+ VTList = DAG.getVTList(MVT::i8, MVT::Flag);
+ if (N->getOpcode() == ASE) {
+ // We must take in the existing carry
+ // if this node is part of an existing subx/addx sequence.
+ LoOps[2] = N->getOperand(2).getValue(1);
+ as1 = DAG.getNode (ASE, VTList, LoOps, 3);
+ }
+ else {
+ as1 = DAG.getNode (ASC, VTList, LoOps, 2);
+ }
+ HiOps[2] = as1.getValue(1);
+ as2 = DAG.getNode (ASE, VTList, HiOps, 3);
+ // We must build a pair that also provides the carry from sube/adde.
+ OutOps[0] = as1;
+ OutOps[1] = as2;
+ OutOps[2] = as2.getValue(1);
+ // Breaking an original i16, so let's make the Package also an i16.
+ if (N->getOpcode() == ASE) {
+ VTList = DAG.getVTList(MVT::i16, MVT::Flag);
+ retVal = DAG.getNode (PIC16ISD::Package, VTList, OutOps, 3);
+ DCI.CombineTo (N, retVal, OutOps[2]);
+ }
+ else if (N->getOpcode() == ASC) {
+ VTList = DAG.getVTList(MVT::i16, MVT::Flag);
+ retVal = DAG.getNode (PIC16ISD::Package, VTList, OutOps, 2);
+ DCI.CombineTo (N, retVal, OutOps[2]);
+ }
+ else if (N->getOpcode() == AS) {
+ VTList = DAG.getVTList(MVT::i16);
+ retVal = DAG.getNode (PIC16ISD::Package, VTList, OutOps, 2);
+ DCI.CombineTo (N, retVal);
+ }
+
+ return retVal;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Calling Convention Implementation
+//===----------------------------------------------------------------------===//
+
+#include "PIC16GenCallingConv.inc"
+
+//===----------------------------------------------------------------------===//
+// CALL Calling Convention Implementation
+//===----------------------------------------------------------------------===//
+
+
+//===----------------------------------------------------------------------===//
+// FORMAL_ARGUMENTS Calling Convention Implementation
+//===----------------------------------------------------------------------===//
+SDOperand PIC16TargetLowering::
+LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG)
+{
+ SmallVector<SDOperand, 8> ArgValues;
+ SDOperand Root = Op.getOperand(0);
+
+ // Return the new list of results.
+ // FIXME: Just copy right now.
+ ArgValues.push_back(Root);
+
+ return DAG.getMergeValues(Op.Val->getVTList(), &ArgValues[0],
+ ArgValues.size()).getValue(Op.ResNo);
+}
+
+
+//===----------------------------------------------------------------------===//
+// Return Value Calling Convention Implementation
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// PIC16 Inline Assembly Support
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Target Optimization Hooks
+//===----------------------------------------------------------------------===//
+
+SDOperand PIC16TargetLowering::PerformDAGCombine(SDNode *N,
+ DAGCombinerInfo &DCI) const
+{
+ int i;
+ ConstantSDNode *CST;
+ SelectionDAG &DAG = DCI.DAG;
+
+ switch (N->getOpcode()) {
+ default:
+ break;
+
+ case PIC16ISD::Package:
+ DOUT << "==== combining PIC16ISD::Package\n";
+ return SDOperand();
+
+ case ISD::ADD:
+ case ISD::SUB:
+ if ((N->getOperand(0).getOpcode() == ISD::GlobalAddress) ||
+ (N->getOperand(0).getOpcode() == ISD::FrameIndex)) {
+ // Do not touch pointer adds.
+ return SDOperand ();
+ }
+ break;
+
+ case ISD::ADDE :
+ case ISD::ADDC :
+ case ISD::SUBE :
+ case ISD::SUBC :
+ if (N->getValueType(0) == MVT::i16) {
+ SDOperand retVal = LowerADDSUB(N, DAG,DCI);
+ // LowerADDSUB has already combined the result,
+ // so we just return nothing to avoid assertion failure from llvm
+ // if N has been deleted already.
+ return SDOperand();
+ }
+ else if (N->getValueType(0) == MVT::i8) {
+ // Sanity check ....
+ for (int i=0; i<2; i++) {
+ if (N->getOperand (i).getOpcode() == PIC16ISD::Package) {
+ assert (0 &&
+ "don't want to have PIC16ISD::Package as input to add:i8");
+ }
+ }
+ }
+ break;
+
+ // FIXME: split this large chunk of code.
+ case ISD::STORE :
+ {
+ SDOperand Chain = N->getOperand(0);
+ SDOperand Src = N->getOperand(1);
+ SDOperand Dest = N->getOperand(2);
+ unsigned int DstOff = 0;
+ int NUM_STORES = 0;
+ SDOperand Stores[6];
+
+ // If the source operand is expected to be extended to some wider type,
+ // remove that extension SDNode and do the extension manually.
+ if ((Src.getOpcode() == ISD::ANY_EXTEND) ||
+ (Src.getOpcode() == ISD::SIGN_EXTEND) ||
+ (Src.getOpcode() == ISD::ZERO_EXTEND)) {
+ Src = Src.Val->getOperand(0);
+ Stores[0] = DAG.getStore(Chain, Src, Dest, NULL,0);
+ return Stores[0];
+ }
+
+ switch(Src.getValueType().getSimpleVT()) {
+ default:
+ assert(false && "Invalid value type!");
+
+ case MVT::i8:
+ break;
+
+ case MVT::i16:
+ NUM_STORES = 2;
+ break;
+
+ case MVT::i32:
+ NUM_STORES = 4;
+ break;
+
+ case MVT::i64:
+ NUM_STORES = 8;
+ break;
+ }
+
+ if (isa<GlobalAddressSDNode>(Dest) && isa<LoadSDNode>(Src) &&
+ (Src.getValueType() != MVT::i8)) {
+ // Create direct addressing: a = b.
+ Chain = Src.getOperand(0);
+ for (i=0; i<NUM_STORES; i++) {
+ SDOperand ADN = DAG.getNode(ISD::ADD, MVT::i16, Src.getOperand(1),
+ DAG.getConstant(DstOff, MVT::i16));
+ SDOperand LDN = DAG.getLoad(MVT::i8, Chain, ADN, NULL, 0);
+ SDOperand DSTADDR = DAG.getNode(ISD::ADD, MVT::i16, Dest,
+ DAG.getConstant(DstOff, MVT::i16));
+ Stores[i] = DAG.getStore(Chain, LDN, DSTADDR, NULL, 0);
+ Chain = Stores[i];
+ DstOff += 1;
+ }
+
+ Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, &Stores[0], i);
+ return Chain;
+ }
+ else if (isa<GlobalAddressSDNode>(Dest) && isa<ConstantSDNode>(Src)
+ && (Src.getValueType() != MVT::i8)) {
+        // Create direct addressing: a = CONST.
+ CST = dyn_cast<ConstantSDNode>(Src);
+ for (i = 0; i < NUM_STORES; i++) {
+ SDOperand CNST = DAG.getConstant(CST->getValue() >> i*8, MVT::i8);
+ SDOperand ADN = DAG.getNode(ISD::ADD, MVT::i16, Dest,
+ DAG.getConstant(DstOff, MVT::i16));
+ Stores[i] = DAG.getStore(Chain, CNST, ADN, NULL, 0);
+ Chain = Stores[i];
+ DstOff += 1;
+ }
+
+ Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, &Stores[0], i);
+ return Chain;
+ }
+ else if (isa<LoadSDNode>(Dest) && isa<ConstantSDNode>(Src)
+ && (Src.getValueType() != MVT::i8)) {
+ // Create indirect addressing.
+ CST = dyn_cast<ConstantSDNode>(Src);
+ Chain = Dest.getOperand(0);
+ SDOperand Load;
+ Load = DAG.getLoad(MVT::i16, Chain,Dest.getOperand(1), NULL, 0);
+ Chain = Load.getValue(1);
+ for (i=0; i<NUM_STORES; i++) {
+ SDOperand CNST = DAG.getConstant(CST->getValue() >> i*8, MVT::i8);
+ Stores[i] = DAG.getStore(Chain, CNST, Load, NULL, 0);
+ Chain = Stores[i];
+ DstOff += 1;
+ }
+
+ Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, &Stores[0], i);
+ return Chain;
+ }
+ else if (isa<LoadSDNode>(Dest) && isa<GlobalAddressSDNode>(Src)) {
+ // GlobalAddressSDNode *GAD = dyn_cast<GlobalAddressSDNode>(Src);
+ return SDOperand();
+ }
+ else if (Src.getOpcode() == PIC16ISD::Package) {
+ StoreSDNode *st = dyn_cast<StoreSDNode>(N);
+ SDOperand toWorkList, retVal;
+ Chain = N->getOperand(0);
+
+ if (st->isTruncatingStore()) {
+ retVal = DAG.getStore(Chain, Src.getOperand(0), Dest, NULL, 0);
+ }
+ else {
+ toWorkList = DAG.getNode(ISD::ADD, MVT::i16, Dest,
+ DAG.getConstant(1, MVT::i16));
+ Stores[1] = DAG.getStore(Chain, Src.getOperand(0), Dest, NULL, 0);
+ Stores[0] = DAG.getStore(Chain, Src.getOperand(1), toWorkList, NULL,
+ 0);
+
+          // We want to merge a sequence of adds of constants into a single
+          // add of a constant, so add the ADD node to the worklist and let
+          // LLVM do that automatically.
+ DCI.AddToWorklist(toWorkList.Val);
+
+          // We don't need the Package node, so add it to the worklist
+          // and let LLVM delete it.
+ DCI.AddToWorklist(Src.Val);
+ retVal = DAG.getNode(ISD::TokenFactor, MVT::Other, &Stores[0], 2);
+ }
+
+ return retVal;
+ }
+ else if (Src.getOpcode() == ISD::TRUNCATE) {
+ }
+ else {
+ }
+ } // end ISD::STORE.
+ break;
+
+ case ISD::LOAD :
+ {
+ SDOperand Ptr = N->getOperand(1);
+ if (Ptr.getOpcode() == PIC16ISD::Package) {
+ assert (0 && "not implemented yet");
+ }
+ }
+ break;
+ } // end switch.
+
+ return SDOperand();
+}
+
+//===----------------------------------------------------------------------===//
+// Utility functions
+//===----------------------------------------------------------------------===//
+const SDOperand *PIC16TargetLowering::
+findLoadi8(const SDOperand &Src, SelectionDAG &DAG) const
+{
+ unsigned int i;
+ if ((Src.getOpcode() == ISD::LOAD) && (Src.getValueType() == MVT::i8))
+ return &Src;
+ for (i=0; i<Src.getNumOperands(); i++) {
+ const SDOperand *retVal = findLoadi8(Src.getOperand(i),DAG);
+ if (retVal) return retVal;
+ }
+
+ return NULL;
+}
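
For context on the ISD::STORE combine above: a store wider than i8 is rewritten into a chain of i8 stores at successive byte offsets, low byte first. A minimal standalone C++ sketch of the same byte-splitting arithmetic (plain memory instead of SelectionDAG nodes; the buffer and values are invented for illustration):

#include <cstdint>
#include <cstdio>

// Split a wide value into NumStores byte-sized stores at successive
// offsets, low byte first, mirroring the i8 store chain built above.
static void storeBytes(uint8_t *Dest, uint32_t Val, unsigned NumStores) {
  for (unsigned i = 0; i < NumStores; ++i)
    Dest[i] = uint8_t(Val >> (i * 8));
}

int main() {
  uint8_t Buf[4] = {0};
  storeBytes(Buf, 0x1234, 2);            // an i16 store becomes two i8 stores
  printf("%02x %02x\n", Buf[0], Buf[1]); // prints "34 12"
  return 0;
}
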
Added: llvm/branches/non-call-eh/lib/Target/PIC16/PIC16ISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PIC16/PIC16ISelLowering.h?rev=53163&view=auto
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PIC16/PIC16ISelLowering.h (added)
+++ llvm/branches/non-call-eh/lib/Target/PIC16/PIC16ISelLowering.h Sun Jul 6 15:45:41 2008
@@ -0,0 +1,92 @@
+//===-- PIC16ISelLowering.h - PIC16 DAG Lowering Interface ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interfaces that PIC16 uses to lower LLVM code into a
+// selection DAG.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef PIC16ISELLOWERING_H
+#define PIC16ISELLOWERING_H
+
+#include "PIC16.h"
+#include "PIC16Subtarget.h"
+#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/Target/TargetLowering.h"
+
+namespace llvm {
+ namespace PIC16ISD {
+ enum NodeType {
+ // Start the numbering from where ISD NodeType finishes.
+ FIRST_NUMBER = ISD::BUILTIN_OP_END+PIC16::INSTRUCTION_LIST_END,
+
+ // used for encapsulating the expanded nodes into one node.
+ Package,
+
+ // Get the Higher 16 bits from a 32-bit immediate
+ Hi,
+
+ // Get the Lower 16 bits from a 32-bit immediate
+ Lo,
+
+ Cmp, // PIC16 Generic Comparison instruction.
+ Branch, // PIC16 Generic Branch Instruction.
+ BTFSS, // PIC16 BitTest Instruction (Skip if set).
+ BTFSC, // PIC16 BitTest Instruction (Skip if clear).
+
+      // PIC16 comparisons to be converted to either XOR or SUB.
+      // The following instructions cater to those conversions.
+ XORCC,
+ SUBCC,
+
+ // Get the Global Address wrapped into a wrapper that also captures
+ // the bank or page.
+ Wrapper,
+ SetBank,
+ SetPage
+ };
+ }
+
+ //===--------------------------------------------------------------------===//
+ // TargetLowering Implementation
+ //===--------------------------------------------------------------------===//
+ class PIC16TargetLowering : public TargetLowering
+ {
+ public:
+ typedef std::map<SDNode *, SDNode *> NodeMap_t;
+
+ explicit PIC16TargetLowering(PIC16TargetMachine &TM);
+
+ /// LowerOperation - Provide custom lowering hooks for some operations.
+ virtual SDOperand LowerOperation(SDOperand Op, SelectionDAG &DAG);
+
+ SDOperand LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerFrameIndex(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerBR_CC(SDOperand Op, SelectionDAG &DAG);
+
+ SDOperand RemoveHiLo(SDNode *, SelectionDAG &DAG,
+ DAGCombinerInfo &DCI) const;
+ SDOperand LowerADDSUB(SDNode *, SelectionDAG &DAG,
+ DAGCombinerInfo &DCI) const;
+ SDOperand LowerLOAD(SDNode *, SelectionDAG &DAG,
+ DAGCombinerInfo &DCI) const;
+
+ /// getTargetNodeName - This method returns the name of a target specific
+    /// DAG node.
+ virtual const char *getTargetNodeName(unsigned Opcode) const;
+ virtual SDOperand PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
+
+ // utility function.
+ const SDOperand *findLoadi8(const SDOperand &Src, SelectionDAG &DAG) const;
+ };
+} // namespace llvm
+
+#endif // PIC16ISELLOWERING_H
Added: llvm/branches/non-call-eh/lib/Target/PIC16/PIC16InstrFormats.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PIC16/PIC16InstrFormats.td?rev=53163&view=auto
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PIC16/PIC16InstrFormats.td (added)
+++ llvm/branches/non-call-eh/lib/Target/PIC16/PIC16InstrFormats.td Sun Jul 6 15:45:41 2008
@@ -0,0 +1,112 @@
+//===- PIC16InstrFormats.td - PIC16 Instruction Formats ------*- tblgen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Describe PIC16 instructions format
+//
+// All the possible PIC16 fields are:
+//
+// opcode - operation code.
+// f - 7-bit register file address.
+//  d - 1-bit direction specifier.
+//  k - 8/11-bit literal.
+//  b - 3-bit bit-number specifier.
+//
+//===----------------------------------------------------------------------===//
+
+// Generic PIC16 Format
+class PIC16Inst<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : Instruction
+{
+ field bits<14> Inst;
+
+ let Namespace = "PIC16";
+
+ dag OutOperandList = outs;
+ dag InOperandList = ins;
+
+ let AsmString = asmstr;
+ let Pattern = pattern;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Byte Oriented instruction class in PIC16 : <|opcode|d|f|>
+//===----------------------------------------------------------------------===//
+
+class ByteFormat<bits<6> op, dag outs, dag ins, string asmstr,
+ list<dag> pattern>
+ :PIC16Inst<outs, ins, asmstr, pattern>
+{
+ bits<1> d;
+ bits<7> f;
+
+ let Inst{13-8} = op;
+
+ let Inst{7} = d;
+ let Inst{6-0} = f;
+}
+
+//===----------------------------------------------------------------------===//
+// Bit Oriented instruction class in PIC16 : <|opcode|b|f|>
+//===----------------------------------------------------------------------===//
+
+class BitFormat<bits<4> op, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : PIC16Inst<outs, ins, asmstr, pattern>
+{
+ bits<3> b;
+ bits<7> f;
+
+ let Inst{13-10} = op;
+
+ let Inst{9-7} = b;
+ let Inst{6-0} = f;
+}
+
+//===----------------------------------------------------------------------===//
+// Literal Format instruction class in PIC16 : <|opcode|k|>
+//===----------------------------------------------------------------------===//
+
+class LiteralFormat<bits<6> op, dag outs, dag ins, string asmstr,
+ list<dag> pattern>
+ : PIC16Inst<outs, ins, asmstr, pattern>
+{
+ bits<8> k;
+
+
+ let Inst{13-8} = op;
+
+ let Inst{7-0} = k;
+}
+
+//===----------------------------------------------------------------------===//
+// Control Format instruction class in PIC16 : <|opcode|k|>
+//===----------------------------------------------------------------------===//
+
+class ControlFormat<bits<3> op, dag outs, dag ins, string asmstr,
+ list<dag> pattern>
+ :PIC16Inst<outs, ins, asmstr, pattern>
+{
+ bits<11> k;
+
+
+ let Inst{13-11} = op;
+
+ let Inst{10-0} = k;
+}
+
+//===----------------------------------------------------------------------===//
+// Pseudo instruction class in PIC16
+//===----------------------------------------------------------------------===//
+
+class Pseudo<bits<8> op, dag outs, dag ins, string asmstr, list<dag> pattern>:
+ PIC16Inst<outs, ins, asmstr, pattern>
+{
+ let Inst{13-6} = op;
+}
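
The byte-oriented format described above packs a 6-bit opcode, a 1-bit direction flag and a 7-bit file address into the 14-bit instruction word (Inst{13-8}=op, Inst{7}=d, Inst{6-0}=f). A standalone C++ sketch of that packing, with made-up field values, just to make the bit layout concrete:

#include <cassert>
#include <cstdint>

// Pack a byte-oriented PIC16 instruction: Inst{13-8}=op, Inst{7}=d, Inst{6-0}=f.
static uint16_t encodeByteFormat(unsigned Op, unsigned D, unsigned F) {
  assert(Op < 64 && D < 2 && F < 128 && "field out of range");
  return uint16_t((Op << 8) | (D << 7) | F);
}

int main() {
  // Hypothetical values: opcode 0x01, d=1 (result to file register), f=0x25.
  uint16_t Inst = encodeByteFormat(0x01, 1, 0x25);
  return Inst == 0x01A5 ? 0 : 1;
}
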
Added: llvm/branches/non-call-eh/lib/Target/PIC16/PIC16InstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PIC16/PIC16InstrInfo.cpp?rev=53163&view=auto
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PIC16/PIC16InstrInfo.cpp (added)
+++ llvm/branches/non-call-eh/lib/Target/PIC16/PIC16InstrInfo.cpp Sun Jul 6 15:45:41 2008
@@ -0,0 +1,143 @@
+//===- PIC16InstrInfo.cpp - PIC16 Instruction Information -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the PIC16 implementation of the TargetInstrInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "PIC16.h"
+#include "PIC16InstrInfo.h"
+#include "llvm/Function.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "PIC16GenInstrInfo.inc"
+
+using namespace llvm;
+
+// FIXME: Add the subtarget support on this constructor.
+PIC16InstrInfo::PIC16InstrInfo(PIC16TargetMachine &tm)
+ : TargetInstrInfoImpl(PIC16Insts, array_lengthof(PIC16Insts)),
+ TM(tm), RI(*this) {}
+
+static bool isZeroImm(const MachineOperand &op) {
+ return op.isImmediate() && op.getImm() == 0;
+}
+
+
+/// isLoadFromStackSlot - If the specified machine instruction is a direct
+/// load from a stack slot, return the virtual or physical register number of
+/// the destination along with the FrameIndex of the loaded stack slot. If
+/// not, return 0. This predicate must return 0 if the instruction has
+/// any side effects other than loading from the stack slot.
+unsigned PIC16InstrInfo::
+isLoadFromStackSlot(MachineInstr *MI, int &FrameIndex) const
+{
+ if (MI->getOpcode() == PIC16::MOVF) {
+ if ((MI->getOperand(2).isFrameIndex()) && // is a stack slot
+ (MI->getOperand(1).isImmediate()) && // the imm is zero
+ (isZeroImm(MI->getOperand(1)))) {
+ FrameIndex = MI->getOperand(2).getIndex();
+ return MI->getOperand(0).getReg();
+ }
+ }
+
+ return 0;
+}
+
+/// isStoreToStackSlot - If the specified machine instruction is a direct
+/// store to a stack slot, return the virtual or physical register number of
+/// the source reg along with the FrameIndex of the loaded stack slot. If
+/// not, return 0. This predicate must return 0 if the instruction has
+/// any side effects other than storing to the stack slot.
+unsigned PIC16InstrInfo::
+isStoreToStackSlot(MachineInstr *MI, int &FrameIndex) const
+{
+ if (MI->getOpcode() == PIC16::MOVWF) {
+ if ((MI->getOperand(0).isFrameIndex()) && // is a stack slot
+ (MI->getOperand(1).isImmediate()) && // the imm is zero
+ (isZeroImm(MI->getOperand(1)))) {
+ FrameIndex = MI->getOperand(0).getIndex();
+ return MI->getOperand(2).getReg();
+ }
+ }
+ return 0;
+}
+
+void PIC16InstrInfo::
+storeRegToStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ unsigned SrcReg, bool isKill, int FI,
+ const TargetRegisterClass *RC) const {
+ const Function *Func = MBB.getParent()->getFunction();
+ const std::string FuncName = Func->getName();
+
+  // "_tmp_", the frame index digits and the terminating NUL need extra room.
+  char *tmpName = new char [strlen(FuncName.c_str()) + 24];
+  sprintf(tmpName, "%s_tmp_%d", FuncName.c_str(), FI);
+
+ if (RC == PIC16::CPURegsRegisterClass) {
+ //src is always WREG.
+ BuildMI(MBB, I, this->get(PIC16::MOVWF))
+ .addReg(SrcReg,false,false,true,true)
+ .addExternalSymbol(tmpName) // the current printer expects 3 operands,
+ .addExternalSymbol(tmpName); // all we need is actually one,
+ // so we repeat.
+ }
+ else
+ assert(0 && "Can't store this register to stack slot");
+}
+
+void PIC16InstrInfo::
+loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ unsigned DestReg, int FI,
+ const TargetRegisterClass *RC) const
+{
+ const Function *Func = MBB.getParent()->getFunction();
+ const std::string FuncName = Func->getName();
+
+  // "_tmp_", the frame index digits and the terminating NUL need extra room.
+  char *tmpName = new char [strlen(FuncName.c_str()) + 24];
+  sprintf(tmpName, "%s_tmp_%d", FuncName.c_str(), FI);
+
+ if (RC == PIC16::CPURegsRegisterClass)
+ BuildMI(MBB, I, this->get(PIC16::MOVF), DestReg)
+ .addExternalSymbol(tmpName) // the current printer expects 3 operands,
+      .addExternalSymbol(tmpName); // all we need is actually one, so we repeat.
+ else
+ assert(0 && "Can't load this register from stack slot");
+}
+
+/// InsertBranch - Insert a branch into the end of the specified
+/// MachineBasicBlock. The operands to this method are the same as those
+/// returned by AnalyzeBranch. This is invoked in cases where AnalyzeBranch
+/// returns success and when an unconditional branch (TBB is non-null, FBB is
+/// null, Cond is empty) needs to be inserted. It returns the number of
+/// instructions inserted.
+unsigned PIC16InstrInfo::
+InsertBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *TBB, MachineBasicBlock *FBB,
+ const std::vector<MachineOperand> &Cond) const
+{
+ // Shouldn't be a fall through.
+ assert(TBB && "InsertBranch must not be told to insert a fallthrough");
+
+ if (FBB == 0) { // One way branch.
+ if (Cond.empty()) {
+ // Unconditional branch?
+ BuildMI(&MBB, get(PIC16::GOTO)).addMBB(TBB);
+ }
+ return 1;
+ }
+
+  // FIXME: If conditions are specified, a conditional branch should be
+  // generated. For the time being no instruction is generated, so
+  // return 0.
+ return 0;
+}
+
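
The spill helpers above synthesize an external symbol named "<function>_tmp_<frame index>" for each spilled slot. A standalone sketch of that naming using std::ostringstream, which sidesteps manual sizing of a raw char buffer (the function name below is invented):

#include <cassert>
#include <sstream>
#include <string>

// Build the "<function>_tmp_<FI>" spill symbol used above.
static std::string spillSymbol(const std::string &FuncName, int FI) {
  std::ostringstream OS;
  OS << FuncName << "_tmp_" << FI;
  return OS.str();
}

int main() {
  assert(spillSymbol("main", 3) == "main_tmp_3");
  return 0;
}
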
Added: llvm/branches/non-call-eh/lib/Target/PIC16/PIC16InstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PIC16/PIC16InstrInfo.h?rev=53163&view=auto
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PIC16/PIC16InstrInfo.h (added)
+++ llvm/branches/non-call-eh/lib/Target/PIC16/PIC16InstrInfo.h Sun Jul 6 15:45:41 2008
@@ -0,0 +1,78 @@
+//===- PIC16InstrInfo.h - PIC16 Instruction Information ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the PIC16 implementation of the TargetInstrInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef PIC16INSTRUCTIONINFO_H
+#define PIC16INSTRUCTIONINFO_H
+
+#include "PIC16.h"
+#include "PIC16RegisterInfo.h"
+#include "llvm/Target/TargetInstrInfo.h"
+
+namespace llvm {
+
+
+class PIC16InstrInfo : public TargetInstrInfoImpl
+{
+ PIC16TargetMachine &TM;
+ const PIC16RegisterInfo RI;
+public:
+ explicit PIC16InstrInfo(PIC16TargetMachine &TM);
+
+ /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
+ /// such, whenever a client has an instance of instruction info, it should
+ /// always be able to get register info as well (through this method).
+ ///
+ virtual const PIC16RegisterInfo &getRegisterInfo() const { return RI; }
+
+
+ /// isLoadFromStackSlot - If the specified machine instruction is a direct
+ /// load from a stack slot, return the virtual or physical register number of
+ /// the destination along with the FrameIndex of the loaded stack slot. If
+ /// not, return 0. This predicate must return 0 if the instruction has
+ /// any side effects other than loading from the stack slot.
+ virtual unsigned isLoadFromStackSlot(MachineInstr *MI, int &FrameIndex) const;
+
+ /// isStoreToStackSlot - If the specified machine instruction is a direct
+ /// store to a stack slot, return the virtual or physical register number of
+ /// the source reg along with the FrameIndex of the loaded stack slot. If
+ /// not, return 0. This predicate must return 0 if the instruction has
+ /// any side effects other than storing to the stack slot.
+ virtual unsigned isStoreToStackSlot(MachineInstr *MI, int &FrameIndex) const;
+
+ /// Used for spilling a register
+ void storeRegToStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned SrcReg, bool isKill, int FrameIndex,
+ const TargetRegisterClass *RC) const;
+
+
+ void loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned DestReg, int FrameIndex,
+ const TargetRegisterClass *RC) const;
+
+ /// InsertBranch - Insert a branch into the end of the specified
+  /// MachineBasicBlock. The operands to this method are the same as those
+ /// returned by AnalyzeBranch. This is invoked in cases where AnalyzeBranch
+ /// returns success and when an unconditional branch (TBB is non-null, FBB is
+ /// null, Cond is empty) needs to be inserted. It returns the number of
+ /// instructions inserted.
+ virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ const std::vector<MachineOperand> &Cond) const ;
+
+};
+
+} // namespace llvm
+
+#endif
Added: llvm/branches/non-call-eh/lib/Target/PIC16/PIC16InstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PIC16/PIC16InstrInfo.td?rev=53163&view=auto
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PIC16/PIC16InstrInfo.td (added)
+++ llvm/branches/non-call-eh/lib/Target/PIC16/PIC16InstrInfo.td Sun Jul 6 15:45:41 2008
@@ -0,0 +1,302 @@
+//===- PIC16InstrInfo.td - PIC16 Instruction defs ------------*- tblgen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Instruction format superclass
+//===----------------------------------------------------------------------===//
+
+include "PIC16InstrFormats.td"
+
+//===----------------------------------------------------------------------===//
+// PIC16 profiles and nodes
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// PIC16 addressing mode.
+//===----------------------------------------------------------------------===//
+// It matches addresses of globals as well as the stack slots
+// that are created for locals and temporaries. This addressing mode
+// converts the GlobalAddress and FrameIndex nodes to TargetGlobalAddress
+// and TargetFrameIndex nodes.
+def diraddrmode : ComplexPattern<i16, 2, "SelectDirectAM", [frameindex], []>;
+def dirloadmode : ComplexPattern<i16, 2, "LoadNothing", [frameindex], []>;
+def indirloadmode : ComplexPattern<i16, 2, "LoadFSR", [frameindex], []>;
+
+
+// Address operand.
+def mem : Operand<i16> {
+ let PrintMethod = "printAddrModeOperand";
+ let MIOperandInfo = (ops i16imm, PTRRegs);
+}
+
+// Instruction operand types
+def simm8 : Operand<i8>;
+
+
+// These are target-independent nodes, but have target-specific formats.
+def SDT_PIC16CallSeq : SDTypeProfile<0, 1, [ SDTCisVT<0, i8> ]>;
+def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_PIC16CallSeq,
+ [SDNPHasChain, SDNPOutFlag]>;
+def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_PIC16CallSeq,
+ [SDNPHasChain, SDNPOutFlag]>;
+
+def PIC16Wrapper : SDNode<"PIC16ISD::Wrapper", SDTIntUnaryOp>;
+
+// so_imm_XFORM - Return a so_imm value packed into the format described for
+// so_imm def below.
+def so_imm_XFORM : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant((int8_t)N->getValue(), MVT::i32);
+}]>;
+
+def so_imm : Operand<i8>,
+ PatLeaf<(imm), [{}]> {
+ let PrintMethod = "printSOImmOperand";
+}
+
+
+
+// PIC16 Address Mode! SDNode frameindex could possibly be a match
+// since load and store instructions from the stack use it.
+def addr : Operand<i16>;
+
+// Arithmetic 2 register operands
+class ArithI<bits<6> op, string instr_asm, SDNode OpNode,
+ Operand Od> :
+ LiteralFormat< op,
+ (outs CPURegs:$dst),
+ (ins CPURegs:$b, Od:$c),
+ !strconcat(instr_asm, " $c"),
+ [(set CPURegs:$dst, (OpNode CPURegs:$b, Od:$c))]>;
+
+// Memory Load/Store.
+class LoadDirect<bits<6> op, string instr_asm, PatFrag OpNode>:
+ ByteFormat< op,
+ (outs CPURegs:$dst),
+ (ins mem:$addr),
+ !strconcat(instr_asm, " $addr"),
+ [(set CPURegs:$dst, (OpNode diraddrmode:$addr))]>;
+
+class LoadInDirect<bits<6> op, string instr_asm, PatFrag OpNode>:
+ ByteFormat< op,
+ (outs PTRRegs:$dst),
+ (ins mem:$addr),
+ !strconcat(instr_asm, " $addr, $dst"),
+ [(set PTRRegs:$dst, (OpNode indirloadmode:$addr))]>;
+
+class StoreDirect<bits<6> op, string instr_asm, PatFrag OpNode>:
+ ByteFormat< op,
+ (outs),
+ (ins CPURegs:$src, mem:$addr),
+ !strconcat(instr_asm, " $addr"),
+ [(OpNode CPURegs:$src, diraddrmode:$addr)]>;
+
+class StoreInDirect<bits<6> op, string instr_asm, PatFrag OpNode>:
+ ByteFormat< op,
+ (outs),
+ (ins CPURegs:$src, PTRRegs:$fsr),
+ !strconcat(instr_asm, " $fsr"),
+ [(OpNode CPURegs:$src, PTRRegs:$fsr)]>;
+
+// Move.
+class MovLit<bits<6> op, string instr_asm>:
+ LiteralFormat< op,
+ (outs CPURegs:$dst),
+ (ins i8imm:$src),
+ !strconcat(instr_asm, " $src"),
+ [(set CPURegs:$dst, imm:$src)]>;
+
+
+// Arithmetic with memory store.
+// Arithmetic instructions involving W and a memory location.
+// Since W is implicit, we only print the memory operand.
+class Arith1M<bits<6> op, string instr_asm, SDNode OpNode>:
+ ByteFormat< op,
+ (outs),
+ (ins CPURegs:$b, mem:$dst),
+ !strconcat(instr_asm, " $dst"),
+ [(store (OpNode (load diraddrmode:$dst), CPURegs:$b), diraddrmode:$dst),
+ (store (OpNode CPURegs:$b, (load diraddrmode:$dst)), diraddrmode:$dst)]>;
+
+// Arithmetic with memory load.
+// Arithmetic instructions involving W and a memory location.
+// Since W is implicit, we only print the memory operand.
+class Arith1R<bits<6> op, string instr_asm, SDNode OpNode>:
+ ByteFormat< op,
+ (outs CPURegs:$dst),
+ (ins mem:$src1, CPURegs:$src2),
+ !strconcat(instr_asm, " $src1"),
+ [(set CPURegs:$dst, (OpNode (load diraddrmode:$src1), CPURegs:$src2))]>;
+
+// Arithmetic with memory load.
+// Arithmetic instructions involving W and a memory location.
+// Since W is implicit, we only print the memory operand.
+class Arith2R<bits<6> op, string instr_asm, SDNode OpNode>:
+ ByteFormat< op,
+ (outs CPURegs:$dst),
+ (ins mem:$src1, CPURegs:$src2),
+ !strconcat(instr_asm, " $src1"),
+ [(set CPURegs:$dst, (OpNode CPURegs:$src2, (load diraddrmode:$src1)))]>;
+
+//===----------------------------------------------------------------------===//
+// Instruction definition
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// PIC16I Instructions
+//===----------------------------------------------------------------------===//
+
+// Arithmetic
+
+// ADDiu just accepts 16-bit immediates, but we handle this with Pat patterns.
+// immZExt32 is used here so it can match GlobalAddress immediates.
+// def ADDLW : ArithI<0x09, "addlw", add, so_imm>;
+
+let isReMaterializable = 1 in {
+def MOVLW : MovLit<0x24, "movlw">;
+}
+
+// Load/Store
+def LFSR1 : LoadInDirect <0x4, "lfsr", load>;
+
+let isReMaterializable = 1 in {
+def MOVF : LoadDirect <0x23, "movf", load>;
+}
+
+def MOVWF : StoreDirect <0x2b, "movwf", store>;
+
+def MOVFSRINC : StoreInDirect <0x5, "movfsrinc", store>;
+
+def RETURN : ControlFormat<0x03, (outs), (ins), "return", []>;
+
+def ADDWF : Arith1M<0x01, "addwf", add>;
+def ADDFW : Arith1R<0x02, "addfw", add>;
+
+def ADDWFE : Arith1M<0x03, "addwfe", adde>;
+def ADDFWE : Arith1R<0x04, "addfwe", adde>;
+
+def ADDWFC : Arith1M<0x05, "addwfc", addc>;
+def ADDFWC : Arith1R<0x06, "addfwc", addc>;
+
+def SUBWF : Arith1M<0x07, "subwf", sub>;
+def SUBFW : Arith1R<0x08, "subfw", sub>;
+
+def SUBWFE : Arith1M<0x09, "subwfe", sube>;
+def SUBFWE : Arith1R<0x0a, "subfwe", sube>;
+
+def SUBWFC : Arith1M<0x0b, "subwfc", subc>;
+def SUBFWC : Arith1R<0x0d, "subfwc", subc>;
+
+def SUBRFW : Arith2R<0x08, "subfw", sub>;
+
+def SUBRFWE : Arith2R<0x0a, "subfwe", sube>;
+
+def SUBRFWC : Arith2R<0x0d, "subfwc", subc>;
+
+def brtarget : Operand<OtherVT>;
+
+class UncondJump< bits<4> op, string instr_asm>:
+ BitFormat< op,
+ (outs),
+ (ins brtarget:$target),
+ !strconcat(instr_asm, " $target"),
+ [(br bb:$target)]>;
+
+def GOTO : UncondJump<0x1, "goto">;
+
+class LogicM<bits<6> op, string instr_asm, SDNode OpNode> :
+ ByteFormat< op,
+ (outs),
+ (ins CPURegs:$b, mem:$dst),
+ !strconcat(instr_asm, " $dst"),
+ [(store (OpNode (load diraddrmode:$dst), CPURegs:$b), diraddrmode:$dst)]>;
+
+class LogicR<bits<6> op, string instr_asm, SDNode OpNode> :
+ ByteFormat< op,
+ (outs CPURegs:$dst),
+ (ins CPURegs:$b, mem:$c),
+ !strconcat(instr_asm, " $c"),
+ [(set CPURegs:$dst, (OpNode (load diraddrmode:$c), CPURegs:$b))]>;
+
+class LogicI<bits<6> op, string instr_asm, SDNode OpNode, Operand Od> :
+ LiteralFormat< op,
+ (outs CPURegs:$dst),
+ (ins CPURegs:$b, Od:$c),
+ !strconcat(instr_asm, " $c"),
+ [(set CPURegs:$dst, (OpNode CPURegs:$b, Od:$c ))]>;
+
+def XORWF : LogicM<0x1,"xorwf",xor>;
+def XORFW : LogicR<0x1,"xorfw",xor>;
+def XORLW : LogicI<0x1,"xorlw",xor, so_imm>;
+
+def ANDWF : LogicM<0x1,"andwf",and>;
+def ANDFW : LogicR<0x1,"andfw",and>;
+def ANDLW : LogicI<0x1,"andlw",and, so_imm>;
+
+def IORWF : LogicM<0x1,"iorwf",or>;
+def IORFW : LogicR<0x1,"iorfw",or>;
+def IORLW : LogicI<0x1,"iorlw",or, so_imm>;
+
+
+/* For comparison before branch */
+def SDT_PIC16Cmp : SDTypeProfile<1, 3, [SDTCisSameAs<0,1>]>;
+def SDTIntBinOpPIC16 : SDTypeProfile<1, 2, [SDTCisSameAs<0,1>,
+ SDTCisSameAs<1,2>, SDTCisInt<1>]>;
+
+def PIC16Cmp : SDNode<"PIC16ISD::Cmp",SDTIntBinOpPIC16, [SDNPOutFlag]>;
+def PIC16XORCC : SDNode<"PIC16ISD::XORCC",SDTIntBinOpPIC16, [SDNPOutFlag]>;
+def PIC16SUBCC : SDNode<"PIC16ISD::SUBCC",SDTIntBinOpPIC16, [SDNPOutFlag]>;
+
+def XORFWCC : LogicR<0x1,"xorfw",PIC16XORCC>;
+def XORLWCC : LogicI<0x1,"xorlw",PIC16XORCC, so_imm>;
+def SUBFWCC : Arith1R<0x1,"subfw",PIC16SUBCC>;
+def SUBLWCC : ArithI<0x1,"sublw",PIC16SUBCC, so_imm>;
+
+
+/* For branch conditions */
+def SDT_PIC16Branch : SDTypeProfile<0, 3, [SDTCisVT<0, OtherVT>,
+ SDTCisVT<1,i8>, SDTCisVT<2,i8>]>;
+
+def PIC16Branch : SDNode<"PIC16ISD::Branch",SDT_PIC16Branch,
+ [SDNPHasChain, SDNPInFlag]>;
+
+def PIC16BTFSS : SDNode<"PIC16ISD::BTFSS",SDT_PIC16Branch,
+ [SDNPHasChain, SDNPInFlag]>;
+
+def PIC16BTFSC : SDNode<"PIC16ISD::BTFSC",SDT_PIC16Branch,
+ [SDNPHasChain, SDNPInFlag]>;
+
+class InstrBitTestCC<bits<4> op, string instr_asm,SDNode OpNode>:
+ BitFormat< op,
+ (outs),
+ (ins brtarget:$target ,so_imm:$i, STATUSRegs:$s ),
+ !strconcat(instr_asm, " $s, $i, $target"),
+ [(OpNode bb:$target, so_imm:$i, STATUSRegs:$s )]>;
+
+def BTFSS : InstrBitTestCC<0x1,"btfss",PIC16BTFSS>;
+def BTFSC : InstrBitTestCC<0x1,"btfsc",PIC16BTFSC>;
+
+
+//===----------------------------------------------------------------------===//
+// Pseudo instructions
+//===----------------------------------------------------------------------===//
+
+let Defs = [STKPTR], Uses = [STKPTR] in {
+def ADJCALLSTACKDOWN : Pseudo<255, (outs), (ins i8imm:$amt),
+ "!ADJCALLSTACKDOWN $amt",
+ [(callseq_start imm:$amt)]>;
+def ADJCALLSTACKUP : Pseudo<254, (outs), (ins i8imm:$amt),
+ "!ADJCALLSTACKUP $amt",
+ [(callseq_end imm:$amt)]>;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Arbitrary patterns that map to one or more instructions
+//===----------------------------------------------------------------------===//
+def : Pat<(ret), (RETURN)>;
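
The so_imm_XFORM above narrows the node's value to int8_t before rebuilding the target constant. A standalone sketch of that narrowing, to show what happens to values that do not fit in 8 bits (the inputs are arbitrary examples):

#include <cstdint>
#include <cstdio>

// Mirror so_imm_XFORM: keep only the low 8 bits, reinterpreted as signed.
static int narrowToSoImm(uint64_t V) {
  return int8_t(V);
}

int main() {
  printf("%d\n", narrowToSoImm(0x7F));  // 127: fits, unchanged
  printf("%d\n", narrowToSoImm(0xFF));  // 255 becomes -1 after the cast
  printf("%d\n", narrowToSoImm(0x1FF)); // only the low byte survives: -1
  return 0;
}
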
Added: llvm/branches/non-call-eh/lib/Target/PIC16/PIC16RegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PIC16/PIC16RegisterInfo.cpp?rev=53163&view=auto
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PIC16/PIC16RegisterInfo.cpp (added)
+++ llvm/branches/non-call-eh/lib/Target/PIC16/PIC16RegisterInfo.cpp Sun Jul 6 15:45:41 2008
@@ -0,0 +1,220 @@
+//===- PIC16RegisterInfo.cpp - PIC16 Register Information -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the PIC16 implementation of the TargetRegisterInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "pic16-reg-info"
+
+#include "PIC16.h"
+#include "PIC16RegisterInfo.h"
+#include "llvm/Constants.h"
+#include "llvm/Function.h"
+#include "llvm/Type.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineLocation.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Target/TargetFrameInfo.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+
+using namespace llvm;
+
+// FIXME: add subtarget support.
+PIC16RegisterInfo::PIC16RegisterInfo(const TargetInstrInfo &tii)
+ : PIC16GenRegisterInfo(PIC16::ADJCALLSTACKDOWN, PIC16::ADJCALLSTACKUP),
+ TII(tii) {}
+
+/// getRegisterNumbering - Given the enum value for some register, e.g.
+/// PIC16::RA, return the number that it corresponds to (e.g. 31).
+unsigned PIC16RegisterInfo::
+getRegisterNumbering(unsigned RegEnum)
+{
+ assert (RegEnum <= 31 && "Unknown register number!");
+ return RegEnum;
+}
+
+void PIC16RegisterInfo::
+copyRegToReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ unsigned DestReg, unsigned SrcReg,
+ const TargetRegisterClass *RC) const
+{
+ return;
+}
+
+void PIC16RegisterInfo::reMaterialize(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ unsigned DestReg,
+ const MachineInstr *Orig) const
+{
+ MachineInstr *MI = Orig->clone();
+ MI->getOperand(0).setReg(DestReg);
+ MBB.insert(I, MI);
+}
+
+MachineInstr *PIC16RegisterInfo::
+foldMemoryOperand(MachineInstr* MI, unsigned OpNum, int FI) const
+{
+ MachineInstr *NewMI = NULL;
+ return NewMI;
+}
+
+//===----------------------------------------------------------------------===//
+//
+// Callee Saved Registers methods
+//
+//===----------------------------------------------------------------------===//
+
+/// PIC16 Callee Saved Registers
+const unsigned* PIC16RegisterInfo::
+getCalleeSavedRegs(const MachineFunction *MF) const
+{
+  // PIC16 currently defines no callee-saved registers.
+ static const unsigned CalleeSavedRegs[] = { 0 };
+ return CalleeSavedRegs;
+}
+
+/// PIC16 Callee Saved Register Classes
+const TargetRegisterClass* const*
+PIC16RegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const
+{
+ static const TargetRegisterClass * const CalleeSavedRegClasses[] = { 0 };
+ return CalleeSavedRegClasses;
+}
+
+BitVector PIC16RegisterInfo::
+getReservedRegs(const MachineFunction &MF) const
+{
+ BitVector Reserved(getNumRegs());
+ return Reserved;
+}
+
+//===----------------------------------------------------------------------===//
+//
+// Stack Frame Processing methods
+// +----------------------------+
+//
+// FIXME: Add stack layout description here.
+//
+//
+//===----------------------------------------------------------------------===//
+
+// hasFP - Return true if the specified function should have a dedicated frame
+// pointer register. This is true if the function has variable sized allocas or
+// if frame pointer elimination is disabled.
+bool PIC16RegisterInfo::
+hasFP(const MachineFunction &MF) const {
+ return false;
+}
+
+// This function eliminates the ADJCALLSTACKDOWN and
+// ADJCALLSTACKUP pseudo instructions.
+void PIC16RegisterInfo::
+eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const {
+ // Simply discard ADJCALLSTACKDOWN, ADJCALLSTACKUP instructions.
+ MBB.erase(I);
+}
+
+// FrameIndex represents objects inside an abstract stack.
+// We must replace FrameIndex with a direct stack/frame
+// pointer reference.
+void PIC16RegisterInfo::
+eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj,
+ RegScavenger *RS) const
+{
+ MachineInstr &MI = *II;
+ MachineFunction &MF = *MI.getParent()->getParent();
+
+ unsigned i = 0;
+ while (!MI.getOperand(i).isFrameIndex()) {
+ ++i;
+ assert(i < MI.getNumOperands() &&
+ "Instr doesn't have FrameIndex operand!");
+ }
+
+ int FrameIndex = MI.getOperand(i).getIndex();
+ int stackSize = MF.getFrameInfo()->getStackSize();
+ int spOffset = MF.getFrameInfo()->getObjectOffset(FrameIndex);
+
+ DOUT << "\nFunction : " << MF.getFunction()->getName() << "\n";
+ DOUT << "<--------->\n";
+#ifndef NDEBUG
+ MI.print(DOUT);
+#endif
+ DOUT << "FrameIndex : " << FrameIndex << "\n";
+ DOUT << "spOffset : " << spOffset << "\n";
+ DOUT << "stackSize : " << stackSize << "\n";
+
+  // As explained in LowerFORMAL_ARGUMENTS, detect negative offsets
+ // and adjust SPOffsets considering the final stack size.
+ int Offset = ((spOffset < 0) ? (stackSize + (-(spOffset+4))) : (spOffset));
+
+ DOUT << "Offset : " << Offset << "\n";
+ DOUT << "<--------->\n";
+
+ // MI.getOperand(i+1).ChangeToImmediate(Offset);
+ MI.getOperand(i).ChangeToRegister(getFrameRegister(MF), false);
+}
+
+void PIC16RegisterInfo::
+emitPrologue(MachineFunction &MF) const
+{
+}
+
+void PIC16RegisterInfo::
+emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const
+{
+}
+
+void PIC16RegisterInfo::
+processFunctionBeforeFrameFinalized(MachineFunction &MF) const
+{
+}
+
+unsigned PIC16RegisterInfo::
+getRARegister() const {
+ assert(0 && "What is the return address register");
+ return 0;
+}
+
+unsigned PIC16RegisterInfo::
+getFrameRegister(MachineFunction &MF) const {
+ return PIC16::STKPTR;
+}
+
+unsigned PIC16RegisterInfo::
+getEHExceptionRegister() const {
+ assert(0 && "What is the exception register");
+ return 0;
+}
+
+unsigned PIC16RegisterInfo::
+getEHHandlerRegister() const {
+ assert(0 && "What is the exception handler register");
+ return 0;
+}
+
+int PIC16RegisterInfo::
+getDwarfRegNum(unsigned RegNum, bool isEH) const {
+ assert(0 && "What is the dwarf register number");
+ return -1;
+}
+
+
+#include "PIC16GenRegisterInfo.inc"
+
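
The offset computation in eliminateFrameIndex above re-expresses negative object offsets relative to the final stack size (with the +4 bias taken from the expression in the code). A standalone sketch of that arithmetic with made-up numbers:

#include <cstdio>

// Mirror the adjustment above: negative offsets are folded back into the
// final stack size, positive offsets pass through unchanged.
static int adjustOffset(int SpOffset, int StackSize) {
  return (SpOffset < 0) ? (StackSize + (-(SpOffset + 4))) : SpOffset;
}

int main() {
  printf("%d\n", adjustOffset(-8, 32)); // 32 + 4 = 36
  printf("%d\n", adjustOffset(12, 32)); // stays 12
  return 0;
}
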
Added: llvm/branches/non-call-eh/lib/Target/PIC16/PIC16RegisterInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PIC16/PIC16RegisterInfo.h?rev=53163&view=auto
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PIC16/PIC16RegisterInfo.h (added)
+++ llvm/branches/non-call-eh/lib/Target/PIC16/PIC16RegisterInfo.h Sun Jul 6 15:45:41 2008
@@ -0,0 +1,86 @@
+//===- PIC16RegisterInfo.h - PIC16 Register Information Impl ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the PIC16 implementation of the TargetRegisterInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef PIC16REGISTERINFO_H
+#define PIC16REGISTERINFO_H
+
+#include "PIC16GenRegisterInfo.h.inc"
+#include "llvm/Target/TargetRegisterInfo.h"
+
+namespace llvm {
+
+// Forward Declarations.
+class TargetInstrInfo;
+class Type;
+
+struct PIC16RegisterInfo : public PIC16GenRegisterInfo {
+ const TargetInstrInfo &TII;
+
+ explicit PIC16RegisterInfo(const TargetInstrInfo &tii);
+
+ /// getRegisterNumbering - Given the enum value for some register, e.g.
+ /// PIC16::RA, return the number that it corresponds to (e.g. 31).
+ static unsigned getRegisterNumbering(unsigned RegEnum);
+
+ void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+ unsigned DestReg, const MachineInstr *Orig) const;
+
+ MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
+ int FrameIndex) const;
+
+ MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
+ MachineInstr* LoadMI) const {
+ return 0;
+ }
+
+ void copyRegToReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ unsigned DestReg, unsigned SrcReg,
+ const TargetRegisterClass *RC) const;
+
+
+ const unsigned *getCalleeSavedRegs(const MachineFunction* MF = 0) const;
+
+ const TargetRegisterClass* const*
+ getCalleeSavedRegClasses(const MachineFunction* MF = 0) const;
+
+ BitVector getReservedRegs(const MachineFunction &MF) const;
+
+ bool hasFP(const MachineFunction &MF) const;
+
+ void eliminateCallFramePseudoInstr(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const;
+
+ /// Stack Frame Processing Methods.
+ void eliminateFrameIndex(MachineBasicBlock::iterator II,
+ int SPAdj, RegScavenger *RS = NULL) const;
+
+ void processFunctionBeforeFrameFinalized(MachineFunction &MF) const;
+
+ void emitPrologue(MachineFunction &MF) const;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
+
+ /// Debug information queries.
+ unsigned getRARegister() const;
+ unsigned getFrameRegister(MachineFunction &MF) const;
+
+ /// Exception handling queries.
+ unsigned getEHExceptionRegister() const;
+ unsigned getEHHandlerRegister() const;
+
+ int getDwarfRegNum(unsigned RegNum, bool isEH) const;
+};
+
+} // end namespace llvm
+
+#endif
Added: llvm/branches/non-call-eh/lib/Target/PIC16/PIC16RegisterInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PIC16/PIC16RegisterInfo.td?rev=53163&view=auto
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PIC16/PIC16RegisterInfo.td (added)
+++ llvm/branches/non-call-eh/lib/Target/PIC16/PIC16RegisterInfo.td Sun Jul 6 15:45:41 2008
@@ -0,0 +1,84 @@
+//===- PIC16RegisterInfo.td - PIC16 Register defs ------------*- tblgen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Declarations that describe the PIC16 register file
+//===----------------------------------------------------------------------===//
+
+// We have banks of 32 registers each.
+class PIC16Reg<string n> : Register<n> {
+ field bits<5> Num;
+ let Namespace = "PIC16";
+}
+
+// PIC16 CPU Registers
+class PIC16GPRReg<bits<5> num, string n> : PIC16Reg<n> {
+ let Num = num;
+}
+
+// CPU GPR Registers
+def FSR0 : PIC16GPRReg< 0, "FSR0">, DwarfRegNum<[0]>;
+def FSR1 : PIC16GPRReg< 1, "FSR1">, DwarfRegNum<[1]>;
+
+// CPU Registers Class
+def PTRRegs : RegisterClass<"PIC16", [i16], 8,
+ [FSR0, FSR1]>
+{
+ let MethodProtos = [{
+ iterator allocation_order_end(const MachineFunction &MF) const;
+ }];
+ let MethodBodies = [{
+ PTRRegsClass::iterator
+ PTRRegsClass::allocation_order_end(const MachineFunction &MF) const {
+ return end();
+ }
+ }];
+}
+
+def WREG : PIC16GPRReg< 0, "WREG">, DwarfRegNum<[0]>;
+
+// CPU Registers Class
+def CPURegs : RegisterClass<"PIC16", [i8], 8,
+ [WREG]>
+{
+ let MethodProtos = [{
+ iterator allocation_order_end(const MachineFunction &MF) const;
+ }];
+ let MethodBodies = [{
+ CPURegsClass::iterator
+ CPURegsClass::allocation_order_end(const MachineFunction &MF) const {
+ return end();
+ }
+ }];
+}
+
+def STATUSREG : PIC16GPRReg<2, "STATUS">, DwarfRegNum<[0]>;
+
+// STATUS Registers Class
+def STATUSRegs : RegisterClass<"PIC16", [i8], 8,
+ [STATUSREG]>;
+
+
+// Dummy stack pointer.
+def STKPTR : PIC16GPRReg< 0, "SP">, DwarfRegNum<[0]>;
+
+// CPU Registers Class
+def STKRegs : RegisterClass<"PIC16", [i8], 8,
+ [STKPTR]>
+{
+ let MethodProtos = [{
+ iterator allocation_order_end(const MachineFunction &MF) const;
+ }];
+ let MethodBodies = [{
+ STKRegsClass::iterator
+ STKRegsClass::allocation_order_end(const MachineFunction &MF) const {
+ return end();
+ }
+ }];
+}
Added: llvm/branches/non-call-eh/lib/Target/PIC16/PIC16Subtarget.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PIC16/PIC16Subtarget.cpp?rev=53163&view=auto
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PIC16/PIC16Subtarget.cpp (added)
+++ llvm/branches/non-call-eh/lib/Target/PIC16/PIC16Subtarget.cpp Sun Jul 6 15:45:41 2008
@@ -0,0 +1,27 @@
+//===- PIC16Subtarget.cpp - PIC16 Subtarget Information -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the PIC16 specific subclass of TargetSubtarget.
+//
+//===----------------------------------------------------------------------===//
+
+#include "PIC16.h"
+#include "PIC16Subtarget.h"
+#include "PIC16GenSubtarget.inc"
+using namespace llvm;
+
+PIC16Subtarget::PIC16Subtarget(const TargetMachine &TM, const Module &M,
+ const std::string &FS)
+ :IsPIC16Old(false)
+{
+ std::string CPU = "generic";
+
+ // Parse features string.
+ ParseSubtargetFeatures(FS, CPU);
+}
Added: llvm/branches/non-call-eh/lib/Target/PIC16/PIC16Subtarget.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PIC16/PIC16Subtarget.h?rev=53163&view=auto
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PIC16/PIC16Subtarget.h (added)
+++ llvm/branches/non-call-eh/lib/Target/PIC16/PIC16Subtarget.h Sun Jul 6 15:45:41 2008
@@ -0,0 +1,41 @@
+//=====-- PIC16Subtarget.h - Define Subtarget for the PIC16 ---*- C++ -*--====//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the PIC16 specific subclass of TargetSubtarget.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef PIC16SUBTARGET_H
+#define PIC16SUBTARGET_H
+
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetSubtarget.h"
+
+#include <string>
+
+namespace llvm {
+class Module;
+
+class PIC16Subtarget : public TargetSubtarget {
+ bool IsPIC16Old;
+
+public:
+  /// This constructor initializes the data members to match those
+ /// of the specified module.
+ ///
+ PIC16Subtarget(const TargetMachine &TM, const Module &M,
+ const std::string &FS);
+
+  /// ParseSubtargetFeatures - Parses the features string, setting the
+  /// specified subtarget options. The definition of this function is
+  /// auto-generated by tblgen.
+ void ParseSubtargetFeatures(const std::string &FS, const std::string &CPU);
+};
+} // End llvm namespace
+
+#endif
Added: llvm/branches/non-call-eh/lib/Target/PIC16/PIC16TargetAsmInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PIC16/PIC16TargetAsmInfo.cpp?rev=53163&view=auto
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PIC16/PIC16TargetAsmInfo.cpp (added)
+++ llvm/branches/non-call-eh/lib/Target/PIC16/PIC16TargetAsmInfo.cpp Sun Jul 6 15:45:41 2008
@@ -0,0 +1,26 @@
+//===-- PIC16TargetAsmInfo.cpp - PIC16 asm properties ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declarations of the PIC16TargetAsmInfo properties.
+//
+//===----------------------------------------------------------------------===//
+
+#include "PIC16TargetAsmInfo.h"
+
+using namespace llvm;
+
+PIC16TargetAsmInfo::
+PIC16TargetAsmInfo(const PIC16TargetMachine &TM)
+{
+ Data16bitsDirective = "\t.half\t";
+ Data32bitsDirective = "\t.word\t";
+ CommentString = ";";
+ COMMDirective = "\t";
+ COMMDirectiveTakesAlignment = 0;
+}
Added: llvm/branches/non-call-eh/lib/Target/PIC16/PIC16TargetAsmInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PIC16/PIC16TargetAsmInfo.h?rev=53163&view=auto
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PIC16/PIC16TargetAsmInfo.h (added)
+++ llvm/branches/non-call-eh/lib/Target/PIC16/PIC16TargetAsmInfo.h Sun Jul 6 15:45:41 2008
@@ -0,0 +1,30 @@
+//=====-- PIC16TargetAsmInfo.h - PIC16 asm properties ---------*- C++ -*--====//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the PIC16TargetAsmInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef PIC16TARGETASMINFO_H
+#define PIC16TARGETASMINFO_H
+
+#include "llvm/Target/TargetAsmInfo.h"
+
+namespace llvm {
+
+ // Forward declaration.
+ class PIC16TargetMachine;
+
+ struct PIC16TargetAsmInfo : public TargetAsmInfo {
+ PIC16TargetAsmInfo(const PIC16TargetMachine &TM);
+ };
+
+} // namespace llvm
+
+#endif
Added: llvm/branches/non-call-eh/lib/Target/PIC16/PIC16TargetMachine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PIC16/PIC16TargetMachine.cpp?rev=53163&view=auto
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PIC16/PIC16TargetMachine.cpp (added)
+++ llvm/branches/non-call-eh/lib/Target/PIC16/PIC16TargetMachine.cpp Sun Jul 6 15:45:41 2008
@@ -0,0 +1,70 @@
+//===-- PIC16TargetMachine.cpp - Define TargetMachine for PIC16 -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Top-level implementation for the PIC16 target.
+//
+//===----------------------------------------------------------------------===//
+
+#include "PIC16.h"
+#include "PIC16TargetAsmInfo.h"
+#include "PIC16TargetMachine.h"
+#include "llvm/Module.h"
+#include "llvm/PassManager.h"
+#include "llvm/Target/TargetAsmInfo.h"
+#include "llvm/Target/TargetMachineRegistry.h"
+
+using namespace llvm;
+
+namespace {
+ // Register the targets
+ RegisterTarget<PIC16TargetMachine> X("pic16", " PIC16 14-bit");
+}
+
+PIC16TargetMachine::
+PIC16TargetMachine(const Module &M, const std::string &FS) :
+ Subtarget(*this, M, FS), DataLayout("e-p:16:8:8-i8:8:8-i16:8:8-i32:8:8"),
+ InstrInfo(*this), TLInfo(*this),
+ FrameInfo(TargetFrameInfo::StackGrowsUp, 8, 0) { }
+
+
+const TargetAsmInfo *PIC16TargetMachine::createTargetAsmInfo() const
+{
+ return new PIC16TargetAsmInfo(*this);
+}
+
+//===----------------------------------------------------------------------===//
+// Pass Pipeline Configuration
+//===----------------------------------------------------------------------===//
+
+bool PIC16TargetMachine::addInstSelector(PassManagerBase &PM, bool Fast)
+{
+ // Install an instruction selector.
+ PM.add(createPIC16ISelDag(*this));
+ return false;
+}
+
+bool PIC16TargetMachine::
+addPrologEpilogInserter(PassManagerBase &PM, bool Fast)
+{
+ return false;
+}
+
+bool PIC16TargetMachine::addPreEmitPass(PassManagerBase &PM, bool Fast)
+{
+ return true;
+}
+
+bool PIC16TargetMachine::
+addAssemblyEmitter(PassManagerBase &PM, bool Fast, std::ostream &Out)
+{
+ // Output assembly language.
+ PM.add(createPIC16CodePrinterPass(Out, *this));
+ return false;
+}
+
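
The TargetData string passed to the constructor above, "e-p:16:8:8-i8:8:8-i16:8:8-i32:8:8", declares a little-endian target with 16-bit pointers and 8-bit ABI/preferred alignment for pointers and for i8/i16/i32. A small standalone sketch that merely splits such a specifier into its dash-separated components (this is not the LLVM parser, only an illustration of the format):

#include <cstdio>
#include <sstream>
#include <string>

int main() {
  // "e" = little endian; "p:16:8:8" = pointer size and alignments in bits;
  // "iN:8:8" = N-bit integer with ABI/preferred alignment of 8 bits.
  std::string Spec = "e-p:16:8:8-i8:8:8-i16:8:8-i32:8:8";
  std::istringstream In(Spec);
  std::string Component;
  while (std::getline(In, Component, '-'))
    printf("component: %s\n", Component.c_str());
  return 0;
}
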
Added: llvm/branches/non-call-eh/lib/Target/PIC16/PIC16TargetMachine.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PIC16/PIC16TargetMachine.h?rev=53163&view=auto
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PIC16/PIC16TargetMachine.h (added)
+++ llvm/branches/non-call-eh/lib/Target/PIC16/PIC16TargetMachine.h Sun Jul 6 15:45:41 2008
@@ -0,0 +1,61 @@
+//===-- PIC16TargetMachine.h - Define TargetMachine for PIC16 ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the PIC16 specific subclass of TargetMachine.
+//
+//===----------------------------------------------------------------------===//
+
+
+#ifndef PIC16_TARGETMACHINE_H
+#define PIC16_TARGETMACHINE_H
+
+#include "PIC16InstrInfo.h"
+#include "PIC16ISelLowering.h"
+#include "PIC16Subtarget.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetFrameInfo.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace llvm {
+
+/// PIC16TargetMachine
+///
+class PIC16TargetMachine : public LLVMTargetMachine {
+ PIC16Subtarget Subtarget;
+ const TargetData DataLayout; // Calculates type size & alignment
+ PIC16InstrInfo InstrInfo;
+ PIC16TargetLowering TLInfo;
+ TargetFrameInfo FrameInfo;
+
+protected:
+ virtual const TargetAsmInfo *createTargetAsmInfo() const;
+
+public:
+ PIC16TargetMachine(const Module &M, const std::string &FS);
+
+ virtual const TargetFrameInfo *getFrameInfo() const
+ { return &FrameInfo; }
+ virtual const PIC16InstrInfo *getInstrInfo() const
+ { return &InstrInfo; }
+ virtual const TargetData *getTargetData() const
+ { return &DataLayout; }
+ virtual PIC16TargetLowering *getTargetLowering() const
+ { return const_cast<PIC16TargetLowering*>(&TLInfo); }
+ virtual const PIC16RegisterInfo *getRegisterInfo() const
+ { return &InstrInfo.getRegisterInfo(); }
+
+ virtual bool addInstSelector(PassManagerBase &PM, bool Fast);
+ virtual bool addPrologEpilogInserter(PassManagerBase &PM, bool Fast);
+ virtual bool addPreEmitPass(PassManagerBase &PM, bool Fast);
+ virtual bool addAssemblyEmitter(PassManagerBase &PM, bool Fast,
+ std::ostream &Out);
+};
+} // end namespace llvm
+
+#endif
Modified: llvm/branches/non-call-eh/lib/Target/PowerPC/PPCAsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PowerPC/PPCAsmPrinter.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PowerPC/PPCAsmPrinter.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/PowerPC/PPCAsmPrinter.cpp Sun Jul 6 15:45:41 2008
@@ -185,11 +185,11 @@
if (MO.getType() == MachineOperand::MO_GlobalAddress) {
GlobalValue *GV = MO.getGlobal();
if (((GV->isDeclaration() || GV->hasWeakLinkage() ||
- GV->hasLinkOnceLinkage()))) {
+ GV->hasLinkOnceLinkage() || GV->hasCommonLinkage()))) {
// Dynamically-resolved functions need a stub for the function.
std::string Name = Mang->getValueName(GV);
FnStubs.insert(Name);
- O << "L" << Name << "$stub";
+ printSuffixedName(Name, "$stub");
if (GV->hasExternalWeakLinkage())
ExtWeakSymbols.insert(GV);
return;
@@ -198,7 +198,7 @@
if (MO.getType() == MachineOperand::MO_ExternalSymbol) {
std::string Name(TAI->getGlobalPrefix()); Name += MO.getSymbolName();
FnStubs.insert(Name);
- O << "L" << Name << "$stub";
+ printSuffixedName(Name, "$stub");
return;
}
}
@@ -377,7 +377,7 @@
if (TM.getRelocationModel() != Reloc::Static) {
std::string Name(TAI->getGlobalPrefix()); Name += MO.getSymbolName();
GVStubs.insert(Name);
- O << "L" << Name << "$non_lazy_ptr";
+ printSuffixedName(Name, "$non_lazy_ptr");
return;
}
O << TAI->getGlobalPrefix() << MO.getSymbolName();
@@ -390,9 +390,11 @@
// External or weakly linked global variables need non-lazily-resolved stubs
if (TM.getRelocationModel() != Reloc::Static) {
if (((GV->isDeclaration() || GV->hasWeakLinkage() ||
- GV->hasLinkOnceLinkage()))) {
+ GV->hasLinkOnceLinkage() || GV->hasCommonLinkage()))) {
GVStubs.insert(Name);
- O << "L" << Name << "$non_lazy_ptr";
+ printSuffixedName(Name, "$non_lazy_ptr");
+ if (GV->hasExternalWeakLinkage())
+ ExtWeakSymbols.insert(GV);
return;
}
}
@@ -420,7 +422,7 @@
std::string Name = getGlobalLinkName(GV);
if (TM.getRelocationModel() != Reloc::Static) {
GVStubs.insert(Name);
- O << "L" << Name << "$non_lazy_ptr";
+ printSuffixedName(Name, "$non_lazy_ptr");
return;
}
O << Name;
@@ -671,8 +673,8 @@
unsigned Align = TD->getPreferredAlignmentLog(I);
if (C->isNullValue() && /* FIXME: Verify correct */
- !I->hasSection() &&
- (I->hasInternalLinkage() || I->hasWeakLinkage() ||
+ !I->hasSection() && (I->hasCommonLinkage() ||
+ I->hasInternalLinkage() || I->hasWeakLinkage() ||
I->hasLinkOnceLinkage() || I->hasExternalLinkage())) {
if (Size == 0) Size = 1; // .comm Foo, 0 is undefined, avoid it.
if (I->hasExternalLinkage()) {
@@ -696,6 +698,7 @@
switch (I->getLinkage()) {
case GlobalValue::LinkOnceLinkage:
case GlobalValue::WeakLinkage:
+ case GlobalValue::CommonLinkage:
O << "\t.global " << name << '\n'
<< "\t.type " << name << ", @object\n"
<< "\t.weak " << name << '\n';
@@ -936,8 +939,8 @@
unsigned Align = TD->getPreferredAlignmentLog(I);
if (C->isNullValue() && /* FIXME: Verify correct */
- !I->hasSection() &&
- (I->hasInternalLinkage() || I->hasWeakLinkage() ||
+ !I->hasSection() && (I->hasCommonLinkage() ||
+ I->hasInternalLinkage() || I->hasWeakLinkage() ||
I->hasLinkOnceLinkage() || I->hasExternalLinkage())) {
if (Size == 0) Size = 1; // .comm Foo, 0 is undefined, avoid it.
if (I->hasExternalLinkage()) {
@@ -947,6 +950,16 @@
} else if (I->hasInternalLinkage()) {
SwitchToDataSection("\t.data", I);
O << TAI->getLCOMMDirective() << name << "," << Size << "," << Align;
+ } else if (!I->hasCommonLinkage()) {
+ O << "\t.globl " << name << "\n"
+ << TAI->getWeakDefDirective() << name << "\n";
+ SwitchToDataSection("\t.section __DATA,__datacoal_nt,coalesced", I);
+ EmitAlignment(Align, I);
+ O << name << ":\t\t\t\t" << TAI->getCommentString() << " ";
+ PrintUnmangledNameSafely(I, O);
+ O << "\n";
+ EmitGlobalConstant(C);
+ continue;
} else {
SwitchToDataSection("\t.data", I);
O << ".comm " << name << "," << Size;
@@ -961,9 +974,18 @@
switch (I->getLinkage()) {
case GlobalValue::LinkOnceLinkage:
case GlobalValue::WeakLinkage:
+ case GlobalValue::CommonLinkage:
O << "\t.globl " << name << '\n'
<< "\t.weak_definition " << name << '\n';
- SwitchToDataSection("\t.section __DATA,__datacoal_nt,coalesced", I);
+ if (!I->isConstant())
+ SwitchToDataSection("\t.section __DATA,__datacoal_nt,coalesced", I);
+ else {
+ const ArrayType *AT = dyn_cast<ArrayType>(Type);
+ if (AT && AT->getElementType()==Type::Int8Ty)
+ SwitchToDataSection("\t.section __TEXT,__const_coal,coalesced", I);
+ else
+ SwitchToDataSection("\t.section __DATA,__const_coal,coalesced", I);
+ }
break;
case GlobalValue::AppendingLinkage:
// FIXME: appending linkage variables should go into a section of
@@ -1037,22 +1059,30 @@
SwitchToTextSection("\t.section __TEXT,__picsymbolstub1,symbol_stubs,"
"pure_instructions,32");
EmitAlignment(4);
- O << "L" << *i << "$stub:\n";
+ std::string p = *i;
+ std::string L0p = (p[0]=='\"') ? "\"L0$" + p.substr(1) : "L0$" + p ;
+ printSuffixedName(p, "$stub");
+ O << ":\n";
O << "\t.indirect_symbol " << *i << "\n";
O << "\tmflr r0\n";
- O << "\tbcl 20,31,L0$" << *i << "\n";
- O << "L0$" << *i << ":\n";
+ O << "\tbcl 20,31," << L0p << "\n";
+ O << L0p << ":\n";
O << "\tmflr r11\n";
- O << "\taddis r11,r11,ha16(L" << *i << "$lazy_ptr-L0$" << *i << ")\n";
+ O << "\taddis r11,r11,ha16(";
+ printSuffixedName(p, "$lazy_ptr");
+ O << "-" << L0p << ")\n";
O << "\tmtlr r0\n";
if (isPPC64)
- O << "\tldu r12,lo16(L" << *i << "$lazy_ptr-L0$" << *i << ")(r11)\n";
+ O << "\tldu r12,lo16(";
else
- O << "\tlwzu r12,lo16(L" << *i << "$lazy_ptr-L0$" << *i << ")(r11)\n";
+ O << "\tlwzu r12,lo16(";
+ printSuffixedName(p, "$lazy_ptr");
+ O << "-" << L0p << ")(r11)\n";
O << "\tmtctr r12\n";
O << "\tbctr\n";
SwitchToDataSection(".lazy_symbol_pointer");
- O << "L" << *i << "$lazy_ptr:\n";
+ printSuffixedName(p, "$lazy_ptr");
+ O << ":\n";
O << "\t.indirect_symbol " << *i << "\n";
if (isPPC64)
O << "\t.quad dyld_stub_binding_helper\n";
@@ -1065,17 +1095,24 @@
SwitchToTextSection("\t.section __TEXT,__symbol_stub1,symbol_stubs,"
"pure_instructions,16");
EmitAlignment(4);
- O << "L" << *i << "$stub:\n";
+ std::string p = *i;
+ printSuffixedName(p, "$stub");
+ O << ":\n";
O << "\t.indirect_symbol " << *i << "\n";
- O << "\tlis r11,ha16(L" << *i << "$lazy_ptr)\n";
+ O << "\tlis r11,ha16(";
+ printSuffixedName(p, "$lazy_ptr");
+ O << ")\n";
if (isPPC64)
- O << "\tldu r12,lo16(L" << *i << "$lazy_ptr)(r11)\n";
+ O << "\tldu r12,lo16(";
else
- O << "\tlwzu r12,lo16(L" << *i << "$lazy_ptr)(r11)\n";
+ O << "\tlwzu r12,lo16(";
+ printSuffixedName(p, "$lazy_ptr");
+ O << ")(r11)\n";
O << "\tmtctr r12\n";
O << "\tbctr\n";
SwitchToDataSection(".lazy_symbol_pointer");
- O << "L" << *i << "$lazy_ptr:\n";
+ printSuffixedName(p, "$lazy_ptr");
+ O << ":\n";
O << "\t.indirect_symbol " << *i << "\n";
if (isPPC64)
O << "\t.quad dyld_stub_binding_helper\n";
@@ -1101,7 +1138,9 @@
SwitchToDataSection(".non_lazy_symbol_pointer");
for (std::set<std::string>::iterator I = GVStubs.begin(),
E = GVStubs.end(); I != E; ++I) {
- O << "L" << *I << "$non_lazy_ptr:\n";
+ std::string p = *I;
+ printSuffixedName(p, "$non_lazy_ptr");
+ O << ":\n";
O << "\t.indirect_symbol " << *I << "\n";
if (isPPC64)
O << "\t.quad\t0\n";
Modified: llvm/branches/non-call-eh/lib/Target/PowerPC/PPCCodeEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PowerPC/PPCCodeEmitter.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PowerPC/PPCCodeEmitter.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/PowerPC/PPCCodeEmitter.cpp Sun Jul 6 15:45:41 2008
@@ -80,10 +80,6 @@
return new PPCCodeEmitter(TM, MCE);
}
-#ifdef __APPLE__
-extern "C" void sys_icache_invalidate(const void *Addr, size_t len);
-#endif
-
bool PPCCodeEmitter::runOnMachineFunction(MachineFunction &MF) {
assert((MF.getTarget().getRelocationModel() != Reloc::Default ||
MF.getTarget().getRelocationModel() != Reloc::Static) &&
@@ -109,7 +105,8 @@
default:
MCE.emitWordBE(getBinaryCodeForInstr(*I));
break;
- case TargetInstrInfo::LABEL:
+ case TargetInstrInfo::DBG_LABEL:
+ case TargetInstrInfo::EH_LABEL:
MCE.emitLabel(MI.getOperand(0).getImm());
break;
case TargetInstrInfo::IMPLICIT_DEF:
@@ -143,7 +140,8 @@
MO.isConstantPoolIndex() || MO.isJumpTableIndex()) {
unsigned Reloc = 0;
if (MI.getOpcode() == PPC::BL_Macho || MI.getOpcode() == PPC::BL8_Macho ||
- MI.getOpcode() == PPC::BL_ELF || MI.getOpcode() == PPC::BL8_ELF)
+ MI.getOpcode() == PPC::BL_ELF || MI.getOpcode() == PPC::BL8_ELF ||
+ MI.getOpcode() == PPC::TAILB || MI.getOpcode() == PPC::TAILB8)
Reloc = PPC::reloc_pcrel_bx;
else {
if (TM.getRelocationModel() == Reloc::PIC_) {
Modified: llvm/branches/non-call-eh/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PowerPC/PPCISelDAGToDAG.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PowerPC/PPCISelDAGToDAG.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/PowerPC/PPCISelDAGToDAG.cpp Sun Jul 6 15:45:41 2008
@@ -173,9 +173,9 @@
SDOperand BuildSDIVSequence(SDNode *N);
SDOperand BuildUDIVSequence(SDNode *N);
- /// InstructionSelectBasicBlock - This callback is invoked by
+ /// InstructionSelect - This callback is invoked by
/// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
- virtual void InstructionSelectBasicBlock(SelectionDAG &DAG);
+ virtual void InstructionSelect(SelectionDAG &DAG);
void InsertVRSaveCode(Function &Fn);
@@ -201,17 +201,14 @@
};
}
-/// InstructionSelectBasicBlock - This callback is invoked by
+/// InstructionSelect - This callback is invoked by
/// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
-void PPCDAGToDAGISel::InstructionSelectBasicBlock(SelectionDAG &DAG) {
+void PPCDAGToDAGISel::InstructionSelect(SelectionDAG &DAG) {
DEBUG(BB->dump());
// Select target instructions for the DAG.
DAG.setRoot(SelectRoot(DAG.getRoot()));
DAG.RemoveDeadNodes();
-
- // Emit machine code to BB.
- ScheduleAndEmitDAG(DAG);
}
/// InsertVRSaveCode - Once the entire function has been instruction selected,
@@ -921,7 +918,7 @@
case ISD::LOAD: {
// Handle preincrement loads.
LoadSDNode *LD = cast<LoadSDNode>(Op);
- MVT::ValueType LoadedVT = LD->getMemoryVT();
+ MVT LoadedVT = LD->getMemoryVT();
// Normal loads are handled by code generated from the .td file.
if (LD->getAddressingMode() != ISD::PRE_INC)
@@ -936,7 +933,7 @@
if (LD->getValueType(0) != MVT::i64) {
// Handle PPC32 integer and normal FP loads.
assert((!isSExt || LoadedVT == MVT::i16) && "Invalid sext update load");
- switch (LoadedVT) {
+ switch (LoadedVT.getSimpleVT()) {
default: assert(0 && "Invalid PPC load type!");
case MVT::f64: Opcode = PPC::LFDU; break;
case MVT::f32: Opcode = PPC::LFSU; break;
@@ -948,7 +945,7 @@
} else {
assert(LD->getValueType(0) == MVT::i64 && "Unknown load result type!");
assert((!isSExt || LoadedVT == MVT::i16) && "Invalid sext update load");
- switch (LoadedVT) {
+ switch (LoadedVT.getSimpleVT()) {
default: assert(0 && "Invalid PPC load type!");
case MVT::i64: Opcode = PPC::LDU; break;
case MVT::i32: Opcode = PPC::LWZU8; break;
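
The mechanical change running through this hunk and the PPCISelLowering.cpp
hunks below is the move from the MVT::ValueType enum (queried through free
functions such as MVT::getSizeInBits and MVT::isVector) to an MVT value class
with member queries, with getSimpleVT() used where a switch is still wanted.
A toy model of that API shape -- not LLVM's actual MVT, just an illustration
of why the call sites now read the way they do:

  #include <cassert>

  // Toy stand-in for the new-style MVT: the queries travel with the value
  // type instead of living in free functions.
  struct ToyMVT {
    enum Kind { i8, i16, i32, i64, v4i32 } K;
    ToyMVT(Kind K) : K(K) {}
    unsigned getSizeInBits() const {
      switch (K) {
      case i8:    return 8;
      case i16:   return 16;
      case i32:   return 32;
      case i64:   return 64;
      case v4i32: return 128;
      }
      return 0;
    }
    bool isVector() const { return K == v4i32; }
    bool bitsLT(ToyMVT RHS) const {
      return getSizeInBits() < RHS.getSizeInBits();
    }
    Kind getSimpleVT() const { return K; }   // still usable in a switch
  };

  int main() {
    ToyMVT VT(ToyMVT::i16);
    assert(VT.bitsLT(ToyMVT::i32) && !VT.isVector());
    assert(VT.getSizeInBits() / 8 == 2);     // the getSizeInBits()/8 idiom
    return 0;
  }
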
Modified: llvm/branches/non-call-eh/lib/Target/PowerPC/PPCISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PowerPC/PPCISelLowering.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PowerPC/PPCISelLowering.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/PowerPC/PPCISelLowering.cpp Sun Jul 6 15:45:41 2008
@@ -18,7 +18,6 @@
#include "PPCPerfectShuffle.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/VectorExtras.h"
-#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -26,9 +25,11 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
+#include "llvm/ParameterAttributes.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
@@ -163,7 +164,7 @@
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
// Support label based line numbers.
- setOperationAction(ISD::LOCATION, MVT::Other, Expand);
+ setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
@@ -203,12 +204,12 @@
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom);
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64 , Custom);
- setOperationAction(ISD::ATOMIC_LAS , MVT::i32 , Custom);
- setOperationAction(ISD::ATOMIC_LCS , MVT::i32 , Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_ADD , MVT::i32 , Custom);
+ setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i32 , Custom);
setOperationAction(ISD::ATOMIC_SWAP , MVT::i32 , Custom);
if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) {
- setOperationAction(ISD::ATOMIC_LAS , MVT::i64 , Custom);
- setOperationAction(ISD::ATOMIC_LCS , MVT::i64 , Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_ADD , MVT::i64 , Custom);
+ setOperationAction(ISD::ATOMIC_CMP_SWAP , MVT::i64 , Custom);
setOperationAction(ISD::ATOMIC_SWAP , MVT::i64 , Custom);
}
@@ -254,50 +255,52 @@
if (TM.getSubtarget<PPCSubtarget>().hasAltivec()) {
// First set operation action for all vector types to expand. Then we
// will selectively turn on ones that can be effectively codegen'd.
- for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
- VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
+ for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
+ i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
+ MVT VT = (MVT::SimpleValueType)i;
+
// add/sub are legal for all supported vector VT's.
- setOperationAction(ISD::ADD , (MVT::ValueType)VT, Legal);
- setOperationAction(ISD::SUB , (MVT::ValueType)VT, Legal);
+ setOperationAction(ISD::ADD , VT, Legal);
+ setOperationAction(ISD::SUB , VT, Legal);
// We promote all shuffles to v16i8.
- setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Promote);
- AddPromotedToType (ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, MVT::v16i8);
+ setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
+ AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);
// We promote all non-typed operations to v4i32.
- setOperationAction(ISD::AND , (MVT::ValueType)VT, Promote);
- AddPromotedToType (ISD::AND , (MVT::ValueType)VT, MVT::v4i32);
- setOperationAction(ISD::OR , (MVT::ValueType)VT, Promote);
- AddPromotedToType (ISD::OR , (MVT::ValueType)VT, MVT::v4i32);
- setOperationAction(ISD::XOR , (MVT::ValueType)VT, Promote);
- AddPromotedToType (ISD::XOR , (MVT::ValueType)VT, MVT::v4i32);
- setOperationAction(ISD::LOAD , (MVT::ValueType)VT, Promote);
- AddPromotedToType (ISD::LOAD , (MVT::ValueType)VT, MVT::v4i32);
- setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote);
- AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v4i32);
- setOperationAction(ISD::STORE, (MVT::ValueType)VT, Promote);
- AddPromotedToType (ISD::STORE, (MVT::ValueType)VT, MVT::v4i32);
+ setOperationAction(ISD::AND , VT, Promote);
+ AddPromotedToType (ISD::AND , VT, MVT::v4i32);
+ setOperationAction(ISD::OR , VT, Promote);
+ AddPromotedToType (ISD::OR , VT, MVT::v4i32);
+ setOperationAction(ISD::XOR , VT, Promote);
+ AddPromotedToType (ISD::XOR , VT, MVT::v4i32);
+ setOperationAction(ISD::LOAD , VT, Promote);
+ AddPromotedToType (ISD::LOAD , VT, MVT::v4i32);
+ setOperationAction(ISD::SELECT, VT, Promote);
+ AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
+ setOperationAction(ISD::STORE, VT, Promote);
+ AddPromotedToType (ISD::STORE, VT, MVT::v4i32);
// No other operations are legal.
- setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::FNEG, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::BUILD_VECTOR, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::UMUL_LOHI, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::SMUL_LOHI, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::UDIVREM, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::SDIVREM, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::SCALAR_TO_VECTOR, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::FPOW, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::CTPOP, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::CTLZ, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::CTTZ, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::MUL , VT, Expand);
+ setOperationAction(ISD::SDIV, VT, Expand);
+ setOperationAction(ISD::SREM, VT, Expand);
+ setOperationAction(ISD::UDIV, VT, Expand);
+ setOperationAction(ISD::UREM, VT, Expand);
+ setOperationAction(ISD::FDIV, VT, Expand);
+ setOperationAction(ISD::FNEG, VT, Expand);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
+ setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
+ setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
+ setOperationAction(ISD::UMUL_LOHI, VT, Expand);
+ setOperationAction(ISD::SMUL_LOHI, VT, Expand);
+ setOperationAction(ISD::UDIVREM, VT, Expand);
+ setOperationAction(ISD::SDIVREM, VT, Expand);
+ setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
+ setOperationAction(ISD::FPOW, VT, Expand);
+ setOperationAction(ISD::CTPOP, VT, Expand);
+ setOperationAction(ISD::CTLZ, VT, Expand);
+ setOperationAction(ISD::CTTZ, VT, Expand);
}
// We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
@@ -412,12 +415,13 @@
case PPCISD::MTFSB1: return "PPCISD::MTFSB1";
case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ";
case PPCISD::MTFSF: return "PPCISD::MTFSF";
+ case PPCISD::TAILCALL: return "PPCISD::TAILCALL";
+ case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN";
}
}
-MVT::ValueType
-PPCTargetLowering::getSetCCResultType(const SDOperand &) const {
+MVT PPCTargetLowering::getSetCCResultType(const SDOperand &) const {
return MVT::i32;
}
@@ -686,7 +690,7 @@
uint64_t Value = 0;
if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
Value = CN->getValue();
- ValSizeInBytes = MVT::getSizeInBits(CN->getValueType(0))/8;
+ ValSizeInBytes = CN->getValueType(0).getSizeInBits()/8;
} else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
Value = FloatToBits(CN->getValueAPF().convertToFloat());
@@ -1003,7 +1007,7 @@
if (!EnablePPCPreinc) return false;
SDOperand Ptr;
- MVT::ValueType VT;
+ MVT VT;
if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
Ptr = LD->getBasePtr();
VT = LD->getMemoryVT();
@@ -1016,7 +1020,7 @@
return false;
// PowerPC doesn't have preinc load/store instructions for vectors.
- if (MVT::isVector(VT))
+ if (VT.isVector())
return false;
// TODO: Check reg+reg first.
@@ -1051,7 +1055,7 @@
SDOperand PPCTargetLowering::LowerConstantPool(SDOperand Op,
SelectionDAG &DAG) {
- MVT::ValueType PtrVT = Op.getValueType();
+ MVT PtrVT = Op.getValueType();
ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
Constant *C = CP->getConstVal();
SDOperand CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment());
@@ -1082,7 +1086,7 @@
}
SDOperand PPCTargetLowering::LowerJumpTable(SDOperand Op, SelectionDAG &DAG) {
- MVT::ValueType PtrVT = Op.getValueType();
+ MVT PtrVT = Op.getValueType();
JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
SDOperand JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
SDOperand Zero = DAG.getConstant(0, PtrVT);
@@ -1119,7 +1123,7 @@
SDOperand PPCTargetLowering::LowerGlobalAddress(SDOperand Op,
SelectionDAG &DAG) {
- MVT::ValueType PtrVT = Op.getValueType();
+ MVT PtrVT = Op.getValueType();
GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
GlobalValue *GV = GSDN->getGlobal();
SDOperand GA = DAG.getTargetGlobalAddress(GV, PtrVT, GSDN->getOffset());
@@ -1166,13 +1170,13 @@
// fold the new nodes.
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
if (C->isNullValue() && CC == ISD::SETEQ) {
- MVT::ValueType VT = Op.getOperand(0).getValueType();
+ MVT VT = Op.getOperand(0).getValueType();
SDOperand Zext = Op.getOperand(0);
- if (VT < MVT::i32) {
+ if (VT.bitsLT(MVT::i32)) {
VT = MVT::i32;
Zext = DAG.getNode(ISD::ZERO_EXTEND, VT, Op.getOperand(0));
}
- unsigned Log2b = Log2_32(MVT::getSizeInBits(VT));
+ unsigned Log2b = Log2_32(VT.getSizeInBits());
SDOperand Clz = DAG.getNode(ISD::CTLZ, VT, Zext);
SDOperand Scc = DAG.getNode(ISD::SRL, VT, Clz,
DAG.getConstant(Log2b, MVT::i32));
@@ -1190,9 +1194,9 @@
// condition register, reading it back out, and masking the correct bit. The
// normal approach here uses sub to do this instead of xor. Using xor exposes
// the result to other bit-twiddling opportunities.
- MVT::ValueType LHSVT = Op.getOperand(0).getValueType();
- if (MVT::isInteger(LHSVT) && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
- MVT::ValueType VT = Op.getValueType();
+ MVT LHSVT = Op.getOperand(0).getValueType();
+ if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
+ MVT VT = Op.getValueType();
SDOperand Sub = DAG.getNode(ISD::XOR, LHSVT, Op.getOperand(0),
Op.getOperand(1));
return DAG.getSetCC(VT, Sub, DAG.getConstant(0, LHSVT), CC);
@@ -1221,7 +1225,7 @@
if (Subtarget.isMachoABI()) {
// vastart just stores the address of the VarArgsFrameIndex slot into the
// memory location argument.
- MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+ MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
return DAG.getStore(Op.getOperand(0), FR, Op.getOperand(1), SV, 0);
@@ -1256,15 +1260,15 @@
SDOperand ArgFPR = DAG.getConstant(VarArgsNumFPR, MVT::i8);
- MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+ MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
SDOperand StackOffsetFI = DAG.getFrameIndex(VarArgsStackOffset, PtrVT);
SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
- uint64_t FrameOffset = MVT::getSizeInBits(PtrVT)/8;
+ uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
SDOperand ConstFrameOffset = DAG.getConstant(FrameOffset, PtrVT);
- uint64_t StackOffset = MVT::getSizeInBits(PtrVT)/8 - 1;
+ uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
SDOperand ConstStackOffset = DAG.getConstant(StackOffset, PtrVT);
uint64_t FPROffset = 1;
@@ -1317,6 +1321,20 @@
return FPR;
}
+/// CalculateStackSlotSize - Calculates the size reserved for this argument on
+/// the stack.
+static unsigned CalculateStackSlotSize(SDOperand Arg, SDOperand Flag,
+ bool isVarArg, unsigned PtrByteSize) {
+ MVT ArgVT = Arg.getValueType();
+ ISD::ArgFlagsTy Flags = cast<ARG_FLAGSSDNode>(Flag)->getArgFlags();
+ unsigned ArgSize = ArgVT.getSizeInBits()/8;
+ if (Flags.isByVal())
+ ArgSize = Flags.getByValSize();
+ ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
+
+ return ArgSize;
+}
+
SDOperand
PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op,
SelectionDAG &DAG,
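
CalculateStackSlotSize is plain round-up arithmetic: the reserved slot is the
argument size (or the byval size) rounded up to the next multiple of the
pointer size. A standalone check of the same expression:

  #include <cassert>

  // Same arithmetic as CalculateStackSlotSize: round ArgSize up to a
  // multiple of PtrByteSize (4 on 32-bit targets, 8 on 64-bit targets).
  static unsigned RoundToSlot(unsigned ArgSize, unsigned PtrByteSize) {
    return ((ArgSize + PtrByteSize - 1) / PtrByteSize) * PtrByteSize;
  }

  int main() {
    assert(RoundToSlot(3, 4) == 4);     // small byval on a 32-bit target
    assert(RoundToSlot(4, 4) == 4);     // exact multiples are unchanged
    assert(RoundToSlot(13, 8) == 16);   // 13-byte byval on a 64-bit target
    return 0;
  }
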
@@ -1334,14 +1352,19 @@
SDOperand Root = Op.getOperand(0);
bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
- MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+ MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
bool isPPC64 = PtrVT == MVT::i64;
bool isMachoABI = Subtarget.isMachoABI();
bool isELF32_ABI = Subtarget.isELF32_ABI();
+ // Potential tail calls could cause overwriting of argument stack slots.
+ unsigned CC = MF.getFunction()->getCallingConv();
+ bool isImmutable = !(PerformTailCallOpt && (CC==CallingConv::Fast));
unsigned PtrByteSize = isPPC64 ? 8 : 4;
unsigned ArgOffset = PPCFrameInfo::getLinkageSize(isPPC64, isMachoABI);
-
+ // Area that is at least reserved in the caller of this function.
+ unsigned MinReservedArea = ArgOffset;
+
static const unsigned GPR_32[] = { // 32-bit registers.
PPC::R3, PPC::R4, PPC::R5, PPC::R6,
PPC::R7, PPC::R8, PPC::R9, PPC::R10,
@@ -1379,8 +1402,8 @@
if (!isVarArg && !isPPC64) {
for (unsigned ArgNo = 0, e = Op.Val->getNumValues()-1; ArgNo != e;
++ArgNo) {
- MVT::ValueType ObjectVT = Op.getValue(ArgNo).getValueType();
- unsigned ObjSize = MVT::getSizeInBits(ObjectVT)/8;
+ MVT ObjectVT = Op.getValue(ArgNo).getValueType();
+ unsigned ObjSize = ObjectVT.getSizeInBits()/8;
ISD::ArgFlagsTy Flags =
cast<ARG_FLAGSSDNode>(Op.getOperand(ArgNo+3))->getArgFlags();
@@ -1393,7 +1416,7 @@
continue;
}
- switch(ObjectVT) {
+ switch(ObjectVT.getSimpleVT()) {
default: assert(0 && "Unhandled argument type!");
case MVT::i32:
case MVT::f32:
@@ -1426,12 +1449,12 @@
// even GPR_idx value or to an even ArgOffset value.
SmallVector<SDOperand, 8> MemOps;
-
+ unsigned nAltivecParamsAtEnd = 0;
for (unsigned ArgNo = 0, e = Op.Val->getNumValues()-1; ArgNo != e; ++ArgNo) {
SDOperand ArgVal;
bool needsLoad = false;
- MVT::ValueType ObjectVT = Op.getValue(ArgNo).getValueType();
- unsigned ObjSize = MVT::getSizeInBits(ObjectVT)/8;
+ MVT ObjectVT = Op.getValue(ArgNo).getValueType();
+ unsigned ObjSize = ObjectVT.getSizeInBits()/8;
unsigned ArgSize = ObjSize;
ISD::ArgFlagsTy Flags =
cast<ARG_FLAGSSDNode>(Op.getOperand(ArgNo+3))->getArgFlags();
@@ -1440,6 +1463,23 @@
unsigned CurArgOffset = ArgOffset;
+ // Varargs or 64-bit Altivec parameters are padded to a 16-byte boundary.
+ if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 ||
+ ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) {
+ if (isVarArg || isPPC64) {
+ MinReservedArea = ((MinReservedArea+15)/16)*16;
+ MinReservedArea += CalculateStackSlotSize(Op.getValue(ArgNo),
+ Op.getOperand(ArgNo+3),
+ isVarArg,
+ PtrByteSize);
+ } else nAltivecParamsAtEnd++;
+ } else
+ // Calculate min reserved area.
+ MinReservedArea += CalculateStackSlotSize(Op.getValue(ArgNo),
+ Op.getOperand(ArgNo+3),
+ isVarArg,
+ PtrByteSize);
+
// FIXME alignment for ELF may not be right
// FIXME the codegen can be much improved in some cases.
// We do not have to keep everything in memory.
@@ -1495,7 +1535,7 @@
continue;
}
- switch (ObjectVT) {
+ switch (ObjectVT.getSimpleVT()) {
default: assert(0 && "Unhandled argument type!");
case MVT::i32:
if (!isPPC64) {
@@ -1614,7 +1654,8 @@
// that we ran out of physical registers of the appropriate type.
if (needsLoad) {
int FI = MFI->CreateFixedObject(ObjSize,
- CurArgOffset + (ArgSize - ObjSize));
+ CurArgOffset + (ArgSize - ObjSize),
+ isImmutable);
SDOperand FIN = DAG.getFrameIndex(FI, PtrVT);
ArgVal = DAG.getLoad(ObjectVT, Root, FIN, NULL, 0);
}
@@ -1622,6 +1663,25 @@
ArgValues.push_back(ArgVal);
}
+ // Set the size that is at least reserved in the caller of this function. A
+ // tail-call-optimized function's reserved stack space needs to be aligned so
+ // that taking the difference between two stack areas will result in an
+ // aligned stack.
+ PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
+ // Add the Altivec parameters at the end, if needed.
+ if (nAltivecParamsAtEnd) {
+ MinReservedArea = ((MinReservedArea+15)/16)*16;
+ MinReservedArea += 16*nAltivecParamsAtEnd;
+ }
+ MinReservedArea =
+ std::max(MinReservedArea,
+ PPCFrameInfo::getMinCallFrameSize(isPPC64, isMachoABI));
+ unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameInfo()->
+ getStackAlignment();
+ unsigned AlignMask = TargetAlign-1;
+ MinReservedArea = (MinReservedArea + AlignMask) & ~AlignMask;
+ FI->setMinReservedArea(MinReservedArea);
+
// If the function takes variable number of arguments, make a frame index for
// the start of the first vararg value... for expansion of llvm.va_start.
if (isVarArg) {
@@ -1633,18 +1693,18 @@
// Make room for Num_GPR_Regs, Num_FPR_Regs and for a possible frame
// pointer.
- depth = -(Num_GPR_Regs * MVT::getSizeInBits(PtrVT)/8 +
- Num_FPR_Regs * MVT::getSizeInBits(MVT::f64)/8 +
- MVT::getSizeInBits(PtrVT)/8);
+ depth = -(Num_GPR_Regs * PtrVT.getSizeInBits()/8 +
+ Num_FPR_Regs * MVT(MVT::f64).getSizeInBits()/8 +
+ PtrVT.getSizeInBits()/8);
- VarArgsStackOffset = MFI->CreateFixedObject(MVT::getSizeInBits(PtrVT)/8,
+ VarArgsStackOffset = MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
ArgOffset);
}
else
depth = ArgOffset;
- VarArgsFrameIndex = MFI->CreateFixedObject(MVT::getSizeInBits(PtrVT)/8,
+ VarArgsFrameIndex = MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
depth);
SDOperand FIN = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
@@ -1656,7 +1716,7 @@
SDOperand Store = DAG.getStore(Root, Val, FIN, NULL, 0);
MemOps.push_back(Store);
// Increment the address by four for the next argument to store
- SDOperand PtrOff = DAG.getConstant(MVT::getSizeInBits(PtrVT)/8, PtrVT);
+ SDOperand PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT);
FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff);
}
}
@@ -1676,7 +1736,7 @@
SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
MemOps.push_back(Store);
// Increment the address by four for the next argument to store
- SDOperand PtrOff = DAG.getConstant(MVT::getSizeInBits(PtrVT)/8, PtrVT);
+ SDOperand PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT);
FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff);
}
@@ -1688,7 +1748,7 @@
SDOperand Store = DAG.getStore(Root, Val, FIN, NULL, 0);
MemOps.push_back(Store);
// Increment the address by eight for the next argument to store
- SDOperand PtrOff = DAG.getConstant(MVT::getSizeInBits(MVT::f64)/8,
+ SDOperand PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8,
PtrVT);
FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff);
}
@@ -1702,7 +1762,7 @@
SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
MemOps.push_back(Store);
// Increment the address by eight for the next argument to store
- SDOperand PtrOff = DAG.getConstant(MVT::getSizeInBits(MVT::f64)/8,
+ SDOperand PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8,
PtrVT);
FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff);
}
@@ -1715,9 +1775,133 @@
ArgValues.push_back(Root);
// Return the new list of results.
- std::vector<MVT::ValueType> RetVT(Op.Val->value_begin(),
- Op.Val->value_end());
- return DAG.getNode(ISD::MERGE_VALUES, RetVT, &ArgValues[0], ArgValues.size());
+ return DAG.getMergeValues(Op.Val->getVTList(), &ArgValues[0],
+ ArgValues.size());
+}
+
+/// CalculateParameterAndLinkageAreaSize - Get the size of the parameter plus
+/// linkage area.
+static unsigned
+CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG,
+ bool isPPC64,
+ bool isMachoABI,
+ bool isVarArg,
+ unsigned CC,
+ SDOperand Call,
+ unsigned &nAltivecParamsAtEnd) {
+ // Count how many bytes are to be pushed on the stack, including the linkage
+ // area, and parameter passing area. We start with 24/48 bytes, which is
+ // prereserved space for [SP][CR][LR][3 x unused].
+ unsigned NumBytes = PPCFrameInfo::getLinkageSize(isPPC64, isMachoABI);
+ unsigned NumOps = (Call.getNumOperands() - 5) / 2;
+ unsigned PtrByteSize = isPPC64 ? 8 : 4;
+
+ // Add up all the space actually used.
+ // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
+ // they all go in registers, but we must reserve stack space for them for
+ // possible use by the caller. In varargs or 64-bit calls, parameters are
+ // assigned stack space in order, with padding so Altivec parameters are
+ // 16-byte aligned.
+ nAltivecParamsAtEnd = 0;
+ for (unsigned i = 0; i != NumOps; ++i) {
+ SDOperand Arg = Call.getOperand(5+2*i);
+ SDOperand Flag = Call.getOperand(5+2*i+1);
+ MVT ArgVT = Arg.getValueType();
+ // Varargs Altivec parameters are padded to a 16-byte boundary.
+ if (ArgVT==MVT::v4f32 || ArgVT==MVT::v4i32 ||
+ ArgVT==MVT::v8i16 || ArgVT==MVT::v16i8) {
+ if (!isVarArg && !isPPC64) {
+ // Non-varargs Altivec parameters go after all the non-Altivec
+ // parameters; handle those later so we know how much padding we need.
+ nAltivecParamsAtEnd++;
+ continue;
+ }
+ // Varargs and 64-bit Altivec parameters are padded to a 16-byte boundary.
+ NumBytes = ((NumBytes+15)/16)*16;
+ }
+ NumBytes += CalculateStackSlotSize(Arg, Flag, isVarArg, PtrByteSize);
+ }
+
+ // Allow for Altivec parameters at the end, if needed.
+ if (nAltivecParamsAtEnd) {
+ NumBytes = ((NumBytes+15)/16)*16;
+ NumBytes += 16*nAltivecParamsAtEnd;
+ }
+
+ // The prolog code of the callee may store up to 8 GPR argument registers to
+ // the stack, allowing va_start to index over them in memory if it is varargs.
+ // Because we cannot tell if this is needed on the caller side, we have to
+ // conservatively assume that it is needed. As such, make sure we have at
+ // least enough stack space for the caller to store the 8 GPRs.
+ NumBytes = std::max(NumBytes,
+ PPCFrameInfo::getMinCallFrameSize(isPPC64, isMachoABI));
+
+ // Tail call needs the stack to be aligned.
+ if (CC==CallingConv::Fast && PerformTailCallOpt) {
+ unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameInfo()->
+ getStackAlignment();
+ unsigned AlignMask = TargetAlign-1;
+ NumBytes = (NumBytes + AlignMask) & ~AlignMask;
+ }
+
+ return NumBytes;
+}
+
+/// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
+/// adjusted to accommodate the arguments for the tail call.
+static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool IsTailCall,
+ unsigned ParamSize) {
+
+ if (!IsTailCall) return 0;
+
+ PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
+ unsigned CallerMinReservedArea = FI->getMinReservedArea();
+ int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
+ // Remember only if the new adjustment is bigger (i.e. more negative).
+ if (SPDiff < FI->getTailCallSPDelta())
+ FI->setTailCallSPDelta(SPDiff);
+
+ return SPDiff;
+}
+
+/// IsEligibleForTailCallOptimization - Check whether the call is eligible for
+/// tail call optimization. A call is eligible if the caller/callee calling
+/// conventions match (currently only fastcc supports tail calls) and the
+/// CALL node is immediately followed by a RET.
+bool
+PPCTargetLowering::IsEligibleForTailCallOptimization(SDOperand Call,
+ SDOperand Ret,
+ SelectionDAG& DAG) const {
+ // Variable argument functions are not supported.
+ if (!PerformTailCallOpt ||
+ cast<ConstantSDNode>(Call.getOperand(2))->getValue() != 0) return false;
+
+ if (CheckTailCallReturnConstraints(Call, Ret)) {
+ MachineFunction &MF = DAG.getMachineFunction();
+ unsigned CallerCC = MF.getFunction()->getCallingConv();
+ unsigned CalleeCC = cast<ConstantSDNode>(Call.getOperand(1))->getValue();
+ if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
+ // Calls with byval parameters are not supported.
+ for (unsigned i = 0; i != ((Call.getNumOperands()-5)/2); i++) {
+ ISD::ArgFlagsTy Flags = cast<ARG_FLAGSSDNode>(Call.getOperand(5+2*i+1))
+ ->getArgFlags();
+ if (Flags.isByVal()) return false;
+ }
+
+ SDOperand Callee = Call.getOperand(4);
+ // Non-PIC/GOT tail calls are supported.
+ if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
+ return true;
+
+ // At the moment we can only do local tail calls (in the same module, hidden
+ // or protected) if we are generating PIC.
+ if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
+ return G->getGlobal()->hasHiddenVisibility()
+ || G->getGlobal()->hasProtectedVisibility();
+ }
+ }
+
+ return false;
}
/// isCallCompatibleAddress - Return the immediate to use if the specified
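
To make the accounting in CalculateParameterAndLinkageAreaSize concrete, here
is a hypothetical trace for a non-varargs 32-bit Macho call taking
(i32, i32, v4f32). The 24-byte linkage area and the room for 8 spilled GPRs
come from the comments above; treating getMinCallFrameSize() as 24 + 8*4 = 56
for this configuration is an assumption, not something this diff states:

  #include <algorithm>
  #include <cassert>

  int main() {
    unsigned NumBytes = 24;                  // linkage: [SP][CR][LR][3 x unused]
    NumBytes += 4;                           // first i32
    NumBytes += 4;                           // second i32            -> 32
    unsigned nAltivecParamsAtEnd = 1;        // v4f32 deferred to the end
    NumBytes = ((NumBytes + 15) / 16) * 16;  // pad to 16 bytes        -> 32
    NumBytes += 16 * nAltivecParamsAtEnd;    //                        -> 48
    NumBytes = std::max(NumBytes, 56u);      // assumed min call frame size
    assert(NumBytes == 56);
    return 0;
  }
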
@@ -1735,6 +1919,106 @@
DAG.getTargetLoweringInfo().getPointerTy()).Val;
}
+namespace {
+
+struct TailCallArgumentInfo {
+ SDOperand Arg;
+ SDOperand FrameIdxOp;
+ int FrameIdx;
+
+ TailCallArgumentInfo() : FrameIdx(0) {}
+};
+
+}
+
+/// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
+static void
+StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG,
+ SDOperand Chain,
+ const SmallVector<TailCallArgumentInfo, 8> &TailCallArgs,
+ SmallVector<SDOperand, 8> &MemOpChains) {
+ for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
+ SDOperand Arg = TailCallArgs[i].Arg;
+ SDOperand FIN = TailCallArgs[i].FrameIdxOp;
+ int FI = TailCallArgs[i].FrameIdx;
+ // Store relative to the frame pointer.
+ MemOpChains.push_back(DAG.getStore(Chain, Arg, FIN,
+ PseudoSourceValue::getFixedStack(),
+ FI));
+ }
+}
+
+/// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to
+/// the appropriate stack slots for the tail call optimized function call.
+static SDOperand EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG,
+ MachineFunction &MF,
+ SDOperand Chain,
+ SDOperand OldRetAddr,
+ SDOperand OldFP,
+ int SPDiff,
+ bool isPPC64,
+ bool isMachoABI) {
+ if (SPDiff) {
+ // Calculate the new stack slot for the return address.
+ int SlotSize = isPPC64 ? 8 : 4;
+ int NewRetAddrLoc = SPDiff + PPCFrameInfo::getReturnSaveOffset(isPPC64,
+ isMachoABI);
+ int NewRetAddr = MF.getFrameInfo()->CreateFixedObject(SlotSize,
+ NewRetAddrLoc);
+ int NewFPLoc = SPDiff + PPCFrameInfo::getFramePointerSaveOffset(isPPC64,
+ isMachoABI);
+ int NewFPIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize, NewFPLoc);
+
+ MVT VT = isPPC64 ? MVT::i64 : MVT::i32;
+ SDOperand NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
+ Chain = DAG.getStore(Chain, OldRetAddr, NewRetAddrFrIdx,
+ PseudoSourceValue::getFixedStack(), NewRetAddr);
+ SDOperand NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT);
+ Chain = DAG.getStore(Chain, OldFP, NewFramePtrIdx,
+ PseudoSourceValue::getFixedStack(), NewFPIdx);
+ }
+ return Chain;
+}
+
+/// CalculateTailCallArgDest - Remember the argument for later processing and
+/// calculate the position of the argument on the stack.
+static void
+CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
+ SDOperand Arg, int SPDiff, unsigned ArgOffset,
+ SmallVector<TailCallArgumentInfo, 8>& TailCallArguments) {
+ int Offset = ArgOffset + SPDiff;
+ uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8;
+ int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset);
+ MVT VT = isPPC64 ? MVT::i64 : MVT::i32;
+ SDOperand FIN = DAG.getFrameIndex(FI, VT);
+ TailCallArgumentInfo Info;
+ Info.Arg = Arg;
+ Info.FrameIdxOp = FIN;
+ Info.FrameIdx = FI;
+ TailCallArguments.push_back(Info);
+}
+
+/// EmitTailCallLoadFPAndRetAddr - Emit loads from the frame pointer and return
+/// address stack slots. Returns the chain as result and the loaded values in
+/// LROpOut/FPOpOut. Used when tail calling.
+SDOperand PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG,
+ int SPDiff,
+ SDOperand Chain,
+ SDOperand &LROpOut,
+ SDOperand &FPOpOut) {
+ if (SPDiff) {
+ // Load the LR and FP stack slots for later adjusting.
+ MVT VT = PPCSubTarget.isPPC64() ? MVT::i64 : MVT::i32;
+ LROpOut = getReturnAddrFrameIndex(DAG);
+ LROpOut = DAG.getLoad(VT, Chain, LROpOut, NULL, 0);
+ Chain = SDOperand(LROpOut.Val, 1);
+ FPOpOut = getFramePointerFrameIndex(DAG);
+ FPOpOut = DAG.getLoad(VT, Chain, FPOpOut, NULL, 0);
+ Chain = SDOperand(FPOpOut.Val, 1);
+ }
+ return Chain;
+}
+
/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
/// by "Src" to address "Dst" of size "Size". Alignment information is
/// specified by the specific parameter attribute. The copy will be passed as
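
The SPDiff threaded through the helpers above is simply the caller's reserved
argument area minus the area the tail call needs. When the callee needs more,
SPDiff is negative and the return address and frame pointer slots are moved
down by that amount before the jump. A numeric sketch (the 64/96 figures are
made up for illustration):

  #include <cassert>

  int main() {
    int CallerMinReservedArea = 64;  // what this function's prologue reserved
    int ParamSize = 96;              // what the tail call's arguments need
    int SPDiff = CallerMinReservedArea - ParamSize;
    assert(SPDiff == -32);
    // EmitTailCallStoreFPAndRetAddr then stores the old LR/FP to the slots at
    // SPDiff + getReturnSaveOffset(...) and
    // SPDiff + getFramePointerSaveOffset(...).
    return 0;
  }
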
@@ -1750,73 +2034,75 @@
NULL, 0, NULL, 0);
}
+/// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
+/// tail calls.
+static void
+LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDOperand Chain,
+ SDOperand Arg, SDOperand PtrOff, int SPDiff,
+ unsigned ArgOffset, bool isPPC64, bool isTailCall,
+ bool isVector, SmallVector<SDOperand, 8> &MemOpChains,
+ SmallVector<TailCallArgumentInfo, 8>& TailCallArguments) {
+ MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+ if (!isTailCall) {
+ if (isVector) {
+ SDOperand StackPtr;
+ if (isPPC64)
+ StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
+ else
+ StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
+ PtrOff = DAG.getNode(ISD::ADD, PtrVT, StackPtr,
+ DAG.getConstant(ArgOffset, PtrVT));
+ }
+ MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
+ // Calculate and remember argument location.
+ } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
+ TailCallArguments);
+}
+
SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG,
const PPCSubtarget &Subtarget,
TargetMachine &TM) {
SDOperand Chain = Op.getOperand(0);
bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
+ unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
+ bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0 &&
+ CC == CallingConv::Fast && PerformTailCallOpt;
SDOperand Callee = Op.getOperand(4);
unsigned NumOps = (Op.getNumOperands() - 5) / 2;
bool isMachoABI = Subtarget.isMachoABI();
bool isELF32_ABI = Subtarget.isELF32_ABI();
- MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+ MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
bool isPPC64 = PtrVT == MVT::i64;
unsigned PtrByteSize = isPPC64 ? 8 : 4;
+ MachineFunction &MF = DAG.getMachineFunction();
+
// args_to_use will accumulate outgoing args for the PPCISD::CALL case in
// SelectExpr to use to put the arguments in the appropriate registers.
std::vector<SDOperand> args_to_use;
- // Count how many bytes are to be pushed on the stack, including the linkage
- // area, and parameter passing area. We start with 24/48 bytes, which is
- // prereserved space for [SP][CR][LR][3 x unused].
- unsigned NumBytes = PPCFrameInfo::getLinkageSize(isPPC64, isMachoABI);
+ // Mark this function as potentially containing a tail call. As a consequence
+ // the frame pointer will be used for dynamic allocation and for restoring the
+ // caller's stack pointer in this function's epilogue. This is done because a
+ // tail-called function might overwrite the value in this function's (MF)
+ // stack pointer save slot at 0(SP).
+ if (PerformTailCallOpt && CC==CallingConv::Fast)
+ MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
- // Add up all the space actually used.
- // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
- // they all go in registers, but we must reserve stack space for them for
- // possible use by the caller. In varargs or 64-bit calls, parameters are
- // assigned stack space in order, with padding so Altivec parameters are
- // 16-byte aligned.
unsigned nAltivecParamsAtEnd = 0;
- for (unsigned i = 0; i != NumOps; ++i) {
- SDOperand Arg = Op.getOperand(5+2*i);
- MVT::ValueType ArgVT = Arg.getValueType();
- if (ArgVT==MVT::v4f32 || ArgVT==MVT::v4i32 ||
- ArgVT==MVT::v8i16 || ArgVT==MVT::v16i8) {
- if (!isVarArg && !isPPC64) {
- // Non-varargs Altivec parameters go after all the non-Altivec parameters;
- // do those last so we know how much padding we need.
- nAltivecParamsAtEnd++;
- continue;
- } else {
- // Varargs and 64-bit Altivec parameters are padded to 16 byte boundary.
- NumBytes = ((NumBytes+15)/16)*16;
- }
- }
- ISD::ArgFlagsTy Flags =
- cast<ARG_FLAGSSDNode>(Op.getOperand(5+2*i+1))->getArgFlags();
- unsigned ArgSize =MVT::getSizeInBits(Op.getOperand(5+2*i).getValueType())/8;
- if (Flags.isByVal())
- ArgSize = Flags.getByValSize();
- ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
- NumBytes += ArgSize;
- }
- // Allow for Altivec parameters at the end, if needed.
- if (nAltivecParamsAtEnd) {
- NumBytes = ((NumBytes+15)/16)*16;
- NumBytes += 16*nAltivecParamsAtEnd;
- }
- // The prolog code of the callee may store up to 8 GPR argument registers to
- // the stack, allowing va_start to index over them in memory if its varargs.
- // Because we cannot tell if this is needed on the caller side, we have to
- // conservatively assume that it is needed. As such, make sure we have at
- // least enough stack space for the caller to store the 8 GPRs.
- NumBytes = std::max(NumBytes,
- PPCFrameInfo::getMinCallFrameSize(isPPC64, isMachoABI));
+ // Count how many bytes are to be pushed on the stack, including the linkage
+ // area, and parameter passing area. We start with 24/48 bytes, which is
+ // prereserved space for [SP][CR][LR][3 x unused].
+ unsigned NumBytes =
+ CalculateParameterAndLinkageAreaSize(DAG, isPPC64, isMachoABI, isVarArg, CC,
+ Op, nAltivecParamsAtEnd);
+
+ // Calculate by how many bytes the stack has to be adjusted in case of tail
+ // call optimization.
+ int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
// Adjust the stack pointer for the new arguments...
// These operations are automatically eliminated by the prolog/epilog pass
@@ -1824,6 +2110,11 @@
DAG.getConstant(NumBytes, PtrVT));
SDOperand CallSeqStart = Chain;
+ // Load the return address and frame pointer so they can be moved somewhere
+ // else later.
+ SDOperand LROp, FPOp;
+ Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp);
+
// Set up a copy of the stack pointer for use loading and storing any
// arguments that may not fit in the registers available for argument
// passing.
@@ -1861,6 +2152,8 @@
const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32;
std::vector<std::pair<unsigned, SDOperand> > RegsToPass;
+ SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
+
SmallVector<SDOperand, 8> MemOpChains;
for (unsigned i = 0; i != NumOps; ++i) {
bool inMem = false;
@@ -1898,7 +2191,7 @@
if (Size==1 || Size==2) {
// Very small objects are passed right-justified.
// Everything else is passed left-justified.
- MVT::ValueType VT = (Size==1) ? MVT::i8 : MVT::i16;
+ MVT VT = (Size==1) ? MVT::i8 : MVT::i16;
if (GPR_idx != NumGPRs) {
SDOperand Load = DAG.getExtLoad(ISD::EXTLOAD, PtrVT, Chain, Arg,
NULL, 0, VT);
@@ -1950,7 +2243,7 @@
continue;
}
- switch (Arg.getValueType()) {
+ switch (Arg.getValueType().getSimpleVT()) {
default: assert(0 && "Unexpected ValueType for argument!");
case MVT::i32:
case MVT::i64:
@@ -1959,7 +2252,9 @@
if (GPR_idx != NumGPRs) {
RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
} else {
- MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
+ LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
+ isPPC64, isTailCall, false, MemOpChains,
+ TailCallArguments);
inMem = true;
}
if (inMem || isMachoABI) {
@@ -2007,7 +2302,9 @@
}
}
} else {
- MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
+ LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
+ isPPC64, isTailCall, false, MemOpChains,
+ TailCallArguments);
inMem = true;
}
if (inMem || isMachoABI) {
@@ -2058,6 +2355,7 @@
}
break;
}
+
// Non-varargs Altivec params generally go in registers, but have
// stack space allocated at the end.
if (VR_idx != NumVRs) {
@@ -2065,10 +2363,9 @@
RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
} else if (nAltivecParamsAtEnd==0) {
// We are emitting Altivec params in order.
- PtrOff = DAG.getNode(ISD::ADD, PtrVT, StackPtr,
- DAG.getConstant(ArgOffset, PtrVT));
- SDOperand Store = DAG.getStore(Chain, Arg, PtrOff, NULL, 0);
- MemOpChains.push_back(Store);
+ LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
+ isPPC64, isTailCall, true, MemOpChains,
+ TailCallArguments);
ArgOffset += 16;
}
break;
@@ -2086,14 +2383,15 @@
ArgOffset += 12*16;
for (unsigned i = 0; i != NumOps; ++i) {
SDOperand Arg = Op.getOperand(5+2*i);
- MVT::ValueType ArgType = Arg.getValueType();
+ MVT ArgType = Arg.getValueType();
if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 ||
ArgType==MVT::v8i16 || ArgType==MVT::v16i8) {
if (++j > NumVRs) {
- SDOperand PtrOff = DAG.getNode(ISD::ADD, PtrVT, StackPtr,
- DAG.getConstant(ArgOffset, PtrVT));
- SDOperand Store = DAG.getStore(Chain, Arg, PtrOff, NULL, 0);
- MemOpChains.push_back(Store);
+ SDOperand PtrOff;
+ // We are emitting Altivec params in order.
+ LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
+ isPPC64, isTailCall, true, MemOpChains,
+ TailCallArguments);
ArgOffset += 16;
}
}
@@ -2120,7 +2418,38 @@
InFlag = Chain.getValue(1);
}
- std::vector<MVT::ValueType> NodeTys;
+ // Emit a sequence of copyto/copyfrom virtual registers for arguments that
+ // might overwrite each other in case of tail call optimization.
+ if (isTailCall) {
+ SmallVector<SDOperand, 8> MemOpChains2;
+ // Do not flag preceding copytoreg stuff together with the following stuff.
+ InFlag = SDOperand();
+ StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
+ MemOpChains2);
+ if (!MemOpChains2.empty())
+ Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
+ &MemOpChains2[0], MemOpChains2.size());
+
+ // Store the return address to the appropriate stack slot.
+ Chain = EmitTailCallStoreFPAndRetAddr(DAG, MF, Chain, LROp, FPOp, SPDiff,
+ isPPC64, isMachoABI);
+ }
+
+ // Emit callseq_end just before the tail call node.
+ if (isTailCall) {
+ SmallVector<SDOperand, 8> CallSeqOps;
+ SDVTList CallSeqNodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
+ CallSeqOps.push_back(Chain);
+ CallSeqOps.push_back(DAG.getIntPtrConstant(NumBytes));
+ CallSeqOps.push_back(DAG.getIntPtrConstant(0));
+ if (InFlag.Val)
+ CallSeqOps.push_back(InFlag);
+ Chain = DAG.getNode(ISD::CALLSEQ_END, CallSeqNodeTys, &CallSeqOps[0],
+ CallSeqOps.size());
+ InFlag = Chain.getValue(1);
+ }
+
+ std::vector<MVT> NodeTys;
NodeTys.push_back(MVT::Other); // Returns a chain
NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
@@ -2157,6 +2486,9 @@
Ops.push_back(Chain);
CallOpc = isMachoABI ? PPCISD::BCTRL_Macho : PPCISD::BCTRL_ELF;
Callee.Val = 0;
+ // Add the CTR register as callee so a bctr can be emitted later.
+ if (isTailCall)
+ Ops.push_back(DAG.getRegister(PPC::CTR, getPointerTy()));
}
// If this is a direct call, pass the chain and the callee.
@@ -2164,35 +2496,54 @@
Ops.push_back(Chain);
Ops.push_back(Callee);
}
-
+ // If this is a tail call, add the stack pointer delta.
+ if (isTailCall)
+ Ops.push_back(DAG.getConstant(SPDiff, MVT::i32));
+
// Add argument registers to the end of the list so that they are known live
// into the call.
for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
Ops.push_back(DAG.getRegister(RegsToPass[i].first,
RegsToPass[i].second.getValueType()));
-
+
+ // When performing tail call optimization the callee pops its arguments off
+ // the stack. Account for this here so these bytes can be pushed back on in
+ // PPCRegisterInfo::eliminateCallFramePseudoInstr.
+ int BytesCalleePops =
+ (CC==CallingConv::Fast && PerformTailCallOpt) ? NumBytes : 0;
+
if (InFlag.Val)
Ops.push_back(InFlag);
+
+ // Emit tail call.
+ if (isTailCall) {
+ assert(InFlag.Val &&
+ "Flag must be set. Depend on flag being set in LowerRET");
+ Chain = DAG.getNode(PPCISD::TAILCALL,
+ Op.Val->getVTList(), &Ops[0], Ops.size());
+ return SDOperand(Chain.Val, Op.ResNo);
+ }
+
Chain = DAG.getNode(CallOpc, NodeTys, &Ops[0], Ops.size());
InFlag = Chain.getValue(1);
Chain = DAG.getCALLSEQ_END(Chain,
DAG.getConstant(NumBytes, PtrVT),
- DAG.getConstant(0, PtrVT),
+ DAG.getConstant(BytesCalleePops, PtrVT),
InFlag);
if (Op.Val->getValueType(0) != MVT::Other)
InFlag = Chain.getValue(1);
SmallVector<SDOperand, 16> ResultVals;
SmallVector<CCValAssign, 16> RVLocs;
- unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
- CCState CCInfo(CC, isVarArg, TM, RVLocs);
+ unsigned CallerCC = DAG.getMachineFunction().getFunction()->getCallingConv();
+ CCState CCInfo(CallerCC, isVarArg, TM, RVLocs);
CCInfo.AnalyzeCallResult(Op.Val, RetCC_PPC);
// Copy all of the result registers out of their specified physreg.
for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
CCValAssign &VA = RVLocs[i];
- MVT::ValueType VT = VA.getValVT();
+ MVT VT = VA.getValVT();
assert(VA.isRegLoc() && "Can only return in registers!");
Chain = DAG.getCopyFromReg(Chain, VA.getLocReg(), VT, InFlag).getValue(1);
ResultVals.push_back(Chain.getValue(0));
@@ -2205,8 +2556,8 @@
// Otherwise, merge everything together with a MERGE_VALUES node.
ResultVals.push_back(Chain);
- SDOperand Res = DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
- &ResultVals[0], ResultVals.size());
+ SDOperand Res = DAG.getMergeValues(Op.Val->getVTList(), &ResultVals[0],
+ ResultVals.size());
return Res.getValue(Op.ResNo);
}
@@ -2226,6 +2577,36 @@
}
SDOperand Chain = Op.getOperand(0);
+
+ Chain = GetPossiblePreceedingTailCall(Chain, PPCISD::TAILCALL);
+ if (Chain.getOpcode() == PPCISD::TAILCALL) {
+ SDOperand TailCall = Chain;
+ SDOperand TargetAddress = TailCall.getOperand(1);
+ SDOperand StackAdjustment = TailCall.getOperand(2);
+
+ assert(((TargetAddress.getOpcode() == ISD::Register &&
+ cast<RegisterSDNode>(TargetAddress)->getReg() == PPC::CTR) ||
+ TargetAddress.getOpcode() == ISD::TargetExternalSymbol ||
+ TargetAddress.getOpcode() == ISD::TargetGlobalAddress ||
+ isa<ConstantSDNode>(TargetAddress)) &&
+ "Expecting an global address, external symbol, absolute value or register");
+
+ assert(StackAdjustment.getOpcode() == ISD::Constant &&
+ "Expecting a const value");
+
+ SmallVector<SDOperand,8> Operands;
+ Operands.push_back(Chain.getOperand(0));
+ Operands.push_back(TargetAddress);
+ Operands.push_back(StackAdjustment);
+ // Copy registers used by the call. Last operand is a flag so it is not
+ // copied.
+ for (unsigned i=3; i < TailCall.getNumOperands()-1; i++) {
+ Operands.push_back(Chain.getOperand(i));
+ }
+ return DAG.getNode(PPCISD::TC_RETURN, MVT::Other, &Operands[0],
+ Operands.size());
+ }
+
SDOperand Flag;
// Copy the result values into the output registers.
@@ -2247,7 +2628,7 @@
// When we pop the dynamic allocation we need to restore the SP link.
// Get the corect type for pointers.
- MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+ MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
// Construct the stack pointer operand.
bool IsPPC64 = Subtarget.isPPC64();
@@ -2268,18 +2649,44 @@
return DAG.getStore(Chain, LoadLinkSP, StackPtr, NULL, 0);
}
-SDOperand PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op,
- SelectionDAG &DAG,
- const PPCSubtarget &Subtarget) {
+
+
+SDOperand
+PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG & DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
- bool IsPPC64 = Subtarget.isPPC64();
- bool isMachoABI = Subtarget.isMachoABI();
+ bool IsPPC64 = PPCSubTarget.isPPC64();
+ bool isMachoABI = PPCSubTarget.isMachoABI();
+ MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+
+ // Get the current return address save index. The users of this index are
+ // primarily the tail call lowering helpers above.
+ PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
+ int RASI = FI->getReturnAddrSaveIndex();
+
+ // If the return address save index hasn't been defined yet.
+ if (!RASI) {
+ // Find out the fixed offset of the return address save area.
+ int LROffset = PPCFrameInfo::getReturnSaveOffset(IsPPC64, isMachoABI);
+ // Allocate the frame index for the return address save area.
+ RASI = MF.getFrameInfo()->CreateFixedObject(IsPPC64? 8 : 4, LROffset);
+ // Save the result.
+ FI->setReturnAddrSaveIndex(RASI);
+ }
+ return DAG.getFrameIndex(RASI, PtrVT);
+}
+
+SDOperand
+PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ bool IsPPC64 = PPCSubTarget.isPPC64();
+ bool isMachoABI = PPCSubTarget.isMachoABI();
+ MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
// Get current frame pointer save index. The users of this index will be
// primarily DYNALLOC instructions.
PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
int FPSI = FI->getFramePointerSaveIndex();
-
+
// If the frame pointer save index hasn't been defined yet.
if (!FPSI) {
// Find out what the fix offset of the frame pointer save area.
@@ -2290,32 +2697,37 @@
// Save the result.
FI->setFramePointerSaveIndex(FPSI);
}
+ return DAG.getFrameIndex(FPSI, PtrVT);
+}
+SDOperand PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op,
+ SelectionDAG &DAG,
+ const PPCSubtarget &Subtarget) {
// Get the inputs.
SDOperand Chain = Op.getOperand(0);
SDOperand Size = Op.getOperand(1);
// Get the corect type for pointers.
- MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+ MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
// Negate the size.
SDOperand NegSize = DAG.getNode(ISD::SUB, PtrVT,
DAG.getConstant(0, PtrVT), Size);
// Construct a node for the frame pointer save index.
- SDOperand FPSIdx = DAG.getFrameIndex(FPSI, PtrVT);
+ SDOperand FPSIdx = getFramePointerFrameIndex(DAG);
// Build a DYNALLOC node.
SDOperand Ops[3] = { Chain, NegSize, FPSIdx };
SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
return DAG.getNode(PPCISD::DYNALLOC, VTs, Ops, 3);
}
-SDOperand PPCTargetLowering::LowerAtomicLAS(SDOperand Op, SelectionDAG &DAG) {
- MVT::ValueType VT = Op.Val->getValueType(0);
+SDOperand PPCTargetLowering::LowerAtomicLOAD_ADD(SDOperand Op, SelectionDAG &DAG) {
+ MVT VT = Op.Val->getValueType(0);
SDOperand Chain = Op.getOperand(0);
SDOperand Ptr = Op.getOperand(1);
SDOperand Incr = Op.getOperand(2);
// Issue a "load and reserve".
- std::vector<MVT::ValueType> VTs;
+ std::vector<MVT> VTs;
VTs.push_back(VT);
VTs.push_back(MVT::Other);
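
For readers unfamiliar with the "load and reserve" idiom these atomic
lowerings rely on: the PPCISD::LARX/STCX pair is expected to end up as a
reservation loop along the following lines. This listing is illustrative
only; register choices are up to the allocator, and the exact way the backend
wraps the retry branch around the LARX/STCX pair is not shown in this hunk:

  retry:
    lwarx   r4, 0, r3      # load word from (r3) and reserve the address
    add     r4, r4, r5     # apply the increment (the LOAD_ADD case)
    stwcx.  r4, 0, r3      # store conditionally; fails if reservation lost
    bne-    retry          # lost the reservation, try again
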
@@ -2340,19 +2752,18 @@
};
SDOperand Store = DAG.getNode(PPCISD::STCX, MVT::Other, Ops2, 4);
SDOperand OutOps[] = { Load, Store };
- return DAG.getNode(ISD::MERGE_VALUES, DAG.getVTList(VT, MVT::Other),
- OutOps, 2);
+ return DAG.getMergeValues(OutOps, 2);
}
-SDOperand PPCTargetLowering::LowerAtomicLCS(SDOperand Op, SelectionDAG &DAG) {
- MVT::ValueType VT = Op.Val->getValueType(0);
+SDOperand PPCTargetLowering::LowerAtomicCMP_SWAP(SDOperand Op, SelectionDAG &DAG) {
+ MVT VT = Op.Val->getValueType(0);
SDOperand Chain = Op.getOperand(0);
SDOperand Ptr = Op.getOperand(1);
SDOperand NewVal = Op.getOperand(2);
SDOperand OldVal = Op.getOperand(3);
// Issue a "load and reserve".
- std::vector<MVT::ValueType> VTs;
+ std::vector<MVT> VTs;
VTs.push_back(VT);
VTs.push_back(MVT::Other);
@@ -2383,18 +2794,17 @@
};
SDOperand Store = DAG.getNode(PPCISD::STCX, MVT::Other, Ops3, 4);
SDOperand OutOps[] = { Load, Store };
- return DAG.getNode(ISD::MERGE_VALUES, DAG.getVTList(VT, MVT::Other),
- OutOps, 2);
+ return DAG.getMergeValues(OutOps, 2);
}
SDOperand PPCTargetLowering::LowerAtomicSWAP(SDOperand Op, SelectionDAG &DAG) {
- MVT::ValueType VT = Op.Val->getValueType(0);
+ MVT VT = Op.Val->getValueType(0);
SDOperand Chain = Op.getOperand(0);
SDOperand Ptr = Op.getOperand(1);
SDOperand NewVal = Op.getOperand(2);
// Issue a "load and reserve".
- std::vector<MVT::ValueType> VTs;
+ std::vector<MVT> VTs;
VTs.push_back(VT);
VTs.push_back(MVT::Other);
@@ -2416,16 +2826,15 @@
};
SDOperand Store = DAG.getNode(PPCISD::STCX, MVT::Other, Ops2, 4);
SDOperand OutOps[] = { Load, Store };
- return DAG.getNode(ISD::MERGE_VALUES, DAG.getVTList(VT, MVT::Other),
- OutOps, 2);
+ return DAG.getMergeValues(OutOps, 2);
}
/// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when
/// possible.
SDOperand PPCTargetLowering::LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG) {
// Not FP? Not a fsel.
- if (!MVT::isFloatingPoint(Op.getOperand(0).getValueType()) ||
- !MVT::isFloatingPoint(Op.getOperand(2).getValueType()))
+ if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
+ !Op.getOperand(2).getValueType().isFloatingPoint())
return SDOperand();
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
@@ -2433,8 +2842,8 @@
// Cannot handle SETEQ/SETNE.
if (CC == ISD::SETEQ || CC == ISD::SETNE) return SDOperand();
- MVT::ValueType ResVT = Op.getValueType();
- MVT::ValueType CmpVT = Op.getOperand(0).getValueType();
+ MVT ResVT = Op.getValueType();
+ MVT CmpVT = Op.getOperand(0).getValueType();
SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1);
SDOperand TV = Op.getOperand(2), FV = Op.getOperand(3);
@@ -2503,13 +2912,13 @@
// FIXME: Split this code up when LegalizeDAGTypes lands.
SDOperand PPCTargetLowering::LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) {
- assert(MVT::isFloatingPoint(Op.getOperand(0).getValueType()));
+ assert(Op.getOperand(0).getValueType().isFloatingPoint());
SDOperand Src = Op.getOperand(0);
if (Src.getValueType() == MVT::f32)
Src = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Src);
SDOperand Tmp;
- switch (Op.getValueType()) {
+ switch (Op.getValueType().getSimpleVT()) {
default: assert(0 && "Unhandled FP_TO_SINT type in custom expander!");
case MVT::i32:
Tmp = DAG.getNode(PPCISD::FCTIWZ, MVT::f64, Src);
@@ -2545,7 +2954,7 @@
// This sequence changes FPSCR to do round-to-zero, adds the two halves
// of the long double, and puts FPSCR back the way it was. We do not
// actually model FPSCR.
- std::vector<MVT::ValueType> NodeTys;
+ std::vector<MVT> NodeTys;
SDOperand Ops[4], Result, MFFSreg, InFlag, FPreg;
NodeTys.push_back(MVT::f64); // Return register
@@ -2613,7 +3022,7 @@
// then lfd it and fcfid it.
MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
int FrameIdx = FrameInfo->CreateStackObject(8, 8);
- MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+ MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
SDOperand FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
SDOperand Ext64 = DAG.getNode(PPCISD::EXTSW_32, MVT::i32,
@@ -2656,9 +3065,9 @@
*/
MachineFunction &MF = DAG.getMachineFunction();
- MVT::ValueType VT = Op.getValueType();
- MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
- std::vector<MVT::ValueType> NodeTys;
+ MVT VT = Op.getValueType();
+ MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+ std::vector<MVT> NodeTys;
SDOperand MFFSreg, InFlag;
// Save FP Control Word to register
@@ -2692,13 +3101,13 @@
SDOperand RetVal =
DAG.getNode(ISD::XOR, MVT::i32, CWD1, CWD2);
- return DAG.getNode((MVT::getSizeInBits(VT) < 16 ?
+ return DAG.getNode((VT.getSizeInBits() < 16 ?
ISD::TRUNCATE : ISD::ZERO_EXTEND), VT, RetVal);
}
SDOperand PPCTargetLowering::LowerSHL_PARTS(SDOperand Op, SelectionDAG &DAG) {
- MVT::ValueType VT = Op.getValueType();
- unsigned BitWidth = MVT::getSizeInBits(VT);
+ MVT VT = Op.getValueType();
+ unsigned BitWidth = VT.getSizeInBits();
assert(Op.getNumOperands() == 3 &&
VT == Op.getOperand(1).getValueType() &&
"Unexpected SHL!");
@@ -2708,7 +3117,7 @@
SDOperand Lo = Op.getOperand(0);
SDOperand Hi = Op.getOperand(1);
SDOperand Amt = Op.getOperand(2);
- MVT::ValueType AmtVT = Amt.getValueType();
+ MVT AmtVT = Amt.getValueType();
SDOperand Tmp1 = DAG.getNode(ISD::SUB, AmtVT,
DAG.getConstant(BitWidth, AmtVT), Amt);
@@ -2721,13 +3130,12 @@
SDOperand OutHi = DAG.getNode(ISD::OR, VT, Tmp4, Tmp6);
SDOperand OutLo = DAG.getNode(PPCISD::SHL, VT, Lo, Amt);
SDOperand OutOps[] = { OutLo, OutHi };
- return DAG.getNode(ISD::MERGE_VALUES, DAG.getVTList(VT, VT),
- OutOps, 2);
+ return DAG.getMergeValues(OutOps, 2);
}
SDOperand PPCTargetLowering::LowerSRL_PARTS(SDOperand Op, SelectionDAG &DAG) {
- MVT::ValueType VT = Op.getValueType();
- unsigned BitWidth = MVT::getSizeInBits(VT);
+ MVT VT = Op.getValueType();
+ unsigned BitWidth = VT.getSizeInBits();
assert(Op.getNumOperands() == 3 &&
VT == Op.getOperand(1).getValueType() &&
"Unexpected SRL!");
@@ -2737,7 +3145,7 @@
SDOperand Lo = Op.getOperand(0);
SDOperand Hi = Op.getOperand(1);
SDOperand Amt = Op.getOperand(2);
- MVT::ValueType AmtVT = Amt.getValueType();
+ MVT AmtVT = Amt.getValueType();
SDOperand Tmp1 = DAG.getNode(ISD::SUB, AmtVT,
DAG.getConstant(BitWidth, AmtVT), Amt);
@@ -2750,13 +3158,12 @@
SDOperand OutLo = DAG.getNode(ISD::OR, VT, Tmp4, Tmp6);
SDOperand OutHi = DAG.getNode(PPCISD::SRL, VT, Hi, Amt);
SDOperand OutOps[] = { OutLo, OutHi };
- return DAG.getNode(ISD::MERGE_VALUES, DAG.getVTList(VT, VT),
- OutOps, 2);
+ return DAG.getMergeValues(OutOps, 2);
}
SDOperand PPCTargetLowering::LowerSRA_PARTS(SDOperand Op, SelectionDAG &DAG) {
- MVT::ValueType VT = Op.getValueType();
- unsigned BitWidth = MVT::getSizeInBits(VT);
+ MVT VT = Op.getValueType();
+ unsigned BitWidth = VT.getSizeInBits();
assert(Op.getNumOperands() == 3 &&
VT == Op.getOperand(1).getValueType() &&
"Unexpected SRA!");
@@ -2765,7 +3172,7 @@
SDOperand Lo = Op.getOperand(0);
SDOperand Hi = Op.getOperand(1);
SDOperand Amt = Op.getOperand(2);
- MVT::ValueType AmtVT = Amt.getValueType();
+ MVT AmtVT = Amt.getValueType();
SDOperand Tmp1 = DAG.getNode(ISD::SUB, AmtVT,
DAG.getConstant(BitWidth, AmtVT), Amt);
@@ -2779,8 +3186,7 @@
SDOperand OutLo = DAG.getSelectCC(Tmp5, DAG.getConstant(0, AmtVT),
Tmp4, Tmp6, ISD::SETLE);
SDOperand OutOps[] = { OutLo, OutHi };
- return DAG.getNode(ISD::MERGE_VALUES, DAG.getVTList(VT, VT),
- OutOps, 2);
+ return DAG.getMergeValues(OutOps, 2);
}
//===----------------------------------------------------------------------===//
@@ -2797,7 +3203,7 @@
// Start with zero'd results.
VectorBits[0] = VectorBits[1] = UndefBits[0] = UndefBits[1] = 0;
- unsigned EltBitSize = MVT::getSizeInBits(BV->getOperand(0).getValueType());
+ unsigned EltBitSize = BV->getOperand(0).getValueType().getSizeInBits();
for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
SDOperand OpVal = BV->getOperand(i);
@@ -2883,26 +3289,26 @@
/// BuildSplatI - Build a canonical splati of Val with an element size of
/// SplatSize. Cast the result to VT.
-static SDOperand BuildSplatI(int Val, unsigned SplatSize, MVT::ValueType VT,
+static SDOperand BuildSplatI(int Val, unsigned SplatSize, MVT VT,
SelectionDAG &DAG) {
assert(Val >= -16 && Val <= 15 && "vsplti is out of range!");
- static const MVT::ValueType VTys[] = { // canonical VT to use for each size.
+ static const MVT VTys[] = { // canonical VT to use for each size.
MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
};
- MVT::ValueType ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];
+ MVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];
// Force vspltis[hw] -1 to vspltisb -1 to canonicalize.
if (Val == -1)
SplatSize = 1;
- MVT::ValueType CanonicalVT = VTys[SplatSize-1];
+ MVT CanonicalVT = VTys[SplatSize-1];
// Build a canonical splat for this value.
- SDOperand Elt = DAG.getConstant(Val, MVT::getVectorElementType(CanonicalVT));
+ SDOperand Elt = DAG.getConstant(Val, CanonicalVT.getVectorElementType());
SmallVector<SDOperand, 8> Ops;
- Ops.assign(MVT::getVectorNumElements(CanonicalVT), Elt);
+ Ops.assign(CanonicalVT.getVectorNumElements(), Elt);
SDOperand Res = DAG.getNode(ISD::BUILD_VECTOR, CanonicalVT,
&Ops[0], Ops.size());
return DAG.getNode(ISD::BIT_CONVERT, ReqVT, Res);
@@ -2912,7 +3318,7 @@
/// specified intrinsic ID.
static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand LHS, SDOperand RHS,
SelectionDAG &DAG,
- MVT::ValueType DestVT = MVT::Other) {
+ MVT DestVT = MVT::Other) {
if (DestVT == MVT::Other) DestVT = LHS.getValueType();
return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT,
DAG.getConstant(IID, MVT::i32), LHS, RHS);
@@ -2922,7 +3328,7 @@
/// specified intrinsic ID.
static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand Op0, SDOperand Op1,
SDOperand Op2, SelectionDAG &DAG,
- MVT::ValueType DestVT = MVT::Other) {
+ MVT DestVT = MVT::Other) {
if (DestVT == MVT::Other) DestVT = Op0.getValueType();
return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT,
DAG.getConstant(IID, MVT::i32), Op0, Op1, Op2);
@@ -2932,7 +3338,7 @@
/// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
/// amount. The result has the specified value type.
static SDOperand BuildVSLDOI(SDOperand LHS, SDOperand RHS, unsigned Amt,
- MVT::ValueType VT, SelectionDAG &DAG) {
+ MVT VT, SelectionDAG &DAG) {
// Force LHS/RHS to be the right type.
LHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, LHS);
RHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, RHS);
@@ -3292,8 +3698,8 @@
// The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
// that it is in input element units, not in bytes. Convert now.
- MVT::ValueType EltVT = MVT::getVectorElementType(V1.getValueType());
- unsigned BytesPerElement = MVT::getSizeInBits(EltVT)/8;
+ MVT EltVT = V1.getValueType().getVectorElementType();
+ unsigned BytesPerElement = EltVT.getSizeInBits()/8;
SmallVector<SDOperand, 16> ResultMask;
for (unsigned i = 0, e = PermMask.getNumOperands(); i != e; ++i) {
@@ -3381,7 +3787,7 @@
Op.getOperand(3), // RHS
DAG.getConstant(CompareOpc, MVT::i32)
};
- std::vector<MVT::ValueType> VTs;
+ std::vector<MVT> VTs;
VTs.push_back(Op.getOperand(2).getValueType());
VTs.push_back(MVT::Flag);
SDOperand CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops, 3);
@@ -3430,7 +3836,7 @@
// Create a stack slot that is 16-byte aligned.
MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
int FrameIdx = FrameInfo->CreateStackObject(16, 16);
- MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+ MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
SDOperand FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
// Store the input value into Value#0 of the stack slot.
@@ -3529,8 +3935,8 @@
case ISD::DYNAMIC_STACKALLOC:
return LowerDYNAMIC_STACKALLOC(Op, DAG, PPCSubTarget);
- case ISD::ATOMIC_LAS: return LowerAtomicLAS(Op, DAG);
- case ISD::ATOMIC_LCS: return LowerAtomicLCS(Op, DAG);
+ case ISD::ATOMIC_LOAD_ADD: return LowerAtomicLOAD_ADD(Op, DAG);
+ case ISD::ATOMIC_CMP_SWAP: return LowerAtomicCMP_SWAP(Op, DAG);
case ISD::ATOMIC_SWAP: return LowerAtomicSWAP(Op, DAG);
case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
@@ -3558,7 +3964,7 @@
return SDOperand();
}
-SDNode *PPCTargetLowering::ExpandOperationResult(SDNode *N, SelectionDAG &DAG) {
+SDNode *PPCTargetLowering::ReplaceNodeResults(SDNode *N, SelectionDAG &DAG) {
switch (N->getOpcode()) {
default: assert(0 && "Wasn't expecting to be able to lower this!");
case ISD::FP_TO_SINT: return LowerFP_TO_SINT(SDOperand(N, 0), DAG).Val;
@@ -3604,15 +4010,10 @@
MachineFunction *F = BB->getParent();
F->getBasicBlockList().insert(It, copy0MBB);
F->getBasicBlockList().insert(It, sinkMBB);
- // Update machine-CFG edges by first adding all successors of the current
+ // Update machine-CFG edges by transferring all successors of the current
// block to the new block which will contain the Phi node for the select.
- for(MachineBasicBlock::succ_iterator i = BB->succ_begin(),
- e = BB->succ_end(); i != e; ++i)
- sinkMBB->addSuccessor(*i);
- // Next, remove all successors of the current block, and add the true
- // and fallthrough blocks as its successors.
- while(!BB->succ_empty())
- BB->removeSuccessor(BB->succ_begin());
+ sinkMBB->transferSuccessors(BB);
+ // Next, add the true and fallthrough blocks as its successors.
BB->addSuccessor(copy0MBB);
BB->addSuccessor(sinkMBB);
@@ -3741,7 +4142,7 @@
SDOperand Load = N->getOperand(0);
LoadSDNode *LD = cast<LoadSDNode>(Load);
// Create the byte-swapping load.
- std::vector<MVT::ValueType> VTs;
+ std::vector<MVT> VTs;
VTs.push_back(MVT::i32);
VTs.push_back(MVT::Other);
SDOperand MO = DAG.getMemOperand(LD->getMemOperand());
@@ -3851,7 +4252,7 @@
bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);
// Create the PPCISD altivec 'dot' comparison node.
- std::vector<MVT::ValueType> VTs;
+ std::vector<MVT> VTs;
SDOperand Ops[] = {
LHS.getOperand(2), // LHS of compare
LHS.getOperand(3), // RHS of compare
@@ -3954,7 +4355,7 @@
std::pair<unsigned, const TargetRegisterClass*>
PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
- MVT::ValueType VT) const {
+ MVT VT) const {
if (Constraint.size() == 1) {
// GCC RS6000 Constraint Letters
switch (Constraint[0]) {
@@ -3984,7 +4385,7 @@
/// vector. If it is invalid, don't add anything to Ops.
void PPCTargetLowering::LowerAsmOperandForConstraint(SDOperand Op, char Letter,
std::vector<SDOperand>&Ops,
- SelectionDAG &DAG) {
+ SelectionDAG &DAG) const {
SDOperand Result(0,0);
switch (Letter) {
default: break;
@@ -4099,25 +4500,13 @@
MachineFunction &MF = DAG.getMachineFunction();
PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
- int RAIdx = FuncInfo->getReturnAddrSaveIndex();
- if (RAIdx == 0) {
- bool isPPC64 = PPCSubTarget.isPPC64();
- int Offset =
- PPCFrameInfo::getReturnSaveOffset(isPPC64, PPCSubTarget.isMachoABI());
-
- // Set up a frame object for the return address.
- RAIdx = MF.getFrameInfo()->CreateFixedObject(isPPC64 ? 8 : 4, Offset);
-
- // Remember it for next time.
- FuncInfo->setReturnAddrSaveIndex(RAIdx);
-
- // Make sure the function really does not optimize away the store of the RA
- // to the stack.
- FuncInfo->setLRStoreRequired();
- }
-
+
// Just load the return address off the stack.
- SDOperand RetAddrFI = DAG.getFrameIndex(RAIdx, getPointerTy());
+ SDOperand RetAddrFI = getReturnAddrFrameIndex(DAG);
+
+ // Make sure the function really does not optimize away the store of the RA
+ // to the stack.
+ FuncInfo->setLRStoreRequired();
return DAG.getLoad(getPointerTy(), DAG.getEntryNode(), RetAddrFI, NULL, 0);
}
@@ -4126,7 +4515,7 @@
if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0)
return SDOperand();
- MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+ MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
bool isPPC64 = PtrVT == MVT::i64;
MachineFunction &MF = DAG.getMachineFunction();
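The LowerSHL_PARTS / LowerSRL_PARTS / LowerSRA_PARTS routines above compose a 64-bit shift out of two 32-bit register halves and now return the pair through getMergeValues. As a reference for what the SHL_PARTS lowering must compute, here is a minimal standalone sketch (plain C++, not part of the patch; the branching form is illustrative only -- the PPC lowering gets the same result branchlessly from OR/select nodes):

#include <cassert>
#include <cstdint>

// Reference model: shift the 64-bit value (hi:lo) left by amt and return
// the two 32-bit result halves.  Assumes 0 <= amt <= 63.
static void shl_parts(uint32_t lo, uint32_t hi, unsigned amt,
                      uint32_t &outLo, uint32_t &outHi) {
  if (amt == 0) {                        // no shift at all
    outLo = lo;  outHi = hi;
  } else if (amt < 32) {                 // bits move from Lo into Hi
    outHi = (hi << amt) | (lo >> (32 - amt));
    outLo = lo << amt;
  } else {                               // Lo supplies all of Hi
    outHi = lo << (amt - 32);
    outLo = 0;
  }
}

int main() {
  uint32_t lo, hi;
  shl_parts(0x80000001u, 0x00000000u, 4, lo, hi);
  assert(hi == 0x8 && lo == 0x10);       // 0x0000000080000001 << 4
  return 0;
}
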
Modified: llvm/branches/non-call-eh/lib/Target/PowerPC/PPCISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PowerPC/PPCISelLowering.h?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PowerPC/PPCISelLowering.h (original)
+++ llvm/branches/non-call-eh/lib/Target/PowerPC/PPCISelLowering.h Sun Jul 6 15:45:41 2008
@@ -162,7 +162,16 @@
/// CMP_UNRESERVE = Test for equality and "unreserve" if not true. This
/// is used to implement atomic operations.
- CMP_UNRESERVE
+ CMP_UNRESERVE,
+
+ /// TAILCALL - Indicates a tail call should be taken.
+ TAILCALL,
+ /// TC_RETURN - A tail call return.
+ /// operand #0 chain
+ /// operand #1 callee (register or absolute)
+ /// operand #2 stack adjustment
+ /// operand #3 optional in flag
+ TC_RETURN
};
}
@@ -226,7 +235,7 @@
virtual const char *getTargetNodeName(unsigned Opcode) const;
/// getSetCCResultType - Return the ISD::SETCC ValueType
- virtual MVT::ValueType getSetCCResultType(const SDOperand &) const;
+ virtual MVT getSetCCResultType(const SDOperand &) const;
/// getPreIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
@@ -264,7 +273,7 @@
///
virtual SDOperand LowerOperation(SDOperand Op, SelectionDAG &DAG);
- virtual SDNode *ExpandOperationResult(SDNode *N, SelectionDAG &DAG);
+ virtual SDNode *ReplaceNodeResults(SDNode *N, SelectionDAG &DAG);
virtual SDOperand PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
@@ -281,7 +290,7 @@
ConstraintType getConstraintType(const std::string &Constraint) const;
std::pair<unsigned, const TargetRegisterClass*>
getRegForInlineAsmConstraint(const std::string &Constraint,
- MVT::ValueType VT) const;
+ MVT VT) const;
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. This is the actual
@@ -293,7 +302,7 @@
virtual void LowerAsmOperandForConstraint(SDOperand Op,
char ConstraintLetter,
std::vector<SDOperand> &Ops,
- SelectionDAG &DAG);
+ SelectionDAG &DAG) const;
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
@@ -308,11 +317,27 @@
/// the offset of the target addressing mode.
virtual bool isLegalAddressImmediate(GlobalValue *GV) const;
+ /// IsEligibleForTailCallOptimization - Check whether the call is eligible
+ /// for tail call optimization. Targets which want to do tail call
+ /// optimization should implement this function.
+ virtual bool IsEligibleForTailCallOptimization(SDOperand Call,
+ SDOperand Ret,
+ SelectionDAG &DAG) const;
+
private:
/// PPCAtomicLabelIndex - Keep track the number of PPC atomic labels.
///
unsigned PPCAtomicLabelIndex;
+ SDOperand getFramePointerFrameIndex(SelectionDAG & DAG) const;
+ SDOperand getReturnAddrFrameIndex(SelectionDAG & DAG) const;
+
+ SDOperand EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG,
+ int SPDiff,
+ SDOperand Chain,
+ SDOperand &LROpOut,
+ SDOperand &FPOpOut);
+
SDOperand LowerRETURNADDR(SDOperand Op, SelectionDAG &DAG);
SDOperand LowerFRAMEADDR(SDOperand Op, SelectionDAG &DAG);
SDOperand LowerConstantPool(SDOperand Op, SelectionDAG &DAG);
@@ -341,8 +366,8 @@
SDOperand LowerDYNAMIC_STACKALLOC(SDOperand Op, SelectionDAG &DAG,
const PPCSubtarget &Subtarget);
SDOperand LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG);
- SDOperand LowerAtomicLAS(SDOperand Op, SelectionDAG &DAG);
- SDOperand LowerAtomicLCS(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerAtomicLOAD_ADD(SDOperand Op, SelectionDAG &DAG);
+ SDOperand LowerAtomicCMP_SWAP(SDOperand Op, SelectionDAG &DAG);
SDOperand LowerAtomicSWAP(SDOperand Op, SelectionDAG &DAG);
SDOperand LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG);
SDOperand LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG);
Modified: llvm/branches/non-call-eh/lib/Target/PowerPC/PPCInstr64Bit.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PowerPC/PPCInstr64Bit.td?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PowerPC/PPCInstr64Bit.td (original)
+++ llvm/branches/non-call-eh/lib/Target/PowerPC/PPCInstr64Bit.td Sun Jul 6 15:45:41 2008
@@ -116,7 +116,6 @@
def : Pat<(PPCcall_ELF (i64 texternalsym:$dst)),
(BL8_ELF texternalsym:$dst)>;
-
// Atomic operations.
def LDARX : Pseudo<(outs G8RC:$rD), (ins memrr:$ptr, i32imm:$label),
"\nLa${label}_entry:\n\tldarx $rD, $ptr",
@@ -135,6 +134,53 @@
[(PPCcmp_unres G8RC:$rA, immSExt16:$imm, imm:$label)]>;
}
+let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
+def TCRETURNdi8 :Pseudo< (outs),
+ (ins calltarget:$dst, i32imm:$offset, variable_ops),
+ "#TC_RETURNd8 $dst $offset",
+ []>;
+
+let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
+def TCRETURNai8 :Pseudo<(outs), (ins aaddr:$func, i32imm:$offset, variable_ops),
+ "#TC_RETURNa8 $func $offset",
+ [(PPCtc_return (i64 imm:$func), imm:$offset)]>;
+
+let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
+def TCRETURNri8 : Pseudo<(outs), (ins CTRRC8:$dst, i32imm:$offset, variable_ops),
+ "#TC_RETURNr8 $dst $offset",
+ []>;
+
+
+let isTerminator = 1, isBarrier = 1, PPC970_Unit = 7, isBranch = 1,
+ isIndirectBranch = 1, isCall = 1, isReturn = 1 in
+def TAILBCTR8 : XLForm_2_ext<19, 528, 20, 0, 0, (outs), (ins), "bctr", BrB, []>,
+ Requires<[In64BitMode]>;
+
+
+
+let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7,
+ isBarrier = 1, isCall = 1, isReturn = 1 in
+def TAILB8 : IForm<18, 0, 0, (outs), (ins calltarget:$dst),
+ "b $dst", BrB,
+ []>;
+
+
+let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7,
+ isBarrier = 1, isCall = 1, isReturn = 1 in
+def TAILBA8 : IForm<18, 0, 0, (outs), (ins aaddr:$dst),
+ "ba $dst", BrB,
+ []>;
+
+def : Pat<(PPCtc_return (i64 tglobaladdr:$dst), imm:$imm),
+ (TCRETURNdi8 tglobaladdr:$dst, imm:$imm)>;
+
+def : Pat<(PPCtc_return (i64 texternalsym:$dst), imm:$imm),
+ (TCRETURNdi8 texternalsym:$dst, imm:$imm)>;
+
+def : Pat<(PPCtc_return CTRRC8:$dst, imm:$imm),
+ (TCRETURNri8 CTRRC8:$dst, imm:$imm)>;
+
+
//===----------------------------------------------------------------------===//
// 64-bit SPR manipulation instrs.
Modified: llvm/branches/non-call-eh/lib/Target/PowerPC/PPCInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PowerPC/PPCInstrInfo.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PowerPC/PPCInstrInfo.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/PowerPC/PPCInstrInfo.cpp Sun Jul 6 15:45:41 2008
@@ -136,10 +136,11 @@
// commuteInstruction - We can commute rlwimi instructions, but only if the
// rotate amt is zero. We also have to munge the immediates a bit.
-MachineInstr *PPCInstrInfo::commuteInstruction(MachineInstr *MI) const {
+MachineInstr *
+PPCInstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
// Normal instructions can be commuted the obvious way.
if (MI->getOpcode() != PPC::RLWIMI)
- return TargetInstrInfoImpl::commuteInstruction(MI);
+ return TargetInstrInfoImpl::commuteInstruction(MI, NewMI);
// Cannot commute if it has a non-zero rotate count.
if (MI->getOperand(3).getImm() != 0)
@@ -158,23 +159,40 @@
unsigned Reg2 = MI->getOperand(2).getReg();
bool Reg1IsKill = MI->getOperand(1).isKill();
bool Reg2IsKill = MI->getOperand(2).isKill();
+ bool ChangeReg0 = false;
// If machine instrs are no longer in two-address forms, update
// destination register as well.
if (Reg0 == Reg1) {
// Must be two address instruction!
assert(MI->getDesc().getOperandConstraint(0, TOI::TIED_TO) &&
"Expecting a two-address instruction!");
- MI->getOperand(0).setReg(Reg2);
Reg2IsKill = false;
+ ChangeReg0 = true;
}
+
+ // Masks.
+ unsigned MB = MI->getOperand(4).getImm();
+ unsigned ME = MI->getOperand(5).getImm();
+
+ if (NewMI) {
+ // Create a new instruction.
+ unsigned Reg0 = ChangeReg0 ? Reg2 : MI->getOperand(0).getReg();
+ bool Reg0IsDead = MI->getOperand(0).isDead();
+ return BuildMI(MI->getDesc()).addReg(Reg0, true, false, false, Reg0IsDead)
+ .addReg(Reg2, false, false, Reg2IsKill)
+ .addReg(Reg1, false, false, Reg1IsKill)
+ .addImm((ME+1) & 31)
+ .addImm((MB-1) & 31);
+ }
+
+ if (ChangeReg0)
+ MI->getOperand(0).setReg(Reg2);
MI->getOperand(2).setReg(Reg1);
MI->getOperand(1).setReg(Reg2);
MI->getOperand(2).setIsKill(Reg1IsKill);
MI->getOperand(1).setIsKill(Reg2IsKill);
// Swap the mask around.
- unsigned MB = MI->getOperand(4).getImm();
- unsigned ME = MI->getOperand(5).getImm();
MI->getOperand(4).setImm((ME+1) & 31);
MI->getOperand(5).setImm((MB-1) & 31);
return MI;
@@ -643,45 +661,62 @@
MI->getOperand(1).getReg() == MI->getOperand(2).getReg())) {
if (OpNum == 0) { // move -> store
unsigned InReg = MI->getOperand(1).getReg();
- NewMI = addFrameReference(BuildMI(get(PPC::STW)).addReg(InReg),
+ bool isKill = MI->getOperand(1).isKill();
+ NewMI = addFrameReference(BuildMI(get(PPC::STW))
+ .addReg(InReg, false, false, isKill),
FrameIndex);
} else { // move -> load
unsigned OutReg = MI->getOperand(0).getReg();
- NewMI = addFrameReference(BuildMI(get(PPC::LWZ), OutReg),
+ bool isDead = MI->getOperand(0).isDead();
+ NewMI = addFrameReference(BuildMI(get(PPC::LWZ))
+ .addReg(OutReg, true, false, false, isDead),
FrameIndex);
}
} else if ((Opc == PPC::OR8 &&
MI->getOperand(1).getReg() == MI->getOperand(2).getReg())) {
if (OpNum == 0) { // move -> store
unsigned InReg = MI->getOperand(1).getReg();
- NewMI = addFrameReference(BuildMI(get(PPC::STD)).addReg(InReg),
+ bool isKill = MI->getOperand(1).isKill();
+ NewMI = addFrameReference(BuildMI(get(PPC::STD))
+ .addReg(InReg, false, false, isKill),
FrameIndex);
} else { // move -> load
unsigned OutReg = MI->getOperand(0).getReg();
- NewMI = addFrameReference(BuildMI(get(PPC::LD), OutReg), FrameIndex);
+ bool isDead = MI->getOperand(0).isDead();
+ NewMI = addFrameReference(BuildMI(get(PPC::LD))
+ .addReg(OutReg, true, false, false, isDead),
+ FrameIndex);
}
} else if (Opc == PPC::FMRD) {
if (OpNum == 0) { // move -> store
unsigned InReg = MI->getOperand(1).getReg();
- NewMI = addFrameReference(BuildMI(get(PPC::STFD)).addReg(InReg),
+ bool isKill = MI->getOperand(1).isKill();
+ NewMI = addFrameReference(BuildMI(get(PPC::STFD))
+ .addReg(InReg, false, false, isKill),
FrameIndex);
} else { // move -> load
unsigned OutReg = MI->getOperand(0).getReg();
- NewMI = addFrameReference(BuildMI(get(PPC::LFD), OutReg), FrameIndex);
+ bool isDead = MI->getOperand(0).isDead();
+ NewMI = addFrameReference(BuildMI(get(PPC::LFD))
+ .addReg(OutReg, true, false, false, isDead),
+ FrameIndex);
}
} else if (Opc == PPC::FMRS) {
if (OpNum == 0) { // move -> store
unsigned InReg = MI->getOperand(1).getReg();
- NewMI = addFrameReference(BuildMI(get(PPC::STFS)).addReg(InReg),
+ bool isKill = MI->getOperand(1).isKill();
+ NewMI = addFrameReference(BuildMI(get(PPC::STFS))
+ .addReg(InReg, false, false, isKill),
FrameIndex);
} else { // move -> load
unsigned OutReg = MI->getOperand(0).getReg();
- NewMI = addFrameReference(BuildMI(get(PPC::LFS), OutReg), FrameIndex);
+ bool isDead = MI->getOperand(0).isDead();
+ NewMI = addFrameReference(BuildMI(get(PPC::LFS))
+ .addReg(OutReg, true, false, false, isDead),
+ FrameIndex);
}
}
- if (NewMI)
- NewMI->copyKillDeadInfo(MI);
return NewMI;
}
@@ -736,9 +771,10 @@
const char *AsmStr = MI->getOperand(0).getSymbolName();
return MF->getTarget().getTargetAsmInfo()->getInlineAsmLength(AsmStr);
}
- case PPC::LABEL: {
+ case PPC::DBG_LABEL:
+ case PPC::EH_LABEL:
+ case PPC::GC_LABEL:
return 0;
- }
default:
return 4; // PowerPC instructions are all 4 bytes
}
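One detail of the commuteInstruction change above is easy to misread: when the two source operands of an rlwimi with a zero rotate count are swapped, the insert mask must be complemented, and in PowerPC's wrapping MB/ME encoding the complement of MASK(MB,ME) is MASK(ME+1,MB-1). A standalone check of that identity (plain C++, not part of the patch; ppcMask and rlwimi0 are illustrative helpers, and the degenerate full-register mask is skipped):

#include <cassert>
#include <cstdint>

// PowerPC numbers bits 0 (MSB) .. 31 (LSB); MASK(MB,ME) sets bits MB..ME
// inclusive and wraps around when MB > ME.
static uint32_t ppcMask(unsigned mb, unsigned me) {
  uint32_t begin = ~0u >> mb;            // bits mb..31
  uint32_t end   = ~0u << (31 - me);     // bits 0..me
  return mb <= me ? (begin & end) : (begin | end);
}

// rlwimi rA,rS,0,MB,ME with a zero rotate count: insert the masked bits of
// rS into rA, keeping rA's bits outside the mask.
static uint32_t rlwimi0(uint32_t ra, uint32_t rs, unsigned mb, unsigned me) {
  uint32_t m = ppcMask(mb, me);
  return (rs & m) | (ra & ~m);
}

int main() {
  uint32_t a = 0x12345678, b = 0x9ABCDEF0;
  for (unsigned mb = 0; mb != 32; ++mb)
    for (unsigned me = 0; me != 32; ++me) {
      if (mb == 0 && me == 31) continue;  // full mask: complement is empty
      // Swapping the sources complements the mask: MB' = ME+1, ME' = MB-1.
      assert(rlwimi0(a, b, mb, me) ==
             rlwimi0(b, a, (me + 1) & 31, (mb - 1) & 31));
    }
  return 0;
}
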
Modified: llvm/branches/non-call-eh/lib/Target/PowerPC/PPCInstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PowerPC/PPCInstrInfo.h?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PowerPC/PPCInstrInfo.h (original)
+++ llvm/branches/non-call-eh/lib/Target/PowerPC/PPCInstrInfo.h Sun Jul 6 15:45:41 2008
@@ -78,7 +78,7 @@
/// such, whenever a client has an instance of instruction info, it should
/// always be able to get register info as well (through this method).
///
- virtual const TargetRegisterInfo &getRegisterInfo() const { return RI; }
+ virtual const PPCRegisterInfo &getRegisterInfo() const { return RI; }
/// getPointerRegClass - Return the register class to use to hold pointers.
/// This is used for addressing modes.
@@ -96,7 +96,7 @@
// commuteInstruction - We can commute rlwimi instructions, but only if the
// rotate amt is zero. We also have to munge the immediates a bit.
- virtual MachineInstr *commuteInstruction(MachineInstr *MI) const;
+ virtual MachineInstr *commuteInstruction(MachineInstr *MI, bool NewMI) const;
virtual void insertNoop(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const;
Modified: llvm/branches/non-call-eh/lib/Target/PowerPC/PPCInstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PowerPC/PPCInstrInfo.td?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PowerPC/PPCInstrInfo.td (original)
+++ llvm/branches/non-call-eh/lib/Target/PowerPC/PPCInstrInfo.td Sun Jul 6 15:45:41 2008
@@ -42,6 +42,7 @@
SDTCisVT<0, i32>, SDTCisPtrTy<1>, SDTCisVT<2, OtherVT>, SDTCisVT<3, OtherVT>
]>;
+
def SDT_PPClarx : SDTypeProfile<1, 2, [
SDTCisInt<0>, SDTCisPtrTy<1>, SDTCisVT<2, i32>
]>;
@@ -52,6 +53,10 @@
SDTCisSameAs<0, 1>, SDTCisInt<1>, SDTCisVT<2, i32>
]>;
+def SDT_PPCTC_ret : SDTypeProfile<0, 2, [
+ SDTCisPtrTy<0>, SDTCisVT<1, i32>
+]>;
+
//===----------------------------------------------------------------------===//
// PowerPC specific DAG Nodes.
//
@@ -121,6 +126,12 @@
def retflag : SDNode<"PPCISD::RET_FLAG", SDTNone,
[SDNPHasChain, SDNPOptInFlag]>;
+def PPCtc_return : SDNode<"PPCISD::TC_RETURN", SDT_PPCTC_ret,
+ [SDNPHasChain, SDNPOptInFlag]>;
+
+def PPCtailcall : SDNode<"PPCISD::TAILCALL", SDT_PPCCall,
+ [SDNPHasChain, SDNPOutFlag, SDNPOptInFlag]>;
+
def PPCvcmp : SDNode<"PPCISD::VCMP" , SDT_PPCvcmp, []>;
def PPCvcmp_o : SDNode<"PPCISD::VCMPo", SDT_PPCvcmp, [SDNPOutFlag]>;
@@ -453,6 +464,46 @@
[(PPCbctrl_ELF)]>, Requires<[In32BitMode]>;
}
+
+let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
+def TCRETURNdi :Pseudo< (outs),
+ (ins calltarget:$dst, i32imm:$offset, variable_ops),
+ "#TC_RETURNd $dst $offset",
+ []>;
+
+
+let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
+def TCRETURNai :Pseudo<(outs), (ins aaddr:$func, i32imm:$offset, variable_ops),
+ "#TC_RETURNa $func $offset",
+ [(PPCtc_return (i32 imm:$func), imm:$offset)]>;
+
+let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
+def TCRETURNri : Pseudo<(outs), (ins CTRRC:$dst, i32imm:$offset, variable_ops),
+ "#TC_RETURNr $dst $offset",
+ []>;
+
+
+let isTerminator = 1, isBarrier = 1, PPC970_Unit = 7, isBranch = 1,
+ isIndirectBranch = 1, isCall = 1, isReturn = 1 in
+def TAILBCTR : XLForm_2_ext<19, 528, 20, 0, 0, (outs), (ins), "bctr", BrB, []>,
+ Requires<[In32BitMode]>;
+
+
+
+let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7,
+ isBarrier = 1, isCall = 1, isReturn = 1 in
+def TAILB : IForm<18, 0, 0, (outs), (ins calltarget:$dst),
+ "b $dst", BrB,
+ []>;
+
+
+let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7,
+ isBarrier = 1, isCall = 1, isReturn = 1 in
+def TAILBA : IForm<18, 0, 0, (outs), (ins aaddr:$dst),
+ "ba $dst", BrB,
+ []>;
+
+
// DCB* instructions.
def DCBA : DCB_Form<758, 0, (outs), (ins memrr:$dst),
"dcba $dst", LdStDCBF, [(int_ppc_dcba xoaddr:$dst)]>,
@@ -480,8 +531,8 @@
PPC970_DGroup_Single;
// Atomic operations.
-def LWARX : Pseudo<(outs GPRC:$rD), (ins memrr:$ptr, i32imm:$label),
- "\nLa${label}_entry:\n\tlwarx $rD, $ptr",
+def LWARX : XForm_1<31, 20, (outs GPRC:$rD), (ins memrr:$ptr, i32imm:$label),
+ "\nLa${label}_entry:\n\tlwarx $rD, $ptr", LdStLWARX,
[(set GPRC:$rD, (PPClarx xoaddr:$ptr, imm:$label))]>;
let Defs = [CR0] in {
@@ -1211,6 +1262,18 @@
def : Pat<(PPCcall_ELF (i32 texternalsym:$dst)),
(BL_ELF texternalsym:$dst)>;
+
+def : Pat<(PPCtc_return (i32 tglobaladdr:$dst), imm:$imm),
+ (TCRETURNdi tglobaladdr:$dst, imm:$imm)>;
+
+def : Pat<(PPCtc_return (i32 texternalsym:$dst), imm:$imm),
+ (TCRETURNdi texternalsym:$dst, imm:$imm)>;
+
+def : Pat<(PPCtc_return CTRRC:$dst, imm:$imm),
+ (TCRETURNri CTRRC:$dst, imm:$imm)>;
+
+
+
// Hi and Lo for Darwin Global Addresses.
def : Pat<(PPChi tglobaladdr:$in, 0), (LIS tglobaladdr:$in)>;
def : Pat<(PPClo tglobaladdr:$in, 0), (LI tglobaladdr:$in)>;
Modified: llvm/branches/non-call-eh/lib/Target/PowerPC/PPCJITInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PowerPC/PPCJITInfo.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PowerPC/PPCJITInfo.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/PowerPC/PPCJITInfo.cpp Sun Jul 6 15:45:41 2008
@@ -17,7 +17,7 @@
#include "PPCTargetMachine.h"
#include "llvm/Function.h"
#include "llvm/CodeGen/MachineCodeEmitter.h"
-#include "llvm/Config/alloca.h"
+#include "llvm/System/Memory.h"
#include "llvm/Support/Debug.h"
#include <set>
using namespace llvm;
@@ -72,7 +72,7 @@
extern "C" void PPC64CompilationCallback();
#if (defined(__POWERPC__) || defined (__ppc__) || defined(_POWER)) && \
- !defined(__ppc64__)
+ !(defined(__ppc64__) || defined(__FreeBSD__))
// CompilationCallback stub - We can't use a C function with inline assembly in
// it, because the prolog/epilog inserted by GCC won't work for us. Instead,
// write our own wrapper, which does things our way, so we have complete control
@@ -138,7 +138,7 @@
);
#elif defined(__PPC__) && !defined(__ppc64__)
-// Linux/PPC support
+// Linux & FreeBSD / PPC 32 support
// CompilationCallback stub - We can't use a C function with inline assembly in
// it, because the prolog/epilog inserted by GCC won't work for us. Instead,
@@ -330,15 +330,6 @@
extern "C" void sys_icache_invalidate(const void *Addr, size_t len);
#endif
-/// SyncICache - On PPC, the JIT emitted code must be explicitly refetched to
-/// ensure correct execution.
-static void SyncICache(const void *Addr, size_t len) {
-#if (defined(__POWERPC__) || defined (__ppc__) || defined(_POWER)) && \
-defined(__APPLE__)
- sys_icache_invalidate(Addr, len);
-#endif
-}
-
void *PPCJITInfo::emitFunctionStub(const Function* F, void *Fn,
MachineCodeEmitter &MCE) {
// If this is just a call to an external function, emit a branch instead of a
@@ -355,7 +346,7 @@
MCE.emitWordBE(0);
MCE.emitWordBE(0);
EmitBranchToAt(Addr, (intptr_t)Fn, false, is64Bit);
- SyncICache((void*)Addr, 7*4);
+ sys::Memory::InvalidateInstructionCache((void*)Addr, 7*4);
return MCE.finishFunctionStub(F);
}
@@ -383,7 +374,7 @@
MCE.emitWordBE(0);
MCE.emitWordBE(0);
EmitBranchToAt(BranchAddr, (intptr_t)Fn, true, is64Bit);
- SyncICache((void*)Addr, 10*4);
+ sys::Memory::InvalidateInstructionCache((void*)Addr, 10*4);
return MCE.finishFunctionStub(F);
}
Modified: llvm/branches/non-call-eh/lib/Target/PowerPC/PPCMachineFunctionInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PowerPC/PPCMachineFunctionInfo.h?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PowerPC/PPCMachineFunctionInfo.h (original)
+++ llvm/branches/non-call-eh/lib/Target/PowerPC/PPCMachineFunctionInfo.h Sun Jul 6 15:45:41 2008
@@ -43,19 +43,42 @@
/// requires that the code generator produce a store of LR to the stack on
/// entry, even though LR may otherwise apparently not be used.
bool LRStoreRequired;
+
+ /// MinReservedArea - This is the frame size that is at least reserved in a
+ /// potential caller (parameter+linkage area).
+ unsigned MinReservedArea;
+
+ /// TailCallSPDelta - Stack pointer delta used when tail calling. Maximum
+ /// amount the stack pointer is adjusted to make the frame bigger for tail
+ /// calls. Used for creating an area before the register spill area.
+ int TailCallSPDelta;
+
+ /// HasFastCall - Does this function contain a fast call. Used to determine
+ /// how the caller's stack pointer should be calculated (epilog/dynamicalloc).
+ bool HasFastCall;
+
public:
PPCFunctionInfo(MachineFunction &MF)
: FramePointerSaveIndex(0),
ReturnAddrSaveIndex(0),
SpillsCR(false),
- LRStoreRequired(false) {}
+ LRStoreRequired(false),
+ MinReservedArea(0),
+ TailCallSPDelta(0),
+ HasFastCall(false) {}
int getFramePointerSaveIndex() const { return FramePointerSaveIndex; }
void setFramePointerSaveIndex(int Idx) { FramePointerSaveIndex = Idx; }
int getReturnAddrSaveIndex() const { return ReturnAddrSaveIndex; }
void setReturnAddrSaveIndex(int idx) { ReturnAddrSaveIndex = idx; }
-
+
+ unsigned getMinReservedArea() const { return MinReservedArea; }
+ void setMinReservedArea(unsigned size) { MinReservedArea = size; }
+
+ int getTailCallSPDelta() const { return TailCallSPDelta; }
+ void setTailCallSPDelta(int size) { TailCallSPDelta = size; }
+
/// UsesLR - This is set when the prolog/epilog inserter does its initial scan
/// of the function, it is true if the LR/LR8 register is ever explicitly
/// accessed/clobbered in the machine function (e.g. by calls and movpctolr,
@@ -68,6 +91,9 @@
void setLRStoreRequired() { LRStoreRequired = true; }
bool isLRStoreRequired() const { return LRStoreRequired; }
+
+ void setHasFastCall() { HasFastCall = true; }
+ bool hasFastCall() const { return HasFastCall;}
};
} // end of namespace llvm
Modified: llvm/branches/non-call-eh/lib/Target/PowerPC/PPCRegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PowerPC/PPCRegisterInfo.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PowerPC/PPCRegisterInfo.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/PowerPC/PPCRegisterInfo.cpp Sun Jul 6 15:45:41 2008
@@ -19,6 +19,7 @@
#include "PPCRegisterInfo.h"
#include "PPCFrameInfo.h"
#include "PPCSubtarget.h"
+#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Type.h"
@@ -332,7 +333,8 @@
//
static bool needsFP(const MachineFunction &MF) {
const MachineFrameInfo *MFI = MF.getFrameInfo();
- return NoFramePointerElim || MFI->hasVarSizedObjects();
+ return NoFramePointerElim || MFI->hasVarSizedObjects() ||
+ (PerformTailCallOpt && MF.getInfo<PPCFunctionInfo>()->hasFastCall());
}
static bool spillsCR(const MachineFunction &MF) {
@@ -399,9 +401,42 @@
MF.getFrameInfo()->hasCalls();
}
+
+
void PPCRegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
+ if (PerformTailCallOpt && I->getOpcode() == PPC::ADJCALLSTACKUP) {
+ // Add (actually subtract) back the amount the callee popped on return.
+ if (int CalleeAmt = I->getOperand(1).getImm()) {
+ MachineInstr * New = NULL;
+ bool is64Bit = Subtarget.isPPC64();
+ CalleeAmt *= -1;
+ unsigned StackReg = is64Bit ? PPC::X1 : PPC::R1;
+ unsigned TmpReg = is64Bit ? PPC::X0 : PPC::R0;
+ unsigned ADDIInstr = is64Bit ? PPC::ADDI8 : PPC::ADDI;
+ unsigned ADDInstr = is64Bit ? PPC::ADD8 : PPC::ADD4;
+ unsigned LISInstr = is64Bit ? PPC::LIS8 : PPC::LIS;
+ unsigned ORIInstr = is64Bit ? PPC::ORI8 : PPC::ORI;
+
+ if (isInt16(CalleeAmt)) {
+ New = BuildMI(TII.get(ADDIInstr), StackReg).addReg(StackReg).
+ addImm(CalleeAmt);
+ MBB.insert(I, New);
+ } else {
+ MachineBasicBlock::iterator MBBI = I;
+ BuildMI(MBB, MBBI, TII.get(LISInstr), TmpReg)
+ .addImm(CalleeAmt >> 16);
+ BuildMI(MBB, MBBI, TII.get(ORIInstr), TmpReg)
+ .addReg(TmpReg, false, false, true)
+ .addImm(CalleeAmt & 0xFFFF);
+ BuildMI(MBB, MBBI, TII.get(ADDInstr))
+ .addReg(StackReg)
+ .addReg(StackReg)
+ .addReg(TmpReg);
+ }
+ }
+ }
// Simply discard ADJCALLSTACKDOWN, ADJCALLSTACKUP instructions.
MBB.erase(I);
}
@@ -924,6 +959,13 @@
FI->setFramePointerSaveIndex(FPSI);
}
+ // Reserve stack space to move the linkage area to in case of a tail call.
+ int TCSPDelta = 0;
+ if (PerformTailCallOpt && (TCSPDelta=FI->getTailCallSPDelta()) < 0) {
+ int AddFPOffsetAmount = IsELF32_ABI ? -4 : 0;
+ MF.getFrameInfo()->CreateFixedObject( -1 * TCSPDelta,
+ AddFPOffsetAmount + TCSPDelta);
+ }
// Reserve a slot closest to SP or frame pointer if we have a dynalloc or
// a large stack, which will require scavenging a register to materialize a
// large offset.
@@ -1026,7 +1068,7 @@
if (needsFrameMoves) {
// Mark effective beginning of when frame pointer becomes valid.
FrameLabelId = MMI->NextLabelID();
- BuildMI(MBB, MBBI, TII.get(PPC::LABEL)).addImm(FrameLabelId).addImm(0);
+ BuildMI(MBB, MBBI, TII.get(PPC::DBG_LABEL)).addImm(FrameLabelId);
}
// Adjust stack pointer: r1 += NegFrameSize.
@@ -1135,7 +1177,7 @@
// Mark effective beginning of when frame pointer is ready.
unsigned ReadyLabelId = MMI->NextLabelID();
- BuildMI(MBB, MBBI, TII.get(PPC::LABEL)).addImm(ReadyLabelId).addImm(0);
+ BuildMI(MBB, MBBI, TII.get(PPC::DBG_LABEL)).addImm(ReadyLabelId);
MachineLocation FPDst(HasFP ? (IsPPC64 ? PPC::X31 : PPC::R31) :
(IsPPC64 ? PPC::X1 : PPC::R1));
@@ -1160,7 +1202,15 @@
void PPCRegisterInfo::emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
MachineBasicBlock::iterator MBBI = prior(MBB.end());
- assert(MBBI->getOpcode() == PPC::BLR &&
+ unsigned RetOpcode = MBBI->getOpcode();
+
+ assert( (RetOpcode == PPC::BLR ||
+ RetOpcode == PPC::TCRETURNri ||
+ RetOpcode == PPC::TCRETURNdi ||
+ RetOpcode == PPC::TCRETURNai ||
+ RetOpcode == PPC::TCRETURNri8 ||
+ RetOpcode == PPC::TCRETURNdi8 ||
+ RetOpcode == PPC::TCRETURNai8) &&
"Can only insert epilog into returning blocks");
// Get alignment info so we know how to restore r1
@@ -1169,7 +1219,7 @@
unsigned MaxAlign = MFI->getMaxAlignment();
// Get the number of bytes allocated from the FrameInfo.
- unsigned FrameSize = MFI->getStackSize();
+ int FrameSize = MFI->getStackSize();
// Get processor type.
bool IsPPC64 = Subtarget.isPPC64();
@@ -1183,19 +1233,75 @@
int LROffset = PPCFrameInfo::getReturnSaveOffset(IsPPC64, IsMachoABI);
int FPOffset = PPCFrameInfo::getFramePointerSaveOffset(IsPPC64, IsMachoABI);
+ bool UsesTCRet = RetOpcode == PPC::TCRETURNri ||
+ RetOpcode == PPC::TCRETURNdi ||
+ RetOpcode == PPC::TCRETURNai ||
+ RetOpcode == PPC::TCRETURNri8 ||
+ RetOpcode == PPC::TCRETURNdi8 ||
+ RetOpcode == PPC::TCRETURNai8;
+
+ PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
+
+ if (UsesTCRet) {
+ int MaxTCRetDelta = FI->getTailCallSPDelta();
+ MachineOperand &StackAdjust = MBBI->getOperand(1);
+ assert( StackAdjust.isImmediate() && "Expecting immediate value.");
+ // Adjust stack pointer.
+ int StackAdj = StackAdjust.getImm();
+ int Delta = StackAdj - MaxTCRetDelta;
+ assert((Delta >= 0) && "Delta must be non-negative");
+ if (MaxTCRetDelta>0)
+ FrameSize += (StackAdj +Delta);
+ else
+ FrameSize += StackAdj;
+ }
+
if (FrameSize) {
// The loaded (or persistent) stack pointer value is offset by the 'stwu'
// on entry to the function. Add this offset back now.
- if (!Subtarget.isPPC64()) {
- if (isInt16(FrameSize) && (!ALIGN_STACK || TargetAlign >= MaxAlign) &&
- !MFI->hasVarSizedObjects()) {
- BuildMI(MBB, MBBI, TII.get(PPC::ADDI), PPC::R1)
- .addReg(PPC::R1).addImm(FrameSize);
+ if (!IsPPC64) {
+ // If this function contained a fastcc call and PerformTailCallOpt is
+ // enabled (=> hasFastCall()==true) the fastcc call might contain a tail
+ // call which invalidates the stack pointer value in SP(0). So we use the
+ // value of R31 in this case.
+ if (FI->hasFastCall() && isInt16(FrameSize)) {
+ assert(hasFP(MF) && "Expecting a valid the frame pointer.");
+ BuildMI(MBB, MBBI, TII.get(PPC::ADDI), PPC::R1)
+ .addReg(PPC::R31).addImm(FrameSize);
+ } else if(FI->hasFastCall()) {
+ BuildMI(MBB, MBBI, TII.get(PPC::LIS), PPC::R0)
+ .addImm(FrameSize >> 16);
+ BuildMI(MBB, MBBI, TII.get(PPC::ORI), PPC::R0)
+ .addReg(PPC::R0, false, false, true)
+ .addImm(FrameSize & 0xFFFF);
+ BuildMI(MBB, MBBI, TII.get(PPC::ADD4))
+ .addReg(PPC::R1)
+ .addReg(PPC::R31)
+ .addReg(PPC::R0);
+ } else if (isInt16(FrameSize) &&
+ (!ALIGN_STACK || TargetAlign >= MaxAlign) &&
+ !MFI->hasVarSizedObjects()) {
+ BuildMI(MBB, MBBI, TII.get(PPC::ADDI), PPC::R1)
+ .addReg(PPC::R1).addImm(FrameSize);
} else {
BuildMI(MBB, MBBI, TII.get(PPC::LWZ),PPC::R1).addImm(0).addReg(PPC::R1);
}
} else {
- if (isInt16(FrameSize) && TargetAlign >= MaxAlign &&
+ if (FI->hasFastCall() && isInt16(FrameSize)) {
+ assert(hasFP(MF) && "Expecting a valid the frame pointer.");
+ BuildMI(MBB, MBBI, TII.get(PPC::ADDI8), PPC::X1)
+ .addReg(PPC::X31).addImm(FrameSize);
+ } else if(FI->hasFastCall()) {
+ BuildMI(MBB, MBBI, TII.get(PPC::LIS8), PPC::X0)
+ .addImm(FrameSize >> 16);
+ BuildMI(MBB, MBBI, TII.get(PPC::ORI8), PPC::X0)
+ .addReg(PPC::X0, false, false, true)
+ .addImm(FrameSize & 0xFFFF);
+ BuildMI(MBB, MBBI, TII.get(PPC::ADD8))
+ .addReg(PPC::X1)
+ .addReg(PPC::X31)
+ .addReg(PPC::X0);
+ } else if (isInt16(FrameSize) && TargetAlign >= MaxAlign &&
!MFI->hasVarSizedObjects()) {
BuildMI(MBB, MBBI, TII.get(PPC::ADDI8), PPC::X1)
.addReg(PPC::X1).addImm(FrameSize);
@@ -1228,6 +1334,64 @@
if (UsesLR)
BuildMI(MBB, MBBI, TII.get(PPC::MTLR)).addReg(PPC::R0);
}
+
+ // Callee pop calling convention. Pop parameter/linkage area. Used for tail
+ // call optimization
+ if (PerformTailCallOpt && RetOpcode == PPC::BLR &&
+ MF.getFunction()->getCallingConv() == CallingConv::Fast) {
+ PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
+ unsigned CallerAllocatedAmt = FI->getMinReservedArea();
+ unsigned StackReg = IsPPC64 ? PPC::X1 : PPC::R1;
+ unsigned FPReg = IsPPC64 ? PPC::X31 : PPC::R31;
+ unsigned TmpReg = IsPPC64 ? PPC::X0 : PPC::R0;
+ unsigned ADDIInstr = IsPPC64 ? PPC::ADDI8 : PPC::ADDI;
+ unsigned ADDInstr = IsPPC64 ? PPC::ADD8 : PPC::ADD4;
+ unsigned LISInstr = IsPPC64 ? PPC::LIS8 : PPC::LIS;
+ unsigned ORIInstr = IsPPC64 ? PPC::ORI8 : PPC::ORI;
+
+ if (CallerAllocatedAmt && isInt16(CallerAllocatedAmt)) {
+ BuildMI(MBB, MBBI, TII.get(ADDIInstr), StackReg)
+ .addReg(StackReg).addImm(CallerAllocatedAmt);
+ } else {
+ BuildMI(MBB, MBBI, TII.get(LISInstr), TmpReg)
+ .addImm(CallerAllocatedAmt >> 16);
+ BuildMI(MBB, MBBI, TII.get(ORIInstr), TmpReg)
+ .addReg(TmpReg, false, false, true)
+ .addImm(CallerAllocatedAmt & 0xFFFF);
+ BuildMI(MBB, MBBI, TII.get(ADDInstr))
+ .addReg(StackReg)
+ .addReg(FPReg)
+ .addReg(TmpReg);
+ }
+ } else if (RetOpcode == PPC::TCRETURNdi) {
+ MBBI = prior(MBB.end());
+ MachineOperand &JumpTarget = MBBI->getOperand(0);
+ BuildMI(MBB, MBBI, TII.get(PPC::TAILB)).
+ addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset());
+ } else if (RetOpcode == PPC::TCRETURNri) {
+ MBBI = prior(MBB.end());
+ MachineOperand &JumpTarget = MBBI->getOperand(0);
+ assert(JumpTarget.isReg() && "Expecting register operand.");
+ BuildMI(MBB, MBBI, TII.get(PPC::TAILBCTR));
+ } else if (RetOpcode == PPC::TCRETURNai) {
+ MBBI = prior(MBB.end());
+ MachineOperand &JumpTarget = MBBI->getOperand(0);
+ BuildMI(MBB, MBBI, TII.get(PPC::TAILBA)).addImm(JumpTarget.getImm());
+ } else if (RetOpcode == PPC::TCRETURNdi8) {
+ MBBI = prior(MBB.end());
+ MachineOperand &JumpTarget = MBBI->getOperand(0);
+ BuildMI(MBB, MBBI, TII.get(PPC::TAILB8)).
+ addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset());
+ } else if (RetOpcode == PPC::TCRETURNri8) {
+ MBBI = prior(MBB.end());
+ MachineOperand &JumpTarget = MBBI->getOperand(0);
+ assert(JumpTarget.isReg() && "Expecting register operand.");
+ BuildMI(MBB, MBBI, TII.get(PPC::TAILBCTR8));
+ } else if (RetOpcode == PPC::TCRETURNai8) {
+ MBBI = prior(MBB.end());
+ MachineOperand &JumpTarget = MBBI->getOperand(0);
+ BuildMI(MBB, MBBI, TII.get(PPC::TAILBA8)).addImm(JumpTarget.getImm());
+ }
}
unsigned PPCRegisterInfo::getRARegister() const {
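Both the new ADJCALLSTACKUP handling in eliminateCallFramePseudoInstr and the callee-pop epilogue code above materialize a stack adjustment that does not fit a signed 16-bit immediate as LIS (load the high half shifted left by 16) followed by ORI (OR in the unsigned low 16 bits). The arithmetic behind the Amt >> 16 / Amt & 0xFFFF split, including for negative 32-bit amounts, checks out in isolation (standalone sketch, not part of the patch):

#include <cassert>
#include <cstdint>

// Recombine a 32-bit value from the halves the emitted sequence uses:
//   LIS rT, (v >> 16)        ; rT = high half << 16
//   ORI rT, rT, (v & 0xFFFF) ; OR in the unsigned low half
static int32_t materialize(int32_t v) {
  int32_t hi = v >> 16;                  // arithmetic shift, as in the C++ above
  uint32_t lo = uint32_t(v) & 0xFFFF;    // ORI takes an unsigned 16-bit field
  return int32_t((uint32_t(hi) << 16) | lo);
}

int main() {
  const int32_t samples[] = { 0, 1, 0x7FFF, 0x8000, 0x12345678,
                              -1, -0x8000, -123456 };
  for (unsigned i = 0; i != sizeof(samples) / sizeof(samples[0]); ++i)
    assert(materialize(samples[i]) == samples[i]);   // the split round-trips
  return 0;
}

Because ORI zero-extends its immediate, no carry correction of the high half is needed, which an ADDIS/ADDI pair would require whenever the low half has its sign bit set.
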
Modified: llvm/branches/non-call-eh/lib/Target/PowerPC/PPCRegisterInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PowerPC/PPCRegisterInfo.td?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PowerPC/PPCRegisterInfo.td (original)
+++ llvm/branches/non-call-eh/lib/Target/PowerPC/PPCRegisterInfo.td Sun Jul 6 15:45:41 2008
@@ -346,4 +346,6 @@
let CopyCost = -1;
}
-
+
+def CTRRC : RegisterClass<"PPC", [i32], 32, [CTR]>;
+def CTRRC8 : RegisterClass<"PPC", [i64], 64, [CTR8]>;
Modified: llvm/branches/non-call-eh/lib/Target/PowerPC/PPCSubtarget.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PowerPC/PPCSubtarget.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PowerPC/PPCSubtarget.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/PowerPC/PPCSubtarget.cpp Sun Jul 6 15:45:41 2008
@@ -143,5 +143,6 @@
return false;
return GV->hasWeakLinkage() || GV->hasLinkOnceLinkage() ||
+ GV->hasCommonLinkage() ||
(GV->isDeclaration() && !GV->hasNotBeenReadFromBitcode());
}
Modified: llvm/branches/non-call-eh/lib/Target/PowerPC/PPCTargetAsmInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PowerPC/PPCTargetAsmInfo.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PowerPC/PPCTargetAsmInfo.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/PowerPC/PPCTargetAsmInfo.cpp Sun Jul 6 15:45:41 2008
@@ -39,6 +39,7 @@
CommentString = ";";
GlobalPrefix = "_";
PrivateGlobalPrefix = "L";
+ StringConstantPrefix = "\1LC";
ConstantPoolSection = "\t.const\t";
JumpTableDataSection = ".const";
CStringSection = "\t.cstring";
Modified: llvm/branches/non-call-eh/lib/Target/PowerPC/PPCTargetMachine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PowerPC/PPCTargetMachine.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PowerPC/PPCTargetMachine.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/PowerPC/PPCTargetMachine.cpp Sun Jul 6 15:45:41 2008
@@ -19,13 +19,11 @@
#include "llvm/Target/TargetMachineRegistry.h"
using namespace llvm;
-namespace {
- // Register the targets
- RegisterTarget<PPC32TargetMachine>
- X("ppc32", " PowerPC 32");
- RegisterTarget<PPC64TargetMachine>
- Y("ppc64", " PowerPC 64");
-}
+// Register the targets
+static RegisterTarget<PPC32TargetMachine>
+X("ppc32", " PowerPC 32");
+static RegisterTarget<PPC64TargetMachine>
+Y("ppc64", " PowerPC 64");
const TargetAsmInfo *PPCTargetMachine::createTargetAsmInfo() const {
if (Subtarget.isDarwin())
Modified: llvm/branches/non-call-eh/lib/Target/PowerPC/PPCTargetMachine.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PowerPC/PPCTargetMachine.h?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PowerPC/PPCTargetMachine.h (original)
+++ llvm/branches/non-call-eh/lib/Target/PowerPC/PPCTargetMachine.h Sun Jul 6 15:45:41 2008
@@ -46,12 +46,12 @@
PPCTargetMachine(const Module &M, const std::string &FS, bool is64Bit);
virtual const PPCInstrInfo *getInstrInfo() const { return &InstrInfo; }
- virtual const TargetFrameInfo *getFrameInfo() const { return &FrameInfo; }
- virtual TargetJITInfo *getJITInfo() { return &JITInfo; }
+ virtual const PPCFrameInfo *getFrameInfo() const { return &FrameInfo; }
+ virtual PPCJITInfo *getJITInfo() { return &JITInfo; }
virtual PPCTargetLowering *getTargetLowering() const {
return const_cast<PPCTargetLowering*>(&TLInfo);
}
- virtual const TargetRegisterInfo *getRegisterInfo() const {
+ virtual const PPCRegisterInfo *getRegisterInfo() const {
return &InstrInfo.getRegisterInfo();
}
Modified: llvm/branches/non-call-eh/lib/Target/PowerPC/README_ALTIVEC.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/PowerPC/README_ALTIVEC.txt?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/PowerPC/README_ALTIVEC.txt (original)
+++ llvm/branches/non-call-eh/lib/Target/PowerPC/README_ALTIVEC.txt Sun Jul 6 15:45:41 2008
@@ -186,3 +186,26 @@
3. lvsl 0; splat index; vcmpeq to generate a select mask
4. lvsl slot + x; vperm to rotate result into correct slot
5. vsel result together.
+
+//===----------------------------------------------------------------------===//
+
+Should codegen branches on vec_any/vec_all to avoid mfcr. Two examples:
+
+#include <altivec.h>
+ int f(vector float a, vector float b)
+ {
+ int aa = 0;
+ if (vec_all_ge(a, b))
+ aa |= 0x1;
+ if (vec_any_ge(a,b))
+ aa |= 0x2;
+ return aa;
+}
+
+vector float f(vector float a, vector float b) {
+ if (vec_any_eq(a, b))
+ return a;
+ else
+ return b;
+}
+
Modified: llvm/branches/non-call-eh/lib/Target/README.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/README.txt?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/README.txt (original)
+++ llvm/branches/non-call-eh/lib/Target/README.txt Sun Jul 6 15:45:41 2008
@@ -317,11 +317,6 @@
//===---------------------------------------------------------------------===//
-Instcombine misses several of these cases (see the testcase in the patch):
-http://gcc.gnu.org/ml/gcc-patches/2006-10/msg01519.html
-
-//===---------------------------------------------------------------------===//
-
viterbi speeds up *significantly* if the various "history" related copy loops
are turned into memcpy calls at the source level. We need a "loops to memcpy"
pass.
@@ -780,29 +775,12 @@
//===---------------------------------------------------------------------===//
-define i32 @test2(float %X, float %Y) {
-entry:
- %tmp3 = fcmp uno float %X, %Y ; <i1> [#uses=1]
- %tmp34 = zext i1 %tmp3 to i8 ; <i8> [#uses=1]
- %tmp = xor i8 %tmp34, 1 ; <i8> [#uses=1]
- %toBoolnot5 = zext i8 %tmp to i32 ; <i32> [#uses=1]
- ret i32 %toBoolnot5
-}
-
-could be optimized further. Instcombine should use its bitwise analysis to
-collapse the zext/xor/zext structure to an xor/zext and then remove the
-xor by reversing the fcmp.
-
-Desired output:
+We should be able to evaluate this loop:
-define i32 @test2(float %X, float %Y) {
-entry:
- %tmp3 = fcmp ord float %X, %Y ; <i1> [#uses=1]
- %tmp34 = zext i1 %tmp3 to i32 ; <i32> [#uses=1]
- ret i32 %tmp34
+int test(int x_offs) {
+ while (x_offs > 4)
+ x_offs -= 4;
+ return x_offs;
}
-To fix this, we need to make CanEvaluateInDifferentType smarter.
-
//===---------------------------------------------------------------------===//
-
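For reference, the loop in the new README entry above has a simple closed form, which is the sort of result the requested loop evaluation would need to derive (sketch only; the note does not specify the exact transformation):

// Equivalent closed form of test() above: once x_offs exceeds 4, repeatedly
// subtracting 4 always lands in the range [1,4].
int test_closed_form(int x_offs) {
  if (x_offs <= 4)
    return x_offs;
  return ((x_offs - 5) % 4) + 1;
}
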
Modified: llvm/branches/non-call-eh/lib/Target/Sparc/SparcAsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/Sparc/SparcAsmPrinter.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/Sparc/SparcAsmPrinter.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/Sparc/SparcAsmPrinter.cpp Sun Jul 6 15:45:41 2008
@@ -231,8 +231,8 @@
unsigned Size = TD->getABITypeSize(C->getType());
unsigned Align = TD->getPreferredAlignment(I);
- if (C->isNullValue() &&
- (I->hasLinkOnceLinkage() || I->hasInternalLinkage() ||
+ if (C->isNullValue() && (I->hasCommonLinkage() ||
+ I->hasLinkOnceLinkage() || I->hasInternalLinkage() ||
I->hasWeakLinkage() /* FIXME: Verify correct */)) {
SwitchToDataSection(".data", I);
if (I->hasInternalLinkage())
@@ -243,6 +243,7 @@
O << "\n";
} else {
switch (I->getLinkage()) {
+ case GlobalValue::CommonLinkage:
case GlobalValue::LinkOnceLinkage:
case GlobalValue::WeakLinkage: // FIXME: Verify correct for weak.
// Nonnull linkonce -> weak
Modified: llvm/branches/non-call-eh/lib/Target/Sparc/SparcISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/Sparc/SparcISelDAGToDAG.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/Sparc/SparcISelDAGToDAG.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/Sparc/SparcISelDAGToDAG.cpp Sun Jul 6 15:45:41 2008
@@ -47,9 +47,9 @@
bool SelectADDRri(SDOperand Op, SDOperand N, SDOperand &Base,
SDOperand &Offset);
- /// InstructionSelectBasicBlock - This callback is invoked by
+ /// InstructionSelect - This callback is invoked by
/// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
- virtual void InstructionSelectBasicBlock(SelectionDAG &DAG);
+ virtual void InstructionSelect(SelectionDAG &DAG);
virtual const char *getPassName() const {
return "SPARC DAG->DAG Pattern Instruction Selection";
@@ -60,17 +60,14 @@
};
} // end anonymous namespace
-/// InstructionSelectBasicBlock - This callback is invoked by
+/// InstructionSelect - This callback is invoked by
/// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
-void SparcDAGToDAGISel::InstructionSelectBasicBlock(SelectionDAG &DAG) {
+void SparcDAGToDAGISel::InstructionSelect(SelectionDAG &DAG) {
DEBUG(BB->dump());
// Select target instructions for the DAG.
DAG.setRoot(SelectRoot(DAG.getRoot()));
DAG.RemoveDeadNodes();
-
- // Emit machine code to BB.
- ScheduleAndEmitDAG(DAG);
}
bool SparcDAGToDAGISel::SelectADDRri(SDOperand Op, SDOperand Addr,
Modified: llvm/branches/non-call-eh/lib/Target/Sparc/SparcISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/Sparc/SparcISelLowering.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/Sparc/SparcISelLowering.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/Sparc/SparcISelLowering.cpp Sun Jul 6 15:45:41 2008
@@ -74,11 +74,11 @@
/// LowerArguments - V8 uses a very simple ABI, where all values are passed in
/// either one or two GPRs, including FP values. TODO: we should pass FP values
/// in FP registers for fastcc functions.
-std::vector<SDOperand>
-SparcTargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
+void
+SparcTargetLowering::LowerArguments(Function &F, SelectionDAG &DAG,
+ SmallVectorImpl<SDOperand> &ArgValues) {
MachineFunction &MF = DAG.getMachineFunction();
MachineRegisterInfo &RegInfo = MF.getRegInfo();
- std::vector<SDOperand> ArgValues;
static const unsigned ArgRegs[] = {
SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
@@ -91,9 +91,9 @@
std::vector<SDOperand> OutChains;
for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
- MVT::ValueType ObjectVT = getValueType(I->getType());
+ MVT ObjectVT = getValueType(I->getType());
- switch (ObjectVT) {
+ switch (ObjectVT.getSimpleVT()) {
default: assert(0 && "Unhandled argument type!");
case MVT::i1:
case MVT::i8:
@@ -123,7 +123,7 @@
ISD::LoadExtType LoadOp = ISD::SEXTLOAD;
// Sparc is big endian, so add an offset based on the ObjectVT.
- unsigned Offset = 4-std::max(1U, MVT::getSizeInBits(ObjectVT)/8);
+ unsigned Offset = 4-std::max(1U, ObjectVT.getSizeInBits()/8);
FIPtr = DAG.getNode(ISD::ADD, MVT::i32, FIPtr,
DAG.getConstant(Offset, MVT::i32));
Load = DAG.getExtLoad(LoadOp, MVT::i32, Root, FIPtr,
@@ -221,8 +221,6 @@
if (!OutChains.empty())
DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other,
&OutChains[0], OutChains.size()));
-
- return ArgValues;
}
static SDOperand LowerCALL(SDOperand Op, SelectionDAG &DAG) {
@@ -246,7 +244,7 @@
// Count the size of the outgoing arguments.
unsigned ArgsSize = 0;
for (unsigned i = 5, e = Op.getNumOperands(); i != e; i += 2) {
- switch (Op.getOperand(i).getValueType()) {
+ switch (Op.getOperand(i).getValueType().getSimpleVT()) {
default: assert(0 && "Unknown value type!");
case MVT::i1:
case MVT::i8:
@@ -323,10 +321,10 @@
for (unsigned i = 5, e = Op.getNumOperands(); i != e; i += 2) {
SDOperand Val = Op.getOperand(i);
- MVT::ValueType ObjectVT = Val.getValueType();
+ MVT ObjectVT = Val.getValueType();
SDOperand ValToStore(0, 0);
unsigned ObjSize;
- switch (ObjectVT) {
+ switch (ObjectVT.getSimpleVT()) {
default: assert(0 && "Unhandled argument type!");
case MVT::i32:
ObjSize = 4;
@@ -414,7 +412,7 @@
else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);
- std::vector<MVT::ValueType> NodeTys;
+ std::vector<MVT> NodeTys;
NodeTys.push_back(MVT::Other); // Returns a chain
NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
SDOperand Ops[] = { Chain, Callee, InFlag };
@@ -448,10 +446,10 @@
}
ResultVals.push_back(Chain);
-
+
// Merge everything together with a MERGE_VALUES node.
- return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(),
- &ResultVals[0], ResultVals.size());
+ return DAG.getMergeValues(Op.Val->getVTList(), &ResultVals[0],
+ ResultVals.size());
}
@@ -597,9 +595,10 @@
setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
// We don't have line number support yet.
- setOperationAction(ISD::LOCATION, MVT::Other, Expand);
+ setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
- setOperationAction(ISD::LABEL, MVT::Other, Expand);
+ setOperationAction(ISD::DBG_LABEL, MVT::Other, Expand);
+ setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
// RET must be custom lowered, to meet ABI requirements
setOperationAction(ISD::RET , MVT::Other, Custom);
@@ -617,8 +616,9 @@
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom);
// No debug info support yet.
- setOperationAction(ISD::LOCATION, MVT::Other, Expand);
- setOperationAction(ISD::LABEL, MVT::Other, Expand);
+ setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
+ setOperationAction(ISD::DBG_LABEL, MVT::Other, Expand);
+ setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
setOperationAction(ISD::DECLARE, MVT::Other, Expand);
setStackPointerRegisterToSaveRestore(SP::O6);
@@ -744,7 +744,7 @@
// Get the condition flag.
SDOperand CompareFlag;
if (LHS.getValueType() == MVT::i32) {
- std::vector<MVT::ValueType> VTs;
+ std::vector<MVT> VTs;
VTs.push_back(MVT::i32);
VTs.push_back(MVT::Flag);
SDOperand Ops[2] = { LHS, RHS };
@@ -774,7 +774,7 @@
SDOperand CompareFlag;
if (LHS.getValueType() == MVT::i32) {
- std::vector<MVT::ValueType> VTs;
+ std::vector<MVT> VTs;
VTs.push_back(LHS.getValueType()); // subcc returns a value
VTs.push_back(MVT::Flag);
SDOperand Ops[2] = { LHS, RHS };
@@ -804,14 +804,14 @@
static SDOperand LowerVAARG(SDOperand Op, SelectionDAG &DAG) {
SDNode *Node = Op.Val;
- MVT::ValueType VT = Node->getValueType(0);
+ MVT VT = Node->getValueType(0);
SDOperand InChain = Node->getOperand(0);
SDOperand VAListPtr = Node->getOperand(1);
const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
SDOperand VAList = DAG.getLoad(MVT::i32, InChain, VAListPtr, SV, 0);
// Increment the pointer, VAList, to the next vaarg
SDOperand NextPtr = DAG.getNode(ISD::ADD, MVT::i32, VAList,
- DAG.getConstant(MVT::getSizeInBits(VT)/8,
+ DAG.getConstant(VT.getSizeInBits()/8,
MVT::i32));
// Store the incremented VAList to the legalized pointer
InChain = DAG.getStore(VAList.getValue(1), NextPtr,
@@ -829,8 +829,7 @@
DAG.getNode(ISD::BIT_CONVERT, MVT::f64, V),
V.getValue(1)
};
- return DAG.getNode(ISD::MERGE_VALUES, DAG.getVTList(MVT::f64, MVT::Other),
- Ops, 2);
+ return DAG.getMergeValues(Ops, 2);
}
static SDOperand LowerDYNAMIC_STACKALLOC(SDOperand Op, SelectionDAG &DAG) {
@@ -846,11 +845,8 @@
// to provide a register spill area.
SDOperand NewVal = DAG.getNode(ISD::ADD, MVT::i32, NewSP,
DAG.getConstant(96, MVT::i32));
- std::vector<MVT::ValueType> Tys;
- Tys.push_back(MVT::i32);
- Tys.push_back(MVT::Other);
SDOperand Ops[2] = { NewVal, Chain };
- return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2);
+ return DAG.getMergeValues(Ops, 2);
}
@@ -920,15 +916,10 @@
MachineFunction *F = BB->getParent();
F->getBasicBlockList().insert(It, copy0MBB);
F->getBasicBlockList().insert(It, sinkMBB);
- // Update machine-CFG edges by first adding all successors of the current
+ // Update machine-CFG edges by transferring all successors of the current
// block to the new block which will contain the Phi node for the select.
- for(MachineBasicBlock::succ_iterator i = BB->succ_begin(),
- e = BB->succ_end(); i != e; ++i)
- sinkMBB->addSuccessor(*i);
- // Next, remove all successors of the current block, and add the true
- // and fallthrough blocks as its successors.
- while(!BB->succ_empty())
- BB->removeSuccessor(BB->succ_begin());
+ sinkMBB->transferSuccessors(BB);
+ // Next, add the true and fallthrough blocks as its successors.
BB->addSuccessor(copy0MBB);
BB->addSuccessor(sinkMBB);
Modified: llvm/branches/non-call-eh/lib/Target/Sparc/SparcISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/Sparc/SparcISelLowering.h?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/Sparc/SparcISelLowering.h (original)
+++ llvm/branches/non-call-eh/lib/Target/Sparc/SparcISelLowering.h Sun Jul 6 15:45:41 2008
@@ -57,8 +57,8 @@
const SelectionDAG &DAG,
unsigned Depth = 0) const;
- virtual std::vector<SDOperand>
- LowerArguments(Function &F, SelectionDAG &DAG);
+ virtual void LowerArguments(Function &F, SelectionDAG &DAG,
+ SmallVectorImpl<SDOperand> &ArgValues);
virtual MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *MBB);
Modified: llvm/branches/non-call-eh/lib/Target/Sparc/SparcInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/Sparc/SparcInstrInfo.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/Sparc/SparcInstrInfo.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/Sparc/SparcInstrInfo.cpp Sun Jul 6 15:45:41 2008
@@ -247,16 +247,19 @@
isFloat = true;
// FALLTHROUGH
case SP::FMOVD:
- if (OpNum == 0) // COPY -> STORE
+ if (OpNum == 0) { // COPY -> STORE
+ unsigned SrcReg = MI->getOperand(1).getReg();
+ bool isKill = MI->getOperand(1).isKill();
NewMI = BuildMI(get(isFloat ? SP::STFri : SP::STDFri))
- .addFrameIndex(FI).addImm(0).addReg(MI->getOperand(1).getReg());
- else // COPY -> LOAD
- NewMI = BuildMI(get(isFloat ? SP::LDFri : SP::LDDFri),
- MI->getOperand(0).getReg()).addFrameIndex(FI).addImm(0);
+ .addFrameIndex(FI).addImm(0).addReg(SrcReg, false, false, isKill);
+ } else { // COPY -> LOAD
+ unsigned DstReg = MI->getOperand(0).getReg();
+ bool isDead = MI->getOperand(0).isDead();
+ NewMI = BuildMI(get(isFloat ? SP::LDFri : SP::LDDFri))
+ .addReg(DstReg, true, false, false, isDead).addFrameIndex(FI).addImm(0);
+ }
break;
}
- if (NewMI)
- NewMI->copyKillDeadInfo(MI);
return NewMI;
}
Modified: llvm/branches/non-call-eh/lib/Target/Sparc/SparcInstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/Sparc/SparcInstrInfo.h?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/Sparc/SparcInstrInfo.h (original)
+++ llvm/branches/non-call-eh/lib/Target/Sparc/SparcInstrInfo.h Sun Jul 6 15:45:41 2008
@@ -41,7 +41,7 @@
/// such, whenever a client has an instance of instruction info, it should
/// always be able to get register info as well (through this method).
///
- virtual const TargetRegisterInfo &getRegisterInfo() const { return RI; }
+ virtual const SparcRegisterInfo &getRegisterInfo() const { return RI; }
/// Return true if the instruction is a register to register move and
/// leave the source and dest operands in the passed parameters.
Modified: llvm/branches/non-call-eh/lib/Target/Sparc/SparcTargetMachine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/Sparc/SparcTargetMachine.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/Sparc/SparcTargetMachine.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/Sparc/SparcTargetMachine.cpp Sun Jul 6 15:45:41 2008
@@ -18,10 +18,8 @@
#include "llvm/Target/TargetMachineRegistry.h"
using namespace llvm;
-namespace {
- // Register the target.
- RegisterTarget<SparcTargetMachine> X("sparc", " SPARC");
-}
+// Register the target.
+static RegisterTarget<SparcTargetMachine> X("sparc", " SPARC");
const TargetAsmInfo *SparcTargetMachine::createTargetAsmInfo() const {
return new SparcTargetAsmInfo(*this);
Modified: llvm/branches/non-call-eh/lib/Target/Sparc/SparcTargetMachine.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/Sparc/SparcTargetMachine.h?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/Sparc/SparcTargetMachine.h (original)
+++ llvm/branches/non-call-eh/lib/Target/Sparc/SparcTargetMachine.h Sun Jul 6 15:45:41 2008
@@ -38,8 +38,8 @@
virtual const SparcInstrInfo *getInstrInfo() const { return &InstrInfo; }
virtual const TargetFrameInfo *getFrameInfo() const { return &FrameInfo; }
- virtual const TargetSubtarget *getSubtargetImpl() const{ return &Subtarget; }
- virtual const TargetRegisterInfo *getRegisterInfo() const {
+ virtual const SparcSubtarget *getSubtargetImpl() const{ return &Subtarget; }
+ virtual const SparcRegisterInfo *getRegisterInfo() const {
return &InstrInfo.getRegisterInfo();
}
virtual const TargetData *getTargetData() const { return &DataLayout; }
Modified: llvm/branches/non-call-eh/lib/Target/Target.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/Target.td?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/Target.td (original)
+++ llvm/branches/non-call-eh/lib/Target/Target.td Sun Jul 6 15:45:41 2008
@@ -203,22 +203,25 @@
bit usesCustomDAGSchedInserter = 0; // Pseudo instr needing special help.
bit hasCtrlDep = 0; // Does this instruction r/w ctrl-flow chains?
bit isNotDuplicable = 0; // Is it unsafe to duplicate this instruction?
+ bit isAsCheapAsAMove = 0; // As cheap (or cheaper) than a move instruction.
// Side effect flags - When set, the flags have these meanings:
//
// hasSideEffects - The instruction has side effects that are not
// captured by any operands of the instruction or other flags.
+ //
// mayHaveSideEffects - Some instances of the instruction can have side
// effects. The virtual method "isReallySideEffectFree" is called to
// determine this. Load instructions are an example of where this is
// useful. In general, loads always have side effects. However, loads from
// constant pools don't. Individual back ends make this determination.
+ //
// neverHasSideEffects - Set on an instruction with no pattern if it has no
// side effects.
bit hasSideEffects = 0;
bit mayHaveSideEffects = 0;
bit neverHasSideEffects = 0;
-
+
InstrItinClass Itinerary = NoItinerary;// Execution steps used for scheduling.
string Constraints = ""; // OperandConstraint, e.g. $src = $dst.
@@ -339,9 +342,23 @@
let AsmString = "";
let Namespace = "TargetInstrInfo";
}
-def LABEL : Instruction {
+def DBG_LABEL : Instruction {
+ let OutOperandList = (ops);
+ let InOperandList = (ops i32imm:$id);
+ let AsmString = "";
+ let Namespace = "TargetInstrInfo";
+ let hasCtrlDep = 1;
+}
+def EH_LABEL : Instruction {
+ let OutOperandList = (ops);
+ let InOperandList = (ops i32imm:$id);
+ let AsmString = "";
+ let Namespace = "TargetInstrInfo";
+ let hasCtrlDep = 1;
+}
+def GC_LABEL : Instruction {
let OutOperandList = (ops);
- let InOperandList = (ops i32imm:$id, i32imm:$flavor);
+ let InOperandList = (ops i32imm:$id);
let AsmString = "";
let Namespace = "TargetInstrInfo";
let hasCtrlDep = 1;
Modified: llvm/branches/non-call-eh/lib/Target/TargetAsmInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/TargetAsmInfo.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/TargetAsmInfo.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/TargetAsmInfo.cpp Sun Jul 6 15:45:41 2008
@@ -12,7 +12,13 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/Constants.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Function.h"
+#include "llvm/Module.h"
+#include "llvm/Type.h"
#include "llvm/Target/TargetAsmInfo.h"
+#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Dwarf.h"
#include <cctype>
#include <cstring>
@@ -26,6 +32,7 @@
TLSDataSection("\t.section .tdata,\"awT\",@progbits"),
TLSBSSSection("\t.section .tbss,\"awT\",@nobits"),
ZeroFillDirective(0),
+ NonexecutableStackDirective(0),
NeedsSet(false),
MaxInstLength(4),
PCSymbol("$"),
@@ -44,6 +51,7 @@
InlineAsmStart("#APP"),
InlineAsmEnd("#NO_APP"),
AssemblerDialect(0),
+ StringConstantPrefix(".str"),
ZeroDirective("\t.zero\t"),
ZeroDirectiveSuffix(0),
AsciiDirective("\t.ascii\t"),
@@ -89,7 +97,6 @@
DwarfRequiresFrameSection(true),
GlobalEHDirective(0),
SupportsWeakOmittedEHFrame(true),
- ShortenEHDataOn64Bit(false),
DwarfSectionOffsetDirective(0),
DwarfAbbrevSection(".debug_abbrev"),
DwarfInfoSection(".debug_info"),
@@ -141,3 +148,46 @@
return dwarf::DW_EH_PE_absptr;
}
+static bool isSuitableForBSS(const GlobalVariable *GV) {
+ if (!GV->hasInitializer())
+ return true;
+
+ // Leave constant zeros in readonly constant sections, so they can be shared
+ Constant *C = GV->getInitializer();
+ return (C->isNullValue() && !GV->isConstant() && !NoZerosInBSS);
+}
+
+SectionKind::Kind
+TargetAsmInfo::SectionKindForGlobal(const GlobalValue *GV) const {
+  // Early exit - functions should always be in text sections.
+ if (isa<Function>(GV))
+ return SectionKind::Text;
+
+ const GlobalVariable* GVar = dyn_cast<GlobalVariable>(GV);
+ bool isThreadLocal = GVar->isThreadLocal();
+ assert(GVar && "Invalid global value for section selection");
+
+ SectionKind::Kind kind;
+ if (isSuitableForBSS(GVar)) {
+    // The variable can easily be put in the BSS section.
+ return (isThreadLocal ? SectionKind::ThreadBSS : SectionKind::BSS);
+ } else if (GVar->isConstant() && !isThreadLocal) {
+    // Now we know that the variable has an initializer and is constant. We
+    // need to check its initializer to decide which section to output it
+    // into. Also note that there is no thread-local read-only section.
+ Constant *C = GVar->getInitializer();
+ if (C->ContainsRelocations())
+ kind = SectionKind::ROData;
+ else {
+ const ConstantArray *CVA = dyn_cast<ConstantArray>(C);
+      // Check if the initializer is a null-terminated string.
+ if (CVA && CVA->isCString())
+ kind = SectionKind::RODataMergeStr;
+ else
+ kind = SectionKind::RODataMergeConst;
+ }
+ }
+
+ // Variable is not constant or thread-local - emit to generic data section.
+ return (isThreadLocal ? SectionKind::ThreadData : SectionKind::Data);
+}
Modified: llvm/branches/non-call-eh/lib/Target/TargetData.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/TargetData.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/TargetData.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/TargetData.cpp Sun Jul 6 15:45:41 2008
@@ -30,11 +30,10 @@
using namespace llvm;
// Handle the Pass registration stuff necessary to use TargetData's.
-namespace {
- // Register the default SparcV9 implementation...
- RegisterPass<TargetData> X("targetdata", "Target Data Layout", false,
- true);
-}
+
+// Register the default SparcV9 implementation...
+static RegisterPass<TargetData> X("targetdata", "Target Data Layout", false,
+ true);
char TargetData::ID = 0;
//===----------------------------------------------------------------------===//
@@ -49,10 +48,7 @@
// Loop over each of the elements, placing them in memory...
for (unsigned i = 0, e = NumElements; i != e; ++i) {
const Type *Ty = ST->getElementType(i);
- unsigned TyAlign = ST->isPacked() ?
- 1 : TD.getABITypeAlignment(Ty);
- uint64_t TySize = ST->isPacked() ?
- TD.getTypeStoreSize(Ty) : TD.getABITypeSize(Ty);
+ unsigned TyAlign = ST->isPacked() ? 1 : TD.getABITypeAlignment(Ty);
// Add padding if necessary to align the data element properly...
StructSize = (StructSize + TyAlign - 1)/TyAlign * TyAlign;
@@ -61,7 +57,7 @@
StructAlignment = std::max(TyAlign, StructAlignment);
MemberOffsets[i] = StructSize;
- StructSize += TySize; // Consume space for this data item
+ StructSize += TD.getABITypeSize(Ty); // Consume space for this data item
}
// Empty structures have alignment of 1 byte.
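As a quick illustration of the rounding in the loop above (a worked example with
assumed members, not taken from the patch): laying out a struct containing an i8
followed by an i32, with ABI alignments 1 and 4:

  StructSize = 0
  i8  : offset 0, StructSize becomes 1
  round up for the i32: (1 + 4 - 1)/4 * 4 = 4
  i32 : offset 4, StructSize becomes 8
  StructAlignment = max(1, 4) = 4

so the i32 lands on a 4-byte boundary and the struct occupies 8 bytes.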
@@ -318,6 +314,8 @@
: Alignments[BestMatchIdx].PrefAlign;
}
+namespace {
+
/// LayoutInfo - The lazy cache of structure layout information maintained by
/// TargetData. Note that the struct types must have been free'd before
/// llvm_shutdown is called (and thus this is deallocated) because all the
@@ -342,8 +340,10 @@
};
typedef DenseMap<LayoutKey, StructLayout*, DenseMapLayoutKeyInfo> LayoutInfoTy;
-static ManagedStatic<LayoutInfoTy> LayoutInfo;
+}
+
+static ManagedStatic<LayoutInfoTy> LayoutInfo;
TargetData::~TargetData() {
if (LayoutInfo.isConstructed()) {
Modified: llvm/branches/non-call-eh/lib/Target/TargetMachine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/TargetMachine.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/TargetMachine.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/TargetMachine.cpp Sun Jul 6 15:45:41 2008
@@ -36,106 +36,126 @@
CodeModel::Model CMModel;
bool PerformTailCallOpt;
bool OptimizeForSize;
+ unsigned StackAlignment;
+ bool RealignStack;
+ bool VerboseAsm;
}
-namespace {
- cl::opt<bool, true> PrintCode("print-machineinstrs",
- cl::desc("Print generated machine code"),
- cl::location(PrintMachineCode), cl::init(false));
-
- cl::opt<bool, true>
- DisableFPElim("disable-fp-elim",
- cl::desc("Disable frame pointer elimination optimization"),
- cl::location(NoFramePointerElim),
- cl::init(false));
- cl::opt<bool, true>
- DisableExcessPrecision("disable-excess-fp-precision",
- cl::desc("Disable optimizations that may increase FP precision"),
- cl::location(NoExcessFPPrecision),
- cl::init(false));
- cl::opt<bool, true>
- EnableUnsafeFPMath("enable-unsafe-fp-math",
- cl::desc("Enable optimizations that may decrease FP precision"),
- cl::location(UnsafeFPMath),
- cl::init(false));
- cl::opt<bool, true>
- EnableFiniteOnlyFPMath("enable-finite-only-fp-math",
- cl::desc("Enable optimizations that assumes non- NaNs / +-Infs"),
- cl::location(FiniteOnlyFPMathOption),
- cl::init(false));
- cl::opt<bool, true>
- EnableHonorSignDependentRoundingFPMath(cl::Hidden,
- "enable-sign-dependent-rounding-fp-math",
- cl::desc("Force codegen to assume rounding mode can change dynamically"),
- cl::location(HonorSignDependentRoundingFPMathOption),
- cl::init(false));
-
- cl::opt<bool, true>
- GenerateSoftFloatCalls("soft-float",
- cl::desc("Generate software floating point library calls"),
- cl::location(UseSoftFloat),
- cl::init(false));
- cl::opt<bool, true>
- DontPlaceZerosInBSS("nozero-initialized-in-bss",
- cl::desc("Don't place zero-initialized symbols into bss section"),
- cl::location(NoZerosInBSS),
+
+static cl::opt<bool, true> PrintCode("print-machineinstrs",
+ cl::desc("Print generated machine code"),
+ cl::location(PrintMachineCode), cl::init(false));
+
+static cl::opt<bool, true>
+DisableFPElim("disable-fp-elim",
+ cl::desc("Disable frame pointer elimination optimization"),
+ cl::location(NoFramePointerElim),
cl::init(false));
- cl::opt<bool, true>
- EnableExceptionHandling("enable-eh",
- cl::desc("Emit DWARF exception handling (default if target supports)"),
- cl::location(ExceptionHandling),
- cl::init(false));
- cl::opt<bool, true>
- EnableUnwindTables("unwind-tables",
- cl::desc("Generate unwinding tables for all functions"),
- cl::location(UnwindTablesMandatory),
- cl::init(false));
-
- cl::opt<llvm::Reloc::Model, true>
- DefRelocationModel(
- "relocation-model",
- cl::desc("Choose relocation model"),
- cl::location(RelocationModel),
- cl::init(Reloc::Default),
- cl::values(
- clEnumValN(Reloc::Default, "default",
- " Target default relocation model"),
- clEnumValN(Reloc::Static, "static",
- " Non-relocatable code"),
- clEnumValN(Reloc::PIC_, "pic",
- " Fully relocatable, position independent code"),
- clEnumValN(Reloc::DynamicNoPIC, "dynamic-no-pic",
- " Relocatable external references, non-relocatable code"),
- clEnumValEnd));
- cl::opt<llvm::CodeModel::Model, true>
- DefCodeModel(
- "code-model",
- cl::desc("Choose code model"),
- cl::location(CMModel),
- cl::init(CodeModel::Default),
- cl::values(
- clEnumValN(CodeModel::Default, "default",
- " Target default code model"),
- clEnumValN(CodeModel::Small, "small",
- " Small code model"),
- clEnumValN(CodeModel::Kernel, "kernel",
- " Kernel code model"),
- clEnumValN(CodeModel::Medium, "medium",
- " Medium code model"),
- clEnumValN(CodeModel::Large, "large",
- " Large code model"),
- clEnumValEnd));
-
- cl::opt<bool, true>
- EnablePerformTailCallOpt("tailcallopt",
- cl::desc("Turn on tail call optimization."),
- cl::location(PerformTailCallOpt),
- cl::init(false));
- cl::opt<bool, true>
- EnableOptimizeForSize("optimize-size",
- cl::desc("Optimize for size."),
- cl::location(OptimizeForSize),
- cl::init(false));
-}
+static cl::opt<bool, true>
+DisableExcessPrecision("disable-excess-fp-precision",
+ cl::desc("Disable optimizations that may increase FP precision"),
+ cl::location(NoExcessFPPrecision),
+ cl::init(false));
+static cl::opt<bool, true>
+EnableUnsafeFPMath("enable-unsafe-fp-math",
+ cl::desc("Enable optimizations that may decrease FP precision"),
+ cl::location(UnsafeFPMath),
+ cl::init(false));
+static cl::opt<bool, true>
+EnableFiniteOnlyFPMath("enable-finite-only-fp-math",
+ cl::desc("Enable optimizations that assumes non- NaNs / +-Infs"),
+ cl::location(FiniteOnlyFPMathOption),
+ cl::init(false));
+static cl::opt<bool, true>
+EnableHonorSignDependentRoundingFPMath(cl::Hidden,
+ "enable-sign-dependent-rounding-fp-math",
+ cl::desc("Force codegen to assume rounding mode can change dynamically"),
+ cl::location(HonorSignDependentRoundingFPMathOption),
+ cl::init(false));
+
+static cl::opt<bool, true>
+GenerateSoftFloatCalls("soft-float",
+ cl::desc("Generate software floating point library calls"),
+ cl::location(UseSoftFloat),
+ cl::init(false));
+static cl::opt<bool, true>
+DontPlaceZerosInBSS("nozero-initialized-in-bss",
+ cl::desc("Don't place zero-initialized symbols into bss section"),
+ cl::location(NoZerosInBSS),
+ cl::init(false));
+static cl::opt<bool, true>
+EnableExceptionHandling("enable-eh",
+ cl::desc("Emit DWARF exception handling (default if target supports)"),
+ cl::location(ExceptionHandling),
+ cl::init(false));
+static cl::opt<bool, true>
+EnableUnwindTables("unwind-tables",
+ cl::desc("Generate unwinding tables for all functions"),
+ cl::location(UnwindTablesMandatory),
+ cl::init(false));
+
+static cl::opt<llvm::Reloc::Model, true>
+DefRelocationModel(
+ "relocation-model",
+ cl::desc("Choose relocation model"),
+ cl::location(RelocationModel),
+ cl::init(Reloc::Default),
+ cl::values(
+ clEnumValN(Reloc::Default, "default",
+ " Target default relocation model"),
+ clEnumValN(Reloc::Static, "static",
+ " Non-relocatable code"),
+ clEnumValN(Reloc::PIC_, "pic",
+ " Fully relocatable, position independent code"),
+ clEnumValN(Reloc::DynamicNoPIC, "dynamic-no-pic",
+ " Relocatable external references, non-relocatable code"),
+ clEnumValEnd));
+static cl::opt<llvm::CodeModel::Model, true>
+DefCodeModel(
+ "code-model",
+ cl::desc("Choose code model"),
+ cl::location(CMModel),
+ cl::init(CodeModel::Default),
+ cl::values(
+ clEnumValN(CodeModel::Default, "default",
+ " Target default code model"),
+ clEnumValN(CodeModel::Small, "small",
+ " Small code model"),
+ clEnumValN(CodeModel::Kernel, "kernel",
+ " Kernel code model"),
+ clEnumValN(CodeModel::Medium, "medium",
+ " Medium code model"),
+ clEnumValN(CodeModel::Large, "large",
+ " Large code model"),
+ clEnumValEnd));
+
+static cl::opt<bool, true>
+EnablePerformTailCallOpt("tailcallopt",
+ cl::desc("Turn on tail call optimization."),
+ cl::location(PerformTailCallOpt),
+ cl::init(false));
+static cl::opt<bool, true>
+EnableOptimizeForSize("optimize-size",
+ cl::desc("Optimize for size."),
+ cl::location(OptimizeForSize),
+ cl::init(false));
+
+static cl::opt<unsigned, true>
+OverrideStackAlignment("stack-alignment",
+ cl::desc("Override default stack alignment"),
+ cl::location(StackAlignment),
+ cl::init(0));
+
+static cl::opt<bool, true>
+EnableRealignStack("realign-stack",
+ cl::desc("Realign stack if needed"),
+ cl::location(RealignStack),
+ cl::init(true));
+
+static cl::opt<bool, true>
+AsmVerbose("asm-verbose", cl::desc("Add comments to directives."),
+ cl::location(VerboseAsm),
+ cl::init(false));
+
//---------------------------------------------------------------------------
// TargetMachine Class
Modified: llvm/branches/non-call-eh/lib/Target/TargetRegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/TargetRegisterInfo.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/TargetRegisterInfo.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/TargetRegisterInfo.cpp Sun Jul 6 15:45:41 2008
@@ -22,8 +22,10 @@
TargetRegisterInfo::TargetRegisterInfo(const TargetRegisterDesc *D, unsigned NR,
regclass_iterator RCB, regclass_iterator RCE,
- int CFSO, int CFDO)
- : Desc(D), NumRegs(NR), RegClassBegin(RCB), RegClassEnd(RCE) {
+ int CFSO, int CFDO,
+ const unsigned* subregs, const unsigned subregsize)
+ : SubregHash(subregs), SubregHashSize(subregsize), Desc(D), NumRegs(NR),
+ RegClassBegin(RCB), RegClassEnd(RCE) {
assert(NumRegs < FirstVirtualRegister &&
"Target has too many physical registers!");
@@ -48,13 +50,12 @@
/// register of the given type. If type is MVT::Other, then just return any
/// register class the register belongs to.
const TargetRegisterClass *
-TargetRegisterInfo::getPhysicalRegisterRegClass(unsigned reg,
- MVT::ValueType VT) const {
+TargetRegisterInfo::getPhysicalRegisterRegClass(unsigned reg, MVT VT) const {
assert(isPhysicalRegister(reg) && "reg must be a physical register");
// Pick the register class of the right type that contains this physreg.
SmallVector<const TargetRegisterClass*, 4> RCs;
- for (regclass_iterator I = regclass_begin(), E = regclass_end(); I != E; ++I) {
+ for (regclass_iterator I = regclass_begin(), E = regclass_end(); I != E; ++I){
if ((VT == MVT::Other || (*I)->hasType(VT)) && (*I)->contains(reg))
RCs.push_back(*I);
}
Modified: llvm/branches/non-call-eh/lib/Target/TargetSelectionDAG.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/TargetSelectionDAG.td?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/TargetSelectionDAG.td (original)
+++ llvm/branches/non-call-eh/lib/Target/TargetSelectionDAG.td Sun Jul 6 15:45:41 2008
@@ -53,8 +53,8 @@
/// SDTCisIntVectorOfSameSize - This indicates that ThisOp and OtherOp are
/// vector types, and that ThisOp is the result of
-/// MVT::getIntVectorWithNumElements with the number of elements that ThisOp
-/// has.
+/// MVT::getIntVectorWithNumElements with the number of elements
+/// that ThisOp has.
class SDTCisIntVectorOfSameSize<int ThisOp, int OtherOp>
: SDTypeConstraint<ThisOp> {
int OtherOpNum = OtherOp;
@@ -62,7 +62,7 @@
/// SDTCisEltOfVec - This indicates that ThisOp is a scalar type of the same
/// type as the element type of OtherOp, which is a vector type.
-class SDTCisEltOfVec<int ThisOp, int OtherOp>
+class SDTCisEltOfVec<int ThisOp, int OtherOp>
: SDTypeConstraint<ThisOp> {
int OtherOpNum = OtherOp;
}
@@ -220,6 +220,7 @@
def SDNPMayStore : SDNodeProperty; // May write to memory, sets 'mayStore'.
def SDNPMayLoad : SDNodeProperty; // May read memory, sets 'mayLoad'.
def SDNPSideEffect : SDNodeProperty; // Sets 'HasUnmodelledSideEffects'.
+def SDNPMemOperand : SDNodeProperty; // Touches memory, has assoc MemOperand
//===----------------------------------------------------------------------===//
// Selection DAG Node definitions.
@@ -337,6 +338,7 @@
def setcc : SDNode<"ISD::SETCC" , SDTSetCC>;
def select : SDNode<"ISD::SELECT" , SDTSelect>;
def selectcc : SDNode<"ISD::SELECT_CC" , SDTSelectCC>;
+def vsetcc : SDNode<"ISD::VSETCC" , SDTSetCC>;
def brcond : SDNode<"ISD::BRCOND" , SDTBrcond, [SDNPHasChain]>;
def brind : SDNode<"ISD::BRIND" , SDTBrind, [SDNPHasChain]>;
@@ -352,21 +354,39 @@
[SDNPHasChain, SDNPSideEffect]>;
// Do not use atomic_* directly, use atomic_*_size (see below)
-def atomic_lcs : SDNode<"ISD::ATOMIC_LCS" , STDAtomic3,
- [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
-def atomic_las : SDNode<"ISD::ATOMIC_LAS" , STDAtomic2,
- [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
-def atomic_swap : SDNode<"ISD::ATOMIC_SWAP", STDAtomic2,
- [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def atomic_cmp_swap : SDNode<"ISD::ATOMIC_CMP_SWAP" , STDAtomic3,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_add : SDNode<"ISD::ATOMIC_LOAD_ADD" , STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_swap : SDNode<"ISD::ATOMIC_SWAP", STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_sub : SDNode<"ISD::ATOMIC_LOAD_SUB" , STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_and : SDNode<"ISD::ATOMIC_LOAD_AND" , STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_or : SDNode<"ISD::ATOMIC_LOAD_OR" , STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_xor : SDNode<"ISD::ATOMIC_LOAD_XOR" , STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_nand: SDNode<"ISD::ATOMIC_LOAD_NAND", STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_min : SDNode<"ISD::ATOMIC_LOAD_MIN", STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_max : SDNode<"ISD::ATOMIC_LOAD_MAX", STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_umin : SDNode<"ISD::ATOMIC_LOAD_UMIN", STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_umax : SDNode<"ISD::ATOMIC_LOAD_UMAX", STDAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
// Do not use ld, st directly. Use load, extload, sextload, zextload, store,
// and truncst (see below).
def ld : SDNode<"ISD::LOAD" , SDTLoad,
- [SDNPHasChain, SDNPMayLoad]>;
+ [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def st : SDNode<"ISD::STORE" , SDTStore,
- [SDNPHasChain, SDNPMayStore]>;
+ [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def ist : SDNode<"ISD::STORE" , SDTIStore,
- [SDNPHasChain, SDNPMayStore]>;
+ [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def vector_shuffle : SDNode<"ISD::VECTOR_SHUFFLE", SDTVecShuffle, []>;
def build_vector : SDNode<"ISD::BUILD_VECTOR", SDTypeProfile<1, 0, []>, []>;
@@ -450,8 +470,8 @@
// Leaf fragments.
-def vtInt : PatLeaf<(vt), [{ return MVT::isInteger(N->getVT()); }]>;
-def vtFP : PatLeaf<(vt), [{ return MVT::isFloatingPoint(N->getVT()); }]>;
+def vtInt : PatLeaf<(vt), [{ return N->getVT().isInteger(); }]>;
+def vtFP : PatLeaf<(vt), [{ return N->getVT().isFloatingPoint(); }]>;
def immAllOnes : PatLeaf<(imm), [{ return N->isAllOnesValue(); }]>;
def immAllOnesV: PatLeaf<(build_vector), [{
@@ -745,78 +765,78 @@
}]>;
//Atomic patterns
-def atomic_lcs_8 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
- (atomic_lcs node:$ptr, node:$cmp, node:$swp), [{
+def atomic_cmp_swap_8 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
+ (atomic_cmp_swap node:$ptr, node:$cmp, node:$swp), [{
if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
- return V->getVT() == MVT::i8;
+ return V->getValueType(0) == MVT::i8;
return false;
}]>;
-def atomic_lcs_16 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
- (atomic_lcs node:$ptr, node:$cmp, node:$swp), [{
+def atomic_cmp_swap_16 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
+ (atomic_cmp_swap node:$ptr, node:$cmp, node:$swp), [{
if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
- return V->getVT() == MVT::i16;
+ return V->getValueType(0) == MVT::i16;
return false;
}]>;
-def atomic_lcs_32 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
- (atomic_lcs node:$ptr, node:$cmp, node:$swp), [{
+def atomic_cmp_swap_32 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
+ (atomic_cmp_swap node:$ptr, node:$cmp, node:$swp), [{
if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
- return V->getVT() == MVT::i32;
+ return V->getValueType(0) == MVT::i32;
return false;
}]>;
-def atomic_lcs_64 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
- (atomic_lcs node:$ptr, node:$cmp, node:$swp), [{
+def atomic_cmp_swap_64 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
+ (atomic_cmp_swap node:$ptr, node:$cmp, node:$swp), [{
if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
- return V->getVT() == MVT::i64;
+ return V->getValueType(0) == MVT::i64;
return false;
}]>;
-def atomic_las_8 : PatFrag<(ops node:$ptr, node:$inc),
- (atomic_las node:$ptr, node:$inc), [{
+def atomic_load_add_8 : PatFrag<(ops node:$ptr, node:$inc),
+ (atomic_load_add node:$ptr, node:$inc), [{
if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
- return V->getVT() == MVT::i8;
+ return V->getValueType(0) == MVT::i8;
return false;
}]>;
-def atomic_las_16 : PatFrag<(ops node:$ptr, node:$inc),
- (atomic_las node:$ptr, node:$inc), [{
+def atomic_load_add_16 : PatFrag<(ops node:$ptr, node:$inc),
+ (atomic_load_add node:$ptr, node:$inc), [{
if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
- return V->getVT() == MVT::i16;
+ return V->getValueType(0) == MVT::i16;
return false;
}]>;
-def atomic_las_32 : PatFrag<(ops node:$ptr, node:$inc),
- (atomic_las node:$ptr, node:$inc), [{
+def atomic_load_add_32 : PatFrag<(ops node:$ptr, node:$inc),
+ (atomic_load_add node:$ptr, node:$inc), [{
if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
- return V->getVT() == MVT::i32;
+ return V->getValueType(0) == MVT::i32;
return false;
}]>;
-def atomic_las_64 : PatFrag<(ops node:$ptr, node:$inc),
- (atomic_las node:$ptr, node:$inc), [{
+def atomic_load_add_64 : PatFrag<(ops node:$ptr, node:$inc),
+ (atomic_load_add node:$ptr, node:$inc), [{
if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
- return V->getVT() == MVT::i64;
+ return V->getValueType(0) == MVT::i64;
return false;
}]>;
def atomic_swap_8 : PatFrag<(ops node:$ptr, node:$inc),
(atomic_swap node:$ptr, node:$inc), [{
if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
- return V->getVT() == MVT::i8;
+ return V->getValueType(0) == MVT::i8;
return false;
}]>;
def atomic_swap_16 : PatFrag<(ops node:$ptr, node:$inc),
(atomic_swap node:$ptr, node:$inc), [{
if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
- return V->getVT() == MVT::i16;
+ return V->getValueType(0) == MVT::i16;
return false;
}]>;
def atomic_swap_32 : PatFrag<(ops node:$ptr, node:$inc),
(atomic_swap node:$ptr, node:$inc), [{
if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
- return V->getVT() == MVT::i32;
+ return V->getValueType(0) == MVT::i32;
return false;
}]>;
def atomic_swap_64 : PatFrag<(ops node:$ptr, node:$inc),
(atomic_swap node:$ptr, node:$inc), [{
if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
- return V->getVT() == MVT::i64;
+ return V->getValueType(0) == MVT::i64;
return false;
}]>;
Modified: llvm/branches/non-call-eh/lib/Target/X86/README-SSE.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/X86/README-SSE.txt?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/X86/README-SSE.txt (original)
+++ llvm/branches/non-call-eh/lib/Target/X86/README-SSE.txt Sun Jul 6 15:45:41 2008
@@ -382,106 +382,6 @@
//===---------------------------------------------------------------------===//
-For this:
-
-#include <emmintrin.h>
-void test(__m128d *r, __m128d *A, double B) {
- *r = _mm_loadl_pd(*A, &B);
-}
-
-We generates:
-
- subl $12, %esp
- movsd 24(%esp), %xmm0
- movsd %xmm0, (%esp)
- movl 20(%esp), %eax
- movapd (%eax), %xmm0
- movlpd (%esp), %xmm0
- movl 16(%esp), %eax
- movapd %xmm0, (%eax)
- addl $12, %esp
- ret
-
-icc generates:
-
- movl 4(%esp), %edx #3.6
- movl 8(%esp), %eax #3.6
- movapd (%eax), %xmm0 #4.22
- movlpd 12(%esp), %xmm0 #4.8
- movapd %xmm0, (%edx) #4.3
- ret #5.1
-
-So icc is smart enough to know that B is in memory so it doesn't load it and
-store it back to stack.
-
-This should be fixed by eliminating the llvm.x86.sse2.loadl.pd intrinsic,
-lowering it to a load+insertelement instead. Already match the load+shuffle
-as movlpd, so this should be easy. We already get optimal code for:
-
-define void @test2(<2 x double>* %r, <2 x double>* %A, double %B) {
-entry:
- %tmp2 = load <2 x double>* %A, align 16
- %tmp8 = insertelement <2 x double> %tmp2, double %B, i32 0
- store <2 x double> %tmp8, <2 x double>* %r, align 16
- ret void
-}
-
-//===---------------------------------------------------------------------===//
-
-Consider (PR2108):
-
-#include <xmmintrin.h>
-__m128i doload64(unsigned long long x) { return _mm_loadl_epi64(&x);}
-__m128i doload64_2(unsigned long long *x) { return _mm_loadl_epi64(x);}
-
-These are very similar routines, but we generate significantly worse code for
-the first one on x86-32:
-
-_doload64:
- subl $12, %esp
- movl 20(%esp), %eax
- movl %eax, 4(%esp)
- movl 16(%esp), %eax
- movl %eax, (%esp)
- movsd (%esp), %xmm0
- addl $12, %esp
- ret
-_doload64_2:
- movl 4(%esp), %eax
- movsd (%eax), %xmm0
- ret
-
-The problem is that the argument lowering logic splits the i64 argument into
-2x i32 loads early, the f64 insert doesn't match. Here's a reduced testcase:
-
-define fastcc double @doload64(i64 %x) nounwind {
-entry:
- %tmp717 = bitcast i64 %x to double ; <double> [#uses=1]
- ret double %tmp717
-}
-
-compiles to:
-
-_doload64:
- subl $12, %esp
- movl 20(%esp), %eax
- movl %eax, 4(%esp)
- movl 16(%esp), %eax
- movl %eax, (%esp)
- movsd (%esp), %xmm0
- addl $12, %esp
- ret
-
-instead of movsd from the stack. This is actually not too bad to implement. The
-best way to do this is to implement a dag combine that turns
-bitconvert(build_pair(load a, load b)) into one load of the right type. The
-only trick to this is writing the predicate that determines that a/b are at the
-right offset from each other. For the enterprising hacker, InferAlignment is a
-helpful place to start poking if interested.
-
-
-//===---------------------------------------------------------------------===//
-
__m128d test1( __m128d A, __m128d B) {
return _mm_shuffle_pd(A, B, 0x3);
}
@@ -559,75 +459,6 @@
//===---------------------------------------------------------------------===//
-We should compile this:
-#include <xmmintrin.h>
-typedef union {
- int i[4];
- float f[4];
- __m128 v;
-} vector4_t;
-void swizzle (const void *a, vector4_t * b, vector4_t * c) {
- b->v = _mm_loadl_pi (b->v, (__m64 *) a);
- c->v = _mm_loadl_pi (c->v, ((__m64 *) a) + 1);
-}
-
-to:
-
-_swizzle:
- movl 4(%esp), %eax
- movl 8(%esp), %edx
- movl 12(%esp), %ecx
- movlps (%eax), %xmm0
- movlps %xmm0, (%edx)
- movlps 8(%eax), %xmm0
- movlps %xmm0, (%ecx)
- ret
-
-not:
-
-swizzle:
- movl 8(%esp), %eax
- movaps (%eax), %xmm0
- movl 4(%esp), %ecx
- movlps (%ecx), %xmm0
- movaps %xmm0, (%eax)
- movl 12(%esp), %eax
- movaps (%eax), %xmm0
- movlps 8(%ecx), %xmm0
- movaps %xmm0, (%eax)
- ret
-
-//===---------------------------------------------------------------------===//
-
-These functions should produce the same code:
-
-#include <emmintrin.h>
-
-typedef long long __m128i __attribute__ ((__vector_size__ (16)));
-
-int foo(__m128i* val) {
- return __builtin_ia32_vec_ext_v4si(*val, 1);
-}
-int bar(__m128i* val) {
- union vs {
- __m128i *_v;
- int* _s;
- } v = {val};
- return v._s[1];
-}
-
-We currently produce (with -m64):
-
-_foo:
- pshufd $1, (%rdi), %xmm0
- movd %xmm0, %eax
- ret
-_bar:
- movl 4(%rdi), %eax
- ret
-
-//===---------------------------------------------------------------------===//
-
We should materialize vector constants like "all ones" and "signbit" with
code like:
@@ -811,27 +642,200 @@
//===---------------------------------------------------------------------===//
-Take the following code:
+LLVM currently generates stack realignment code when it is not actually
+needed. The problem is that we need to know about stack alignment too early,
+before RA runs.
-#include <xmmintrin.h>
-__m128i doload64(short x) {return _mm_set_epi16(x,x,x,x,x,x,x,x);}
+At that point we don't know whether there will be a vector spill or not.
+The stack realignment logic is overly conservative here, but otherwise we
+could end up producing unaligned loads/stores.
-LLVM currently generates the following on x86:
-doload64:
- movzwl 4(%esp), %eax
- movd %eax, %xmm0
- punpcklwd %xmm0, %xmm0
- pshufd $0, %xmm0, %xmm0
- ret
+Fixing this will require some huge RA changes.
-gcc's generated code:
-doload64:
- movd 4(%esp), %xmm0
- punpcklwd %xmm0, %xmm0
- pshufd $0, %xmm0, %xmm0
+Testcase:
+#include <emmintrin.h>
+
+typedef short vSInt16 __attribute__ ((__vector_size__ (16)));
+
+static const vSInt16 a = {- 22725, - 12873, - 22725, - 12873, - 22725, - 12873,
+- 22725, - 12873};;
+
+vSInt16 madd(vSInt16 b)
+{
+ return _mm_madd_epi16(a, b);
+}
+
+Generated code (x86-32, linux):
+madd:
+ pushl %ebp
+ movl %esp, %ebp
+ andl $-16, %esp
+ movaps .LCPI1_0, %xmm1
+ pmaddwd %xmm1, %xmm0
+ movl %ebp, %esp
+ popl %ebp
ret
-LLVM should be able to generate the same thing as gcc. This looks like it is
-just a matter of matching (scalar_to_vector (load x)) to movd.
+//===---------------------------------------------------------------------===//
+
+Consider:
+#include <emmintrin.h>
+__m128 foo2 (float x) {
+ return _mm_set_ps (0, 0, x, 0);
+}
+
+In x86-32 mode, we generate this spiffy code:
+
+_foo2:
+ movss 4(%esp), %xmm0
+ pshufd $81, %xmm0, %xmm0
+ ret
+
+in x86-64 mode, we generate this code, which could be better:
+
+_foo2:
+ xorps %xmm1, %xmm1
+ movss %xmm0, %xmm1
+ pshufd $81, %xmm1, %xmm0
+ ret
+
+In sse4 mode, we could use insertps to make both better.
+
+Here's another testcase that could use insertps [mem]:
+
+#include <xmmintrin.h>
+extern float x2, x3;
+__m128 foo1 (float x1, float x4) {
+ return _mm_set_ps (x2, x1, x3, x4);
+}
+
+gcc mainline compiles it to:
+
+foo1:
+ insertps $0x10, x2(%rip), %xmm0
+ insertps $0x10, x3(%rip), %xmm1
+ movaps %xmm1, %xmm2
+ movlhps %xmm0, %xmm2
+ movaps %xmm2, %xmm0
+ ret
+
+//===---------------------------------------------------------------------===//
+
+We compile vector multiply-by-constant into poor code:
+
+define <4 x i32> @f(<4 x i32> %i) nounwind {
+ %A = mul <4 x i32> %i, < i32 10, i32 10, i32 10, i32 10 >
+ ret <4 x i32> %A
+}
+
+On targets without SSE4.1, this compiles into:
+
+LCPI1_0: ## <4 x i32>
+ .long 10
+ .long 10
+ .long 10
+ .long 10
+ .text
+ .align 4,0x90
+ .globl _f
+_f:
+ pshufd $3, %xmm0, %xmm1
+ movd %xmm1, %eax
+ imull LCPI1_0+12, %eax
+ movd %eax, %xmm1
+ pshufd $1, %xmm0, %xmm2
+ movd %xmm2, %eax
+ imull LCPI1_0+4, %eax
+ movd %eax, %xmm2
+ punpckldq %xmm1, %xmm2
+ movd %xmm0, %eax
+ imull LCPI1_0, %eax
+ movd %eax, %xmm1
+ movhlps %xmm0, %xmm0
+ movd %xmm0, %eax
+ imull LCPI1_0+8, %eax
+ movd %eax, %xmm0
+ punpckldq %xmm0, %xmm1
+ movaps %xmm1, %xmm0
+ punpckldq %xmm2, %xmm0
+ ret
+
+It would be better to synthesize integer vector multiplication by constants
+using shifts and adds, pslld and paddd here. And even on targets with SSE4.1,
+simple cases such as multiplication by powers of two would be better as
+vector shifts than as multiplications.
+
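To make the shift-and-add suggestion concrete (a sketch for the multiply-by-10
case in the testcase above, not part of the original note): 10*x = 8*x + 2*x,
so per element

  unsigned mul10(unsigned x) { return (x << 3) + (x << 1); }  /* == x * 10 */

and the vector form is just two pslld's and a paddd instead of the four scalar
imull's shown above.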
+//===---------------------------------------------------------------------===//
+
+We compile this:
+
+__m128i
+foo2 (char x)
+{
+ return _mm_set_epi8 (1, 0, 0, 0, 0, 0, 0, 0, 0, x, 0, 1, 0, 0, 0, 0);
+}
+
+into:
+ movl $1, %eax
+ xorps %xmm0, %xmm0
+ pinsrw $2, %eax, %xmm0
+ movzbl 4(%esp), %eax
+ pinsrw $3, %eax, %xmm0
+ movl $256, %eax
+ pinsrw $7, %eax, %xmm0
+ ret
+
+
+gcc-4.2:
+ subl $12, %esp
+ movzbl 16(%esp), %eax
+ movdqa LC0, %xmm0
+ pinsrw $3, %eax, %xmm0
+ addl $12, %esp
+ ret
+ .const
+ .align 4
+LC0:
+ .word 0
+ .word 0
+ .word 1
+ .word 0
+ .word 0
+ .word 0
+ .word 0
+ .word 256
+
+With SSE4, it should be
+ movdqa .LC0(%rip), %xmm0
+ pinsrb $6, %edi, %xmm0
//===---------------------------------------------------------------------===//
+
+We should transform a shuffle of two vectors of constants into a single vector
+of constants. Likewise, insertelement of a constant into a vector of constants
+should result in a vector of constants. See e.g. 2008-06-25-VecISelBug.ll.
+
+We compiled it to something horrible:
+
+ .align 4
+LCPI1_1: ## float
+ .long 1065353216 ## float 1
+ .const
+
+ .align 4
+LCPI1_0: ## <4 x float>
+ .space 4
+ .long 1065353216 ## float 1
+ .space 4
+ .long 1065353216 ## float 1
+ .text
+ .align 4,0x90
+ .globl _t
+_t:
+ xorps %xmm0, %xmm0
+ movhps LCPI1_0, %xmm0
+ movss LCPI1_1, %xmm1
+ movaps %xmm0, %xmm2
+ shufps $2, %xmm1, %xmm2
+ shufps $132, %xmm2, %xmm0
+ movaps %xmm0, 0
Modified: llvm/branches/non-call-eh/lib/Target/X86/README-X86-64.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/X86/README-X86-64.txt?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/X86/README-X86-64.txt (original)
+++ llvm/branches/non-call-eh/lib/Target/X86/README-X86-64.txt Sun Jul 6 15:45:41 2008
@@ -236,3 +236,24 @@
//===---------------------------------------------------------------------===//
+The x86-64 ABI for hidden-argument struct returns requires that the
+incoming value of %rdi be copied into %rax by the callee upon return.
+
+The idea is that it saves callers from having to remember this value,
+which would often require a callee-saved register. Callees usually
+need to keep this value live for most of their body anyway, so it
+doesn't add a significant burden on them.
+
+We currently implement this in codegen; however, this is suboptimal
+because it would be quite awkward to implement the corresponding
+optimization for callers.
+
+A better implementation would be to relax the LLVM IR rules for sret
+arguments to allow a function with an sret argument to have a non-void
+return type, and to have the front-end set up the sret argument value
+as the return value of the function. The front-end could then more easily
+emit uses of the returned struct value in terms of the function's
+lowered return value, and it would free non-C frontends from a
+complication only required by a C-based ABI.
+
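To illustrate the convention (a hand-written C sketch; the names are invented
and it is not part of the patch):

struct big { long v[8]; };
struct big make_big(void);      /* returned via a hidden sret pointer in %rdi */

void use(struct big *out) {
  *out = make_big();            /* on return, %rax holds the original %rdi, so
                                   the caller need not keep the pointer live in
                                   a callee-saved register across the call */
}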
+//===---------------------------------------------------------------------===//
Modified: llvm/branches/non-call-eh/lib/Target/X86/README.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/X86/README.txt?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/X86/README.txt (original)
+++ llvm/branches/non-call-eh/lib/Target/X86/README.txt Sun Jul 6 15:45:41 2008
@@ -1656,3 +1656,63 @@
enough to warrant the spill.
Also check why xmm7 is not used at all in the function.
+
+//===---------------------------------------------------------------------===//
+
+Legalize loses track of the fact that bools are always zero extended when in
+memory. This causes us to compile abort_gzip (from 164.gzip) from:
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i386-apple-darwin8"
+ at in_exit.4870.b = internal global i1 false ; <i1*> [#uses=2]
+define fastcc void @abort_gzip() noreturn nounwind {
+entry:
+ %tmp.b.i = load i1* @in_exit.4870.b ; <i1> [#uses=1]
+ br i1 %tmp.b.i, label %bb.i, label %bb4.i
+bb.i: ; preds = %entry
+ tail call void @exit( i32 1 ) noreturn nounwind
+ unreachable
+bb4.i: ; preds = %entry
+ store i1 true, i1* @in_exit.4870.b
+ tail call void @exit( i32 1 ) noreturn nounwind
+ unreachable
+}
+declare void @exit(i32) noreturn nounwind
+
+into:
+
+_abort_gzip:
+ subl $12, %esp
+ movb _in_exit.4870.b, %al
+ notb %al
+ testb $1, %al
+ jne LBB1_2 ## bb4.i
+LBB1_1: ## bb.i
+ ...
+
+//===---------------------------------------------------------------------===//
+
+We compile:
+
+int test(int x, int y) {
+ return x-y-1;
+}
+
+into (-m64):
+
+_test:
+ decl %edi
+ movl %edi, %eax
+ subl %esi, %eax
+ ret
+
+it would be better to codegen as: x+~y (notl+addl)
+
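A quick check of the identity: in two's complement ~y = -y - 1, so
x + ~y = x - y - 1, i.e. a notl of y followed by an addl of x computes the same
value as the decrement-and-subtract sequence above.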
+//===---------------------------------------------------------------------===//
+
+We should consider using __i686.get_pc_thunk.bx for MOVPC32r (used for PIC)
+on targets that support it, such as Linux and similar targets, in place of
+the call-a-label trick. It's said to be friendlier to branch-prediction
+hardware because it pairs a ret with the call.
+
+//===---------------------------------------------------------------------===//
Modified: llvm/branches/non-call-eh/lib/Target/X86/X86.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/X86/X86.h?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/X86/X86.h (original)
+++ llvm/branches/non-call-eh/lib/Target/X86/X86.h Sun Jul 6 15:45:41 2008
@@ -51,6 +51,11 @@
///
FunctionPass *createEmitX86CodeToMemory();
+/// createX86MaxStackAlignmentCalculatorPass - This function returns a pass
+/// which calculates the maximal stack alignment required for a function.
+///
+FunctionPass *createX86MaxStackAlignmentCalculatorPass();
+
} // End llvm namespace
// Defines symbolic names for X86 registers. This defines a mapping from
Modified: llvm/branches/non-call-eh/lib/Target/X86/X86ATTAsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/X86/X86ATTAsmPrinter.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/X86/X86ATTAsmPrinter.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/X86/X86ATTAsmPrinter.cpp Sun Jul 6 15:45:41 2008
@@ -20,14 +20,16 @@
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "X86TargetAsmInfo.h"
-#include "llvm/ADT/StringExtras.h"
#include "llvm/CallingConv.h"
-#include "llvm/CodeGen/MachineJumpTableInfo.h"
+#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
+#include "llvm/Type.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/Support/Mangler.h"
#include "llvm/Target/TargetAsmInfo.h"
#include "llvm/Target/TargetOptions.h"
-#include "llvm/ADT/Statistic.h"
using namespace llvm;
STATISTIC(EmittedInsts, "Number of machine instrs printed");
@@ -39,19 +41,116 @@
if (Subtarget->isTargetDarwin())
label = "\"L" + utostr_32(FnNum) + "$pb\"";
else if (Subtarget->isTargetELF())
- label = ".Lllvm$" + utostr_32(FnNum) + "." + "$piclabel";
+ label = ".Lllvm$" + utostr_32(FnNum) + "." "$piclabel";
else
assert(0 && "Don't know how to print PIC label!\n");
return label;
}
+static X86MachineFunctionInfo calculateFunctionInfo(const Function *F,
+ const TargetData *TD) {
+ X86MachineFunctionInfo Info;
+ uint64_t Size = 0;
+
+ switch (F->getCallingConv()) {
+ case CallingConv::X86_StdCall:
+ Info.setDecorationStyle(StdCall);
+ break;
+ case CallingConv::X86_FastCall:
+ Info.setDecorationStyle(FastCall);
+ break;
+ default:
+ return Info;
+ }
+
+ unsigned argNum = 1;
+ for (Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end();
+ AI != AE; ++AI, ++argNum) {
+ const Type* Ty = AI->getType();
+
+ // 'Dereference' type in case of byval parameter attribute
+ if (F->paramHasAttr(argNum, ParamAttr::ByVal))
+ Ty = cast<PointerType>(Ty)->getElementType();
+
+ // Size should be aligned to DWORD boundary
+ Size += ((TD->getABITypeSize(Ty) + 3)/4)*4;
+ }
+
+ // We're not supporting tooooo huge arguments :)
+ Info.setBytesToPopOnReturn((unsigned int)Size);
+ return Info;
+}
+
+/// PrintUnmangledNameSafely - Print out the printable characters in the name.
+/// Don't print things like \n or \0.
+static void PrintUnmangledNameSafely(const Value *V, std::ostream &OS) {
+ for (const char *Name = V->getNameStart(), *E = Name+V->getNameLen();
+ Name != E; ++Name)
+ if (isprint(*Name))
+ OS << *Name;
+}
+
+/// decorateName - Query FunctionInfoMap and use this information for various
+/// name decoration.
+void X86ATTAsmPrinter::decorateName(std::string &Name,
+ const GlobalValue *GV) {
+ const Function *F = dyn_cast<Function>(GV);
+ if (!F) return;
+
+ // We don't want to decorate non-stdcall or non-fastcall functions right now
+ unsigned CC = F->getCallingConv();
+ if (CC != CallingConv::X86_StdCall && CC != CallingConv::X86_FastCall)
+ return;
+
+ // Decorate names only when we're targeting Cygwin/Mingw32 targets
+ if (!Subtarget->isTargetCygMing())
+ return;
+
+ FMFInfoMap::const_iterator info_item = FunctionInfoMap.find(F);
+
+ const X86MachineFunctionInfo *Info;
+ if (info_item == FunctionInfoMap.end()) {
+    // Calculate the appropriate function info and populate the map.
+ FunctionInfoMap[F] = calculateFunctionInfo(F, TM.getTargetData());
+ Info = &FunctionInfoMap[F];
+ } else {
+ Info = &info_item->second;
+ }
+
+ const FunctionType *FT = F->getFunctionType();
+ switch (Info->getDecorationStyle()) {
+ case None:
+ break;
+ case StdCall:
+ // "Pure" variadic functions do not receive @0 suffix.
+ if (!FT->isVarArg() || (FT->getNumParams() == 0) ||
+ (FT->getNumParams() == 1 && F->hasStructRetAttr()))
+ Name += '@' + utostr_32(Info->getBytesToPopOnReturn());
+ break;
+ case FastCall:
+ // "Pure" variadic functions do not receive @0 suffix.
+ if (!FT->isVarArg() || (FT->getNumParams() == 0) ||
+ (FT->getNumParams() == 1 && F->hasStructRetAttr()))
+ Name += '@' + utostr_32(Info->getBytesToPopOnReturn());
+
+ if (Name[0] == '_') {
+ Name[0] = '@';
+ } else {
+ Name = '@' + Name;
+ }
+ break;
+ default:
+ assert(0 && "Unsupported DecorationStyle");
+ }
+}
+
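To illustrate the decoration scheme (a hedged example; the functions are
hypothetical, and the leading '_' is the usual global prefix added by the
Mangler on these targets):

  int __attribute__((stdcall))  f(int a, int b);   /* _f -> _f@8 */
  int __attribute__((fastcall)) g(int a, int b);   /* _g -> @g@8 */

In both cases '@8' is the DWORD-aligned size of the argument list computed by
calculateFunctionInfo above; for fastcall the leading '_' is additionally
replaced by '@'.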
/// getSectionForFunction - Return the section that we should emit the
/// specified function body into.
std::string X86ATTAsmPrinter::getSectionForFunction(const Function &F) const {
switch (F.getLinkage()) {
default: assert(0 && "Unknown linkage type!");
- case Function::InternalLinkage:
+ case Function::InternalLinkage:
case Function::DLLExportLinkage:
case Function::ExternalLinkage:
return TAI->getTextSection();
@@ -68,36 +167,13 @@
}
}
-/// runOnMachineFunction - This uses the printMachineInstruction()
-/// method to print assembly for each instruction.
-///
-bool X86ATTAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
- if (TAI->doesSupportDebugInformation()) {
- // Let PassManager know we need debug information and relay
- // the MachineModuleInfo address on to DwarfWriter.
- MMI = &getAnalysis<MachineModuleInfo>();
- DW.SetModuleInfo(MMI);
- }
-
- SetupMachineFunction(MF);
- O << "\n\n";
-
- // Print out constants referenced by the function
- EmitConstantPool(MF.getConstantPool());
-
- // Print out labels for the function.
+void X86ATTAsmPrinter::emitFunctionHeader(const MachineFunction &MF) {
const Function *F = MF.getFunction();
- unsigned CC = F->getCallingConv();
- // Populate function information map. Actually, We don't want to populate
- // non-stdcall or non-fastcall functions' information right now.
- if (CC == CallingConv::X86_StdCall || CC == CallingConv::X86_FastCall)
- FunctionInfoMap[F] = *MF.getInfo<X86MachineFunctionInfo>();
-
- X86SharedAsmPrinter::decorateName(CurrentFnName, F);
+ decorateName(CurrentFnName, F);
SwitchToTextSection(getSectionForFunction(*F).c_str(), F);
-
+
unsigned FnAlign = OptimizeForSize ? 1 : 4;
switch (F->getLinkage()) {
default: assert(0 && "Unknown linkage type!");
@@ -105,32 +181,30 @@
EmitAlignment(FnAlign, F);
break;
case Function::DLLExportLinkage:
- DLLExportedFns.insert(Mang->makeNameProper(F->getName(), ""));
- //FALLS THROUGH
case Function::ExternalLinkage:
EmitAlignment(FnAlign, F);
- O << "\t.globl\t" << CurrentFnName << "\n";
+ O << "\t.globl\t" << CurrentFnName << '\n';
break;
case Function::LinkOnceLinkage:
case Function::WeakLinkage:
EmitAlignment(FnAlign, F);
if (Subtarget->isTargetDarwin()) {
- O << "\t.globl\t" << CurrentFnName << "\n";
- O << TAI->getWeakDefDirective() << CurrentFnName << "\n";
+ O << "\t.globl\t" << CurrentFnName << '\n';
+ O << TAI->getWeakDefDirective() << CurrentFnName << '\n';
} else if (Subtarget->isTargetCygMing()) {
- O << "\t.globl\t" << CurrentFnName << "\n";
- O << "\t.linkonce discard\n";
+ O << "\t.globl\t" << CurrentFnName << "\n"
+ "\t.linkonce discard\n";
} else {
- O << "\t.weak\t" << CurrentFnName << "\n";
+ O << "\t.weak\t" << CurrentFnName << '\n';
}
break;
}
if (F->hasHiddenVisibility()) {
if (const char *Directive = TAI->getHiddenDirective())
- O << Directive << CurrentFnName << "\n";
+ O << Directive << CurrentFnName << '\n';
} else if (F->hasProtectedVisibility()) {
if (const char *Directive = TAI->getProtectedDirective())
- O << Directive << CurrentFnName << "\n";
+ O << Directive << CurrentFnName << '\n';
}
if (Subtarget->isTargetELF())
@@ -149,13 +223,43 @@
(F->getLinkage() == Function::LinkOnceLinkage ||
F->getLinkage() == Function::WeakLinkage))
O << "Lllvm$workaround$fake$stub$" << CurrentFnName << ":\n";
+}
- if (TAI->doesSupportDebugInformation() ||
- TAI->doesSupportExceptionHandling()) {
- // Emit pre-function debug and/or EH information.
- DW.BeginFunction(&MF);
+/// runOnMachineFunction - This uses the printMachineInstruction()
+/// method to print assembly for each instruction.
+///
+bool X86ATTAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
+ const Function *F = MF.getFunction();
+ unsigned CC = F->getCallingConv();
+
+ if (TAI->doesSupportDebugInformation()) {
+ // Let PassManager know we need debug information and relay
+ // the MachineModuleInfo address on to DwarfWriter.
+ MMI = &getAnalysis<MachineModuleInfo>();
+ DW.SetModuleInfo(MMI);
}
+ SetupMachineFunction(MF);
+ O << "\n\n";
+
+ // Populate the function information map. We only want to populate stdcall
+ // and fastcall functions' information right now.
+ if (CC == CallingConv::X86_StdCall || CC == CallingConv::X86_FastCall)
+ FunctionInfoMap[F] = *MF.getInfo<X86MachineFunctionInfo>();
+
+ // Print out constants referenced by the function
+ EmitConstantPool(MF.getConstantPool());
+
+ if (F->hasDLLExportLinkage())
+ DLLExportedFns.insert(Mang->makeNameProper(F->getName(), ""));
+
+ // Print the 'header' of the function
+ emitFunctionHeader(MF);
+
+ // Emit pre-function debug and/or EH information.
+ if (TAI->doesSupportDebugInformation() || TAI->doesSupportExceptionHandling())
+ DW.BeginFunction(&MF);
+
// Print out code for the function.
bool hasAnyRealCode = false;
for (MachineFunction::const_iterator I = MF.begin(), E = MF.end();
@@ -168,7 +272,7 @@
for (MachineBasicBlock::const_iterator II = I->begin(), IE = I->end();
II != IE; ++II) {
// Print the assembly for the instruction.
- if (II->getOpcode() != X86::LABEL)
+ if (!II->isLabel())
hasAnyRealCode = true;
printMachineInstruction(II);
}
@@ -183,25 +287,29 @@
}
if (TAI->hasDotTypeDotSizeDirective())
- O << "\t.size\t" << CurrentFnName << ", .-" << CurrentFnName << "\n";
+ O << "\t.size\t" << CurrentFnName << ", .-" << CurrentFnName << '\n';
- if (TAI->doesSupportDebugInformation()) {
- // Emit post-function debug information.
+ // Emit post-function debug information.
+ if (TAI->doesSupportDebugInformation())
DW.EndFunction();
- }
// Print out jump tables referenced by the function.
EmitJumpTableInfo(MF.getJumpTableInfo(), MF);
-
+
// We didn't modify anything.
return false;
}
-static inline bool printGOT(TargetMachine &TM, const X86Subtarget* ST) {
+static inline bool shouldPrintGOT(TargetMachine &TM, const X86Subtarget* ST) {
return ST->isPICStyleGOT() && TM.getRelocationModel() == Reloc::PIC_;
}
-static inline bool printStub(TargetMachine &TM, const X86Subtarget* ST) {
+static inline bool shouldPrintPLT(TargetMachine &TM, const X86Subtarget* ST) {
+ return ST->isTargetELF() && TM.getRelocationModel() == Reloc::PIC_ &&
+ (ST->isPICStyleRIPRel() || ST->isPICStyleGOT());
+}
+
+static inline bool shouldPrintStub(TargetMachine &TM, const X86Subtarget* ST) {
return ST->isPICStyleStub() && TM.getRelocationModel() != Reloc::Static;
}
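These three predicates encode which PIC flavour a symbol reference needs: GOT-relative addressing and PLT calls for ELF PIC code, and Mach-O lazy/non-lazy stubs on Darwin whenever the relocation model is not static. A rough decision-table sketch of the same logic, with hypothetical enums standing in for the TargetMachine and X86Subtarget queries:

#include <iostream>

// Hypothetical stand-ins for the TargetMachine/X86Subtarget queries.
enum RelocModel { RM_Static, RM_PIC, RM_DynamicNoPIC };
enum PICStyle   { PIC_None, PIC_GOT, PIC_RIPRel, PIC_Stub };

struct Target {
  RelocModel Reloc;
  PICStyle   Style;
  bool       IsELF;
};

static bool shouldUseGOTOFF(const Target &T) {
  return T.Style == PIC_GOT && T.Reloc == RM_PIC;
}
static bool shouldUsePLT(const Target &T) {
  return T.IsELF && T.Reloc == RM_PIC &&
         (T.Style == PIC_RIPRel || T.Style == PIC_GOT);
}
static bool shouldUseDarwinStub(const Target &T) {
  return T.Style == PIC_Stub && T.Reloc != RM_Static;
}

int main() {
  Target ElfPic    = { RM_PIC, PIC_GOT, true };
  Target DarwinJIT = { RM_DynamicNoPIC, PIC_Stub, false };
  std::cout << shouldUseGOTOFF(ElfPic) << ' ' << shouldUsePLT(ElfPic) << ' '
            << shouldUseDarwinStub(DarwinJIT) << "\n";  // 1 1 1
  return 0;
}
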
@@ -215,7 +323,7 @@
O << '%';
unsigned Reg = MO.getReg();
if (Modifier && strncmp(Modifier, "subreg", strlen("subreg")) == 0) {
- MVT::ValueType VT = (strcmp(Modifier+6,"64") == 0) ?
+ MVT VT = (strcmp(Modifier+6,"64") == 0) ?
MVT::i64 : ((strcmp(Modifier+6, "32") == 0) ? MVT::i32 :
((strcmp(Modifier+6,"16") == 0) ? MVT::i16 : MVT::i8));
Reg = getX86SubSuperRegister(Reg, VT);
@@ -237,7 +345,7 @@
case MachineOperand::MO_JumpTableIndex: {
bool isMemOp = Modifier && !strcmp(Modifier, "mem");
if (!isMemOp) O << '$';
- O << TAI->getPrivateGlobalPrefix() << "JTI" << getFunctionNumber() << "_"
+ O << TAI->getPrivateGlobalPrefix() << "JTI" << getFunctionNumber() << '_'
<< MO.getIndex();
if (TM.getRelocationModel() == Reloc::PIC_) {
@@ -247,7 +355,7 @@
else if (Subtarget->isPICStyleGOT())
O << "@GOTOFF";
}
-
+
if (isMemOp && Subtarget->isPICStyleRIPRel() && !NotRIPRel)
O << "(%rip)";
return;
@@ -255,7 +363,7 @@
case MachineOperand::MO_ConstantPoolIndex: {
bool isMemOp = Modifier && !strcmp(Modifier, "mem");
if (!isMemOp) O << '$';
- O << TAI->getPrivateGlobalPrefix() << "CPI" << getFunctionNumber() << "_"
+ O << TAI->getPrivateGlobalPrefix() << "CPI" << getFunctionNumber() << '_'
<< MO.getIndex();
if (TM.getRelocationModel() == Reloc::PIC_) {
@@ -265,10 +373,10 @@
else if (Subtarget->isPICStyleGOT())
O << "@GOTOFF";
}
-
+
int Offset = MO.getOffset();
if (Offset > 0)
- O << "+" << Offset;
+ O << '+' << Offset;
else if (Offset < 0)
O << Offset;
@@ -293,8 +401,8 @@
bool isThreadLocal = GVar && GVar->isThreadLocal();
std::string Name = Mang->getValueName(GV);
- X86SharedAsmPrinter::decorateName(Name, GV);
-
+ decorateName(Name, GV);
+
if (!isMemOp && !isCallOp)
O << '$';
else if (Name[0] == '$') {
@@ -304,39 +412,40 @@
needCloseParen = true;
}
- if (printStub(TM, Subtarget)) {
+ if (shouldPrintStub(TM, Subtarget)) {
// Link-once, declaration, or Weakly-linked global variables need
// non-lazily-resolved stubs
if (GV->isDeclaration() ||
GV->hasWeakLinkage() ||
- GV->hasLinkOnceLinkage()) {
+ GV->hasLinkOnceLinkage() ||
+ GV->hasCommonLinkage()) {
// Dynamically-resolved functions need a stub for the function.
if (isCallOp && isa<Function>(GV)) {
FnStubs.insert(Name);
- O << TAI->getPrivateGlobalPrefix() << Name << "$stub";
+ printSuffixedName(Name, "$stub");
} else {
GVStubs.insert(Name);
- O << TAI->getPrivateGlobalPrefix() << Name << "$non_lazy_ptr";
+ printSuffixedName(Name, "$non_lazy_ptr");
}
} else {
if (GV->hasDLLImportLinkage())
- O << "__imp_";
+ O << "__imp_";
O << Name;
}
-
+
if (!isCallOp && TM.getRelocationModel() == Reloc::PIC_)
O << '-' << getPICLabelString(getFunctionNumber(), TAI, Subtarget);
} else {
if (GV->hasDLLImportLinkage()) {
- O << "__imp_";
- }
+ O << "__imp_";
+ }
O << Name;
- if (isCallOp && isa<Function>(GV)) {
- if (printGOT(TM, Subtarget)) {
- // Assemble call via PLT for non-local symbols
- if (!(GV->hasHiddenVisibility() || GV->hasProtectedVisibility()) ||
- GV->isDeclaration())
+ if (isCallOp) {
+ if (shouldPrintPLT(TM, Subtarget)) {
+ // Assemble call via PLT for externally visible symbols
+ if (!GV->hasHiddenVisibility() && !GV->hasProtectedVisibility() &&
+ !GV->hasInternalLinkage())
O << "@PLT";
}
if (Subtarget->isTargetCygMing() && GV->isDeclaration())
@@ -347,15 +456,15 @@
if (GV->hasExternalWeakLinkage())
ExtWeakSymbols.insert(GV);
-
+
int Offset = MO.getOffset();
if (Offset > 0)
- O << "+" << Offset;
+ O << '+' << Offset;
else if (Offset < 0)
O << Offset;
if (isThreadLocal) {
- if (TM.getRelocationModel() == Reloc::PIC_)
+ if (TM.getRelocationModel() == Reloc::PIC_ || Subtarget->is64Bit())
O << "@TLSGD"; // general dynamic TLS model
else
if (GV->isDeclaration())
@@ -363,7 +472,7 @@
else
O << "@NTPOFF"; // local exec TLS model
} else if (isMemOp) {
- if (printGOT(TM, Subtarget)) {
+ if (shouldPrintGOT(TM, Subtarget)) {
if (Subtarget->GVRequiresExtraLoad(GV, TM, false))
O << "@GOT";
else
@@ -395,9 +504,9 @@
bool needCloseParen = false;
std::string Name(TAI->getGlobalPrefix());
Name += MO.getSymbolName();
- if (isCallOp && printStub(TM, Subtarget)) {
+ if (isCallOp && shouldPrintStub(TM, Subtarget)) {
FnStubs.insert(Name);
- O << TAI->getPrivateGlobalPrefix() << Name << "$stub";
+ printSuffixedName(Name, "$stub");
return;
}
if (!isCallOp)
@@ -411,7 +520,7 @@
O << Name;
- if (printGOT(TM, Subtarget)) {
+ if (shouldPrintPLT(TM, Subtarget)) {
std::string GOTName(TAI->getGlobalPrefix());
GOTName+="_GLOBAL_OFFSET_TABLE_";
if (Name == GOTName)
@@ -423,7 +532,7 @@
// popl %some_register
// addl $_GLOBAL_ADDRESS_TABLE_ + [.-piclabel], %some_register
O << " + [.-"
- << getPICLabelString(getFunctionNumber(), TAI, Subtarget) << "]";
+ << getPICLabelString(getFunctionNumber(), TAI, Subtarget) << ']';
if (isCallOp)
O << "@PLT";
@@ -478,7 +587,7 @@
if (IndexReg.getReg() || BaseReg.getReg()) {
unsigned ScaleVal = MI->getOperand(Op+1).getImm();
unsigned BaseRegOperand = 0, IndexRegOperand = 2;
-
+
// There are cases where we can end up with ESP/RSP in the indexreg slot.
// If this happens, swap the base/index register to support assemblers that
// don't work when the index is *SP.
@@ -487,22 +596,22 @@
std::swap(BaseReg, IndexReg);
std::swap(BaseRegOperand, IndexRegOperand);
}
-
- O << "(";
+
+ O << '(';
if (BaseReg.getReg())
printOperand(MI, Op+BaseRegOperand, Modifier);
if (IndexReg.getReg()) {
- O << ",";
+ O << ',';
printOperand(MI, Op+IndexRegOperand, Modifier);
if (ScaleVal != 1)
- O << "," << ScaleVal;
+ O << ',' << ScaleVal;
}
- O << ")";
+ O << ')';
}
}
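printMemReference, which this hunk touches, renders an x86 memory operand in AT&T syntax, i.e. disp(base,index,scale), and the swap a few lines up works around assemblers that refuse %esp/%rsp in the index slot. A self-contained sketch of that formatting with plain strings instead of MachineOperands (hypothetical attMemRef helper, illustration only; a real swap is only valid when the scale is 1):

#include <iostream>
#include <sstream>
#include <string>

// Format an x86 memory operand in AT&T syntax: disp(base,index,scale), with
// absent pieces omitted. If the stack pointer ended up in the index slot,
// swap base and index (only safe when the scale is 1, as this sketch assumes).
static std::string attMemRef(long Disp, std::string Base, std::string Index,
                             unsigned Scale) {
  if (Index == "%esp" || Index == "%rsp")
    Base.swap(Index);
  std::ostringstream OS;
  if (Disp != 0 || (Base.empty() && Index.empty()))
    OS << Disp;
  if (!Base.empty() || !Index.empty()) {
    OS << '(' << Base;
    if (!Index.empty()) {
      OS << ',' << Index;
      if (Scale != 1)
        OS << ',' << Scale;
    }
    OS << ')';
  }
  return OS.str();
}

int main() {
  std::cout << attMemRef(8, "%rbp", "", 1) << "\n";      // 8(%rbp)
  std::cout << attMemRef(0, "%rax", "%rcx", 4) << "\n";  // (%rax,%rcx,4)
  std::cout << attMemRef(16, "%rsi", "%rsp", 1) << "\n"; // 16(%rsp,%rsi)
  return 0;
}
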
-void X86ATTAsmPrinter::printPICJumpTableSetLabel(unsigned uid,
+void X86ATTAsmPrinter::printPICJumpTableSetLabel(unsigned uid,
const MachineBasicBlock *MBB) const {
if (!TAI->getSetDirective())
return;
@@ -510,12 +619,12 @@
// We don't need .set machinery if we have GOT-style relocations
if (Subtarget->isPICStyleGOT())
return;
-
+
O << TAI->getSetDirective() << ' ' << TAI->getPrivateGlobalPrefix()
<< getFunctionNumber() << '_' << uid << "_set_" << MBB->getNumber() << ',';
printBasicBlockLabel(MBB, false, false, false);
if (Subtarget->isPICStyleRIPRel())
- O << '-' << TAI->getPrivateGlobalPrefix() << "JTI" << getFunctionNumber()
+ O << '-' << TAI->getPrivateGlobalPrefix() << "JTI" << getFunctionNumber()
<< '_' << uid << '\n';
else
O << '-' << getPICLabelString(getFunctionNumber(), TAI, Subtarget) << '\n';
@@ -523,14 +632,14 @@
void X86ATTAsmPrinter::printPICLabel(const MachineInstr *MI, unsigned Op) {
std::string label = getPICLabelString(getFunctionNumber(), TAI, Subtarget);
- O << label << "\n" << label << ":";
+ O << label << '\n' << label << ':';
}
void X86ATTAsmPrinter::printPICJumpTableEntry(const MachineJumpTableInfo *MJTI,
const MachineBasicBlock *MBB,
- unsigned uid) const
-{
+ unsigned uid) const
+{
const char *JTEntryDirective = MJTI->getEntrySize() == 4 ?
TAI->getData32bitsDirective() : TAI->getData64bitsDirective();
@@ -580,12 +689,12 @@
/// PrintAsmOperand - Print out an operand for an inline asm expression.
///
bool X86ATTAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
- unsigned AsmVariant,
+ unsigned AsmVariant,
const char *ExtraCode) {
// Does this asm operand have a single letter operand modifier?
if (ExtraCode && ExtraCode[0]) {
if (ExtraCode[1] != 0) return true; // Unknown modifier.
-
+
switch (ExtraCode[0]) {
default: return true; // Unknown modifier.
case 'c': // Don't print "$" before a global var name or constant.
@@ -600,24 +709,24 @@
return printAsmMRegister(MI->getOperand(OpNo), ExtraCode[0]);
printOperand(MI, OpNo);
return false;
-
+
case 'P': // Don't print @PLT, but do print as memory.
printOperand(MI, OpNo, "mem");
return false;
}
}
-
+
printOperand(MI, OpNo);
return false;
}
bool X86ATTAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
unsigned OpNo,
- unsigned AsmVariant,
+ unsigned AsmVariant,
const char *ExtraCode) {
if (ExtraCode && ExtraCode[0]) {
if (ExtraCode[1] != 0) return true; // Unknown modifier.
-
+
switch (ExtraCode[0]) {
default: return true; // Unknown modifier.
case 'b': // Print QImode register
@@ -643,6 +752,329 @@
printInstruction(MI);
}
+/// doInitialization
+bool X86ATTAsmPrinter::doInitialization(Module &M) {
+ if (TAI->doesSupportDebugInformation()) {
+ // Emit initial debug information.
+ DW.BeginModule(&M);
+ }
+
+ bool Result = AsmPrinter::doInitialization(M);
+
+ // Darwin wants symbols to be quoted if they have complex names.
+ if (Subtarget->isTargetDarwin())
+ Mang->setUseQuotes(true);
+
+ return Result;
+}
+
+
+void X86ATTAsmPrinter::printModuleLevelGV(const GlobalVariable* GVar) {
+ const TargetData *TD = TM.getTargetData();
+
+ if (!GVar->hasInitializer())
+ return; // External globals require no code
+
+ // Check to see if this is a special global used by LLVM, if so, emit it.
+ if (EmitSpecialLLVMGlobal(GVar)) {
+ if (Subtarget->isTargetDarwin() &&
+ TM.getRelocationModel() == Reloc::Static) {
+ if (GVar->getName() == "llvm.global_ctors")
+ O << ".reference .constructors_used\n";
+ else if (GVar->getName() == "llvm.global_dtors")
+ O << ".reference .destructors_used\n";
+ }
+ return;
+ }
+
+ std::string name = Mang->getValueName(GVar);
+ Constant *C = GVar->getInitializer();
+ const Type *Type = C->getType();
+ unsigned Size = TD->getABITypeSize(Type);
+ unsigned Align = TD->getPreferredAlignmentLog(GVar);
+
+ if (GVar->hasHiddenVisibility()) {
+ if (const char *Directive = TAI->getHiddenDirective())
+ O << Directive << name << '\n';
+ } else if (GVar->hasProtectedVisibility()) {
+ if (const char *Directive = TAI->getProtectedDirective())
+ O << Directive << name << '\n';
+ }
+
+ if (Subtarget->isTargetELF())
+ O << "\t.type\t" << name << ", at object\n";
+
+ if (C->isNullValue() && !GVar->hasSection()) {
+ if (GVar->hasExternalLinkage()) {
+ if (const char *Directive = TAI->getZeroFillDirective()) {
+ O << "\t.globl " << name << '\n';
+ O << Directive << "__DATA, __common, " << name << ", "
+ << Size << ", " << Align << '\n';
+ return;
+ }
+ }
+
+ if (!GVar->isThreadLocal() &&
+ (GVar->hasInternalLinkage() || GVar->hasWeakLinkage() ||
+ GVar->hasLinkOnceLinkage() || GVar->hasCommonLinkage())) {
+ if (Size == 0) Size = 1; // .comm Foo, 0 is undefined, avoid it.
+ if (!NoZerosInBSS && TAI->getBSSSection())
+ SwitchToDataSection(TAI->getBSSSection(), GVar);
+ else
+ SwitchToDataSection(TAI->getDataSection(), GVar);
+ if (TAI->getLCOMMDirective() != NULL) {
+ if (GVar->hasInternalLinkage()) {
+ O << TAI->getLCOMMDirective() << name << ',' << Size;
+ if (Subtarget->isTargetDarwin())
+ O << ',' << Align;
+ } else if (Subtarget->isTargetDarwin() && !GVar->hasCommonLinkage()) {
+ O << "\t.globl " << name << '\n'
+ << TAI->getWeakDefDirective() << name << '\n';
+ SwitchToDataSection("\t.section __DATA,__datacoal_nt,coalesced", GVar);
+ EmitAlignment(Align, GVar);
+ O << name << ":\t\t\t\t" << TAI->getCommentString() << ' ';
+ PrintUnmangledNameSafely(GVar, O);
+ O << '\n';
+ EmitGlobalConstant(C);
+ return;
+ } else {
+ O << TAI->getCOMMDirective() << name << ',' << Size;
+
+ // Leopard and above support aligned common symbols.
+ if (Subtarget->getDarwinVers() >= 9)
+ O << ',' << Align;
+ }
+ } else {
+ if (!Subtarget->isTargetCygMing()) {
+ if (GVar->hasInternalLinkage())
+ O << "\t.local\t" << name << '\n';
+ }
+ O << TAI->getCOMMDirective() << name << ',' << Size;
+ if (TAI->getCOMMDirectiveTakesAlignment())
+ O << ',' << (TAI->getAlignmentIsInBytes() ? (1 << Align) : Align);
+ }
+ O << "\t\t" << TAI->getCommentString() << ' ';
+ PrintUnmangledNameSafely(GVar, O);
+ O << '\n';
+ return;
+ }
+ }
+
+ switch (GVar->getLinkage()) {
+ case GlobalValue::CommonLinkage:
+ case GlobalValue::LinkOnceLinkage:
+ case GlobalValue::WeakLinkage:
+ if (Subtarget->isTargetDarwin()) {
+ O << "\t.globl " << name << '\n'
+ << TAI->getWeakDefDirective() << name << '\n';
+ if (!GVar->isConstant())
+ SwitchToDataSection("\t.section __DATA,__datacoal_nt,coalesced", GVar);
+ else {
+ const ArrayType *AT = dyn_cast<ArrayType>(Type);
+ if (AT && AT->getElementType()==Type::Int8Ty)
+ SwitchToDataSection("\t.section __TEXT,__const_coal,coalesced", GVar);
+ else
+ SwitchToDataSection("\t.section __DATA,__const_coal,coalesced", GVar);
+ }
+ } else if (Subtarget->isTargetCygMing()) {
+ std::string SectionName(".section\t.data$linkonce." +
+ name +
+ ",\"aw\"");
+ SwitchToDataSection(SectionName.c_str(), GVar);
+ O << "\t.globl\t" << name << "\n"
+ "\t.linkonce same_size\n";
+ } else {
+ std::string SectionName("\t.section\t.llvm.linkonce.d." +
+ name +
+ ",\"aw\", at progbits");
+ SwitchToDataSection(SectionName.c_str(), GVar);
+ O << "\t.weak\t" << name << '\n';
+ }
+ break;
+ case GlobalValue::DLLExportLinkage:
+ case GlobalValue::AppendingLinkage:
+ // FIXME: appending linkage variables should go into a section of
+ // their name or something. For now, just emit them as external.
+ case GlobalValue::ExternalLinkage:
+ // If external or appending, declare as a global symbol
+ O << "\t.globl " << name << '\n';
+ // FALL THROUGH
+ case GlobalValue::InternalLinkage: {
+ if (GVar->isConstant()) {
+ const ConstantArray *CVA = dyn_cast<ConstantArray>(C);
+ if (TAI->getCStringSection() && CVA && CVA->isCString()) {
+ SwitchToDataSection(TAI->getCStringSection(), GVar);
+ break;
+ }
+ }
+ // FIXME: special handling for ".ctors" & ".dtors" sections
+ if (GVar->hasSection() &&
+ (GVar->getSection() == ".ctors" || GVar->getSection() == ".dtors")) {
+ std::string SectionName = ".section " + GVar->getSection();
+
+ if (Subtarget->isTargetCygMing()) {
+ SectionName += ",\"aw\"";
+ } else {
+ assert(!Subtarget->isTargetDarwin());
+ SectionName += ",\"aw\", at progbits";
+ }
+ SwitchToDataSection(SectionName.c_str());
+ } else if (GVar->hasSection() && Subtarget->isTargetDarwin()) {
+ // Honor all section names on Darwin; ObjC uses this
+ std::string SectionName = ".section " + GVar->getSection();
+ SwitchToDataSection(SectionName.c_str());
+ } else {
+ if (C->isNullValue() && !NoZerosInBSS && TAI->getBSSSection())
+ SwitchToDataSection(GVar->isThreadLocal() ? TAI->getTLSBSSSection() :
+ TAI->getBSSSection(), GVar);
+ else if (!GVar->isConstant())
+ SwitchToDataSection(GVar->isThreadLocal() ? TAI->getTLSDataSection() :
+ TAI->getDataSection(), GVar);
+ else if (GVar->isThreadLocal())
+ SwitchToDataSection(TAI->getTLSDataSection());
+ else {
+ // Read-only data.
+ bool HasReloc = C->ContainsRelocations();
+ if (HasReloc &&
+ Subtarget->isTargetDarwin() &&
+ TM.getRelocationModel() != Reloc::Static)
+ SwitchToDataSection("\t.const_data\n");
+ else if (!HasReloc && Size == 4 &&
+ TAI->getFourByteConstantSection())
+ SwitchToDataSection(TAI->getFourByteConstantSection(), GVar);
+ else if (!HasReloc && Size == 8 &&
+ TAI->getEightByteConstantSection())
+ SwitchToDataSection(TAI->getEightByteConstantSection(), GVar);
+ else if (!HasReloc && Size == 16 &&
+ TAI->getSixteenByteConstantSection())
+ SwitchToDataSection(TAI->getSixteenByteConstantSection(), GVar);
+ else if (TAI->getReadOnlySection())
+ SwitchToDataSection(TAI->getReadOnlySection(), GVar);
+ else
+ SwitchToDataSection(TAI->getDataSection(), GVar);
+ }
+ }
+
+ break;
+ }
+ default:
+ assert(0 && "Unknown linkage type!");
+ }
+
+ EmitAlignment(Align, GVar);
+ O << name << ":\t\t\t\t" << TAI->getCommentString() << ' ';
+ PrintUnmangledNameSafely(GVar, O);
+ O << '\n';
+ if (TAI->hasDotTypeDotSizeDirective())
+ O << "\t.size\t" << name << ", " << Size << '\n';
+
+ // If the initializer is an extern weak symbol, remember to emit the weak
+ // reference!
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
+ if (GV->hasExternalWeakLinkage())
+ ExtWeakSymbols.insert(GV);
+
+ EmitGlobalConstant(C);
+}
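One detail in the zero-initializer path above that is easy to miss: the alignment from getPreferredAlignmentLog is a log2 value, and some assemblers want the .comm alignment argument in bytes, hence the (1 << Align) conversion; a zero-sized object is also bumped to one byte because ".comm foo, 0" is undefined. A small sketch of just that directive formatting, with a hypothetical commDirective helper:

#include <iostream>
#include <sstream>
#include <string>

// Emit a ".comm name,size,align" line. AlignLog2 is the log2 alignment that
// was computed for the global; some assemblers expect the alignment argument
// in bytes rather than as a power of two.
static std::string commDirective(const std::string &Name, unsigned Size,
                                 unsigned AlignLog2, bool AlignIsInBytes) {
  if (Size == 0)
    Size = 1;  // ".comm foo, 0" is undefined; avoid it.
  std::ostringstream OS;
  OS << "\t.comm\t" << Name << ',' << Size << ','
     << (AlignIsInBytes ? (1u << AlignLog2) : AlignLog2);
  return OS.str();
}

int main() {
  // A 64-byte buffer aligned to 16 bytes: AlignLog2 == 4.
  std::cout << commDirective("buf", 64, 4, true)  << "\n"; // .comm buf,64,16
  std::cout << commDirective("buf", 64, 4, false) << "\n"; // .comm buf,64,4
  return 0;
}
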
+
+
+bool X86ATTAsmPrinter::doFinalization(Module &M) {
+ // Print out module-level global variables here.
+ for (Module::const_global_iterator I = M.global_begin(), E = M.global_end();
+ I != E; ++I) {
+ printModuleLevelGV(I);
+
+ if (I->hasDLLExportLinkage())
+ DLLExportedGVs.insert(Mang->makeNameProper(I->getName(),""));
+ }
+
+ // Output linker support code for dllexported globals
+ if (!DLLExportedGVs.empty())
+ SwitchToDataSection(".section .drectve");
+
+ for (StringSet<>::iterator i = DLLExportedGVs.begin(),
+ e = DLLExportedGVs.end();
+ i != e; ++i)
+ O << "\t.ascii \" -export:" << i->getKeyData() << ",data\"\n";
+
+ if (!DLLExportedFns.empty()) {
+ SwitchToDataSection(".section .drectve");
+ }
+
+ for (StringSet<>::iterator i = DLLExportedFns.begin(),
+ e = DLLExportedFns.end();
+ i != e; ++i)
+ O << "\t.ascii \" -export:" << i->getKeyData() << "\"\n";
+
+ if (Subtarget->isTargetDarwin()) {
+ SwitchToDataSection("");
+
+ // Output stubs for dynamically-linked functions
+ unsigned j = 1;
+ for (StringSet<>::iterator i = FnStubs.begin(), e = FnStubs.end();
+ i != e; ++i, ++j) {
+ SwitchToDataSection("\t.section __IMPORT,__jump_table,symbol_stubs,"
+ "self_modifying_code+pure_instructions,5", 0);
+ std::string p = i->getKeyData();
+ printSuffixedName(p, "$stub");
+ O << ":\n"
+ "\t.indirect_symbol " << p << "\n"
+ "\thlt ; hlt ; hlt ; hlt ; hlt\n";
+ }
+
+ O << '\n';
+
+ if (TAI->doesSupportExceptionHandling() && MMI && !Subtarget->is64Bit()) {
+ // Add the (possibly multiple) personalities to the set of global values.
+ // Only referenced functions get into the Personalities list.
+ const std::vector<Function *>& Personalities = MMI->getPersonalities();
+
+ for (std::vector<Function *>::const_iterator I = Personalities.begin(),
+ E = Personalities.end(); I != E; ++I)
+ if (*I) GVStubs.insert('_' + (*I)->getName());
+ }
+
+ // Output stubs for external and common global variables.
+ if (!GVStubs.empty())
+ SwitchToDataSection(
+ "\t.section __IMPORT,__pointers,non_lazy_symbol_pointers");
+ for (StringSet<>::iterator i = GVStubs.begin(), e = GVStubs.end();
+ i != e; ++i) {
+ std::string p = i->getKeyData();
+ printSuffixedName(p, "$non_lazy_ptr");
+ O << ":\n"
+ "\t.indirect_symbol " << p << "\n"
+ "\t.long\t0\n";
+ }
+
+ // Emit final debug information.
+ DW.EndModule();
+
+ // Funny Darwin hack: This flag tells the linker that no global symbols
+ // contain code that falls through to other global symbols (e.g. the obvious
+ // implementation of multiple entry points). If this doesn't occur, the
+ // linker can safely perform dead code stripping. Since LLVM never
+ // generates code that does this, it is always safe to set.
+ O << "\t.subsections_via_symbols\n";
+ } else if (Subtarget->isTargetCygMing()) {
+ // Emit type information for external functions
+ for (StringSet<>::iterator i = FnStubs.begin(), e = FnStubs.end();
+ i != e; ++i) {
+ O << "\t.def\t " << i->getKeyData()
+ << ";\t.scl\t" << COFF::C_EXT
+ << ";\t.type\t" << (COFF::DT_FCN << COFF::N_BTSHFT)
+ << ";\t.endef\n";
+ }
+
+ // Emit final debug information.
+ DW.EndModule();
+ } else if (Subtarget->isTargetELF()) {
+ // Emit final debug information.
+ DW.EndModule();
+ }
+
+ return AsmPrinter::doFinalization(M);
+}
+
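The dllexport handling in doFinalization boils down to writing linker switches into a .drectve section, one "-export:" entry per exported symbol, with data exports carrying a trailing ",data". A tiny sketch of that string building, assuming hypothetical symbol lists rather than the printer's StringSets:

#include <iostream>
#include <string>
#include <vector>

// Build the .drectve payload the COFF linker reads: one "-export:" switch per
// dllexport'ed symbol; data symbols carry a trailing ",data" marker.
static void emitDrectve(std::ostream &OS,
                        const std::vector<std::string> &Fns,
                        const std::vector<std::string> &GVs) {
  if (Fns.empty() && GVs.empty())
    return;
  OS << "\t.section .drectve\n";
  for (size_t i = 0; i != GVs.size(); ++i)
    OS << "\t.ascii \" -export:" << GVs[i] << ",data\"\n";
  for (size_t i = 0; i != Fns.size(); ++i)
    OS << "\t.ascii \" -export:" << Fns[i] << "\"\n";
}

int main() {
  std::vector<std::string> Fns(1, "_foo@4");
  std::vector<std::string> GVs(1, "_table");
  emitDrectve(std::cout, Fns, GVs);
  return 0;
}
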
// Include the auto-generated portion of the assembly writer.
#include "X86GenAsmWriter.inc"
-
Modified: llvm/branches/non-call-eh/lib/Target/X86/X86ATTAsmPrinter.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/X86/X86ATTAsmPrinter.h?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/X86/X86ATTAsmPrinter.h (original)
+++ llvm/branches/non-call-eh/lib/Target/X86/X86ATTAsmPrinter.h Sun Jul 6 15:45:41 2008
@@ -14,21 +14,49 @@
#ifndef X86ATTASMPRINTER_H
#define X86ATTASMPRINTER_H
-#include "X86AsmPrinter.h"
+#include "X86.h"
+#include "X86MachineFunctionInfo.h"
+#include "X86TargetMachine.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/CodeGen/DwarfWriter.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/Support/Compiler.h"
namespace llvm {
struct MachineJumpTableInfo;
-
-struct VISIBILITY_HIDDEN X86ATTAsmPrinter : public X86SharedAsmPrinter {
- X86ATTAsmPrinter(std::ostream &O, X86TargetMachine &TM, const TargetAsmInfo *T)
- : X86SharedAsmPrinter(O, TM, T) { }
+
+struct VISIBILITY_HIDDEN X86ATTAsmPrinter : public AsmPrinter {
+ DwarfWriter DW;
+ MachineModuleInfo *MMI;
+
+ const X86Subtarget *Subtarget;
+
+ X86ATTAsmPrinter(std::ostream &O, X86TargetMachine &TM,
+ const TargetAsmInfo *T)
+ : AsmPrinter(O, TM, T), DW(O, this, T), MMI(0) {
+ Subtarget = &TM.getSubtarget<X86Subtarget>();
+ }
virtual const char *getPassName() const {
return "X86 AT&T-Style Assembly Printer";
}
+ void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ if (Subtarget->isTargetDarwin() ||
+ Subtarget->isTargetELF() ||
+ Subtarget->isTargetCygMing()) {
+ AU.addRequired<MachineModuleInfo>();
+ }
+ AsmPrinter::getAnalysisUsage(AU);
+ }
+
+ bool doInitialization(Module &M);
+ bool doFinalization(Module &M);
+
/// printInstruction - This method is automatically generated by tablegen
/// from the instruction set description. This method returns true if the
/// machine instruction was sufficiently described to print it, otherwise it
@@ -68,13 +96,13 @@
void printlea64_32mem(const MachineInstr *MI, unsigned OpNo) {
printMemReference(MI, OpNo, "subreg64");
}
-
+
bool printAsmMRegister(const MachineOperand &MO, const char Mode);
bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
unsigned AsmVariant, const char *ExtraCode);
bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
unsigned AsmVariant, const char *ExtraCode);
-
+
void printMachineInstruction(const MachineInstr *MI);
void printSSECC(const MachineInstr *MI, unsigned Op);
void printMemReference(const MachineInstr *MI, unsigned Op,
@@ -88,13 +116,39 @@
void printPICJumpTableEntry(const MachineJumpTableInfo *MJTI,
const MachineBasicBlock *MBB,
unsigned uid) const;
-
+
void printPICLabel(const MachineInstr *MI, unsigned Op);
+ void printModuleLevelGV(const GlobalVariable* GVar);
+
bool runOnMachineFunction(MachineFunction &F);
-
+
/// getSectionForFunction - Return the section that we should emit the
/// specified function body into.
virtual std::string getSectionForFunction(const Function &F) const;
+
+ void emitFunctionHeader(const MachineFunction &MF);
+
+ // Necessary for Darwin to print out the appropriate types of linker stubs
+ StringSet<> FnStubs, GVStubs, LinkOnceStubs;
+
+ // Necessary for dllexport support
+ StringSet<> DLLExportedFns, DLLExportedGVs;
+
+ // We have to propagate some information about MachineFunction to the
+ // AsmPrinter. That is fine while we are printing the function itself, since
+ // we have access to the MachineFunction and can get the appropriate
+ // MachineFunctionInfo. Unfortunately, it is not possible when we are only
+ // printing a reference to a Function (e.g. when calling it): there is no way
+ // to get the corresponding MachineFunction, which may not even have been
+ // created yet. That's why we keep this additional map to collect the
+ // necessary information.
+ //
+ // It is used e.g. for name decoration of stdcall and fastcall'ed functions,
+ // since the decoration depends on the arguments' sizes.
+ typedef std::map<const Function*, X86MachineFunctionInfo> FMFInfoMap;
+ FMFInfoMap FunctionInfoMap;
+
+ void decorateName(std::string& Name, const GlobalValue* GV);
};
} // end namespace llvm
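The comment block above describes what is effectively a lazily filled cache: when the printer needs per-function info at a call site whose MachineFunction was never built, it computes the info on demand and stores it in FunctionInfoMap. A bare-bones sketch of that lookup-or-compute pattern, with a hypothetical FnInfo type standing in for X86MachineFunctionInfo:

#include <iostream>
#include <map>
#include <string>
#include <utility>

// Stand-in for the per-function data the asm printer needs at call sites.
struct FnInfo {
  unsigned BytesToPop;
};

// Compute the info the expensive way; only done once per function.
static FnInfo computeInfo(const std::string &Fn) {
  FnInfo I;
  I.BytesToPop = static_cast<unsigned>(Fn.size()) * 4;  // placeholder rule
  return I;
}

// Lookup-or-compute: reuse the cached entry if present, otherwise fill it.
static const FnInfo &getInfo(std::map<std::string, FnInfo> &Cache,
                             const std::string &Fn) {
  std::map<std::string, FnInfo>::iterator It = Cache.find(Fn);
  if (It == Cache.end())
    It = Cache.insert(std::make_pair(Fn, computeInfo(Fn))).first;
  return It->second;
}

int main() {
  std::map<std::string, FnInfo> Cache;
  std::cout << getInfo(Cache, "foo").BytesToPop << "\n";  // computed
  std::cout << getInfo(Cache, "foo").BytesToPop << "\n";  // cached
  return 0;
}
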
Modified: llvm/branches/non-call-eh/lib/Target/X86/X86AsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/X86/X86AsmPrinter.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/X86/X86AsmPrinter.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/X86/X86AsmPrinter.cpp Sun Jul 6 15:45:41 2008
@@ -14,423 +14,11 @@
//
//===----------------------------------------------------------------------===//
-#include "X86AsmPrinter.h"
#include "X86ATTAsmPrinter.h"
-#include "X86COFF.h"
#include "X86IntelAsmPrinter.h"
-#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
-#include "llvm/ADT/StringExtras.h"
-#include "llvm/CallingConv.h"
-#include "llvm/Constants.h"
-#include "llvm/Module.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/ParameterAttributes.h"
-#include "llvm/Type.h"
-#include "llvm/Assembly/Writer.h"
-#include "llvm/Support/Mangler.h"
-#include "llvm/Target/TargetAsmInfo.h"
-#include "llvm/Target/TargetOptions.h"
using namespace llvm;
-static X86MachineFunctionInfo calculateFunctionInfo(const Function *F,
- const TargetData *TD) {
- X86MachineFunctionInfo Info;
- uint64_t Size = 0;
-
- switch (F->getCallingConv()) {
- case CallingConv::X86_StdCall:
- Info.setDecorationStyle(StdCall);
- break;
- case CallingConv::X86_FastCall:
- Info.setDecorationStyle(FastCall);
- break;
- default:
- return Info;
- }
-
- unsigned argNum = 1;
- for (Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end();
- AI != AE; ++AI, ++argNum) {
- const Type* Ty = AI->getType();
-
- // 'Dereference' type in case of byval parameter attribute
- if (F->paramHasAttr(argNum, ParamAttr::ByVal))
- Ty = cast<PointerType>(Ty)->getElementType();
-
- // Size should be aligned to DWORD boundary
- Size += ((TD->getABITypeSize(Ty) + 3)/4)*4;
- }
-
- // We're not supporting tooooo huge arguments :)
- Info.setBytesToPopOnReturn((unsigned int)Size);
- return Info;
-}
-
-
-/// decorateName - Query FunctionInfoMap and use this information for various
-/// name decoration.
-void X86SharedAsmPrinter::decorateName(std::string &Name,
- const GlobalValue *GV) {
- const Function *F = dyn_cast<Function>(GV);
- if (!F) return;
-
- // We don't want to decorate non-stdcall or non-fastcall functions right now
- unsigned CC = F->getCallingConv();
- if (CC != CallingConv::X86_StdCall && CC != CallingConv::X86_FastCall)
- return;
-
- // Decorate names only when we're targeting Cygwin/Mingw32 targets
- if (!Subtarget->isTargetCygMing())
- return;
-
- FMFInfoMap::const_iterator info_item = FunctionInfoMap.find(F);
-
- const X86MachineFunctionInfo *Info;
- if (info_item == FunctionInfoMap.end()) {
- // Calculate apropriate function info and populate map
- FunctionInfoMap[F] = calculateFunctionInfo(F, TM.getTargetData());
- Info = &FunctionInfoMap[F];
- } else {
- Info = &info_item->second;
- }
-
- const FunctionType *FT = F->getFunctionType();
- switch (Info->getDecorationStyle()) {
- case None:
- break;
- case StdCall:
- // "Pure" variadic functions do not receive @0 suffix.
- if (!FT->isVarArg() || (FT->getNumParams() == 0) ||
- (FT->getNumParams() == 1 && F->hasStructRetAttr()))
- Name += '@' + utostr_32(Info->getBytesToPopOnReturn());
- break;
- case FastCall:
- // "Pure" variadic functions do not receive @0 suffix.
- if (!FT->isVarArg() || (FT->getNumParams() == 0) ||
- (FT->getNumParams() == 1 && F->hasStructRetAttr()))
- Name += '@' + utostr_32(Info->getBytesToPopOnReturn());
-
- if (Name[0] == '_') {
- Name[0] = '@';
- } else {
- Name = '@' + Name;
- }
- break;
- default:
- assert(0 && "Unsupported DecorationStyle");
- }
-}
-
-/// doInitialization
-bool X86SharedAsmPrinter::doInitialization(Module &M) {
- if (TAI->doesSupportDebugInformation()) {
- // Emit initial debug information.
- DW.BeginModule(&M);
- }
-
- bool Result = AsmPrinter::doInitialization(M);
-
- // Darwin wants symbols to be quoted if they have complex names.
- if (Subtarget->isTargetDarwin())
- Mang->setUseQuotes(true);
-
- return Result;
-}
-
-/// PrintUnmangledNameSafely - Print out the printable characters in the name.
-/// Don't print things like \n or \0.
-static void PrintUnmangledNameSafely(const Value *V, std::ostream &OS) {
- for (const char *Name = V->getNameStart(), *E = Name+V->getNameLen();
- Name != E; ++Name)
- if (isprint(*Name))
- OS << *Name;
-}
-
-bool X86SharedAsmPrinter::doFinalization(Module &M) {
- // Note: this code is not shared by the Intel printer as it is too different
- // from how MASM does things. When making changes here don't forget to look
- // at X86IntelAsmPrinter::doFinalization().
- const TargetData *TD = TM.getTargetData();
-
- // Print out module-level global variables here.
- for (Module::const_global_iterator I = M.global_begin(), E = M.global_end();
- I != E; ++I) {
- if (!I->hasInitializer())
- continue; // External global require no code
-
- // Check to see if this is a special global used by LLVM, if so, emit it.
- if (EmitSpecialLLVMGlobal(I)) {
- if (Subtarget->isTargetDarwin() &&
- TM.getRelocationModel() == Reloc::Static) {
- if (I->getName() == "llvm.global_ctors")
- O << ".reference .constructors_used\n";
- else if (I->getName() == "llvm.global_dtors")
- O << ".reference .destructors_used\n";
- }
- continue;
- }
-
- std::string name = Mang->getValueName(I);
- Constant *C = I->getInitializer();
- const Type *Type = C->getType();
- unsigned Size = TD->getABITypeSize(Type);
- unsigned Align = TD->getPreferredAlignmentLog(I);
-
- if (I->hasHiddenVisibility()) {
- if (const char *Directive = TAI->getHiddenDirective())
- O << Directive << name << "\n";
- } else if (I->hasProtectedVisibility()) {
- if (const char *Directive = TAI->getProtectedDirective())
- O << Directive << name << "\n";
- }
-
- if (Subtarget->isTargetELF())
- O << "\t.type\t" << name << ", at object\n";
-
- if (C->isNullValue() && !I->hasSection()) {
- if (I->hasExternalLinkage()) {
- if (const char *Directive = TAI->getZeroFillDirective()) {
- O << "\t.globl " << name << "\n";
- O << Directive << "__DATA, __common, " << name << ", "
- << Size << ", " << Align << "\n";
- continue;
- }
- }
-
- if (!I->isThreadLocal() &&
- (I->hasInternalLinkage() || I->hasWeakLinkage() ||
- I->hasLinkOnceLinkage())) {
- if (Size == 0) Size = 1; // .comm Foo, 0 is undefined, avoid it.
- if (!NoZerosInBSS && TAI->getBSSSection())
- SwitchToDataSection(TAI->getBSSSection(), I);
- else
- SwitchToDataSection(TAI->getDataSection(), I);
- if (TAI->getLCOMMDirective() != NULL) {
- if (I->hasInternalLinkage()) {
- O << TAI->getLCOMMDirective() << name << "," << Size;
- if (Subtarget->isTargetDarwin())
- O << "," << Align;
- } else {
- O << TAI->getCOMMDirective() << name << "," << Size;
-
- // Leopard and above support aligned common symbols.
- if (Subtarget->getDarwinVers() >= 9)
- O << "," << Align;
- }
- } else {
- if (!Subtarget->isTargetCygMing()) {
- if (I->hasInternalLinkage())
- O << "\t.local\t" << name << "\n";
- }
- O << TAI->getCOMMDirective() << name << "," << Size;
- if (TAI->getCOMMDirectiveTakesAlignment())
- O << "," << (TAI->getAlignmentIsInBytes() ? (1 << Align) : Align);
- }
- O << "\t\t" << TAI->getCommentString() << " ";
- PrintUnmangledNameSafely(I, O);
- O << "\n";
- continue;
- }
- }
-
- switch (I->getLinkage()) {
- case GlobalValue::LinkOnceLinkage:
- case GlobalValue::WeakLinkage:
- if (Subtarget->isTargetDarwin()) {
- O << "\t.globl " << name << "\n"
- << TAI->getWeakDefDirective() << name << "\n";
- SwitchToDataSection("\t.section __DATA,__datacoal_nt,coalesced", I);
- } else if (Subtarget->isTargetCygMing()) {
- std::string SectionName(".section\t.data$linkonce." +
- name +
- ",\"aw\"");
- SwitchToDataSection(SectionName.c_str(), I);
- O << "\t.globl\t" << name << "\n"
- << "\t.linkonce same_size\n";
- } else {
- std::string SectionName("\t.section\t.llvm.linkonce.d." +
- name +
- ",\"aw\", at progbits");
- SwitchToDataSection(SectionName.c_str(), I);
- O << "\t.weak\t" << name << "\n";
- }
- break;
- case GlobalValue::DLLExportLinkage:
- DLLExportedGVs.insert(Mang->makeNameProper(I->getName(),""));
- // FALL THROUGH
- case GlobalValue::AppendingLinkage:
- // FIXME: appending linkage variables should go into a section of
- // their name or something. For now, just emit them as external.
- case GlobalValue::ExternalLinkage:
- // If external or appending, declare as a global symbol
- O << "\t.globl " << name << "\n";
- // FALL THROUGH
- case GlobalValue::InternalLinkage: {
- if (I->isConstant()) {
- const ConstantArray *CVA = dyn_cast<ConstantArray>(C);
- if (TAI->getCStringSection() && CVA && CVA->isCString()) {
- SwitchToDataSection(TAI->getCStringSection(), I);
- break;
- }
- }
- // FIXME: special handling for ".ctors" & ".dtors" sections
- if (I->hasSection() &&
- (I->getSection() == ".ctors" ||
- I->getSection() == ".dtors")) {
- std::string SectionName = ".section " + I->getSection();
-
- if (Subtarget->isTargetCygMing()) {
- SectionName += ",\"aw\"";
- } else {
- assert(!Subtarget->isTargetDarwin());
- SectionName += ",\"aw\", at progbits";
- }
- SwitchToDataSection(SectionName.c_str());
- } else if (I->hasSection() && Subtarget->isTargetDarwin()) {
- // Honor all section names on Darwin; ObjC uses this
- std::string SectionName = ".section " + I->getSection();
- SwitchToDataSection(SectionName.c_str());
- } else {
- if (C->isNullValue() && !NoZerosInBSS && TAI->getBSSSection())
- SwitchToDataSection(I->isThreadLocal() ? TAI->getTLSBSSSection() :
- TAI->getBSSSection(), I);
- else if (!I->isConstant())
- SwitchToDataSection(I->isThreadLocal() ? TAI->getTLSDataSection() :
- TAI->getDataSection(), I);
- else if (I->isThreadLocal())
- SwitchToDataSection(TAI->getTLSDataSection());
- else {
- // Read-only data.
- bool HasReloc = C->ContainsRelocations();
- if (HasReloc &&
- Subtarget->isTargetDarwin() &&
- TM.getRelocationModel() != Reloc::Static)
- SwitchToDataSection("\t.const_data\n");
- else if (!HasReloc && Size == 4 &&
- TAI->getFourByteConstantSection())
- SwitchToDataSection(TAI->getFourByteConstantSection(), I);
- else if (!HasReloc && Size == 8 &&
- TAI->getEightByteConstantSection())
- SwitchToDataSection(TAI->getEightByteConstantSection(), I);
- else if (!HasReloc && Size == 16 &&
- TAI->getSixteenByteConstantSection())
- SwitchToDataSection(TAI->getSixteenByteConstantSection(), I);
- else if (TAI->getReadOnlySection())
- SwitchToDataSection(TAI->getReadOnlySection(), I);
- else
- SwitchToDataSection(TAI->getDataSection(), I);
- }
- }
-
- break;
- }
- default:
- assert(0 && "Unknown linkage type!");
- }
-
- EmitAlignment(Align, I);
- O << name << ":\t\t\t\t" << TAI->getCommentString() << " ";
- PrintUnmangledNameSafely(I, O);
- O << "\n";
- if (TAI->hasDotTypeDotSizeDirective())
- O << "\t.size\t" << name << ", " << Size << "\n";
- // If the initializer is a extern weak symbol, remember to emit the weak
- // reference!
- if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
- if (GV->hasExternalWeakLinkage())
- ExtWeakSymbols.insert(GV);
-
- EmitGlobalConstant(C);
- }
-
- // Output linker support code for dllexported globals
- if (!DLLExportedGVs.empty()) {
- SwitchToDataSection(".section .drectve");
- }
-
- for (std::set<std::string>::iterator i = DLLExportedGVs.begin(),
- e = DLLExportedGVs.end();
- i != e; ++i) {
- O << "\t.ascii \" -export:" << *i << ",data\"\n";
- }
-
- if (!DLLExportedFns.empty()) {
- SwitchToDataSection(".section .drectve");
- }
-
- for (std::set<std::string>::iterator i = DLLExportedFns.begin(),
- e = DLLExportedFns.end();
- i != e; ++i) {
- O << "\t.ascii \" -export:" << *i << "\"\n";
- }
-
- if (Subtarget->isTargetDarwin()) {
- SwitchToDataSection("");
-
- // Output stubs for dynamically-linked functions
- unsigned j = 1;
- for (std::set<std::string>::iterator i = FnStubs.begin(), e = FnStubs.end();
- i != e; ++i, ++j) {
- SwitchToDataSection("\t.section __IMPORT,__jump_table,symbol_stubs,"
- "self_modifying_code+pure_instructions,5", 0);
- O << "L" << *i << "$stub:\n";
- O << "\t.indirect_symbol " << *i << "\n";
- O << "\thlt ; hlt ; hlt ; hlt ; hlt\n";
- }
-
- O << "\n";
-
- if (TAI->doesSupportExceptionHandling() && MMI && !Subtarget->is64Bit()) {
- // Add the (possibly multiple) personalities to the set of global values.
- // Only referenced functions get into the Personalities list.
- const std::vector<Function *>& Personalities = MMI->getPersonalities();
-
- for (std::vector<Function *>::const_iterator I = Personalities.begin(),
- E = Personalities.end(); I != E; ++I)
- if (*I) GVStubs.insert("_" + (*I)->getName());
- }
-
- // Output stubs for external and common global variables.
- if (!GVStubs.empty())
- SwitchToDataSection(
- "\t.section __IMPORT,__pointers,non_lazy_symbol_pointers");
- for (std::set<std::string>::iterator i = GVStubs.begin(), e = GVStubs.end();
- i != e; ++i) {
- O << "L" << *i << "$non_lazy_ptr:\n";
- O << "\t.indirect_symbol " << *i << "\n";
- O << "\t.long\t0\n";
- }
-
- // Emit final debug information.
- DW.EndModule();
-
- // Funny Darwin hack: This flag tells the linker that no global symbols
- // contain code that falls through to other global symbols (e.g. the obvious
- // implementation of multiple entry points). If this doesn't occur, the
- // linker can safely perform dead code stripping. Since LLVM never
- // generates code that does this, it is always safe to set.
- O << "\t.subsections_via_symbols\n";
- } else if (Subtarget->isTargetCygMing()) {
- // Emit type information for external functions
- for (std::set<std::string>::iterator i = FnStubs.begin(), e = FnStubs.end();
- i != e; ++i) {
- O << "\t.def\t " << *i
- << ";\t.scl\t" << COFF::C_EXT
- << ";\t.type\t" << (COFF::DT_FCN << COFF::N_BTSHFT)
- << ";\t.endef\n";
- }
-
- // Emit final debug information.
- DW.EndModule();
- } else if (Subtarget->isTargetELF()) {
- // Emit final debug information.
- DW.EndModule();
- }
-
- return AsmPrinter::doFinalization(M);
-}
-
/// createX86CodePrinterPass - Returns a pass that prints the X86 assembly code
/// for a MachineFunction to the given output stream, using the given target
/// machine description.
Modified: llvm/branches/non-call-eh/lib/Target/X86/X86AsmPrinter.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/X86/X86AsmPrinter.h?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/X86/X86AsmPrinter.h (original)
+++ llvm/branches/non-call-eh/lib/Target/X86/X86AsmPrinter.h Sun Jul 6 15:45:41 2008
@@ -1,98 +0,0 @@
-//===-- X86AsmPrinter.h - Convert X86 LLVM code to Intel assembly ---------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file the shared super class printer that converts from our internal
-// representation of machine-dependent LLVM code to Intel and AT&T format
-// assembly language. This printer is the output mechanism used by `llc'.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef X86ASMPRINTER_H
-#define X86ASMPRINTER_H
-
-#include "X86.h"
-#include "X86MachineFunctionInfo.h"
-#include "X86TargetMachine.h"
-#include "llvm/CodeGen/AsmPrinter.h"
-#include "llvm/CodeGen/DwarfWriter.h"
-#include "llvm/CodeGen/MachineModuleInfo.h"
-#include "llvm/Support/Compiler.h"
-#include <set>
-
-
-namespace llvm {
-
-struct VISIBILITY_HIDDEN X86SharedAsmPrinter : public AsmPrinter {
- DwarfWriter DW;
- MachineModuleInfo *MMI;
-
- X86SharedAsmPrinter(std::ostream &O, X86TargetMachine &TM,
- const TargetAsmInfo *T)
- : AsmPrinter(O, TM, T), DW(O, this, T), MMI(0) {
- Subtarget = &TM.getSubtarget<X86Subtarget>();
- }
-
- // We have to propagate some information about MachineFunction to
- // AsmPrinter. It's ok, when we're printing the function, since we have
- // access to MachineFunction and can get the appropriate MachineFunctionInfo.
- // Unfortunately, this is not possible when we're printing reference to
- // Function (e.g. calling it and so on). Even more, there is no way to get the
- // corresponding MachineFunctions: it can even be not created at all. That's
- // why we should use additional structure, when we're collecting all necessary
- // information.
- //
- // This structure is using e.g. for name decoration for stdcall & fastcall'ed
- // function, since we have to use arguments' size for decoration.
- typedef std::map<const Function*, X86MachineFunctionInfo> FMFInfoMap;
- FMFInfoMap FunctionInfoMap;
-
- void decorateName(std::string& Name, const GlobalValue* GV);
-
- bool doInitialization(Module &M);
- bool doFinalization(Module &M);
-
- void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesAll();
- if (Subtarget->isTargetDarwin() ||
- Subtarget->isTargetELF() ||
- Subtarget->isTargetCygMing()) {
- AU.addRequired<MachineModuleInfo>();
- }
- AsmPrinter::getAnalysisUsage(AU);
- }
-
- const X86Subtarget *Subtarget;
-
- // Necessary for Darwin to print out the apprioriate types of linker stubs
- std::set<std::string> FnStubs, GVStubs, LinkOnceStubs;
-
- // Necessary for dllexport support
- std::set<std::string> DLLExportedFns, DLLExportedGVs;
-
- inline static bool isScale(const MachineOperand &MO) {
- return MO.isImmediate() &&
- (MO.getImm() == 1 || MO.getImm() == 2 ||
- MO.getImm() == 4 || MO.getImm() == 8);
- }
-
- inline static bool isMem(const MachineInstr *MI, unsigned Op) {
- if (MI->getOperand(Op).isFrameIndex()) return true;
- return Op+4 <= MI->getNumOperands() &&
- MI->getOperand(Op ).isRegister() && isScale(MI->getOperand(Op+1)) &&
- MI->getOperand(Op+2).isRegister() &&
- (MI->getOperand(Op+3).isImmediate() ||
- MI->getOperand(Op+3).isGlobalAddress() ||
- MI->getOperand(Op+3).isConstantPoolIndex() ||
- MI->getOperand(Op+3).isJumpTableIndex());
- }
-};
-
-} // end namespace llvm
-
-#endif
Modified: llvm/branches/non-call-eh/lib/Target/X86/X86CallingConv.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/X86/X86CallingConv.td?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/X86/X86CallingConv.td (original)
+++ llvm/branches/non-call-eh/lib/Target/X86/X86CallingConv.td Sun Jul 6 15:45:41 2008
@@ -35,7 +35,7 @@
// MMX vector types are always returned in MM0. If the target doesn't have
// MM0, it doesn't support these vector types.
- CCIfType<[v8i8, v4i16, v2i32, v1i64], CCAssignToReg<[MM0]>>,
+ CCIfType<[v8i8, v4i16, v2i32, v1i64, v2f32], CCAssignToReg<[MM0]>>,
// Long double types are always returned in ST0 (even with SSE).
CCIfType<[f80], CCAssignToReg<[ST0, ST1]>>
@@ -75,6 +75,9 @@
// The X86-64 calling convention always returns FP values in XMM0.
CCIfType<[f32], CCAssignToReg<[XMM0, XMM1]>>,
CCIfType<[f64], CCAssignToReg<[XMM0, XMM1]>>,
+
+ // MMX vector types are always returned in XMM0.
+ CCIfType<[v8i8, v4i16, v2i32, v1i64, v2f32], CCAssignToReg<[XMM0, XMM1]>>,
CCDelegateTo<RetCC_X86Common>
]>;
@@ -83,6 +86,10 @@
// The X86-Win64 calling convention always returns __m64 values in RAX.
CCIfType<[v8i8, v4i16, v2i32, v1i64], CCAssignToReg<[RAX]>>,
+ // And FP in XMM0 only.
+ CCIfType<[f32], CCAssignToReg<[XMM0]>>,
+ CCIfType<[f64], CCAssignToReg<[XMM0]>>,
+
// Otherwise, everything is the same as 'normal' X86-64 C CC.
CCDelegateTo<RetCC_X86_64_C>
]>;
@@ -133,12 +140,20 @@
// The first 8 FP/Vector arguments are passed in XMM registers.
CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
- CCAssignToReg<[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>>,
-
- // The first 8 MMX vector arguments are passed in GPRs.
- CCIfType<[v8i8, v4i16, v2i32, v1i64],
- CCAssignToReg<[RDI, RSI, RDX, RCX, R8 , R9 ]>>,
+ CCAssignToReg<[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>>,
+ // The first 8 MMX (except for v1i64) vector arguments are passed in XMM
+ // registers on Darwin.
+ CCIfType<[v8i8, v4i16, v2i32, v2f32],
+ CCIfSubtarget<"isTargetDarwin()",
+ CCIfSubtarget<"hasSSE2()",
+ CCAssignToReg<[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>>>>,
+
+ // The first 8 v1i64 vector arguments are passed in GPRs on Darwin.
+ CCIfType<[v1i64],
+ CCIfSubtarget<"isTargetDarwin()",
+ CCAssignToReg<[RDI, RSI, RDX, RCX, R8]>>>,
+
// Integer/FP values get stored in stack slots that are 8 bytes in size and
// 8-byte aligned if there are no more registers to hold them.
CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>,
@@ -151,13 +166,12 @@
CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>,
// __m64 vectors get 8-byte stack slots that are 8-byte aligned.
- CCIfType<[v8i8, v4i16, v2i32, v1i64], CCAssignToStack<8, 8>>
+ CCIfType<[v8i8, v4i16, v2i32, v1i64, v2f32], CCAssignToStack<8, 8>>
]>;
// Calling convention used on Win64
def CC_X86_Win64_C : CallingConv<[
// FIXME: Handle byval stuff.
- // FIXME: Handle fp80.
// FIXME: Handle varargs.
// Promote i8/i16 arguments to i32.
@@ -178,7 +192,7 @@
[RCX , RDX , R8 , R9 ]>>,
// The first 4 MMX vector arguments are passed in GPRs.
- CCIfType<[v8i8, v4i16, v2i32, v1i64],
+ CCIfType<[v8i8, v4i16, v2i32, v1i64, v2f32],
CCAssignToRegWithShadow<[RCX , RDX , R8 , R9 ],
[XMM0, XMM1, XMM2, XMM3]>>,
@@ -186,6 +200,10 @@
// 16-byte aligned if there are no more registers to hold them.
CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 16>>,
+ // Long doubles get stack slots whose size and alignment depend on the
+ // subtarget.
+ CCIfType<[f80], CCAssignToStack<0, 0>>,
+
// Vectors get 16-byte stack slots that are 16-byte aligned.
CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>,
@@ -211,12 +229,19 @@
// The first 8 FP/Vector arguments are passed in XMM registers.
CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
- CCAssignToReg<[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>>,
-
- // The first 8 MMX vector arguments are passed in GPRs.
- CCIfType<[v8i8, v4i16, v2i32, v1i64],
- CCAssignToReg<[RDI, RSI, RDX, RCX, R8]>>,
+ CCAssignToReg<[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>>,
+ // The first 8 MMX (except for v1i64) vector arguments are passed in XMM
+ // registers on Darwin.
+ CCIfType<[v8i8, v4i16, v2i32, v2f32],
+ CCIfSubtarget<"isTargetDarwin()",
+ CCAssignToReg<[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>>>,
+
+ // The first 8 v1i64 vector arguments are passed in GPRs on Darwin.
+ CCIfType<[v1i64],
+ CCIfSubtarget<"isTargetDarwin()",
+ CCAssignToReg<[RDI, RSI, RDX, RCX, R8]>>>,
+
// Integer/FP values get stored in stack slots that are 8 bytes in size and
// 8-byte aligned if there are no more registers to hold them.
CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>,
@@ -242,9 +267,15 @@
// The first 3 float or double arguments, if marked 'inreg' and if the call
// is not a vararg call and if SSE2 is available, are passed in SSE registers.
- CCIfNotVarArg<CCIfInReg<CCIfType<[f32,f64], CCIfSubtarget<"hasSSE2()",
+ CCIfNotVarArg<CCIfInReg<CCIfType<[f32,f64],
+ CCIfSubtarget<"hasSSE2()",
CCAssignToReg<[XMM0,XMM1,XMM2]>>>>>,
+ // The first 3 __m64 (except for v1i64) vector arguments are passed in mmx
+ // registers if the call is not a vararg call.
+ CCIfNotVarArg<CCIfType<[v8i8, v4i16, v2i32, v2f32],
+ CCAssignToReg<[MM0, MM1, MM2]>>>,
+
// Integer/Float values get stored in stack slots that are 4 bytes in
// size and 4-byte aligned.
CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
@@ -264,8 +295,7 @@
// __m64 vectors get 8-byte stack slots that are 4-byte aligned. They are
// passed in the parameter area.
- CCIfType<[v8i8, v4i16, v2i32, v1i64], CCAssignToStack<8, 4>>
-]>;
+ CCIfType<[v8i8, v4i16, v2i32, v1i64], CCAssignToStack<8, 4>>]>;
def CC_X86_32_C : CallingConv<[
// Promote i8/i16 arguments to i32.
Modified: llvm/branches/non-call-eh/lib/Target/X86/X86CodeEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/X86/X86CodeEmitter.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/X86/X86CodeEmitter.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/X86/X86CodeEmitter.cpp Sun Jul 6 15:45:41 2008
@@ -38,18 +38,18 @@
class VISIBILITY_HIDDEN Emitter : public MachineFunctionPass {
const X86InstrInfo *II;
const TargetData *TD;
- TargetMachine &TM;
+ X86TargetMachine &TM;
MachineCodeEmitter &MCE;
intptr_t PICBaseOffset;
bool Is64BitMode;
bool IsPIC;
public:
static char ID;
- explicit Emitter(TargetMachine &tm, MachineCodeEmitter &mce)
+ explicit Emitter(X86TargetMachine &tm, MachineCodeEmitter &mce)
: MachineFunctionPass((intptr_t)&ID), II(0), TD(0), TM(tm),
MCE(mce), PICBaseOffset(0), Is64BitMode(false),
IsPIC(TM.getRelocationModel() == Reloc::PIC_) {}
- Emitter(TargetMachine &tm, MachineCodeEmitter &mce,
+ Emitter(X86TargetMachine &tm, MachineCodeEmitter &mce,
const X86InstrInfo &ii, const TargetData &td, bool is64)
: MachineFunctionPass((intptr_t)&ID), II(&ii), TD(&td), TM(tm),
MCE(mce), PICBaseOffset(0), Is64BitMode(is64),
@@ -112,9 +112,10 @@
MCE.setModuleInfo(&getAnalysis<MachineModuleInfo>());
- II = ((X86TargetMachine&)TM).getInstrInfo();
- TD = ((X86TargetMachine&)TM).getTargetData();
+ II = TM.getInstrInfo();
+ TD = TM.getTargetData();
Is64BitMode = TM.getSubtarget<X86Subtarget>().is64Bit();
+ IsPIC = TM.getRelocationModel() == Reloc::PIC_;
do {
DOUT << "JITTing function '" << MF.getFunction()->getName() << "'\n";
@@ -220,7 +221,7 @@
}
unsigned Emitter::getX86RegNum(unsigned RegNo) const {
- return ((const X86RegisterInfo&)II->getRegisterInfo()).getX86RegNum(RegNo);
+ return II->getRegisterInfo().getX86RegNum(RegNo);
}
inline static unsigned char ModRMByte(unsigned Mod, unsigned RegOpcode,
@@ -489,7 +490,8 @@
case TargetInstrInfo::INLINEASM:
assert(0 && "JIT does not support inline asm!\n");
break;
- case TargetInstrInfo::LABEL:
+ case TargetInstrInfo::DBG_LABEL:
+ case TargetInstrInfo::EH_LABEL:
MCE.emitLabel(MI.getOperand(0).getImm());
break;
case TargetInstrInfo::IMPLICIT_DEF:
@@ -503,7 +505,7 @@
emitConstant(0, X86InstrInfo::sizeOfImm(Desc));
// Remember PIC base.
PICBaseOffset = MCE.getCurrentPCOffset();
- X86JITInfo *JTI = dynamic_cast<X86JITInfo*>(TM.getJITInfo());
+ X86JITInfo *JTI = TM.getJITInfo();
JTI->setPICBase(MCE.getCurrentPCValue());
break;
}
Modified: llvm/branches/non-call-eh/lib/Target/X86/X86ISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/non-call-eh/lib/Target/X86/X86ISelDAGToDAG.cpp?rev=53163&r1=53162&r2=53163&view=diff
==============================================================================
--- llvm/branches/non-call-eh/lib/Target/X86/X86ISelDAGToDAG.cpp (original)
+++ llvm/branches/non-call-eh/lib/Target/X86/X86ISelDAGToDAG.cpp Sun Jul 6 15:45:41 2008
@@ -32,10 +32,10 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetMachine.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include <queue>
#include <set>
@@ -90,10 +90,6 @@
/// register should set this to true.
bool ContainsFPCode;
- /// FastISel - Enable fast(er) instruction selection.
- ///
- bool FastISel;
-
/// TM - Keep a reference to X86TargetMachine.
///
X86TargetMachine &TM;
@@ -110,10 +106,14 @@
/// base register.
unsigned GlobalBaseReg;
+ /// CurBB - Current BB being isel'd.
+ ///
+ MachineBasicBlock *CurBB;
+
public:
X86DAGToDAGISel(X86TargetMachine &tm, bool fast)
- : SelectionDAGISel(X86Lowering),
- ContainsFPCode(false), FastISel(fast), TM(tm),
+ : SelectionDAGISel(X86Lowering, fast),
+ ContainsFPCode(false), TM(tm),
X86Lowering(*TM.getTargetLowering()),
Subtarget(&TM.getSubtarget<X86Subtarget>()) {}
@@ -127,9 +127,13 @@
return "X86 DAG->DAG Instruction Selection";
}
- /// InstructionSelectBasicBlock - This callback is invoked by
+ /// InstructionSelect - This callback is invoked by
/// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
- virtual void InstructionSelectBasicBlock(SelectionDAG &DAG);
+ virtual void InstructionSelect(SelectionDAG &DAG);
+
+ /// InstructionSelectPostProcessing - Post processing of selected and
+ /// scheduled basic blocks.
+ virtual void InstructionSelectPostProcessing(SelectionDAG &DAG);
virtual void EmitFunctionEntryCode(Function &Fn, MachineFunction &MF);
@@ -214,7 +218,7 @@
/// getTruncate - return an SDNode that implements a subreg based truncate
  /// of the specified operand to the specified value type.
- SDNode *getTruncate(SDOperand N0, MVT::ValueType VT);
+ SDNode *getTruncate(SDOperand N0, MVT VT);
#ifndef NDEBUG
unsigned Indent;
@@ -222,6 +226,8 @@
};
}
+/// findFlagUse - Return the use of the MVT::Flag value produced by the
+/// specified SDNode.
+///
static SDNode *findFlagUse(SDNode *N) {
unsigned FlagResNo = N->getNumValues()-1;
for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
@@ -235,29 +241,38 @@
return NULL;
}
+/// findNonImmUse - Return true by reference in "found" if "Use" is a
+/// non-immediate use of "Def". This function recursively traverses up
+/// the operand chain, ignoring certain nodes.
static void findNonImmUse(SDNode *Use, SDNode* Def, SDNode *ImmedUse,
SDNode *Root, SDNode *Skip, bool &found,
- std::set<SDNode *> &Visited) {
+ SmallPtrSet<SDNode*, 16> &Visited) {
if (found ||
Use->getNodeId() > Def->getNodeId() ||
- !Visited.insert(Use).second)
+ !Visited.insert(Use))
return;
-
+
for (unsigned i = 0, e = Use->getNumOperands(); !found && i != e; ++i) {
SDNode *N = Use->getOperand(i).Val;
if (N == Skip)
continue;
if (N == Def) {
if (Use == ImmedUse)
- continue; // Immediate use is ok.
+ continue; // We are not looking for an immediate use.
if (Use == Root) {
+ // Must be a chain-reading node where it is possible to reach its own
+ // chain operand through a path starting from another operand.
assert(Use->getOpcode() == ISD::STORE ||
- Use->getOpcode() == X86ISD::CMP);
+ Use->getOpcode() == X86ISD::CMP ||
+ Use->getOpcode() == ISD::INTRINSIC_W_CHAIN ||
+ Use->getOpcode() == ISD::INTRINSIC_VOID);
continue;
}
found = true;
break;
}
+
+ // Traverse up the operand chain.
findNonImmUse(N, Def, ImmedUse, Root, Skip, found, Visited);
}
}
@@ -273,7 +288,7 @@
/// its chain operand.
static inline bool isNonImmUse(SDNode *Root, SDNode *Def, SDNode *ImmedUse,
SDNode *Skip = NULL) {
- std::set<SDNode *> Visited;
+ SmallPtrSet<SDNode*, 16> Visited;
bool found = false;
findNonImmUse(Root, Def, ImmedUse, Root, Skip, found, Visited);
return found;
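Note that the switch from std::set to SmallPtrSet above also changes the insert idiom: the ".second" is dropped because SmallPtrSet::insert, as used at this revision, returns true only when the pointer was newly inserted, while std::set::insert returns an iterator/bool pair. A standalone sketch of the same visited-set pruning pattern, written against std::unordered_set so it compiles on its own (Node is a hypothetical stand-in for SDNode):

    #include <unordered_set>
    #include <vector>

    // Hypothetical stand-in for SDNode: a topological id plus operand edges.
    struct Node {
      int Id;
      std::vector<Node*> Operands;
    };

    // Walk up the operand chain, pruning with a visited set, and report in
    // Found whether Def is reachable from Use.
    static void findUseOf(Node *Use, Node *Def, bool &Found,
                          std::unordered_set<Node*> &Visited) {
      if (Found || Use->Id > Def->Id || !Visited.insert(Use).second)
        return;
      for (Node *Op : Use->Operands) {
        if (Op == Def) { Found = true; return; }
        findUseOf(Op, Def, Found, Visited);
      }
    }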
@@ -317,7 +332,7 @@
// NU), then TF is a predecessor of FU and a successor of NU. But since
// NU and FU are flagged together, this effectively creates a cycle.
bool HasFlagUse = false;
- MVT::ValueType VT = Root->getValueType(Root->getNumValues()-1);
+ MVT VT = Root->getValueType(Root->getNumValues()-1);
while ((VT == MVT::Flag && !Root->use_empty())) {
SDNode *FU = findFlagUse(Root);
if (FU == NULL)
@@ -351,6 +366,32 @@
Store.getOperand(2), Store.getOperand(3));
}
+/// isRMWLoad - Return true if N is a load that's part of an RMW sub-DAG.
+///
+static bool isRMWLoad(SDOperand N, SDOperand Chain, SDOperand Address,
+ SDOperand &Load) {
+ if (N.getOpcode() == ISD::BIT_CONVERT)
+ N = N.getOperand(0);
+
+ LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
+ if (!LD || LD->isVolatile())
+ return false;
+ if (LD->getAddressingMode() != ISD::UNINDEXED)
+ return false;
+
+ ISD::LoadExtType ExtType = LD->getExtensionType();
+ if (ExtType != ISD::NON_EXTLOAD && ExtType != ISD::EXTLOAD)
+ return false;
+
+ if (N.hasOneUse() &&
+ N.getOperand(1) == Address &&
+ N.Val->isOperandOf(Chain.Val)) {
+ Load = N;
+ return true;
+ }
+ return false;
+}
+
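The shape isRMWLoad helps expose is, roughly, store(op(load(addr), x), addr): a load and a store of the same address feeding one binary operation, which x86 can encode as a single memory-operand instruction instead of a load/op/store sequence. A standalone sketch of that match over a hypothetical node type (illustration only, not the selector's code):

    // Hypothetical DAG node, not LLVM's SDNode.
    enum Opcode { Load, Store, Add };

    struct DagNode {
      Opcode Op;
      DagNode *Ops[2]; // Load: {addr}; Add: {lhs, rhs}; Store: {value, addr}
    };

    // Return the load to fold if St has the shape store(add(load(A), x), A),
    // accepting the load in either operand of the add.
    static DagNode *matchRMWAdd(DagNode *St) {
      if (!St || St->Op != Store) return 0;
      DagNode *Val = St->Ops[0], *Addr = St->Ops[1];
      if (!Val || Val->Op != Add) return 0;
      for (int i = 0; i < 2; ++i) {
        DagNode *Ld = Val->Ops[i];
        if (Ld && Ld->Op == Load && Ld->Ops[0] == Addr)
          return Ld;
      }
      return 0;
    }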
/// PreprocessForRMW - Preprocess the DAG to make instruction selection better.
/// This is only run if not in -fast mode (aka -O0).
/// This allows the instruction selector to pick more read-modify-write
@@ -402,8 +443,8 @@
SDOperand N1 = I->getOperand(1);
SDOperand N2 = I->getOperand(2);
- if (MVT::isFloatingPoint(N1.getValueType()) ||
- MVT::isVector(N1.getValueType()) ||
+ if ((N1.getValueType().isFloatingPoint() &&
+ !N1.getValueType().isVector()) ||
!N1.hasOneUse())
continue;
@@ -417,20 +458,13 @@
case ISD::OR:
case ISD::XOR:
case ISD::ADDC:
- case ISD::ADDE: {
+ case ISD::ADDE:
+ case ISD::VECTOR_SHUFFLE: {
SDOperand N10 = N1.getOperand(0);
SDOperand N11 = N1.getOperand(1);
- if (ISD::isNON_EXTLoad(N10.Val))
- RModW = true;
- else if (ISD::isNON_EXTLoad(N11.Val)) {
- RModW = true;
- std::swap(N10, N11);
- }
- RModW = RModW && N10.Val->isOperandOf(Chain.Val) && N10.hasOneUse() &&
- (N10.getOperand(1) == N2) &&
- (N10.Val->getValueType(0) == N1.getValueType());
- if (RModW)
- Load = N10;
+ RModW = isRMWLoad(N10, Chain, N2, Load);
+ if (!RModW)
+ RModW = isRMWLoad(N11, Chain, N2, Load);
break;
}
case ISD::SUB:
@@ -444,12 +478,7 @@
case X86ISD::SHLD:
case X86ISD::SHRD: {
SDOperand N10 = N1.getOperand(0);
- if (ISD::isNON_EXTLoad(N10.Val))
- RModW = N10.Val->isOperandOf(Chain.Val) && N10.hasOneUse() &&
- (N10.getOperand(1) == N2) &&
- (N10.Val->getValueType(0) == N1.getValueType());
- if (RModW)
- Load = N10;
+ RModW = isRMWLoad(N10, Chain, N2, Load);
break;
}
}
@@ -479,8 +508,8 @@
// If the source and destination are SSE registers, then this is a legal
// conversion that should not be lowered.
- MVT::ValueType SrcVT = N->getOperand(0).getValueType();
- MVT::ValueType DstVT = N->getValueType(0);
+ MVT SrcVT = N->getOperand(0).getValueType();
+ MVT DstVT = N->getValueType(0);
bool SrcIsSSE = X86Lowering.isScalarFPTypeInSSEReg(SrcVT);
bool DstIsSSE = X86Lowering.isScalarFPTypeInSSEReg(DstVT);
if (SrcIsSSE && DstIsSSE)
@@ -498,7 +527,7 @@
// Here we could have an FP stack truncation or an FPStack <-> SSE convert.
// FPStack has extload and truncstore. SSE can fold direct loads into other
// operations. Based on this, decide what we want to do.
- MVT::ValueType MemVT;
+ MVT MemVT;
if (N->getOpcode() == ISD::FP_ROUND)
MemVT = DstVT; // FP_ROUND must use DstVT, we can't do a 'trunc load'.
else
@@ -528,10 +557,10 @@
/// InstructionSelect - This callback is invoked by SelectionDAGISel
/// when it has created a SelectionDAG for us to codegen.
-void X86DAGToDAGISel::InstructionSelectBasicBlock(SelectionDAG &DAG) {
- DEBUG(BB->dump());
- MachineFunction::iterator FirstMBB = BB;
+void X86DAGToDAGISel::InstructionSelect(SelectionDAG &DAG) {
+ CurBB = BB; // BB can change as a result of isel.
+ DEBUG(BB->dump());
if (!FastISel)
PreprocessForRMW(DAG);
@@ -549,11 +578,9 @@
#endif
DAG.RemoveDeadNodes();
+}
- // Emit machine code to BB. This can change 'BB' to the last block being
- // inserted into.
- ScheduleAndEmitDAG(DAG);
-
+void X86DAGToDAGISel::InstructionSelectPostProcessing(SelectionDAG &DAG) {
// If we are emitting FP stack code, scan the basic block to determine if this
// block defines any FP values. If so, put an FP_REG_KILL instruction before
// the terminator of the block.
@@ -566,7 +593,7 @@
// Scan all of the machine instructions in these MBBs, checking for FP
// stores. (RFP32 and RFP64 will not exist in SSE mode, but RFP80 might.)
- MachineFunction::iterator MBBI = FirstMBB;
+ MachineFunction::iterator MBBI = CurBB;
MachineFunction::iterator EndMBB = BB; ++EndMBB;
for (; MBBI != EndMBB; ++MBBI) {
MachineBasicBlock *MBB = MBBI;
@@ -916,7 +943,7 @@
if (MatchAddress(N, AM))
return false;
- MVT::ValueType VT = N.getValueType();
+ MVT VT = N.getValueType();
if (AM.BaseType == X86ISelAddressMode::RegBase) {
if (!AM.Base.Reg.Val)
AM.Base.Reg = CurDAG->getRegister(0, VT);
@@ -963,38 +990,19 @@
// Also handle the case where we explicitly require zeros in the top
// elements. This is a vector shuffle from the zero vector.
- if (N.getOpcode() == ISD::VECTOR_SHUFFLE && N.Val->hasOneUse() &&
+ if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.Val->hasOneUse() &&
// Check to see if the top elements are all zeros (or bitcast of zeros).
- ISD::isBuildVectorAllZeros(N.getOperand(0).Val) &&
- N.getOperand(1).getOpcode() == ISD::SCALAR_TO_VECTOR &&
- N.getOperand(1).Val->hasOneUse() &&
- ISD::isNON_EXTLoad(N.getOperand(1).getOperand(0).Val) &&
- N.getOperand(1).getOperand(0).hasOneUse()) {
- // Check to see if the shuffle mask is 4/L/L/L or 2/L, where L is something
- // from the LHS.
- unsigned VecWidth=MVT::getVectorNumElements(N.getOperand(0).getValueType());
- SDOperand ShufMask = N.getOperand(2);
- assert(ShufMask.getOpcode() == ISD::BUILD_VECTOR && "Invalid shuf mask!");
- if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(ShufMask.getOperand(0))) {
- if (C->getValue() == VecWidth) {
- for (unsigned i = 1; i != VecWidth; ++i) {
- if (ShufMask.getOperand(i).getOpcode() == ISD::UNDEF) {
- // ok.
- } else {
- ConstantSDNode *C = cast<ConstantSDNode>(ShufMask.getOperand(i));
- if (C->getValue() >= VecWidth) return false;
- }
- }
- }
-
- // Okay, this is a zero extending load. Fold it.
- LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(1).getOperand(0));
- if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp))
- return false;
- OutChain = LD->getChain();
- InChain = SDOperand(LD, 1);
- return true;
- }
+ N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
+ N.getOperand(0).Val->hasOneUse() &&
+ ISD::isNON_EXTLoad(N.getOperand(0).getOperand(0).Val) &&
+ N.getOperand(0).getOperand(0).hasOneUse()) {
+ // Okay, this is a zero extending load. Fold it.
+ LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(0).getOperand(0));
+ if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp))
+ return false;
+ OutChain = LD->getChain();
+ InChain = SDOperand(LD, 1);
+ return true;
}
return false;
}
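The fold above (X86ISD::VZEXT_MOVL of a SCALAR_TO_VECTOR of a load) models a zero-extending scalar load into a vector register: the scalar lands in element 0 and the remaining lanes are cleared, matching the "zeros in the top elements" requirement in the comment. A standalone C++ illustration of that value semantic (not the selector's code):

    #include <array>

    // Element 0 comes from memory; the upper lanes are zero.
    static std::array<float, 4> zextScalarLoad(const float *P) {
      std::array<float, 4> V = { { *P, 0.0f, 0.0f, 0.0f } };
      return V;
    }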
@@ -1009,7 +1017,7 @@
if (MatchAddress(N, AM))
return false;
- MVT::ValueType VT = N.getValueType();
+ MVT VT = N.getValueType();
unsigned Complexity = 0;
if (AM.BaseType == X86ISelAddressMode::RegBase)
if (AM.Base.Reg.Val)
@@ -1103,16 +1111,17 @@
return FindCallStartFromCall(Node->getOperand(0).Val);
}
-SDNode *X86DAGToDAGISel::getTruncate(SDOperand N0, MVT::ValueType VT) {
+SDNode *X86DAGToDAGISel::getTruncate(SDOperand N0, MVT VT) {
SDOperand SRIdx;
- switch (VT) {
+ switch (VT.getSimpleVT()) {
+ default: assert(0 && "Unknown truncate!");
case MVT::i8:
SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1
// Ensure that the source register has an 8-bit subreg on 32-bit targets
if (!Subtarget->is64Bit()) {
unsigned Opc;
- MVT::ValueType VT;
- switch (N0.getValueType()) {
+ MVT VT;
+ switch (N0.getValueType().getSimpleVT()) {
default: assert(0 && "Unknown truncate!");
case MVT::i16:
Opc = X86::MOV16to16_;
@@ -1134,7 +1143,6 @@
case MVT::i32:
SRIdx = CurDAG->getTargetConstant(3, MVT::i32); // SubRegSet 3
break;
- default: assert(0 && "Unknown truncate!"); break;
}
return CurDAG->getTargetNode(X86::EXTRACT_SUBREG, VT, N0, SRIdx);
}
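getTruncate emits no arithmetic for these truncates: EXTRACT_SUBREG simply renames the low bits of the wider register, and the extra copy on 32-bit targets (e.g. MOV16to16_ above) moves the value into a register class whose members actually expose an 8-bit subregister. A minimal standalone illustration of the value semantics of a subreg truncate:

    #include <stdint.h>

    // Truncation just keeps the low bits of the wider value; no computation
    // beyond the subregister rename is required.
    static uint8_t  truncToI8(uint32_t X)  { return (uint8_t)X;  } // low 8 bits
    static uint16_t truncToI16(uint32_t X) { return (uint16_t)X; } // low 16 bits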
@@ -1142,7 +1150,7 @@
SDNode *X86DAGToDAGISel::Select(SDOperand N) {
SDNode *Node = N.Val;
- MVT::ValueType NVT = Node->getValueType(0);
+ MVT NVT = Node->getValueType(0);
unsigned Opc, MOpc;
unsigned Opcode = Node->getOpcode();
@@ -1168,35 +1176,6 @@
case X86ISD::GlobalBaseReg:
return getGlobalBaseReg();
- // FIXME: This is a workaround for a tblgen problem: rdar://5791600
- case X86ISD::RET_FLAG:
- if (ConstantSDNode *Amt = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
- if (Amt->getSignExtended() != 0) break;
-
- // Match (X86retflag 0).
- SDOperand Chain = N.getOperand(0);
- bool HasInFlag = N.getOperand(N.getNumOperands()-1).getValueType()
- == MVT::Flag;
- SmallVector<SDOperand, 8> Ops0;
- AddToISelQueue(Chain);
- SDOperand InFlag(0, 0);
- if (HasInFlag) {
- InFlag = N.getOperand(N.getNumOperands()-1);
- AddToISelQueue(InFlag);
- }
- for (unsigned i = 2, e = N.getNumOperands()-(HasInFlag?1:0); i != e;
- ++i) {
- AddToISelQueue(N.getOperand(i));
- Ops0.push_back(N.getOperand(i));
- }
- Ops0.push_back(Chain);
- if (HasInFlag)
- Ops0.push_back(InFlag);
- return CurDAG->getTargetNode(X86::RET, MVT::Other,
- &Ops0[0], Ops0.size());
- }
- break;
-
case ISD::ADD: {
// Turn ADD X, c to MOV32ri X+c. This cannot be done with tblgen'd
// code and is matched first so as to prevent it from being turned into
@@ -1205,7 +1184,7 @@
// RIP-relative addressing.
if (TM.getCodeModel() != CodeModel::Small)
break;
- MVT::ValueType PtrVT = TLI.getPointerTy();
+ MVT PtrVT = TLI.getPointerTy();
SDOperand N0 = N.getOperand(0);
SDOperand N1 = N.getOperand(1);
if (N.Val->getValueType(0) == PtrVT &&
@@ -1246,7 +1225,7 @@
bool isSigned = Opcode == ISD::SMUL_LOHI;
if (!isSigned)
- switch (NVT) {
+ switch (NVT.getSimpleVT()) {
default: assert(0 && "Unsupported VT!");
case MVT::i8: Opc = X86::MUL8r; MOpc = X86::MUL8m; break;
case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
@@ -1254,7 +1233,7 @@
case MVT::i64: Opc = X86::MUL64r; MOpc = X86::MUL64m; break;
}
else
- switch (NVT) {
+ switch (NVT.getSimpleVT()) {
default: assert(0 && "Unsupported VT!");
case MVT::i8: Opc = X86::IMUL8r; MOpc = X86::IMUL8m; break;
case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
@@ -1263,7 +1242,7 @@
}
unsigned LoReg, HiReg;
- switch (NVT) {
+ switch (NVT.getSimpleVT()) {
default: assert(0 && "Unsupported VT!");
case MVT::i8: LoReg = X86::AL; HiReg = X86::AH; break;
case MVT::i16: LoReg = X86::AX; HiReg = X86::DX; break;
@@ -1356,7 +1335,7 @@
bool isSigned = Opcode == ISD::SDIVREM;
if (!isSigned)
- switch (NVT) {
+ switch (NVT.getSimpleVT()) {
default: assert(0 && "Unsupported VT!");
case MVT::i8: Opc = X86::DIV8r; MOpc = X86::DIV8m; break;
case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
@@ -1364,7 +1343,7 @@
case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
}
else
- switch (NVT) {
+ switch (NVT.getSimpleVT()) {
default: assert(0 && "Unsupported VT!");
case MVT::i8: Opc = X86::IDIV8r; MOpc = X86::IDIV8m; break;
case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
@@ -1374,7 +1353,7 @@
unsigned LoReg, HiReg;
unsigned ClrOpcode, SExtOpcode;
- switch (NVT) {
+ switch (NVT.getSimpleVT()) {
default: assert(0 && "Unsupported VT!");
case MVT::i8:
LoReg = X86::AL; HiReg = X86::AH;
@@ -1515,7 +1494,7 @@
SDOperand N0 = Node->getOperand(0);
// Get the subregister index for the type to extend.
- MVT::ValueType N0VT = N0.getValueType();
+ MVT N0VT = N0.getValueType();
unsigned Idx = (N0VT == MVT::i32) ? X86::SUBREG_32BIT :
(N0VT == MVT::i16) ? X86::SUBREG_16BIT :
(Subtarget->is64Bit()) ? X86::SUBREG_8BIT : 0;
@@ -1545,30 +1524,30 @@
SDOperand N0 = Node->getOperand(0);
AddToISelQueue(N0);
- MVT::ValueType SVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
+ MVT SVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
SDOperand TruncOp = SDOperand(getTruncate(N0, SVT), 0);
unsigned Opc = 0;
- switch (NVT) {
+ switch (NVT.getSimpleVT()) {
+ default: assert(0 && "Unknown sign_extend_inreg!");
case MVT::i16:
if (SVT == MVT::i8) Opc = X86::MOVSX16rr8;
else assert(0 && "Unknown sign_extend_inreg!");
break;
case MVT::i32:
- switch (SVT) {
+ switch (SVT.getSimpleVT()) {
+ default: assert(0 && "Unknown sign_extend_inreg!");
case MVT::i8: Opc = X86::MOVSX32rr8; break;
case MVT::i16: Opc = X86::MOVSX32rr16; break;
- default: assert(0 && "Unknown sign_extend_inreg!");
}
break;
case MVT::i64:
- switch (SVT) {
+ switch (SVT.getSimpleVT()) {
+ default: assert(0 && "Unknown sign_extend_inreg!");
case MVT::i8: Opc = X86::MOVSX64rr8; break;
case MVT::i16: Opc = X86::MOVSX64rr16; break;
case MVT::i32: Opc = X86::MOVSX64rr32; break;
- default: assert(0 && "Unknown sign_extend_inreg!");
}
break;
- default: assert(0 && "Unknown sign_extend_inreg!");
}
SDNode *ResNode = CurDAG->getTargetNode(Opc, NVT, TruncOp);
@@ -1600,6 +1579,32 @@
return ResNode;
break;
}
+
+ case ISD::DECLARE: {
+ // Handle DECLARE nodes here because the second operand may have been
+ // wrapped in X86ISD::Wrapper.
+ SDOperand Chain = Node->getOperand(0);
+ SDOperand N1 = Node->getOperand(1);
+ SDOperand N2 = Node->getOperand(2);
+ if (!isa<FrameIndexSDNode>(N1))
+ break;
+ int FI = cast<FrameIndexSDNode>(N1)->getIndex();
+ if (N2.getOpcode() == ISD::ADD &&
+ N2.getOperand(0).getOpcode() == X86ISD::GlobalBaseReg)
+ N2 = N2.getOperand(1);
+ if (N2.getOpcode() == X86ISD::Wrapper &&
+ isa<GlobalAddressSDNode>(N2.getOperand(0))) {
+ GlobalValue *GV =
+ cast<GlobalAddressSDNode>(N2.getOperand(0))->getGlobal();
+ SDOperand Tmp1 = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
+ SDOperand Tmp2 = CurDAG->getTargetGlobalAddress(GV, TLI.getPointerTy());
+ AddToISelQueue(Chain);
+ SDOperand Ops[] = { Tmp1, Tmp2, Chain };
+ return CurDAG->getTargetNode(TargetInstrInfo::DECLARE,
+ MVT::Other, Ops, 3);
+ }
+ break;
+ }
}
SDNode *ResNode = SelectCode(N);