[llvm-branch-commits] [llvm-branch] r172541 [2/8] - in /llvm/branches/AMDILBackend: ./ autoconf/ bindings/ocaml/executionengine/ bindings/ocaml/llvm/ bindings/ocaml/target/ cmake/ cmake/modules/ cmake/platforms/ docs/ docs/CommandGuide/ docs/_themes/ docs/_themes/llvm-theme/ docs/_themes/llvm-theme/static/ docs/llvm-theme/ docs/llvm-theme/static/ docs/tutorial/ examples/ExceptionDemo/ examples/Fibonacci/ examples/Kaleidoscope/Chapter4/ examples/Kaleidoscope/Chapter5/ examples/Kaleidoscope/Chapter6/ examples/Kaleidoscope/Chapt...
Richard Relph
Richard.Relph at amd.com
Tue Jan 15 09:16:26 PST 2013
Modified: llvm/branches/AMDILBackend/include/llvm/ExecutionEngine/RuntimeDyld.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/ExecutionEngine/RuntimeDyld.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/ExecutionEngine/RuntimeDyld.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/ExecutionEngine/RuntimeDyld.h Tue Jan 15 11:16:16 2013
@@ -15,43 +15,55 @@
#define LLVM_RUNTIME_DYLD_H
#include "llvm/ADT/StringRef.h"
+#include "llvm/ExecutionEngine/ObjectBuffer.h"
#include "llvm/Support/Memory.h"
namespace llvm {
class RuntimeDyldImpl;
-class MemoryBuffer;
+class ObjectImage;
// RuntimeDyld clients often want to handle the memory management of
-// what gets placed where. For JIT clients, this is an abstraction layer
-// over the JITMemoryManager, which references objects by their source
-// representations in LLVM IR.
+// what gets placed where. For JIT clients, this is the subset of
+// JITMemoryManager required for dynamic loading of binaries.
+//
// FIXME: As the RuntimeDyld fills out, additional routines will be needed
// for the varying types of objects to be allocated.
class RTDyldMemoryManager {
- RTDyldMemoryManager(const RTDyldMemoryManager&); // DO NOT IMPLEMENT
- void operator=(const RTDyldMemoryManager&); // DO NOT IMPLEMENT
+ RTDyldMemoryManager(const RTDyldMemoryManager&) LLVM_DELETED_FUNCTION;
+ void operator=(const RTDyldMemoryManager&) LLVM_DELETED_FUNCTION;
public:
RTDyldMemoryManager() {}
virtual ~RTDyldMemoryManager();
/// allocateCodeSection - Allocate a memory block of (at least) the given
- /// size suitable for executable code.
+ /// size suitable for executable code. The SectionID is a unique identifier
+ /// assigned by the JIT engine, and optionally recorded by the memory manager
+ /// to access a loaded section.
virtual uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
unsigned SectionID) = 0;
/// allocateDataSection - Allocate a memory block of (at least) the given
- /// size suitable for data.
+ /// size suitable for data. The SectionID is a unique identifier
+ /// assigned by the JIT engine, and optionally recorded by the memory manager
+ /// to access a loaded section.
virtual uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
unsigned SectionID) = 0;
+ /// getPointerToNamedFunction - This method returns the address of the
+ /// specified function. As such it is only useful for resolving library
+ /// symbols, not code generated symbols.
+ ///
+ /// If AbortOnFailure is false and no function with the given name is
+ /// found, this function returns a null pointer. Otherwise, a failed
+ /// lookup prints a message to stderr and aborts.
virtual void *getPointerToNamedFunction(const std::string &Name,
bool AbortOnFailure = true) = 0;
};
class RuntimeDyld {
- RuntimeDyld(const RuntimeDyld &); // DO NOT IMPLEMENT
- void operator=(const RuntimeDyld &); // DO NOT IMPLEMENT
+ RuntimeDyld(const RuntimeDyld &) LLVM_DELETED_FUNCTION;
+ void operator=(const RuntimeDyld &) LLVM_DELETED_FUNCTION;
// RuntimeDyldImpl is the actual class. RuntimeDyld is just the public
// interface.
@@ -62,17 +74,24 @@
// Any relocations already associated with the symbol will be re-resolved.
void reassignSectionAddress(unsigned SectionID, uint64_t Addr);
public:
- RuntimeDyld(RTDyldMemoryManager*);
+ RuntimeDyld(RTDyldMemoryManager *);
~RuntimeDyld();
- /// Load an in-memory object file into the dynamic linker.
- bool loadObject(MemoryBuffer *InputBuffer);
+ /// loadObject - Prepare the object contained in the input buffer for
+ /// execution. Ownership of the input buffer is transferred to the
+ /// ObjectImage instance returned from this function if successful.
+ /// In the case of load failure, the input buffer will be deleted.
+ ObjectImage *loadObject(ObjectBuffer *InputBuffer);
/// Get the address of our local copy of the symbol. This may or may not
/// be the address used for relocation (clients can copy the data around
/// and resolve relocations based on where they put it).
void *getSymbolAddress(StringRef Name);
+ /// Get the address of the target copy of the symbol. This is the address
+ /// used for relocation.
+ uint64_t getSymbolLoadAddress(StringRef Name);
+
/// Resolve the relocations for all symbols we currently know about.
void resolveRelocations();
@@ -80,7 +99,7 @@
/// Map the address of a JIT section as returned from the memory manager
/// to the address in the target process as the running code will see it.
/// This is the address which will be used for relocation resolution.
- void mapSectionAddress(void *LocalAddress, uint64_t TargetAddress);
+ void mapSectionAddress(const void *LocalAddress, uint64_t TargetAddress);
StringRef getErrorString();
};
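For context, a minimal sketch of driving the revised interface (the helper name and error handling are illustrative, not part of this patch; MemMgr stands for any RTDyldMemoryManager implementation):

    #include "llvm/ExecutionEngine/ObjectBuffer.h"
    #include "llvm/ExecutionEngine/RuntimeDyld.h"
    #include "llvm/Support/ErrorHandling.h"
    using namespace llvm;

    void *loadAndResolve(RTDyldMemoryManager *MemMgr, ObjectBuffer *Buf,
                         StringRef Entry) {
      RuntimeDyld Dyld(MemMgr);
      // loadObject now returns an ObjectImage* that owns the buffer, rather
      // than the old bool; null signals failure (the buffer is then freed).
      ObjectImage *Image = Dyld.loadObject(Buf);
      if (!Image)
        report_fatal_error(Dyld.getErrorString());
      Dyld.resolveRelocations();
      return Dyld.getSymbolAddress(Entry);
    }

The returned ObjectImage must eventually be deleted by the caller; the sketch leaks it for brevity.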
Modified: llvm/branches/AMDILBackend/include/llvm/Function.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Function.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Function.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Function.h Tue Jan 15 11:16:16 2013
@@ -109,9 +109,9 @@
BuildLazyArguments();
}
void BuildLazyArguments() const;
-
- Function(const Function&); // DO NOT IMPLEMENT
- void operator=(const Function&); // DO NOT IMPLEMENT
+
+ Function(const Function&) LLVM_DELETED_FUNCTION;
+ void operator=(const Function&) LLVM_DELETED_FUNCTION;
/// Function ctor - If the (optional) Module argument is specified, the
/// function is automatically inserted into the end of the function list for
@@ -168,17 +168,17 @@
///
void setAttributes(const AttrListPtr &attrs) { AttributeList = attrs; }
- /// hasFnAttr - Return true if this function has the given attribute.
- bool hasFnAttr(Attributes N) const {
- // Function Attributes are stored at ~0 index
- return AttributeList.paramHasAttr(~0U, N);
+ /// getFnAttributes - Return the function attributes for querying.
+ ///
+ Attributes getFnAttributes() const {
+ return AttributeList.getFnAttributes();
}
/// addFnAttr - Add function attributes to this function.
///
- void addFnAttr(Attributes N) {
+ void addFnAttr(Attributes::AttrVal N) {
// Function Attributes are stored at ~0 index
- addAttribute(~0U, N);
+ addAttribute(AttrListPtr::FunctionIndex, Attributes::get(getContext(), N));
}
/// removeFnAttr - Remove function attributes from this function.
@@ -195,9 +195,15 @@
void setGC(const char *Str);
void clearGC();
- /// @brief Determine whether the function has the given attribute.
- bool paramHasAttr(unsigned i, Attributes attr) const {
- return AttributeList.paramHasAttr(i, attr);
+
+ /// getRetAttributes - Return the return attributes for querying.
+ Attributes getRetAttributes() const {
+ return AttributeList.getRetAttributes();
+ }
+
+ /// getParamAttributes - Return the parameter attributes for querying.
+ Attributes getParamAttributes(unsigned Idx) const {
+ return AttributeList.getParamAttributes(Idx);
}
/// addAttribute - adds the attribute to the list of attributes.
@@ -213,50 +219,44 @@
/// @brief Determine if the function does not access memory.
bool doesNotAccessMemory() const {
- return hasFnAttr(Attribute::ReadNone);
+ return getFnAttributes().hasAttribute(Attributes::ReadNone);
}
- void setDoesNotAccessMemory(bool DoesNotAccessMemory = true) {
- if (DoesNotAccessMemory) addFnAttr(Attribute::ReadNone);
- else removeFnAttr(Attribute::ReadNone);
+ void setDoesNotAccessMemory() {
+ addFnAttr(Attributes::ReadNone);
}
/// @brief Determine if the function does not access or only reads memory.
bool onlyReadsMemory() const {
- return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly);
+ return doesNotAccessMemory() ||
+ getFnAttributes().hasAttribute(Attributes::ReadOnly);
}
- void setOnlyReadsMemory(bool OnlyReadsMemory = true) {
- if (OnlyReadsMemory) addFnAttr(Attribute::ReadOnly);
- else removeFnAttr(Attribute::ReadOnly | Attribute::ReadNone);
+ void setOnlyReadsMemory() {
+ addFnAttr(Attributes::ReadOnly);
}
/// @brief Determine if the function cannot return.
bool doesNotReturn() const {
- return hasFnAttr(Attribute::NoReturn);
+ return getFnAttributes().hasAttribute(Attributes::NoReturn);
}
- void setDoesNotReturn(bool DoesNotReturn = true) {
- if (DoesNotReturn) addFnAttr(Attribute::NoReturn);
- else removeFnAttr(Attribute::NoReturn);
+ void setDoesNotReturn() {
+ addFnAttr(Attributes::NoReturn);
}
/// @brief Determine if the function cannot unwind.
bool doesNotThrow() const {
- return hasFnAttr(Attribute::NoUnwind);
+ return getFnAttributes().hasAttribute(Attributes::NoUnwind);
}
- void setDoesNotThrow(bool DoesNotThrow = true) {
- if (DoesNotThrow) addFnAttr(Attribute::NoUnwind);
- else removeFnAttr(Attribute::NoUnwind);
+ void setDoesNotThrow() {
+ addFnAttr(Attributes::NoUnwind);
}
/// @brief True if the ABI mandates (or the user requested) that this
/// function be in an unwind table.
bool hasUWTable() const {
- return hasFnAttr(Attribute::UWTable);
+ return getFnAttributes().hasAttribute(Attributes::UWTable);
}
- void setHasUWTable(bool HasUWTable = true) {
- if (HasUWTable)
- addFnAttr(Attribute::UWTable);
- else
- removeFnAttr(Attribute::UWTable);
+ void setHasUWTable() {
+ addFnAttr(Attributes::UWTable);
}
/// @brief True if this function needs an unwind table.
@@ -267,27 +267,25 @@
/// @brief Determine if the function returns a structure through first
/// pointer argument.
bool hasStructRetAttr() const {
- return paramHasAttr(1, Attribute::StructRet);
+ return getParamAttributes(1).hasAttribute(Attributes::StructRet);
}
/// @brief Determine if the parameter does not alias other parameters.
/// @param n The parameter to check. 1 is the first parameter, 0 is the return value.
bool doesNotAlias(unsigned n) const {
- return paramHasAttr(n, Attribute::NoAlias);
+ return getParamAttributes(n).hasAttribute(Attributes::NoAlias);
}
- void setDoesNotAlias(unsigned n, bool DoesNotAlias = true) {
- if (DoesNotAlias) addAttribute(n, Attribute::NoAlias);
- else removeAttribute(n, Attribute::NoAlias);
+ void setDoesNotAlias(unsigned n) {
+ addAttribute(n, Attributes::get(getContext(), Attributes::NoAlias));
}
/// @brief Determine if the parameter can be captured.
/// @param n The parameter to check. 1 is the first parameter, 0 is the return value.
bool doesNotCapture(unsigned n) const {
- return paramHasAttr(n, Attribute::NoCapture);
+ return getParamAttributes(n).hasAttribute(Attributes::NoCapture);
}
- void setDoesNotCapture(unsigned n, bool DoesNotCapture = true) {
- if (DoesNotCapture) addAttribute(n, Attribute::NoCapture);
- else removeAttribute(n, Attribute::NoCapture);
+ void setDoesNotCapture(unsigned n) {
+ addAttribute(n, Attributes::get(getContext(), Attributes::NoCapture));
}
/// copyAttributesFrom - copy all additional attributes (those not needed to
@@ -400,7 +398,6 @@
void viewCFGOnly() const;
/// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const Function *) { return true; }
static inline bool classof(const Value *V) {
return V->getValueID() == Value::FunctionVal;
}
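The attribute interface changes above all follow one pattern; here is a sketch of the query/update migration for some Function *F (the function name is only for illustration):

    #include "llvm/Attributes.h"
    #include "llvm/Function.h"
    using namespace llvm;

    void migrate(Function *F) {
      // Before: F->hasFnAttr(Attribute::NoUnwind)
      bool NoUnwind = F->getFnAttributes().hasAttribute(Attributes::NoUnwind);
      // The setters lost their bool parameter and now only add an attribute.
      if (!NoUnwind)
        F->setDoesNotThrow();
      // Parameter queries: index 1 is the first parameter, 0 the return value.
      bool SRet = F->getParamAttributes(1).hasAttribute(Attributes::StructRet);
      (void)SRet;
    }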
Modified: llvm/branches/AMDILBackend/include/llvm/GlobalAlias.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/GlobalAlias.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/GlobalAlias.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/GlobalAlias.h Tue Jan 15 11:16:16 2013
@@ -28,8 +28,8 @@
class GlobalAlias : public GlobalValue, public ilist_node<GlobalAlias> {
friend class SymbolTableListTraits<GlobalAlias, Module>;
- void operator=(const GlobalAlias &); // Do not implement
- GlobalAlias(const GlobalAlias &); // Do not implement
+ void operator=(const GlobalAlias &) LLVM_DELETED_FUNCTION;
+ GlobalAlias(const GlobalAlias &) LLVM_DELETED_FUNCTION;
void setParent(Module *parent);
@@ -76,7 +76,6 @@
const GlobalValue *resolveAliasedGlobal(bool stopOnWeak = true) const;
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const GlobalAlias *) { return true; }
static inline bool classof(const Value *V) {
return V->getValueID() == Value::GlobalAliasVal;
}
Modified: llvm/branches/AMDILBackend/include/llvm/GlobalValue.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/GlobalValue.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/GlobalValue.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/GlobalValue.h Tue Jan 15 11:16:16 2013
@@ -26,7 +26,7 @@
class Module;
class GlobalValue : public Constant {
- GlobalValue(const GlobalValue &); // do not implement
+ GlobalValue(const GlobalValue &) LLVM_DELETED_FUNCTION;
public:
/// @brief An enumeration for the kinds of linkage for global values.
enum LinkageTypes {
@@ -34,6 +34,7 @@
AvailableExternallyLinkage, ///< Available for inspection, not emission.
LinkOnceAnyLinkage, ///< Keep one copy of function when linking (inline)
LinkOnceODRLinkage, ///< Same, but only replaced by something equivalent.
+ LinkOnceODRAutoHideLinkage, ///< Like LinkOnceODRLinkage but addr not taken.
WeakAnyLinkage, ///< Keep one copy of named function when linking (weak)
WeakODRLinkage, ///< Same, but only replaced by something equivalent.
AppendingLinkage, ///< Special purpose, only applies to global arrays
@@ -41,8 +42,6 @@
PrivateLinkage, ///< Like Internal, but omit from symbol table.
LinkerPrivateLinkage, ///< Like Private, but linker removes.
LinkerPrivateWeakLinkage, ///< Like LinkerPrivate, but weak.
- LinkerPrivateWeakDefAutoLinkage, ///< Like LinkerPrivateWeak, but possibly
- /// hidden.
DLLImportLinkage, ///< Function to be imported from DLL
DLLExportLinkage, ///< Function to be accessible from DLL.
ExternalWeakLinkage,///< ExternalWeak linkage description.
@@ -123,7 +122,12 @@
return Linkage == AvailableExternallyLinkage;
}
static bool isLinkOnceLinkage(LinkageTypes Linkage) {
- return Linkage == LinkOnceAnyLinkage || Linkage == LinkOnceODRLinkage;
+ return Linkage == LinkOnceAnyLinkage ||
+ Linkage == LinkOnceODRLinkage ||
+ Linkage == LinkOnceODRAutoHideLinkage;
+ }
+ static bool isLinkOnceODRAutoHideLinkage(LinkageTypes Linkage) {
+ return Linkage == LinkOnceODRAutoHideLinkage;
}
static bool isWeakLinkage(LinkageTypes Linkage) {
return Linkage == WeakAnyLinkage || Linkage == WeakODRLinkage;
@@ -143,13 +147,9 @@
static bool isLinkerPrivateWeakLinkage(LinkageTypes Linkage) {
return Linkage == LinkerPrivateWeakLinkage;
}
- static bool isLinkerPrivateWeakDefAutoLinkage(LinkageTypes Linkage) {
- return Linkage == LinkerPrivateWeakDefAutoLinkage;
- }
static bool isLocalLinkage(LinkageTypes Linkage) {
return isInternalLinkage(Linkage) || isPrivateLinkage(Linkage) ||
- isLinkerPrivateLinkage(Linkage) || isLinkerPrivateWeakLinkage(Linkage) ||
- isLinkerPrivateWeakDefAutoLinkage(Linkage);
+ isLinkerPrivateLinkage(Linkage) || isLinkerPrivateWeakLinkage(Linkage);
}
static bool isDLLImportLinkage(LinkageTypes Linkage) {
return Linkage == DLLImportLinkage;
@@ -178,8 +178,7 @@
Linkage == LinkOnceAnyLinkage ||
Linkage == CommonLinkage ||
Linkage == ExternalWeakLinkage ||
- Linkage == LinkerPrivateWeakLinkage ||
- Linkage == LinkerPrivateWeakDefAutoLinkage;
+ Linkage == LinkerPrivateWeakLinkage;
}
/// isWeakForLinker - Whether the definition of this global may be replaced at
@@ -192,10 +191,10 @@
Linkage == WeakODRLinkage ||
Linkage == LinkOnceAnyLinkage ||
Linkage == LinkOnceODRLinkage ||
+ Linkage == LinkOnceODRAutoHideLinkage ||
Linkage == CommonLinkage ||
Linkage == ExternalWeakLinkage ||
- Linkage == LinkerPrivateWeakLinkage ||
- Linkage == LinkerPrivateWeakDefAutoLinkage;
+ Linkage == LinkerPrivateWeakLinkage;
}
bool hasExternalLinkage() const { return isExternalLinkage(Linkage); }
@@ -205,6 +204,9 @@
bool hasLinkOnceLinkage() const {
return isLinkOnceLinkage(Linkage);
}
+ bool hasLinkOnceODRAutoHideLinkage() const {
+ return isLinkOnceODRAutoHideLinkage(Linkage);
+ }
bool hasWeakLinkage() const {
return isWeakLinkage(Linkage);
}
@@ -215,9 +217,6 @@
bool hasLinkerPrivateWeakLinkage() const {
return isLinkerPrivateWeakLinkage(Linkage);
}
- bool hasLinkerPrivateWeakDefAutoLinkage() const {
- return isLinkerPrivateWeakDefAutoLinkage(Linkage);
- }
bool hasLocalLinkage() const { return isLocalLinkage(Linkage); }
bool hasDLLImportLinkage() const { return isDLLImportLinkage(Linkage); }
bool hasDLLExportLinkage() const { return isDLLExportLinkage(Linkage); }
@@ -288,7 +287,6 @@
inline const Module *getParent() const { return Parent; }
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const GlobalValue *) { return true; }
static inline bool classof(const Value *V) {
return V->getValueID() == Value::FunctionVal ||
V->getValueID() == Value::GlobalVariableVal ||
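Clients of the removed LinkerPrivateWeakDefAuto linkage migrate to the new LinkOnceODRAutoHide variant; a short sketch (the predicate comes from this patch, the wrapper is illustrative):

    #include "llvm/GlobalValue.h"
    using namespace llvm;

    bool mayHideSymbol(const GlobalValue *GV) {
      // ODR-equivalent copies may be merged and, since the address is not
      // taken, the linker may also hide the symbol.
      return GV->hasLinkOnceODRAutoHideLinkage();
    }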
Modified: llvm/branches/AMDILBackend/include/llvm/GlobalVariable.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/GlobalVariable.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/GlobalVariable.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/GlobalVariable.h Tue Jan 15 11:16:16 2013
@@ -34,9 +34,9 @@
class GlobalVariable : public GlobalValue, public ilist_node<GlobalVariable> {
friend class SymbolTableListTraits<GlobalVariable, Module>;
- void *operator new(size_t, unsigned); // Do not implement
- void operator=(const GlobalVariable &); // Do not implement
- GlobalVariable(const GlobalVariable &); // Do not implement
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
+ void operator=(const GlobalVariable &) LLVM_DELETED_FUNCTION;
+ GlobalVariable(const GlobalVariable &) LLVM_DELETED_FUNCTION;
void setParent(Module *parent);
@@ -174,7 +174,6 @@
virtual void replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U);
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const GlobalVariable *) { return true; }
static inline bool classof(const Value *V) {
return V->getValueID() == Value::GlobalVariableVal;
}
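The classof(const GlobalVariable *) overload removed here (and the matching overloads removed throughout this patch) was only a trivially-true helper for the casting templates; isa<>/dyn_cast<> call sites are unaffected, as in this sketch:

    #include "llvm/GlobalVariable.h"
    #include "llvm/Support/Casting.h"
    using namespace llvm;

    bool isConstantGlobal(const Value *V) {
      // Resolves through classof(const Value *) alone.
      if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
        return GV->isConstant();
      return false;
    }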
Modified: llvm/branches/AMDILBackend/include/llvm/IRBuilder.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/IRBuilder.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/IRBuilder.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/IRBuilder.h Tue Jan 15 11:16:16 2013
@@ -17,6 +17,7 @@
#include "llvm/Instructions.h"
#include "llvm/BasicBlock.h"
+#include "llvm/DataLayout.h"
#include "llvm/LLVMContext.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
@@ -266,6 +267,10 @@
return Type::getInt8PtrTy(Context, AddrSpace);
}
+ IntegerType* getIntPtrTy(DataLayout *DL, unsigned AddrSpace = 0) {
+ return DL->getIntPtrType(Context, AddrSpace);
+ }
+
//===--------------------------------------------------------------------===//
// Intrinsic creation methods
//===--------------------------------------------------------------------===//
@@ -285,12 +290,15 @@
/// If the pointers aren't i8*, they will be converted. If a TBAA tag is
/// specified, it will be added to the instruction.
CallInst *CreateMemCpy(Value *Dst, Value *Src, uint64_t Size, unsigned Align,
- bool isVolatile = false, MDNode *TBAATag = 0) {
- return CreateMemCpy(Dst, Src, getInt64(Size), Align, isVolatile, TBAATag);
+ bool isVolatile = false, MDNode *TBAATag = 0,
+ MDNode *TBAAStructTag = 0) {
+ return CreateMemCpy(Dst, Src, getInt64(Size), Align, isVolatile, TBAATag,
+ TBAAStructTag);
}
CallInst *CreateMemCpy(Value *Dst, Value *Src, Value *Size, unsigned Align,
- bool isVolatile = false, MDNode *TBAATag = 0);
+ bool isVolatile = false, MDNode *TBAATag = 0,
+ MDNode *TBAAStructTag = 0);
/// CreateMemMove - Create and insert a memmove between the specified
/// pointers. If the pointers aren't i8*, they will be converted. If a TBAA
@@ -810,6 +818,31 @@
StoreInst *CreateStore(Value *Val, Value *Ptr, bool isVolatile = false) {
return Insert(new StoreInst(Val, Ptr, isVolatile));
}
+ // Provided to resolve 'CreateAlignedLoad(Ptr, Align, "...")' correctly,
+ // instead of converting the string to 'bool' for the isVolatile parameter.
+ LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align, const char *Name) {
+ LoadInst *LI = CreateLoad(Ptr, Name);
+ LI->setAlignment(Align);
+ return LI;
+ }
+ LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align,
+ const Twine &Name = "") {
+ LoadInst *LI = CreateLoad(Ptr, Name);
+ LI->setAlignment(Align);
+ return LI;
+ }
+ LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align, bool isVolatile,
+ const Twine &Name = "") {
+ LoadInst *LI = CreateLoad(Ptr, isVolatile, Name);
+ LI->setAlignment(Align);
+ return LI;
+ }
+ StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, unsigned Align,
+ bool isVolatile = false) {
+ StoreInst *SI = CreateStore(Val, Ptr, isVolatile);
+ SI->setAlignment(Align);
+ return SI;
+ }
FenceInst *CreateFence(AtomicOrdering Ordering,
SynchronizationScope SynchScope = CrossThread) {
return Insert(new FenceInst(Context, Ordering, SynchScope));
@@ -970,6 +1003,30 @@
Value *CreateSExt(Value *V, Type *DestTy, const Twine &Name = "") {
return CreateCast(Instruction::SExt, V, DestTy, Name);
}
+ /// CreateZExtOrTrunc - Create a ZExt or Trunc from the integer value V to
+ /// DestTy. Return the value untouched if the type of V is already DestTy.
+ Value *CreateZExtOrTrunc(Value *V, IntegerType *DestTy,
+ const Twine &Name = "") {
+ assert(isa<IntegerType>(V->getType()) && "Can only zero extend integers!");
+ IntegerType *IntTy = cast<IntegerType>(V->getType());
+ if (IntTy->getBitWidth() < DestTy->getBitWidth())
+ return CreateZExt(V, DestTy, Name);
+ if (IntTy->getBitWidth() > DestTy->getBitWidth())
+ return CreateTrunc(V, DestTy, Name);
+ return V;
+ }
+ /// CreateSExtOrTrunc - Create a SExt or Trunc from the integer value V to
+ /// DestTy. Return the value untouched if the type of V is already DestTy.
+ Value *CreateSExtOrTrunc(Value *V, IntegerType *DestTy,
+ const Twine &Name = "") {
+ assert(isa<IntegerType>(V->getType()) && "Can only sign extend integers!");
+ IntegerType *IntTy = cast<IntegerType>(V->getType());
+ if (IntTy->getBitWidth() < DestTy->getBitWidth())
+ return CreateSExt(V, DestTy, Name);
+ if (IntTy->getBitWidth() > DestTy->getBitWidth())
+ return CreateTrunc(V, DestTy, Name);
+ return V;
+ }
Value *CreateFPToUI(Value *V, Type *DestTy, const Twine &Name = ""){
return CreateCast(Instruction::FPToUI, V, DestTy, Name);
}
@@ -1052,7 +1109,7 @@
private:
// Provided to resolve 'CreateIntCast(Ptr, Ptr, "...")', giving a compile time
// error, instead of converting the string to bool for the isSigned parameter.
- Value *CreateIntCast(Value *, Type *, const char *); // DO NOT IMPLEMENT
+ Value *CreateIntCast(Value *, Type *, const char *) LLVM_DELETED_FUNCTION;
public:
Value *CreateFPCast(Value *V, Type *DestTy, const Twine &Name = "") {
if (V->getType() == DestTy)
@@ -1261,13 +1318,13 @@
// Utility creation methods
//===--------------------------------------------------------------------===//
- /// CreateIsNull - Return an i1 value testing if \arg Arg is null.
+ /// CreateIsNull - Return an i1 value testing if \p Arg is null.
Value *CreateIsNull(Value *Arg, const Twine &Name = "") {
return CreateICmpEQ(Arg, Constant::getNullValue(Arg->getType()),
Name);
}
- /// CreateIsNotNull - Return an i1 value testing if \arg Arg is not null.
+ /// CreateIsNotNull - Return an i1 value testing if \p Arg is not null.
Value *CreateIsNotNull(Value *Arg, const Twine &Name = "") {
return CreateICmpNE(Arg, Constant::getNullValue(Arg->getType()),
Name);
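A sketch exercising the new IRBuilder helpers (BB, Ptr, Val, and DL are assumed to exist, with Ptr pointing at an integer type that matches Val):

    #include "llvm/DataLayout.h"
    #include "llvm/IRBuilder.h"
    using namespace llvm;

    void emitAccess(BasicBlock *BB, Value *Ptr, Value *Val, DataLayout *DL) {
      IRBuilder<> B(BB);
      // New aligned load/store convenience wrappers.
      LoadInst *L = B.CreateAlignedLoad(Ptr, 8, "val");
      B.CreateAlignedStore(Val, Ptr, 8);
      // New width-adjusting casts; no-ops when the width already matches.
      Value *Wide = B.CreateZExtOrTrunc(L, B.getInt64Ty());
      // New pointer-sized integer type query routed through DataLayout.
      IntegerType *IntPtrTy = B.getIntPtrTy(DL);
      (void)Wide; (void)IntPtrTy;
    }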
Modified: llvm/branches/AMDILBackend/include/llvm/InitializePasses.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/InitializePasses.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/InitializePasses.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/InitializePasses.h Tue Jan 15 11:16:16 2013
@@ -66,6 +66,7 @@
void initializeAliasSetPrinterPass(PassRegistry&);
void initializeAlwaysInlinerPass(PassRegistry&);
void initializeArgPromotionPass(PassRegistry&);
+void initializeBarrierNoopPass(PassRegistry&);
void initializeBasicAliasAnalysisPass(PassRegistry&);
void initializeBasicCallGraphPass(PassRegistry&);
void initializeBlockExtractorPassPass(PassRegistry&);
@@ -87,6 +88,7 @@
void initializeConstantMergePass(PassRegistry&);
void initializeConstantPropagationPass(PassRegistry&);
void initializeMachineCopyPropagationPass(PassRegistry&);
+void initializeCostModelAnalysisPass(PassRegistry&);
void initializeCorrelatedValuePropagationPass(PassRegistry&);
void initializeDAEPass(PassRegistry&);
void initializeDAHPass(PassRegistry&);
@@ -94,6 +96,7 @@
void initializeDSEPass(PassRegistry&);
void initializeDeadInstEliminationPass(PassRegistry&);
void initializeDeadMachineInstructionElimPass(PassRegistry&);
+void initializeDependenceAnalysisPass(PassRegistry&);
void initializeDomOnlyPrinterPass(PassRegistry&);
void initializeDomOnlyViewerPass(PassRegistry&);
void initializeDomPrinterPass(PassRegistry&);
@@ -141,10 +144,10 @@
void initializeLiveStacksPass(PassRegistry&);
void initializeLiveVariablesPass(PassRegistry&);
void initializeLoaderPassPass(PassRegistry&);
+void initializeProfileMetadataLoaderPassPass(PassRegistry&);
void initializePathProfileLoaderPassPass(PassRegistry&);
void initializeLocalStackSlotPassPass(PassRegistry&);
void initializeLoopDeletionPass(PassRegistry&);
-void initializeLoopDependenceAnalysisPass(PassRegistry&);
void initializeLoopExtractorPass(PassRegistry&);
void initializeLoopInfoPass(PassRegistry&);
void initializeLoopInstSimplifyPass(PassRegistry&);
@@ -166,6 +169,7 @@
void initializeMachineBranchProbabilityInfoPass(PassRegistry&);
void initializeMachineCSEPass(PassRegistry&);
void initializeMachineDominatorTreePass(PassRegistry&);
+void initializeMachinePostDominatorTreePass(PassRegistry&);
void initializeMachineLICMPass(PassRegistry&);
void initializeMachineLoopInfoPass(PassRegistry&);
void initializeMachineLoopRangesPass(PassRegistry&);
@@ -177,6 +181,7 @@
void initializeMemCpyOptPass(PassRegistry&);
void initializeMemDepPrinterPass(PassRegistry&);
void initializeMemoryDependenceAnalysisPass(PassRegistry&);
+void initializeMetaRenamerPass(PassRegistry&);
void initializeMergeFunctionsPass(PassRegistry&);
void initializeModuleDebugInfoPrinterPass(PassRegistry&);
void initializeNoAAPass(PassRegistry&);
@@ -219,6 +224,7 @@
void initializeRegionPrinterPass(PassRegistry&);
void initializeRegionViewerPass(PassRegistry&);
void initializeSCCPPass(PassRegistry&);
+void initializeSROAPass(PassRegistry&);
void initializeSROA_DTPass(PassRegistry&);
void initializeSROA_SSAUpPass(PassRegistry&);
void initializeScalarEvolutionAliasAnalysisPass(PassRegistry&);
@@ -231,6 +237,7 @@
void initializeSlotIndexesPass(PassRegistry&);
void initializeSpillPlacementPass(PassRegistry&);
void initializeStackProtectorPass(PassRegistry&);
+void initializeStackColoringPass(PassRegistry&);
void initializeStackSlotColoringPass(PassRegistry&);
void initializeStripDeadDebugInfoPass(PassRegistry&);
void initializeStripDeadPrototypesPassPass(PassRegistry&);
@@ -241,7 +248,8 @@
void initializeTailCallElimPass(PassRegistry&);
void initializeTailDuplicatePassPass(PassRegistry&);
void initializeTargetPassConfigPass(PassRegistry&);
-void initializeTargetDataPass(PassRegistry&);
+void initializeDataLayoutPass(PassRegistry&);
+void initializeTargetTransformInfoPass(PassRegistry&);
void initializeTargetLibraryInfoPass(PassRegistry&);
void initializeTwoAddressInstructionPassPass(PassRegistry&);
void initializeTypeBasedAliasAnalysisPass(PassRegistry&);
@@ -254,6 +262,7 @@
void initializeInstSimplifierPass(PassRegistry&);
void initializeUnpackMachineBundlesPass(PassRegistry&);
void initializeFinalizeMachineBundlesPass(PassRegistry&);
+void initializeLoopVectorizePass(PassRegistry&);
void initializeBBVectorizePass(PassRegistry&);
void initializeMachineFunctionPrinterPassPass(PassRegistry&);
}
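For out-of-tree tools that force-link analyses, the practical fallout here is the TargetData-to-DataLayout rename plus the new pass initializers; a sketch (the usual registry boilerplate is assumed):

    #include "llvm/InitializePasses.h"
    #include "llvm/PassRegistry.h"
    using namespace llvm;

    void initMyAnalyses() {
      PassRegistry &R = *PassRegistry::getPassRegistry();
      initializeDataLayoutPass(R);     // was initializeTargetDataPass(R)
      initializeLoopVectorizePass(R);  // new in this change
    }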
Modified: llvm/branches/AMDILBackend/include/llvm/InlineAsm.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/InlineAsm.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/InlineAsm.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/InlineAsm.h Tue Jan 15 11:16:16 2013
@@ -33,20 +33,28 @@
struct ConstantCreator;
class InlineAsm : public Value {
+public:
+ enum AsmDialect {
+ AD_ATT,
+ AD_Intel
+ };
+
+private:
friend struct ConstantCreator<InlineAsm, PointerType, InlineAsmKeyType>;
friend class ConstantUniqueMap<InlineAsmKeyType, const InlineAsmKeyType&,
PointerType, InlineAsm, false>;
- InlineAsm(const InlineAsm &); // do not implement
- void operator=(const InlineAsm&); // do not implement
+ InlineAsm(const InlineAsm &) LLVM_DELETED_FUNCTION;
+ void operator=(const InlineAsm&) LLVM_DELETED_FUNCTION;
std::string AsmString, Constraints;
bool HasSideEffects;
bool IsAlignStack;
-
+ AsmDialect Dialect;
+
InlineAsm(PointerType *Ty, const std::string &AsmString,
const std::string &Constraints, bool hasSideEffects,
- bool isAlignStack);
+ bool isAlignStack, AsmDialect asmDialect);
virtual ~InlineAsm();
/// When the ConstantUniqueMap merges two types and makes two InlineAsms
@@ -58,11 +66,13 @@
///
static InlineAsm *get(FunctionType *Ty, StringRef AsmString,
StringRef Constraints, bool hasSideEffects,
- bool isAlignStack = false);
+ bool isAlignStack = false,
+ AsmDialect asmDialect = AD_ATT);
bool hasSideEffects() const { return HasSideEffects; }
bool isAlignStack() const { return IsAlignStack; }
-
+ AsmDialect getDialect() const { return Dialect; }
+
/// getType - InlineAsm's are always pointers.
///
PointerType *getType() const {
@@ -179,7 +189,6 @@
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const InlineAsm *) { return true; }
static inline bool classof(const Value *V) {
return V->getValueID() == Value::InlineAsmVal;
}
@@ -193,17 +202,20 @@
Op_InputChain = 0,
Op_AsmString = 1,
Op_MDNode = 2,
- Op_ExtraInfo = 3, // HasSideEffects, IsAlignStack
+ Op_ExtraInfo = 3, // HasSideEffects, IsAlignStack, AsmDialect.
Op_FirstOperand = 4,
// Fixed operands on an INLINEASM MachineInstr.
MIOp_AsmString = 0,
- MIOp_ExtraInfo = 1, // HasSideEffects, IsAlignStack
+ MIOp_ExtraInfo = 1, // HasSideEffects, IsAlignStack, AsmDialect.
MIOp_FirstOperand = 2,
// Interpretation of the MIOp_ExtraInfo bit field.
Extra_HasSideEffects = 1,
Extra_IsAlignStack = 2,
+ Extra_AsmDialect = 4,
+ Extra_MayLoad = 8,
+ Extra_MayStore = 16,
// Inline asm operands map to multiple SDNode / MachineInstr operands.
// The first operand is an immediate describing the asm operand, the low
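A sketch of requesting the new Intel dialect through the extended InlineAsm::get() (the asm string and constraints are placeholders):

    #include "llvm/DerivedTypes.h"
    #include "llvm/InlineAsm.h"
    using namespace llvm;

    InlineAsm *makeIntelNop(FunctionType *FTy) {
      return InlineAsm::get(FTy, "nop", "", /*hasSideEffects=*/true,
                            /*isAlignStack=*/false, InlineAsm::AD_Intel);
    }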
Modified: llvm/branches/AMDILBackend/include/llvm/InstrTypes.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/InstrTypes.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/InstrTypes.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/InstrTypes.h Tue Jan 15 11:16:16 2013
@@ -73,7 +73,6 @@
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const TerminatorInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->isTerminator();
}
@@ -88,7 +87,7 @@
//===----------------------------------------------------------------------===//
class UnaryInstruction : public Instruction {
- void *operator new(size_t, unsigned); // Do not implement
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
protected:
UnaryInstruction(Type *Ty, unsigned iType, Value *V,
@@ -113,7 +112,6 @@
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const UnaryInstruction *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::Alloca ||
I->getOpcode() == Instruction::Load ||
@@ -138,14 +136,14 @@
//===----------------------------------------------------------------------===//
class BinaryOperator : public Instruction {
- void *operator new(size_t, unsigned); // Do not implement
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
protected:
void init(BinaryOps iType);
BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
const Twine &Name, Instruction *InsertBefore);
BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
const Twine &Name, BasicBlock *InsertAtEnd);
- virtual BinaryOperator *clone_impl() const;
+ virtual BinaryOperator *clone_impl() const LLVM_OVERRIDE;
public:
// allocate space for exactly two operands
void *operator new(size_t s) {
@@ -361,7 +359,6 @@
bool isExact() const;
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const BinaryOperator *) { return true; }
static inline bool classof(const Instruction *I) {
return I->isBinaryOp();
}
@@ -388,7 +385,7 @@
/// if (isa<CastInst>(Instr)) { ... }
/// @brief Base class of casting instructions.
class CastInst : public UnaryInstruction {
- virtual void anchor();
+ virtual void anchor() LLVM_OVERRIDE;
protected:
/// @brief Constructor with insert-before-instruction semantics for subclasses
CastInst(Type *Ty, unsigned iType, Value *S,
@@ -563,7 +560,7 @@
/// IntPtrTy argument is used to make accurate determinations for casts
/// involving Integer and Pointer types. They are no-op casts if the integer
/// is the same size as the pointer. However, pointer size varies with
- /// platform. Generally, the result of TargetData::getIntPtrType() should be
+ /// platform. Generally, the result of DataLayout::getIntPtrType() should be
/// passed in. If that's not available, use Type::Int64Ty, which will make
/// the isNoopCast call conservative.
/// @brief Determine if the described cast is a no-op cast.
@@ -581,8 +578,8 @@
/// Determine how a pair of casts can be eliminated, if they can be at all.
/// This is a helper function for both CastInst and ConstantExpr.
- /// @returns 0 if the CastInst pair can't be eliminated
- /// @returns Instruction::CastOps value for a cast that can replace
+ /// @returns 0 if the CastInst pair can't be eliminated, otherwise
+ /// returns Instruction::CastOps value for a cast that can replace
/// the pair, casting SrcTy to DstTy.
/// @brief Determine if a cast pair is eliminable
static unsigned isEliminableCastPair(
@@ -591,7 +588,9 @@
Type *SrcTy, ///< SrcTy of 1st cast
Type *MidTy, ///< DstTy of 1st cast & SrcTy of 2nd cast
Type *DstTy, ///< DstTy of 2nd cast
- Type *IntPtrTy ///< Integer type corresponding to Ptr types, or null
+ Type *SrcIntPtrTy, ///< Integer type corresponding to Ptr SrcTy, or null
+ Type *MidIntPtrTy, ///< Integer type corresponding to Ptr MidTy, or null
+ Type *DstIntPtrTy ///< Integer type corresponding to Ptr DstTy, or null
);
/// @brief Return the opcode of this CastInst
@@ -611,7 +610,6 @@
static bool castIsValid(Instruction::CastOps op, Value *S, Type *DstTy);
/// @brief Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const CastInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->isCast();
}
@@ -627,8 +625,8 @@
/// This class is the base class for the comparison instructions.
/// @brief Abstract base class of comparison instructions.
class CmpInst : public Instruction {
- void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
- CmpInst(); // do not implement
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
+ CmpInst() LLVM_DELETED_FUNCTION;
protected:
CmpInst(Type *ty, Instruction::OtherOps op, unsigned short pred,
Value *LHS, Value *RHS, const Twine &Name = "",
@@ -638,7 +636,7 @@
Value *LHS, Value *RHS, const Twine &Name,
BasicBlock *InsertAtEnd);
- virtual void Anchor() const; // Out of line virtual method.
+ virtual void anchor() LLVM_OVERRIDE; // Out of line virtual method.
public:
/// This enumeration lists the possible predicates for CmpInst subclasses.
/// Values in the range 0-31 are reserved for FCmpInst, while values in the
@@ -816,7 +814,6 @@
static bool isFalseWhenEqual(unsigned short predicate);
/// @brief Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const CmpInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::ICmp ||
I->getOpcode() == Instruction::FCmp;
Modified: llvm/branches/AMDILBackend/include/llvm/Instruction.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Instruction.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Instruction.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Instruction.h Tue Jan 15 11:16:16 2013
@@ -28,8 +28,8 @@
class SymbolTableListTraits;
class Instruction : public User, public ilist_node<Instruction> {
- void operator=(const Instruction &); // Do not implement
- Instruction(const Instruction &); // Do not implement
+ void operator=(const Instruction &) LLVM_DELETED_FUNCTION;
+ Instruction(const Instruction &) LLVM_DELETED_FUNCTION;
BasicBlock *Parent;
DebugLoc DbgLoc; // 'dbg' Metadata cache.
@@ -310,7 +310,6 @@
/// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const Instruction *) { return true; }
static inline bool classof(const Value *V) {
return V->getValueID() >= Value::InstructionVal;
}
Modified: llvm/branches/AMDILBackend/include/llvm/Instructions.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Instructions.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Instructions.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Instructions.h Tue Jan 15 11:16:16 2013
@@ -112,7 +112,6 @@
bool isStaticAlloca() const;
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const AllocaInst *) { return true; }
static inline bool classof(const Instruction *I) {
return (I->getOpcode() == Instruction::Alloca);
}
@@ -226,13 +225,13 @@
const Value *getPointerOperand() const { return getOperand(0); }
static unsigned getPointerOperandIndex() { return 0U; }
+ /// \brief Returns the address space of the pointer operand.
unsigned getPointerAddressSpace() const {
- return cast<PointerType>(getPointerOperand()->getType())->getAddressSpace();
+ return getPointerOperand()->getType()->getPointerAddressSpace();
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const LoadInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::Load;
}
@@ -255,7 +254,7 @@
/// StoreInst - an instruction for storing to memory
///
class StoreInst : public Instruction {
- void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
void AssertOK();
protected:
virtual StoreInst *clone_impl() const;
@@ -349,12 +348,12 @@
const Value *getPointerOperand() const { return getOperand(1); }
static unsigned getPointerOperandIndex() { return 1U; }
+ /// \brief Returns the address space of the pointer operand.
unsigned getPointerAddressSpace() const {
- return cast<PointerType>(getPointerOperand()->getType())->getAddressSpace();
+ return getPointerOperand()->getType()->getPointerAddressSpace();
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const StoreInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::Store;
}
@@ -382,7 +381,7 @@
/// FenceInst - an instruction for ordering other memory operations
///
class FenceInst : public Instruction {
- void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
void Init(AtomicOrdering Ordering, SynchronizationScope SynchScope);
protected:
virtual FenceInst *clone_impl() const;
@@ -426,7 +425,6 @@
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const FenceInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::Fence;
}
@@ -450,7 +448,7 @@
/// there. Returns the value that was loaded.
///
class AtomicCmpXchgInst : public Instruction {
- void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
void Init(Value *Ptr, Value *Cmp, Value *NewVal,
AtomicOrdering Ordering, SynchronizationScope SynchScope);
protected:
@@ -521,12 +519,12 @@
Value *getNewValOperand() { return getOperand(2); }
const Value *getNewValOperand() const { return getOperand(2); }
+ /// \brief Returns the address space of the pointer operand.
unsigned getPointerAddressSpace() const {
- return cast<PointerType>(getPointerOperand()->getType())->getAddressSpace();
+ return getPointerOperand()->getType()->getPointerAddressSpace();
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const AtomicCmpXchgInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::AtomicCmpXchg;
}
@@ -557,7 +555,7 @@
/// the old value.
///
class AtomicRMWInst : public Instruction {
- void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
protected:
virtual AtomicRMWInst *clone_impl() const;
public:
@@ -665,12 +663,12 @@
Value *getValOperand() { return getOperand(1); }
const Value *getValOperand() const { return getOperand(1); }
+ /// \brief Returns the address space of the pointer operand.
unsigned getPointerAddressSpace() const {
- return cast<PointerType>(getPointerOperand()->getType())->getAddressSpace();
+ return getPointerOperand()->getType()->getPointerAddressSpace();
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const AtomicRMWInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::AtomicRMW;
}
@@ -768,6 +766,13 @@
return reinterpret_cast<PointerType*>(Instruction::getType());
}
+ /// \brief Returns the address space of this instruction's pointer type.
+ unsigned getAddressSpace() const {
+ // Note that this is always the same as the pointer operand's address space
+ // and that is cheaper to compute, so cheat here.
+ return getPointerAddressSpace();
+ }
+
/// getIndexedType - Returns the type of the element that would be loaded with
/// a load instruction with the specified parameters.
///
@@ -778,10 +783,6 @@
static Type *getIndexedType(Type *Ptr, ArrayRef<Constant *> IdxList);
static Type *getIndexedType(Type *Ptr, ArrayRef<uint64_t> IdxList);
- /// getIndexedType - Returns the address space used by the GEP pointer.
- ///
- static unsigned getAddressSpace(Value *Ptr);
-
inline op_iterator idx_begin() { return op_begin()+1; }
inline const_op_iterator idx_begin() const { return op_begin()+1; }
inline op_iterator idx_end() { return op_end(); }
@@ -797,22 +798,23 @@
return 0U; // get index for modifying correct operand.
}
- unsigned getPointerAddressSpace() const {
- return cast<PointerType>(getType())->getAddressSpace();
- }
-
/// getPointerOperandType - Method to return the pointer operand as a
/// PointerType.
Type *getPointerOperandType() const {
return getPointerOperand()->getType();
}
+ /// \brief Returns the address space of the pointer operand.
+ unsigned getPointerAddressSpace() const {
+ return getPointerOperandType()->getPointerAddressSpace();
+ }
+
/// GetGEPReturnType - Returns the pointer type returned by the GEP
/// instruction, which may be a vector of pointers.
static Type *getGEPReturnType(Value *Ptr, ArrayRef<Value *> IdxList) {
Type *PtrTy = PointerType::get(checkGEPType(
getIndexedType(Ptr->getType(), IdxList)),
- getAddressSpace(Ptr));
+ Ptr->getType()->getPointerAddressSpace());
// Vector GEP
if (Ptr->getType()->isVectorTy()) {
unsigned NumElem = cast<VectorType>(Ptr->getType())->getNumElements();
@@ -849,7 +851,6 @@
bool isInBounds() const;
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const GetElementPtrInst *) { return true; }
static inline bool classof(const Instruction *I) {
return (I->getOpcode() == Instruction::GetElementPtr);
}
@@ -897,13 +898,13 @@
/// This instruction compares its operands according to the predicate given
/// to the constructor. It only operates on integers or pointers. The operands
/// must be identical types.
-/// @brief Represent an integer comparison operator.
+/// \brief Represent an integer comparison operator.
class ICmpInst: public CmpInst {
protected:
- /// @brief Clone an identical ICmpInst
+ /// \brief Clone an identical ICmpInst
virtual ICmpInst *clone_impl() const;
public:
- /// @brief Constructor with insert-before-instruction semantics.
+ /// \brief Constructor with insert-before-instruction semantics.
ICmpInst(
Instruction *InsertBefore, ///< Where to insert
Predicate pred, ///< The predicate to use for the comparison
@@ -924,7 +925,7 @@
"Invalid operand types for ICmp instruction");
}
- /// @brief Constructor with insert-at-end semantics.
+ /// \brief Constructor with insert-at-end semantics.
ICmpInst(
BasicBlock &InsertAtEnd, ///< Block to insert into.
Predicate pred, ///< The predicate to use for the comparison
@@ -945,7 +946,7 @@
"Invalid operand types for ICmp instruction");
}
- /// @brief Constructor with no-insertion semantics
+ /// \brief Constructor with no-insertion semantics
ICmpInst(
Predicate pred, ///< The predicate to use for the comparison
Value *LHS, ///< The left-hand-side of the expression
@@ -967,25 +968,25 @@
/// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
/// @returns the predicate that would be the result if the operand were
/// regarded as signed.
- /// @brief Return the signed version of the predicate
+ /// \brief Return the signed version of the predicate
Predicate getSignedPredicate() const {
return getSignedPredicate(getPredicate());
}
/// This is a static version that you can use without an instruction.
- /// @brief Return the signed version of the predicate.
+ /// \brief Return the signed version of the predicate.
static Predicate getSignedPredicate(Predicate pred);
/// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
/// @returns the predicate that would be the result if the operand were
/// regarded as unsigned.
- /// @brief Return the unsigned version of the predicate
+ /// \brief Return the unsigned version of the predicate
Predicate getUnsignedPredicate() const {
return getUnsignedPredicate(getPredicate());
}
/// This is a static version that you can use without an instruction.
- /// @brief Return the unsigned version of the predicate.
+ /// \brief Return the unsigned version of the predicate.
static Predicate getUnsignedPredicate(Predicate pred);
/// isEquality - Return true if this predicate is either EQ or NE. This also
@@ -1001,7 +1002,7 @@
}
/// @returns true if the predicate of this ICmpInst is commutative
- /// @brief Determine if this relation is commutative.
+ /// \brief Determine if this relation is commutative.
bool isCommutative() const { return isEquality(); }
/// isRelational - Return true if the predicate is relational (not EQ or NE).
@@ -1017,21 +1018,20 @@
}
/// Initialize a set of values that all satisfy the predicate with C.
- /// @brief Make a ConstantRange for a relation with a constant value.
+ /// \brief Make a ConstantRange for a relation with a constant value.
static ConstantRange makeConstantRange(Predicate pred, const APInt &C);
/// Exchange the two operands to this instruction in such a way that it does
/// not modify the semantics of the instruction. The predicate value may be
/// changed to retain the same result if the predicate is order dependent
/// (e.g. ult).
- /// @brief Swap operands and adjust predicate.
+ /// \brief Swap operands and adjust predicate.
void swapOperands() {
setPredicate(getSwappedPredicate());
Op<0>().swap(Op<1>());
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const ICmpInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::ICmp;
}
@@ -1048,13 +1048,13 @@
/// This instruction compares its operands according to the predicate given
/// to the constructor. It only operates on floating point values or packed
/// vectors of floating point values. The operands must be identical types.
-/// @brief Represents a floating point comparison operator.
+/// \brief Represents a floating point comparison operator.
class FCmpInst: public CmpInst {
protected:
- /// @brief Clone an identical FCmpInst
+ /// \brief Clone an identical FCmpInst
virtual FCmpInst *clone_impl() const;
public:
- /// @brief Constructor with insert-before-instruction semantics.
+ /// \brief Constructor with insert-before-instruction semantics.
FCmpInst(
Instruction *InsertBefore, ///< Where to insert
Predicate pred, ///< The predicate to use for the comparison
@@ -1073,7 +1073,7 @@
"Invalid operand types for FCmp instruction");
}
- /// @brief Constructor with insert-at-end semantics.
+ /// \brief Constructor with insert-at-end semantics.
FCmpInst(
BasicBlock &InsertAtEnd, ///< Block to insert into.
Predicate pred, ///< The predicate to use for the comparison
@@ -1092,7 +1092,7 @@
"Invalid operand types for FCmp instruction");
}
- /// @brief Constructor with no-insertion semantics
+ /// \brief Constructor with no-insertion semantics
FCmpInst(
Predicate pred, ///< The predicate to use for the comparison
Value *LHS, ///< The left-hand-side of the expression
@@ -1110,14 +1110,14 @@
}
/// @returns true if the predicate of this instruction is EQ or NE.
- /// @brief Determine if this is an equality predicate.
+ /// \brief Determine if this is an equality predicate.
bool isEquality() const {
return getPredicate() == FCMP_OEQ || getPredicate() == FCMP_ONE ||
getPredicate() == FCMP_UEQ || getPredicate() == FCMP_UNE;
}
/// @returns true if the predicate of this instruction is commutative.
- /// @brief Determine if this is a commutative predicate.
+ /// \brief Determine if this is a commutative predicate.
bool isCommutative() const {
return isEquality() ||
getPredicate() == FCMP_FALSE ||
@@ -1127,21 +1127,20 @@
}
/// @returns true if the predicate is relational (not EQ or NE).
- /// @brief Determine if this a relational predicate.
+ /// \brief Determine if this is a relational predicate.
bool isRelational() const { return !isEquality(); }
/// Exchange the two operands to this instruction in such a way that it does
/// not modify the semantics of the instruction. The predicate value may be
/// changed to retain the same result if the predicate is order dependent
/// (e.g. ult).
- /// @brief Swap operands and adjust predicate.
+ /// \brief Swap operands and adjust predicate.
void swapOperands() {
setPredicate(getSwappedPredicate());
Op<0>().swap(Op<1>());
}
- /// @brief Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const FCmpInst *) { return true; }
+ /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::FCmp;
}
@@ -1163,12 +1162,12 @@
void init(Value *Func, const Twine &NameStr);
/// Construct a CallInst given a range of arguments.
- /// @brief Construct a CallInst from a range of arguments
+ /// \brief Construct a CallInst from a range of arguments
inline CallInst(Value *Func, ArrayRef<Value *> Args,
const Twine &NameStr, Instruction *InsertBefore);
/// Construct a CallInst given a range of arguments.
- /// @brief Construct a CallInst from a range of arguments
+ /// \brief Construct a CallInst from a range of arguments
inline CallInst(Value *Func, ArrayRef<Value *> Args,
const Twine &NameStr, BasicBlock *InsertAtEnd);
@@ -1267,77 +1266,78 @@
/// removeAttribute - removes the attribute from the list of attributes.
void removeAttribute(unsigned i, Attributes attr);
- /// \brief Return true if this call has the given attribute.
- bool hasFnAttr(Attributes N) const {
- return paramHasAttr(~0, N);
- }
+ /// \brief Determine whether this call has the given attribute.
+ bool hasFnAttr(Attributes::AttrVal A) const;
- /// @brief Determine whether the call or the callee has the given attribute.
- bool paramHasAttr(unsigned i, Attributes attr) const;
+ /// \brief Determine whether the call or the callee has the given attributes.
+ bool paramHasAttr(unsigned i, Attributes::AttrVal A) const;
- /// @brief Extract the alignment for a call or parameter (0=unknown).
+ /// \brief Extract the alignment for a call or parameter (0=unknown).
unsigned getParamAlignment(unsigned i) const {
return AttributeList.getParamAlignment(i);
}
- /// @brief Return true if the call should not be inlined.
- bool isNoInline() const { return hasFnAttr(Attribute::NoInline); }
- void setIsNoInline(bool Value = true) {
- if (Value) addAttribute(~0, Attribute::NoInline);
- else removeAttribute(~0, Attribute::NoInline);
+ /// \brief Return true if the call should not be inlined.
+ bool isNoInline() const { return hasFnAttr(Attributes::NoInline); }
+ void setIsNoInline() {
+ addAttribute(AttrListPtr::FunctionIndex,
+ Attributes::get(getContext(), Attributes::NoInline));
}
- /// @brief Return true if the call can return twice
+ /// \brief Return true if the call can return twice
bool canReturnTwice() const {
- return hasFnAttr(Attribute::ReturnsTwice);
+ return hasFnAttr(Attributes::ReturnsTwice);
}
- void setCanReturnTwice(bool Value = true) {
- if (Value) addAttribute(~0, Attribute::ReturnsTwice);
- else removeAttribute(~0, Attribute::ReturnsTwice);
+ void setCanReturnTwice() {
+ addAttribute(AttrListPtr::FunctionIndex,
+ Attributes::get(getContext(), Attributes::ReturnsTwice));
}
- /// @brief Determine if the call does not access memory.
+ /// \brief Determine if the call does not access memory.
bool doesNotAccessMemory() const {
- return hasFnAttr(Attribute::ReadNone);
+ return hasFnAttr(Attributes::ReadNone);
}
- void setDoesNotAccessMemory(bool NotAccessMemory = true) {
- if (NotAccessMemory) addAttribute(~0, Attribute::ReadNone);
- else removeAttribute(~0, Attribute::ReadNone);
+ void setDoesNotAccessMemory() {
+ addAttribute(AttrListPtr::FunctionIndex,
+ Attributes::get(getContext(), Attributes::ReadNone));
}
- /// @brief Determine if the call does not access or only reads memory.
+ /// \brief Determine if the call does not access or only reads memory.
bool onlyReadsMemory() const {
- return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly);
+ return doesNotAccessMemory() || hasFnAttr(Attributes::ReadOnly);
}
- void setOnlyReadsMemory(bool OnlyReadsMemory = true) {
- if (OnlyReadsMemory) addAttribute(~0, Attribute::ReadOnly);
- else removeAttribute(~0, Attribute::ReadOnly | Attribute::ReadNone);
+ void setOnlyReadsMemory() {
+ addAttribute(AttrListPtr::FunctionIndex,
+ Attributes::get(getContext(), Attributes::ReadOnly));
}
- /// @brief Determine if the call cannot return.
- bool doesNotReturn() const { return hasFnAttr(Attribute::NoReturn); }
- void setDoesNotReturn(bool DoesNotReturn = true) {
- if (DoesNotReturn) addAttribute(~0, Attribute::NoReturn);
- else removeAttribute(~0, Attribute::NoReturn);
+ /// \brief Determine if the call cannot return.
+ bool doesNotReturn() const { return hasFnAttr(Attributes::NoReturn); }
+ void setDoesNotReturn() {
+ addAttribute(AttrListPtr::FunctionIndex,
+ Attributes::get(getContext(), Attributes::NoReturn));
}
- /// @brief Determine if the call cannot unwind.
- bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); }
- void setDoesNotThrow(bool DoesNotThrow = true) {
- if (DoesNotThrow) addAttribute(~0, Attribute::NoUnwind);
- else removeAttribute(~0, Attribute::NoUnwind);
+ /// \brief Determine if the call cannot unwind.
+ bool doesNotThrow() const { return hasFnAttr(Attributes::NoUnwind); }
+ void setDoesNotThrow() {
+ addAttribute(AttrListPtr::FunctionIndex,
+ Attributes::get(getContext(), Attributes::NoUnwind));
}
- /// @brief Determine if the call returns a structure through first
+ /// \brief Determine if the call returns a structure through the first
/// pointer argument.
bool hasStructRetAttr() const {
// Be friendly and also check the callee.
- return paramHasAttr(1, Attribute::StructRet);
+ return paramHasAttr(1, Attributes::StructRet);
}
- /// @brief Determine if any call argument is an aggregate passed by value.
+ /// \brief Determine if any call argument is an aggregate passed by value.
bool hasByValArgument() const {
- return AttributeList.hasAttrSomewhere(Attribute::ByVal);
+ for (unsigned I = 0, E = AttributeList.getNumAttrs(); I != E; ++I)
+ if (AttributeList.getAttributesAtIndex(I).hasAttribute(Attributes::ByVal))
+ return true;
+ return false;
}
/// getCalledFunction - Return the function called, or null if this is an
@@ -1363,7 +1363,6 @@
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const CallInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::Call;
}
@@ -1469,7 +1468,6 @@
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const SelectInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::Select;
}
@@ -1512,7 +1510,6 @@
static unsigned getPointerOperandIndex() { return 0U; }
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const VAArgInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == VAArg;
}
@@ -1566,7 +1563,6 @@
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const ExtractElementInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::ExtractElement;
}
@@ -1625,7 +1621,6 @@
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const InsertElementInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::InsertElement;
}
@@ -1706,7 +1701,6 @@
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const ShuffleVectorInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::ShuffleVector;
}
@@ -1802,7 +1796,6 @@
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const ExtractValueInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::ExtractValue;
}
@@ -1839,7 +1832,7 @@
class InsertValueInst : public Instruction {
SmallVector<unsigned, 4> Indices;
- void *operator new(size_t, unsigned); // Do not implement
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
InsertValueInst(const InsertValueInst &IVI);
void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
const Twine &NameStr);
@@ -1924,7 +1917,6 @@
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const InsertValueInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::InsertValue;
}
@@ -1970,7 +1962,7 @@
// scientist's overactive imagination.
//
class PHINode : public Instruction {
- void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
/// ReservedSpace - The number of operands actually allocated. NumOperands is
/// the number actually in use.
unsigned ReservedSpace;
@@ -2141,7 +2133,6 @@
Value *hasConstantValue() const;
/// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const PHINode *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::PHI;
}
@@ -2178,7 +2169,7 @@
public:
enum ClauseType { Catch, Filter };
private:
- void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
// Allocate space for exactly zero operands.
void *operator new(size_t s) {
return User::operator new(s, 0);
@@ -2249,7 +2240,6 @@
void reserveClauses(unsigned Size) { growOperands(Size); }
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const LandingPadInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::LandingPad;
}
@@ -2318,7 +2308,6 @@
unsigned getNumSuccessors() const { return 0; }
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const ReturnInst *) { return true; }
static inline bool classof(const Instruction *I) {
return (I->getOpcode() == Instruction::Ret);
}
@@ -2418,7 +2407,6 @@
void swapSuccessors();
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const BranchInst *) { return true; }
static inline bool classof(const Instruction *I) {
return (I->getOpcode() == Instruction::Br);
}
@@ -2445,7 +2433,7 @@
/// SwitchInst - Multiway switch
///
class SwitchInst : public TerminatorInst {
- void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
unsigned ReservedSpace;
// Operands format:
// Operand[0] = Value to switch on
@@ -2613,7 +2601,7 @@
}
/// addCase - Add an entry to the switch instruction...
- /// @Deprecated
+ /// @deprecated
/// Note:
/// This action invalidates case_end(). Old case_end() iterator will
/// point to the added case.
@@ -2699,7 +2687,7 @@
}
/// Resolves case value for current case.
- /// @Deprecated
+ /// @deprecated
ConstantIntTy *getCaseValue() {
assert(Index < SI->getNumCases() && "Index exceeds the number of cases.");
IntegersSubsetRef CaseRanges = *SubsetIt;
@@ -2803,7 +2791,7 @@
CaseIt(const ParentTy& Src) : ParentTy(Src) {}
/// Sets the new value for current case.
- /// @Deprecated.
+ /// @deprecated.
void setValue(ConstantInt *V) {
assert(Index < SI->getNumCases() && "Index exceeds the number of cases.");
IntegersSubsetToBB Mapping;
@@ -2829,7 +2817,6 @@
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const SwitchInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::Switch;
}
@@ -2857,7 +2844,7 @@
/// IndirectBrInst - Indirect Branch Instruction.
///
class IndirectBrInst : public TerminatorInst {
- void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
unsigned ReservedSpace;
// Operand[0] = Value to switch on
// Operand[1] = Default basic block destination
@@ -2928,7 +2915,6 @@
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const IndirectBrInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::IndirectBr;
}
@@ -2963,14 +2949,14 @@
/// Construct an InvokeInst given a range of arguments.
///
- /// @brief Construct an InvokeInst from a range of arguments
+ /// \brief Construct an InvokeInst from a range of arguments
inline InvokeInst(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException,
ArrayRef<Value *> Args, unsigned Values,
const Twine &NameStr, Instruction *InsertBefore);
/// Construct an InvokeInst given a range of arguments.
///
- /// @brief Construct an InvokeInst from a range of arguments
+ /// \brief Construct an InvokeInst from a range of arguments
inline InvokeInst(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException,
ArrayRef<Value *> Args, unsigned Values,
const Twine &NameStr, BasicBlock *InsertAtEnd);
@@ -3029,68 +3015,69 @@
/// removeAttribute - removes the attribute from the list of attributes.
void removeAttribute(unsigned i, Attributes attr);
- /// \brief Return true if this call has the given attribute.
- bool hasFnAttr(Attributes N) const {
- return paramHasAttr(~0, N);
- }
+ /// \brief Determine whether this call has the given attribute.
+ bool hasFnAttr(Attributes::AttrVal A) const;
- /// @brief Determine whether the call or the callee has the given attribute.
- bool paramHasAttr(unsigned i, Attributes attr) const;
+ /// \brief Determine whether the call or the callee has the given attribute.
+ bool paramHasAttr(unsigned i, Attributes::AttrVal A) const;
- /// @brief Extract the alignment for a call or parameter (0=unknown).
+ /// \brief Extract the alignment for a call or parameter (0=unknown).
unsigned getParamAlignment(unsigned i) const {
return AttributeList.getParamAlignment(i);
}
- /// @brief Return true if the call should not be inlined.
- bool isNoInline() const { return hasFnAttr(Attribute::NoInline); }
- void setIsNoInline(bool Value = true) {
- if (Value) addAttribute(~0, Attribute::NoInline);
- else removeAttribute(~0, Attribute::NoInline);
+ /// \brief Return true if the call should not be inlined.
+ bool isNoInline() const { return hasFnAttr(Attributes::NoInline); }
+ void setIsNoInline() {
+ addAttribute(AttrListPtr::FunctionIndex,
+ Attributes::get(getContext(), Attributes::NoInline));
}
- /// @brief Determine if the call does not access memory.
+ /// \brief Determine if the call does not access memory.
bool doesNotAccessMemory() const {
- return hasFnAttr(Attribute::ReadNone);
+ return hasFnAttr(Attributes::ReadNone);
}
- void setDoesNotAccessMemory(bool NotAccessMemory = true) {
- if (NotAccessMemory) addAttribute(~0, Attribute::ReadNone);
- else removeAttribute(~0, Attribute::ReadNone);
+ void setDoesNotAccessMemory() {
+ addAttribute(AttrListPtr::FunctionIndex,
+ Attributes::get(getContext(), Attributes::ReadNone));
}
- /// @brief Determine if the call does not access or only reads memory.
+ /// \brief Determine if the call does not access or only reads memory.
bool onlyReadsMemory() const {
- return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly);
+ return doesNotAccessMemory() || hasFnAttr(Attributes::ReadOnly);
}
- void setOnlyReadsMemory(bool OnlyReadsMemory = true) {
- if (OnlyReadsMemory) addAttribute(~0, Attribute::ReadOnly);
- else removeAttribute(~0, Attribute::ReadOnly | Attribute::ReadNone);
+ void setOnlyReadsMemory() {
+ addAttribute(AttrListPtr::FunctionIndex,
+ Attributes::get(getContext(), Attributes::ReadOnly));
}
- /// @brief Determine if the call cannot return.
- bool doesNotReturn() const { return hasFnAttr(Attribute::NoReturn); }
- void setDoesNotReturn(bool DoesNotReturn = true) {
- if (DoesNotReturn) addAttribute(~0, Attribute::NoReturn);
- else removeAttribute(~0, Attribute::NoReturn);
+ /// \brief Determine if the call cannot return.
+ bool doesNotReturn() const { return hasFnAttr(Attributes::NoReturn); }
+ void setDoesNotReturn() {
+ addAttribute(AttrListPtr::FunctionIndex,
+ Attributes::get(getContext(), Attributes::NoReturn));
}
- /// @brief Determine if the call cannot unwind.
- bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); }
- void setDoesNotThrow(bool DoesNotThrow = true) {
- if (DoesNotThrow) addAttribute(~0, Attribute::NoUnwind);
- else removeAttribute(~0, Attribute::NoUnwind);
+ /// \brief Determine if the call cannot unwind.
+ bool doesNotThrow() const { return hasFnAttr(Attributes::NoUnwind); }
+ void setDoesNotThrow() {
+ addAttribute(AttrListPtr::FunctionIndex,
+ Attributes::get(getContext(), Attributes::NoUnwind));
}
- /// @brief Determine if the call returns a structure through first
+ /// \brief Determine if the call returns a structure through the first
/// pointer argument.
bool hasStructRetAttr() const {
// Be friendly and also check the callee.
- return paramHasAttr(1, Attribute::StructRet);
+ return paramHasAttr(1, Attributes::StructRet);
}
- /// @brief Determine if any call argument is an aggregate passed by value.
+ /// \brief Determine if any call argument is an aggregate passed by value.
bool hasByValArgument() const {
- return AttributeList.hasAttrSomewhere(Attribute::ByVal);
+ for (unsigned I = 0, E = AttributeList.getNumAttrs(); I != E; ++I)
+ if (AttributeList.getAttributesAtIndex(I).hasAttribute(Attributes::ByVal))
+ return true;
+ return false;
}
/// getCalledFunction - Return the function called, or null if this is an
@@ -3141,7 +3128,6 @@
unsigned getNumSuccessors() const { return 2; }
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const InvokeInst *) { return true; }
static inline bool classof(const Instruction *I) {
return (I->getOpcode() == Instruction::Invoke);
}
@@ -3221,7 +3207,6 @@
unsigned getNumSuccessors() const { return 0; }
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const ResumeInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::Resume;
}
@@ -3251,7 +3236,7 @@
/// end of the block cannot be reached.
///
class UnreachableInst : public TerminatorInst {
- void *operator new(size_t, unsigned); // DO NOT IMPLEMENT
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
protected:
virtual UnreachableInst *clone_impl() const;
@@ -3266,7 +3251,6 @@
unsigned getNumSuccessors() const { return 0; }
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const UnreachableInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::Unreachable;
}
@@ -3283,14 +3267,14 @@
// TruncInst Class
//===----------------------------------------------------------------------===//
-/// @brief This class represents a truncation of integer types.
+/// \brief This class represents a truncation of integer types.
class TruncInst : public CastInst {
protected:
- /// @brief Clone an identical TruncInst
+ /// \brief Clone an identical TruncInst
virtual TruncInst *clone_impl() const;
public:
- /// @brief Constructor with insert-before-instruction semantics
+ /// \brief Constructor with insert-before-instruction semantics
TruncInst(
Value *S, ///< The value to be truncated
Type *Ty, ///< The (smaller) type to truncate to
@@ -3298,7 +3282,7 @@
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
- /// @brief Constructor with insert-at-end-of-block semantics
+ /// \brief Constructor with insert-at-end-of-block semantics
TruncInst(
Value *S, ///< The value to be truncated
Type *Ty, ///< The (smaller) type to truncate to
@@ -3306,8 +3290,7 @@
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
- /// @brief Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const TruncInst *) { return true; }
+ /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Trunc;
}
@@ -3320,14 +3303,14 @@
// ZExtInst Class
//===----------------------------------------------------------------------===//
-/// @brief This class represents zero extension of integer types.
+/// \brief This class represents zero extension of integer types.
class ZExtInst : public CastInst {
protected:
- /// @brief Clone an identical ZExtInst
+ /// \brief Clone an identical ZExtInst
virtual ZExtInst *clone_impl() const;
public:
- /// @brief Constructor with insert-before-instruction semantics
+ /// \brief Constructor with insert-before-instruction semantics
ZExtInst(
Value *S, ///< The value to be zero extended
Type *Ty, ///< The type to zero extend to
@@ -3335,7 +3318,7 @@
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
- /// @brief Constructor with insert-at-end semantics.
+ /// \brief Constructor with insert-at-end semantics.
ZExtInst(
Value *S, ///< The value to be zero extended
Type *Ty, ///< The type to zero extend to
@@ -3343,8 +3326,7 @@
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
- /// @brief Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const ZExtInst *) { return true; }
+ /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Instruction *I) {
return I->getOpcode() == ZExt;
}
@@ -3357,14 +3339,14 @@
// SExtInst Class
//===----------------------------------------------------------------------===//
-/// @brief This class represents a sign extension of integer types.
+/// \brief This class represents a sign extension of integer types.
class SExtInst : public CastInst {
protected:
- /// @brief Clone an identical SExtInst
+ /// \brief Clone an identical SExtInst
virtual SExtInst *clone_impl() const;
public:
- /// @brief Constructor with insert-before-instruction semantics
+ /// \brief Constructor with insert-before-instruction semantics
SExtInst(
Value *S, ///< The value to be sign extended
Type *Ty, ///< The type to sign extend to
@@ -3372,7 +3354,7 @@
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
- /// @brief Constructor with insert-at-end-of-block semantics
+ /// \brief Constructor with insert-at-end-of-block semantics
SExtInst(
Value *S, ///< The value to be sign extended
Type *Ty, ///< The type to sign extend to
@@ -3380,8 +3362,7 @@
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
- /// @brief Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const SExtInst *) { return true; }
+ /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Instruction *I) {
return I->getOpcode() == SExt;
}
@@ -3394,14 +3375,14 @@
// FPTruncInst Class
//===----------------------------------------------------------------------===//
-/// @brief This class represents a truncation of floating point types.
+/// \brief This class represents a truncation of floating point types.
class FPTruncInst : public CastInst {
protected:
- /// @brief Clone an identical FPTruncInst
+ /// \brief Clone an identical FPTruncInst
virtual FPTruncInst *clone_impl() const;
public:
- /// @brief Constructor with insert-before-instruction semantics
+ /// \brief Constructor with insert-before-instruction semantics
FPTruncInst(
Value *S, ///< The value to be truncated
Type *Ty, ///< The type to truncate to
@@ -3409,7 +3390,7 @@
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
- /// @brief Constructor with insert-before-instruction semantics
+ /// \brief Constructor with insert-before-instruction semantics
FPTruncInst(
Value *S, ///< The value to be truncated
Type *Ty, ///< The type to truncate to
@@ -3417,8 +3398,7 @@
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
- /// @brief Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const FPTruncInst *) { return true; }
+ /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Instruction *I) {
return I->getOpcode() == FPTrunc;
}
@@ -3431,14 +3411,14 @@
// FPExtInst Class
//===----------------------------------------------------------------------===//
-/// @brief This class represents an extension of floating point types.
+/// \brief This class represents an extension of floating point types.
class FPExtInst : public CastInst {
protected:
- /// @brief Clone an identical FPExtInst
+ /// \brief Clone an identical FPExtInst
virtual FPExtInst *clone_impl() const;
public:
- /// @brief Constructor with insert-before-instruction semantics
+ /// \brief Constructor with insert-before-instruction semantics
FPExtInst(
Value *S, ///< The value to be extended
Type *Ty, ///< The type to extend to
@@ -3446,7 +3426,7 @@
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
- /// @brief Constructor with insert-at-end-of-block semantics
+ /// \brief Constructor with insert-at-end-of-block semantics
FPExtInst(
Value *S, ///< The value to be extended
Type *Ty, ///< The type to extend to
@@ -3454,8 +3434,7 @@
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
- /// @brief Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const FPExtInst *) { return true; }
+ /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Instruction *I) {
return I->getOpcode() == FPExt;
}
@@ -3468,14 +3447,14 @@
// UIToFPInst Class
//===----------------------------------------------------------------------===//
-/// @brief This class represents a cast unsigned integer to floating point.
+/// \brief This class represents a cast unsigned integer to floating point.
class UIToFPInst : public CastInst {
protected:
- /// @brief Clone an identical UIToFPInst
+ /// \brief Clone an identical UIToFPInst
virtual UIToFPInst *clone_impl() const;
public:
- /// @brief Constructor with insert-before-instruction semantics
+ /// \brief Constructor with insert-before-instruction semantics
UIToFPInst(
Value *S, ///< The value to be converted
Type *Ty, ///< The type to convert to
@@ -3483,7 +3462,7 @@
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
- /// @brief Constructor with insert-at-end-of-block semantics
+ /// \brief Constructor with insert-at-end-of-block semantics
UIToFPInst(
Value *S, ///< The value to be converted
Type *Ty, ///< The type to convert to
@@ -3491,8 +3470,7 @@
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
- /// @brief Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const UIToFPInst *) { return true; }
+ /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Instruction *I) {
return I->getOpcode() == UIToFP;
}
@@ -3505,14 +3483,14 @@
// SIToFPInst Class
//===----------------------------------------------------------------------===//
-/// @brief This class represents a cast from signed integer to floating point.
+/// \brief This class represents a cast from signed integer to floating point.
class SIToFPInst : public CastInst {
protected:
- /// @brief Clone an identical SIToFPInst
+ /// \brief Clone an identical SIToFPInst
virtual SIToFPInst *clone_impl() const;
public:
- /// @brief Constructor with insert-before-instruction semantics
+ /// \brief Constructor with insert-before-instruction semantics
SIToFPInst(
Value *S, ///< The value to be converted
Type *Ty, ///< The type to convert to
@@ -3520,7 +3498,7 @@
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
- /// @brief Constructor with insert-at-end-of-block semantics
+ /// \brief Constructor with insert-at-end-of-block semantics
SIToFPInst(
Value *S, ///< The value to be converted
Type *Ty, ///< The type to convert to
@@ -3528,8 +3506,7 @@
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
- /// @brief Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const SIToFPInst *) { return true; }
+ /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Instruction *I) {
return I->getOpcode() == SIToFP;
}
@@ -3542,14 +3519,14 @@
// FPToUIInst Class
//===----------------------------------------------------------------------===//
-/// @brief This class represents a cast from floating point to unsigned integer
+/// \brief This class represents a cast from floating point to unsigned integer
class FPToUIInst : public CastInst {
protected:
- /// @brief Clone an identical FPToUIInst
+ /// \brief Clone an identical FPToUIInst
virtual FPToUIInst *clone_impl() const;
public:
- /// @brief Constructor with insert-before-instruction semantics
+ /// \brief Constructor with insert-before-instruction semantics
FPToUIInst(
Value *S, ///< The value to be converted
Type *Ty, ///< The type to convert to
@@ -3557,7 +3534,7 @@
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
- /// @brief Constructor with insert-at-end-of-block semantics
+ /// \brief Constructor with insert-at-end-of-block semantics
FPToUIInst(
Value *S, ///< The value to be converted
Type *Ty, ///< The type to convert to
@@ -3565,8 +3542,7 @@
BasicBlock *InsertAtEnd ///< Where to insert the new instruction
);
- /// @brief Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const FPToUIInst *) { return true; }
+ /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Instruction *I) {
return I->getOpcode() == FPToUI;
}
@@ -3579,14 +3555,14 @@
// FPToSIInst Class
//===----------------------------------------------------------------------===//
-/// @brief This class represents a cast from floating point to signed integer.
+/// \brief This class represents a cast from floating point to signed integer.
class FPToSIInst : public CastInst {
protected:
- /// @brief Clone an identical FPToSIInst
+ /// \brief Clone an identical FPToSIInst
virtual FPToSIInst *clone_impl() const;
public:
- /// @brief Constructor with insert-before-instruction semantics
+ /// \brief Constructor with insert-before-instruction semantics
FPToSIInst(
Value *S, ///< The value to be converted
Type *Ty, ///< The type to convert to
@@ -3594,7 +3570,7 @@
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
- /// @brief Constructor with insert-at-end-of-block semantics
+ /// \brief Constructor with insert-at-end-of-block semantics
FPToSIInst(
Value *S, ///< The value to be converted
Type *Ty, ///< The type to convert to
@@ -3602,8 +3578,7 @@
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
- /// @brief Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const FPToSIInst *) { return true; }
+ /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Instruction *I) {
return I->getOpcode() == FPToSI;
}
@@ -3616,10 +3591,10 @@
// IntToPtrInst Class
//===----------------------------------------------------------------------===//
-/// @brief This class represents a cast from an integer to a pointer.
+/// \brief This class represents a cast from an integer to a pointer.
class IntToPtrInst : public CastInst {
public:
- /// @brief Constructor with insert-before-instruction semantics
+ /// \brief Constructor with insert-before-instruction semantics
IntToPtrInst(
Value *S, ///< The value to be converted
Type *Ty, ///< The type to convert to
@@ -3627,7 +3602,7 @@
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
- /// @brief Constructor with insert-at-end-of-block semantics
+ /// \brief Constructor with insert-at-end-of-block semantics
IntToPtrInst(
Value *S, ///< The value to be converted
Type *Ty, ///< The type to convert to
@@ -3635,11 +3610,15 @@
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
- /// @brief Clone an identical IntToPtrInst
+ /// \brief Clone an identical IntToPtrInst
virtual IntToPtrInst *clone_impl() const;
+ /// \brief Returns the address space of this instruction's pointer type.
+ unsigned getAddressSpace() const {
+ return getType()->getPointerAddressSpace();
+ }
+
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const IntToPtrInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == IntToPtr;
}
@@ -3652,14 +3631,14 @@
// PtrToIntInst Class
//===----------------------------------------------------------------------===//
-/// @brief This class represents a cast from a pointer to an integer
+/// \brief This class represents a cast from a pointer to an integer
class PtrToIntInst : public CastInst {
protected:
- /// @brief Clone an identical PtrToIntInst
+ /// \brief Clone an identical PtrToIntInst
virtual PtrToIntInst *clone_impl() const;
public:
- /// @brief Constructor with insert-before-instruction semantics
+ /// \brief Constructor with insert-before-instruction semantics
PtrToIntInst(
Value *S, ///< The value to be converted
Type *Ty, ///< The type to convert to
@@ -3667,7 +3646,7 @@
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
- /// @brief Constructor with insert-at-end-of-block semantics
+ /// \brief Constructor with insert-at-end-of-block semantics
PtrToIntInst(
Value *S, ///< The value to be converted
Type *Ty, ///< The type to convert to
@@ -3675,8 +3654,19 @@
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
+ /// \brief Gets the pointer operand.
+ Value *getPointerOperand() { return getOperand(0); }
+ /// \brief Gets the pointer operand.
+ const Value *getPointerOperand() const { return getOperand(0); }
+ /// \brief Gets the operand index of the pointer operand.
+ static unsigned getPointerOperandIndex() { return 0U; }
+
+ /// \brief Returns the address space of the pointer operand.
+ unsigned getPointerAddressSpace() const {
+ return getPointerOperand()->getType()->getPointerAddressSpace();
+ }
+
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const PtrToIntInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == PtrToInt;
}
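
The new address-space accessors above (IntToPtrInst::getAddressSpace and
PtrToIntInst::getPointerAddressSpace) make ptr<->int round-trip checks
self-documenting. A minimal sketch against the post-patch headers; the helper
name and the default-address-space policy are illustrative, not part of this
patch:

    #include "llvm/Instructions.h"

    // Illustrative helper: accept a ptr->int->ptr round trip only when
    // both casts stay in the default address space (0).
    static bool isDefaultAddrSpaceRoundTrip(const llvm::PtrToIntInst *P2I,
                                            const llvm::IntToPtrInst *I2P) {
      return P2I->getPointerAddressSpace() == 0 &&
             I2P->getAddressSpace() == 0;
    }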
@@ -3689,14 +3679,14 @@
// BitCastInst Class
//===----------------------------------------------------------------------===//
-/// @brief This class represents a no-op cast from one type to another.
+/// \brief This class represents a no-op cast from one type to another.
class BitCastInst : public CastInst {
protected:
- /// @brief Clone an identical BitCastInst
+ /// \brief Clone an identical BitCastInst
virtual BitCastInst *clone_impl() const;
public:
- /// @brief Constructor with insert-before-instruction semantics
+ /// \brief Constructor with insert-before-instruction semantics
BitCastInst(
Value *S,                     ///< The value to be cast
Type *Ty,                     ///< The type to cast to
@@ -3704,7 +3694,7 @@
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
- /// @brief Constructor with insert-at-end-of-block semantics
+ /// \brief Constructor with insert-at-end-of-block semantics
BitCastInst(
Value *S,                     ///< The value to be cast
Type *Ty,                     ///< The type to cast to
@@ -3713,7 +3703,6 @@
);
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const BitCastInst *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == BitCast;
}
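
Taken together, the CallInst and InvokeInst changes above replace the
boolean-toggling attribute setters with add-only setters, and move the queries
from Attributes bitmasks to Attributes::AttrVal enumerators. A minimal
migration sketch, assuming the post-patch headers; the helper name is
illustrative:

    #include <cassert>
    #include "llvm/Instructions.h"

    // Illustrative only: mark a call site non-throwing and read-only.
    static void markPureCall(llvm::CallInst *CI) {
      // setDoesNotThrow(false) and friends no longer exist; the new
      // setters only add an attribute at AttrListPtr::FunctionIndex.
      CI->setDoesNotThrow();     // adds Attributes::NoUnwind
      CI->setOnlyReadsMemory();  // adds Attributes::ReadOnly
      assert(CI->doesNotThrow() && CI->onlyReadsMemory());
    }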
Modified: llvm/branches/AMDILBackend/include/llvm/IntrinsicInst.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/IntrinsicInst.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/IntrinsicInst.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/IntrinsicInst.h Tue Jan 15 11:16:16 2013
@@ -34,9 +34,9 @@
/// functions. This allows the standard isa/dyn_cast/cast functionality to
/// work with calls to intrinsic functions.
class IntrinsicInst : public CallInst {
- IntrinsicInst(); // DO NOT IMPLEMENT
- IntrinsicInst(const IntrinsicInst&); // DO NOT IMPLEMENT
- void operator=(const IntrinsicInst&); // DO NOT IMPLEMENT
+ IntrinsicInst() LLVM_DELETED_FUNCTION;
+ IntrinsicInst(const IntrinsicInst&) LLVM_DELETED_FUNCTION;
+ void operator=(const IntrinsicInst&) LLVM_DELETED_FUNCTION;
public:
/// getIntrinsicID - Return the intrinsic ID of this intrinsic.
///
@@ -45,7 +45,6 @@
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const IntrinsicInst *) { return true; }
static inline bool classof(const CallInst *I) {
if (const Function *CF = I->getCalledFunction())
return CF->getIntrinsicID() != 0;
@@ -62,7 +61,6 @@
public:
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const DbgInfoIntrinsic *) { return true; }
static inline bool classof(const IntrinsicInst *I) {
switch (I->getIntrinsicID()) {
case Intrinsic::dbg_declare:
@@ -86,7 +84,6 @@
MDNode *getVariable() const { return cast<MDNode>(getArgOperand(1)); }
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const DbgDeclareInst *) { return true; }
static inline bool classof(const IntrinsicInst *I) {
return I->getIntrinsicID() == Intrinsic::dbg_declare;
}
@@ -108,7 +105,6 @@
MDNode *getVariable() const { return cast<MDNode>(getArgOperand(2)); }
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const DbgValueInst *) { return true; }
static inline bool classof(const IntrinsicInst *I) {
return I->getIntrinsicID() == Intrinsic::dbg_value;
}
@@ -175,7 +171,6 @@
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const MemIntrinsic *) { return true; }
static inline bool classof(const IntrinsicInst *I) {
switch (I->getIntrinsicID()) {
case Intrinsic::memcpy:
@@ -205,7 +200,6 @@
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const MemSetInst *) { return true; }
static inline bool classof(const IntrinsicInst *I) {
return I->getIntrinsicID() == Intrinsic::memset;
}
@@ -238,7 +232,6 @@
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const MemTransferInst *) { return true; }
static inline bool classof(const IntrinsicInst *I) {
return I->getIntrinsicID() == Intrinsic::memcpy ||
I->getIntrinsicID() == Intrinsic::memmove;
@@ -254,7 +247,6 @@
class MemCpyInst : public MemTransferInst {
public:
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const MemCpyInst *) { return true; }
static inline bool classof(const IntrinsicInst *I) {
return I->getIntrinsicID() == Intrinsic::memcpy;
}
@@ -268,7 +260,6 @@
class MemMoveInst : public MemTransferInst {
public:
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const MemMoveInst *) { return true; }
static inline bool classof(const IntrinsicInst *I) {
return I->getIntrinsicID() == Intrinsic::memmove;
}
@@ -277,6 +268,49 @@
}
};
+ /// VAStartInst - This represents the llvm.va_start intrinsic.
+ ///
+ class VAStartInst : public IntrinsicInst {
+ public:
+ static inline bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::vastart;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+
+ Value *getArgList() const { return const_cast<Value*>(getArgOperand(0)); }
+ };
+
+ /// VAEndInst - This represents the llvm.va_end intrinsic.
+ ///
+ class VAEndInst : public IntrinsicInst {
+ public:
+ static inline bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::vaend;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+
+ Value *getArgList() const { return const_cast<Value*>(getArgOperand(0)); }
+ };
+
+ /// VACopyInst - This represents the llvm.va_copy intrinsic.
+ ///
+ class VACopyInst : public IntrinsicInst {
+ public:
+ static inline bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::vacopy;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+
+ Value *getDest() const { return const_cast<Value*>(getArgOperand(0)); }
+ Value *getSrc() const { return const_cast<Value*>(getArgOperand(1)); }
+ };
+
}
#endif
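
A short usage sketch for the new va_arg wrappers above, assuming the
post-patch header; the helper is illustrative. The classof(const Value *)
overloads let the classes participate in the usual isa/dyn_cast machinery:

    #include "llvm/IntrinsicInst.h"
    #include "llvm/Support/Casting.h"

    // Illustrative only: recover the va_list operand, if this instruction
    // is one of the variadic-argument intrinsics.
    static llvm::Value *getVAList(llvm::Instruction *I) {
      if (llvm::VAStartInst *VS = llvm::dyn_cast<llvm::VAStartInst>(I))
        return VS->getArgList();
      if (llvm::VAEndInst *VE = llvm::dyn_cast<llvm::VAEndInst>(I))
        return VE->getArgList();
      if (llvm::VACopyInst *VC = llvm::dyn_cast<llvm::VACopyInst>(I))
        return VC->getDest();
      return 0;
    }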
Modified: llvm/branches/AMDILBackend/include/llvm/Intrinsics.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Intrinsics.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Intrinsics.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Intrinsics.h Tue Jan 15 11:16:16 2013
@@ -50,7 +50,7 @@
/// Intrinsic::getType(ID) - Return the function type for an intrinsic.
///
FunctionType *getType(LLVMContext &Context, ID id,
- ArrayRef<Type*> Tys = ArrayRef<Type*>());
+ ArrayRef<Type*> Tys = ArrayRef<Type*>());
/// Intrinsic::isOverloaded(ID) - Returns true if the intrinsic can be
/// overloaded.
@@ -58,7 +58,7 @@
/// Intrinsic::getAttributes(ID) - Return the attributes for an intrinsic.
///
- AttrListPtr getAttributes(ID id);
+ AttrListPtr getAttributes(LLVMContext &C, ID id);
/// Intrinsic::getDeclaration(M, ID) - Create or insert an LLVM Function
/// declaration for an intrinsic, and return it.
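
The getAttributes change is source-breaking for out-of-tree callers: the
LLVMContext must now be threaded through, matching the context-taking
Attributes::get calls elsewhere in this patch. A before/after sketch,
assuming a Module *M in scope (illustrative):

    #include "llvm/Attributes.h"
    #include "llvm/Intrinsics.h"
    #include "llvm/Module.h"

    // Pre-patch:  Intrinsic::getAttributes(Intrinsic::memcpy);
    // Post-patch: the context parameter comes first.
    static llvm::AttrListPtr memcpyAttrs(llvm::Module *M) {
      return llvm::Intrinsic::getAttributes(M->getContext(),
                                            llvm::Intrinsic::memcpy);
    }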
Modified: llvm/branches/AMDILBackend/include/llvm/Intrinsics.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Intrinsics.td?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Intrinsics.td (original)
+++ llvm/branches/AMDILBackend/include/llvm/Intrinsics.td Tue Jan 15 11:16:16 2013
@@ -121,15 +121,21 @@
def llvm_x86mmx_ty : LLVMType<x86mmx>;
def llvm_ptrx86mmx_ty : LLVMPointerType<llvm_x86mmx_ty>; // <1 x i64>*
+def llvm_v2i1_ty : LLVMType<v2i1>; // 2 x i1
+def llvm_v4i1_ty : LLVMType<v4i1>; // 4 x i1
+def llvm_v8i1_ty : LLVMType<v8i1>; // 8 x i1
+def llvm_v16i1_ty : LLVMType<v16i1>; // 16 x i1
def llvm_v2i8_ty : LLVMType<v2i8>; // 2 x i8
def llvm_v4i8_ty : LLVMType<v4i8>; // 4 x i8
def llvm_v8i8_ty : LLVMType<v8i8>; // 8 x i8
def llvm_v16i8_ty : LLVMType<v16i8>; // 16 x i8
def llvm_v32i8_ty : LLVMType<v32i8>; // 32 x i8
+def llvm_v1i16_ty : LLVMType<v1i16>; // 1 x i16
def llvm_v2i16_ty : LLVMType<v2i16>; // 2 x i16
def llvm_v4i16_ty : LLVMType<v4i16>; // 4 x i16
def llvm_v8i16_ty : LLVMType<v8i16>; // 8 x i16
def llvm_v16i16_ty : LLVMType<v16i16>; // 16 x i16
+def llvm_v1i32_ty : LLVMType<v1i32>; // 1 x i32
def llvm_v2i32_ty : LLVMType<v2i32>; // 2 x i32
def llvm_v4i32_ty : LLVMType<v4i32>; // 4 x i32
def llvm_v8i32_ty : LLVMType<v8i32>; // 8 x i32
@@ -279,9 +285,9 @@
// NOTE: these are internal interfaces.
def int_setjmp : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty]>;
-def int_longjmp : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty]>;
+def int_longjmp : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty], [IntrNoReturn]>;
def int_sigsetjmp : Intrinsic<[llvm_i32_ty] , [llvm_ptr_ty, llvm_i32_ty]>;
-def int_siglongjmp : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty]>;
+def int_siglongjmp : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty], [IntrNoReturn]>;
// Internal interface for object size checking
def int_objectsize : Intrinsic<[llvm_anyint_ty], [llvm_ptr_ty, llvm_i1_ty],
@@ -339,7 +345,7 @@
}
def int_eh_sjlj_functioncontext : Intrinsic<[], [llvm_ptr_ty]>;
def int_eh_sjlj_setjmp : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty]>;
-def int_eh_sjlj_longjmp : Intrinsic<[], [llvm_ptr_ty]>;
+def int_eh_sjlj_longjmp : Intrinsic<[], [llvm_ptr_ty], [IntrNoReturn]>;
//===---------------- Generic Variable Attribute Intrinsics----------------===//
//
Modified: llvm/branches/AMDILBackend/include/llvm/IntrinsicsARM.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/IntrinsicsARM.td?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/IntrinsicsARM.td (original)
+++ llvm/branches/AMDILBackend/include/llvm/IntrinsicsARM.td Tue Jan 15 11:16:16 2013
@@ -16,147 +16,136 @@
// TLS
let TargetPrefix = "arm" in { // All intrinsics start with "llvm.arm.".
- def int_arm_thread_pointer : GCCBuiltin<"__builtin_thread_pointer">,
- Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
-}
+
+def int_arm_thread_pointer : GCCBuiltin<"__builtin_thread_pointer">,
+ Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
//===----------------------------------------------------------------------===//
// Saturating Arithmetic
-let TargetPrefix = "arm" in { // All intrinsics start with "llvm.arm.".
- def int_arm_qadd : GCCBuiltin<"__builtin_arm_qadd">,
- Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem, Commutative]>;
- def int_arm_qsub : GCCBuiltin<"__builtin_arm_qsub">,
- Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_arm_ssat : GCCBuiltin<"__builtin_arm_ssat">,
- Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
- def int_arm_usat : GCCBuiltin<"__builtin_arm_usat">,
- Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
-}
+def int_arm_qadd : GCCBuiltin<"__builtin_arm_qadd">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, Commutative]>;
+def int_arm_qsub : GCCBuiltin<"__builtin_arm_qsub">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_ssat : GCCBuiltin<"__builtin_arm_ssat">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_usat : GCCBuiltin<"__builtin_arm_usat">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
//===----------------------------------------------------------------------===//
// Load and Store exclusive doubleword
-let TargetPrefix = "arm" in { // All intrinsics start with "llvm.arm.".
- def int_arm_strexd : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
- llvm_ptr_ty], [IntrReadWriteArgMem]>;
- def int_arm_ldrexd : Intrinsic<[llvm_i32_ty, llvm_i32_ty], [llvm_ptr_ty],
- [IntrReadArgMem]>;
-}
+def int_arm_strexd : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
+ llvm_ptr_ty], [IntrReadWriteArgMem]>;
+def int_arm_ldrexd : Intrinsic<[llvm_i32_ty, llvm_i32_ty], [llvm_ptr_ty],
+ [IntrReadArgMem]>;
//===----------------------------------------------------------------------===//
// VFP
-let TargetPrefix = "arm" in { // All intrinsics start with "llvm.arm.".
- def int_arm_get_fpscr : GCCBuiltin<"__builtin_arm_get_fpscr">,
- Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>;
- def int_arm_set_fpscr : GCCBuiltin<"__builtin_arm_set_fpscr">,
- Intrinsic<[], [llvm_i32_ty], []>;
- def int_arm_vcvtr : Intrinsic<[llvm_float_ty], [llvm_anyfloat_ty],
- [IntrNoMem]>;
- def int_arm_vcvtru : Intrinsic<[llvm_float_ty], [llvm_anyfloat_ty],
- [IntrNoMem]>;
-}
+def int_arm_get_fpscr : GCCBuiltin<"__builtin_arm_get_fpscr">,
+ Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>;
+def int_arm_set_fpscr : GCCBuiltin<"__builtin_arm_set_fpscr">,
+ Intrinsic<[], [llvm_i32_ty], []>;
+def int_arm_vcvtr : Intrinsic<[llvm_float_ty], [llvm_anyfloat_ty],
+ [IntrNoMem]>;
+def int_arm_vcvtru : Intrinsic<[llvm_float_ty], [llvm_anyfloat_ty],
+ [IntrNoMem]>;
//===----------------------------------------------------------------------===//
// Coprocessor
-let TargetPrefix = "arm" in { // All intrinsics start with "llvm.arm.".
- // Move to coprocessor
- def int_arm_mcr : GCCBuiltin<"__builtin_arm_mcr">,
- Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
- llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
- def int_arm_mcr2 : GCCBuiltin<"__builtin_arm_mcr2">,
- Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
- llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
-
- // Move from coprocessor
- def int_arm_mrc : GCCBuiltin<"__builtin_arm_mrc">,
- Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
- llvm_i32_ty, llvm_i32_ty], []>;
- def int_arm_mrc2 : GCCBuiltin<"__builtin_arm_mrc2">,
- Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
- llvm_i32_ty, llvm_i32_ty], []>;
-
- // Coprocessor data processing
- def int_arm_cdp : GCCBuiltin<"__builtin_arm_cdp">,
- Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
- llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
- def int_arm_cdp2 : GCCBuiltin<"__builtin_arm_cdp2">,
- Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
- llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
-
- // Move from two registers to coprocessor
- def int_arm_mcrr : GCCBuiltin<"__builtin_arm_mcrr">,
- Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
- llvm_i32_ty, llvm_i32_ty], []>;
- def int_arm_mcrr2 : GCCBuiltin<"__builtin_arm_mcrr2">,
- Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
- llvm_i32_ty, llvm_i32_ty], []>;
-}
+// Move to coprocessor
+def int_arm_mcr : GCCBuiltin<"__builtin_arm_mcr">,
+ Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
+def int_arm_mcr2 : GCCBuiltin<"__builtin_arm_mcr2">,
+ Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
+
+// Move from coprocessor
+def int_arm_mrc : GCCBuiltin<"__builtin_arm_mrc">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], []>;
+def int_arm_mrc2 : GCCBuiltin<"__builtin_arm_mrc2">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], []>;
+
+// Coprocessor data processing
+def int_arm_cdp : GCCBuiltin<"__builtin_arm_cdp">,
+ Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
+def int_arm_cdp2 : GCCBuiltin<"__builtin_arm_cdp2">,
+ Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
+
+// Move from two registers to coprocessor
+def int_arm_mcrr : GCCBuiltin<"__builtin_arm_mcrr">,
+ Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], []>;
+def int_arm_mcrr2 : GCCBuiltin<"__builtin_arm_mcrr2">,
+ Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], []>;
//===----------------------------------------------------------------------===//
// Advanced SIMD (NEON)
-let TargetPrefix = "arm" in { // All intrinsics start with "llvm.arm.".
-
- // The following classes do not correspond directly to GCC builtins.
- class Neon_1Arg_Intrinsic
- : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>], [IntrNoMem]>;
- class Neon_1Arg_Narrow_Intrinsic
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMExtendedElementVectorType<0>], [IntrNoMem]>;
- class Neon_2Arg_Intrinsic
- : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
- [IntrNoMem]>;
- class Neon_2Arg_Narrow_Intrinsic
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMExtendedElementVectorType<0>,
- LLVMExtendedElementVectorType<0>],
- [IntrNoMem]>;
- class Neon_2Arg_Long_Intrinsic
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMTruncatedElementVectorType<0>,
- LLVMTruncatedElementVectorType<0>],
- [IntrNoMem]>;
- class Neon_3Arg_Intrinsic
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
- [IntrNoMem]>;
- class Neon_3Arg_Long_Intrinsic
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>,
- LLVMTruncatedElementVectorType<0>,
- LLVMTruncatedElementVectorType<0>],
- [IntrNoMem]>;
- class Neon_CvtFxToFP_Intrinsic
- : Intrinsic<[llvm_anyfloat_ty], [llvm_anyint_ty, llvm_i32_ty], [IntrNoMem]>;
- class Neon_CvtFPToFx_Intrinsic
- : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, llvm_i32_ty], [IntrNoMem]>;
-
- // The table operands for VTBL and VTBX consist of 1 to 4 v8i8 vectors.
- // Besides the table, VTBL has one other v8i8 argument and VTBX has two.
- // Overall, the classes range from 2 to 6 v8i8 arguments.
- class Neon_Tbl2Arg_Intrinsic
- : Intrinsic<[llvm_v8i8_ty],
- [llvm_v8i8_ty, llvm_v8i8_ty], [IntrNoMem]>;
- class Neon_Tbl3Arg_Intrinsic
- : Intrinsic<[llvm_v8i8_ty],
- [llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty], [IntrNoMem]>;
- class Neon_Tbl4Arg_Intrinsic
- : Intrinsic<[llvm_v8i8_ty],
- [llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty],
- [IntrNoMem]>;
- class Neon_Tbl5Arg_Intrinsic
- : Intrinsic<[llvm_v8i8_ty],
- [llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty,
- llvm_v8i8_ty], [IntrNoMem]>;
- class Neon_Tbl6Arg_Intrinsic
- : Intrinsic<[llvm_v8i8_ty],
- [llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty,
- llvm_v8i8_ty, llvm_v8i8_ty], [IntrNoMem]>;
-}
+// The following classes do not correspond directly to GCC builtins.
+class Neon_1Arg_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>], [IntrNoMem]>;
+class Neon_1Arg_Narrow_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMExtendedElementVectorType<0>], [IntrNoMem]>;
+class Neon_2Arg_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem]>;
+class Neon_2Arg_Narrow_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMExtendedElementVectorType<0>,
+ LLVMExtendedElementVectorType<0>],
+ [IntrNoMem]>;
+class Neon_2Arg_Long_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMTruncatedElementVectorType<0>,
+ LLVMTruncatedElementVectorType<0>],
+ [IntrNoMem]>;
+class Neon_3Arg_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem]>;
+class Neon_3Arg_Long_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>,
+ LLVMTruncatedElementVectorType<0>,
+ LLVMTruncatedElementVectorType<0>],
+ [IntrNoMem]>;
+class Neon_CvtFxToFP_Intrinsic
+ : Intrinsic<[llvm_anyfloat_ty], [llvm_anyint_ty, llvm_i32_ty], [IntrNoMem]>;
+class Neon_CvtFPToFx_Intrinsic
+ : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, llvm_i32_ty], [IntrNoMem]>;
+
+// The table operands for VTBL and VTBX consist of 1 to 4 v8i8 vectors.
+// Besides the table, VTBL has one other v8i8 argument and VTBX has two.
+// Overall, the classes range from 2 to 6 v8i8 arguments.
+class Neon_Tbl2Arg_Intrinsic
+ : Intrinsic<[llvm_v8i8_ty],
+ [llvm_v8i8_ty, llvm_v8i8_ty], [IntrNoMem]>;
+class Neon_Tbl3Arg_Intrinsic
+ : Intrinsic<[llvm_v8i8_ty],
+ [llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty], [IntrNoMem]>;
+class Neon_Tbl4Arg_Intrinsic
+ : Intrinsic<[llvm_v8i8_ty],
+ [llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty],
+ [IntrNoMem]>;
+class Neon_Tbl5Arg_Intrinsic
+ : Intrinsic<[llvm_v8i8_ty],
+ [llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty,
+ llvm_v8i8_ty], [IntrNoMem]>;
+class Neon_Tbl6Arg_Intrinsic
+ : Intrinsic<[llvm_v8i8_ty],
+ [llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty,
+ llvm_v8i8_ty, llvm_v8i8_ty], [IntrNoMem]>;
// Arithmetic ops
@@ -209,20 +198,18 @@
def int_arm_neon_vrsubhn : Neon_2Arg_Narrow_Intrinsic;
// Vector Absolute Compare.
-let TargetPrefix = "arm" in {
- def int_arm_neon_vacged : Intrinsic<[llvm_v2i32_ty],
- [llvm_v2f32_ty, llvm_v2f32_ty],
- [IntrNoMem]>;
- def int_arm_neon_vacgeq : Intrinsic<[llvm_v4i32_ty],
- [llvm_v4f32_ty, llvm_v4f32_ty],
- [IntrNoMem]>;
- def int_arm_neon_vacgtd : Intrinsic<[llvm_v2i32_ty],
- [llvm_v2f32_ty, llvm_v2f32_ty],
- [IntrNoMem]>;
- def int_arm_neon_vacgtq : Intrinsic<[llvm_v4i32_ty],
- [llvm_v4f32_ty, llvm_v4f32_ty],
- [IntrNoMem]>;
-}
+def int_arm_neon_vacged : Intrinsic<[llvm_v2i32_ty],
+ [llvm_v2f32_ty, llvm_v2f32_ty],
+ [IntrNoMem]>;
+def int_arm_neon_vacgeq : Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty],
+ [IntrNoMem]>;
+def int_arm_neon_vacgtd : Intrinsic<[llvm_v2i32_ty],
+ [llvm_v2f32_ty, llvm_v2f32_ty],
+ [IntrNoMem]>;
+def int_arm_neon_vacgtq : Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty],
+ [IntrNoMem]>;
// Vector Absolute Differences.
def int_arm_neon_vabds : Neon_2Arg_Intrinsic;
@@ -235,24 +222,20 @@
// Note: This is different than the other "long" NEON intrinsics because
// the result vector has half as many elements as the source vector.
// The source and destination vector types must be specified separately.
-let TargetPrefix = "arm" in {
- def int_arm_neon_vpaddls : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty],
- [IntrNoMem]>;
- def int_arm_neon_vpaddlu : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty],
- [IntrNoMem]>;
-}
+def int_arm_neon_vpaddls : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty],
+ [IntrNoMem]>;
+def int_arm_neon_vpaddlu : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty],
+ [IntrNoMem]>;
// Vector Pairwise Add and Accumulate Long.
// Note: This is similar to vpaddl but the destination vector also appears
// as the first argument.
-let TargetPrefix = "arm" in {
- def int_arm_neon_vpadals : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_anyvector_ty],
- [IntrNoMem]>;
- def int_arm_neon_vpadalu : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_anyvector_ty],
- [IntrNoMem]>;
-}
+def int_arm_neon_vpadals : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_anyvector_ty],
+ [IntrNoMem]>;
+def int_arm_neon_vpadalu : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_anyvector_ty],
+ [IntrNoMem]>;
// Vector Pairwise Maximum and Minimum.
def int_arm_neon_vpmaxs : Neon_2Arg_Intrinsic;
@@ -364,79 +347,83 @@
def int_arm_neon_vtbx3 : Neon_Tbl5Arg_Intrinsic;
def int_arm_neon_vtbx4 : Neon_Tbl6Arg_Intrinsic;
-let TargetPrefix = "arm" in {
+// De-interleaving vector loads from N-element structures.
+// Source operands are the address and alignment.
+def int_arm_neon_vld1 : Intrinsic<[llvm_anyvector_ty],
+ [llvm_ptr_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+def int_arm_neon_vld2 : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
+ [llvm_ptr_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+def int_arm_neon_vld3 : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
+ LLVMMatchType<0>],
+ [llvm_ptr_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+def int_arm_neon_vld4 : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
+ LLVMMatchType<0>, LLVMMatchType<0>],
+ [llvm_ptr_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+
+// Vector load N-element structure to one lane.
+// Source operands are: the address, the N input vectors (since only one
+// lane is assigned), the lane number, and the alignment.
+def int_arm_neon_vld2lane : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
+ [llvm_ptr_ty, LLVMMatchType<0>,
+ LLVMMatchType<0>, llvm_i32_ty,
+ llvm_i32_ty], [IntrReadArgMem]>;
+def int_arm_neon_vld3lane : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
+ LLVMMatchType<0>],
+ [llvm_ptr_ty, LLVMMatchType<0>,
+ LLVMMatchType<0>, LLVMMatchType<0>,
+ llvm_i32_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+def int_arm_neon_vld4lane : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
+ LLVMMatchType<0>, LLVMMatchType<0>],
+ [llvm_ptr_ty, LLVMMatchType<0>,
+ LLVMMatchType<0>, LLVMMatchType<0>,
+ LLVMMatchType<0>, llvm_i32_ty,
+ llvm_i32_ty], [IntrReadArgMem]>;
+
+// Interleaving vector stores from N-element structures.
+// Source operands are: the address, the N vectors, and the alignment.
+def int_arm_neon_vst1 : Intrinsic<[],
+ [llvm_ptr_ty, llvm_anyvector_ty,
+ llvm_i32_ty], [IntrReadWriteArgMem]>;
+def int_arm_neon_vst2 : Intrinsic<[],
+ [llvm_ptr_ty, llvm_anyvector_ty,
+ LLVMMatchType<0>, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+def int_arm_neon_vst3 : Intrinsic<[],
+ [llvm_ptr_ty, llvm_anyvector_ty,
+ LLVMMatchType<0>, LLVMMatchType<0>,
+ llvm_i32_ty], [IntrReadWriteArgMem]>;
+def int_arm_neon_vst4 : Intrinsic<[],
+ [llvm_ptr_ty, llvm_anyvector_ty,
+ LLVMMatchType<0>, LLVMMatchType<0>,
+ LLVMMatchType<0>, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+
+// Vector store N-element structure from one lane.
+// Source operands are: the address, the N vectors, the lane number, and
+// the alignment.
+def int_arm_neon_vst2lane : Intrinsic<[],
+ [llvm_ptr_ty, llvm_anyvector_ty,
+ LLVMMatchType<0>, llvm_i32_ty,
+ llvm_i32_ty], [IntrReadWriteArgMem]>;
+def int_arm_neon_vst3lane : Intrinsic<[],
+ [llvm_ptr_ty, llvm_anyvector_ty,
+ LLVMMatchType<0>, LLVMMatchType<0>,
+ llvm_i32_ty, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+def int_arm_neon_vst4lane : Intrinsic<[],
+ [llvm_ptr_ty, llvm_anyvector_ty,
+ LLVMMatchType<0>, LLVMMatchType<0>,
+ LLVMMatchType<0>, llvm_i32_ty,
+ llvm_i32_ty], [IntrReadWriteArgMem]>;
+
+// Vector bitwise select.
+def int_arm_neon_vbsl : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem]>;
- // De-interleaving vector loads from N-element structures.
- // Source operands are the address and alignment.
- def int_arm_neon_vld1 : Intrinsic<[llvm_anyvector_ty],
- [llvm_ptr_ty, llvm_i32_ty],
- [IntrReadArgMem]>;
- def int_arm_neon_vld2 : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
- [llvm_ptr_ty, llvm_i32_ty],
- [IntrReadArgMem]>;
- def int_arm_neon_vld3 : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
- LLVMMatchType<0>],
- [llvm_ptr_ty, llvm_i32_ty],
- [IntrReadArgMem]>;
- def int_arm_neon_vld4 : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
- LLVMMatchType<0>, LLVMMatchType<0>],
- [llvm_ptr_ty, llvm_i32_ty],
- [IntrReadArgMem]>;
-
- // Vector load N-element structure to one lane.
- // Source operands are: the address, the N input vectors (since only one
- // lane is assigned), the lane number, and the alignment.
- def int_arm_neon_vld2lane : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
- [llvm_ptr_ty, LLVMMatchType<0>,
- LLVMMatchType<0>, llvm_i32_ty,
- llvm_i32_ty], [IntrReadArgMem]>;
- def int_arm_neon_vld3lane : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
- LLVMMatchType<0>],
- [llvm_ptr_ty, LLVMMatchType<0>,
- LLVMMatchType<0>, LLVMMatchType<0>,
- llvm_i32_ty, llvm_i32_ty],
- [IntrReadArgMem]>;
- def int_arm_neon_vld4lane : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
- LLVMMatchType<0>, LLVMMatchType<0>],
- [llvm_ptr_ty, LLVMMatchType<0>,
- LLVMMatchType<0>, LLVMMatchType<0>,
- LLVMMatchType<0>, llvm_i32_ty,
- llvm_i32_ty], [IntrReadArgMem]>;
-
- // Interleaving vector stores from N-element structures.
- // Source operands are: the address, the N vectors, and the alignment.
- def int_arm_neon_vst1 : Intrinsic<[],
- [llvm_ptr_ty, llvm_anyvector_ty,
- llvm_i32_ty], [IntrReadWriteArgMem]>;
- def int_arm_neon_vst2 : Intrinsic<[],
- [llvm_ptr_ty, llvm_anyvector_ty,
- LLVMMatchType<0>, llvm_i32_ty],
- [IntrReadWriteArgMem]>;
- def int_arm_neon_vst3 : Intrinsic<[],
- [llvm_ptr_ty, llvm_anyvector_ty,
- LLVMMatchType<0>, LLVMMatchType<0>,
- llvm_i32_ty], [IntrReadWriteArgMem]>;
- def int_arm_neon_vst4 : Intrinsic<[],
- [llvm_ptr_ty, llvm_anyvector_ty,
- LLVMMatchType<0>, LLVMMatchType<0>,
- LLVMMatchType<0>, llvm_i32_ty],
- [IntrReadWriteArgMem]>;
-
- // Vector store N-element structure from one lane.
- // Source operands are: the address, the N vectors, the lane number, and
- // the alignment.
- def int_arm_neon_vst2lane : Intrinsic<[],
- [llvm_ptr_ty, llvm_anyvector_ty,
- LLVMMatchType<0>, llvm_i32_ty,
- llvm_i32_ty], [IntrReadWriteArgMem]>;
- def int_arm_neon_vst3lane : Intrinsic<[],
- [llvm_ptr_ty, llvm_anyvector_ty,
- LLVMMatchType<0>, LLVMMatchType<0>,
- llvm_i32_ty, llvm_i32_ty],
- [IntrReadWriteArgMem]>;
- def int_arm_neon_vst4lane : Intrinsic<[],
- [llvm_ptr_ty, llvm_anyvector_ty,
- LLVMMatchType<0>, LLVMMatchType<0>,
- LLVMMatchType<0>, llvm_i32_ty,
- llvm_i32_ty], [IntrReadWriteArgMem]>;
-}
+} // end TargetPrefix
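
For reference, a minimal C++ sketch (not part of this patch) of how a front
end might emit one of the overloaded NEON load intrinsics defined above. The
helper name emitVld2 and the <4 x i16> element type are invented for
illustration; the shape (address and alignment in, N de-interleaved vectors
out) follows the TableGen entries in this hunk.

#include "llvm/DerivedTypes.h"
#include "llvm/IRBuilder.h"
#include "llvm/Intrinsics.h"
#include "llvm/Module.h"
using namespace llvm;

// Emit llvm.arm.neon.vld2.v4i16: load a 2-element interleaved structure and
// de-interleave it into two <4 x i16> results, returned as a struct value.
static Value *emitVld2(IRBuilder<> &B, Module *M, Value *Addr,
                       unsigned Alignment) {
  Type *VecTy = VectorType::get(B.getInt16Ty(), 4);
  Function *Vld2 =
      Intrinsic::getDeclaration(M, Intrinsic::arm_neon_vld2, VecTy);
  Value *Ptr = B.CreateBitCast(Addr, B.getInt8PtrTy());
  return B.CreateCall2(Vld2, Ptr, B.getInt32(Alignment), "vld2");
}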
Modified: llvm/branches/AMDILBackend/include/llvm/IntrinsicsHexagon.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/IntrinsicsHexagon.td?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/IntrinsicsHexagon.td (original)
+++ llvm/branches/AMDILBackend/include/llvm/IntrinsicsHexagon.td Tue Jan 15 11:16:16 2013
@@ -15,7 +15,7 @@
//
// All Hexagon intrinsics start with "llvm.hexagon.".
let TargetPrefix = "hexagon" in {
- /// Hexagon_Intrinsic - Base class for all altivec intrinsics.
+ /// Hexagon_Intrinsic - Base class for all Hexagon intrinsics.
class Hexagon_Intrinsic<string GCCIntSuffix, list<LLVMType> ret_types,
list<LLVMType> param_types,
list<IntrinsicProperty> properties>
Modified: llvm/branches/AMDILBackend/include/llvm/IntrinsicsMips.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/IntrinsicsMips.td?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/IntrinsicsMips.td (original)
+++ llvm/branches/AMDILBackend/include/llvm/IntrinsicsMips.td Tue Jan 15 11:16:16 2013
@@ -14,11 +14,15 @@
//===----------------------------------------------------------------------===//
// MIPS DSP data types
def mips_v2q15_ty: LLVMType<v2i16>;
+def mips_v4q7_ty: LLVMType<v4i8>;
def mips_q31_ty: LLVMType<i32>;
let TargetPrefix = "mips" in { // All intrinsics start with "llvm.mips.".
//===----------------------------------------------------------------------===//
+// MIPS DSP Rev 1
+
+//===----------------------------------------------------------------------===//
// Addition/subtraction
def int_mips_addu_qb : GCCBuiltin<"__builtin_mips_addu_qb">,
@@ -261,4 +265,125 @@
Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty], [IntrReadArgMem]>;
def int_mips_lwx: GCCBuiltin<"__builtin_mips_lwx">,
Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty], [IntrReadArgMem]>;
+
+//===----------------------------------------------------------------------===//
+// MIPS DSP Rev 2
+
+def int_mips_absq_s_qb: GCCBuiltin<"__builtin_mips_absq_s_qb">,
+ Intrinsic<[mips_v4q7_ty], [mips_v4q7_ty], []>;
+
+def int_mips_addqh_ph: GCCBuiltin<"__builtin_mips_addqh_ph">,
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty],
+ [IntrNoMem, Commutative]>;
+def int_mips_addqh_r_ph: GCCBuiltin<"__builtin_mips_addqh_r_ph">,
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty],
+ [IntrNoMem, Commutative]>;
+def int_mips_addqh_w: GCCBuiltin<"__builtin_mips_addqh_w">,
+ Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty],
+ [IntrNoMem, Commutative]>;
+def int_mips_addqh_r_w: GCCBuiltin<"__builtin_mips_addqh_r_w">,
+ Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty],
+ [IntrNoMem, Commutative]>;
+
+def int_mips_addu_ph: GCCBuiltin<"__builtin_mips_addu_ph">,
+ Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_v2i16_ty], [Commutative]>;
+def int_mips_addu_s_ph: GCCBuiltin<"__builtin_mips_addu_s_ph">,
+ Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_v2i16_ty], [Commutative]>;
+
+def int_mips_adduh_qb: GCCBuiltin<"__builtin_mips_adduh_qb">,
+ Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty],
+ [IntrNoMem, Commutative]>;
+def int_mips_adduh_r_qb: GCCBuiltin<"__builtin_mips_adduh_r_qb">,
+ Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty],
+ [IntrNoMem, Commutative]>;
+
+def int_mips_append: GCCBuiltin<"__builtin_mips_append">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_mips_balign: GCCBuiltin<"__builtin_mips_balign">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+def int_mips_cmpgdu_eq_qb: GCCBuiltin<"__builtin_mips_cmpgdu_eq_qb">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [Commutative]>;
+def int_mips_cmpgdu_lt_qb: GCCBuiltin<"__builtin_mips_cmpgdu_lt_qb">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [Commutative]>;
+def int_mips_cmpgdu_le_qb: GCCBuiltin<"__builtin_mips_cmpgdu_le_qb">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [Commutative]>;
+
+def int_mips_dpa_w_ph: GCCBuiltin<"__builtin_mips_dpa_w_ph">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v2i16_ty, llvm_v2i16_ty],
+ [IntrNoMem]>;
+def int_mips_dps_w_ph: GCCBuiltin<"__builtin_mips_dps_w_ph">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v2i16_ty, llvm_v2i16_ty],
+ [IntrNoMem]>;
+
+def int_mips_dpaqx_s_w_ph: GCCBuiltin<"__builtin_mips_dpaqx_s_w_ph">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
+def int_mips_dpaqx_sa_w_ph: GCCBuiltin<"__builtin_mips_dpaqx_sa_w_ph">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
+def int_mips_dpax_w_ph: GCCBuiltin<"__builtin_mips_dpax_w_ph">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v2i16_ty, llvm_v2i16_ty],
+ [IntrNoMem]>;
+def int_mips_dpsx_w_ph: GCCBuiltin<"__builtin_mips_dpsx_w_ph">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v2i16_ty, llvm_v2i16_ty],
+ [IntrNoMem]>;
+def int_mips_dpsqx_s_w_ph: GCCBuiltin<"__builtin_mips_dpsqx_s_w_ph">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
+def int_mips_dpsqx_sa_w_ph: GCCBuiltin<"__builtin_mips_dpsqx_sa_w_ph">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
+
+def int_mips_mul_ph: GCCBuiltin<"__builtin_mips_mul_ph">,
+ Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_v2i16_ty], [Commutative]>;
+def int_mips_mul_s_ph: GCCBuiltin<"__builtin_mips_mul_s_ph">,
+ Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_v2i16_ty], [Commutative]>;
+
+def int_mips_mulq_rs_w: GCCBuiltin<"__builtin_mips_mulq_rs_w">,
+ Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty], [Commutative]>;
+def int_mips_mulq_s_ph: GCCBuiltin<"__builtin_mips_mulq_s_ph">,
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [Commutative]>;
+def int_mips_mulq_s_w: GCCBuiltin<"__builtin_mips_mulq_s_w">,
+ Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty], [Commutative]>;
+def int_mips_mulsa_w_ph: GCCBuiltin<"__builtin_mips_mulsa_w_ph">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v2i16_ty, llvm_v2i16_ty],
+ [IntrNoMem]>;
+
+def int_mips_precr_qb_ph: GCCBuiltin<"__builtin_mips_precr_qb_ph">,
+ Intrinsic<[llvm_v4i8_ty], [llvm_v2i16_ty, llvm_v2i16_ty], []>;
+def int_mips_precr_sra_ph_w: GCCBuiltin<"__builtin_mips_precr_sra_ph_w">,
+ Intrinsic<[llvm_v2i16_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_mips_precr_sra_r_ph_w: GCCBuiltin<"__builtin_mips_precr_sra_r_ph_w">,
+ Intrinsic<[llvm_v2i16_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+def int_mips_prepend: GCCBuiltin<"__builtin_mips_prepend">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+def int_mips_shra_qb: GCCBuiltin<"__builtin_mips_shra_qb">,
+ Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_shra_r_qb: GCCBuiltin<"__builtin_mips_shra_r_qb">,
+ Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_shrl_ph: GCCBuiltin<"__builtin_mips_shrl_ph">,
+ Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_subqh_ph: GCCBuiltin<"__builtin_mips_subqh_ph">,
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [IntrNoMem]>;
+def int_mips_subqh_r_ph: GCCBuiltin<"__builtin_mips_subqh_r_ph">,
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [IntrNoMem]>;
+def int_mips_subqh_w: GCCBuiltin<"__builtin_mips_subqh_w">,
+ Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty], [IntrNoMem]>;
+def int_mips_subqh_r_w: GCCBuiltin<"__builtin_mips_subqh_r_w">,
+ Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty], [IntrNoMem]>;
+
+def int_mips_subu_ph: GCCBuiltin<"__builtin_mips_subu_ph">,
+ Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_v2i16_ty], []>;
+def int_mips_subu_s_ph: GCCBuiltin<"__builtin_mips_subu_s_ph">,
+ Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_v2i16_ty], []>;
+
+def int_mips_subuh_qb: GCCBuiltin<"__builtin_mips_subuh_qb">,
+ Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [IntrNoMem]>;
+def int_mips_subuh_r_qb: GCCBuiltin<"__builtin_mips_subuh_r_qb">,
+ Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [IntrNoMem]>;
}
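
For reference, a usage sketch (not part of this patch): the GCCBuiltin names
above map straight to compiler builtins when targeting MIPS with DSP Rev 2
enabled (e.g. clang -target mips -mdspr2 -- the flag spelling is an
assumption). The vector typedef follows the GCC DSP convention.

// v2q15: two Q15 fixed-point values packed into a 32-bit register.
typedef short v2q15 __attribute__((vector_size(4)));

v2q15 halving_add(v2q15 a, v2q15 b) {
  // addqh.ph: per-element (a + b) >> 1; halving cannot overflow, so no
  // saturating variant is needed here.
  return __builtin_mips_addqh_ph(a, b);
}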
Modified: llvm/branches/AMDILBackend/include/llvm/IntrinsicsX86.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/IntrinsicsX86.td?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/IntrinsicsX86.td (original)
+++ llvm/branches/AMDILBackend/include/llvm/IntrinsicsX86.td Tue Jan 15 11:16:16 2013
@@ -219,7 +219,7 @@
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_sse_storeu_ps : GCCBuiltin<"__builtin_ia32_storeups">,
Intrinsic<[], [llvm_ptr_ty,
- llvm_v4f32_ty], []>;
+ llvm_v4f32_ty], [IntrReadWriteArgMem]>;
}
// Cacheability support ops
@@ -502,13 +502,13 @@
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_sse2_storeu_pd : GCCBuiltin<"__builtin_ia32_storeupd">,
Intrinsic<[], [llvm_ptr_ty,
- llvm_v2f64_ty], []>;
+ llvm_v2f64_ty], [IntrReadWriteArgMem]>;
def int_x86_sse2_storeu_dq : GCCBuiltin<"__builtin_ia32_storedqu">,
Intrinsic<[], [llvm_ptr_ty,
- llvm_v16i8_ty], []>;
+ llvm_v16i8_ty], [IntrReadWriteArgMem]>;
def int_x86_sse2_storel_dq : GCCBuiltin<"__builtin_ia32_storelv4si">,
Intrinsic<[], [llvm_ptr_ty,
- llvm_v4i32_ty], []>;
+ llvm_v4i32_ty], [IntrReadWriteArgMem]>;
}
// Misc.
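
A quick usage sketch for the unaligned-store builtins in the hunks above (not
part of this patch; assumes an SSE2-capable compile, e.g. clang -msse2).
Tagging these IntrReadWriteArgMem records that they touch memory only through
their pointer argument, so alias analysis can treat the call like an ordinary
store to *p instead of a full barrier:

typedef double v2df __attribute__((vector_size(16)));

void store_unaligned(double *p, v2df v) {
  __builtin_ia32_storeupd(p, v);  // movupd: no 16-byte alignment required
}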
@@ -1270,19 +1270,19 @@
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx_vbroadcast_ss :
GCCBuiltin<"__builtin_ia32_vbroadcastss">,
- Intrinsic<[llvm_v4f32_ty], [llvm_ptr_ty], [IntrReadMem]>;
+ Intrinsic<[llvm_v4f32_ty], [llvm_ptr_ty], [IntrReadArgMem]>;
def int_x86_avx_vbroadcast_sd_256 :
GCCBuiltin<"__builtin_ia32_vbroadcastsd256">,
- Intrinsic<[llvm_v4f64_ty], [llvm_ptr_ty], [IntrReadMem]>;
+ Intrinsic<[llvm_v4f64_ty], [llvm_ptr_ty], [IntrReadArgMem]>;
def int_x86_avx_vbroadcast_ss_256 :
GCCBuiltin<"__builtin_ia32_vbroadcastss256">,
- Intrinsic<[llvm_v8f32_ty], [llvm_ptr_ty], [IntrReadMem]>;
+ Intrinsic<[llvm_v8f32_ty], [llvm_ptr_ty], [IntrReadArgMem]>;
def int_x86_avx_vbroadcastf128_pd_256 :
GCCBuiltin<"__builtin_ia32_vbroadcastf128_pd256">,
- Intrinsic<[llvm_v4f64_ty], [llvm_ptr_ty], [IntrReadMem]>;
+ Intrinsic<[llvm_v4f64_ty], [llvm_ptr_ty], [IntrReadArgMem]>;
def int_x86_avx_vbroadcastf128_ps_256 :
GCCBuiltin<"__builtin_ia32_vbroadcastf128_ps256">,
- Intrinsic<[llvm_v8f32_ty], [llvm_ptr_ty], [IntrReadMem]>;
+ Intrinsic<[llvm_v8f32_ty], [llvm_ptr_ty], [IntrReadArgMem]>;
}
// SIMD load ops
@@ -1294,41 +1294,45 @@
// SIMD store ops
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx_storeu_pd_256 : GCCBuiltin<"__builtin_ia32_storeupd256">,
- Intrinsic<[], [llvm_ptr_ty, llvm_v4f64_ty], []>;
+ Intrinsic<[], [llvm_ptr_ty, llvm_v4f64_ty], [IntrReadWriteArgMem]>;
def int_x86_avx_storeu_ps_256 : GCCBuiltin<"__builtin_ia32_storeups256">,
- Intrinsic<[], [llvm_ptr_ty, llvm_v8f32_ty], []>;
+ Intrinsic<[], [llvm_ptr_ty, llvm_v8f32_ty], [IntrReadWriteArgMem]>;
def int_x86_avx_storeu_dq_256 : GCCBuiltin<"__builtin_ia32_storedqu256">,
- Intrinsic<[], [llvm_ptr_ty, llvm_v32i8_ty], []>;
+ Intrinsic<[], [llvm_ptr_ty, llvm_v32i8_ty], [IntrReadWriteArgMem]>;
}
// Conditional load ops
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx_maskload_pd : GCCBuiltin<"__builtin_ia32_maskloadpd">,
- Intrinsic<[llvm_v2f64_ty], [llvm_ptr_ty, llvm_v2f64_ty], [IntrReadMem]>;
+ Intrinsic<[llvm_v2f64_ty], [llvm_ptr_ty, llvm_v2f64_ty],
+ [IntrReadArgMem]>;
def int_x86_avx_maskload_ps : GCCBuiltin<"__builtin_ia32_maskloadps">,
- Intrinsic<[llvm_v4f32_ty], [llvm_ptr_ty, llvm_v4f32_ty], [IntrReadMem]>;
+ Intrinsic<[llvm_v4f32_ty], [llvm_ptr_ty, llvm_v4f32_ty],
+ [IntrReadArgMem]>;
def int_x86_avx_maskload_pd_256 : GCCBuiltin<"__builtin_ia32_maskloadpd256">,
- Intrinsic<[llvm_v4f64_ty], [llvm_ptr_ty, llvm_v4f64_ty], [IntrReadMem]>;
+ Intrinsic<[llvm_v4f64_ty], [llvm_ptr_ty, llvm_v4f64_ty],
+ [IntrReadArgMem]>;
def int_x86_avx_maskload_ps_256 : GCCBuiltin<"__builtin_ia32_maskloadps256">,
- Intrinsic<[llvm_v8f32_ty], [llvm_ptr_ty, llvm_v8f32_ty], [IntrReadMem]>;
+ Intrinsic<[llvm_v8f32_ty], [llvm_ptr_ty, llvm_v8f32_ty],
+ [IntrReadArgMem]>;
}
// Conditional store ops
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx_maskstore_pd : GCCBuiltin<"__builtin_ia32_maskstorepd">,
Intrinsic<[], [llvm_ptr_ty,
- llvm_v2f64_ty, llvm_v2f64_ty], []>;
+ llvm_v2f64_ty, llvm_v2f64_ty], [IntrReadWriteArgMem]>;
def int_x86_avx_maskstore_ps : GCCBuiltin<"__builtin_ia32_maskstoreps">,
Intrinsic<[], [llvm_ptr_ty,
- llvm_v4f32_ty, llvm_v4f32_ty], []>;
+ llvm_v4f32_ty, llvm_v4f32_ty], [IntrReadWriteArgMem]>;
def int_x86_avx_maskstore_pd_256 :
GCCBuiltin<"__builtin_ia32_maskstorepd256">,
Intrinsic<[], [llvm_ptr_ty,
- llvm_v4f64_ty, llvm_v4f64_ty], []>;
+ llvm_v4f64_ty, llvm_v4f64_ty], [IntrReadWriteArgMem]>;
def int_x86_avx_maskstore_ps_256 :
GCCBuiltin<"__builtin_ia32_maskstoreps256">,
Intrinsic<[], [llvm_ptr_ty,
- llvm_v8f32_ty, llvm_v8f32_ty], []>;
+ llvm_v8f32_ty, llvm_v8f32_ty], [IntrReadWriteArgMem]>;
}
//===----------------------------------------------------------------------===//
@@ -1632,7 +1636,7 @@
Intrinsic<[llvm_v8f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
def int_x86_avx2_vbroadcasti128 :
GCCBuiltin<"__builtin_ia32_vbroadcastsi256">,
- Intrinsic<[llvm_v4i64_ty], [llvm_ptr_ty], [IntrReadMem]>;
+ Intrinsic<[llvm_v4i64_ty], [llvm_ptr_ty], [IntrReadArgMem]>;
def int_x86_avx2_pbroadcastb_128 :
GCCBuiltin<"__builtin_ia32_pbroadcastb128">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;
@@ -1685,27 +1689,35 @@
// Conditional load ops
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx2_maskload_d : GCCBuiltin<"__builtin_ia32_maskloadd">,
- Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty, llvm_v4i32_ty], [IntrReadMem]>;
+ Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty, llvm_v4i32_ty],
+ [IntrReadArgMem]>;
def int_x86_avx2_maskload_q : GCCBuiltin<"__builtin_ia32_maskloadq">,
- Intrinsic<[llvm_v2i64_ty], [llvm_ptr_ty, llvm_v2i64_ty], [IntrReadMem]>;
+ Intrinsic<[llvm_v2i64_ty], [llvm_ptr_ty, llvm_v2i64_ty],
+ [IntrReadArgMem]>;
def int_x86_avx2_maskload_d_256 : GCCBuiltin<"__builtin_ia32_maskloadd256">,
- Intrinsic<[llvm_v8i32_ty], [llvm_ptr_ty, llvm_v8i32_ty], [IntrReadMem]>;
+ Intrinsic<[llvm_v8i32_ty], [llvm_ptr_ty, llvm_v8i32_ty],
+ [IntrReadArgMem]>;
def int_x86_avx2_maskload_q_256 : GCCBuiltin<"__builtin_ia32_maskloadq256">,
- Intrinsic<[llvm_v4i64_ty], [llvm_ptr_ty, llvm_v4i64_ty], [IntrReadMem]>;
+ Intrinsic<[llvm_v4i64_ty], [llvm_ptr_ty, llvm_v4i64_ty],
+ [IntrReadArgMem]>;
}
// Conditional store ops
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx2_maskstore_d : GCCBuiltin<"__builtin_ia32_maskstored">,
- Intrinsic<[], [llvm_ptr_ty, llvm_v4i32_ty, llvm_v4i32_ty], []>;
+ Intrinsic<[], [llvm_ptr_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrReadWriteArgMem]>;
def int_x86_avx2_maskstore_q : GCCBuiltin<"__builtin_ia32_maskstoreq">,
- Intrinsic<[], [llvm_ptr_ty, llvm_v2i64_ty, llvm_v2i64_ty], []>;
+ Intrinsic<[], [llvm_ptr_ty, llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrReadWriteArgMem]>;
def int_x86_avx2_maskstore_d_256 :
GCCBuiltin<"__builtin_ia32_maskstored256">,
- Intrinsic<[], [llvm_ptr_ty, llvm_v8i32_ty, llvm_v8i32_ty], []>;
+ Intrinsic<[], [llvm_ptr_ty, llvm_v8i32_ty, llvm_v8i32_ty],
+ [IntrReadWriteArgMem]>;
def int_x86_avx2_maskstore_q_256 :
GCCBuiltin<"__builtin_ia32_maskstoreq256">,
- Intrinsic<[], [llvm_ptr_ty, llvm_v4i64_ty, llvm_v4i64_ty], []>;
+ Intrinsic<[], [llvm_ptr_ty, llvm_v4i64_ty, llvm_v4i64_ty],
+ [IntrReadWriteArgMem]>;
}
// Variable bit shift ops
@@ -2547,3 +2559,15 @@
def int_x86_rdrand_32 : Intrinsic<[llvm_i32_ty, llvm_i32_ty], [], []>;
def int_x86_rdrand_64 : Intrinsic<[llvm_i64_ty, llvm_i32_ty], [], []>;
}
+
+//===----------------------------------------------------------------------===//
+// RTM intrinsics. Transactional Memory support.
+
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_xbegin : GCCBuiltin<"__builtin_ia32_xbegin">,
+ Intrinsic<[llvm_i32_ty], [], []>;
+ def int_x86_xend : GCCBuiltin<"__builtin_ia32_xend">,
+ Intrinsic<[], [], []>;
+ def int_x86_xabort : GCCBuiltin<"__builtin_ia32_xabort">,
+ Intrinsic<[], [llvm_i8_ty], [IntrNoReturn]>;
+}
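
For reference, a usage sketch of the new RTM intrinsics (not part of this
patch). It assumes a compiler that exposes them as builtins under -mrtm and
uses ~0u as the conventional "transaction started" return from xbegin; treat
both as assumptions rather than guarantees of this patch.

unsigned lock_elided_increment(int *counter) {
  unsigned status = __builtin_ia32_xbegin();
  if (status == ~0u) {            // transaction started
    ++*counter;                   // speculative update
    __builtin_ia32_xend();        // commit
    return 0;
  }
  return status;                  // aborted; caller takes the locking path
}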
Modified: llvm/branches/AMDILBackend/include/llvm/LLVMContext.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/LLVMContext.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/LLVMContext.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/LLVMContext.h Tue Jan 15 11:16:16 2013
@@ -15,6 +15,8 @@
#ifndef LLVM_LLVMCONTEXT_H
#define LLVM_LLVMCONTEXT_H
+#include "llvm/Support/Compiler.h"
+
namespace llvm {
class LLVMContextImpl;
@@ -43,7 +45,8 @@
MD_tbaa = 1, // "tbaa"
MD_prof = 2, // "prof"
MD_fpmath = 3, // "fpmath"
- MD_range = 4 // "range"
+ MD_range = 4, // "range"
+ MD_tbaa_struct = 5 // "tbaa.struct"
};
/// getMDKindID - Return a unique non-zero ID for the specified metadata kind.
@@ -87,9 +90,8 @@
void emitError(const Twine &ErrorStr);
private:
- // DO NOT IMPLEMENT
- LLVMContext(LLVMContext&);
- void operator=(LLVMContext&);
+ LLVMContext(LLVMContext&) LLVM_DELETED_FUNCTION;
+ void operator=(LLVMContext&) LLVM_DELETED_FUNCTION;
/// addModule - Register a module as being instantiated in this context. If
/// the context is deleted, the module will be deleted as well.
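
Throughout this patch the "DO NOT IMPLEMENT" comments become
LLVM_DELETED_FUNCTION from llvm/Support/Compiler.h. For readers without the
tree handy, it expands to roughly the following (a sketch from memory, not
the verbatim definition):

#ifndef __has_feature
#define __has_feature(x) 0           // compilers without the Clang extension
#endif

#if __has_feature(cxx_deleted_functions)
#define LLVM_DELETED_FUNCTION = delete   // any use is a compile-time error
#else
#define LLVM_DELETED_FUNCTION            // falls back to declared-not-defined
#endif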
Modified: llvm/branches/AMDILBackend/include/llvm/LinkAllPasses.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/LinkAllPasses.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/LinkAllPasses.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/LinkAllPasses.h Tue Jan 15 11:16:16 2013
@@ -60,10 +60,12 @@
(void) llvm::createCFGSimplificationPass();
(void) llvm::createConstantMergePass();
(void) llvm::createConstantPropagationPass();
+ (void) llvm::createCostModelAnalysisPass();
(void) llvm::createDeadArgEliminationPass();
(void) llvm::createDeadCodeEliminationPass();
(void) llvm::createDeadInstEliminationPass();
(void) llvm::createDeadStoreEliminationPass();
+ (void) llvm::createDependenceAnalysisPass();
(void) llvm::createDomOnlyPrinterPass();
(void) llvm::createDomPrinterPass();
(void) llvm::createDomOnlyViewerPass();
@@ -81,11 +83,10 @@
(void) llvm::createIPSCCPPass();
(void) llvm::createIndVarSimplifyPass();
(void) llvm::createInstructionCombiningPass();
- (void) llvm::createInternalizePass(false);
+ (void) llvm::createInternalizePass();
(void) llvm::createLCSSAPass();
(void) llvm::createLICMPass();
(void) llvm::createLazyValueInfoPass();
- (void) llvm::createLoopDependenceAnalysisPass();
(void) llvm::createLoopExtractorPass();
(void) llvm::createLoopSimplifyPass();
(void) llvm::createLoopStrengthReducePass();
@@ -107,6 +108,7 @@
(void) llvm::createProfileVerifierPass();
(void) llvm::createPathProfileVerifierPass();
(void) llvm::createProfileLoaderPass();
+ (void) llvm::createProfileMetadataLoaderPass();
(void) llvm::createPathProfileLoaderPass();
(void) llvm::createPromoteMemoryToRegisterPass();
(void) llvm::createDemoteRegisterToMemoryPass();
@@ -140,6 +142,7 @@
(void) llvm::createLoopDeletionPass();
(void) llvm::createPostDomTree();
(void) llvm::createInstructionNamerPass();
+ (void) llvm::createMetaRenamerPass();
(void) llvm::createFunctionAttrsPass();
(void) llvm::createMergeFunctionsPass();
(void) llvm::createPrintModulePass(0);
@@ -153,6 +156,7 @@
(void) llvm::createCorrelatedValuePropagationPass();
(void) llvm::createMemDepPrinter();
(void) llvm::createInstructionSimplifierPass();
+ (void) llvm::createLoopVectorizePass();
(void) llvm::createBBVectorizePass();
(void)new llvm::IntervalPartition();
Modified: llvm/branches/AMDILBackend/include/llvm/MC/MCAsmBackend.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/MC/MCAsmBackend.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/MC/MCAsmBackend.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/MC/MCAsmBackend.h Tue Jan 15 11:16:16 2013
@@ -30,12 +30,13 @@
/// MCAsmBackend - Generic interface to target specific assembler backends.
class MCAsmBackend {
- MCAsmBackend(const MCAsmBackend &); // DO NOT IMPLEMENT
- void operator=(const MCAsmBackend &); // DO NOT IMPLEMENT
+ MCAsmBackend(const MCAsmBackend &) LLVM_DELETED_FUNCTION;
+ void operator=(const MCAsmBackend &) LLVM_DELETED_FUNCTION;
protected: // Can only create subclasses.
MCAsmBackend();
unsigned HasReliableSymbolDifference : 1;
+ unsigned HasDataInCodeSupport : 1;
public:
virtual ~MCAsmBackend();
@@ -65,6 +66,12 @@
return HasReliableSymbolDifference;
}
+ /// hasDataInCodeSupport - Check whether this target implements data-in-code
+ /// markers. If not, data region directives will be ignored.
+ bool hasDataInCodeSupport() const {
+ return HasDataInCodeSupport;
+ }
+
/// doesSectionRequireSymbols - Check whether the given section requires that
/// all symbols (even temporaries) have symbol table entries.
virtual bool doesSectionRequireSymbols(const MCSection &Section) const {
@@ -99,7 +106,7 @@
/// @}
- /// applyFixup - Apply the \arg Value for given \arg Fixup into the provided
+ /// applyFixup - Apply the \p Value for the given \p Fixup into the provided
/// data fragment, at the offset specified by the fixup and following the
/// fixup kind as appropriate.
virtual void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
@@ -126,13 +133,20 @@
/// RelaxInstruction - Relax the instruction in the given fragment to the next
/// wider instruction.
///
- /// \param Inst - The instruction to relax, which may be the same as the
+ /// \param Inst The instruction to relax, which may be the same as the
/// output.
- /// \parm Res [output] - On return, the relaxed instruction.
+ /// \param [out] Res On return, the relaxed instruction.
virtual void relaxInstruction(const MCInst &Inst, MCInst &Res) const = 0;
/// @}
+ /// getMinimumNopSize - Returns the minimum size of a nop in bytes on this
+ /// target. The assembler will use this to emit excess padding in situations
+ /// where the padding required for simple alignment would be less than the
+ /// minimum nop size.
+ ///
+ virtual unsigned getMinimumNopSize() const { return 1; }
+
/// writeNopData - Write an (optimal) nop sequence of Count bytes to the given
/// output. If the target cannot generate such a sequence, it should return an
/// error.
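
A sketch of the assembler-side use of the new hook (the helper is invented;
writeNopData's signature is taken from this header):

#include "llvm/MC/MCAsmBackend.h"
using namespace llvm;

// Round the requested padding up to something the target can actually encode
// as a nop, mirroring the documented intent of getMinimumNopSize().
static bool emitAlignmentNops(const MCAsmBackend &AB, MCObjectWriter *OW,
                              uint64_t Count) {
  if (Count < AB.getMinimumNopSize())
    Count = AB.getMinimumNopSize();   // emit excess padding instead
  return AB.writeNopData(Count, OW);
}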
Modified: llvm/branches/AMDILBackend/include/llvm/MC/MCAsmInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/MC/MCAsmInfo.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/MC/MCAsmInfo.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/MC/MCAsmInfo.h Tue Jan 15 11:16:16 2013
@@ -33,7 +33,7 @@
}
namespace LCOMM {
- enum LCOMMType { None, NoAlignment, ByteAlignment };
+ enum LCOMMType { NoAlignment, ByteAlignment, Log2Alignment };
}
/// MCAsmInfo - This class is intended to be used as a base class for asm
@@ -247,14 +247,14 @@
/// .long a - b
bool HasAggressiveSymbolFolding; // Defaults to true.
- /// LCOMMDirectiveType - Describes if the target supports the .lcomm
- /// directive and whether it has an alignment parameter.
- LCOMM::LCOMMType LCOMMDirectiveType; // Defaults to LCOMM::None.
-
- /// COMMDirectiveAlignmentIsInBytes - True is COMMDirective's optional
+ /// COMMDirectiveAlignmentIsInBytes - True if .comm's and .lcomm's optional
/// alignment is to be specified in bytes instead of log2(n).
bool COMMDirectiveAlignmentIsInBytes; // Defaults to true;
+ /// LCOMMDirectiveAlignmentType - Describes whether the target's .lcomm
+ /// directive supports an alignment argument and how it is interpreted.
+ LCOMM::LCOMMType LCOMMDirectiveAlignmentType; // Defaults to NoAlignment.
+
/// HasDotTypeDotSizeDirective - True if the target has .type and .size
/// directives, this is true for most ELF targets.
bool HasDotTypeDotSizeDirective; // Defaults to true.
@@ -496,13 +496,13 @@
bool hasAggressiveSymbolFolding() const {
return HasAggressiveSymbolFolding;
}
- LCOMM::LCOMMType getLCOMMDirectiveType() const {
- return LCOMMDirectiveType;
- }
- bool hasDotTypeDotSizeDirective() const {return HasDotTypeDotSizeDirective;}
bool getCOMMDirectiveAlignmentIsInBytes() const {
return COMMDirectiveAlignmentIsInBytes;
}
+ LCOMM::LCOMMType getLCOMMDirectiveAlignmentType() const {
+ return LCOMMDirectiveAlignmentType;
+ }
+ bool hasDotTypeDotSizeDirective() const {return HasDotTypeDotSizeDirective;}
bool hasSingleParameterDotFile() const { return HasSingleParameterDotFile; }
bool hasNoDeadStrip() const { return HasNoDeadStrip; }
bool hasSymbolResolver() const { return HasSymbolResolver; }
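
A sketch of what the reworked enum lets an asm streamer do (function and
formatting details invented; Log2_32 is from llvm/Support/MathExtras.h):

#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

static void printLCOMM(raw_ostream &OS, const MCAsmInfo &MAI, StringRef Name,
                       uint64_t Size, unsigned ByteAlign) {
  OS << "\t.lcomm\t" << Name << ',' << Size;
  switch (MAI.getLCOMMDirectiveAlignmentType()) {
  case LCOMM::NoAlignment:                  // no alignment operand at all
    break;
  case LCOMM::ByteAlignment:
    OS << ',' << ByteAlign;                 // alignment given in bytes
    break;
  case LCOMM::Log2Alignment:
    OS << ',' << Log2_32(ByteAlign);        // alignment given as log2(n)
    break;
  }
  OS << '\n';
}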
Modified: llvm/branches/AMDILBackend/include/llvm/MC/MCAssembler.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/MC/MCAssembler.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/MC/MCAssembler.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/MC/MCAssembler.h Tue Jan 15 11:16:16 2013
@@ -40,8 +40,8 @@
class MCFragment : public ilist_node<MCFragment> {
friend class MCAsmLayout;
- MCFragment(const MCFragment&); // DO NOT IMPLEMENT
- void operator=(const MCFragment&); // DO NOT IMPLEMENT
+ MCFragment(const MCFragment&) LLVM_DELETED_FUNCTION;
+ void operator=(const MCFragment&) LLVM_DELETED_FUNCTION;
public:
enum FragmentType {
@@ -99,8 +99,6 @@
unsigned getLayoutOrder() const { return LayoutOrder; }
void setLayoutOrder(unsigned Value) { LayoutOrder = Value; }
- static bool classof(const MCFragment *O) { return true; }
-
void dump();
};
@@ -151,7 +149,6 @@
static bool classof(const MCFragment *F) {
return F->getKind() == MCFragment::FT_Data;
}
- static bool classof(const MCDataFragment *) { return true; }
};
// FIXME: This current incarnation of MCInstFragment doesn't make much sense, as
@@ -176,7 +173,7 @@
typedef SmallVectorImpl<MCFixup>::iterator fixup_iterator;
public:
- MCInstFragment(MCInst _Inst, MCSectionData *SD = 0)
+ MCInstFragment(const MCInst &_Inst, MCSectionData *SD = 0)
: MCFragment(FT_Inst, SD), Inst(_Inst) {
}
@@ -191,7 +188,7 @@
MCInst &getInst() { return Inst; }
const MCInst &getInst() const { return Inst; }
- void setInst(MCInst Value) { Inst = Value; }
+ void setInst(const MCInst& Value) { Inst = Value; }
/// @}
/// @name Fixup Access
@@ -213,7 +210,6 @@
static bool classof(const MCFragment *F) {
return F->getKind() == MCFragment::FT_Inst;
}
- static bool classof(const MCInstFragment *) { return true; }
};
class MCAlignFragment : public MCFragment {
@@ -225,7 +221,7 @@
/// Value - Value to use for filling padding bytes.
int64_t Value;
- /// ValueSize - The size of the integer (in bytes) of \arg Value.
+ /// ValueSize - The size of the integer (in bytes) of \p Value.
unsigned ValueSize;
/// MaxBytesToEmit - The maximum number of bytes to emit; if the alignment
@@ -263,7 +259,6 @@
static bool classof(const MCFragment *F) {
return F->getKind() == MCFragment::FT_Align;
}
- static bool classof(const MCAlignFragment *) { return true; }
};
class MCFillFragment : public MCFragment {
@@ -272,7 +267,7 @@
/// Value - Value to use for filling bytes.
int64_t Value;
- /// ValueSize - The size (in bytes) of \arg Value to use when filling, or 0 if
+ /// ValueSize - The size (in bytes) of \p Value to use when filling, or 0 if
/// this is a virtual fill fragment.
unsigned ValueSize;
@@ -302,7 +297,6 @@
static bool classof(const MCFragment *F) {
return F->getKind() == MCFragment::FT_Fill;
}
- static bool classof(const MCFillFragment *) { return true; }
};
class MCOrgFragment : public MCFragment {
@@ -331,7 +325,6 @@
static bool classof(const MCFragment *F) {
return F->getKind() == MCFragment::FT_Org;
}
- static bool classof(const MCOrgFragment *) { return true; }
};
class MCLEBFragment : public MCFragment {
@@ -364,7 +357,6 @@
static bool classof(const MCFragment *F) {
return F->getKind() == MCFragment::FT_LEB;
}
- static bool classof(const MCLEBFragment *) { return true; }
};
class MCDwarfLineAddrFragment : public MCFragment {
@@ -401,7 +393,6 @@
static bool classof(const MCFragment *F) {
return F->getKind() == MCFragment::FT_Dwarf;
}
- static bool classof(const MCDwarfLineAddrFragment *) { return true; }
};
class MCDwarfCallFrameFragment : public MCFragment {
@@ -431,7 +422,6 @@
static bool classof(const MCFragment *F) {
return F->getKind() == MCFragment::FT_DwarfFrame;
}
- static bool classof(const MCDwarfCallFrameFragment *) { return true; }
};
// FIXME: Should this be a separate class, or just merged into MCSection? Since
@@ -440,8 +430,8 @@
class MCSectionData : public ilist_node<MCSectionData> {
friend class MCAsmLayout;
- MCSectionData(const MCSectionData&); // DO NOT IMPLEMENT
- void operator=(const MCSectionData&); // DO NOT IMPLEMENT
+ MCSectionData(const MCSectionData&) LLVM_DELETED_FUNCTION;
+ void operator=(const MCSectionData&) LLVM_DELETED_FUNCTION;
public:
typedef iplist<MCFragment> FragmentListType;
@@ -683,8 +673,8 @@
typedef std::vector<DataRegionData>::iterator data_region_iterator;
private:
- MCAssembler(const MCAssembler&); // DO NOT IMPLEMENT
- void operator=(const MCAssembler&); // DO NOT IMPLEMENT
+ MCAssembler(const MCAssembler&) LLVM_DELETED_FUNCTION;
+ void operator=(const MCAssembler&) LLVM_DELETED_FUNCTION;
MCContext &Context;
@@ -738,7 +728,7 @@
/// \param Value [out] On return, the value of the fixup as currently laid
/// out.
/// \return Whether the fixup value was fully resolved. This is true if the
- /// \arg Value result is fixed, otherwise the value may change due to
+ /// \p Value result is fixed, otherwise the value may change due to
/// relocation.
bool evaluateFixup(const MCAsmLayout &Layout,
const MCFixup &Fixup, const MCFragment *DF,
@@ -775,7 +765,7 @@
public:
/// Compute the effective fragment size assuming it is laid out at the given
- /// \arg SectionAddress and \arg FragmentOffset.
+ /// \p SectionAddress and \p FragmentOffset.
uint64_t computeFragmentSize(const MCAsmLayout &Layout,
const MCFragment &F) const;
@@ -804,7 +794,7 @@
public:
/// Construct a new assembler instance.
///
- /// \arg OS - The stream to output to.
+ /// \param OS The stream to output to.
//
// FIXME: How are we going to parameterize this? Two obvious options are stay
// concrete and require clients to pass in a target like object. The other
@@ -824,7 +814,7 @@
MCObjectWriter &getWriter() const { return Writer; }
/// Finish - Do final processing and write the object to the output stream.
- /// \arg Writer is used for custom object writer (as the MCJIT does),
+ /// \p Writer is used for custom object writer (as the MCJIT does),
/// if not specified it is automatically created from backend.
void Finish();
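
The classof(const T *) overloads deleted in this file were only needed by the
old isa<> machinery; the kind-based overload alone now suffices. A usage
sketch (the helper is invented):

#include "llvm/MC/MCAssembler.h"
#include "llvm/Support/Casting.h"
using namespace llvm;

static bool fragmentHasFixups(const MCFragment *F) {
  // dyn_cast resolves through MCDataFragment::classof(const MCFragment *).
  if (const MCDataFragment *DF = dyn_cast<MCDataFragment>(F))
    return DF->fixup_begin() != DF->fixup_end();
  return false;
}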
Modified: llvm/branches/AMDILBackend/include/llvm/MC/MCCodeEmitter.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/MC/MCCodeEmitter.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/MC/MCCodeEmitter.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/MC/MCCodeEmitter.h Tue Jan 15 11:16:16 2013
@@ -10,6 +10,8 @@
#ifndef LLVM_MC_MCCODEEMITTER_H
#define LLVM_MC_MCCODEEMITTER_H
+#include "llvm/Support/Compiler.h"
+
namespace llvm {
class MCFixup;
class MCInst;
@@ -19,16 +21,16 @@
/// MCCodeEmitter - Generic instruction encoding interface.
class MCCodeEmitter {
private:
- MCCodeEmitter(const MCCodeEmitter &); // DO NOT IMPLEMENT
- void operator=(const MCCodeEmitter &); // DO NOT IMPLEMENT
+ MCCodeEmitter(const MCCodeEmitter &) LLVM_DELETED_FUNCTION;
+ void operator=(const MCCodeEmitter &) LLVM_DELETED_FUNCTION;
protected: // Can only create subclasses.
MCCodeEmitter();
public:
virtual ~MCCodeEmitter();
- /// EncodeInstruction - Encode the given \arg Inst to bytes on the output
- /// stream \arg OS.
+ /// EncodeInstruction - Encode the given \p Inst to bytes on the output
+ /// stream \p OS.
virtual void EncodeInstruction(const MCInst &Inst, raw_ostream &OS,
SmallVectorImpl<MCFixup> &Fixups) const = 0;
};
Modified: llvm/branches/AMDILBackend/include/llvm/MC/MCContext.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/MC/MCContext.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/MC/MCContext.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/MC/MCContext.h Tue Jan 15 11:16:16 2013
@@ -40,8 +40,8 @@
/// of the sections that it creates.
///
class MCContext {
- MCContext(const MCContext&); // DO NOT IMPLEMENT
- MCContext &operator=(const MCContext&); // DO NOT IMPLEMENT
+ MCContext(const MCContext&) LLVM_DELETED_FUNCTION;
+ MCContext &operator=(const MCContext&) LLVM_DELETED_FUNCTION;
public:
typedef StringMap<MCSymbol*, BumpPtrAllocator&> SymbolTable;
private:
@@ -183,6 +183,7 @@
/// LookupSymbol - Get the symbol for \p Name, or null.
MCSymbol *LookupSymbol(StringRef Name) const;
+ MCSymbol *LookupSymbol(const Twine &Name) const;
/// getSymbols - Get a reference for the symbol table for clients that
/// want to, for example, iterate over all symbols. 'const' because we
Modified: llvm/branches/AMDILBackend/include/llvm/MC/MCDwarf.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/MC/MCDwarf.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/MC/MCDwarf.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/MC/MCDwarf.h Tue Jan 15 11:16:16 2013
@@ -19,6 +19,7 @@
#include "llvm/MC/MachineLocation.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Dwarf.h"
+#include "llvm/Support/Compiler.h"
#include <vector>
namespace llvm {
@@ -48,8 +49,8 @@
MCDwarfFile(StringRef name, unsigned dirIndex)
: Name(name), DirIndex(dirIndex) {}
- MCDwarfFile(const MCDwarfFile&); // DO NOT IMPLEMENT
- void operator=(const MCDwarfFile&); // DO NOT IMPLEMENT
+ MCDwarfFile(const MCDwarfFile&) LLVM_DELETED_FUNCTION;
+ void operator=(const MCDwarfFile&) LLVM_DELETED_FUNCTION;
public:
/// getName - Get the base name of this MCDwarfFile.
StringRef getName() const { return Name; }
@@ -58,7 +59,7 @@
unsigned getDirIndex() const { return DirIndex; }
- /// print - Print the value to the stream \arg OS.
+ /// print - Print the value to the stream \p OS.
void print(raw_ostream &OS) const;
/// dump - Print the value to stderr.
@@ -177,8 +178,8 @@
class MCLineSection {
private:
- MCLineSection(const MCLineSection&); // DO NOT IMPLEMENT
- void operator=(const MCLineSection&); // DO NOT IMPLEMENT
+ MCLineSection(const MCLineSection&) LLVM_DELETED_FUNCTION;
+ void operator=(const MCLineSection&) LLVM_DELETED_FUNCTION;
public:
// Constructor to create an MCLineSection with an empty MCLineEntries
Modified: llvm/branches/AMDILBackend/include/llvm/MC/MCELFObjectWriter.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/MC/MCELFObjectWriter.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/MC/MCELFObjectWriter.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/MC/MCELFObjectWriter.h Tue Jan 15 11:16:16 2013
@@ -85,6 +85,9 @@
const MCFragment &F,
const MCFixup &Fixup,
bool IsPCRel) const;
+ virtual const MCSymbol *undefinedExplicitRelSym(const MCValue &Target,
+ const MCFixup &Fixup,
+ bool IsPCRel) const;
virtual void adjustFixupOffset(const MCFixup &Fixup,
uint64_t &RelocOffset);
@@ -93,9 +96,9 @@
/// @name Accessors
/// @{
- uint8_t getOSABI() { return OSABI; }
- uint16_t getEMachine() { return EMachine; }
- bool hasRelocationAddend() { return HasRelocationAddend; }
+ uint8_t getOSABI() const { return OSABI; }
+ uint16_t getEMachine() const { return EMachine; }
+ bool hasRelocationAddend() const { return HasRelocationAddend; }
bool is64Bit() const { return Is64Bit; }
bool isN64() const { return IsN64; }
/// @}
Modified: llvm/branches/AMDILBackend/include/llvm/MC/MCExpr.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/MC/MCExpr.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/MC/MCExpr.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/MC/MCExpr.h Tue Jan 15 11:16:16 2013
@@ -41,8 +41,8 @@
private:
ExprKind Kind;
- MCExpr(const MCExpr&); // DO NOT IMPLEMENT
- void operator=(const MCExpr&); // DO NOT IMPLEMENT
+ MCExpr(const MCExpr&) LLVM_DELETED_FUNCTION;
+ void operator=(const MCExpr&) LLVM_DELETED_FUNCTION;
bool EvaluateAsAbsolute(int64_t &Res, const MCAssembler *Asm,
const MCAsmLayout *Layout,
@@ -78,11 +78,11 @@
/// values. If not given, then only non-symbolic expressions will be
/// evaluated.
/// @result - True on success.
+ bool EvaluateAsAbsolute(int64_t &Res, const MCAsmLayout &Layout,
+ const SectionAddrMap &Addrs) const;
bool EvaluateAsAbsolute(int64_t &Res) const;
bool EvaluateAsAbsolute(int64_t &Res, const MCAssembler &Asm) const;
bool EvaluateAsAbsolute(int64_t &Res, const MCAsmLayout &Layout) const;
- bool EvaluateAsAbsolute(int64_t &Res, const MCAsmLayout &Layout,
- const SectionAddrMap &Addrs) const;
/// EvaluateAsRelocatable - Try to evaluate the expression to a relocatable
/// value, i.e. an expression of the fixed form (a - b + constant).
@@ -99,8 +99,6 @@
const MCSection *FindAssociatedSection() const;
/// @}
-
- static bool classof(const MCExpr *) { return true; }
};
inline raw_ostream &operator<<(raw_ostream &OS, const MCExpr &E) {
@@ -132,7 +130,6 @@
static bool classof(const MCExpr *E) {
return E->getKind() == MCExpr::Constant;
}
- static bool classof(const MCConstantExpr *) { return true; }
};
/// MCSymbolRefExpr - Represent a reference to a symbol from inside an
@@ -170,8 +167,10 @@
VK_ARM_TPOFF,
VK_ARM_GOTTPOFF,
VK_ARM_TARGET1,
+ VK_ARM_TARGET2,
- VK_PPC_TOC,
+ VK_PPC_TOC, // TOC base
+ VK_PPC_TOC_ENTRY, // TOC entry
VK_PPC_DARWIN_HA16, // ha16(symbol)
VK_PPC_DARWIN_LO16, // lo16(symbol)
VK_PPC_GAS_HA16, // symbol at ha
@@ -198,7 +197,11 @@
VK_Mips_GOT_PAGE,
VK_Mips_GOT_OFST,
VK_Mips_HIGHER,
- VK_Mips_HIGHEST
+ VK_Mips_HIGHEST,
+ VK_Mips_GOT_HI16,
+ VK_Mips_GOT_LO16,
+ VK_Mips_CALL_HI16,
+ VK_Mips_CALL_LO16
};
private:
@@ -247,7 +250,6 @@
static bool classof(const MCExpr *E) {
return E->getKind() == MCExpr::SymbolRef;
}
- static bool classof(const MCSymbolRefExpr *) { return true; }
};
/// MCUnaryExpr - Unary assembler expressions.
@@ -301,7 +303,6 @@
static bool classof(const MCExpr *E) {
return E->getKind() == MCExpr::Unary;
}
- static bool classof(const MCUnaryExpr *) { return true; }
};
/// MCBinaryExpr - Binary assembler expressions.
@@ -436,7 +437,6 @@
static bool classof(const MCExpr *E) {
return E->getKind() == MCExpr::Binary;
}
- static bool classof(const MCBinaryExpr *) { return true; }
};
/// MCTargetExpr - This is an extension point for target-specific MCExpr
@@ -445,7 +445,7 @@
/// NOTE: All subclasses are required to have trivial destructors because
/// MCExprs are bump pointer allocated and not destructed.
class MCTargetExpr : public MCExpr {
- virtual void Anchor();
+ virtual void anchor();
protected:
MCTargetExpr() : MCExpr(Target) {}
virtual ~MCTargetExpr() {}
@@ -460,7 +460,6 @@
static bool classof(const MCExpr *E) {
return E->getKind() == MCExpr::Target;
}
- static bool classof(const MCTargetExpr *) { return true; }
};
} // end namespace llvm
Modified: llvm/branches/AMDILBackend/include/llvm/MC/MCInst.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/MC/MCInst.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/MC/MCInst.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/MC/MCInst.h Tue Jan 15 11:16:16 2013
@@ -182,7 +182,7 @@
void dump() const;
/// \brief Dump the MCInst as prettily as possible using the additional MC
- /// structures, if given. Operators are separated by the \arg Separator
+ /// structures, if given. Operators are separated by the \p Separator
/// string.
void dump_pretty(raw_ostream &OS, const MCAsmInfo *MAI = 0,
const MCInstPrinter *Printer = 0,
Modified: llvm/branches/AMDILBackend/include/llvm/MC/MCInstPrinter.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/MC/MCInstPrinter.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/MC/MCInstPrinter.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/MC/MCInstPrinter.h Tue Jan 15 11:16:16 2013
@@ -33,12 +33,16 @@
/// The current set of available features.
unsigned AvailableFeatures;
+ /// True if we are printing marked up assembly.
+ bool UseMarkup;
+
/// Utility function for printing annotations.
void printAnnotation(raw_ostream &OS, StringRef Annot);
public:
MCInstPrinter(const MCAsmInfo &mai, const MCInstrInfo &mii,
const MCRegisterInfo &mri)
- : CommentStream(0), MAI(mai), MII(mii), MRI(mri), AvailableFeatures(0) {}
+ : CommentStream(0), MAI(mai), MII(mii), MRI(mri), AvailableFeatures(0),
+ UseMarkup(0) {}
virtual ~MCInstPrinter();
@@ -59,6 +63,13 @@
unsigned getAvailableFeatures() const { return AvailableFeatures; }
void setAvailableFeatures(unsigned Value) { AvailableFeatures = Value; }
+
+ bool getUseMarkup() const { return UseMarkup; }
+ void setUseMarkup(bool Value) { UseMarkup = Value; }
+
+ /// Utility functions to make adding markups simpler.
+ StringRef markup(StringRef s) const;
+ StringRef markup(StringRef a, StringRef b) const;
};
} // namespace llvm
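
A usage sketch for the new markup hooks (printer and stream names invented;
the "<reg:...>" tag spelling follows the asm markup proposal this supports,
which is an assumption, not something defined in this header):

#include "llvm/MC/MCInstPrinter.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

static void printRegOperand(raw_ostream &OS, const MCInstPrinter &IP,
                            StringRef RegName) {
  // With setUseMarkup(true) this prints "<reg:r0>"; otherwise just "r0".
  OS << IP.markup("<reg:") << RegName << IP.markup(">");
}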
Modified: llvm/branches/AMDILBackend/include/llvm/MC/MCInstrDesc.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/MC/MCInstrDesc.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/MC/MCInstrDesc.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/MC/MCInstrDesc.h Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-//===-- llvm/Mc/McInstrDesc.h - Instruction Descriptors -*- C++ -*-===//
+//===-- llvm/MC/MCInstrDesc.h - Instruction Descriptors -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -107,6 +107,7 @@
Compare,
MoveImm,
Bitcast,
+ Select,
DelaySlot,
FoldableAsLoad,
MayLoad,
@@ -282,6 +283,12 @@
return Flags & (1 << MCID::Bitcast);
}
+ /// isSelect - Return true if this is a select instruction.
+ ///
+ bool isSelect() const {
+ return Flags & (1 << MCID::Select);
+ }
+
/// isNotDuplicable - Return true if this instruction cannot be safely
/// duplicated. For example, if the instruction has unique labels attached
/// to it, duplicating it would cause multiple definition errors.
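
A sketch of the target-independent query the new Select flag makes possible
(the helper is invented; the flag itself is set by instruction definitions):

#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
using namespace llvm;

static bool isSelectOpcode(const MCInstrInfo &MII, unsigned Opcode) {
  return MII.get(Opcode).isSelect();
}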
Modified: llvm/branches/AMDILBackend/include/llvm/MC/MCLabel.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/MC/MCLabel.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/MC/MCLabel.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/MC/MCLabel.h Tue Jan 15 11:16:16 2013
@@ -14,6 +14,8 @@
#ifndef LLVM_MC_MCLABEL_H
#define LLVM_MC_MCLABEL_H
+#include "llvm/Support/Compiler.h"
+
namespace llvm {
class MCContext;
class raw_ostream;
@@ -30,8 +32,8 @@
MCLabel(unsigned instance)
: Instance(instance) {}
- MCLabel(const MCLabel&); // DO NOT IMPLEMENT
- void operator=(const MCLabel&); // DO NOT IMPLEMENT
+ MCLabel(const MCLabel&) LLVM_DELETED_FUNCTION;
+ void operator=(const MCLabel&) LLVM_DELETED_FUNCTION;
public:
/// getInstance - Get the current instance of this Directional Local Label.
unsigned getInstance() const { return Instance; }
@@ -40,7 +42,7 @@
/// Label.
unsigned incInstance() { return ++Instance; }
- /// print - Print the value to the stream \arg OS.
+ /// print - Print the value to the stream \p OS.
void print(raw_ostream &OS) const;
/// dump - Print the value to stderr.
Modified: llvm/branches/AMDILBackend/include/llvm/MC/MCMachObjectWriter.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/MC/MCMachObjectWriter.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/MC/MCMachObjectWriter.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/MC/MCMachObjectWriter.h Tue Jan 15 11:16:16 2013
@@ -153,8 +153,8 @@
/// WriteSegmentLoadCommand - Write a segment load command.
///
- /// \arg NumSections - The number of sections in this segment.
- /// \arg SectionDataSize - The total size of the sections.
+ /// \param NumSections The number of sections in this segment.
+ /// \param SectionDataSize The total size of the sections.
void WriteSegmentLoadCommand(unsigned NumSections,
uint64_t VMSize,
uint64_t SectionDataStartOffset,
@@ -233,6 +233,8 @@
void computeSectionAddresses(const MCAssembler &Asm,
const MCAsmLayout &Layout);
+ void markAbsoluteVariableSymbols(MCAssembler &Asm,
+ const MCAsmLayout &Layout);
void ExecutePostLayoutBinding(MCAssembler &Asm, const MCAsmLayout &Layout);
virtual bool IsSymbolRefDifferenceFullyResolvedImpl(const MCAssembler &Asm,
Modified: llvm/branches/AMDILBackend/include/llvm/MC/MCObjectFileInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/MC/MCObjectFileInfo.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/MC/MCObjectFileInfo.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/MC/MCObjectFileInfo.h Tue Jan 15 11:16:16 2013
@@ -84,7 +84,8 @@
/// this is the section to emit them into.
const MCSection *CompactUnwindSection;
- /// DwarfAccelNamesSection, DwarfAccelObjCSection
+ /// DwarfAccelNamesSection, DwarfAccelObjCSection,
+ /// DwarfAccelNamespaceSection, DwarfAccelTypesSection -
/// If we use the DWARF accelerated hash tables then we want to emit these
/// sections.
const MCSection *DwarfAccelNamesSection;
Modified: llvm/branches/AMDILBackend/include/llvm/MC/MCObjectStreamer.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/MC/MCObjectStreamer.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/MC/MCObjectStreamer.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/MC/MCObjectStreamer.h Tue Jan 15 11:16:16 2013
@@ -72,6 +72,13 @@
virtual void ChangeSection(const MCSection *Section);
virtual void EmitInstruction(const MCInst &Inst);
virtual void EmitInstToFragment(const MCInst &Inst);
+ virtual void EmitBytes(StringRef Data, unsigned AddrSpace);
+ virtual void EmitValueToAlignment(unsigned ByteAlignment,
+ int64_t Value = 0,
+ unsigned ValueSize = 1,
+ unsigned MaxBytesToEmit = 0);
+ virtual void EmitCodeAlignment(unsigned ByteAlignment,
+ unsigned MaxBytesToEmit = 0);
virtual bool EmitValueToOffset(const MCExpr *Offset, unsigned char Value);
virtual void EmitDwarfAdvanceLineAddr(int64_t LineDelta,
const MCSymbol *LastLabel,
@@ -80,6 +87,9 @@
virtual void EmitDwarfAdvanceFrameAddr(const MCSymbol *LastLabel,
const MCSymbol *Label);
virtual void EmitGPRel32Value(const MCExpr *Value);
+ virtual void EmitGPRel64Value(const MCExpr *Value);
+ virtual void EmitFill(uint64_t NumBytes, uint8_t FillValue,
+ unsigned AddrSpace);
virtual void FinishImpl();
/// @}
Modified: llvm/branches/AMDILBackend/include/llvm/MC/MCObjectWriter.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/MC/MCObjectWriter.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/MC/MCObjectWriter.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/MC/MCObjectWriter.h Tue Jan 15 11:16:16 2013
@@ -11,6 +11,7 @@
#define LLVM_MC_MCOBJECTWRITER_H
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataTypes.h"
#include <cassert>
@@ -35,8 +36,8 @@
/// The object writer also contains a number of helper methods for writing
/// binary data to the output stream.
class MCObjectWriter {
- MCObjectWriter(const MCObjectWriter &); // DO NOT IMPLEMENT
- void operator=(const MCObjectWriter &); // DO NOT IMPLEMENT
+ MCObjectWriter(const MCObjectWriter &) LLVM_DELETED_FUNCTION;
+ void operator=(const MCObjectWriter &) LLVM_DELETED_FUNCTION;
protected:
raw_ostream &OS;
Modified: llvm/branches/AMDILBackend/include/llvm/MC/MCParser/AsmLexer.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/MC/MCParser/AsmLexer.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/MC/MCParser/AsmLexer.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/MC/MCParser/AsmLexer.h Tue Jan 15 11:16:16 2013
@@ -31,8 +31,8 @@
const MemoryBuffer *CurBuf;
bool isAtStartOfLine;
- void operator=(const AsmLexer&); // DO NOT IMPLEMENT
- AsmLexer(const AsmLexer&); // DO NOT IMPLEMENT
+ void operator=(const AsmLexer&) LLVM_DELETED_FUNCTION;
+ AsmLexer(const AsmLexer&) LLVM_DELETED_FUNCTION;
protected:
/// LexToken - Read the next token and return its code.
Modified: llvm/branches/AMDILBackend/include/llvm/MC/MCParser/MCAsmLexer.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/MC/MCParser/MCAsmLexer.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/MC/MCParser/MCAsmLexer.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/MC/MCParser/MCAsmLexer.h Tue Jan 15 11:16:16 2013
@@ -11,6 +11,7 @@
#define LLVM_MC_MCASMLEXER_H
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/SMLoc.h"
@@ -39,6 +40,7 @@
// No-value.
EndOfStatement,
Colon,
+ Space,
Plus, Minus, Tilde,
Slash, // '/'
BackSlash, // '\'
@@ -121,10 +123,11 @@
SMLoc ErrLoc;
std::string Err;
- MCAsmLexer(const MCAsmLexer &); // DO NOT IMPLEMENT
- void operator=(const MCAsmLexer &); // DO NOT IMPLEMENT
+ MCAsmLexer(const MCAsmLexer &) LLVM_DELETED_FUNCTION;
+ void operator=(const MCAsmLexer &) LLVM_DELETED_FUNCTION;
protected: // Can only create subclasses.
const char *TokStart;
+ bool SkipSpace;
MCAsmLexer();
@@ -169,11 +172,14 @@
/// getKind - Get the kind of current token.
AsmToken::TokenKind getKind() const { return CurTok.getKind(); }
- /// is - Check if the current token has kind \arg K.
+ /// is - Check if the current token has kind \p K.
bool is(AsmToken::TokenKind K) const { return CurTok.is(K); }
- /// isNot - Check if the current token has kind \arg K.
+ /// isNot - Check if the current token has kind \p K.
bool isNot(AsmToken::TokenKind K) const { return CurTok.isNot(K); }
+
+ /// setSkipSpace - Set whether spaces should be ignored by the lexer
+ void setSkipSpace(bool val) { SkipSpace = val; }
};
} // End llvm namespace
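
A sketch of the new lexer knob (the counting loop is invented): parsers that
are whitespace-sensitive, such as the MS inline-asm support added elsewhere
in this patch, can ask for AsmToken::Space instead of having it eaten.

#include "llvm/MC/MCParser/MCAsmLexer.h"
using namespace llvm;

static unsigned countTokensWithSpaces(MCAsmLexer &Lexer) {
  Lexer.setSkipSpace(false);               // deliver Space tokens too
  unsigned N = 0;
  while (Lexer.Lex().isNot(AsmToken::Eof))
    ++N;
  Lexer.setSkipSpace(true);                // restore the default
  return N;
}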
Modified: llvm/branches/AMDILBackend/include/llvm/MC/MCParser/MCAsmParser.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/MC/MCParser/MCAsmParser.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/MC/MCParser/MCAsmParser.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/MC/MCParser/MCAsmParser.h Tue Jan 15 11:16:16 2013
@@ -20,6 +20,9 @@
class MCAsmParserExtension;
class MCContext;
class MCExpr;
+class MCInstPrinter;
+class MCInstrInfo;
+class MCParsedAsmOperand;
class MCStreamer;
class MCTargetAsmParser;
class SMLoc;
@@ -28,6 +31,16 @@
class StringRef;
class Twine;
+/// MCAsmParserSemaCallback - Generic Sema callback for assembly parser.
+class MCAsmParserSemaCallback {
+public:
+ virtual ~MCAsmParserSemaCallback();
+ virtual void *LookupInlineAsmIdentifier(StringRef Name, void *Loc,
+ unsigned &Size) = 0;
+ virtual bool LookupInlineAsmField(StringRef Base, StringRef Member,
+ unsigned &Offset) = 0;
+};
+
/// MCAsmParser - Generic assembler parser interface, for use by target specific
/// assembly parsers.
class MCAsmParser {
@@ -35,8 +48,8 @@
typedef bool (*DirectiveHandler)(MCAsmParserExtension*, StringRef, SMLoc);
private:
- MCAsmParser(const MCAsmParser &); // DO NOT IMPLEMENT
- void operator=(const MCAsmParser &); // DO NOT IMPLEMENT
+ MCAsmParser(const MCAsmParser &) LLVM_DELETED_FUNCTION;
+ void operator=(const MCAsmParser &) LLVM_DELETED_FUNCTION;
MCTargetAsmParser *TargetParser;
@@ -73,15 +86,26 @@
/// Run - Run the parser on the input source buffer.
virtual bool Run(bool NoInitialTextSection, bool NoFinalize = false) = 0;
- /// Warning - Emit a warning at the location \arg L, with the message \arg
- /// Msg.
+ virtual void setParsingInlineAsm(bool V) = 0;
+ virtual bool isParsingInlineAsm() = 0;
+
+ /// ParseMSInlineAsm - Parse ms-style inline assembly.
+ virtual bool ParseMSInlineAsm(void *AsmLoc, std::string &AsmString,
+ unsigned &NumOutputs, unsigned &NumInputs,
+ SmallVectorImpl<std::pair<void *, bool> > &OpDecls,
+ SmallVectorImpl<std::string> &Constraints,
+ SmallVectorImpl<std::string> &Clobbers,
+ const MCInstrInfo *MII,
+ const MCInstPrinter *IP,
+ MCAsmParserSemaCallback &SI) = 0;
+
+ /// Warning - Emit a warning at the location \p L, with the message \p Msg.
///
/// \return The return value is true, if warnings are fatal.
virtual bool Warning(SMLoc L, const Twine &Msg,
ArrayRef<SMRange> Ranges = ArrayRef<SMRange>()) = 0;
- /// Error - Emit an error at the location \arg L, with the message \arg
- /// Msg.
+ /// Error - Emit an error at the location \p L, with the message \p Msg.
///
/// \return The return value is always true, as an idiomatic convenience to
/// clients.
@@ -100,7 +124,7 @@
ArrayRef<SMRange> Ranges = ArrayRef<SMRange>());
/// ParseIdentifier - Parse an identifier or string (as a quoted identifier)
- /// and set \arg Res to the identifier contents.
+ /// and set \p Res to the identifier contents.
virtual bool ParseIdentifier(StringRef &Res) = 0;
/// \brief Parse up to the end of statement and return the contents from the
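(Illustrative note, not part of the patch.) MCAsmParserSemaCallback is the hook a frontend such as clang's Sema implements so ParseMSInlineAsm can resolve C/C++ identifiers named inside ms-style inline assembly. A rough sketch of a conforming implementation with placeholder lookups (all names and the failure convention are assumptions of this sketch):

    class MySemaCallback : public MCAsmParserSemaCallback {
    public:
      virtual ~MySemaCallback() {}
      virtual void *LookupInlineAsmIdentifier(StringRef Name, void *Loc,
                                              unsigned &Size) {
        // Return an opaque decl pointer and its size in bytes,
        // or null if Name is unknown (placeholder: always unknown).
        Size = 0;
        return 0;
      }
      virtual bool LookupInlineAsmField(StringRef Base, StringRef Member,
                                        unsigned &Offset) {
        // Fill in the byte offset of Base.Member; the return value
        // reports failure (assumed convention in this sketch).
        Offset = 0;
        return true;
      }
    };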
Modified: llvm/branches/AMDILBackend/include/llvm/MC/MCParser/MCAsmParserExtension.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/MC/MCParser/MCAsmParserExtension.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/MC/MCParser/MCAsmParserExtension.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/MC/MCParser/MCAsmParserExtension.h Tue Jan 15 11:16:16 2013
@@ -21,8 +21,8 @@
/// which is implemented by target and object file assembly parser
/// implementations.
class MCAsmParserExtension {
- MCAsmParserExtension(const MCAsmParserExtension &); // DO NOT IMPLEMENT
- void operator=(const MCAsmParserExtension &); // DO NOT IMPLEMENT
+ MCAsmParserExtension(const MCAsmParserExtension &) LLVM_DELETED_FUNCTION;
+ void operator=(const MCAsmParserExtension &) LLVM_DELETED_FUNCTION;
MCAsmParser *Parser;
@@ -43,8 +43,8 @@
public:
virtual ~MCAsmParserExtension();
- /// \brief Initialize the extension for parsing using the given \arg
- /// Parser. The extension should use the AsmParser interfaces to register its
+ /// \brief Initialize the extension for parsing using the given \p Parser.
+ /// The extension should use the AsmParser interfaces to register its
/// parsing routines.
virtual void Initialize(MCAsmParser &Parser);
Modified: llvm/branches/AMDILBackend/include/llvm/MC/MCParser/MCParsedAsmOperand.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/MC/MCParser/MCParsedAsmOperand.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/MC/MCParser/MCParsedAsmOperand.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/MC/MCParser/MCParsedAsmOperand.h Tue Jan 15 11:16:16 2013
@@ -19,15 +19,69 @@
/// base class is used by target-independent clients and is the interface
/// between parsing an asm instruction and recognizing it.
class MCParsedAsmOperand {
+ /// MCOperandNum - The corresponding MCInst operand number. Only valid when
+ /// parsing MS-style inline assembly.
+ unsigned MCOperandNum;
+
+ /// Constraint - The constraint on this operand. Only valid when parsing
+ /// MS-style inline assembly.
+ std::string Constraint;
+
public:
MCParsedAsmOperand() {}
virtual ~MCParsedAsmOperand() {}
+ void setConstraint(StringRef C) { Constraint = C.str(); }
+ StringRef getConstraint() { return Constraint; }
+
+ void setMCOperandNum(unsigned OpNum) { MCOperandNum = OpNum; }
+ unsigned getMCOperandNum() { return MCOperandNum; }
+
+ unsigned getNameLen() {
+ assert(getStartLoc().isValid() && "Invalid StartLoc!");
+ assert(getEndLoc().isValid() && "Invalid EndLoc!");
+ return getEndLoc().getPointer() - getStartLoc().getPointer();
+ }
+
+ StringRef getName() {
+ return StringRef(getStartLoc().getPointer(), getNameLen());
+ }
+
+ /// isToken - Is this a token operand?
+ virtual bool isToken() const = 0;
+ /// isImm - Is this an immediate operand?
+ virtual bool isImm() const = 0;
+ /// isReg - Is this a register operand?
+ virtual bool isReg() const = 0;
+ virtual unsigned getReg() const = 0;
+
+ /// isMem - Is this a memory operand?
+ virtual bool isMem() const = 0;
+ virtual unsigned getMemSize() const { return 0; }
+
/// getStartLoc - Get the location of the first token of this operand.
virtual SMLoc getStartLoc() const = 0;
/// getEndLoc - Get the location of the last token of this operand.
virtual SMLoc getEndLoc() const = 0;
+ /// needAsmRewrite - AsmRewrites happen in both the target-independent and
+ /// target-dependent parsers. The target-independent parser calls this
+ /// function to determine if the target-dependent parser has already taken
+ /// care of the rewrites. Only valid when parsing MS-style inline assembly.
+ virtual bool needAsmRewrite() const { return true; }
+
+ /// isOffsetOf - Do we need to emit code to get the offset of the variable,
+ /// rather than the value of the variable? Only valid when parsing MS-style
+ /// inline assembly.
+ virtual bool isOffsetOf() const { return false; }
+
+ /// getOffsetOfLoc - Get the location of the offset operator.
+ virtual SMLoc getOffsetOfLoc() const { return SMLoc(); }
+
+ /// needSizeDirective - Do we need to emit a sizing directive for this
+ /// operand? Only valid when parsing MS-style inline assembly.
+ virtual bool needSizeDirective() const { return false; }
+
/// print - Print a debug representation of the operand to the given stream.
virtual void print(raw_ostream &OS) const = 0;
/// dump - Print to the debug stream.
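(Illustrative note, not part of the patch.) A subclass now has a fixed set of pure virtuals to provide; the getName/getNameLen helpers above then recover the operand's spelling straight from its source range. A minimal hypothetical register operand:

    class MyRegOperand : public MCParsedAsmOperand {
      unsigned RegNo;
      SMLoc Start, End;
    public:
      MyRegOperand(unsigned R, SMLoc S, SMLoc E)
        : RegNo(R), Start(S), End(E) {}
      virtual bool isToken() const { return false; }
      virtual bool isImm() const { return false; }
      virtual bool isReg() const { return true; }
      virtual unsigned getReg() const { return RegNo; }
      virtual bool isMem() const { return false; }
      virtual SMLoc getStartLoc() const { return Start; }
      virtual SMLoc getEndLoc() const { return End; }
      virtual void print(raw_ostream &OS) const { OS << "reg " << RegNo; }
    };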
Modified: llvm/branches/AMDILBackend/include/llvm/MC/MCRegisterInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/MC/MCRegisterInfo.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/MC/MCRegisterInfo.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/MC/MCRegisterInfo.h Tue Jan 15 11:16:16 2013
@@ -333,6 +333,13 @@
return NumRegs;
}
+ /// getNumSubRegIndices - Return the number of sub-register indices
+ /// understood by the target. Index 0 is reserved for the no-op sub-register,
+ /// while 1 to getNumSubRegIndices() - 1 represent real sub-registers.
+ unsigned getNumSubRegIndices() const {
+ return NumSubRegIndices;
+ }
+
/// getNumRegUnits - Return the number of (native) register units in the
/// target. Register units are numbered from 0 to getNumRegUnits() - 1. They
/// can be accessed through MCRegUnitIterator defined below.
@@ -363,7 +370,7 @@
/// getRegClass - Returns the register class associated with the enumeration
/// value. See class MCOperandInfo.
- const MCRegisterClass getRegClass(unsigned i) const {
+ const MCRegisterClass& getRegClass(unsigned i) const {
assert(i < getNumRegClasses() && "Register Class ID out of range");
return Classes[i];
}
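(Illustrative note, not part of the patch.) With getRegClass returning a reference, clients iterate the class table without copying descriptors, and getNumSubRegIndices bounds the sub-register index space. A sketch, assuming an MCRegisterInfo reference MRI:

    for (unsigned i = 0, e = MRI.getNumRegClasses(); i != e; ++i) {
      const MCRegisterClass &RC = MRI.getRegClass(i);
      (void)RC;  // e.g. RC.getNumRegs(), RC.contains(Reg)
    }
    // Index 0 is the no-op sub-register; real indices start at 1.
    for (unsigned Idx = 1, e = MRI.getNumSubRegIndices(); Idx != e; ++Idx) {
      // per-index queries go here
    }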
Modified: llvm/branches/AMDILBackend/include/llvm/MC/MCSchedule.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/MC/MCSchedule.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/MC/MCSchedule.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/MC/MCSchedule.h Tue Jan 15 11:16:16 2013
@@ -16,17 +16,111 @@
#define LLVM_MC_MCSCHEDMODEL_H
#include "llvm/Support/DataTypes.h"
+#include <cassert>
namespace llvm {
struct InstrItinerary;
+/// Define a kind of processor resource that will be modeled by the scheduler.
+struct MCProcResourceDesc {
+#ifndef NDEBUG
+ const char *Name;
+#endif
+ unsigned NumUnits; // Number of resources of this kind
+ unsigned SuperIdx; // Index of the resource kind that contains this kind.
+
+ // Buffered resources may be consumed at some indeterminate cycle after
+ // dispatch (e.g. for instructions that may issue out-of-order). Unbuffered
+ // resources always consume their resource some fixed number of cycles after
+ // dispatch (e.g. for instruction interlocking that may stall the pipeline).
+ bool IsBuffered;
+
+ bool operator==(const MCProcResourceDesc &Other) const {
+ return NumUnits == Other.NumUnits && SuperIdx == Other.SuperIdx
+ && IsBuffered == Other.IsBuffered;
+ }
+};
+
+/// Identify one of the processor resource kinds consumed by a particular
+/// scheduling class for the specified number of cycles.
+struct MCWriteProcResEntry {
+ unsigned ProcResourceIdx;
+ unsigned Cycles;
+
+ bool operator==(const MCWriteProcResEntry &Other) const {
+ return ProcResourceIdx == Other.ProcResourceIdx && Cycles == Other.Cycles;
+ }
+};
+
+/// Specify the latency in CPU cycles for a particular scheduling class and def
+/// index. -1 indicates an invalid latency. Heuristics would typically consider
+/// an instruction with invalid latency to have infinite latency. Also identify
+/// the WriteResources of this def. When the operand expands to a sequence of
+/// writes, this ID is the last write in the sequence.
+struct MCWriteLatencyEntry {
+ int Cycles;
+ unsigned WriteResourceID;
+
+ bool operator==(const MCWriteLatencyEntry &Other) const {
+ return Cycles == Other.Cycles && WriteResourceID == Other.WriteResourceID;
+ }
+};
+
+/// Specify the number of cycles allowed after instruction issue before a
+/// particular use operand reads its registers. This effectively reduces the
+/// write's latency. Here we allow negative cycles for corner cases where
+/// latency increases. This rule only applies when the entry's WriteResource
+/// matches the write's WriteResource.
+///
+/// MCReadAdvanceEntries are sorted first by operand index (UseIdx), then by
+/// WriteResourceID.
+struct MCReadAdvanceEntry {
+ unsigned UseIdx;
+ unsigned WriteResourceID;
+ int Cycles;
+
+ bool operator==(const MCReadAdvanceEntry &Other) const {
+ return UseIdx == Other.UseIdx && WriteResourceID == Other.WriteResourceID
+ && Cycles == Other.Cycles;
+ }
+};
+
+/// Summarize the scheduling resources required for an instruction of a
+/// particular scheduling class.
+///
+/// Defined as an aggregate struct for creating tables with initializer lists.
+struct MCSchedClassDesc {
+ static const unsigned short InvalidNumMicroOps = UINT16_MAX;
+ static const unsigned short VariantNumMicroOps = UINT16_MAX - 1;
+
+#ifndef NDEBUG
+ const char* Name;
+#endif
+ unsigned short NumMicroOps;
+ bool BeginGroup;
+ bool EndGroup;
+ unsigned WriteProcResIdx; // First index into WriteProcResTable.
+ unsigned NumWriteProcResEntries;
+ unsigned WriteLatencyIdx; // First index into WriteLatencyTable.
+ unsigned NumWriteLatencyEntries;
+ unsigned ReadAdvanceIdx; // First index into ReadAdvanceTable.
+ unsigned NumReadAdvanceEntries;
+
+ bool isValid() const {
+ return NumMicroOps != InvalidNumMicroOps;
+ }
+ bool isVariant() const {
+ return NumMicroOps == VariantNumMicroOps;
+ }
+};
+
/// Machine model for scheduling, bundling, and heuristics.
///
/// The machine model directly provides basic information about the
/// microarchitecture to the scheduler in the form of properties. It also
-/// optionally refers to scheduler resources tables and itinerary
-/// tables. Scheduler resources tables model the latency and cost for each
+/// optionally refers to scheduler resource tables and itinerary
+/// tables. Scheduler resource tables model the latency and cost for each
/// instruction type. Itinerary tables are an independent mechanism that
/// provides a detailed reservation table describing each cycle of instruction
/// execution. Subtargets may define any or all of the above categories of data
@@ -84,8 +178,11 @@
static const unsigned DefaultMispredictPenalty = 10;
private:
- // TODO: Add a reference to proc resource types and sched resource tables.
-
+ unsigned ProcID;
+ const MCProcResourceDesc *ProcResourceTable;
+ const MCSchedClassDesc *SchedClassTable;
+ unsigned NumProcResourceKinds;
+ unsigned NumSchedClasses;
// Instruction itinerary tables used by InstrItineraryData.
friend class InstrItineraryData;
const InstrItinerary *InstrItineraries;
@@ -100,13 +197,45 @@
LoadLatency(DefaultLoadLatency),
HighLatency(DefaultHighLatency),
MispredictPenalty(DefaultMispredictPenalty),
- InstrItineraries(0) {}
+ ProcID(0), ProcResourceTable(0), SchedClassTable(0),
+ NumProcResourceKinds(0), NumSchedClasses(0),
+ InstrItineraries(0) {
+ (void)NumProcResourceKinds;
+ (void)NumSchedClasses;
+ }
// Table-gen driven ctor.
MCSchedModel(unsigned iw, int ml, unsigned ll, unsigned hl, unsigned mp,
+ unsigned pi, const MCProcResourceDesc *pr,
+ const MCSchedClassDesc *sc, unsigned npr, unsigned nsc,
const InstrItinerary *ii):
IssueWidth(iw), MinLatency(ml), LoadLatency(ll), HighLatency(hl),
- MispredictPenalty(mp), InstrItineraries(ii){}
+ MispredictPenalty(mp), ProcID(pi), ProcResourceTable(pr),
+ SchedClassTable(sc), NumProcResourceKinds(npr), NumSchedClasses(nsc),
+ InstrItineraries(ii) {}
+
+ unsigned getProcessorID() const { return ProcID; }
+
+ /// Does this machine model include instruction-level scheduling?
+ bool hasInstrSchedModel() const { return SchedClassTable; }
+
+ unsigned getNumProcResourceKinds() const {
+ return NumProcResourceKinds;
+ }
+
+ const MCProcResourceDesc *getProcResource(unsigned ProcResourceIdx) const {
+ assert(hasInstrSchedModel() && "No scheduling machine model");
+
+ assert(ProcResourceIdx < NumProcResourceKinds && "bad proc resource idx");
+ return &ProcResourceTable[ProcResourceIdx];
+ }
+
+ const MCSchedClassDesc *getSchedClassDesc(unsigned SchedClassIdx) const {
+ assert(hasInstrSchedModel() && "No scheduling machine model");
+
+ assert(SchedClassIdx < NumSchedClasses && "bad scheduling class idx");
+ return &SchedClassTable[SchedClassIdx];
+ }
};
} // End llvm namespace
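(Illustrative note, not part of the patch.) The tables tie together as: scheduling class -> micro-op count plus index ranges into the write-proc-res, write-latency, and read-advance tables. A lookup sketch, assuming a populated const MCSchedModel *SM and a class index SCIdx:

    if (SM->hasInstrSchedModel()) {
      const MCSchedClassDesc *SC = SM->getSchedClassDesc(SCIdx);
      if (SC->isValid() && !SC->isVariant()) {
        unsigned MicroOps = SC->NumMicroOps;  // issue cost of the class
        (void)MicroOps;
        // Processor resource kinds; index 0 is conventionally unused.
        for (unsigned P = 1, e = SM->getNumProcResourceKinds(); P != e; ++P) {
          const MCProcResourceDesc *PRD = SM->getProcResource(P);
          (void)PRD;  // PRD->NumUnits, PRD->IsBuffered, ...
        }
      }
    }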
Modified: llvm/branches/AMDILBackend/include/llvm/MC/MCSection.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/MC/MCSection.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/MC/MCSection.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/MC/MCSection.h Tue Jan 15 11:16:16 2013
@@ -15,7 +15,7 @@
#define LLVM_MC_MCSECTION_H
#include "llvm/MC/SectionKind.h"
-#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
namespace llvm {
class MCAsmInfo;
@@ -33,8 +33,8 @@
};
private:
- MCSection(const MCSection&); // DO NOT IMPLEMENT
- void operator=(const MCSection&); // DO NOT IMPLEMENT
+ MCSection(const MCSection&) LLVM_DELETED_FUNCTION;
+ void operator=(const MCSection&) LLVM_DELETED_FUNCTION;
protected:
MCSection(SectionVariant V, SectionKind K) : Variant(V), Kind(K) {}
SectionVariant Variant;
@@ -64,8 +64,6 @@
/// isVirtualSection - Check whether this section is "virtual", that is
/// has no actual object file contents.
virtual bool isVirtualSection() const = 0;
-
- static bool classof(const MCSection *) { return true; }
};
} // end namespace llvm
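(Illustrative note, not part of the patch.) Dropping the always-true classof(const MCSection*) overload is part of this revision's isa<>/dyn_cast<> cleanup; the cast machinery no longer needs a trivial self-cast overload. Client code is unchanged, e.g. (assuming an MCSection *S; clients now include llvm/Support/Casting.h themselves):

    if (const MCSectionELF *ES = dyn_cast<MCSectionELF>(S)) {
      (void)ES;  // ELF-specific queries
    } else if (const MCSectionCOFF *CS = dyn_cast<MCSectionCOFF>(S)) {
      (void)CS;  // COFF-specific queries
    }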
Modified: llvm/branches/AMDILBackend/include/llvm/MC/MCSectionCOFF.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/MC/MCSectionCOFF.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/MC/MCSectionCOFF.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/MC/MCSectionCOFF.h Tue Jan 15 11:16:16 2013
@@ -61,7 +61,6 @@
static bool classof(const MCSection *S) {
return S->getVariant() == SV_COFF;
}
- static bool classof(const MCSectionCOFF *) { return true; }
};
} // end namespace llvm
Modified: llvm/branches/AMDILBackend/include/llvm/MC/MCSectionELF.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/MC/MCSectionELF.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/MC/MCSectionELF.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/MC/MCSectionELF.h Tue Jan 15 11:16:16 2013
@@ -76,7 +76,6 @@
static bool classof(const MCSection *S) {
return S->getVariant() == SV_ELF;
}
- static bool classof(const MCSectionELF *) { return true; }
// Return the entry size for sections with fixed-width data.
static unsigned DetermineEntrySize(SectionKind Kind);
Modified: llvm/branches/AMDILBackend/include/llvm/MC/MCSectionMachO.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/MC/MCSectionMachO.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/MC/MCSectionMachO.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/MC/MCSectionMachO.h Tue Jan 15 11:16:16 2013
@@ -174,7 +174,6 @@
static bool classof(const MCSection *S) {
return S->getVariant() == SV_MachO;
}
- static bool classof(const MCSectionMachO *) { return true; }
};
} // end namespace llvm
Modified: llvm/branches/AMDILBackend/include/llvm/MC/MCStreamer.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/MC/MCStreamer.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/MC/MCStreamer.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/MC/MCStreamer.h Tue Jan 15 11:16:16 2013
@@ -47,8 +47,8 @@
class MCStreamer {
MCContext &Context;
- MCStreamer(const MCStreamer&); // DO NOT IMPLEMENT
- MCStreamer &operator=(const MCStreamer&); // DO NOT IMPLEMENT
+ MCStreamer(const MCStreamer&) LLVM_DELETED_FUNCTION;
+ MCStreamer &operator=(const MCStreamer&) LLVM_DELETED_FUNCTION;
bool EmitEHFrame;
bool EmitDebugFrame;
@@ -342,7 +342,7 @@
/// @name Generating Data
/// @{
- /// EmitBytes - Emit the bytes in \arg Data into the output.
+ /// EmitBytes - Emit the bytes in \p Data into the output.
///
/// This is used to implement assembler directives such as .byte, .ascii,
/// etc.
@@ -554,6 +554,11 @@
virtual void EmitRegSave(const SmallVectorImpl<unsigned> &RegList,
bool isVector);
+ /// PPC-related methods.
+ /// FIXME: Eventually replace it with some "target MC streamer" and move
+ /// these methods there.
+ virtual void EmitTCEntry(const MCSymbol &S);
+
/// FinishImpl - Streamer specific finalization.
virtual void FinishImpl() = 0;
/// Finish - Finish emission of machine code.
@@ -573,17 +578,14 @@
/// InstPrint.
///
/// \param CE - If given, a code emitter to use to show the instruction
- /// encoding inline with the assembly. This method takes ownership of \arg CE.
+ /// encoding inline with the assembly. This method takes ownership of \p CE.
///
/// \param TAB - If given, a target asm backend to use to show the fixup
/// information in conjunction with encoding information. This method takes
- /// ownership of \arg TAB.
+ /// ownership of \p TAB.
///
/// \param ShowInst - Whether to show the MCInst representation inline with
/// the assembly.
- ///
- /// \param DecodeLSDA - If true, emit comments that translates the LSDA into a
- /// human readable format. Only usable with CFI.
MCStreamer *createAsmStreamer(MCContext &Ctx, formatted_raw_ostream &OS,
bool isVerboseAsm,
bool useLoc,
@@ -597,7 +599,7 @@
/// createMachOStreamer - Create a machine code streamer which will generate
/// Mach-O format object files.
///
- /// Takes ownership of \arg TAB and \arg CE.
+ /// Takes ownership of \p TAB and \p CE.
MCStreamer *createMachOStreamer(MCContext &Ctx, MCAsmBackend &TAB,
raw_ostream &OS, MCCodeEmitter *CE,
bool RelaxAll = false);
@@ -605,7 +607,7 @@
/// createWinCOFFStreamer - Create a machine code streamer which will
/// generate Microsoft COFF format object files.
///
- /// Takes ownership of \arg TAB and \arg CE.
+ /// Takes ownership of \p TAB and \p CE.
MCStreamer *createWinCOFFStreamer(MCContext &Ctx,
MCAsmBackend &TAB,
MCCodeEmitter &CE, raw_ostream &OS,
@@ -620,7 +622,7 @@
/// createPureStreamer - Create a machine code streamer which will generate
/// "pure" MC object files, for use with MC-JIT and testing tools.
///
- /// Takes ownership of \arg TAB and \arg CE.
+ /// Takes ownership of \p TAB and \p CE.
MCStreamer *createPureStreamer(MCContext &Ctx, MCAsmBackend &TAB,
raw_ostream &OS, MCCodeEmitter *CE);
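(Illustrative note, not part of the patch.) EmitBytes is the primitive beneath .byte/.ascii-style directives. A sketch, assuming an MCStreamer reference Out and the 3.2-era AddrSpace parameter:

    // Emit three raw bytes, as ".byte 0x90, 0x90, 0x90" would.
    Out.EmitBytes(StringRef("\x90\x90\x90", 3), /*AddrSpace=*/0);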
Modified: llvm/branches/AMDILBackend/include/llvm/MC/MCSubtargetInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/MC/MCSubtargetInfo.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/MC/MCSubtargetInfo.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/MC/MCSubtargetInfo.h Tue Jan 15 11:16:16 2013
@@ -30,7 +30,14 @@
std::string TargetTriple; // Target triple
const SubtargetFeatureKV *ProcFeatures; // Processor feature list
const SubtargetFeatureKV *ProcDesc; // Processor descriptions
- const SubtargetInfoKV *ProcSchedModel; // Scheduler machine model
+
+ // Scheduler machine model
+ const SubtargetInfoKV *ProcSchedModels;
+ const MCWriteProcResEntry *WriteProcResTable;
+ const MCWriteLatencyEntry *WriteLatencyTable;
+ const MCReadAdvanceEntry *ReadAdvanceTable;
+ const MCSchedModel *CPUSchedModel;
+
const InstrStage *Stages; // Instruction itinerary stages
const unsigned *OperandCycles; // Itinerary operand cycles
const unsigned *ForwardingPaths; // Forwarding paths
@@ -43,6 +50,9 @@
const SubtargetFeatureKV *PF,
const SubtargetFeatureKV *PD,
const SubtargetInfoKV *ProcSched,
+ const MCWriteProcResEntry *WPR,
+ const MCWriteLatencyEntry *WL,
+ const MCReadAdvanceEntry *RA,
const InstrStage *IS,
const unsigned *OC, const unsigned *FP,
unsigned NF, unsigned NP);
@@ -58,9 +68,9 @@
return FeatureBits;
}
- /// ReInitMCSubtargetInfo - Change CPU (and optionally supplemented with
- /// feature string), recompute and return feature bits.
- uint64_t ReInitMCSubtargetInfo(StringRef CPU, StringRef FS);
+ /// InitMCProcessorInfo - Set or change the CPU (optionally supplemented with
+ /// feature string). Recompute feature bits and scheduling model.
+ void InitMCProcessorInfo(StringRef CPU, StringRef FS);
/// ToggleFeature - Toggle a feature and returns the re-computed feature
/// bits. This version does not change the implied bits.
@@ -72,11 +82,56 @@
/// getSchedModelForCPU - Get the machine model of a CPU.
///
- MCSchedModel *getSchedModelForCPU(StringRef CPU) const;
+ const MCSchedModel *getSchedModelForCPU(StringRef CPU) const;
+
+ /// getSchedModel - Get the machine model for this subtarget's CPU.
+ ///
+ const MCSchedModel *getSchedModel() const { return CPUSchedModel; }
+
+ /// Return an iterator at the first processor resource consumed by the given
+ /// scheduling class.
+ const MCWriteProcResEntry *getWriteProcResBegin(
+ const MCSchedClassDesc *SC) const {
+ return &WriteProcResTable[SC->WriteProcResIdx];
+ }
+ const MCWriteProcResEntry *getWriteProcResEnd(
+ const MCSchedClassDesc *SC) const {
+ return getWriteProcResBegin(SC) + SC->NumWriteProcResEntries;
+ }
+
+ const MCWriteLatencyEntry *getWriteLatencyEntry(const MCSchedClassDesc *SC,
+ unsigned DefIdx) const {
+ assert(DefIdx < SC->NumWriteLatencyEntries &&
+ "MachineModel does not specify a WriteResource for DefIdx");
+
+ return &WriteLatencyTable[SC->WriteLatencyIdx + DefIdx];
+ }
+
+ int getReadAdvanceCycles(const MCSchedClassDesc *SC, unsigned UseIdx,
+ unsigned WriteResID) const {
+ // TODO: The number of read advance entries in a class can be significant
+ // (~50). Consider compressing the WriteID into a dense ID of those that are
+ // used by ReadAdvance and representing them as a bitset.
+ for (const MCReadAdvanceEntry *I = &ReadAdvanceTable[SC->ReadAdvanceIdx],
+ *E = I + SC->NumReadAdvanceEntries; I != E; ++I) {
+ if (I->UseIdx < UseIdx)
+ continue;
+ if (I->UseIdx > UseIdx)
+ break;
+ // Find the first WriteResIdx match, which has the highest cycle count.
+ if (!I->WriteResourceID || I->WriteResourceID == WriteResID) {
+ return I->Cycles;
+ }
+ }
+ return 0;
+ }
/// getInstrItineraryForCPU - Get scheduling itinerary of a CPU.
///
InstrItineraryData getInstrItineraryForCPU(StringRef CPU) const;
+
+ /// Initialize an InstrItineraryData instance.
+ void initInstrItins(InstrItineraryData &InstrItins) const;
};
} // End llvm namespace
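(Illustrative note, not part of the patch.) The write-latency and read-advance tables combine into an effective def-to-use latency. A sketch, assuming an MCSubtargetInfo reference STI, a resolved MCSchedClassDesc *SC, and operand indices DefIdx/UseIdx:

    const MCWriteLatencyEntry *WLE = STI.getWriteLatencyEntry(SC, DefIdx);
    int Latency = WLE->Cycles;  // -1 means invalid/unknown latency
    if (Latency >= 0)
      Latency -= STI.getReadAdvanceCycles(SC, UseIdx, WLE->WriteResourceID);
    // Resources consumed by the same class:
    for (const MCWriteProcResEntry *I = STI.getWriteProcResBegin(SC),
                                   *E = STI.getWriteProcResEnd(SC);
         I != E; ++I) {
      // I->ProcResourceIdx is busy for I->Cycles cycles.
    }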
Modified: llvm/branches/AMDILBackend/include/llvm/MC/MCSymbol.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/MC/MCSymbol.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/MC/MCSymbol.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/MC/MCSymbol.h Tue Jan 15 11:16:16 2013
@@ -15,6 +15,7 @@
#define LLVM_MC_MCSYMBOL_H
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
namespace llvm {
class MCExpr;
@@ -62,8 +63,8 @@
: Name(name), Section(0), Value(0),
IsTemporary(isTemporary), IsUsed(false) {}
- MCSymbol(const MCSymbol&); // DO NOT IMPLEMENT
- void operator=(const MCSymbol&); // DO NOT IMPLEMENT
+ MCSymbol(const MCSymbol&) LLVM_DELETED_FUNCTION;
+ void operator=(const MCSymbol&) LLVM_DELETED_FUNCTION;
public:
/// getName - Get the symbol name.
StringRef getName() const { return Name; }
@@ -112,7 +113,7 @@
return *Section;
}
- /// setSection - Mark the symbol as defined in the section \arg S.
+ /// setSection - Mark the symbol as defined in the section \p S.
void setSection(const MCSection &S) { Section = &S; }
/// setUndefined - Mark the symbol as undefined.
@@ -132,7 +133,7 @@
return Value != 0;
}
- /// getValue() - Get the value for variable symbols.
+ /// getVariableValue() - Get the value for variable symbols.
const MCExpr *getVariableValue() const {
assert(isVariable() && "Invalid accessor!");
IsUsed = true;
@@ -148,7 +149,7 @@
/// @}
- /// print - Print the value to the stream \arg OS.
+ /// print - Print the value to the stream \p OS.
void print(raw_ostream &OS) const;
/// dump - Print the value to stderr.
Modified: llvm/branches/AMDILBackend/include/llvm/MC/MCTargetAsmLexer.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/MC/MCTargetAsmLexer.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/MC/MCTargetAsmLexer.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/MC/MCTargetAsmLexer.h Tue Jan 15 11:16:16 2013
@@ -24,8 +24,8 @@
SMLoc ErrLoc;
std::string Err;
- MCTargetAsmLexer(const MCTargetAsmLexer &); // DO NOT IMPLEMENT
- void operator=(const MCTargetAsmLexer &); // DO NOT IMPLEMENT
+ MCTargetAsmLexer(const MCTargetAsmLexer &) LLVM_DELETED_FUNCTION;
+ void operator=(const MCTargetAsmLexer &) LLVM_DELETED_FUNCTION;
protected: // Can only create subclasses.
MCTargetAsmLexer(const Target &);
@@ -45,7 +45,7 @@
const Target &getTarget() const { return TheTarget; }
- /// InstallLexer - Set the lexer to get tokens from lower-level lexer \arg L.
+ /// InstallLexer - Set the lexer to get tokens from lower-level lexer \p L.
void InstallLexer(MCAsmLexer &L) {
Lexer = &L;
}
@@ -77,10 +77,10 @@
/// getKind - Get the kind of current token.
AsmToken::TokenKind getKind() const { return CurTok.getKind(); }
- /// is - Check if the current token has kind \arg K.
+ /// is - Check if the current token has kind \p K.
bool is(AsmToken::TokenKind K) const { return CurTok.is(K); }
- /// isNot - Check if the current token has kind \arg K.
+ /// isNot - Check if the current token has kind \p K.
bool isNot(AsmToken::TokenKind K) const { return CurTok.isNot(K); }
};
Modified: llvm/branches/AMDILBackend/include/llvm/MC/MCTargetAsmParser.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/MC/MCTargetAsmParser.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/MC/MCTargetAsmParser.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/MC/MCTargetAsmParser.h Tue Jan 15 11:16:16 2013
@@ -21,11 +21,43 @@
class MCInst;
template <typename T> class SmallVectorImpl;
+enum AsmRewriteKind {
+ AOK_DotOperator, // Rewrite a dot operator expression as an immediate.
+ // E.g., [eax].foo.bar -> [eax].8
+ AOK_Emit, // Rewrite _emit as .byte.
+ AOK_Imm, // Rewrite as $$N.
+ AOK_ImmPrefix, // Add $$ before a parsed Imm.
+ AOK_Input, // Rewrite in terms of $N.
+ AOK_Output, // Rewrite in terms of $N.
+ AOK_SizeDirective, // Add a sizing directive (e.g., dword ptr).
+ AOK_Skip // Skip emission (e.g., offset/type operators).
+};
+
+struct AsmRewrite {
+ AsmRewriteKind Kind;
+ SMLoc Loc;
+ unsigned Len;
+ unsigned Val;
+public:
+ AsmRewrite(AsmRewriteKind kind, SMLoc loc, unsigned len = 0, unsigned val = 0)
+ : Kind(kind), Loc(loc), Len(len), Val(val) {}
+};
+
+struct ParseInstructionInfo {
+
+ SmallVectorImpl<AsmRewrite> *AsmRewrites;
+
+ ParseInstructionInfo() : AsmRewrites(0) {}
+ ParseInstructionInfo(SmallVectorImpl<AsmRewrite> *rewrites)
+ : AsmRewrites(rewrites) {}
+
+ ~ParseInstructionInfo() {}
+};
+
/// MCTargetAsmParser - Generic interface to target specific assembly parsers.
class MCTargetAsmParser : public MCAsmParserExtension {
public:
enum MatchResultTy {
- Match_ConversionFail,
Match_InvalidOperand,
Match_MissingFeature,
Match_MnemonicFail,
@@ -34,20 +66,34 @@
};
private:
- MCTargetAsmParser(const MCTargetAsmParser &); // DO NOT IMPLEMENT
- void operator=(const MCTargetAsmParser &); // DO NOT IMPLEMENT
+ MCTargetAsmParser(const MCTargetAsmParser &) LLVM_DELETED_FUNCTION;
+ void operator=(const MCTargetAsmParser &) LLVM_DELETED_FUNCTION;
protected: // Can only create subclasses.
MCTargetAsmParser();
/// AvailableFeatures - The current set of available features.
unsigned AvailableFeatures;
+ /// ParsingInlineAsm - Are we parsing ms-style inline assembly?
+ bool ParsingInlineAsm;
+
+ /// SemaCallback - The Sema callback implementation. Must be set when parsing
+ /// ms-style inline assembly.
+ MCAsmParserSemaCallback *SemaCallback;
+
public:
virtual ~MCTargetAsmParser();
unsigned getAvailableFeatures() const { return AvailableFeatures; }
void setAvailableFeatures(unsigned Value) { AvailableFeatures = Value; }
+ bool isParsingInlineAsm() { return ParsingInlineAsm; }
+ void setParsingInlineAsm(bool Value) { ParsingInlineAsm = Value; }
+
+ void setSemaCallback(MCAsmParserSemaCallback *Callback) {
+ SemaCallback = Callback;
+ }
+
virtual bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
SMLoc &EndLoc) = 0;
@@ -64,7 +110,8 @@
/// \param Operands [out] - The list of parsed operands, this returns
/// ownership of them to the caller.
/// \return True on failure.
- virtual bool ParseInstruction(StringRef Name, SMLoc NameLoc,
+ virtual bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
+ SMLoc NameLoc,
SmallVectorImpl<MCParsedAsmOperand*> &Operands) = 0;
/// ParseDirective - Parse a target specific assembler directive
@@ -79,18 +126,9 @@
/// \param DirectiveID - the identifier token of the directive.
virtual bool ParseDirective(AsmToken DirectiveID) = 0;
- /// MatchInstruction - Recognize a series of operands of a parsed instruction
- /// as an actual MCInst. This returns false on success and returns true on
- /// failure to match.
- ///
- /// On failure, the target parser is responsible for emitting a diagnostic
- /// explaining the match failure.
- virtual bool
- MatchInstruction(SMLoc IDLoc,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- SmallVectorImpl<MCInst> &MCInsts) {
- return true;
- }
+ /// mnemonicIsValid - This returns true if this is a valid mnemonic and false
+ /// otherwise.
+ virtual bool mnemonicIsValid(StringRef Mnemonic) = 0;
/// MatchAndEmitInstruction - Recognize a series of operands of a parsed
/// instruction as an actual MCInst and emit it to the specified MCStreamer.
@@ -99,9 +137,10 @@
/// On failure, the target parser is responsible for emitting a diagnostic
/// explaining the match failure.
virtual bool
- MatchAndEmitInstruction(SMLoc IDLoc,
+ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- MCStreamer &Out) = 0;
+ MCStreamer &Out, unsigned &ErrorInfo,
+ bool MatchingInlineAsm) = 0;
/// checkTargetMatchPredicate - Validate the instruction match against
/// any complex target predicates not expressible via match classes.
@@ -109,6 +148,8 @@
return Match_Success;
}
+ virtual void convertToMapAndConstraints(unsigned Kind,
+ const SmallVectorImpl<MCParsedAsmOperand*> &Operands) = 0;
};
} // End llvm namespace
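(Illustrative note, not part of the patch.) ParseInstructionInfo is the channel through which a target parser records AsmRewrites for the later ms-style inline-asm translation. A sketch of a hypothetical target's ParseInstruction:

    bool MyTargetAsmParser::ParseInstruction(ParseInstructionInfo &Info,
        StringRef Name, SMLoc NameLoc,
        SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
      // ... lex and build Operands here ...
      if (isParsingInlineAsm() && Info.AsmRewrites) {
        // Request that a parsed immediate be prefixed with $$ later.
        Info.AsmRewrites->push_back(AsmRewrite(AOK_ImmPrefix, NameLoc));
      }
      return false;  // false == success
    }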
Modified: llvm/branches/AMDILBackend/include/llvm/MC/MCValue.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/MC/MCValue.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/MC/MCValue.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/MC/MCValue.h Tue Jan 15 11:16:16 2013
@@ -46,7 +46,7 @@
/// isAbsolute - Is this an absolute (as opposed to relocatable) value.
bool isAbsolute() const { return !SymA && !SymB; }
- /// print - Print the value to the stream \arg OS.
+ /// print - Print the value to the stream \p OS.
void print(raw_ostream &OS, const MCAsmInfo *MAI) const;
/// dump - Print the value to stderr.
Modified: llvm/branches/AMDILBackend/include/llvm/MC/SubtargetFeature.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/MC/SubtargetFeature.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/MC/SubtargetFeature.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/MC/SubtargetFeature.h Tue Jan 15 11:16:16 2013
@@ -50,7 +50,7 @@
//
struct SubtargetInfoKV {
const char *Key; // K-V key string
- void *Value; // K-V pointer value
+ const void *Value; // K-V pointer value
// Compare routine for std binary search
bool operator<(const SubtargetInfoKV &S) const {
@@ -95,10 +95,6 @@
const SubtargetFeatureKV *FeatureTable,
size_t FeatureTableSize);
- /// Get scheduling itinerary of a CPU.
- void *getItinerary(const StringRef CPU,
- const SubtargetInfoKV *Table, size_t TableSize);
-
/// Print feature string.
void print(raw_ostream &OS) const;
Modified: llvm/branches/AMDILBackend/include/llvm/MDBuilder.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/MDBuilder.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/MDBuilder.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/MDBuilder.h Tue Jan 15 11:16:16 2013
@@ -134,6 +134,27 @@
}
}
+ struct TBAAStructField {
+ uint64_t Offset;
+ uint64_t Size;
+ MDNode *TBAA;
+ TBAAStructField(uint64_t Offset, uint64_t Size, MDNode *TBAA) :
+ Offset(Offset), Size(Size), TBAA(TBAA) {}
+ };
+
+ /// \brief Return metadata for a tbaa.struct node with the given
+ /// struct field descriptions.
+ MDNode *createTBAAStructNode(ArrayRef<TBAAStructField> Fields) {
+ SmallVector<Value *, 4> Vals(Fields.size() * 3);
+ Type *Int64 = IntegerType::get(Context, 64);
+ for (unsigned i = 0, e = Fields.size(); i != e; ++i) {
+ Vals[i * 3 + 0] = ConstantInt::get(Int64, Fields[i].Offset);
+ Vals[i * 3 + 1] = ConstantInt::get(Int64, Fields[i].Size);
+ Vals[i * 3 + 2] = Fields[i].TBAA;
+ }
+ return MDNode::get(Context, Vals);
+ }
+
};
} // end namespace llvm
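(Illustrative note, not part of the patch.) A usage sketch for createTBAAStructNode, describing a struct with an i32 at offset 0 and a float at offset 4; it assumes an MDBuilder MDB and existing scalar TBAA nodes IntTBAA/FloatTBAA:

    SmallVector<MDBuilder::TBAAStructField, 2> Fields;
    Fields.push_back(MDBuilder::TBAAStructField(0, 4, IntTBAA));
    Fields.push_back(MDBuilder::TBAAStructField(4, 4, FloatTBAA));
    MDNode *TBAAStruct = MDB.createTBAAStructNode(Fields);
    // Typically attached to a memcpy as !tbaa.struct metadata.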
Modified: llvm/branches/AMDILBackend/include/llvm/Metadata.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Metadata.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Metadata.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Metadata.h Tue Jan 15 11:16:16 2013
@@ -37,7 +37,7 @@
/// MDString is always unnamed.
class MDString : public Value {
virtual void anchor();
- MDString(const MDString &); // DO NOT IMPLEMENT
+ MDString(const MDString &) LLVM_DELETED_FUNCTION;
explicit MDString(LLVMContext &C);
public:
@@ -59,7 +59,6 @@
iterator end() const { return getName().end(); }
/// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const MDString *) { return true; }
static bool classof(const Value *V) {
return V->getValueID() == MDStringVal;
}
@@ -71,8 +70,8 @@
//===----------------------------------------------------------------------===//
/// MDNode - a tuple of other values.
class MDNode : public Value, public FoldingSetNode {
- MDNode(const MDNode &); // DO NOT IMPLEMENT
- void operator=(const MDNode &); // DO NOT IMPLEMENT
+ MDNode(const MDNode &) LLVM_DELETED_FUNCTION;
+ void operator=(const MDNode &) LLVM_DELETED_FUNCTION;
friend class MDNodeOperand;
friend class LLVMContextImpl;
friend struct FoldingSetTrait<MDNode>;
@@ -161,7 +160,6 @@
void Profile(FoldingSetNodeID &ID) const;
/// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const MDNode *) { return true; }
static bool classof(const Value *V) {
return V->getValueID() == MDNodeVal;
}
@@ -195,7 +193,7 @@
friend struct ilist_traits<NamedMDNode>;
friend class LLVMContextImpl;
friend class Module;
- NamedMDNode(const NamedMDNode &); // DO NOT IMPLEMENT
+ NamedMDNode(const NamedMDNode &) LLVM_DELETED_FUNCTION;
std::string Name;
Module *Parent;
Modified: llvm/branches/AMDILBackend/include/llvm/Object/Archive.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Object/Archive.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Object/Archive.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Object/Archive.h Tue Jan 15 11:16:16 2013
@@ -129,7 +129,6 @@
symbol_iterator end_symbols() const;
// Cast methods.
- static inline bool classof(Archive const *v) { return true; }
static inline bool classof(Binary const *v) {
return v->isArchive();
}
Modified: llvm/branches/AMDILBackend/include/llvm/Object/Binary.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Object/Binary.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Object/Binary.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Object/Binary.h Tue Jan 15 11:16:16 2013
@@ -26,8 +26,8 @@
class Binary {
private:
- Binary(); // = delete
- Binary(const Binary &other); // = delete
+ Binary() LLVM_DELETED_FUNCTION;
+ Binary(const Binary &other) LLVM_DELETED_FUNCTION;
unsigned int TypeID;
@@ -64,7 +64,6 @@
// Cast methods.
unsigned int getType() const { return TypeID; }
- static inline bool classof(const Binary *v) { return true; }
// Convenience methods
bool isObject() const {
Modified: llvm/branches/AMDILBackend/include/llvm/Object/COFF.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Object/COFF.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Object/COFF.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Object/COFF.h Tue Jan 15 11:16:16 2013
@@ -116,6 +116,7 @@
virtual error_code getSymbolType(DataRefImpl Symb, SymbolRef::Type &Res) const;
virtual error_code getSymbolSection(DataRefImpl Symb,
section_iterator &Res) const;
+ virtual error_code getSymbolValue(DataRefImpl Symb, uint64_t &Val) const;
virtual error_code getSectionNext(DataRefImpl Sec, SectionRef &Res) const;
virtual error_code getSectionName(DataRefImpl Sec, StringRef &Res) const;
@@ -128,6 +129,7 @@
virtual error_code isSectionBSS(DataRefImpl Sec, bool &Res) const;
virtual error_code isSectionVirtual(DataRefImpl Sec, bool &Res) const;
virtual error_code isSectionZeroInit(DataRefImpl Sec, bool &Res) const;
+ virtual error_code isSectionReadOnlyData(DataRefImpl Sec, bool &Res) const;
virtual error_code isSectionRequiredForExecution(DataRefImpl Sec,
bool &Res) const;
virtual error_code sectionContainsSymbol(DataRefImpl Sec, DataRefImpl Symb,
@@ -197,7 +199,6 @@
static inline bool classof(const Binary *v) {
return v->isCOFF();
}
- static inline bool classof(const COFFObjectFile *v) { return true; }
};
}
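(Illustrative note, not part of the patch.) The new isSectionReadOnlyData hook, here and in the ELF implementation below, surfaces through the generic section iterators. A sketch, assuming an object::ObjectFile *Obj and the 3.2-era iterator API:

    error_code EC;
    for (section_iterator I = Obj->begin_sections(),
                          E = Obj->end_sections();
         I != E; I.increment(EC)) {
      bool ROData = false;
      if (I->isReadOnlyData(ROData))
        break;  // bail on error (sketch)
      if (ROData) {
        // .rdata / .rodata style sections land here.
      }
    }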
Modified: llvm/branches/AMDILBackend/include/llvm/Object/ELF.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Object/ELF.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Object/ELF.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Object/ELF.h Tue Jan 15 11:16:16 2013
@@ -387,11 +387,65 @@
}
};
+template<support::endianness target_endianness, bool is64Bits>
+struct Elf_Ehdr_Impl {
+ LLVM_ELF_IMPORT_TYPES(target_endianness, is64Bits)
+ unsigned char e_ident[ELF::EI_NIDENT]; // ELF Identification bytes
+ Elf_Half e_type; // Type of file (see ET_*)
+ Elf_Half e_machine; // Required architecture for this file (see EM_*)
+ Elf_Word e_version; // Must be equal to 1
+ Elf_Addr e_entry; // Address to jump to in order to start program
+ Elf_Off e_phoff; // Program header table's file offset, in bytes
+ Elf_Off e_shoff; // Section header table's file offset, in bytes
+ Elf_Word e_flags; // Processor-specific flags
+ Elf_Half e_ehsize; // Size of ELF header, in bytes
+ Elf_Half e_phentsize;// Size of an entry in the program header table
+ Elf_Half e_phnum; // Number of entries in the program header table
+ Elf_Half e_shentsize;// Size of an entry in the section header table
+ Elf_Half e_shnum; // Number of entries in the section header table
+ Elf_Half e_shstrndx; // Section header table index of section name
+ // string table
+ bool checkMagic() const {
+ return (memcmp(e_ident, ELF::ElfMagic, strlen(ELF::ElfMagic))) == 0;
+ }
+ unsigned char getFileClass() const { return e_ident[ELF::EI_CLASS]; }
+ unsigned char getDataEncoding() const { return e_ident[ELF::EI_DATA]; }
+};
+
+template<support::endianness target_endianness, bool is64Bits>
+struct Elf_Phdr;
+
+template<support::endianness target_endianness>
+struct Elf_Phdr<target_endianness, false> {
+ LLVM_ELF_IMPORT_TYPES(target_endianness, false)
+ Elf_Word p_type; // Type of segment
+ Elf_Off p_offset; // FileOffset where segment is located, in bytes
+ Elf_Addr p_vaddr; // Virtual Address of beginning of segment
+ Elf_Addr p_paddr; // Physical address of beginning of segment (OS-specific)
+ Elf_Word p_filesz; // Num. of bytes in file image of segment (may be zero)
+ Elf_Word p_memsz; // Num. of bytes in mem image of segment (may be zero)
+ Elf_Word p_flags; // Segment flags
+ Elf_Word p_align; // Segment alignment constraint
+};
+
+template<support::endianness target_endianness>
+struct Elf_Phdr<target_endianness, true> {
+ LLVM_ELF_IMPORT_TYPES(target_endianness, true)
+ Elf_Word p_type; // Type of segment
+ Elf_Word p_flags; // Segment flags
+ Elf_Off p_offset; // FileOffset where segment is located, in bytes
+ Elf_Addr p_vaddr; // Virtual Address of beginning of segment
+ Elf_Addr p_paddr; // Physical address of beginning of segment (OS-specific)
+ Elf_Word p_filesz; // Num. of bytes in file image of segment (may be zero)
+ Elf_Word p_memsz; // Num. of bytes in mem image of segment (may be zero)
+ Elf_Word p_align; // Segment alignment constraint
+};
template<support::endianness target_endianness, bool is64Bits>
class ELFObjectFile : public ObjectFile {
LLVM_ELF_IMPORT_TYPES(target_endianness, is64Bits)
+ typedef Elf_Ehdr_Impl<target_endianness, is64Bits> Elf_Ehdr;
typedef Elf_Shdr_Impl<target_endianness, is64Bits> Elf_Shdr;
typedef Elf_Sym_Impl<target_endianness, is64Bits> Elf_Sym;
typedef Elf_Dyn_Impl<target_endianness, is64Bits> Elf_Dyn;
@@ -406,28 +460,6 @@
typedef content_iterator<DynRef> dyn_iterator;
protected:
- struct Elf_Ehdr {
- unsigned char e_ident[ELF::EI_NIDENT]; // ELF Identification bytes
- Elf_Half e_type; // Type of file (see ET_*)
- Elf_Half e_machine; // Required architecture for this file (see EM_*)
- Elf_Word e_version; // Must be equal to 1
- Elf_Addr e_entry; // Address to jump to in order to start program
- Elf_Off e_phoff; // Program header table's file offset, in bytes
- Elf_Off e_shoff; // Section header table's file offset, in bytes
- Elf_Word e_flags; // Processor-specific flags
- Elf_Half e_ehsize; // Size of ELF header, in bytes
- Elf_Half e_phentsize;// Size of an entry in the program header table
- Elf_Half e_phnum; // Number of entries in the program header table
- Elf_Half e_shentsize;// Size of an entry in the section header table
- Elf_Half e_shnum; // Number of entries in the section header table
- Elf_Half e_shstrndx; // Section header table index of section name
- // string table
- bool checkMagic() const {
- return (memcmp(e_ident, ELF::ElfMagic, strlen(ELF::ElfMagic))) == 0;
- }
- unsigned char getFileClass() const { return e_ident[ELF::EI_CLASS]; }
- unsigned char getDataEncoding() const { return e_ident[ELF::EI_DATA]; }
- };
// This flag is used for classof, to distinguish ELFObjectFile from
// its subclass. If more subclasses will be created, this flag will
// have to become an enum.
@@ -459,6 +491,59 @@
// This is set the first time getLoadName is called.
mutable const char *dt_soname;
+public:
+ /// \brief Iterate over relocations in a .rel or .rela section.
+ template<class RelocT>
+ class ELFRelocationIterator {
+ public:
+ typedef void difference_type;
+ typedef const RelocT value_type;
+ typedef std::forward_iterator_tag iterator_category;
+ typedef value_type &reference;
+ typedef value_type *pointer;
+
+ /// \brief Default construct iterator.
+ ELFRelocationIterator() : Section(0), Current(0) {}
+ ELFRelocationIterator(const Elf_Shdr *Sec, const char *Start)
+ : Section(Sec)
+ , Current(Start) {}
+
+ reference operator *() {
+ assert(Current && "Attempted to dereference an invalid iterator!");
+ return *reinterpret_cast<const RelocT*>(Current);
+ }
+
+ pointer operator ->() {
+ assert(Current && "Attempted to dereference an invalid iterator!");
+ return reinterpret_cast<const RelocT*>(Current);
+ }
+
+ bool operator ==(const ELFRelocationIterator &Other) {
+ return Section == Other.Section && Current == Other.Current;
+ }
+
+ bool operator !=(const ELFRelocationIterator &Other) {
+ return !(*this == Other);
+ }
+
+ // Pre-increment: advance to the next entry and return *this.
+ ELFRelocationIterator &operator ++() {
+ assert(Current && "Attempted to increment an invalid iterator!");
+ Current += Section->sh_entsize;
+ return *this;
+ }
+
+ // Post-increment: advance, but return the pre-increment position.
+ ELFRelocationIterator operator ++(int) {
+ ELFRelocationIterator Tmp = *this;
+ ++*this;
+ return Tmp;
+ }
+
+ private:
+ const Elf_Shdr *Section;
+ const char *Current;
+ };
+
+private:
// Records for each version index the corresponding Verdef or Vernaux entry.
// This is filled the first time LoadVersionMap() is called.
class VersionMapEntry : public PointerIntPair<const void*, 1> {
@@ -535,6 +620,7 @@
virtual error_code getSymbolType(DataRefImpl Symb, SymbolRef::Type &Res) const;
virtual error_code getSymbolSection(DataRefImpl Symb,
section_iterator &Res) const;
+ virtual error_code getSymbolValue(DataRefImpl Symb, uint64_t &Val) const;
friend class DynRefImpl<target_endianness, is64Bits>;
virtual error_code getDynNext(DataRefImpl DynData, DynRef &Result) const;
@@ -555,6 +641,7 @@
bool &Res) const;
virtual error_code isSectionVirtual(DataRefImpl Sec, bool &Res) const;
virtual error_code isSectionZeroInit(DataRefImpl Sec, bool &Res) const;
+ virtual error_code isSectionReadOnlyData(DataRefImpl Sec, bool &Res) const;
virtual error_code sectionContainsSymbol(DataRefImpl Sec, DataRefImpl Symb,
bool &Result) const;
virtual relocation_iterator getSectionRelBegin(DataRefImpl Sec) const;
@@ -594,6 +681,27 @@
virtual dyn_iterator begin_dynamic_table() const;
virtual dyn_iterator end_dynamic_table() const;
+ typedef ELFRelocationIterator<Elf_Rela> Elf_Rela_Iter;
+ typedef ELFRelocationIterator<Elf_Rel> Elf_Rel_Iter;
+
+ virtual Elf_Rela_Iter beginELFRela(const Elf_Shdr *sec) const {
+ return Elf_Rela_Iter(sec, (const char *)(base() + sec->sh_offset));
+ }
+
+ virtual Elf_Rela_Iter endELFRela(const Elf_Shdr *sec) const {
+ return Elf_Rela_Iter(sec, (const char *)
+ (base() + sec->sh_offset + sec->sh_size));
+ }
+
+ virtual Elf_Rel_Iter beginELFRel(const Elf_Shdr *sec) const {
+ return Elf_Rel_Iter(sec, (const char *)(base() + sec->sh_offset));
+ }
+
+ virtual Elf_Rel_Iter endELFRel(const Elf_Shdr *sec) const {
+ return Elf_Rel_Iter(sec, (const char *)
+ (base() + sec->sh_offset + sec->sh_size));
+ }
+
virtual uint8_t getBytesInAddress() const;
virtual StringRef getFileFormatName() const;
virtual StringRef getObjectType() const { return "ELF"; }
@@ -608,6 +716,7 @@
const Elf_Shdr *getSection(const Elf_Sym *symb) const;
const Elf_Shdr *getElfSection(section_iterator &It) const;
const Elf_Sym *getElfSymbol(symbol_iterator &It) const;
+ const Elf_Sym *getElfSymbol(uint32_t index) const;
// Methods for type inquiry through isa, cast, and dyn_cast
bool isDyldType() const { return isDyldELFObject; }
@@ -615,7 +724,6 @@
return v->getType() == getELFType(target_endianness == support::little,
is64Bits);
}
- static inline bool classof(const ELFObjectFile *v) { return true; }
};
// Iterate through the version definitions, and place each Elf_Verdef
@@ -804,6 +912,16 @@
}
template<support::endianness target_endianness, bool is64Bits>
+const typename ELFObjectFile<target_endianness, is64Bits>::Elf_Sym *
+ELFObjectFile<target_endianness, is64Bits>
+ ::getElfSymbol(uint32_t index) const {
+ DataRefImpl SymbolData;
+ SymbolData.d.a = index;
+ SymbolData.d.b = 1;
+ return getSymbol(SymbolData);
+}
+
+template<support::endianness target_endianness, bool is64Bits>
error_code ELFObjectFile<target_endianness, is64Bits>
::getSymbolFileOffset(DataRefImpl Symb,
uint64_t &Result) const {
@@ -863,7 +981,18 @@
case ELF::STT_FUNC:
case ELF::STT_OBJECT:
case ELF::STT_NOTYPE:
- Result = symb->st_value + (Section ? Section->sh_addr : 0);
+ bool IsRelocatable;
+ switch(Header->e_type) {
+ case ELF::ET_EXEC:
+ case ELF::ET_DYN:
+ IsRelocatable = false;
+ break;
+ default:
+ IsRelocatable = true;
+ }
+ Result = symb->st_value;
+ if (IsRelocatable && Section != 0)
+ Result += Section->sh_addr;
return object_error::success;
default:
Result = UnknownAddressOrSize;
@@ -1034,6 +1163,16 @@
template<support::endianness target_endianness, bool is64Bits>
error_code ELFObjectFile<target_endianness, is64Bits>
+ ::getSymbolValue(DataRefImpl Symb,
+ uint64_t &Val) const {
+ validateSymbol(Symb);
+ const Elf_Sym *symb = getSymbol(Symb);
+ Val = symb->st_value;
+ return object_error::success;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
::getSectionNext(DataRefImpl Sec, SectionRef &Result) const {
const uint8_t *sec = reinterpret_cast<const uint8_t *>(Sec.p);
sec += Header->e_shentsize;
@@ -1160,7 +1299,8 @@
}
template<support::endianness target_endianness, bool is64Bits>
-error_code ELFObjectFile<target_endianness, is64Bits>::isSectionZeroInit(DataRefImpl Sec,
+error_code ELFObjectFile<target_endianness, is64Bits>
+ ::isSectionZeroInit(DataRefImpl Sec,
bool &Result) const {
const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p);
// For ELF, all zero-init sections are virtual (that is, they occupy no space
@@ -1174,6 +1314,18 @@
template<support::endianness target_endianness, bool is64Bits>
error_code ELFObjectFile<target_endianness, is64Bits>
+ ::isSectionReadOnlyData(DataRefImpl Sec,
+ bool &Result) const {
+ const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p);
+ if (sec->sh_flags & ELF::SHF_WRITE || sec->sh_flags & ELF::SHF_EXECINSTR)
+ Result = false;
+ else
+ Result = true;
+ return object_error::success;
+}
+
+template<support::endianness target_endianness, bool is64Bits>
+error_code ELFObjectFile<target_endianness, is64Bits>
::sectionContainsSymbol(DataRefImpl Sec,
DataRefImpl Symb,
bool &Result) const {
@@ -1444,6 +1596,143 @@
res = "Unknown";
}
break;
+ case ELF::EM_ARM:
+ switch (type) {
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_NONE);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PC24);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ABS32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_REL32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDR_PC_G0);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ABS16);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ABS12);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_ABS5);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ABS8);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_SBREL32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_CALL);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_PC8);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_BREL_ADJ);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_DESC);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_SWI8);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_XPC25);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_XPC22);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_DTPMOD32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_DTPOFF32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_TPOFF32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_COPY);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_GLOB_DAT);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_JUMP_SLOT);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_RELATIVE);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_GOTOFF32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_BASE_PREL);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_GOT_BREL);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PLT32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_CALL);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_JUMP24);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_JUMP24);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_BASE_ABS);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_PCREL_7_0);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_PCREL_15_8);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_PCREL_23_15);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDR_SBREL_11_0_NC);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_SBREL_19_12_NC);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_SBREL_27_20_CK);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TARGET1);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_SBREL31);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_V4BX);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TARGET2);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PREL31);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_MOVW_ABS_NC);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_MOVT_ABS);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_MOVW_PREL_NC);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_MOVT_PREL);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_MOVW_ABS_NC);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_MOVT_ABS);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_MOVW_PREL_NC);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_MOVT_PREL);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_JUMP19);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_JUMP6);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_ALU_PREL_11_0);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_PC12);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ABS32_NOI);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_REL32_NOI);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_PC_G0_NC);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_PC_G0);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_PC_G1_NC);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_PC_G1);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_PC_G2);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDR_PC_G1);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDR_PC_G2);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDRS_PC_G0);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDRS_PC_G1);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDRS_PC_G2);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDC_PC_G0);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDC_PC_G1);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDC_PC_G2);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_SB_G0_NC);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_SB_G0);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_SB_G1_NC);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_SB_G1);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_SB_G2);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDR_SB_G0);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDR_SB_G1);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDR_SB_G2);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDRS_SB_G0);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDRS_SB_G1);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDRS_SB_G2);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDC_SB_G0);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDC_SB_G1);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDC_SB_G2);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_MOVW_BREL_NC);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_MOVT_BREL);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_MOVW_BREL);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_MOVW_BREL_NC);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_MOVT_BREL);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_MOVW_BREL);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_GOTDESC);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_CALL);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_DESCSEQ);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_TLS_CALL);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PLT32_ABS);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_GOT_ABS);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_GOT_PREL);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_GOT_BREL12);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_GOTOFF12);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_GOTRELAX);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_GNU_VTENTRY);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_GNU_VTINHERIT);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_JUMP11);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_JUMP8);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_GD32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_LDM32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_LDO32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_IE32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_LE32);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_LDO12);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_LE12);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_IE12GP);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_0);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_1);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_2);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_3);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_4);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_5);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_6);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_7);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_8);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_9);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_10);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_11);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_12);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_13);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_14);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_15);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ME_TOO);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_TLS_DESCSEQ16);
+ LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_TLS_DESCSEQ32);
+ default:
+ res = "Unknown";
+ }
+ break;
case ELF::EM_HEXAGON:
switch (type) {
LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_NONE);
@@ -1574,15 +1863,15 @@
int64_t addend = 0;
uint16_t symbol_index = 0;
switch (sec->sh_type) {
- default :
+ default:
return object_error::parse_failed;
- case ELF::SHT_REL : {
+ case ELF::SHT_REL: {
type = getRel(Rel)->getType();
symbol_index = getRel(Rel)->getSymbol();
// TODO: Read implicit addend from section data.
break;
}
- case ELF::SHT_RELA : {
+ case ELF::SHT_RELA: {
type = getRela(Rel)->getType();
symbol_index = getRela(Rel)->getSymbol();
addend = getRela(Rel)->r_addend;
@@ -1596,9 +1885,8 @@
switch (Header->e_machine) {
case ELF::EM_X86_64:
switch (type) {
- case ELF::R_X86_64_32S:
- res = symname;
- break;
+ case ELF::R_X86_64_PC8:
+ case ELF::R_X86_64_PC16:
case ELF::R_X86_64_PC32: {
std::string fmtbuf;
raw_string_ostream fmt(fmtbuf);
@@ -1607,10 +1895,23 @@
Result.append(fmtbuf.begin(), fmtbuf.end());
}
break;
+ case ELF::R_X86_64_8:
+ case ELF::R_X86_64_16:
+ case ELF::R_X86_64_32:
+ case ELF::R_X86_64_32S:
+ case ELF::R_X86_64_64: {
+ std::string fmtbuf;
+ raw_string_ostream fmt(fmtbuf);
+ fmt << symname << (addend < 0 ? "" : "+") << addend;
+ fmt.flush();
+ Result.append(fmtbuf.begin(), fmtbuf.end());
+ }
+ break;
default:
res = "Unknown";
}
break;
+ case ELF::EM_ARM:
case ELF::EM_HEXAGON:
res = symname;
break;
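The new absolute-relocation branch above renders the target as the symbol
name followed by a signed addend; a negative addend supplies its own '-'.
For illustration, with symname "foo", the formatting line

    fmt << symname << (addend < 0 ? "" : "+") << addend;

produces:

    // addend =  16  ->  "foo+16"
    // addend =  -8  ->  "foo-8"
    // addend =   0  ->  "foo+0"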
@@ -2024,6 +2325,8 @@
return "ELF64-i386";
case ELF::EM_X86_64:
return "ELF64-x86-64";
+ case ELF::EM_PPC64:
+ return "ELF64-ppc64";
default:
return "ELF64-unknown";
}
@@ -2044,6 +2347,11 @@
return Triple::arm;
case ELF::EM_HEXAGON:
return Triple::hexagon;
+ case ELF::EM_MIPS:
+ return (target_endianness == support::little) ?
+ Triple::mipsel : Triple::mips;
+ case ELF::EM_PPC64:
+ return Triple::ppc64;
default:
return Triple::UnknownArch;
}
Modified: llvm/branches/AMDILBackend/include/llvm/Object/MachO.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Object/MachO.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Object/MachO.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Object/MachO.h Tue Jan 15 11:16:16 2013
@@ -49,7 +49,6 @@
static inline bool classof(const Binary *v) {
return v->isMachO();
}
- static inline bool classof(const MachOObjectFile *v) { return true; }
protected:
virtual error_code getSymbolNext(DataRefImpl Symb, SymbolRef &Res) const;
@@ -62,6 +61,7 @@
virtual error_code getSymbolType(DataRefImpl Symb, SymbolRef::Type &Res) const;
virtual error_code getSymbolSection(DataRefImpl Symb,
section_iterator &Res) const;
+ virtual error_code getSymbolValue(DataRefImpl Symb, uint64_t &Val) const;
virtual error_code getSectionNext(DataRefImpl Sec, SectionRef &Res) const;
virtual error_code getSectionName(DataRefImpl Sec, StringRef &Res) const;
@@ -76,6 +76,7 @@
bool &Res) const;
virtual error_code isSectionVirtual(DataRefImpl Sec, bool &Res) const;
virtual error_code isSectionZeroInit(DataRefImpl Sec, bool &Res) const;
+ virtual error_code isSectionReadOnlyData(DataRefImpl Sec, bool &Res) const;
virtual error_code sectionContainsSymbol(DataRefImpl DRI, DataRefImpl S,
bool &Result) const;
virtual relocation_iterator getSectionRelBegin(DataRefImpl Sec) const;
Modified: llvm/branches/AMDILBackend/include/llvm/Object/MachOFormat.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Object/MachOFormat.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Object/MachOFormat.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Object/MachOFormat.h Tue Jan 15 11:16:16 2013
@@ -61,7 +61,10 @@
CSARM_V6 = 6,
CSARM_V5TEJ = 7,
CSARM_XSCALE = 8,
- CSARM_V7 = 9
+ CSARM_V7 = 9,
+ CSARM_V7F = 10,
+ CSARM_V7S = 11,
+ CSARM_V7K = 12
};
/// \brief PowerPC Machine Subtypes.
@@ -273,6 +276,10 @@
uint16_t Flags;
uint32_t Value;
};
+ // Despite containing a uint64_t, this structure is only 4-byte aligned within
+ // a MachO file.
+#pragma pack(push)
+#pragma pack(4)
struct Symbol64TableEntry {
uint32_t StringIndex;
uint8_t Type;
@@ -280,6 +287,7 @@
uint16_t Flags;
uint64_t Value;
};
+#pragma pack(pop)
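The pack(4) pragma matters because the struct's natural alignment would
otherwise be 8 on most ABIs, while 64-bit symbol table entries sit at
4-byte boundaries inside a Mach-O file. A minimal standalone C++11 sketch
(not part of this patch; illustrative only) showing that packing changes
the alignment, not the size:

    #include <cstdint>
    #include <cstdio>

    #pragma pack(push)
    #pragma pack(4)
    struct Packed64 {
      uint32_t StringIndex;
      uint8_t  Type;
      uint8_t  SectionIndex;
      uint16_t Flags;
      uint64_t Value;   // 8-byte field, but only 4-byte aligned here
    };
    #pragma pack(pop)

    struct Natural64 {  // identical fields, default alignment
      uint32_t StringIndex;
      uint8_t  Type;
      uint8_t  SectionIndex;
      uint16_t Flags;
      uint64_t Value;
    };

    int main() {
      // Both are 16 bytes; only alignof differs (4 vs. typically 8), so
      // Packed64 may legally overlay any 4-byte-aligned file offset.
      std::printf("packed:  size=%zu align=%zu\n",
                  sizeof(Packed64), alignof(Packed64));
      std::printf("natural: size=%zu align=%zu\n",
                  sizeof(Natural64), alignof(Natural64));
      return 0;
    }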
/// @}
/// @name Data-in-code Table Entry
Modified: llvm/branches/AMDILBackend/include/llvm/Object/ObjectFile.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Object/ObjectFile.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Object/ObjectFile.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Object/ObjectFile.h Tue Jan 15 11:16:16 2013
@@ -76,13 +76,13 @@
}
};
-inline bool operator ==(const DataRefImpl &a, const DataRefImpl &b) {
+inline bool operator==(const DataRefImpl &a, const DataRefImpl &b) {
// Check bitwise identical. This is the only legal way to compare a union w/o
// knowing which member is in use.
return std::memcmp(&a, &b, sizeof(DataRefImpl)) == 0;
}
-inline bool operator <(const DataRefImpl &a, const DataRefImpl &b) {
+inline bool operator<(const DataRefImpl &a, const DataRefImpl &b) {
// Check bitwise identical. This is the only legal way to compare a union w/o
// knowing which member is in use.
return std::memcmp(&a, &b, sizeof(DataRefImpl)) < 0;
@@ -144,7 +144,7 @@
SectionRef(DataRefImpl SectionP, const ObjectFile *Owner);
bool operator==(const SectionRef &Other) const;
- bool operator <(const SectionRef &Other) const;
+ bool operator<(const SectionRef &Other) const;
error_code getNext(SectionRef &Result) const;
@@ -163,6 +163,7 @@
error_code isRequiredForExecution(bool &Result) const;
error_code isVirtual(bool &Result) const;
error_code isZeroInit(bool &Result) const;
+ error_code isReadOnlyData(bool &Result) const;
error_code containsSymbol(SymbolRef S, bool &Result) const;
@@ -207,11 +208,13 @@
SymbolRef(DataRefImpl SymbolP, const ObjectFile *Owner);
bool operator==(const SymbolRef &Other) const;
- bool operator <(const SymbolRef &Other) const;
+ bool operator<(const SymbolRef &Other) const;
error_code getNext(SymbolRef &Result) const;
error_code getName(StringRef &Result) const;
+ /// Returns the symbol virtual address (i.e. address at which it will be
+ /// mapped).
error_code getAddress(uint64_t &Result) const;
error_code getFileOffset(uint64_t &Result) const;
error_code getSize(uint64_t &Result) const;
@@ -231,6 +234,9 @@
/// end_sections() if it is undefined or is an absolute symbol.
error_code getSection(section_iterator &Result) const;
+ /// @brief Get value of the symbol in the symbol table.
+ error_code getValue(uint64_t &Val) const;
+
DataRefImpl getRawDataRefImpl() const;
};
typedef content_iterator<SymbolRef> symbol_iterator;
@@ -248,7 +254,7 @@
LibraryRef(DataRefImpl LibraryP, const ObjectFile *Owner);
bool operator==(const LibraryRef &Other) const;
- bool operator <(const LibraryRef &Other) const;
+ bool operator<(const LibraryRef &Other) const;
error_code getNext(LibraryRef &Result) const;
@@ -263,11 +269,11 @@
/// ObjectFile - This class is the base class for all object file types.
/// Concrete instances of this object are created by createObjectFile, which
-/// figure out which type to create.
+/// figures out which type to create.
class ObjectFile : public Binary {
virtual void anchor();
- ObjectFile(); // = delete
- ObjectFile(const ObjectFile &other); // = delete
+ ObjectFile() LLVM_DELETED_FUNCTION;
+ ObjectFile(const ObjectFile &other) LLVM_DELETED_FUNCTION;
protected:
ObjectFile(unsigned int Type, MemoryBuffer *source, error_code &ec);
@@ -287,8 +293,8 @@
friend class SymbolRef;
virtual error_code getSymbolNext(DataRefImpl Symb, SymbolRef &Res) const = 0;
virtual error_code getSymbolName(DataRefImpl Symb, StringRef &Res) const = 0;
- virtual error_code getSymbolAddress(DataRefImpl Symb, uint64_t &Res) const =0;
- virtual error_code getSymbolFileOffset(DataRefImpl Symb, uint64_t &Res) const =0;
+ virtual error_code getSymbolAddress(DataRefImpl Symb, uint64_t &Res) const = 0;
+ virtual error_code getSymbolFileOffset(DataRefImpl Symb, uint64_t &Res)const=0;
virtual error_code getSymbolSize(DataRefImpl Symb, uint64_t &Res) const = 0;
virtual error_code getSymbolType(DataRefImpl Symb,
SymbolRef::Type &Res) const = 0;
@@ -297,6 +303,7 @@
uint32_t &Res) const = 0;
virtual error_code getSymbolSection(DataRefImpl Symb,
section_iterator &Res) const = 0;
+ virtual error_code getSymbolValue(DataRefImpl Symb, uint64_t &Val) const = 0;
// Same as above for SectionRef.
friend class SectionRef;
@@ -314,6 +321,7 @@
// A section is 'virtual' if its contents aren't present in the object image.
virtual error_code isSectionVirtual(DataRefImpl Sec, bool &Res) const = 0;
virtual error_code isSectionZeroInit(DataRefImpl Sec, bool &Res) const = 0;
+ virtual error_code isSectionReadOnlyData(DataRefImpl Sec, bool &Res) const =0;
virtual error_code sectionContainsSymbol(DataRefImpl Sec, DataRefImpl Symb,
bool &Result) const = 0;
virtual relocation_iterator getSectionRelBegin(DataRefImpl Sec) const = 0;
@@ -384,7 +392,6 @@
static inline bool classof(const Binary *v) {
return v->isObject();
}
- static inline bool classof(const ObjectFile *v) { return true; }
public:
static ObjectFile *createCOFFObjectFile(MemoryBuffer *Object);
@@ -401,7 +408,7 @@
return SymbolPimpl == Other.SymbolPimpl;
}
-inline bool SymbolRef::operator <(const SymbolRef &Other) const {
+inline bool SymbolRef::operator<(const SymbolRef &Other) const {
return SymbolPimpl < Other.SymbolPimpl;
}
@@ -441,6 +448,10 @@
return OwningObject->getSymbolType(SymbolPimpl, Result);
}
+inline error_code SymbolRef::getValue(uint64_t &Val) const {
+ return OwningObject->getSymbolValue(SymbolPimpl, Val);
+}
+
inline DataRefImpl SymbolRef::getRawDataRefImpl() const {
return SymbolPimpl;
}
@@ -456,7 +467,7 @@
return SectionPimpl == Other.SectionPimpl;
}
-inline bool SectionRef::operator <(const SectionRef &Other) const {
+inline bool SectionRef::operator<(const SectionRef &Other) const {
return SectionPimpl < Other.SectionPimpl;
}
@@ -508,6 +519,10 @@
return OwningObject->isSectionZeroInit(SectionPimpl, Result);
}
+inline error_code SectionRef::isReadOnlyData(bool &Result) const {
+ return OwningObject->isSectionReadOnlyData(SectionPimpl, Result);
+}
+
inline error_code SectionRef::containsSymbol(SymbolRef S, bool &Result) const {
return OwningObject->sectionContainsSymbol(SectionPimpl, S.SymbolPimpl,
Result);
@@ -586,7 +601,7 @@
return LibraryPimpl == Other.LibraryPimpl;
}
-inline bool LibraryRef::operator <(const LibraryRef &Other) const {
+inline bool LibraryRef::operator<(const LibraryRef &Other) const {
return LibraryPimpl < Other.LibraryPimpl;
}
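The new SymbolRef::getValue and SectionRef::isReadOnlyData accessors are
thin forwarders to the per-format virtuals. A hedged sketch of how a
client might use the section query (error handling abbreviated; assumes
an already-created ObjectFile *Obj and the iteration API of this header):

    #include "llvm/Object/ObjectFile.h"
    #include "llvm/Support/raw_ostream.h"

    void listReadOnlyDataSections(llvm::object::ObjectFile *Obj) {
      using namespace llvm;
      using namespace llvm::object;
      error_code EC;
      for (section_iterator I = Obj->begin_sections(),
                            E = Obj->end_sections();
           I != E; I.increment(EC)) {
        if (EC) break;               // bail out on iteration errors
        bool RO = false;
        if (I->isReadOnlyData(RO))   // non-success error_code: skip
          continue;
        if (RO) {
          StringRef Name;
          if (!I->getName(Name))
            outs() << "read-only data: " << Name << "\n";
        }
      }
    }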
Modified: llvm/branches/AMDILBackend/include/llvm/Operator.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Operator.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Operator.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Operator.h Tue Jan 15 11:16:16 2013
@@ -16,6 +16,7 @@
#define LLVM_OPERATOR_H
#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
#include "llvm/Instruction.h"
#include "llvm/Type.h"
@@ -32,9 +33,14 @@
private:
// Do not implement any of these. The Operator class is intended to be used
// as a utility, and is never itself instantiated.
- void *operator new(size_t, unsigned);
- void *operator new(size_t s);
- Operator();
+ void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
+ void *operator new(size_t s) LLVM_DELETED_FUNCTION;
+ Operator() LLVM_DELETED_FUNCTION;
+
+protected:
+ // NOTE: Cannot use LLVM_DELETED_FUNCTION because it's not legal to delete
+ // an overridden method that's not deleted in the base class. Cannot leave
+ // this unimplemented because that leads to an ODR-violation.
~Operator();
public:
@@ -57,7 +63,6 @@
return Instruction::UserOp1;
}
- static inline bool classof(const Operator *) { return true; }
static inline bool classof(const Instruction *) { return true; }
static inline bool classof(const ConstantExpr *) { return true; }
static inline bool classof(const Value *V) {
@@ -77,8 +82,6 @@
};
private:
- ~OverflowingBinaryOperator(); // do not implement
-
friend class BinaryOperator;
friend class ConstantExpr;
void setHasNoUnsignedWrap(bool B) {
@@ -103,7 +106,6 @@
return (SubclassOptionalData & NoSignedWrap) != 0;
}
- static inline bool classof(const OverflowingBinaryOperator *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Instruction::Add ||
I->getOpcode() == Instruction::Sub ||
@@ -131,8 +133,6 @@
};
private:
- ~PossiblyExactOperator(); // do not implement
-
friend class BinaryOperator;
friend class ConstantExpr;
void setIsExact(bool B) {
@@ -167,9 +167,6 @@
/// FPMathOperator - Utility class for floating point operations which can have
/// information about relaxed accuracy requirements attached to them.
class FPMathOperator : public Operator {
-private:
- ~FPMathOperator(); // do not implement
-
public:
/// \brief Get the maximum error permitted by this operation in ULPs. An
@@ -177,7 +174,6 @@
/// default precision.
float getFPAccuracy() const;
- static inline bool classof(const FPMathOperator *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getType()->isFPOrFPVectorTy();
}
@@ -191,11 +187,7 @@
/// opcodes.
template<typename SuperClass, unsigned Opc>
class ConcreteOperator : public SuperClass {
- ~ConcreteOperator(); // DO NOT IMPLEMENT
public:
- static inline bool classof(const ConcreteOperator<SuperClass, Opc> *) {
- return true;
- }
static inline bool classof(const Instruction *I) {
return I->getOpcode() == Opc;
}
@@ -210,45 +202,35 @@
class AddOperator
: public ConcreteOperator<OverflowingBinaryOperator, Instruction::Add> {
- ~AddOperator(); // DO NOT IMPLEMENT
};
class SubOperator
: public ConcreteOperator<OverflowingBinaryOperator, Instruction::Sub> {
- ~SubOperator(); // DO NOT IMPLEMENT
};
class MulOperator
: public ConcreteOperator<OverflowingBinaryOperator, Instruction::Mul> {
- ~MulOperator(); // DO NOT IMPLEMENT
};
class ShlOperator
: public ConcreteOperator<OverflowingBinaryOperator, Instruction::Shl> {
- ~ShlOperator(); // DO NOT IMPLEMENT
};
-
+
class SDivOperator
: public ConcreteOperator<PossiblyExactOperator, Instruction::SDiv> {
- ~SDivOperator(); // DO NOT IMPLEMENT
};
class UDivOperator
: public ConcreteOperator<PossiblyExactOperator, Instruction::UDiv> {
- ~UDivOperator(); // DO NOT IMPLEMENT
};
class AShrOperator
: public ConcreteOperator<PossiblyExactOperator, Instruction::AShr> {
- ~AShrOperator(); // DO NOT IMPLEMENT
};
class LShrOperator
: public ConcreteOperator<PossiblyExactOperator, Instruction::LShr> {
- ~LShrOperator(); // DO NOT IMPLEMENT
};
-
-
-
+
+
+
class GEPOperator
: public ConcreteOperator<Operator, Instruction::GetElementPtr> {
- ~GEPOperator(); // DO NOT IMPLEMENT
-
enum {
IsInBounds = (1 << 0)
};
@@ -288,6 +270,12 @@
return getPointerOperand()->getType();
}
+ /// getPointerAddressSpace - Method to return the address space of the
+ /// pointer operand.
+ unsigned getPointerAddressSpace() const {
+ return cast<PointerType>(getPointerOperandType())->getAddressSpace();
+ }
+
unsigned getNumIndices() const { // Note: always non-negative
return getNumOperands() - 1;
}
Modified: llvm/branches/AMDILBackend/include/llvm/Pass.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Pass.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Pass.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Pass.h Tue Jan 15 11:16:16 2013
@@ -29,6 +29,7 @@
#ifndef LLVM_PASS_H
#define LLVM_PASS_H
+#include "llvm/Support/Compiler.h"
#include <string>
namespace llvm {
@@ -82,8 +83,8 @@
AnalysisResolver *Resolver; // Used to resolve analysis
const void *PassID;
PassKind Kind;
- void operator=(const Pass&); // DO NOT IMPLEMENT
- Pass(const Pass &); // DO NOT IMPLEMENT
+ void operator=(const Pass&) LLVM_DELETED_FUNCTION;
+ Pass(const Pass &) LLVM_DELETED_FUNCTION;
public:
explicit Pass(PassKind K, char &pid) : Resolver(0), PassID(&pid), Kind(K) { }
Modified: llvm/branches/AMDILBackend/include/llvm/PassAnalysisSupport.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/PassAnalysisSupport.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/PassAnalysisSupport.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/PassAnalysisSupport.h Tue Jan 15 11:16:16 2013
@@ -120,7 +120,7 @@
class PMDataManager;
class AnalysisResolver {
private:
- AnalysisResolver(); // DO NOT IMPLEMENT
+ AnalysisResolver() LLVM_DELETED_FUNCTION;
public:
explicit AnalysisResolver(PMDataManager &P) : PM(P) { }
Modified: llvm/branches/AMDILBackend/include/llvm/PassSupport.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/PassSupport.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/PassSupport.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/PassSupport.h Tue Jan 15 11:16:16 2013
@@ -126,8 +126,8 @@
}
private:
- void operator=(const PassInfo &); // do not implement
- PassInfo(const PassInfo &); // do not implement
+ void operator=(const PassInfo &) LLVM_DELETED_FUNCTION;
+ PassInfo(const PassInfo &) LLVM_DELETED_FUNCTION;
};
#define CALL_ONCE_INITIALIZATION(function) \
Modified: llvm/branches/AMDILBackend/include/llvm/Support/AlignOf.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/AlignOf.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/AlignOf.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/AlignOf.h Tue Jan 15 11:16:16 2013
@@ -68,24 +68,20 @@
/// integer literal can be used to specify an alignment constraint. Once built
/// up here, we can then begin to indirect between these using normal C++
/// template parameters.
-template <size_t Alignment> struct AlignedCharArrayImpl {};
-template <> struct AlignedCharArrayImpl<0> {
- typedef char type;
-};
+template <size_t Alignment> struct AlignedCharArrayImpl;
+
+// MSVC requires special handling here.
+#ifndef _MSC_VER
+
#if __has_feature(cxx_alignas)
#define LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(x) \
template <> struct AlignedCharArrayImpl<x> { \
- typedef char alignas(x) type; \
+ char alignas(x) aligned; \
}
-#elif defined(__clang__) || defined(__GNUC__)
+#elif defined(__GNUC__) || defined(__IBM_ATTRIBUTES)
#define LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(x) \
template <> struct AlignedCharArrayImpl<x> { \
- typedef char type __attribute__((aligned(x))); \
- }
-#elif defined(_MSC_VER)
-#define LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(x) \
- template <> struct AlignedCharArrayImpl<x> { \
- typedef __declspec(align(x)) char type; \
+ char aligned __attribute__((aligned(x))); \
}
#else
# error No supported align as directive.
@@ -104,11 +100,40 @@
LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(2048);
LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(4096);
LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(8192);
+
+#undef LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT
+
+#else // _MSC_VER
+
+// We provide special variations of this template for the most common
+// alignments because __declspec(align(...)) doesn't actually work when it is
+// a member of a by-value function argument in MSVC, even if the alignment
+// request is something reasonable like an 8-byte or 16-byte boundary.
+template <> struct AlignedCharArrayImpl<1> { char aligned; };
+template <> struct AlignedCharArrayImpl<2> { short aligned; };
+template <> struct AlignedCharArrayImpl<4> { int aligned; };
+template <> struct AlignedCharArrayImpl<8> { double aligned; };
+
+#define LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(x) \
+ template <> struct AlignedCharArrayImpl<x> { \
+ __declspec(align(x)) char aligned; \
+ }
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(16);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(32);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(64);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(128);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(512);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(1024);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(2048);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(4096);
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(8192);
// Any larger and MSVC complains.
#undef LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT
-/// \brief This class template exposes a typedef for type containing a suitable
-/// aligned character array to hold elements of any of up to four types.
+#endif // _MSC_VER
+
+/// \brief This union template exposes a suitably aligned and sized character
+/// array member which can hold elements of any of up to four types.
///
/// These types may be arrays, structs, or any other types. The goal is to
/// produce a union type containing a character array which, when used, forms
@@ -116,7 +141,8 @@
/// than four types can be added at the cost of more boiler plate.
template <typename T1,
typename T2 = char, typename T3 = char, typename T4 = char>
-class AlignedCharArray {
+union AlignedCharArrayUnion {
+private:
class AlignerImpl {
T1 t1; T2 t2; T3 t3; T4 t4;
@@ -127,23 +153,17 @@
};
public:
- // Sadly, Clang and GCC both fail to align a character array properly even
- // with an explicit alignment attribute. To work around this, we union
- // the character array that will actually be used with a struct that contains
- // a single aligned character member. Tests seem to indicate that both Clang
- // and GCC will properly register the alignment of a struct containing an
- // aligned member, and this alignment should carry over to the character
- // array in the union.
- union union_type {
- // This is the only member of the union which should be used by clients:
- char buffer[sizeof(SizerImpl)];
-
- // This member of the union only exists to force the alignment.
- struct {
- typename llvm::AlignedCharArrayImpl<AlignOf<AlignerImpl>::Alignment>::type
- nonce_inner_member;
- } nonce_member;
- };
+ /// \brief The character array buffer for use by clients.
+ ///
+  /// No other member of this union should be referenced. They exist purely to
+ /// constrain the layout of this character array.
+ char buffer[sizeof(SizerImpl)];
+
+private:
+ // Tests seem to indicate that both Clang and GCC will properly register the
+ // alignment of a struct containing an aligned member, and this alignment
+ // should carry over to the character array in the union.
+ llvm::AlignedCharArrayImpl<AlignOf<AlignerImpl>::Alignment> nonce_member;
};
} // end namespace llvm
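Since clients now read the buffer member directly rather than going
through the old nested union_type, typical usage is placement-new into
buffer. A minimal sketch (hypothetical Lazy<T> wrapper, not from this
patch):

    #include <new>

    template <typename T>
    class Lazy {
      llvm::AlignedCharArrayUnion<T> Storage; // sized/aligned for a T
      bool Constructed;
    public:
      Lazy() : Constructed(false) {}
      ~Lazy() {
        if (Constructed)
          reinterpret_cast<T *>(Storage.buffer)->~T();
      }
      T &get() {
        if (!Constructed) {
          new (Storage.buffer) T(); // placement-new into aligned storage
          Constructed = true;
        }
        return *reinterpret_cast<T *>(Storage.buffer);
      }
    };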
Modified: llvm/branches/AMDILBackend/include/llvm/Support/Allocator.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/Allocator.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/Allocator.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/Allocator.h Tue Jan 15 11:16:16 2013
@@ -79,8 +79,8 @@
public:
MallocSlabAllocator() : Allocator() { }
virtual ~MallocSlabAllocator();
- virtual MemSlab *Allocate(size_t Size);
- virtual void Deallocate(MemSlab *Slab);
+ virtual MemSlab *Allocate(size_t Size) LLVM_OVERRIDE;
+ virtual void Deallocate(MemSlab *Slab) LLVM_OVERRIDE;
};
/// BumpPtrAllocator - This allocator is useful for containers that need
@@ -88,8 +88,8 @@
/// allocating memory, and never deletes it until the entire block is dead. This
/// makes allocation speedy, but must only be used when the trade-off is ok.
class BumpPtrAllocator {
- BumpPtrAllocator(const BumpPtrAllocator &); // do not implement
- void operator=(const BumpPtrAllocator &); // do not implement
+ BumpPtrAllocator(const BumpPtrAllocator &) LLVM_DELETED_FUNCTION;
+ void operator=(const BumpPtrAllocator &) LLVM_DELETED_FUNCTION;
/// SlabSize - Allocate data into slabs of this size unless we get an
/// allocation above SizeThreshold.
Modified: llvm/branches/AMDILBackend/include/llvm/Support/COFF.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/COFF.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/COFF.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/COFF.h Tue Jan 15 11:16:16 2013
@@ -50,7 +50,7 @@
};
enum MachineTypes {
- MT_Invalid = -1,
+ MT_Invalid = 0xffff,
IMAGE_FILE_MACHINE_UNKNOWN = 0x0,
IMAGE_FILE_MACHINE_AM33 = 0x13,
@@ -142,7 +142,7 @@
/// Storage class tells where and what the symbol represents
enum SymbolStorageClass {
- SSC_Invalid = -1,
+ SSC_Invalid = 0xff,
IMAGE_SYM_CLASS_END_OF_FUNCTION = -1, ///< Physical end of function
IMAGE_SYM_CLASS_NULL = 0, ///< No symbol
@@ -220,7 +220,7 @@
};
enum SectionCharacteristics {
- SC_Invalid = -1,
+ SC_Invalid = 0xffffffff,
IMAGE_SCN_TYPE_NO_PAD = 0x00000008,
IMAGE_SCN_CNT_CODE = 0x00000020,
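Switching the Invalid sentinels from -1 to explicit all-ones constants
keeps each value representable in the field width the on-disk format
actually uses (16-bit machine type, 8-bit storage class, 32-bit section
flags). An illustrative sketch of the comparison pitfall the old
sentinels risked (standalone, not from this patch):

    #include <cstdint>
    #include <cassert>

    enum MachineTypes { MT_Invalid = 0xffff };

    int main() {
      uint16_t Field = MT_Invalid;  // exact fit, no sign surprise
      assert(Field == MT_Invalid);
      // With MT_Invalid = -1, (Field == MT_Invalid) would compare
      // 65535 against -1 after integer promotion and be false.
      return 0;
    }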
Modified: llvm/branches/AMDILBackend/include/llvm/Support/CallSite.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/CallSite.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/CallSite.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/CallSite.h Tue Jan 15 11:16:16 2013
@@ -81,7 +81,7 @@
InstrTy *operator->() const { return I.getPointer(); }
operator bool() const { return I.getPointer(); }
- /// getCalledValue - Return the pointer to function that is being called...
+ /// getCalledValue - Return the pointer to function that is being called.
///
ValTy *getCalledValue() const {
assert(getInstruction() && "Not a call or invoke instruction!");
@@ -95,7 +95,7 @@
return dyn_cast<FunTy>(getCalledValue());
}
- /// setCalledFunction - Set the callee to the specified value...
+ /// setCalledFunction - Set the callee to the specified value.
///
void setCalledFunction(Value *V) {
assert(getInstruction() && "Not a call or invoke instruction!");
@@ -130,7 +130,7 @@
}
/// arg_iterator - The type of iterator to use when looping over actual
- /// arguments at this call site...
+ /// arguments at this call site.
typedef IterTy arg_iterator;
/// arg_begin/arg_end - Return iterators corresponding to the actual argument
@@ -185,13 +185,13 @@
}
/// \brief Return true if this function has the given attribute.
- bool hasFnAttr(Attributes N) const {
- CALLSITE_DELEGATE_GETTER(hasFnAttr(N));
+ bool hasFnAttr(Attributes::AttrVal A) const {
+ CALLSITE_DELEGATE_GETTER(hasFnAttr(A));
}
- /// paramHasAttr - whether the call or the callee has the given attribute.
- bool paramHasAttr(uint16_t i, Attributes attr) const {
- CALLSITE_DELEGATE_GETTER(paramHasAttr(i, attr));
+ /// \brief Return true if the call or the callee has the given attribute.
+ bool paramHasAttr(unsigned i, Attributes::AttrVal A) const {
+ CALLSITE_DELEGATE_GETTER(paramHasAttr(i, A));
}
/// @brief Extract the alignment for a call or parameter (0=unknown).
@@ -211,32 +211,32 @@
bool doesNotAccessMemory() const {
CALLSITE_DELEGATE_GETTER(doesNotAccessMemory());
}
- void setDoesNotAccessMemory(bool doesNotAccessMemory = true) {
- CALLSITE_DELEGATE_SETTER(setDoesNotAccessMemory(doesNotAccessMemory));
+ void setDoesNotAccessMemory() {
+ CALLSITE_DELEGATE_SETTER(setDoesNotAccessMemory());
}
/// @brief Determine if the call does not access or only reads memory.
bool onlyReadsMemory() const {
CALLSITE_DELEGATE_GETTER(onlyReadsMemory());
}
- void setOnlyReadsMemory(bool onlyReadsMemory = true) {
- CALLSITE_DELEGATE_SETTER(setOnlyReadsMemory(onlyReadsMemory));
+ void setOnlyReadsMemory() {
+ CALLSITE_DELEGATE_SETTER(setOnlyReadsMemory());
}
/// @brief Determine if the call cannot return.
bool doesNotReturn() const {
CALLSITE_DELEGATE_GETTER(doesNotReturn());
}
- void setDoesNotReturn(bool doesNotReturn = true) {
- CALLSITE_DELEGATE_SETTER(setDoesNotReturn(doesNotReturn));
+ void setDoesNotReturn() {
+ CALLSITE_DELEGATE_SETTER(setDoesNotReturn());
}
/// @brief Determine if the call cannot unwind.
bool doesNotThrow() const {
CALLSITE_DELEGATE_GETTER(doesNotThrow());
}
- void setDoesNotThrow(bool doesNotThrow = true) {
- CALLSITE_DELEGATE_SETTER(setDoesNotThrow(doesNotThrow));
+ void setDoesNotThrow() {
+ CALLSITE_DELEGATE_SETTER(setDoesNotThrow());
}
#undef CALLSITE_DELEGATE_GETTER
@@ -244,12 +244,12 @@
/// @brief Determine whether this argument is not captured.
bool doesNotCapture(unsigned ArgNo) const {
- return paramHasAttr(ArgNo + 1, Attribute::NoCapture);
+ return paramHasAttr(ArgNo + 1, Attributes::NoCapture);
}
/// @brief Determine whether this argument is passed by value.
bool isByValArgument(unsigned ArgNo) const {
- return paramHasAttr(ArgNo + 1, Attribute::ByVal);
+ return paramHasAttr(ArgNo + 1, Attributes::ByVal);
}
/// hasArgument - Returns true if this CallSite passes the given Value* as an
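With paramHasAttr now taking an Attributes::AttrVal enumerator rather
than an Attributes bitset, client predicates mirror the two helpers
above. A hedged sketch (assumes the updated Attributes interface):

    #include "llvm/Support/CallSite.h"

    // Does the i-th argument (0-based) carry the 'nest' attribute?
    static bool isNestArgument(llvm::CallSite CS, unsigned ArgNo) {
      // Attribute indices are 1-based; index 0 is the return value.
      return CS.paramHasAttr(ArgNo + 1, llvm::Attributes::Nest);
    }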
Modified: llvm/branches/AMDILBackend/include/llvm/Support/Casting.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/Casting.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/Casting.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/Casting.h Tue Jan 15 11:16:16 2013
@@ -15,6 +15,7 @@
#ifndef LLVM_SUPPORT_CASTING_H
#define LLVM_SUPPORT_CASTING_H
+#include "llvm/Support/type_traits.h"
#include <cassert>
namespace llvm {
@@ -44,13 +45,23 @@
// The core of the implementation of isa<X> is here; To and From should be
// the names of classes. This template can be specialized to customize the
// implementation of isa<> without rewriting it from scratch.
-template <typename To, typename From>
+template <typename To, typename From, typename Enabler = void>
struct isa_impl {
static inline bool doit(const From &Val) {
return To::classof(&Val);
}
};
+/// \brief Always allow upcasts, and perform no dynamic check for them.
+template <typename To, typename From>
+struct isa_impl<To, From,
+ typename llvm::enable_if_c<
+ llvm::is_base_of<To, From>::value
+ >::type
+ > {
+ static inline bool doit(const From &) { return true; }
+};
+
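This enable_if specialization makes upcasts free: when To is a base of
From, isa<> resolves to a compile-time 'true' and classof is never
consulted. A sketch with hypothetical classes (not from this patch):

    // Derived inherits from Base, so is_base_of<Base, Derived> holds and
    // the always-true specialization is selected for isa<Base>(D).
    struct Base    { /* LLVM-style classof machinery elided */ };
    struct Derived : Base {};

    void demo(Derived *D) {
      // D must be non-null (isa<> asserts on null pointers).
      bool AlwaysTrue = llvm::isa<Base>(D); // no classof call generated
      (void)AlwaysTrue;
    }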
template <typename To, typename From> struct isa_impl_cl {
static inline bool doit(const From &Val) {
return isa_impl<To, From>::doit(Val);
@@ -65,18 +76,21 @@
template <typename To, typename From> struct isa_impl_cl<To, From*> {
static inline bool doit(const From *Val) {
+ assert(Val && "isa<> used on a null pointer");
return isa_impl<To, From>::doit(*Val);
}
};
template <typename To, typename From> struct isa_impl_cl<To, const From*> {
static inline bool doit(const From *Val) {
+ assert(Val && "isa<> used on a null pointer");
return isa_impl<To, From>::doit(*Val);
}
};
template <typename To, typename From> struct isa_impl_cl<To, const From*const> {
static inline bool doit(const From *Val) {
+ assert(Val && "isa<> used on a null pointer");
return isa_impl<To, From>::doit(*Val);
}
};
Modified: llvm/branches/AMDILBackend/include/llvm/Support/CommandLine.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/CommandLine.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/CommandLine.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/CommandLine.h Tue Jan 15 11:16:16 2013
@@ -41,16 +41,14 @@
// ParseCommandLineOptions - Command line option processing entry point.
//
void ParseCommandLineOptions(int argc, const char * const *argv,
- const char *Overview = 0,
- bool ReadResponseFiles = false);
+ const char *Overview = 0);
//===----------------------------------------------------------------------===//
// ParseEnvironmentOptions - Environment variable option processing alternate
// entry point.
//
void ParseEnvironmentOptions(const char *progName, const char *envvar,
- const char *Overview = 0,
- bool ReadResponseFiles = false);
+ const char *Overview = 0);
///===---------------------------------------------------------------------===//
/// SetVersionPrinter - Override the default (LLVM specific) version printer
@@ -1509,7 +1507,7 @@
typename ParserClass::parser_data_type();
if (Parser.parse(*this, ArgName, Arg, Val))
return true; // Parse Error!
- addValue(Val);
+ this->addValue(Val);
setPosition(pos);
Positions.push_back(pos);
return false;
@@ -1608,15 +1606,16 @@
class alias : public Option {
Option *AliasFor;
virtual bool handleOccurrence(unsigned pos, StringRef /*ArgName*/,
- StringRef Arg) {
+ StringRef Arg) LLVM_OVERRIDE {
return AliasFor->handleOccurrence(pos, AliasFor->ArgStr, Arg);
}
// Handle printing stuff...
- virtual size_t getOptionWidth() const;
- virtual void printOptionInfo(size_t GlobalWidth) const;
+ virtual size_t getOptionWidth() const LLVM_OVERRIDE;
+ virtual void printOptionInfo(size_t GlobalWidth) const LLVM_OVERRIDE;
// Aliases do not need to print their values.
- virtual void printOptionValue(size_t /*GlobalWidth*/, bool /*Force*/) const {}
+ virtual void printOptionValue(size_t /*GlobalWidth*/,
+ bool /*Force*/) const LLVM_OVERRIDE {}
void done() {
if (!hasArgStr())
Modified: llvm/branches/AMDILBackend/include/llvm/Support/Compiler.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/Compiler.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/Compiler.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/Compiler.h Tue Jan 15 11:16:16 2013
@@ -24,7 +24,7 @@
/// does not imply the existence of any other C++ library features.
#if (__has_feature(cxx_rvalue_references) \
|| defined(__GXX_EXPERIMENTAL_CXX0X__) \
- || _MSC_VER >= 1600)
+ || (defined(_MSC_VER) && _MSC_VER >= 1600))
#define LLVM_USE_RVALUE_REFERENCES 1
#else
#define LLVM_USE_RVALUE_REFERENCES 0
@@ -38,6 +38,41 @@
#define llvm_move(value) (value)
#endif
+/// LLVM_DELETED_FUNCTION - Expands to = delete if the compiler supports it.
+/// Use to mark functions as uncallable. Member functions with this should
+/// be declared private so that some behavior is kept in C++03 mode.
+///
+/// class DontCopy {
+/// private:
+/// DontCopy(const DontCopy&) LLVM_DELETED_FUNCTION;
+/// DontCopy &operator =(const DontCopy&) LLVM_DELETED_FUNCTION;
+/// public:
+/// ...
+/// };
+#if (__has_feature(cxx_deleted_functions) \
+ || defined(__GXX_EXPERIMENTAL_CXX0X__))
+ // No version of MSVC currently supports this.
+#define LLVM_DELETED_FUNCTION = delete
+#else
+#define LLVM_DELETED_FUNCTION
+#endif
+
+/// LLVM_FINAL - Expands to 'final' if the compiler supports it.
+/// Use to mark classes or virtual methods as final.
+#if (__has_feature(cxx_override_control))
+#define LLVM_FINAL final
+#else
+#define LLVM_FINAL
+#endif
+
+/// LLVM_OVERRIDE - Expands to 'override' if the compiler supports it.
+/// Use to mark virtual methods as overriding a base class method.
+#if (__has_feature(cxx_override_control))
+#define LLVM_OVERRIDE override
+#else
+#define LLVM_OVERRIDE
+#endif
+
/// LLVM_LIBRARY_VISIBILITY - If a class marked with this attribute is linked
/// into a shared library, then the class should be private to the library and
/// not accessible from outside it. Can also be used to mark variables and
@@ -87,9 +122,11 @@
#endif
#if (__GNUC__ >= 4)
-#define BUILTIN_EXPECT(EXPR, VALUE) __builtin_expect((EXPR), (VALUE))
+#define LLVM_LIKELY(EXPR) __builtin_expect((bool)(EXPR), true)
+#define LLVM_UNLIKELY(EXPR) __builtin_expect((bool)(EXPR), false)
#else
-#define BUILTIN_EXPECT(EXPR, VALUE) (EXPR)
+#define LLVM_LIKELY(EXPR) (EXPR)
+#define LLVM_UNLIKELY(EXPR) (EXPR)
#endif
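Callers of the old BUILTIN_EXPECT migrate to these self-documenting
forms. A minimal sketch of annotating a cold error path (assumes
Compiler.h is included):

    #include "llvm/Support/Compiler.h"
    #include <cstdio>

    int parseByte(const unsigned char *P) {
      if (LLVM_UNLIKELY(P == 0)) {   // hint to the compiler: cold path
        std::fprintf(stderr, "null input\n");
        return -1;
      }
      return *P;                     // hot path laid out fall-through
    }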
@@ -168,4 +205,13 @@
# define LLVM_BUILTIN_UNREACHABLE __builtin_unreachable()
#endif
+// LLVM_BUILTIN_TRAP - On compilers which support it, expands to an expression
+// which causes the program to exit abnormally.
+#if defined(__clang__) || (__GNUC__ > 4) \
+ || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)
+# define LLVM_BUILTIN_TRAP __builtin_trap()
+#else
+# define LLVM_BUILTIN_TRAP *(volatile int*)0x11 = 0
+#endif
+
#endif
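LLVM_BUILTIN_TRAP gives fatal-error paths a portable way to stop hard at
the faulting point. A hedged sketch (hypothetical helper, not from this
patch):

    #include "llvm/Support/Compiler.h"

    // Abort abnormally so a debugger or crash reporter fires right here;
    // on supporting compilers this is __builtin_trap(), otherwise the
    // fallback store to a bad address faults.
    static void unreachableStateReached() {
      LLVM_BUILTIN_TRAP;
    }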
Modified: llvm/branches/AMDILBackend/include/llvm/Support/DataExtractor.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/DataExtractor.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/DataExtractor.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/DataExtractor.h Tue Jan 15 11:16:16 2013
@@ -10,6 +10,7 @@
#ifndef LLVM_SUPPORT_DATAEXTRACTOR_H
#define LLVM_SUPPORT_DATAEXTRACTOR_H
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/DataTypes.h"
@@ -99,8 +100,8 @@
/// enough bytes to extract this value, the offset will be left
/// unmodified.
///
- /// @param[in] byte_size
- /// The size in byte of the integer to extract.
+ /// @param[in] size
+ /// The size in bytes of the integer to extract.
///
/// @return
/// The sign extended signed integer value that was extracted,
Modified: llvm/branches/AMDILBackend/include/llvm/Support/ELF.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/ELF.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/ELF.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/ELF.h Tue Jan 15 11:16:16 2013
@@ -441,6 +441,7 @@
R_MICROBLAZE_COPY = 21
};
+// ELF Relocation types for PPC32
enum {
R_PPC_NONE = 0, /* No relocation. */
R_PPC_ADDR32 = 1,
@@ -456,7 +457,23 @@
R_PPC_REL14 = 11,
R_PPC_REL14_BRTAKEN = 12,
R_PPC_REL14_BRNTAKEN = 13,
- R_PPC_REL32 = 26
+ R_PPC_REL32 = 26,
+ R_PPC_TPREL16_LO = 70,
+ R_PPC_TPREL16_HA = 72
+};
+
+// ELF Relocation types for PPC64
+enum {
+ R_PPC64_ADDR16_LO = 4,
+ R_PPC64_ADDR16_HI = 5,
+ R_PPC64_ADDR14 = 7,
+ R_PPC64_REL24 = 10,
+ R_PPC64_ADDR64 = 38,
+ R_PPC64_ADDR16_HIGHER = 39,
+ R_PPC64_ADDR16_HIGHEST = 41,
+ R_PPC64_TOC16 = 47,
+ R_PPC64_TOC = 51,
+ R_PPC64_TOC16_DS = 63
};
// ARM Specific e_flags
@@ -674,8 +691,36 @@
R_MIPS_NUM = 218
};
+// Hexagon Specific e_flags
+// Release 5 ABI
+enum {
+ // Object processor version flags, bits[3:0]
+ EF_HEXAGON_MACH_V2 = 0x00000001, // Hexagon V2
+ EF_HEXAGON_MACH_V3 = 0x00000002, // Hexagon V3
+ EF_HEXAGON_MACH_V4 = 0x00000003, // Hexagon V4
+ EF_HEXAGON_MACH_V5 = 0x00000004, // Hexagon V5
+
+ // Highest ISA version flags
+ EF_HEXAGON_ISA_MACH = 0x00000000, // Same as specified in bits[3:0]
+ // of e_flags
+ EF_HEXAGON_ISA_V2 = 0x00000010, // Hexagon V2 ISA
+ EF_HEXAGON_ISA_V3 = 0x00000020, // Hexagon V3 ISA
+ EF_HEXAGON_ISA_V4 = 0x00000030, // Hexagon V4 ISA
+ EF_HEXAGON_ISA_V5 = 0x00000040 // Hexagon V5 ISA
+};
+
+// Hexagon specific Section indexes for common small data
+// Release 5 ABI
+enum {
+ SHN_HEXAGON_SCOMMON = 0xff00, // Other access sizes
+ SHN_HEXAGON_SCOMMON_1 = 0xff01, // Byte-sized access
+ SHN_HEXAGON_SCOMMON_2 = 0xff02, // Half-word-sized access
+ SHN_HEXAGON_SCOMMON_4 = 0xff03, // Word-sized access
+  SHN_HEXAGON_SCOMMON_8 = 0xff04 // Double-word-sized access
+};
+
// ELF Relocation types for Hexagon
-// Release 5 ABI - Document: 80-V9418-3 Rev. J
+// Release 5 ABI
enum {
R_HEX_NONE = 0,
R_HEX_B22_PCREL = 1,
@@ -1103,6 +1148,9 @@
PT_PHDR = 6, // The program header table itself.
PT_TLS = 7, // The thread-local storage template.
PT_LOOS = 0x60000000, // Lowest operating system-specific pt entry type.
+ PT_HIOS = 0x6fffffff, // Highest operating system-specific pt entry type.
+ PT_LOPROC = 0x70000000, // Lowest processor-specific program hdr entry type.
+ PT_HIPROC = 0x7fffffff, // Highest processor-specific program hdr entry type.
// x86-64 program header types.
// These all contain stack unwind tables.
@@ -1113,9 +1161,11 @@
PT_GNU_STACK = 0x6474e551, // Indicates stack executability.
PT_GNU_RELRO = 0x6474e552, // Read-only after relocation.
- PT_HIOS = 0x6fffffff, // Highest operating system-specific pt entry type.
- PT_LOPROC = 0x70000000, // Lowest processor-specific program hdr entry type.
- PT_HIPROC = 0x7fffffff // Highest processor-specific program hdr entry type.
+ // ARM program header types.
+ PT_ARM_ARCHEXT = 0x70000000, // Platform architecture compatibility information
+ // These all contain stack unwind tables.
+ PT_ARM_EXIDX = 0x70000001,
+ PT_ARM_UNWIND = 0x70000001
};
// Segment flag bits.
Modified: llvm/branches/AMDILBackend/include/llvm/Support/FileOutputBuffer.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/FileOutputBuffer.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/FileOutputBuffer.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/FileOutputBuffer.h Tue Jan 15 11:16:16 2013
@@ -78,10 +78,11 @@
~FileOutputBuffer();
+private:
+ FileOutputBuffer(const FileOutputBuffer &) LLVM_DELETED_FUNCTION;
+ FileOutputBuffer &operator=(const FileOutputBuffer &) LLVM_DELETED_FUNCTION;
protected:
- FileOutputBuffer(const FileOutputBuffer &); // DO NOT IMPLEMENT
- FileOutputBuffer &operator=(const FileOutputBuffer &); // DO NOT IMPLEMENT
- FileOutputBuffer(uint8_t *Start, uint8_t *End,
+ FileOutputBuffer(uint8_t *Start, uint8_t *End,
StringRef Path, StringRef TempPath);
uint8_t *BufferStart;
Modified: llvm/branches/AMDILBackend/include/llvm/Support/FileSystem.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/FileSystem.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/FileSystem.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/FileSystem.h Tue Jan 15 11:16:16 2013
@@ -28,6 +28,7 @@
#define LLVM_SUPPORT_FILE_SYSTEM_H
#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/DataTypes.h"
@@ -39,7 +40,7 @@
#include <string>
#include <vector>
-#if HAVE_SYS_STAT_H
+#ifdef HAVE_SYS_STAT_H
#include <sys/stat.h>
#endif
@@ -279,7 +280,7 @@
/// @brief Get the current path.
///
/// @param result Holds the current path on return.
-/// @results errc::success if the current path has been stored in result,
+/// @returns errc::success if the current path has been stored in result,
/// otherwise a platform specific error_code.
error_code current_path(SmallVectorImpl<char> &result);
@@ -288,7 +289,7 @@
/// @param path Input path.
/// @param existed Set to true if \a path existed, false if it did not.
/// undefined otherwise.
-/// @results errc::success if path has been removed and existed has been
+/// @returns errc::success if path has been removed and existed has been
/// successfully set, otherwise a platform specific error_code.
error_code remove(const Twine &path, bool &existed);
@@ -297,7 +298,7 @@
///
/// @param path Input path.
/// @param num_removed Number of files removed.
-/// @results errc::success if path has been removed and num_removed has been
+/// @returns errc::success if path has been removed and num_removed has been
/// successfully set, otherwise a platform specific error_code.
error_code remove_all(const Twine &path, uint32_t &num_removed);
@@ -322,7 +323,7 @@
/// @brief Does file exist?
///
/// @param status A file_status previously returned from stat.
-/// @results True if the file represented by status exists, false if it does
+/// @returns True if the file represented by status exists, false if it does
/// not.
bool exists(file_status status);
@@ -331,7 +332,7 @@
/// @param path Input path.
/// @param result Set to true if the file represented by status exists, false if
/// it does not. Undefined otherwise.
-/// @results errc::success if result has been successfully set, otherwise a
+/// @returns errc::success if result has been successfully set, otherwise a
/// platform specific error_code.
error_code exists(const Twine &path, bool &result);
@@ -349,7 +350,7 @@
///
/// assert(status_known(A) || status_known(B));
///
-/// @results True if A and B both represent the same file system entity, false
+/// @returns True if A and B both represent the same file system entity, false
/// otherwise.
bool equivalent(file_status A, file_status B);
@@ -361,7 +362,7 @@
/// @param B Input path B.
/// @param result Set to true if stat(A) and stat(B) have the same device and
/// inode (or equivalent).
-/// @results errc::success if result has been successfully set, otherwise a
+/// @returns errc::success if result has been successfully set, otherwise a
/// platform specific error_code.
error_code equivalent(const Twine &A, const Twine &B, bool &result);
@@ -383,7 +384,7 @@
/// @brief Does status represent a directory?
///
/// @param status A file_status previously returned from status.
-/// @results status.type() == file_type::directory_file.
+/// @returns status.type() == file_type::directory_file.
bool is_directory(file_status status);
/// @brief Is path a directory?
@@ -391,14 +392,14 @@
/// @param path Input path.
/// @param result Set to true if \a path is a directory, false if it is not.
/// Undefined otherwise.
-/// @results errc::success if result has been successfully set, otherwise a
+/// @returns errc::success if result has been successfully set, otherwise a
/// platform specific error_code.
error_code is_directory(const Twine &path, bool &result);
/// @brief Does status represent a regular file?
///
/// @param status A file_status previously returned from status.
-/// @results status_known(status) && status.type() == file_type::regular_file.
+/// @returns status_known(status) && status.type() == file_type::regular_file.
bool is_regular_file(file_status status);
/// @brief Is path a regular file?
@@ -406,7 +407,7 @@
/// @param path Input path.
/// @param result Set to true if \a path is a regular file, false if it is not.
/// Undefined otherwise.
-/// @results errc::success if result has been successfully set, otherwise a
+/// @returns errc::success if result has been successfully set, otherwise a
/// platform specific error_code.
error_code is_regular_file(const Twine &path, bool &result);
@@ -414,7 +415,7 @@
/// directory, regular file, or symlink?
///
/// @param status A file_status previously returned from status.
-/// @results exists(s) && !is_regular_file(s) && !is_directory(s) &&
+/// @returns exists(s) && !is_regular_file(s) && !is_directory(s) &&
/// !is_symlink(s)
bool is_other(file_status status);
@@ -424,14 +425,14 @@
/// @param path Input path.
/// @param result Set to true if \a path exists, but is not a directory, regular
/// file, or a symlink, false if it does not. Undefined otherwise.
-/// @results errc::success if result has been successfully set, otherwise a
+/// @returns errc::success if result has been successfully set, otherwise a
/// platform specific error_code.
error_code is_other(const Twine &path, bool &result);
/// @brief Does status represent a symlink?
///
/// @param status A file_status previously returned from stat.
-/// @param result status.type() == symlink_file.
+/// @returns status.type() == symlink_file.
bool is_symlink(file_status status);
/// @brief Is path a symlink?
@@ -439,7 +440,7 @@
/// @param path Input path.
/// @param result Set to true if \a path is a symlink, false if it is not.
/// Undefined otherwise.
-/// @results errc::success if result has been successfully set, otherwise a
+/// @returns errc::success if result has been successfully set, otherwise a
/// platform specific error_code.
error_code is_symlink(const Twine &path, bool &result);
@@ -447,28 +448,28 @@
///
/// @param path Input path.
/// @param result Set to the file status.
-/// @results errc::success if result has been successfully set, otherwise a
+/// @returns errc::success if result has been successfully set, otherwise a
/// platform specific error_code.
error_code status(const Twine &path, file_status &result);
/// @brief Modifies permission bits on a file
///
/// @param path Input path.
-/// @results errc::success if permissions have been changed, otherwise a
+/// @returns errc::success if permissions have been changed, otherwise a
/// platform specific error_code.
error_code permissions(const Twine &path, perms prms);
/// @brief Is status available?
///
-/// @param path Input path.
-/// @results True if status() != status_error.
+/// @param s Input file status.
+/// @returns True if status() != status_error.
bool status_known(file_status s);
/// @brief Is status available?
///
/// @param path Input path.
/// @param result Set to true if status() != status_error.
-/// @results errc::success if result has been successfully set, otherwise a
+/// @returns errc::success if result has been successfully set, otherwise a
/// platform specific error_code.
error_code status_known(const Twine &path, bool &result);
@@ -485,11 +486,11 @@
/// clang-%%-%%-%%-%%-%%.s => /tmp/clang-a0-b1-c2-d3-e4.s
///
/// @param model Name to base unique path off of.
-/// @param result_fs Set to the opened file's file descriptor.
+/// @param result_fd Set to the opened file's file descriptor.
/// @param result_path Set to the opened file's absolute path.
-/// @param makeAbsolute If true and @model is not an absolute path, a temp
+/// @param makeAbsolute If true and \a model is not an absolute path, a temp
/// directory will be prepended.
-/// @results errc::success if result_{fd,path} have been successfully set,
+/// @returns errc::success if result_{fd,path} have been successfully set,
/// otherwise a platform specific error_code.
error_code unique_file(const Twine &model, int &result_fd,
SmallVectorImpl<char> &result_path,
@@ -502,7 +503,7 @@
///
/// @param path Input path.
/// @param result Set to the canonicalized version of \a path.
-/// @results errc::success if result has been successfully set, otherwise a
+/// @returns errc::success if result has been successfully set, otherwise a
/// platform specific error_code.
error_code canonicalize(const Twine &path, SmallVectorImpl<char> &result);
@@ -510,7 +511,7 @@
///
/// @param path Input path.
/// @param magic Byte sequence to compare \a path's first len(magic) bytes to.
-/// @results errc::success if result has been successfully set, otherwise a
+/// @returns errc::success if result has been successfully set, otherwise a
/// platform specific error_code.
error_code has_magic(const Twine &path, const Twine &magic, bool &result);
@@ -521,7 +522,7 @@
/// @param result Set to the first \a len bytes in the file pointed to by
/// \a path. Or the entire file if file_size(path) < len, in which
/// case result.size() returns the size of the file.
-/// @results errc::success if result has been successfully set,
+/// @returns errc::success if result has been successfully set,
/// errc::value_too_large if len is larger than the file pointed to by
/// \a path, otherwise a platform specific error_code.
error_code get_magic(const Twine &path, uint32_t len,
@@ -534,14 +535,14 @@
///
/// @param path Input path.
/// @param result Set to the type of file, or LLVMFileType::Unknown_FileType.
-/// @results errc::success if result has been successfully set, otherwise a
+/// @returns errc::success if result has been successfully set, otherwise a
/// platform specific error_code.
error_code identify_magic(const Twine &path, file_magic &result);
/// @brief Get library paths the system linker uses.
///
/// @param result Set to the list of system library paths.
-/// @results errc::success if result has been successfully set, otherwise a
+/// @returns errc::success if result has been successfully set, otherwise a
/// platform specific error_code.
error_code GetSystemLibraryPaths(SmallVectorImpl<std::string> &result);
@@ -549,7 +550,7 @@
/// + LLVM_LIB_SEARCH_PATH + LLVM_LIBDIR.
///
/// @param result Set to the list of bitcode library paths.
-/// @results errc::success if result has been successfully set, otherwise a
+/// @returns errc::success if result has been successfully set, otherwise a
/// platform specific error_code.
error_code GetBitcodeLibraryPaths(SmallVectorImpl<std::string> &result);
@@ -562,7 +563,7 @@
///
/// @param short_name Library name one would give to the system linker.
/// @param result Set to the absolute path \a short_name represents.
-/// @results errc::success if result has been successfully set, otherwise a
+/// @returns errc::success if result has been successfully set, otherwise a
/// platform specific error_code.
error_code FindLibrary(const Twine &short_name, SmallVectorImpl<char> &result);
@@ -571,23 +572,99 @@
/// @param argv0 The program name as it was spelled on the command line.
/// @param MainAddr Address of some symbol in the executable (not in a library).
/// @param result Set to the absolute path of the current executable.
-/// @results errc::success if result has been successfully set, otherwise a
+/// @returns errc::success if result has been successfully set, otherwise a
/// platform specific error_code.
error_code GetMainExecutable(const char *argv0, void *MainAddr,
SmallVectorImpl<char> &result);
+/// This class represents a memory mapped file. It is based on
+/// boost::iostreams::mapped_file.
+class mapped_file_region {
+ mapped_file_region() LLVM_DELETED_FUNCTION;
+ mapped_file_region(mapped_file_region&) LLVM_DELETED_FUNCTION;
+ mapped_file_region &operator =(mapped_file_region&) LLVM_DELETED_FUNCTION;
+
+public:
+ enum mapmode {
+ readonly, ///< May only access map via const_data as read only.
+ readwrite, ///< May access map via data and modify it. Written to path.
+ priv ///< May modify via data, but changes are lost on destruction.
+ };
+
+private:
+ /// Platform specific mapping state.
+ mapmode Mode;
+ uint64_t Size;
+ void *Mapping;
+#ifdef LLVM_ON_WIN32
+ int FileDescriptor;
+ void *FileHandle;
+ void *FileMappingHandle;
+#endif
+
+ error_code init(int FD, uint64_t Offset);
+
+public:
+ typedef char char_type;
+
+#if LLVM_USE_RVALUE_REFERENCES
+ mapped_file_region(mapped_file_region&&);
+ mapped_file_region &operator =(mapped_file_region&&);
+#endif
+
+ /// Construct a mapped_file_region at \a path starting at \a offset of length
+ /// \a length and with access \a mode.
+ ///
+ /// \param path Path to the file to map. If it does not exist it will be
+ /// created.
+ /// \param mode How to map the memory.
+ /// \param length Number of bytes to map in starting at \a offset. If the file
+ /// is shorter than this, it will be extended. If \a length is
+ /// 0, the entire file will be mapped.
+ /// \param offset Byte offset from the beginning of the file where the map
+ /// should begin. Must be a multiple of
+ /// mapped_file_region::alignment().
+ /// \param ec This is set to errc::success if the map was constructed
+ /// successfully. Otherwise it is set to a platform dependent error.
+ mapped_file_region(const Twine &path,
+ mapmode mode,
+ uint64_t length,
+ uint64_t offset,
+ error_code &ec);
+
+ /// \param fd An open file descriptor to map. mapped_file_region takes
+ /// ownership. It must have been opened in the correct mode.
+ mapped_file_region(int fd,
+ mapmode mode,
+ uint64_t length,
+ uint64_t offset,
+ error_code &ec);
+
+ ~mapped_file_region();
+
+ mapmode flags() const;
+ uint64_t size() const;
+ char *data() const;
+
+ /// Get a const view of the data. Modifying this memory has undefined
+ /// behavior.
+ const char *const_data() const;
+
+ /// \returns The minimum alignment offset must be.
+ static int alignment();
+};
/// @brief Memory maps the contents of a file
///
/// @param path Path to file to map.
/// @param file_offset Byte offset in file where mapping should begin.
-/// @param size_t Byte length of range of the file to map.
+/// @param size Byte length of range of the file to map.
/// @param map_writable If true, the file will be mapped in r/w such
/// that changes to the mapped buffer will be flushed back
/// to the file. If false, the file will be mapped read-only
/// and the buffer will be read-only.
/// @param result Set to the start address of the mapped buffer.
-/// @results errc::success if result has been successfully set, otherwise a
+/// @returns errc::success if result has been successfully set, otherwise a
/// platform specific error_code.
error_code map_file_pages(const Twine &path, off_t file_offset, size_t size,
bool map_writable, void *&result);
@@ -597,7 +674,7 @@
///
/// @param base Pointer to the start of the buffer.
/// @param size Byte length of the range to unmap.
-/// @results errc::success if result has been successfully set, otherwise a
+/// @returns errc::success if result has been successfully set, otherwise a
/// platform specific error_code.
error_code unmap_file_pages(void *base, size_t size);
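As a quick orientation for the new mapped_file_region class above, here is a
minimal usage sketch; the file name "out.bin" and the 64-byte length are
invented for illustration, and the class is assumed to live in llvm::sys::fs
alongside the functions in this header:

  #include "llvm/Support/FileSystem.h"
  using namespace llvm;
  using namespace llvm::sys::fs;

  error_code EC;
  // Map out.bin read/write; per the constructor docs, a missing file is
  // created and a short file is extended to the requested length.
  mapped_file_region Region("out.bin", mapped_file_region::readwrite,
                            /*length=*/64, /*offset=*/0, EC);
  if (!EC) {
    Region.data()[0] = 'x'; // readwrite: the change is written back to disk.
  }                         // The destructor unmaps the region.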
Modified: llvm/branches/AMDILBackend/include/llvm/Support/Format.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/Format.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/Format.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/Format.h Tue Jan 15 11:16:16 2013
@@ -170,31 +170,47 @@
}
};
-/// format - This is a helper function that is used to produce formatted output.
-/// This is typically used like: OS << format("%0.4f", myfloat) << '\n';
+/// This is a helper function that is used to produce formatted output.
+///
+/// This is typically used like:
+/// \code
+/// OS << format("%0.4f", myfloat) << '\n';
+/// \endcode
template <typename T>
inline format_object1<T> format(const char *Fmt, const T &Val) {
return format_object1<T>(Fmt, Val);
}
-/// format - This is a helper function that is used to produce formatted output.
-/// This is typically used like: OS << format("%0.4f", myfloat) << '\n';
+/// This is a helper function that is used to produce formatted output.
+///
+/// This is typically used like:
+/// \code
+/// OS << format("%0.4f", myfloat) << '\n';
+/// \endcode
template <typename T1, typename T2>
inline format_object2<T1, T2> format(const char *Fmt, const T1 &Val1,
const T2 &Val2) {
return format_object2<T1, T2>(Fmt, Val1, Val2);
}
-/// format - This is a helper function that is used to produce formatted output.
-/// This is typically used like: OS << format("%0.4f", myfloat) << '\n';
+/// This is a helper function that is used to produce formatted output.
+///
+/// This is typically used like:
+/// \code
+/// OS << format("%0.4f", myfloat) << '\n';
+/// \endcode
template <typename T1, typename T2, typename T3>
inline format_object3<T1, T2, T3> format(const char *Fmt, const T1 &Val1,
const T2 &Val2, const T3 &Val3) {
return format_object3<T1, T2, T3>(Fmt, Val1, Val2, Val3);
}
-/// format - This is a helper function that is used to produce formatted output.
-/// This is typically used like: OS << format("%0.4f", myfloat) << '\n';
+/// This is a helper function that is used to produce formatted output.
+///
+/// This is typically used like:
+/// \code
+/// OS << format("%0.4f", myfloat) << '\n';
+/// \endcode
template <typename T1, typename T2, typename T3, typename T4>
inline format_object4<T1, T2, T3, T4> format(const char *Fmt, const T1 &Val1,
const T2 &Val2, const T3 &Val3,
@@ -202,8 +218,12 @@
return format_object4<T1, T2, T3, T4>(Fmt, Val1, Val2, Val3, Val4);
}
-/// format - This is a helper function that is used to produce formatted output.
-/// This is typically used like: OS << format("%0.4f", myfloat) << '\n';
+/// This is a helper function that is used to produce formatted output.
+///
+/// This is typically used like:
+/// \code
+/// OS << format("%0.4f", myfloat) << '\n';
+/// \endcode
template <typename T1, typename T2, typename T3, typename T4, typename T5>
inline format_object5<T1, T2, T3, T4, T5> format(const char *Fmt,const T1 &Val1,
const T2 &Val2, const T3 &Val3,
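All of the format() overloads above behave identically apart from arity; a
short usage sketch:

  #include "llvm/Support/Format.h"
  #include "llvm/Support/raw_ostream.h"

  // Each call selects the format_objectN matching its argument count.
  llvm::outs() << llvm::format("%-8s %6.3f", "pi:", 3.14159) << '\n';
  llvm::outs() << llvm::format("%d + %d = %d", 2, 3, 5) << '\n';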
Modified: llvm/branches/AMDILBackend/include/llvm/Support/FormattedStream.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/FormattedStream.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/FormattedStream.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/FormattedStream.h Tue Jan 15 11:16:16 2013
@@ -55,14 +55,15 @@
///
const char *Scanned;
- virtual void write_impl(const char *Ptr, size_t Size);
+ virtual void write_impl(const char *Ptr, size_t Size) LLVM_OVERRIDE;
/// current_pos - Return the current position within the stream,
/// not counting the bytes currently in the buffer.
- virtual uint64_t current_pos() const {
- // This has the same effect as calling TheStream.current_pos(),
- // but that interface is private.
- return TheStream->tell() - TheStream->GetNumBytesInBuffer();
+ virtual uint64_t current_pos() const LLVM_OVERRIDE {
+ // Our current position in the stream is all the contents which have been
+ // written to the underlying stream (*not* the current position of the
+ // underlying stream).
+ return TheStream->tell();
}
/// ComputeColumn - Examine the given output buffer and figure out which
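The revised current_pos() is what formatted_raw_ostream's column bookkeeping
builds on; a small sketch of a typical client (the column value is arbitrary):

  #include "llvm/Support/FormattedStream.h"
  #include "llvm/Support/raw_ostream.h"

  llvm::formatted_raw_ostream FOS(llvm::outs());
  FOS << "movl";
  FOS.PadToColumn(10);   // Pads using the position derived from current_pos().
  FOS << "%eax, %ebx\n";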
Modified: llvm/branches/AMDILBackend/include/llvm/Support/GCOV.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/GCOV.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/GCOV.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/GCOV.h Tue Jan 15 11:16:16 2013
@@ -27,13 +27,15 @@
class GCOVLines;
class FileInfo;
-enum GCOVFormat {
- InvalidGCOV,
- GCNO_402,
- GCNO_404,
- GCDA_402,
- GCDA_404
-};
+namespace GCOV {
+ enum GCOVFormat {
+ InvalidGCOV,
+ GCNO_402,
+ GCNO_404,
+ GCDA_402,
+ GCDA_404
+ };
+} // end GCOV namespace
/// GCOVBuffer - A wrapper around MemoryBuffer to provide GCOV specific
/// read operations.
@@ -42,20 +44,20 @@
GCOVBuffer(MemoryBuffer *B) : Buffer(B), Cursor(0) {}
/// readGCOVFormat - Read GCOV signature at the beginning of buffer.
- enum GCOVFormat readGCOVFormat() {
+ GCOV::GCOVFormat readGCOVFormat() {
StringRef Magic = Buffer->getBuffer().slice(0, 12);
Cursor = 12;
if (Magic == "oncg*404MVLL")
- return GCNO_404;
+ return GCOV::GCNO_404;
else if (Magic == "oncg*204MVLL")
- return GCNO_402;
+ return GCOV::GCNO_402;
else if (Magic == "adcg*404MVLL")
- return GCDA_404;
+ return GCOV::GCDA_404;
else if (Magic == "adcg*204MVLL")
- return GCDA_402;
+ return GCOV::GCDA_402;
Cursor = 0;
- return InvalidGCOV;
+ return GCOV::InvalidGCOV;
}
/// readFunctionTag - If cursor points to a function tag then increment the
@@ -128,7 +130,7 @@
StringRef Str = Buffer->getBuffer().slice(Cursor, Cursor+4);
assert (Str.empty() == false && "Unexpected memory buffer end!");
Cursor += 4;
- Result = *(uint32_t *)(Str.data());
+ Result = *(const uint32_t *)(Str.data());
return Result;
}
@@ -170,7 +172,7 @@
public:
GCOVFunction() : Ident(0), LineNumber(0) {}
~GCOVFunction();
- bool read(GCOVBuffer &Buffer, GCOVFormat Format);
+ bool read(GCOVBuffer &Buffer, GCOV::GCOVFormat Format);
void dump();
void collectLineCounts(FileInfo &FI);
private:
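A minimal sketch of checking a buffer's signature with the now-namespaced
enum; Buf is assumed to be a MemoryBuffer* supplied by the caller:

  GCOVBuffer GB(Buf);
  GCOV::GCOVFormat Format = GB.readGCOVFormat();
  if (Format == GCOV::InvalidGCOV)
    return false; // The magic matched no GCNO/GCDA variant.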
Modified: llvm/branches/AMDILBackend/include/llvm/Support/InstVisitor.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/InstVisitor.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/InstVisitor.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/InstVisitor.h Tue Jan 15 11:16:16 2013
@@ -209,6 +209,9 @@
RetTy visitMemMoveInst(MemMoveInst &I) { DELEGATE(MemTransferInst); }
RetTy visitMemTransferInst(MemTransferInst &I) { DELEGATE(MemIntrinsic); }
RetTy visitMemIntrinsic(MemIntrinsic &I) { DELEGATE(IntrinsicInst); }
+ RetTy visitVAStartInst(VAStartInst &I) { DELEGATE(IntrinsicInst); }
+ RetTy visitVAEndInst(VAEndInst &I) { DELEGATE(IntrinsicInst); }
+ RetTy visitVACopyInst(VACopyInst &I) { DELEGATE(IntrinsicInst); }
RetTy visitIntrinsicInst(IntrinsicInst &I) { DELEGATE(CallInst); }
// Call and Invoke are slightly different as they delegate first through
@@ -262,6 +265,9 @@
case Intrinsic::memcpy: DELEGATE(MemCpyInst);
case Intrinsic::memmove: DELEGATE(MemMoveInst);
case Intrinsic::memset: DELEGATE(MemSetInst);
+ case Intrinsic::vastart: DELEGATE(VAStartInst);
+ case Intrinsic::vaend: DELEGATE(VAEndInst);
+ case Intrinsic::vacopy: DELEGATE(VACopyInst);
case Intrinsic::not_intrinsic: break;
}
}
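A sketch of a visitor that uses the new hooks; the class name is invented,
and dispatch reaches it through the Intrinsic::vastart case added above:

  #include "llvm/Support/InstVisitor.h"

  struct VAStartCounter : public llvm::InstVisitor<VAStartCounter> {
    unsigned NumVAStarts;
    VAStartCounter() : NumVAStarts(0) {}
    void visitVAStartInst(llvm::VAStartInst &I) { ++NumVAStarts; }
  };
  // VAStartCounter VC; VC.visit(F); // Counts llvm.va_start calls in F.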
Modified: llvm/branches/AMDILBackend/include/llvm/Support/IntegersSubset.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/IntegersSubset.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/IntegersSubset.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/IntegersSubset.h Tue Jan 15 11:16:16 2013
@@ -411,8 +411,8 @@
unsigned getSize() const {
APInt sz(((const APInt&)getItem(0).getLow()).getBitWidth(), 0);
for (unsigned i = 0, e = getNumItems(); i != e; ++i) {
- const APInt &Low = getItem(i).getLow();
- const APInt &High = getItem(i).getHigh();
+ const APInt Low = getItem(i).getLow();
+ const APInt High = getItem(i).getHigh();
APInt S = High - Low + 1;
sz += S;
}
@@ -426,8 +426,8 @@
APInt getSingleValue(unsigned idx) const {
APInt sz(((const APInt&)getItem(0).getLow()).getBitWidth(), 0);
for (unsigned i = 0, e = getNumItems(); i != e; ++i) {
- const APInt &Low = getItem(i).getLow();
- const APInt &High = getItem(i).getHigh();
+ const APInt Low = getItem(i).getLow();
+ const APInt High = getItem(i).getHigh();
APInt S = High - Low + 1;
APInt oldSz = sz;
sz += S;
Modified: llvm/branches/AMDILBackend/include/llvm/Support/IntegersSubsetMapping.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/IntegersSubsetMapping.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/IntegersSubsetMapping.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/IntegersSubsetMapping.h Tue Jan 15 11:16:16 2013
@@ -42,6 +42,7 @@
struct RangeEx : public RangeTy {
RangeEx() : Weight(1) {}
RangeEx(const RangeTy &R) : RangeTy(R), Weight(1) {}
+ RangeEx(const RangeTy &R, unsigned W) : RangeTy(R), Weight(W) {}
RangeEx(const IntTy &C) : RangeTy(C), Weight(1) {}
RangeEx(const IntTy &L, const IntTy &H) : RangeTy(L, H), Weight(1) {}
RangeEx(const IntTy &L, const IntTy &H, unsigned W) :
@@ -316,13 +317,13 @@
Items.clear();
const IntTy *Low = &OldItems.begin()->first.getLow();
const IntTy *High = &OldItems.begin()->first.getHigh();
- unsigned Weight = 1;
+ unsigned Weight = OldItems.begin()->first.Weight;
SuccessorClass *Successor = OldItems.begin()->second;
for (CaseItemIt j = OldItems.begin(), i = j++, e = OldItems.end();
j != e; i = j++) {
if (isJoinable(i, j)) {
const IntTy *CurHigh = &j->first.getHigh();
- ++Weight;
+ Weight += j->first.Weight;
if (*CurHigh > *High)
High = CurHigh;
} else {
@@ -330,7 +331,7 @@
add(R, Successor);
Low = &j->first.getLow();
High = &j->first.getHigh();
- Weight = 1;
+ Weight = j->first.Weight;
Successor = j->second;
}
}
@@ -362,10 +363,17 @@
/// Adds all ranges and values from given ranges set to the current
/// mapping.
- void add(const IntegersSubsetTy &CRS, SuccessorClass *S = 0) {
+ void add(const IntegersSubsetTy &CRS, SuccessorClass *S = 0,
+ unsigned Weight = 0) {
+ unsigned ItemWeight = 1;
+ if (Weight)
+ // Weight is associated with CRS, for now we perform a division to
+ // get the weight for each item.
+ ItemWeight = Weight / CRS.getNumItems();
for (unsigned i = 0, e = CRS.getNumItems(); i < e; ++i) {
RangeTy R = CRS.getItem(i);
- add(R, S);
+ RangeEx REx(R, ItemWeight);
+ add(REx, S);
}
}
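For orientation, with the weighted add() above (numbers invented): adding a
subset of three items with Weight = 9 gives each range ItemWeight = 9 / 3 = 3,
while Weight = 10 also yields 3 per range, the remainder of 1 being dropped by
the integer division rather than redistributed.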
Modified: llvm/branches/AMDILBackend/include/llvm/Support/LEB128.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/LEB128.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/LEB128.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/LEB128.h Tue Jan 15 11:16:16 2013
@@ -15,11 +15,11 @@
#ifndef LLVM_SYSTEM_LEB128_H
#define LLVM_SYSTEM_LEB128_H
-#include <llvm/Support/raw_ostream.h>
+#include "llvm/Support/raw_ostream.h"
namespace llvm {
-/// Utility function to encode a SLEB128 value.
+/// Utility function to encode a SLEB128 value to an output stream.
static inline void encodeSLEB128(int64_t Value, raw_ostream &OS) {
bool More;
do {
@@ -34,7 +34,7 @@
} while (More);
}
-/// Utility function to encode a ULEB128 value.
+/// Utility function to encode a ULEB128 value to an output stream.
static inline void encodeULEB128(uint64_t Value, raw_ostream &OS,
unsigned Padding = 0) {
do {
@@ -53,6 +53,43 @@
}
}
+/// Utility function to encode a ULEB128 value to a buffer. Returns
+/// the length in bytes of the encoded value.
+static inline unsigned encodeULEB128(uint64_t Value, uint8_t *p,
+ unsigned Padding = 0) {
+ uint8_t *orig_p = p;
+ do {
+ uint8_t Byte = Value & 0x7f;
+ Value >>= 7;
+ if (Value != 0 || Padding != 0)
+ Byte |= 0x80; // Mark this byte to show that more bytes will follow.
+ *p++ = Byte;
+ } while (Value != 0);
+
+ // Pad with 0x80 and emit a null byte at the end.
+ if (Padding != 0) {
+ for (; Padding != 1; --Padding)
+ *p++ = '\x80';
+ *p++ = '\x00';
+ }
+ return (unsigned)(p - orig_p);
+}
+
+
+/// Utility function to decode a ULEB128 value.
+static inline uint64_t decodeULEB128(const uint8_t *p, unsigned *n = 0) {
+ const uint8_t *orig_p = p;
+ uint64_t Value = 0;
+ unsigned Shift = 0;
+ do {
+ Value += uint64_t(*p & 0x7f) << Shift; // Widen before shifting; Shift may reach 63.
+ Shift += 7;
+ } while (*p++ >= 128);
+ if (n)
+ *n = (unsigned)(p - orig_p);
+ return Value;
+}
+
} // namespace llvm
#endif // LLVM_SYSTEM_LEB128_H
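A round-trip sketch of the new buffer-based encoder and decoder; 624485 is the
classic ULEB128 example value, which encodes to 0xE5 0x8E 0x26:

  #include "llvm/Support/LEB128.h"

  uint8_t Buf[10];
  unsigned Len = llvm::encodeULEB128(624485, Buf); // Len == 3
  unsigned N;
  uint64_t V = llvm::decodeULEB128(Buf, &N);       // V == 624485, N == 3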
Modified: llvm/branches/AMDILBackend/include/llvm/Support/LockFileManager.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/LockFileManager.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/LockFileManager.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/LockFileManager.h Tue Jan 15 11:16:16 2013
@@ -47,8 +47,8 @@
Optional<std::pair<std::string, int> > Owner;
Optional<error_code> Error;
- LockFileManager(const LockFileManager &);
- LockFileManager &operator=(const LockFileManager &);
+ LockFileManager(const LockFileManager &) LLVM_DELETED_FUNCTION;
+ LockFileManager &operator=(const LockFileManager &) LLVM_DELETED_FUNCTION;
static Optional<std::pair<std::string, int> >
readLockFile(StringRef LockFileName);
Modified: llvm/branches/AMDILBackend/include/llvm/Support/MathExtras.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/MathExtras.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/MathExtras.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/MathExtras.h Tue Jan 15 11:16:16 2013
@@ -431,21 +431,22 @@
return A + 1;
}
-/// RoundUpToAlignment - Returns the next integer (mod 2**64) that is
-/// greater than or equal to \arg Value and is a multiple of \arg
-/// Align. Align must be non-zero.
+/// Returns the next integer (mod 2**64) that is greater than or equal to
+/// \p Value and is a multiple of \p Align. \p Align must be non-zero.
///
/// Examples:
-/// RoundUpToAlignment(5, 8) = 8
-/// RoundUpToAlignment(17, 8) = 24
-/// RoundUpToAlignment(~0LL, 8) = 0
+/// \code
+/// RoundUpToAlignment(5, 8) = 8
+/// RoundUpToAlignment(17, 8) = 24
+/// RoundUpToAlignment(~0LL, 8) = 0
+/// \endcode
inline uint64_t RoundUpToAlignment(uint64_t Value, uint64_t Align) {
return ((Value + Align - 1) / Align) * Align;
}
-/// OffsetToAlignment - Return the offset to the next integer (mod 2**64) that
-/// is greater than or equal to \arg Value and is a multiple of \arg
-/// Align. Align must be non-zero.
+/// Returns the offset to the next integer (mod 2**64) that is greater than
+/// or equal to \p Value and is a multiple of \p Align. \p Align must be
+/// non-zero.
inline uint64_t OffsetToAlignment(uint64_t Value, uint64_t Align) {
return RoundUpToAlignment(Value, Align) - Value;
}
@@ -463,12 +464,24 @@
return int32_t(x << (32 - B)) >> (32 - B);
}
+/// \brief Sign extend number in the bottom B bits of X to a 32-bit int.
+/// Requires 0 < B <= 32.
+inline int32_t SignExtend32(uint32_t X, unsigned B) {
+ return int32_t(X << (32 - B)) >> (32 - B);
+}
+
/// SignExtend64 - Sign extend B-bit number x to 64-bit int.
/// Usage int64_t r = SignExtend64<5>(x);
template <unsigned B> inline int64_t SignExtend64(uint64_t x) {
return int64_t(x << (64 - B)) >> (64 - B);
}
+/// \brief Sign extend number in the bottom B bits of X to a 64-bit int.
+/// Requires 0 < B <= 64.
+inline int64_t SignExtend64(uint64_t X, unsigned B) {
+ return int64_t(X << (64 - B)) >> (64 - B);
+}
+
} // End llvm namespace
#endif
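The new run-time-width overloads mirror the existing templates; a short
sketch:

  #include "llvm/Support/MathExtras.h"

  uint32_t Field = 0x1F;                    // A 5-bit field, all ones.
  int32_t A = llvm::SignExtend32<5>(Field); // Compile-time width: -1.
  int32_t B = llvm::SignExtend32(Field, 5); // Run-time width: also -1.
  uint64_t Pad = llvm::OffsetToAlignment(17, 8); // 24 - 17 == 7.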
Modified: llvm/branches/AMDILBackend/include/llvm/Support/Memory.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/Memory.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/Memory.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/Memory.h Tue Jan 15 11:16:16 2013
@@ -15,6 +15,7 @@
#define LLVM_SYSTEM_MEMORY_H
#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/system_error.h"
#include <string>
namespace llvm {
@@ -43,6 +44,70 @@
/// @brief An abstraction for memory operations.
class Memory {
public:
+ enum ProtectionFlags {
+ MF_READ = 0x1000000,
+ MF_WRITE = 0x2000000,
+ MF_EXEC = 0x4000000
+ };
+
+ /// This method allocates a block of memory that is suitable for loading
+ /// dynamically generated code (e.g. JIT). An attempt to allocate
+ /// \p NumBytes bytes of virtual memory is made.
+ /// \p NearBlock may point to an existing allocation in which case
+ /// an attempt is made to allocate more memory near the existing block.
+ /// The actual allocated address is not guaranteed to be near the requested
+ /// address.
+ /// \p Flags is used to set the initial protection flags for the block
+ /// of memory.
+ /// \p EC [out] returns an object describing any error that occurs.
+ ///
+ /// This method may allocate more than the number of bytes requested. The
+ /// actual number of bytes allocated is indicated in the returned
+ /// MemoryBlock.
+ ///
+ /// The start of the allocated block must be aligned with the
+ /// system allocation granularity (64K on Windows, page size on Linux).
+ /// If the address following \p NearBlock is not so aligned, it will be
+ /// rounded up to the next allocation granularity boundary.
+ ///
+ /// \returns a non-null MemoryBlock if the function was successful,
+ /// otherwise a null MemoryBlock with \p EC describing the error.
+ ///
+ /// @brief Allocate mapped memory.
+ static MemoryBlock allocateMappedMemory(size_t NumBytes,
+ const MemoryBlock *const NearBlock,
+ unsigned Flags,
+ error_code &EC);
+
+ /// This method releases a block of memory that was allocated with the
+ /// allocateMappedMemory method. It should not be used to release any
+ /// memory block allocated any other way.
+ /// \p Block describes the memory to be released.
+ ///
+ /// \returns error_success if the function was successful, or an error_code
+ /// describing the failure if an error occurred.
+ ///
+ /// @brief Release mapped memory.
+ static error_code releaseMappedMemory(MemoryBlock &Block);
+
+ /// This method sets the protection flags for a block of memory to the
+ /// state specified by \p Flags. The behavior is not specified if the
+ /// memory was not allocated using the allocateMappedMemory method.
+ /// \p Block describes the memory block to be protected.
+ /// \p Flags specifies the new protection state to be assigned to the block.
+ /// \p ErrMsg [out] returns a string describing any error that occurred.
+ ///
+ /// If \p Flags is MF_WRITE, the actual behavior varies
+ /// with the operating system (i.e. MF_READ | MF_WRITE on Windows) and the
+ /// target architecture (i.e. MF_WRITE -> MF_READ | MF_WRITE on i386).
+ ///
+ /// \returns error_success if the function was successful, or an error_code
+ /// describing the failure if an error occurred.
+ ///
+ /// @brief Set memory protection state.
+ static error_code protectMappedMemory(const MemoryBlock &Block,
+ unsigned Flags);
+
/// This method allocates a block of Read/Write/Execute memory that is
/// suitable for executing dynamically generated code (e.g. JIT). An
/// attempt to allocate \p NumBytes bytes of virtual memory is made.
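A typical JIT-style sequence with the new mapped-memory API, sketched with
error handling trimmed:

  #include "llvm/Support/Memory.h"
  using namespace llvm;
  using namespace llvm::sys;

  error_code EC;
  MemoryBlock MB = Memory::allocateMappedMemory(
      4096, /*NearBlock=*/0, Memory::MF_READ | Memory::MF_WRITE, EC);
  if (!EC) {
    // ... emit generated code into MB.base() ...
    Memory::protectMappedMemory(MB, Memory::MF_READ | Memory::MF_EXEC);
    // ... execute, then release the block ...
    Memory::releaseMappedMemory(MB);
  }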
Modified: llvm/branches/AMDILBackend/include/llvm/Support/MemoryBuffer.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/MemoryBuffer.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/MemoryBuffer.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/MemoryBuffer.h Tue Jan 15 11:16:16 2013
@@ -15,6 +15,7 @@
#define LLVM_SUPPORT_MEMORYBUFFER_H
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataTypes.h"
namespace llvm {
@@ -36,8 +37,8 @@
const char *BufferStart; // Start of the buffer.
const char *BufferEnd; // End of the buffer.
- MemoryBuffer(const MemoryBuffer &); // DO NOT IMPLEMENT
- MemoryBuffer &operator=(const MemoryBuffer &); // DO NOT IMPLEMENT
+ MemoryBuffer(const MemoryBuffer &) LLVM_DELETED_FUNCTION;
+ MemoryBuffer &operator=(const MemoryBuffer &) LLVM_DELETED_FUNCTION;
protected:
MemoryBuffer() {}
void init(const char *BufStart, const char *BufEnd,
Modified: llvm/branches/AMDILBackend/include/llvm/Support/Mutex.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/Mutex.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/Mutex.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/Mutex.h Tue Jan 15 11:16:16 2013
@@ -14,6 +14,7 @@
#ifndef LLVM_SYSTEM_MUTEX_H
#define LLVM_SYSTEM_MUTEX_H
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Threading.h"
#include <cassert>
@@ -75,8 +76,8 @@
/// @name Do Not Implement
/// @{
private:
- MutexImpl(const MutexImpl & original);
- void operator=(const MutexImpl &);
+ MutexImpl(const MutexImpl &) LLVM_DELETED_FUNCTION;
+ void operator=(const MutexImpl &) LLVM_DELETED_FUNCTION;
/// @}
};
Modified: llvm/branches/AMDILBackend/include/llvm/Support/MutexGuard.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/MutexGuard.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/MutexGuard.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/MutexGuard.h Tue Jan 15 11:16:16 2013
@@ -26,8 +26,8 @@
/// @brief Guard a section of code with a Mutex.
class MutexGuard {
sys::Mutex &M;
- MutexGuard(const MutexGuard &); // DO NOT IMPLEMENT
- void operator=(const MutexGuard &); // DO NOT IMPLEMENT
+ MutexGuard(const MutexGuard &) LLVM_DELETED_FUNCTION;
+ void operator=(const MutexGuard &) LLVM_DELETED_FUNCTION;
public:
MutexGuard(sys::Mutex &m) : M(m) { M.acquire(); }
~MutexGuard() { M.release(); }
Modified: llvm/branches/AMDILBackend/include/llvm/Support/NoFolder.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/NoFolder.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/NoFolder.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/NoFolder.h Tue Jan 15 11:16:16 2013
@@ -181,6 +181,12 @@
ArrayRef<Constant *> IdxList) const {
return ConstantExpr::getGetElementPtr(C, IdxList);
}
+ Constant *CreateGetElementPtr(Constant *C, Constant *Idx) const {
+ // This form of the function only exists to avoid ambiguous overload
+ // warnings about whether to convert Idx to ArrayRef<Constant *> or
+ // ArrayRef<Value *>.
+ return ConstantExpr::getGetElementPtr(C, Idx);
+ }
Instruction *CreateGetElementPtr(Constant *C,
ArrayRef<Value *> IdxList) const {
return GetElementPtrInst::Create(C, IdxList);
@@ -190,6 +196,12 @@
ArrayRef<Constant *> IdxList) const {
return ConstantExpr::getInBoundsGetElementPtr(C, IdxList);
}
+ Constant *CreateInBoundsGetElementPtr(Constant *C, Constant *Idx) const {
+ // This form of the function only exists to avoid ambiguous overload
+ // warnings about whether to convert Idx to ArrayRef<Constant *> or
+ // ArrayRef<Value *>.
+ return ConstantExpr::getInBoundsGetElementPtr(C, Idx);
+ }
Instruction *CreateInBoundsGetElementPtr(Constant *C,
ArrayRef<Value *> IdxList) const {
return GetElementPtrInst::CreateInBounds(C, IdxList);
Modified: llvm/branches/AMDILBackend/include/llvm/Support/PathV1.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/PathV1.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/PathV1.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/PathV1.h Tue Jan 15 11:16:16 2013
@@ -683,8 +683,8 @@
/// This function returns status information about the file. The type of
/// path (file or directory) is updated to reflect the actual contents
/// of the file system.
- /// @returns 0 on failure, with Error explaining why (if non-zero)
- /// @returns a pointer to a FileStatus structure on success.
+ /// @returns 0 on failure, with Error explaining why (if non-zero),
+ /// otherwise returns a pointer to a FileStatus structure on success.
/// @brief Get file status.
const FileStatus *getFileStatus(
bool forceUpdate = false, ///< Force an update from the file system
Modified: llvm/branches/AMDILBackend/include/llvm/Support/PathV2.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/PathV2.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/PathV2.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/PathV2.h Tue Jan 15 11:16:16 2013
@@ -39,13 +39,14 @@
/// The backwards traversal order is the reverse of forward traversal.
///
/// Iteration examples. Each component is separated by ',':
-/// / => /
-/// /foo => /,foo
-/// foo/ => foo,.
-/// /foo/bar => /,foo,bar
-/// ../ => ..,.
-/// C:\foo\bar => C:,/,foo,bar
-///
+/// @code
+/// / => /
+/// /foo => /,foo
+/// foo/ => foo,.
+/// /foo/bar => /,foo,bar
+/// ../ => ..,.
+/// C:\foo\bar => C:,/,foo,bar
+/// @endcode
class const_iterator {
StringRef Path; ///< The entire path.
StringRef Component; ///< The current component. Not necessarily in Path.
@@ -107,18 +108,22 @@
/// @brief Remove the last component from \a path unless it is the root dir.
///
-/// directory/filename.cpp => directory/
-/// directory/ => directory
-/// / => /
+/// @code
+/// directory/filename.cpp => directory/
+/// directory/ => directory
+/// / => /
+/// @endcode
///
/// @param path A path that is modified to not have a file component.
void remove_filename(SmallVectorImpl<char> &path);
/// @brief Replace the file extension of \a path with \a extension.
///
-/// ./filename.cpp => ./filename.extension
-/// ./filename => ./filename.extension
-/// ./ => ./.extension
+/// @code
+/// ./filename.cpp => ./filename.extension
+/// ./filename => ./filename.extension
+/// ./ => ./.extension
+/// @endcode
///
/// @param path A path that has its extension replaced with \a extension.
/// @param extension The extension to be added. It may be empty. It may also
@@ -128,12 +133,14 @@
/// @brief Append to path.
///
-/// /foo + bar/f => /foo/bar/f
-/// /foo/ + bar/f => /foo/bar/f
-/// foo + bar/f => foo/bar/f
+/// @code
+/// /foo + bar/f => /foo/bar/f
+/// /foo/ + bar/f => /foo/bar/f
+/// foo + bar/f => foo/bar/f
+/// @endcode
///
/// @param path Set to \a path + \a component.
-/// @param component The component to be appended to \a path.
+/// @param a The component to be appended to \a path.
void append(SmallVectorImpl<char> &path, const Twine &a,
const Twine &b = "",
const Twine &c = "",
@@ -141,9 +148,11 @@
/// @brief Append to path.
///
-/// /foo + [bar,f] => /foo/bar/f
-/// /foo/ + [bar,f] => /foo/bar/f
-/// foo + [bar,f] => foo/bar/f
+/// @code
+/// /foo + [bar,f] => /foo/bar/f
+/// /foo/ + [bar,f] => /foo/bar/f
+/// foo + [bar,f] => foo/bar/f
+/// @endcode
///
/// @param path Set to \a path + [\a begin, \a end).
/// @param begin Start of components to append.
@@ -169,9 +178,11 @@
/// @brief Get root name.
///
-/// //net/hello => //net
-/// c:/hello => c: (on Windows, on other platforms nothing)
-/// /hello => <empty>
+/// @code
+/// //net/hello => //net
+/// c:/hello => c: (on Windows, on other platforms nothing)
+/// /hello => <empty>
+/// @endcode
///
/// @param path Input path.
/// @result The root name of \a path if it has one, otherwise "".
@@ -179,9 +190,11 @@
/// @brief Get root directory.
///
-/// /goo/hello => /
-/// c:/hello => /
-/// d/file.txt => <empty>
+/// @code
+/// /goo/hello => /
+/// c:/hello => /
+/// d/file.txt => <empty>
+/// @endcode
///
/// @param path Input path.
/// @result The root directory of \a path if it has one, otherwise
@@ -198,9 +211,11 @@
/// @brief Get relative path.
///
-/// C:\hello\world => hello\world
-/// foo/bar => foo/bar
-/// /foo/bar => foo/bar
+/// @code
+/// C:\hello\world => hello\world
+/// foo/bar => foo/bar
+/// /foo/bar => foo/bar
+/// @endcode
///
/// @param path Input path.
/// @result The path starting after root_path if one exists, otherwise "".
@@ -208,9 +223,11 @@
/// @brief Get parent path.
///
-/// / => <empty>
-/// /foo => /
-/// foo/../bar => foo/..
+/// @code
+/// / => <empty>
+/// /foo => /
+/// foo/../bar => foo/..
+/// @endcode
///
/// @param path Input path.
/// @result The parent path of \a path if one exists, otherwise "".
@@ -218,10 +235,12 @@
/// @brief Get filename.
///
-/// /foo.txt => foo.txt
-/// . => .
-/// .. => ..
-/// / => /
+/// @code
+/// /foo.txt => foo.txt
+/// . => .
+/// .. => ..
+/// / => /
+/// @endcode
///
/// @param path Input path.
/// @result The filename part of \a path. This is defined as the last component
@@ -234,11 +253,13 @@
/// substring of filename ending at (but not including) the last dot. Otherwise
/// it is filename.
///
-/// /foo/bar.txt => bar
-/// /foo/bar => bar
-/// /foo/.txt => <empty>
-/// /foo/. => .
-/// /foo/.. => ..
+/// @code
+/// /foo/bar.txt => bar
+/// /foo/bar => bar
+/// /foo/.txt => <empty>
+/// /foo/. => .
+/// /foo/.. => ..
+/// @endcode
///
/// @param path Input path.
/// @result The stem of \a path.
@@ -250,9 +271,11 @@
/// substring of filename starting at (and including) the last dot, and ending
/// at the end of \a path. Otherwise "".
///
-/// /foo/bar.txt => .txt
-/// /foo/bar => <empty>
-/// /foo/.txt => .txt
+/// @code
+/// /foo/bar.txt => .txt
+/// /foo/bar => <empty>
+/// /foo/.txt => .txt
+/// @endcode
///
/// @param path Input path.
/// @result The extension of \a path.
@@ -272,7 +295,7 @@
/// ignored if the user or system has set the typical environment variable
/// (e.g., TEMP on Windows, TMPDIR on *nix) to specify a temporary directory.
///
-/// @param Result Holds the resulting path name.
+/// @param result Holds the resulting path name.
void system_temp_directory(bool erasedOnReboot, SmallVectorImpl<char> &result);
/// @brief Has root name?
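The decomposition functions documented above combine as in this sketch:

  #include "llvm/Support/PathV2.h"
  using namespace llvm;

  StringRef P = "/foo/bar.txt";
  StringRef File = sys::path::filename(P);    // "bar.txt"
  StringRef Stem = sys::path::stem(P);        // "bar"
  StringRef Ext  = sys::path::extension(P);   // ".txt"
  StringRef Dir  = sys::path::parent_path(P); // "/foo"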
Modified: llvm/branches/AMDILBackend/include/llvm/Support/PrettyStackTrace.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/PrettyStackTrace.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/PrettyStackTrace.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/PrettyStackTrace.h Tue Jan 15 11:16:16 2013
@@ -16,6 +16,8 @@
#ifndef LLVM_SUPPORT_PRETTYSTACKTRACE_H
#define LLVM_SUPPORT_PRETTYSTACKTRACE_H
+#include "llvm/Support/Compiler.h"
+
namespace llvm {
class raw_ostream;
@@ -32,8 +34,8 @@
/// virtual stack trace. This gets dumped out if the program crashes.
class PrettyStackTraceEntry {
const PrettyStackTraceEntry *NextEntry;
- PrettyStackTraceEntry(const PrettyStackTraceEntry &); // DO NOT IMPLEMENT
- void operator=(const PrettyStackTraceEntry&); // DO NOT IMPLEMENT
+ PrettyStackTraceEntry(const PrettyStackTraceEntry &) LLVM_DELETED_FUNCTION;
+ void operator=(const PrettyStackTraceEntry&) LLVM_DELETED_FUNCTION;
public:
PrettyStackTraceEntry();
virtual ~PrettyStackTraceEntry();
@@ -52,7 +54,7 @@
const char *Str;
public:
PrettyStackTraceString(const char *str) : Str(str) {}
- virtual void print(raw_ostream &OS) const;
+ virtual void print(raw_ostream &OS) const LLVM_OVERRIDE;
};
/// PrettyStackTraceProgram - This object prints a specified program arguments
@@ -63,7 +65,7 @@
public:
PrettyStackTraceProgram(int argc, const char * const*argv)
: ArgC(argc), ArgV(argv) {}
- virtual void print(raw_ostream &OS) const;
+ virtual void print(raw_ostream &OS) const LLVM_OVERRIDE;
};
} // end namespace llvm
Modified: llvm/branches/AMDILBackend/include/llvm/Support/Program.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/Program.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/Program.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/Program.h Tue Jan 15 11:16:16 2013
@@ -34,8 +34,8 @@
void *Data_;
// Noncopyable.
- Program(const Program& other);
- Program& operator=(const Program& other);
+ Program(const Program& other) LLVM_DELETED_FUNCTION;
+ Program& operator=(const Program& other) LLVM_DELETED_FUNCTION;
/// @name Methods
/// @{
Modified: llvm/branches/AMDILBackend/include/llvm/Support/RWMutex.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/RWMutex.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/RWMutex.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/RWMutex.h Tue Jan 15 11:16:16 2013
@@ -14,6 +14,7 @@
#ifndef LLVM_SYSTEM_RWMUTEX_H
#define LLVM_SYSTEM_RWMUTEX_H
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Threading.h"
#include <cassert>
@@ -75,8 +76,8 @@
/// @name Do Not Implement
/// @{
private:
- RWMutexImpl(const RWMutexImpl & original);
- void operator=(const RWMutexImpl &);
+ RWMutexImpl(const RWMutexImpl & original) LLVM_DELETED_FUNCTION;
+ void operator=(const RWMutexImpl &) LLVM_DELETED_FUNCTION;
/// @}
};
Modified: llvm/branches/AMDILBackend/include/llvm/Support/Regex.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/Regex.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/Regex.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/Regex.h Tue Jan 15 11:16:16 2013
@@ -36,7 +36,7 @@
Newline=2
};
- /// Compiles the given POSIX Extended Regular Expression \arg Regex.
+ /// Compiles the given POSIX Extended Regular Expression \p Regex.
/// This implementation supports regexes and matching strings with embedded
/// NUL characters.
Regex(StringRef Regex, unsigned Flags = NoFlags);
@@ -51,17 +51,17 @@
/// many entries plus one for the whole regex (as element 0).
unsigned getNumMatches() const;
- /// matches - Match the regex against a given \arg String.
+ /// matches - Match the regex against a given \p String.
///
/// \param Matches - If given, on a successful match this will be filled in
- /// with references to the matched group expressions (inside \arg String),
+ /// with references to the matched group expressions (inside \p String),
/// the first group is always the entire pattern.
///
/// This returns true on a successful match.
bool match(StringRef String, SmallVectorImpl<StringRef> *Matches = 0);
/// sub - Return the result of replacing the first match of the regex in
- /// \arg String with the \arg Repl string. Backreferences like "\0" in the
+ /// \p String with the \p Repl string. Backreferences like "\0" in the
/// replacement string are replaced with the appropriate match substring.
///
/// Note that the replacement string has backslash escaping performed on
Modified: llvm/branches/AMDILBackend/include/llvm/Support/Registry.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/Registry.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/Registry.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/Registry.h Tue Jan 15 11:16:16 2013
@@ -37,7 +37,7 @@
/// is necessary to define an alternate traits class.
template <typename T>
class RegistryTraits {
- RegistryTraits(); // Do not implement.
+ RegistryTraits() LLVM_DELETED_FUNCTION;
public:
typedef SimpleRegistryEntry<T> entry;
@@ -63,7 +63,7 @@
class iterator;
private:
- Registry(); // Do not implement.
+ Registry() LLVM_DELETED_FUNCTION;
static void Announce(const entry &E) {
for (listener *Cur = ListenerHead; Cur; Cur = Cur->Next)
@@ -120,6 +120,7 @@
/// Abstract base class for registry listeners, which are informed when new
/// entries are added to the registry. Simply subclass and instantiate:
///
+ /// \code
/// class CollectorPrinter : public Registry<Collector>::listener {
/// protected:
/// void registered(const Registry<Collector>::entry &e) {
@@ -131,7 +132,7 @@
/// };
///
/// CollectorPrinter Printer;
- ///
+ /// \endcode
class listener {
listener *Prev, *Next;
Modified: llvm/branches/AMDILBackend/include/llvm/Support/SourceMgr.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/SourceMgr.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/SourceMgr.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/SourceMgr.h Tue Jan 15 11:16:16 2013
@@ -64,9 +64,9 @@
DiagHandlerTy DiagHandler;
void *DiagContext;
-
- SourceMgr(const SourceMgr&); // DO NOT IMPLEMENT
- void operator=(const SourceMgr&); // DO NOT IMPLEMENT
+
+ SourceMgr(const SourceMgr&) LLVM_DELETED_FUNCTION;
+ void operator=(const SourceMgr&) LLVM_DELETED_FUNCTION;
public:
SourceMgr() : LineNoCache(0), DiagHandler(0), DiagContext(0) {}
~SourceMgr();
@@ -145,7 +145,7 @@
/// GetMessage - Return an SMDiagnostic at the specified location with the
/// specified string.
///
- /// @param Type - If non-null, the kind of message (e.g., "error") which is
+ /// @param Msg If non-null, the kind of message (e.g., "error") which is
/// prefixed to the message.
SMDiagnostic GetMessage(SMLoc Loc, DiagKind Kind, const Twine &Msg,
ArrayRef<SMRange> Ranges = ArrayRef<SMRange>()) const;
Modified: llvm/branches/AMDILBackend/include/llvm/Support/StreamableMemoryObject.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/StreamableMemoryObject.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/StreamableMemoryObject.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/StreamableMemoryObject.h Tue Jan 15 11:16:16 2013
@@ -12,6 +12,7 @@
#define STREAMABLEMEMORYOBJECT_H_
#include "llvm/ADT/OwningPtr.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/MemoryObject.h"
#include "llvm/Support/DataStream.h"
#include <vector>
@@ -107,14 +108,15 @@
class StreamingMemoryObject : public StreamableMemoryObject {
public:
StreamingMemoryObject(DataStreamer *streamer);
- virtual uint64_t getBase() const { return 0; }
- virtual uint64_t getExtent() const;
- virtual int readByte(uint64_t address, uint8_t* ptr) const;
+ virtual uint64_t getBase() const LLVM_OVERRIDE { return 0; }
+ virtual uint64_t getExtent() const LLVM_OVERRIDE;
+ virtual int readByte(uint64_t address, uint8_t* ptr) const LLVM_OVERRIDE;
virtual int readBytes(uint64_t address,
uint64_t size,
uint8_t* buf,
- uint64_t* copied) const ;
- virtual const uint8_t *getPointer(uint64_t address, uint64_t size) const {
+ uint64_t* copied) const LLVM_OVERRIDE;
+ virtual const uint8_t *getPointer(uint64_t address,
+ uint64_t size) const LLVM_OVERRIDE {
// This could be fixed by ensuring the bytes are fetched and making a copy,
// requiring that the bitcode size be known, or otherwise ensuring that
// the memory doesn't go away/get reallocated, but it's
@@ -122,8 +124,8 @@
assert(0 && "getPointer in streaming memory objects not allowed");
return NULL;
}
- virtual bool isValidAddress(uint64_t address) const;
- virtual bool isObjectEnd(uint64_t address) const;
+ virtual bool isValidAddress(uint64_t address) const LLVM_OVERRIDE;
+ virtual bool isObjectEnd(uint64_t address) const LLVM_OVERRIDE;
/// Drop s bytes from the front of the stream, pushing the positions of the
/// remaining bytes down by s. This is used to skip past the bitcode header,
@@ -170,8 +172,8 @@
return true;
}
- StreamingMemoryObject(const StreamingMemoryObject&); // DO NOT IMPLEMENT
- void operator=(const StreamingMemoryObject&); // DO NOT IMPLEMENT
+ StreamingMemoryObject(const StreamingMemoryObject&) LLVM_DELETED_FUNCTION;
+ void operator=(const StreamingMemoryObject&) LLVM_DELETED_FUNCTION;
};
StreamableMemoryObject *getNonStreamedMemoryObject(
Modified: llvm/branches/AMDILBackend/include/llvm/Support/TargetFolder.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/TargetFolder.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/TargetFolder.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/TargetFolder.h Tue Jan 15 11:16:16 2013
@@ -26,11 +26,11 @@
namespace llvm {
-class TargetData;
+class DataLayout;
/// TargetFolder - Create constants with target dependent folding.
class TargetFolder {
- const TargetData *TD;
+ const DataLayout *TD;
/// Fold - Fold the constant using target specific information.
Constant *Fold(Constant *C) const {
@@ -41,7 +41,7 @@
}
public:
- explicit TargetFolder(const TargetData *TheTD) : TD(TheTD) {}
+ explicit TargetFolder(const DataLayout *TheTD) : TD(TheTD) {}
//===--------------------------------------------------------------------===//
// Binary Operators
@@ -177,7 +177,14 @@
return Fold(ConstantExpr::getIntegerCast(C, DestTy, isSigned));
}
Constant *CreatePointerCast(Constant *C, Type *DestTy) const {
- return ConstantExpr::getPointerCast(C, DestTy);
+ if (C->getType() == DestTy)
+ return C; // avoid calling Fold
+ return Fold(ConstantExpr::getPointerCast(C, DestTy));
+ }
+ Constant *CreateFPCast(Constant *C, Type *DestTy) const {
+ if (C->getType() == DestTy)
+ return C; // avoid calling Fold
+ return Fold(ConstantExpr::getFPCast(C, DestTy));
}
Constant *CreateBitCast(Constant *C, Type *DestTy) const {
return CreateCast(Instruction::BitCast, C, DestTy);
Modified: llvm/branches/AMDILBackend/include/llvm/Support/TargetRegistry.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/TargetRegistry.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/TargetRegistry.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/TargetRegistry.h Tue Jan 15 11:16:16 2013
@@ -93,7 +93,9 @@
CodeGenOpt::Level OL);
typedef AsmPrinter *(*AsmPrinterCtorTy)(TargetMachine &TM,
MCStreamer &Streamer);
- typedef MCAsmBackend *(*MCAsmBackendCtorTy)(const Target &T, StringRef TT);
+ typedef MCAsmBackend *(*MCAsmBackendCtorTy)(const Target &T,
+ StringRef TT,
+ StringRef CPU);
typedef MCTargetAsmLexer *(*MCAsmLexerCtorTy)(const Target &T,
const MCRegisterInfo &MRI,
const MCAsmInfo &MAI);
@@ -271,7 +273,7 @@
/// createMCAsmInfo - Create a MCAsmInfo implementation for the specified
/// target triple.
///
- /// \arg Triple - This argument is used to determine the target machine
+ /// \param Triple This argument is used to determine the target machine
/// feature set; it should always be provided. Generally this should be
/// either the target triple from the module, or the target triple of the
/// host if that does not exist.
@@ -317,12 +319,12 @@
/// createMCSubtargetInfo - Create a MCSubtargetInfo implementation.
///
- /// \arg Triple - This argument is used to determine the target machine
+ /// \param Triple This argument is used to determine the target machine
/// feature set; it should always be provided. Generally this should be
/// either the target triple from the module, or the target triple of the
/// host if that does not exist.
- /// \arg CPU - This specifies the name of the target CPU.
- /// \arg Features - This specifies the string representation of the
+ /// \param CPU This specifies the name of the target CPU.
+ /// \param Features This specifies the string representation of the
/// additional target features.
MCSubtargetInfo *createMCSubtargetInfo(StringRef Triple, StringRef CPU,
StringRef Features) const {
@@ -332,9 +334,9 @@
}
/// createTargetMachine - Create a target specific machine implementation
- /// for the specified \arg Triple.
+ /// for the specified \p Triple.
///
- /// \arg Triple - This argument is used to determine the target machine
+ /// \param Triple This argument is used to determine the target machine
/// feature set; it should always be provided. Generally this should be
/// either the target triple from the module, or the target triple of the
/// host if that does not exist.
@@ -351,12 +353,11 @@
/// createMCAsmBackend - Create a target specific assembly parser.
///
- /// \arg Triple - The target triple string.
- /// \arg Backend - The target independent assembler object.
- MCAsmBackend *createMCAsmBackend(StringRef Triple) const {
+ /// \param Triple The target triple string.
+ MCAsmBackend *createMCAsmBackend(StringRef Triple, StringRef CPU) const {
if (!MCAsmBackendCtorFn)
return 0;
- return MCAsmBackendCtorFn(*this, Triple);
+ return MCAsmBackendCtorFn(*this, Triple, CPU);
}
/// createMCAsmLexer - Create a target specific assembly lexer.
@@ -370,7 +371,7 @@
/// createMCAsmParser - Create a target specific assembly parser.
///
- /// \arg Parser - The target independent parser implementation to use for
+ /// \param Parser The target independent parser implementation to use for
/// parsing and lexing.
MCTargetAsmParser *createMCAsmParser(MCSubtargetInfo &STI,
MCAsmParser &Parser) const {
@@ -416,13 +417,13 @@
/// createMCObjectStreamer - Create a target specific MCStreamer.
///
- /// \arg TT - The target triple.
- /// \arg Ctx - The target context.
- /// \arg TAB - The target assembler backend object. Takes ownership.
- /// \arg _OS - The stream object.
- /// \arg _Emitter - The target independent assembler object.Takes ownership.
- /// \arg RelaxAll - Relax all fixups?
- /// \arg NoExecStack - Mark file as not needing a executable stack.
+ /// \param TT The target triple.
+ /// \param Ctx The target context.
+ /// \param TAB The target assembler backend object. Takes ownership.
+ /// \param _OS The stream object.
+ /// \param _Emitter The target independent assembler object. Takes ownership.
+ /// \param RelaxAll Relax all fixups?
+ /// \param NoExecStack Mark file as not needing an executable stack.
MCStreamer *createMCObjectStreamer(StringRef TT, MCContext &Ctx,
MCAsmBackend &TAB,
raw_ostream &_OS,
@@ -1063,8 +1064,9 @@
}
private:
- static MCAsmBackend *Allocator(const Target &T, StringRef Triple) {
- return new MCAsmBackendImpl(T, Triple);
+ static MCAsmBackend *Allocator(const Target &T, StringRef Triple,
+ StringRef CPU) {
+ return new MCAsmBackendImpl(T, Triple, CPU);
}
};
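With the extra CPU parameter, creating a backend now looks like this sketch;
the triple and CPU strings are invented for illustration:

  #include "llvm/Support/TargetRegistry.h"

  std::string Err;
  const llvm::Target *T =
      llvm::TargetRegistry::lookupTarget("x86_64-unknown-linux-gnu", Err);
  llvm::MCAsmBackend *MAB =
      T ? T->createMCAsmBackend("x86_64-unknown-linux-gnu", "generic") : 0;
  // MAB is then handed to createMCObjectStreamer, which takes ownership.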
Modified: llvm/branches/AMDILBackend/include/llvm/Support/Threading.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/Threading.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/Threading.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/Threading.h Tue Jan 15 11:16:16 2013
@@ -41,8 +41,8 @@
/// before llvm_start_multithreaded().
void llvm_release_global_lock();
- /// llvm_execute_on_thread - Execute the given \arg UserFn on a separate
- /// thread, passing it the provided \arg UserData.
+ /// llvm_execute_on_thread - Execute the given \p UserFn on a separate
+ /// thread, passing it the provided \p UserData.
///
/// This function does not guarantee that the code will actually be executed
/// on a separate thread or honoring the requested stack size, but tries to do
Modified: llvm/branches/AMDILBackend/include/llvm/Support/TimeValue.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/TimeValue.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/TimeValue.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/TimeValue.h Tue Jan 15 11:16:16 2013
@@ -153,7 +153,6 @@
/// Determine if \p this is greater than or equal to \p that.
/// @returns True iff *this >= that.
- /// @brief True if this >= that.
int operator >= (const TimeValue &that) const {
if ( this->seconds_ > that.seconds_ ) {
return 1;
@@ -164,8 +163,7 @@
}
/// Determines if two TimeValue objects represent the same moment in time.
- /// @brief True iff *this == that.
- /// @brief True if this == that.
+ /// @returns True iff *this == that.
int operator == (const TimeValue &that) const {
return (this->seconds_ == that.seconds_) &&
(this->nanos_ == that.nanos_);
@@ -173,8 +171,7 @@
/// Determines if two TimeValue objects represent times that are not the
/// same.
- /// @return True iff *this != that.
- /// @brief True if this != that.
+ /// @returns True iff *this != that.
int operator != (const TimeValue &that) const { return !(*this == that); }
/// Adds two TimeValue objects together.
Modified: llvm/branches/AMDILBackend/include/llvm/Support/Timer.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/Timer.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/Timer.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/Timer.h Tue Jan 15 11:16:16 2013
@@ -15,6 +15,7 @@
#ifndef LLVM_SUPPORT_TIMER_H
#define LLVM_SUPPORT_TIMER_H
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/ADT/StringRef.h"
#include <cassert>
@@ -130,7 +131,7 @@
///
class TimeRegion {
Timer *T;
- TimeRegion(const TimeRegion &); // DO NOT IMPLEMENT
+ TimeRegion(const TimeRegion &) LLVM_DELETED_FUNCTION;
public:
explicit TimeRegion(Timer &t) : T(&t) {
T->startTimer();
@@ -168,8 +169,8 @@
std::vector<std::pair<TimeRecord, std::string> > TimersToPrint;
TimerGroup **Prev, *Next; // Doubly linked list of TimerGroup's.
- TimerGroup(const TimerGroup &TG); // DO NOT IMPLEMENT
- void operator=(const TimerGroup &TG); // DO NOT IMPLEMENT
+ TimerGroup(const TimerGroup &TG) LLVM_DELETED_FUNCTION;
+ void operator=(const TimerGroup &TG) LLVM_DELETED_FUNCTION;
public:
explicit TimerGroup(StringRef name);
~TimerGroup();
Modified: llvm/branches/AMDILBackend/include/llvm/Support/ValueHandle.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/ValueHandle.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/ValueHandle.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/ValueHandle.h Tue Jan 15 11:16:16 2013
@@ -59,8 +59,8 @@
// pair. The 'setValPtrInt' and 'getValPtrInt' methods below give them this
// access.
PointerIntPair<Value*, 2> VP;
-
- explicit ValueHandleBase(const ValueHandleBase&); // DO NOT IMPLEMENT.
+
+ ValueHandleBase(const ValueHandleBase&) LLVM_DELETED_FUNCTION;
public:
explicit ValueHandleBase(HandleBaseKind Kind)
: PrevPair(0, Kind), Next(0), VP(0, 0) {}
@@ -110,11 +110,12 @@
V != DenseMapInfo<Value *>::getTombstoneKey();
}
-private:
+public:
// Callbacks made from Value.
static void ValueIsDeleted(Value *V);
static void ValueIsRAUWd(Value *Old, Value *New);
+private:
// Internal implementation details.
ValueHandleBase **getPrevPtr() const { return PrevPair.getPointer(); }
HandleBaseKind getKind() const { return PrevPair.getInt(); }
Modified: llvm/branches/AMDILBackend/include/llvm/Support/YAMLParser.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/YAMLParser.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/YAMLParser.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/YAMLParser.h Tue Jan 15 11:16:16 2013
@@ -133,7 +133,6 @@
virtual void skip() {}
unsigned int getType() const { return TypeID; }
- static inline bool classof(const Node *) { return true; }
void *operator new ( size_t Size
, BumpPtrAllocator &Alloc
@@ -166,7 +165,6 @@
public:
NullNode(OwningPtr<Document> &D) : Node(NK_Null, D, StringRef()) {}
- static inline bool classof(const NullNode *) { return true; }
static inline bool classof(const Node *N) {
return N->getType() == NK_Null;
}
@@ -199,7 +197,6 @@
/// This happens with escaped characters and multi-line literals.
StringRef getValue(SmallVectorImpl<char> &Storage) const;
- static inline bool classof(const ScalarNode *) { return true; }
static inline bool classof(const Node *N) {
return N->getType() == NK_Scalar;
}
@@ -241,12 +238,11 @@
/// @returns The value, or nullptr if failed() == true.
Node *getValue();
- virtual void skip() {
+ virtual void skip() LLVM_OVERRIDE {
getKey()->skip();
getValue()->skip();
}
- static inline bool classof(const KeyValueNode *) { return true; }
static inline bool classof(const Node *N) {
return N->getType() == NK_KeyValue;
}
@@ -358,11 +354,10 @@
iterator end() { return iterator(); }
- virtual void skip() {
+ virtual void skip() LLVM_OVERRIDE {
yaml::skip(*this);
}
- static inline bool classof(const MappingNode *) { return true; }
static inline bool classof(const Node *N) {
return N->getType() == NK_Mapping;
}
@@ -421,11 +416,10 @@
iterator end() { return iterator(); }
- virtual void skip() {
+ virtual void skip() LLVM_OVERRIDE {
yaml::skip(*this);
}
- static inline bool classof(const SequenceNode *) { return true; }
static inline bool classof(const Node *N) {
return N->getType() == NK_Sequence;
}
@@ -450,7 +444,6 @@
StringRef getName() const { return Name; }
Node *getTarget();
- static inline bool classof(const ScalarNode *) { return true; }
static inline bool classof(const Node *N) {
return N->getType() == NK_Alias;
}
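The classof(const XNode *) overloads deleted above always returned true
and are no longer needed by the casting machinery; the remaining
classof(const Node *) form is all that isa<>/dyn_cast<> require. A
hedged usage sketch (not part of the patch):

    #include "llvm/ADT/SmallString.h"
    #include "llvm/Support/Casting.h"
    #include "llvm/Support/YAMLParser.h"
    using namespace llvm;

    void visit(yaml::Node *N) {
      // Dispatch on the node kind via the surviving classof overload.
      if (yaml::ScalarNode *S = dyn_cast<yaml::ScalarNode>(N)) {
        SmallString<32> Storage;
        StringRef Val = S->getValue(Storage);  // unescaped scalar value
        (void)Val;
      }
    }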
Modified: llvm/branches/AMDILBackend/include/llvm/Support/circular_raw_ostream.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/circular_raw_ostream.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/circular_raw_ostream.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/circular_raw_ostream.h Tue Jan 15 11:16:16 2013
@@ -81,12 +81,12 @@
Filled = false;
}
- virtual void write_impl(const char *Ptr, size_t Size);
+ virtual void write_impl(const char *Ptr, size_t Size) LLVM_OVERRIDE;
/// current_pos - Return the current position within the stream,
/// not counting the bytes currently in the buffer.
///
- virtual uint64_t current_pos() const {
+ virtual uint64_t current_pos() const LLVM_OVERRIDE {
// This has the same effect as calling TheStream.current_pos(),
// but that interface is private.
return TheStream->tell() - TheStream->GetNumBytesInBuffer();
Modified: llvm/branches/AMDILBackend/include/llvm/Support/raw_os_ostream.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/raw_os_ostream.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/raw_os_ostream.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/raw_os_ostream.h Tue Jan 15 11:16:16 2013
@@ -24,14 +24,14 @@
/// use the underlying stream to detect errors.
class raw_os_ostream : public raw_ostream {
std::ostream &OS;
-
+
/// write_impl - See raw_ostream::write_impl.
- virtual void write_impl(const char *Ptr, size_t Size);
-
+ virtual void write_impl(const char *Ptr, size_t Size) LLVM_OVERRIDE;
+
/// current_pos - Return the current position within the stream, not
/// counting the bytes currently in the buffer.
- virtual uint64_t current_pos() const;
-
+ virtual uint64_t current_pos() const LLVM_OVERRIDE;
+
public:
raw_os_ostream(std::ostream &O) : OS(O) {}
~raw_os_ostream();
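A short usage sketch of this adapter (not part of the patch): it lets
code written against raw_ostream write to any std::ostream.

    #include "llvm/Support/raw_os_ostream.h"
    #include <iostream>

    int main() {
      llvm::raw_os_ostream OS(std::cout);  // adapt std::cout
      OS << "hello, raw_os_ostream\n";
      OS.flush();  // push buffered bytes through to the std::ostream
      return 0;
    }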
Modified: llvm/branches/AMDILBackend/include/llvm/Support/raw_ostream.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/raw_ostream.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/raw_ostream.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/raw_ostream.h Tue Jan 15 11:16:16 2013
@@ -15,6 +15,7 @@
#define LLVM_SUPPORT_RAW_OSTREAM_H
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataTypes.h"
namespace llvm {
@@ -29,8 +30,8 @@
class raw_ostream {
private:
// Do not implement. raw_ostream is noncopyable.
- void operator=(const raw_ostream &);
- raw_ostream(const raw_ostream &);
+ void operator=(const raw_ostream &) LLVM_DELETED_FUNCTION;
+ raw_ostream(const raw_ostream &) LLVM_DELETED_FUNCTION;
/// The buffer is handled in such a way that the buffer is
/// uninitialized, unbuffered, or out of space when OutBufCur >=
@@ -191,10 +192,10 @@
raw_ostream &operator<<(double N);
- /// write_hex - Output \arg N in hexadecimal, without any prefix or padding.
+ /// write_hex - Output \p N in hexadecimal, without any prefix or padding.
raw_ostream &write_hex(unsigned long long N);
- /// write_escaped - Output \arg Str, turning '\\', '\t', '\n', '"', and
+ /// write_escaped - Output \p Str, turning '\\', '\t', '\n', '"', and
/// anything that doesn't satisfy std::isprint into an escape sequence.
raw_ostream &write_escaped(StringRef Str, bool UseHexEscapes = false);
@@ -210,13 +211,19 @@
/// Changes the foreground color of text that will be output from this point
/// forward.
- /// @param colors ANSI color to use, the special SAVEDCOLOR can be used to
+ /// @param Color ANSI color to use, the special SAVEDCOLOR can be used to
/// change only the bold attribute, and keep colors untouched
- /// @param bold bold/brighter text, default false
- /// @param bg if true change the background, default: change foreground
+ /// @param Bold bold/brighter text, default false
+ /// @param BG if true change the background, default: change foreground
/// @returns itself so it can be used within << invocations
- virtual raw_ostream &changeColor(enum Colors, bool = false, bool = false) {
- return *this; }
+ virtual raw_ostream &changeColor(enum Colors Color,
+ bool Bold = false,
+ bool BG = false) {
+ (void)Color;
+ (void)Bold;
+ (void)BG;
+ return *this;
+ }
/// Resets the colors to terminal defaults. Call this when you are done
/// outputting colored text, or before program exit.
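With the parameters now named, a typical call looks like the sketch
below (not part of the patch); on streams without color support the
base-class implementation above is a no-op.

    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    void warn(StringRef Msg) {
      outs().changeColor(raw_ostream::RED, /*Bold=*/true);
      outs() << "warning: ";
      outs().resetColor();   // back to terminal defaults
      outs() << Msg << "\n";
    }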
@@ -239,15 +246,16 @@
private:
/// write_impl - This is the piece of the class that is implemented
- /// by subclasses. This writes the \args Size bytes starting at
- /// \arg Ptr to the underlying stream.
+ /// by subclasses. This writes the \p Size bytes starting at
+ /// \p Ptr to the underlying stream.
///
/// This function is guaranteed to only be called at a point at which it is
/// safe for the subclass to install a new buffer via SetBuffer.
///
- /// \arg Ptr - The start of the data to be written. For buffered streams this
+ /// \param Ptr The start of the data to be written. For buffered streams this
/// is guaranteed to be the start of the buffer.
- /// \arg Size - The number of bytes to be written.
+ ///
+ /// \param Size The number of bytes to be written.
///
/// \invariant { Size > 0 }
virtual void write_impl(const char *Ptr, size_t Size) = 0;
@@ -314,14 +322,14 @@
uint64_t pos;
/// write_impl - See raw_ostream::write_impl.
- virtual void write_impl(const char *Ptr, size_t Size);
+ virtual void write_impl(const char *Ptr, size_t Size) LLVM_OVERRIDE;
/// current_pos - Return the current position within the stream, not
/// counting the bytes currently in the buffer.
- virtual uint64_t current_pos() const { return pos; }
+ virtual uint64_t current_pos() const LLVM_OVERRIDE { return pos; }
/// preferred_buffer_size - Determine an efficient buffer size.
- virtual size_t preferred_buffer_size() const;
+ virtual size_t preferred_buffer_size() const LLVM_OVERRIDE;
/// error_detected - Set the flag indicating that an output error has
/// been encountered.
@@ -382,14 +390,14 @@
}
virtual raw_ostream &changeColor(enum Colors colors, bool bold=false,
- bool bg=false);
- virtual raw_ostream &resetColor();
+ bool bg=false) LLVM_OVERRIDE;
+ virtual raw_ostream &resetColor() LLVM_OVERRIDE;
- virtual raw_ostream &reverseColor();
+ virtual raw_ostream &reverseColor() LLVM_OVERRIDE;
- virtual bool is_displayed() const;
+ virtual bool is_displayed() const LLVM_OVERRIDE;
- virtual bool has_colors() const;
+ virtual bool has_colors() const LLVM_OVERRIDE;
/// has_error - Return the value of the flag in this raw_fd_ostream indicating
/// whether an output error has been encountered.
@@ -435,11 +443,11 @@
std::string &OS;
/// write_impl - See raw_ostream::write_impl.
- virtual void write_impl(const char *Ptr, size_t Size);
+ virtual void write_impl(const char *Ptr, size_t Size) LLVM_OVERRIDE;
/// current_pos - Return the current position within the stream, not
/// counting the bytes currently in the buffer.
- virtual uint64_t current_pos() const { return OS.size(); }
+ virtual uint64_t current_pos() const LLVM_OVERRIDE { return OS.size(); }
public:
explicit raw_string_ostream(std::string &O) : OS(O) {}
~raw_string_ostream();
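For context, a minimal raw_string_ostream sketch (not part of the
patch): writes accumulate in the referenced std::string, and str()
flushes the buffer before handing the string back.

    #include "llvm/Support/raw_ostream.h"
    #include <string>

    std::string describe(int N) {
      std::string Buf;
      llvm::raw_string_ostream OS(Buf);  // output lands in Buf
      OS << "value = " << N;
      return OS.str();                   // flush, then return Buf
    }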
@@ -459,15 +467,15 @@
SmallVectorImpl<char> &OS;
/// write_impl - See raw_ostream::write_impl.
- virtual void write_impl(const char *Ptr, size_t Size);
+ virtual void write_impl(const char *Ptr, size_t Size) LLVM_OVERRIDE;
/// current_pos - Return the current position within the stream, not
/// counting the bytes currently in the buffer.
- virtual uint64_t current_pos() const;
+ virtual uint64_t current_pos() const LLVM_OVERRIDE;
public:
/// Construct a new raw_svector_ostream.
///
- /// \arg O - The vector to write to; this should generally have at least 128
+ /// \param O The vector to write to; this should generally have at least 128
/// bytes free to avoid any extraneous memory overhead.
explicit raw_svector_ostream(SmallVectorImpl<char> &O);
~raw_svector_ostream();
@@ -485,11 +493,11 @@
/// raw_null_ostream - A raw_ostream that discards all output.
class raw_null_ostream : public raw_ostream {
/// write_impl - See raw_ostream::write_impl.
- virtual void write_impl(const char *Ptr, size_t size);
+ virtual void write_impl(const char *Ptr, size_t size) LLVM_OVERRIDE;
/// current_pos - Return the current position within the stream, not
/// counting the bytes currently in the buffer.
- virtual uint64_t current_pos() const;
+ virtual uint64_t current_pos() const LLVM_OVERRIDE;
public:
explicit raw_null_ostream() {}
Modified: llvm/branches/AMDILBackend/include/llvm/Support/system_error.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/system_error.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/system_error.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/system_error.h Tue Jan 15 11:16:16 2013
@@ -17,6 +17,8 @@
#ifndef LLVM_SYSTEM_SYSTEM_ERROR_H
#define LLVM_SYSTEM_SYSTEM_ERROR_H
+#include "llvm/Support/Compiler.h"
+
/*
system_error synopsis
@@ -629,8 +631,8 @@
private:
error_category();
- error_category(const error_category&);// = delete;
- error_category& operator=(const error_category&);// = delete;
+ error_category(const error_category&) LLVM_DELETED_FUNCTION;
+ error_category& operator=(const error_category&) LLVM_DELETED_FUNCTION;
public:
virtual const char* name() const = 0;
@@ -651,7 +653,7 @@
class _do_message : public error_category
{
public:
- virtual std::string message(int ev) const;
+ virtual std::string message(int ev) const LLVM_OVERRIDE;
};
const error_category& generic_category();
Modified: llvm/branches/AMDILBackend/include/llvm/Support/type_traits.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Support/type_traits.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Support/type_traits.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Support/type_traits.h Tue Jan 15 11:16:16 2013
@@ -54,8 +54,9 @@
// is_class<> metafunction due to Paul Mensonides (leavings at attbi.com). For
// more details:
// http://groups.google.com/groups?hl=en&selm=000001c1cc83%24e154d5e0%247772e50c%40c161550a&rnum=1
- public:
- enum { value = sizeof(char) == sizeof(dont_use::is_class_helper<T>(0)) };
+public:
+ static const bool value =
+ sizeof(char) == sizeof(dont_use::is_class_helper<T>(0));
};
@@ -162,12 +163,11 @@
static UnderlyingT &nonce_instance;
public:
- enum {
+ static const bool
value = (!is_class<UnderlyingT>::value && !is_pointer<UnderlyingT>::value &&
!is_same<UnderlyingT, float>::value &&
!is_same<UnderlyingT, double>::value &&
- sizeof(char) != sizeof(check_int_convertible(nonce_instance)))
- };
+ sizeof(char) != sizeof(check_int_convertible(nonce_instance)));
};
// enable_if_c - Enable/disable a template based on a metafunction
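Because `value` is now a static const bool rather than an anonymous
enum, it still works in the usual pre-C++11 compile-time checks. A
sketch (not part of the patch):

    #include "llvm/Support/type_traits.h"

    struct S {};
    // Ill-formed (array of size -1) unless is_class<S>::value is true.
    char AssertSIsClass[llvm::is_class<S>::value ? 1 : -1];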
Modified: llvm/branches/AMDILBackend/include/llvm/SymbolTableListTraits.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/SymbolTableListTraits.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/SymbolTableListTraits.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/SymbolTableListTraits.h Tue Jan 15 11:16:16 2013
@@ -46,7 +46,6 @@
/// getListOwner - Return the object that owns this list. If this is a list
/// of instructions, it returns the BasicBlock that owns them.
ItemParentClass *getListOwner() {
- typedef iplist<ValueSubClass> ItemParentClass::*Sublist;
size_t Offset(size_t(&((ItemParentClass*)0->*ItemParentClass::
getSublistAccess(static_cast<ValueSubClass*>(0)))));
iplist<ValueSubClass>* Anchor(static_cast<iplist<ValueSubClass>*>(this));
Modified: llvm/branches/AMDILBackend/include/llvm/TableGen/Error.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/TableGen/Error.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/TableGen/Error.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/TableGen/Error.h Tue Jan 15 11:16:16 2013
@@ -19,26 +19,17 @@
namespace llvm {
-class TGError {
- SMLoc Loc;
- std::string Message;
-public:
- TGError(SMLoc loc, const std::string &message) : Loc(loc), Message(message) {}
-
- SMLoc getLoc() const { return Loc; }
- const std::string &getMessage() const { return Message; }
-};
-
-void PrintWarning(SMLoc WarningLoc, const Twine &Msg);
+void PrintWarning(ArrayRef<SMLoc> WarningLoc, const Twine &Msg);
void PrintWarning(const char *Loc, const Twine &Msg);
void PrintWarning(const Twine &Msg);
-void PrintWarning(const TGError &Warning);
-void PrintError(SMLoc ErrorLoc, const Twine &Msg);
+void PrintError(ArrayRef<SMLoc> ErrorLoc, const Twine &Msg);
void PrintError(const char *Loc, const Twine &Msg);
void PrintError(const Twine &Msg);
-void PrintError(const TGError &Error);
+LLVM_ATTRIBUTE_NORETURN void PrintFatalError(const std::string &Msg);
+LLVM_ATTRIBUTE_NORETURN void PrintFatalError(ArrayRef<SMLoc> ErrorLoc,
+ const std::string &Msg);
extern SourceMgr SrcMgr;
Modified: llvm/branches/AMDILBackend/include/llvm/TableGen/Main.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/TableGen/Main.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/TableGen/Main.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/TableGen/Main.h Tue Jan 15 11:16:16 2013
@@ -16,10 +16,13 @@
namespace llvm {
-class TableGenAction;
+class RecordKeeper;
+class raw_ostream;
+/// \brief Perform the action using Records, and write output to OS.
+/// \returns true on error, false otherwise
+typedef bool TableGenMainFn(raw_ostream &OS, RecordKeeper &Records);
-/// Run the table generator, performing the specified Action on parsed records.
-int TableGenMain(char *argv0, TableGenAction &Action);
+int TableGenMain(char *argv0, TableGenMainFn *MainFn);
}
Modified: llvm/branches/AMDILBackend/include/llvm/TableGen/Record.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/TableGen/Record.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/TableGen/Record.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/TableGen/Record.h Tue Jan 15 11:16:16 2013
@@ -18,6 +18,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
@@ -66,10 +67,27 @@
//===----------------------------------------------------------------------===//
class RecTy {
+public:
+ /// \brief Subclass discriminator (for dyn_cast<> et al.)
+ enum RecTyKind {
+ BitRecTyKind,
+ BitsRecTyKind,
+ IntRecTyKind,
+ StringRecTyKind,
+ ListRecTyKind,
+ DagRecTyKind,
+ RecordRecTyKind
+ };
+
+private:
+ RecTyKind Kind;
ListRecTy *ListTy;
virtual void anchor();
+
public:
- RecTy() : ListTy(0) {}
+ RecTyKind getRecTyKind() const { return Kind; }
+
+ RecTy(RecTyKind K) : Kind(K), ListTy(0) {}
virtual ~RecTy() {}
virtual std::string getAsString() const = 0;
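The new RecTyKind discriminator lets clients use LLVM-style isa<> and
dyn_cast<> on types, replacing the dynamic_cast calls removed later in
this patch. A hedged sketch (not part of the patch):

    #include "llvm/TableGen/Record.h"
    using namespace llvm;

    // Returns the element type for list types, and null otherwise,
    // without any vtable-based RTTI.
    RecTy *elementTypeOrNull(RecTy *Ty) {
      if (ListRecTy *LT = dyn_cast<ListRecTy>(Ty))
        return LT->getElementType();
      return 0;
    }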
@@ -132,8 +150,12 @@
///
class BitRecTy : public RecTy {
static BitRecTy Shared;
- BitRecTy() {}
+ BitRecTy() : RecTy(BitRecTyKind) {}
public:
+ static bool classof(const RecTy *RT) {
+ return RT->getRecTyKind() == BitRecTyKind;
+ }
+
static BitRecTy *get() { return &Shared; }
virtual Init *convertValue( UnsetInit *UI) { return (Init*)UI; }
@@ -152,9 +174,9 @@
virtual Init *convertValue( VarInit *VI) { return RecTy::convertValue(VI);}
virtual Init *convertValue( FieldInit *FI) { return RecTy::convertValue(FI);}
- std::string getAsString() const { return "bit"; }
+ virtual std::string getAsString() const { return "bit"; }
- bool typeIsConvertibleTo(const RecTy *RHS) const {
+ virtual bool typeIsConvertibleTo(const RecTy *RHS) const {
return RHS->baseClassOf(this);
}
virtual bool baseClassOf(const BitRecTy *RHS) const { return true; }
@@ -173,8 +195,12 @@
///
class BitsRecTy : public RecTy {
unsigned Size;
- explicit BitsRecTy(unsigned Sz) : Size(Sz) {}
+ explicit BitsRecTy(unsigned Sz) : RecTy(BitsRecTyKind), Size(Sz) {}
public:
+ static bool classof(const RecTy *RT) {
+ return RT->getRecTyKind() == BitsRecTyKind;
+ }
+
static BitsRecTy *get(unsigned Sz);
unsigned getNumBits() const { return Size; }
@@ -195,9 +221,9 @@
virtual Init *convertValue( VarInit *VI) { return RecTy::convertValue(VI);}
virtual Init *convertValue( FieldInit *FI) { return RecTy::convertValue(FI);}
- std::string getAsString() const;
+ virtual std::string getAsString() const;
- bool typeIsConvertibleTo(const RecTy *RHS) const {
+ virtual bool typeIsConvertibleTo(const RecTy *RHS) const {
return RHS->baseClassOf(this);
}
virtual bool baseClassOf(const BitRecTy *RHS) const { return Size == 1; }
@@ -217,8 +243,12 @@
///
class IntRecTy : public RecTy {
static IntRecTy Shared;
- IntRecTy() {}
+ IntRecTy() : RecTy(IntRecTyKind) {}
public:
+ static bool classof(const RecTy *RT) {
+ return RT->getRecTyKind() == IntRecTyKind;
+ }
+
static IntRecTy *get() { return &Shared; }
virtual Init *convertValue( UnsetInit *UI) { return (Init*)UI; }
@@ -237,9 +267,9 @@
virtual Init *convertValue( VarInit *VI) { return RecTy::convertValue(VI);}
virtual Init *convertValue( FieldInit *FI) { return RecTy::convertValue(FI);}
- std::string getAsString() const { return "int"; }
+ virtual std::string getAsString() const { return "int"; }
- bool typeIsConvertibleTo(const RecTy *RHS) const {
+ virtual bool typeIsConvertibleTo(const RecTy *RHS) const {
return RHS->baseClassOf(this);
}
@@ -257,8 +287,12 @@
///
class StringRecTy : public RecTy {
static StringRecTy Shared;
- StringRecTy() {}
+ StringRecTy() : RecTy(StringRecTyKind) {}
public:
+ static bool classof(const RecTy *RT) {
+ return RT->getRecTyKind() == StringRecTyKind;
+ }
+
static StringRecTy *get() { return &Shared; }
virtual Init *convertValue( UnsetInit *UI) { return (Init*)UI; }
@@ -278,9 +312,9 @@
virtual Init *convertValue( VarInit *VI) { return RecTy::convertValue(VI);}
virtual Init *convertValue( FieldInit *FI) { return RecTy::convertValue(FI);}
- std::string getAsString() const { return "string"; }
+ virtual std::string getAsString() const { return "string"; }
- bool typeIsConvertibleTo(const RecTy *RHS) const {
+ virtual bool typeIsConvertibleTo(const RecTy *RHS) const {
return RHS->baseClassOf(this);
}
@@ -300,9 +334,13 @@
///
class ListRecTy : public RecTy {
RecTy *Ty;
- explicit ListRecTy(RecTy *T) : Ty(T) {}
+ explicit ListRecTy(RecTy *T) : RecTy(ListRecTyKind), Ty(T) {}
friend ListRecTy *RecTy::getListTy();
public:
+ static bool classof(const RecTy *RT) {
+ return RT->getRecTyKind() == ListRecTyKind;
+ }
+
static ListRecTy *get(RecTy *T) { return T->getListTy(); }
RecTy *getElementType() const { return Ty; }
@@ -322,9 +360,9 @@
virtual Init *convertValue( VarInit *VI) { return RecTy::convertValue(VI);}
virtual Init *convertValue( FieldInit *FI) { return RecTy::convertValue(FI);}
- std::string getAsString() const;
+ virtual std::string getAsString() const;
- bool typeIsConvertibleTo(const RecTy *RHS) const {
+ virtual bool typeIsConvertibleTo(const RecTy *RHS) const {
return RHS->baseClassOf(this);
}
@@ -343,8 +381,12 @@
///
class DagRecTy : public RecTy {
static DagRecTy Shared;
- DagRecTy() {}
+ DagRecTy() : RecTy(DagRecTyKind) {}
public:
+ static bool classof(const RecTy *RT) {
+ return RT->getRecTyKind() == DagRecTyKind;
+ }
+
static DagRecTy *get() { return &Shared; }
virtual Init *convertValue( UnsetInit *UI) { return (Init*)UI; }
@@ -363,9 +405,9 @@
virtual Init *convertValue( VarInit *VI) { return RecTy::convertValue(VI);}
virtual Init *convertValue( FieldInit *FI) { return RecTy::convertValue(FI);}
- std::string getAsString() const { return "dag"; }
+ virtual std::string getAsString() const { return "dag"; }
- bool typeIsConvertibleTo(const RecTy *RHS) const {
+ virtual bool typeIsConvertibleTo(const RecTy *RHS) const {
return RHS->baseClassOf(this);
}
@@ -384,9 +426,13 @@
///
class RecordRecTy : public RecTy {
Record *Rec;
- explicit RecordRecTy(Record *R) : Rec(R) {}
+ explicit RecordRecTy(Record *R) : RecTy(RecordRecTyKind), Rec(R) {}
friend class Record;
public:
+ static bool classof(const RecTy *RT) {
+ return RT->getRecTyKind() == RecordRecTyKind;
+ }
+
static RecordRecTy *get(Record *R);
Record *getRecord() const { return Rec; }
@@ -407,9 +453,9 @@
virtual Init *convertValue( VarInit *VI) { return RecTy::convertValue(VI);}
virtual Init *convertValue( FieldInit *FI) { return RecTy::convertValue(FI);}
- std::string getAsString() const;
+ virtual std::string getAsString() const;
- bool typeIsConvertibleTo(const RecTy *RHS) const {
+ virtual bool typeIsConvertibleTo(const RecTy *RHS) const {
return RHS->baseClassOf(this);
}
virtual bool baseClassOf(const BitRecTy *RHS) const { return false; }
@@ -431,12 +477,53 @@
//===----------------------------------------------------------------------===//
class Init {
- Init(const Init &); // Do not define.
- Init &operator=(const Init &); // Do not define.
+protected:
+ /// \brief Discriminator enum (for isa<>, dyn_cast<>, et al.)
+ ///
+ /// This enum is laid out by a preorder traversal of the inheritance
+ /// hierarchy, and does not contain an entry for abstract classes, as per
+ /// the recommendation in docs/HowToSetUpLLVMStyleRTTI.rst.
+ ///
+ /// We also explicitly include "first" and "last" values for each
+ /// interior node of the inheritance tree, to make it easier to read the
+ /// corresponding classof().
+ ///
+ /// We could pack these a bit tighter by not having the IK_FirstXXXInit
+ /// and IK_LastXXXInit be their own values, but that would degrade
+ /// readability for no real benefit.
+ enum InitKind {
+ IK_BitInit,
+ IK_BitsInit,
+ IK_FirstTypedInit,
+ IK_DagInit,
+ IK_DefInit,
+ IK_FieldInit,
+ IK_IntInit,
+ IK_ListInit,
+ IK_FirstOpInit,
+ IK_BinOpInit,
+ IK_TernOpInit,
+ IK_UnOpInit,
+ IK_LastOpInit,
+ IK_StringInit,
+ IK_VarInit,
+ IK_VarListElementInit,
+ IK_LastTypedInit,
+ IK_UnsetInit,
+ IK_VarBitInit
+ };
+
+private:
+ const InitKind Kind;
+ Init(const Init &) LLVM_DELETED_FUNCTION;
+ Init &operator=(const Init &) LLVM_DELETED_FUNCTION;
virtual void anchor();
+public:
+ InitKind getKind() const { return Kind; }
+
protected:
- Init(void) {}
+ explicit Init(InitKind K) : Kind(K) {}
public:
virtual ~Init() {}
@@ -509,6 +596,18 @@
virtual Init *resolveReferences(Record &R, const RecordVal *RV) const {
return const_cast<Init *>(this);
}
+
+ /// getBit - This method is used to return the initializer for the specified
+ /// bit.
+ virtual Init *getBit(unsigned Bit) const = 0;
+
+ /// getBitVar - This method is used to retrieve the initializer for a bit
+ /// reference. For non-VarBitInit, it simply returns itself.
+ virtual Init *getBitVar() const { return const_cast<Init*>(this); }
+
+ /// getBitNum - This method is used to retrieve the bit number of a bit
+ /// reference. For non-VarBitInit, it simply returns 0.
+ virtual unsigned getBitNum() const { return 0; }
};
inline raw_ostream &operator<<(raw_ostream &OS, const Init &I) {
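The getBit/getBitVar/getBitNum trio above replaces the per-class
resolveBitReference hooks removed throughout this file. A hypothetical
helper showing the uniform interface (not part of the patch):

    // Print the initializer behind each bit of a BitsInit. For bits
    // that are not VarBitInit, getBitNum() simply reports 0.
    void dumpBits(const llvm::BitsInit &BI, llvm::raw_ostream &OS) {
      for (unsigned i = 0, e = BI.getNumBits(); i != e; ++i) {
        llvm::Init *B = BI.getBit(i);
        OS << "bit " << i << ": " << B->getAsString()
           << " (bit " << B->getBitNum() << " of "
           << B->getBitVar()->getAsString() << ")\n";
      }
    }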
@@ -521,13 +620,17 @@
class TypedInit : public Init {
RecTy *Ty;
- TypedInit(const TypedInit &Other); // Do not define.
- TypedInit &operator=(const TypedInit &Other); // Do not define.
+ TypedInit(const TypedInit &Other) LLVM_DELETED_FUNCTION;
+ TypedInit &operator=(const TypedInit &Other) LLVM_DELETED_FUNCTION;
protected:
- explicit TypedInit(RecTy *T) : Ty(T) {}
+ explicit TypedInit(InitKind K, RecTy *T) : Init(K), Ty(T) {}
public:
+ static bool classof(const Init *I) {
+ return I->getKind() >= IK_FirstTypedInit &&
+ I->getKind() <= IK_LastTypedInit;
+ }
RecTy *getType() const { return Ty; }
virtual Init *
@@ -541,13 +644,6 @@
///
virtual RecTy *getFieldType(const std::string &FieldName) const;
- /// resolveBitReference - This method is used to implement
- /// VarBitInit::resolveReferences. If the bit is able to be resolved, we
- /// simply return the resolved value, otherwise we return null.
- ///
- virtual Init *resolveBitReference(Record &R, const RecordVal *RV,
- unsigned Bit) const = 0;
-
/// resolveListElementReference - This method is used to implement
/// VarListElementInit::resolveReferences. If the list element is resolvable
/// now, we return the resolved value, otherwise we return null.
@@ -559,18 +655,25 @@
/// UnsetInit - ? - Represents an uninitialized value
///
class UnsetInit : public Init {
- UnsetInit() : Init() {}
- UnsetInit(const UnsetInit &); // Do not define.
- UnsetInit &operator=(const UnsetInit &Other); // Do not define.
+ UnsetInit() : Init(IK_UnsetInit) {}
+ UnsetInit(const UnsetInit &) LLVM_DELETED_FUNCTION;
+ UnsetInit &operator=(const UnsetInit &Other) LLVM_DELETED_FUNCTION;
virtual void anchor();
public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_UnsetInit;
+ }
static UnsetInit *get();
virtual Init *convertInitializerTo(RecTy *Ty) const {
return Ty->convertValue(const_cast<UnsetInit *>(this));
}
+ virtual Init *getBit(unsigned Bit) const {
+ return const_cast<UnsetInit*>(this);
+ }
+
virtual bool isComplete() const { return false; }
virtual std::string getAsString() const { return "?"; }
};
@@ -581,12 +684,15 @@
class BitInit : public Init {
bool Value;
- explicit BitInit(bool V) : Value(V) {}
- BitInit(const BitInit &Other); // Do not define.
- BitInit &operator=(BitInit &Other); // Do not define.
+ explicit BitInit(bool V) : Init(IK_BitInit), Value(V) {}
+ BitInit(const BitInit &Other) LLVM_DELETED_FUNCTION;
+ BitInit &operator=(BitInit &Other) LLVM_DELETED_FUNCTION;
virtual void anchor();
public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_BitInit;
+ }
static BitInit *get(bool V);
bool getValue() const { return Value; }
@@ -595,6 +701,11 @@
return Ty->convertValue(const_cast<BitInit *>(this));
}
+ virtual Init *getBit(unsigned Bit) const {
+ assert(Bit < 1 && "Bit index out of range!");
+ return const_cast<BitInit*>(this);
+ }
+
virtual std::string getAsString() const { return Value ? "1" : "0"; }
};
@@ -604,23 +715,22 @@
class BitsInit : public Init, public FoldingSetNode {
std::vector<Init*> Bits;
- BitsInit(ArrayRef<Init *> Range) : Bits(Range.begin(), Range.end()) {}
+ BitsInit(ArrayRef<Init *> Range)
+ : Init(IK_BitsInit), Bits(Range.begin(), Range.end()) {}
- BitsInit(const BitsInit &Other); // Do not define.
- BitsInit &operator=(const BitsInit &Other); // Do not define.
+ BitsInit(const BitsInit &Other) LLVM_DELETED_FUNCTION;
+ BitsInit &operator=(const BitsInit &Other) LLVM_DELETED_FUNCTION;
public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_BitsInit;
+ }
static BitsInit *get(ArrayRef<Init *> Range);
void Profile(FoldingSetNodeID &ID) const;
unsigned getNumBits() const { return Bits.size(); }
- Init *getBit(unsigned Bit) const {
- assert(Bit < Bits.size() && "Bit index out of range!");
- return Bits[Bit];
- }
-
virtual Init *convertInitializerTo(RecTy *Ty) const {
return Ty->convertValue(const_cast<BitsInit *>(this));
}
@@ -640,6 +750,11 @@
virtual std::string getAsString() const;
virtual Init *resolveReferences(Record &R, const RecordVal *RV) const;
+
+ virtual Init *getBit(unsigned Bit) const {
+ assert(Bit < Bits.size() && "Bit index out of range!");
+ return Bits[Bit];
+ }
};
@@ -648,12 +763,16 @@
class IntInit : public TypedInit {
int64_t Value;
- explicit IntInit(int64_t V) : TypedInit(IntRecTy::get()), Value(V) {}
+ explicit IntInit(int64_t V)
+ : TypedInit(IK_IntInit, IntRecTy::get()), Value(V) {}
- IntInit(const IntInit &Other); // Do not define.
- IntInit &operator=(const IntInit &Other); // Do note define.
+ IntInit(const IntInit &Other) LLVM_DELETED_FUNCTION;
+ IntInit &operator=(const IntInit &Other) LLVM_DELETED_FUNCTION;
public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_IntInit;
+ }
static IntInit *get(int64_t V);
int64_t getValue() const { return Value; }
@@ -666,15 +785,6 @@
virtual std::string getAsString() const;
- /// resolveBitReference - This method is used to implement
- /// VarBitInit::resolveReferences. If the bit is able to be resolved, we
- /// simply return the resolved value, otherwise we return null.
- ///
- virtual Init *resolveBitReference(Record &R, const RecordVal *RV,
- unsigned Bit) const {
- llvm_unreachable("Illegal bit reference off int");
- }
-
/// resolveListElementReference - This method is used to implement
/// VarListElementInit::resolveReferences. If the list element is resolvable
/// now, we return the resolved value, otherwise we return null.
@@ -682,6 +792,10 @@
unsigned Elt) const {
llvm_unreachable("Illegal element reference off int");
}
+
+ virtual Init *getBit(unsigned Bit) const {
+ return BitInit::get((Value & (1ULL << Bit)) != 0);
+ }
};
@@ -691,13 +805,16 @@
std::string Value;
explicit StringInit(const std::string &V)
- : TypedInit(StringRecTy::get()), Value(V) {}
+ : TypedInit(IK_StringInit, StringRecTy::get()), Value(V) {}
- StringInit(const StringInit &Other); // Do not define.
- StringInit &operator=(const StringInit &Other); // Do not define.
+ StringInit(const StringInit &Other) LLVM_DELETED_FUNCTION;
+ StringInit &operator=(const StringInit &Other) LLVM_DELETED_FUNCTION;
virtual void anchor();
public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_StringInit;
+ }
static StringInit *get(StringRef);
const std::string &getValue() const { return Value; }
@@ -709,15 +826,6 @@
virtual std::string getAsString() const { return "\"" + Value + "\""; }
virtual std::string getAsUnquotedString() const { return Value; }
- /// resolveBitReference - This method is used to implement
- /// VarBitInit::resolveReferences. If the bit is able to be resolved, we
- /// simply return the resolved value, otherwise we return null.
- ///
- virtual Init *resolveBitReference(Record &R, const RecordVal *RV,
- unsigned Bit) const {
- llvm_unreachable("Illegal bit reference off string");
- }
-
/// resolveListElementReference - This method is used to implement
/// VarListElementInit::resolveReferences. If the list element is resolvable
/// now, we return the resolved value, otherwise we return null.
@@ -725,6 +833,10 @@
unsigned Elt) const {
llvm_unreachable("Illegal element reference off string");
}
+
+ virtual Init *getBit(unsigned Bit) const {
+ llvm_unreachable("Illegal bit reference off string");
+ }
};
/// ListInit - [AL, AH, CL] - Represent a list of defs
@@ -736,12 +848,16 @@
private:
explicit ListInit(ArrayRef<Init *> Range, RecTy *EltTy)
- : TypedInit(ListRecTy::get(EltTy)), Values(Range.begin(), Range.end()) {}
+ : TypedInit(IK_ListInit, ListRecTy::get(EltTy)),
+ Values(Range.begin(), Range.end()) {}
- ListInit(const ListInit &Other); // Do not define.
- ListInit &operator=(const ListInit &Other); // Do not define.
+ ListInit(const ListInit &Other) LLVM_DELETED_FUNCTION;
+ ListInit &operator=(const ListInit &Other) LLVM_DELETED_FUNCTION;
public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_ListInit;
+ }
static ListInit *get(ArrayRef<Init *> Range, RecTy *EltTy);
void Profile(FoldingSetNodeID &ID) const;
@@ -754,7 +870,8 @@
Record *getElementAsRecord(unsigned i) const;
- Init *convertInitListSlice(const std::vector<unsigned> &Elements) const;
+ virtual Init *
+ convertInitListSlice(const std::vector<unsigned> &Elements) const;
virtual Init *convertInitializerTo(RecTy *Ty) const {
return Ty->convertValue(const_cast<ListInit *>(this));
@@ -777,33 +894,32 @@
inline size_t size () const { return Values.size(); }
inline bool empty() const { return Values.empty(); }
- /// resolveBitReference - This method is used to implement
- /// VarBitInit::resolveReferences. If the bit is able to be resolved, we
- /// simply return the resolved value, otherwise we return null.
- ///
- virtual Init *resolveBitReference(Record &R, const RecordVal *RV,
- unsigned Bit) const {
- llvm_unreachable("Illegal bit reference off list");
- }
-
/// resolveListElementReference - This method is used to implement
/// VarListElementInit::resolveReferences. If the list element is resolvable
/// now, we return the resolved value, otherwise we return null.
virtual Init *resolveListElementReference(Record &R, const RecordVal *RV,
unsigned Elt) const;
+
+ virtual Init *getBit(unsigned Bit) const {
+ llvm_unreachable("Illegal bit reference off list");
+ }
};
/// OpInit - Base class for operators
///
class OpInit : public TypedInit {
- OpInit(const OpInit &Other); // Do not define.
- OpInit &operator=(OpInit &Other); // Do not define.
+ OpInit(const OpInit &Other) LLVM_DELETED_FUNCTION;
+ OpInit &operator=(OpInit &Other) LLVM_DELETED_FUNCTION;
protected:
- explicit OpInit(RecTy *Type) : TypedInit(Type) {}
+ explicit OpInit(InitKind K, RecTy *Type) : TypedInit(K, Type) {}
public:
+ static bool classof(const Init *I) {
+ return I->getKind() >= IK_FirstOpInit &&
+ I->getKind() <= IK_LastOpInit;
+ }
// Clone - Clone this operator, replacing arguments with the new list
virtual OpInit *clone(std::vector<Init *> &Operands) const = 0;
@@ -818,10 +934,10 @@
return Ty->convertValue(const_cast<OpInit *>(this));
}
- virtual Init *resolveBitReference(Record &R, const RecordVal *RV,
- unsigned Bit) const;
virtual Init *resolveListElementReference(Record &R, const RecordVal *RV,
unsigned Elt) const;
+
+ virtual Init *getBit(unsigned Bit) const;
};
@@ -835,12 +951,15 @@
Init *LHS;
UnOpInit(UnaryOp opc, Init *lhs, RecTy *Type)
- : OpInit(Type), Opc(opc), LHS(lhs) {}
+ : OpInit(IK_UnOpInit, Type), Opc(opc), LHS(lhs) {}
- UnOpInit(const UnOpInit &Other); // Do not define.
- UnOpInit &operator=(const UnOpInit &Other); // Do not define.
+ UnOpInit(const UnOpInit &Other) LLVM_DELETED_FUNCTION;
+ UnOpInit &operator=(const UnOpInit &Other) LLVM_DELETED_FUNCTION;
public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_UnOpInit;
+ }
static UnOpInit *get(UnaryOp opc, Init *lhs, RecTy *Type);
// Clone - Clone this operator, replacing arguments with the new list
@@ -850,8 +969,8 @@
return UnOpInit::get(getOpcode(), *Operands.begin(), getType());
}
- int getNumOperands() const { return 1; }
- Init *getOperand(int i) const {
+ virtual int getNumOperands() const { return 1; }
+ virtual Init *getOperand(int i) const {
assert(i == 0 && "Invalid operand id for unary operator");
return getOperand();
}
@@ -861,7 +980,7 @@
// Fold - If possible, fold this to a simpler init. Return this if not
// possible to fold.
- Init *Fold(Record *CurRec, MultiClass *CurMultiClass) const;
+ virtual Init *Fold(Record *CurRec, MultiClass *CurMultiClass) const;
virtual Init *resolveReferences(Record &R, const RecordVal *RV) const;
@@ -878,12 +997,15 @@
Init *LHS, *RHS;
BinOpInit(BinaryOp opc, Init *lhs, Init *rhs, RecTy *Type) :
- OpInit(Type), Opc(opc), LHS(lhs), RHS(rhs) {}
+ OpInit(IK_BinOpInit, Type), Opc(opc), LHS(lhs), RHS(rhs) {}
- BinOpInit(const BinOpInit &Other); // Do not define.
- BinOpInit &operator=(const BinOpInit &Other); // Do not define.
+ BinOpInit(const BinOpInit &Other) LLVM_DELETED_FUNCTION;
+ BinOpInit &operator=(const BinOpInit &Other) LLVM_DELETED_FUNCTION;
public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_BinOpInit;
+ }
static BinOpInit *get(BinaryOp opc, Init *lhs, Init *rhs,
RecTy *Type);
@@ -894,8 +1016,8 @@
return BinOpInit::get(getOpcode(), Operands[0], Operands[1], getType());
}
- int getNumOperands() const { return 2; }
- Init *getOperand(int i) const {
+ virtual int getNumOperands() const { return 2; }
+ virtual Init *getOperand(int i) const {
assert((i == 0 || i == 1) && "Invalid operand id for binary operator");
if (i == 0) {
return getLHS();
@@ -910,7 +1032,7 @@
// Fold - If possible, fold this to a simpler init. Return this if not
// possible to fold.
- Init *Fold(Record *CurRec, MultiClass *CurMultiClass) const;
+ virtual Init *Fold(Record *CurRec, MultiClass *CurMultiClass) const;
virtual Init *resolveReferences(Record &R, const RecordVal *RV) const;
@@ -928,12 +1050,15 @@
TernOpInit(TernaryOp opc, Init *lhs, Init *mhs, Init *rhs,
RecTy *Type) :
- OpInit(Type), Opc(opc), LHS(lhs), MHS(mhs), RHS(rhs) {}
+ OpInit(IK_TernOpInit, Type), Opc(opc), LHS(lhs), MHS(mhs), RHS(rhs) {}
- TernOpInit(const TernOpInit &Other); // Do not define.
- TernOpInit &operator=(const TernOpInit &Other); // Do not define.
+ TernOpInit(const TernOpInit &Other) LLVM_DELETED_FUNCTION;
+ TernOpInit &operator=(const TernOpInit &Other) LLVM_DELETED_FUNCTION;
public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_TernOpInit;
+ }
static TernOpInit *get(TernaryOp opc, Init *lhs,
Init *mhs, Init *rhs,
RecTy *Type);
@@ -946,8 +1071,8 @@
getType());
}
- int getNumOperands() const { return 3; }
- Init *getOperand(int i) const {
+ virtual int getNumOperands() const { return 3; }
+ virtual Init *getOperand(int i) const {
assert((i == 0 || i == 1 || i == 2) &&
"Invalid operand id for ternary operator");
if (i == 0) {
@@ -966,7 +1091,7 @@
// Fold - If possible, fold this to a simpler init. Return this if not
// possible to fold.
- Init *Fold(Record *CurRec, MultiClass *CurMultiClass) const;
+ virtual Init *Fold(Record *CurRec, MultiClass *CurMultiClass) const;
virtual bool isComplete() const { return false; }
@@ -982,14 +1107,17 @@
Init *VarName;
explicit VarInit(const std::string &VN, RecTy *T)
- : TypedInit(T), VarName(StringInit::get(VN)) {}
+ : TypedInit(IK_VarInit, T), VarName(StringInit::get(VN)) {}
explicit VarInit(Init *VN, RecTy *T)
- : TypedInit(T), VarName(VN) {}
+ : TypedInit(IK_VarInit, T), VarName(VN) {}
- VarInit(const VarInit &Other); // Do not define.
- VarInit &operator=(const VarInit &Other); // Do not define.
+ VarInit(const VarInit &Other) LLVM_DELETED_FUNCTION;
+ VarInit &operator=(const VarInit &Other) LLVM_DELETED_FUNCTION;
public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_VarInit;
+ }
static VarInit *get(const std::string &VN, RecTy *T);
static VarInit *get(Init *VN, RecTy *T);
@@ -1003,8 +1131,6 @@
return getNameInit()->getAsUnquotedString();
}
- virtual Init *resolveBitReference(Record &R, const RecordVal *RV,
- unsigned Bit) const;
virtual Init *resolveListElementReference(Record &R, const RecordVal *RV,
unsigned Elt) const;
@@ -1019,6 +1145,8 @@
///
virtual Init *resolveReferences(Record &R, const RecordVal *RV) const;
+ virtual Init *getBit(unsigned Bit) const;
+
virtual std::string getAsString() const { return getName(); }
};
@@ -1029,27 +1157,37 @@
TypedInit *TI;
unsigned Bit;
- VarBitInit(TypedInit *T, unsigned B) : TI(T), Bit(B) {
- assert(T->getType() && dynamic_cast<BitsRecTy*>(T->getType()) &&
- ((BitsRecTy*)T->getType())->getNumBits() > B &&
+ VarBitInit(TypedInit *T, unsigned B) : Init(IK_VarBitInit), TI(T), Bit(B) {
+ assert(T->getType() &&
+ (isa<IntRecTy>(T->getType()) ||
+ (isa<BitsRecTy>(T->getType()) &&
+ cast<BitsRecTy>(T->getType())->getNumBits() > B)) &&
"Illegal VarBitInit expression!");
}
- VarBitInit(const VarBitInit &Other); // Do not define.
- VarBitInit &operator=(const VarBitInit &Other); // Do not define.
+ VarBitInit(const VarBitInit &Other) LLVM_DELETED_FUNCTION;
+ VarBitInit &operator=(const VarBitInit &Other) LLVM_DELETED_FUNCTION;
public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_VarBitInit;
+ }
static VarBitInit *get(TypedInit *T, unsigned B);
virtual Init *convertInitializerTo(RecTy *Ty) const {
return Ty->convertValue(const_cast<VarBitInit *>(this));
}
- TypedInit *getVariable() const { return TI; }
- unsigned getBitNum() const { return Bit; }
+ virtual Init *getBitVar() const { return TI; }
+ virtual unsigned getBitNum() const { return Bit; }
virtual std::string getAsString() const;
virtual Init *resolveReferences(Record &R, const RecordVal *RV) const;
+
+ virtual Init *getBit(unsigned B) const {
+ assert(B < 1 && "Bit index out of range!");
+ return const_cast<VarBitInit*>(this);
+ }
};
/// VarListElementInit - List[4] - Represent access to one element of a var or
@@ -1059,18 +1197,20 @@
unsigned Element;
VarListElementInit(TypedInit *T, unsigned E)
- : TypedInit(dynamic_cast<ListRecTy*>(T->getType())->getElementType()),
- TI(T), Element(E) {
- assert(T->getType() && dynamic_cast<ListRecTy*>(T->getType()) &&
+ : TypedInit(IK_VarListElementInit,
+ cast<ListRecTy>(T->getType())->getElementType()),
+ TI(T), Element(E) {
+ assert(T->getType() && isa<ListRecTy>(T->getType()) &&
"Illegal VarBitInit expression!");
}
- VarListElementInit(const VarListElementInit &Other); // Do not define.
- VarListElementInit &operator=(const VarListElementInit &Other); // Do
- // not
- // define.
+ VarListElementInit(const VarListElementInit &Other) LLVM_DELETED_FUNCTION;
+ void operator=(const VarListElementInit &Other) LLVM_DELETED_FUNCTION;
public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_VarListElementInit;
+ }
static VarListElementInit *get(TypedInit *T, unsigned E);
virtual Init *convertInitializerTo(RecTy *Ty) const {
@@ -1080,9 +1220,6 @@
TypedInit *getVariable() const { return TI; }
unsigned getElementNum() const { return Element; }
- virtual Init *resolveBitReference(Record &R, const RecordVal *RV,
- unsigned Bit) const;
-
/// resolveListElementReference - This method is used to implement
/// VarListElementInit::resolveReferences. If the list element is resolvable
/// now, we return the resolved value, otherwise we return null.
@@ -1092,6 +1229,8 @@
virtual std::string getAsString() const;
virtual Init *resolveReferences(Record &R, const RecordVal *RV) const;
+
+ virtual Init *getBit(unsigned Bit) const;
};
/// DefInit - AL - Represent a reference to a 'def' in the description
@@ -1099,13 +1238,16 @@
class DefInit : public TypedInit {
Record *Def;
- DefInit(Record *D, RecordRecTy *T) : TypedInit(T), Def(D) {}
+ DefInit(Record *D, RecordRecTy *T) : TypedInit(IK_DefInit, T), Def(D) {}
friend class Record;
- DefInit(const DefInit &Other); // Do not define.
- DefInit &operator=(const DefInit &Other); // Do not define.
+ DefInit(const DefInit &Other) LLVM_DELETED_FUNCTION;
+ DefInit &operator=(const DefInit &Other) LLVM_DELETED_FUNCTION;
public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_DefInit;
+ }
static DefInit *get(Record*);
virtual Init *convertInitializerTo(RecTy *Ty) const {
@@ -1122,12 +1264,7 @@
virtual std::string getAsString() const;
- /// resolveBitReference - This method is used to implement
- /// VarBitInit::resolveReferences. If the bit is able to be resolved, we
- /// simply return the resolved value, otherwise we return null.
- ///
- virtual Init *resolveBitReference(Record &R, const RecordVal *RV,
- unsigned Bit) const {
+ virtual Init *getBit(unsigned Bit) const {
llvm_unreachable("Illegal bit reference off def");
}
@@ -1148,14 +1285,17 @@
std::string FieldName; // Field we are accessing
FieldInit(Init *R, const std::string &FN)
- : TypedInit(R->getFieldType(FN)), Rec(R), FieldName(FN) {
+ : TypedInit(IK_FieldInit, R->getFieldType(FN)), Rec(R), FieldName(FN) {
assert(getType() && "FieldInit with non-record type!");
}
- FieldInit(const FieldInit &Other); // Do not define.
- FieldInit &operator=(const FieldInit &Other); // Do not define.
+ FieldInit(const FieldInit &Other) LLVM_DELETED_FUNCTION;
+ FieldInit &operator=(const FieldInit &Other) LLVM_DELETED_FUNCTION;
public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_FieldInit;
+ }
static FieldInit *get(Init *R, const std::string &FN);
static FieldInit *get(Init *R, const Init *FN);
@@ -1163,8 +1303,8 @@
return Ty->convertValue(const_cast<FieldInit *>(this));
}
- virtual Init *resolveBitReference(Record &R, const RecordVal *RV,
- unsigned Bit) const;
+ virtual Init *getBit(unsigned Bit) const;
+
virtual Init *resolveListElementReference(Record &R,
const RecordVal *RV,
unsigned Elt) const;
@@ -1189,14 +1329,17 @@
DagInit(Init *V, const std::string &VN,
ArrayRef<Init *> ArgRange,
ArrayRef<std::string> NameRange)
- : TypedInit(DagRecTy::get()), Val(V), ValName(VN),
+ : TypedInit(IK_DagInit, DagRecTy::get()), Val(V), ValName(VN),
Args(ArgRange.begin(), ArgRange.end()),
ArgNames(NameRange.begin(), NameRange.end()) {}
- DagInit(const DagInit &Other); // Do not define.
- DagInit &operator=(const DagInit &Other); // Do not define.
+ DagInit(const DagInit &Other) LLVM_DELETED_FUNCTION;
+ DagInit &operator=(const DagInit &Other) LLVM_DELETED_FUNCTION;
public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_DagInit;
+ }
static DagInit *get(Init *V, const std::string &VN,
ArrayRef<Init *> ArgRange,
ArrayRef<std::string> NameRange);
@@ -1243,8 +1386,7 @@
inline size_t name_size () const { return ArgNames.size(); }
inline bool name_empty() const { return ArgNames.empty(); }
- virtual Init *resolveBitReference(Record &R, const RecordVal *RV,
- unsigned Bit) const {
+ virtual Init *getBit(unsigned Bit) const {
llvm_unreachable("Illegal bit reference off dag");
}
@@ -1301,7 +1443,9 @@
// Unique record ID.
unsigned ID;
Init *Name;
- SMLoc Loc;
+ // Location where the record was instantiated, followed by the locations
+ // of the multiclass prototypes used.
+ SmallVector<SMLoc, 4> Locs;
std::vector<Init *> TemplateArgs;
std::vector<RecordVal> Values;
std::vector<Record*> SuperClasses;
@@ -1317,15 +1461,25 @@
public:
// Constructs a record.
- explicit Record(const std::string &N, SMLoc loc, RecordKeeper &records) :
- ID(LastID++), Name(StringInit::get(N)), Loc(loc), TrackedRecords(records),
- TheInit(0) {
+ explicit Record(const std::string &N, ArrayRef<SMLoc> locs,
+ RecordKeeper &records) :
+ ID(LastID++), Name(StringInit::get(N)), Locs(locs.begin(), locs.end()),
+ TrackedRecords(records), TheInit(0) {
init();
}
- explicit Record(Init *N, SMLoc loc, RecordKeeper &records) :
- ID(LastID++), Name(N), Loc(loc), TrackedRecords(records), TheInit(0) {
+ explicit Record(Init *N, ArrayRef<SMLoc> locs, RecordKeeper &records) :
+ ID(LastID++), Name(N), Locs(locs.begin(), locs.end()),
+ TrackedRecords(records), TheInit(0) {
init();
}
+
+ // When copy-constructing a Record, we must still guarantee a globally unique
+ // ID number. All other fields can be copied normally.
+ Record(const Record &O) :
+ ID(LastID++), Name(O.Name), Locs(O.Locs), TemplateArgs(O.TemplateArgs),
+ Values(O.Values), SuperClasses(O.SuperClasses),
+ TrackedRecords(O.TrackedRecords), TheInit(O.TheInit) { }
+
~Record() {}
@@ -1345,7 +1499,7 @@
void setName(Init *Name); // Also updates RecordKeeper.
void setName(const std::string &Name); // Also updates RecordKeeper.
- SMLoc getLoc() const { return Loc; }
+ ArrayRef<SMLoc> getLoc() const { return Locs; }
/// get the corresponding DefInit.
DefInit *getDefInit();
@@ -1507,6 +1661,12 @@
///
bool getValueAsBit(StringRef FieldName) const;
+ /// getValueAsBitOrUnset - This method looks up the specified field and
+ /// returns its value as a bit. If the field is unset, sets Unset to true and
+ /// returns false.
+ ///
+ bool getValueAsBitOrUnset(StringRef FieldName, bool &Unset) const;
+
/// getValueAsInt - This method looks up the specified field and returns its
/// value as an int64_t, throwing an exception if the field does not exist or
/// if the value is not the right type.
@@ -1601,6 +1761,16 @@
}
};
+/// LessRecordByID - Sorting predicate to sort record pointers by their
+/// unique ID. If you just need a deterministic order, use this, since it
+/// just compares two `unsigned`; the other sorting predicates require
+/// string manipulation.
+struct LessRecordByID {
+ bool operator()(const Record *LHS, const Record *RHS) const {
+ return LHS->getID() < RHS->getID();
+ }
+};
+
/// LessRecordFieldName - Sorting predicate to sort record pointers by their
/// name field.
///
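A sketch of the new predicate in use (not part of the patch; assumes a
populated RecordKeeper, whose getDefs() returns a map from def names to
records):

    #include "llvm/TableGen/Record.h"
    #include <algorithm>
    #include <map>
    #include <vector>

    // Deterministic, string-free ordering of all defs.
    void sortDefs(llvm::RecordKeeper &Records,
                  std::vector<llvm::Record*> &Out) {
      const std::map<std::string, llvm::Record*> &Defs = Records.getDefs();
      for (std::map<std::string, llvm::Record*>::const_iterator
             I = Defs.begin(), E = Defs.end(); I != E; ++I)
        Out.push_back(I->second);
      std::sort(Out.begin(), Out.end(), llvm::LessRecordByID());
    }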
Removed: llvm/branches/AMDILBackend/include/llvm/TableGen/TableGenAction.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/TableGen/TableGenAction.h?rev=172540&view=auto
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/TableGen/TableGenAction.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/TableGen/TableGenAction.h (removed)
@@ -1,35 +0,0 @@
-//===- llvm/TableGen/TableGenAction.h - defines TableGenAction --*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the TableGenAction base class to be derived from by
-// tblgen tools.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TABLEGEN_TABLEGENACTION_H
-#define LLVM_TABLEGEN_TABLEGENACTION_H
-
-namespace llvm {
-
-class raw_ostream;
-class RecordKeeper;
-
-class TableGenAction {
- virtual void anchor();
-public:
- virtual ~TableGenAction() {}
-
- /// Perform the action using Records, and write output to OS.
- /// @returns true on error, false otherwise
- virtual bool operator()(raw_ostream &OS, RecordKeeper &Records) = 0;
-};
-
-}
-
-#endif
Modified: llvm/branches/AMDILBackend/include/llvm/Target/Mangler.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Target/Mangler.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Target/Mangler.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Target/Mangler.h Tue Jan 15 11:16:16 2013
@@ -22,7 +22,7 @@
template <typename T> class SmallVectorImpl;
class MCContext;
class MCSymbol;
-class TargetData;
+class DataLayout;
class Mangler {
public:
@@ -34,7 +34,7 @@
private:
MCContext &Context;
- const TargetData &TD;
+ const DataLayout &TD;
/// AnonGlobalIDs - We need to give global values the same name every time
/// they are mangled. This keeps track of the number we give to anonymous
@@ -47,20 +47,19 @@
unsigned NextAnonGlobalID;
public:
- Mangler(MCContext &context, const TargetData &td)
+ Mangler(MCContext &context, const DataLayout &td)
: Context(context), TD(td), NextAnonGlobalID(1) {}
/// getSymbol - Return the MCSymbol for the specified global value. This
/// symbol is the main label that is the address of the global.
MCSymbol *getSymbol(const GlobalValue *GV);
-
/// getNameWithPrefix - Fill OutName with the name of the appropriate prefix
/// and the specified global variable's name. If the global variable doesn't
/// have a name, this fills in a unique name for the global.
void getNameWithPrefix(SmallVectorImpl<char> &OutName, const GlobalValue *GV,
bool isImplicitlyPrivate);
-
+
/// getNameWithPrefix - Fill OutName with the name of the appropriate prefix
/// and the specified name as the global variable name. GVName must not be
/// empty.
Modified: llvm/branches/AMDILBackend/include/llvm/Target/Target.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Target/Target.td?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Target/Target.td (original)
+++ llvm/branches/AMDILBackend/include/llvm/Target/Target.td Tue Jan 15 11:16:16 2013
@@ -28,6 +28,24 @@
// ComposedOf - A list of two SubRegIndex instances, [A, B].
// This indicates that this SubRegIndex is the result of composing A and B.
list<SubRegIndex> ComposedOf = comps;
+
+ // CoveringSubRegIndices - A list of two or more sub-register indexes that
+ // cover this sub-register.
+ //
+ // This field should normally be left blank as TableGen can infer it.
+ //
+ // TableGen automatically detects sub-registers that straddle the registers
+ // in the SubRegs field of a Register definition. For example:
+ //
+ // Q0 = dsub_0 -> D0, dsub_1 -> D1
+ // Q1 = dsub_0 -> D2, dsub_1 -> D3
+ // D1_D2 = dsub_0 -> D1, dsub_1 -> D2
+ // QQ0 = qsub_0 -> Q0, qsub_1 -> Q1
+ //
+ // TableGen will infer that D1_D2 is a sub-register of QQ0. It will be given
+ // the synthetic index dsub_1_dsub_2 unless some SubRegIndex is defined with
+ // CoveringSubRegIndices = [dsub_1, dsub_2].
+ list<SubRegIndex> CoveringSubRegIndices = [];
}
// RegAltNameIndex - The alternate name set to use for register operands of
@@ -321,11 +339,12 @@
bit isCompare = 0; // Is this instruction a comparison instruction?
bit isMoveImm = 0; // Is this instruction a move immediate instruction?
bit isBitcast = 0; // Is this instruction a bitcast instruction?
+ bit isSelect = 0; // Is this instruction a select instruction?
bit isBarrier = 0; // Can control flow fall through this instruction?
bit isCall = 0; // Is this instruction a call instruction?
bit canFoldAsLoad = 0; // Can this be folded as a simple memory operand?
- bit mayLoad = 0; // Is it possible for this inst to read memory?
- bit mayStore = 0; // Is it possible for this inst to write memory?
+ bit mayLoad = ?; // Is it possible for this inst to read memory?
+ bit mayStore = ?; // Is it possible for this inst to write memory?
bit isConvertibleToThreeAddress = 0; // Can this 2-addr instruction promote?
bit isCommutable = 0; // Is this 3 operand instruction commutable?
bit isTerminator = 0; // Is this part of the terminator for a basic block?
@@ -350,7 +369,7 @@
//
// neverHasSideEffects - Set on an instruction with no pattern if it has no
// side effects.
- bit hasSideEffects = 0;
+ bit hasSideEffects = ?;
bit neverHasSideEffects = 0;
// Is this instruction a "real" instruction (with a distinct machine
@@ -476,7 +495,8 @@
/// unknown definition - Mark this operand as being of unknown type, causing
/// it to be resolved by inference in the context it is used.
-def unknown;
+class unknown_class;
+def unknown : unknown_class;
/// AsmOperandClass - Representation for the kinds of operands which the target
/// specific parser can create and the assembly matcher may need to distinguish.
@@ -583,23 +603,31 @@
///
def zero_reg;
+/// OperandWithDefaultOps - This Operand class can be used as the parent class
+/// for an Operand that needs to be initialized with a default value if
+/// no value is supplied in a pattern. This class can be used to simplify the
+/// pattern definitions for instructions that have target specific flags
+/// encoded as immediate operands.
+class OperandWithDefaultOps<ValueType ty, dag defaultops>
+ : Operand<ty> {
+ dag DefaultOps = defaultops;
+}
+
/// PredicateOperand - This can be used to define a predicate operand for an
/// instruction. OpTypes specifies the MIOperandInfo for the operand, and
/// AlwaysVal specifies the value of this predicate when set to "always
/// execute".
class PredicateOperand<ValueType ty, dag OpTypes, dag AlwaysVal>
- : Operand<ty> {
+ : OperandWithDefaultOps<ty, AlwaysVal> {
let MIOperandInfo = OpTypes;
- dag DefaultOps = AlwaysVal;
}
/// OptionalDefOperand - This is used to define an optional definition operand
/// for an instruction. DefaultOps is the register the operand represents if
/// none is supplied, e.g. zero_reg.
class OptionalDefOperand<ValueType ty, dag OpTypes, dag defaultops>
- : Operand<ty> {
+ : OperandWithDefaultOps<ty, defaultops> {
let MIOperandInfo = OpTypes;
- dag DefaultOps = defaultops;
}
@@ -612,6 +640,17 @@
// Sparc manual specifies its instructions in the format [31..0] (big), while
// PowerPC specifies them using the format [0..31] (little).
bit isLittleEndianEncoding = 0;
+
+ // The instruction properties mayLoad, mayStore, and hasSideEffects are unset
+ // by default, and TableGen will infer their value from the instruction
+ // pattern when possible.
+ //
+  // Normally, TableGen will issue an error if it can't infer the value of a
+ // property that hasn't been set explicitly. When guessInstructionProperties
+ // is set, it will guess a safe value instead.
+ //
+ // This option is a temporary migration help. It will go away.
+ bit guessInstructionProperties = 1;
}
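A target that has finished migrating could then request strict property checking in its InstrInfo record; a hypothetical example:

    def MyInstrInfo : InstrInfo {
      let guessInstructionProperties = 0;
    }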
// Standard Pseudo Instructions.
@@ -715,6 +754,18 @@
let InOperandList = (ins variable_ops);
let AsmString = "BUNDLE";
}
+def LIFETIME_START : Instruction {
+ let OutOperandList = (outs);
+ let InOperandList = (ins i32imm:$id);
+ let AsmString = "LIFETIME_START";
+ let neverHasSideEffects = 1;
+}
+def LIFETIME_END : Instruction {
+ let OutOperandList = (outs);
+ let InOperandList = (ins i32imm:$id);
+ let AsmString = "LIFETIME_END";
+ let neverHasSideEffects = 1;
+}
}
//===----------------------------------------------------------------------===//
@@ -734,6 +785,10 @@
// function of the AsmParser class to call on every matched instruction.
// This can be used to perform target specific instruction post-processing.
string AsmParserInstCleanup = "";
+
+  // ShouldEmitMatchRegisterName - Set to false if the target needs a
+  // hand-written register name matcher.
+ bit ShouldEmitMatchRegisterName = 1;
}
def DefaultAsmParser : AsmParser;
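A target with a hand-written matcher would override the default along these lines (hypothetical record name):

    def MyAsmParser : AsmParser {
      let ShouldEmitMatchRegisterName = 0;
    }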
@@ -934,12 +989,64 @@
// ProcessorModel allows subtargets to specify the more general
// SchedMachineModel instead of a ProcessorItinerary. Subtargets will
// gradually move to this newer form.
+//
+// Although this class always passes NoItineraries to the Processor
+// class, the SchedMachineModel may still define valid Itineraries.
class ProcessorModel<string n, SchedMachineModel m, list<SubtargetFeature> f>
: Processor<n, NoItineraries, f> {
let SchedModel = m;
}
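A typical use might look like the following sketch, assuming MySchedModel and FeatureFast are defined by the target:

    def : ProcessorModel<"mycpu", MySchedModel, [FeatureFast]>;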
//===----------------------------------------------------------------------===//
+// InstrMapping - This class is used to create mapping tables to relate
+// instructions with each other based on the values specified in RowFields,
+// ColFields, KeyCol and ValueCols.
+//
+class InstrMapping {
+ // FilterClass - Used to limit search space only to the instructions that
+ // define the relationship modeled by this InstrMapping record.
+ string FilterClass;
+
+  // RowFields - List of fields/attributes that should be the same for all the
+  // instructions in a row of the relation table. Think of this as a set of
+  // properties shared by all the instructions related by this relationship
+  // model; it is used to categorize instructions into subgroups. For instance,
+  // if we want to define a relation that maps the 'Add' instruction to its
+  // predicated forms, we can define RowFields like this:
+  //
+  // let RowFields = BaseOp
+  // All add instructions, predicated or not, will have to set their BaseOp
+  // to the same value.
+ //
+ // def Add: { let BaseOp = 'ADD'; let predSense = 'nopred' }
+ // def Add_predtrue: { let BaseOp = 'ADD'; let predSense = 'true' }
+ // def Add_predfalse: { let BaseOp = 'ADD'; let predSense = 'false' }
+ list<string> RowFields = [];
+
+  // ColFields - List of fields/attributes that are the same for all the
+  // instructions in a column of the relation table.
+  // Ex: let ColFields = 'predSense' -- It means that the columns are arranged
+  //     based on the 'predSense' values. All the instructions in a specific
+  //     column have the same value, and it is fixed for the column according
+  //     to the values set in 'ValueCols'.
+ list<string> ColFields = [];
+
+  // KeyCol - Values for the fields/attributes listed in 'ColFields'.
+ // Ex: let KeyCol = 'nopred' -- It means that the key instruction (instruction
+ // that models this relation) should be non-predicated.
+ // In the example above, 'Add' is the key instruction.
+ list<string> KeyCol = [];
+
+  // ValueCols - List of values for the fields/attributes listed in
+  // 'ColFields', one for each column in the relation table.
+  //
+  // Ex: let ValueCols = [['true'],['false']] -- It adds two columns to the
+  //     table. The first column requires all the instructions to have
+  //     predSense set to 'true', and the second column requires it to be
+  //     'false'.
+ list<list<string> > ValueCols = [];
+}
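Putting the fields together, the Add example from the comments above could be modeled roughly as follows (a sketch echoing the comments; none of these records are defined in this patch):

    def getPredOpcode : InstrMapping {
      let FilterClass = "PredRel";
      let RowFields = ["BaseOp"];
      let ColFields = ["predSense"];
      let KeyCol = ["nopred"];
      let ValueCols = [["true"], ["false"]];
    }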
+
+//===----------------------------------------------------------------------===//
// Pull in the common support for calling conventions.
//
include "llvm/Target/TargetCallingConv.td"
Modified: llvm/branches/AMDILBackend/include/llvm/Target/TargetCallingConv.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Target/TargetCallingConv.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Target/TargetCallingConv.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Target/TargetCallingConv.h Tue Jan 15 11:16:16 2013
@@ -113,9 +113,18 @@
MVT VT;
bool Used;
+  /// Index of the original Function's argument.
+  unsigned OrigArgIndex;
+
+  /// Offset in bytes of the current input value relative to the beginning of
+  /// the original argument. E.g., if the argument was split into four 32-bit
+  /// registers, we get 4 InputArgs with PartOffsets 0, 4, 8 and 12.
+ unsigned PartOffset;
+
InputArg() : VT(MVT::Other), Used(false) {}
- InputArg(ArgFlagsTy flags, EVT vt, bool used)
- : Flags(flags), Used(used) {
+ InputArg(ArgFlagsTy flags, EVT vt, bool used,
+ unsigned origIdx, unsigned partOffs)
+ : Flags(flags), Used(used), OrigArgIndex(origIdx), PartOffset(partOffs) {
VT = vt.getSimpleVT();
}
};
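A minimal C++ sketch of how a lowering might fill in the new fields when an i128 argument is split into four i32 pieces (Flags and the Ins vector are assumed from the surrounding lowering code):

    // One InputArg per 32-bit piece of original argument 0.
    for (unsigned i = 0; i != 4; ++i)
      Ins.push_back(ISD::InputArg(Flags, MVT::i32, /*used=*/true,
                                  /*origIdx=*/0, /*partOffs=*/i * 4));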
@@ -131,9 +140,19 @@
/// IsFixed - Is this a "fixed" value, ie not passed through a vararg "...".
bool IsFixed;
+  /// Index of the original Function's argument.
+  unsigned OrigArgIndex;
+
+  /// Offset in bytes of the current output value relative to the beginning of
+  /// the original argument. E.g., if the argument was split into four 32-bit
+  /// registers, we get 4 OutputArgs with PartOffsets 0, 4, 8 and 12.
+ unsigned PartOffset;
+
OutputArg() : IsFixed(false) {}
- OutputArg(ArgFlagsTy flags, EVT vt, bool isfixed)
- : Flags(flags), IsFixed(isfixed) {
+ OutputArg(ArgFlagsTy flags, EVT vt, bool isfixed,
+ unsigned origIdx, unsigned partOffs)
+ : Flags(flags), IsFixed(isfixed), OrigArgIndex(origIdx),
+ PartOffset(partOffs) {
VT = vt.getSimpleVT();
}
};
Removed: llvm/branches/AMDILBackend/include/llvm/Target/TargetData.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Target/TargetData.h?rev=172540&view=auto
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Target/TargetData.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Target/TargetData.h (removed)
@@ -1,363 +0,0 @@
-//===-- llvm/Target/TargetData.h - Data size & alignment info ---*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines target properties related to datatype size/offset/alignment
-// information. It uses lazy annotations to cache information about how
-// structure types are laid out and used.
-//
-// This structure should be created once, filled in if the defaults are not
-// correct and then passed around by const&. None of the members functions
-// require modification to the object.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TARGET_TARGETDATA_H
-#define LLVM_TARGET_TARGETDATA_H
-
-#include "llvm/Pass.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/DataTypes.h"
-
-namespace llvm {
-
-class Value;
-class Type;
-class IntegerType;
-class StructType;
-class StructLayout;
-class GlobalVariable;
-class LLVMContext;
-template<typename T>
-class ArrayRef;
-
-/// Enum used to categorize the alignment types stored by TargetAlignElem
-enum AlignTypeEnum {
- INTEGER_ALIGN = 'i', ///< Integer type alignment
- VECTOR_ALIGN = 'v', ///< Vector type alignment
- FLOAT_ALIGN = 'f', ///< Floating point type alignment
- AGGREGATE_ALIGN = 'a', ///< Aggregate alignment
- STACK_ALIGN = 's' ///< Stack objects alignment
-};
-
-/// Target alignment element.
-///
-/// Stores the alignment data associated with a given alignment type (pointer,
-/// integer, vector, float) and type bit width.
-///
-/// @note The unusual order of elements in the structure attempts to reduce
-/// padding and make the structure slightly more cache friendly.
-struct TargetAlignElem {
- AlignTypeEnum AlignType : 8; ///< Alignment type (AlignTypeEnum)
- unsigned ABIAlign; ///< ABI alignment for this type/bitw
- unsigned PrefAlign; ///< Pref. alignment for this type/bitw
- uint32_t TypeBitWidth; ///< Type bit width
-
- /// Initializer
- static TargetAlignElem get(AlignTypeEnum align_type, unsigned abi_align,
- unsigned pref_align, uint32_t bit_width);
- /// Equality predicate
- bool operator==(const TargetAlignElem &rhs) const;
-};
-
-/// TargetData - This class holds a parsed version of the target data layout
-/// string in a module and provides methods for querying it. The target data
-/// layout string is specified *by the target* - a frontend generating LLVM IR
-/// is required to generate the right target data for the target being codegen'd
-/// to. If some measure of portability is desired, an empty string may be
-/// specified in the module.
-class TargetData : public ImmutablePass {
-private:
- bool LittleEndian; ///< Defaults to false
- unsigned PointerMemSize; ///< Pointer size in bytes
- unsigned PointerABIAlign; ///< Pointer ABI alignment
- unsigned PointerPrefAlign; ///< Pointer preferred alignment
- unsigned StackNaturalAlign; ///< Stack natural alignment
-
- SmallVector<unsigned char, 8> LegalIntWidths; ///< Legal Integers.
-
- /// Alignments- Where the primitive type alignment data is stored.
- ///
- /// @sa init().
- /// @note Could support multiple size pointer alignments, e.g., 32-bit
- /// pointers vs. 64-bit pointers by extending TargetAlignment, but for now,
- /// we don't.
- SmallVector<TargetAlignElem, 16> Alignments;
-
- /// InvalidAlignmentElem - This member is a signal that a requested alignment
- /// type and bit width were not found in the SmallVector.
- static const TargetAlignElem InvalidAlignmentElem;
-
- // The StructType -> StructLayout map.
- mutable void *LayoutMap;
-
- //! Set/initialize target alignments
- void setAlignment(AlignTypeEnum align_type, unsigned abi_align,
- unsigned pref_align, uint32_t bit_width);
- unsigned getAlignmentInfo(AlignTypeEnum align_type, uint32_t bit_width,
- bool ABIAlign, Type *Ty) const;
- //! Internal helper method that returns requested alignment for type.
- unsigned getAlignment(Type *Ty, bool abi_or_pref) const;
-
- /// Valid alignment predicate.
- ///
- /// Predicate that tests a TargetAlignElem reference returned by get() against
- /// InvalidAlignmentElem.
- bool validAlignment(const TargetAlignElem &align) const {
- return &align != &InvalidAlignmentElem;
- }
-
- /// Initialise a TargetData object with default values, ensure that the
- /// target data pass is registered.
- void init();
-
-public:
- /// Default ctor.
- ///
- /// @note This has to exist, because this is a pass, but it should never be
- /// used.
- TargetData();
-
- /// Constructs a TargetData from a specification string. See init().
- explicit TargetData(StringRef TargetDescription)
- : ImmutablePass(ID) {
- std::string errMsg = parseSpecifier(TargetDescription, this);
- assert(errMsg == "" && "Invalid target data layout string.");
- (void)errMsg;
- }
-
- /// Parses a target data specification string. Returns an error message
- /// if the string is malformed, or the empty string on success. Optionally
- /// initialises a TargetData object if passed a non-null pointer.
- static std::string parseSpecifier(StringRef TargetDescription, TargetData* td = 0);
-
- /// Initialize target data from properties stored in the module.
- explicit TargetData(const Module *M);
-
- TargetData(const TargetData &TD) :
- ImmutablePass(ID),
- LittleEndian(TD.isLittleEndian()),
- PointerMemSize(TD.PointerMemSize),
- PointerABIAlign(TD.PointerABIAlign),
- PointerPrefAlign(TD.PointerPrefAlign),
- LegalIntWidths(TD.LegalIntWidths),
- Alignments(TD.Alignments),
- LayoutMap(0)
- { }
-
- ~TargetData(); // Not virtual, do not subclass this class
-
- /// Target endianness...
- bool isLittleEndian() const { return LittleEndian; }
- bool isBigEndian() const { return !LittleEndian; }
-
- /// getStringRepresentation - Return the string representation of the
- /// TargetData. This representation is in the same format accepted by the
- /// string constructor above.
- std::string getStringRepresentation() const;
-
- /// isLegalInteger - This function returns true if the specified type is
- /// known to be a native integer type supported by the CPU. For example,
- /// i64 is not native on most 32-bit CPUs and i37 is not native on any known
- /// one. This returns false if the integer width is not legal.
- ///
- /// The width is specified in bits.
- ///
- bool isLegalInteger(unsigned Width) const {
- for (unsigned i = 0, e = (unsigned)LegalIntWidths.size(); i != e; ++i)
- if (LegalIntWidths[i] == Width)
- return true;
- return false;
- }
-
- bool isIllegalInteger(unsigned Width) const {
- return !isLegalInteger(Width);
- }
-
- /// Returns true if the given alignment exceeds the natural stack alignment.
- bool exceedsNaturalStackAlignment(unsigned Align) const {
- return (StackNaturalAlign != 0) && (Align > StackNaturalAlign);
- }
-
- /// fitsInLegalInteger - This function returns true if the specified type fits
- /// in a native integer type supported by the CPU. For example, if the CPU
- /// only supports i32 as a native integer type, then i27 fits in a legal
- // integer type but i45 does not.
- bool fitsInLegalInteger(unsigned Width) const {
- for (unsigned i = 0, e = (unsigned)LegalIntWidths.size(); i != e; ++i)
- if (Width <= LegalIntWidths[i])
- return true;
- return false;
- }
-
- /// Target pointer alignment
- unsigned getPointerABIAlignment() const { return PointerABIAlign; }
- /// Return target's alignment for stack-based pointers
- unsigned getPointerPrefAlignment() const { return PointerPrefAlign; }
- /// Target pointer size
- unsigned getPointerSize() const { return PointerMemSize; }
- /// Target pointer size, in bits
- unsigned getPointerSizeInBits() const { return 8*PointerMemSize; }
-
- /// Size examples:
- ///
- /// Type SizeInBits StoreSizeInBits AllocSizeInBits[*]
- /// ---- ---------- --------------- ---------------
- /// i1 1 8 8
- /// i8 8 8 8
- /// i19 19 24 32
- /// i32 32 32 32
- /// i100 100 104 128
- /// i128 128 128 128
- /// Float 32 32 32
- /// Double 64 64 64
- /// X86_FP80 80 80 96
- ///
- /// [*] The alloc size depends on the alignment, and thus on the target.
- /// These values are for x86-32 linux.
-
- /// getTypeSizeInBits - Return the number of bits necessary to hold the
- /// specified type. For example, returns 36 for i36 and 80 for x86_fp80.
- uint64_t getTypeSizeInBits(Type* Ty) const;
-
- /// getTypeStoreSize - Return the maximum number of bytes that may be
- /// overwritten by storing the specified type. For example, returns 5
- /// for i36 and 10 for x86_fp80.
- uint64_t getTypeStoreSize(Type *Ty) const {
- return (getTypeSizeInBits(Ty)+7)/8;
- }
-
- /// getTypeStoreSizeInBits - Return the maximum number of bits that may be
- /// overwritten by storing the specified type; always a multiple of 8. For
- /// example, returns 40 for i36 and 80 for x86_fp80.
- uint64_t getTypeStoreSizeInBits(Type *Ty) const {
- return 8*getTypeStoreSize(Ty);
- }
-
- /// getTypeAllocSize - Return the offset in bytes between successive objects
- /// of the specified type, including alignment padding. This is the amount
- /// that alloca reserves for this type. For example, returns 12 or 16 for
- /// x86_fp80, depending on alignment.
- uint64_t getTypeAllocSize(Type* Ty) const {
- // Round up to the next alignment boundary.
- return RoundUpAlignment(getTypeStoreSize(Ty), getABITypeAlignment(Ty));
- }
-
- /// getTypeAllocSizeInBits - Return the offset in bits between successive
- /// objects of the specified type, including alignment padding; always a
- /// multiple of 8. This is the amount that alloca reserves for this type.
- /// For example, returns 96 or 128 for x86_fp80, depending on alignment.
- uint64_t getTypeAllocSizeInBits(Type* Ty) const {
- return 8*getTypeAllocSize(Ty);
- }
-
- /// getABITypeAlignment - Return the minimum ABI-required alignment for the
- /// specified type.
- unsigned getABITypeAlignment(Type *Ty) const;
-
- /// getABIIntegerTypeAlignment - Return the minimum ABI-required alignment for
- /// an integer type of the specified bitwidth.
- unsigned getABIIntegerTypeAlignment(unsigned BitWidth) const;
-
-
- /// getCallFrameTypeAlignment - Return the minimum ABI-required alignment
- /// for the specified type when it is part of a call frame.
- unsigned getCallFrameTypeAlignment(Type *Ty) const;
-
-
- /// getPrefTypeAlignment - Return the preferred stack/global alignment for
- /// the specified type. This is always at least as good as the ABI alignment.
- unsigned getPrefTypeAlignment(Type *Ty) const;
-
- /// getPreferredTypeAlignmentShift - Return the preferred alignment for the
- /// specified type, returned as log2 of the value (a shift amount).
- ///
- unsigned getPreferredTypeAlignmentShift(Type *Ty) const;
-
- /// getIntPtrType - Return an unsigned integer type that is the same size or
- /// greater to the host pointer size.
- ///
- IntegerType *getIntPtrType(LLVMContext &C) const;
-
- /// getIndexedOffset - return the offset from the beginning of the type for
- /// the specified indices. This is used to implement getelementptr.
- ///
- uint64_t getIndexedOffset(Type *Ty, ArrayRef<Value *> Indices) const;
-
- /// getStructLayout - Return a StructLayout object, indicating the alignment
- /// of the struct, its size, and the offsets of its fields. Note that this
- /// information is lazily cached.
- const StructLayout *getStructLayout(StructType *Ty) const;
-
- /// getPreferredAlignment - Return the preferred alignment of the specified
- /// global. This includes an explicitly requested alignment (if the global
- /// has one).
- unsigned getPreferredAlignment(const GlobalVariable *GV) const;
-
- /// getPreferredAlignmentLog - Return the preferred alignment of the
- /// specified global, returned in log form. This includes an explicitly
- /// requested alignment (if the global has one).
- unsigned getPreferredAlignmentLog(const GlobalVariable *GV) const;
-
- /// RoundUpAlignment - Round the specified value up to the next alignment
- /// boundary specified by Alignment. For example, 7 rounded up to an
- /// alignment boundary of 4 is 8. 8 rounded up to the alignment boundary of 4
- /// is 8 because it is already aligned.
- template <typename UIntTy>
- static UIntTy RoundUpAlignment(UIntTy Val, unsigned Alignment) {
- assert((Alignment & (Alignment-1)) == 0 && "Alignment must be power of 2!");
- return (Val + (Alignment-1)) & ~UIntTy(Alignment-1);
- }
-
- static char ID; // Pass identification, replacement for typeid
-};
-
-/// StructLayout - used to lazily calculate structure layout information for a
-/// target machine, based on the TargetData structure.
-///
-class StructLayout {
- uint64_t StructSize;
- unsigned StructAlignment;
- unsigned NumElements;
- uint64_t MemberOffsets[1]; // variable sized array!
-public:
-
- uint64_t getSizeInBytes() const {
- return StructSize;
- }
-
- uint64_t getSizeInBits() const {
- return 8*StructSize;
- }
-
- unsigned getAlignment() const {
- return StructAlignment;
- }
-
- /// getElementContainingOffset - Given a valid byte offset into the structure,
- /// return the structure index that contains it.
- ///
- unsigned getElementContainingOffset(uint64_t Offset) const;
-
- uint64_t getElementOffset(unsigned Idx) const {
- assert(Idx < NumElements && "Invalid element idx!");
- return MemberOffsets[Idx];
- }
-
- uint64_t getElementOffsetInBits(unsigned Idx) const {
- return getElementOffset(Idx)*8;
- }
-
-private:
- friend class TargetData; // Only TargetData can create this class
- StructLayout(StructType *ST, const TargetData &TD);
-};
-
-} // End llvm namespace
-
-#endif
Removed: llvm/branches/AMDILBackend/include/llvm/Target/TargetELFWriterInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Target/TargetELFWriterInfo.h?rev=172540&view=auto
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Target/TargetELFWriterInfo.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Target/TargetELFWriterInfo.h (removed)
@@ -1,121 +0,0 @@
-//===-- llvm/Target/TargetELFWriterInfo.h - ELF Writer Info -----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the TargetELFWriterInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TARGET_TARGETELFWRITERINFO_H
-#define LLVM_TARGET_TARGETELFWRITERINFO_H
-
-namespace llvm {
-
- //===--------------------------------------------------------------------===//
- // TargetELFWriterInfo
- //===--------------------------------------------------------------------===//
-
- class TargetELFWriterInfo {
- protected:
- // EMachine - This field is the target specific value to emit as the
- // e_machine member of the ELF header.
- unsigned short EMachine;
- bool is64Bit, isLittleEndian;
- public:
-
- // Machine architectures
- enum MachineType {
- EM_NONE = 0, // No machine
- EM_M32 = 1, // AT&T WE 32100
- EM_SPARC = 2, // SPARC
- EM_386 = 3, // Intel 386
- EM_68K = 4, // Motorola 68000
- EM_88K = 5, // Motorola 88000
- EM_486 = 6, // Intel 486 (deprecated)
- EM_860 = 7, // Intel 80860
- EM_MIPS = 8, // MIPS R3000
- EM_PPC = 20, // PowerPC
- EM_ARM = 40, // ARM
- EM_ALPHA = 41, // DEC Alpha
- EM_SPARCV9 = 43, // SPARC V9
- EM_X86_64 = 62, // AMD64
- EM_HEXAGON = 164 // Qualcomm Hexagon
- };
-
- // ELF File classes
- enum {
- ELFCLASS32 = 1, // 32-bit object file
- ELFCLASS64 = 2 // 64-bit object file
- };
-
- // ELF Endianess
- enum {
- ELFDATA2LSB = 1, // Little-endian object file
- ELFDATA2MSB = 2 // Big-endian object file
- };
-
- explicit TargetELFWriterInfo(bool is64Bit_, bool isLittleEndian_);
- virtual ~TargetELFWriterInfo();
-
- unsigned short getEMachine() const { return EMachine; }
- unsigned getEFlags() const { return 0; }
- unsigned getEIClass() const { return is64Bit ? ELFCLASS64 : ELFCLASS32; }
- unsigned getEIData() const {
- return isLittleEndian ? ELFDATA2LSB : ELFDATA2MSB;
- }
-
- /// ELF Header and ELF Section Header Info
- unsigned getHdrSize() const { return is64Bit ? 64 : 52; }
- unsigned getSHdrSize() const { return is64Bit ? 64 : 40; }
-
- /// Symbol Table Info
- unsigned getSymTabEntrySize() const { return is64Bit ? 24 : 16; }
-
- /// getPrefELFAlignment - Returns the preferred alignment for ELF. This
- /// is used to align some sections.
- unsigned getPrefELFAlignment() const { return is64Bit ? 8 : 4; }
-
- /// getRelocationEntrySize - Entry size used in the relocation section
- unsigned getRelocationEntrySize() const {
- return is64Bit ? (hasRelocationAddend() ? 24 : 16)
- : (hasRelocationAddend() ? 12 : 8);
- }
-
- /// getRelocationType - Returns the target specific ELF Relocation type.
- /// 'MachineRelTy' contains the object code independent relocation type
- virtual unsigned getRelocationType(unsigned MachineRelTy) const = 0;
-
- /// hasRelocationAddend - True if the target uses an addend in the
- /// ELF relocation entry.
- virtual bool hasRelocationAddend() const = 0;
-
- /// getDefaultAddendForRelTy - Gets the default addend value for a
- /// relocation entry based on the target ELF relocation type.
- virtual long int getDefaultAddendForRelTy(unsigned RelTy,
- long int Modifier = 0) const = 0;
-
- /// getRelTySize - Returns the size of relocatable field in bits
- virtual unsigned getRelocationTySize(unsigned RelTy) const = 0;
-
- /// isPCRelativeRel - True if the relocation type is pc relative
- virtual bool isPCRelativeRel(unsigned RelTy) const = 0;
-
- /// getJumpTableRelocationTy - Returns the machine relocation type used
- /// to reference a jumptable.
- virtual unsigned getAbsoluteLabelMachineRelTy() const = 0;
-
- /// computeRelocation - Some relocatable fields could be relocated
- /// directly, avoiding the relocation symbol emission, compute the
- /// final relocation value for this symbol.
- virtual long int computeRelocation(unsigned SymOffset, unsigned RelOffset,
- unsigned RelTy) const = 0;
- };
-
-} // end llvm namespace
-
-#endif // LLVM_TARGET_TARGETELFWRITERINFO_H
Modified: llvm/branches/AMDILBackend/include/llvm/Target/TargetInstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Target/TargetInstrInfo.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Target/TargetInstrInfo.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Target/TargetInstrInfo.h Tue Jan 15 11:16:16 2013
@@ -45,8 +45,8 @@
/// TargetInstrInfo - Interface to description of machine instruction set
///
class TargetInstrInfo : public MCInstrInfo {
- TargetInstrInfo(const TargetInstrInfo &); // DO NOT IMPLEMENT
- void operator=(const TargetInstrInfo &); // DO NOT IMPLEMENT
+ TargetInstrInfo(const TargetInstrInfo &) LLVM_DELETED_FUNCTION;
+ void operator=(const TargetInstrInfo &) LLVM_DELETED_FUNCTION;
public:
TargetInstrInfo(int CFSetupOpcode = -1, int CFDestroyOpcode = -1)
: CallFrameSetupOpcode(CFSetupOpcode),
@@ -188,14 +188,6 @@
const MachineInstr *Orig,
const TargetRegisterInfo &TRI) const = 0;
- /// scheduleTwoAddrSource - Schedule the copy / re-mat of the source of the
- /// two-addrss instruction inserted by two-address pass.
- virtual void scheduleTwoAddrSource(MachineInstr *SrcMI,
- MachineInstr *UseMI,
- const TargetRegisterInfo &TRI) const {
- // Do nothing.
- }
-
/// duplicate - Create a duplicate of the Orig instruction in MF. This is like
/// MachineFunction::CloneMachineInstr(), but the target may update operands
/// that are required to be unique.
@@ -421,7 +413,59 @@
llvm_unreachable("Target didn't implement TargetInstrInfo::insertSelect!");
}
+ /// analyzeSelect - Analyze the given select instruction, returning true if
+ /// it cannot be understood. It is assumed that MI->isSelect() is true.
+ ///
+ /// When successful, return the controlling condition and the operands that
+ /// determine the true and false result values.
+ ///
+ /// Result = SELECT Cond, TrueOp, FalseOp
+ ///
+ /// Some targets can optimize select instructions, for example by predicating
+ /// the instruction defining one of the operands. Such targets should set
+ /// Optimizable.
+ ///
+ /// @param MI Select instruction to analyze.
+ /// @param Cond Condition controlling the select.
+ /// @param TrueOp Operand number of the value selected when Cond is true.
+ /// @param FalseOp Operand number of the value selected when Cond is false.
+ /// @param Optimizable Returned as true if MI is optimizable.
+ /// @returns False on success.
+ virtual bool analyzeSelect(const MachineInstr *MI,
+ SmallVectorImpl<MachineOperand> &Cond,
+ unsigned &TrueOp, unsigned &FalseOp,
+ bool &Optimizable) const {
+ assert(MI && MI->isSelect() && "MI must be a select instruction");
+ return true;
+ }
+
+ /// optimizeSelect - Given a select instruction that was understood by
+ /// analyzeSelect and returned Optimizable = true, attempt to optimize MI by
+ /// merging it with one of its operands. Returns NULL on failure.
+ ///
+ /// When successful, returns the new select instruction. The client is
+ /// responsible for deleting MI.
+ ///
+ /// If both sides of the select can be optimized, PreferFalse is used to pick
+ /// a side.
+ ///
+ /// @param MI Optimizable select instruction.
+ /// @param PreferFalse Try to optimize FalseOp instead of TrueOp.
+ /// @returns Optimized instruction or NULL.
+ virtual MachineInstr *optimizeSelect(MachineInstr *MI,
+ bool PreferFalse = false) const {
+ // This function must be implemented if Optimizable is ever set.
+ llvm_unreachable("Target must implement TargetInstrInfo::optimizeSelect!");
+ }
+
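A minimal sketch of the calling pattern a peephole pass might use, assuming TII and MI are in scope:

    SmallVector<MachineOperand, 4> Cond;
    unsigned TrueOp, FalseOp;
    bool Optimizable = false;
    if (!TII->analyzeSelect(MI, Cond, TrueOp, FalseOp, Optimizable) &&
        Optimizable)
      if (TII->optimizeSelect(MI))
        MI->eraseFromParent(); // the client deletes the old instruction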
/// copyPhysReg - Emit instructions to copy a pair of physical registers.
+ ///
+ /// This function should support copies within any legal register class as
+ /// well as any cross-class copies created during instruction selection.
+ ///
+ /// The source and destination registers may overlap, which may require a
+ /// careful implementation when multiple copy instructions are required for
+ /// large registers. See for example the ARM target.
virtual void copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
@@ -757,29 +801,6 @@
const MachineInstr *UseMI, unsigned UseIdx,
bool FindMin = false) const;
- /// computeOperandLatency - Compute and return the latency of the given data
- /// dependent def and use. DefMI must be a valid def. UseMI may be NULL for
- /// an unknown use. If the subtarget allows, this may or may not need to call
- /// getOperandLatency().
- ///
- /// FindMin may be set to get the minimum vs. expected latency. Minimum
- /// latency is used for scheduling groups, while expected latency is for
- /// instruction cost and critical path.
- unsigned computeOperandLatency(const InstrItineraryData *ItinData,
- const TargetRegisterInfo *TRI,
- const MachineInstr *DefMI,
- const MachineInstr *UseMI,
- unsigned Reg, bool FindMin) const;
-
- /// getOutputLatency - Compute and return the output dependency latency of a
- /// a given pair of defs which both target the same register. This is usually
- /// one.
- virtual unsigned getOutputLatency(const InstrItineraryData *ItinData,
- const MachineInstr *DefMI, unsigned DefIdx,
- const MachineInstr *DepMI) const {
- return 1;
- }
-
/// getInstrLatency - Compute the instruction latency of a given instruction.
/// If the instruction has higher cost when predicated, it's returned via
/// PredCost.
@@ -794,6 +815,9 @@
unsigned defaultDefLatency(const MCSchedModel *SchedModel,
const MachineInstr *DefMI) const;
+ int computeDefOperandLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *DefMI, bool FindMin) const;
+
/// isHighLatencyDef - Return true if this opcode has high latency to its
/// result.
virtual bool isHighLatencyDef(int opc) const { return false; }
Modified: llvm/branches/AMDILBackend/include/llvm/Target/TargetIntrinsicInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Target/TargetIntrinsicInfo.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Target/TargetIntrinsicInfo.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Target/TargetIntrinsicInfo.h Tue Jan 15 11:16:16 2013
@@ -14,6 +14,7 @@
#ifndef LLVM_TARGET_TARGETINTRINSICINFO_H
#define LLVM_TARGET_TARGETINTRINSICINFO_H
+#include "llvm/Support/Compiler.h"
#include <string>
namespace llvm {
@@ -27,8 +28,8 @@
/// TargetIntrinsicInfo - Interface to description of machine instruction set
///
class TargetIntrinsicInfo {
- TargetIntrinsicInfo(const TargetIntrinsicInfo &); // DO NOT IMPLEMENT
- void operator=(const TargetIntrinsicInfo &); // DO NOT IMPLEMENT
+ TargetIntrinsicInfo(const TargetIntrinsicInfo &) LLVM_DELETED_FUNCTION;
+ void operator=(const TargetIntrinsicInfo &) LLVM_DELETED_FUNCTION;
public:
TargetIntrinsicInfo();
virtual ~TargetIntrinsicInfo();
Modified: llvm/branches/AMDILBackend/include/llvm/Target/TargetLibraryInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Target/TargetLibraryInfo.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Target/TargetLibraryInfo.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Target/TargetLibraryInfo.h Tue Jan 15 11:16:16 2013
@@ -18,6 +18,26 @@
namespace LibFunc {
enum Func {
+ /// void operator delete[](void*);
+ ZdaPv,
+ /// void operator delete(void*);
+ ZdlPv,
+ /// void *new[](unsigned int);
+ Znaj,
+ /// void *new[](unsigned int, nothrow);
+ ZnajRKSt9nothrow_t,
+ /// void *new[](unsigned long);
+ Znam,
+ /// void *new[](unsigned long, nothrow);
+ ZnamRKSt9nothrow_t,
+ /// void *new(unsigned int);
+ Znwj,
+ /// void *new(unsigned int, nothrow);
+ ZnwjRKSt9nothrow_t,
+ /// void *new(unsigned long);
+ Znwm,
+ /// void *new(unsigned long, nothrow);
+ ZnwmRKSt9nothrow_t,
/// int __cxa_atexit(void (*f)(void *), void *p, void *d);
cxa_atexit,
/// void __cxa_guard_abort(guard_t *guard);
@@ -33,12 +53,24 @@
acos,
/// float acosf(float x);
acosf,
+ /// double acosh(double x);
+ acosh,
+ /// float acoshf(float x);
+ acoshf,
+ /// long double acoshl(long double x);
+ acoshl,
/// long double acosl(long double x);
acosl,
/// double asin(double x);
asin,
/// float asinf(float x);
asinf,
+ /// double asinh(double x);
+ asinh,
+ /// float asinhf(float x);
+ asinhf,
+ /// long double asinhl(long double x);
+ asinhl,
/// long double asinl(long double x);
asinl,
/// double atan(double x);
@@ -51,8 +83,22 @@
atan2l,
/// float atanf(float x);
atanf,
+ /// double atanh(double x);
+ atanh,
+ /// float atanhf(float x);
+ atanhf,
+ /// long double atanhl(long double x);
+ atanhl,
/// long double atanl(long double x);
atanl,
+ /// void *calloc(size_t count, size_t size);
+ calloc,
+ /// double cbrt(double x);
+ cbrt,
+ /// float cbrtf(float x);
+ cbrtf,
+ /// long double cbrtl(long double x);
+ cbrtl,
/// double ceil(double x);
ceil,
/// float ceilf(float x);
@@ -79,6 +125,12 @@
cosl,
/// double exp(double x);
exp,
+ /// double exp10(double x);
+ exp10,
+ /// float exp10f(float x);
+ exp10f,
+ /// long double exp10l(long double x);
+ exp10l,
/// double exp2(double x);
exp2,
/// float exp2f(float x);
@@ -119,6 +171,8 @@
fputc,
/// int fputs(const char *s, FILE *stream);
fputs,
+ /// void free(void *ptr);
+ free,
/// size_t fwrite(const void *ptr, size_t size, size_t nitems,
/// FILE *stream);
fwrite,
@@ -144,10 +198,18 @@
log2f,
/// long double log2l(long double x);
log2l,
+ /// double logb(double x);
+ logb,
+ /// float logbf(float x);
+ logbf,
+ /// long double logbl(long double x);
+ logbl,
/// float logf(float x);
logf,
/// long double logl(long double x);
logl,
+ /// void *malloc(size_t size);
+ malloc,
/// void *memchr(const void *s, int c, size_t n);
memchr,
/// int memcmp(const void *s1, const void *s2, size_t n);
@@ -166,6 +228,8 @@
nearbyintf,
/// long double nearbyintl(long double x);
nearbyintl,
+ /// int posix_memalign(void **memptr, size_t alignment, size_t size);
+ posix_memalign,
/// double pow(double x, double y);
pow,
/// float powf(float x, float y);
@@ -176,6 +240,10 @@
putchar,
/// int puts(const char *s);
puts,
+ /// void *realloc(void *ptr, size_t size);
+ realloc,
+ /// void *reallocf(void *ptr, size_t size);
+ reallocf,
/// double rint(double x);
rint,
/// float rintf(float x);
@@ -208,12 +276,20 @@
sqrtf,
/// long double sqrtl(long double x);
sqrtl,
+ /// char *stpcpy(char *s1, const char *s2);
+ stpcpy,
/// char *strcat(char *s1, const char *s2);
strcat,
/// char *strchr(const char *s, int c);
strchr,
+ /// int strcmp(const char *s1, const char *s2);
+ strcmp,
/// char *strcpy(char *s1, const char *s2);
strcpy,
+ /// size_t strcspn(const char *s1, const char *s2);
+ strcspn,
+ /// char *strdup(const char *s1);
+ strdup,
/// size_t strlen(const char *s);
strlen,
/// char *strncat(char *s1, const char *s2, size_t n);
@@ -222,8 +298,33 @@
strncmp,
/// char *strncpy(char *s1, const char *s2, size_t n);
strncpy,
+ /// char *strndup(const char *s1, size_t n);
+ strndup,
/// size_t strnlen(const char *s, size_t maxlen);
strnlen,
+ /// char *strpbrk(const char *s1, const char *s2);
+ strpbrk,
+ /// char *strrchr(const char *s, int c);
+ strrchr,
+ /// size_t strspn(const char *s1, const char *s2);
+ strspn,
+ /// char *strstr(const char *s1, const char *s2);
+ strstr,
+ /// double strtod(const char *nptr, char **endptr);
+ strtod,
+ /// float strtof(const char *nptr, char **endptr);
+ strtof,
+ /// long int strtol(const char *nptr, char **endptr, int base);
+ strtol,
+ /// long double strtold(const char *nptr, char **endptr);
+ strtold,
+ /// long long int strtoll(const char *nptr, char **endptr, int base);
+ strtoll,
+ /// unsigned long int strtoul(const char *nptr, char **endptr, int base);
+ strtoul,
+ /// unsigned long long int strtoull(const char *nptr, char **endptr,
+ /// int base);
+ strtoull,
/// double tan(double x);
tan,
/// float tanf(float x);
@@ -242,6 +343,8 @@
truncf,
/// long double truncl(long double x);
truncl,
+ /// void *valloc(size_t size);
+ valloc,
NumLibFuncs
};
Modified: llvm/branches/AMDILBackend/include/llvm/Target/TargetLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Target/TargetLowering.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Target/TargetLowering.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Target/TargetLowering.h Tue Jan 15 11:16:16 2013
@@ -22,9 +22,11 @@
#ifndef LLVM_TARGET_TARGETLOWERING_H
#define LLVM_TARGET_TARGETLOWERING_H
+#include "llvm/AddressingMode.h"
#include "llvm/CallingConv.h"
#include "llvm/InlineAsm.h"
#include "llvm/Attributes.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/CallSite.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
@@ -49,7 +51,7 @@
class MCContext;
class MCExpr;
template<typename T> class SmallVectorImpl;
- class TargetData;
+ class DataLayout;
class TargetRegisterClass;
class TargetLibraryInfo;
class TargetLoweringObjectFile;
@@ -76,8 +78,8 @@
/// target-specific constructs to SelectionDAG operators.
///
class TargetLowering {
- TargetLowering(const TargetLowering&); // DO NOT IMPLEMENT
- void operator=(const TargetLowering&); // DO NOT IMPLEMENT
+ TargetLowering(const TargetLowering&) LLVM_DELETED_FUNCTION;
+ void operator=(const TargetLowering&) LLVM_DELETED_FUNCTION;
public:
/// LegalizeAction - This enum indicates whether operations are valid for a
/// target, and if not, what action should be used to make them valid.
@@ -101,12 +103,24 @@
TypeWidenVector // This vector should be widened into a larger vector.
};
+ /// LegalizeKind holds the legalization kind that needs to happen to EVT
+ /// in order to type-legalize it.
+ typedef std::pair<LegalizeTypeAction, EVT> LegalizeKind;
+
enum BooleanContent { // How the target represents true/false values.
UndefinedBooleanContent, // Only bit 0 counts, the rest can hold garbage.
ZeroOrOneBooleanContent, // All bits zero except for bit 0.
ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
};
+ enum SelectSupportKind {
+ ScalarValSelect, // The target supports scalar selects (ex: cmov).
+ ScalarCondVectorVal, // The target supports selects with a scalar condition
+ // and vector values (ex: cmov).
+ VectorMaskSelect // The target supports vector selects with a vector
+ // mask (ex: x86 blends).
+ };
+
static ISD::NodeType getExtendForContent(BooleanContent Content) {
switch (Content) {
case UndefinedBooleanContent:
@@ -128,22 +142,37 @@
virtual ~TargetLowering();
const TargetMachine &getTargetMachine() const { return TM; }
- const TargetData *getTargetData() const { return TD; }
+ const DataLayout *getDataLayout() const { return TD; }
const TargetLoweringObjectFile &getObjFileLowering() const { return TLOF; }
bool isBigEndian() const { return !IsLittleEndian; }
bool isLittleEndian() const { return IsLittleEndian; }
- MVT getPointerTy() const { return PointerTy; }
+  // Return the pointer type for the given address space; it defaults to
+ // the pointer type from the data layout.
+ // FIXME: The default needs to be removed once all the code is updated.
+ virtual MVT getPointerTy(uint32_t AS = 0) const { return PointerTy; }
virtual MVT getShiftAmountTy(EVT LHSTy) const;
/// isSelectExpensive - Return true if the select operation is expensive for
/// this target.
bool isSelectExpensive() const { return SelectIsExpensive; }
+ virtual bool isSelectSupported(SelectSupportKind kind) const { return true; }
+
/// isIntDivCheap() - Return true if integer divide is usually cheaper than
/// a sequence of several shifts, adds, and multiplies for this target.
bool isIntDivCheap() const { return IntDivIsCheap; }
+ /// isSlowDivBypassed - Returns true if target has indicated at least one
+ /// type should be bypassed.
+ bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }
+
+  /// getBypassSlowDivWidths - Returns the map of slow bit widths for division
+  /// or remainder, each with its corresponding fast width.
+ const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const {
+ return BypassSlowDivWidths;
+ }
+
/// isPow2DivCheap() - Return true if pow2 div is cheaper than a chain of
/// srl/add/sra.
bool isPow2DivCheap() const { return Pow2DivIsCheap; }
@@ -382,6 +411,13 @@
getOperationAction(Op, VT) == Custom);
}
+ /// isOperationExpand - Return true if the specified operation is illegal on
+ /// this target or unlikely to be made legal with custom lowering. This is
+ /// used to help guide high-level lowering decisions.
+ bool isOperationExpand(unsigned Op, EVT VT) const {
+ return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
+ }
+
/// isOperationLegal - Return true if the specified operation is legal on this
/// target.
bool isOperationLegal(unsigned Op, EVT VT) const {
@@ -475,8 +511,12 @@
assert((unsigned)CC < array_lengthof(CondCodeActions) &&
(unsigned)VT.getSimpleVT().SimpleTy < sizeof(CondCodeActions[0])*4 &&
"Table isn't big enough!");
+    /// The lower 5 bits of SimpleTy select a 2-bit field within a 64-bit
+    /// value, and the upper bits index into the second dimension of the
+    /// array to select which 64-bit value to use.
LegalizeAction Action = (LegalizeAction)
- ((CondCodeActions[CC] >> (2*VT.getSimpleVT().SimpleTy)) & 3);
+ ((CondCodeActions[CC][VT.getSimpleVT().SimpleTy >> 5]
+ >> (2*(VT.getSimpleVT().SimpleTy & 0x1F))) & 3);
assert(Action != Promote && "Can't promote condition code!");
return Action;
}
@@ -533,6 +573,7 @@
}
return EVT::getEVT(Ty, AllowUnknown);
}
+
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. This is the actual
@@ -686,6 +727,12 @@
return SupportJumpTables;
}
+  /// getMinimumJumpTableEntries - Return the integer threshold on the number
+  /// of blocks at which to use jump tables rather than an if sequence.
+ int getMinimumJumpTableEntries() const {
+ return MinimumJumpTableEntries;
+ }
+
/// getStackPointerRegisterToSaveRestore - If a physical register, this
/// specifies the register that llvm.savestack/llvm.restorestack should save
/// and restore.
@@ -1006,6 +1053,12 @@
SupportJumpTables = Val;
}
+  /// setMinimumJumpTableEntries - Indicate the number of blocks at which to
+  /// generate jump tables rather than an if sequence.
+ void setMinimumJumpTableEntries(int Val) {
+ MinimumJumpTableEntries = Val;
+ }
+
/// setStackPointerRegisterToSaveRestore - If set to a physical register, this
/// specifies the register that llvm.savestack/llvm.restorestack should save
/// and restore.
@@ -1045,6 +1098,11 @@
/// of instructions not containing an integer divide.
void setIntDivIsCheap(bool isCheap = true) { IntDivIsCheap = isCheap; }
+ /// addBypassSlowDiv - Tells the code generator which bitwidths to bypass.
+ void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
+ BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
+ }
+
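For example, a hypothetical target's TargetLowering constructor might opt in like this (the widths are illustrative):

    addBypassSlowDiv(32, 8);       // bypass 32-bit div/rem with 8-bit div/rem
    setMinimumJumpTableEntries(8); // prefer if-sequences below 8 case blocks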
/// setPow2DivIsCheap - Tells the code generator that it shouldn't generate
/// srl/add/sra for a signed divide by power of two, and let the target handle
/// it.
@@ -1127,8 +1185,13 @@
assert(VT < MVT::LAST_VALUETYPE &&
(unsigned)CC < array_lengthof(CondCodeActions) &&
"Table isn't big enough!");
- CondCodeActions[(unsigned)CC] &= ~(uint64_t(3UL) << VT.SimpleTy*2);
- CondCodeActions[(unsigned)CC] |= (uint64_t)Action << VT.SimpleTy*2;
+    /// The lower 5 bits of SimpleTy select a 2-bit field within a 64-bit
+    /// value, and the upper bits index into the second dimension of the
+    /// array to select which 64-bit value to use.
+ CondCodeActions[(unsigned)CC][VT.SimpleTy >> 5]
+ &= ~(uint64_t(3UL) << (VT.SimpleTy & 0x1F)*2);
+ CondCodeActions[(unsigned)CC][VT.SimpleTy >> 5]
+ |= (uint64_t)Action << (VT.SimpleTy & 0x1F)*2;
}
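In other words, the indexing used by both the getter and the setter decomposes as in this sketch (CC, VT, and Action as in the surrounding code):

    unsigned Word  = VT.SimpleTy >> 5;          // which 64-bit element
    unsigned Shift = 2 * (VT.SimpleTy & 0x1F);  // 2-bit field within it
    LegalizeAction A =
        (LegalizeAction)((CondCodeActions[CC][Word] >> Shift) & 3);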
/// AddPromotedToType - If Opc/OrigVT is specified as being promoted, the
@@ -1201,7 +1264,7 @@
public:
//===--------------------------------------------------------------------===//
// Lowering methods - These methods must be implemented by targets so that
- // the SelectionDAGLowering code knows how to lower these.
+ // the SelectionDAGBuilder code knows how to lower these.
//
/// LowerFormalArguments - This hook must be implemented to lower the
@@ -1271,9 +1334,9 @@
FunctionType *FTy, bool isTailCall, SDValue callee,
ArgListTy &args, SelectionDAG &dag, DebugLoc dl,
ImmutableCallSite &cs)
- : Chain(chain), RetTy(retTy), RetSExt(cs.paramHasAttr(0, Attribute::SExt)),
- RetZExt(cs.paramHasAttr(0, Attribute::ZExt)), IsVarArg(FTy->isVarArg()),
- IsInReg(cs.paramHasAttr(0, Attribute::InReg)),
+ : Chain(chain), RetTy(retTy), RetSExt(cs.paramHasAttr(0, Attributes::SExt)),
+ RetZExt(cs.paramHasAttr(0, Attributes::ZExt)), IsVarArg(FTy->isVarArg()),
+ IsInReg(cs.paramHasAttr(0, Attributes::InReg)),
DoesNotReturn(cs.doesNotReturn()),
IsReturnValueUsed(!cs.getInstruction()->use_empty()),
IsTailCall(isTailCall), NumFixedArgs(FTy->getNumParams()),
@@ -1314,7 +1377,7 @@
}
/// HandleByVal - Target-specific cleanup for formal ByVal parameters.
- virtual void HandleByVal(CCState *, unsigned &) const {}
+ virtual void HandleByVal(CCState *, unsigned &, unsigned) const {}
/// CanLowerReturn - This hook should be implemented to check whether the
/// return values described by the Outs array can fit into the return
@@ -1584,22 +1647,6 @@
// Addressing mode description hooks (used by LSR etc).
//
- /// AddrMode - This represents an addressing mode of:
- /// BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
- /// If BaseGV is null, there is no BaseGV.
- /// If BaseOffs is zero, there is no base offset.
- /// If HasBaseReg is false, there is no base register.
- /// If Scale is zero, there is no ScaleReg. Scale of 1 indicates a reg with
- /// no scale.
- ///
- struct AddrMode {
- GlobalValue *BaseGV;
- int64_t BaseOffs;
- bool HasBaseReg;
- int64_t Scale;
- AddrMode() : BaseGV(0), BaseOffs(0), HasBaseReg(false), Scale(0) {}
- };
-
/// GetAddrModeArguments - CodeGenPrepare sinks address calculations into the
/// same BB as Load/Store instructions reading the address. This allows as
/// much computation as possible to be done in the address mode for that
@@ -1741,10 +1788,11 @@
private:
const TargetMachine &TM;
- const TargetData *TD;
+ const DataLayout *TD;
const TargetLoweringObjectFile &TLOF;
- /// PointerTy - The type to use for pointers, usually i32 or i64.
+ /// PointerTy - The type to use for pointers for the default address space,
+ /// usually i32 or i64.
///
MVT PointerTy;
@@ -1762,6 +1810,12 @@
/// set to true unconditionally.
bool IntDivIsCheap;
+  /// BypassSlowDivWidths - Tells the code generator to bypass slow divide or
+  /// remainder instructions. For example, BypassSlowDivWidths[32] = 8 tells
+  /// the code generator to bypass 32-bit integer div/rem with an 8-bit
+  /// unsigned integer div/rem when the operands are positive and less than
+  /// 256.
+ DenseMap <unsigned int, unsigned int> BypassSlowDivWidths;
+
/// Pow2DivIsCheap - Tells the code generator that it shouldn't generate
/// srl/add/sra for a signed divide by power of two, and let the target handle
/// it.
@@ -1784,6 +1838,9 @@
/// If it's not true, then each jumptable must be lowered into if-then-else's.
bool SupportJumpTables;
+ /// MinimumJumpTableEntries - Number of blocks threshold to use jump tables.
+ int MinimumJumpTableEntries;
+
/// BooleanContents - Information about the contents of the high-bits in
/// boolean values held in a type wider than i1. See getBooleanContents.
BooleanContent BooleanContents;
@@ -1901,12 +1958,14 @@
/// CondCodeActions - For each condition code (ISD::CondCode) keep a
/// LegalizeAction that indicates how instruction selection should
/// deal with the condition code.
- uint64_t CondCodeActions[ISD::SETCC_INVALID];
+ /// Because each CC action takes up 2 bits, we need to have the array size
+ /// be large enough to fit all of the value types. This can be done by
+  /// dividing MVT::LAST_VALUETYPE by 32 and adding one.
+ uint64_t CondCodeActions[ISD::SETCC_INVALID][(MVT::LAST_VALUETYPE / 32) + 1];
ValueTypeActionImpl ValueTypeActions;
- typedef std::pair<LegalizeTypeAction, EVT> LegalizeKind;
-
+public:
LegalizeKind
getTypeConversion(LLVMContext &Context, EVT VT) const {
// If this is a simple type, use the ComputeRegisterProp mechanism.
@@ -1921,6 +1980,9 @@
ValueTypeActions.getTypeAction(NVT.getSimpleVT()) != TypePromoteInteger)
&& "Promote may not follow Expand or Promote");
+ if (LA == TypeSplitVector)
+ NVT = EVT::getVectorVT(Context, VT.getVectorElementType(),
+ VT.getVectorNumElements() / 2);
return LegalizeKind(LA, NVT);
}
@@ -2023,6 +2085,7 @@
return LegalizeKind(TypeSplitVector, NVT);
}
+private:
std::vector<std::pair<EVT, const TargetRegisterClass*> > AvailableRegClasses;
/// TargetDAGCombineArray - Targets can specify ISD nodes that they would
Modified: llvm/branches/AMDILBackend/include/llvm/Target/TargetLoweringObjectFile.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Target/TargetLoweringObjectFile.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Target/TargetLoweringObjectFile.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Target/TargetLoweringObjectFile.h Tue Jan 15 11:16:16 2013
@@ -33,10 +33,11 @@
class TargetLoweringObjectFile : public MCObjectFileInfo {
MCContext *Ctx;
-
- TargetLoweringObjectFile(const TargetLoweringObjectFile&); // DO NOT IMPLEMENT
- void operator=(const TargetLoweringObjectFile&); // DO NOT IMPLEMENT
-
+
+ TargetLoweringObjectFile(
+ const TargetLoweringObjectFile&) LLVM_DELETED_FUNCTION;
+ void operator=(const TargetLoweringObjectFile&) LLVM_DELETED_FUNCTION;
+
public:
MCContext &getContext() const { return *Ctx; }
Modified: llvm/branches/AMDILBackend/include/llvm/Target/TargetMachine.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Target/TargetMachine.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Target/TargetMachine.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Target/TargetMachine.h Tue Jan 15 11:16:16 2013
@@ -17,6 +17,8 @@
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Target/TargetOptions.h"
+#include "llvm/TargetTransformInfo.h"
+#include "llvm/Target/TargetTransformImpl.h"
#include "llvm/ADT/StringRef.h"
#include <cassert>
#include <string>
@@ -31,8 +33,7 @@
class MCContext;
class PassManagerBase;
class Target;
-class TargetData;
-class TargetELFWriterInfo;
+class DataLayout;
class TargetFrameLowering;
class TargetInstrInfo;
class TargetIntrinsicInfo;
@@ -52,8 +53,8 @@
/// through this interface.
///
class TargetMachine {
- TargetMachine(const TargetMachine &); // DO NOT IMPLEMENT
- void operator=(const TargetMachine &); // DO NOT IMPLEMENT
+ TargetMachine(const TargetMachine &) LLVM_DELETED_FUNCTION;
+ void operator=(const TargetMachine &) LLVM_DELETED_FUNCTION;
protected: // Can only create subclasses.
TargetMachine(const Target &T, StringRef TargetTriple,
StringRef CPU, StringRef FS, const TargetOptions &Options);
@@ -106,7 +107,11 @@
virtual const TargetFrameLowering *getFrameLowering() const { return 0; }
virtual const TargetLowering *getTargetLowering() const { return 0; }
virtual const TargetSelectionDAGInfo *getSelectionDAGInfo() const{ return 0; }
- virtual const TargetData *getTargetData() const { return 0; }
+ virtual const DataLayout *getDataLayout() const { return 0; }
+ virtual const ScalarTargetTransformInfo*
+ getScalarTargetTransformInfo() const { return 0; }
+ virtual const VectorTargetTransformInfo*
+ getVectorTargetTransformInfo() const { return 0; }
/// getMCAsmInfo - Return target specific asm information.
///
@@ -142,11 +147,6 @@
return 0;
}
- /// getELFWriterInfo - If this target supports an ELF writer, return
- /// information for it, otherwise return null.
- ///
- virtual const TargetELFWriterInfo *getELFWriterInfo() const { return 0; }
-
/// hasMCRelaxAll - Check whether all machine code instructions should be
/// relaxed.
bool hasMCRelaxAll() const { return MCRelaxAll; }
Modified: llvm/branches/AMDILBackend/include/llvm/Target/TargetOpcodes.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Target/TargetOpcodes.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Target/TargetOpcodes.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Target/TargetOpcodes.h Tue Jan 15 11:16:16 2013
@@ -87,7 +87,11 @@
/// BUNDLE - This instruction represents an instruction bundle. Instructions
/// which immediately follow a BUNDLE instruction which are marked with
/// 'InsideBundle' flag are inside the bundle.
- BUNDLE
+ BUNDLE = 14,
+
+ /// Lifetime markers.
+ LIFETIME_START = 15,
+ LIFETIME_END = 16
};
} // end namespace TargetOpcode
} // end namespace llvm
Modified: llvm/branches/AMDILBackend/include/llvm/Target/TargetOptions.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Target/TargetOptions.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Target/TargetOptions.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Target/TargetOptions.h Tue Jan 15 11:16:16 2013
@@ -155,6 +155,10 @@
/// automatically realigned, if needed.
unsigned RealignStack : 1;
+ /// SSPBufferSize - The minimum size of buffers that will receive stack
+  /// smashing protection when -fstack-protector is used.
+ unsigned SSPBufferSize;
+
/// EnableFastISel - This flag enables fast-path instruction selection
/// which trades away generated code quality in favor of reducing
/// compile time.
Modified: llvm/branches/AMDILBackend/include/llvm/Target/TargetRegisterInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Target/TargetRegisterInfo.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Target/TargetRegisterInfo.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Target/TargetRegisterInfo.h Tue Jan 15 11:16:16 2013
@@ -221,13 +221,17 @@
private:
const TargetRegisterInfoDesc *InfoDesc; // Extra desc array for codegen
const char *const *SubRegIndexNames; // Names of subreg indexes.
+ // Pointer to array of lane masks, one per sub-reg index.
+ const unsigned *SubRegIndexLaneMasks;
+
regclass_iterator RegClassBegin, RegClassEnd; // List of regclasses
protected:
TargetRegisterInfo(const TargetRegisterInfoDesc *ID,
regclass_iterator RegClassBegin,
regclass_iterator RegClassEnd,
- const char *const *subregindexnames);
+ const char *const *SRINames,
+ const unsigned *SRILaneMasks);
virtual ~TargetRegisterInfo();
public:
@@ -327,10 +331,36 @@
/// getSubRegIndexName - Return the human-readable symbolic target-specific
/// name for the specified SubRegIndex.
const char *getSubRegIndexName(unsigned SubIdx) const {
- assert(SubIdx && "This is not a subregister index");
+ assert(SubIdx && SubIdx < getNumSubRegIndices() &&
+ "This is not a subregister index");
return SubRegIndexNames[SubIdx-1];
}
+ /// getSubRegIndexLaneMask - Return a bitmask representing the parts of a
+ /// register that are covered by SubIdx.
+ ///
+ /// Lane masks for sub-register indices are similar to register units for
+ /// physical registers. The individual bits in a lane mask can't be assigned
+ /// any specific meaning. They can be used to check if two sub-register
+ /// indices overlap.
+ ///
+ /// If the target has a register such that:
+ ///
+ /// getSubReg(Reg, A) overlaps getSubReg(Reg, B)
+ ///
+ /// then:
+ ///
+ /// getSubRegIndexLaneMask(A) & getSubRegIndexLaneMask(B) != 0
+ ///
+ /// The converse is not necessarily true. If two lane masks have a common
+ /// bit, the corresponding sub-registers may not overlap, but it can be
+ /// assumed that they usually will.
+ unsigned getSubRegIndexLaneMask(unsigned SubIdx) const {
+ // SubIdx == 0 is allowed; it has the lane mask ~0u.
+ assert(SubIdx < getNumSubRegIndices() && "This is not a subregister index");
+ return SubRegIndexLaneMasks[SubIdx];
+ }
+
/// regsOverlap - Returns true if the two registers are equal or alias each
/// other. The registers may be virtual register.
bool regsOverlap(unsigned regA, unsigned regB) const {
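
A sketch of the conservative overlap test this comment describes:
disjoint masks prove the sub-registers are disjoint, while a shared
bit only suggests overlap:

    #include "llvm/Target/TargetRegisterInfo.h"

    static bool subRegIndicesMayOverlap(const llvm::TargetRegisterInfo &TRI,
                                        unsigned A, unsigned B) {
      // A zero intersection guarantees no overlap; nonzero means "maybe".
      return (TRI.getSubRegIndexLaneMask(A) &
              TRI.getSubRegIndexLaneMask(B)) != 0;
    }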
@@ -416,18 +446,6 @@
return MCRegisterInfo::getMatchingSuperReg(Reg, SubIdx, RC->MC);
}
- /// canCombineSubRegIndices - Given a register class and a list of
- /// subregister indices, return true if it's possible to combine the
- /// subregister indices into one that corresponds to a larger
- /// subregister. Return the new subregister index by reference. Note the
- /// new index may be zero if the given subregisters can be combined to
- /// form the whole register.
- virtual bool canCombineSubRegIndices(const TargetRegisterClass *RC,
- SmallVectorImpl<unsigned> &SubIndices,
- unsigned &NewSubIdx) const {
- return 0;
- }
-
/// getMatchingSuperRegClass - Return a subclass of the specified register
/// class A so that each register in it has a sub-register of the
/// specified sub-register index which is in the specified register class B.
@@ -458,6 +476,8 @@
/// composeSubRegIndices - Return the subregister index you get from composing
/// two subregister indices.
///
+ /// The special null sub-register index composes as the identity.
+ ///
/// If R:a:b is the same register as R:c, then composeSubRegIndices(a, b)
/// returns c. Note that composeSubRegIndices does not tell you about illegal
/// compositions. If R does not have a subreg a, or R:a does not have a subreg
@@ -467,11 +487,19 @@
/// ssub_0:S0 - ssub_3:S3 subregs.
/// If you compose subreg indices dsub_1, ssub_0 you get ssub_2.
///
- virtual unsigned composeSubRegIndices(unsigned a, unsigned b) const {
- // This default implementation is correct for most targets.
- return b;
+ unsigned composeSubRegIndices(unsigned a, unsigned b) const {
+ if (!a) return b;
+ if (!b) return a;
+ return composeSubRegIndicesImpl(a, b);
}
+protected:
+ /// Overridden by TableGen in targets that have sub-registers.
+ virtual unsigned composeSubRegIndicesImpl(unsigned, unsigned) const {
+ llvm_unreachable("Target has no sub-registers");
+ }
+
+public:
/// getCommonSuperRegClass - Find a common super-register class if it exists.
///
/// Find a register class, SuperRC and two sub-register indices, PreA and
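
A small sketch exercising the documented identity behaviour of the
null sub-register index in the refactored composeSubRegIndices:

    #include <cassert>
    #include "llvm/Target/TargetRegisterInfo.h"

    static unsigned composeChecked(const llvm::TargetRegisterInfo &TRI,
                                   unsigned A, unsigned B) {
      // The null index composes as the identity on either side.
      assert(TRI.composeSubRegIndices(0, B) == B);
      assert(TRI.composeSubRegIndices(A, 0) == A);
      return TRI.composeSubRegIndices(A, B);
    }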
Modified: llvm/branches/AMDILBackend/include/llvm/Target/TargetSchedule.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Target/TargetSchedule.td?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Target/TargetSchedule.td (original)
+++ llvm/branches/AMDILBackend/include/llvm/Target/TargetSchedule.td Tue Jan 15 11:16:16 2013
@@ -10,25 +10,77 @@
// This file defines the target-independent scheduling interfaces which should
// be implemented by each target which is using TableGen based scheduling.
//
+// The SchedMachineModel is defined by subtargets for three categories of data:
+// 1. Basic properties for coarse grained instruction cost model.
+// 2. Scheduler Read/Write resources for simple per-opcode cost model.
+// 3. Instruction itineraries for detailed reservation tables.
+//
+// (1) Basic properties are defined by the SchedMachineModel
+// class. Target hooks allow subtargets to associate opcodes with
+// those properties.
+//
+// (2) A per-operand machine model can be implemented in any
+// combination of the following ways:
+//
+// A. Associate per-operand SchedReadWrite types with Instructions by
+// modifying the Instruction definition to inherit from Sched. For
+// each subtarget, define WriteRes and ReadAdvance to associate
+// processor resources and latency with each SchedReadWrite type.
+//
+// B. In each instruction definition, name an ItineraryClass. For each
+// subtarget, define ItinRW entries to map ItineraryClass to
+// per-operand SchedReadWrite types. Unlike method A, these types may
+// be subtarget specific and can be directly associated with resources
+// by defining SchedWriteRes and SchedReadAdvance.
+//
+// C. In the subtarget, map SchedReadWrite types to specific
+// opcodes. This overrides any SchedReadWrite types or
+// ItineraryClasses defined by the Instruction. As in method B, the
+// subtarget can directly associate resources with SchedReadWrite
+// types by defining SchedWriteRes and SchedReadAdvance.
+//
+// D. In either the target or subtarget, define SchedWriteVariant or
+// SchedReadVariant to map one SchedReadWrite type onto another
+// sequence of SchedReadWrite types. This allows dynamic selection of
+// an instruction's machine model via custom C++ code. It also allows
+// a machine-independent SchedReadWrite type to map to a sequence of
+// machine-dependent types.
+//
+// (3) A per-pipeline-stage machine model can be implemented by providing
+// Itineraries in addition to mapping instructions to ItineraryClasses.
//===----------------------------------------------------------------------===//
+// Include legacy support for instruction itineraries.
include "llvm/Target/TargetItinerary.td"
-// The SchedMachineModel is defined by subtargets for three categories of data:
-// 1) Basic properties for coarse grained instruction cost model.
-// 2) Scheduler Read/Write resources for simple per-opcode cost model.
-// 3) Instruction itineraties for detailed reservation tables.
+class Instruction; // Forward def
+
+// DAG operator that interprets the DAG args as Instruction defs.
+def instrs;
+
+// DAG operator that interprets each DAG arg as a regex pattern for
+// matching Instruction opcode names.
+// The regex must match the beginning of the opcode (as in Python re.match).
+// To avoid matching prefixes, append '$' to the pattern.
+def instregex;
+
+// Define the SchedMachineModel and provide basic properties for
+// coarse grained instruction cost model. Default values for the
+// properties are defined in MCSchedModel. A value of "-1" in the
+// target description's SchedMachineModel indicates that the property
+// is not overridden by the target.
//
-// Default values for basic properties are defined in MCSchedModel. "-1"
-// indicates that the property is not overriden by the target description.
+// Target hooks allow subtargets to associate LoadLatency and
+// HighLatency with groups of opcodes.
class SchedMachineModel {
- int IssueWidth = -1; // Max instructions that may be scheduled per cycle.
+ int IssueWidth = -1; // Max micro-ops that may be scheduled per cycle.
int MinLatency = -1; // Determines which instructions are allowed in a group.
// (-1) inorder (0) ooo, (1): inorder +var latencies.
int LoadLatency = -1; // Cycles for loads to access the cache.
int HighLatency = -1; // Approximation of cycles for "high latency" ops.
int MispredictPenalty = -1; // Extra cycles for a mispredicted branch.
+ // Per-cycle resources tables.
ProcessorItineraries Itineraries = NoItineraries;
bit NoModel = 0; // Special tag to indicate missing machine model.
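
These properties surface in C++ as plain fields of MCSchedModel, so a
consumer can read them directly; a minimal sketch, assuming an
MCSchedModel reference:

    #include "llvm/MC/MCSchedule.h"

    // A "-1" in the target description leaves the corresponding
    // MCSchedModel field at its documented default value.
    static unsigned issueLimit(const llvm::MCSchedModel &SM) {
      return SM.IssueWidth; // max micro-ops per cycle
    }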
@@ -38,4 +90,276 @@
let NoModel = 1;
}
-// TODO: Define classes for processor and scheduler resources.
+// Define a kind of processor resource that may be common across
+// similar subtargets.
+class ProcResourceKind;
+
+// Define a number of interchangeable processor resources. NumUnits
+// determines the throughput of instructions that require the resource.
+//
+// An optional Super resource may be given to model these resources as
+// a subset of the more general super resources. Using one of these
+// resources implies using one of the super resources.
+//
+// ProcResourceUnits normally model a few buffered resources within an
+// out-of-order engine that the compiler attempts to conserve.
+// Buffered resources may be held for multiple clock cycles, but the
+// scheduler does not pin them to a particular clock cycle relative to
+// instruction dispatch. Setting Buffered=0 changes this to an
+// in-order resource. In this case, the scheduler counts down from the
+// cycle that the instruction issues in-order, forcing an interlock
+// with subsequent instructions that require the same resource until
+// the number of ResourceCycles specified in WriteRes expires.
+//
+// SchedModel ties these units to a processor for any stand-alone defs
+// of this class. Instances of subclass ProcResource will be automatically
+// attached to a processor, so SchedModel is not needed.
+class ProcResourceUnits<ProcResourceKind kind, int num> {
+ ProcResourceKind Kind = kind;
+ int NumUnits = num;
+ ProcResourceKind Super = ?;
+ bit Buffered = 1;
+ SchedMachineModel SchedModel = ?;
+}
+
+// EponymousProcResourceKind helps implement ProcResourceUnits by
+// allowing a ProcResourceUnits definition to reference itself. It
+// should not be referenced anywhere else.
+def EponymousProcResourceKind : ProcResourceKind;
+
+// Subtargets typically define processor resource kind and number of
+// units in one place.
+class ProcResource<int num> : ProcResourceKind,
+ ProcResourceUnits<EponymousProcResourceKind, num>;
+
+// A target architecture may define SchedReadWrite types and associate
+// them with instruction operands.
+class SchedReadWrite;
+
+// List the per-operand types that map to the machine model of an
+// instruction. One SchedWrite type must be listed for each explicit
+// def operand in order. Additional SchedWrite types may optionally be
+// listed for implicit def operands. SchedRead types may optionally
+// be listed for use operands in order. The order of defs relative to
+// uses is insignificant. This way, the same SchedReadWrite list may
+// be used for multiple forms of an operation. For example, a
+// two-address instruction could have two tied operands or single
+// operand that both reads and writes a reg. In both cases we have a
+// single SchedWrite and single SchedRead in any order.
+class Sched<list<SchedReadWrite> schedrw> {
+ list<SchedReadWrite> SchedRW = schedrw;
+}
+
+// Define a scheduler resource associated with a def operand.
+class SchedWrite : SchedReadWrite;
+def NoWrite : SchedWrite;
+
+// Define a scheduler resource associated with a use operand.
+class SchedRead : SchedReadWrite;
+
+// Define a SchedWrite that is modeled as a sequence of other
+// SchedWrites with additive latency. This allows a single operand to
+// be mapped to the resources composed from a set of previously defined
+// SchedWrites.
+//
+// If the final write in this sequence is a SchedWriteVariant marked
+// Variadic, then the list of prior writes are distributed across all
+// operands after resolving the predicate for the final write.
+//
+// SchedModel silences warnings but is ignored.
+class WriteSequence<list<SchedWrite> writes, int rep = 1> : SchedWrite {
+ list<SchedWrite> Writes = writes;
+ int Repeat = rep;
+ SchedMachineModel SchedModel = ?;
+}
+
+// Define values common to WriteRes and SchedWriteRes.
+//
+// SchedModel ties these resources to a processor.
+class ProcWriteResources<list<ProcResourceKind> resources> {
+ list<ProcResourceKind> ProcResources = resources;
+ list<int> ResourceCycles = [];
+ int Latency = 1;
+ int NumMicroOps = 1;
+ bit BeginGroup = 0;
+ bit EndGroup = 0;
+ // Allow a processor to mark some scheduling classes as unsupported
+ // for stronger verification.
+ bit Unsupported = 0;
+ SchedMachineModel SchedModel = ?;
+}
+
+// Define the resources and latency of a SchedWrite. This will be used
+// directly by targets that have no itinerary classes. In this case,
+// SchedWrite is defined by the target, while WriteResources is
+// defined by the subtarget, and maps the SchedWrite to processor
+// resources.
+//
+// If a target already has itinerary classes, SchedWriteResources can
+// be used instead to define subtarget specific SchedWrites and map
+// them to processor resources in one place. Then ItinRW can map
+// itinerary classes to the subtarget's SchedWrites.
+//
+// ProcResources indicates the set of resources consumed by the write.
+// Optionally, ResourceCycles indicates the number of cycles the
+// resource is consumed. Each ResourceCycles item is paired with the
+// ProcResource item at the same position in its list. Since
+// ResourceCycles are rarely specialized, the list may be
+// incomplete. By default, resources are consumed for a single cycle,
+// regardless of latency, which models a fully pipelined processing
+// unit. A value of 0 for ResourceCycles means that the resource must
+// be available but is not consumed, which is only relevant for
+// unbuffered resources.
+//
+// By default, each SchedWrite takes one micro-op, which is counted
+// against the processor's IssueWidth limit. If an instruction can
+// write multiple registers with a single micro-op, the subtarget
+// should define one of the writes to be zero micro-ops. If a
+// subtarget requires multiple micro-ops to write a single result, it
+// should either override the write's NumMicroOps to be greater than 1
+// or require additional writes. Extra writes can be required either
+// by defining a WriteSequence, or simply listing extra writes in the
+// instruction's list of writers beyond the number of "def"
+// operands. The scheduler assumes that all micro-ops must be
+// dispatched in the same cycle. These micro-ops may be required to
+// begin or end the current dispatch group.
+class WriteRes<SchedWrite write, list<ProcResourceKind> resources>
+ : ProcWriteResources<resources> {
+ SchedWrite WriteType = write;
+}
+
+// Directly name a set of WriteResources defining a new SchedWrite
+// type at the same time. This class is unaware of its SchedModel so
+// must be referenced by InstRW or ItinRW.
+class SchedWriteRes<list<ProcResourceKind> resources> : SchedWrite,
+ ProcWriteResources<resources>;
+
+// Define values common to ReadAdvance and SchedReadAdvance.
+//
+// SchedModel ties these resources to a processor.
+class ProcReadAdvance<int cycles, list<SchedWrite> writes = []> {
+ int Cycles = cycles;
+ list<SchedWrite> ValidWrites = writes;
+ // Allow a processor to mark some scheduling classes as unsupported
+ // for stronger verification.
+ bit Unsupported = 0;
+ SchedMachineModel SchedModel = ?;
+}
+
+// A processor may define a ReadAdvance associated with a SchedRead
+// to reduce latency of a prior write by N cycles. A negative advance
+// effectively increases latency, which may be used for cross-domain
+// stalls.
+//
+// A ReadAdvance may be associated with a list of SchedWrites
+// to implement pipeline bypass. The Writes list may be empty to
+// indicate operands that are always read this number of Cycles later
+// than a normal register read, allowing the read's parent instruction
+// to issue earlier relative to the writer.
+class ReadAdvance<SchedRead read, int cycles, list<SchedWrite> writes = []>
+ : ProcReadAdvance<cycles, writes> {
+ SchedRead ReadType = read;
+}
+
+// Directly associate a new SchedRead type with a delay and optional
+// pipeline bypass. For use with InstRW or ItinRW.
+class SchedReadAdvance<int cycles, list<SchedWrite> writes = []> : SchedRead,
+ ProcReadAdvance<cycles, writes>;
+
+// Define SchedRead defaults. Reads seldom need special treatment.
+def ReadDefault : SchedRead;
+def NoReadAdvance : SchedReadAdvance<0>;
+
+// Define shared code that will be in the same scope as all
+// SchedPredicates. Available variables are:
+// (const MachineInstr *MI, const TargetSchedModel *SchedModel)
+class PredicateProlog<code c> {
+ code Code = c;
+}
+
+// Define a predicate to determine which SchedVariant applies to a
+// particular MachineInstr. The code snippet is used as an
+// if-statement's expression. Available variables are MI, SchedModel,
+// and anything defined in a PredicateProlog.
+//
+// SchedModel silences warnings but is ignored.
+class SchedPredicate<code pred> {
+ SchedMachineModel SchedModel = ?;
+ code Predicate = pred;
+}
+def NoSchedPred : SchedPredicate<[{true}]>;
+
+// Associate a predicate with a list of SchedReadWrites. By default,
+// the selected SchedReadWrites are still associated with a single
+// operand and assumed to execute sequentially with additive
+// latency. However, if the parent SchedWriteVariant or
+// SchedReadVariant is marked "Variadic", then each Selected
+// SchedReadWrite is mapped in place to the instruction's variadic
+// operands. In this case, latency is not additive. If the current Variant
+// is already part of a Sequence, then that entire chain leading up to
+// the Variant is distributed over the variadic operands.
+class SchedVar<SchedPredicate pred, list<SchedReadWrite> selected> {
+ SchedPredicate Predicate = pred;
+ list<SchedReadWrite> Selected = selected;
+}
+
+// SchedModel silences warnings but is ignored.
+class SchedVariant<list<SchedVar> variants> {
+ list<SchedVar> Variants = variants;
+ bit Variadic = 0;
+ SchedMachineModel SchedModel = ?;
+}
+
+// A SchedWriteVariant is a single SchedWrite type that maps to a list
+// of SchedWrite types under the conditions defined by its predicates.
+//
+// A Variadic write is expanded to cover multiple "def" operands. The
+// SchedVariant's Expansion list is then interpreted as one write
+// per-operand instead of the usual sequential writes feeding a single
+// operand.
+class SchedWriteVariant<list<SchedVar> variants> : SchedWrite,
+ SchedVariant<variants> {
+}
+
+// A SchedReadVariant is a single SchedRead type that maps to a list
+// of SchedRead types under the conditions defined by its predicates.
+//
+// A Variadic read is expanded to cover multiple "readsReg" operands as
+// explained above.
+class SchedReadVariant<list<SchedVar> variants> : SchedRead,
+ SchedVariant<variants> {
+}
+
+// Map a set of opcodes to a list of SchedReadWrite types. This allows
+// the subtarget to easily override specific operations.
+//
+// SchedModel ties this opcode mapping to a processor.
+class InstRW<list<SchedReadWrite> rw, dag instrlist> {
+ list<SchedReadWrite> OperandReadWrites = rw;
+ dag Instrs = instrlist;
+ SchedMachineModel SchedModel = ?;
+}
+
+// Map a set of itinerary classes to SchedReadWrite resources. This is
+// used to bootstrap a target (e.g. ARM) when itineraries already
+// exist and changing InstrInfo is undesirable.
+//
+// SchedModel ties this ItineraryClass mapping to a processor.
+class ItinRW<list<SchedReadWrite> rw, list<InstrItinClass> iic> {
+ list<InstrItinClass> MatchedItinClasses = iic;
+ list<SchedReadWrite> OperandReadWrites = rw;
+ SchedMachineModel SchedModel = ?;
+}
+
+// Alias a target-defined SchedReadWrite to a processor specific
+// SchedReadWrite. This allows a subtarget to easily map a
+// SchedReadWrite type onto a WriteSequence, SchedWriteVariant, or
+// SchedReadVariant.
+//
+// SchedModel will usually be provided by a surrounding let statement
+// and ties this SchedAlias mapping to a processor.
+class SchedAlias<SchedReadWrite match, SchedReadWrite alias> {
+ SchedReadWrite MatchRW = match;
+ SchedReadWrite AliasRW = alias;
+ SchedMachineModel SchedModel = ?;
+}
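
The snippet inside a SchedPredicate's [{...}] braces is ordinary C++
evaluated with MI and SchedModel in scope. A hypothetical predicate
body, factored into a helper purely for illustration:

    #include "llvm/CodeGen/MachineInstr.h"

    // Logic of the kind a predicate such as
    //   SchedPredicate<[{ MI->getOperand(2).isImm() }]>
    // would evaluate to select, say, an immediate form.
    static bool isImmediateForm(const llvm::MachineInstr *MI) {
      return MI->getNumOperands() > 2 && MI->getOperand(2).isImm();
    }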
Modified: llvm/branches/AMDILBackend/include/llvm/Target/TargetSelectionDAG.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Target/TargetSelectionDAG.td?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Target/TargetSelectionDAG.td (original)
+++ llvm/branches/AMDILBackend/include/llvm/Target/TargetSelectionDAG.td Tue Jan 15 11:16:16 2013
@@ -445,9 +445,9 @@
def atomic_load_umax : SDNode<"ISD::ATOMIC_LOAD_UMAX", SDTAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load : SDNode<"ISD::ATOMIC_LOAD", SDTAtomicLoad,
- [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+ [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def atomic_store : SDNode<"ISD::ATOMIC_STORE", SDTAtomicStore,
- [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+ [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
// Do not use ld, st directly. Use load, extload, sextload, zextload, store,
// and truncst (see below).
Modified: llvm/branches/AMDILBackend/include/llvm/Target/TargetSelectionDAGInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Target/TargetSelectionDAGInfo.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Target/TargetSelectionDAGInfo.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Target/TargetSelectionDAGInfo.h Tue Jan 15 11:16:16 2013
@@ -20,7 +20,7 @@
namespace llvm {
-class TargetData;
+class DataLayout;
class TargetMachine;
//===----------------------------------------------------------------------===//
@@ -28,13 +28,13 @@
/// SelectionDAG lowering and instruction selection process.
///
class TargetSelectionDAGInfo {
- TargetSelectionDAGInfo(const TargetSelectionDAGInfo &); // DO NOT IMPLEMENT
- void operator=(const TargetSelectionDAGInfo &); // DO NOT IMPLEMENT
+ TargetSelectionDAGInfo(const TargetSelectionDAGInfo &) LLVM_DELETED_FUNCTION;
+ void operator=(const TargetSelectionDAGInfo &) LLVM_DELETED_FUNCTION;
- const TargetData *TD;
+ const DataLayout *TD;
protected:
- const TargetData *getTargetData() const { return TD; }
+ const DataLayout *getDataLayout() const { return TD; }
public:
explicit TargetSelectionDAGInfo(const TargetMachine &TM);
Modified: llvm/branches/AMDILBackend/include/llvm/Target/TargetSubtargetInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Target/TargetSubtargetInfo.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Target/TargetSubtargetInfo.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Target/TargetSubtargetInfo.h Tue Jan 15 11:16:16 2013
@@ -19,9 +19,11 @@
namespace llvm {
+class MachineInstr;
class SDep;
class SUnit;
class TargetRegisterClass;
+class TargetSchedModel;
template <typename T> class SmallVectorImpl;
//===----------------------------------------------------------------------===//
@@ -31,8 +33,8 @@
/// be exposed through a TargetSubtargetInfo-derived class.
///
class TargetSubtargetInfo : public MCSubtargetInfo {
- TargetSubtargetInfo(const TargetSubtargetInfo&); // DO NOT IMPLEMENT
- void operator=(const TargetSubtargetInfo&); // DO NOT IMPLEMENT
+ TargetSubtargetInfo(const TargetSubtargetInfo&) LLVM_DELETED_FUNCTION;
+ void operator=(const TargetSubtargetInfo&) LLVM_DELETED_FUNCTION;
protected: // Can only create subclasses...
TargetSubtargetInfo();
public:
@@ -43,23 +45,26 @@
virtual ~TargetSubtargetInfo();
- /// getSpecialAddressLatency - For targets where it is beneficial to
- /// backschedule instructions that compute addresses, return a value
- /// indicating the number of scheduling cycles of backscheduling that
- /// should be attempted.
- virtual unsigned getSpecialAddressLatency() const { return 0; }
+ /// Resolve a SchedClass at runtime, where SchedClass identifies an
+ /// MCSchedClassDesc with the isVariant property. This may return the ID of
+ /// another variant SchedClass, but repeated invocation must quickly terminate
+ /// in a nonvariant SchedClass.
+ virtual unsigned resolveSchedClass(unsigned SchedClass, const MachineInstr *MI,
+ const TargetSchedModel* SchedModel) const {
+ return 0;
+ }
// enablePostRAScheduler - If the target can benefit from post-regalloc
// scheduling and the specified optimization level meets the requirement
// return true to enable post-register-allocation scheduling. In
// CriticalPathRCs return any register classes that should only be broken
- // if on the critical path.
+ // if on the critical path.
virtual bool enablePostRAScheduler(CodeGenOpt::Level OptLevel,
AntiDepBreakMode& Mode,
RegClassVector& CriticalPathRCs) const;
// adjustSchedDependency - Perform target specific adjustments to
// the latency of a schedule dependency.
- virtual void adjustSchedDependency(SUnit *def, SUnit *use,
+ virtual void adjustSchedDependency(SUnit *def, SUnit *use,
SDep& dep) const { }
};
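
A sketch of overriding the new hook for a hypothetical subtarget that
defines no variant classes; real TableGen-generated implementations
perform the actual variant resolution:

    #include "llvm/Target/TargetSubtargetInfo.h"

    namespace {
    class MySubtarget : public llvm::TargetSubtargetInfo { // illustrative
    public:
      virtual unsigned
      resolveSchedClass(unsigned SchedClass, const llvm::MachineInstr *,
                        const llvm::TargetSchedModel *) const {
        // No variants defined, so every class is already nonvariant.
        return SchedClass;
      }
    };
    }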
Modified: llvm/branches/AMDILBackend/include/llvm/Transforms/IPO.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Transforms/IPO.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Transforms/IPO.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Transforms/IPO.h Tue Jan 15 11:16:16 2013
@@ -104,23 +104,14 @@
//===----------------------------------------------------------------------===//
/// createInternalizePass - This pass loops over all of the functions in the
-/// input module, internalizing all globals (functions and variables) not part
-/// of the api. If a list of symbols is specified with the
-/// -internalize-public-api-* command line options, those symbols are not
-/// internalized and all others are. Otherwise if AllButMain is set and the
-/// main function is found, all other globals are marked as internal. If no api
-/// is supplied and AllButMain is not set, or no main function is found, nothing
-/// is internalized.
-///
-ModulePass *createInternalizePass(bool AllButMain);
-
-/// createInternalizePass - This pass loops over all of the functions in the
/// input module, internalizing all globals (functions and variables) not in the
/// given exportList.
///
/// Note that commandline options that are used with the above function are not
-/// used now! Also, when exportList is empty, nothing is internalized.
+/// used now!
ModulePass *createInternalizePass(const std::vector<const char *> &exportList);
+/// createInternalizePass - Same as above, but with an empty exportList.
+ModulePass *createInternalizePass();
//===----------------------------------------------------------------------===//
/// createDeadArgEliminationPass - This pass removes arguments from functions
@@ -192,6 +183,16 @@
/// createPartialInliningPass - This pass inlines parts of functions.
///
ModulePass *createPartialInliningPass();
+
+//===----------------------------------------------------------------------===//
+// createMetaRenamerPass - Rename everything with metasyntactic names.
+//
+ModulePass *createMetaRenamerPass();
+
+//===----------------------------------------------------------------------===//
+/// createBarrierNoopPass - This pass is purely a module pass barrier in a pass
+/// manager.
+ModulePass *createBarrierNoopPass();
} // End llvm namespace
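
A sketch of the two remaining internalize overloads in use (the
PassManager plumbing is assumed):

    #include <vector>
    #include "llvm/PassManager.h"
    #include "llvm/Transforms/IPO.h"

    static void addInternalize(llvm::PassManager &PM, bool KeepMain) {
      if (KeepMain) {
        std::vector<const char *> Exports;
        Exports.push_back("main"); // everything else goes internal
        PM.add(llvm::createInternalizePass(Exports));
      } else {
        PM.add(llvm::createInternalizePass()); // empty export list
      }
    }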
Modified: llvm/branches/AMDILBackend/include/llvm/Transforms/IPO/InlinerPass.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Transforms/IPO/InlinerPass.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Transforms/IPO/InlinerPass.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Transforms/IPO/InlinerPass.h Tue Jan 15 11:16:16 2013
@@ -21,7 +21,7 @@
namespace llvm {
class CallSite;
- class TargetData;
+ class DataLayout;
class InlineCost;
template<class PtrType, unsigned SmallSize>
class SmallPtrSet;
Modified: llvm/branches/AMDILBackend/include/llvm/Transforms/IPO/PassManagerBuilder.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Transforms/IPO/PassManagerBuilder.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Transforms/IPO/PassManagerBuilder.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Transforms/IPO/PassManagerBuilder.h Tue Jan 15 11:16:16 2013
@@ -104,6 +104,7 @@
bool DisableUnitAtATime;
bool DisableUnrollLoops;
bool Vectorize;
+ bool LoopVectorize;
private:
/// ExtensionList - This is list of all of the extensions that are registered.
Modified: llvm/branches/AMDILBackend/include/llvm/Transforms/Instrumentation.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Transforms/Instrumentation.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Transforms/Instrumentation.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Transforms/Instrumentation.h Tue Jan 15 11:16:16 2013
@@ -34,7 +34,7 @@
bool UseExtraChecksum = false);
// Insert AddressSanitizer (address sanity checking) instrumentation
-ModulePass *createAddressSanitizerPass();
+FunctionPass *createAddressSanitizerPass();
// Insert ThreadSanitizer (race detection) instrumentation
FunctionPass *createThreadSanitizerPass();
Modified: llvm/branches/AMDILBackend/include/llvm/Transforms/Scalar.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Transforms/Scalar.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Transforms/Scalar.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Transforms/Scalar.h Tue Jan 15 11:16:16 2013
@@ -70,6 +70,12 @@
//===----------------------------------------------------------------------===//
//
+// SROA - Replace aggregates or pieces of aggregates with scalar SSA values.
+//
+FunctionPass *createSROAPass(bool RequiresDomTree = true);
+
+//===----------------------------------------------------------------------===//
+//
// ScalarReplAggregates - Break up alloca's of aggregates into multiple allocas
// if possible.
//
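
A sketch of scheduling the new pass (FunctionPassManager plumbing
assumed; passing false would opt out of the dominator-tree
requirement):

    #include "llvm/PassManager.h"
    #include "llvm/Transforms/Scalar.h"

    static void addSROA(llvm::FunctionPassManager &FPM) {
      FPM.add(llvm::createSROAPass()); // RequiresDomTree defaults to true
    }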
Modified: llvm/branches/AMDILBackend/include/llvm/Transforms/Utils/AddrModeMatcher.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Transforms/Utils/AddrModeMatcher.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Transforms/Utils/AddrModeMatcher.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Transforms/Utils/AddrModeMatcher.h Tue Jan 15 11:16:16 2013
@@ -19,6 +19,7 @@
#ifndef LLVM_TRANSFORMS_UTILS_ADDRMODEMATCHER_H
#define LLVM_TRANSFORMS_UTILS_ADDRMODEMATCHER_H
+#include "llvm/AddressingMode.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Target/TargetLowering.h"
@@ -33,7 +34,7 @@
/// ExtAddrMode - This is an extended version of TargetLowering::AddrMode
/// which holds actual Value*'s for register values.
-struct ExtAddrMode : public TargetLowering::AddrMode {
+struct ExtAddrMode : public AddrMode {
Value *BaseReg;
Value *ScaledReg;
ExtAddrMode() : BaseReg(0), ScaledReg(0) {}
Modified: llvm/branches/AMDILBackend/include/llvm/Transforms/Utils/BasicBlockUtils.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Transforms/Utils/BasicBlockUtils.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Transforms/Utils/BasicBlockUtils.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Transforms/Utils/BasicBlockUtils.h Tue Jan 15 11:16:16 2013
@@ -25,8 +25,11 @@
class AliasAnalysis;
class Instruction;
+class MDNode;
class Pass;
class ReturnInst;
+class TargetLibraryInfo;
+class TerminatorInst;
/// DeleteDeadBlock - Delete the specified block, which must have no
/// predecessors.
@@ -44,7 +47,7 @@
/// a result. This includes tracing the def-use list from the PHI to see if
/// it is ultimately unused or if it reaches an unused cycle. Return true
/// if any PHIs were deleted.
-bool DeleteDeadPHIs(BasicBlock *BB);
+bool DeleteDeadPHIs(BasicBlock *BB, const TargetLibraryInfo *TLI = 0);
/// MergeBlockIntoPredecessor - Attempts to merge a block into its predecessor,
/// if possible. The return value indicates success or failure.
@@ -202,6 +205,29 @@
ReturnInst *FoldReturnIntoUncondBranch(ReturnInst *RI, BasicBlock *BB,
BasicBlock *Pred);
+/// SplitBlockAndInsertIfThen - Split the containing block at the
+/// specified instruction - everything before and including Cmp stays
+/// in the old basic block, and everything after Cmp is moved to a
+/// new block. The two blocks are connected by a conditional branch
+/// (with value of Cmp being the condition).
+/// Before:
+/// Head
+/// Cmp
+/// Tail
+/// After:
+/// Head
+/// Cmp
+/// if (Cmp)
+/// ThenBlock
+/// Tail
+///
+/// If Unreachable is true, then ThenBlock ends with
+/// UnreachableInst, otherwise it branches to Tail.
+/// Returns the NewBasicBlock's terminator.
+
+TerminatorInst *SplitBlockAndInsertIfThen(Instruction *Cmp,
+ bool Unreachable, MDNode *BranchWeights = 0);
+
} // End llvm namespace
#endif
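
A sketch of the intended use, assuming Cmp is an existing i1-valued
instruction and TrapFn is a hypothetical callee:

    #include "llvm/IRBuilder.h"
    #include "llvm/Instructions.h"
    #include "llvm/Transforms/Utils/BasicBlockUtils.h"

    static void guardWithTrap(llvm::Instruction *Cmp, llvm::Value *TrapFn) {
      // ThenBlock ends in unreachable; Tail keeps the old fallthrough.
      llvm::TerminatorInst *Then =
          llvm::SplitBlockAndInsertIfThen(Cmp, /*Unreachable=*/true);
      llvm::IRBuilder<> B(Then);
      B.CreateCall(TrapFn);
    }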
Modified: llvm/branches/AMDILBackend/include/llvm/Transforms/Utils/BuildLibCalls.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Transforms/Utils/BuildLibCalls.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Transforms/Utils/BuildLibCalls.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Transforms/Utils/BuildLibCalls.h Tue Jan 15 11:16:16 2013
@@ -19,7 +19,7 @@
namespace llvm {
class Value;
- class TargetData;
+ class DataLayout;
class TargetLibraryInfo;
/// CastToCStr - Return V if it is an i8*, otherwise cast it to i8*.
@@ -28,52 +28,52 @@
/// EmitStrLen - Emit a call to the strlen function to the builder, for the
/// specified pointer. Ptr is required to be some pointer type, and the
/// return value has 'intptr_t' type.
- Value *EmitStrLen(Value *Ptr, IRBuilder<> &B, const TargetData *TD,
+ Value *EmitStrLen(Value *Ptr, IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI);
/// EmitStrNLen - Emit a call to the strnlen function to the builder, for the
/// specified pointer. Ptr is required to be some pointer type, MaxLen must
/// be of size_t type, and the return value has 'intptr_t' type.
Value *EmitStrNLen(Value *Ptr, Value *MaxLen, IRBuilder<> &B,
- const TargetData *TD, const TargetLibraryInfo *TLI);
+ const DataLayout *TD, const TargetLibraryInfo *TLI);
/// EmitStrChr - Emit a call to the strchr function to the builder, for the
/// specified pointer and character. Ptr is required to be some pointer type,
/// and the return value has 'i8*' type.
- Value *EmitStrChr(Value *Ptr, char C, IRBuilder<> &B, const TargetData *TD,
+ Value *EmitStrChr(Value *Ptr, char C, IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI);
/// EmitStrNCmp - Emit a call to the strncmp function to the builder.
Value *EmitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B,
- const TargetData *TD, const TargetLibraryInfo *TLI);
+ const DataLayout *TD, const TargetLibraryInfo *TLI);
/// EmitStrCpy - Emit a call to the strcpy function to the builder, for the
/// specified pointer arguments.
Value *EmitStrCpy(Value *Dst, Value *Src, IRBuilder<> &B,
- const TargetData *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *TD, const TargetLibraryInfo *TLI,
StringRef Name = "strcpy");
/// EmitStrNCpy - Emit a call to the strncpy function to the builder, for the
/// specified pointer arguments and length.
Value *EmitStrNCpy(Value *Dst, Value *Src, Value *Len, IRBuilder<> &B,
- const TargetData *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *TD, const TargetLibraryInfo *TLI,
StringRef Name = "strncpy");
/// EmitMemCpyChk - Emit a call to the __memcpy_chk function to the builder.
/// This expects that the Len and ObjSize have type 'intptr_t' and Dst/Src
/// are pointers.
Value *EmitMemCpyChk(Value *Dst, Value *Src, Value *Len, Value *ObjSize,
- IRBuilder<> &B, const TargetData *TD,
+ IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI);
/// EmitMemChr - Emit a call to the memchr function. This assumes that Ptr is
/// a pointer, Val is an i32 value, and Len is an 'intptr_t' value.
Value *EmitMemChr(Value *Ptr, Value *Val, Value *Len, IRBuilder<> &B,
- const TargetData *TD, const TargetLibraryInfo *TLI);
+ const DataLayout *TD, const TargetLibraryInfo *TLI);
/// EmitMemCmp - Emit a call to the memcmp function.
Value *EmitMemCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B,
- const TargetData *TD, const TargetLibraryInfo *TLI);
+ const DataLayout *TD, const TargetLibraryInfo *TLI);
/// EmitUnaryFloatFnCall - Emit a call to the unary function named 'Name'
/// (e.g. 'floor'). This function is known to take a single of type matching
@@ -85,28 +85,28 @@
/// EmitPutChar - Emit a call to the putchar function. This assumes that Char
/// is an integer.
- Value *EmitPutChar(Value *Char, IRBuilder<> &B, const TargetData *TD,
+ Value *EmitPutChar(Value *Char, IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI);
/// EmitPutS - Emit a call to the puts function. This assumes that Str is
/// some pointer.
- Value *EmitPutS(Value *Str, IRBuilder<> &B, const TargetData *TD,
+ Value *EmitPutS(Value *Str, IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI);
/// EmitFPutC - Emit a call to the fputc function. This assumes that Char is
/// an i32, and File is a pointer to FILE.
Value *EmitFPutC(Value *Char, Value *File, IRBuilder<> &B,
- const TargetData *TD, const TargetLibraryInfo *TLI);
+ const DataLayout *TD, const TargetLibraryInfo *TLI);
/// EmitFPutS - Emit a call to the puts function. Str is required to be a
/// pointer and File is a pointer to FILE.
- Value *EmitFPutS(Value *Str, Value *File, IRBuilder<> &B, const TargetData *TD,
+ Value *EmitFPutS(Value *Str, Value *File, IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI);
/// EmitFWrite - Emit a call to the fwrite function. This assumes that Ptr is
/// a pointer, Size is an 'intptr_t', and File is a pointer to FILE.
Value *EmitFWrite(Value *Ptr, Value *Size, Value *File, IRBuilder<> &B,
- const TargetData *TD, const TargetLibraryInfo *TLI);
+ const DataLayout *TD, const TargetLibraryInfo *TLI);
/// SimplifyFortifiedLibCalls - Helper class for folding checked library
/// calls (e.g. __strcpy_chk) into their unchecked counterparts.
@@ -118,7 +118,7 @@
bool isString) const = 0;
public:
virtual ~SimplifyFortifiedLibCalls();
- bool fold(CallInst *CI, const TargetData *TD, const TargetLibraryInfo *TLI);
+ bool fold(CallInst *CI, const DataLayout *TD, const TargetLibraryInfo *TLI);
};
}
Modified: llvm/branches/AMDILBackend/include/llvm/Transforms/Utils/Cloning.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Transforms/Utils/Cloning.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Transforms/Utils/Cloning.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Transforms/Utils/Cloning.h Tue Jan 15 11:16:16 2013
@@ -39,7 +39,7 @@
class CallSite;
class Trace;
class CallGraph;
-class TargetData;
+class DataLayout;
class Loop;
class LoopInfo;
class AllocaInst;
@@ -116,13 +116,6 @@
bool ModuleLevelChanges,
ClonedCodeInfo *CodeInfo = 0);
-/// CloneFunction - Version of the function that doesn't need the VMap.
-///
-inline Function *CloneFunction(const Function *F, ClonedCodeInfo *CodeInfo = 0){
- ValueToValueMapTy VMap;
- return CloneFunction(F, VMap, CodeInfo);
-}
-
/// Clone OldFunc into NewFunc, transforming the old arguments into references
/// to VMap values. Note that if NewFunc already has basic blocks, the ones
/// cloned into it will be added to the end of the function. This function
@@ -157,7 +150,7 @@
SmallVectorImpl<ReturnInst*> &Returns,
const char *NameSuffix = "",
ClonedCodeInfo *CodeInfo = 0,
- const TargetData *TD = 0,
+ const DataLayout *TD = 0,
Instruction *TheCall = 0);
@@ -165,13 +158,13 @@
/// InlineFunction call, and records the auxiliary results produced by it.
class InlineFunctionInfo {
public:
- explicit InlineFunctionInfo(CallGraph *cg = 0, const TargetData *td = 0)
+ explicit InlineFunctionInfo(CallGraph *cg = 0, const DataLayout *td = 0)
: CG(cg), TD(td) {}
/// CG - If non-null, InlineFunction will update the callgraph to reflect the
/// changes it makes.
CallGraph *CG;
- const TargetData *TD;
+ const DataLayout *TD;
/// StaticAllocas - InlineFunction fills this in with all static allocas that
/// get copied into the caller.
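
A sketch of the updated construction, assuming a CallSite and a
DataLayout pointer are at hand:

    #include "llvm/Support/CallSite.h"
    #include "llvm/Transforms/Utils/Cloning.h"

    static bool tryInline(llvm::CallSite CS, const llvm::DataLayout *DL) {
      // The info struct now carries a DataLayout instead of TargetData.
      llvm::InlineFunctionInfo IFI(/*cg=*/0, DL);
      return llvm::InlineFunction(CS, IFI);
    }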
Modified: llvm/branches/AMDILBackend/include/llvm/Transforms/Utils/Local.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Transforms/Utils/Local.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Transforms/Utils/Local.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Transforms/Utils/Local.h Tue Jan 15 11:16:16 2013
@@ -18,7 +18,7 @@
#include "llvm/IRBuilder.h"
#include "llvm/Operator.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
namespace llvm {
@@ -35,7 +35,9 @@
class PHINode;
class AllocaInst;
class ConstantExpr;
-class TargetData;
+class DataLayout;
+class TargetLibraryInfo;
+class TargetTransformInfo;
class DIBuilder;
template<typename T> class SmallVectorImpl;
@@ -51,7 +53,8 @@
/// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
/// conditions and indirectbr addresses this might make dead if
/// DeleteDeadConditions is true.
-bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions = false);
+bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions = false,
+ const TargetLibraryInfo *TLI = 0);
//===----------------------------------------------------------------------===//
// Local dead code elimination.
@@ -60,20 +63,21 @@
/// isInstructionTriviallyDead - Return true if the result produced by the
/// instruction is not used, and the instruction has no side effects.
///
-bool isInstructionTriviallyDead(Instruction *I);
+bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=0);
/// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
/// trivially dead instruction, delete it. If that makes any of its operands
/// trivially dead, delete them too, recursively. Return true if any
/// instructions were deleted.
-bool RecursivelyDeleteTriviallyDeadInstructions(Value *V);
+bool RecursivelyDeleteTriviallyDeadInstructions(Value *V,
+ const TargetLibraryInfo *TLI=0);
/// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
/// dead PHI node, due to being a def-use chain of single-use nodes that
/// either forms a cycle or is terminated by a trivially dead instruction,
/// delete it. If that makes any of its operands trivially dead, delete them
/// too, recursively. Return true if a change was made.
-bool RecursivelyDeleteDeadPHINode(PHINode *PN);
+bool RecursivelyDeleteDeadPHINode(PHINode *PN, const TargetLibraryInfo *TLI=0);
/// SimplifyInstructionsInBlock - Scan the specified basic block and try to
@@ -81,7 +85,8 @@
///
/// This returns true if it changed the code, note that it can delete
/// instructions in other blocks as well in this block.
-bool SimplifyInstructionsInBlock(BasicBlock *BB, const TargetData *TD = 0);
+bool SimplifyInstructionsInBlock(BasicBlock *BB, const DataLayout *TD = 0,
+ const TargetLibraryInfo *TLI = 0);
//===----------------------------------------------------------------------===//
// Control Flow Graph Restructuring.
@@ -99,7 +104,7 @@
/// .. and delete the predecessor corresponding to the '1', this will attempt to
/// recursively fold the 'and' to 0.
void RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred,
- TargetData *TD = 0);
+ DataLayout *TD = 0);
/// MergeBasicBlockIntoOnlyPred - BB is a block with one predecessor and its
@@ -130,7 +135,8 @@
/// of the CFG. It returns true if a modification was made, possibly deleting
/// the basic block that was pointed to.
///
-bool SimplifyCFG(BasicBlock *BB, const TargetData *TD = 0);
+bool SimplifyCFG(BasicBlock *BB, const DataLayout *TD = 0,
+ const TargetTransformInfo *TTI = 0);
/// FoldBranchToCommonDest - If this basic block is ONLY a setcc and a branch,
/// and if a predecessor branches to us and one of our successors, fold the
@@ -158,10 +164,10 @@
/// and it is more than the alignment of the ultimate object, see if we can
/// increase the alignment of the ultimate object, making this check succeed.
unsigned getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
- const TargetData *TD = 0);
+ const DataLayout *TD = 0);
/// getKnownAlignment - Try to infer an alignment for the specified pointer.
-static inline unsigned getKnownAlignment(Value *V, const TargetData *TD = 0) {
+static inline unsigned getKnownAlignment(Value *V, const DataLayout *TD = 0) {
return getOrEnforceKnownAlignment(V, 0, TD);
}
@@ -171,7 +177,7 @@
/// When NoAssumptions is true, no assumptions about index computation not
/// overflowing is made.
template<typename IRBuilderTy>
-Value *EmitGEPOffset(IRBuilderTy *Builder, const TargetData &TD, User *GEP,
+Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &TD, User *GEP,
bool NoAssumptions = false) {
gep_type_iterator GTI = gep_type_begin(GEP);
Type *IntPtrTy = TD.getIntPtrType(GEP->getContext());
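
A sketch combining the two TLI-aware helpers above:

    #include "llvm/Transforms/Utils/Local.h"

    static bool zapIfDead(llvm::Instruction *I,
                          const llvm::TargetLibraryInfo *TLI) {
      // TLI lets the check treat known library calls as removable.
      if (!llvm::isInstructionTriviallyDead(I, TLI))
        return false;
      return llvm::RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
    }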
Modified: llvm/branches/AMDILBackend/include/llvm/Transforms/Utils/SSAUpdater.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Transforms/Utils/SSAUpdater.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Transforms/Utils/SSAUpdater.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Transforms/Utils/SSAUpdater.h Tue Jan 15 11:16:16 2013
@@ -109,8 +109,8 @@
private:
Value *GetValueAtEndOfBlockInternal(BasicBlock *BB);
- void operator=(const SSAUpdater&); // DO NOT IMPLEMENT
- SSAUpdater(const SSAUpdater&); // DO NOT IMPLEMENT
+ void operator=(const SSAUpdater&) LLVM_DELETED_FUNCTION;
+ SSAUpdater(const SSAUpdater&) LLVM_DELETED_FUNCTION;
};
/// LoadAndStorePromoter - This little helper class provides a convenient way to
Modified: llvm/branches/AMDILBackend/include/llvm/Transforms/Utils/SimplifyIndVar.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Transforms/Utils/SimplifyIndVar.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Transforms/Utils/SimplifyIndVar.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Transforms/Utils/SimplifyIndVar.h Tue Jan 15 11:16:16 2013
@@ -21,8 +21,6 @@
namespace llvm {
-extern cl::opt<bool> DisableIVRewrite;
-
class CastInst;
class IVUsers;
class Loop;
Modified: llvm/branches/AMDILBackend/include/llvm/Transforms/Utils/ValueMapper.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Transforms/Utils/ValueMapper.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Transforms/Utils/ValueMapper.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Transforms/Utils/ValueMapper.h Tue Jan 15 11:16:16 2013
@@ -25,7 +25,7 @@
/// ValueMapTypeRemapper - This is a class that can be implemented by clients
/// to remap types when cloning constants and instructions.
class ValueMapTypeRemapper {
- virtual void Anchor(); // Out of line method.
+ virtual void anchor(); // Out of line method.
public:
virtual ~ValueMapTypeRemapper() {}
Modified: llvm/branches/AMDILBackend/include/llvm/Transforms/Vectorize.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Transforms/Vectorize.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Transforms/Vectorize.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Transforms/Vectorize.h Tue Jan 15 11:16:16 2013
@@ -107,6 +107,12 @@
createBBVectorizePass(const VectorizeConfig &C = VectorizeConfig());
//===----------------------------------------------------------------------===//
+//
+// LoopVectorize - Create a loop vectorization pass.
+//
+Pass * createLoopVectorizePass();
+
+//===----------------------------------------------------------------------===//
/// @brief Vectorize the BasicBlock.
///
/// @param BB The BasicBlock to be vectorized
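
A sketch wiring the new pass into a pipeline next to the existing
basic-block vectorizer:

    #include "llvm/PassManager.h"
    #include "llvm/Transforms/Vectorize.h"

    static void addVectorizers(llvm::PassManager &PM) {
      PM.add(llvm::createBBVectorizePass());   // straight-line vectorizer
      PM.add(llvm::createLoopVectorizePass()); // the new loop vectorizer
    }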
Modified: llvm/branches/AMDILBackend/include/llvm/Type.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Type.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Type.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Type.h Tue Jan 15 11:16:16 2013
@@ -153,7 +153,7 @@
/// isPPC_FP128Ty - Return true if this is powerpc long double.
bool isPPC_FP128Ty() const { return getTypeID() == PPC_FP128TyID; }
- /// isFloatingPointTy - Return true if this is one of the five floating point
+ /// isFloatingPointTy - Return true if this is one of the six floating point
/// types
bool isFloatingPointTy() const {
return getTypeID() == HalfTyID || getTypeID() == FloatTyID ||
@@ -167,7 +167,7 @@
/// isFPOrFPVectorTy - Return true if this is a FP type or a vector of FP.
///
- bool isFPOrFPVectorTy() const;
+ bool isFPOrFPVectorTy() const { return getScalarType()->isFloatingPointTy(); }
/// isLabelTy - Return true if this is 'label'.
bool isLabelTy() const { return getTypeID() == LabelTyID; }
@@ -185,7 +185,7 @@
/// isIntOrIntVectorTy - Return true if this is an integer type or a vector of
/// integer types.
///
- bool isIntOrIntVectorTy() const;
+ bool isIntOrIntVectorTy() const { return getScalarType()->isIntegerTy(); }
/// isFunctionTy - True if this is an instance of FunctionType.
///
@@ -203,6 +203,11 @@
///
bool isPointerTy() const { return getTypeID() == PointerTyID; }
+ /// isPtrOrPtrVectorTy - Return true if this is a pointer type or a vector of
+ /// pointer types.
+ ///
+ bool isPtrOrPtrVectorTy() const { return getScalarType()->isPointerTy(); }
+
/// isVectorTy - True if this is an instance of VectorType.
///
bool isVectorTy() const { return getTypeID() == VectorTyID; }
@@ -252,7 +257,7 @@
/// isSized - Return true if it makes sense to take the size of this type. To
/// get the actual size for a particular target, it is reasonable to use the
- /// TargetData subsystem to do this.
+ /// DataLayout subsystem to do this.
///
bool isSized() const {
// If it's a primitive, it is always sized.
@@ -276,7 +281,7 @@
///
/// Note that this may not reflect the size of memory allocated for an
/// instance of the type or the number of bytes that are written when an
- /// instance of the type is stored to memory. The TargetData class provides
+ /// instance of the type is stored to memory. The DataLayout class provides
/// additional query functions to provide this information.
///
unsigned getPrimitiveSizeInBits() const;
@@ -293,6 +298,7 @@
/// getScalarType - If this is a vector type, return the element type,
/// otherwise return 'this'.
+ const Type *getScalarType() const;
Type *getScalarType();
//===--------------------------------------------------------------------===//
@@ -340,8 +346,10 @@
unsigned getVectorNumElements() const;
Type *getVectorElementType() const { return getSequentialElementType(); }
- unsigned getPointerAddressSpace() const;
Type *getPointerElementType() const { return getSequentialElementType(); }
+
+ /// \brief Get the address space of this pointer or pointer vector type.
+ unsigned getPointerAddressSpace() const;
//===--------------------------------------------------------------------===//
// Static members exported by the Type class itself. Useful for getting
@@ -389,9 +397,6 @@
static PointerType *getInt32PtrTy(LLVMContext &C, unsigned AS = 0);
static PointerType *getInt64PtrTy(LLVMContext &C, unsigned AS = 0);
- /// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const Type *) { return true; }
-
/// getPointerTo - Return a pointer to the current type. This is equivalent
/// to PointerType::get(Foo, AddrSpace).
PointerType *getPointerTo(unsigned AddrSpace = 0);
Modified: llvm/branches/AMDILBackend/include/llvm/Use.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Use.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Use.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Use.h Tue Jan 15 11:16:16 2013
@@ -26,6 +26,7 @@
#define LLVM_USE_H
#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/Support/Compiler.h"
#include <cstddef>
#include <iterator>
@@ -66,7 +67,7 @@
private:
/// Copy ctor - do not implement
- Use(const Use &U);
+ Use(const Use &U) LLVM_DELETED_FUNCTION;
/// Destructor - Only for zap()
~Use() {
Modified: llvm/branches/AMDILBackend/include/llvm/User.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/User.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/User.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/User.h Tue Jan 15 11:16:16 2013
@@ -31,8 +31,8 @@
struct OperandTraits;
class User : public Value {
- User(const User &); // Do not implement
- void *operator new(size_t); // Do not implement
+ User(const User &) LLVM_DELETED_FUNCTION;
+ void *operator new(size_t) LLVM_DELETED_FUNCTION;
template <unsigned>
friend struct HungoffOperandTraits;
virtual void anchor();
@@ -104,7 +104,7 @@
assert(i < NumOperands && "getOperandUse() out of range!");
return OperandList[i];
}
-
+
unsigned getNumOperands() const { return NumOperands; }
// ---------------------------------------------------------------------------
@@ -118,6 +118,45 @@
inline op_iterator op_end() { return OperandList+NumOperands; }
inline const_op_iterator op_end() const { return OperandList+NumOperands; }
+ /// Convenience iterator for directly iterating over the Values in the
+ /// OperandList
+ class value_op_iterator : public std::iterator<std::forward_iterator_tag,
+ Value*> {
+ op_iterator OI;
+ public:
+ explicit value_op_iterator(Use *U) : OI(U) {}
+
+ bool operator==(const value_op_iterator &x) const {
+ return OI == x.OI;
+ }
+ bool operator!=(const value_op_iterator &x) const {
+ return !operator==(x);
+ }
+
+ /// Iterator traversal: forward iteration only
+ value_op_iterator &operator++() { // Preincrement
+ ++OI;
+ return *this;
+ }
+ value_op_iterator operator++(int) { // Postincrement
+ value_op_iterator tmp = *this; ++*this; return tmp;
+ }
+
+ /// Retrieve a pointer to the current Value.
+ Value *operator*() const {
+ return *OI;
+ }
+
+ Value *operator->() const { return operator*(); }
+ };
+
+ inline value_op_iterator value_op_begin() {
+ return value_op_iterator(op_begin());
+ }
+ inline value_op_iterator value_op_end() {
+ return value_op_iterator(op_end());
+ }
+
// dropAllReferences() - This function is in charge of "letting go" of all
// objects that this User refers to. This allows one to
// 'delete' a whole class at a time, even though there may be circular
@@ -137,7 +176,6 @@
void replaceUsesOfWith(Value *From, Value *To);
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const User *) { return true; }
static inline bool classof(const Value *V) {
return isa<Instruction>(V) || isa<Constant>(V);
}
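A short usage sketch for the new iterator (the function name below is illustrative): it walks the operand Values directly instead of dereferencing each Use by hand.

#include "llvm/User.h"

void visitOperandValues(llvm::User *U) {
  for (llvm::User::value_op_iterator I = U->value_op_begin(),
                                     E = U->value_op_end();
       I != E; ++I) {
    llvm::Value *V = *I; // yields the operand Value, not the Use
    (void)V;             // inspect or transform V here
  }
}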
Modified: llvm/branches/AMDILBackend/include/llvm/Value.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/include/llvm/Value.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/include/llvm/Value.h (original)
+++ llvm/branches/AMDILBackend/include/llvm/Value.h Tue Jan 15 11:16:16 2013
@@ -16,6 +16,7 @@
#include "llvm/Use.h"
#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
namespace llvm {
@@ -80,8 +81,8 @@
friend class ValueHandleBase;
ValueName *Name;
- void operator=(const Value &); // Do not implement
- Value(const Value &); // Do not implement
+ void operator=(const Value &) LLVM_DELETED_FUNCTION;
+ Value(const Value &) LLVM_DELETED_FUNCTION;
protected:
/// printCustom - Value subclasses can override this to implement custom
@@ -120,7 +121,7 @@
/// setName() - Change the name of the value, choosing a new unique name if
/// the provided name is taken.
///
- /// \arg Name - The new name; or "" if the value's name should be removed.
+ /// \param Name The new name; or "" if the value's name should be removed.
void setName(const Twine &Name);
@@ -256,11 +257,6 @@
/// hasValueHandle - Return true if there is a value handle associated with
/// this value.
bool hasValueHandle() const { return HasValueHandle; }
-
- // Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const Value *) {
- return true; // Values are always values.
- }
/// stripPointerCasts - This method strips off any unneeded pointer casts and
/// all-zero GEPs from the specified value, returning the original uncasted
Modified: llvm/branches/AMDILBackend/lib/Analysis/AliasAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/Analysis/AliasAnalysis.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/Analysis/AliasAnalysis.cpp (original)
+++ llvm/branches/AMDILBackend/lib/Analysis/AliasAnalysis.cpp Tue Jan 15 11:16:16 2013
@@ -35,7 +35,8 @@
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/Type.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
+#include "llvm/Target/TargetLibraryInfo.h"
using namespace llvm;
// Register the AliasAnalysis interface, providing a nice name to refer to.
@@ -451,7 +452,8 @@
/// AliasAnalysis interface before any other methods are called.
///
void AliasAnalysis::InitializeAliasAnalysis(Pass *P) {
- TD = P->getAnalysisIfAvailable<TargetData>();
+ TD = P->getAnalysisIfAvailable<DataLayout>();
+ TLI = P->getAnalysisIfAvailable<TargetLibraryInfo>();
AA = &P->getAnalysis<AliasAnalysis>();
}
@@ -461,7 +463,7 @@
AU.addRequired<AliasAnalysis>(); // All AA's chain
}
-/// getTypeStoreSize - Return the TargetData store size for the given type,
+/// getTypeStoreSize - Return the DataLayout store size for the given type,
/// if known, or a conservative value otherwise.
///
uint64_t AliasAnalysis::getTypeStoreSize(Type *Ty) {
@@ -501,7 +503,7 @@
bool llvm::isNoAliasCall(const Value *V) {
if (isa<CallInst>(V) || isa<InvokeInst>(V))
return ImmutableCallSite(cast<Instruction>(V))
- .paramHasAttr(0, Attribute::NoAlias);
+ .paramHasAttr(0, Attributes::NoAlias);
return false;
}
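With this hunk, InitializeAliasAnalysis() caches TargetLibraryInfo next to DataLayout, so every implementation that chains through it picks up TLI for free. A hedged sketch of the usual wiring (pass name is mine):

#include "llvm/Pass.h"
#include "llvm/Analysis/AliasAnalysis.h"

namespace {
struct MyAliasAnalysis : public llvm::ImmutablePass,
                         public llvm::AliasAnalysis {
  static char ID;
  MyAliasAnalysis() : llvm::ImmutablePass(ID) {}

  virtual void initializePass() {
    // Populates TD (DataLayout), TLI (TargetLibraryInfo) and the chained AA.
    InitializeAliasAnalysis(this);
  }

  virtual void getAnalysisUsage(llvm::AnalysisUsage &AU) const {
    llvm::AliasAnalysis::getAnalysisUsage(AU);
  }
};
char MyAliasAnalysis::ID = 0;
}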
Modified: llvm/branches/AMDILBackend/lib/Analysis/AliasSetTracker.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/Analysis/AliasSetTracker.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/Analysis/AliasSetTracker.cpp (original)
+++ llvm/branches/AMDILBackend/lib/Analysis/AliasSetTracker.cpp Tue Jan 15 11:16:16 2013
@@ -18,7 +18,7 @@
#include "llvm/LLVMContext.h"
#include "llvm/Pass.h"
#include "llvm/Type.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@@ -550,7 +550,7 @@
//===----------------------------------------------------------------------===//
void AliasSet::print(raw_ostream &OS) const {
- OS << " AliasSet[" << (void*)this << ", " << RefCount << "] ";
+ OS << " AliasSet[" << (const void*)this << ", " << RefCount << "] ";
OS << (AliasTy == MustAlias ? "must" : "may") << " alias, ";
switch (AccessTy) {
case NoModRef: OS << "No access "; break;
@@ -590,8 +590,10 @@
OS << "\n";
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void AliasSet::dump() const { print(dbgs()); }
void AliasSetTracker::dump() const { print(dbgs()); }
+#endif
//===----------------------------------------------------------------------===//
// ASTCallbackVH Class Implementation
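The guard added around the dump() definitions is the standard pattern for keeping debug helpers out of release binaries while letting a build opt back in with LLVM_ENABLE_DUMP. The same shape works for any out-of-line dump() (the class below is illustrative):

#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

struct MyAnalysis {
  void print(llvm::raw_ostream &OS) const { OS << "MyAnalysis state\n"; }
  void dump() const; // declared unconditionally, defined conditionally
};

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void MyAnalysis::dump() const { print(llvm::dbgs()); }
#endif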
Modified: llvm/branches/AMDILBackend/lib/Analysis/Analysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/Analysis/Analysis.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/Analysis/Analysis.cpp (original)
+++ llvm/branches/AMDILBackend/lib/Analysis/Analysis.cpp Tue Jan 15 11:16:16 2013
@@ -26,11 +26,13 @@
initializeBasicAliasAnalysisPass(Registry);
initializeBlockFrequencyInfoPass(Registry);
initializeBranchProbabilityInfoPass(Registry);
+ initializeCostModelAnalysisPass(Registry);
initializeCFGViewerPass(Registry);
initializeCFGPrinterPass(Registry);
initializeCFGOnlyViewerPass(Registry);
initializeCFGOnlyPrinterPass(Registry);
initializePrintDbgInfoPass(Registry);
+ initializeDependenceAnalysisPass(Registry);
initializeDominanceFrontierPass(Registry);
initializeDomViewerPass(Registry);
initializeDomPrinterPass(Registry);
@@ -46,7 +48,6 @@
initializeLazyValueInfoPass(Registry);
initializeLibCallAliasAnalysisPass(Registry);
initializeLintPass(Registry);
- initializeLoopDependenceAnalysisPass(Registry);
initializeLoopInfoPass(Registry);
initializeMemDepPrinterPass(Registry);
initializeMemoryDependenceAnalysisPass(Registry);
@@ -61,6 +62,7 @@
initializePathProfileLoaderPassPass(Registry);
initializeProfileVerifierPassPass(Registry);
initializePathProfileVerifierPass(Registry);
+ initializeProfileMetadataLoaderPassPass(Registry);
initializeRegionInfoPass(Registry);
initializeRegionViewerPass(Registry);
initializeRegionPrinterPass(Registry);
Modified: llvm/branches/AMDILBackend/lib/Analysis/BasicAliasAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/Analysis/BasicAliasAnalysis.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/Analysis/BasicAliasAnalysis.cpp (original)
+++ llvm/branches/AMDILBackend/lib/Analysis/BasicAliasAnalysis.cpp Tue Jan 15 11:16:16 2013
@@ -29,7 +29,7 @@
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
@@ -58,12 +58,12 @@
// then it has not escaped before entering the function. Check if it escapes
// inside the function.
if (const Argument *A = dyn_cast<Argument>(V))
- if (A->hasByValAttr() || A->hasNoAliasAttr()) {
- // Don't bother analyzing arguments already known not to escape.
- if (A->hasNoCaptureAttr())
- return true;
+ if (A->hasByValAttr() || A->hasNoAliasAttr())
+ // Note that even if the argument is marked nocapture, we still need to
+ // check for copies made inside the function. The nocapture attribute
+ // only specifies that there are no copies made that outlive the function.
return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);
- }
+
return false;
}
@@ -84,10 +84,11 @@
/// getObjectSize - Return the size of the object specified by V, or
/// UnknownSize if unknown.
-static uint64_t getObjectSize(const Value *V, const TargetData &TD,
+static uint64_t getObjectSize(const Value *V, const DataLayout &TD,
+ const TargetLibraryInfo &TLI,
bool RoundToAlign = false) {
uint64_t Size;
- if (getObjectSize(V, Size, &TD, RoundToAlign))
+ if (getObjectSize(V, Size, &TD, &TLI, RoundToAlign))
return Size;
return AliasAnalysis::UnknownSize;
}
@@ -95,10 +96,11 @@
/// isObjectSmallerThan - Return true if we can prove that the object specified
/// by V is smaller than Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
- const TargetData &TD) {
+ const DataLayout &TD,
+ const TargetLibraryInfo &TLI) {
// This function needs to use the aligned object size because we allow
// reads a bit past the end given sufficient alignment.
- uint64_t ObjectSize = getObjectSize(V, TD, /*RoundToAlign*/true);
+ uint64_t ObjectSize = getObjectSize(V, TD, TLI, /*RoundToAlign*/true);
return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize < Size;
}
@@ -106,8 +108,8 @@
/// isObjectSize - Return true if we can prove that the object specified
/// by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size,
- const TargetData &TD) {
- uint64_t ObjectSize = getObjectSize(V, TD);
+ const DataLayout &TD, const TargetLibraryInfo &TLI) {
+ uint64_t ObjectSize = getObjectSize(V, TD, TLI);
return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize == Size;
}
@@ -126,6 +128,15 @@
const Value *V;
ExtensionKind Extension;
int64_t Scale;
+
+ bool operator==(const VariableGEPIndex &Other) const {
+ return V == Other.V && Extension == Other.Extension &&
+ Scale == Other.Scale;
+ }
+
+ bool operator!=(const VariableGEPIndex &Other) const {
+ return !operator==(Other);
+ }
};
}
@@ -140,7 +151,7 @@
/// represented in the result.
static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
ExtensionKind &Extension,
- const TargetData &TD, unsigned Depth) {
+ const DataLayout &TD, unsigned Depth) {
assert(V->getType()->isIntegerTy() && "Not an integer value");
// Limit our recursion depth.
@@ -215,14 +226,14 @@
/// specified amount, but which may have other unrepresented high bits. As such,
/// the gep cannot necessarily be reconstructed from its decomposed form.
///
-/// When TargetData is around, this function is capable of analyzing everything
+/// When DataLayout is around, this function is capable of analyzing everything
/// that GetUnderlyingObject can look through. When not, it just looks
/// through pointer casts.
///
static const Value *
DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
SmallVectorImpl<VariableGEPIndex> &VarIndices,
- const TargetData *TD) {
+ const DataLayout *TD) {
// Limit recursion depth to limit compile time in crazy cases.
unsigned MaxLookup = 6;
@@ -266,7 +277,7 @@
->getElementType()->isSized())
return V;
- // If we are lacking TargetData information, we can't compute the offets of
+ // If we are lacking DataLayout information, we can't compute the offsets of
// elements computed by GEPs. However, we can handle bitcast equivalent
// GEPs.
if (TD == 0) {
@@ -417,13 +428,7 @@
/// BasicAliasAnalysis - This is the primary alias analysis implementation.
struct BasicAliasAnalysis : public ImmutablePass, public AliasAnalysis {
static char ID; // Class identification, replacement for typeinfo
- BasicAliasAnalysis() : ImmutablePass(ID),
- // AliasCache rarely has more than 1 or 2 elements,
- // so start it off fairly small so that clear()
- // doesn't have to tromp through 64 (the default)
- // elements on each alias query. This really wants
- // something like a SmallDenseMap.
- AliasCache(8) {
+ BasicAliasAnalysis() : ImmutablePass(ID) {
initializeBasicAliasAnalysisPass(*PassRegistry::getPassRegistry());
}
@@ -443,7 +448,11 @@
"BasicAliasAnalysis doesn't support interprocedural queries.");
AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.TBAATag,
LocB.Ptr, LocB.Size, LocB.TBAATag);
- AliasCache.clear();
+ // AliasCache rarely has more than 1 or 2 elements; always use
+ // shrink_and_clear so it quickly returns to the inline capacity of the
+ // SmallDenseMap if it ever grows larger.
+ // FIXME: This should really be shrink_to_inline_capacity_and_clear().
+ AliasCache.shrink_and_clear();
return Alias;
}
@@ -481,7 +490,7 @@
private:
// AliasCache - Track alias queries to guard against recursion.
typedef std::pair<Location, Location> LocPair;
- typedef DenseMap<LocPair, AliasResult> AliasCacheTy;
+ typedef SmallDenseMap<LocPair, AliasResult, 8> AliasCacheTy;
AliasCacheTy AliasCache;
// Visited - Track instructions visited by pointsToConstantMemory.
@@ -490,6 +499,7 @@
// aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP
// instruction against another.
AliasResult aliasGEP(const GEPOperator *V1, uint64_t V1Size,
+ const MDNode *V1TBAAInfo,
const Value *V2, uint64_t V2Size,
const MDNode *V2TBAAInfo,
const Value *UnderlyingV1, const Value *UnderlyingV2);
@@ -807,6 +817,21 @@
return ModRefResult(AliasAnalysis::getModRefInfo(CS, Loc) & Min);
}
+static bool areVarIndicesEqual(SmallVector<VariableGEPIndex, 4> &Indices1,
+ SmallVector<VariableGEPIndex, 4> &Indices2) {
+ unsigned Size1 = Indices1.size();
+ unsigned Size2 = Indices2.size();
+
+ if (Size1 != Size2)
+ return false;
+
+ for (unsigned I = 0; I != Size1; ++I)
+ if (Indices1[I] != Indices2[I])
+ return false;
+
+ return true;
+}
+
/// aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP instruction
/// against another pointer. We know that V1 is a GEP, but we don't know
/// anything about V2. UnderlyingV1 is GetUnderlyingObject(GEP1, TD),
@@ -814,6 +839,7 @@
///
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
+ const MDNode *V1TBAAInfo,
const Value *V2, uint64_t V2Size,
const MDNode *V2TBAAInfo,
const Value *UnderlyingV1,
@@ -821,9 +847,41 @@
int64_t GEP1BaseOffset;
SmallVector<VariableGEPIndex, 4> GEP1VariableIndices;
- // If we have two gep instructions with must-alias'ing base pointers, figure
- // out if the indexes to the GEP tell us anything about the derived pointer.
+ // If we have two gep instructions with must-alias or not-alias'ing base
+ // pointers, figure out if the indexes to the GEP tell us anything about the
+ // derived pointer.
if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
+ // Check for geps of non-aliasing underlying pointers where the offsets are
+ // identical.
+ if (V1Size == V2Size) {
+ // Do the base pointers alias, assuming type and size?
+ AliasResult PreciseBaseAlias = aliasCheck(UnderlyingV1, V1Size,
+ V1TBAAInfo, UnderlyingV2,
+ V2Size, V2TBAAInfo);
+ if (PreciseBaseAlias == NoAlias) {
+ // See if the computed offset from the common pointer tells us about the
+ // relation of the resulting pointer.
+ int64_t GEP2BaseOffset;
+ SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
+ const Value *GEP2BasePtr =
+ DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, TD);
+ const Value *GEP1BasePtr =
+ DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);
+ // DecomposeGEPExpression and GetUnderlyingObject should return the
+ // same result except when DecomposeGEPExpression has no DataLayout.
+ if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
+ assert(TD == 0 &&
+ "DecomposeGEPExpression and GetUnderlyingObject disagree!");
+ return MayAlias;
+ }
+ // Same offsets.
+ if (GEP1BaseOffset == GEP2BaseOffset &&
+ areVarIndicesEqual(GEP1VariableIndices, GEP2VariableIndices))
+ return NoAlias;
+ GEP1VariableIndices.clear();
+ }
+ }
+
// Do the base pointers alias?
AliasResult BaseAlias = aliasCheck(UnderlyingV1, UnknownSize, 0,
UnderlyingV2, UnknownSize, 0);
@@ -843,9 +901,8 @@
const Value *GEP2BasePtr =
DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, TD);
- // If DecomposeGEPExpression isn't able to look all the way through the
- // addressing operation, we must not have TD and this is too complex for us
- // to handle without it.
+ // DecomposeGEPExpression and GetUnderlyingObject should return the
+ // same result except when DecomposeGEPExpression has no DataLayout.
if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
assert(TD == 0 &&
"DecomposeGEPExpression and GetUnderlyingObject disagree!");
@@ -879,9 +936,8 @@
const Value *GEP1BasePtr =
DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);
- // If DecomposeGEPExpression isn't able to look all the way through the
- // addressing operation, we must not have TD and this is too complex for us
- // to handle without it.
+ // DecomposeGEPExpression and GetUnderlyingObject should return the
+ // same result except when DecomposeGEPExpression has no DataLayout.
if (GEP1BasePtr != UnderlyingV1) {
assert(TD == 0 &&
"DecomposeGEPExpression and GetUnderlyingObject disagree!");
@@ -1004,12 +1060,42 @@
// on corresponding edges.
if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
if (PN2->getParent() == PN->getParent()) {
+ LocPair Locs(Location(PN, PNSize, PNTBAAInfo),
+ Location(V2, V2Size, V2TBAAInfo));
+ if (PN > V2)
+ std::swap(Locs.first, Locs.second);
+
AliasResult Alias =
aliasCheck(PN->getIncomingValue(0), PNSize, PNTBAAInfo,
PN2->getIncomingValueForBlock(PN->getIncomingBlock(0)),
V2Size, V2TBAAInfo);
if (Alias == MayAlias)
return MayAlias;
+
+ // If the first sources of the PHI nodes are NoAlias and the other inputs
+ // reach the PHI nodes themselves through some amount of recursion, this
+ // adds no new information, so just return NoAlias. For example:
+ // bb:
+ // ptr = ptr2 + 1
+ // loop:
+ // ptr_phi = phi [bb, ptr], [loop, ptr_plus_one]
+ // ptr2_phi = phi [bb, ptr2], [loop, ptr2_plus_one]
+ // ...
+ // ptr_plus_one = gep ptr_phi, 1
+ // ptr2_plus_one = gep ptr2_phi, 1
+ // We assume for the recursion that the phis (ptr_phi, ptr2_phi) do
+ // not alias each other.
+ bool ArePhisAssumedNoAlias = false;
+ AliasResult OrigAliasResult = NoAlias;
+ if (Alias == NoAlias) {
+ // Pretend the phis do not alias.
+ assert(AliasCache.count(Locs) &&
+ "There must exist an entry for the phi node");
+ OrigAliasResult = AliasCache[Locs];
+ AliasCache[Locs] = NoAlias;
+ ArePhisAssumedNoAlias = true;
+ }
+
for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) {
AliasResult ThisAlias =
aliasCheck(PN->getIncomingValue(i), PNSize, PNTBAAInfo,
@@ -1019,6 +1105,11 @@
if (Alias == MayAlias)
break;
}
+
+ // Reset if speculation failed.
+ if (ArePhisAssumedNoAlias && Alias != NoAlias)
+ AliasCache[Locs] = OrigAliasResult;
+
return Alias;
}
@@ -1133,8 +1224,8 @@
// If the size of one access is larger than the entire object on the other
// side, then we know such behavior is undefined and can assume no alias.
if (TD)
- if ((V1Size != UnknownSize && isObjectSmallerThan(O2, V1Size, *TD)) ||
- (V2Size != UnknownSize && isObjectSmallerThan(O1, V2Size, *TD)))
+ if ((V1Size != UnknownSize && isObjectSmallerThan(O2, V1Size, *TD, *TLI)) ||
+ (V2Size != UnknownSize && isObjectSmallerThan(O1, V2Size, *TD, *TLI)))
return NoAlias;
// Check the cache before climbing up use-def chains. This also terminates
@@ -1154,15 +1245,17 @@
std::swap(V1, V2);
std::swap(V1Size, V2Size);
std::swap(O1, O2);
+ std::swap(V1TBAAInfo, V2TBAAInfo);
}
if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
- AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, V2TBAAInfo, O1, O2);
+ AliasResult Result = aliasGEP(GV1, V1Size, V1TBAAInfo, V2, V2Size, V2TBAAInfo, O1, O2);
if (Result != MayAlias) return AliasCache[Locs] = Result;
}
if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
std::swap(V1, V2);
std::swap(V1Size, V2Size);
+ std::swap(V1TBAAInfo, V2TBAAInfo);
}
if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
AliasResult Result = aliasPHI(PN, V1Size, V1TBAAInfo,
@@ -1173,6 +1266,7 @@
if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
std::swap(V1, V2);
std::swap(V1Size, V2Size);
+ std::swap(V1TBAAInfo, V2TBAAInfo);
}
if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
AliasResult Result = aliasSelect(S1, V1Size, V1TBAAInfo,
@@ -1184,8 +1278,8 @@
// accesses is accessing the entire object, then the accesses must
// overlap in some way.
if (TD && O1 == O2)
- if ((V1Size != UnknownSize && isObjectSize(O1, V1Size, *TD)) ||
- (V2Size != UnknownSize && isObjectSize(O2, V2Size, *TD)))
+ if ((V1Size != UnknownSize && isObjectSize(O1, V1Size, *TD, *TLI)) ||
+ (V2Size != UnknownSize && isObjectSize(O2, V2Size, *TD, *TLI)))
return AliasCache[Locs] = PartialAlias;
AliasResult Result =
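The AliasCache change above trades a pre-sized DenseMap for a SmallDenseMap: the first 8 entries live inline, and shrink_and_clear() releases whatever heap table a large query grew, so the next query starts from the inline buffer again. A small illustration (names are mine, not the patch's):

#include "llvm/ADT/DenseMap.h"

llvm::SmallDenseMap<unsigned, unsigned, 8> Cache;

void answerOneQuery() {
  Cache[1] = 2;             // typical queries never leave the inline buffer
  Cache.shrink_and_clear(); // unlike clear(), also frees any heap storage
}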
Modified: llvm/branches/AMDILBackend/lib/Analysis/BranchProbabilityInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/Analysis/BranchProbabilityInfo.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/Analysis/BranchProbabilityInfo.cpp (original)
+++ llvm/branches/AMDILBackend/lib/Analysis/BranchProbabilityInfo.cpp Tue Jan 15 11:16:16 2013
@@ -1,4 +1,4 @@
-//===-- BranchProbabilityInfo.cpp - Branch Probability Analysis -*- C++ -*-===//
+//===-- BranchProbabilityInfo.cpp - Branch Probability Analysis -----------===//
//
// The LLVM Compiler Infrastructure
//
@@ -78,6 +78,19 @@
static const uint32_t FPH_TAKEN_WEIGHT = 20;
static const uint32_t FPH_NONTAKEN_WEIGHT = 12;
+/// \brief Invoke-terminating normal branch taken weight.
+///
+/// This is the weight for branching to the normal destination of an invoke
+/// instruction. We expect this to happen most of the time. Set the weight to an
+/// absurdly high value so that nested loops subsume it.
+static const uint32_t IH_TAKEN_WEIGHT = 1024 * 1024 - 1;
+
+/// \brief Invoke-terminating normal branch not-taken weight.
+///
+/// This is the weight for branching to the unwind destination of an invoke
+/// instruction. This is essentially never taken.
+static const uint32_t IH_NONTAKEN_WEIGHT = 1;
+
// Standard weight value. Used when none of the heuristics set weight for
// the edge.
static const uint32_t NORMAL_WEIGHT = 16;
@@ -102,14 +115,14 @@
return false;
}
- SmallPtrSet<BasicBlock *, 4> UnreachableEdges;
- SmallPtrSet<BasicBlock *, 4> ReachableEdges;
+ SmallVector<unsigned, 4> UnreachableEdges;
+ SmallVector<unsigned, 4> ReachableEdges;
for (succ_iterator I = succ_begin(BB), E = succ_end(BB); I != E; ++I) {
if (PostDominatedByUnreachable.count(*I))
- UnreachableEdges.insert(*I);
+ UnreachableEdges.push_back(I.getSuccessorIndex());
else
- ReachableEdges.insert(*I);
+ ReachableEdges.push_back(I.getSuccessorIndex());
}
// If all successors are in the set of blocks post-dominated by unreachable,
@@ -123,18 +136,19 @@
return false;
uint32_t UnreachableWeight =
- std::max(UR_TAKEN_WEIGHT / UnreachableEdges.size(), MIN_WEIGHT);
- for (SmallPtrSet<BasicBlock *, 4>::iterator I = UnreachableEdges.begin(),
- E = UnreachableEdges.end();
+ std::max(UR_TAKEN_WEIGHT / (unsigned)UnreachableEdges.size(), MIN_WEIGHT);
+ for (SmallVector<unsigned, 4>::iterator I = UnreachableEdges.begin(),
+ E = UnreachableEdges.end();
I != E; ++I)
setEdgeWeight(BB, *I, UnreachableWeight);
if (ReachableEdges.empty())
return true;
uint32_t ReachableWeight =
- std::max(UR_NONTAKEN_WEIGHT / ReachableEdges.size(), NORMAL_WEIGHT);
- for (SmallPtrSet<BasicBlock *, 4>::iterator I = ReachableEdges.begin(),
- E = ReachableEdges.end();
+ std::max(UR_NONTAKEN_WEIGHT / (unsigned)ReachableEdges.size(),
+ NORMAL_WEIGHT);
+ for (SmallVector<unsigned, 4>::iterator I = ReachableEdges.begin(),
+ E = ReachableEdges.end();
I != E; ++I)
setEdgeWeight(BB, *I, ReachableWeight);
@@ -174,7 +188,7 @@
}
assert(Weights.size() == TI->getNumSuccessors() && "Checked above");
for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
- setEdgeWeight(BB, TI->getSuccessor(i), Weights[i]);
+ setEdgeWeight(BB, i, Weights[i]);
return true;
}
@@ -198,19 +212,17 @@
assert(CI->getOperand(1)->getType()->isPointerTy());
- BasicBlock *Taken = BI->getSuccessor(0);
- BasicBlock *NonTaken = BI->getSuccessor(1);
-
// p != 0 -> isProb = true
// p == 0 -> isProb = false
// p != q -> isProb = true
// p == q -> isProb = false;
+ unsigned TakenIdx = 0, NonTakenIdx = 1;
bool isProb = CI->getPredicate() == ICmpInst::ICMP_NE;
if (!isProb)
- std::swap(Taken, NonTaken);
+ std::swap(TakenIdx, NonTakenIdx);
- setEdgeWeight(BB, Taken, PH_TAKEN_WEIGHT);
- setEdgeWeight(BB, NonTaken, PH_NONTAKEN_WEIGHT);
+ setEdgeWeight(BB, TakenIdx, PH_TAKEN_WEIGHT);
+ setEdgeWeight(BB, NonTakenIdx, PH_NONTAKEN_WEIGHT);
return true;
}
@@ -221,17 +233,17 @@
if (!L)
return false;
- SmallPtrSet<BasicBlock *, 8> BackEdges;
- SmallPtrSet<BasicBlock *, 8> ExitingEdges;
- SmallPtrSet<BasicBlock *, 8> InEdges; // Edges from header to the loop.
+ SmallVector<unsigned, 8> BackEdges;
+ SmallVector<unsigned, 8> ExitingEdges;
+ SmallVector<unsigned, 8> InEdges; // Edges from header to the loop.
for (succ_iterator I = succ_begin(BB), E = succ_end(BB); I != E; ++I) {
if (!L->contains(*I))
- ExitingEdges.insert(*I);
+ ExitingEdges.push_back(I.getSuccessorIndex());
else if (L->getHeader() == *I)
- BackEdges.insert(*I);
+ BackEdges.push_back(I.getSuccessorIndex());
else
- InEdges.insert(*I);
+ InEdges.push_back(I.getSuccessorIndex());
}
if (uint32_t numBackEdges = BackEdges.size()) {
@@ -239,10 +251,9 @@
if (backWeight < NORMAL_WEIGHT)
backWeight = NORMAL_WEIGHT;
- for (SmallPtrSet<BasicBlock *, 8>::iterator EI = BackEdges.begin(),
+ for (SmallVector<unsigned, 8>::iterator EI = BackEdges.begin(),
EE = BackEdges.end(); EI != EE; ++EI) {
- BasicBlock *Back = *EI;
- setEdgeWeight(BB, Back, backWeight);
+ setEdgeWeight(BB, *EI, backWeight);
}
}
@@ -251,10 +262,9 @@
if (inWeight < NORMAL_WEIGHT)
inWeight = NORMAL_WEIGHT;
- for (SmallPtrSet<BasicBlock *, 8>::iterator EI = InEdges.begin(),
+ for (SmallVector<unsigned, 8>::iterator EI = InEdges.begin(),
EE = InEdges.end(); EI != EE; ++EI) {
- BasicBlock *Back = *EI;
- setEdgeWeight(BB, Back, inWeight);
+ setEdgeWeight(BB, *EI, inWeight);
}
}
@@ -263,10 +273,9 @@
if (exitWeight < MIN_WEIGHT)
exitWeight = MIN_WEIGHT;
- for (SmallPtrSet<BasicBlock *, 8>::iterator EI = ExitingEdges.begin(),
+ for (SmallVector<unsigned, 8>::iterator EI = ExitingEdges.begin(),
EE = ExitingEdges.end(); EI != EE; ++EI) {
- BasicBlock *Exiting = *EI;
- setEdgeWeight(BB, Exiting, exitWeight);
+ setEdgeWeight(BB, *EI, exitWeight);
}
}
@@ -322,14 +331,13 @@
return false;
}
- BasicBlock *Taken = BI->getSuccessor(0);
- BasicBlock *NonTaken = BI->getSuccessor(1);
+ unsigned TakenIdx = 0, NonTakenIdx = 1;
if (!isProb)
- std::swap(Taken, NonTaken);
+ std::swap(TakenIdx, NonTakenIdx);
- setEdgeWeight(BB, Taken, ZH_TAKEN_WEIGHT);
- setEdgeWeight(BB, NonTaken, ZH_NONTAKEN_WEIGHT);
+ setEdgeWeight(BB, TakenIdx, ZH_TAKEN_WEIGHT);
+ setEdgeWeight(BB, NonTakenIdx, ZH_NONTAKEN_WEIGHT);
return true;
}
@@ -359,15 +367,24 @@
return false;
}
- BasicBlock *Taken = BI->getSuccessor(0);
- BasicBlock *NonTaken = BI->getSuccessor(1);
+ unsigned TakenIdx = 0, NonTakenIdx = 1;
if (!isProb)
- std::swap(Taken, NonTaken);
+ std::swap(TakenIdx, NonTakenIdx);
+
+ setEdgeWeight(BB, TakenIdx, FPH_TAKEN_WEIGHT);
+ setEdgeWeight(BB, NonTakenIdx, FPH_NONTAKEN_WEIGHT);
- setEdgeWeight(BB, Taken, FPH_TAKEN_WEIGHT);
- setEdgeWeight(BB, NonTaken, FPH_NONTAKEN_WEIGHT);
+ return true;
+}
+
+bool BranchProbabilityInfo::calcInvokeHeuristics(BasicBlock *BB) {
+ InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator());
+ if (!II)
+ return false;
+ setEdgeWeight(BB, 0/*Index for Normal*/, IH_TAKEN_WEIGHT);
+ setEdgeWeight(BB, 1/*Index for Unwind*/, IH_NONTAKEN_WEIGHT);
return true;
}
@@ -397,7 +414,9 @@
continue;
if (calcZeroHeuristics(*I))
continue;
- calcFloatingPointHeuristics(*I);
+ if (calcFloatingPointHeuristics(*I))
+ continue;
+ calcInvokeHeuristics(*I);
}
PostDominatedByUnreachable.clear();
@@ -422,8 +441,7 @@
uint32_t Sum = 0;
for (succ_const_iterator I = succ_begin(BB), E = succ_end(BB); I != E; ++I) {
- const BasicBlock *Succ = *I;
- uint32_t Weight = getEdgeWeight(BB, Succ);
+ uint32_t Weight = getEdgeWeight(BB, I.getSuccessorIndex());
uint32_t PrevSum = Sum;
Sum += Weight;
@@ -466,11 +484,13 @@
return 0;
}
-// Return edge's weight. If can't find it, return DEFAULT_WEIGHT value.
+/// Get the raw edge weight for the edge. If the weight is unknown, return the
+/// DEFAULT_WEIGHT value. Here an edge is specified using PredBlock and an
+/// index into its successors.
uint32_t BranchProbabilityInfo::
-getEdgeWeight(const BasicBlock *Src, const BasicBlock *Dst) const {
- Edge E(Src, Dst);
- DenseMap<Edge, uint32_t>::const_iterator I = Weights.find(E);
+getEdgeWeight(const BasicBlock *Src, unsigned IndexInSuccessors) const {
+ DenseMap<Edge, uint32_t>::const_iterator I =
+ Weights.find(std::make_pair(Src, IndexInSuccessors));
if (I != Weights.end())
return I->second;
@@ -478,15 +498,43 @@
return DEFAULT_WEIGHT;
}
+/// Get the raw edge weight calculated for the block pair. This returns the sum
+/// of all raw edge weights from Src to Dst.
+uint32_t BranchProbabilityInfo::
+getEdgeWeight(const BasicBlock *Src, const BasicBlock *Dst) const {
+ uint32_t Weight = 0;
+ DenseMap<Edge, uint32_t>::const_iterator MapI;
+ for (succ_const_iterator I = succ_begin(Src), E = succ_end(Src); I != E; ++I)
+ if (*I == Dst) {
+ MapI = Weights.find(std::make_pair(Src, I.getSuccessorIndex()));
+ if (MapI != Weights.end())
+ Weight += MapI->second;
+ }
+ return (Weight == 0) ? DEFAULT_WEIGHT : Weight;
+}
+
+/// Set the edge weight for a given edge specified by PredBlock and an index
+/// into its successors.
void BranchProbabilityInfo::
-setEdgeWeight(const BasicBlock *Src, const BasicBlock *Dst, uint32_t Weight) {
- Weights[std::make_pair(Src, Dst)] = Weight;
+setEdgeWeight(const BasicBlock *Src, unsigned IndexInSuccessors,
+ uint32_t Weight) {
+ Weights[std::make_pair(Src, IndexInSuccessors)] = Weight;
DEBUG(dbgs() << "set edge " << Src->getName() << " -> "
- << Dst->getName() << " weight to " << Weight
- << (isEdgeHot(Src, Dst) ? " [is HOT now]\n" : "\n"));
+ << IndexInSuccessors << " successor weight to "
+ << Weight << "\n");
}
+/// Get an edge's probability, relative to other out-edges from Src.
+BranchProbability BranchProbabilityInfo::
+getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const {
+ uint32_t N = getEdgeWeight(Src, IndexInSuccessors);
+ uint32_t D = getSumForBlock(Src);
+
+ return BranchProbability(N, D);
+}
+/// Get the probability of going from Src to Dst. It returns the sum of all
+/// probabilities for edges from Src to Dst.
BranchProbability BranchProbabilityInfo::
getEdgeProbability(const BasicBlock *Src, const BasicBlock *Dst) const {
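The recurring theme in this file: edge weights are now keyed by (source block, successor index) rather than (source, destination), because two distinct edges can share one destination (e.g. several switch cases branching to the same block). A hedged sketch of reading them back (helper name is mine):

#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Support/CFG.h"

uint32_t sumOutgoingWeights(const llvm::BasicBlock *BB,
                            const llvm::BranchProbabilityInfo &BPI) {
  uint32_t Sum = 0;
  for (llvm::succ_const_iterator I = llvm::succ_begin(BB),
                                 E = llvm::succ_end(BB);
       I != E; ++I)
    // Index-based lookup keeps parallel edges to one block distinct.
    Sum += BPI.getEdgeWeight(BB, I.getSuccessorIndex());
  return Sum;
}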
Modified: llvm/branches/AMDILBackend/lib/Analysis/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/Analysis/CMakeLists.txt?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/Analysis/CMakeLists.txt (original)
+++ llvm/branches/AMDILBackend/lib/Analysis/CMakeLists.txt Tue Jan 15 11:16:16 2013
@@ -10,9 +10,11 @@
BranchProbabilityInfo.cpp
CFGPrinter.cpp
CaptureTracking.cpp
+ CostModel.cpp
CodeMetrics.cpp
ConstantFolding.cpp
DbgInfoPrinter.cpp
+ DependenceAnalysis.cpp
DomPrinter.cpp
DominanceFrontier.cpp
IVUsers.cpp
@@ -26,7 +28,6 @@
LibCallSemantics.cpp
Lint.cpp
Loads.cpp
- LoopDependenceAnalysis.cpp
LoopInfo.cpp
LoopPass.cpp
MemDepPrinter.cpp
@@ -44,6 +45,8 @@
ProfileInfoLoader.cpp
ProfileInfoLoaderPass.cpp
ProfileVerifierPass.cpp
+ ProfileDataLoader.cpp
+ ProfileDataLoaderPass.cpp
RegionInfo.cpp
RegionPass.cpp
RegionPrinter.cpp
Modified: llvm/branches/AMDILBackend/lib/Analysis/CaptureTracking.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/Analysis/CaptureTracking.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/Analysis/CaptureTracking.cpp (original)
+++ llvm/branches/AMDILBackend/lib/Analysis/CaptureTracking.cpp Tue Jan 15 11:16:16 2013
@@ -23,6 +23,8 @@
CaptureTracker::~CaptureTracker() {}
+bool CaptureTracker::shouldExplore(Use *U) { return true; }
+
namespace {
struct SimpleCaptureTracker : public CaptureTracker {
explicit SimpleCaptureTracker(bool ReturnCaptures)
@@ -30,8 +32,6 @@
void tooManyUses() { Captured = true; }
- bool shouldExplore(Use *U) { return true; }
-
bool captured(Use *U) {
if (isa<ReturnInst>(U->getUser()) && !ReturnCaptures)
return false;
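With shouldExplore() given a default in the base class (explore every use), trackers only override the hooks they care about. A sketch of a minimal custom tracker under that assumption (type name is mine):

#include "llvm/Analysis/CaptureTracking.h"

namespace {
struct CountingCaptureTracker : public llvm::CaptureTracker {
  unsigned Captures;
  CountingCaptureTracker() : Captures(0) {}

  void tooManyUses() { ++Captures; }

  // Returning false records the capture but keeps the walk going.
  bool captured(llvm::Use *) { ++Captures; return false; }

  // shouldExplore(Use *) is inherited: every use gets explored.
};
}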
Modified: llvm/branches/AMDILBackend/lib/Analysis/CodeMetrics.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/Analysis/CodeMetrics.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/Analysis/CodeMetrics.cpp (original)
+++ llvm/branches/AMDILBackend/lib/Analysis/CodeMetrics.cpp Tue Jan 15 11:16:16 2013
@@ -15,7 +15,7 @@
#include "llvm/Function.h"
#include "llvm/Support/CallSite.h"
#include "llvm/IntrinsicInst.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
using namespace llvm;
@@ -54,7 +54,7 @@
return false;
}
-bool llvm::isInstructionFree(const Instruction *I, const TargetData *TD) {
+bool llvm::isInstructionFree(const Instruction *I, const DataLayout *TD) {
if (isa<PHINode>(I))
return true;
@@ -119,7 +119,7 @@
/// analyzeBasicBlock - Fill in the current structure with information gleaned
/// from the specified block.
void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB,
- const TargetData *TD) {
+ const DataLayout *TD) {
++NumBlocks;
unsigned NumInstsBeforeThisBB = NumInsts;
for (BasicBlock::const_iterator II = BB->begin(), E = BB->end();
@@ -189,14 +189,14 @@
NumBBInsts[BB] = NumInsts - NumInstsBeforeThisBB;
}
-void CodeMetrics::analyzeFunction(Function *F, const TargetData *TD) {
+void CodeMetrics::analyzeFunction(Function *F, const DataLayout *TD) {
// If this function contains a call that "returns twice" (e.g., setjmp or
// _setjmp) and it isn't marked with "returns twice" itself, never inline it.
// This is a hack because we depend on the user marking their local variables
// as volatile if they are live across a setjmp call, and they probably
// won't do this in callers.
exposesReturnsTwice = F->callsFunctionThatReturnsTwice() &&
- !F->hasFnAttr(Attribute::ReturnsTwice);
+ !F->getFnAttributes().hasAttribute(Attributes::ReturnsTwice);
// Look at the size of the callee.
for (Function::const_iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
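The attribute check above switches to the new Attributes class API. The equivalent standalone query, for reference (helper name is mine):

#include "llvm/Function.h"
#include "llvm/Attributes.h"

bool callsReturnsTwice(const llvm::Function *F) {
  // Old spelling: F->hasFnAttr(Attribute::ReturnsTwice)
  return F->getFnAttributes().hasAttribute(llvm::Attributes::ReturnsTwice);
}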
Modified: llvm/branches/AMDILBackend/lib/Analysis/ConstantFolding.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/Analysis/ConstantFolding.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/Analysis/ConstantFolding.cpp (original)
+++ llvm/branches/AMDILBackend/lib/Analysis/ConstantFolding.cpp Tue Jan 15 11:16:16 2013
@@ -11,7 +11,7 @@
//
// Also, to supplement the basic VMCore ConstantExpr simplifications,
// this file defines some additional folding routines that can make use of
-// TargetData information. These functions cannot go in VMCore due to library
+// DataLayout information. These functions cannot go in VMCore due to library
// dependency issues.
//
//===----------------------------------------------------------------------===//
@@ -25,7 +25,7 @@
#include "llvm/Intrinsics.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
@@ -41,11 +41,11 @@
// Constant Folding internal helper functions
//===----------------------------------------------------------------------===//
-/// FoldBitCast - Constant fold bitcast, symbolically evaluating it with
-/// TargetData. This always returns a non-null constant, but it may be a
+/// FoldBitCast - Constant fold bitcast, symbolically evaluating it with
+/// DataLayout. This always returns a non-null constant, but it may be a
/// ConstantExpr if unfoldable.
static Constant *FoldBitCast(Constant *C, Type *DestTy,
- const TargetData &TD) {
+ const DataLayout &TD) {
// Catch the obvious splat cases.
if (C->isNullValue() && !DestTy->isX86_MMXTy())
return Constant::getNullValue(DestTy);
@@ -59,9 +59,9 @@
return ConstantExpr::getBitCast(C, DestTy);
unsigned NumSrcElts = CDV->getType()->getNumElements();
-
+
Type *SrcEltTy = CDV->getType()->getElementType();
-
+
// If the vector is a vector of floating point, convert it to vector of int
// to simplify things.
if (SrcEltTy->isFloatingPointTy()) {
@@ -72,7 +72,7 @@
C = ConstantExpr::getBitCast(C, SrcIVTy);
CDV = cast<ConstantDataVector>(C);
}
-
+
// Now that we know that the input value is a vector of integers, just shift
// and insert them into our result.
unsigned BitShift = TD.getTypeAllocSizeInBits(SrcEltTy);
@@ -84,43 +84,43 @@
else
Result |= CDV->getElementAsInteger(i);
}
-
+
return ConstantInt::get(IT, Result);
}
-
+
// The code below only handles casts to vectors currently.
VectorType *DestVTy = dyn_cast<VectorType>(DestTy);
if (DestVTy == 0)
return ConstantExpr::getBitCast(C, DestTy);
-
+
// If this is a scalar -> vector cast, convert the input into a <1 x scalar>
// vector so the code below can handle it uniformly.
if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
Constant *Ops = C; // don't take the address of C!
return FoldBitCast(ConstantVector::get(Ops), DestTy, TD);
}
-
+
// If this is a bitcast from constant vector -> vector, fold it.
if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
return ConstantExpr::getBitCast(C, DestTy);
-
+
// If the element types match, VMCore can fold it.
unsigned NumDstElt = DestVTy->getNumElements();
unsigned NumSrcElt = C->getType()->getVectorNumElements();
if (NumDstElt == NumSrcElt)
return ConstantExpr::getBitCast(C, DestTy);
-
+
Type *SrcEltTy = C->getType()->getVectorElementType();
Type *DstEltTy = DestVTy->getElementType();
-
- // Otherwise, we're changing the number of elements in a vector, which
+
+ // Otherwise, we're changing the number of elements in a vector, which
// requires endianness information to do the right thing. For example,
// bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
// folds to (little endian):
// <4 x i32> <i32 0, i32 0, i32 1, i32 0>
// and to (big endian):
// <4 x i32> <i32 0, i32 0, i32 0, i32 1>
-
+
// First thing is first. We only want to think about integer here, so if
// we have something in FP form, recast it as integer.
if (DstEltTy->isFloatingPointTy()) {
@@ -130,11 +130,11 @@
VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt);
// Recursively handle this integer conversion, if possible.
C = FoldBitCast(C, DestIVTy, TD);
-
+
// Finally, VMCore can handle this now that #elts line up.
return ConstantExpr::getBitCast(C, DestTy);
}
-
+
// Okay, we know the destination is integer, if the input is FP, convert
// it to integer first.
if (SrcEltTy->isFloatingPointTy()) {
@@ -148,13 +148,13 @@
!isa<ConstantDataVector>(C))
return C;
}
-
+
// Now we know that the input and output vectors are both integer vectors
// of the same size, and that their #elements is not the same. Do the
// conversion here, which depends on whether the input or output has
// more elements.
bool isLittleEndian = TD.isLittleEndian();
-
+
SmallVector<Constant*, 32> Result;
if (NumDstElt < NumSrcElt) {
// Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
@@ -170,15 +170,15 @@
Constant *Src =dyn_cast<ConstantInt>(C->getAggregateElement(SrcElt++));
if (!Src) // Reject constantexpr elements.
return ConstantExpr::getBitCast(C, DestTy);
-
+
// Zero extend the element to the right size.
Src = ConstantExpr::getZExt(Src, Elt->getType());
-
+
// Shift it to the right place, depending on endianness.
- Src = ConstantExpr::getShl(Src,
+ Src = ConstantExpr::getShl(Src,
ConstantInt::get(Src->getType(), ShiftAmt));
ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
-
+
// Mix it in.
Elt = ConstantExpr::getOr(Elt, Src);
}
@@ -186,30 +186,30 @@
}
return ConstantVector::get(Result);
}
-
+
// Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
unsigned Ratio = NumDstElt/NumSrcElt;
unsigned DstBitSize = DstEltTy->getPrimitiveSizeInBits();
-
+
// Loop over each source value, expanding into multiple results.
for (unsigned i = 0; i != NumSrcElt; ++i) {
Constant *Src = dyn_cast<ConstantInt>(C->getAggregateElement(i));
if (!Src) // Reject constantexpr elements.
return ConstantExpr::getBitCast(C, DestTy);
-
+
unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
for (unsigned j = 0; j != Ratio; ++j) {
// Shift the piece of the value into the right place, depending on
// endianness.
- Constant *Elt = ConstantExpr::getLShr(Src,
+ Constant *Elt = ConstantExpr::getLShr(Src,
ConstantInt::get(Src->getType(), ShiftAmt));
ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
-
+
// Truncate and remember this piece.
Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
}
}
-
+
return ConstantVector::get(Result);
}
@@ -218,34 +218,34 @@
/// from a global, return the global and the constant. Because of
/// constantexprs, this function is recursive.
static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
- int64_t &Offset, const TargetData &TD) {
+ int64_t &Offset, const DataLayout &TD) {
// Trivial case, constant is the global.
if ((GV = dyn_cast<GlobalValue>(C))) {
Offset = 0;
return true;
}
-
+
// Otherwise, if this isn't a constant expr, bail out.
ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
if (!CE) return false;
-
+
// Look through ptr->int and ptr->ptr casts.
if (CE->getOpcode() == Instruction::PtrToInt ||
CE->getOpcode() == Instruction::BitCast)
return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, TD);
-
- // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
+
+ // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
if (CE->getOpcode() == Instruction::GetElementPtr) {
// Cannot compute this if the element type of the pointer is missing size
// info.
if (!cast<PointerType>(CE->getOperand(0)->getType())
->getElementType()->isSized())
return false;
-
+
// If the base isn't a global+constant, we aren't either.
if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, TD))
return false;
-
+
// Otherwise, add any offset that our operands provide.
gep_type_iterator GTI = gep_type_begin(CE);
for (User::const_op_iterator i = CE->op_begin() + 1, e = CE->op_end();
@@ -253,7 +253,7 @@
ConstantInt *CI = dyn_cast<ConstantInt>(*i);
if (!CI) return false; // Index isn't a simple constant?
if (CI->isZero()) continue; // Not adding anything.
-
+
if (StructType *ST = dyn_cast<StructType>(*GTI)) {
// N = N + Offset
Offset += TD.getStructLayout(ST)->getElementOffset(CI->getZExtValue());
@@ -264,7 +264,7 @@
}
return true;
}
-
+
return false;
}
@@ -274,30 +274,33 @@
/// the CurPtr buffer. TD is the target data.
static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
unsigned char *CurPtr, unsigned BytesLeft,
- const TargetData &TD) {
+ const DataLayout &TD) {
assert(ByteOffset <= TD.getTypeAllocSize(C->getType()) &&
"Out of range access");
-
+
// If this element is zero or undefined, we can just return since *CurPtr is
// zero initialized.
if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
return true;
-
+
if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
if (CI->getBitWidth() > 64 ||
(CI->getBitWidth() & 7) != 0)
return false;
-
+
uint64_t Val = CI->getZExtValue();
unsigned IntBytes = unsigned(CI->getBitWidth()/8);
-
+
for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
- CurPtr[i] = (unsigned char)(Val >> (ByteOffset * 8));
+ int n = ByteOffset;
+ if (!TD.isLittleEndian())
+ n = IntBytes - n - 1;
+ CurPtr[i] = (unsigned char)(Val >> (n * 8));
++ByteOffset;
}
return true;
}
-
+
if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
if (CFP->getType()->isDoubleTy()) {
C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), TD);
@@ -309,13 +312,13 @@
}
return false;
}
-
+
if (ConstantStruct *CS = dyn_cast<ConstantStruct>(C)) {
const StructLayout *SL = TD.getStructLayout(CS->getType());
unsigned Index = SL->getElementContainingOffset(ByteOffset);
uint64_t CurEltOffset = SL->getElementOffset(Index);
ByteOffset -= CurEltOffset;
-
+
while (1) {
// If the element access is to the element itself and not to tail padding,
// read the bytes from the element.
@@ -325,9 +328,9 @@
!ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
BytesLeft, TD))
return false;
-
+
++Index;
-
+
// Check to see if we read from the last struct element, if so we're done.
if (Index == CS->getType()->getNumElements())
return true;
@@ -375,11 +378,11 @@
}
return true;
}
-
+
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
if (CE->getOpcode() == Instruction::IntToPtr &&
- CE->getOperand(0)->getType() == TD.getIntPtrType(CE->getContext()))
- return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
+ CE->getOperand(0)->getType() == TD.getIntPtrType(CE->getContext()))
+ return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
BytesLeft, TD);
}
@@ -388,10 +391,10 @@
}
static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
- const TargetData &TD) {
+ const DataLayout &TD) {
Type *LoadTy = cast<PointerType>(C->getType())->getElementType();
IntegerType *IntType = dyn_cast<IntegerType>(LoadTy);
-
+
// If this isn't an integer load we can't fold it directly.
if (!IntType) {
// If this is a float/double load, we can try folding it as an int32/64 load
@@ -415,15 +418,15 @@
return FoldBitCast(Res, LoadTy, TD);
return 0;
}
-
+
unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
if (BytesLoaded > 32 || BytesLoaded == 0) return 0;
-
+
GlobalValue *GVal;
int64_t Offset;
if (!IsConstantOffsetFromGlobal(C, GVal, Offset, TD))
return 0;
-
+
GlobalVariable *GV = dyn_cast<GlobalVariable>(GVal);
if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
!GV->getInitializer()->getType()->isSized())
@@ -432,20 +435,29 @@
// If we're loading off the beginning of the global, some bytes may be valid,
// but we don't try to handle this.
if (Offset < 0) return 0;
-
+
// If we're not accessing anything in this constant, the result is undefined.
if (uint64_t(Offset) >= TD.getTypeAllocSize(GV->getInitializer()->getType()))
return UndefValue::get(IntType);
-
+
unsigned char RawBytes[32] = {0};
if (!ReadDataFromGlobal(GV->getInitializer(), Offset, RawBytes,
BytesLoaded, TD))
return 0;
- APInt ResultVal = APInt(IntType->getBitWidth(), RawBytes[BytesLoaded-1]);
- for (unsigned i = 1; i != BytesLoaded; ++i) {
- ResultVal <<= 8;
- ResultVal |= RawBytes[BytesLoaded-1-i];
+ APInt ResultVal = APInt(IntType->getBitWidth(), 0);
+ if (TD.isLittleEndian()) {
+ ResultVal = RawBytes[BytesLoaded - 1];
+ for (unsigned i = 1; i != BytesLoaded; ++i) {
+ ResultVal <<= 8;
+ ResultVal |= RawBytes[BytesLoaded-1-i];
+ }
+ } else {
+ ResultVal = RawBytes[0];
+ for (unsigned i = 1; i != BytesLoaded; ++i) {
+ ResultVal <<= 8;
+ ResultVal |= RawBytes[i];
+ }
}
return ConstantInt::get(IntType->getContext(), ResultVal);
@@ -455,7 +467,7 @@
/// produce if it is constant and determinable. If this is not determinable,
/// return null.
Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
- const TargetData *TD) {
+ const DataLayout *TD) {
// First, try the easy cases:
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C))
if (GV->isConstant() && GV->hasDefinitiveInitializer())
@@ -464,15 +476,15 @@
// If the loaded value isn't a constant expr, we can't handle it.
ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
if (!CE) return 0;
-
+
if (CE->getOpcode() == Instruction::GetElementPtr) {
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(CE->getOperand(0)))
if (GV->isConstant() && GV->hasDefinitiveInitializer())
- if (Constant *V =
+ if (Constant *V =
ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE))
return V;
}
-
+
// Instead of loading constant c string, use corresponding integer value
// directly if string length is small enough.
StringRef Str;
@@ -500,14 +512,14 @@
SingleChar = 0;
StrVal = (StrVal << 8) | SingleChar;
}
-
+
Constant *Res = ConstantInt::get(CE->getContext(), StrVal);
if (Ty->isFloatingPointTy())
Res = ConstantExpr::getBitCast(Res, Ty);
return Res;
}
}
-
+
// If this load comes from anywhere in a constant global, and if the global
// is all undef or zero, we know what it loads.
if (GlobalVariable *GV =
@@ -520,18 +532,16 @@
return UndefValue::get(ResTy);
}
}
-
- // Try hard to fold loads from bitcasted strange and non-type-safe things. We
- // currently don't do any of this for big endian systems. It can be
- // generalized in the future if someone is interested.
- if (TD && TD->isLittleEndian())
+
+ // Try hard to fold loads from bitcasted strange and non-type-safe things.
+ if (TD)
return FoldReinterpretLoadFromConstPtr(CE, *TD);
return 0;
}
-static Constant *ConstantFoldLoadInst(const LoadInst *LI, const TargetData *TD){
+static Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout *TD){
if (LI->isVolatile()) return 0;
-
+
if (Constant *C = dyn_cast<Constant>(LI->getOperand(0)))
return ConstantFoldLoadFromConstPtr(C, TD);
@@ -540,23 +550,23 @@
/// SymbolicallyEvaluateBinop - One of Op0/Op1 is a constant expression.
/// Attempt to symbolically evaluate the result of a binary operator merging
-/// these together. If target data info is available, it is provided as TD,
+/// these together. If target data info is available, it is provided as TD,
/// otherwise TD is null.
static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
- Constant *Op1, const TargetData *TD){
+ Constant *Op1, const DataLayout *TD){
// SROA
-
+
// Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
// Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
// bits.
-
-
+
+
// If the constant expr is something like &A[123] - &A[4].f, fold this into a
// constant. This happens frequently when iterating over a global array.
if (Opc == Instruction::Sub && TD) {
GlobalValue *GV1, *GV2;
int64_t Offs1, Offs2;
-
+
if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, *TD))
if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, *TD) &&
GV1 == GV2) {
@@ -564,7 +574,7 @@
return ConstantInt::get(Op0->getType(), Offs1-Offs2);
}
}
-
+
return 0;
}
@@ -572,7 +582,7 @@
/// explicitly cast them so that they aren't implicitly casted by the
/// getelementptr.
static Constant *CastGEPIndices(ArrayRef<Constant *> Ops,
- Type *ResultTy, const TargetData *TD,
+ Type *ResultTy, const DataLayout *TD,
const TargetLibraryInfo *TLI) {
if (!TD) return 0;
Type *IntPtrTy = TD->getIntPtrType(ResultTy->getContext());
@@ -622,20 +632,20 @@
/// SymbolicallyEvaluateGEP - If we can symbolically evaluate the specified GEP
/// constant expression, do so.
static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
- Type *ResultTy, const TargetData *TD,
+ Type *ResultTy, const DataLayout *TD,
const TargetLibraryInfo *TLI) {
Constant *Ptr = Ops[0];
if (!TD || !cast<PointerType>(Ptr->getType())->getElementType()->isSized() ||
!Ptr->getType()->isPointerTy())
return 0;
-
+
Type *IntPtrTy = TD->getIntPtrType(Ptr->getContext());
// If this is a constant expr gep that is effectively computing an
// "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'
for (unsigned i = 1, e = Ops.size(); i != e; ++i)
if (!isa<ConstantInt>(Ops[i])) {
-
+
// If this is "gep i8* Ptr, (sub 0, V)", fold this as:
// "inttoptr (sub (ptrtoint Ptr), V)"
if (Ops.size() == 2 &&
@@ -659,7 +669,8 @@
unsigned BitWidth = TD->getTypeSizeInBits(IntPtrTy);
APInt Offset =
APInt(BitWidth, TD->getIndexedOffset(Ptr->getType(),
- makeArrayRef((Value **)Ops.data() + 1,
+ makeArrayRef((Value *const*)
+ Ops.data() + 1,
Ops.size() - 1)));
Ptr = StripPtrCastKeepAS(Ptr);
@@ -708,12 +719,12 @@
// The only pointer indexing we'll do is on the first index of the GEP.
if (!NewIdxs.empty())
break;
-
+
// Only handle pointers to sized types, not pointers to functions.
if (!ATy->getElementType()->isSized())
return 0;
}
-
+
// Determine which element of the array the offset points into.
APInt ElemSize(BitWidth, TD->getTypeAllocSize(ATy->getElementType()));
IntegerType *IntPtrTy = TD->getIntPtrType(Ty->getContext());
@@ -785,7 +796,7 @@
/// this function can only fail when attempting to fold instructions like loads
/// and stores, which have no constant expression form.
Constant *llvm::ConstantFoldInstruction(Instruction *I,
- const TargetData *TD,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI) {
// Handle PHI nodes quickly here...
if (PHINode *PN = dyn_cast<PHINode>(I)) {
@@ -836,7 +847,7 @@
if (const CmpInst *CI = dyn_cast<CmpInst>(I))
return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
TD, TLI);
-
+
if (const LoadInst *LI = dyn_cast<LoadInst>(I))
return ConstantFoldLoadInst(LI, TD);
@@ -855,10 +866,10 @@
}
/// ConstantFoldConstantExpression - Attempt to fold the constant expression
-/// using the specified TargetData. If successful, the constant result is
+/// using the specified DataLayout. If successful, the constant result is
/// result is returned, if not, null is returned.
Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE,
- const TargetData *TD,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI) {
SmallVector<Constant*, 8> Ops;
for (User::const_op_iterator i = CE->op_begin(), e = CE->op_end();
@@ -886,19 +897,19 @@
/// information, due to only being passed an opcode and operands. Constant
/// folding using this function strips this information.
///
-Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
+Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
ArrayRef<Constant *> Ops,
- const TargetData *TD,
- const TargetLibraryInfo *TLI) {
+ const DataLayout *TD,
+ const TargetLibraryInfo *TLI) {
// Handle easy binops first.
if (Instruction::isBinaryOp(Opcode)) {
if (isa<ConstantExpr>(Ops[0]) || isa<ConstantExpr>(Ops[1]))
if (Constant *C = SymbolicallyEvaluateBinop(Opcode, Ops[0], Ops[1], TD))
return C;
-
+
return ConstantExpr::get(Opcode, Ops[0], Ops[1]);
}
-
+
switch (Opcode) {
default: return 0;
case Instruction::ICmp:
@@ -916,7 +927,7 @@
Constant *Input = CE->getOperand(0);
unsigned InWidth = Input->getType()->getScalarSizeInBits();
if (TD->getPointerSizeInBits() < InWidth) {
- Constant *Mask =
+ Constant *Mask =
ConstantInt::get(CE->getContext(), APInt::getLowBitsSet(InWidth,
TD->getPointerSizeInBits()));
Input = ConstantExpr::getAnd(Input, Mask);
@@ -964,7 +975,7 @@
return C;
if (Constant *C = SymbolicallyEvaluateGEP(Ops, DestTy, TD, TLI))
return C;
-
+
return ConstantExpr::getGetElementPtr(Ops[0], Ops.slice(1));
}
}
@@ -974,8 +985,8 @@
/// returns a constant expression of the specified operands.
///
Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
- Constant *Ops0, Constant *Ops1,
- const TargetData *TD,
+ Constant *Ops0, Constant *Ops1,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI) {
// fold: icmp (inttoptr x), null -> icmp x, 0
// fold: icmp (ptrtoint x), 0 -> icmp x, null
@@ -995,17 +1006,17 @@
Constant *Null = Constant::getNullValue(C->getType());
return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI);
}
-
+
// Only do this transformation if the int is intptrty in size, otherwise
// there is a truncation or extension that we aren't modeling.
- if (CE0->getOpcode() == Instruction::PtrToInt &&
+ if (CE0->getOpcode() == Instruction::PtrToInt &&
CE0->getType() == IntPtrTy) {
Constant *C = CE0->getOperand(0);
Constant *Null = Constant::getNullValue(C->getType());
return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI);
}
}
-
+
if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
if (TD && CE0->getOpcode() == CE1->getOpcode()) {
Type *IntPtrTy = TD->getIntPtrType(CE0->getContext());
@@ -1029,24 +1040,24 @@
CE1->getOperand(0), TD, TLI);
}
}
-
+
// icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
// icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
- Constant *LHS =
+ Constant *LHS =
ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(0), Ops1,
TD, TLI);
- Constant *RHS =
+ Constant *RHS =
ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(1), Ops1,
TD, TLI);
- unsigned OpC =
+ unsigned OpC =
Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
Constant *Ops[] = { LHS, RHS };
return ConstantFoldInstOperands(OpC, LHS->getType(), Ops, TD, TLI);
}
}
-
+
return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
}
@@ -1054,7 +1065,7 @@
/// ConstantFoldLoadThroughGEPConstantExpr - Given a constant and a
/// getelementptr constantexpr, return the constant value being addressed by the
/// constant expression, or null if something is funny and we can't decide.
-Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
+Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
ConstantExpr *CE) {
if (!CE->getOperand(1)->isNullValue())
return 0; // Do not allow stepping over the value!
@@ -1124,14 +1135,14 @@
if (!F->hasName()) return false;
StringRef Name = F->getName();
-
+
// In these cases, the check of the length is required. We don't want to
// return true for a name like "cos\0blah" which strcmp would return equal to
// "cos", but has length 8.
switch (Name[0]) {
default: return false;
case 'a':
- return Name == "acos" || Name == "asin" ||
+ return Name == "acos" || Name == "asin" ||
Name == "atan" || Name == "atan2";
case 'c':
return Name == "cos" || Name == "ceil" || Name == "cosf" || Name == "cosh";
@@ -1151,7 +1162,7 @@
}
}
-static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
+static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
Type *Ty) {
sys::llvm_fenv_clearexcept();
V = NativeFP(V);
@@ -1159,7 +1170,7 @@
sys::llvm_fenv_clearexcept();
return 0;
}
-
+
if (Ty->isFloatTy())
return ConstantFP::get(Ty->getContext(), APFloat((float)V));
if (Ty->isDoubleTy())
@@ -1175,7 +1186,7 @@
sys::llvm_fenv_clearexcept();
return 0;
}
-
+
if (Ty->isFloatTy())
return ConstantFP::get(Ty->getContext(), APFloat((float)V));
if (Ty->isDoubleTy())
@@ -1269,7 +1280,7 @@
case 'e':
if (Name == "exp" && TLI->has(LibFunc::exp))
return ConstantFoldFP(exp, V, Ty);
-
+
if (Name == "exp2" && TLI->has(LibFunc::exp2)) {
// Constant fold exp2(x) as pow(2,x) in case the host doesn't have a
// C99 library.
@@ -1345,7 +1356,7 @@
}
// Support ConstantVector in case we have an Undef in the top.
- if (isa<ConstantVector>(Operands[0]) ||
+ if (isa<ConstantVector>(Operands[0]) ||
isa<ConstantDataVector>(Operands[0])) {
Constant *Op = cast<Constant>(Operands[0]);
switch (F->getIntrinsicID()) {
@@ -1364,11 +1375,11 @@
case Intrinsic::x86_sse2_cvttsd2si64:
if (ConstantFP *FPOp =
dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
- return ConstantFoldConvertToInt(FPOp->getValueAPF(),
+ return ConstantFoldConvertToInt(FPOp->getValueAPF(),
/*roundTowardZero=*/true, Ty);
}
}
-
+
if (isa<UndefValue>(Operands[0])) {
if (F->getIntrinsicID() == Intrinsic::bswap)
return Operands[0];
@@ -1382,14 +1393,14 @@
if (ConstantFP *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
if (!Ty->isFloatTy() && !Ty->isDoubleTy())
return 0;
- double Op1V = Ty->isFloatTy() ?
+ double Op1V = Ty->isFloatTy() ?
(double)Op1->getValueAPF().convertToFloat() :
Op1->getValueAPF().convertToDouble();
if (ConstantFP *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
if (Op2->getType() != Op1->getType())
return 0;
- double Op2V = Ty->isFloatTy() ?
+ double Op2V = Ty->isFloatTy() ?
(double)Op2->getValueAPF().convertToFloat():
Op2->getValueAPF().convertToDouble();
@@ -1416,7 +1427,7 @@
}
return 0;
}
-
+
if (ConstantInt *Op1 = dyn_cast<ConstantInt>(Operands[0])) {
if (ConstantInt *Op2 = dyn_cast<ConstantInt>(Operands[1])) {
switch (F->getIntrinsicID()) {
@@ -1466,7 +1477,7 @@
return ConstantInt::get(Ty, Op1->getValue().countLeadingZeros());
}
}
-
+
return 0;
}
return 0;
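The TargetData -> DataLayout rename that runs through this commit is mechanical for API clients: only the type name and the header (llvm/DataLayout.h instead of llvm/Target/TargetData.h) change. A minimal sketch of a caller of the folding entry points above, assuming the post-rename tree:

    #include "llvm/Analysis/ConstantFolding.h"
    #include "llvm/DataLayout.h"
    #include "llvm/Target/TargetLibraryInfo.h"

    // Try to fold a constant expression; returns the folded constant, or
    // null if CE cannot be simplified. TD may be null when no DataLayout
    // is available, and folding then degrades gracefully.
    static llvm::Constant *tryFold(llvm::ConstantExpr *CE,
                                   const llvm::DataLayout *TD,
                                   const llvm::TargetLibraryInfo *TLI) {
      return llvm::ConstantFoldConstantExpression(CE, TD, TLI);
    }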
Modified: llvm/branches/AMDILBackend/lib/Analysis/DominanceFrontier.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/Analysis/DominanceFrontier.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/Analysis/DominanceFrontier.cpp (original)
+++ llvm/branches/AMDILBackend/lib/Analysis/DominanceFrontier.cpp Tue Jan 15 11:16:16 2013
@@ -133,7 +133,9 @@
}
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void DominanceFrontierBase::dump() const {
print(dbgs());
}
+#endif
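This guard pattern recurs throughout the commit: dump() is compiled only into asserts builds, or into release builds explicitly configured with LLVM_ENABLE_DUMP, so release binaries shed the symbol entirely. The same shape applied to a hypothetical class (MyAnalysis is a placeholder, not part of the patch):

    #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
    void MyAnalysis::dump() const {
      print(llvm::dbgs()); // keep all formatting in the print() overload
    }
    #endif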
Modified: llvm/branches/AMDILBackend/lib/Analysis/IPA/CallGraph.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/Analysis/IPA/CallGraph.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/Analysis/IPA/CallGraph.cpp (original)
+++ llvm/branches/AMDILBackend/lib/Analysis/IPA/CallGraph.cpp Tue Jan 15 11:16:16 2013
@@ -141,12 +141,13 @@
for (BasicBlock::iterator II = BB->begin(), IE = BB->end();
II != IE; ++II) {
CallSite CS(cast<Value>(II));
- if (CS && !isa<IntrinsicInst>(II)) {
+ if (CS) {
const Function *Callee = CS.getCalledFunction();
- if (Callee)
- Node->addCalledFunction(CS, getOrInsertFunction(Callee));
- else
+ if (!Callee)
+ // Indirect calls of intrinsics are not allowed, so no need to check.
Node->addCalledFunction(CS, CallsExternalNode);
+ else if (!Callee->isIntrinsic())
+ Node->addCalledFunction(CS, getOrInsertFunction(Callee));
}
}
}
@@ -198,9 +199,11 @@
for (CallGraph::const_iterator I = begin(), E = end(); I != E; ++I)
I->second->print(OS);
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void CallGraph::dump() const {
print(dbgs(), 0);
}
+#endif
//===----------------------------------------------------------------------===//
// Implementations of public modification methods
@@ -267,7 +270,9 @@
OS << '\n';
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void CallGraphNode::dump() const { print(dbgs()); }
+#endif
/// removeCallEdgeFor - This method removes the edge in the node for the
/// specified call site. Note that this method takes linear time, so it
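The reshuffled loop above relies on the fact that intrinsics can never be called indirectly, so the intrinsic test is only needed on the direct-call path. A standalone restatement of the edge-recording predicate (a sketch using the names from the hunk, not code from the patch):

    #include "llvm/Function.h"
    #include "llvm/Support/CallSite.h"

    // True when the call graph should record an edge for this call site:
    // every indirect call (modeled via CallsExternalNode) and every direct
    // call whose callee is not an intrinsic.
    static bool shouldRecordEdge(llvm::CallSite CS) {
      const llvm::Function *Callee = CS.getCalledFunction();
      return !Callee || !Callee->isIntrinsic();
    }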
Modified: llvm/branches/AMDILBackend/lib/Analysis/IPA/GlobalsModRef.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/Analysis/IPA/GlobalsModRef.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/Analysis/IPA/GlobalsModRef.cpp (original)
+++ llvm/branches/AMDILBackend/lib/Analysis/IPA/GlobalsModRef.cpp Tue Jan 15 11:16:16 2013
@@ -263,7 +263,7 @@
} else if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
if (AnalyzeUsesOfPointer(BCI, Readers, Writers, OkayStoreDest))
return true;
- } else if (isFreeCall(U)) {
+ } else if (isFreeCall(U, TLI)) {
Writers.push_back(cast<Instruction>(U)->getParent()->getParent());
} else if (CallInst *CI = dyn_cast<CallInst>(U)) {
// Make sure that this is just the function being called, not that it is
@@ -329,7 +329,7 @@
// Check the value being stored.
Value *Ptr = GetUnderlyingObject(SI->getOperand(0));
- if (!isAllocLikeFn(Ptr))
+ if (!isAllocLikeFn(Ptr, TLI))
return false; // Too hard to analyze.
// Analyze all uses of the allocation. If any of them are used in a
@@ -458,7 +458,7 @@
if (SI->isVolatile())
// Treat volatile stores as reading memory somewhere.
FunctionEffect |= Ref;
- } else if (isAllocationFn(&*II) || isFreeCall(&*II)) {
+ } else if (isAllocationFn(&*II, TLI) || isFreeCall(&*II, TLI)) {
FunctionEffect |= ModRef;
} else if (IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(&*II)) {
// The callgraph doesn't include intrinsic calls.
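The pattern in these three hunks is the same: the MemoryBuiltins predicates now require a TargetLibraryInfo, so that targets lacking a given libcall (or builds with -fno-builtin) stop special-casing it. A sketch of the client side, assuming TLI was obtained via getAnalysis<TargetLibraryInfo>() as GlobalsModRef does:

    #include "llvm/Analysis/MemoryBuiltins.h"
    #include "llvm/Target/TargetLibraryInfo.h"

    // Does this instruction allocate or free heap memory, under the
    // current target's library assumptions?
    static bool touchesHeap(const llvm::Instruction *I,
                            const llvm::TargetLibraryInfo *TLI) {
      return llvm::isAllocationFn(I, TLI) || llvm::isFreeCall(I, TLI);
    }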
Modified: llvm/branches/AMDILBackend/lib/Analysis/IVUsers.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/Analysis/IVUsers.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/Analysis/IVUsers.cpp (original)
+++ llvm/branches/AMDILBackend/lib/Analysis/IVUsers.cpp Tue Jan 15 11:16:16 2013
@@ -22,7 +22,7 @@
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
@@ -235,7 +235,7 @@
LI = &getAnalysis<LoopInfo>();
DT = &getAnalysis<DominatorTree>();
SE = &getAnalysis<ScalarEvolution>();
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
// Find all uses of induction variables in this loop, and categorize
// them by stride. Start by finding all of the PHI nodes in the header for
@@ -273,9 +273,11 @@
}
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void IVUsers::dump() const {
print(dbgs());
}
+#endif
void IVUsers::releaseMemory() {
Processed.clear();
Modified: llvm/branches/AMDILBackend/lib/Analysis/InlineCost.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/Analysis/InlineCost.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/Analysis/InlineCost.cpp (original)
+++ llvm/branches/AMDILBackend/lib/Analysis/InlineCost.cpp Tue Jan 15 11:16:16 2013
@@ -24,7 +24,7 @@
#include "llvm/IntrinsicInst.h"
#include "llvm/Operator.h"
#include "llvm/GlobalAlias.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
@@ -41,8 +41,8 @@
typedef InstVisitor<CallAnalyzer, bool> Base;
friend class InstVisitor<CallAnalyzer, bool>;
- // TargetData if available, or null.
- const TargetData *const TD;
+ // DataLayout if available, or null.
+ const DataLayout *const TD;
// The called function.
Function &F;
@@ -51,9 +51,12 @@
int Cost;
const bool AlwaysInline;
- bool IsRecursive;
+ bool IsCallerRecursive;
+ bool IsRecursiveCall;
bool ExposesReturnsTwice;
bool HasDynamicAlloca;
+ /// Number of bytes allocated statically by the callee.
+ uint64_t AllocatedSize;
unsigned NumInstructions, NumVectorInstructions;
int FiftyPercentVectorBonus, TenPercentVectorBonus;
int VectorBonus;
@@ -123,10 +126,11 @@
bool visitCallSite(CallSite CS);
public:
- CallAnalyzer(const TargetData *TD, Function &Callee, int Threshold)
+ CallAnalyzer(const DataLayout *TD, Function &Callee, int Threshold)
: TD(TD), F(Callee), Threshold(Threshold), Cost(0),
- AlwaysInline(F.hasFnAttr(Attribute::AlwaysInline)),
- IsRecursive(false), ExposesReturnsTwice(false), HasDynamicAlloca(false),
+ AlwaysInline(F.getFnAttributes().hasAttribute(Attributes::AlwaysInline)),
+ IsCallerRecursive(false), IsRecursiveCall(false),
+ ExposesReturnsTwice(false), HasDynamicAlloca(false), AllocatedSize(0),
NumInstructions(0), NumVectorInstructions(0),
FiftyPercentVectorBonus(0), TenPercentVectorBonus(0), VectorBonus(0),
NumConstantArgs(0), NumConstantOffsetPtrArgs(0), NumAllocaArgs(0),
@@ -138,6 +142,7 @@
int getThreshold() { return Threshold; }
int getCost() { return Cost; }
+ bool isAlwaysInline() { return AlwaysInline; }
// Keep a bunch of stats about the cost savings found so we can print them
// out when debugging.
@@ -269,6 +274,13 @@
// FIXME: Check whether inlining will turn a dynamic alloca into a static
// alloca, and handle that case.
+ // Accumulate the allocated size.
+ if (I.isStaticAlloca()) {
+ Type *Ty = I.getAllocatedType();
+ AllocatedSize += (TD ? TD->getTypeAllocSize(Ty) :
+ Ty->getPrimitiveSizeInBits());
+ }
+
// We will happily inline static alloca instructions or dynamic alloca
// instructions in always-inline situations.
if (AlwaysInline || I.isStaticAlloca())
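Note the units in the fallback: getTypeAllocSize() is in bytes while getPrimitiveSizeInBits() is in bits, so running without a DataLayout overestimates the footprint, which only makes the recursive-caller guard further down more conservative. A worked instance, assuming a DataLayout for a typical 64-bit target:

    // For "%buf = alloca [256 x i32]" in the callee:
    llvm::Type *ATy = I.getAllocatedType();        // [256 x i32]
    uint64_t Bytes = TD->getTypeAllocSize(ATy);    // 256 * 4 = 1024 bytes
    AllocatedSize += Bytes; // later compared against
                            // InlineConstants::TotalAllocaSizeRecursiveCaller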
@@ -602,7 +614,7 @@
bool CallAnalyzer::visitCallSite(CallSite CS) {
if (CS.isCall() && cast<CallInst>(CS.getInstruction())->canReturnTwice() &&
- !F.hasFnAttr(Attribute::ReturnsTwice)) {
+ !F.getFnAttributes().hasAttribute(Attributes::ReturnsTwice)) {
// This aborts the entire analysis.
ExposesReturnsTwice = true;
return false;
@@ -625,7 +637,7 @@
if (F == CS.getInstruction()->getParent()->getParent()) {
// This flag will fully abort the analysis, so don't bother with anything
// else.
- IsRecursive = true;
+ IsRecursiveCall = true;
return false;
}
@@ -712,7 +724,14 @@
Cost += InlineConstants::InstrCost;
// If visiting this instruction detected an uninlinable pattern, abort.
- if (IsRecursive || ExposesReturnsTwice || HasDynamicAlloca)
+ if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca)
+ return false;
+
+ // If the caller is a recursive function then we don't want to inline
+ // functions which allocate a lot of stack space because it would increase
+ // the caller stack usage dramatically.
+ if (IsCallerRecursive &&
+ AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller)
return false;
if (NumVectorInstructions > NumInstructions/2)
@@ -814,7 +833,7 @@
// one load and one store per word copied.
// FIXME: The maxStoresPerMemcpy setting from the target should be used
// here instead of a magic number of 8, but it's not available via
- // TargetData.
+ // DataLayout.
NumStores = std::min(NumStores, 8U);
Cost -= 2 * NumStores * InlineConstants::InstrCost;
@@ -831,12 +850,14 @@
Cost += InlineConstants::LastCallToStaticBonus;
// If the instruction after the call, or if the normal destination of the
- // invoke is an unreachable instruction, the function is noreturn. As such,
- // there is little point in inlining this unless there is literally zero cost.
- if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
+ // invoke is an unreachable instruction, the function is noreturn. As such,
+ // there is little point in inlining this unless there is literally zero
+ // cost.
+ Instruction *Instr = CS.getInstruction();
+ if (InvokeInst *II = dyn_cast<InvokeInst>(Instr)) {
if (isa<UnreachableInst>(II->getNormalDest()->begin()))
Threshold = 1;
- } else if (isa<UnreachableInst>(++BasicBlock::iterator(CS.getInstruction())))
+ } else if (isa<UnreachableInst>(++BasicBlock::iterator(Instr)))
Threshold = 1;
// If this function uses the coldcc calling convention, prefer not to inline
@@ -852,6 +873,20 @@
if (F.empty())
return true;
+ Function *Caller = CS.getInstruction()->getParent()->getParent();
+ // Check if the caller function is recursive itself.
+ for (Value::use_iterator U = Caller->use_begin(), E = Caller->use_end();
+ U != E; ++U) {
+ CallSite Site(cast<Value>(*U));
+ if (!Site)
+ continue;
+ Instruction *I = Site.getInstruction();
+ if (I->getParent()->getParent() == Caller) {
+ IsCallerRecursive = true;
+ break;
+ }
+ }
+
// Track whether we've seen a return instruction. The first return
// instruction is free, as at least one will usually disappear in inlining.
bool HasReturn = false;
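A standalone restatement of that scan (a sketch, not patch code): the caller counts as recursive if any use of it is a call site located inside its own body. Because CallSite matches any call or invoke that uses the function, even as an argument rather than as the callee, this over-approximates direct self-recursion, erring on the safe side.

    #include "llvm/Function.h"
    #include "llvm/Support/CallSite.h"

    static bool callerIsSelfRecursive(llvm::Function *Caller) {
      for (llvm::Value::use_iterator U = Caller->use_begin(),
                                     E = Caller->use_end(); U != E; ++U) {
        llvm::CallSite Site(llvm::cast<llvm::Value>(*U));
        if (!Site)
          continue; // this use is not a call or invoke
        if (Site.getInstruction()->getParent()->getParent() == Caller)
          return true; // a call site inside Caller's own body
      }
      return false;
    }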
@@ -908,9 +943,9 @@
// We never want to inline functions that contain an indirectbr. This is
// incorrect because all the blockaddress's (in static global initializers
- // for example) would be referring to the original function, and this indirect
- // jump would jump from the inlined copy of the function into the original
- // function which is extremely undefined behavior.
+ // for example) would be referring to the original function, and this
+ // indirect jump would jump from the inlined copy of the function into the
+ // original function which is extremely undefined behavior.
// FIXME: This logic isn't really right; we can safely inline functions
// with indirectbr's as long as no other function or global references the
// blockaddress of a block within the current function. And as a QOI issue,
@@ -928,8 +963,16 @@
// Analyze the cost of this block. If we blow through the threshold, this
// returns false, and we can bail out.
if (!analyzeBlock(BB)) {
- if (IsRecursive || ExposesReturnsTwice || HasDynamicAlloca)
+ if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca)
return false;
+
+ // If the caller is a recursive function then we don't want to inline
+ // functions which allocate a lot of stack space because it would increase
+ // the caller stack usage dramatically.
+ if (IsCallerRecursive &&
+ AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller)
+ return false;
+
break;
}
@@ -955,7 +998,8 @@
// If we're unable to select a particular successor, just count all of
// them.
- for (unsigned TIdx = 0, TSize = TI->getNumSuccessors(); TIdx != TSize; ++TIdx)
+ for (unsigned TIdx = 0, TSize = TI->getNumSuccessors(); TIdx != TSize;
+ ++TIdx)
BBWorklist.insert(TI->getSuccessor(TIdx));
// If we had any successors at this point, then post-inlining is likely to
@@ -974,6 +1018,7 @@
return AlwaysInline || Cost < Threshold;
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// \brief Dump stats about this call's analysis.
void CallAnalyzer::dump() {
#define DEBUG_PRINT_STAT(x) llvm::dbgs() << " " #x ": " << x << "\n"
@@ -987,6 +1032,7 @@
DEBUG_PRINT_STAT(SROACostSavingsLost);
#undef DEBUG_PRINT_STAT
}
+#endif
InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS, int Threshold) {
return getInlineCost(CS, CS.getCalledFunction(), Threshold);
@@ -998,10 +1044,12 @@
// something else. Don't inline functions marked noinline or call sites
// marked noinline.
if (!Callee || Callee->mayBeOverridden() ||
- Callee->hasFnAttr(Attribute::NoInline) || CS.isNoInline())
+ Callee->getFnAttributes().hasAttribute(Attributes::NoInline) ||
+ CS.isNoInline())
return llvm::InlineCost::getNever();
- DEBUG(llvm::dbgs() << " Analyzing call of " << Callee->getName() << "...\n");
+ DEBUG(llvm::dbgs() << " Analyzing call of " << Callee->getName()
+ << "...\n");
CallAnalyzer CA(TD, *Callee, Threshold);
bool ShouldInline = CA.analyzeCall(CS);
@@ -1011,7 +1059,8 @@
// Check if there was a reason to force inlining or no inlining.
if (!ShouldInline && CA.getCost() < CA.getThreshold())
return InlineCost::getNever();
- if (ShouldInline && CA.getCost() >= CA.getThreshold())
+ if (ShouldInline && (CA.isAlwaysInline() ||
+ CA.getCost() >= CA.getThreshold()))
return InlineCost::getAlways();
return llvm::InlineCost::get(CA.getCost(), CA.getThreshold());
Modified: llvm/branches/AMDILBackend/lib/Analysis/InstructionSimplify.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/Analysis/InstructionSimplify.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/Analysis/InstructionSimplify.cpp (original)
+++ llvm/branches/AMDILBackend/lib/Analysis/InstructionSimplify.cpp Tue Jan 15 11:16:16 2013
@@ -31,7 +31,7 @@
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/PatternMatch.h"
#include "llvm/Support/ValueHandle.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
using namespace llvm;
using namespace llvm::PatternMatch;
@@ -42,11 +42,11 @@
STATISTIC(NumReassoc, "Number of reassociations");
struct Query {
- const TargetData *TD;
+ const DataLayout *TD;
const TargetLibraryInfo *TLI;
const DominatorTree *DT;
- Query(const TargetData *td, const TargetLibraryInfo *tli,
+ Query(const DataLayout *td, const TargetLibraryInfo *tli,
const DominatorTree *dt) : TD(td), TLI(tli), DT(dt) {}
};
@@ -651,7 +651,7 @@
}
Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const TargetData *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *TD, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyAddInst(Op0, Op1, isNSW, isNUW, Query (TD, TLI, DT),
RecursionLimit);
@@ -664,7 +664,7 @@
/// if the GEP has all-constant indices. Returns false if any non-constant
/// index is encountered leaving the 'Offset' in an undefined state. The
/// 'Offset' APInt must be the bitwidth of the target's pointer size.
-static bool accumulateGEPOffset(const TargetData &TD, GEPOperator *GEP,
+static bool accumulateGEPOffset(const DataLayout &TD, GEPOperator *GEP,
APInt &Offset) {
unsigned IntPtrWidth = TD.getPointerSizeInBits();
assert(IntPtrWidth == Offset.getBitWidth());
@@ -696,7 +696,7 @@
/// accumulates the total constant offset applied in the returned constant. It
/// returns 0 if V is not a pointer, and returns the constant '0' if there are
/// no constant offsets applied.
-static Constant *stripAndComputeConstantOffsets(const TargetData &TD,
+static Constant *stripAndComputeConstantOffsets(const DataLayout &TD,
Value *&V) {
if (!V->getType()->isPointerTy())
return 0;
@@ -731,7 +731,7 @@
/// \brief Compute the constant difference between two pointer values.
/// If the difference is not a constant, returns zero.
-static Constant *computePointerDifference(const TargetData &TD,
+static Constant *computePointerDifference(const DataLayout &TD,
Value *LHS, Value *RHS) {
Constant *LHSOffset = stripAndComputeConstantOffsets(TD, LHS);
if (!LHSOffset)
@@ -880,7 +880,7 @@
}
Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const TargetData *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *TD, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, Query (TD, TLI, DT),
RecursionLimit);
@@ -951,7 +951,7 @@
return 0;
}
-Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyMulInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1039,7 +1039,7 @@
return 0;
}
-Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifySDivInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1055,7 +1055,7 @@
return 0;
}
-Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyUDivInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1074,7 +1074,7 @@
return 0;
}
-Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyFDivInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1144,7 +1144,7 @@
return 0;
}
-Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifySRemInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1160,7 +1160,7 @@
return 0;
}
-Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyURemInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1179,7 +1179,7 @@
return 0;
}
-Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyFRemInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1248,7 +1248,7 @@
}
Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const TargetData *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *TD, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Query (TD, TLI, DT),
RecursionLimit);
@@ -1275,7 +1275,7 @@
}
Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
- const TargetData *TD,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyLShrInst(Op0, Op1, isExact, Query (TD, TLI, DT),
@@ -1307,7 +1307,7 @@
}
Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
- const TargetData *TD,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyAShrInst(Op0, Op1, isExact, Query (TD, TLI, DT),
@@ -1407,7 +1407,7 @@
return 0;
}
-Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyAndInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1501,7 +1501,7 @@
return 0;
}
-Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyOrInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1561,7 +1561,7 @@
return 0;
}
-Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const TargetData *TD,
+Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyXorInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@@ -1591,7 +1591,7 @@
return 0;
}
-static Constant *computePointerICmp(const TargetData &TD,
+static Constant *computePointerICmp(const DataLayout &TD,
CmpInst::Predicate Pred,
Value *LHS, Value *RHS) {
// We can only fold certain predicates on pointer comparisons.
@@ -2065,8 +2065,25 @@
if (A && C && (A == C || A == D || B == C || B == D) &&
NoLHSWrapProblem && NoRHSWrapProblem) {
// Determine Y and Z in the form icmp (X+Y), (X+Z).
- Value *Y = (A == C || A == D) ? B : A;
- Value *Z = (C == A || C == B) ? D : C;
+ Value *Y, *Z;
+ if (A == C) {
+ // C + B == C + D -> B == D
+ Y = B;
+ Z = D;
+ } else if (A == D) {
+ // D + B == C + D -> B == C
+ Y = B;
+ Z = C;
+ } else if (B == C) {
+ // A + C == C + D -> A == D
+ Y = A;
+ Z = D;
+ } else {
+ assert(B == D);
+ // A + D == C + D -> A == C
+ Y = A;
+ Z = C;
+ }
if (Value *V = SimplifyICmpInst(Pred, Y, Z, Q, MaxRecurse-1))
return V;
}
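The expanded cases make explicit which operand of each add plays the shared X in icmp (X+Y), (X+Z); the old two-ternary form could pick a mismatched pair when several operands coincide (for instance A == D together with B == C). A self-contained model of the new selection, with plain pointers standing in for llvm::Value*:

    #include <cassert>
    struct V {}; // stand-in for llvm::Value

    // Given icmp (A+B), (C+D) where the adds share an operand, choose Y and
    // Z so the comparison reduces to icmp Y, Z.
    static void pickYZ(V *A, V *B, V *C, V *D, V *&Y, V *&Z) {
      if (A == C)      { Y = B; Z = D; }              // shared X is A (== C)
      else if (A == D) { Y = B; Z = C; }              // shared X is A (== D)
      else if (B == C) { Y = A; Z = D; }              // shared X is B (== C)
      else { assert(B == D); Y = A; Z = C; }          // shared X is B (== D)
    }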
@@ -2399,7 +2416,7 @@
}
Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const TargetData *TD,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyICmpInst(Predicate, LHS, RHS, Query (TD, TLI, DT),
@@ -2496,7 +2513,7 @@
}
Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const TargetData *TD,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyFCmpInst(Predicate, LHS, RHS, Query (TD, TLI, DT),
@@ -2531,7 +2548,7 @@
}
Value *llvm::SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
- const TargetData *TD,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifySelectInst(Cond, TrueVal, FalseVal, Query (TD, TLI, DT),
@@ -2579,7 +2596,7 @@
return ConstantExpr::getGetElementPtr(cast<Constant>(Ops[0]), Ops.slice(1));
}
-Value *llvm::SimplifyGEPInst(ArrayRef<Value *> Ops, const TargetData *TD,
+Value *llvm::SimplifyGEPInst(ArrayRef<Value *> Ops, const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyGEPInst(Ops, Query (TD, TLI, DT), RecursionLimit);
@@ -2616,7 +2633,7 @@
Value *llvm::SimplifyInsertValueInst(Value *Agg, Value *Val,
ArrayRef<unsigned> Idxs,
- const TargetData *TD,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyInsertValueInst(Agg, Val, Idxs, Query (TD, TLI, DT),
@@ -2664,7 +2681,7 @@
return 0;
}
-Value *llvm::SimplifyTruncInst(Value *Op, Type *Ty, const TargetData *TD,
+Value *llvm::SimplifyTruncInst(Value *Op, Type *Ty, const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyTruncInst(Op, Ty, Query (TD, TLI, DT), RecursionLimit);
@@ -2730,7 +2747,7 @@
}
Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
- const TargetData *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *TD, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyBinOp(Opcode, LHS, RHS, Query (TD, TLI, DT), RecursionLimit);
}
@@ -2745,7 +2762,7 @@
}
Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const TargetData *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *TD, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return ::SimplifyCmpInst(Predicate, LHS, RHS, Query (TD, TLI, DT),
RecursionLimit);
@@ -2761,7 +2778,7 @@
/// SimplifyInstruction - See if we can compute a simplified version of this
/// instruction. If not, this returns null.
-Value *llvm::SimplifyInstruction(Instruction *I, const TargetData *TD,
+Value *llvm::SimplifyInstruction(Instruction *I, const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
Value *Result;
@@ -2881,7 +2898,7 @@
/// This routine returns 'true' only when *it* simplifies something. The passed
/// in simplified value does not count toward this.
static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV,
- const TargetData *TD,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
bool Simplified = false;
@@ -2936,14 +2953,14 @@
}
bool llvm::recursivelySimplifyInstruction(Instruction *I,
- const TargetData *TD,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
return replaceAndRecursivelySimplifyImpl(I, 0, TD, TLI, DT);
}
bool llvm::replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
- const TargetData *TD,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
Modified: llvm/branches/AMDILBackend/lib/Analysis/LazyValueInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/Analysis/LazyValueInfo.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/Analysis/LazyValueInfo.cpp (original)
+++ llvm/branches/AMDILBackend/lib/Analysis/LazyValueInfo.cpp Tue Jan 15 11:16:16 2013
@@ -13,13 +13,14 @@
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "lazy-value-info"
+#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Analysis/ConstantFolding.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/ConstantRange.h"
@@ -212,7 +213,7 @@
// Unless we can prove that the two Constants are different, we must
// move to overdefined.
- // FIXME: use TargetData/TargetLibraryInfo for smarter constant folding.
+ // FIXME: use DataLayout/TargetLibraryInfo for smarter constant folding.
if (ConstantInt *Res = dyn_cast<ConstantInt>(
ConstantFoldCompareInstOperands(CmpInst::ICMP_NE,
getConstant(),
@@ -238,7 +239,7 @@
// Unless we can prove that the two Constants are different, we must
// move to overdefined.
- // FIXME: use TargetData/TargetLibraryInfo for smarter constant folding.
+ // FIXME: use DataLayout/TargetLibraryInfo for smarter constant folding.
if (ConstantInt *Res = dyn_cast<ConstantInt>(
ConstantFoldCompareInstOperands(CmpInst::ICMP_NE,
getNotConstant(),
@@ -294,7 +295,7 @@
//===----------------------------------------------------------------------===//
namespace {
- /// LVIValueHandle - A callback value handle update the cache when
+ /// LVIValueHandle - A callback value handle updates the cache when
/// values are erased.
class LazyValueInfoCache;
struct LVIValueHandle : public CallbackVH {
@@ -470,8 +471,10 @@
return true;
LVIValueHandle ValHandle(Val, this);
- if (!ValueCache.count(ValHandle)) return false;
- return ValueCache[ValHandle].count(BB);
+ std::map<LVIValueHandle, ValueCacheEntryTy>::iterator I =
+ ValueCache.find(ValHandle);
+ if (I == ValueCache.end()) return false;
+ return I->second.count(BB);
}
LVILatticeVal LazyValueInfoCache::getBlockValue(Value *Val, BasicBlock *BB) {
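The hunk above is the classic single-lookup idiom: the old count()-then-operator[] form walked the map twice (the count() guard was the only thing keeping operator[] from inserting). A generalized sketch of the replacement:

    // One find() replaces count() + operator[]: a single map walk, and no
    // risk of operator[] default-constructing an entry on a miss.
    template <typename Map, typename InnerKey>
    static bool containsNested(const Map &M, const typename Map::key_type &K,
                               const InnerKey &IK) {
      typename Map::const_iterator I = M.find(K);
      return I != M.end() && I->second.count(IK);
    }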
@@ -555,13 +558,11 @@
static bool InstructionDereferencesPointer(Instruction *I, Value *Ptr) {
if (LoadInst *L = dyn_cast<LoadInst>(I)) {
return L->getPointerAddressSpace() == 0 &&
- GetUnderlyingObject(L->getPointerOperand()) ==
- GetUnderlyingObject(Ptr);
+ GetUnderlyingObject(L->getPointerOperand()) == Ptr;
}
if (StoreInst *S = dyn_cast<StoreInst>(I)) {
return S->getPointerAddressSpace() == 0 &&
- GetUnderlyingObject(S->getPointerOperand()) ==
- GetUnderlyingObject(Ptr);
+ GetUnderlyingObject(S->getPointerOperand()) == Ptr;
}
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
if (MI->isVolatile()) return false;
@@ -571,11 +572,11 @@
if (!Len || Len->isZero()) return false;
if (MI->getDestAddressSpace() == 0)
- if (MI->getRawDest() == Ptr || MI->getDest() == Ptr)
+ if (GetUnderlyingObject(MI->getRawDest()) == Ptr)
return true;
if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
if (MTI->getSourceAddressSpace() == 0)
- if (MTI->getRawSource() == Ptr || MTI->getSource() == Ptr)
+ if (GetUnderlyingObject(MTI->getRawSource()) == Ptr)
return true;
}
return false;
@@ -589,13 +590,19 @@
// then we know that the pointer can't be NULL.
bool NotNull = false;
if (Val->getType()->isPointerTy()) {
- if (isa<AllocaInst>(Val)) {
+ if (isKnownNonNull(Val)) {
NotNull = true;
} else {
- for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();BI != BE;++BI){
- if (InstructionDereferencesPointer(BI, Val)) {
- NotNull = true;
- break;
+ Value *UnderlyingVal = GetUnderlyingObject(Val);
+ // If 'GetUnderlyingObject' didn't converge, skip it. It won't converge
+ // inside InstructionDereferencesPointer either.
+ if (UnderlyingVal == GetUnderlyingObject(UnderlyingVal, NULL, 1)) {
+ for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
+ BI != BE; ++BI) {
+ if (InstructionDereferencesPointer(BI, UnderlyingVal)) {
+ NotNull = true;
+ break;
+ }
}
}
}
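Two things changed here: the dereference scan now keys everything off a single underlying object, computed once instead of per instruction, and isKnownNonNull subsumes the old alloca-only test. The extra GetUnderlyingObject call guards against the bounded walk not reaching a fixed point; isolated, the test looks like this (a sketch using this tree's signature, where the second parameter is the optional, possibly-null layout and the third the step limit):

    // GetUnderlyingObject walks at most MaxLookup steps (6 by default in
    // this tree), so its result need not be a true underlying object. One
    // more single-step walk detects the non-converged case cheaply.
    llvm::Value *UV = llvm::GetUnderlyingObject(Val);
    bool Converged = (UV == llvm::GetUnderlyingObject(UV, NULL, 1));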
@@ -845,9 +852,12 @@
for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
i != e; ++i) {
ConstantRange EdgeVal(i.getCaseValue()->getValue());
- if (DefaultCase)
- EdgesVals = EdgesVals.difference(EdgeVal);
- else if (i.getCaseSuccessor() == BBTo)
+ if (DefaultCase) {
+ // It is possible that the default destination is the destination of
+ // some cases. There is no need to perform difference for those cases.
+ if (i.getCaseSuccessor() != BBTo)
+ EdgesVals = EdgesVals.difference(EdgeVal);
+ } else if (i.getCaseSuccessor() == BBTo)
EdgesVals = EdgesVals.unionWith(EdgeVal);
}
Result = LVILatticeVal::getRange(EdgesVals);
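A worked instance of the default-edge fix: for switch i8 %x [5 -> %other, 7 -> %dflt] with default destination %dflt, the value 7 still reaches %dflt through its case, so only 5 may be subtracted from the default edge's range. The old unconditional difference() wrongly excluded 7 as well. Using the names from the hunk:

    // Correct accounting for the edge to %dflt (the default destination):
    EdgesVals = EdgesVals.difference(llvm::ConstantRange(llvm::APInt(8, 5)));
    // case 7 also branches to %dflt, so it is deliberately NOT subtracted.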
@@ -1004,7 +1014,7 @@
if (PImpl)
getCache(PImpl).clear();
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
TLI = &getAnalysis<TargetLibraryInfo>();
// Fully lazy.
Modified: llvm/branches/AMDILBackend/lib/Analysis/Lint.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/Analysis/Lint.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/Analysis/Lint.cpp (original)
+++ llvm/branches/AMDILBackend/lib/Analysis/Lint.cpp Tue Jan 15 11:16:16 2013
@@ -43,7 +43,7 @@
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Pass.h"
#include "llvm/PassManager.h"
@@ -103,7 +103,7 @@
Module *Mod;
AliasAnalysis *AA;
DominatorTree *DT;
- TargetData *TD;
+ DataLayout *TD;
TargetLibraryInfo *TLI;
std::string Messages;
@@ -177,7 +177,7 @@
Mod = F.getParent();
AA = &getAnalysis<AliasAnalysis>();
DT = &getAnalysis<DominatorTree>();
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
TLI = &getAnalysis<TargetLibraryInfo>();
visit(F);
dbgs() << MessagesStr.str();
@@ -411,14 +411,50 @@
"Undefined behavior: Branch to non-blockaddress", &I);
}
+ // Check for buffer overflows and misalignment.
if (TD) {
- if (Align == 0 && Ty) Align = TD->getABITypeAlignment(Ty);
+ // Only handles memory references that read/write something simple like an
+ // alloca instruction or a global variable.
+ int64_t Offset = 0;
+ if (Value *Base = GetPointerBaseWithConstantOffset(Ptr, Offset, *TD)) {
+ // OK, so the access is to a constant offset from Ptr. Check that Ptr is
+ // something we can handle and if so extract the size of this base object
+ // along with its alignment.
+ uint64_t BaseSize = AliasAnalysis::UnknownSize;
+ unsigned BaseAlign = 0;
+
+ if (AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
+ Type *ATy = AI->getAllocatedType();
+ if (!AI->isArrayAllocation() && ATy->isSized())
+ BaseSize = TD->getTypeAllocSize(ATy);
+ BaseAlign = AI->getAlignment();
+ if (BaseAlign == 0 && ATy->isSized())
+ BaseAlign = TD->getABITypeAlignment(ATy);
+ } else if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Base)) {
+ // If the global may be defined differently in another compilation unit
+ // then don't warn about funky memory accesses.
+ if (GV->hasDefinitiveInitializer()) {
+ Type *GTy = GV->getType()->getElementType();
+ if (GTy->isSized())
+ BaseSize = TD->getTypeAllocSize(GTy);
+ BaseAlign = GV->getAlignment();
+ if (BaseAlign == 0 && GTy->isSized())
+ BaseAlign = TD->getABITypeAlignment(GTy);
+ }
+ }
- if (Align != 0) {
- unsigned BitWidth = TD->getTypeSizeInBits(Ptr->getType());
- APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
- ComputeMaskedBits(Ptr, KnownZero, KnownOne, TD);
- Assert1(!(KnownOne & APInt::getLowBitsSet(BitWidth, Log2_32(Align))),
+ // Accesses from before the start or after the end of the object are not
+ // defined.
+ Assert1(Size == AliasAnalysis::UnknownSize ||
+ BaseSize == AliasAnalysis::UnknownSize ||
+ (Offset >= 0 && Offset + Size <= BaseSize),
+ "Undefined behavior: Buffer overflow", &I);
+
+ // Accesses that say that the memory is more aligned than it is are not
+ // defined.
+ if (Align == 0 && Ty && Ty->isSized())
+ Align = TD->getABITypeAlignment(Ty);
+ Assert1(!BaseAlign || Align <= MinAlign(BaseAlign, Offset),
"Undefined behavior: Memory reference address is misaligned", &I);
}
}
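A worked instance of the new bounds check: for %p = alloca [4 x i32] the base size is 16 bytes, so an 8-byte access at byte offset 12 fails the in-bounds condition and Lint reports "Undefined behavior: Buffer overflow":

    int64_t Offset = 12;     // constant offset of the access from the base
    uint64_t Size = 8;       // width of the access in bytes
    uint64_t BaseSize = 16;  // alloca [4 x i32]
    bool InBounds = Offset >= 0 && (uint64_t)Offset + Size <= BaseSize;
    // InBounds == false here (12 + 8 > 16), so Assert1 fires.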
@@ -470,7 +506,7 @@
"Undefined result: Shift count out of range", &I);
}
-static bool isZero(Value *V, TargetData *TD) {
+static bool isZero(Value *V, DataLayout *TD) {
// Assume undef could be zero.
if (isa<UndefValue>(V)) return true;
Modified: llvm/branches/AMDILBackend/lib/Analysis/Loads.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/Analysis/Loads.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/Analysis/Loads.cpp (original)
+++ llvm/branches/AMDILBackend/lib/Analysis/Loads.cpp Tue Jan 15 11:16:16 2013
@@ -13,7 +13,7 @@
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/GlobalAlias.h"
#include "llvm/GlobalVariable.h"
#include "llvm/IntrinsicInst.h"
@@ -52,8 +52,8 @@
/// bitcasts to get back to the underlying object being addressed, keeping
/// track of the offset in bytes from the GEPs relative to the result.
/// This is closely related to GetUnderlyingObject but is located
-/// here to avoid making VMCore depend on TargetData.
-static Value *getUnderlyingObjectWithOffset(Value *V, const TargetData *TD,
+/// here to avoid making VMCore depend on DataLayout.
+static Value *getUnderlyingObjectWithOffset(Value *V, const DataLayout *TD,
uint64_t &ByteOffset,
unsigned MaxLookup = 6) {
if (!V->getType()->isPointerTy())
@@ -85,7 +85,7 @@
/// specified pointer, we do a quick local scan of the basic block containing
/// ScanFrom, to determine if the address is already accessed.
bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
- unsigned Align, const TargetData *TD) {
+ unsigned Align, const DataLayout *TD) {
uint64_t ByteOffset = 0;
Value *Base = V;
if (TD)
Removed: llvm/branches/AMDILBackend/lib/Analysis/LoopDependenceAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/Analysis/LoopDependenceAnalysis.cpp?rev=172540&view=auto
==============================================================================
--- llvm/branches/AMDILBackend/lib/Analysis/LoopDependenceAnalysis.cpp (original)
+++ llvm/branches/AMDILBackend/lib/Analysis/LoopDependenceAnalysis.cpp (removed)
@@ -1,362 +0,0 @@
-//===- LoopDependenceAnalysis.cpp - LDA Implementation ----------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This is the (beginning) of an implementation of a loop dependence analysis
-// framework, which is used to detect dependences in memory accesses in loops.
-//
-// Please note that this is work in progress and the interface is subject to
-// change.
-//
-// TODO: adapt as implementation progresses.
-//
-// TODO: document lingo (pair, subscript, index)
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "lda"
-#include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Analysis/LoopDependenceAnalysis.h"
-#include "llvm/Analysis/LoopPass.h"
-#include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/ScalarEvolutionExpressions.h"
-#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Assembly/Writer.h"
-#include "llvm/Instructions.h"
-#include "llvm/Operator.h"
-#include "llvm/Support/Allocator.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
-using namespace llvm;
-
-STATISTIC(NumAnswered, "Number of dependence queries answered");
-STATISTIC(NumAnalysed, "Number of distinct dependence pairs analysed");
-STATISTIC(NumDependent, "Number of pairs with dependent accesses");
-STATISTIC(NumIndependent, "Number of pairs with independent accesses");
-STATISTIC(NumUnknown, "Number of pairs with unknown accesses");
-
-LoopPass *llvm::createLoopDependenceAnalysisPass() {
- return new LoopDependenceAnalysis();
-}
-
-INITIALIZE_PASS_BEGIN(LoopDependenceAnalysis, "lda",
- "Loop Dependence Analysis", false, true)
-INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
-INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
-INITIALIZE_PASS_END(LoopDependenceAnalysis, "lda",
- "Loop Dependence Analysis", false, true)
-char LoopDependenceAnalysis::ID = 0;
-
-//===----------------------------------------------------------------------===//
-// Utility Functions
-//===----------------------------------------------------------------------===//
-
-static inline bool IsMemRefInstr(const Value *V) {
- const Instruction *I = dyn_cast<const Instruction>(V);
- return I && (I->mayReadFromMemory() || I->mayWriteToMemory());
-}
-
-static void GetMemRefInstrs(const Loop *L,
- SmallVectorImpl<Instruction*> &Memrefs) {
- for (Loop::block_iterator b = L->block_begin(), be = L->block_end();
- b != be; ++b)
- for (BasicBlock::iterator i = (*b)->begin(), ie = (*b)->end();
- i != ie; ++i)
- if (IsMemRefInstr(i))
- Memrefs.push_back(i);
-}
-
-static bool IsLoadOrStoreInst(Value *I) {
- // Returns true if the load or store can be analyzed. Atomic and volatile
- // operations have properties which this analysis does not understand.
- if (LoadInst *LI = dyn_cast<LoadInst>(I))
- return LI->isUnordered();
- else if (StoreInst *SI = dyn_cast<StoreInst>(I))
- return SI->isUnordered();
- return false;
-}
-
-static Value *GetPointerOperand(Value *I) {
- if (LoadInst *i = dyn_cast<LoadInst>(I))
- return i->getPointerOperand();
- if (StoreInst *i = dyn_cast<StoreInst>(I))
- return i->getPointerOperand();
- llvm_unreachable("Value is no load or store instruction!");
-}
-
-static AliasAnalysis::AliasResult UnderlyingObjectsAlias(AliasAnalysis *AA,
- const Value *A,
- const Value *B) {
- const Value *aObj = GetUnderlyingObject(A);
- const Value *bObj = GetUnderlyingObject(B);
- return AA->alias(aObj, AA->getTypeStoreSize(aObj->getType()),
- bObj, AA->getTypeStoreSize(bObj->getType()));
-}
-
-static inline const SCEV *GetZeroSCEV(ScalarEvolution *SE) {
- return SE->getConstant(Type::getInt32Ty(SE->getContext()), 0L);
-}
-
-//===----------------------------------------------------------------------===//
-// Dependence Testing
-//===----------------------------------------------------------------------===//
-
-bool LoopDependenceAnalysis::isDependencePair(const Value *A,
- const Value *B) const {
- return IsMemRefInstr(A) &&
- IsMemRefInstr(B) &&
- (cast<const Instruction>(A)->mayWriteToMemory() ||
- cast<const Instruction>(B)->mayWriteToMemory());
-}
-
-bool LoopDependenceAnalysis::findOrInsertDependencePair(Value *A,
- Value *B,
- DependencePair *&P) {
- void *insertPos = 0;
- FoldingSetNodeID id;
- id.AddPointer(A);
- id.AddPointer(B);
-
- P = Pairs.FindNodeOrInsertPos(id, insertPos);
- if (P) return true;
-
- P = new (PairAllocator) DependencePair(id, A, B);
- Pairs.InsertNode(P, insertPos);
- return false;
-}
-
-void LoopDependenceAnalysis::getLoops(const SCEV *S,
- DenseSet<const Loop*>* Loops) const {
- // Refactor this into an SCEVVisitor, if efficiency becomes a concern.
- for (const Loop *L = this->L; L != 0; L = L->getParentLoop())
- if (!SE->isLoopInvariant(S, L))
- Loops->insert(L);
-}
-
-bool LoopDependenceAnalysis::isLoopInvariant(const SCEV *S) const {
- DenseSet<const Loop*> loops;
- getLoops(S, &loops);
- return loops.empty();
-}
-
-bool LoopDependenceAnalysis::isAffine(const SCEV *S) const {
- const SCEVAddRecExpr *rec = dyn_cast<SCEVAddRecExpr>(S);
- return isLoopInvariant(S) || (rec && rec->isAffine());
-}
-
-bool LoopDependenceAnalysis::isZIVPair(const SCEV *A, const SCEV *B) const {
- return isLoopInvariant(A) && isLoopInvariant(B);
-}
-
-bool LoopDependenceAnalysis::isSIVPair(const SCEV *A, const SCEV *B) const {
- DenseSet<const Loop*> loops;
- getLoops(A, &loops);
- getLoops(B, &loops);
- return loops.size() == 1;
-}
-
-LoopDependenceAnalysis::DependenceResult
-LoopDependenceAnalysis::analyseZIV(const SCEV *A,
- const SCEV *B,
- Subscript *S) const {
- assert(isZIVPair(A, B) && "Attempted to ZIV-test non-ZIV SCEVs!");
- return A == B ? Dependent : Independent;
-}
-
-LoopDependenceAnalysis::DependenceResult
-LoopDependenceAnalysis::analyseSIV(const SCEV *A,
- const SCEV *B,
- Subscript *S) const {
- return Unknown; // TODO: Implement.
-}
-
-LoopDependenceAnalysis::DependenceResult
-LoopDependenceAnalysis::analyseMIV(const SCEV *A,
- const SCEV *B,
- Subscript *S) const {
- return Unknown; // TODO: Implement.
-}
-
-LoopDependenceAnalysis::DependenceResult
-LoopDependenceAnalysis::analyseSubscript(const SCEV *A,
- const SCEV *B,
- Subscript *S) const {
- DEBUG(dbgs() << " Testing subscript: " << *A << ", " << *B << "\n");
-
- if (A == B) {
- DEBUG(dbgs() << " -> [D] same SCEV\n");
- return Dependent;
- }
-
- if (!isAffine(A) || !isAffine(B)) {
- DEBUG(dbgs() << " -> [?] not affine\n");
- return Unknown;
- }
-
- if (isZIVPair(A, B))
- return analyseZIV(A, B, S);
-
- if (isSIVPair(A, B))
- return analyseSIV(A, B, S);
-
- return analyseMIV(A, B, S);
-}
-
-LoopDependenceAnalysis::DependenceResult
-LoopDependenceAnalysis::analysePair(DependencePair *P) const {
- DEBUG(dbgs() << "Analysing:\n" << *P->A << "\n" << *P->B << "\n");
-
- // We only analyse loads and stores but no possible memory accesses by e.g.
- // free, call, or invoke instructions.
- if (!IsLoadOrStoreInst(P->A) || !IsLoadOrStoreInst(P->B)) {
- DEBUG(dbgs() << "--> [?] no load/store\n");
- return Unknown;
- }
-
- Value *aPtr = GetPointerOperand(P->A);
- Value *bPtr = GetPointerOperand(P->B);
-
- switch (UnderlyingObjectsAlias(AA, aPtr, bPtr)) {
- case AliasAnalysis::MayAlias:
- case AliasAnalysis::PartialAlias:
- // We can not analyse objects if we do not know about their aliasing.
- DEBUG(dbgs() << "---> [?] may alias\n");
- return Unknown;
-
- case AliasAnalysis::NoAlias:
- // If the objects noalias, they are distinct, accesses are independent.
- DEBUG(dbgs() << "---> [I] no alias\n");
- return Independent;
-
- case AliasAnalysis::MustAlias:
- break; // The underlying objects alias, test accesses for dependence.
- }
-
- const GEPOperator *aGEP = dyn_cast<GEPOperator>(aPtr);
- const GEPOperator *bGEP = dyn_cast<GEPOperator>(bPtr);
-
- if (!aGEP || !bGEP)
- return Unknown;
-
- // FIXME: Is filtering coupled subscripts necessary?
-
- // Collect GEP operand pairs (FIXME: use GetGEPOperands from BasicAA), adding
- // trailing zeroes to the smaller GEP, if needed.
- typedef SmallVector<std::pair<const SCEV*, const SCEV*>, 4> GEPOpdPairsTy;
- GEPOpdPairsTy opds;
- for(GEPOperator::const_op_iterator aIdx = aGEP->idx_begin(),
- aEnd = aGEP->idx_end(),
- bIdx = bGEP->idx_begin(),
- bEnd = bGEP->idx_end();
- aIdx != aEnd && bIdx != bEnd;
- aIdx += (aIdx != aEnd), bIdx += (bIdx != bEnd)) {
- const SCEV* aSCEV = (aIdx != aEnd) ? SE->getSCEV(*aIdx) : GetZeroSCEV(SE);
- const SCEV* bSCEV = (bIdx != bEnd) ? SE->getSCEV(*bIdx) : GetZeroSCEV(SE);
- opds.push_back(std::make_pair(aSCEV, bSCEV));
- }
-
- if (!opds.empty() && opds[0].first != opds[0].second) {
- // We cannot (yet) handle arbitrary GEP pointer offsets. By limiting
- //
- // TODO: this could be relaxed by adding the size of the underlying object
- // to the first subscript. If we have e.g. (GEP x,0,i; GEP x,2,-i) and we
- // know that x is a [100 x i8]*, we could modify the first subscript to be
- // (i, 200-i) instead of (i, -i).
- return Unknown;
- }
-
- // Now analyse the collected operand pairs (skipping the GEP ptr offsets).
- for (GEPOpdPairsTy::const_iterator i = opds.begin() + 1, end = opds.end();
- i != end; ++i) {
- Subscript subscript;
- DependenceResult result = analyseSubscript(i->first, i->second, &subscript);
- if (result != Dependent) {
- // We either proved independence or failed to analyse this subscript.
- // Further subscripts will not improve the situation, so abort early.
- return result;
- }
- P->Subscripts.push_back(subscript);
- }
- // We successfully analysed all subscripts but failed to prove independence.
- return Dependent;
-}
-
-bool LoopDependenceAnalysis::depends(Value *A, Value *B) {
- assert(isDependencePair(A, B) && "Values form no dependence pair!");
- ++NumAnswered;
-
- DependencePair *p;
- if (!findOrInsertDependencePair(A, B, p)) {
- // The pair is not cached, so analyse it.
- ++NumAnalysed;
- switch (p->Result = analysePair(p)) {
- case Dependent: ++NumDependent; break;
- case Independent: ++NumIndependent; break;
- case Unknown: ++NumUnknown; break;
- }
- }
- return p->Result != Independent;
-}
-
-//===----------------------------------------------------------------------===//
-// LoopDependenceAnalysis Implementation
-//===----------------------------------------------------------------------===//
-
-bool LoopDependenceAnalysis::runOnLoop(Loop *L, LPPassManager &) {
- this->L = L;
- AA = &getAnalysis<AliasAnalysis>();
- SE = &getAnalysis<ScalarEvolution>();
- return false;
-}
-
-void LoopDependenceAnalysis::releaseMemory() {
- Pairs.clear();
- PairAllocator.Reset();
-}
-
-void LoopDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesAll();
- AU.addRequiredTransitive<AliasAnalysis>();
- AU.addRequiredTransitive<ScalarEvolution>();
-}
-
-static void PrintLoopInfo(raw_ostream &OS,
- LoopDependenceAnalysis *LDA, const Loop *L) {
- if (!L->empty()) return; // ignore non-innermost loops
-
- SmallVector<Instruction*, 8> memrefs;
- GetMemRefInstrs(L, memrefs);
-
- OS << "Loop at depth " << L->getLoopDepth() << ", header block: ";
- WriteAsOperand(OS, L->getHeader(), false);
- OS << "\n";
-
- OS << " Load/store instructions: " << memrefs.size() << "\n";
- for (SmallVector<Instruction*, 8>::const_iterator x = memrefs.begin(),
- end = memrefs.end(); x != end; ++x)
- OS << "\t" << (x - memrefs.begin()) << ": " << **x << "\n";
-
- OS << " Pairwise dependence results:\n";
- for (SmallVector<Instruction*, 8>::const_iterator x = memrefs.begin(),
- end = memrefs.end(); x != end; ++x)
- for (SmallVector<Instruction*, 8>::const_iterator y = x + 1;
- y != end; ++y)
- if (LDA->isDependencePair(*x, *y))
- OS << "\t" << (x - memrefs.begin()) << "," << (y - memrefs.begin())
- << ": " << (LDA->depends(*x, *y) ? "dependent" : "independent")
- << "\n";
-}
-
-void LoopDependenceAnalysis::print(raw_ostream &OS, const Module*) const {
- // TODO: doc why const_cast is safe
- PrintLoopInfo(OS, const_cast<LoopDependenceAnalysis*>(this), this->L);
-}
Modified: llvm/branches/AMDILBackend/lib/Analysis/LoopInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/Analysis/LoopInfo.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/Analysis/LoopInfo.cpp (original)
+++ llvm/branches/AMDILBackend/lib/Analysis/LoopInfo.cpp Tue Jan 15 11:16:16 2013
@@ -306,9 +306,11 @@
return 0;
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void Loop::dump() const {
print(dbgs());
}
+#endif
//===----------------------------------------------------------------------===//
// UnloopUpdater implementation
@@ -429,8 +431,8 @@
Unloop->removeChildLoop(llvm::prior(Unloop->end()));
assert(SubloopParents.count(Subloop) && "DFS failed to visit subloop");
- if (SubloopParents[Subloop])
- SubloopParents[Subloop]->addChildLoop(Subloop);
+ if (Loop *Parent = SubloopParents[Subloop])
+ Parent->addChildLoop(Subloop);
else
LI->addTopLevelLoop(Subloop);
}
@@ -456,9 +458,8 @@
assert(Subloop && "subloop is not an ancestor of the original loop");
}
// Get the current nearest parent of the Subloop exits, initially Unloop.
- if (!SubloopParents.count(Subloop))
- SubloopParents[Subloop] = Unloop;
- NearLoop = SubloopParents[Subloop];
+ NearLoop =
+ SubloopParents.insert(std::make_pair(Subloop, Unloop)).first->second;
}
succ_iterator I = succ_begin(BB), E = succ_end(BB);
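The insert() form above is another single-lookup idiom: both std::map and DenseMap insert() refuse to overwrite an existing key but return an iterator to the resident entry either way, so one call both seeds the default (Unloop) and reads back whatever parent is already recorded:

    // Equivalent to: if (!SubloopParents.count(Subloop))
    //                  SubloopParents[Subloop] = Unloop;
    //                NearLoop = SubloopParents[Subloop];
    // ...but with a single map walk.
    NearLoop = SubloopParents.insert(std::make_pair(Subloop, Unloop))
                   .first->second;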
Modified: llvm/branches/AMDILBackend/lib/Analysis/MemoryBuiltins.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/Analysis/MemoryBuiltins.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/Analysis/MemoryBuiltins.cpp (original)
+++ llvm/branches/AMDILBackend/lib/Analysis/MemoryBuiltins.cpp Tue Jan 15 11:16:16 2013
@@ -25,7 +25,8 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
@@ -39,7 +40,7 @@
};
struct AllocFnsTy {
- const char *Name;
+ LibFunc::Func Func;
AllocType AllocTy;
unsigned char NumParams;
// First and Second size parameters (or -1 if unused)
@@ -49,22 +50,22 @@
// FIXME: certain users need more information. E.g., SimplifyLibCalls needs to
// know which functions are nounwind, noalias, nocapture parameters, etc.
static const AllocFnsTy AllocationFnData[] = {
- {"malloc", MallocLike, 1, 0, -1},
- {"valloc", MallocLike, 1, 0, -1},
- {"_Znwj", MallocLike, 1, 0, -1}, // new(unsigned int)
- {"_ZnwjRKSt9nothrow_t", MallocLike, 2, 0, -1}, // new(unsigned int, nothrow)
- {"_Znwm", MallocLike, 1, 0, -1}, // new(unsigned long)
- {"_ZnwmRKSt9nothrow_t", MallocLike, 2, 0, -1}, // new(unsigned long, nothrow)
- {"_Znaj", MallocLike, 1, 0, -1}, // new[](unsigned int)
- {"_ZnajRKSt9nothrow_t", MallocLike, 2, 0, -1}, // new[](unsigned int, nothrow)
- {"_Znam", MallocLike, 1, 0, -1}, // new[](unsigned long)
- {"_ZnamRKSt9nothrow_t", MallocLike, 2, 0, -1}, // new[](unsigned long, nothrow)
- {"posix_memalign", MallocLike, 3, 2, -1},
- {"calloc", CallocLike, 2, 0, 1},
- {"realloc", ReallocLike, 2, 1, -1},
- {"reallocf", ReallocLike, 2, 1, -1},
- {"strdup", StrDupLike, 1, -1, -1},
- {"strndup", StrDupLike, 2, 1, -1}
+ {LibFunc::malloc, MallocLike, 1, 0, -1},
+ {LibFunc::valloc, MallocLike, 1, 0, -1},
+ {LibFunc::Znwj, MallocLike, 1, 0, -1}, // new(unsigned int)
+ {LibFunc::ZnwjRKSt9nothrow_t, MallocLike, 2, 0, -1}, // new(unsigned int, nothrow)
+ {LibFunc::Znwm, MallocLike, 1, 0, -1}, // new(unsigned long)
+ {LibFunc::ZnwmRKSt9nothrow_t, MallocLike, 2, 0, -1}, // new(unsigned long, nothrow)
+ {LibFunc::Znaj, MallocLike, 1, 0, -1}, // new[](unsigned int)
+ {LibFunc::ZnajRKSt9nothrow_t, MallocLike, 2, 0, -1}, // new[](unsigned int, nothrow)
+ {LibFunc::Znam, MallocLike, 1, 0, -1}, // new[](unsigned long)
+ {LibFunc::ZnamRKSt9nothrow_t, MallocLike, 2, 0, -1}, // new[](unsigned long, nothrow)
+ {LibFunc::posix_memalign, MallocLike, 3, 2, -1},
+ {LibFunc::calloc, CallocLike, 2, 0, 1},
+ {LibFunc::realloc, ReallocLike, 2, 1, -1},
+ {LibFunc::reallocf, ReallocLike, 2, 1, -1},
+ {LibFunc::strdup, StrDupLike, 1, -1, -1},
+ {LibFunc::strndup, StrDupLike, 2, 1, -1}
};
@@ -85,15 +86,22 @@
/// \brief Returns the allocation data for the given value if it is a call to a
/// known allocation function, and NULL otherwise.
static const AllocFnsTy *getAllocationData(const Value *V, AllocType AllocTy,
+ const TargetLibraryInfo *TLI,
bool LookThroughBitCast = false) {
Function *Callee = getCalledFunction(V, LookThroughBitCast);
if (!Callee)
return 0;
+ // Make sure that the function is available.
+ StringRef FnName = Callee->getName();
+ LibFunc::Func TLIFn;
+ if (!TLI || !TLI->getLibFunc(FnName, TLIFn) || !TLI->has(TLIFn))
+ return 0;
+
unsigned i = 0;
bool found = false;
for ( ; i < array_lengthof(AllocationFnData); ++i) {
- if (Callee->getName() == AllocationFnData[i].Name) {
+ if (AllocationFnData[i].Func == TLIFn) {
found = true;
break;
}
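The TLI check above is what lets -fno-builtin (or a target that simply lacks
the routine) switch allocator recognition off. A sketch of the gating pattern
in isolation; getLibFunc and has are the 3.2-era TargetLibraryInfo calls used
in this patch, while isKnownMalloc is invented for illustration:

    #include "llvm/Target/TargetLibraryInfo.h"
    using namespace llvm;

    // True only if TLI recognizes Name *and* the function is actually
    // available on the current target.
    static bool isKnownMalloc(StringRef Name, const TargetLibraryInfo *TLI) {
      LibFunc::Func F;
      if (!TLI || !TLI->getLibFunc(Name, F) || !TLI->has(F))
        return false;
      return F == LibFunc::malloc;
    }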
@@ -106,7 +114,6 @@
return 0;
// Check function prototype.
- // FIXME: Check the nobuiltin metadata?? (PR5130)
int FstParam = FnData->FstParam;
int SndParam = FnData->SndParam;
FunctionType *FTy = Callee->getFunctionType();
@@ -125,64 +132,72 @@
static bool hasNoAliasAttr(const Value *V, bool LookThroughBitCast) {
ImmutableCallSite CS(LookThroughBitCast ? V->stripPointerCasts() : V);
- return CS && CS.hasFnAttr(Attribute::NoAlias);
+ return CS && CS.hasFnAttr(Attributes::NoAlias);
}
/// \brief Tests if a value is a call or invoke to a library function that
/// allocates or reallocates memory (either malloc, calloc, realloc, or strdup
/// like).
-bool llvm::isAllocationFn(const Value *V, bool LookThroughBitCast) {
- return getAllocationData(V, AnyAlloc, LookThroughBitCast);
+bool llvm::isAllocationFn(const Value *V, const TargetLibraryInfo *TLI,
+ bool LookThroughBitCast) {
+ return getAllocationData(V, AnyAlloc, TLI, LookThroughBitCast);
}
/// \brief Tests if a value is a call or invoke to a function that returns a
/// NoAlias pointer (including malloc/calloc/realloc/strdup-like functions).
-bool llvm::isNoAliasFn(const Value *V, bool LookThroughBitCast) {
+bool llvm::isNoAliasFn(const Value *V, const TargetLibraryInfo *TLI,
+ bool LookThroughBitCast) {
// it's safe to consider realloc as noalias since accessing the original
// pointer is undefined behavior
- return isAllocationFn(V, LookThroughBitCast) ||
+ return isAllocationFn(V, TLI, LookThroughBitCast) ||
hasNoAliasAttr(V, LookThroughBitCast);
}
/// \brief Tests if a value is a call or invoke to a library function that
/// allocates uninitialized memory (such as malloc).
-bool llvm::isMallocLikeFn(const Value *V, bool LookThroughBitCast) {
- return getAllocationData(V, MallocLike, LookThroughBitCast);
+bool llvm::isMallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
+ bool LookThroughBitCast) {
+ return getAllocationData(V, MallocLike, TLI, LookThroughBitCast);
}
/// \brief Tests if a value is a call or invoke to a library function that
/// allocates zero-filled memory (such as calloc).
-bool llvm::isCallocLikeFn(const Value *V, bool LookThroughBitCast) {
- return getAllocationData(V, CallocLike, LookThroughBitCast);
+bool llvm::isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
+ bool LookThroughBitCast) {
+ return getAllocationData(V, CallocLike, TLI, LookThroughBitCast);
}
/// \brief Tests if a value is a call or invoke to a library function that
/// allocates memory (either malloc, calloc, or strdup like).
-bool llvm::isAllocLikeFn(const Value *V, bool LookThroughBitCast) {
- return getAllocationData(V, AllocLike, LookThroughBitCast);
+bool llvm::isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
+ bool LookThroughBitCast) {
+ return getAllocationData(V, AllocLike, TLI, LookThroughBitCast);
}
/// \brief Tests if a value is a call or invoke to a library function that
/// reallocates memory (such as realloc).
-bool llvm::isReallocLikeFn(const Value *V, bool LookThroughBitCast) {
- return getAllocationData(V, ReallocLike, LookThroughBitCast);
+bool llvm::isReallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
+ bool LookThroughBitCast) {
+ return getAllocationData(V, ReallocLike, TLI, LookThroughBitCast);
}
/// extractMallocCall - Returns the corresponding CallInst if the instruction
/// is a malloc call. Since CallInst::CreateMalloc() only creates calls, we
/// ignore InvokeInst here.
-const CallInst *llvm::extractMallocCall(const Value *I) {
- return isMallocLikeFn(I) ? dyn_cast<CallInst>(I) : 0;
+const CallInst *llvm::extractMallocCall(const Value *I,
+ const TargetLibraryInfo *TLI) {
+ return isMallocLikeFn(I, TLI) ? dyn_cast<CallInst>(I) : 0;
}
-static Value *computeArraySize(const CallInst *CI, const TargetData *TD,
+static Value *computeArraySize(const CallInst *CI, const DataLayout *TD,
+ const TargetLibraryInfo *TLI,
bool LookThroughSExt = false) {
if (!CI)
return NULL;
// The size of the malloc's result type must be known to determine array size.
- Type *T = getMallocAllocatedType(CI);
+ Type *T = getMallocAllocatedType(CI, TLI);
if (!T || !T->isSized() || !TD)
return NULL;
@@ -204,9 +219,11 @@
/// isArrayMalloc - Returns the corresponding CallInst if the instruction
/// is a call to malloc whose array size can be determined and the array size
/// is not constant 1. Otherwise, return NULL.
-const CallInst *llvm::isArrayMalloc(const Value *I, const TargetData *TD) {
- const CallInst *CI = extractMallocCall(I);
- Value *ArraySize = computeArraySize(CI, TD);
+const CallInst *llvm::isArrayMalloc(const Value *I,
+ const DataLayout *TD,
+ const TargetLibraryInfo *TLI) {
+ const CallInst *CI = extractMallocCall(I, TLI);
+ Value *ArraySize = computeArraySize(CI, TD, TLI);
if (ArraySize &&
ArraySize != ConstantInt::get(CI->getArgOperand(0)->getType(), 1))
@@ -221,8 +238,9 @@
/// 0: PointerType is the calls' return type.
/// 1: PointerType is the bitcast's result type.
/// >1: Unique PointerType cannot be determined, return NULL.
-PointerType *llvm::getMallocType(const CallInst *CI) {
- assert(isMallocLikeFn(CI) && "getMallocType and not malloc call");
+PointerType *llvm::getMallocType(const CallInst *CI,
+ const TargetLibraryInfo *TLI) {
+ assert(isMallocLikeFn(CI, TLI) && "getMallocType and not malloc call");
PointerType *MallocType = NULL;
unsigned NumOfBitCastUses = 0;
@@ -252,8 +270,9 @@
/// 0: PointerType is the malloc calls' return type.
/// 1: PointerType is the bitcast's result type.
/// >1: Unique PointerType cannot be determined, return NULL.
-Type *llvm::getMallocAllocatedType(const CallInst *CI) {
- PointerType *PT = getMallocType(CI);
+Type *llvm::getMallocAllocatedType(const CallInst *CI,
+ const TargetLibraryInfo *TLI) {
+ PointerType *PT = getMallocType(CI, TLI);
return PT ? PT->getElementType() : NULL;
}
@@ -262,22 +281,24 @@
/// then return that multiple. For non-array mallocs, the multiple is
/// constant 1. Otherwise, return NULL for mallocs whose array size cannot be
/// determined.
-Value *llvm::getMallocArraySize(CallInst *CI, const TargetData *TD,
+Value *llvm::getMallocArraySize(CallInst *CI, const DataLayout *TD,
+ const TargetLibraryInfo *TLI,
bool LookThroughSExt) {
- assert(isMallocLikeFn(CI) && "getMallocArraySize and not malloc call");
- return computeArraySize(CI, TD, LookThroughSExt);
+ assert(isMallocLikeFn(CI, TLI) && "getMallocArraySize and not malloc call");
+ return computeArraySize(CI, TD, TLI, LookThroughSExt);
}
/// extractCallocCall - Returns the corresponding CallInst if the instruction
/// is a calloc call.
-const CallInst *llvm::extractCallocCall(const Value *I) {
- return isCallocLikeFn(I) ? cast<CallInst>(I) : 0;
+const CallInst *llvm::extractCallocCall(const Value *I,
+ const TargetLibraryInfo *TLI) {
+ return isCallocLikeFn(I, TLI) ? cast<CallInst>(I) : 0;
}
/// isFreeCall - Returns non-null if the value is a call to the builtin free()
-const CallInst *llvm::isFreeCall(const Value *I) {
+const CallInst *llvm::isFreeCall(const Value *I, const TargetLibraryInfo *TLI) {
const CallInst *CI = dyn_cast<CallInst>(I);
if (!CI)
return 0;
@@ -285,9 +306,14 @@
if (Callee == 0 || !Callee->isDeclaration())
return 0;
- if (Callee->getName() != "free" &&
- Callee->getName() != "_ZdlPv" && // operator delete(void*)
- Callee->getName() != "_ZdaPv") // operator delete[](void*)
+ StringRef FnName = Callee->getName();
+ LibFunc::Func TLIFn;
+ if (!TLI || !TLI->getLibFunc(FnName, TLIFn) || !TLI->has(TLIFn))
+ return 0;
+
+ if (TLIFn != LibFunc::free &&
+ TLIFn != LibFunc::ZdlPv && // operator delete(void*)
+ TLIFn != LibFunc::ZdaPv) // operator delete[](void*)
return 0;
// Check free prototype.
@@ -315,12 +341,12 @@
/// object size in Size if successful, and false otherwise.
/// If RoundToAlign is true, then Size is rounded up to the alignment of allocas,
/// byval arguments, and global variables.
-bool llvm::getObjectSize(const Value *Ptr, uint64_t &Size, const TargetData *TD,
- bool RoundToAlign) {
+bool llvm::getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout *TD,
+ const TargetLibraryInfo *TLI, bool RoundToAlign) {
if (!TD)
return false;
- ObjectSizeOffsetVisitor Visitor(TD, Ptr->getContext(), RoundToAlign);
+ ObjectSizeOffsetVisitor Visitor(TD, TLI, Ptr->getContext(), RoundToAlign);
SizeOffsetType Data = Visitor.compute(const_cast<Value*>(Ptr));
if (!Visitor.bothKnown(Data))
return false;
@@ -347,10 +373,11 @@
return Size;
}
-ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const TargetData *TD,
+ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const DataLayout *TD,
+ const TargetLibraryInfo *TLI,
LLVMContext &Context,
bool RoundToAlign)
-: TD(TD), RoundToAlign(RoundToAlign) {
+: TD(TD), TLI(TLI), RoundToAlign(RoundToAlign) {
IntegerType *IntTy = TD->getIntPtrType(Context);
IntTyBits = IntTy->getBitWidth();
Zero = APInt::getNullValue(IntTyBits);
@@ -358,11 +385,16 @@
SizeOffsetType ObjectSizeOffsetVisitor::compute(Value *V) {
V = V->stripPointerCasts();
+ if (Instruction *I = dyn_cast<Instruction>(V)) {
+ // If we have already seen this instruction, bail out. Cycles can happen in
+ // unreachable code after constant propagation.
+ if (!SeenInsts.insert(I))
+ return unknown();
- if (GEPOperator *GEP = dyn_cast<GEPOperator>(V))
- return visitGEPOperator(*GEP);
- if (Instruction *I = dyn_cast<Instruction>(V))
+ if (GEPOperator *GEP = dyn_cast<GEPOperator>(V))
+ return visitGEPOperator(*GEP);
return visit(*I);
+ }
if (Argument *A = dyn_cast<Argument>(V))
return visitArgument(*A);
if (ConstantPointerNull *P = dyn_cast<ConstantPointerNull>(V))
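The SeenInsts guard added above makes the recursion terminate on cyclic IR in
unreachable code; note that this era's SmallPtrSet::insert returns a plain
bool. A self-contained analogue of the visited-set pattern, with std::set
standing in and a made-up Node type:

    #include <set>

    struct Node { Node *Next; };

    // Walks the chain, bailing out instead of looping forever on a cycle.
    bool reachesEnd(Node *N, std::set<Node *> &Seen) {
      if (!N) return true;
      if (!Seen.insert(N).second)  // already visited: a cycle
        return false;
      return reachesEnd(N->Next, Seen);
    }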
@@ -371,9 +403,12 @@
return visitGlobalVariable(*GV);
if (UndefValue *UV = dyn_cast<UndefValue>(V))
return visitUndefValue(*UV);
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
if (CE->getOpcode() == Instruction::IntToPtr)
return unknown(); // clueless
+ if (CE->getOpcode() == Instruction::GetElementPtr)
+ return visitGEPOperator(cast<GEPOperator>(*CE));
+ }
DEBUG(dbgs() << "ObjectSizeOffsetVisitor::compute() unhandled value: " << *V
<< '\n');
@@ -408,7 +443,8 @@
}
SizeOffsetType ObjectSizeOffsetVisitor::visitCallSite(CallSite CS) {
- const AllocFnsTy *FnData = getAllocationData(CS.getInstruction(), AnyAlloc);
+ const AllocFnsTy *FnData = getAllocationData(CS.getInstruction(), AnyAlloc,
+ TLI);
if (!FnData)
return unknown();
@@ -506,10 +542,6 @@
}
SizeOffsetType ObjectSizeOffsetVisitor::visitSelectInst(SelectInst &I) {
- // ignore malformed self-looping selects
- if (I.getTrueValue() == &I || I.getFalseValue() == &I)
- return unknown();
-
SizeOffsetType TrueSide = compute(I.getTrueValue());
SizeOffsetType FalseSide = compute(I.getFalseValue());
if (bothKnown(TrueSide) && bothKnown(FalseSide) && TrueSide == FalseSide)
@@ -527,10 +559,10 @@
}
-ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(const TargetData *TD,
+ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(const DataLayout *TD,
+ const TargetLibraryInfo *TLI,
LLVMContext &Context)
-: TD(TD), Context(Context), Builder(Context, TargetFolder(TD)),
-Visitor(TD, Context) {
+: TD(TD), TLI(TLI), Context(Context), Builder(Context, TargetFolder(TD)) {
IntTy = TD->getIntPtrType(Context);
Zero = ConstantInt::get(IntTy, 0);
}
@@ -555,6 +587,7 @@
}
SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute_(Value *V) {
+ ObjectSizeOffsetVisitor Visitor(TD, TLI, Context);
SizeOffsetType Const = Visitor.compute(V);
if (Visitor.bothKnown(Const))
return std::make_pair(ConstantInt::get(Context, Const.first),
@@ -617,7 +650,8 @@
}
SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitCallSite(CallSite CS) {
- const AllocFnsTy *FnData = getAllocationData(CS.getInstruction(), AnyAlloc);
+ const AllocFnsTy *FnData = getAllocationData(CS.getInstruction(), AnyAlloc,
+ TLI);
if (!FnData)
return unknown();
@@ -715,10 +749,6 @@
}
SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitSelectInst(SelectInst &I) {
- // ignore malformed self-looping selects
- if (I.getTrueValue() == &I || I.getFalseValue() == &I)
- return unknown();
-
SizeOffsetEvalType TrueSide = compute_(I.getTrueValue());
SizeOffsetEvalType FalseSide = compute_(I.getFalseValue());
Modified: llvm/branches/AMDILBackend/lib/Analysis/MemoryDependenceAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/Analysis/MemoryDependenceAnalysis.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/Analysis/MemoryDependenceAnalysis.cpp (original)
+++ llvm/branches/AMDILBackend/lib/Analysis/MemoryDependenceAnalysis.cpp Tue Jan 15 11:16:16 2013
@@ -30,7 +30,7 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/PredIteratorCache.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
using namespace llvm;
STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses");
@@ -89,7 +89,7 @@
bool MemoryDependenceAnalysis::runOnFunction(Function &) {
AA = &getAnalysis<AliasAnalysis>();
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
DT = getAnalysisIfAvailable<DominatorTree>();
if (PredCache == 0)
PredCache.reset(new PredIteratorCache());
@@ -148,7 +148,7 @@
return AliasAnalysis::ModRef;
}
- if (const CallInst *CI = isFreeCall(Inst)) {
+ if (const CallInst *CI = isFreeCall(Inst, AA->getTargetLibraryInfo())) {
// calls to free() deallocate the entire structure
Loc = AliasAnalysis::Location(CI->getArgOperand(0));
return AliasAnalysis::Mod;
@@ -227,13 +227,18 @@
// Otherwise if the two calls don't interact (e.g. InstCS is readnone)
// keep scanning.
- break;
+ continue;
default:
return MemDepResult::getClobber(Inst);
}
}
+
+ // If we could not obtain a pointer for the instruction and the instruction
+ // touches memory then assume that this is a dependency.
+ if (MR != AliasAnalysis::NoModRef)
+ return MemDepResult::getClobber(Inst);
}
-
+
// No dependence found. If this is the entry block of the function, it is
// unknown, otherwise it is non-local.
if (BB != &BB->getParent()->getEntryBlock())
@@ -251,7 +256,7 @@
const Value *&MemLocBase,
int64_t &MemLocOffs,
const LoadInst *LI,
- const TargetData *TD) {
+ const DataLayout *TD) {
// If we have no target data, we can't do this.
if (TD == 0) return false;
@@ -275,7 +280,7 @@
unsigned MemoryDependenceAnalysis::
getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
unsigned MemLocSize, const LoadInst *LI,
- const TargetData &TD) {
+ const DataLayout &TD) {
// We can only extend simple integer loads.
if (!isa<IntegerType>(LI->getType()) || !LI->isSimple()) return 0;
@@ -322,12 +327,12 @@
return 0;
if (LIOffs+NewLoadByteSize > MemLocEnd &&
- LI->getParent()->getParent()->hasFnAttr(Attribute::AddressSafety)) {
+ LI->getParent()->getParent()->getFnAttributes().
+ hasAttribute(Attributes::AddressSafety))
// We will be reading past the location accessed by the original program.
// While this is safe in a regular build, Address Safety analysis tools
// may start reporting false warnings. So, don't do widening.
return 0;
- }
// If a load of this width would include all of MemLoc, then we succeed.
if (LIOffs+NewLoadByteSize >= MemLocEnd)
@@ -474,12 +479,20 @@
// a subsequent bitcast of the malloc call result. There can be stores to
// the malloced memory between the malloc call and its bitcast uses, and we
// need to continue scanning until the malloc call.
- if (isa<AllocaInst>(Inst) || isNoAliasFn(Inst)) {
+ const TargetLibraryInfo *TLI = AA->getTargetLibraryInfo();
+ if (isa<AllocaInst>(Inst) || isNoAliasFn(Inst, TLI)) {
const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, TD);
if (AccessPtr == Inst || AA->isMustAlias(Inst, AccessPtr))
return MemDepResult::getDef(Inst);
- continue;
+ // Be conservative if the accessed pointer may alias the allocation.
+ if (AA->alias(Inst, AccessPtr) != AliasAnalysis::NoAlias)
+ return MemDepResult::getClobber(Inst);
+ // If the allocation is not aliased and does not read memory (like
+ // strdup), it is safe to ignore.
+ if (isa<AllocaInst>(Inst) ||
+ isMallocLikeFn(Inst, TLI) || isCallocLikeFn(Inst, TLI))
+ continue;
}
// See if this instruction (e.g. a call or vaarg) mod/ref's the pointer.
@@ -970,7 +983,7 @@
for (NonLocalDepInfo::iterator I = Cache->begin(), E = Cache->end();
I != E; ++I) {
Visited.insert(std::make_pair(I->getBB(), Addr));
- if (!I->getResult().isNonLocal())
+ if (!I->getResult().isNonLocal() && DT->isReachableFromEntry(I->getBB()))
Result.push_back(NonLocalDepResult(I->getBB(), I->getResult(), Addr));
}
++NumCacheCompleteNonLocalPtr;
@@ -1016,7 +1029,7 @@
NumSortedEntries);
// If we got a Def or Clobber, add this to the list of results.
- if (!Dep.isNonLocal()) {
+ if (!Dep.isNonLocal() && DT->isReachableFromEntry(BB)) {
Result.push_back(NonLocalDepResult(BB, Dep, Pointer.getAddr()));
continue;
}
Modified: llvm/branches/AMDILBackend/lib/Analysis/NoAliasAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/Analysis/NoAliasAnalysis.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/Analysis/NoAliasAnalysis.cpp (original)
+++ llvm/branches/AMDILBackend/lib/Analysis/NoAliasAnalysis.cpp Tue Jan 15 11:16:16 2013
@@ -15,7 +15,7 @@
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Pass.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
using namespace llvm;
namespace {
@@ -36,7 +36,7 @@
virtual void initializePass() {
// Note: NoAA does not call InitializeAliasAnalysis because it's
// special and does not support chaining.
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
}
virtual AliasResult alias(const Location &LocA, const Location &LocB) {
Modified: llvm/branches/AMDILBackend/lib/Analysis/PHITransAddr.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/Analysis/PHITransAddr.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/Analysis/PHITransAddr.cpp (original)
+++ llvm/branches/AMDILBackend/lib/Analysis/PHITransAddr.cpp Tue Jan 15 11:16:16 2013
@@ -41,6 +41,7 @@
return false;
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void PHITransAddr::dump() const {
if (Addr == 0) {
dbgs() << "PHITransAddr: null\n";
@@ -50,6 +51,7 @@
for (unsigned i = 0, e = InstInputs.size(); i != e; ++i)
dbgs() << " Input #" << i << " is " << *InstInputs[i] << "\n";
}
+#endif
static bool VerifySubExpr(Value *Expr,
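This #if guard, applied to dump() methods throughout the patch, keeps the
debugger-only helpers out of release builds unless explicitly requested. A
stand-alone illustration (ENABLE_DUMP is a stand-in for LLVM_ENABLE_DUMP):

    #include <cstdio>

    struct Widget {
      void print() const { std::printf("widget\n"); }
    #if !defined(NDEBUG) || defined(ENABLE_DUMP)
      // Compiled in asserts builds, stripped from release builds.
      void dump() const { print(); }
    #endif
    };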
Modified: llvm/branches/AMDILBackend/lib/Analysis/ProfileEstimatorPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/Analysis/ProfileEstimatorPass.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/Analysis/ProfileEstimatorPass.cpp (original)
+++ llvm/branches/AMDILBackend/lib/Analysis/ProfileEstimatorPass.cpp Tue Jan 15 11:16:16 2013
@@ -286,7 +286,7 @@
}
}
- double fraction = floor(BBWeight/Edges.size());
+ double fraction = Edges.size() ? floor(BBWeight/Edges.size()) : 0.0;
// Finally we know what flow is still not leaving the block, distribute this
// flow onto the empty edges.
for (SmallVector<Edge, 8>::iterator ei = Edges.begin(), ee = Edges.end();
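Without the Edges.size() check added above, a block with no remaining empty
edges would evaluate floor(BBWeight/0) and feed an infinity or NaN into the
estimated profile. The guard in isolation:

    #include <cmath>
    #include <vector>

    // Distributable share per edge; 0.0 when there is nothing to fill.
    double perEdgeShare(double Weight, const std::vector<int> &Edges) {
      return Edges.empty() ? 0.0 : std::floor(Weight / Edges.size());
    }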
Modified: llvm/branches/AMDILBackend/lib/Analysis/ProfileInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/Analysis/ProfileInfo.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/Analysis/ProfileInfo.cpp (original)
+++ llvm/branches/AMDILBackend/lib/Analysis/ProfileInfo.cpp Tue Jan 15 11:16:16 2013
@@ -1016,40 +1016,14 @@
}
}
-raw_ostream& operator<<(raw_ostream &O, const Function *F) {
- return O << F->getName();
-}
-
raw_ostream& operator<<(raw_ostream &O, const MachineFunction *MF) {
return O << MF->getFunction()->getName() << "(MF)";
}
-raw_ostream& operator<<(raw_ostream &O, const BasicBlock *BB) {
- return O << BB->getName();
-}
-
raw_ostream& operator<<(raw_ostream &O, const MachineBasicBlock *MBB) {
return O << MBB->getBasicBlock()->getName() << "(MB)";
}
-raw_ostream& operator<<(raw_ostream &O, std::pair<const BasicBlock *, const BasicBlock *> E) {
- O << "(";
-
- if (E.first)
- O << E.first;
- else
- O << "0";
-
- O << ",";
-
- if (E.second)
- O << E.second;
- else
- O << "0";
-
- return O << ")";
-}
-
raw_ostream& operator<<(raw_ostream &O, std::pair<const MachineBasicBlock *, const MachineBasicBlock *> E) {
O << "(";
Modified: llvm/branches/AMDILBackend/lib/Analysis/RegionInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/Analysis/RegionInfo.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/Analysis/RegionInfo.cpp (original)
+++ llvm/branches/AMDILBackend/lib/Analysis/RegionInfo.cpp Tue Jan 15 11:16:16 2013
@@ -47,7 +47,7 @@
cl::values(
clEnumValN(Region::PrintNone, "none", "print no details"),
clEnumValN(Region::PrintBB, "bb",
- "print regions in detail with block_node_iterator"),
+ "print regions in detail with block_iterator"),
clEnumValN(Region::PrintRN, "rn",
"print regions in detail with element_iterator"),
clEnumValEnd));
@@ -246,22 +246,6 @@
verifyRegion();
}
-Region::block_node_iterator Region::block_node_begin() {
- return GraphTraits<FlatIt<Region*> >::nodes_begin(this);
-}
-
-Region::block_node_iterator Region::block_node_end() {
- return GraphTraits<FlatIt<Region*> >::nodes_end(this);
-}
-
-Region::const_block_node_iterator Region::block_node_begin() const {
- return GraphTraits<FlatIt<const Region*> >::nodes_begin(this);
-}
-
-Region::const_block_node_iterator Region::block_node_end() const {
- return GraphTraits<FlatIt<const Region*> >::nodes_end(this);
-}
-
Region::element_iterator Region::element_begin() {
return GraphTraits<Region*>::nodes_begin(this);
}
@@ -425,10 +409,8 @@
OS.indent(level*2 + 2);
if (Style == PrintBB) {
- for (const_block_node_iterator I = block_node_begin(),
- E = block_node_end();
- I != E; ++I)
- OS << **I << ", "; // TODO: remove the last ","
+ for (const_block_iterator I = block_begin(), E = block_end(); I != E; ++I)
+ OS << (*I)->getName() << ", "; // TODO: remove the last ","
} else if (Style == PrintRN) {
for (const_element_iterator I = element_begin(), E = element_end(); I!=E; ++I)
OS << **I << ", "; // TODO: remove the last ","
@@ -445,9 +427,11 @@
OS.indent(level*2) << "} \n";
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void Region::dump() const {
print(dbgs(), true, getDepth(), printStyle.getValue());
}
+#endif
void Region::clearNodeCache() {
// Free the cached nodes.
Modified: llvm/branches/AMDILBackend/lib/Analysis/RegionPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/Analysis/RegionPass.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/Analysis/RegionPass.cpp (original)
+++ llvm/branches/AMDILBackend/lib/Analysis/RegionPass.cpp Tue Jan 15 11:16:16 2013
@@ -195,10 +195,9 @@
virtual bool runOnRegion(Region *R, RGPassManager &RGM) {
Out << Banner;
- for (Region::block_node_iterator I = R->block_node_begin(),
- E = R->block_node_end();
+ for (Region::block_iterator I = R->block_begin(), E = R->block_end();
I != E; ++I)
- (*I)->getEntry()->print(Out);
+ (*I)->print(Out);
return false;
}
Modified: llvm/branches/AMDILBackend/lib/Analysis/ScalarEvolution.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/Analysis/ScalarEvolution.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/Analysis/ScalarEvolution.cpp (original)
+++ llvm/branches/AMDILBackend/lib/Analysis/ScalarEvolution.cpp Tue Jan 15 11:16:16 2013
@@ -73,7 +73,7 @@
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ConstantRange.h"
@@ -105,6 +105,11 @@
"derived loop"),
cl::init(100));
+// FIXME: Enable this with XDEBUG when the test suite is clean.
+static cl::opt<bool>
+VerifySCEV("verify-scev",
+ cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
+
INITIALIZE_PASS_BEGIN(ScalarEvolution, "scalar-evolution",
"Scalar Evolution Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
@@ -122,10 +127,12 @@
// Implementation of the SCEV class.
//
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void SCEV::dump() const {
print(dbgs());
dbgs() << '\n';
}
+#endif
void SCEV::print(raw_ostream &OS) const {
switch (getSCEVType()) {
@@ -2580,7 +2587,7 @@
}
const SCEV *ScalarEvolution::getSizeOfExpr(Type *AllocTy) {
- // If we have TargetData, we can bypass creating a target-independent
+ // If we have DataLayout, we can bypass creating a target-independent
// constant expression and then folding it back into a ConstantInt.
// This is just a compile-time optimization.
if (TD)
@@ -2606,7 +2613,7 @@
const SCEV *ScalarEvolution::getOffsetOfExpr(StructType *STy,
unsigned FieldNo) {
- // If we have TargetData, we can bypass creating a target-independent
+ // If we have DataLayout, we can bypass creating a target-independent
// constant expression and then folding it back into a ConstantInt.
// This is just a compile-time optimization.
if (TD)
@@ -2671,7 +2678,7 @@
uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
assert(isSCEVable(Ty) && "Type is not SCEVable!");
- // If we have a TargetData, use it!
+ // If we have a DataLayout, use it!
if (TD)
return TD->getTypeSizeInBits(Ty);
@@ -2679,7 +2686,7 @@
if (Ty->isIntegerTy())
return Ty->getPrimitiveSizeInBits();
- // The only other support type is pointer. Without TargetData, conservatively
+ // The only other supported type is pointer. Without DataLayout, conservatively
// assume pointers are 64-bit.
assert(Ty->isPointerTy() && "isSCEVable permitted a non-SCEVable type!");
return 64;
@@ -2699,7 +2706,7 @@
assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
if (TD) return TD->getIntPtrType(getContext());
- // Without TargetData, conservatively assume pointers are 64-bit.
+ // Without DataLayout, conservatively assume pointers are 64-bit.
return Type::getInt64Ty(getContext());
}
@@ -3978,8 +3985,11 @@
ConstantInt *Result = MulC->getValue();
- // Guard against huge trip counts.
- if (!Result || Result->getValue().getActiveBits() > 32)
+ // Guard against huge trip counts (this requires checking
+ // for zero to handle the case where the trip count == -1 and the
+ // addition wraps).
+ if (!Result || Result->getValue().getActiveBits() > 32 ||
+ Result->getValue().getActiveBits() == 0)
return 1;
return (unsigned)Result->getZExtValue();
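The new getActiveBits() == 0 case covers the wrap: a backedge-taken count of
-1 gives a trip count of -1 + 1 == 0, and APInt reports zero active bits for
the value 0. A quick check of that arithmetic (assumes the usual
llvm/ADT/APInt.h):

    #include "llvm/ADT/APInt.h"
    #include <cassert>
    using llvm::APInt;

    int main() {
      APInt BTC(32, -1, /*isSigned=*/true);  // backedge-taken count
      APInt Trip = BTC + APInt(32, 1);       // wraps around to 0
      assert(Trip.getActiveBits() == 0);     // caught by the new guard
      return 0;
    }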
@@ -4749,7 +4759,7 @@
/// reason, return null.
static Constant *EvaluateExpression(Value *V, const Loop *L,
DenseMap<Instruction *, Constant *> &Vals,
- const TargetData *TD,
+ const DataLayout *TD,
const TargetLibraryInfo *TLI) {
// Convenient constant check, but redundant for recursive calls.
if (Constant *C = dyn_cast<Constant>(V)) return C;
@@ -6141,7 +6151,7 @@
return CmpInst::isTrueWhenEqual(Pred);
if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
if (FoundLHS == FoundRHS)
- return CmpInst::isFalseWhenEqual(Pred);
+ return CmpInst::isFalseWhenEqual(FoundPred);
// Check to see if we can make the LHS or RHS match.
if (LHS == FoundRHS || RHS == FoundLHS) {
@@ -6588,7 +6598,7 @@
bool ScalarEvolution::runOnFunction(Function &F) {
this->F = &F;
LI = &getAnalysis<LoopInfo>();
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
TLI = &getAnalysis<TargetLibraryInfo>();
DT = &getAnalysis<DominatorTree>();
return false;
@@ -6930,3 +6940,87 @@
UnsignedRanges.erase(S);
SignedRanges.erase(S);
}
+
+typedef DenseMap<const Loop *, std::string> VerifyMap;
+
+/// replaceSubString - Replaces all occurrences of From in Str with To.
+static void replaceSubString(std::string &Str, StringRef From, StringRef To) {
+ size_t Pos = 0;
+ while ((Pos = Str.find(From, Pos)) != std::string::npos) {
+ Str.replace(Pos, From.size(), To.data(), To.size());
+ Pos += To.size();
+ }
+}
+
+/// getLoopBackedgeTakenCounts - Helper method for verifyAnalysis.
+static void
+getLoopBackedgeTakenCounts(Loop *L, VerifyMap &Map, ScalarEvolution &SE) {
+ for (Loop::reverse_iterator I = L->rbegin(), E = L->rend(); I != E; ++I) {
+ getLoopBackedgeTakenCounts(*I, Map, SE); // recurse.
+
+ std::string &S = Map[L];
+ if (S.empty()) {
+ raw_string_ostream OS(S);
+ SE.getBackedgeTakenCount(L)->print(OS);
+
+ // false and 0 are semantically equivalent. This can happen in dead loops.
+ replaceSubString(OS.str(), "false", "0");
+ // Remove wrap flags, their use in SCEV is highly fragile.
+ // FIXME: Remove this when SCEV gets smarter about them.
+ replaceSubString(OS.str(), "<nw>", "");
+ replaceSubString(OS.str(), "<nsw>", "");
+ replaceSubString(OS.str(), "<nuw>", "");
+ }
+ }
+}
+
+void ScalarEvolution::verifyAnalysis() const {
+ if (!VerifySCEV)
+ return;
+
+ ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
+
+ // Gather stringified backedge taken counts for all loops using SCEV's caches.
+ // FIXME: It would be much better to store actual values instead of strings,
+ // but SCEV pointers will change if we drop the caches.
+ VerifyMap BackedgeDumpsOld, BackedgeDumpsNew;
+ for (LoopInfo::reverse_iterator I = LI->rbegin(), E = LI->rend(); I != E; ++I)
+ getLoopBackedgeTakenCounts(*I, BackedgeDumpsOld, SE);
+
+ // Gather stringified backedge taken counts for all loops without using
+ // SCEV's caches.
+ SE.releaseMemory();
+ for (LoopInfo::reverse_iterator I = LI->rbegin(), E = LI->rend(); I != E; ++I)
+ getLoopBackedgeTakenCounts(*I, BackedgeDumpsNew, SE);
+
+ // Now compare whether they're the same with and without caches. This allows
+ // verifying that no pass changed the cache.
+ assert(BackedgeDumpsOld.size() == BackedgeDumpsNew.size() &&
+ "New loops suddenly appeared!");
+
+ for (VerifyMap::iterator OldI = BackedgeDumpsOld.begin(),
+ OldE = BackedgeDumpsOld.end(),
+ NewI = BackedgeDumpsNew.begin();
+ OldI != OldE; ++OldI, ++NewI) {
+ assert(OldI->first == NewI->first && "Loop order changed!");
+
+ // Compare the stringified SCEVs. We don't care if an undef backedge-taken count
+ // changes.
+ // FIXME: We currently ignore SCEV changes from/to CouldNotCompute. This
+ // means that a pass is buggy or SCEV has to learn a new pattern but is
+ // usually not harmful.
+ if (OldI->second != NewI->second &&
+ OldI->second.find("undef") == std::string::npos &&
+ NewI->second.find("undef") == std::string::npos &&
+ OldI->second != "***COULDNOTCOMPUTE***" &&
+ NewI->second != "***COULDNOTCOMPUTE***") {
+ dbgs() << "SCEVValidator: SCEV for loop '"
+ << OldI->first->getHeader()->getName()
+ << "' changed from '" << OldI->second
+ << "' to '" << NewI->second << "'!\n";
+ std::abort();
+ }
+ }
+
+ // TODO: Verify more things.
+}
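In replaceSubString, advancing Pos by To.size() is what keeps the loop from
re-matching inside text it just inserted. A self-contained replica with a
quick check (the SCEV string is made up); the verifier itself is off by
default and presumably enabled by passing -verify-scev to opt, per the cl::opt
declared above:

    #include <cassert>
    #include <string>

    static void replaceAll(std::string &S, const std::string &From,
                           const std::string &To) {
      size_t Pos = 0;
      while ((Pos = S.find(From, Pos)) != std::string::npos) {
        S.replace(Pos, From.size(), To);
        Pos += To.size();  // skip past the replacement text
      }
    }

    int main() {
      std::string S = "{0,+,1}<nw><nsw>";
      replaceAll(S, "<nw>", "");
      replaceAll(S, "<nsw>", "");
      assert(S == "{0,+,1}");
      return 0;
    }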
Modified: llvm/branches/AMDILBackend/lib/Analysis/ScalarEvolutionExpander.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/Analysis/ScalarEvolutionExpander.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/Analysis/ScalarEvolutionExpander.cpp (original)
+++ llvm/branches/AMDILBackend/lib/Analysis/ScalarEvolutionExpander.cpp Tue Jan 15 11:16:16 2013
@@ -18,7 +18,7 @@
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/STLExtras.h"
@@ -212,7 +212,7 @@
const SCEV *&Remainder,
const SCEV *Factor,
ScalarEvolution &SE,
- const TargetData *TD) {
+ const DataLayout *TD) {
// Everything is divisible by one.
if (Factor->isOne())
return true;
@@ -253,7 +253,7 @@
// of the given factor.
if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
if (TD) {
- // With TargetData, the size is known. Check if there is a constant
+ // With DataLayout, the size is known. Check if there is a constant
// operand which is a multiple of the given factor. If so, we can
// factor it.
const SCEVConstant *FC = cast<SCEVConstant>(Factor);
@@ -267,7 +267,7 @@
return true;
}
} else {
- // Without TargetData, check if Factor can be factored out of any of the
+ // Without DataLayout, check if Factor can be factored out of any of the
// Mul's operands. If so, we can just remove it.
for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
const SCEV *SOp = M->getOperand(i);
@@ -458,7 +458,7 @@
// An empty struct has no fields.
if (STy->getNumElements() == 0) break;
if (SE.TD) {
- // With TargetData, field offsets are known. See if a constant offset
+ // With DataLayout, field offsets are known. See if a constant offset
// falls within any of the struct fields.
if (Ops.empty()) break;
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
@@ -477,7 +477,7 @@
}
}
} else {
- // Without TargetData, just check for an offsetof expression of the
+ // Without DataLayout, just check for an offsetof expression of the
// appropriate struct type.
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Ops[i])) {
@@ -1618,6 +1618,17 @@
PEnd = Phis.end(); PIter != PEnd; ++PIter) {
PHINode *Phi = *PIter;
+ // Fold constant phis. They may be congruent to other constant phis and
+ // would confuse the logic below that expects proper IVs.
+ if (Value *V = Phi->hasConstantValue()) {
+ Phi->replaceAllUsesWith(V);
+ DeadInsts.push_back(Phi);
+ ++NumElim;
+ DEBUG_WITH_TYPE(DebugType, dbgs()
+ << "INDVARS: Eliminated constant iv: " << *Phi << '\n');
+ continue;
+ }
+
if (!SE.isSCEVable(Phi->getType()))
continue;
Modified: llvm/branches/AMDILBackend/lib/Analysis/Trace.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/Analysis/Trace.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/Analysis/Trace.cpp (original)
+++ llvm/branches/AMDILBackend/lib/Analysis/Trace.cpp Tue Jan 15 11:16:16 2013
@@ -43,9 +43,11 @@
O << "; Trace parent function: \n" << *F;
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// dump - Debugger convenience method; writes trace to standard error
/// output stream.
///
void Trace::dump() const {
print(dbgs());
}
+#endif
Modified: llvm/branches/AMDILBackend/lib/Analysis/ValueTracking.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/Analysis/ValueTracking.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/Analysis/ValueTracking.cpp (original)
+++ llvm/branches/AMDILBackend/lib/Analysis/ValueTracking.cpp Tue Jan 15 11:16:16 2013
@@ -22,7 +22,7 @@
#include "llvm/LLVMContext.h"
#include "llvm/Metadata.h"
#include "llvm/Operator.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
@@ -36,7 +36,7 @@
/// getBitWidth - Returns the bitwidth of the given scalar or pointer type (if
/// unknown returns 0). For vector types, returns the element type's bitwidth.
-static unsigned getBitWidth(Type *Ty, const TargetData *TD) {
+static unsigned getBitWidth(Type *Ty, const DataLayout *TD) {
if (unsigned BitWidth = Ty->getScalarSizeInBits())
return BitWidth;
assert(isa<PointerType>(Ty) && "Expected a pointer type!");
@@ -46,7 +46,7 @@
static void ComputeMaskedBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
APInt &KnownZero, APInt &KnownOne,
APInt &KnownZero2, APInt &KnownOne2,
- const TargetData *TD, unsigned Depth) {
+ const DataLayout *TD, unsigned Depth) {
if (!Add) {
if (ConstantInt *CLHS = dyn_cast<ConstantInt>(Op0)) {
// We know that the top bits of C-X are clear if X contains fewer bits
@@ -132,7 +132,7 @@
static void ComputeMaskedBitsMul(Value *Op0, Value *Op1, bool NSW,
APInt &KnownZero, APInt &KnownOne,
APInt &KnownZero2, APInt &KnownOne2,
- const TargetData *TD, unsigned Depth) {
+ const DataLayout *TD, unsigned Depth) {
unsigned BitWidth = KnownZero.getBitWidth();
ComputeMaskedBits(Op1, KnownZero, KnownOne, TD, Depth+1);
ComputeMaskedBits(Op0, KnownZero2, KnownOne2, TD, Depth+1);
@@ -226,7 +226,7 @@
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
- const TargetData *TD, unsigned Depth) {
+ const DataLayout *TD, unsigned Depth) {
assert(V && "No Value?");
assert(Depth <= MaxDepth && "Limit Search Depth");
unsigned BitWidth = KnownZero.getBitWidth();
@@ -308,11 +308,20 @@
}
if (Argument *A = dyn_cast<Argument>(V)) {
- // Get alignment information off byval arguments if specified in the IR.
- if (A->hasByValAttr())
- if (unsigned Align = A->getParamAlignment())
- KnownZero = APInt::getLowBitsSet(BitWidth,
- CountTrailingZeros_32(Align));
+ unsigned Align = 0;
+
+ if (A->hasByValAttr()) {
+ // Get alignment information off byval arguments if specified in the IR.
+ Align = A->getParamAlignment();
+ } else if (TD && A->hasStructRetAttr()) {
+ // An sret parameter has at least the ABI alignment of the return type.
+ Type *EltTy = cast<PointerType>(A->getType())->getElementType();
+ if (EltTy->isSized())
+ Align = TD->getABITypeAlignment(EltTy);
+ }
+
+ if (Align)
+ KnownZero = APInt::getLowBitsSet(BitWidth, CountTrailingZeros_32(Align));
return;
}
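The sret case above feeds an ABI alignment into the same KnownZero computation
as byval: an alignment of 2^k guarantees the low k address bits are zero. The
arithmetic, using the GCC/Clang builtin in place of LLVM's
CountTrailingZeros_32:

    #include <cassert>

    int main() {
      unsigned Align = 8;                      // 2^3-byte alignment
      unsigned K = __builtin_ctz(Align);       // 3 trailing zero bits
      unsigned KnownZeroMask = (1u << K) - 1;  // 0b111: bits known zero
      assert(K == 3 && KnownZeroMask == 7);
      return 0;
    }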
@@ -420,15 +429,13 @@
case Instruction::ZExt:
case Instruction::Trunc: {
Type *SrcTy = I->getOperand(0)->getType();
-
+
unsigned SrcBitWidth;
// Note that we handle pointer operands here because of inttoptr/ptrtoint
// which fall through here.
- if (SrcTy->isPointerTy())
- SrcBitWidth = TD->getTypeSizeInBits(SrcTy);
- else
- SrcBitWidth = SrcTy->getScalarSizeInBits();
-
+ SrcBitWidth = TD->getTypeSizeInBits(SrcTy->getScalarType());
+
+ assert(SrcBitWidth && "SrcBitWidth can't be zero");
KnownZero = KnownZero.zextOrTrunc(SrcBitWidth);
KnownOne = KnownOne.zextOrTrunc(SrcBitWidth);
ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
@@ -778,7 +785,7 @@
/// ComputeSignBit - Determine whether the sign bit is known to be zero or
/// one. Convenience wrapper around ComputeMaskedBits.
void llvm::ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
- const TargetData *TD, unsigned Depth) {
+ const DataLayout *TD, unsigned Depth) {
unsigned BitWidth = getBitWidth(V->getType(), TD);
if (!BitWidth) {
KnownZero = false;
@@ -796,7 +803,7 @@
/// bit set when defined. For vectors return true if every element is known to
/// be a power of two when defined. Supports values with integer or pointer
/// types and vectors of integers.
-bool llvm::isPowerOfTwo(Value *V, const TargetData *TD, bool OrZero,
+bool llvm::isPowerOfTwo(Value *V, const DataLayout *TD, bool OrZero,
unsigned Depth) {
if (Constant *C = dyn_cast<Constant>(V)) {
if (C->isNullValue())
@@ -859,7 +866,7 @@
/// when defined. For vectors return true if every element is known to be
/// non-zero when defined. Supports values with integer or pointer type and
/// vectors of integers.
-bool llvm::isKnownNonZero(Value *V, const TargetData *TD, unsigned Depth) {
+bool llvm::isKnownNonZero(Value *V, const DataLayout *TD, unsigned Depth) {
if (Constant *C = dyn_cast<Constant>(V)) {
if (C->isNullValue())
return false;
@@ -986,7 +993,7 @@
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask,
- const TargetData *TD, unsigned Depth) {
+ const DataLayout *TD, unsigned Depth) {
APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0);
ComputeMaskedBits(V, KnownZero, KnownOne, TD, Depth);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
@@ -1003,10 +1010,10 @@
///
/// 'Op' must have a scalar integer type.
///
-unsigned llvm::ComputeNumSignBits(Value *V, const TargetData *TD,
+unsigned llvm::ComputeNumSignBits(Value *V, const DataLayout *TD,
unsigned Depth) {
assert((TD || V->getType()->isIntOrIntVectorTy()) &&
- "ComputeNumSignBits requires a TargetData object to operate "
+ "ComputeNumSignBits requires a DataLayout object to operate "
"on non-integer values!");
Type *Ty = V->getType();
unsigned TyBits = TD ? TD->getTypeSizeInBits(V->getType()->getScalarType()) :
@@ -1582,7 +1589,7 @@
/// it can be expressed as a base pointer plus a constant offset. Return the
/// base and offset to the caller.
Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
- const TargetData &TD) {
+ const DataLayout &TD) {
Operator *PtrOp = dyn_cast<Operator>(Ptr);
if (PtrOp == 0 || Ptr->getType()->isVectorTy())
return Ptr;
@@ -1614,7 +1621,7 @@
// right.
unsigned PtrSize = TD.getPointerSizeInBits();
if (PtrSize < 64)
- Offset = (Offset << (64-PtrSize)) >> (64-PtrSize);
+ Offset = SignExtend64(Offset, PtrSize);
return GetPointerBaseWithConstantOffset(GEP->getPointerOperand(), Offset, TD);
}
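SignExtend64 replaces the open-coded shift pair; both sign-extend the low
PtrSize bits of a 64-bit offset, but the helper shifts left in unsigned
arithmetic first, avoiding the signed-overflow trap the old code risked. A
sketch of the equivalent computation:

    #include <cassert>
    #include <cstdint>

    // Shift left as unsigned (well-defined), then arithmetic-shift back.
    static int64_t signExtend(uint64_t V, unsigned Bits) {
      return int64_t(V << (64 - Bits)) >> (64 - Bits);
    }

    int main() {
      assert(signExtend(0xFFFFFFFFULL, 32) == -1);  // 32-bit -1
      assert(signExtend(0x7FFFFFFFULL, 32) == 0x7FFFFFFFLL);
      return 0;
    }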
@@ -1768,7 +1775,7 @@
}
Value *
-llvm::GetUnderlyingObject(Value *V, const TargetData *TD, unsigned MaxLookup) {
+llvm::GetUnderlyingObject(Value *V, const DataLayout *TD, unsigned MaxLookup) {
if (!V->getType()->isPointerTy())
return V;
for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
@@ -1799,7 +1806,7 @@
void
llvm::GetUnderlyingObjects(Value *V,
SmallVectorImpl<Value *> &Objects,
- const TargetData *TD,
+ const DataLayout *TD,
unsigned MaxLookup) {
SmallPtrSet<Value *, 4> Visited;
SmallVector<Value *, 4> Worklist;
@@ -1844,7 +1851,7 @@
}
bool llvm::isSafeToSpeculativelyExecute(const Value *V,
- const TargetData *TD) {
+ const DataLayout *TD) {
const Operator *Inst = dyn_cast<Operator>(V);
if (!Inst)
return false;
Modified: llvm/branches/AMDILBackend/lib/Archive/ArchiveInternals.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/Archive/ArchiveInternals.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/Archive/ArchiveInternals.h (original)
+++ llvm/branches/AMDILBackend/lib/Archive/ArchiveInternals.h Tue Jan 15 11:16:16 2013
@@ -66,7 +66,7 @@
fmag[1] = '\n';
}
- bool checkSignature() {
+ bool checkSignature() const {
return 0 == memcmp(fmag, ARFILE_MEMBER_MAGIC,2);
}
};
Modified: llvm/branches/AMDILBackend/lib/Archive/ArchiveReader.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/Archive/ArchiveReader.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/Archive/ArchiveReader.cpp (original)
+++ llvm/branches/AMDILBackend/lib/Archive/ArchiveReader.cpp Tue Jan 15 11:16:16 2013
@@ -79,7 +79,7 @@
}
// Cast archive member header
- ArchiveMemberHeader* Hdr = (ArchiveMemberHeader*)At;
+ const ArchiveMemberHeader* Hdr = (const ArchiveMemberHeader*)At;
At += sizeof(ArchiveMemberHeader);
int flags = 0;
@@ -196,7 +196,7 @@
/* FALL THROUGH */
default:
- char* slash = (char*) memchr(Hdr->name, '/', 16);
+ const char* slash = (const char*) memchr(Hdr->name, '/', 16);
if (slash == 0)
slash = Hdr->name + 16;
pathname.assign(Hdr->name, slash - Hdr->name);
Modified: llvm/branches/AMDILBackend/lib/AsmParser/LLLexer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/AsmParser/LLLexer.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/AsmParser/LLLexer.cpp (original)
+++ llvm/branches/AMDILBackend/lib/AsmParser/LLLexer.cpp Tue Jan 15 11:16:16 2013
@@ -456,11 +456,12 @@
KEYWORD(private);
KEYWORD(linker_private);
KEYWORD(linker_private_weak);
- KEYWORD(linker_private_weak_def_auto);
+ KEYWORD(linker_private_weak_def_auto); // FIXME: For backwards compatibility.
KEYWORD(internal);
KEYWORD(available_externally);
KEYWORD(linkonce);
KEYWORD(linkonce_odr);
+ KEYWORD(linkonce_odr_auto_hide);
KEYWORD(weak);
KEYWORD(weak_odr);
KEYWORD(appending);
@@ -509,6 +510,7 @@
KEYWORD(asm);
KEYWORD(sideeffect);
KEYWORD(alignstack);
+ KEYWORD(inteldialect);
KEYWORD(gc);
KEYWORD(ccc);
@@ -523,6 +525,9 @@
KEYWORD(msp430_intrcc);
KEYWORD(ptx_kernel);
KEYWORD(ptx_device);
+ KEYWORD(spir_kernel);
+ KEYWORD(spir_func);
+ KEYWORD(intel_ocl_bicc);
KEYWORD(cc);
KEYWORD(c);
@@ -553,7 +558,7 @@
KEYWORD(naked);
KEYWORD(nonlazybind);
KEYWORD(address_safety);
- KEYWORD(ia_nsdialect);
+ KEYWORD(minsize);
KEYWORD(type);
KEYWORD(opaque);
Modified: llvm/branches/AMDILBackend/lib/AsmParser/LLParser.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/AsmParser/LLParser.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/AsmParser/LLParser.cpp (original)
+++ llvm/branches/AMDILBackend/lib/AsmParser/LLParser.cpp Tue Jan 15 11:16:16 2013
@@ -184,12 +184,13 @@
case lltok::kw_private: // OptionalLinkage
case lltok::kw_linker_private: // OptionalLinkage
case lltok::kw_linker_private_weak: // OptionalLinkage
- case lltok::kw_linker_private_weak_def_auto: // OptionalLinkage
+ case lltok::kw_linker_private_weak_def_auto: // FIXME: backwards compat.
case lltok::kw_internal: // OptionalLinkage
case lltok::kw_weak: // OptionalLinkage
case lltok::kw_weak_odr: // OptionalLinkage
case lltok::kw_linkonce: // OptionalLinkage
case lltok::kw_linkonce_odr: // OptionalLinkage
+ case lltok::kw_linkonce_odr_auto_hide: // OptionalLinkage
case lltok::kw_appending: // OptionalLinkage
case lltok::kw_dllexport: // OptionalLinkage
case lltok::kw_common: // OptionalLinkage
@@ -576,8 +577,7 @@
Linkage != GlobalValue::InternalLinkage &&
Linkage != GlobalValue::PrivateLinkage &&
Linkage != GlobalValue::LinkerPrivateLinkage &&
- Linkage != GlobalValue::LinkerPrivateWeakLinkage &&
- Linkage != GlobalValue::LinkerPrivateWeakDefAutoLinkage)
+ Linkage != GlobalValue::LinkerPrivateWeakLinkage)
return Error(LinkageLoc, "invalid linkage type for alias");
Constant *Aliasee;
@@ -779,7 +779,9 @@
FwdVal = Function::Create(FT, GlobalValue::ExternalWeakLinkage, Name, M);
else
FwdVal = new GlobalVariable(*M, PTy->getElementType(), false,
- GlobalValue::ExternalWeakLinkage, 0, Name);
+ GlobalValue::ExternalWeakLinkage, 0, Name,
+ 0, GlobalVariable::NotThreadLocal,
+ PTy->getAddressSpace());
ForwardRefVals[Name] = std::make_pair(FwdVal, Loc);
return FwdVal;
@@ -916,59 +918,50 @@
/// ParseOptionalAttrs - Parse a potentially empty attribute list. AttrKind
/// indicates what kind of attribute list this is: 0: function arg, 1: result,
/// 2: function attr.
-bool LLParser::ParseOptionalAttrs(Attributes &Attrs, unsigned AttrKind) {
- Attrs = Attribute::None;
+bool LLParser::ParseOptionalAttrs(AttrBuilder &B, unsigned AttrKind) {
LocTy AttrLoc = Lex.getLoc();
+ bool HaveError = false;
+
+ B.clear();
while (1) {
- switch (Lex.getKind()) {
+ lltok::Kind Token = Lex.getKind();
+ switch (Token) {
default: // End of attributes.
- if (AttrKind != 2 && (Attrs & Attribute::FunctionOnly))
- return Error(AttrLoc, "invalid use of function-only attribute");
-
- // As a hack, we allow "align 2" on functions as a synonym for
- // "alignstack 2".
- if (AttrKind == 2 &&
- (Attrs & ~(Attribute::FunctionOnly | Attribute::Alignment)))
- return Error(AttrLoc, "invalid use of attribute on a function");
-
- if (AttrKind != 0 && (Attrs & Attribute::ParameterOnly))
- return Error(AttrLoc, "invalid use of parameter-only attribute");
-
- return false;
- case lltok::kw_zeroext: Attrs |= Attribute::ZExt; break;
- case lltok::kw_signext: Attrs |= Attribute::SExt; break;
- case lltok::kw_inreg: Attrs |= Attribute::InReg; break;
- case lltok::kw_sret: Attrs |= Attribute::StructRet; break;
- case lltok::kw_noalias: Attrs |= Attribute::NoAlias; break;
- case lltok::kw_nocapture: Attrs |= Attribute::NoCapture; break;
- case lltok::kw_byval: Attrs |= Attribute::ByVal; break;
- case lltok::kw_nest: Attrs |= Attribute::Nest; break;
-
- case lltok::kw_noreturn: Attrs |= Attribute::NoReturn; break;
- case lltok::kw_nounwind: Attrs |= Attribute::NoUnwind; break;
- case lltok::kw_uwtable: Attrs |= Attribute::UWTable; break;
- case lltok::kw_returns_twice: Attrs |= Attribute::ReturnsTwice; break;
- case lltok::kw_noinline: Attrs |= Attribute::NoInline; break;
- case lltok::kw_readnone: Attrs |= Attribute::ReadNone; break;
- case lltok::kw_readonly: Attrs |= Attribute::ReadOnly; break;
- case lltok::kw_inlinehint: Attrs |= Attribute::InlineHint; break;
- case lltok::kw_alwaysinline: Attrs |= Attribute::AlwaysInline; break;
- case lltok::kw_optsize: Attrs |= Attribute::OptimizeForSize; break;
- case lltok::kw_ssp: Attrs |= Attribute::StackProtect; break;
- case lltok::kw_sspreq: Attrs |= Attribute::StackProtectReq; break;
- case lltok::kw_noredzone: Attrs |= Attribute::NoRedZone; break;
- case lltok::kw_noimplicitfloat: Attrs |= Attribute::NoImplicitFloat; break;
- case lltok::kw_naked: Attrs |= Attribute::Naked; break;
- case lltok::kw_nonlazybind: Attrs |= Attribute::NonLazyBind; break;
- case lltok::kw_address_safety: Attrs |= Attribute::AddressSafety; break;
- case lltok::kw_ia_nsdialect: Attrs |= Attribute::IANSDialect; break;
+ return HaveError;
+ case lltok::kw_zeroext: B.addAttribute(Attributes::ZExt); break;
+ case lltok::kw_signext: B.addAttribute(Attributes::SExt); break;
+ case lltok::kw_inreg: B.addAttribute(Attributes::InReg); break;
+ case lltok::kw_sret: B.addAttribute(Attributes::StructRet); break;
+ case lltok::kw_noalias: B.addAttribute(Attributes::NoAlias); break;
+ case lltok::kw_nocapture: B.addAttribute(Attributes::NoCapture); break;
+ case lltok::kw_byval: B.addAttribute(Attributes::ByVal); break;
+ case lltok::kw_nest: B.addAttribute(Attributes::Nest); break;
+
+ case lltok::kw_noreturn: B.addAttribute(Attributes::NoReturn); break;
+ case lltok::kw_nounwind: B.addAttribute(Attributes::NoUnwind); break;
+ case lltok::kw_uwtable: B.addAttribute(Attributes::UWTable); break;
+ case lltok::kw_returns_twice: B.addAttribute(Attributes::ReturnsTwice); break;
+ case lltok::kw_noinline: B.addAttribute(Attributes::NoInline); break;
+ case lltok::kw_readnone: B.addAttribute(Attributes::ReadNone); break;
+ case lltok::kw_readonly: B.addAttribute(Attributes::ReadOnly); break;
+ case lltok::kw_inlinehint: B.addAttribute(Attributes::InlineHint); break;
+ case lltok::kw_alwaysinline: B.addAttribute(Attributes::AlwaysInline); break;
+ case lltok::kw_optsize: B.addAttribute(Attributes::OptimizeForSize); break;
+ case lltok::kw_ssp: B.addAttribute(Attributes::StackProtect); break;
+ case lltok::kw_sspreq: B.addAttribute(Attributes::StackProtectReq); break;
+ case lltok::kw_noredzone: B.addAttribute(Attributes::NoRedZone); break;
+ case lltok::kw_noimplicitfloat: B.addAttribute(Attributes::NoImplicitFloat); break;
+ case lltok::kw_naked: B.addAttribute(Attributes::Naked); break;
+ case lltok::kw_nonlazybind: B.addAttribute(Attributes::NonLazyBind); break;
+ case lltok::kw_address_safety: B.addAttribute(Attributes::AddressSafety); break;
+ case lltok::kw_minsize: B.addAttribute(Attributes::MinSize); break;
case lltok::kw_alignstack: {
unsigned Alignment;
if (ParseOptionalStackAlignment(Alignment))
return true;
- Attrs |= Attribute::constructStackAlignmentFromInt(Alignment);
+ B.addStackAlignmentAttr(Alignment);
continue;
}
@@ -976,11 +969,57 @@
unsigned Alignment;
if (ParseOptionalAlignment(Alignment))
return true;
- Attrs |= Attribute::constructAlignmentFromInt(Alignment);
+ B.addAlignmentAttr(Alignment);
continue;
}
}
+
+ // Perform some error checking.
+ switch (Token) {
+ default:
+ if (AttrKind == 2)
+ HaveError |= Error(AttrLoc, "invalid use of attribute on a function");
+ break;
+ case lltok::kw_align:
+ // As a hack, we allow "align 2" on functions as a synonym for
+ // "alignstack 2".
+ break;
+
+ // Parameter Only:
+ case lltok::kw_sret:
+ case lltok::kw_nocapture:
+ case lltok::kw_byval:
+ case lltok::kw_nest:
+ if (AttrKind != 0)
+ HaveError |= Error(AttrLoc, "invalid use of parameter-only attribute");
+ break;
+
+ // Function Only:
+ case lltok::kw_noreturn:
+ case lltok::kw_nounwind:
+ case lltok::kw_readnone:
+ case lltok::kw_readonly:
+ case lltok::kw_noinline:
+ case lltok::kw_alwaysinline:
+ case lltok::kw_optsize:
+ case lltok::kw_ssp:
+ case lltok::kw_sspreq:
+ case lltok::kw_noredzone:
+ case lltok::kw_noimplicitfloat:
+ case lltok::kw_naked:
+ case lltok::kw_inlinehint:
+ case lltok::kw_alignstack:
+ case lltok::kw_uwtable:
+ case lltok::kw_nonlazybind:
+ case lltok::kw_returns_twice:
+ case lltok::kw_address_safety:
+ case lltok::kw_minsize:
+ if (AttrKind != 2)
+ HaveError |= Error(AttrLoc, "invalid use of function-only attribute");
+ break;
+ }
+
Lex.Lex();
}
}
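ParseOptionalAttrs now accumulates into a mutable AttrBuilder, and the callers
freeze it into an immutable Attributes set via Attributes::get(Context, B)
instead of OR-ing bit masks directly. The pattern in isolation, a sketch
against the 3.2-era API used in this patch (makeFnAttrs is invented):

    #include "llvm/Attributes.h"
    using namespace llvm;

    // Accumulate mutably, then freeze into the immutable representation.
    static Attributes makeFnAttrs(LLVMContext &Ctx) {
      AttrBuilder B;
      B.addAttribute(Attributes::NoUnwind);
      B.addAttribute(Attributes::ReadOnly);
      B.addAlignmentAttr(16);  // same helper the parser uses for 'align'
      return Attributes::get(Ctx, B);
    }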
@@ -990,12 +1029,12 @@
/// ::= 'private'
/// ::= 'linker_private'
/// ::= 'linker_private_weak'
-/// ::= 'linker_private_weak_def_auto'
/// ::= 'internal'
/// ::= 'weak'
/// ::= 'weak_odr'
/// ::= 'linkonce'
/// ::= 'linkonce_odr'
+/// ::= 'linkonce_odr_auto_hide'
/// ::= 'available_externally'
/// ::= 'appending'
/// ::= 'dllexport'
@@ -1012,14 +1051,15 @@
case lltok::kw_linker_private_weak:
Res = GlobalValue::LinkerPrivateWeakLinkage;
break;
- case lltok::kw_linker_private_weak_def_auto:
- Res = GlobalValue::LinkerPrivateWeakDefAutoLinkage;
- break;
case lltok::kw_internal: Res = GlobalValue::InternalLinkage; break;
case lltok::kw_weak: Res = GlobalValue::WeakAnyLinkage; break;
case lltok::kw_weak_odr: Res = GlobalValue::WeakODRLinkage; break;
case lltok::kw_linkonce: Res = GlobalValue::LinkOnceAnyLinkage; break;
case lltok::kw_linkonce_odr: Res = GlobalValue::LinkOnceODRLinkage; break;
+ case lltok::kw_linkonce_odr_auto_hide:
+ case lltok::kw_linker_private_weak_def_auto: // FIXME: For backwards compat.
+ Res = GlobalValue::LinkOnceODRAutoHideLinkage;
+ break;
case lltok::kw_available_externally:
Res = GlobalValue::AvailableExternallyLinkage;
break;
@@ -1056,6 +1096,7 @@
/// ::= /*empty*/
/// ::= 'ccc'
/// ::= 'fastcc'
+/// ::= 'kw_intel_ocl_bicc'
/// ::= 'coldcc'
/// ::= 'x86_stdcallcc'
/// ::= 'x86_fastcallcc'
@@ -1066,6 +1107,8 @@
/// ::= 'msp430_intrcc'
/// ::= 'ptx_kernel'
/// ::= 'ptx_device'
+/// ::= 'spir_func'
+/// ::= 'spir_kernel'
/// ::= 'cc' UINT
///
bool LLParser::ParseOptionalCallingConv(CallingConv::ID &CC) {
@@ -1083,6 +1126,9 @@
case lltok::kw_msp430_intrcc: CC = CallingConv::MSP430_INTR; break;
case lltok::kw_ptx_kernel: CC = CallingConv::PTX_Kernel; break;
case lltok::kw_ptx_device: CC = CallingConv::PTX_Device; break;
+ case lltok::kw_spir_kernel: CC = CallingConv::SPIR_KERNEL; break;
+ case lltok::kw_spir_func: CC = CallingConv::SPIR_FUNC; break;
+ case lltok::kw_intel_ocl_bicc: CC = CallingConv::Intel_OCL_BI; break;
case lltok::kw_cc: {
unsigned ArbitraryCC;
Lex.Lex();
@@ -1395,16 +1441,16 @@
// Parse the argument.
LocTy ArgLoc;
Type *ArgTy = 0;
- Attributes ArgAttrs1;
- Attributes ArgAttrs2;
+ AttrBuilder ArgAttrs;
Value *V;
if (ParseType(ArgTy, ArgLoc))
return true;
// Otherwise, handle normal operands.
- if (ParseOptionalAttrs(ArgAttrs1, 0) || ParseValue(ArgTy, V, PFS))
+ if (ParseOptionalAttrs(ArgAttrs, 0) || ParseValue(ArgTy, V, PFS))
return true;
- ArgList.push_back(ParamInfo(ArgLoc, V, ArgAttrs1|ArgAttrs2));
+ ArgList.push_back(ParamInfo(ArgLoc, V, Attributes::get(V->getContext(),
+ ArgAttrs)));
}
Lex.Lex(); // Lex the ')'.
@@ -1436,7 +1482,7 @@
} else {
LocTy TypeLoc = Lex.getLoc();
Type *ArgTy = 0;
- Attributes Attrs;
+ AttrBuilder Attrs;
std::string Name;
if (ParseType(ArgTy) ||
@@ -1453,7 +1499,9 @@
if (!FunctionType::isValidArgumentType(ArgTy))
return Error(TypeLoc, "invalid type for function argument");
- ArgList.push_back(ArgInfo(TypeLoc, ArgTy, Attrs, Name));
+ ArgList.push_back(ArgInfo(TypeLoc, ArgTy,
+ Attributes::get(ArgTy->getContext(),
+ Attrs), Name));
while (EatIfPresent(lltok::comma)) {
// Handle ... at end of arg list.
@@ -1479,7 +1527,9 @@
if (!ArgTy->isFirstClassType())
return Error(TypeLoc, "invalid type for function argument");
- ArgList.push_back(ArgInfo(TypeLoc, ArgTy, Attrs, Name));
+ ArgList.push_back(ArgInfo(TypeLoc, ArgTy,
+ Attributes::get(ArgTy->getContext(), Attrs),
+ Name));
}
}
@@ -1503,7 +1553,7 @@
for (unsigned i = 0, e = ArgList.size(); i != e; ++i) {
if (!ArgList[i].Name.empty())
return Error(ArgList[i].Loc, "argument name invalid in function type");
- if (ArgList[i].Attrs)
+ if (ArgList[i].Attrs.hasAttributes())
return Error(ArgList[i].Loc,
"argument attributes invalid in function type");
}
@@ -2069,16 +2119,18 @@
case lltok::kw_asm: {
// ValID ::= 'asm' SideEffect? AlignStack? STRINGCONSTANT ',' STRINGCONSTANT
- bool HasSideEffect, AlignStack;
+ bool HasSideEffect, AlignStack, AsmDialect;
Lex.Lex();
if (ParseOptionalToken(lltok::kw_sideeffect, HasSideEffect) ||
ParseOptionalToken(lltok::kw_alignstack, AlignStack) ||
+ ParseOptionalToken(lltok::kw_inteldialect, AsmDialect) ||
ParseStringConstant(ID.StrVal) ||
ParseToken(lltok::comma, "expected comma in inline asm expression") ||
ParseToken(lltok::StringConstant, "expected constraint string"))
return true;
ID.StrVal2 = Lex.getStrVal();
- ID.UIntVal = unsigned(HasSideEffect) | (unsigned(AlignStack)<<1);
+ ID.UIntVal = unsigned(HasSideEffect) | (unsigned(AlignStack)<<1) |
+ (unsigned(AsmDialect)<<2);
ID.Kind = ValID::t_InlineAsm;
return false;
}
@@ -2495,7 +2547,8 @@
PTy ? dyn_cast<FunctionType>(PTy->getElementType()) : 0;
if (!FTy || !InlineAsm::Verify(FTy, ID.StrVal2))
return Error(ID.Loc, "invalid type for inline asm constraint string");
- V = InlineAsm::get(FTy, ID.StrVal, ID.StrVal2, ID.UIntVal&1, ID.UIntVal>>1);
+ V = InlineAsm::get(FTy, ID.StrVal, ID.StrVal2, ID.UIntVal&1,
+ (ID.UIntVal>>1)&1, (InlineAsm::AsmDialect(ID.UIntVal>>2)));
return false;
}
case ValID::t_MDNode:
@@ -2630,7 +2683,7 @@
unsigned Linkage;
unsigned Visibility;
- Attributes RetAttrs;
+ AttrBuilder RetAttrs;
CallingConv::ID CC;
Type *RetType = 0;
LocTy RetTypeLoc = Lex.getLoc();
@@ -2653,11 +2706,11 @@
case GlobalValue::PrivateLinkage:
case GlobalValue::LinkerPrivateLinkage:
case GlobalValue::LinkerPrivateWeakLinkage:
- case GlobalValue::LinkerPrivateWeakDefAutoLinkage:
case GlobalValue::InternalLinkage:
case GlobalValue::AvailableExternallyLinkage:
case GlobalValue::LinkOnceAnyLinkage:
case GlobalValue::LinkOnceODRLinkage:
+ case GlobalValue::LinkOnceODRAutoHideLinkage:
case GlobalValue::WeakAnyLinkage:
case GlobalValue::WeakODRLinkage:
case GlobalValue::DLLExportLinkage:
@@ -2694,7 +2747,7 @@
SmallVector<ArgInfo, 8> ArgList;
bool isVarArg;
- Attributes FuncAttrs;
+ AttrBuilder FuncAttrs;
std::string Section;
unsigned Alignment;
std::string GC;
@@ -2713,9 +2766,9 @@
return true;
// If the alignment was parsed as an attribute, move to the alignment field.
- if (FuncAttrs & Attribute::Alignment) {
- Alignment = Attribute::getAlignmentFromAttrs(FuncAttrs);
- FuncAttrs &= ~Attribute::Alignment;
+ if (FuncAttrs.hasAlignmentAttr()) {
+ Alignment = FuncAttrs.getAlignment();
+ FuncAttrs.removeAttribute(Attributes::Alignment);
}
// Okay, if we got here, the function is syntactically valid. Convert types
@@ -2723,21 +2776,28 @@
std::vector<Type*> ParamTypeList;
SmallVector<AttributeWithIndex, 8> Attrs;
- if (RetAttrs != Attribute::None)
- Attrs.push_back(AttributeWithIndex::get(0, RetAttrs));
+ if (RetAttrs.hasAttributes())
+ Attrs.push_back(
+ AttributeWithIndex::get(AttrListPtr::ReturnIndex,
+ Attributes::get(RetType->getContext(),
+ RetAttrs)));
for (unsigned i = 0, e = ArgList.size(); i != e; ++i) {
ParamTypeList.push_back(ArgList[i].Ty);
- if (ArgList[i].Attrs != Attribute::None)
+ if (ArgList[i].Attrs.hasAttributes())
Attrs.push_back(AttributeWithIndex::get(i+1, ArgList[i].Attrs));
}
- if (FuncAttrs != Attribute::None)
- Attrs.push_back(AttributeWithIndex::get(~0, FuncAttrs));
+ if (FuncAttrs.hasAttributes())
+ Attrs.push_back(
+ AttributeWithIndex::get(AttrListPtr::FunctionIndex,
+ Attributes::get(RetType->getContext(),
+ FuncAttrs)));
- AttrListPtr PAL = AttrListPtr::get(Attrs);
+ AttrListPtr PAL = AttrListPtr::get(Context, Attrs);
- if (PAL.paramHasAttr(1, Attribute::StructRet) && !RetType->isVoidTy())
+ if (PAL.getParamAttributes(1).hasAttribute(Attributes::StructRet) &&
+ !RetType->isVoidTy())
return Error(RetTypeLoc, "functions with 'sret' argument must return void");
FunctionType *FT =
@@ -2752,6 +2812,9 @@
ForwardRefVals.find(FunctionName);
if (FRVI != ForwardRefVals.end()) {
Fn = M->getFunction(FunctionName);
+ if (!Fn)
+ return Error(FRVI->second.second, "invalid forward reference to "
+ "function as global value!");
if (Fn->getType() != PFT)
return Error(FRVI->second.second, "invalid forward reference to "
"function '" + FunctionName + "' with wrong type!");
@@ -3205,7 +3268,7 @@
/// OptionalAttrs 'to' TypeAndValue 'unwind' TypeAndValue
bool LLParser::ParseInvoke(Instruction *&Inst, PerFunctionState &PFS) {
LocTy CallLoc = Lex.getLoc();
- Attributes RetAttrs, FnAttrs;
+ AttrBuilder RetAttrs, FnAttrs;
CallingConv::ID CC;
Type *RetType = 0;
LocTy RetTypeLoc;
@@ -3250,8 +3313,11 @@
// Set up the Attributes for the function.
SmallVector<AttributeWithIndex, 8> Attrs;
- if (RetAttrs != Attribute::None)
- Attrs.push_back(AttributeWithIndex::get(0, RetAttrs));
+ if (RetAttrs.hasAttributes())
+ Attrs.push_back(
+ AttributeWithIndex::get(AttrListPtr::ReturnIndex,
+ Attributes::get(Callee->getContext(),
+ RetAttrs)));
SmallVector<Value*, 8> Args;
@@ -3271,18 +3337,21 @@
return Error(ArgList[i].Loc, "argument is not of expected type '" +
getTypeString(ExpectedTy) + "'");
Args.push_back(ArgList[i].V);
- if (ArgList[i].Attrs != Attribute::None)
+ if (ArgList[i].Attrs.hasAttributes())
Attrs.push_back(AttributeWithIndex::get(i+1, ArgList[i].Attrs));
}
if (I != E)
return Error(CallLoc, "not enough parameters specified for call");
- if (FnAttrs != Attribute::None)
- Attrs.push_back(AttributeWithIndex::get(~0, FnAttrs));
+ if (FnAttrs.hasAttributes())
+ Attrs.push_back(
+ AttributeWithIndex::get(AttrListPtr::FunctionIndex,
+ Attributes::get(Callee->getContext(),
+ FnAttrs)));
// Finish off the Attributes and check them
- AttrListPtr PAL = AttrListPtr::get(Attrs);
+ AttrListPtr PAL = AttrListPtr::get(Context, Attrs);
InvokeInst *II = InvokeInst::Create(Callee, NormalBB, UnwindBB, Args);
II->setCallingConv(CC);
@@ -3604,7 +3673,7 @@
/// ParameterList OptionalAttrs
bool LLParser::ParseCall(Instruction *&Inst, PerFunctionState &PFS,
bool isTail) {
- Attributes RetAttrs, FnAttrs;
+ AttrBuilder RetAttrs, FnAttrs;
CallingConv::ID CC;
Type *RetType = 0;
LocTy RetTypeLoc;
@@ -3646,8 +3715,11 @@
// Set up the Attributes for the function.
SmallVector<AttributeWithIndex, 8> Attrs;
- if (RetAttrs != Attribute::None)
- Attrs.push_back(AttributeWithIndex::get(0, RetAttrs));
+ if (RetAttrs.hasAttributes())
+ Attrs.push_back(
+ AttributeWithIndex::get(AttrListPtr::ReturnIndex,
+ Attributes::get(Callee->getContext(),
+ RetAttrs)));
SmallVector<Value*, 8> Args;
@@ -3667,18 +3739,21 @@
return Error(ArgList[i].Loc, "argument is not of expected type '" +
getTypeString(ExpectedTy) + "'");
Args.push_back(ArgList[i].V);
- if (ArgList[i].Attrs != Attribute::None)
+ if (ArgList[i].Attrs.hasAttributes())
Attrs.push_back(AttributeWithIndex::get(i+1, ArgList[i].Attrs));
}
if (I != E)
return Error(CallLoc, "not enough parameters specified for call");
- if (FnAttrs != Attribute::None)
- Attrs.push_back(AttributeWithIndex::get(~0, FnAttrs));
+ if (FnAttrs.hasAttributes())
+ Attrs.push_back(
+ AttributeWithIndex::get(AttrListPtr::FunctionIndex,
+ Attributes::get(Callee->getContext(),
+ FnAttrs)));
// Finish off the Attributes and check them
- AttrListPtr PAL = AttrListPtr::get(Attrs);
+ AttrListPtr PAL = AttrListPtr::get(Context, Attrs);
CallInst *CI = CallInst::Create(Callee, Args);
CI->setTailCall(isTail);
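
Taken together, the LLParser changes above migrate attribute handling from the old bitmask-style Attributes to the incremental AttrBuilder, which is only materialized into an immutable, uniqued attribute list at the end. A minimal sketch of that pattern, assuming the headers as of this branch; the helper name makeFunctionAttrs is illustrative, not LLVM API:

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/Attributes.h"
    #include "llvm/LLVMContext.h"
    using namespace llvm;

    static AttrListPtr makeFunctionAttrs(LLVMContext &Ctx) {
      AttrBuilder FnAttrs;                          // mutable scratch set
      FnAttrs.addAttribute(Attributes::NoUnwind)
             .addAttribute(Attributes::ReadOnly);

      SmallVector<AttributeWithIndex, 8> Attrs;
      if (FnAttrs.hasAttributes())                  // only emit non-empty slots
        Attrs.push_back(
          AttributeWithIndex::get(AttrListPtr::FunctionIndex,
                                  Attributes::get(Ctx, FnAttrs)));
      return AttrListPtr::get(Ctx, Attrs);          // immutable, uniqued list
    }

The same shape recurs for return and parameter attributes, with AttrListPtr::ReturnIndex or the 1-based parameter index in place of FunctionIndex.
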
Modified: llvm/branches/AMDILBackend/lib/AsmParser/LLParser.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/AsmParser/LLParser.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/AsmParser/LLParser.h (original)
+++ llvm/branches/AMDILBackend/lib/AsmParser/LLParser.h Tue Jan 15 11:16:16 2013
@@ -175,7 +175,7 @@
bool ParseTLSModel(GlobalVariable::ThreadLocalMode &TLM);
bool ParseOptionalThreadLocal(GlobalVariable::ThreadLocalMode &TLM);
bool ParseOptionalAddrSpace(unsigned &AddrSpace);
- bool ParseOptionalAttrs(Attributes &Attrs, unsigned AttrKind);
+ bool ParseOptionalAttrs(AttrBuilder &Attrs, unsigned AttrKind);
bool ParseOptionalLinkage(unsigned &Linkage, bool &HasLinkage);
bool ParseOptionalLinkage(unsigned &Linkage) {
bool HasLinkage; return ParseOptionalLinkage(Linkage, HasLinkage);
Modified: llvm/branches/AMDILBackend/lib/AsmParser/LLToken.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/AsmParser/LLToken.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/AsmParser/LLToken.h (original)
+++ llvm/branches/AMDILBackend/lib/AsmParser/LLToken.h Tue Jan 15 11:16:16 2013
@@ -37,8 +37,10 @@
kw_global, kw_constant,
kw_private, kw_linker_private, kw_linker_private_weak,
- kw_linker_private_weak_def_auto, kw_internal,
- kw_linkonce, kw_linkonce_odr, kw_weak, kw_weak_odr, kw_appending,
+ kw_linker_private_weak_def_auto, // FIXME: For backwards compatibility.
+ kw_internal,
+ kw_linkonce, kw_linkonce_odr, kw_linkonce_odr_auto_hide,
+ kw_weak, kw_weak_odr, kw_appending,
kw_dllimport, kw_dllexport, kw_common, kw_available_externally,
kw_default, kw_hidden, kw_protected,
kw_unnamed_addr,
@@ -70,14 +72,17 @@
kw_asm,
kw_sideeffect,
kw_alignstack,
+ kw_inteldialect,
kw_gc,
kw_c,
kw_cc, kw_ccc, kw_fastcc, kw_coldcc,
+ kw_intel_ocl_bicc,
kw_x86_stdcallcc, kw_x86_fastcallcc, kw_x86_thiscallcc,
kw_arm_apcscc, kw_arm_aapcscc, kw_arm_aapcs_vfpcc,
kw_msp430_intrcc,
kw_ptx_kernel, kw_ptx_device,
+ kw_spir_kernel, kw_spir_func,
kw_signext,
kw_zeroext,
@@ -105,7 +110,7 @@
kw_naked,
kw_nonlazybind,
kw_address_safety,
- kw_ia_nsdialect,
+ kw_minsize,
kw_type,
kw_opaque,
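
The new kw_inteldialect token above feeds the flag word that LLParser and the bitcode now share: side-effect in bit 0, alignstack in bit 1, asm dialect in bit 2. A self-contained sketch of that packing, with names of my own choosing:

    #include <cassert>

    struct AsmFlags { bool SideEffect, AlignStack, IntelDialect; };

    unsigned packAsmFlags(const AsmFlags &F) {
      return unsigned(F.SideEffect) |
             (unsigned(F.AlignStack) << 1) |
             (unsigned(F.IntelDialect) << 2);   // bit added by this patch
    }

    AsmFlags unpackAsmFlags(unsigned Word) {
      AsmFlags F;
      F.SideEffect   = Word & 1;
      F.AlignStack   = (Word >> 1) & 1;
      F.IntelDialect = (Word >> 2) & 1;         // 0 = AT&T, 1 = Intel
      return F;
    }

    int main() {
      AsmFlags In = { true, false, true };
      AsmFlags Out = unpackAsmFlags(packAsmFlags(In));
      assert(Out.SideEffect && !Out.AlignStack && Out.IntelDialect);
    }
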
Modified: llvm/branches/AMDILBackend/lib/Bitcode/Reader/BitcodeReader.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/Bitcode/Reader/BitcodeReader.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/Bitcode/Reader/BitcodeReader.cpp (original)
+++ llvm/branches/AMDILBackend/lib/Bitcode/Reader/BitcodeReader.cpp Tue Jan 15 11:16:16 2013
@@ -52,6 +52,8 @@
std::vector<Function*>().swap(FunctionsWithBodies);
DeferredFunctionInfo.clear();
MDKindMap.clear();
+
+ assert(BlockAddrFwdRefs.empty() && "Unresolved blockaddress fwd references");
}
//===----------------------------------------------------------------------===//
@@ -89,7 +91,7 @@
case 12: return GlobalValue::AvailableExternallyLinkage;
case 13: return GlobalValue::LinkerPrivateLinkage;
case 14: return GlobalValue::LinkerPrivateWeakLinkage;
- case 15: return GlobalValue::LinkerPrivateWeakDefAutoLinkage;
+ case 15: return GlobalValue::LinkOnceODRAutoHideLinkage;
}
}
@@ -197,7 +199,7 @@
/// @brief A class for maintaining the slot number definition
/// as a placeholder for the actual definition for forward constants defs.
class ConstantPlaceHolder : public ConstantExpr {
- void operator=(const ConstantPlaceHolder &); // DO NOT IMPLEMENT
+ void operator=(const ConstantPlaceHolder &) LLVM_DELETED_FUNCTION;
public:
// allocate space for exactly one operand
void *operator new(size_t s) {
@@ -209,7 +211,6 @@
}
/// @brief Methods to support type inquiry through isa, cast, and dyn_cast.
- //static inline bool classof(const ConstantPlaceHolder *) { return true; }
static bool classof(const Value *V) {
return isa<ConstantExpr>(V) &&
cast<ConstantExpr>(V)->getOpcode() == Instruction::UserOp1;
@@ -475,17 +476,18 @@
for (unsigned i = 0, e = Record.size(); i != e; i += 2) {
Attributes ReconstitutedAttr =
- Attribute::decodeLLVMAttributesForBitcode(Record[i+1]);
+ Attributes::decodeLLVMAttributesForBitcode(Context, Record[i+1]);
Record[i+1] = ReconstitutedAttr.Raw();
}
for (unsigned i = 0, e = Record.size(); i != e; i += 2) {
- if (Attributes(Record[i+1]) != Attribute::None)
+ AttrBuilder B(Record[i+1]);
+ if (B.hasAttributes())
Attrs.push_back(AttributeWithIndex::get(Record[i],
- Attributes(Record[i+1])));
+ Attributes::get(Context, B)));
}
- MAttributes.push_back(AttrListPtr::get(Attrs));
+ MAttributes.push_back(AttrListPtr::get(Context, Attrs));
Attrs.clear();
break;
}
@@ -889,9 +891,9 @@
}
}
-/// DecodeSignRotatedValue - Decode a signed value stored with the sign bit in
+/// decodeSignRotatedValue - Decode a signed value stored with the sign bit in
/// the LSB for dense VBR encoding.
-static uint64_t DecodeSignRotatedValue(uint64_t V) {
+uint64_t BitcodeReader::decodeSignRotatedValue(uint64_t V) {
if ((V & 1) == 0)
return V >> 1;
if (V != 1)
@@ -941,7 +943,7 @@
static APInt ReadWideAPInt(ArrayRef<uint64_t> Vals, unsigned TypeBits) {
SmallVector<uint64_t, 8> Words(Vals.size());
std::transform(Vals.begin(), Vals.end(), Words.begin(),
- DecodeSignRotatedValue);
+ BitcodeReader::decodeSignRotatedValue);
return APInt(TypeBits, Words);
}
@@ -995,7 +997,7 @@
case bitc::CST_CODE_INTEGER: // INTEGER: [intval]
if (!CurTy->isIntegerTy() || Record.empty())
return Error("Invalid CST_INTEGER record");
- V = ConstantInt::get(CurTy, DecodeSignRotatedValue(Record[0]));
+ V = ConstantInt::get(CurTy, decodeSignRotatedValue(Record[0]));
break;
case bitc::CST_CODE_WIDE_INTEGER: {// WIDE_INTEGER: [n x intval]
if (!CurTy->isIntegerTy() || Record.empty())
@@ -1245,7 +1247,9 @@
V = ConstantExpr::getICmp(Record[3], Op0, Op1);
break;
}
- case bitc::CST_CODE_INLINEASM: {
+ // This maintains backward compatibility, pre-asm dialect keywords.
+ // FIXME: Remove with the 4.0 release.
+ case bitc::CST_CODE_INLINEASM_OLD: {
if (Record.size() < 2) return Error("Invalid INLINEASM record");
std::string AsmStr, ConstrStr;
bool HasSideEffects = Record[0] & 1;
@@ -1266,6 +1270,31 @@
AsmStr, ConstrStr, HasSideEffects, IsAlignStack);
break;
}
+ // This version adds support for the asm dialect keywords (e.g.,
+ // inteldialect).
+ case bitc::CST_CODE_INLINEASM: {
+ if (Record.size() < 2) return Error("Invalid INLINEASM record");
+ std::string AsmStr, ConstrStr;
+ bool HasSideEffects = Record[0] & 1;
+ bool IsAlignStack = (Record[0] >> 1) & 1;
+ unsigned AsmDialect = Record[0] >> 2;
+ unsigned AsmStrSize = Record[1];
+ if (2+AsmStrSize >= Record.size())
+ return Error("Invalid INLINEASM record");
+ unsigned ConstStrSize = Record[2+AsmStrSize];
+ if (3+AsmStrSize+ConstStrSize > Record.size())
+ return Error("Invalid INLINEASM record");
+
+ for (unsigned i = 0; i != AsmStrSize; ++i)
+ AsmStr += (char)Record[2+i];
+ for (unsigned i = 0; i != ConstStrSize; ++i)
+ ConstrStr += (char)Record[3+AsmStrSize+i];
+ PointerType *PTy = cast<PointerType>(CurTy);
+ V = InlineAsm::get(cast<FunctionType>(PTy->getElementType()),
+ AsmStr, ConstrStr, HasSideEffects, IsAlignStack,
+ InlineAsm::AsmDialect(AsmDialect));
+ break;
+ }
case bitc::CST_CODE_BLOCKADDRESS:{
if (Record.size() < 3) return Error("Invalid CE_BLOCKADDRESS record");
Type *FnTy = getTypeByID(Record[0]);
@@ -1273,13 +1302,27 @@
Function *Fn =
dyn_cast_or_null<Function>(ValueList.getConstantFwdRef(Record[1],FnTy));
if (Fn == 0) return Error("Invalid CE_BLOCKADDRESS record");
-
- GlobalVariable *FwdRef = new GlobalVariable(*Fn->getParent(),
- Type::getInt8Ty(Context),
+
+ // If the function is already parsed we can insert the block address right
+ // away.
+ if (!Fn->empty()) {
+ Function::iterator BBI = Fn->begin(), BBE = Fn->end();
+ for (size_t I = 0, E = Record[2]; I != E; ++I) {
+ if (BBI == BBE)
+ return Error("Invalid blockaddress block #");
+ ++BBI;
+ }
+ V = BlockAddress::get(Fn, BBI);
+ } else {
+ // Otherwise insert a placeholder and remember it so it can be inserted
+ // when the function is parsed.
+ GlobalVariable *FwdRef = new GlobalVariable(*Fn->getParent(),
+ Type::getInt8Ty(Context),
false, GlobalValue::InternalLinkage,
- 0, "");
- BlockAddrFwdRefs[Fn].push_back(std::make_pair(Record[2], FwdRef));
- V = FwdRef;
+ 0, "");
+ BlockAddrFwdRefs[Fn].push_back(std::make_pair(Record[2], FwdRef));
+ V = FwdRef;
+ }
break;
}
}
@@ -1481,13 +1524,22 @@
// Read a record.
switch (Stream.ReadRecord(Code, Record)) {
default: break; // Default behavior, ignore unknown content.
- case bitc::MODULE_CODE_VERSION: // VERSION: [version#]
+ case bitc::MODULE_CODE_VERSION: { // VERSION: [version#]
if (Record.size() < 1)
return Error("Malformed MODULE_CODE_VERSION");
- // Only version #0 is supported so far.
- if (Record[0] != 0)
- return Error("Unknown bitstream version!");
+ // Only version #0 and #1 are supported so far.
+ unsigned module_version = Record[0];
+ switch (module_version) {
+ default: return Error("Unknown bitstream version!");
+ case 0:
+ UseRelativeIDs = false;
+ break;
+ case 1:
+ UseRelativeIDs = true;
+ break;
+ }
break;
+ }
case bitc::MODULE_CODE_TRIPLE: { // TRIPLE: [strchr x N]
std::string S;
if (ConvertToString(Record, 0, S))
@@ -1754,13 +1806,6 @@
// Read a record.
switch (Stream.ReadRecord(Code, Record)) {
default: break; // Default behavior, ignore unknown content.
- case bitc::MODULE_CODE_VERSION: // VERSION: [version#]
- if (Record.size() < 1)
- return Error("Malformed MODULE_CODE_VERSION");
- // Only version #0 is supported so far.
- if (Record[0] != 0)
- return Error("Unknown bitstream version!");
- break;
case bitc::MODULE_CODE_TRIPLE: { // TRIPLE: [strchr x N]
std::string S;
if (ConvertToString(Record, 0, S))
@@ -1973,7 +2018,7 @@
unsigned OpNum = 0;
Value *LHS, *RHS;
if (getValueTypePair(Record, OpNum, NextValueNo, LHS) ||
- getValue(Record, OpNum, LHS->getType(), RHS) ||
+ popValue(Record, OpNum, NextValueNo, LHS->getType(), RHS) ||
OpNum+1 > Record.size())
return Error("Invalid BINOP record");
@@ -2088,8 +2133,8 @@
unsigned OpNum = 0;
Value *TrueVal, *FalseVal, *Cond;
if (getValueTypePair(Record, OpNum, NextValueNo, TrueVal) ||
- getValue(Record, OpNum, TrueVal->getType(), FalseVal) ||
- getValue(Record, OpNum, Type::getInt1Ty(Context), Cond))
+ popValue(Record, OpNum, NextValueNo, TrueVal->getType(), FalseVal) ||
+ popValue(Record, OpNum, NextValueNo, Type::getInt1Ty(Context), Cond))
return Error("Invalid SELECT record");
I = SelectInst::Create(Cond, TrueVal, FalseVal);
@@ -2103,7 +2148,7 @@
unsigned OpNum = 0;
Value *TrueVal, *FalseVal, *Cond;
if (getValueTypePair(Record, OpNum, NextValueNo, TrueVal) ||
- getValue(Record, OpNum, TrueVal->getType(), FalseVal) ||
+ popValue(Record, OpNum, NextValueNo, TrueVal->getType(), FalseVal) ||
getValueTypePair(Record, OpNum, NextValueNo, Cond))
return Error("Invalid SELECT record");
@@ -2128,7 +2173,7 @@
unsigned OpNum = 0;
Value *Vec, *Idx;
if (getValueTypePair(Record, OpNum, NextValueNo, Vec) ||
- getValue(Record, OpNum, Type::getInt32Ty(Context), Idx))
+ popValue(Record, OpNum, NextValueNo, Type::getInt32Ty(Context), Idx))
return Error("Invalid EXTRACTELT record");
I = ExtractElementInst::Create(Vec, Idx);
InstructionList.push_back(I);
@@ -2139,9 +2184,9 @@
unsigned OpNum = 0;
Value *Vec, *Elt, *Idx;
if (getValueTypePair(Record, OpNum, NextValueNo, Vec) ||
- getValue(Record, OpNum,
+ popValue(Record, OpNum, NextValueNo,
cast<VectorType>(Vec->getType())->getElementType(), Elt) ||
- getValue(Record, OpNum, Type::getInt32Ty(Context), Idx))
+ popValue(Record, OpNum, NextValueNo, Type::getInt32Ty(Context), Idx))
return Error("Invalid INSERTELT record");
I = InsertElementInst::Create(Vec, Elt, Idx);
InstructionList.push_back(I);
@@ -2152,7 +2197,7 @@
unsigned OpNum = 0;
Value *Vec1, *Vec2, *Mask;
if (getValueTypePair(Record, OpNum, NextValueNo, Vec1) ||
- getValue(Record, OpNum, Vec1->getType(), Vec2))
+ popValue(Record, OpNum, NextValueNo, Vec1->getType(), Vec2))
return Error("Invalid SHUFFLEVEC record");
if (getValueTypePair(Record, OpNum, NextValueNo, Mask))
@@ -2172,7 +2217,7 @@
unsigned OpNum = 0;
Value *LHS, *RHS;
if (getValueTypePair(Record, OpNum, NextValueNo, LHS) ||
- getValue(Record, OpNum, LHS->getType(), RHS) ||
+ popValue(Record, OpNum, NextValueNo, LHS->getType(), RHS) ||
OpNum+1 != Record.size())
return Error("Invalid CMP record");
@@ -2217,7 +2262,8 @@
}
else {
BasicBlock *FalseDest = getBasicBlock(Record[1]);
- Value *Cond = getFnValueByID(Record[2], Type::getInt1Ty(Context));
+ Value *Cond = getValue(Record, 2, NextValueNo,
+ Type::getInt1Ty(Context));
if (FalseDest == 0 || Cond == 0)
return Error("Invalid BR record");
I = BranchInst::Create(TrueDest, FalseDest, Cond);
@@ -2233,7 +2279,7 @@
Type *OpTy = getTypeByID(Record[1]);
unsigned ValueBitWidth = cast<IntegerType>(OpTy)->getBitWidth();
- Value *Cond = getFnValueByID(Record[2], OpTy);
+ Value *Cond = getValue(Record, 2, NextValueNo, OpTy);
BasicBlock *Default = getBasicBlock(Record[3]);
if (OpTy == 0 || Cond == 0 || Default == 0)
return Error("Invalid SWITCH record");
@@ -2288,7 +2334,7 @@
if (Record.size() < 3 || (Record.size() & 1) == 0)
return Error("Invalid SWITCH record");
Type *OpTy = getTypeByID(Record[0]);
- Value *Cond = getFnValueByID(Record[1], OpTy);
+ Value *Cond = getValue(Record, 1, NextValueNo, OpTy);
BasicBlock *Default = getBasicBlock(Record[2]);
if (OpTy == 0 || Cond == 0 || Default == 0)
return Error("Invalid SWITCH record");
@@ -2312,7 +2358,7 @@
if (Record.size() < 2)
return Error("Invalid INDIRECTBR record");
Type *OpTy = getTypeByID(Record[0]);
- Value *Address = getFnValueByID(Record[1], OpTy);
+ Value *Address = getValue(Record, 1, NextValueNo, OpTy);
if (OpTy == 0 || Address == 0)
return Error("Invalid INDIRECTBR record");
unsigned NumDests = Record.size()-2;
@@ -2354,7 +2400,8 @@
SmallVector<Value*, 16> Ops;
for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i, ++OpNum) {
- Ops.push_back(getFnValueByID(Record[OpNum], FTy->getParamType(i)));
+ Ops.push_back(getValue(Record, OpNum, NextValueNo,
+ FTy->getParamType(i)));
if (Ops.back() == 0) return Error("Invalid INVOKE record");
}
@@ -2401,7 +2448,14 @@
InstructionList.push_back(PN);
for (unsigned i = 0, e = Record.size()-1; i != e; i += 2) {
- Value *V = getFnValueByID(Record[1+i], Ty);
+ Value *V;
+ // With the new function encoding, it is possible that operands have
+ // negative IDs (for forward references). Use a signed VBR
+ // representation to keep the encoding small.
+ if (UseRelativeIDs)
+ V = getValueSigned(Record, 1+i, NextValueNo, Ty);
+ else
+ V = getValue(Record, 1+i, NextValueNo, Ty);
BasicBlock *BB = getBasicBlock(Record[2+i]);
if (!V || !BB) return Error("Invalid PHI record");
PN->addIncoming(V, BB);
@@ -2499,7 +2553,7 @@
unsigned OpNum = 0;
Value *Val, *Ptr;
if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) ||
- getValue(Record, OpNum,
+ popValue(Record, OpNum, NextValueNo,
cast<PointerType>(Ptr->getType())->getElementType(), Val) ||
OpNum+2 != Record.size())
return Error("Invalid STORE record");
@@ -2513,7 +2567,7 @@
unsigned OpNum = 0;
Value *Val, *Ptr;
if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) ||
- getValue(Record, OpNum,
+ popValue(Record, OpNum, NextValueNo,
cast<PointerType>(Ptr->getType())->getElementType(), Val) ||
OpNum+4 != Record.size())
return Error("Invalid STOREATOMIC record");
@@ -2536,9 +2590,9 @@
unsigned OpNum = 0;
Value *Ptr, *Cmp, *New;
if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) ||
- getValue(Record, OpNum,
+ popValue(Record, OpNum, NextValueNo,
cast<PointerType>(Ptr->getType())->getElementType(), Cmp) ||
- getValue(Record, OpNum,
+ popValue(Record, OpNum, NextValueNo,
cast<PointerType>(Ptr->getType())->getElementType(), New) ||
OpNum+3 != Record.size())
return Error("Invalid CMPXCHG record");
@@ -2556,7 +2610,7 @@
unsigned OpNum = 0;
Value *Ptr, *Val;
if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) ||
- getValue(Record, OpNum,
+ popValue(Record, OpNum, NextValueNo,
cast<PointerType>(Ptr->getType())->getElementType(), Val) ||
OpNum+4 != Record.size())
return Error("Invalid ATOMICRMW record");
@@ -2610,7 +2664,8 @@
if (FTy->getParamType(i)->isLabelTy())
Args.push_back(getBasicBlock(Record[OpNum]));
else
- Args.push_back(getFnValueByID(Record[OpNum], FTy->getParamType(i)));
+ Args.push_back(getValue(Record, OpNum, NextValueNo,
+ FTy->getParamType(i)));
if (Args.back() == 0) return Error("Invalid CALL record");
}
@@ -2639,7 +2694,7 @@
if (Record.size() < 3)
return Error("Invalid VAARG record");
Type *OpTy = getTypeByID(Record[0]);
- Value *Op = getFnValueByID(Record[1], OpTy);
+ Value *Op = getValue(Record, 1, NextValueNo, OpTy);
Type *ResTy = getTypeByID(Record[2]);
if (!OpTy || !Op || !ResTy)
return Error("Invalid VAARG record");
@@ -2837,7 +2892,7 @@
}
bool BitcodeReader::InitStreamFromBuffer() {
- const unsigned char *BufPtr = (unsigned char *)Buffer->getBufferStart();
+ const unsigned char *BufPtr = (const unsigned char*)Buffer->getBufferStart();
const unsigned char *BufEnd = BufPtr+Buffer->getBufferSize();
if (Buffer->getBufferSize() & 3) {
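
decodeSignRotatedValue above, and its writer-side twin emitSignedInt64 further down, implement the sign rotation that keeps small negative numbers small under VBR encoding: the magnitude is shifted left one bit and the sign stored in bit 0, with the otherwise-unused pattern 1 standing in for INT64_MIN. A standalone round trip mirroring the committed logic:

    #include <cassert>
    #include <cstdint>

    static uint64_t encodeSignRotated(uint64_t V) {  // as in emitSignedInt64
      if ((int64_t)V >= 0)
        return V << 1;              // non-negative: value * 2
      return ((-V) << 1) | 1;       // negative: |value| * 2 + 1 (unsigned math)
    }

    static uint64_t decodeSignRotated(uint64_t V) {  // as in the reader
      if ((V & 1) == 0)
        return V >> 1;              // even: non-negative
      if (V != 1)
        return -(V >> 1);           // odd: negative
      return 1ULL << 63;            // "-0" pattern encodes INT64_MIN
    }

    int main() {
      assert(decodeSignRotated(encodeSignRotated(0)) == 0);
      assert((int64_t)decodeSignRotated(encodeSignRotated((uint64_t)-5)) == -5);
      assert(encodeSignRotated((uint64_t)INT64_MIN) == 1);  // the special case
    }
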
Modified: llvm/branches/AMDILBackend/lib/Bitcode/Reader/BitcodeReader.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/Bitcode/Reader/BitcodeReader.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/Bitcode/Reader/BitcodeReader.h (original)
+++ llvm/branches/AMDILBackend/lib/Bitcode/Reader/BitcodeReader.h Tue Jan 15 11:16:16 2013
@@ -179,18 +179,27 @@
typedef std::pair<unsigned, GlobalVariable*> BlockAddrRefTy;
DenseMap<Function*, std::vector<BlockAddrRefTy> > BlockAddrFwdRefs;
+ /// UseRelativeIDs - Indicates that we are using a new encoding for
+ /// instruction operands where most operands in the current
+ /// FUNCTION_BLOCK are encoded relative to the instruction number,
+ /// for a more compact encoding. Some instruction operands are not
+ /// relative to the instruction ID: basic block numbers, and types.
+ /// Once the old style function blocks have been phased out, we would
+ /// not need this flag.
+ bool UseRelativeIDs;
+
public:
explicit BitcodeReader(MemoryBuffer *buffer, LLVMContext &C)
: Context(C), TheModule(0), Buffer(buffer), BufferOwned(false),
LazyStreamer(0), NextUnreadBit(0), SeenValueSymbolTable(false),
ErrorString(0), ValueList(C), MDValueList(C),
- SeenFirstFunctionBody(false) {
+ SeenFirstFunctionBody(false), UseRelativeIDs(false) {
}
explicit BitcodeReader(DataStreamer *streamer, LLVMContext &C)
: Context(C), TheModule(0), Buffer(0), BufferOwned(false),
LazyStreamer(streamer), NextUnreadBit(0), SeenValueSymbolTable(false),
ErrorString(0), ValueList(C), MDValueList(C),
- SeenFirstFunctionBody(false) {
+ SeenFirstFunctionBody(false), UseRelativeIDs(false) {
}
~BitcodeReader() {
FreeState();
@@ -223,6 +232,9 @@
/// @brief Cheap mechanism to just extract module triple
/// @returns true if an error occurred.
bool ParseTriple(std::string &Triple);
+
+ static uint64_t decodeSignRotatedValue(uint64_t V);
+
private:
Type *getTypeByID(unsigned ID);
Value *getFnValueByID(unsigned ID, Type *Ty) {
@@ -247,6 +259,9 @@
unsigned InstNum, Value *&ResVal) {
if (Slot == Record.size()) return true;
unsigned ValNo = (unsigned)Record[Slot++];
+ // Adjust the ValNo, if it was encoded relative to the InstNum.
+ if (UseRelativeIDs)
+ ValNo = InstNum - ValNo;
if (ValNo < InstNum) {
// If this is not a forward reference, just return the value we already
// have.
@@ -255,20 +270,54 @@
} else if (Slot == Record.size()) {
return true;
}
-
+
unsigned TypeNo = (unsigned)Record[Slot++];
ResVal = getFnValueByID(ValNo, getTypeByID(TypeNo));
return ResVal == 0;
}
- bool getValue(SmallVector<uint64_t, 64> &Record, unsigned &Slot,
- Type *Ty, Value *&ResVal) {
- if (Slot == Record.size()) return true;
- unsigned ValNo = (unsigned)Record[Slot++];
- ResVal = getFnValueByID(ValNo, Ty);
+
+ /// popValue - Read a value out of the specified record from slot 'Slot'.
+ /// Increment Slot past the number of slots used by the value in the record.
+ /// Return true if there is an error.
+ bool popValue(SmallVector<uint64_t, 64> &Record, unsigned &Slot,
+ unsigned InstNum, Type *Ty, Value *&ResVal) {
+ if (getValue(Record, Slot, InstNum, Ty, ResVal))
+ return true;
+ // All values currently take a single record slot.
+ ++Slot;
+ return false;
+ }
+
+ /// getValue -- Like popValue, but does not increment the Slot number.
+ bool getValue(SmallVector<uint64_t, 64> &Record, unsigned Slot,
+ unsigned InstNum, Type *Ty, Value *&ResVal) {
+ ResVal = getValue(Record, Slot, InstNum, Ty);
return ResVal == 0;
}
-
+ /// getValue -- Version of getValue that returns ResVal directly,
+ /// or 0 if there is an error.
+ Value *getValue(SmallVector<uint64_t, 64> &Record, unsigned Slot,
+ unsigned InstNum, Type *Ty) {
+ if (Slot == Record.size()) return 0;
+ unsigned ValNo = (unsigned)Record[Slot];
+ // Adjust the ValNo, if it was encoded relative to the InstNum.
+ if (UseRelativeIDs)
+ ValNo = InstNum - ValNo;
+ return getFnValueByID(ValNo, Ty);
+ }
+
+ /// getValueSigned -- Like getValue, but decodes signed VBRs.
+ Value *getValueSigned(SmallVector<uint64_t, 64> &Record, unsigned Slot,
+ unsigned InstNum, Type *Ty) {
+ if (Slot == Record.size()) return 0;
+ unsigned ValNo = (unsigned)decodeSignRotatedValue(Record[Slot]);
+ // Adjust the ValNo, if it was encoded relative to the InstNum.
+ if (UseRelativeIDs)
+ ValNo = InstNum - ValNo;
+ return getFnValueByID(ValNo, Ty);
+ }
+
bool ParseModule(bool Resume);
bool ParseAttributeBlock();
bool ParseTypeTable();
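
The UseRelativeIDs flag above is what switches operand decoding between the two encodings: version 0 records store absolute value numbers, while version 1 stores the distance back from the current instruction. A sketch of just that adjustment; resolveValNo is an illustrative name, not reader API:

    #include <cstdint>
    #include <cstdio>

    // Turn a stored operand slot into an absolute value number.
    unsigned resolveValNo(uint64_t Slot, unsigned InstNum, bool UseRelativeIDs) {
      unsigned ValNo = (unsigned)Slot;
      if (UseRelativeIDs)
        ValNo = InstNum - ValNo;  // stored as distance back from this instr
      return ValNo;               // ValNo >= InstNum means a forward reference
    }

    int main() {
      // Instruction #10 using value #7: stored as 3 relative, 7 absolute.
      std::printf("%u\n", resolveValNo(3, 10, true));   // prints 7
      std::printf("%u\n", resolveValNo(7, 10, false));  // prints 7
    }
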
Modified: llvm/branches/AMDILBackend/lib/Bitcode/Writer/BitcodeWriter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/Bitcode/Writer/BitcodeWriter.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/Bitcode/Writer/BitcodeWriter.cpp (original)
+++ llvm/branches/AMDILBackend/lib/Bitcode/Writer/BitcodeWriter.cpp Tue Jan 15 11:16:16 2013
@@ -41,8 +41,6 @@
/// These are manifest constants used by the bitcode writer. They do not need to
/// be kept in sync with the reader, but need to be consistent within this file.
enum {
- CurVersion = 0,
-
// VALUE_SYMTAB_BLOCK abbrev id's.
VST_ENTRY_8_ABBREV = bitc::FIRST_APPLICATION_ABBREV,
VST_ENTRY_7_ABBREV,
@@ -177,7 +175,7 @@
for (unsigned i = 0, e = A.getNumSlots(); i != e; ++i) {
const AttributeWithIndex &PAWI = A.getSlot(i);
Record.push_back(PAWI.Index);
- Record.push_back(Attribute::encodeLLVMAttributesForBitcode(PAWI.Attrs));
+ Record.push_back(Attributes::encodeLLVMAttributesForBitcode(PAWI.Attrs));
}
Stream.EmitRecord(bitc::PARAMATTR_CODE_ENTRY, Record);
@@ -365,7 +363,7 @@
case GlobalValue::AvailableExternallyLinkage: return 12;
case GlobalValue::LinkerPrivateLinkage: return 13;
case GlobalValue::LinkerPrivateWeakLinkage: return 14;
- case GlobalValue::LinkerPrivateWeakDefAutoLinkage: return 15;
+ case GlobalValue::LinkOnceODRAutoHideLinkage: return 15;
}
llvm_unreachable("Invalid linkage");
}
@@ -722,16 +720,20 @@
Stream.ExitBlock();
}
+static void emitSignedInt64(SmallVectorImpl<uint64_t> &Vals, uint64_t V) {
+ if ((int64_t)V >= 0)
+ Vals.push_back(V << 1);
+ else
+ Vals.push_back((-V << 1) | 1);
+}
+
static void EmitAPInt(SmallVectorImpl<uint64_t> &Vals,
unsigned &Code, unsigned &AbbrevToUse, const APInt &Val,
bool EmitSizeForWideNumbers = false
) {
if (Val.getBitWidth() <= 64) {
uint64_t V = Val.getSExtValue();
- if ((int64_t)V >= 0)
- Vals.push_back(V << 1);
- else
- Vals.push_back((-V << 1) | 1);
+ emitSignedInt64(Vals, V);
Code = bitc::CST_CODE_INTEGER;
AbbrevToUse = CONSTANTS_INTEGER_ABBREV;
} else {
@@ -747,11 +749,7 @@
const uint64_t *RawWords = Val.getRawData();
for (unsigned i = 0; i != NWords; ++i) {
- int64_t V = RawWords[i];
- if (V >= 0)
- Vals.push_back(V << 1);
- else
- Vals.push_back((-V << 1) | 1);
+ emitSignedInt64(Vals, RawWords[i]);
}
Code = bitc::CST_CODE_WIDE_INTEGER;
}
@@ -814,7 +812,8 @@
if (const InlineAsm *IA = dyn_cast<InlineAsm>(V)) {
Record.push_back(unsigned(IA->hasSideEffects()) |
- unsigned(IA->isAlignStack()) << 1);
+ unsigned(IA->isAlignStack()) << 1 |
+ unsigned(IA->getDialect()&1) << 2);
// Add the asm string.
const std::string &AsmStr = IA->getAsmString();
@@ -1024,12 +1023,13 @@
///
/// This function adds V's value ID to Vals. If the value ID is higher than the
/// instruction ID, then it is a forward reference, and it also includes the
-/// type ID.
+/// type ID. The value ID that is written is encoded relative to the InstID.
static bool PushValueAndType(const Value *V, unsigned InstID,
SmallVector<unsigned, 64> &Vals,
ValueEnumerator &VE) {
unsigned ValID = VE.getValueID(V);
- Vals.push_back(ValID);
+ // Make encoding relative to the InstID.
+ Vals.push_back(InstID - ValID);
if (ValID >= InstID) {
Vals.push_back(VE.getTypeID(V->getType()));
return true;
@@ -1037,6 +1037,30 @@
return false;
}
+/// pushValue - Like PushValueAndType, but where the type of the value is
+/// omitted (perhaps it was already encoded in an earlier operand).
+static void pushValue(const Value *V, unsigned InstID,
+ SmallVector<unsigned, 64> &Vals,
+ ValueEnumerator &VE) {
+ unsigned ValID = VE.getValueID(V);
+ Vals.push_back(InstID - ValID);
+}
+
+static void pushValue64(const Value *V, unsigned InstID,
+ SmallVector<uint64_t, 128> &Vals,
+ ValueEnumerator &VE) {
+ uint64_t ValID = VE.getValueID(V);
+ Vals.push_back(InstID - ValID);
+}
+
+static void pushValueSigned(const Value *V, unsigned InstID,
+ SmallVector<uint64_t, 128> &Vals,
+ ValueEnumerator &VE) {
+ unsigned ValID = VE.getValueID(V);
+ int64_t diff = ((int32_t)InstID - (int32_t)ValID);
+ emitSignedInt64(Vals, diff);
+}
+
/// WriteInstruction - Emit an instruction to the specified stream.
static void WriteInstruction(const Instruction &I, unsigned InstID,
ValueEnumerator &VE, BitstreamWriter &Stream,
@@ -1057,7 +1081,7 @@
Code = bitc::FUNC_CODE_INST_BINOP;
if (!PushValueAndType(I.getOperand(0), InstID, Vals, VE))
AbbrevToUse = FUNCTION_INST_BINOP_ABBREV;
- Vals.push_back(VE.getValueID(I.getOperand(1)));
+ pushValue(I.getOperand(1), InstID, Vals, VE);
Vals.push_back(GetEncodedBinaryOpcode(I.getOpcode()));
uint64_t Flags = GetOptimizationFlags(&I);
if (Flags != 0) {
@@ -1095,32 +1119,32 @@
case Instruction::Select:
Code = bitc::FUNC_CODE_INST_VSELECT;
PushValueAndType(I.getOperand(1), InstID, Vals, VE);
- Vals.push_back(VE.getValueID(I.getOperand(2)));
+ pushValue(I.getOperand(2), InstID, Vals, VE);
PushValueAndType(I.getOperand(0), InstID, Vals, VE);
break;
case Instruction::ExtractElement:
Code = bitc::FUNC_CODE_INST_EXTRACTELT;
PushValueAndType(I.getOperand(0), InstID, Vals, VE);
- Vals.push_back(VE.getValueID(I.getOperand(1)));
+ pushValue(I.getOperand(1), InstID, Vals, VE);
break;
case Instruction::InsertElement:
Code = bitc::FUNC_CODE_INST_INSERTELT;
PushValueAndType(I.getOperand(0), InstID, Vals, VE);
- Vals.push_back(VE.getValueID(I.getOperand(1)));
- Vals.push_back(VE.getValueID(I.getOperand(2)));
+ pushValue(I.getOperand(1), InstID, Vals, VE);
+ pushValue(I.getOperand(2), InstID, Vals, VE);
break;
case Instruction::ShuffleVector:
Code = bitc::FUNC_CODE_INST_SHUFFLEVEC;
PushValueAndType(I.getOperand(0), InstID, Vals, VE);
- Vals.push_back(VE.getValueID(I.getOperand(1)));
- Vals.push_back(VE.getValueID(I.getOperand(2)));
+ pushValue(I.getOperand(1), InstID, Vals, VE);
+ pushValue(I.getOperand(2), InstID, Vals, VE);
break;
case Instruction::ICmp:
case Instruction::FCmp:
// compare returning Int1Ty or vector of Int1Ty
Code = bitc::FUNC_CODE_INST_CMP2;
PushValueAndType(I.getOperand(0), InstID, Vals, VE);
- Vals.push_back(VE.getValueID(I.getOperand(1)));
+ pushValue(I.getOperand(1), InstID, Vals, VE);
Vals.push_back(cast<CmpInst>(I).getPredicate());
break;
@@ -1146,7 +1170,7 @@
Vals.push_back(VE.getValueID(II.getSuccessor(0)));
if (II.isConditional()) {
Vals.push_back(VE.getValueID(II.getSuccessor(1)));
- Vals.push_back(VE.getValueID(II.getCondition()));
+ pushValue(II.getCondition(), InstID, Vals, VE);
}
}
break;
@@ -1163,7 +1187,7 @@
Vals64.push_back(SwitchRecordHeader);
Vals64.push_back(VE.getTypeID(SI.getCondition()->getType()));
- Vals64.push_back(VE.getValueID(SI.getCondition()));
+ pushValue64(SI.getCondition(), InstID, Vals64, VE);
Vals64.push_back(VE.getValueID(SI.getDefaultDest()));
Vals64.push_back(SI.getNumCases());
for (SwitchInst::CaseIt i = SI.case_begin(), e = SI.case_end();
@@ -1214,7 +1238,9 @@
case Instruction::IndirectBr:
Code = bitc::FUNC_CODE_INST_INDIRECTBR;
Vals.push_back(VE.getTypeID(I.getOperand(0)->getType()));
- for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
+ // Encode the address operand as relative, but not the basic blocks.
+ pushValue(I.getOperand(0), InstID, Vals, VE);
+ for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i)
Vals.push_back(VE.getValueID(I.getOperand(i)));
break;
@@ -1233,7 +1259,7 @@
// Emit value #'s for the fixed parameters.
for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
- Vals.push_back(VE.getValueID(I.getOperand(i))); // fixed param.
+ pushValue(I.getOperand(i), InstID, Vals, VE); // fixed param.
// Emit type/value pairs for varargs params.
if (FTy->isVarArg()) {
@@ -1255,12 +1281,19 @@
case Instruction::PHI: {
const PHINode &PN = cast<PHINode>(I);
Code = bitc::FUNC_CODE_INST_PHI;
- Vals.push_back(VE.getTypeID(PN.getType()));
+ // With the newer instruction encoding, forward references could give
+ // negative valued IDs. This is most common for PHIs, so we use
+ // signed VBRs.
+ SmallVector<uint64_t, 128> Vals64;
+ Vals64.push_back(VE.getTypeID(PN.getType()));
for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
- Vals.push_back(VE.getValueID(PN.getIncomingValue(i)));
- Vals.push_back(VE.getValueID(PN.getIncomingBlock(i)));
+ pushValueSigned(PN.getIncomingValue(i), InstID, Vals64, VE);
+ Vals64.push_back(VE.getValueID(PN.getIncomingBlock(i)));
}
- break;
+ // Emit a Vals64 vector and exit.
+ Stream.EmitRecord(Code, Vals64, AbbrevToUse);
+ Vals64.clear();
+ return;
}
case Instruction::LandingPad: {
@@ -1310,7 +1343,7 @@
else
Code = bitc::FUNC_CODE_INST_STORE;
PushValueAndType(I.getOperand(1), InstID, Vals, VE); // ptrty + ptr
- Vals.push_back(VE.getValueID(I.getOperand(0))); // val.
+ pushValue(I.getOperand(0), InstID, Vals, VE); // val.
Vals.push_back(Log2_32(cast<StoreInst>(I).getAlignment())+1);
Vals.push_back(cast<StoreInst>(I).isVolatile());
if (cast<StoreInst>(I).isAtomic()) {
@@ -1321,8 +1354,8 @@
case Instruction::AtomicCmpXchg:
Code = bitc::FUNC_CODE_INST_CMPXCHG;
PushValueAndType(I.getOperand(0), InstID, Vals, VE); // ptrty + ptr
- Vals.push_back(VE.getValueID(I.getOperand(1))); // cmp.
- Vals.push_back(VE.getValueID(I.getOperand(2))); // newval.
+ pushValue(I.getOperand(1), InstID, Vals, VE); // cmp.
+ pushValue(I.getOperand(2), InstID, Vals, VE); // newval.
Vals.push_back(cast<AtomicCmpXchgInst>(I).isVolatile());
Vals.push_back(GetEncodedOrdering(
cast<AtomicCmpXchgInst>(I).getOrdering()));
@@ -1332,7 +1365,7 @@
case Instruction::AtomicRMW:
Code = bitc::FUNC_CODE_INST_ATOMICRMW;
PushValueAndType(I.getOperand(0), InstID, Vals, VE); // ptrty + ptr
- Vals.push_back(VE.getValueID(I.getOperand(1))); // val.
+ pushValue(I.getOperand(1), InstID, Vals, VE); // val.
Vals.push_back(GetEncodedRMWOperation(
cast<AtomicRMWInst>(I).getOperation()));
Vals.push_back(cast<AtomicRMWInst>(I).isVolatile());
@@ -1357,8 +1390,13 @@
PushValueAndType(CI.getCalledValue(), InstID, Vals, VE); // Callee
// Emit value #'s for the fixed parameters.
- for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
- Vals.push_back(VE.getValueID(CI.getArgOperand(i))); // fixed param.
+ for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
+ // Check for labels (can happen with asm labels).
+ if (FTy->getParamType(i)->isLabelTy())
+ Vals.push_back(VE.getValueID(CI.getArgOperand(i)));
+ else
+ pushValue(CI.getArgOperand(i), InstID, Vals, VE); // fixed param.
+ }
// Emit type/value pairs for varargs params.
if (FTy->isVarArg()) {
@@ -1371,7 +1409,7 @@
case Instruction::VAArg:
Code = bitc::FUNC_CODE_INST_VAARG;
Vals.push_back(VE.getTypeID(I.getOperand(0)->getType())); // valistty
- Vals.push_back(VE.getValueID(I.getOperand(0))); // valist.
+ pushValue(I.getOperand(0), InstID, Vals, VE); // valist.
Vals.push_back(VE.getTypeID(I.getType())); // restype.
break;
}
@@ -1513,8 +1551,8 @@
// Emit blockinfo, which defines the standard abbreviations etc.
static void WriteBlockInfo(const ValueEnumerator &VE, BitstreamWriter &Stream) {
// We only want to emit block info records for blocks that have multiple
- // instances: CONSTANTS_BLOCK, FUNCTION_BLOCK and VALUE_SYMTAB_BLOCK. Other
- // blocks can defined their abbrevs inline.
+ // instances: CONSTANTS_BLOCK, FUNCTION_BLOCK and VALUE_SYMTAB_BLOCK.
+ // Other blocks can define their abbrevs inline.
Stream.EnterBlockInfoBlock(2);
{ // 8-bit fixed-width VST_ENTRY/VST_BBENTRY strings.
@@ -1772,12 +1810,10 @@
static void WriteModule(const Module *M, BitstreamWriter &Stream) {
Stream.EnterSubblock(bitc::MODULE_BLOCK_ID, 3);
- // Emit the version number if it is non-zero.
- if (CurVersion) {
- SmallVector<unsigned, 1> Vals;
- Vals.push_back(CurVersion);
- Stream.EmitRecord(bitc::MODULE_CODE_VERSION, Vals);
- }
+ SmallVector<unsigned, 1> Vals;
+ unsigned CurVersion = 1;
+ Vals.push_back(CurVersion);
+ Stream.EmitRecord(bitc::MODULE_CODE_VERSION, Vals);
// Analyze the module, enumerating globals, functions, etc.
ValueEnumerator VE(M);
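
On the writer side, PushValueAndType and the new pushValue helpers store InstID - ValID, which stays small for recently defined values; PHI incoming values can be forward references, so their difference may be negative and is emitted sign-rotated instead. A sketch of both computations, reusing the encoding shown above; the function names are mine:

    #include <cassert>
    #include <cstdint>

    unsigned relativeOperand(unsigned InstID, unsigned ValID) {
      return InstID - ValID;  // unsigned wrap; the reader undoes it the same way
    }

    uint64_t signedRelativeOperand(unsigned InstID, unsigned ValID) {
      int64_t Diff = (int64_t)InstID - (int64_t)ValID;  // may be negative
      return Diff >= 0 ? (uint64_t)Diff << 1            // sign-rotate, as in
                       : ((uint64_t)-Diff << 1) | 1;    // emitSignedInt64
    }

    int main() {
      assert(relativeOperand(10, 7) == 3);         // backward reference
      assert(signedRelativeOperand(10, 12) == 5);  // forward ref: -2 -> 5
    }
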
Modified: llvm/branches/AMDILBackend/lib/Bitcode/Writer/ValueEnumerator.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/Bitcode/Writer/ValueEnumerator.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/Bitcode/Writer/ValueEnumerator.h (original)
+++ llvm/branches/AMDILBackend/lib/Bitcode/Writer/ValueEnumerator.h Tue Jan 15 11:16:16 2013
@@ -78,9 +78,9 @@
unsigned FirstFuncConstantID;
unsigned FirstInstID;
-
- ValueEnumerator(const ValueEnumerator &); // DO NOT IMPLEMENT
- void operator=(const ValueEnumerator &); // DO NOT IMPLEMENT
+
+ ValueEnumerator(const ValueEnumerator &) LLVM_DELETED_FUNCTION;
+ void operator=(const ValueEnumerator &) LLVM_DELETED_FUNCTION;
public:
ValueEnumerator(const Module *M);
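
The DO NOT IMPLEMENT comments are replaced by LLVM_DELETED_FUNCTION throughout this commit. On a C++11 compiler the macro expands to "= delete", so an accidental copy becomes a compile-time error rather than a link-time one; older compilers fall back to the declared-but-never-defined idiom. A simplified stand-in — the real definition lives in llvm/Support/Compiler.h and uses feature tests:

    #if __cplusplus >= 201103L
    #define LLVM_DELETED_FUNCTION = delete
    #else
    #define LLVM_DELETED_FUNCTION   // old compilers: declare, never define
    #endif

    class NonCopyable {
      NonCopyable(const NonCopyable &) LLVM_DELETED_FUNCTION;
      void operator=(const NonCopyable &) LLVM_DELETED_FUNCTION;
    public:
      NonCopyable() {}
    };

    int main() {
      NonCopyable A;
      (void)A;
      // NonCopyable B(A);  // error: use of deleted function (C++11)
    }
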
Modified: llvm/branches/AMDILBackend/lib/CodeGen/AggressiveAntiDepBreaker.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/AggressiveAntiDepBreaker.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/AggressiveAntiDepBreaker.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/AggressiveAntiDepBreaker.cpp Tue Jan 15 11:16:16 2013
@@ -635,7 +635,7 @@
--R;
const unsigned NewSuperReg = Order[R];
// Don't consider non-allocatable registers
- if (!RegClassInfo.isAllocatable(NewSuperReg)) continue;
+ if (!MRI.isAllocatable(NewSuperReg)) continue;
// Don't replace a register with itself.
if (NewSuperReg == SuperReg) continue;
@@ -818,7 +818,7 @@
DEBUG(dbgs() << "\tAntidep reg: " << TRI->getName(AntiDepReg));
assert(AntiDepReg != 0 && "Anti-dependence on reg0?");
- if (!RegClassInfo.isAllocatable(AntiDepReg)) {
+ if (!MRI.isAllocatable(AntiDepReg)) {
// Don't break anti-dependencies on non-allocatable registers.
DEBUG(dbgs() << " (non-allocatable)\n");
continue;
Modified: llvm/branches/AMDILBackend/lib/CodeGen/AllocationOrder.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/AllocationOrder.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/AllocationOrder.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/AllocationOrder.cpp Tue Jan 15 11:16:16 2013
@@ -29,6 +29,7 @@
const TargetRegisterClass *RC = VRM.getRegInfo().getRegClass(VirtReg);
std::pair<unsigned, unsigned> HintPair =
VRM.getRegInfo().getRegAllocationHint(VirtReg);
+ const MachineRegisterInfo &MRI = VRM.getRegInfo();
// HintPair.second is a register, phys or virt.
Hint = HintPair.second;
@@ -52,7 +53,7 @@
unsigned *P = new unsigned[Order.size()];
Begin = P;
for (unsigned i = 0; i != Order.size(); ++i)
- if (!RCI.isReserved(Order[i]))
+ if (!MRI.isReserved(Order[i]))
*P++ = Order[i];
End = P;
@@ -69,7 +70,7 @@
// The hint must be a valid physreg for allocation.
if (Hint && (!TargetRegisterInfo::isPhysicalRegister(Hint) ||
- !RC->contains(Hint) || RCI.isReserved(Hint)))
+ !RC->contains(Hint) || MRI.isReserved(Hint)))
Hint = 0;
}
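
These two CodeGen files show the same migration: reserved- and allocatable-register queries move from RegisterClassInfo onto MachineRegisterInfo. The filtering pattern, sketched with a small stand-in rather than the real MachineRegisterInfo:

    #include <vector>

    struct RegInfoStub {                // stand-in for MachineRegisterInfo
      std::vector<bool> Reserved;
      bool isReserved(unsigned R) const { return Reserved[R]; }
    };

    // Keep only registers the target has not reserved, preserving order,
    // as AllocationOrder does above with MRI.isReserved.
    std::vector<unsigned> filterOrder(const std::vector<unsigned> &Order,
                                      const RegInfoStub &MRI) {
      std::vector<unsigned> Out;
      for (unsigned i = 0, e = Order.size(); i != e; ++i)
        if (!MRI.isReserved(Order[i]))
          Out.push_back(Order[i]);
      return Out;
    }
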
Modified: llvm/branches/AMDILBackend/lib/CodeGen/Analysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/Analysis.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/Analysis.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/Analysis.cpp Tue Jan 15 11:16:16 2013
@@ -21,7 +21,7 @@
#include "llvm/Module.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAG.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/ErrorHandling.h"
@@ -79,7 +79,7 @@
uint64_t StartingOffset) {
// Given a struct type, recursively traverse the elements.
if (StructType *STy = dyn_cast<StructType>(Ty)) {
- const StructLayout *SL = TLI.getTargetData()->getStructLayout(STy);
+ const StructLayout *SL = TLI.getDataLayout()->getStructLayout(STy);
for (StructType::element_iterator EB = STy->element_begin(),
EI = EB,
EE = STy->element_end();
@@ -91,7 +91,7 @@
// Given an array type, recursively traverse the elements.
if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
Type *EltTy = ATy->getElementType();
- uint64_t EltSize = TLI.getTargetData()->getTypeAllocSize(EltTy);
+ uint64_t EltSize = TLI.getDataLayout()->getTypeAllocSize(EltTy);
for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
StartingOffset + i * EltSize);
@@ -314,11 +314,13 @@
// the return. Ignore noalias because it doesn't affect the call sequence.
const Function *F = ExitBB->getParent();
Attributes CallerRetAttr = F->getAttributes().getRetAttributes();
- if ((CalleeRetAttr ^ CallerRetAttr) & ~Attribute::NoAlias)
+ if (AttrBuilder(CalleeRetAttr).removeAttribute(Attributes::NoAlias) !=
+ AttrBuilder(CallerRetAttr).removeAttribute(Attributes::NoAlias))
return false;
// It's not safe to eliminate the sign / zero extension of the return value.
- if ((CallerRetAttr & Attribute::ZExt) || (CallerRetAttr & Attribute::SExt))
+ if (CallerRetAttr.hasAttribute(Attributes::ZExt) ||
+ CallerRetAttr.hasAttribute(Attributes::SExt))
return false;
// Otherwise, make sure the unmodified return value of I is the return value.
@@ -354,11 +356,13 @@
// Conservatively require the attributes of the call to match those of
// the return. Ignore noalias because it doesn't affect the call sequence.
Attributes CallerRetAttr = F->getAttributes().getRetAttributes();
- if (CallerRetAttr & ~Attribute::NoAlias)
+ if (AttrBuilder(CallerRetAttr)
+ .removeAttribute(Attributes::NoAlias).hasAttributes())
return false;
// It's not safe to eliminate the sign / zero extension of the return value.
- if ((CallerRetAttr & Attribute::ZExt) || (CallerRetAttr & Attribute::SExt))
+ if (CallerRetAttr.hasAttribute(Attributes::ZExt) ||
+ CallerRetAttr.hasAttribute(Attributes::SExt))
return false;
// Check if the only use is a function return node.
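
The Analysis.cpp hunks rewrite the tail-call compatibility test: the old XOR of attribute bitmasks becomes a comparison of AttrBuilder copies with NoAlias removed, plus the unchanged ZExt/SExt check. A sketch of the predicate using only the calls visible above; retAttrsCompatible is an illustrative name:

    #include "llvm/Attributes.h"
    using namespace llvm;

    static bool retAttrsCompatible(Attributes CalleeRet, Attributes CallerRet) {
      // The sets must match once NoAlias is stripped from both sides.
      if (AttrBuilder(CalleeRet).removeAttribute(Attributes::NoAlias) !=
          AttrBuilder(CallerRet).removeAttribute(Attributes::NoAlias))
        return false;
      // Dropping a sign/zero extension of the return value is unsafe.
      return !CallerRet.hasAttribute(Attributes::ZExt) &&
             !CallerRet.hasAttribute(Attributes::SExt);
    }
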
Modified: llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/ARMException.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/ARMException.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/ARMException.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/ARMException.cpp Tue Jan 15 11:16:16 2013
@@ -24,7 +24,7 @@
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
Modified: llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/AsmPrinter.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/AsmPrinter.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/AsmPrinter.cpp Tue Jan 15 11:16:16 2013
@@ -33,7 +33,7 @@
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
@@ -67,7 +67,7 @@
/// getGVAlignmentLog2 - Return the alignment to use for the specified global
/// value in log2 form. This rounds up to the preferred alignment if possible
/// and legal.
-static unsigned getGVAlignmentLog2(const GlobalValue *GV, const TargetData &TD,
+static unsigned getGVAlignmentLog2(const GlobalValue *GV, const DataLayout &TD,
unsigned InBits = 0) {
unsigned NumBits = 0;
if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
@@ -131,9 +131,9 @@
}
-/// getTargetData - Return information about data layout.
-const TargetData &AsmPrinter::getTargetData() const {
- return *TM.getTargetData();
+/// getDataLayout - Return information about data layout.
+const DataLayout &AsmPrinter::getDataLayout() const {
+ return *TM.getDataLayout();
}
/// getCurrentSection() - Return the current section we are emitting to.
@@ -160,7 +160,7 @@
const_cast<TargetLoweringObjectFile&>(getObjFileLowering())
.Initialize(OutContext, TM);
- Mang = new Mangler(OutContext, *TM.getTargetData());
+ Mang = new Mangler(OutContext, *TM.getDataLayout());
// Allow the target to emit any magic that it wants at the start of the file.
EmitStartOfAsmFile(M);
@@ -213,16 +213,16 @@
case GlobalValue::CommonLinkage:
case GlobalValue::LinkOnceAnyLinkage:
case GlobalValue::LinkOnceODRLinkage:
+ case GlobalValue::LinkOnceODRAutoHideLinkage:
case GlobalValue::WeakAnyLinkage:
case GlobalValue::WeakODRLinkage:
case GlobalValue::LinkerPrivateWeakLinkage:
- case GlobalValue::LinkerPrivateWeakDefAutoLinkage:
if (MAI->getWeakDefDirective() != 0) {
// .globl _foo
OutStreamer.EmitSymbolAttribute(GVSym, MCSA_Global);
if ((GlobalValue::LinkageTypes)Linkage !=
- GlobalValue::LinkerPrivateWeakDefAutoLinkage)
+ GlobalValue::LinkOnceODRAutoHideLinkage)
// .weak_definition _foo
OutStreamer.EmitSymbolAttribute(GVSym, MCSA_WeakDefinition);
else
@@ -280,7 +280,7 @@
SectionKind GVKind = TargetLoweringObjectFile::getKindForGlobal(GV, TM);
- const TargetData *TD = TM.getTargetData();
+ const DataLayout *TD = TM.getDataLayout();
uint64_t Size = TD->getTypeAllocSize(GV->getType()->getElementType());
// If the alignment is specified, we *must* obey it. Overaligning a global
@@ -312,8 +312,8 @@
return;
}
- if (MAI->getLCOMMDirectiveType() != LCOMM::None &&
- (MAI->getLCOMMDirectiveType() != LCOMM::NoAlignment || Align == 1)) {
+ if (Align == 1 ||
+ MAI->getLCOMMDirectiveAlignmentType() != LCOMM::NoAlignment) {
// .lcomm _foo, 42
OutStreamer.EmitLocalCommonSymbol(GVSym, Size, Align);
return;
@@ -482,9 +482,8 @@
"' label emitted multiple times to assembly file");
}
-
-/// EmitComments - Pretty-print comments for instructions.
-static void EmitComments(const MachineInstr &MI, raw_ostream &CommentOS) {
+/// emitComments - Pretty-print comments for instructions.
+static void emitComments(const MachineInstr &MI, raw_ostream &CommentOS) {
const MachineFunction *MF = MI.getParent()->getParent();
const TargetMachine &TM = MF->getTarget();
@@ -519,16 +518,16 @@
CommentOS << " Reload Reuse\n";
}
-/// EmitImplicitDef - This method emits the specified machine instruction
+/// emitImplicitDef - This method emits the specified machine instruction
/// that is an implicit def.
-static void EmitImplicitDef(const MachineInstr *MI, AsmPrinter &AP) {
+static void emitImplicitDef(const MachineInstr *MI, AsmPrinter &AP) {
unsigned RegNo = MI->getOperand(0).getReg();
AP.OutStreamer.AddComment(Twine("implicit-def: ") +
AP.TM.getRegisterInfo()->getName(RegNo));
AP.OutStreamer.AddBlankLine();
}
-static void EmitKill(const MachineInstr *MI, AsmPrinter &AP) {
+static void emitKill(const MachineInstr *MI, AsmPrinter &AP) {
std::string Str = "kill:";
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &Op = MI->getOperand(i);
@@ -541,10 +540,10 @@
AP.OutStreamer.AddBlankLine();
}
-/// EmitDebugValueComment - This method handles the target-independent form
+/// emitDebugValueComment - This method handles the target-independent form
/// of DBG_VALUE, returning true if it was able to do so. A false return
/// means the target will need to handle MI in EmitInstruction.
-static bool EmitDebugValueComment(const MachineInstr *MI, AsmPrinter &AP) {
+static bool emitDebugValueComment(const MachineInstr *MI, AsmPrinter &AP) {
// This code handles only the 3-operand target-independent form.
if (MI->getNumOperands() != 3)
return false;
@@ -674,7 +673,7 @@
}
if (isVerbose())
- EmitComments(*II, OutStreamer.GetCommentOS());
+ emitComments(*II, OutStreamer.GetCommentOS());
switch (II->getOpcode()) {
case TargetOpcode::PROLOG_LABEL:
@@ -690,15 +689,15 @@
break;
case TargetOpcode::DBG_VALUE:
if (isVerbose()) {
- if (!EmitDebugValueComment(II, *this))
+ if (!emitDebugValueComment(II, *this))
EmitInstruction(II);
}
break;
case TargetOpcode::IMPLICIT_DEF:
- if (isVerbose()) EmitImplicitDef(II, *this);
+ if (isVerbose()) emitImplicitDef(II, *this);
break;
case TargetOpcode::KILL:
- if (isVerbose()) EmitKill(II, *this);
+ if (isVerbose()) emitKill(II, *this);
break;
default:
if (!TM.hasMCUseLoc())
@@ -992,7 +991,7 @@
Kind = SectionKind::getReadOnlyWithRelLocal();
break;
case 0:
- switch (TM.getTargetData()->getTypeAllocSize(CPE.getType())) {
+ switch (TM.getDataLayout()->getTypeAllocSize(CPE.getType())) {
case 4: Kind = SectionKind::getMergeableConst4(); break;
case 8: Kind = SectionKind::getMergeableConst8(); break;
case 16: Kind = SectionKind::getMergeableConst16();break;
@@ -1038,7 +1037,7 @@
OutStreamer.EmitFill(NewOffset - Offset, 0/*fillval*/, 0/*addrspace*/);
Type *Ty = CPE.getType();
- Offset = NewOffset + TM.getTargetData()->getTypeAllocSize(Ty);
+ Offset = NewOffset + TM.getDataLayout()->getTypeAllocSize(Ty);
OutStreamer.EmitLabel(GetCPISymbol(CPI));
if (CPE.isMachineConstantPoolEntry())
@@ -1081,7 +1080,12 @@
JTInDiffSection = true;
}
- EmitAlignment(Log2_32(MJTI->getEntryAlignment(*TM.getTargetData())));
+ EmitAlignment(Log2_32(MJTI->getEntryAlignment(*TM.getDataLayout())));
+
+ // Jump tables in code sections are marked with a data_region directive
+ // where that's supported.
+ if (!JTInDiffSection)
+ OutStreamer.EmitDataRegion(MCDR_DataRegionJT32);
for (unsigned JTI = 0, e = JT.size(); JTI != e; ++JTI) {
const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
@@ -1123,6 +1127,8 @@
for (unsigned ii = 0, ee = JTBBs.size(); ii != ee; ++ii)
EmitJumpTableEntry(MJTI, JTBBs[ii], JTI);
}
+ if (!JTInDiffSection)
+ OutStreamer.EmitDataRegion(MCDR_DataRegionEnd);
}
/// EmitJumpTableEntry - Emit a jump table entry for the specified MBB to the
@@ -1190,7 +1196,7 @@
assert(Value && "Unknown entry kind!");
- unsigned EntrySize = MJTI->getEntrySize(*TM.getTargetData());
+ unsigned EntrySize = MJTI->getEntrySize(*TM.getDataLayout());
OutStreamer.EmitValue(Value, EntrySize, /*addrspace*/0);
}
@@ -1292,7 +1298,7 @@
}
// Emit the function pointers in the target-specific order
- const TargetData *TD = TM.getTargetData();
+ const DataLayout *TD = TM.getDataLayout();
unsigned Align = Log2_32(TD->getPointerPrefAlignment());
std::stable_sort(Structors.begin(), Structors.end(), priority_order);
for (unsigned i = 0, e = Structors.size(); i != e; ++i) {
@@ -1408,7 +1414,7 @@
// if required for correctness.
//
void AsmPrinter::EmitAlignment(unsigned NumBits, const GlobalValue *GV) const {
- if (GV) NumBits = getGVAlignmentLog2(GV, *TM.getTargetData(), NumBits);
+ if (GV) NumBits = getGVAlignmentLog2(GV, *TM.getDataLayout(), NumBits);
if (NumBits == 0) return; // 1-byte aligned: no need to emit alignment.
@@ -1422,9 +1428,9 @@
// Constant emission.
//===----------------------------------------------------------------------===//
-/// LowerConstant - Lower the specified LLVM Constant to an MCExpr.
+/// lowerConstant - Lower the specified LLVM Constant to an MCExpr.
///
-static const MCExpr *LowerConstant(const Constant *CV, AsmPrinter &AP) {
+static const MCExpr *lowerConstant(const Constant *CV, AsmPrinter &AP) {
MCContext &Ctx = AP.OutContext;
if (CV->isNullValue() || isa<UndefValue>(CV))
@@ -1447,12 +1453,12 @@
switch (CE->getOpcode()) {
default:
// If the code isn't optimized, there may be outstanding folding
- // opportunities. Attempt to fold the expression using TargetData as a
+ // opportunities. Attempt to fold the expression using DataLayout as a
// last resort before giving up.
if (Constant *C =
- ConstantFoldConstantExpression(CE, AP.TM.getTargetData()))
+ ConstantFoldConstantExpression(CE, AP.TM.getDataLayout()))
if (C != CE)
- return LowerConstant(C, AP);
+ return lowerConstant(C, AP);
// Otherwise report the problem to the user.
{
@@ -1464,21 +1470,20 @@
report_fatal_error(OS.str());
}
case Instruction::GetElementPtr: {
- const TargetData &TD = *AP.TM.getTargetData();
+ const DataLayout &TD = *AP.TM.getDataLayout();
// Generate a symbolic expression for the byte address
const Constant *PtrVal = CE->getOperand(0);
SmallVector<Value*, 8> IdxVec(CE->op_begin()+1, CE->op_end());
int64_t Offset = TD.getIndexedOffset(PtrVal->getType(), IdxVec);
- const MCExpr *Base = LowerConstant(CE->getOperand(0), AP);
+ const MCExpr *Base = lowerConstant(CE->getOperand(0), AP);
if (Offset == 0)
return Base;
// Truncate/sext the offset to the pointer size.
- if (TD.getPointerSizeInBits() != 64) {
- int SExtAmount = 64-TD.getPointerSizeInBits();
- Offset = (Offset << SExtAmount) >> SExtAmount;
- }
+ unsigned Width = TD.getPointerSizeInBits();
+ if (Width < 64)
+ Offset = SignExtend64(Offset, Width);
return MCBinaryExpr::CreateAdd(Base, MCConstantExpr::Create(Offset, Ctx),
Ctx);
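
The rewrite above replaces the open-coded shift pair with SignExtend64 from Support/MathExtras.h and skips the work entirely for 64-bit pointers. The two forms compute the same thing; a self-contained check (signExtend64 here is a local re-derivation, not the LLVM header):

    #include <cassert>
    #include <cstdint>

    // Same trick SignExtend64(X, B) uses: park the value's sign bit at
    // bit 63, then arithmetic-shift it back down.
    // (Valid for 1 <= B <= 63, which the Width < 64 guard ensures.)
    static int64_t signExtend64(uint64_t X, unsigned B) {
      return int64_t(X << (64 - B)) >> (64 - B);
    }

    int main() {
      // A 32-bit GEP offset of 0xFFFFFFFF is really -1 once truncated to
      // the pointer width.
      assert(signExtend64(0xFFFFFFFFull, 32) == -1);
      // Matches the removed form: (Offset << SExtAmount) >> SExtAmount.
      uint64_t Offset = 0xFFFFFFFFull;
      unsigned SExtAmount = 64 - 32;
      assert((int64_t(Offset << SExtAmount) >> SExtAmount) ==
             signExtend64(Offset, 32));
      return 0;
    }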
@@ -1491,26 +1496,26 @@
// is reasonable to treat their delta as a 32-bit value.
// FALL THROUGH.
case Instruction::BitCast:
- return LowerConstant(CE->getOperand(0), AP);
+ return lowerConstant(CE->getOperand(0), AP);
case Instruction::IntToPtr: {
- const TargetData &TD = *AP.TM.getTargetData();
+ const DataLayout &TD = *AP.TM.getDataLayout();
// Handle casts to pointers by changing them into casts to the appropriate
// integer type. This promotes constant folding and simplifies this code.
Constant *Op = CE->getOperand(0);
Op = ConstantExpr::getIntegerCast(Op, TD.getIntPtrType(CV->getContext()),
false/*ZExt*/);
- return LowerConstant(Op, AP);
+ return lowerConstant(Op, AP);
}
case Instruction::PtrToInt: {
- const TargetData &TD = *AP.TM.getTargetData();
+ const DataLayout &TD = *AP.TM.getDataLayout();
// Support only foldable casts to/from pointers that can be eliminated by
// changing the pointer to the appropriately sized integer type.
Constant *Op = CE->getOperand(0);
Type *Ty = CE->getType();
- const MCExpr *OpExpr = LowerConstant(Op, AP);
+ const MCExpr *OpExpr = lowerConstant(Op, AP);
// We can emit the pointer value into this slot if the slot is an
// integer slot equal to the size of the pointer.
@@ -1536,8 +1541,8 @@
case Instruction::And:
case Instruction::Or:
case Instruction::Xor: {
- const MCExpr *LHS = LowerConstant(CE->getOperand(0), AP);
- const MCExpr *RHS = LowerConstant(CE->getOperand(1), AP);
+ const MCExpr *LHS = lowerConstant(CE->getOperand(0), AP);
+ const MCExpr *RHS = lowerConstant(CE->getOperand(1), AP);
switch (CE->getOpcode()) {
default: llvm_unreachable("Unknown binary operator constant cast expr");
case Instruction::Add: return MCBinaryExpr::CreateAdd(LHS, RHS, Ctx);
@@ -1554,7 +1559,7 @@
}
}
-static void EmitGlobalConstantImpl(const Constant *C, unsigned AddrSpace,
+static void emitGlobalConstantImpl(const Constant *C, unsigned AddrSpace,
AsmPrinter &AP);
/// isRepeatedByteSequence - Determine whether the given value is
@@ -1578,7 +1583,7 @@
if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
if (CI->getBitWidth() > 64) return -1;
- uint64_t Size = TM.getTargetData()->getTypeAllocSize(V->getType());
+ uint64_t Size = TM.getDataLayout()->getTypeAllocSize(V->getType());
uint64_t Value = CI->getZExtValue();
// Make sure the constant is at least 8 bits long and has a power
@@ -1616,13 +1621,13 @@
return -1;
}
-static void EmitGlobalConstantDataSequential(const ConstantDataSequential *CDS,
+static void emitGlobalConstantDataSequential(const ConstantDataSequential *CDS,
unsigned AddrSpace,AsmPrinter &AP){
// See if we can aggregate this into a .fill, if so, emit it as such.
int Value = isRepeatedByteSequence(CDS, AP.TM);
if (Value != -1) {
- uint64_t Bytes = AP.TM.getTargetData()->getTypeAllocSize(CDS->getType());
+ uint64_t Bytes = AP.TM.getDataLayout()->getTypeAllocSize(CDS->getType());
// Don't emit a 1-byte object as a .fill.
if (Bytes > 1)
return AP.OutStreamer.EmitFill(Bytes, Value, AddrSpace);
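
isRepeatedByteSequence is what lets the emitter collapse a constant whose memory image is a single repeated byte into one .fill directive. The essence of the test, reduced to a self-contained form (the byte image is supplied directly rather than derived from a Constant):

    #include <cstdint>
    #include <vector>

    // Returns the repeated byte value, or -1 if the image is empty or not
    // uniform -- mirroring the -1 "can't do it" convention above.
    static int repeatedByteValue(const std::vector<uint8_t> &Image) {
      if (Image.empty())
        return -1;
      for (size_t i = 1; i != Image.size(); ++i)
        if (Image[i] != Image[0])
          return -1;
      return Image[0];
    }
    // A 16-byte image of 0xAB then becomes: .fill 16, 1, 0xAB
    // (and, per the code above, 1-byte objects are still emitted directly).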
@@ -1672,7 +1677,7 @@
}
}
- const TargetData &TD = *AP.TM.getTargetData();
+ const DataLayout &TD = *AP.TM.getDataLayout();
unsigned Size = TD.getTypeAllocSize(CDS->getType());
unsigned EmittedSize = TD.getTypeAllocSize(CDS->getType()->getElementType()) *
CDS->getNumElements();
@@ -1681,28 +1686,28 @@
}
-static void EmitGlobalConstantArray(const ConstantArray *CA, unsigned AddrSpace,
+static void emitGlobalConstantArray(const ConstantArray *CA, unsigned AddrSpace,
AsmPrinter &AP) {
// See if we can aggregate some values. Make sure it can be
// represented as a series of bytes of the constant value.
int Value = isRepeatedByteSequence(CA, AP.TM);
if (Value != -1) {
- uint64_t Bytes = AP.TM.getTargetData()->getTypeAllocSize(CA->getType());
+ uint64_t Bytes = AP.TM.getDataLayout()->getTypeAllocSize(CA->getType());
AP.OutStreamer.EmitFill(Bytes, Value, AddrSpace);
}
else {
for (unsigned i = 0, e = CA->getNumOperands(); i != e; ++i)
- EmitGlobalConstantImpl(CA->getOperand(i), AddrSpace, AP);
+ emitGlobalConstantImpl(CA->getOperand(i), AddrSpace, AP);
}
}
-static void EmitGlobalConstantVector(const ConstantVector *CV,
+static void emitGlobalConstantVector(const ConstantVector *CV,
unsigned AddrSpace, AsmPrinter &AP) {
for (unsigned i = 0, e = CV->getType()->getNumElements(); i != e; ++i)
- EmitGlobalConstantImpl(CV->getOperand(i), AddrSpace, AP);
+ emitGlobalConstantImpl(CV->getOperand(i), AddrSpace, AP);
- const TargetData &TD = *AP.TM.getTargetData();
+ const DataLayout &TD = *AP.TM.getDataLayout();
unsigned Size = TD.getTypeAllocSize(CV->getType());
unsigned EmittedSize = TD.getTypeAllocSize(CV->getType()->getElementType()) *
CV->getType()->getNumElements();
@@ -1710,10 +1715,10 @@
AP.OutStreamer.EmitZeros(Padding, AddrSpace);
}
-static void EmitGlobalConstantStruct(const ConstantStruct *CS,
+static void emitGlobalConstantStruct(const ConstantStruct *CS,
unsigned AddrSpace, AsmPrinter &AP) {
// Print the fields in successive locations. Pad to align if needed!
- const TargetData *TD = AP.TM.getTargetData();
+ const DataLayout *TD = AP.TM.getDataLayout();
unsigned Size = TD->getTypeAllocSize(CS->getType());
const StructLayout *Layout = TD->getStructLayout(CS->getType());
uint64_t SizeSoFar = 0;
@@ -1727,7 +1732,7 @@
SizeSoFar += FieldSize + PadSize;
// Now print the actual field value.
- EmitGlobalConstantImpl(Field, AddrSpace, AP);
+ emitGlobalConstantImpl(Field, AddrSpace, AP);
// Insert padding - this may include padding to increase the size of the
// current field up to the ABI size (if the struct is not packed) as well
@@ -1738,7 +1743,7 @@
"Layout of constant struct may be incorrect!");
}
-static void EmitGlobalConstantFP(const ConstantFP *CFP, unsigned AddrSpace,
+static void emitGlobalConstantFP(const ConstantFP *CFP, unsigned AddrSpace,
AsmPrinter &AP) {
if (CFP->getType()->isHalfTy()) {
if (AP.isVerbose()) {
@@ -1793,7 +1798,7 @@
<< DoubleVal.convertToDouble() << '\n';
}
- if (AP.TM.getTargetData()->isBigEndian()) {
+ if (AP.TM.getDataLayout()->isBigEndian()) {
AP.OutStreamer.EmitIntValue(p[1], 2, AddrSpace);
AP.OutStreamer.EmitIntValue(p[0], 8, AddrSpace);
} else {
@@ -1802,7 +1807,7 @@
}
// Emit the tail padding for the long double.
- const TargetData &TD = *AP.TM.getTargetData();
+ const DataLayout &TD = *AP.TM.getDataLayout();
AP.OutStreamer.EmitZeros(TD.getTypeAllocSize(CFP->getType()) -
TD.getTypeStoreSize(CFP->getType()), AddrSpace);
return;
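
The tail padding above covers the gap between a long double's stored bits and its allocated size. Concretely, for the x87 80-bit format the store size is 10 bytes while the alloc size depends on the ABI — a small worked example with those (assumed, not queried from a real DataLayout) numbers:

    #include <cstdio>

    int main() {
      unsigned StoreSize = 10;     // getTypeStoreSize: 80 bits of payload
      unsigned AllocX86_64 = 16;   // getTypeAllocSize under 16-byte align
      unsigned AllocI386 = 12;     // getTypeAllocSize under 4-byte align
      std::printf("zeros on x86-64: %u\n", AllocX86_64 - StoreSize); // 6
      std::printf("zeros on i386:   %u\n", AllocI386 - StoreSize);   // 2
      return 0;
    }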
@@ -1814,7 +1819,7 @@
// API needed to prevent premature destruction.
APInt API = CFP->getValueAPF().bitcastToAPInt();
const uint64_t *p = API.getRawData();
- if (AP.TM.getTargetData()->isBigEndian()) {
+ if (AP.TM.getDataLayout()->isBigEndian()) {
AP.OutStreamer.EmitIntValue(p[0], 8, AddrSpace);
AP.OutStreamer.EmitIntValue(p[1], 8, AddrSpace);
} else {
@@ -1823,9 +1828,9 @@
}
}
-static void EmitGlobalConstantLargeInt(const ConstantInt *CI,
+static void emitGlobalConstantLargeInt(const ConstantInt *CI,
unsigned AddrSpace, AsmPrinter &AP) {
- const TargetData *TD = AP.TM.getTargetData();
+ const DataLayout *TD = AP.TM.getDataLayout();
unsigned BitWidth = CI->getBitWidth();
assert((BitWidth & 63) == 0 && "only support multiples of 64-bits");
@@ -1839,9 +1844,9 @@
}
}
-static void EmitGlobalConstantImpl(const Constant *CV, unsigned AddrSpace,
+static void emitGlobalConstantImpl(const Constant *CV, unsigned AddrSpace,
AsmPrinter &AP) {
- const TargetData *TD = AP.TM.getTargetData();
+ const DataLayout *TD = AP.TM.getDataLayout();
uint64_t Size = TD->getTypeAllocSize(CV->getType());
if (isa<ConstantAggregateZero>(CV) || isa<UndefValue>(CV))
return AP.OutStreamer.EmitZeros(Size, AddrSpace);
@@ -1858,13 +1863,13 @@
AP.OutStreamer.EmitIntValue(CI->getZExtValue(), Size, AddrSpace);
return;
default:
- EmitGlobalConstantLargeInt(CI, AddrSpace, AP);
+ emitGlobalConstantLargeInt(CI, AddrSpace, AP);
return;
}
}
if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CV))
- return EmitGlobalConstantFP(CFP, AddrSpace, AP);
+ return emitGlobalConstantFP(CFP, AddrSpace, AP);
if (isa<ConstantPointerNull>(CV)) {
AP.OutStreamer.EmitIntValue(0, Size, AddrSpace);
@@ -1872,19 +1877,19 @@
}
if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(CV))
- return EmitGlobalConstantDataSequential(CDS, AddrSpace, AP);
+ return emitGlobalConstantDataSequential(CDS, AddrSpace, AP);
if (const ConstantArray *CVA = dyn_cast<ConstantArray>(CV))
- return EmitGlobalConstantArray(CVA, AddrSpace, AP);
+ return emitGlobalConstantArray(CVA, AddrSpace, AP);
if (const ConstantStruct *CVS = dyn_cast<ConstantStruct>(CV))
- return EmitGlobalConstantStruct(CVS, AddrSpace, AP);
+ return emitGlobalConstantStruct(CVS, AddrSpace, AP);
if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(CV)) {
// Look through bitcasts, which might not be able to be MCExpr'ized (e.g. of
// vectors).
if (CE->getOpcode() == Instruction::BitCast)
- return EmitGlobalConstantImpl(CE->getOperand(0), AddrSpace, AP);
+ return emitGlobalConstantImpl(CE->getOperand(0), AddrSpace, AP);
if (Size > 8) {
// If the constant expression's size is greater than 64-bits, then we have
@@ -1892,23 +1897,23 @@
// that way.
Constant *New = ConstantFoldConstantExpression(CE, TD);
if (New && New != CE)
- return EmitGlobalConstantImpl(New, AddrSpace, AP);
+ return emitGlobalConstantImpl(New, AddrSpace, AP);
}
}
if (const ConstantVector *V = dyn_cast<ConstantVector>(CV))
- return EmitGlobalConstantVector(V, AddrSpace, AP);
+ return emitGlobalConstantVector(V, AddrSpace, AP);
// Otherwise, it must be a ConstantExpr. Lower it to an MCExpr, then emit it
// thread the streamer with EmitValue.
- AP.OutStreamer.EmitValue(LowerConstant(CV, AP), Size, AddrSpace);
+ AP.OutStreamer.EmitValue(lowerConstant(CV, AP), Size, AddrSpace);
}
/// EmitGlobalConstant - Print a general LLVM constant to the .s file.
void AsmPrinter::EmitGlobalConstant(const Constant *CV, unsigned AddrSpace) {
- uint64_t Size = TM.getTargetData()->getTypeAllocSize(CV->getType());
+ uint64_t Size = TM.getDataLayout()->getTypeAllocSize(CV->getType());
if (Size)
- EmitGlobalConstantImpl(CV, AddrSpace, *this);
+ emitGlobalConstantImpl(CV, AddrSpace, *this);
else if (MAI->hasSubsectionsViaSymbols()) {
// If the global has zero size, emit a single byte so that two labels don't
// look like they are at the same location.
@@ -2023,8 +2028,8 @@
}
}
-/// EmitBasicBlockLoopComments - Pretty-print comments for basic blocks.
-static void EmitBasicBlockLoopComments(const MachineBasicBlock &MBB,
+/// emitBasicBlockLoopComments - Pretty-print comments for basic blocks.
+static void emitBasicBlockLoopComments(const MachineBasicBlock &MBB,
const MachineLoopInfo *LI,
const AsmPrinter &AP) {
// Add loop depth information
@@ -2090,7 +2095,7 @@
if (const BasicBlock *BB = MBB->getBasicBlock())
if (BB->hasName())
OutStreamer.AddComment("%" + BB->getName());
- EmitBasicBlockLoopComments(*MBB, LI, *this);
+ emitBasicBlockLoopComments(*MBB, LI, *this);
}
// Print the main label for the block.
Modified: llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp Tue Jan 15 11:16:16 2013
@@ -18,7 +18,7 @@
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
@@ -112,7 +112,7 @@
switch (Encoding & 0x07) {
default: llvm_unreachable("Invalid encoded value.");
- case dwarf::DW_EH_PE_absptr: return TM.getTargetData()->getPointerSize();
+ case dwarf::DW_EH_PE_absptr: return TM.getDataLayout()->getPointerSize();
case dwarf::DW_EH_PE_udata2: return 2;
case dwarf::DW_EH_PE_udata4: return 4;
case dwarf::DW_EH_PE_udata8: return 8;
Modified: llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp Tue Jan 15 11:16:16 2013
@@ -43,10 +43,10 @@
};
}
-/// SrcMgrDiagHandler - This callback is invoked when the SourceMgr for an
+/// srcMgrDiagHandler - This callback is invoked when the SourceMgr for an
/// inline asm has an error in it. diagInfo is a pointer to the SrcMgrDiagInfo
/// struct above.
-static void SrcMgrDiagHandler(const SMDiagnostic &Diag, void *diagInfo) {
+static void srcMgrDiagHandler(const SMDiagnostic &Diag, void *diagInfo) {
SrcMgrDiagInfo *DiagInfo = static_cast<SrcMgrDiagInfo *>(diagInfo);
assert(DiagInfo && "Diagnostic context not passed down?");
@@ -68,7 +68,8 @@
}
/// EmitInlineAsm - Emit a blob of inline asm to the output streamer.
-void AsmPrinter::EmitInlineAsm(StringRef Str, const MDNode *LocMDNode) const {
+void AsmPrinter::EmitInlineAsm(StringRef Str, const MDNode *LocMDNode,
+ InlineAsm::AsmDialect Dialect) const {
assert(!Str.empty() && "Can't emit empty inline asm block");
// Remember if the buffer is nul terminated or not so we can avoid a copy.
@@ -91,12 +92,12 @@
LLVMContext &LLVMCtx = MMI->getModule()->getContext();
bool HasDiagHandler = false;
if (LLVMCtx.getInlineAsmDiagnosticHandler() != 0) {
- // If the source manager has an issue, we arrange for SrcMgrDiagHandler
+ // If the source manager has an issue, we arrange for srcMgrDiagHandler
// to be invoked, getting DiagInfo passed into it.
DiagInfo.LocInfo = LocMDNode;
DiagInfo.DiagHandler = LLVMCtx.getInlineAsmDiagnosticHandler();
DiagInfo.DiagContext = LLVMCtx.getInlineAsmDiagnosticContext();
- SrcMgr.setDiagHandler(SrcMgrDiagHandler, &DiagInfo);
+ SrcMgr.setDiagHandler(srcMgrDiagHandler, &DiagInfo);
HasDiagHandler = true;
}
@@ -126,6 +127,7 @@
if (!TAP)
report_fatal_error("Inline asm not supported by this streamer because"
" we don't have an asm parser for this target\n");
+ Parser->setAssemblerDialect(Dialect);
Parser->setTargetParser(*TAP.get());
// Don't implicitly switch to the text section before the asm.
@@ -135,71 +137,113 @@
report_fatal_error("Error parsing inline asm\n");
}
+static void EmitMSInlineAsmStr(const char *AsmStr, const MachineInstr *MI,
+ MachineModuleInfo *MMI, int InlineAsmVariant,
+ AsmPrinter *AP, unsigned LocCookie,
+ raw_ostream &OS) {
+ // Switch to the inline assembly variant.
+ OS << "\t.intel_syntax\n\t";
-/// EmitInlineAsm - This method formats and emits the specified machine
-/// instruction that is an inline asm.
-void AsmPrinter::EmitInlineAsm(const MachineInstr *MI) const {
- assert(MI->isInlineAsm() && "printInlineAsm only works on inline asms");
-
+ const char *LastEmitted = AsmStr; // One past the last character emitted.
unsigned NumOperands = MI->getNumOperands();
- // Count the number of register definitions to find the asm string.
- unsigned NumDefs = 0;
- for (; MI->getOperand(NumDefs).isReg() && MI->getOperand(NumDefs).isDef();
- ++NumDefs)
- assert(NumDefs != NumOperands-2 && "No asm string?");
+ while (*LastEmitted) {
+ switch (*LastEmitted) {
+ default: {
+ // Not a special case, emit the string section literally.
+ const char *LiteralEnd = LastEmitted+1;
+ while (*LiteralEnd && *LiteralEnd != '{' && *LiteralEnd != '|' &&
+ *LiteralEnd != '}' && *LiteralEnd != '$' && *LiteralEnd != '\n')
+ ++LiteralEnd;
- assert(MI->getOperand(NumDefs).isSymbol() && "No asm string?");
+ OS.write(LastEmitted, LiteralEnd-LastEmitted);
+ LastEmitted = LiteralEnd;
+ break;
+ }
+ case '\n':
+ ++LastEmitted; // Consume newline character.
+ OS << '\n'; // Indent code with newline.
+ break;
+ case '$': {
+ ++LastEmitted; // Consume '$' character.
+ bool Done = true;
- // Disassemble the AsmStr, printing out the literal pieces, the operands, etc.
- const char *AsmStr = MI->getOperand(NumDefs).getSymbolName();
+ // Handle escapes.
+ switch (*LastEmitted) {
+ default: Done = false; break;
+ case '$':
+ ++LastEmitted; // Consume second '$' character.
+ break;
+ }
+ if (Done) break;
- // If this asmstr is empty, just print the #APP/#NOAPP markers.
- // These are useful to see where empty asm's wound up.
- if (AsmStr[0] == 0) {
- // Don't emit the comments if writing to a .o file.
- if (!OutStreamer.hasRawTextSupport()) return;
+ const char *IDStart = LastEmitted;
+ const char *IDEnd = IDStart;
+ while (*IDEnd >= '0' && *IDEnd <= '9') ++IDEnd;
- OutStreamer.EmitRawText(Twine("\t")+MAI->getCommentString()+
- MAI->getInlineAsmStart());
- OutStreamer.EmitRawText(Twine("\t")+MAI->getCommentString()+
- MAI->getInlineAsmEnd());
- return;
- }
+ unsigned Val;
+ if (StringRef(IDStart, IDEnd-IDStart).getAsInteger(10, Val))
+ report_fatal_error("Bad $ operand number in inline asm string: '" +
+ Twine(AsmStr) + "'");
+ LastEmitted = IDEnd;
- // Emit the #APP start marker. This has to happen even if verbose-asm isn't
- // enabled, so we use EmitRawText.
- if (OutStreamer.hasRawTextSupport())
- OutStreamer.EmitRawText(Twine("\t")+MAI->getCommentString()+
- MAI->getInlineAsmStart());
+ if (Val >= NumOperands-1)
+ report_fatal_error("Invalid $ operand number in inline asm string: '" +
+ Twine(AsmStr) + "'");
- // Get the !srcloc metadata node if we have it, and decode the loc cookie from
- // it.
- unsigned LocCookie = 0;
- const MDNode *LocMD = 0;
- for (unsigned i = MI->getNumOperands(); i != 0; --i) {
- if (MI->getOperand(i-1).isMetadata() &&
- (LocMD = MI->getOperand(i-1).getMetadata()) &&
- LocMD->getNumOperands() != 0) {
- if (const ConstantInt *CI = dyn_cast<ConstantInt>(LocMD->getOperand(0))) {
- LocCookie = CI->getZExtValue();
- break;
- }
- }
- }
+ // Okay, we finally have a value number. Ask the target to print this
+ // operand!
+ unsigned OpNo = InlineAsm::MIOp_FirstOperand;
- // Emit the inline asm to a temporary string so we can emit it through
- // EmitInlineAsm.
- SmallString<256> StringData;
- raw_svector_ostream OS(StringData);
+ bool Error = false;
- OS << '\t';
+ // Scan to find the machine operand number for the operand.
+ for (; Val; --Val) {
+ if (OpNo >= MI->getNumOperands()) break;
+ unsigned OpFlags = MI->getOperand(OpNo).getImm();
+ OpNo += InlineAsm::getNumOperandRegisters(OpFlags) + 1;
+ }
- // The variant of the current asmprinter.
- int AsmPrinterVariant = MAI->getAssemblerDialect();
+ // We may have a location metadata attached to the end of the
+ // instruction, and at no point should see metadata at any
+ // other point while processing. It's an error if so.
+ if (OpNo >= MI->getNumOperands() ||
+ MI->getOperand(OpNo).isMetadata()) {
+ Error = true;
+ } else {
+ unsigned OpFlags = MI->getOperand(OpNo).getImm();
+ ++OpNo; // Skip over the ID number.
+
+ if (InlineAsm::isMemKind(OpFlags)) {
+ Error = AP->PrintAsmMemoryOperand(MI, OpNo, InlineAsmVariant,
+ /*Modifier*/ 0, OS);
+ } else {
+ Error = AP->PrintAsmOperand(MI, OpNo, InlineAsmVariant,
+ /*Modifier*/ 0, OS);
+ }
+ }
+ if (Error) {
+ std::string msg;
+ raw_string_ostream Msg(msg);
+ Msg << "invalid operand in inline asm: '" << AsmStr << "'";
+ MMI->getModule()->getContext().emitError(LocCookie, Msg.str());
+ }
+ break;
+ }
+ }
+ }
+ OS << "\n\t.att_syntax\n" << (char)0; // null terminate string.
+}
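
The `$<number>` scan in EmitMSInlineAsmStr has to translate an asm-string operand number into a MachineInstr operand index, because inline-asm machine operands travel in groups of one flag word followed by that group's registers. A reduced sketch of the skip loop (the flag layout is a stand-in; the real decoding is InlineAsm::getNumOperandRegisters, and register slots occupy entries too — only entries landed on as group heads are read):

    #include <vector>

    struct OperandStub { unsigned NumRegs; };  // stands in for a flag word

    // Walk Val whole operand groups: each group is one flag word plus
    // NumRegs register operands packed behind it.
    static unsigned skipToGroup(const std::vector<OperandStub> &Ops,
                                unsigned Val) {
      unsigned OpNo = 0;
      for (; Val; --Val) {
        if (OpNo >= Ops.size())
          break;                        // $N names a nonexistent operand
        OpNo += Ops[OpNo].NumRegs + 1;  // skip the flag word + registers
      }
      return OpNo;
    }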
+static void EmitGCCInlineAsmStr(const char *AsmStr, const MachineInstr *MI,
+ MachineModuleInfo *MMI, int InlineAsmVariant,
+ int AsmPrinterVariant, AsmPrinter *AP,
+ unsigned LocCookie, raw_ostream &OS) {
int CurVariant = -1; // The number of the {.|.|.} region we are in.
const char *LastEmitted = AsmStr; // One past the last character emitted.
+ unsigned NumOperands = MI->getNumOperands();
+
+ OS << '\t';
while (*LastEmitted) {
switch (*LastEmitted) {
@@ -272,7 +316,7 @@
" string: '" + Twine(AsmStr) + "'");
std::string Val(StrStart, StrEnd);
- PrintSpecial(MI, OS, Val.c_str());
+ AP->PrintSpecial(MI, OS, Val.c_str());
LastEmitted = StrEnd+1;
break;
}
@@ -340,13 +384,12 @@
// FIXME: What if the operand isn't an MBB, report error?
OS << *MI->getOperand(OpNo).getMBB()->getSymbol();
else {
- AsmPrinter *AP = const_cast<AsmPrinter*>(this);
if (InlineAsm::isMemKind(OpFlags)) {
- Error = AP->PrintAsmMemoryOperand(MI, OpNo, AsmPrinterVariant,
+ Error = AP->PrintAsmMemoryOperand(MI, OpNo, InlineAsmVariant,
Modifier[0] ? Modifier : 0,
OS);
} else {
- Error = AP->PrintAsmOperand(MI, OpNo, AsmPrinterVariant,
+ Error = AP->PrintAsmOperand(MI, OpNo, InlineAsmVariant,
Modifier[0] ? Modifier : 0, OS);
}
}
@@ -363,7 +406,74 @@
}
}
OS << '\n' << (char)0; // null terminate string.
- EmitInlineAsm(OS.str(), LocMD);
+}
+
+/// EmitInlineAsm - This method formats and emits the specified machine
+/// instruction that is an inline asm.
+void AsmPrinter::EmitInlineAsm(const MachineInstr *MI) const {
+ assert(MI->isInlineAsm() && "printInlineAsm only works on inline asms");
+
+ // Count the number of register definitions to find the asm string.
+ unsigned NumDefs = 0;
+ for (; MI->getOperand(NumDefs).isReg() && MI->getOperand(NumDefs).isDef();
+ ++NumDefs)
+ assert(NumDefs != MI->getNumOperands()-2 && "No asm string?");
+
+ assert(MI->getOperand(NumDefs).isSymbol() && "No asm string?");
+
+ // Disassemble the AsmStr, printing out the literal pieces, the operands, etc.
+ const char *AsmStr = MI->getOperand(NumDefs).getSymbolName();
+
+ // If this asmstr is empty, just print the #APP/#NOAPP markers.
+ // These are useful to see where empty asm's wound up.
+ if (AsmStr[0] == 0) {
+ // Don't emit the comments if writing to a .o file.
+ if (!OutStreamer.hasRawTextSupport()) return;
+
+ OutStreamer.EmitRawText(Twine("\t")+MAI->getCommentString()+
+ MAI->getInlineAsmStart());
+ OutStreamer.EmitRawText(Twine("\t")+MAI->getCommentString()+
+ MAI->getInlineAsmEnd());
+ return;
+ }
+
+ // Emit the #APP start marker. This has to happen even if verbose-asm isn't
+ // enabled, so we use EmitRawText.
+ if (OutStreamer.hasRawTextSupport())
+ OutStreamer.EmitRawText(Twine("\t")+MAI->getCommentString()+
+ MAI->getInlineAsmStart());
+
+ // Get the !srcloc metadata node if we have it, and decode the loc cookie from
+ // it.
+ unsigned LocCookie = 0;
+ const MDNode *LocMD = 0;
+ for (unsigned i = MI->getNumOperands(); i != 0; --i) {
+ if (MI->getOperand(i-1).isMetadata() &&
+ (LocMD = MI->getOperand(i-1).getMetadata()) &&
+ LocMD->getNumOperands() != 0) {
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(LocMD->getOperand(0))) {
+ LocCookie = CI->getZExtValue();
+ break;
+ }
+ }
+ }
+
+ // Emit the inline asm to a temporary string so we can emit it through
+ // EmitInlineAsm.
+ SmallString<256> StringData;
+ raw_svector_ostream OS(StringData);
+
+ // The variant of the current asmprinter.
+ int AsmPrinterVariant = MAI->getAssemblerDialect();
+ InlineAsm::AsmDialect InlineAsmVariant = MI->getInlineAsmDialect();
+ AsmPrinter *AP = const_cast<AsmPrinter*>(this);
+ if (InlineAsmVariant == InlineAsm::AD_ATT)
+ EmitGCCInlineAsmStr(AsmStr, MI, MMI, InlineAsmVariant, AsmPrinterVariant,
+ AP, LocCookie, OS);
+ else
+ EmitMSInlineAsmStr(AsmStr, MI, MMI, InlineAsmVariant, AP, LocCookie, OS);
+
+ EmitInlineAsm(OS.str(), LocMD, MI->getInlineAsmDialect());
// Emit the #NOAPP end marker. This has to happen even if verbose-asm isn't
// enabled, so we use EmitRawText.
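
Dispatch on the instruction's own dialect — rather than on the module-level assembler dialect — is the heart of the rewrite: AD_ATT asm strings go through the GCC-style lowering, everything else through the new MS path, and the chosen dialect is also pushed into the AsmParser via setAssemblerDialect. A toy model of the fork (the enum mirrors InlineAsm::AsmDialect; the two lowering functions are stand-ins):

    #include <string>

    enum AsmDialect { AD_ATT, AD_Intel };

    static std::string lowerGCCStyle() { return "{.|.|.}-aware lowering"; }
    static std::string lowerMSStyle()  { return ".intel_syntax-wrapped"; }

    static std::string emitInlineAsm(AsmDialect D) {
      // Per-instruction choice, so ATT and Intel blocks can coexist in
      // one module.
      return D == AD_ATT ? lowerGCCStyle() : lowerMSStyle();
    }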
@@ -409,8 +519,8 @@
/// instruction, using the specified assembler variant. Targets should
/// override this to format as appropriate.
bool AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
- unsigned AsmVariant, const char *ExtraCode,
- raw_ostream &O) {
+ unsigned AsmVariant, const char *ExtraCode,
+ raw_ostream &O) {
// Does this asm operand have a single letter operand modifier?
if (ExtraCode && ExtraCode[0]) {
if (ExtraCode[1] != 0) return true; // Unknown modifier.
Modified: llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DIE.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DIE.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DIE.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DIE.cpp Tue Jan 15 11:16:16 2013
@@ -17,7 +17,7 @@
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@@ -182,6 +182,12 @@
void DIEInteger::EmitValue(AsmPrinter *Asm, unsigned Form) const {
unsigned Size = ~0U;
switch (Form) {
+ case dwarf::DW_FORM_flag_present:
+ // Emit something to keep the lines and comments in sync.
+ // FIXME: Is there a better way to do this?
+ if (Asm->OutStreamer.hasRawTextSupport())
+ Asm->OutStreamer.EmitRawText(StringRef(""));
+ return;
case dwarf::DW_FORM_flag: // Fall thru
case dwarf::DW_FORM_ref1: // Fall thru
case dwarf::DW_FORM_data1: Size = 1; break;
@@ -193,7 +199,8 @@
case dwarf::DW_FORM_data8: Size = 8; break;
case dwarf::DW_FORM_udata: Asm->EmitULEB128(Integer); return;
case dwarf::DW_FORM_sdata: Asm->EmitSLEB128(Integer); return;
- case dwarf::DW_FORM_addr: Size = Asm->getTargetData().getPointerSize(); break;
+ case dwarf::DW_FORM_addr:
+ Size = Asm->getDataLayout().getPointerSize(); break;
default: llvm_unreachable("DIE Value form not supported yet");
}
Asm->OutStreamer.EmitIntValue(Integer, Size, 0/*addrspace*/);
@@ -203,6 +210,7 @@
///
unsigned DIEInteger::SizeOf(AsmPrinter *AP, unsigned Form) const {
switch (Form) {
+ case dwarf::DW_FORM_flag_present: return 0;
case dwarf::DW_FORM_flag: // Fall thru
case dwarf::DW_FORM_ref1: // Fall thru
case dwarf::DW_FORM_data1: return sizeof(int8_t);
@@ -214,7 +222,7 @@
case dwarf::DW_FORM_data8: return sizeof(int64_t);
case dwarf::DW_FORM_udata: return MCAsmInfo::getULEB128Size(Integer);
case dwarf::DW_FORM_sdata: return MCAsmInfo::getSLEB128Size(Integer);
- case dwarf::DW_FORM_addr: return AP->getTargetData().getPointerSize();
+ case dwarf::DW_FORM_addr: return AP->getDataLayout().getPointerSize();
default: llvm_unreachable("DIE Value form not supported yet");
}
}
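
DW_FORM_flag_present is the DWARF 4 form whose value is carried entirely by the attribute's presence in the abbreviation table, which is why the new EmitValue case writes no bytes (just a blank raw-text line to keep verbose output aligned) and SizeOf reports 0. The size accounting in miniature:

    #include <cassert>

    enum FormStub { FORM_flag_present, FORM_flag, FORM_data1 };

    static unsigned sizeOfForm(FormStub F) {
      switch (F) {
      case FORM_flag_present: return 0;  // presence alone means "true"
      case FORM_flag:         return 1;  // explicit one-byte boolean
      case FORM_data1:        return 1;
      }
      return 0;
    }

    int main() {
      // Moving a boolean attribute from DW_FORM_flag to flag_present
      // shrinks each DIE by one byte per attribute.
      assert(sizeOfForm(FORM_flag_present) == 0);
      assert(sizeOfForm(FORM_flag) == 1);
      return 0;
    }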
@@ -241,7 +249,7 @@
unsigned DIELabel::SizeOf(AsmPrinter *AP, unsigned Form) const {
if (Form == dwarf::DW_FORM_data4) return 4;
if (Form == dwarf::DW_FORM_strp) return 4;
- return AP->getTargetData().getPointerSize();
+ return AP->getDataLayout().getPointerSize();
}
#ifndef NDEBUG
@@ -265,7 +273,7 @@
unsigned DIEDelta::SizeOf(AsmPrinter *AP, unsigned Form) const {
if (Form == dwarf::DW_FORM_data4) return 4;
if (Form == dwarf::DW_FORM_strp) return 4;
- return AP->getTargetData().getPointerSize();
+ return AP->getDataLayout().getPointerSize();
}
#ifndef NDEBUG
Modified: llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DIE.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DIE.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DIE.h (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DIE.h Tue Jan 15 11:16:16 2013
@@ -214,9 +214,6 @@
///
virtual unsigned SizeOf(AsmPrinter *AP, unsigned Form) const = 0;
- // Implement isa/cast/dyncast.
- static bool classof(const DIEValue *) { return true; }
-
#ifndef NDEBUG
virtual void print(raw_ostream &O) = 0;
void dump();
@@ -257,7 +254,6 @@
virtual unsigned SizeOf(AsmPrinter *AP, unsigned Form) const;
// Implement isa/cast/dyncast.
- static bool classof(const DIEInteger *) { return true; }
static bool classof(const DIEValue *I) { return I->getType() == isInteger; }
#ifndef NDEBUG
@@ -286,7 +282,6 @@
virtual unsigned SizeOf(AsmPrinter *AP, unsigned Form) const;
// Implement isa/cast/dyncast.
- static bool classof(const DIELabel *) { return true; }
static bool classof(const DIEValue *L) { return L->getType() == isLabel; }
#ifndef NDEBUG
@@ -313,7 +308,6 @@
virtual unsigned SizeOf(AsmPrinter *AP, unsigned Form) const;
// Implement isa/cast/dyncast.
- static bool classof(const DIEDelta *) { return true; }
static bool classof(const DIEValue *D) { return D->getType() == isDelta; }
#ifndef NDEBUG
@@ -343,7 +337,6 @@
}
// Implement isa/cast/dyncast.
- static bool classof(const DIEEntry *) { return true; }
static bool classof(const DIEValue *E) { return E->getType() == isEntry; }
#ifndef NDEBUG
@@ -383,7 +376,6 @@
virtual unsigned SizeOf(AsmPrinter *AP, unsigned Form) const;
// Implement isa/cast/dyncast.
- static bool classof(const DIEBlock *) { return true; }
static bool classof(const DIEValue *E) { return E->getType() == isBlock; }
#ifndef NDEBUG
Modified: llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DwarfAccelTable.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DwarfAccelTable.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DwarfAccelTable.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DwarfAccelTable.cpp Tue Jan 15 11:16:16 2013
@@ -133,8 +133,8 @@
}
}
-// Walk through and emit the buckets for the table. This will look
-// like a list of numbers of how many elements are in each bucket.
+// Walk through and emit the buckets for the table. Each index is
+// an offset into the list of hashes.
void DwarfAccelTable::EmitBuckets(AsmPrinter *Asm) {
unsigned index = 0;
for (size_t i = 0, e = Buckets.size(); i < e; ++i) {
Modified: llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DwarfAccelTable.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DwarfAccelTable.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DwarfAccelTable.h (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DwarfAccelTable.h Tue Jan 15 11:16:16 2013
@@ -237,8 +237,8 @@
#endif
};
- DwarfAccelTable(const DwarfAccelTable&); // DO NOT IMPLEMENT
- void operator=(const DwarfAccelTable&); // DO NOT IMPLEMENT
+ DwarfAccelTable(const DwarfAccelTable&) LLVM_DELETED_FUNCTION;
+ void operator=(const DwarfAccelTable&) LLVM_DELETED_FUNCTION;
// Internal Functions
void EmitHeader(AsmPrinter *);
Modified: llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp Tue Jan 15 11:16:16 2013
@@ -25,7 +25,7 @@
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
Modified: llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp Tue Jan 15 11:16:16 2013
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file contains support for writing dwarf compile unit.
+// This file contains support for constructing a dwarf compile unit.
//
//===----------------------------------------------------------------------===//
@@ -22,7 +22,7 @@
#include "llvm/Instructions.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
@@ -51,6 +51,15 @@
return Value;
}
+/// addFlag - Add a flag that is true.
+void CompileUnit::addFlag(DIE *Die, unsigned Attribute) {
+ if (!DD->useDarwinGDBCompat())
+ Die->addValue(Attribute, dwarf::DW_FORM_flag_present,
+ DIEIntegerOne);
+ else
+ addUInt(Die, Attribute, dwarf::DW_FORM_flag, 1);
+}
+
/// addUInt - Add an unsigned integer attribute data and value.
///
void CompileUnit::addUInt(DIE *Die, unsigned Attribute,
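
addFlag funnels every "boolean true" attribute through one place so the form can be chosen globally: the zero-size DW_FORM_flag_present normally, or the old one-byte DW_FORM_flag when Darwin-gdb compatibility is on. The selection, restated as a tiny pure function (stub enum; the real method writes straight into the DIE):

    enum FormStub { FORM_flag, FORM_flag_present };

    // Older Darwin gdb predates DWARF 4's flag_present, so it keeps the
    // one-byte encoding; everyone else gets the zero-byte one.
    static FormStub pickFlagForm(bool DarwinGDBCompat) {
      return DarwinGDBCompat ? FORM_flag : FORM_flag_present;
    }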
@@ -501,7 +510,7 @@
const char *FltPtr = (const char*)FltVal.getRawData();
int NumBytes = FltVal.getBitWidth() / 8; // 8 bits per byte.
- bool LittleEndian = Asm->getTargetData().isLittleEndian();
+ bool LittleEndian = Asm->getDataLayout().isLittleEndian();
int Incr = (LittleEndian ? 1 : -1);
int Start = (LittleEndian ? 0 : NumBytes - 1);
int Stop = (LittleEndian ? NumBytes : -1);
@@ -543,7 +552,7 @@
const uint64_t *Ptr64 = Val.getRawData();
int NumBytes = Val.getBitWidth() / 8; // 8 bits per byte.
- bool LittleEndian = Asm->getTargetData().isLittleEndian();
+ bool LittleEndian = Asm->getDataLayout().isLittleEndian();
// Output the constant to DWARF one byte at a time.
for (int i = 0; i < NumBytes; i++) {
@@ -794,7 +803,7 @@
(Language == dwarf::DW_LANG_C89 ||
Language == dwarf::DW_LANG_C99 ||
Language == dwarf::DW_LANG_ObjC))
- addUInt(&Buffer, dwarf::DW_AT_prototyped, dwarf::DW_FORM_flag, 1);
+ addFlag(&Buffer, dwarf::DW_AT_prototyped);
}
break;
case dwarf::DW_TAG_structure_type:
@@ -825,15 +834,15 @@
addUInt(ElemDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_data1,
dwarf::DW_ACCESS_public);
if (SP.isExplicit())
- addUInt(ElemDie, dwarf::DW_AT_explicit, dwarf::DW_FORM_flag, 1);
+ addFlag(ElemDie, dwarf::DW_AT_explicit);
}
else if (Element.isVariable()) {
DIVariable DV(Element);
ElemDie = new DIE(dwarf::DW_TAG_variable);
addString(ElemDie, dwarf::DW_AT_name, DV.getName());
addType(ElemDie, DV.getType());
- addUInt(ElemDie, dwarf::DW_AT_declaration, dwarf::DW_FORM_flag, 1);
- addUInt(ElemDie, dwarf::DW_AT_external, dwarf::DW_FORM_flag, 1);
+ addFlag(ElemDie, dwarf::DW_AT_declaration);
+ addFlag(ElemDie, dwarf::DW_AT_external);
addSourceLine(ElemDie, DV);
} else if (Element.isDerivedType()) {
DIDerivedType DDTy(Element);
@@ -883,7 +892,7 @@
}
if (CTy.isAppleBlockExtension())
- addUInt(&Buffer, dwarf::DW_AT_APPLE_block, dwarf::DW_FORM_flag, 1);
+ addFlag(&Buffer, dwarf::DW_AT_APPLE_block);
DICompositeType ContainingType = CTy.getContainingType();
if (DIDescriptor(ContainingType).isCompositeType())
@@ -895,8 +904,7 @@
}
if (CTy.isObjcClassComplete())
- addUInt(&Buffer, dwarf::DW_AT_APPLE_objc_complete_type,
- dwarf::DW_FORM_flag, 1);
+ addFlag(&Buffer, dwarf::DW_AT_APPLE_objc_complete_type);
// Add template parameters to a class, structure or union types.
// FIXME: The support isn't in the metadata for this yet.
@@ -929,7 +937,7 @@
// If we're a forward decl, say so.
if (CTy.isForwardDecl())
- addUInt(&Buffer, dwarf::DW_AT_declaration, dwarf::DW_FORM_flag, 1);
+ addFlag(&Buffer, dwarf::DW_AT_declaration);
// Add source line info if available.
if (!CTy.isForwardDecl())
@@ -1028,8 +1036,10 @@
// AT_specification code in order to work around a bug in older
// gdbs that requires the linkage name to resolve multiple template
// functions.
+ // TODO: Remove this set of code when we get rid of the old gdb
+ // compatibility.
StringRef LinkageName = SP.getLinkageName();
- if (!LinkageName.empty())
+ if (!LinkageName.empty() && DD->useDarwinGDBCompat())
addString(SPDie, dwarf::DW_AT_MIPS_linkage_name,
getRealLinkageName(LinkageName));
@@ -1043,6 +1053,11 @@
return SPDie;
}
+ // Add the linkage name if we have one.
+ if (!LinkageName.empty() && !DD->useDarwinGDBCompat())
+ addString(SPDie, dwarf::DW_AT_MIPS_linkage_name,
+ getRealLinkageName(LinkageName));
+
// Constructors and operators for anonymous aggregates do not have names.
if (!SP.getName().empty())
addString(SPDie, dwarf::DW_AT_name, SP.getName());
@@ -1055,7 +1070,7 @@
(Language == dwarf::DW_LANG_C89 ||
Language == dwarf::DW_LANG_C99 ||
Language == dwarf::DW_LANG_ObjC))
- addUInt(SPDie, dwarf::DW_AT_prototyped, dwarf::DW_FORM_flag, 1);
+ addFlag(SPDie, dwarf::DW_AT_prototyped);
// Add Return Type.
DICompositeType SPTy = SP.getType();
@@ -1079,7 +1094,7 @@
}
if (!SP.isDefinition()) {
- addUInt(SPDie, dwarf::DW_AT_declaration, dwarf::DW_FORM_flag, 1);
+ addFlag(SPDie, dwarf::DW_AT_declaration);
// Add arguments. Do not add arguments for subprogram definition. They will
// be handled while processing variables.
@@ -1090,22 +1105,22 @@
if (SPTag == dwarf::DW_TAG_subroutine_type)
for (unsigned i = 1, N = Args.getNumElements(); i < N; ++i) {
DIE *Arg = new DIE(dwarf::DW_TAG_formal_parameter);
- DIType ATy = DIType(DIType(Args.getElement(i)));
+ DIType ATy = DIType(Args.getElement(i));
addType(Arg, ATy);
if (ATy.isArtificial())
- addUInt(Arg, dwarf::DW_AT_artificial, dwarf::DW_FORM_flag, 1);
+ addFlag(Arg, dwarf::DW_AT_artificial);
SPDie->addChild(Arg);
}
}
if (SP.isArtificial())
- addUInt(SPDie, dwarf::DW_AT_artificial, dwarf::DW_FORM_flag, 1);
+ addFlag(SPDie, dwarf::DW_AT_artificial);
if (!SP.isLocalToUnit())
- addUInt(SPDie, dwarf::DW_AT_external, dwarf::DW_FORM_flag, 1);
+ addFlag(SPDie, dwarf::DW_AT_external);
if (SP.isOptimized())
- addUInt(SPDie, dwarf::DW_AT_APPLE_optimized, dwarf::DW_FORM_flag, 1);
+ addFlag(SPDie, dwarf::DW_AT_APPLE_optimized);
if (unsigned isa = Asm->getISAEncoding()) {
addUInt(SPDie, dwarf::DW_AT_APPLE_isa, dwarf::DW_FORM_flag, isa);
@@ -1168,7 +1183,7 @@
// Add scoping info.
if (!GV.isLocalToUnit())
- addUInt(VariableDIE, dwarf::DW_AT_external, dwarf::DW_FORM_flag, 1);
+ addFlag(VariableDIE, dwarf::DW_AT_external);
// Add line number info.
addSourceLine(VariableDIE, GV);
@@ -1193,8 +1208,7 @@
addDIEEntry(VariableSpecDIE, dwarf::DW_AT_specification,
dwarf::DW_FORM_ref4, VariableDIE);
addBlock(VariableSpecDIE, dwarf::DW_AT_location, 0, Block);
- addUInt(VariableDIE, dwarf::DW_AT_declaration, dwarf::DW_FORM_flag,
- 1);
+ addFlag(VariableDIE, dwarf::DW_AT_declaration);
addDie(VariableSpecDIE);
} else {
addBlock(VariableDIE, dwarf::DW_AT_location, 0, Block);
@@ -1213,7 +1227,7 @@
addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_constu);
SmallVector<Value*, 3> Idx(CE->op_begin()+1, CE->op_end());
addUInt(Block, 0, dwarf::DW_FORM_udata,
- Asm->getTargetData().getIndexedOffset(Ptr->getType(), Idx));
+ Asm->getDataLayout().getIndexedOffset(Ptr->getType(), Idx));
addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_plus);
addBlock(VariableDIE, dwarf::DW_AT_location, 0, Block);
}
@@ -1260,7 +1274,7 @@
DICompositeType *CTy) {
Buffer.setTag(dwarf::DW_TAG_array_type);
if (CTy->getTag() == dwarf::DW_TAG_vector_type)
- addUInt(&Buffer, dwarf::DW_AT_GNU_vector, dwarf::DW_FORM_flag, 1);
+ addFlag(&Buffer, dwarf::DW_AT_GNU_vector);
// Emit derived type.
addType(&Buffer, CTy->getTypeDerivedFrom());
@@ -1333,8 +1347,7 @@
}
if (DV->isArtificial())
- addUInt(VariableDie, dwarf::DW_AT_artificial,
- dwarf::DW_FORM_flag, 1);
+ addFlag(VariableDie, dwarf::DW_AT_artificial);
if (isScopeAbstract) {
DV->setDIE(VariableDie);
@@ -1446,7 +1459,7 @@
Offset -= FieldOffset;
// Maybe we need to work from the other end.
- if (Asm->getTargetData().isLittleEndian())
+ if (Asm->getDataLayout().isLittleEndian())
Offset = FieldSize - (Offset + Size);
addUInt(MemberDie, dwarf::DW_AT_bit_offset, 0, Offset);
Modified: llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h Tue Jan 15 11:16:16 2013
@@ -176,6 +176,9 @@
}
public:
+ /// addFlag - Add a flag that is true to the DIE.
+ void addFlag(DIE *Die, unsigned Attribute);
+
/// addUInt - Add an unsigned integer attribute data and value.
///
void addUInt(DIE *Die, unsigned Attribute, unsigned Form, uint64_t Integer);
@@ -280,8 +283,8 @@
/// for the given DITemplateTypeParameter.
DIE *getOrCreateTemplateTypeParameterDIE(DITemplateTypeParameter TP);
- /// getOrCreateTemplateValueParameterDIE - Find existing DIE or create new DIE
- /// for the given DITemplateValueParameter.
+ /// getOrCreateTemplateValueParameterDIE - Find existing DIE or create
+ /// new DIE for the given DITemplateValueParameter.
DIE *getOrCreateTemplateValueParameterDIE(DITemplateValueParameter TVP);
/// createDIEEntry - Creates a new DIEEntry to be a proxy for a debug
Modified: llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DwarfDebug.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DwarfDebug.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DwarfDebug.cpp Tue Jan 15 11:16:16 2013
@@ -27,7 +27,7 @@
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
@@ -54,9 +54,29 @@
cl::desc("Make an absence of debug location information explicit."),
cl::init(false));
-static cl::opt<bool> DwarfAccelTables("dwarf-accel-tables", cl::Hidden,
+namespace {
+ enum DefaultOnOff {
+ Default, Enable, Disable
+ };
+}
+
+static cl::opt<DefaultOnOff> DwarfAccelTables("dwarf-accel-tables", cl::Hidden,
cl::desc("Output prototype dwarf accelerator tables."),
- cl::init(false));
+ cl::values(
+ clEnumVal(Default, "Default for platform"),
+ clEnumVal(Enable, "Enabled"),
+ clEnumVal(Disable, "Disabled"),
+ clEnumValEnd),
+ cl::init(Default));
+
+static cl::opt<DefaultOnOff> DarwinGDBCompat("darwin-gdb-compat", cl::Hidden,
+ cl::desc("Compatibility with Darwin gdb."),
+ cl::values(
+ clEnumVal(Default, "Default for platform"),
+ clEnumVal(Enable, "Enabled"),
+ clEnumVal(Disable, "Disabled"),
+ clEnumValEnd),
+ cl::init(Default));
namespace {
const char *DWARFGroupName = "DWARF Emission";
@@ -135,10 +155,25 @@
DwarfDebugRangeSectionSym = DwarfDebugLocSectionSym = 0;
FunctionBeginSym = FunctionEndSym = 0;
- // Turn on accelerator tables for Darwin.
- if (Triple(M->getTargetTriple()).isOSDarwin())
- DwarfAccelTables = true;
-
+ // Turn on accelerator tables and older gdb compatibility
+ // for Darwin.
+ bool isDarwin = Triple(M->getTargetTriple()).isOSDarwin();
+ if (DarwinGDBCompat == Default) {
+ if (isDarwin)
+ isDarwinGDBCompat = true;
+ else
+ isDarwinGDBCompat = false;
+ } else
+ isDarwinGDBCompat = DarwinGDBCompat == Enable ? true : false;
+
+ if (DwarfAccelTables == Default) {
+ if (isDarwin)
+ hasDwarfAccelTables = true;
+ else
+ hasDwarfAccelTables = false;
+ } else
+ hasDwarfAccelTables = DwarfAccelTables == Enable ? true : false;
+
{
NamedRegionTimer T(DbgTimerName, DWARFGroupName, TimePassesIsEnabled);
beginModule(M);
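
Both options use the same tri-state pattern: Default defers to the platform (Darwin turns both on), while Enable/Disable override it. The two if/else ladders above reduce to one helper — a compact restatement, not code from the patch:

    enum DefaultOnOff { Default, Enable, Disable };

    static bool resolveOnOff(DefaultOnOff Opt, bool PlatformDefault) {
      return Opt == Default ? PlatformDefault : (Opt == Enable);
    }
    // isDarwinGDBCompat   = resolveOnOff(DarwinGDBCompat, isDarwin);
    // hasDwarfAccelTables = resolveOnOff(DwarfAccelTables, isDarwin);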
@@ -272,44 +307,51 @@
assert(SPDie && "Unable to find subprogram DIE!");
DISubprogram SP(SPNode);
- DISubprogram SPDecl = SP.getFunctionDeclaration();
- if (!SPDecl.isSubprogram()) {
- // There is not any need to generate specification DIE for a function
- // defined at compile unit level. If a function is defined inside another
- // function then gdb prefers the definition at top level and but does not
- // expect specification DIE in parent function. So avoid creating
- // specification DIE for a function defined inside a function.
- if (SP.isDefinition() && !SP.getContext().isCompileUnit() &&
- !SP.getContext().isFile() &&
- !isSubprogramContext(SP.getContext())) {
- SPCU->addUInt(SPDie, dwarf::DW_AT_declaration, dwarf::DW_FORM_flag, 1);
-
- // Add arguments.
- DICompositeType SPTy = SP.getType();
- DIArray Args = SPTy.getTypeArray();
- unsigned SPTag = SPTy.getTag();
- if (SPTag == dwarf::DW_TAG_subroutine_type)
- for (unsigned i = 1, N = Args.getNumElements(); i < N; ++i) {
- DIE *Arg = new DIE(dwarf::DW_TAG_formal_parameter);
- DIType ATy = DIType(DIType(Args.getElement(i)));
- SPCU->addType(Arg, ATy);
- if (ATy.isArtificial())
- SPCU->addUInt(Arg, dwarf::DW_AT_artificial, dwarf::DW_FORM_flag, 1);
- SPDie->addChild(Arg);
- }
- DIE *SPDeclDie = SPDie;
- SPDie = new DIE(dwarf::DW_TAG_subprogram);
- SPCU->addDIEEntry(SPDie, dwarf::DW_AT_specification, dwarf::DW_FORM_ref4,
- SPDeclDie);
- SPCU->addDie(SPDie);
- }
- }
- // Pick up abstract subprogram DIE.
+ // If we're updating an abstract DIE, then we will be adding the children and
+ // object pointer later on. But what we don't want to do is process the
+ // concrete DIE twice.
if (DIE *AbsSPDIE = AbstractSPDies.lookup(SPNode)) {
+ // Pick up abstract subprogram DIE.
SPDie = new DIE(dwarf::DW_TAG_subprogram);
SPCU->addDIEEntry(SPDie, dwarf::DW_AT_abstract_origin,
dwarf::DW_FORM_ref4, AbsSPDIE);
SPCU->addDie(SPDie);
+ } else {
+ DISubprogram SPDecl = SP.getFunctionDeclaration();
+ if (!SPDecl.isSubprogram()) {
+ // There is not any need to generate specification DIE for a function
+ // defined at compile unit level. If a function is defined inside another
+      // function then gdb prefers the definition at top level but does not
+ // expect specification DIE in parent function. So avoid creating
+ // specification DIE for a function defined inside a function.
+ if (SP.isDefinition() && !SP.getContext().isCompileUnit() &&
+ !SP.getContext().isFile() &&
+ !isSubprogramContext(SP.getContext())) {
+ SPCU->addFlag(SPDie, dwarf::DW_AT_declaration);
+
+ // Add arguments.
+ DICompositeType SPTy = SP.getType();
+ DIArray Args = SPTy.getTypeArray();
+ unsigned SPTag = SPTy.getTag();
+ if (SPTag == dwarf::DW_TAG_subroutine_type)
+ for (unsigned i = 1, N = Args.getNumElements(); i < N; ++i) {
+ DIE *Arg = new DIE(dwarf::DW_TAG_formal_parameter);
+ DIType ATy = DIType(Args.getElement(i));
+ SPCU->addType(Arg, ATy);
+ if (ATy.isArtificial())
+ SPCU->addFlag(Arg, dwarf::DW_AT_artificial);
+ if (ATy.isObjectPointer())
+ SPCU->addDIEEntry(SPDie, dwarf::DW_AT_object_pointer,
+ dwarf::DW_FORM_ref4, Arg);
+ SPDie->addChild(Arg);
+ }
+ DIE *SPDeclDie = SPDie;
+ SPDie = new DIE(dwarf::DW_TAG_subprogram);
+ SPCU->addDIEEntry(SPDie, dwarf::DW_AT_specification, dwarf::DW_FORM_ref4,
+ SPDeclDie);
+ SPCU->addDie(SPDie);
+ }
+ }
}
SPCU->addLabel(SPDie, dwarf::DW_AT_low_pc, dwarf::DW_FORM_addr,
@@ -346,7 +388,7 @@
// DW_AT_ranges appropriately.
TheCU->addUInt(ScopeDIE, dwarf::DW_AT_ranges, dwarf::DW_FORM_data4,
DebugRangeSymbols.size()
- * Asm->getTargetData().getPointerSize());
+ * Asm->getDataLayout().getPointerSize());
for (SmallVector<InsnRange, 4>::const_iterator RI = Ranges.begin(),
RE = Ranges.end(); RI != RE; ++RI) {
DebugRangeSymbols.push_back(getLabelBeforeInsn(RI->first));
@@ -386,7 +428,7 @@
DISubprogram InlinedSP = getDISubprogram(DS);
DIE *OriginDIE = TheCU->getDIE(InlinedSP);
if (!OriginDIE) {
- DEBUG(dbgs() << "Unable to find original DIE for inlined subprogram.");
+ DEBUG(dbgs() << "Unable to find original DIE for an inlined subprogram.");
return NULL;
}
@@ -395,7 +437,7 @@
const MCSymbol *EndLabel = getLabelAfterInsn(RI->second);
if (StartLabel == 0 || EndLabel == 0) {
- llvm_unreachable("Unexpected Start and End labels for a inlined scope!");
+ llvm_unreachable("Unexpected Start and End labels for an inlined scope!");
}
assert(StartLabel->isDefined() &&
"Invalid starting label for an inlined scope!");
@@ -412,7 +454,7 @@
// DW_AT_ranges appropriately.
TheCU->addUInt(ScopeDIE, dwarf::DW_AT_ranges, dwarf::DW_FORM_data4,
DebugRangeSymbols.size()
- * Asm->getTargetData().getPointerSize());
+ * Asm->getDataLayout().getPointerSize());
for (SmallVector<InsnRange, 4>::const_iterator RI = Ranges.begin(),
RE = Ranges.end(); RI != RE; ++RI) {
DebugRangeSymbols.push_back(getLabelBeforeInsn(RI->first));
@@ -461,21 +503,26 @@
return NULL;
SmallVector<DIE *, 8> Children;
+ DIE *ObjectPointer = NULL;
// Collect arguments for current function.
if (LScopes.isCurrentFunctionScope(Scope))
for (unsigned i = 0, N = CurrentFnArguments.size(); i < N; ++i)
if (DbgVariable *ArgDV = CurrentFnArguments[i])
if (DIE *Arg =
- TheCU->constructVariableDIE(ArgDV, Scope->isAbstractScope()))
+ TheCU->constructVariableDIE(ArgDV, Scope->isAbstractScope())) {
Children.push_back(Arg);
+ if (ArgDV->isObjectPointer()) ObjectPointer = Arg;
+ }
// Collect lexical scope children first.
const SmallVector<DbgVariable *, 8> &Variables = ScopeVariables.lookup(Scope);
for (unsigned i = 0, N = Variables.size(); i < N; ++i)
if (DIE *Variable =
- TheCU->constructVariableDIE(Variables[i], Scope->isAbstractScope()))
+ TheCU->constructVariableDIE(Variables[i], Scope->isAbstractScope())) {
Children.push_back(Variable);
+ if (Variables[i]->isObjectPointer()) ObjectPointer = Variable;
+ }
const SmallVector<LexicalScope *, 4> &Scopes = Scope->getChildren();
for (unsigned j = 0, M = Scopes.size(); j < M; ++j)
if (DIE *Nested = constructScopeDIE(TheCU, Scopes[j]))
@@ -509,6 +556,10 @@
E = Children.end(); I != E; ++I)
ScopeDIE->addChild(*I);
+ if (DS.isSubprogram() && ObjectPointer != NULL)
+ TheCU->addDIEEntry(ScopeDIE, dwarf::DW_AT_object_pointer,
+ dwarf::DW_FORM_ref4, ObjectPointer);
+
if (DS.isSubprogram())
TheCU->addPubTypes(DISubprogram(DS));
@@ -556,7 +607,8 @@
unsigned ID = GetOrCreateSourceID(FN, CompilationDir);
DIE *Die = new DIE(dwarf::DW_TAG_compile_unit);
- CompileUnit *NewCU = new CompileUnit(ID, DIUnit.getLanguage(), Die, Asm, this);
+ CompileUnit *NewCU = new CompileUnit(ID, DIUnit.getLanguage(), Die,
+ Asm, this);
NewCU->addString(Die, dwarf::DW_AT_producer, DIUnit.getProducer());
NewCU->addUInt(Die, dwarf::DW_AT_language, dwarf::DW_FORM_data2,
DIUnit.getLanguage());
@@ -575,7 +627,7 @@
if (!CompilationDir.empty())
NewCU->addString(Die, dwarf::DW_AT_comp_dir, CompilationDir);
if (DIUnit.isOptimized())
- NewCU->addUInt(Die, dwarf::DW_AT_APPLE_optimized, dwarf::DW_FORM_flag, 1);
+ NewCU->addFlag(Die, dwarf::DW_AT_APPLE_optimized);
StringRef Flags = DIUnit.getFlags();
if (!Flags.empty())
@@ -755,7 +807,7 @@
LexicalScope *Scope =
new LexicalScope(NULL, DIDescriptor(SP), NULL, false);
DeadFnScopeMap[SP] = Scope;
-
+
// Construct subprogram DIE and add variables DIEs.
CompileUnit *SPCU = CUMap.lookup(TheCU);
assert(SPCU && "Unable to find Compile Unit!");
@@ -802,9 +854,9 @@
Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("data_end"));
// End text sections.
- for (unsigned i = 1, N = SectionMap.size(); i <= N; ++i) {
- Asm->OutStreamer.SwitchSection(SectionMap[i]);
- Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("section_end", i));
+ for (unsigned I = 0, E = SectionMap.size(); I != E; ++I) {
+ Asm->OutStreamer.SwitchSection(SectionMap[I]);
+ Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("section_end", I+1));
}
// Compute DIE offsets and sizes.
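
The re-based loop follows from the container swap visible in the DwarfDebug.h hunk further down (UniqueVector, which hands out 1-based IDs, gives way to 0-based SetVector); adding 1 when forming the label keeps the emitted section_end symbols numbered exactly as before. In miniature:

    #include <cassert>

    int main() {
      const unsigned NumSections = 3;
      // Old: for (i = 1; i <= N; ++i) label = i;
      // New: for (I = 0; I != N; ++I) label = I + 1;  -- same labels.
      for (unsigned I = 0; I != NumSections; ++I) {
        unsigned Label = I + 1;
        assert(Label >= 1 && Label <= NumSections);
      }
      return 0;
    }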
@@ -816,8 +868,8 @@
// Corresponding abbreviations into a abbrev section.
emitAbbreviations();
- // Emit info into a dwarf accelerator table sections.
- if (DwarfAccelTables) {
+ // Emit info into the dwarf accelerator table sections.
+ if (useDwarfAccelTables()) {
emitAccelNames();
emitAccelObjC();
emitAccelNamespaces();
@@ -825,7 +877,10 @@
}
// Emit info into a debug pubtypes section.
- emitDebugPubTypes();
+ // TODO: When we don't need the option anymore we can
+ // remove all of the code that adds to the table.
+ if (useDarwinGDBCompat())
+ emitDebugPubTypes();
// Emit info into a debug loc section.
emitDebugLoc();
@@ -840,7 +895,11 @@
emitDebugMacInfo();
// Emit inline info.
- emitDebugInlineInfo();
+ // TODO: When we don't need the option anymore we
+ // can remove all of the code that this section
+ // depends upon.
+ if (useDarwinGDBCompat())
+ emitDebugInlineInfo();
// Emit info into a debug str section.
emitDebugStr();
@@ -1014,7 +1073,7 @@
if (AbsVar)
AbsVar->setMInsn(MInsn);
- // Simple ranges that are fully coalesced.
+ // Simplify ranges that are fully coalesced.
if (History.size() <= 1 || (History.size() == 2 &&
MInsn->isIdenticalTo(History.back()))) {
RegVar->setMInsn(MInsn);
@@ -1267,7 +1326,7 @@
// Coalesce identical entries at the end of History.
if (History.size() >= 2 &&
Prev->isIdenticalTo(History[History.size() - 2])) {
- DEBUG(dbgs() << "Coalesce identical DBG_VALUE entries:\n"
+ DEBUG(dbgs() << "Coalescing identical DBG_VALUE entries:\n"
<< "\t" << *Prev
<< "\t" << *History[History.size() - 2] << "\n");
History.pop_back();
@@ -1283,7 +1342,7 @@
PrevMBB->getLastNonDebugInstr();
if (LastMI == PrevMBB->end()) {
// Drop DBG_VALUE for empty range.
- DEBUG(dbgs() << "Drop DBG_VALUE for empty range:\n"
+ DEBUG(dbgs() << "Dropping DBG_VALUE for empty range:\n"
<< "\t" << *Prev << "\n");
History.pop_back();
}
@@ -1300,9 +1359,10 @@
if (!MI->isLabel())
AtBlockEntry = false;
- // First known non DBG_VALUE location marks beginning of function
- // body.
- if (PrologEndLoc.isUnknown() && !MI->getDebugLoc().isUnknown())
+ // First known non-DBG_VALUE and non-frame setup location marks
+ // the beginning of the function body.
+ if (!MI->getFlag(MachineInstr::FrameSetup) &&
+ (PrologEndLoc.isUnknown() && !MI->getDebugLoc().isUnknown()))
PrologEndLoc = MI->getDebugLoc();
// Check if the instruction clobbers any registers with debug vars.
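
With the extra FrameSetup test, prologue_end now lands on the first instruction that both carries a known debug location and is not part of frame setup, so the first user line is no longer attributed to spill/setup code. The rule, reduced to a predicate over a stub instruction list:

    #include <vector>

    struct InstrStub { bool IsFrameSetup; bool HasDebugLoc; };

    // Index of the instruction where the prologue ends, or -1 if no
    // located instruction exists.
    static int findPrologueEnd(const std::vector<InstrStub> &Body) {
      for (unsigned i = 0, e = Body.size(); i != e; ++i)
        if (!Body[i].IsFrameSetup && Body[i].HasDebugLoc)
          return (int)i;
      return -1;
    }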
@@ -1382,7 +1442,7 @@
MF->getFunction()->getContext());
recordSourceLine(FnStartDL.getLine(), FnStartDL.getCol(),
FnStartDL.getScope(MF->getFunction()->getContext()),
- DWARF2_LINE_DEFAULT_IS_STMT ? DWARF2_FLAG_IS_STMT : 0);
+ 0);
}
}
@@ -1439,8 +1499,7 @@
DIE *CurFnDIE = constructScopeDIE(TheCU, FnScope);
if (!MF->getTarget().Options.DisableFramePointerElim(*MF))
- TheCU->addUInt(CurFnDIE, dwarf::DW_AT_APPLE_omit_frame_ptr,
- dwarf::DW_FORM_flag, 1);
+ TheCU->addFlag(CurFnDIE, dwarf::DW_AT_APPLE_omit_frame_ptr);
DebugFrames.push_back(FunctionDebugFrameInfo(Asm->getFunctionNumber(),
MMI->getFrameMoves()));
@@ -1710,7 +1769,7 @@
Asm->EmitSectionOffset(Asm->GetTempSymbol("abbrev_begin"),
DwarfAbbrevSectionSym);
Asm->OutStreamer.AddComment("Address Size (in bytes)");
- Asm->EmitInt8(Asm->getTargetData().getPointerSize());
+ Asm->EmitInt8(Asm->getDataLayout().getPointerSize());
emitDIE(Die);
Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("info_end", TheCU->getID()));
@@ -1756,14 +1815,14 @@
Asm->EmitInt8(0);
Asm->OutStreamer.AddComment("Op size");
- Asm->EmitInt8(Asm->getTargetData().getPointerSize() + 1);
+ Asm->EmitInt8(Asm->getDataLayout().getPointerSize() + 1);
Asm->OutStreamer.AddComment("DW_LNE_set_address");
Asm->EmitInt8(dwarf::DW_LNE_set_address);
Asm->OutStreamer.AddComment("Section end label");
Asm->OutStreamer.EmitSymbolValue(Asm->GetTempSymbol("section_end",SectionEnd),
- Asm->getTargetData().getPointerSize(),
+ Asm->getDataLayout().getPointerSize(),
0/*AddrSpace*/);
// Mark end of matrix.
@@ -1992,7 +2051,7 @@
// Start the dwarf loc section.
Asm->OutStreamer.SwitchSection(
Asm->getObjFileLowering().getDwarfLocSection());
- unsigned char Size = Asm->getTargetData().getPointerSize();
+ unsigned char Size = Asm->getDataLayout().getPointerSize();
Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("debug_loc", 0));
unsigned index = 1;
for (SmallVector<DotDebugLocEntry, 4>::iterator
@@ -2089,7 +2148,7 @@
// Start the dwarf ranges section.
Asm->OutStreamer.SwitchSection(
Asm->getObjFileLowering().getDwarfRangesSection());
- unsigned char Size = Asm->getTargetData().getPointerSize();
+ unsigned char Size = Asm->getDataLayout().getPointerSize();
for (SmallVector<const MCSymbol *, 8>::iterator
I = DebugRangeSymbols.begin(), E = DebugRangeSymbols.end();
I != E; ++I) {
@@ -2147,7 +2206,7 @@
Asm->OutStreamer.AddComment("Dwarf Version");
Asm->EmitInt16(dwarf::DWARF_VERSION);
Asm->OutStreamer.AddComment("Address Size (in bytes)");
- Asm->EmitInt8(Asm->getTargetData().getPointerSize());
+ Asm->EmitInt8(Asm->getDataLayout().getPointerSize());
for (SmallVector<const MDNode *, 4>::iterator I = InlinedSPNodes.begin(),
E = InlinedSPNodes.end(); I != E; ++I) {
@@ -2178,7 +2237,7 @@
if (Asm->isVerbose()) Asm->OutStreamer.AddComment("low_pc");
Asm->OutStreamer.EmitSymbolValue(LI->first,
- Asm->getTargetData().getPointerSize(),0);
+ Asm->getDataLayout().getPointerSize(),0);
}
}
Modified: llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DwarfDebug.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DwarfDebug.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DwarfDebug.h (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DwarfDebug.h Tue Jan 15 11:16:16 2013
@@ -21,9 +21,9 @@
#include "llvm/MC/MachineLocation.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringMap.h"
-#include "llvm/ADT/UniqueVector.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/DebugLoc.h"
@@ -96,7 +96,8 @@
DotDebugLocEntry(const MCSymbol *B, const MCSymbol *E, const ConstantFP *FPtr)
: Begin(B), End(E), Variable(0), Merged(false),
Constant(true) { Constants.CFP = FPtr; EntryKind = E_ConstantFP; }
- DotDebugLocEntry(const MCSymbol *B, const MCSymbol *E, const ConstantInt *IPtr)
+ DotDebugLocEntry(const MCSymbol *B, const MCSymbol *E,
+ const ConstantInt *IPtr)
: Begin(B), End(E), Variable(0), Merged(false),
Constant(true) { Constants.CIP = IPtr; EntryKind = E_ConstantInt; }
@@ -158,11 +159,19 @@
bool isArtificial() const {
if (Var.isArtificial())
return true;
- if (Var.getTag() == dwarf::DW_TAG_arg_variable
- && getType().isArtificial())
+ if (getType().isArtificial())
return true;
return false;
}
+
+ bool isObjectPointer() const {
+ if (Var.isObjectPointer())
+ return true;
+ if (getType().isObjectPointer())
+ return true;
+ return false;
+ }
+
bool variableHasComplexAddress() const {
assert(Var.Verify() && "Invalid complex DbgVariable!");
return Var.hasComplexAddress();
@@ -222,7 +231,7 @@
/// SectionMap - Provides a unique id per text section.
///
- UniqueVector<const MCSection*> SectionMap;
+ SetVector<const MCSection*> SectionMap;
/// CurrentFnArguments - List of Arguments (DbgValues) for current function.
SmallVector<DbgVariable *, 8> CurrentFnArguments;
@@ -307,6 +316,9 @@
// table for the same directory as DW_at_comp_dir.
StringRef CompilationDir;
+ // A holder for the DarwinGDBCompat flag so that the compile unit can use it.
+ bool isDarwinGDBCompat;
+ bool hasDwarfAccelTables;
private:
/// assignAbbrevNumber - Define a unique number for the abbreviation.
@@ -520,6 +532,11 @@
/// getStringPoolEntry - returns an entry into the string pool with the given
/// string text.
MCSymbol *getStringPoolEntry(StringRef Str);
+
+ /// useDarwinGDBCompat - returns whether to restrict some of our debug
+ /// output to what the Darwin gdb debugger supports.
+ bool useDarwinGDBCompat() { return isDarwinGDBCompat; }
+ bool useDwarfAccelTables() { return hasDwarfAccelTables; }
};
} // End of namespace llvm
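
The SectionMap change from UniqueVector to SetVector is what drives the loop rewrite in DwarfDebug.cpp above: UniqueVector hands out ids starting at 1, while SetVector is a 0-based, insertion-ordered container that ignores duplicates, so the emission loop now iterates from 0 and writes I+1 into the section_end labels. A small self-contained illustration of that convention:

    #include "llvm/ADT/SetVector.h"
    #include <cstdio>

    int main() {
      llvm::SetVector<int> Sections;
      Sections.insert(7);
      Sections.insert(3);
      Sections.insert(7); // duplicate, ignored
      // 0-based storage, 1-based label numbering, insertion order kept.
      for (unsigned I = 0, E = Sections.size(); I != E; ++I)
        std::printf("section_end%u -> %d\n", I + 1, Sections[I]);
      return 0;
    }
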
Modified: llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DwarfException.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DwarfException.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DwarfException.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DwarfException.cpp Tue Jan 15 11:16:16 2013
@@ -24,7 +24,7 @@
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
@@ -417,7 +417,7 @@
// that we're omitting that bit.
TTypeEncoding = dwarf::DW_EH_PE_omit;
// dwarf::DW_EH_PE_absptr
- TypeFormatSize = Asm->getTargetData().getPointerSize();
+ TypeFormatSize = Asm->getDataLayout().getPointerSize();
} else {
// Okay, we have actual filters or typeinfos to emit. As such, we need to
// pick a type encoding for them. We're about to emit a list of pointers to
Modified: llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DwarfException.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DwarfException.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DwarfException.h (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/DwarfException.h Tue Jan 15 11:16:16 2013
@@ -43,26 +43,6 @@
/// MMI - Collected machine module information.
MachineModuleInfo *MMI;
- /// EmitExceptionTable - Emit landing pads and actions.
- ///
- /// The general organization of the table is complex, but the basic concepts
- /// are easy. First there is a header which describes the location and
- /// organization of the three components that follow.
- /// 1. The landing pad site information describes the range of code covered
- /// by the try. In our case it's an accumulation of the ranges covered
- /// by the invokes in the try. There is also a reference to the landing
- /// pad that handles the exception once processed. Finally an index into
- /// the actions table.
- /// 2. The action table, in our case, is composed of pairs of type ids
- /// and next action offset. Starting with the action index from the
- /// landing pad site, each type Id is checked for a match to the current
- /// exception. If it matches then the exception and type id are passed
- /// on to the landing pad. Otherwise the next action is looked up. This
- /// chain is terminated with a next action of zero. If no type id is
- /// found the frame is unwound and handling continues.
- /// 3. Type id table contains references to all the C++ typeinfo for all
- /// catches in the function. This tables is reversed indexed base 1.
-
/// SharedTypeIds - How many leading type ids two landing pads have in common.
static unsigned SharedTypeIds(const LandingPadInfo *L,
const LandingPadInfo *R);
@@ -119,6 +99,26 @@
const RangeMapType &PadMap,
const SmallVectorImpl<const LandingPadInfo *> &LPs,
const SmallVectorImpl<unsigned> &FirstActions);
+
+ /// EmitExceptionTable - Emit landing pads and actions.
+ ///
+ /// The general organization of the table is complex, but the basic concepts
+ /// are easy. First there is a header which describes the location and
+ /// organization of the three components that follow.
+ /// 1. The landing pad site information describes the range of code covered
+ /// by the try. In our case it's an accumulation of the ranges covered
+ /// by the invokes in the try. There is also a reference to the landing
+ /// pad that handles the exception once processed. Finally an index into
+ /// the actions table.
+ /// 2. The action table, in our case, is composed of pairs of type ids
+ /// and next action offset. Starting with the action index from the
+ /// landing pad site, each type Id is checked for a match to the current
+ /// exception. If it matches then the exception and type id are passed
+ /// on to the landing pad. Otherwise the next action is looked up. This
+ /// chain is terminated with a next action of zero. If no type id is
+ /// found the frame is unwound and handling continues.
+ /// 3. The type id table contains references to all the C++ typeinfo for all
+ /// catches in the function. This table is reverse indexed, base 1.
void EmitExceptionTable();
public:
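
The relocated comment is precise enough to sketch the action-chain lookup it describes: action entries pair a type id with a next-action offset, and a next-action of zero terminates the chain. Roughly, with illustrative stand-in types rather than the real unwinder structures:

    struct ActionEntry {
      int TypeId;     // index into the type id table (reverse indexed, base 1)
      int NextOffset; // relative link to the next action; 0 ends the chain
    };

    // Walk the chain starting at the landing pad's first action until a type
    // id matches the thrown exception, as the comment above describes.
    static int findHandlerTypeId(const ActionEntry *Actions, int First,
                                 bool (*Matches)(int TypeId)) {
      for (int i = First; ;) {
        if (Matches(Actions[i].TypeId))
          return Actions[i].TypeId; // hand exception and id to the landing pad
        if (Actions[i].NextOffset == 0)
          return 0;                 // chain exhausted: unwind and keep looking
        i += Actions[i].NextOffset;
      }
    }
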
Modified: llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp Tue Jan 15 11:16:16 2013
@@ -20,7 +20,7 @@
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/ADT/SmallString.h"
@@ -91,7 +91,7 @@
/// either condition is detected in a function which uses the GC.
///
void OcamlGCMetadataPrinter::finishAssembly(AsmPrinter &AP) {
- unsigned IntPtrSize = AP.TM.getTargetData()->getPointerSize();
+ unsigned IntPtrSize = AP.TM.getDataLayout()->getPointerSize();
AP.OutStreamer.SwitchSection(AP.getObjFileLowering().getTextSection());
EmitCamlGlobal(getModule(), AP, "code_end");
Modified: llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/Win64Exception.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/Win64Exception.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/Win64Exception.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/AsmPrinter/Win64Exception.cpp Tue Jan 15 11:16:16 2013
@@ -24,7 +24,7 @@
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
Modified: llvm/branches/AMDILBackend/lib/CodeGen/BranchFolding.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/BranchFolding.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/BranchFolding.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/BranchFolding.cpp Tue Jan 15 11:16:16 2013
@@ -357,9 +357,8 @@
if (I1 == MBB1->begin() && I2 != MBB2->begin()) {
--I2;
while (I2->isDebugValue()) {
- if (I2 == MBB2->begin()) {
+ if (I2 == MBB2->begin())
return TailLen;
- }
--I2;
}
++I2;
@@ -482,21 +481,19 @@
BranchFolder::MergePotentialsElt::operator<(const MergePotentialsElt &o) const {
if (getHash() < o.getHash())
return true;
- else if (getHash() > o.getHash())
+ if (getHash() > o.getHash())
return false;
- else if (getBlock()->getNumber() < o.getBlock()->getNumber())
+ if (getBlock()->getNumber() < o.getBlock()->getNumber())
return true;
- else if (getBlock()->getNumber() > o.getBlock()->getNumber())
+ if (getBlock()->getNumber() > o.getBlock()->getNumber())
return false;
- else {
- // _GLIBCXX_DEBUG checks strict weak ordering, which involves comparing
- // an object with itself.
+ // _GLIBCXX_DEBUG checks strict weak ordering, which involves comparing
+ // an object with itself.
#ifndef _GLIBCXX_DEBUG
- llvm_unreachable("Predecessor appears twice");
+ llvm_unreachable("Predecessor appears twice");
#else
- return false;
+ return false;
#endif
- }
}
/// CountTerminators - Count the number of terminators in the given
@@ -574,7 +571,8 @@
// instructions that would be deleted in the merge.
MachineFunction *MF = MBB1->getParent();
if (EffectiveTailLen >= 2 &&
- MF->getFunction()->hasFnAttr(Attribute::OptimizeForSize) &&
+ MF->getFunction()->getFnAttributes().
+ hasAttribute(Attributes::OptimizeForSize) &&
(I1 == MBB1->begin() || I2 == MBB2->begin()))
return true;
@@ -1554,8 +1552,7 @@
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
Uses.insert(*AI);
} else {
- if (Uses.count(Reg)) {
- Uses.erase(Reg);
+ if (Uses.erase(Reg)) {
for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
Uses.erase(*SubRegs); // Use sub-registers to be conservative
}
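
Two idioms in this file are worth spelling out. The flattened operator< keeps the _GLIBCXX_DEBUG subtlety intact: libstdc++'s debug mode validates strict weak ordering by comparing an element with itself, so the comparator must return false for equal keys rather than reach the unreachable. And the Uses.erase(Reg) change relies on erase() reporting whether anything was removed, folding the count()-then-erase() pair into one lookup. A standalone sketch of the comparator shape, with a hypothetical Elt type:

    #include <cassert>

    struct Elt {
      unsigned Hash;
      int BlockNumber;
      // Strict weak ordering must be irreflexive: comparing an object with
      // itself yields false, which _GLIBCXX_DEBUG explicitly checks.
      bool operator<(const Elt &O) const {
        if (Hash < O.Hash) return true;
        if (Hash > O.Hash) return false;
        if (BlockNumber < O.BlockNumber) return true;
        if (BlockNumber > O.BlockNumber) return false;
        return false; // equal keys: not "less than"
      }
    };

    int main() {
      Elt A = {42, 7};
      assert(!(A < A) && "irreflexivity holds");
      return 0;
    }
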
Modified: llvm/branches/AMDILBackend/lib/CodeGen/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/CMakeLists.txt?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/CMakeLists.txt (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/CMakeLists.txt Tue Jan 15 11:16:16 2013
@@ -45,6 +45,7 @@
MachineCopyPropagation.cpp
MachineCSE.cpp
MachineDominators.cpp
+ MachinePostDominators.cpp
MachineFunction.cpp
MachineFunctionAnalysis.cpp
MachineFunctionPass.cpp
@@ -95,12 +96,14 @@
SplitKit.cpp
StackProtector.cpp
StackSlotColoring.cpp
+ StackColoring.cpp
StrongPHIElimination.cpp
TailDuplication.cpp
TargetFrameLoweringImpl.cpp
TargetInstrInfoImpl.cpp
TargetLoweringObjectFileImpl.cpp
TargetOptionsImpl.cpp
+ TargetSchedule.cpp
TwoAddressInstructionPass.cpp
UnreachableBlockElim.cpp
VirtRegMap.cpp
Modified: llvm/branches/AMDILBackend/lib/CodeGen/CalcSpillWeights.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/CalcSpillWeights.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/CalcSpillWeights.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/CalcSpillWeights.cpp Tue Jan 15 11:16:16 2013
@@ -9,7 +9,6 @@
#define DEBUG_TYPE "calcspillweights"
-#include "llvm/Function.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
@@ -42,8 +41,7 @@
bool CalculateSpillWeights::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "********** Compute Spill Weights **********\n"
- << "********** Function: "
- << MF.getFunction()->getName() << '\n');
+ << "********** Function: " << MF.getName() << '\n');
LiveIntervals &LIS = getAnalysis<LiveIntervals>();
MachineRegisterInfo &MRI = MF.getRegInfo();
@@ -166,7 +164,7 @@
continue;
float hweight = Hint[hint] += weight;
if (TargetRegisterInfo::isPhysicalRegister(hint)) {
- if (hweight > bestPhys && LIS.isAllocatable(hint))
+ if (hweight > bestPhys && mri.isAllocatable(hint))
bestPhys = hweight, hintPhys = hint;
} else {
if (hweight > bestVirt)
Modified: llvm/branches/AMDILBackend/lib/CodeGen/CallingConvLower.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/CallingConvLower.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/CallingConvLower.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/CallingConvLower.cpp Tue Jan 15 11:16:16 2013
@@ -18,7 +18,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;
@@ -50,7 +50,7 @@
if (MinAlign > (int)Align)
Align = MinAlign;
MF.getFrameInfo()->ensureMaxAlignment(Align);
- TM.getTargetLowering()->HandleByVal(this, Size);
+ TM.getTargetLowering()->HandleByVal(this, Size, Align);
unsigned Offset = AllocateStack(Size, Align);
addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
}
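
HandleByVal now receives the clamped alignment along with the size, so targets see the same value that sizes the stack slot. The surrounding logic is unchanged: raise the argument's alignment to the ABI minimum, bump the frame's maximum alignment, then carve out the slot. CCState::AllocateStack behaves roughly like this sketch (a simplification, not the exact implementation):

    // Round the running offset up to the slot's (power-of-two) alignment,
    // then advance past the slot; returns the slot's offset.
    static unsigned allocateStackSlot(unsigned &StackOffset,
                                      unsigned Size, unsigned Align) {
      StackOffset = (StackOffset + Align - 1) & ~(Align - 1);
      unsigned Offset = StackOffset;
      StackOffset += Size;
      return Offset;
    }
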
Modified: llvm/branches/AMDILBackend/lib/CodeGen/CodeGen.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/CodeGen.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/CodeGen.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/CodeGen.cpp Tue Jan 15 11:16:16 2013
@@ -41,6 +41,7 @@
initializeMachineCopyPropagationPass(Registry);
initializeMachineCSEPass(Registry);
initializeMachineDominatorTreePass(Registry);
+ initializeMachinePostDominatorTreePass(Registry);
initializeMachineLICMPass(Registry);
initializeMachineLoopInfoPass(Registry);
initializeMachineModuleInfoPass(Registry);
@@ -56,6 +57,7 @@
initializeRegisterCoalescerPass(Registry);
initializeSlotIndexesPass(Registry);
initializeStackProtectorPass(Registry);
+ initializeStackColoringPass(Registry);
initializeStackSlotColoringPass(Registry);
initializeStrongPHIEliminationPass(Registry);
initializeTailDuplicatePassPass(Registry);
Modified: llvm/branches/AMDILBackend/lib/CodeGen/CodePlacementOpt.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/CodePlacementOpt.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/CodePlacementOpt.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/CodePlacementOpt.cpp Tue Jan 15 11:16:16 2013
@@ -373,7 +373,7 @@
///
bool CodePlacementOpt::AlignLoops(MachineFunction &MF) {
const Function *F = MF.getFunction();
- if (F->hasFnAttr(Attribute::OptimizeForSize))
+ if (F->getFnAttributes().hasAttribute(Attributes::OptimizeForSize))
return false;
unsigned Align = TLI->getPrefLoopAlignment();
Modified: llvm/branches/AMDILBackend/lib/CodeGen/CriticalAntiDepBreaker.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/CriticalAntiDepBreaker.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/CriticalAntiDepBreaker.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/CriticalAntiDepBreaker.cpp Tue Jan 15 11:16:16 2013
@@ -527,7 +527,7 @@
if (Edge->getKind() == SDep::Anti) {
AntiDepReg = Edge->getReg();
assert(AntiDepReg != 0 && "Anti-dependence on reg0?");
- if (!RegClassInfo.isAllocatable(AntiDepReg))
+ if (!MRI.isAllocatable(AntiDepReg))
// Don't break anti-dependencies on non-allocatable registers.
AntiDepReg = 0;
else if (KeepRegs.test(AntiDepReg))
Modified: llvm/branches/AMDILBackend/lib/CodeGen/DeadMachineInstructionElim.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/DeadMachineInstructionElim.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/DeadMachineInstructionElim.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/DeadMachineInstructionElim.cpp Tue Jan 15 11:16:16 2013
@@ -33,7 +33,6 @@
const MachineRegisterInfo *MRI;
const TargetInstrInfo *TII;
BitVector LivePhysRegs;
- BitVector ReservedRegs;
public:
static char ID; // Pass identification, replacement for typeid
@@ -70,7 +69,7 @@
unsigned Reg = MO.getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
// Don't delete live physreg defs, or any reserved register defs.
- if (LivePhysRegs.test(Reg) || ReservedRegs.test(Reg))
+ if (LivePhysRegs.test(Reg) || MRI->isReserved(Reg))
return false;
} else {
if (!MRI->use_nodbg_empty(Reg))
@@ -90,9 +89,6 @@
TRI = MF.getTarget().getRegisterInfo();
TII = MF.getTarget().getInstrInfo();
- // Treat reserved registers as always live.
- ReservedRegs = TRI->getReservedRegs(MF);
-
// Loop over all instructions in all blocks, from bottom to top, so that it's
// more likely that chains of dependent but ultimately dead instructions will
// be cleaned up.
@@ -101,7 +97,7 @@
MachineBasicBlock *MBB = &*I;
// Start out assuming that reserved registers are live out of this block.
- LivePhysRegs = ReservedRegs;
+ LivePhysRegs = MRI->getReservedRegs();
// Also add any explicit live-out physregs for this block.
if (!MBB->empty() && MBB->back().isReturn())
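
With the cached ReservedRegs bitvector gone, the pass seeds each block's live set from MRI->getReservedRegs() and queries MRI->isReserved() per def. The bottom-up sweep it feeds is the usual dead-def scan; schematically, with simplified types standing in for MachineInstr and BitVector:

    #include <set>
    #include <vector>

    struct MI { std::vector<int> Defs, Uses; };

    // Walk a block bottom-up; an instruction is dead only if none of its
    // defs is live or reserved. Live starts out as the reserved set,
    // mirroring LivePhysRegs = MRI->getReservedRegs() above.
    static void sweepDeadDefs(std::vector<MI> &Block,
                              const std::set<int> &Reserved) {
      std::set<int> Live = Reserved;
      for (auto I = Block.rbegin(); I != Block.rend(); ++I) {
        bool Dead = true;
        for (int R : I->Defs)
          if (Live.count(R) || Reserved.count(R)) { Dead = false; break; }
        if (Dead)
          continue; // a real pass would erase the instruction here
        for (int R : I->Defs) Live.erase(R);  // defs end liveness
        for (int R : I->Uses) Live.insert(R); // uses begin liveness
      }
    }
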
Modified: llvm/branches/AMDILBackend/lib/CodeGen/EarlyIfConversion.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/EarlyIfConversion.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/EarlyIfConversion.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/EarlyIfConversion.cpp Tue Jan 15 11:16:16 2013
@@ -18,12 +18,12 @@
#define DEBUG_TYPE "early-ifcvt"
#include "MachineTraceMetrics.h"
-#include "llvm/Function.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SparseSet.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -31,9 +31,9 @@
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
-#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -50,7 +50,10 @@
static cl::opt<bool> Stress("stress-early-ifcvt", cl::Hidden,
cl::desc("Turn all knobs to 11"));
-typedef SmallSetVector<MachineBasicBlock*, 8> BlockSetVector;
+STATISTIC(NumDiamondsSeen, "Number of diamonds");
+STATISTIC(NumDiamondsConv, "Number of diamonds converted");
+STATISTIC(NumTrianglesSeen, "Number of triangles");
+STATISTIC(NumTrianglesConv, "Number of triangles converted");
//===----------------------------------------------------------------------===//
// SSAIfConv
@@ -96,6 +99,12 @@
/// equal to Tail.
bool isTriangle() const { return TBB == Tail || FBB == Tail; }
+ /// Returns the Tail predecessor for the True side.
+ MachineBasicBlock *getTPred() const { return TBB == Tail ? Head : TBB; }
+
+ /// Returns the Tail predecessor for the False side.
+ MachineBasicBlock *getFPred() const { return FBB == Tail ? Head : FBB; }
+
/// Information about each phi in the Tail block.
struct PHIInfo {
MachineInstr *PHI;
@@ -134,6 +143,12 @@
/// Find a valid insertion point in Head.
bool findInsertionPoint();
+ /// Replace PHI instructions in Tail with selects.
+ void replacePHIInstrs();
+
+ /// Insert selects and rewrite PHI operands to use them.
+ void rewritePHIOperands();
+
public:
/// runOnMachineFunction - Initialize per-function data structures.
void runOnMachineFunction(MachineFunction &MF) {
@@ -337,11 +352,7 @@
if (Succ0->pred_size() != 1 || Succ0->succ_size() != 1)
return false;
- // We could support additional Tail predecessors by updating phis instead of
- // eliminating them. Let's see an example where it matters first.
Tail = Succ0->succ_begin()[0];
- if (Tail->pred_size() != 2)
- return false;
// This is not a triangle.
if (Tail != Succ1) {
@@ -391,8 +402,8 @@
// Any phis in the tail block must be convertible to selects.
PHIs.clear();
- MachineBasicBlock *TPred = TBB == Tail ? Head : TBB;
- MachineBasicBlock *FPred = FBB == Tail ? Head : FBB;
+ MachineBasicBlock *TPred = getTPred();
+ MachineBasicBlock *FPred = getFPred();
for (MachineBasicBlock::iterator I = Tail->begin(), E = Tail->end();
I != E && I->isPHI(); ++I) {
PHIs.push_back(&*I);
@@ -428,24 +439,18 @@
if (!findInsertionPoint())
return false;
+ if (isTriangle())
+ ++NumTrianglesSeen;
+ else
+ ++NumDiamondsSeen;
return true;
}
-
-/// convertIf - Execute the if conversion after canConvertIf has determined the
-/// feasibility.
-///
-/// Any basic blocks erased will be added to RemovedBlocks.
-///
-void SSAIfConv::convertIf(SmallVectorImpl<MachineBasicBlock*> &RemovedBlocks) {
- assert(Head && Tail && TBB && FBB && "Call canConvertIf first.");
-
- // Move all instructions into Head, except for the terminators.
- if (TBB != Tail)
- Head->splice(InsertionPoint, TBB, TBB->begin(), TBB->getFirstTerminator());
- if (FBB != Tail)
- Head->splice(InsertionPoint, FBB, FBB->begin(), FBB->getFirstTerminator());
-
+/// replacePHIInstrs - Completely replace PHI instructions with selects.
+/// This is possible when the only Tail predecessors are the if-converted
+/// blocks.
+void SSAIfConv::replacePHIInstrs() {
+ assert(Tail->pred_size() == 2 && "Cannot replace PHIs");
MachineBasicBlock::iterator FirstTerm = Head->getFirstTerminator();
assert(FirstTerm != Head->end() && "No terminators");
DebugLoc HeadDL = FirstTerm->getDebugLoc();
@@ -461,6 +466,66 @@
PI.PHI->eraseFromParent();
PI.PHI = 0;
}
+}
+
+/// rewritePHIOperands - When there are additional Tail predecessors, insert
+/// select instructions in Head and rewrite PHI operands to use the selects.
+/// Keep the PHI instructions in Tail to handle the other predecessors.
+void SSAIfConv::rewritePHIOperands() {
+ MachineBasicBlock::iterator FirstTerm = Head->getFirstTerminator();
+ assert(FirstTerm != Head->end() && "No terminators");
+ DebugLoc HeadDL = FirstTerm->getDebugLoc();
+
+ // Convert all PHIs to select instructions inserted before FirstTerm.
+ for (unsigned i = 0, e = PHIs.size(); i != e; ++i) {
+ PHIInfo &PI = PHIs[i];
+ DEBUG(dbgs() << "If-converting " << *PI.PHI);
+ unsigned PHIDst = PI.PHI->getOperand(0).getReg();
+ unsigned DstReg = MRI->createVirtualRegister(MRI->getRegClass(PHIDst));
+ TII->insertSelect(*Head, FirstTerm, HeadDL, DstReg, Cond, PI.TReg, PI.FReg);
+ DEBUG(dbgs() << " --> " << *llvm::prior(FirstTerm));
+
+ // Rewrite PHI operands TPred -> (DstReg, Head), remove FPred.
+ for (unsigned i = PI.PHI->getNumOperands(); i != 1; i -= 2) {
+ MachineBasicBlock *MBB = PI.PHI->getOperand(i-1).getMBB();
+ if (MBB == getTPred()) {
+ PI.PHI->getOperand(i-1).setMBB(Head);
+ PI.PHI->getOperand(i-2).setReg(DstReg);
+ } else if (MBB == getFPred()) {
+ PI.PHI->RemoveOperand(i-1);
+ PI.PHI->RemoveOperand(i-2);
+ }
+ }
+ DEBUG(dbgs() << " --> " << *PI.PHI);
+ }
+}
+
+/// convertIf - Execute the if conversion after canConvertIf has determined the
+/// feasibility.
+///
+/// Any basic blocks erased will be added to RemovedBlocks.
+///
+void SSAIfConv::convertIf(SmallVectorImpl<MachineBasicBlock*> &RemovedBlocks) {
+ assert(Head && Tail && TBB && FBB && "Call canConvertIf first.");
+
+ // Update statistics.
+ if (isTriangle())
+ ++NumTrianglesConv;
+ else
+ ++NumDiamondsConv;
+
+ // Move all instructions into Head, except for the terminators.
+ if (TBB != Tail)
+ Head->splice(InsertionPoint, TBB, TBB->begin(), TBB->getFirstTerminator());
+ if (FBB != Tail)
+ Head->splice(InsertionPoint, FBB, FBB->begin(), FBB->getFirstTerminator());
+
+ // Are there extra Tail predecessors?
+ bool ExtraPreds = Tail->pred_size() != 2;
+ if (ExtraPreds)
+ rewritePHIOperands();
+ else
+ replacePHIInstrs();
// Fix up the CFG, temporarily leave Head without any successors.
Head->removeSuccessor(TBB);
@@ -472,6 +537,7 @@
// Fix up Head's terminators.
// It should become a single branch or a fallthrough.
+ DebugLoc HeadDL = Head->getFirstTerminator()->getDebugLoc();
TII->RemoveBranch(*Head);
// Erase the now empty conditional blocks. It is likely that Head can fall
@@ -486,7 +552,7 @@
}
assert(Head->succ_empty() && "Additional head successors?");
- if (Head->isLayoutSuccessor(Tail)) {
+ if (!ExtraPreds && Head->isLayoutSuccessor(Tail)) {
// Splice Tail onto the end of Head.
DEBUG(dbgs() << "Joining tail BB#" << Tail->getNumber()
<< " into head BB#" << Head->getNumber() << '\n');
@@ -593,10 +659,16 @@
Traces->invalidate(IfConv.Tail);
Traces->invalidate(IfConv.TBB);
Traces->invalidate(IfConv.FBB);
- DEBUG(if (MinInstr) MinInstr->print(dbgs()));
Traces->verifyAnalysis();
}
+// Adjust cycles with downward saturation.
+static unsigned adjCycles(unsigned Cyc, int Delta) {
+ if (Delta < 0 && Cyc + Delta > Cyc)
+ return 0;
+ return Cyc + Delta;
+}
+
/// Apply cost model and heuristics to the if-conversion in IfConv.
/// Return true if the conversion is a good idea.
///
@@ -608,21 +680,79 @@
if (!MinInstr)
MinInstr = Traces->getEnsemble(MachineTraceMetrics::TS_MinInstrCount);
- // Compare the critical path through TBB and FBB. If the difference is
- // greater than the branch misprediction penalty, it would never pay to
- // if-convert. The triangle/diamond topology guarantees that these traces
- // have the same head and tail, so they can be compared.
- MachineTraceMetrics::Trace TBBTrace = MinInstr->getTrace(IfConv.TBB);
- MachineTraceMetrics::Trace FBBTrace = MinInstr->getTrace(IfConv.FBB);
+ MachineTraceMetrics::Trace TBBTrace = MinInstr->getTrace(IfConv.getTPred());
+ MachineTraceMetrics::Trace FBBTrace = MinInstr->getTrace(IfConv.getFPred());
DEBUG(dbgs() << "TBB: " << TBBTrace << "FBB: " << FBBTrace);
- unsigned TBBCrit = TBBTrace.getCriticalPath();
- unsigned FBBCrit = FBBTrace.getCriticalPath();
- unsigned ExtraCrit = TBBCrit > FBBCrit ? TBBCrit-FBBCrit : FBBCrit-TBBCrit;
- if (ExtraCrit >= SchedModel->MispredictPenalty) {
- DEBUG(dbgs() << "Critical path difference larger than "
- << SchedModel->MispredictPenalty << ".\n");
+ unsigned MinCrit = std::min(TBBTrace.getCriticalPath(),
+ FBBTrace.getCriticalPath());
+
+ // Set a somewhat arbitrary limit on the critical path extension we accept.
+ unsigned CritLimit = SchedModel->MispredictPenalty/2;
+
+ // If-conversion only makes sense when there is unexploited ILP. Compute the
+ // maximum-ILP resource length of the trace after if-conversion. Compare it
+ // to the shortest critical path.
+ SmallVector<const MachineBasicBlock*, 1> ExtraBlocks;
+ if (IfConv.TBB != IfConv.Tail)
+ ExtraBlocks.push_back(IfConv.TBB);
+ unsigned ResLength = FBBTrace.getResourceLength(ExtraBlocks);
+ DEBUG(dbgs() << "Resource length " << ResLength
+ << ", minimal critical path " << MinCrit << '\n');
+ if (ResLength > MinCrit + CritLimit) {
+ DEBUG(dbgs() << "Not enough available ILP.\n");
return false;
}
+
+ // Assume that the depth of the first head terminator will also be the depth
+ // of the select instruction inserted, as determined by the flag dependency.
+ // TBB / FBB data dependencies may delay the select even more.
+ MachineTraceMetrics::Trace HeadTrace = MinInstr->getTrace(IfConv.Head);
+ unsigned BranchDepth =
+ HeadTrace.getInstrCycles(IfConv.Head->getFirstTerminator()).Depth;
+ DEBUG(dbgs() << "Branch depth: " << BranchDepth << '\n');
+
+ // Look at all the tail phis, and compute the critical path extension caused
+ // by inserting select instructions.
+ MachineTraceMetrics::Trace TailTrace = MinInstr->getTrace(IfConv.Tail);
+ for (unsigned i = 0, e = IfConv.PHIs.size(); i != e; ++i) {
+ SSAIfConv::PHIInfo &PI = IfConv.PHIs[i];
+ unsigned Slack = TailTrace.getInstrSlack(PI.PHI);
+ unsigned MaxDepth = Slack + TailTrace.getInstrCycles(PI.PHI).Depth;
+ DEBUG(dbgs() << "Slack " << Slack << ":\t" << *PI.PHI);
+
+ // The condition is pulled into the critical path.
+ unsigned CondDepth = adjCycles(BranchDepth, PI.CondCycles);
+ if (CondDepth > MaxDepth) {
+ unsigned Extra = CondDepth - MaxDepth;
+ DEBUG(dbgs() << "Condition adds " << Extra << " cycles.\n");
+ if (Extra > CritLimit) {
+ DEBUG(dbgs() << "Exceeds limit of " << CritLimit << '\n');
+ return false;
+ }
+ }
+
+ // The TBB value is pulled into the critical path.
+ unsigned TDepth = adjCycles(TBBTrace.getPHIDepth(PI.PHI), PI.TCycles);
+ if (TDepth > MaxDepth) {
+ unsigned Extra = TDepth - MaxDepth;
+ DEBUG(dbgs() << "TBB data adds " << Extra << " cycles.\n");
+ if (Extra > CritLimit) {
+ DEBUG(dbgs() << "Exceeds limit of " << CritLimit << '\n');
+ return false;
+ }
+ }
+
+ // The FBB value is pulled into the critical path.
+ unsigned FDepth = adjCycles(FBBTrace.getPHIDepth(PI.PHI), PI.FCycles);
+ if (FDepth > MaxDepth) {
+ unsigned Extra = FDepth - MaxDepth;
+ DEBUG(dbgs() << "FBB data adds " << Extra << " cycles.\n");
+ if (Extra > CritLimit) {
+ DEBUG(dbgs() << "Exceeds limit of " << CritLimit << '\n');
+ return false;
+ }
+ }
+ }
return true;
}
@@ -644,11 +774,11 @@
bool EarlyIfConverter::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "********** EARLY IF-CONVERSION **********\n"
- << "********** Function: "
- << ((Value*)MF.getFunction())->getName() << '\n');
+ << "********** Function: " << MF.getName() << '\n');
TII = MF.getTarget().getInstrInfo();
TRI = MF.getTarget().getRegisterInfo();
- SchedModel = MF.getTarget().getInstrItineraryData()->SchedModel;
+ SchedModel =
+ MF.getTarget().getSubtarget<TargetSubtargetInfo>().getSchedModel();
MRI = &MF.getRegInfo();
DomTree = &getAnalysis<MachineDominatorTree>();
Loops = getAnalysisIfAvailable<MachineLoopInfo>();
@@ -667,6 +797,5 @@
if (tryConvertIf(I->getBlock()))
Changed = true;
- MF.verify(this, "After early if-conversion");
return Changed;
}
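
The rewritten cost model no longer rejects on the raw critical-path difference between the two sides. Instead it budgets every extension against CritLimit = MispredictPenalty/2, and adjCycles keeps the unsigned arithmetic from wrapping when a negative cycle adjustment would go below zero. The core of the per-value check reduces to approximately this sketch (simplified from the PHI loop above):

    // Downward-saturating adjustment, as in adjCycles above: e.g.
    // adjSat(2, -5) yields 0 rather than wrapping to a huge unsigned value.
    static unsigned adjSat(unsigned Cyc, int Delta) {
      if (Delta < 0 && Cyc + Delta > Cyc) // unsigned wrap detected
        return 0;
      return Cyc + Delta;
    }

    // A value pulled onto the critical path is acceptable only while the
    // extension beyond MaxDepth (PHI depth plus slack) stays within budget.
    static bool withinBudget(unsigned Depth, int Cycles,
                             unsigned MaxDepth, unsigned CritLimit) {
      unsigned NewDepth = adjSat(Depth, Cycles);
      return NewDepth <= MaxDepth || NewDepth - MaxDepth <= CritLimit;
    }
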
Modified: llvm/branches/AMDILBackend/lib/CodeGen/ExecutionDepsFix.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/ExecutionDepsFix.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/ExecutionDepsFix.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/ExecutionDepsFix.cpp Tue Jan 15 11:16:16 2013
@@ -626,9 +626,12 @@
}
dv->Instrs.push_back(mi);
- // Finally set all defs and non-collapsed uses to dv.
- for (unsigned i = 0, e = mi->getDesc().getNumOperands(); i != e; ++i) {
- MachineOperand &mo = mi->getOperand(i);
+ // Finally set all defs and non-collapsed uses to dv. We must iterate through
+ // all the operands, including imp-def ones.
+ for (MachineInstr::mop_iterator ii = mi->operands_begin(),
+ ee = mi->operands_end();
+ ii != ee; ++ii) {
+ MachineOperand &mo = *ii;
if (!mo.isReg()) continue;
int rx = regIndex(mo.getReg());
if (rx < 0) continue;
@@ -654,7 +657,7 @@
bool anyregs = false;
for (TargetRegisterClass::const_iterator I = RC->begin(), E = RC->end();
I != E; ++I)
- if (MF->getRegInfo().isPhysRegOrOverlapUsed(*I)) {
+ if (MF->getRegInfo().isPhysRegUsed(*I)) {
anyregs = true;
break;
}
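
The operand-iteration fix above matters because getDesc().getNumOperands() counts only the operands declared in the static MCInstrDesc; implicit operands appended at the tail (imp-def, imp-use) are never visited. Iterating operands_begin()/operands_end() covers the full list. In miniature, with stand-in types:

    #include <cstdio>
    #include <vector>

    struct Op { bool IsImplicit; };
    struct Instr {
      unsigned NumStaticOps;    // what getDesc().getNumOperands() reports
      std::vector<Op> Operands; // may carry extra implicit operands
    };

    int main() {
      Instr MI{1, {{false}, {true}}}; // one static operand plus one imp-def
      // Old loop: stops at NumStaticOps, never sees the imp-def.
      for (unsigned i = 0; i != MI.NumStaticOps; ++i)
        std::printf("static operand %u\n", i);
      // New loop: the full operand list, implicit operands included.
      for (const Op &O : MI.Operands)
        std::printf("%s operand\n", O.IsImplicit ? "implicit" : "explicit");
      return 0;
    }
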
Modified: llvm/branches/AMDILBackend/lib/CodeGen/ExpandPostRAPseudos.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/ExpandPostRAPseudos.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/ExpandPostRAPseudos.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/ExpandPostRAPseudos.cpp Tue Jan 15 11:16:16 2013
@@ -14,7 +14,6 @@
#define DEBUG_TYPE "postrapseudos"
#include "llvm/CodeGen/Passes.h"
-#include "llvm/Function.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -190,8 +189,7 @@
bool ExpandPostRA::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "Machine Function\n"
<< "********** EXPANDING POST-RA PSEUDO INSTRS **********\n"
- << "********** Function: "
- << MF.getFunction()->getName() << '\n');
+ << "********** Function: " << MF.getName() << '\n');
TRI = MF.getTarget().getRegisterInfo();
TII = MF.getTarget().getInstrInfo();
Modified: llvm/branches/AMDILBackend/lib/CodeGen/GCStrategy.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/GCStrategy.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/GCStrategy.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/GCStrategy.cpp Tue Jan 15 11:16:16 2013
@@ -20,6 +20,7 @@
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/DominatorInternals.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -387,9 +388,16 @@
const TargetFrameLowering *TFI = TM->getFrameLowering();
assert(TFI && "TargetRegisterInfo not available!");
- for (GCFunctionInfo::roots_iterator RI = FI->roots_begin(),
- RE = FI->roots_end(); RI != RE; ++RI)
- RI->StackOffset = TFI->getFrameIndexOffset(MF, RI->Num);
+ for (GCFunctionInfo::roots_iterator RI = FI->roots_begin();
+ RI != FI->roots_end();) {
+ // If the root references a dead object, no need to keep it.
+ if (MF.getFrameInfo()->isDeadObjectIndex(RI->Num)) {
+ RI = FI->removeStackRoot(RI);
+ } else {
+ RI->StackOffset = TFI->getFrameIndexOffset(MF, RI->Num);
+ ++RI;
+ }
+ }
}
bool GCMachineCodeAnalysis::runOnMachineFunction(MachineFunction &MF) {
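
Dropping roots that refer to dead frame objects uses the standard erase-or-advance idiom: removeStackRoot returns the iterator following the erased element, so the loop only increments when it keeps an entry. The same shape with a std::vector stand-in and hypothetical callbacks:

    #include <vector>

    struct Root { int FrameIdx; int StackOffset; };

    static void resolveRoots(std::vector<Root> &Roots,
                             bool (*IsDead)(int FrameIdx),
                             int (*FrameOffset)(int FrameIdx)) {
      for (auto RI = Roots.begin(); RI != Roots.end();) {
        if (IsDead(RI->FrameIdx)) {
          RI = Roots.erase(RI);  // erase hands back the next iterator
        } else {
          RI->StackOffset = FrameOffset(RI->FrameIdx);
          ++RI;                  // advance only when the entry is kept
        }
      }
    }
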
Modified: llvm/branches/AMDILBackend/lib/CodeGen/IfConversion.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/IfConversion.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/IfConversion.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/IfConversion.cpp Tue Jan 15 11:16:16 2013
@@ -13,7 +13,6 @@
#define DEBUG_TYPE "ifcvt"
#include "BranchFolding.h"
-#include "llvm/Function.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
@@ -282,7 +281,7 @@
}
DEBUG(dbgs() << "\nIfcvt: function (" << ++FnNum << ") \'"
- << MF.getFunction()->getName() << "\'");
+ << MF.getName() << "\'");
if (FnNum < IfCvtFnStart || (IfCvtFnStop != -1 && FnNum > IfCvtFnStop)) {
DEBUG(dbgs() << " skipped\n");
@@ -997,14 +996,13 @@
}
for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
unsigned Reg = Defs[i];
- if (Redefs.count(Reg)) {
+ if (!Redefs.insert(Reg)) {
if (AddImpUse)
// Treat predicated update as read + write.
MI->addOperand(MachineOperand::CreateReg(Reg, false/*IsDef*/,
true/*IsImp*/,false/*IsKill*/,
false/*IsDead*/,true/*IsUndef*/));
} else {
- Redefs.insert(Reg);
for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs)
Redefs.insert(*SubRegs);
}
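
The Redefs change folds a count() followed by insert() into a single insert() whose return value says whether the register was already tracked, saving one hash lookup on the hot path. With std::set, where insert() returns a pair, the same idiom reads:

    #include <set>

    // True if Reg was newly inserted, false if it was already tracked;
    // one lookup instead of count() followed by insert().
    static bool trackRedef(std::set<unsigned> &Redefs, unsigned Reg) {
      return Redefs.insert(Reg).second;
    }
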
Modified: llvm/branches/AMDILBackend/lib/CodeGen/InlineSpiller.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/InlineSpiller.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/InlineSpiller.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/InlineSpiller.cpp Tue Jan 15 11:16:16 2013
@@ -613,7 +613,7 @@
propagateSiblingValue(SVI);
} while (!WorkList.empty());
- // Look up the value we were looking for. We already did this lokup at the
+ // Look up the value we were looking for. We already did this lookup at the
// top of the function, but SibValues may have been invalidated.
SVI = SibValues.find(UseVNI);
assert(SVI != SibValues.end() && "Didn't compute requested info");
@@ -863,7 +863,7 @@
// If the instruction also writes VirtReg.reg, it had better not require the
// same register for uses and defs.
SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
- MIBundleOperands::RegInfo RI =
+ MIBundleOperands::VirtRegInfo RI =
MIBundleOperands(MI).analyzeVirtReg(VirtReg.reg, &Ops);
if (RI.Tied) {
markValueUsed(&VirtReg, ParentVNI);
@@ -1142,7 +1142,7 @@
// Analyze instruction.
SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
- MIBundleOperands::RegInfo RI =
+ MIBundleOperands::VirtRegInfo RI =
MIBundleOperands(MI).analyzeVirtReg(Reg, &Ops);
// Find the slot index where this instruction reads and writes OldLI.
Modified: llvm/branches/AMDILBackend/lib/CodeGen/IntrinsicLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/IntrinsicLowering.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/IntrinsicLowering.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/IntrinsicLowering.cpp Tue Jan 15 11:16:16 2013
@@ -21,7 +21,7 @@
#include "llvm/Support/CallSite.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
using namespace llvm;
template <class ArgIt>
@@ -457,7 +457,7 @@
break; // Strip out annotate intrinsic
case Intrinsic::memcpy: {
- IntegerType *IntPtr = TD.getIntPtrType(Context);
+ Type *IntPtr = TD.getIntPtrType(Context);
Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
/* isSigned */ false);
Value *Ops[3];
@@ -468,7 +468,7 @@
break;
}
case Intrinsic::memmove: {
- IntegerType *IntPtr = TD.getIntPtrType(Context);
+ Type *IntPtr = TD.getIntPtrType(Context);
Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
/* isSigned */ false);
Value *Ops[3];
@@ -479,7 +479,7 @@
break;
}
case Intrinsic::memset: {
- IntegerType *IntPtr = TD.getIntPtrType(Context);
+ Type *IntPtr = TD.getIntPtrType(Context);
Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
/* isSigned */ false);
Value *Ops[3];
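
Each of these cases lowers the intrinsic to its libc counterpart, first casting the 64-bit length operand to the target's pointer-sized integer, now obtained from DataLayout rather than TargetData. The C-level shape of the memcpy case, with the cast written out explicitly (a conceptual sketch, not the IRBuilder code itself):

    #include <cstdint>
    #include <cstring>

    // Conceptual result of lowering llvm.memcpy: the i64 length is narrowed
    // or widened to the pointer-sized integer before the libc call.
    void loweredMemcpy(void *Dst, const void *Src, unsigned long long Len) {
      uintptr_t N = (uintptr_t)Len; // CreateIntCast to TD.getIntPtrType(...)
      std::memcpy(Dst, Src, N);
    }
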
Modified: llvm/branches/AMDILBackend/lib/CodeGen/LLVMTargetMachine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/LLVMTargetMachine.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/LLVMTargetMachine.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/LLVMTargetMachine.cpp Tue Jan 15 11:16:16 2013
@@ -172,7 +172,7 @@
const MCSubtargetInfo &STI = getSubtarget<MCSubtargetInfo>();
MCE = getTarget().createMCCodeEmitter(*getInstrInfo(), MRI, STI,
*Context);
- MAB = getTarget().createMCAsmBackend(getTargetTriple());
+ MAB = getTarget().createMCAsmBackend(getTargetTriple(), TargetCPU);
}
MCStreamer *S = getTarget().createAsmStreamer(*Context, Out,
@@ -191,7 +191,7 @@
// emission fails.
MCCodeEmitter *MCE = getTarget().createMCCodeEmitter(*getInstrInfo(), MRI,
STI, *Context);
- MCAsmBackend *MAB = getTarget().createMCAsmBackend(getTargetTriple());
+ MCAsmBackend *MAB = getTarget().createMCAsmBackend(getTargetTriple(), TargetCPU);
if (MCE == 0 || MAB == 0)
return true;
@@ -266,7 +266,7 @@
const MCSubtargetInfo &STI = getSubtarget<MCSubtargetInfo>();
MCCodeEmitter *MCE = getTarget().createMCCodeEmitter(*getInstrInfo(), MRI,
STI, *Ctx);
- MCAsmBackend *MAB = getTarget().createMCAsmBackend(getTargetTriple());
+ MCAsmBackend *MAB = getTarget().createMCAsmBackend(getTargetTriple(), TargetCPU);
if (MCE == 0 || MAB == 0)
return true;
Modified: llvm/branches/AMDILBackend/lib/CodeGen/LiveDebugVariables.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/LiveDebugVariables.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/LiveDebugVariables.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/LiveDebugVariables.cpp Tue Jan 15 11:16:16 2013
@@ -687,8 +687,7 @@
clear();
LS.initialize(mf);
DEBUG(dbgs() << "********** COMPUTING LIVE DEBUG VARIABLES: "
- << ((Value*)mf.getFunction())->getName()
- << " **********\n");
+ << mf.getName() << " **********\n");
bool Changed = collectDebugValues(mf);
computeIntervals();
Modified: llvm/branches/AMDILBackend/lib/CodeGen/LiveInterval.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/LiveInterval.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/LiveInterval.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/LiveInterval.cpp Tue Jan 15 11:16:16 2013
@@ -27,6 +27,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "RegisterCoalescer.h"
#include <algorithm>
using namespace llvm;
@@ -58,8 +59,16 @@
return VNI;
}
if (SlotIndex::isSameInstr(Def, I->start)) {
- assert(I->start == Def && "Cannot insert def, already live");
- assert(I->valno->def == Def && "Inconsistent existing value def");
+ assert(I->valno->def == I->start && "Inconsistent existing value def");
+
+ // It is possible to have both normal and early-clobber defs of the same
+ // register on an instruction. It doesn't make a lot of sense, but it is
+ // possible to specify in inline assembly.
+ //
+ // Just convert everything to early-clobber.
+ Def = std::min(Def, I->start);
+ if (Def != I->start)
+ I->start = I->valno->def = Def;
return I->valno;
}
assert(SlotIndex::isEarlierInstr(Def, I->start) && "Already live at def");
@@ -68,21 +77,6 @@
return VNI;
}
-/// killedInRange - Return true if the interval has kills in [Start,End).
-bool LiveInterval::killedInRange(SlotIndex Start, SlotIndex End) const {
- Ranges::const_iterator r =
- std::lower_bound(ranges.begin(), ranges.end(), End);
-
- // Now r points to the first interval with start >= End, or ranges.end().
- if (r == ranges.begin())
- return false;
-
- --r;
- // Now r points to the last interval with end <= End.
- // r->end is the kill point.
- return r->end >= Start && r->end < End;
-}
-
// overlaps - Return true if the intersection of the two live intervals is
// not empty.
//
@@ -142,6 +136,48 @@
return false;
}
+bool LiveInterval::overlaps(const LiveInterval &Other,
+ const CoalescerPair &CP,
+ const SlotIndexes &Indexes) const {
+ assert(!empty() && "empty interval");
+ if (Other.empty())
+ return false;
+
+ // Use binary searches to find initial positions.
+ const_iterator I = find(Other.beginIndex());
+ const_iterator IE = end();
+ if (I == IE)
+ return false;
+ const_iterator J = Other.find(I->start);
+ const_iterator JE = Other.end();
+ if (J == JE)
+ return false;
+
+ for (;;) {
+ // J has just been advanced to satisfy:
+ assert(J->end >= I->start);
+ // Check for an overlap.
+ if (J->start < I->end) {
+ // I and J are overlapping. Find the later start.
+ SlotIndex Def = std::max(I->start, J->start);
+ // Allow the overlap if Def is a coalescable copy.
+ if (Def.isBlock() ||
+ !CP.isCoalescable(Indexes.getInstructionFromIndex(Def)))
+ return true;
+ }
+ // Advance the iterator that ends first to check for more overlaps.
+ if (J->end > I->end) {
+ std::swap(I, J);
+ std::swap(IE, JE);
+ }
+ // Advance J until J->end >= I->start.
+ do
+ if (++J == JE)
+ return false;
+ while (J->end < I->start);
+ }
+}
+
/// overlaps - Return true if the live interval overlaps a range specified
/// by [Start, End).
bool LiveInterval::overlaps(SlotIndex Start, SlotIndex End) const {
@@ -399,7 +435,7 @@
// If we have to apply a mapping to our base interval assignment, rewrite it
// now.
- if (MustMapCurValNos) {
+ if (MustMapCurValNos && !empty()) {
// Map the first live range.
iterator OutIt = begin();
@@ -673,27 +709,6 @@
return V2;
}
-void LiveInterval::Copy(const LiveInterval &RHS,
- MachineRegisterInfo *MRI,
- VNInfo::Allocator &VNInfoAllocator) {
- ranges.clear();
- valnos.clear();
- std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(RHS.reg);
- MRI->setRegAllocationHint(reg, Hint.first, Hint.second);
-
- weight = RHS.weight;
- for (unsigned i = 0, e = RHS.getNumValNums(); i != e; ++i) {
- const VNInfo *VNI = RHS.getValNumInfo(i);
- createValueCopy(VNI, VNInfoAllocator);
- }
- for (unsigned i = 0, e = RHS.ranges.size(); i != e; ++i) {
- const LiveRange &LR = RHS.ranges[i];
- addRange(LiveRange(LR.start, LR.end, getValNumInfo(LR.valno->id)));
- }
-
- verify();
-}
-
unsigned LiveInterval::getSize() const {
unsigned Sum = 0;
for (const_iterator I = begin(), E = end(); I != E; ++I)
@@ -705,9 +720,11 @@
return os << '[' << LR.start << ',' << LR.end << ':' << LR.valno->id << ")";
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LiveRange::dump() const {
dbgs() << *this << "\n";
}
+#endif
void LiveInterval::print(raw_ostream &OS) const {
if (empty())
@@ -740,9 +757,11 @@
}
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LiveInterval::dump() const {
dbgs() << *this << "\n";
}
+#endif
#ifndef NDEBUG
void LiveInterval::verify() const {
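
The new overlaps(Other, CP, Indexes) above is a two-pointer sweep over sorted, disjoint range lists: report an overlap when the current ranges intersect (unless the def at the later start is a coalescable copy), otherwise advance whichever range ends first. Stripped of the coalescer hook, the sweep is the textbook interval-intersection loop:

    #include <cstddef>
    #include <vector>

    struct Range { int Start, End; }; // half-open [Start, End), sorted, disjoint

    static bool overlaps(const std::vector<Range> &A,
                         const std::vector<Range> &B) {
      std::size_t i = 0, j = 0;
      while (i != A.size() && j != B.size()) {
        if (A[i].Start < B[j].End && B[j].Start < A[i].End)
          return true; // current ranges intersect
        // The range that ends first cannot intersect anything later on the
        // other side, so advance it.
        if (A[i].End < B[j].End) ++i; else ++j;
      }
      return false;
    }
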
Modified: llvm/branches/AMDILBackend/lib/CodeGen/LiveIntervalAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/LiveIntervalAnalysis.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/LiveIntervalAnalysis.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/LiveIntervalAnalysis.cpp Tue Jan 15 11:16:16 2013
@@ -34,6 +34,7 @@
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "LiveRangeCalc.h"
+#include "VirtRegMap.h"
#include <algorithm>
#include <limits>
#include <cmath>
@@ -109,8 +110,6 @@
DomTree = &getAnalysis<MachineDominatorTree>();
if (!LRCalc)
LRCalc = new LiveRangeCalc();
- AllocatableRegs = TRI->getAllocatableSet(fn);
- ReservedRegs = TRI->getReservedRegs(fn);
// Allocate space for all virtual registers.
VirtRegIntervals.resize(MRI->getNumVirtRegs());
@@ -147,6 +146,11 @@
OS << PrintReg(Reg) << " = " << getInterval(Reg) << '\n';
}
+ OS << "RegMasks:";
+ for (unsigned i = 0, e = RegMaskSlots.size(); i != e; ++i)
+ OS << ' ' << RegMaskSlots[i];
+ OS << '\n';
+
printInstrs(OS);
}
@@ -155,9 +159,11 @@
MF->print(OS, Indexes);
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LiveIntervals::dumpInstrs() const {
printInstrs(dbgs());
}
+#endif
static
bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
@@ -382,8 +388,7 @@
/// which a variable is live
void LiveIntervals::computeIntervals() {
DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
- << "********** Function: "
- << ((Value*)MF->getFunction())->getName() << '\n');
+ << "********** Function: " << MF->getName() << '\n');
RegMaskBlocks.resize(MF->getNumBlockIDs());
@@ -440,7 +445,7 @@
// Compute the number of register mask instructions in this block.
std::pair<unsigned, unsigned> &RMB = RegMaskBlocks[MBB->getNumber()];
- RMB.second = RegMaskSlots.size() - RMB.first;;
+ RMB.second = RegMaskSlots.size() - RMB.first;
}
// Create empty intervals for registers defined by implicit_def's (except
@@ -497,7 +502,7 @@
RegMaskBits.push_back(MO->getRegMask());
}
// Compute the number of register mask instructions in this block.
- RMB.second = RegMaskSlots.size() - RMB.first;;
+ RMB.second = RegMaskSlots.size() - RMB.first;
}
}
@@ -540,11 +545,11 @@
// Ignore uses of reserved registers. We only track defs of those.
for (MCRegUnitRootIterator Roots(Unit, TRI); Roots.isValid(); ++Roots) {
unsigned Root = *Roots;
- if (!isReserved(Root) && !MRI->reg_empty(Root))
+ if (!MRI->isReserved(Root) && !MRI->reg_empty(Root))
LRCalc->extendToUses(LI, Root);
for (MCSuperRegIterator Supers(Root, TRI); Supers.isValid(); ++Supers) {
unsigned Reg = *Supers;
- if (!isReserved(Reg) && !MRI->reg_empty(Reg))
+ if (!MRI->isReserved(Reg) && !MRI->reg_empty(Reg))
LRCalc->extendToUses(LI, Reg);
}
}
@@ -729,17 +734,100 @@
return CanSeparate;
}
+void LiveIntervals::extendToIndices(LiveInterval *LI,
+ ArrayRef<SlotIndex> Indices) {
+ assert(LRCalc && "LRCalc not initialized.");
+ LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
+ for (unsigned i = 0, e = Indices.size(); i != e; ++i)
+ LRCalc->extend(LI, Indices[i]);
+}
+
+void LiveIntervals::pruneValue(LiveInterval *LI, SlotIndex Kill,
+ SmallVectorImpl<SlotIndex> *EndPoints) {
+ LiveRangeQuery LRQ(*LI, Kill);
+ VNInfo *VNI = LRQ.valueOut();
+ if (!VNI)
+ return;
+
+ MachineBasicBlock *KillMBB = Indexes->getMBBFromIndex(Kill);
+ SlotIndex MBBStart, MBBEnd;
+ tie(MBBStart, MBBEnd) = Indexes->getMBBRange(KillMBB);
+
+ // If VNI isn't live out from KillMBB, the value is trivially pruned.
+ if (LRQ.endPoint() < MBBEnd) {
+ LI->removeRange(Kill, LRQ.endPoint());
+ if (EndPoints) EndPoints->push_back(LRQ.endPoint());
+ return;
+ }
+
+ // VNI is live out of KillMBB.
+ LI->removeRange(Kill, MBBEnd);
+ if (EndPoints) EndPoints->push_back(MBBEnd);
+
+ // Find all blocks that are reachable from KillMBB without leaving VNI's live
+ // range. It is possible that KillMBB itself is reachable, so start a DFS
+ // from each successor.
+ typedef SmallPtrSet<MachineBasicBlock*, 9> VisitedTy;
+ VisitedTy Visited;
+ for (MachineBasicBlock::succ_iterator
+ SuccI = KillMBB->succ_begin(), SuccE = KillMBB->succ_end();
+ SuccI != SuccE; ++SuccI) {
+ for (df_ext_iterator<MachineBasicBlock*, VisitedTy>
+ I = df_ext_begin(*SuccI, Visited), E = df_ext_end(*SuccI, Visited);
+ I != E;) {
+ MachineBasicBlock *MBB = *I;
+
+ // Check if VNI is live in to MBB.
+ tie(MBBStart, MBBEnd) = Indexes->getMBBRange(MBB);
+ LiveRangeQuery LRQ(*LI, MBBStart);
+ if (LRQ.valueIn() != VNI) {
+ // This block isn't part of the VNI live range. Prune the search.
+ I.skipChildren();
+ continue;
+ }
+
+ // Prune the search if VNI is killed in MBB.
+ if (LRQ.endPoint() < MBBEnd) {
+ LI->removeRange(MBBStart, LRQ.endPoint());
+ if (EndPoints) EndPoints->push_back(LRQ.endPoint());
+ I.skipChildren();
+ continue;
+ }
+
+ // VNI is live through MBB.
+ LI->removeRange(MBBStart, MBBEnd);
+ if (EndPoints) EndPoints->push_back(MBBEnd);
+ ++I;
+ }
+ }
+}
//===----------------------------------------------------------------------===//
// Register allocator hooks.
//
-void LiveIntervals::addKillFlags() {
+void LiveIntervals::addKillFlags(const VirtRegMap *VRM) {
+ // Keep track of regunit ranges.
+ SmallVector<std::pair<LiveInterval*, LiveInterval::iterator>, 8> RU;
+
for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
if (MRI->reg_nodbg_empty(Reg))
continue;
LiveInterval *LI = &getInterval(Reg);
+ if (LI->empty())
+ continue;
+
+ // Find the regunit intervals for the assigned register. They may overlap
+ // the virtual register live range, cancelling any kills.
+ RU.clear();
+ for (MCRegUnitIterator Units(VRM->getPhys(Reg), TRI); Units.isValid();
+ ++Units) {
+ LiveInterval *RUInt = &getRegUnit(*Units);
+ if (RUInt->empty())
+ continue;
+ RU.push_back(std::make_pair(RUInt, RUInt->find(LI->begin()->end)));
+ }
// Every instruction that kills Reg corresponds to a live range end point.
for (LiveInterval::iterator RI = LI->begin(), RE = LI->end(); RI != RE;
@@ -750,7 +838,32 @@
MachineInstr *MI = getInstructionFromIndex(RI->end);
if (!MI)
continue;
- MI->addRegisterKilled(Reg, NULL);
+
+ // Check if any of the regunits are live beyond the end of RI. That could
+ // happen when a physreg is defined as a copy of a virtreg:
+ //
+ // %EAX = COPY %vreg5
+ // FOO %vreg5 <--- MI, cancel kill because %EAX is live.
+ // BAR %EAX<kill>
+ //
+ // There should be no kill flag on FOO when %vreg5 is rewritten as %EAX.
+ bool CancelKill = false;
+ for (unsigned u = 0, e = RU.size(); u != e; ++u) {
+ LiveInterval *RInt = RU[u].first;
+ LiveInterval::iterator &I = RU[u].second;
+ if (I == RInt->end())
+ continue;
+ I = RInt->advanceTo(I, RI->end);
+ if (I == RInt->end() || I->start >= RI->end)
+ continue;
+ // I is overlapping RI.
+ CancelKill = true;
+ break;
+ }
+ if (CancelKill)
+ MI->clearRegisterKills(Reg, NULL);
+ else
+ MI->addRegisterKilled(Reg, NULL);
}
}
}
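
addKillFlags(VRM) now cross-checks each virtual-register endpoint against the live ranges of the assigned physreg's regunits: if any regunit is still live past the would-be kill, as in the %EAX example above, the flag is cleared instead of set. Per endpoint, the probe amounts to advancing a cached iterator through a sorted range list, roughly:

    #include <cstddef>
    #include <vector>

    struct Range { int Start, End; }; // sorted, disjoint, half-open

    // Does any range in RU extend across point P? Mirrors
    // I = RInt->advanceTo(I, RI->end) followed by the start check above.
    static bool liveAcross(const std::vector<Range> &RU, std::size_t &I,
                           int P) {
      while (I != RU.size() && RU[I].End <= P)
        ++I;                                      // skip ranges ending by P
      return I != RU.size() && RU[I].Start < P;   // a range straddles P
    }
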
@@ -900,497 +1013,321 @@
LiveIntervals& LIS;
const MachineRegisterInfo& MRI;
const TargetRegisterInfo& TRI;
+ SlotIndex OldIdx;
SlotIndex NewIdx;
-
- typedef std::pair<LiveInterval*, LiveRange*> IntRangePair;
- typedef DenseSet<IntRangePair> RangeSet;
-
- struct RegRanges {
- LiveRange* Use;
- LiveRange* EC;
- LiveRange* Dead;
- LiveRange* Def;
- RegRanges() : Use(0), EC(0), Dead(0), Def(0) {}
- };
- typedef DenseMap<unsigned, RegRanges> BundleRanges;
+ SmallPtrSet<LiveInterval*, 8> Updated;
+ bool UpdateFlags;
public:
HMEditor(LiveIntervals& LIS, const MachineRegisterInfo& MRI,
- const TargetRegisterInfo& TRI, SlotIndex NewIdx)
- : LIS(LIS), MRI(MRI), TRI(TRI), NewIdx(NewIdx) {}
-
- // Update intervals for all operands of MI from OldIdx to NewIdx.
- // This assumes that MI used to be at OldIdx, and now resides at
- // NewIdx.
- void moveAllRangesFrom(MachineInstr* MI, SlotIndex OldIdx) {
- assert(NewIdx != OldIdx && "No-op move? That's a bit strange.");
-
- // Collect the operands.
- RangeSet Entering, Internal, Exiting;
- bool hasRegMaskOp = false;
- collectRanges(MI, Entering, Internal, Exiting, hasRegMaskOp, OldIdx);
-
- // To keep the LiveRanges valid within an interval, move the ranges closest
- // to the destination first. This prevents ranges from overlapping, to that
- // APIs like removeRange still work.
- if (NewIdx < OldIdx) {
- moveAllEnteringFrom(OldIdx, Entering);
- moveAllInternalFrom(OldIdx, Internal);
- moveAllExitingFrom(OldIdx, Exiting);
- }
- else {
- moveAllExitingFrom(OldIdx, Exiting);
- moveAllInternalFrom(OldIdx, Internal);
- moveAllEnteringFrom(OldIdx, Entering);
- }
-
- if (hasRegMaskOp)
- updateRegMaskSlots(OldIdx);
-
-#ifndef NDEBUG
- LIValidator validator;
- validator = std::for_each(Entering.begin(), Entering.end(), validator);
- validator = std::for_each(Internal.begin(), Internal.end(), validator);
- validator = std::for_each(Exiting.begin(), Exiting.end(), validator);
- assert(validator.rangesOk() && "moveAllOperandsFrom broke liveness.");
-#endif
-
- }
-
- // Update intervals for all operands of MI to refer to BundleStart's
- // SlotIndex.
- void moveAllRangesInto(MachineInstr* MI, MachineInstr* BundleStart) {
- if (MI == BundleStart)
- return; // Bundling instr with itself - nothing to do.
-
- SlotIndex OldIdx = LIS.getSlotIndexes()->getInstructionIndex(MI);
- assert(LIS.getSlotIndexes()->getInstructionFromIndex(OldIdx) == MI &&
- "SlotIndex <-> Instruction mapping broken for MI");
-
- // Collect all ranges already in the bundle.
- MachineBasicBlock::instr_iterator BII(BundleStart);
- RangeSet Entering, Internal, Exiting;
- bool hasRegMaskOp = false;
- collectRanges(BII, Entering, Internal, Exiting, hasRegMaskOp, NewIdx);
- assert(!hasRegMaskOp && "Can't have RegMask operand in bundle.");
- for (++BII; &*BII == MI || BII->isInsideBundle(); ++BII) {
- if (&*BII == MI)
+ const TargetRegisterInfo& TRI,
+ SlotIndex OldIdx, SlotIndex NewIdx, bool UpdateFlags)
+ : LIS(LIS), MRI(MRI), TRI(TRI), OldIdx(OldIdx), NewIdx(NewIdx),
+ UpdateFlags(UpdateFlags) {}
+
+ // FIXME: UpdateFlags is a workaround that creates live intervals for all
+ // physregs, even those that aren't needed for regalloc, in order to update
+ // kill flags. This is wasteful. Eventually, LiveVariables will strip all kill
+ // flags, and postRA passes will use a live register utility instead.
+ LiveInterval *getRegUnitLI(unsigned Unit) {
+ if (UpdateFlags)
+ return &LIS.getRegUnit(Unit);
+ return LIS.getCachedRegUnit(Unit);
+ }
+
+ /// Update all live ranges touched by MI, assuming a move from OldIdx to
+ /// NewIdx.
+ void updateAllRanges(MachineInstr *MI) {
+ DEBUG(dbgs() << "handleMove " << OldIdx << " -> " << NewIdx << ": " << *MI);
+ bool hasRegMask = false;
+ for (MIOperands MO(MI); MO.isValid(); ++MO) {
+ if (MO->isRegMask())
+ hasRegMask = true;
+ if (!MO->isReg())
continue;
- collectRanges(BII, Entering, Internal, Exiting, hasRegMaskOp, NewIdx);
- assert(!hasRegMaskOp && "Can't have RegMask operand in bundle.");
- }
-
- BundleRanges BR = createBundleRanges(Entering, Internal, Exiting);
-
- Entering.clear();
- Internal.clear();
- Exiting.clear();
- collectRanges(MI, Entering, Internal, Exiting, hasRegMaskOp, OldIdx);
- assert(!hasRegMaskOp && "Can't have RegMask operand in bundle.");
-
- DEBUG(dbgs() << "Entering: " << Entering.size() << "\n");
- DEBUG(dbgs() << "Internal: " << Internal.size() << "\n");
- DEBUG(dbgs() << "Exiting: " << Exiting.size() << "\n");
-
- moveAllEnteringFromInto(OldIdx, Entering, BR);
- moveAllInternalFromInto(OldIdx, Internal, BR);
- moveAllExitingFromInto(OldIdx, Exiting, BR);
+ // Aggressively clear all kill flags.
+ // They are reinserted by VirtRegRewriter.
+ if (MO->isUse())
+ MO->setIsKill(false);
+ unsigned Reg = MO->getReg();
+ if (!Reg)
+ continue;
+ if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ updateRange(LIS.getInterval(Reg));
+ continue;
+ }
-#ifndef NDEBUG
- LIValidator validator;
- validator = std::for_each(Entering.begin(), Entering.end(), validator);
- validator = std::for_each(Internal.begin(), Internal.end(), validator);
- validator = std::for_each(Exiting.begin(), Exiting.end(), validator);
- assert(validator.rangesOk() && "moveAllOperandsInto broke liveness.");
-#endif
+ // For physregs, only update the regunits that actually have a
+ // precomputed live range.
+ for (MCRegUnitIterator Units(Reg, &TRI); Units.isValid(); ++Units)
+ if (LiveInterval *LI = getRegUnitLI(*Units))
+ updateRange(*LI);
+ }
+ if (hasRegMask)
+ updateRegMaskSlots();
}
private:
+ /// Update a single live range, assuming an instruction has been moved from
+ /// OldIdx to NewIdx.
+ void updateRange(LiveInterval &LI) {
+ if (!Updated.insert(&LI))
+ return;
+ DEBUG({
+ dbgs() << " ";
+ if (TargetRegisterInfo::isVirtualRegister(LI.reg))
+ dbgs() << PrintReg(LI.reg);
+ else
+ dbgs() << PrintRegUnit(LI.reg, &TRI);
+ dbgs() << ":\t" << LI << '\n';
+ });
+ if (SlotIndex::isEarlierInstr(OldIdx, NewIdx))
+ handleMoveDown(LI);
+ else
+ handleMoveUp(LI);
+ DEBUG(dbgs() << " -->\t" << LI << '\n');
+ LI.verify();
+ }
+
+ /// Update LI to reflect an instruction has been moved downwards from OldIdx
+ /// to NewIdx.
+ ///
+ /// 1. Live def at OldIdx:
+ /// Move def to NewIdx, assert endpoint after NewIdx.
+ ///
+ /// 2. Live def at OldIdx, killed at NewIdx:
+ /// Change to dead def at NewIdx.
+ /// (Happens when bundling def+kill together).
+ ///
+ /// 3. Dead def at OldIdx:
+ /// Move def to NewIdx, possibly across another live value.
+ ///
+ /// 4. Def at OldIdx AND at NewIdx:
+ /// Remove live range [OldIdx;NewIdx) and value defined at OldIdx.
+ /// (Happens when bundling multiple defs together).
+ ///
+ /// 5. Value read at OldIdx, killed before NewIdx:
+ /// Extend kill to NewIdx.
+ ///
+ void handleMoveDown(LiveInterval &LI) {
+ // First look for a kill at OldIdx.
+ LiveInterval::iterator I = LI.find(OldIdx.getBaseIndex());
+ LiveInterval::iterator E = LI.end();
+ // Is LI even live at OldIdx?
+ if (I == E || SlotIndex::isEarlierInstr(OldIdx, I->start))
+ return;
-#ifndef NDEBUG
- class LIValidator {
- private:
- DenseSet<const LiveInterval*> Checked, Bogus;
- public:
- void operator()(const IntRangePair& P) {
- const LiveInterval* LI = P.first;
- if (Checked.count(LI))
+ // Handle a live-in value.
+ if (!SlotIndex::isSameInstr(I->start, OldIdx)) {
+ bool isKill = SlotIndex::isSameInstr(OldIdx, I->end);
+ // If the live-in value already extends to NewIdx, there is nothing to do.
+ if (!SlotIndex::isEarlierInstr(I->end, NewIdx))
return;
- Checked.insert(LI);
- if (LI->empty())
+ // Aggressively remove all kill flags from the old kill point.
+      // Kill flags shouldn't be used while live intervals exist; they will be
+      // reinserted by VirtRegRewriter.
+ if (MachineInstr *KillMI = LIS.getInstructionFromIndex(I->end))
+ for (MIBundleOperands MO(KillMI); MO.isValid(); ++MO)
+ if (MO->isReg() && MO->isUse())
+ MO->setIsKill(false);
+ // Adjust I->end to reach NewIdx. This may temporarily make LI invalid by
+ // overlapping ranges. Case 5 above.
+ I->end = NewIdx.getRegSlot(I->end.isEarlyClobber());
+ // If this was a kill, there may also be a def. Otherwise we're done.
+ if (!isKill)
return;
- SlotIndex LastEnd = LI->begin()->start;
- for (LiveInterval::const_iterator LRI = LI->begin(), LRE = LI->end();
- LRI != LRE; ++LRI) {
- const LiveRange& LR = *LRI;
- if (LastEnd > LR.start || LR.start >= LR.end)
- Bogus.insert(LI);
- LastEnd = LR.end;
- }
+ ++I;
}
- bool rangesOk() const {
- return Bogus.empty();
+ // Check for a def at OldIdx.
+ if (I == E || !SlotIndex::isSameInstr(OldIdx, I->start))
+ return;
+ // We have a def at OldIdx.
+ VNInfo *DefVNI = I->valno;
+ assert(DefVNI->def == I->start && "Inconsistent def");
+ DefVNI->def = NewIdx.getRegSlot(I->start.isEarlyClobber());
+ // If the defined value extends beyond NewIdx, just move the def down.
+ // This is case 1 above.
+ if (SlotIndex::isEarlierInstr(NewIdx, I->end)) {
+ I->start = DefVNI->def;
+ return;
}
- };
-#endif
-
- // Collect IntRangePairs for all operands of MI that may need fixing.
- // Treat's MI's index as OldIdx (regardless of what it is in SlotIndexes'
- // maps).
- void collectRanges(MachineInstr* MI, RangeSet& Entering, RangeSet& Internal,
- RangeSet& Exiting, bool& hasRegMaskOp, SlotIndex OldIdx) {
- hasRegMaskOp = false;
- for (MachineInstr::mop_iterator MOI = MI->operands_begin(),
- MOE = MI->operands_end();
- MOI != MOE; ++MOI) {
- const MachineOperand& MO = *MOI;
-
- if (MO.isRegMask()) {
- hasRegMaskOp = true;
- continue;
- }
-
- if (!MO.isReg() || MO.getReg() == 0)
- continue;
-
- unsigned Reg = MO.getReg();
-
- // TODO: Currently we're skipping uses that are reserved or have no
- // interval, but we're not updating their kills. This should be
- // fixed.
- if (TargetRegisterInfo::isPhysicalRegister(Reg) && LIS.isReserved(Reg))
- continue;
-
- // Collect ranges for register units. These live ranges are computed on
- // demand, so just skip any that haven't been computed yet.
- if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
- for (MCRegUnitIterator Units(Reg, &TRI); Units.isValid(); ++Units)
- if (LiveInterval *LI = LIS.getCachedRegUnit(*Units))
- collectRanges(MO, LI, Entering, Internal, Exiting, OldIdx);
- } else {
- // Collect ranges for individual virtual registers.
- collectRanges(MO, &LIS.getInterval(Reg),
- Entering, Internal, Exiting, OldIdx);
- }
+ // The remaining possibilities are now:
+ // 2. Live def at OldIdx, killed at NewIdx: isSameInstr(I->end, NewIdx).
+ // 3. Dead def at OldIdx: I->end = OldIdx.getDeadSlot().
+ // In either case, it is possible that there is an existing def at NewIdx.
+ assert((I->end == OldIdx.getDeadSlot() ||
+ SlotIndex::isSameInstr(I->end, NewIdx)) &&
+ "Cannot move def below kill");
+ LiveInterval::iterator NewI = LI.advanceTo(I, NewIdx.getRegSlot());
+ if (NewI != E && SlotIndex::isSameInstr(NewI->start, NewIdx)) {
+ // There is an existing def at NewIdx, case 4 above. The def at OldIdx is
+ // coalesced into that value.
+ assert(NewI->valno != DefVNI && "Multiple defs of value?");
+ LI.removeValNo(DefVNI);
+ return;
}
- }
+ // There was no existing def at NewIdx. Turn *I into a dead def at NewIdx.
+ // If the def at OldIdx was dead, we allow it to be moved across other LI
+ // values. The new range should be placed immediately before NewI, move any
+ // intermediate ranges up.
+ assert(NewI != I && "Inconsistent iterators");
+ std::copy(llvm::next(I), NewI, I);
+ *llvm::prior(NewI) = LiveRange(DefVNI->def, NewIdx.getDeadSlot(), DefVNI);
+ }
+
+ /// Update LI to reflect an instruction has been moved upwards from OldIdx
+ /// to NewIdx.
+ ///
+ /// 1. Live def at OldIdx:
+ /// Hoist def to NewIdx.
+ ///
+ /// 2. Dead def at OldIdx:
+ /// Hoist def+end to NewIdx, possibly move across other values.
+ ///
+ /// 3. Dead def at OldIdx AND existing def at NewIdx:
+ /// Remove value defined at OldIdx, coalescing it with existing value.
+ ///
+ /// 4. Live def at OldIdx AND existing def at NewIdx:
+ /// Remove value defined at NewIdx, hoist OldIdx def to NewIdx.
+ /// (Happens when bundling multiple defs together).
+ ///
+ /// 5. Value killed at OldIdx:
+ /// Hoist kill to NewIdx, then scan for last kill between NewIdx and
+ /// OldIdx.
+ ///
+ void handleMoveUp(LiveInterval &LI) {
+ // First look for a kill at OldIdx.
+ LiveInterval::iterator I = LI.find(OldIdx.getBaseIndex());
+ LiveInterval::iterator E = LI.end();
+ // Is LI even live at OldIdx?
+ if (I == E || SlotIndex::isEarlierInstr(OldIdx, I->start))
+ return;
- void collectRanges(const MachineOperand &MO, LiveInterval *LI,
- RangeSet &Entering, RangeSet &Internal, RangeSet &Exiting,
- SlotIndex OldIdx) {
- if (MO.readsReg()) {
- LiveRange* LR = LI->getLiveRangeContaining(OldIdx);
- if (LR != 0)
- Entering.insert(std::make_pair(LI, LR));
- }
- if (MO.isDef()) {
- LiveRange* LR = LI->getLiveRangeContaining(OldIdx.getRegSlot());
- assert(LR != 0 && "No live range for def?");
- if (LR->end > OldIdx.getDeadSlot())
- Exiting.insert(std::make_pair(LI, LR));
- else
- Internal.insert(std::make_pair(LI, LR));
+ // Handle a live-in value.
+ if (!SlotIndex::isSameInstr(I->start, OldIdx)) {
+ // If the live-in value isn't killed here, there is nothing to do.
+ if (!SlotIndex::isSameInstr(OldIdx, I->end))
+ return;
+ // Adjust I->end to end at NewIdx. If we are hoisting a kill above
+ // another use, we need to search for that use. Case 5 above.
+ I->end = NewIdx.getRegSlot(I->end.isEarlyClobber());
+ ++I;
+ // If OldIdx also defines a value, there couldn't have been another use.
+ if (I == E || !SlotIndex::isSameInstr(I->start, OldIdx)) {
+ // No def, search for the new kill.
+ // This can never be an early clobber kill since there is no def.
+ llvm::prior(I)->end = findLastUseBefore(LI.reg).getRegSlot();
+ return;
+ }
}
- }
- BundleRanges createBundleRanges(RangeSet& Entering,
- RangeSet& Internal,
- RangeSet& Exiting) {
- BundleRanges BR;
-
- for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
- EI != EE; ++EI) {
- LiveInterval* LI = EI->first;
- LiveRange* LR = EI->second;
- BR[LI->reg].Use = LR;
- }
-
- for (RangeSet::iterator II = Internal.begin(), IE = Internal.end();
- II != IE; ++II) {
- LiveInterval* LI = II->first;
- LiveRange* LR = II->second;
- if (LR->end.isDead()) {
- BR[LI->reg].Dead = LR;
- } else {
- BR[LI->reg].EC = LR;
+ // Now deal with the def at OldIdx.
+ assert(I != E && SlotIndex::isSameInstr(I->start, OldIdx) && "No def?");
+ VNInfo *DefVNI = I->valno;
+ assert(DefVNI->def == I->start && "Inconsistent def");
+ DefVNI->def = NewIdx.getRegSlot(I->start.isEarlyClobber());
+
+ // Check for an existing def at NewIdx.
+ LiveInterval::iterator NewI = LI.find(NewIdx.getRegSlot());
+ if (SlotIndex::isSameInstr(NewI->start, NewIdx)) {
+ assert(NewI->valno != DefVNI && "Same value defined more than once?");
+ // There is an existing def at NewIdx.
+ if (I->end.isDead()) {
+ // Case 3: Remove the dead def at OldIdx.
+ LI.removeValNo(DefVNI);
+ return;
}
+ // Case 4: Replace def at NewIdx with live def at OldIdx.
+ I->start = DefVNI->def;
+ LI.removeValNo(NewI->valno);
+ return;
}
- for (RangeSet::iterator EI = Exiting.begin(), EE = Exiting.end();
- EI != EE; ++EI) {
- LiveInterval* LI = EI->first;
- LiveRange* LR = EI->second;
- BR[LI->reg].Def = LR;
+ // There is no existing def at NewIdx. Hoist DefVNI.
+ if (!I->end.isDead()) {
+ // Leave the end point of a live def.
+ I->start = DefVNI->def;
+ return;
}
- return BR;
- }
-
- void moveKillFlags(unsigned reg, SlotIndex OldIdx, SlotIndex newKillIdx) {
- MachineInstr* OldKillMI = LIS.getInstructionFromIndex(OldIdx);
- if (!OldKillMI->killsRegister(reg))
- return; // Bail out if we don't have kill flags on the old register.
- MachineInstr* NewKillMI = LIS.getInstructionFromIndex(newKillIdx);
- assert(OldKillMI->killsRegister(reg) && "Old 'kill' instr isn't a kill.");
- assert(!NewKillMI->killsRegister(reg) &&
- "New kill instr is already a kill.");
- OldKillMI->clearRegisterKills(reg, &TRI);
- NewKillMI->addRegisterKilled(reg, &TRI);
+ // DefVNI is a dead def. It may have been moved across other values in LI,
+ // so move I up to NewI. Slide [NewI;I) down one position.
+ std::copy_backward(NewI, I, llvm::next(I));
+ *NewI = LiveRange(DefVNI->def, NewIdx.getDeadSlot(), DefVNI);
}
- void updateRegMaskSlots(SlotIndex OldIdx) {
+ void updateRegMaskSlots() {
SmallVectorImpl<SlotIndex>::iterator RI =
std::lower_bound(LIS.RegMaskSlots.begin(), LIS.RegMaskSlots.end(),
OldIdx);
- assert(*RI == OldIdx && "No RegMask at OldIdx.");
- *RI = NewIdx;
- assert(*prior(RI) < *RI && *RI < *next(RI) &&
- "RegSlots out of order. Did you move one call across another?");
+ assert(RI != LIS.RegMaskSlots.end() && *RI == OldIdx.getRegSlot() &&
+ "No RegMask at OldIdx.");
+ *RI = NewIdx.getRegSlot();
+ assert((RI == LIS.RegMaskSlots.begin() ||
+ SlotIndex::isEarlierInstr(*llvm::prior(RI), *RI)) &&
+ "Cannot move regmask instruction above another call");
+ assert((llvm::next(RI) == LIS.RegMaskSlots.end() ||
+ SlotIndex::isEarlierInstr(*RI, *llvm::next(RI))) &&
+ "Cannot move regmask instruction below another call");
}
// Return the last use of reg between NewIdx and OldIdx.
- SlotIndex findLastUseBefore(unsigned Reg, SlotIndex OldIdx) {
+ SlotIndex findLastUseBefore(unsigned Reg) {
SlotIndex LastUse = NewIdx;
- for (MachineRegisterInfo::use_nodbg_iterator
- UI = MRI.use_nodbg_begin(Reg),
- UE = MRI.use_nodbg_end();
- UI != UE; UI.skipInstruction()) {
- const MachineInstr* MI = &*UI;
- SlotIndex InstSlot = LIS.getSlotIndexes()->getInstructionIndex(MI);
- if (InstSlot > LastUse && InstSlot < OldIdx)
- LastUse = InstSlot;
- }
- return LastUse;
- }
- void moveEnteringUpFrom(SlotIndex OldIdx, IntRangePair& P) {
- LiveInterval* LI = P.first;
- LiveRange* LR = P.second;
- bool LiveThrough = LR->end > OldIdx.getRegSlot();
- if (LiveThrough)
- return;
- SlotIndex LastUse = findLastUseBefore(LI->reg, OldIdx);
- if (LastUse != NewIdx)
- moveKillFlags(LI->reg, NewIdx, LastUse);
- LR->end = LastUse.getRegSlot();
- }
-
- void moveEnteringDownFrom(SlotIndex OldIdx, IntRangePair& P) {
- LiveInterval* LI = P.first;
- LiveRange* LR = P.second;
- // Extend the LiveRange if NewIdx is past the end.
- if (NewIdx > LR->end) {
- // Move kill flags if OldIdx was not originally the end
- // (otherwise LR->end points to an invalid slot).
- if (LR->end.getRegSlot() != OldIdx.getRegSlot()) {
- assert(LR->end > OldIdx && "LiveRange does not cover original slot");
- moveKillFlags(LI->reg, LR->end, NewIdx);
+ if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ for (MachineRegisterInfo::use_nodbg_iterator
+ UI = MRI.use_nodbg_begin(Reg),
+ UE = MRI.use_nodbg_end();
+ UI != UE; UI.skipInstruction()) {
+ const MachineInstr* MI = &*UI;
+ SlotIndex InstSlot = LIS.getSlotIndexes()->getInstructionIndex(MI);
+ if (InstSlot > LastUse && InstSlot < OldIdx)
+ LastUse = InstSlot;
}
- LR->end = NewIdx.getRegSlot();
- }
- }
-
- void moveAllEnteringFrom(SlotIndex OldIdx, RangeSet& Entering) {
- bool GoingUp = NewIdx < OldIdx;
-
- if (GoingUp) {
- for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
- EI != EE; ++EI)
- moveEnteringUpFrom(OldIdx, *EI);
- } else {
- for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
- EI != EE; ++EI)
- moveEnteringDownFrom(OldIdx, *EI);
- }
- }
-
- void moveInternalFrom(SlotIndex OldIdx, IntRangePair& P) {
- LiveInterval* LI = P.first;
- LiveRange* LR = P.second;
- assert(OldIdx < LR->start && LR->start < OldIdx.getDeadSlot() &&
- LR->end <= OldIdx.getDeadSlot() &&
- "Range should be internal to OldIdx.");
- LiveRange Tmp(*LR);
- Tmp.start = NewIdx.getRegSlot(LR->start.isEarlyClobber());
- Tmp.valno->def = Tmp.start;
- Tmp.end = LR->end.isDead() ? NewIdx.getDeadSlot() : NewIdx.getRegSlot();
- LI->removeRange(*LR);
- LI->addRange(Tmp);
- }
-
- void moveAllInternalFrom(SlotIndex OldIdx, RangeSet& Internal) {
- for (RangeSet::iterator II = Internal.begin(), IE = Internal.end();
- II != IE; ++II)
- moveInternalFrom(OldIdx, *II);
- }
-
- void moveExitingFrom(SlotIndex OldIdx, IntRangePair& P) {
- LiveRange* LR = P.second;
- assert(OldIdx < LR->start && LR->start < OldIdx.getDeadSlot() &&
- "Range should start in OldIdx.");
- assert(LR->end > OldIdx.getDeadSlot() && "Range should exit OldIdx.");
- SlotIndex NewStart = NewIdx.getRegSlot(LR->start.isEarlyClobber());
- LR->start = NewStart;
- LR->valno->def = NewStart;
- }
-
- void moveAllExitingFrom(SlotIndex OldIdx, RangeSet& Exiting) {
- for (RangeSet::iterator EI = Exiting.begin(), EE = Exiting.end();
- EI != EE; ++EI)
- moveExitingFrom(OldIdx, *EI);
- }
-
- void moveEnteringUpFromInto(SlotIndex OldIdx, IntRangePair& P,
- BundleRanges& BR) {
- LiveInterval* LI = P.first;
- LiveRange* LR = P.second;
- bool LiveThrough = LR->end > OldIdx.getRegSlot();
- if (LiveThrough) {
- assert((LR->start < NewIdx || BR[LI->reg].Def == LR) &&
- "Def in bundle should be def range.");
- assert((BR[LI->reg].Use == 0 || BR[LI->reg].Use == LR) &&
- "If bundle has use for this reg it should be LR.");
- BR[LI->reg].Use = LR;
- return;
- }
-
- SlotIndex LastUse = findLastUseBefore(LI->reg, OldIdx);
- moveKillFlags(LI->reg, OldIdx, LastUse);
-
- if (LR->start < NewIdx) {
- // Becoming a new entering range.
- assert(BR[LI->reg].Dead == 0 && BR[LI->reg].Def == 0 &&
- "Bundle shouldn't be re-defining reg mid-range.");
- assert((BR[LI->reg].Use == 0 || BR[LI->reg].Use == LR) &&
- "Bundle shouldn't have different use range for same reg.");
- LR->end = LastUse.getRegSlot();
- BR[LI->reg].Use = LR;
- } else {
- // Becoming a new Dead-def.
- assert(LR->start == NewIdx.getRegSlot(LR->start.isEarlyClobber()) &&
- "Live range starting at unexpected slot.");
- assert(BR[LI->reg].Def == LR && "Reg should have def range.");
- assert(BR[LI->reg].Dead == 0 &&
- "Can't have def and dead def of same reg in a bundle.");
- LR->end = LastUse.getDeadSlot();
- BR[LI->reg].Dead = BR[LI->reg].Def;
- BR[LI->reg].Def = 0;
- }
- }
-
- void moveEnteringDownFromInto(SlotIndex OldIdx, IntRangePair& P,
- BundleRanges& BR) {
- LiveInterval* LI = P.first;
- LiveRange* LR = P.second;
- if (NewIdx > LR->end) {
- // Range extended to bundle. Add to bundle uses.
- // Note: Currently adds kill flags to bundle start.
- assert(BR[LI->reg].Use == 0 &&
- "Bundle already has use range for reg.");
- moveKillFlags(LI->reg, LR->end, NewIdx);
- LR->end = NewIdx.getRegSlot();
- BR[LI->reg].Use = LR;
} else {
- assert(BR[LI->reg].Use != 0 &&
- "Bundle should already have a use range for reg.");
- }
- }
-
- void moveAllEnteringFromInto(SlotIndex OldIdx, RangeSet& Entering,
- BundleRanges& BR) {
- bool GoingUp = NewIdx < OldIdx;
-
- if (GoingUp) {
- for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
- EI != EE; ++EI)
- moveEnteringUpFromInto(OldIdx, *EI, BR);
- } else {
- for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
- EI != EE; ++EI)
- moveEnteringDownFromInto(OldIdx, *EI, BR);
- }
- }
-
- void moveInternalFromInto(SlotIndex OldIdx, IntRangePair& P,
- BundleRanges& BR) {
- // TODO: Sane rules for moving ranges into bundles.
- }
+ MachineInstr* MI = LIS.getSlotIndexes()->getInstructionFromIndex(NewIdx);
+ MachineBasicBlock::iterator MII(MI);
+ ++MII;
+ MachineBasicBlock* MBB = MI->getParent();
+ for (; MII != MBB->end() && LIS.getInstructionIndex(MII) < OldIdx; ++MII){
+ for (MachineInstr::mop_iterator MOI = MII->operands_begin(),
+ MOE = MII->operands_end();
+ MOI != MOE; ++MOI) {
+ const MachineOperand& mop = *MOI;
+ if (!mop.isReg() || mop.getReg() == 0 ||
+ TargetRegisterInfo::isVirtualRegister(mop.getReg()))
+ continue;
- void moveAllInternalFromInto(SlotIndex OldIdx, RangeSet& Internal,
- BundleRanges& BR) {
- for (RangeSet::iterator II = Internal.begin(), IE = Internal.end();
- II != IE; ++II)
- moveInternalFromInto(OldIdx, *II, BR);
- }
-
- void moveExitingFromInto(SlotIndex OldIdx, IntRangePair& P,
- BundleRanges& BR) {
- LiveInterval* LI = P.first;
- LiveRange* LR = P.second;
-
- assert(LR->start.isRegister() &&
- "Don't know how to merge exiting ECs into bundles yet.");
-
- if (LR->end > NewIdx.getDeadSlot()) {
- // This range is becoming an exiting range on the bundle.
- // If there was an old dead-def of this reg, delete it.
- if (BR[LI->reg].Dead != 0) {
- LI->removeRange(*BR[LI->reg].Dead);
- BR[LI->reg].Dead = 0;
- }
- assert(BR[LI->reg].Def == 0 &&
- "Can't have two defs for the same variable exiting a bundle.");
- LR->start = NewIdx.getRegSlot();
- LR->valno->def = LR->start;
- BR[LI->reg].Def = LR;
- } else {
- // This range is becoming internal to the bundle.
- assert(LR->end == NewIdx.getRegSlot() &&
- "Can't bundle def whose kill is before the bundle");
- if (BR[LI->reg].Dead || BR[LI->reg].Def) {
- // Already have a def for this. Just delete range.
- LI->removeRange(*LR);
- } else {
- // Make range dead, record.
- LR->end = NewIdx.getDeadSlot();
- BR[LI->reg].Dead = LR;
- assert(BR[LI->reg].Use == LR &&
- "Range becoming dead should currently be use.");
+ if (TRI.hasRegUnit(mop.getReg(), Reg))
+ LastUse = LIS.getInstructionIndex(MII);
+ }
}
- // In both cases the range is no longer a use on the bundle.
- BR[LI->reg].Use = 0;
}
+ return LastUse;
}
-
- void moveAllExitingFromInto(SlotIndex OldIdx, RangeSet& Exiting,
- BundleRanges& BR) {
- for (RangeSet::iterator EI = Exiting.begin(), EE = Exiting.end();
- EI != EE; ++EI)
- moveExitingFromInto(OldIdx, *EI, BR);
- }
-
};
-void LiveIntervals::handleMove(MachineInstr* MI) {
+void LiveIntervals::handleMove(MachineInstr* MI, bool UpdateFlags) {
+ assert(!MI->isBundled() && "Can't handle bundled instructions yet.");
SlotIndex OldIndex = Indexes->getInstructionIndex(MI);
Indexes->removeMachineInstrFromMaps(MI);
- SlotIndex NewIndex = MI->isInsideBundle() ?
- Indexes->getInstructionIndex(MI) :
- Indexes->insertMachineInstrInMaps(MI);
+ SlotIndex NewIndex = Indexes->insertMachineInstrInMaps(MI);
assert(getMBBStartIdx(MI->getParent()) <= OldIndex &&
OldIndex < getMBBEndIdx(MI->getParent()) &&
"Cannot handle moves across basic block boundaries.");
- assert(!MI->isBundled() && "Can't handle bundled instructions yet.");
- HMEditor HME(*this, *MRI, *TRI, NewIndex);
- HME.moveAllRangesFrom(MI, OldIndex);
+ HMEditor HME(*this, *MRI, *TRI, OldIndex, NewIndex, UpdateFlags);
+ HME.updateAllRanges(MI);
}
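
A hedged sketch of how a caller drives this: re-link the instruction first,
then let LiveIntervals repair everything it touches. MBB, MI and InsertPt are
assumptions of the example, and it assumes the declaration of handleMove gives
UpdateFlags a default value:

    // Move MI before InsertPt within the same basic block (handleMove
    // asserts that moves never cross block boundaries).
    MBB->splice(InsertPt, MBB, MachineBasicBlock::iterator(MI));
    // Remap MI's SlotIndex, then update every live range MI reads or defines.
    LIS->handleMove(MI);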
void LiveIntervals::handleMoveIntoBundle(MachineInstr* MI,
- MachineInstr* BundleStart) {
+ MachineInstr* BundleStart,
+ bool UpdateFlags) {
+ SlotIndex OldIndex = Indexes->getInstructionIndex(MI);
SlotIndex NewIndex = Indexes->getInstructionIndex(BundleStart);
- HMEditor HME(*this, *MRI, *TRI, NewIndex);
- HME.moveAllRangesInto(MI, BundleStart);
+ HMEditor HME(*this, *MRI, *TRI, OldIndex, NewIndex, UpdateFlags);
+ HME.updateAllRanges(MI);
}
Modified: llvm/branches/AMDILBackend/lib/CodeGen/LiveIntervalUnion.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/LiveIntervalUnion.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/LiveIntervalUnion.h (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/LiveIntervalUnion.h Tue Jan 15 11:16:16 2013
@@ -178,8 +178,8 @@
bool checkLoopInterference(MachineLoopRange*);
private:
- Query(const Query&); // DO NOT IMPLEMENT
- void operator=(const Query&); // DO NOT IMPLEMENT
+ Query(const Query&) LLVM_DELETED_FUNCTION;
+ void operator=(const Query&) LLVM_DELETED_FUNCTION;
};
// Array of LiveIntervalUnions.
Modified: llvm/branches/AMDILBackend/lib/CodeGen/LiveRangeCalc.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/LiveRangeCalc.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/LiveRangeCalc.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/LiveRangeCalc.cpp Tue Jan 15 11:16:16 2013
@@ -65,7 +65,11 @@
// Visit all operands that read Reg. This may include partial defs.
for (MachineRegisterInfo::reg_nodbg_iterator I = MRI->reg_nodbg_begin(Reg),
E = MRI->reg_nodbg_end(); I != E; ++I) {
- const MachineOperand &MO = I.getOperand();
+ MachineOperand &MO = I.getOperand();
+ // Clear all kill flags. They will be reinserted after register allocation
+ // by LiveIntervalAnalysis::addKillFlags().
+ if (MO.isUse())
+ MO.setIsKill(false);
if (!MO.readsReg())
continue;
// MI is reading Reg. We may have visited MI before if it happens to be
Modified: llvm/branches/AMDILBackend/lib/CodeGen/LiveRangeEdit.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/LiveRangeEdit.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/LiveRangeEdit.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/LiveRangeEdit.cpp Tue Jan 15 11:16:16 2013
@@ -87,7 +87,7 @@
// We can't remat physreg uses, unless it is a constant.
if (TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
- if (MRI.isConstantPhysReg(MO.getReg(), VRM->getMachineFunction()))
+ if (MRI.isConstantPhysReg(MO.getReg(), *OrigMI->getParent()->getParent()))
continue;
return false;
}
@@ -96,6 +96,13 @@
const VNInfo *OVNI = li.getVNInfoAt(OrigIdx);
if (!OVNI)
continue;
+
+ // Don't allow rematerialization immediately after the original def.
+ // It would be incorrect if OrigMI redefines the register.
+ // See PR14098.
+ if (SlotIndex::isSameInstr(OrigIdx, UseIdx))
+ return false;
+
if (OVNI != li.getVNInfoAt(UseIdx))
return false;
}
@@ -249,7 +256,7 @@
unsigned Reg = MOI->getReg();
if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
// Check if MI reads any unreserved physregs.
- if (Reg && MOI->readsReg() && !LIS.isReserved(Reg))
+ if (Reg && MOI->readsReg() && !MRI.isReserved(Reg))
ReadsPhysRegs = true;
continue;
}
Modified: llvm/branches/AMDILBackend/lib/CodeGen/LiveRegMatrix.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/LiveRegMatrix.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/LiveRegMatrix.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/LiveRegMatrix.cpp Tue Jan 15 11:16:16 2013
@@ -13,6 +13,7 @@
#define DEBUG_TYPE "regalloc"
#include "LiveRegMatrix.h"
+#include "RegisterCoalescer.h"
#include "VirtRegMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
@@ -117,8 +118,9 @@
unsigned PhysReg) {
if (VirtReg.empty())
return false;
+ CoalescerPair CP(VirtReg.reg, PhysReg, *TRI);
for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units)
- if (VirtReg.overlaps(LIS->getRegUnit(*Units)))
+ if (VirtReg.overlaps(LIS->getRegUnit(*Units), CP, *LIS->getSlotIndexes()))
return true;
return false;
}
Modified: llvm/branches/AMDILBackend/lib/CodeGen/LiveRegMatrix.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/LiveRegMatrix.h?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/LiveRegMatrix.h (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/LiveRegMatrix.h Tue Jan 15 11:16:16 2013
@@ -15,7 +15,7 @@
// Register units are defined in MCRegisterInfo.h, they represent the smallest
// unit of interference when dealing with overlapping physical registers. The
// LiveRegMatrix is represented as a LiveIntervalUnion per register unit. When
-// a virtual register is assigned to a physicval register, the live range for
+// a virtual register is assigned to a physical register, the live range for
// the virtual register is inserted into the LiveIntervalUnion for each regunit
// in the physreg.
//
Modified: llvm/branches/AMDILBackend/lib/CodeGen/LiveStackAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/LiveStackAnalysis.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/LiveStackAnalysis.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/LiveStackAnalysis.cpp Tue Jan 15 11:16:16 2013
@@ -25,7 +25,10 @@
using namespace llvm;
char LiveStacks::ID = 0;
-INITIALIZE_PASS(LiveStacks, "livestacks",
+INITIALIZE_PASS_BEGIN(LiveStacks, "livestacks",
+ "Live Stack Slot Analysis", false, false)
+INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
+INITIALIZE_PASS_END(LiveStacks, "livestacks",
"Live Stack Slot Analysis", false, false)
char &llvm::LiveStacksID = LiveStacks::ID;
Modified: llvm/branches/AMDILBackend/lib/CodeGen/LiveVariables.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/LiveVariables.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/LiveVariables.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/LiveVariables.cpp Tue Jan 15 11:16:16 2013
@@ -65,6 +65,7 @@
}
void LiveVariables::VarInfo::dump() const {
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
dbgs() << " Alive in blocks: ";
for (SparseBitVector<>::iterator I = AliveBlocks.begin(),
E = AliveBlocks.end(); I != E; ++I)
@@ -77,6 +78,7 @@
dbgs() << "\n #" << i << ": " << *Kills[i];
dbgs() << "\n";
}
+#endif
}
/// getVarInfo - Get (possibly creating) a VarInfo object for the given vreg.
@@ -501,8 +503,6 @@
MRI = &mf.getRegInfo();
TRI = MF->getTarget().getRegisterInfo();
- ReservedRegisters = TRI->getReservedRegs(mf);
-
unsigned NumRegs = TRI->getNumRegs();
PhysRegDef = new MachineInstr*[NumRegs];
PhysRegUse = new MachineInstr*[NumRegs];
@@ -586,7 +586,7 @@
unsigned MOReg = UseRegs[i];
if (TargetRegisterInfo::isVirtualRegister(MOReg))
HandleVirtRegUse(MOReg, MBB, MI);
- else if (!ReservedRegisters[MOReg])
+ else if (!MRI->isReserved(MOReg))
HandlePhysRegUse(MOReg, MI);
}
@@ -599,7 +599,7 @@
unsigned MOReg = DefRegs[i];
if (TargetRegisterInfo::isVirtualRegister(MOReg))
HandleVirtRegDef(MOReg, MI);
- else if (!ReservedRegisters[MOReg])
+ else if (!MRI->isReserved(MOReg))
HandlePhysRegDef(MOReg, MI, Defs);
}
UpdatePhysRegDefs(MI, Defs);
@@ -806,18 +806,44 @@
MachineBasicBlock *SuccBB) {
const unsigned NumNew = BB->getNumber();
- // All registers used by PHI nodes in SuccBB must be live through BB.
- for (MachineBasicBlock::iterator BBI = SuccBB->begin(),
- BBE = SuccBB->end(); BBI != BBE && BBI->isPHI(); ++BBI)
+ SmallSet<unsigned, 16> Defs, Kills;
+
+ MachineBasicBlock::iterator BBI = SuccBB->begin(), BBE = SuccBB->end();
+ for (; BBI != BBE && BBI->isPHI(); ++BBI) {
+ // Record the def of the PHI node.
+ Defs.insert(BBI->getOperand(0).getReg());
+
+ // All registers used by PHI nodes in SuccBB must be live through BB.
for (unsigned i = 1, e = BBI->getNumOperands(); i != e; i += 2)
if (BBI->getOperand(i+1).getMBB() == BB)
getVarInfo(BBI->getOperand(i).getReg()).AliveBlocks.set(NumNew);
+ }
+
+ // Record all vreg defs and kills of all instructions in SuccBB.
+ for (; BBI != BBE; ++BBI) {
+ for (MachineInstr::mop_iterator I = BBI->operands_begin(),
+ E = BBI->operands_end(); I != E; ++I) {
+ if (I->isReg() && TargetRegisterInfo::isVirtualRegister(I->getReg())) {
+ if (I->isDef())
+ Defs.insert(I->getReg());
+ else if (I->isKill())
+ Kills.insert(I->getReg());
+ }
+ }
+ }
// Update info for all live variables
for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
+
+    // If the register is defined in the successor, it can't be live in BB.
+ if (Defs.count(Reg))
+ continue;
+
+ // If the register is either killed in or live through SuccBB it's also live
+ // through BB.
VarInfo &VI = getVarInfo(Reg);
- if (!VI.AliveBlocks.test(NumNew) && VI.isLiveIn(*SuccBB, Reg, *MRI))
+ if (Kills.count(Reg) || VI.AliveBlocks.test(SuccBB->getNumber()))
VI.AliveBlocks.set(NumNew);
}
}
Modified: llvm/branches/AMDILBackend/lib/CodeGen/MachineBasicBlock.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/MachineBasicBlock.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/MachineBasicBlock.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/MachineBasicBlock.cpp Tue Jan 15 11:16:16 2013
@@ -21,7 +21,7 @@
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Assembly/Writer.h"
@@ -145,7 +145,8 @@
instr_iterator I = instr_begin(), E = instr_end();
while (I != E && I->isPHI())
++I;
- assert(!I->isInsideBundle() && "First non-phi MI cannot be inside a bundle!");
+ assert((I == E || !I->isInsideBundle()) &&
+ "First non-phi MI cannot be inside a bundle!");
return I;
}
@@ -156,7 +157,7 @@
++I;
// FIXME: This needs to change if we wish to bundle labels / dbg_values
// inside the bundle.
- assert(!I->isInsideBundle() &&
+ assert((I == E || !I->isInsideBundle()) &&
"First non-phi / non-label instruction is inside a bundle!");
return I;
}
@@ -228,9 +229,11 @@
return 0;
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void MachineBasicBlock::dump() const {
print(dbgs());
}
+#endif
StringRef MachineBasicBlock::getName() const {
if (const BasicBlock *LBB = getBasicBlock())
@@ -243,7 +246,7 @@
std::string MachineBasicBlock::getFullName() const {
std::string Name;
if (getParent())
- Name = (getParent()->getFunction()->getName() + ":").str();
+ Name = (getParent()->getName() + ":").str();
if (getBasicBlock())
Name += getBasicBlock()->getName();
else
@@ -311,8 +314,11 @@
if (!succ_empty()) {
if (Indexes) OS << '\t';
OS << " Successors according to CFG:";
- for (const_succ_iterator SI = succ_begin(), E = succ_end(); SI != E; ++SI)
+ for (const_succ_iterator SI = succ_begin(), E = succ_end(); SI != E; ++SI) {
OS << " BB#" << (*SI)->getNumber();
+ if (!Weights.empty())
+ OS << '(' << *getWeightIterator(SI) << ')';
+ }
OS << '\n';
}
}
@@ -532,14 +538,13 @@
while (!fromMBB->succ_empty()) {
MachineBasicBlock *Succ = *fromMBB->succ_begin();
- uint32_t weight = 0;
-
+ uint32_t Weight = 0;
// If Weight list is empty it means we don't use it (disabled optimization).
if (!fromMBB->Weights.empty())
- weight = *fromMBB->Weights.begin();
+ Weight = *fromMBB->Weights.begin();
- addSuccessor(Succ, weight);
+ addSuccessor(Succ, Weight);
fromMBB->removeSuccessor(Succ);
}
}
@@ -551,7 +556,10 @@
while (!fromMBB->succ_empty()) {
MachineBasicBlock *Succ = *fromMBB->succ_begin();
- addSuccessor(Succ);
+ uint32_t Weight = 0;
+ if (!fromMBB->Weights.empty())
+ Weight = *fromMBB->Weights.begin();
+ addSuccessor(Succ, Weight);
fromMBB->removeSuccessor(Succ);
// Fix up any PHI nodes in the successor.
@@ -937,12 +945,11 @@
/// getSuccWeight - Return weight of the edge from this block to MBB.
///
-uint32_t MachineBasicBlock::getSuccWeight(const MachineBasicBlock *succ) const {
+uint32_t MachineBasicBlock::getSuccWeight(const_succ_iterator Succ) const {
if (Weights.empty())
return 0;
- const_succ_iterator I = std::find(Successors.begin(), Successors.end(), succ);
- return *getWeightIterator(I);
+ return *getWeightIterator(Succ);
}
/// getWeightIterator - Return weight iterator corresponding to the I successor
@@ -965,6 +972,80 @@
return Weights.begin() + index;
}
+/// Return whether (physical) register "Reg" has been <def>ined and not <kill>ed
+/// as of just before "MI".
+///
+/// The search is localised to a neighborhood of Neighborhood instructions
+/// before MI (searching for defs or kills) and Neighborhood instructions
+/// after MI (searching just for defs).
+MachineBasicBlock::LivenessQueryResult
+MachineBasicBlock::computeRegisterLiveness(const TargetRegisterInfo *TRI,
+ unsigned Reg, MachineInstr *MI,
+ unsigned Neighborhood) {
+
+ unsigned N = Neighborhood;
+ MachineBasicBlock *MBB = MI->getParent();
+
+ // Start by searching backwards from MI, looking for kills, reads or defs.
+
+ MachineBasicBlock::iterator I(MI);
+ // If this is the first insn in the block, don't search backwards.
+ if (I != MBB->begin()) {
+ do {
+ --I;
+
+ MachineOperandIteratorBase::PhysRegInfo Analysis =
+ MIOperands(I).analyzePhysReg(Reg, TRI);
+
+ if (Analysis.Kills)
+ // Register killed, so isn't live.
+ return LQR_Dead;
+
+ else if (Analysis.DefinesOverlap || Analysis.ReadsOverlap)
+ // Defined or read without a previous kill - live.
+ return (Analysis.Defines || Analysis.Reads) ?
+ LQR_Live : LQR_OverlappingLive;
+
+ } while (I != MBB->begin() && --N > 0);
+ }
+
+ // Did we get to the start of the block?
+ if (I == MBB->begin()) {
+ // If so, the register's state is definitely defined by the live-in state.
+ for (MCRegAliasIterator RAI(Reg, TRI, /*IncludeSelf=*/true);
+ RAI.isValid(); ++RAI) {
+ if (MBB->isLiveIn(*RAI))
+ return (*RAI == Reg) ? LQR_Live : LQR_OverlappingLive;
+ }
+
+ return LQR_Dead;
+ }
+
+ N = Neighborhood;
+
+ // Try searching forwards from MI, looking for reads or defs.
+ I = MachineBasicBlock::iterator(MI);
+ // If this is the last insn in the block, don't search forwards.
+ if (I != MBB->end()) {
+ for (++I; I != MBB->end() && N > 0; ++I, --N) {
+ MachineOperandIteratorBase::PhysRegInfo Analysis =
+ MIOperands(I).analyzePhysReg(Reg, TRI);
+
+ if (Analysis.ReadsOverlap)
+ // Used, therefore must have been live.
+ return (Analysis.Reads) ?
+ LQR_Live : LQR_OverlappingLive;
+
+ else if (Analysis.DefinesOverlap)
+ // Defined (but not read) therefore cannot have been live.
+ return LQR_Dead;
+ }
+ }
+
+ // At this point we have no idea of the liveness of the register.
+ return LQR_Unknown;
+}
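
A hedged sketch of a typical consumer of this new query; ScratchReg, MI and
TRI are assumptions of the example, not names from this patch:

    MachineBasicBlock *MBB = MI->getParent();
    switch (MBB->computeRegisterLiveness(TRI, ScratchReg, MI, 10)) {
    case MachineBasicBlock::LQR_Dead:
      // Provably dead in the scanned neighborhood; safe to clobber.
      break;
    case MachineBasicBlock::LQR_Live:
    case MachineBasicBlock::LQR_OverlappingLive:
      // ScratchReg or a register overlapping it is live; pick another one.
      break;
    case MachineBasicBlock::LQR_Unknown:
      // The scan was inconclusive; be conservative.
      break;
    }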
+
void llvm::WriteAsOperand(raw_ostream &OS, const MachineBasicBlock *MBB,
bool t) {
OS << "BB#" << MBB->getNumber();
Modified: llvm/branches/AMDILBackend/lib/CodeGen/MachineBlockPlacement.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/MachineBlockPlacement.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/MachineBlockPlacement.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/MachineBlockPlacement.cpp Tue Jan 15 11:16:16 2013
@@ -500,11 +500,10 @@
assert(BB);
assert(BlockToChain[BB] == &Chain);
assert(*llvm::prior(Chain.end()) == BB);
- MachineBasicBlock *BestSucc = 0;
// Look for the best viable successor if there is one to place immediately
// after this block.
- BestSucc = selectBestSuccessor(BB, Chain, BlockFilter);
+ MachineBasicBlock *BestSucc = selectBestSuccessor(BB, Chain, BlockFilter);
// If an immediate successor isn't available, look for the best viable
// block among those we've identified as not violating the loop's CFG at
@@ -1014,7 +1013,8 @@
// exclusively on the loop info here so that we can align backedges in
// unnatural CFGs and backedges that were introduced purely because of the
// loop rotations done during this layout pass.
- if (F.getFunction()->hasFnAttr(Attribute::OptimizeForSize))
+ if (F.getFunction()->getFnAttributes().
+ hasAttribute(Attributes::OptimizeForSize))
return;
unsigned Align = TLI->getPrefLoopAlignment();
if (!Align)
Modified: llvm/branches/AMDILBackend/lib/CodeGen/MachineBranchProbabilityInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/MachineBranchProbabilityInfo.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/MachineBranchProbabilityInfo.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/MachineBranchProbabilityInfo.cpp Tue Jan 15 11:16:16 2013
@@ -38,7 +38,7 @@
Scale = 1;
for (MachineBasicBlock::const_succ_iterator I = MBB->succ_begin(),
E = MBB->succ_end(); I != E; ++I) {
- uint32_t Weight = getEdgeWeight(MBB, *I);
+ uint32_t Weight = getEdgeWeight(MBB, I);
Sum += Weight;
}
@@ -53,22 +53,30 @@
Sum = 0;
for (MachineBasicBlock::const_succ_iterator I = MBB->succ_begin(),
E = MBB->succ_end(); I != E; ++I) {
- uint32_t Weight = getEdgeWeight(MBB, *I);
+ uint32_t Weight = getEdgeWeight(MBB, I);
Sum += Weight / Scale;
}
assert(Sum <= UINT32_MAX);
return Sum;
}
-uint32_t
-MachineBranchProbabilityInfo::getEdgeWeight(const MachineBasicBlock *Src,
- const MachineBasicBlock *Dst) const {
+uint32_t MachineBranchProbabilityInfo::
+getEdgeWeight(const MachineBasicBlock *Src,
+ MachineBasicBlock::const_succ_iterator Dst) const {
uint32_t Weight = Src->getSuccWeight(Dst);
if (!Weight)
return DEFAULT_WEIGHT;
return Weight;
}
+uint32_t MachineBranchProbabilityInfo::
+getEdgeWeight(const MachineBasicBlock *Src,
+ const MachineBasicBlock *Dst) const {
+ // This is a linear search. Try to use the const_succ_iterator version when
+ // possible.
+ return getEdgeWeight(Src, std::find(Src->succ_begin(), Src->succ_end(), Dst));
+}
+
bool MachineBranchProbabilityInfo::isEdgeHot(MachineBasicBlock *Src,
MachineBasicBlock *Dst) const {
// Hot probability is at least 4/5 = 80%
@@ -82,7 +90,7 @@
MachineBasicBlock *MaxSucc = 0;
for (MachineBasicBlock::const_succ_iterator I = MBB->succ_begin(),
E = MBB->succ_end(); I != E; ++I) {
- uint32_t Weight = getEdgeWeight(MBB, *I);
+ uint32_t Weight = getEdgeWeight(MBB, I);
if (Weight > MaxWeight) {
MaxWeight = Weight;
MaxSucc = *I;
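
The intended usage after this change is to walk the successor list once and
hand the iterator to getEdgeWeight(), so the linear std::find in the pointer
overload is never hit. A short sketch, with MBPI standing in for a
MachineBranchProbabilityInfo pointer:

    uint32_t Sum = 0;
    for (MachineBasicBlock::const_succ_iterator I = MBB->succ_begin(),
           E = MBB->succ_end(); I != E; ++I)
      Sum += MBPI->getEdgeWeight(MBB, I);  // iterator overload, no search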
Modified: llvm/branches/AMDILBackend/lib/CodeGen/MachineCSE.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/AMDILBackend/lib/CodeGen/MachineCSE.cpp?rev=172541&r1=172540&r2=172541&view=diff
==============================================================================
--- llvm/branches/AMDILBackend/lib/CodeGen/MachineCSE.cpp (original)
+++ llvm/branches/AMDILBackend/lib/CodeGen/MachineCSE.cpp Tue Jan 15 11:16:16 2013
@@ -63,8 +63,6 @@
virtual void releaseMemory() {
ScopeMap.clear();
Exps.clear();
- AllocatableRegs.clear();
- ReservedRegs.clear();
}
private:
@@ -78,8 +76,6 @@
ScopedHTType VNT;
SmallVector<MachineInstr*, 64> Exps;
unsigned CurrVN;
- BitVector AllocatableRegs;
- BitVector ReservedRegs;
bool PerformTrivialCoalescing(MachineInstr *MI, MachineBasicBlock *MBB);
bool isPhysDefTriviallyDead(unsigned Reg,
@@ -88,7 +84,8 @@
bool hasLivePhysRegDefUses(const MachineInstr *MI,
const MachineBasicBlock *MBB,
SmallSet<unsigned,8> &PhysRefs,
- SmallVector<unsigned,2> &PhysDefs) const;
+ SmallVector<unsigned,2> &PhysDefs,
+ bool &PhysUseDef) const;
bool PhysRegDefsReach(MachineInstr *CSMI, MachineInstr *MI,
SmallSet<unsigned,8> &PhysRefs,
SmallVector<unsigned,2> &PhysDefs,
@@ -198,29 +195,52 @@
bool MachineCSE::hasLivePhysRegDefUses(const MachineInstr *MI,
const MachineBasicBlock *MBB,
SmallSet<unsigned,8> &PhysRefs,
- SmallVector<unsigned,2> &PhysDefs) const{
+ SmallVector<unsigned,2> &PhysDefs,
+ bool &PhysUseDef) const{
+ // First, add all uses to PhysRefs.
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg() || MO.isDef())
+ continue;
+ unsigned Reg = MO.getReg();
+ if (!Reg)
+ continue;
+ if (TargetRegisterInfo::isVirtualRegister(Reg))
+ continue;
+ // Reading constant physregs is ok.
+ if (!MRI->isConstantPhysReg(Reg, *MBB->getParent()))
+ for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
+ PhysRefs.insert(*AI);
+ }
+
+ // Next, collect all defs into PhysDefs. If any is already in PhysRefs
+ // (which currently contains only uses), set the PhysUseDef flag.
+ PhysUseDef = false;
MachineBasicBlock::const_iterator I = MI; I = llvm::next(I);
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
- if (!MO.isReg())
+ if (!MO.isReg() || !MO.isDef())
continue;
unsigned Reg = MO.getReg();
if (!Reg)
continue;
if (TargetRegisterInfo::isVirtualRegister(Reg))
continue;
+ // Check against PhysRefs even if the def is "dead".
+ if (PhysRefs.count(Reg))
+ PhysUseDef = true;
// If the def is dead, it's ok. But the def may not be marked "dead". That's
// common since this pass is run before LiveVariables. We can scan
// forward a few instructions and check if it is obviously dead.
- if (MO.isDef() &&
- (MO.isDead() || isPhysDefTriviallyDead(Reg, I, MBB->end())))
- continue;
- for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
- PhysRefs.insert(*AI);
- if (MO.isDef())
+ if (!MO.isDead() && !isPhysDefTriviallyDead(Reg, I, MBB->end()))
PhysDefs.push_back(Reg);
}
+ // Finally, add all defs to PhysRefs as well.
+ for (unsigned i = 0, e = PhysDefs.size(); i != e; ++i)
+ for (MCRegAliasIterator AI(PhysDefs[i], TRI, true); AI.isValid(); ++AI)
+ PhysRefs.insert(*AI);
+
return !PhysRefs.empty();
}
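
For context, a sketch of the alias expansion all three phases above rely on:
with IncludeSelf set, MCRegAliasIterator yields Reg itself plus every physreg
overlapping it, so on X86 an EAX reference would insert EAX, AX, AH, AL and
RAX into the set (Reg and TRI are assumed here):

    SmallSet<unsigned, 8> PhysRefs;
    for (MCRegAliasIterator AI(Reg, TRI, /*IncludeSelf=*/true); AI.isValid();
         ++AI)
      PhysRefs.insert(*AI);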
@@ -240,7 +260,7 @@
return false;
for (unsigned i = 0, e = PhysDefs.size(); i != e; ++i) {
- if (AllocatableRegs.test(PhysDefs[i]) || ReservedRegs.test(PhysDefs[i]))
+ if (MRI->isAllocatable(PhysDefs[i]) || MRI->isReserved(PhysDefs[i]))
// Avoid extending the live range of physical registers if they are
// allocatable or reserved.
return false;
@@ -409,8 +429,8 @@
DEBUG(dbgs() << "Exiting: " << MBB->getName() << '\n');
DenseMap<MachineBasicBlock*, ScopeType*>::iterator SI = ScopeMap.find(MBB);
assert(SI != ScopeMap.end());
- ScopeMap.erase(SI);
delete SI->second;
+ ScopeMap.erase(SI);
}
bool MachineCSE::ProcessBlock(MachineBasicBlock *MBB) {
@@ -461,16 +481,22 @@
bool CrossMBBPhysDef = false;
SmallSet<unsigned, 8> PhysRefs;
SmallVector<unsigned, 2> PhysDefs;
- if (FoundCSE && hasLivePhysRegDefUses(MI, MBB, PhysRefs, PhysDefs)) {
+ bool PhysUseDef = false;
+ if (FoundCSE && hasLivePhysRegDefUses(MI, MBB, PhysRefs,
+ PhysDefs, PhysUseDef)) {
FoundCSE = false;
// ... Unless the CS is local or is in the sole predecessor block
// and it also defines the physical register which is not clobbered
// in between and the physical register uses were not clobbered.
- unsigned CSVN = VNT.lookup(MI);
- MachineInstr *CSMI = Exps[CSVN];
- if (PhysRegDefsReach(CSMI, MI, PhysRefs, PhysDefs, CrossMBBPhysDef))
- FoundCSE = true;
+ // This can never be the case if the instruction both uses and
+ // defines the same physical register, which was detected above.
+ if (!PhysUseDef) {
+ unsigned CSVN = VNT.lookup(MI);
+ MachineInstr *CSMI = Exps[CSVN];
+ if (PhysRegDefsReach(CSMI, MI, PhysRefs, PhysDefs, CrossMBBPhysDef))
+ FoundCSE = true;
+ }
}
if (!FoundCSE) {
@@ -633,7 +659,5 @@
MRI = &MF.getRegInfo();
AA = &getAnalysis<AliasAnalysis>();
DT = &getAnalysis<MachineDominatorTree>();
- AllocatableRegs = TRI->getAllocatableSet(MF);
- ReservedRegs = TRI->getReservedRegs(MF);
return PerformCSE(DT->getRootNode());
}