[llvm-branch-commits] [llvm-branch] r117425 [2/9] - in /llvm/branches/wendling/eh: ./ autoconf/ autoconf/m4/ bindings/ada/ bindings/ocaml/llvm/ bindings/ocaml/transforms/scalar/ cmake/ cmake/modules/ docs/ docs/CommandGuide/ docs/tutorial/ examples/ examples/BrainF/ examples/ExceptionDemo/ examples/Fibonacci/ examples/Kaleidoscope/Chapter7/ examples/ModuleMaker/ include/llvm-c/ include/llvm-c/Transforms/ include/llvm/ include/llvm/ADT/ include/llvm/Analysis/ include/llvm/Assembly/ include/llvm/Bitcode/ include/llvm/CodeGen/ i...

Bill Wendling isanbard at gmail.com
Tue Oct 26 17:48:11 PDT 2010


Modified: llvm/branches/wendling/eh/include/llvm/Support/StandardPasses.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Support/StandardPasses.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Support/StandardPasses.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Support/StandardPasses.h Tue Oct 26 19:48:03 2010
@@ -69,9 +69,18 @@
 
   // Implementations
 
+  static inline void createStandardAliasAnalysisPasses(PassManagerBase *PM) {
+    // Add TypeBasedAliasAnalysis before BasicAliasAnalysis so that
+    // BasicAliasAnalysis wins if they disagree. This is intended to help
+    // support "obvious" type-punning idioms.
+    PM->add(createTypeBasedAliasAnalysisPass());
+    PM->add(createBasicAliasAnalysisPass());
+  }
+
   static inline void createStandardFunctionPasses(PassManagerBase *PM,
                                                   unsigned OptimizationLevel) {
     if (OptimizationLevel > 0) {
+      createStandardAliasAnalysisPasses(PM);
       PM->add(createCFGSimplificationPass());
       if (OptimizationLevel == 1)
         PM->add(createPromoteMemoryToRegisterPass());
@@ -91,6 +100,8 @@
                                                 bool SimplifyLibCalls,
                                                 bool HaveExceptions,
                                                 Pass *InliningPass) {
+    createStandardAliasAnalysisPasses(PM);
+
     if (OptimizationLevel == 0) {
       if (InliningPass)
         PM->add(InliningPass);
@@ -117,20 +128,18 @@
       PM->add(createArgumentPromotionPass());   // Scalarize uninlined fn args
     
     // Start of function pass.
-    
     PM->add(createScalarReplAggregatesPass());  // Break up aggregate allocas
     if (SimplifyLibCalls)
       PM->add(createSimplifyLibCallsPass());    // Library Call Optimizations
     PM->add(createInstructionCombiningPass());  // Cleanup for scalarrepl.
     PM->add(createJumpThreadingPass());         // Thread jumps.
+    PM->add(createCorrelatedValuePropagationPass()); // Propagate conditionals
     PM->add(createCFGSimplificationPass());     // Merge & remove BBs
     PM->add(createInstructionCombiningPass());  // Combine silly seq's
     
     PM->add(createTailCallEliminationPass());   // Eliminate tail calls
     PM->add(createCFGSimplificationPass());     // Merge & remove BBs
     PM->add(createReassociatePass());           // Reassociate expressions
-    // Explicitly schedule this to ensure that it runs before any loop pass.
-    PM->add(new DominanceFrontier());           // Calculate Dominance Frontiers
     PM->add(createLoopRotatePass());            // Rotate Loop
     PM->add(createLICMPass());                  // Hoist loop invariants
     PM->add(createLoopUnswitchPass(OptimizeSize || OptimizationLevel < 3));
@@ -149,6 +158,7 @@
     // opened up by them.
     PM->add(createInstructionCombiningPass());
     PM->add(createJumpThreadingPass());         // Thread jumps
+    PM->add(createCorrelatedValuePropagationPass());
     PM->add(createDeadStoreEliminationPass());  // Delete dead stores
     PM->add(createAggressiveDCEPass());         // Delete dead instructions
     PM->add(createCFGSimplificationPass());     // Merge & remove BBs
@@ -178,6 +188,9 @@
                                              bool Internalize,
                                              bool RunInliner,
                                              bool VerifyEach) {
+    // Provide AliasAnalysis services for optimizations.
+    createStandardAliasAnalysisPasses(PM);
+
     // Now that composite has been compiled, scan through the module, looking
     // for a main function.  If main is defined, mark all other functions
     // internal.

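For context: with this change, callers that previously scheduled alias analysis by hand get the TBAA-then-BasicAA stack from the helpers themselves. A minimal sketch of driving the function-pass helper, assuming the FunctionPassManager API of this branch (not code from this patch):

    #include "llvm/Module.h"
    #include "llvm/PassManager.h"
    #include "llvm/Support/StandardPasses.h"
    using namespace llvm;

    static void optimizeFunctions(Module &M, unsigned OptLevel) {
      FunctionPassManager FPM(&M);
      // At -O1 and above this now adds TBAA followed by BasicAA, so
      // BasicAA wins whenever the two disagree.
      createStandardFunctionPasses(&FPM, OptLevel);
      FPM.doInitialization();
      for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F)
        FPM.run(*F);
      FPM.doFinalization();
    }
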
Modified: llvm/branches/wendling/eh/include/llvm/Support/SystemUtils.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Support/SystemUtils.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Support/SystemUtils.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Support/SystemUtils.h Tue Oct 26 19:48:03 2010
@@ -21,10 +21,9 @@
   class raw_ostream;
   namespace sys { class Path; }
 
-/// Determine if the raw_ostream provided is connected to the outs() and
-/// displayed or not (to a console window). If so, generate a warning message
-/// advising against display of bitcode and return true. Otherwise just return
-/// false
+/// Determine if the raw_ostream provided is connected to a terminal. If so,
+/// generate a warning message to errs() advising against display of bitcode
+/// and return true. Otherwise just return false.
 /// @brief Check for output written to a console
 bool CheckBitcodeOutputToConsole(
   raw_ostream &stream_to_check, ///< The stream to be checked

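A typical call site for this guard, sketched with WriteBitcodeToFile from llvm/Bitcode/ReaderWriter.h standing in for whatever the tool actually emits:

    #include "llvm/Bitcode/ReaderWriter.h"
    #include "llvm/Support/SystemUtils.h"
    using namespace llvm;

    void emitBitcode(Module &M, raw_ostream &Out) {
      if (CheckBitcodeOutputToConsole(Out))
        return;                    // warning already printed to errs()
      WriteBitcodeToFile(&M, Out); // safe: Out is not a terminal
    }
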
Modified: llvm/branches/wendling/eh/include/llvm/Support/TypeBuilder.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Support/TypeBuilder.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Support/TypeBuilder.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Support/TypeBuilder.h Tue Oct 26 19:48:03 2010
@@ -73,7 +73,7 @@
 ///
 /// TypeBuilder cannot handle recursive types or types you only know at runtime.
 /// If you try to give it a recursive type, it will deadlock, infinitely
-/// recurse, or throw a recursive_init exception.
+/// recurse, or do something similarly undesirable.
 template<typename T, bool cross_compilable> class TypeBuilder {};
 
 // Types for use with cross-compilable TypeBuilders.  These correspond
@@ -88,6 +88,8 @@
 class x86_fp80 {};
 class fp128 {};
 class ppc_fp128 {};
+// X86 MMX.
+class x86_mmx {};
 }  // namespace types
 
 // LLVM doesn't have const or volatile types.
@@ -219,6 +221,10 @@
 public:
   static const Type *get(LLVMContext& C) { return Type::getPPC_FP128Ty(C); }
 };
+template<bool cross> class TypeBuilder<types::x86_mmx, cross> {
+public:
+  static const Type *get(LLVMContext& C) { return Type::getX86_MMXTy(C); }
+};
 
 template<bool cross> class TypeBuilder<void, cross> {
 public:

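Usage is unchanged; the specialization just makes the new MMX type reachable through the template, e.g.:

    LLVMContext Ctx;
    const Type *MMXTy = TypeBuilder<types::x86_mmx, true>::get(Ctx);
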
Modified: llvm/branches/wendling/eh/include/llvm/Support/raw_ostream.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Support/raw_ostream.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Support/raw_ostream.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Support/raw_ostream.h Tue Oct 26 19:48:03 2010
@@ -58,10 +58,6 @@
     ExternalBuffer
   } BufferMode;
 
-  /// Error This flag is true if an error of any kind has been detected.
-  ///
-  bool Error;
-
 public:
   // color order matches ANSI escape sequence, don't change
   enum Colors {
@@ -77,7 +73,7 @@
   };
 
   explicit raw_ostream(bool unbuffered=false)
-    : BufferMode(unbuffered ? Unbuffered : InternalBuffer), Error(false) {
+    : BufferMode(unbuffered ? Unbuffered : InternalBuffer) {
     // Start out ready to flush.
     OutBufStart = OutBufEnd = OutBufCur = 0;
   }
@@ -87,21 +83,6 @@
   /// tell - Return the current offset with the file.
   uint64_t tell() const { return current_pos() + GetNumBytesInBuffer(); }
 
-  /// has_error - Return the value of the flag in this raw_ostream indicating
-  /// whether an output error has been encountered.
-  /// This doesn't implicitly flush any pending output.
-  bool has_error() const {
-    return Error;
-  }
-
-  /// clear_error - Set the flag read by has_error() to false. If the error
-  /// flag is set at the time when this raw_ostream's destructor is called,
-  /// report_fatal_error is called to report the error. Use clear_error()
-  /// after handling the error to avoid this behavior.
-  void clear_error() {
-    Error = false;
-  }
-
   //===--------------------------------------------------------------------===//
   // Configuration Interface
   //===--------------------------------------------------------------------===//
@@ -234,7 +215,7 @@
   /// @param bold bold/brighter text, default false
   /// @param bg if true change the background, default: change foreground
   /// @returns itself so it can be used within << invocations
-  virtual raw_ostream &changeColor(enum Colors, bool = false, bool = false) { 
+  virtual raw_ostream &changeColor(enum Colors, bool = false, bool = false) {
     return *this; }
 
   /// Resets the colors to terminal defaults. Call this when you are done
@@ -285,10 +266,6 @@
   /// underlying output mechanism.
   virtual size_t preferred_buffer_size() const;
 
-  /// error_detected - Set the flag indicating that an output error has
-  /// been encountered.
-  void error_detected() { Error = true; }
-
   /// getBufferStart - Return the beginning of the current stream buffer, or 0
   /// if the stream is unbuffered.
   const char *getBufferStart() const { return OutBufStart; }
@@ -319,6 +296,11 @@
 class raw_fd_ostream : public raw_ostream {
   int FD;
   bool ShouldClose;
+
+  /// Error - This flag is true if an error of any kind has been detected.
+  ///
+  bool Error;
+
   uint64_t pos;
 
   /// write_impl - See raw_ostream::write_impl.
@@ -331,6 +313,10 @@
   /// preferred_buffer_size - Determine an efficient buffer size.
   virtual size_t preferred_buffer_size() const;
 
+  /// error_detected - Set the flag indicating that an output error has
+  /// been encountered.
+  void error_detected() { Error = true; }
+
 public:
 
   enum {
@@ -353,24 +339,26 @@
   /// be immediately destroyed; the string will be empty if no error occurred.
   /// This allows optional flags to control how the file will be opened.
   ///
-  /// \param Filename - The file to open. If this is "-" then the
-  /// stream will use stdout instead.
+  /// As a special case, if Filename is "-", then the stream will use
+  /// STDOUT_FILENO instead of opening a file. Note that it will still consider
+  /// itself to own the file descriptor. In particular, it will close the
+  /// file descriptor when it is done (this is necessary to detect
+  /// output errors).
   raw_fd_ostream(const char *Filename, std::string &ErrorInfo,
                  unsigned Flags = 0);
 
   /// raw_fd_ostream ctor - FD is the file descriptor that this writes to.  If
   /// ShouldClose is true, this closes the file when the stream is destroyed.
-  raw_fd_ostream(int fd, bool shouldClose,
-                 bool unbuffered=false) : raw_ostream(unbuffered), FD(fd),
-                                          ShouldClose(shouldClose) {}
+  raw_fd_ostream(int fd, bool shouldClose, bool unbuffered=false);
 
   ~raw_fd_ostream();
 
   /// close - Manually flush the stream and close the file.
+  /// Note that this does not call fsync.
   void close();
 
   /// seek - Flushes the stream and repositions the underlying file descriptor
-  ///  positition to the offset specified from the beginning of the file.
+  /// position to the offset specified from the beginning of the file.
   uint64_t seek(uint64_t off);
 
   virtual raw_ostream &changeColor(enum Colors colors, bool bold=false,
@@ -378,24 +366,27 @@
   virtual raw_ostream &resetColor();
 
   virtual bool is_displayed() const;
-};
 
-/// raw_stdout_ostream - This is a stream that always prints to stdout.
-///
-class raw_stdout_ostream : public raw_fd_ostream {
-  // An out of line virtual method to provide a home for the class vtable.
-  virtual void handle();
-public:
-  raw_stdout_ostream();
-};
+  /// has_error - Return the value of the flag in this raw_fd_ostream indicating
+  /// whether an output error has been encountered.
+  /// This doesn't implicitly flush any pending output.  Also, it doesn't
+  /// guarantee to detect all errors unless the stream has been closed.
+  bool has_error() const {
+    return Error;
+  }
 
-/// raw_stderr_ostream - This is a stream that always prints to stderr.
-///
-class raw_stderr_ostream : public raw_fd_ostream {
-  // An out of line virtual method to provide a home for the class vtable.
-  virtual void handle();
-public:
-  raw_stderr_ostream();
+  /// clear_error - Set the flag read by has_error() to false. If the error
+  /// flag is set at the time when this raw_ostream's destructor is called,
+  /// report_fatal_error is called to report the error. Use clear_error()
+  /// after handling the error to avoid this behavior.
+  ///
+  ///   "Errors should never pass silently.
+  ///    Unless explicitly silenced."
+  ///      - from The Zen of Python, by Tim Peters
+  ///
+  void clear_error() {
+    Error = false;
+  }
 };
 
 /// outs() - This returns a reference to a raw_ostream for standard output.
@@ -461,7 +452,7 @@
   /// outside of the raw_svector_ostream's control.  It is only safe to do this
   /// if the raw_svector_ostream has previously been flushed.
   void resync();
-  
+
   /// str - Flushes the stream contents to the target vector and return a
   /// StringRef for the vector contents.
   StringRef str();

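The net effect for clients: error-state tracking moves from the raw_ostream base class into raw_fd_ostream, and the stdout/stderr singleton classes disappear in favor of plain raw_fd_ostream instances behind outs() and errs(). A minimal sketch of the intended usage after this change:

    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    bool writeOutput(const char *Path) {
      std::string Err;
      raw_fd_ostream OS(Path, Err);
      if (!Err.empty())
        return false;              // could not open the file
      OS << "some output\n";
      OS.close();                  // errors may only surface at close time
      if (OS.has_error()) {
        OS.clear_error();          // silence it, or ~raw_fd_ostream aborts
        return false;
      }
      return true;
    }
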
Modified: llvm/branches/wendling/eh/include/llvm/System/Memory.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/System/Memory.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/System/Memory.h (original)
+++ llvm/branches/wendling/eh/include/llvm/System/Memory.h Tue Oct 26 19:48:03 2010
@@ -63,7 +63,6 @@
     ///
     /// On success, this returns false, otherwise it returns true and fills
     /// in *ErrMsg.
-    /// @throws std::string if an error occurred.
     /// @brief Release Read/Write/Execute memory.
     static bool ReleaseRWX(MemoryBlock &block, std::string *ErrMsg = 0);
     

Modified: llvm/branches/wendling/eh/include/llvm/System/Path.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/System/Path.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/System/Path.h (original)
+++ llvm/branches/wendling/eh/include/llvm/System/Path.h Tue Oct 26 19:48:03 2010
@@ -164,6 +164,7 @@
 
       /// GetMainExecutable - Return the path to the main executable, given the
       /// value of argv[0] from program startup and the address of main itself.
+      /// In extremis, this function may fail and return an empty path.
       static Path GetMainExecutable(const char *argv0, void *MainAddr);
 
       /// This is one of the very few ways in which a path can be constructed
@@ -336,11 +337,22 @@
       /// native Dynamic Library (shared library, shared object) by looking at
       /// the file's magic number. The Path object must reference a file, not a
       /// directory.
-      /// @return strue if the file starts with the magid number for a native
+      /// @returns true if the file starts with the magic number for a native
       /// shared library.
-      /// @brief Determine if the path reference a dynamic library.
+      /// @brief Determine if the path references a dynamic library.
       bool isDynamicLibrary() const;
 
+      /// This function determines if the path name in the object references a
+      /// native object file by looking at its magic number. The term object
+      /// file is defined as "an organized collection of separate, named
+      /// sequences of binary data." This covers the obvious file formats such as
+      /// COFF and ELF, but it also includes LLVM IR bitcode, archives,
+      /// libraries, etc...
+      /// @returns true if the file starts with the magic number for an object
+      /// file.
+      /// @brief Determine if the path references an object file.
+      bool isObjectFile() const;
+
       /// This function determines if the path name references an existing file
       /// or directory in the file system.
       /// @returns true if the pathname references an existing file or

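A sketch of the new query next to the existing one (helper name and policy are illustrative only):

    #include "llvm/System/Path.h"
    using namespace llvm;

    bool isLinkerInput(const sys::Path &P) {
      // isObjectFile() is deliberately broad: COFF and ELF objects, but
      // also LLVM bitcode, archives, libraries, and so on.
      return P.isObjectFile() || P.isDynamicLibrary();
    }
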
Modified: llvm/branches/wendling/eh/include/llvm/System/Process.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/System/Process.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/System/Process.h (original)
+++ llvm/branches/wendling/eh/include/llvm/System/Process.h Tue Oct 26 19:48:03 2010
@@ -30,7 +30,6 @@
       /// This static function will return the operating system's virtual memory
       /// page size.
       /// @returns The number of bytes in a virtual memory page.
-      /// @throws nothing
       /// @brief Get the virtual memory page size
       static unsigned GetPageSize();
 
@@ -38,7 +37,6 @@
       /// by the process. This only counts the memory allocated via the malloc,
       /// calloc and realloc functions and includes any "free" holes in the
       /// allocated space.
-      /// @throws nothing
       /// @brief Return process memory usage.
       static size_t GetMallocUsage();
 

Modified: llvm/branches/wendling/eh/include/llvm/System/Program.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/System/Program.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/System/Program.h (original)
+++ llvm/branches/wendling/eh/include/llvm/System/Program.h Tue Oct 26 19:48:03 2010
@@ -116,7 +116,6 @@
     /// locations to search (e.g. the PATH on Unix).
     /// @returns A Path object initialized to the path of the program or a
     /// Path object that is empty (invalid) if the program could not be found.
-    /// @throws nothing
     /// @brief Construct a Program by finding it by name.
     static Path FindProgramByName(const std::string& name);
 
@@ -129,7 +128,6 @@
 
     /// A convenience function equivalent to Program prg; prg.Execute(..);
     /// prg.Wait(..);
-    /// @throws nothing
     /// @see Execute, Wait
     static int ExecuteAndWait(const Path& path,
                               const char** args,
@@ -140,7 +138,6 @@
                               std::string* ErrMsg = 0);
 
     /// A convenience function equivalent to Program prg; prg.Execute(..);
-    /// @throws nothing
     /// @see Execute
     static void ExecuteNoWait(const Path& path,
                               const char** args,

Modified: llvm/branches/wendling/eh/include/llvm/System/Signals.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/System/Signals.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/System/Signals.h (original)
+++ llvm/branches/wendling/eh/include/llvm/System/Signals.h Tue Oct 26 19:48:03 2010
@@ -29,6 +29,10 @@
   /// @brief Remove a file if a fatal signal occurs.
   bool RemoveFileOnSignal(const Path &Filename, std::string* ErrMsg = 0);
 
+  /// This function removes a file from the list of files to be removed on
+  /// signal delivery.
+  void DontRemoveFileOnSignal(const Path &Filename);
+
   /// When an error signal (such as SIBABRT or SIGSEGV) is delivered to the
   /// process, print a stack trace and then exit.
   /// @brief Print a stack trace if a fatal signal occurs.

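The new entry point is the inverse of RemoveFileOnSignal, which lets a tool "commit" an output file once it is complete. A sketch (file name hypothetical):

    sys::Path Tmp("output.tmp");       // hypothetical temporary file
    sys::RemoveFileOnSignal(Tmp);      // a crash should not leave it behind
    // ... generate and close the file ...
    sys::DontRemoveFileOnSignal(Tmp);  // finished: keep it even on a signal
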
Modified: llvm/branches/wendling/eh/include/llvm/Target/Target.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Target/Target.td?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Target/Target.td (original)
+++ llvm/branches/wendling/eh/include/llvm/Target/Target.td Tue Oct 26 19:48:03 2010
@@ -511,10 +511,6 @@
   // perform target specific instruction post-processing.
   string AsmParserInstCleanup  = "";
 
-  // MatchInstructionName - The name of the instruction matching function to
-  // generate.
-  string MatchInstructionName  = "MatchInstruction";
-
   // Variant - AsmParsers can be of multiple different variants.  Variants are
   // used to support targets that need to parser multiple formats for the
   // assembly language.
@@ -569,6 +565,11 @@
   
   // OperandSpacing - Space between operand columns.
   int OperandSpacing = -1;
+
+  // isMCAsmWriter - Is this assembly writer for an MC emitter? This controls
+  // generation of the printInstruction() method. For MC printers, it takes
+  // an MCInstr* operand, otherwise it takes a MachineInstr*.
+  bit isMCAsmWriter = 0;
 }
 def DefaultAsmWriter : AsmWriter;
 

Modified: llvm/branches/wendling/eh/include/llvm/Target/TargetAsmBackend.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Target/TargetAsmBackend.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Target/TargetAsmBackend.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Target/TargetAsmBackend.h Tue Oct 26 19:48:03 2010
@@ -16,6 +16,7 @@
 class MCDataFragment;
 class MCFixup;
 class MCInst;
+class MCObjectFormat;
 class MCObjectWriter;
 class MCSection;
 template<typename T>
@@ -33,7 +34,6 @@
   /// TheTarget - The Target that this machine was created for.
   const Target &TheTarget;
 
-  unsigned HasAbsolutizedSet : 1;
   unsigned HasReliableSymbolDifference : 1;
   unsigned HasScatteredSymbols : 1;
 
@@ -42,23 +42,12 @@
 
   const Target &getTarget() const { return TheTarget; }
 
+  virtual const MCObjectFormat &getObjectFormat() const = 0;
+
   /// createObjectWriter - Create a new MCObjectWriter instance for use by the
   /// assembler backend to emit the final object file.
   virtual MCObjectWriter *createObjectWriter(raw_ostream &OS) const = 0;
 
-  /// hasAbsolutizedSet - Check whether this target "absolutizes"
-  /// assignments. That is, given code like:
-  ///   a:
-  ///   ...
-  ///   b:
-  ///   tmp = a - b
-  ///       .long tmp
-  /// will the value of 'tmp' be a relocatable expression, or the assembly time
-  /// value of L0 - L1. This distinction is only relevant for platforms that
-  /// support scattered symbols, since in the absence of scattered symbols (a -
-  /// b) cannot change after assembly.
-  bool hasAbsolutizedSet() const { return HasAbsolutizedSet; }
-
   /// hasReliableSymbolDifference - Check whether this target implements
   /// accurate relocations for differences between symbols. If not, differences
   /// between symbols will always be relocatable expressions and any references
@@ -68,7 +57,7 @@
   /// This should always be true (since it results in fewer relocations with no
   /// loss of functionality), but is currently supported as a way to maintain
   /// exact object compatibility with Darwin 'as' (on non-x86_64). It should
-  /// eventually should be eliminated. See also \see hasAbsolutizedSet.
+  /// eventually be eliminated.
   bool hasReliableSymbolDifference() const {
     return HasReliableSymbolDifference;
   }
@@ -101,6 +90,9 @@
   /// has no actual object file contents.
   virtual bool isVirtualSection(const MCSection &Section) const = 0;
 
+  /// getPointerSize - Get the pointer size in bytes.
+  virtual unsigned getPointerSize() const = 0;
+
   /// ApplyFixup - Apply the \arg Value for given \arg Fixup into the provided
   /// data fragment, at the offset specified by the fixup and following the
   /// fixup kind as appropriate.

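For backend authors, the new pure virtuals slot in next to createObjectWriter. A hypothetical target might implement them as follows (sketch only; MyAsmBackend and MyObjectFormat are assumed names, and the constructor and remaining overrides are elided):

    class MyAsmBackend : public TargetAsmBackend {
      MyObjectFormat Format;   // hypothetical MCObjectFormat subclass
    public:
      virtual const MCObjectFormat &getObjectFormat() const { return Format; }
      virtual unsigned getPointerSize() const { return 8; } // 64-bit target
      // createObjectWriter, ApplyFixup, ... unchanged from before
    };
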
Modified: llvm/branches/wendling/eh/include/llvm/Target/TargetAsmParser.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Target/TargetAsmParser.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Target/TargetAsmParser.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Target/TargetAsmParser.h Tue Oct 26 19:48:03 2010
@@ -13,7 +13,7 @@
 #include "llvm/MC/MCParser/MCAsmParserExtension.h"
 
 namespace llvm {
-class MCInst;
+class MCStreamer;
 class StringRef;
 class Target;
 class SMLoc;
@@ -70,12 +70,16 @@
   /// \param DirectiveID - the identifier token of the directive.
   virtual bool ParseDirective(AsmToken DirectiveID) = 0;
   
-  /// MatchInstruction - Recognize a series of operands of a parsed instruction
-  /// as an actual MCInst.  This returns false and fills in Inst on success and
-  /// returns true on failure to match.
+  /// MatchAndEmitInstruction - Recognize a series of operands of a parsed
+  /// instruction as an actual MCInst and emit it to the specified MCStreamer.
+  /// This returns false on success and returns true on failure to match.
+  ///
+  /// On failure, the target parser is responsible for emitting a diagnostic
+  /// explaining the match failure.
   virtual bool 
-  MatchInstruction(const SmallVectorImpl<MCParsedAsmOperand*> &Operands,
-                   MCInst &Inst) = 0;
+  MatchAndEmitInstruction(SMLoc IDLoc,
+                          SmallVectorImpl<MCParsedAsmOperand*> &Operands,
+                          MCStreamer &Out) = 0;
   
 };
 

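A target's override migrates roughly like this (a sketch: MyTargetAsmParser, the tblgen'd MatchInstructionImpl helper, and the Error() diagnostic helper are placeholders for whatever the target already has):

    bool MyTargetAsmParser::
    MatchAndEmitInstruction(SMLoc IDLoc,
                            SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                            MCStreamer &Out) {
      MCInst Inst;
      if (MatchInstructionImpl(Operands, Inst))         // generated matcher
        return Error(IDLoc, "unrecognized instruction"); // diagnose here now
      Out.EmitInstruction(Inst);   // emission moves into the parser
      return false;
    }
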
Modified: llvm/branches/wendling/eh/include/llvm/Target/TargetCallingConv.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Target/TargetCallingConv.td?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Target/TargetCallingConv.td (original)
+++ llvm/branches/wendling/eh/include/llvm/Target/TargetCallingConv.td Tue Oct 26 19:48:03 2010
@@ -42,7 +42,7 @@
 class CCIfByVal<CCAction A> : CCIf<"ArgFlags.isByVal()", A> {
 }
 
-/// CCIfCC - Match of the current calling convention is 'CC'.
+/// CCIfCC - Match if the current calling convention is 'CC'.
 class CCIfCC<string CC, CCAction A>
   : CCIf<!strconcat("State.getCallingConv() == ", CC), A> {}
 
@@ -89,6 +89,13 @@
   int Align = align;
 }
 
+/// CCAssignToStackWithShadow - Same as CCAssignToStack, but with a register
+/// to be shadowed.
+class CCAssignToStackWithShadow<int size, int align, Register reg> :
+        CCAssignToStack<size, align> {
+  Register ShadowReg = reg;
+}
+
 /// CCPassByVal - This action always matches: it assigns the value to a stack
 /// slot to implement ByVal aggregate parameter passing. Size and alignment
 /// specify the minimum size and alignment for the stack slot.

Modified: llvm/branches/wendling/eh/include/llvm/Target/TargetData.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Target/TargetData.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Target/TargetData.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Target/TargetData.h Tue Oct 26 19:48:03 2010
@@ -22,6 +22,7 @@
 
 #include "llvm/Pass.h"
 #include "llvm/ADT/SmallVector.h"
+#include "llvm/System/DataTypes.h"
 
 namespace llvm {
 
@@ -50,13 +51,13 @@
 /// padding and make the structure slightly more cache friendly.
 struct TargetAlignElem {
   AlignTypeEnum       AlignType : 8;  //< Alignment type (AlignTypeEnum)
-  unsigned char       ABIAlign;       //< ABI alignment for this type/bitw
-  unsigned char       PrefAlign;      //< Pref. alignment for this type/bitw
+  unsigned            ABIAlign;       //< ABI alignment for this type/bitw
+  unsigned            PrefAlign;      //< Pref. alignment for this type/bitw
   uint32_t            TypeBitWidth;   //< Type bit width
 
   /// Initializer
-  static TargetAlignElem get(AlignTypeEnum align_type, unsigned char abi_align,
-                             unsigned char pref_align, uint32_t bit_width);
+  static TargetAlignElem get(AlignTypeEnum align_type, unsigned abi_align,
+                             unsigned pref_align, uint32_t bit_width);
   /// Equality predicate
   bool operator==(const TargetAlignElem &rhs) const;
 };
@@ -64,9 +65,9 @@
 class TargetData : public ImmutablePass {
 private:
   bool          LittleEndian;          ///< Defaults to false
-  unsigned char PointerMemSize;        ///< Pointer size in bytes
-  unsigned char PointerABIAlign;       ///< Pointer ABI alignment
-  unsigned char PointerPrefAlign;      ///< Pointer preferred alignment
+  unsigned      PointerMemSize;        ///< Pointer size in bytes
+  unsigned      PointerABIAlign;       ///< Pointer ABI alignment
+  unsigned      PointerPrefAlign;      ///< Pointer preferred alignment
 
   SmallVector<unsigned char, 8> LegalIntWidths; ///< Legal Integers.
   
@@ -86,12 +87,12 @@
   mutable void *LayoutMap;
 
   //! Set/initialize target alignments
-  void setAlignment(AlignTypeEnum align_type, unsigned char abi_align,
-                    unsigned char pref_align, uint32_t bit_width);
+  void setAlignment(AlignTypeEnum align_type, unsigned abi_align,
+                    unsigned pref_align, uint32_t bit_width);
   unsigned getAlignmentInfo(AlignTypeEnum align_type, uint32_t bit_width,
                             bool ABIAlign, const Type *Ty) const;
   //! Internal helper method that returns requested alignment for type.
-  unsigned char getAlignment(const Type *Ty, bool abi_or_pref) const;
+  unsigned getAlignment(const Type *Ty, bool abi_or_pref) const;
 
   /// Valid alignment predicate.
   ///
@@ -110,7 +111,7 @@
   
   /// Constructs a TargetData from a specification string. See init().
   explicit TargetData(StringRef TargetDescription)
-    : ImmutablePass(&ID) {
+    : ImmutablePass(ID) {
     init(TargetDescription);
   }
 
@@ -118,7 +119,7 @@
   explicit TargetData(const Module *M);
 
   TargetData(const TargetData &TD) :
-    ImmutablePass(&ID),
+    ImmutablePass(ID),
     LittleEndian(TD.isLittleEndian()),
     PointerMemSize(TD.PointerMemSize),
     PointerABIAlign(TD.PointerABIAlign),
@@ -161,13 +162,13 @@
   }
   
   /// Target pointer alignment
-  unsigned char getPointerABIAlignment() const { return PointerABIAlign; }
+  unsigned getPointerABIAlignment() const { return PointerABIAlign; }
   /// Return target's alignment for stack-based pointers
-  unsigned char getPointerPrefAlignment() const { return PointerPrefAlign; }
+  unsigned getPointerPrefAlignment() const { return PointerPrefAlign; }
   /// Target pointer size
-  unsigned char getPointerSize()         const { return PointerMemSize; }
+  unsigned getPointerSize()         const { return PointerMemSize; }
   /// Target pointer size, in bits
-  unsigned char getPointerSizeInBits()   const { return 8*PointerMemSize; }
+  unsigned getPointerSizeInBits()   const { return 8*PointerMemSize; }
 
   /// Size examples:
   ///
@@ -223,26 +224,26 @@
 
   /// getABITypeAlignment - Return the minimum ABI-required alignment for the
   /// specified type.
-  unsigned char getABITypeAlignment(const Type *Ty) const;
+  unsigned getABITypeAlignment(const Type *Ty) const;
   
   /// getABIIntegerTypeAlignment - Return the minimum ABI-required alignment for
   /// an integer type of the specified bitwidth.
-  unsigned char getABIIntegerTypeAlignment(unsigned BitWidth) const;
+  unsigned getABIIntegerTypeAlignment(unsigned BitWidth) const;
   
 
   /// getCallFrameTypeAlignment - Return the minimum ABI-required alignment
   /// for the specified type when it is part of a call frame.
-  unsigned char getCallFrameTypeAlignment(const Type *Ty) const;
+  unsigned getCallFrameTypeAlignment(const Type *Ty) const;
 
 
   /// getPrefTypeAlignment - Return the preferred stack/global alignment for
   /// the specified type.  This is always at least as good as the ABI alignment.
-  unsigned char getPrefTypeAlignment(const Type *Ty) const;
+  unsigned getPrefTypeAlignment(const Type *Ty) const;
 
   /// getPreferredTypeAlignmentShift - Return the preferred alignment for the
   /// specified type, returned as log2 of the value (a shift amount).
   ///
-  unsigned char getPreferredTypeAlignmentShift(const Type *Ty) const;
+  unsigned getPreferredTypeAlignmentShift(const Type *Ty) const;
 
   /// getIntPtrType - Return an unsigned integer type that is the same size or
   /// greater to the host pointer size.

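Callers that stored these results in 'unsigned char' locals should widen them; the queries themselves are unchanged (illustrative sketch):

    unsigned describe(const TargetData &TD, const Type *Ty) {
      unsigned PtrBytes = TD.getPointerSize();        // now plain 'unsigned'
      unsigned ABIAlign = TD.getABITypeAlignment(Ty);
      unsigned Pref     = TD.getPrefTypeAlignment(Ty);
      return PtrBytes + ABIAlign + Pref;              // illustrative only
    }
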
Modified: llvm/branches/wendling/eh/include/llvm/Target/TargetELFWriterInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Target/TargetELFWriterInfo.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Target/TargetELFWriterInfo.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Target/TargetELFWriterInfo.h Tue Oct 26 19:48:03 2010
@@ -28,7 +28,6 @@
     // EMachine - This field is the target specific value to emit as the
     // e_machine member of the ELF header.
     unsigned short EMachine;
-    TargetMachine &TM;
     bool is64Bit, isLittleEndian;
   public:
 
@@ -62,7 +61,7 @@
       ELFDATA2MSB = 2  // Big-endian object file
     };
 
-    explicit TargetELFWriterInfo(TargetMachine &tm);
+    explicit TargetELFWriterInfo(bool is64Bit_, bool isLittleEndian_);
     virtual ~TargetELFWriterInfo();
 
     unsigned short getEMachine() const { return EMachine; }

Modified: llvm/branches/wendling/eh/include/llvm/Target/TargetInstrDesc.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Target/TargetInstrDesc.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Target/TargetInstrDesc.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Target/TargetInstrDesc.h Tue Oct 26 19:48:03 2010
@@ -152,6 +152,12 @@
     return -1;
   }
 
+  /// getRegClass - Returns the register class constraint for OpNum, or NULL.
+  const TargetRegisterClass *getRegClass(unsigned OpNum,
+                                         const TargetRegisterInfo *TRI) const {
+    return OpNum < NumOperands ? OpInfo[OpNum].getRegClass(TRI) : 0;
+  }
+
   /// getOpcode - Return the opcode number for this descriptor.
   unsigned getOpcode() const {
     return Opcode;

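The new accessor folds the usual bounds check into one call; a client-side sketch (MI, OpNum, and TRI assumed in scope):

    // Returns null when the operand has no register-class constraint or
    // OpNum is out of range.
    const TargetInstrDesc &TID = MI->getDesc();
    const TargetRegisterClass *RC = TID.getRegClass(OpNum, TRI);
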
Modified: llvm/branches/wendling/eh/include/llvm/Target/TargetInstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Target/TargetInstrInfo.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Target/TargetInstrInfo.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Target/TargetInstrInfo.h Tue Oct 26 19:48:03 2010
@@ -24,6 +24,7 @@
 class LiveVariables;
 class MCAsmInfo;
 class MachineMemOperand;
+class MachineRegisterInfo;
 class MDNode;
 class MCInst;
 class SDNode;
@@ -304,27 +305,36 @@
   }
 
   /// isProfitableToIfCvt - Return true if it's profitable to first "NumInstrs"
-  /// of the specified basic block.
+  /// of the specified basic block, where the probability of the instructions
+  /// being executed is given by Probability, and Confidence is a measure
+  /// of our confidence that it will be properly predicted.
   virtual
-  bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumInstrs) const {
+  bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumInstrs,
+                           float Probability, float Confidence) const {
     return false;
   }
   
   /// isProfitableToIfCvt - Second variant of isProfitableToIfCvt, this one
   /// checks for the case where two basic blocks from true and false path
   /// of a if-then-else (diamond) are predicated on mutally exclusive
-  /// predicates.
+  /// predicates, where the probability of the true path being taken is given
+  /// by Probability, and Confidence is a measure of our confidence that it
+  /// will be properly predicted.
   virtual bool
   isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumTInstrs,
-                      MachineBasicBlock &FMBB, unsigned NumFInstrs) const {
+                      MachineBasicBlock &FMBB, unsigned NumFInstrs,
+                      float Probability, float Confidence) const {
     return false;
   }
 
   /// isProfitableToDupForIfCvt - Return true if it's profitable for
   /// if-converter to duplicate a specific number of instructions in the
-  /// specified MBB to enable if-conversion.
+  /// specified MBB to enable if-conversion, where the probability of the 
+  /// instructions being executed is given by Probability, and Confidence is
+  /// a measure of our confidence that it will be properly predicted.
   virtual bool
-  isProfitableToDupForIfCvt(MachineBasicBlock &MBB,unsigned NumInstrs) const {
+  isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumInstrs,
+                            float Probability, float Confidence) const {
     return false;
   }
   
@@ -575,7 +585,66 @@
   /// to use for this target when scheduling the machine instructions after
   /// register allocation.
   virtual ScheduleHazardRecognizer*
-  CreateTargetPostRAHazardRecognizer(const InstrItineraryData&) const = 0;
+  CreateTargetPostRAHazardRecognizer(const InstrItineraryData*) const = 0;
+
+  /// AnalyzeCompare - For a comparison instruction, return the source register
+  /// in SrcReg and the value it compares against in Mask and Value. Return true if
+  /// the comparison instruction can be analyzed.
+  virtual bool AnalyzeCompare(const MachineInstr *MI,
+                              unsigned &SrcReg, int &Mask, int &Value) const {
+    return false;
+  }
+
+  /// OptimizeCompareInstr - See if the comparison instruction can be converted
+  /// into something more efficient. E.g., on ARM most instructions can set the
+  /// flags register, obviating the need for a separate CMP. Update the iterator
+  /// *only* if a transformation took place.
+  virtual bool OptimizeCompareInstr(MachineInstr *CmpInstr,
+                                    unsigned SrcReg, int Mask, int Value,
+                                    const MachineRegisterInfo *MRI,
+                                    MachineBasicBlock::iterator &) const {
+    return false;
+  }
+
+  /// getNumMicroOps - Return the number of u-operations the given machine
+  /// instruction will be decoded to on the target cpu.
+  virtual unsigned getNumMicroOps(const MachineInstr *MI,
+                                  const InstrItineraryData *ItinData) const;
+
+  /// getOperandLatency - Compute and return the use operand latency of a given
+  /// itinerary class and operand index if the value is produced by an
+  /// instruction of the specified itinerary class and def operand index.
+  /// In most cases, the static scheduling itinerary is enough to determine the
+  /// operand latency. But it may not be possible for instructions with a
+  /// variable number of defs / uses.
+  virtual
+  int getOperandLatency(const InstrItineraryData *ItinData,
+                        const MachineInstr *DefMI, unsigned DefIdx,
+                        const MachineInstr *UseMI, unsigned UseIdx) const;
+
+  virtual
+  int getOperandLatency(const InstrItineraryData *ItinData,
+                        SDNode *DefNode, unsigned DefIdx,
+                        SDNode *UseNode, unsigned UseIdx) const;
+
+  /// hasHighOperandLatency - Compute operand latency between a def of 'Reg'
+  /// and a use in the current loop; return true if the target considers
+  /// it 'high'. This is used by optimization passes such as machine LICM to
+  /// determine whether it makes sense to hoist an instruction out even in
+  /// a high register pressure situation.
+  virtual
+  bool hasHighOperandLatency(const InstrItineraryData *ItinData,
+                             const MachineRegisterInfo *MRI,
+                             const MachineInstr *DefMI, unsigned DefIdx,
+                             const MachineInstr *UseMI, unsigned UseIdx) const {
+    return false;
+  }
+
+  /// hasLowDefLatency - Compute operand latency of a def of 'Reg', return true
+  /// if the target considered it 'low'.
+  virtual
+  bool hasLowDefLatency(const InstrItineraryData *ItinData,
+                        const MachineInstr *DefMI, unsigned DefIdx) const;
 };
 
 /// TargetInstrInfoImpl - This is the default implementation of
@@ -611,7 +680,7 @@
                                     const MachineFunction &MF) const;
 
   virtual ScheduleHazardRecognizer *
-  CreateTargetPostRAHazardRecognizer(const InstrItineraryData&) const;
+  CreateTargetPostRAHazardRecognizer(const InstrItineraryData*) const;
 };
 
 } // End llvm namespace

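A target that wants to exploit the new profitability hooks might weigh block size against the branch statistics, e.g. (a sketch with made-up thresholds; MyInstrInfo is a placeholder):

    bool MyInstrInfo::
    isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumInstrs,
                        float Probability, float Confidence) const {
      // Speculate short blocks only when the branch is likely taken and
      // the prediction is trustworthy.
      return NumInstrs <= 4 && Probability * Confidence > 0.4f;
    }
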
Modified: llvm/branches/wendling/eh/include/llvm/Target/TargetInstrItineraries.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Target/TargetInstrItineraries.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Target/TargetInstrItineraries.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Target/TargetInstrItineraries.h Tue Oct 26 19:48:03 2010
@@ -95,6 +95,7 @@
 /// operands are read and written.
 ///
 struct InstrItinerary {
+  unsigned NumMicroOps;        ///< # of micro-ops, 0 means it's variable
   unsigned FirstStage;         ///< Index of first stage in itinerary
   unsigned LastStage;          ///< Index of last + 1 stage in itinerary
   unsigned FirstOperandCycle;  ///< Index of first operand rd/wr
@@ -110,38 +111,40 @@
 public:
   const InstrStage     *Stages;         ///< Array of stages selected
   const unsigned       *OperandCycles;  ///< Array of operand cycles selected
-  const InstrItinerary *Itineratries;   ///< Array of itineraries selected
+  const unsigned       *Forwardings;    ///< Array of pipeline forwarding paths
+  const InstrItinerary *Itineraries;    ///< Array of itineraries selected
 
   /// Ctors.
   ///
-  InstrItineraryData() : Stages(0), OperandCycles(0), Itineratries(0) {}
+  InstrItineraryData() : Stages(0), OperandCycles(0), Forwardings(0),
+                         Itineraries(0) {}
   InstrItineraryData(const InstrStage *S, const unsigned *OS,
-                     const InstrItinerary *I)
-    : Stages(S), OperandCycles(OS), Itineratries(I) {}
+                     const unsigned *F, const InstrItinerary *I)
+    : Stages(S), OperandCycles(OS), Forwardings(F), Itineraries(I) {}
   
   /// isEmpty - Returns true if there are no itineraries.
   ///
-  bool isEmpty() const { return Itineratries == 0; }
+  bool isEmpty() const { return Itineraries == 0; }
 
   /// isEndMarker - Returns true if the index is for the end marker
   /// itinerary.
   ///
   bool isEndMarker(unsigned ItinClassIndx) const {
-    return ((Itineratries[ItinClassIndx].FirstStage == ~0U) &&
-            (Itineratries[ItinClassIndx].LastStage == ~0U));
+    return ((Itineraries[ItinClassIndx].FirstStage == ~0U) &&
+            (Itineraries[ItinClassIndx].LastStage == ~0U));
   }
 
   /// beginStage - Return the first stage of the itinerary.
   /// 
   const InstrStage *beginStage(unsigned ItinClassIndx) const {
-    unsigned StageIdx = Itineratries[ItinClassIndx].FirstStage;
+    unsigned StageIdx = Itineraries[ItinClassIndx].FirstStage;
     return Stages + StageIdx;
   }
 
   /// endStage - Return the last+1 stage of the itinerary.
   /// 
   const InstrStage *endStage(unsigned ItinClassIndx) const {
-    unsigned StageIdx = Itineratries[ItinClassIndx].LastStage;
+    unsigned StageIdx = Itineraries[ItinClassIndx].LastStage;
     return Stages + StageIdx;
   }
 
@@ -173,13 +176,68 @@
     if (isEmpty())
       return -1;
 
-    unsigned FirstIdx = Itineratries[ItinClassIndx].FirstOperandCycle;
-    unsigned LastIdx = Itineratries[ItinClassIndx].LastOperandCycle;
+    unsigned FirstIdx = Itineraries[ItinClassIndx].FirstOperandCycle;
+    unsigned LastIdx = Itineraries[ItinClassIndx].LastOperandCycle;
     if ((FirstIdx + OperandIdx) >= LastIdx)
       return -1;
 
     return (int)OperandCycles[FirstIdx + OperandIdx];
   }
+
+  /// hasPipelineForwarding - Return true if there is a pipeline forwarding
+  /// between instructions of itinerary classes DefClass and UseClasses so that
+  /// value produced by an instruction of itinerary class DefClass, operand
+  /// index DefIdx can be bypassed when it's read by an instruction of
+  /// itinerary class UseClass, operand index UseIdx.
+  bool hasPipelineForwarding(unsigned DefClass, unsigned DefIdx,
+                             unsigned UseClass, unsigned UseIdx) const {
+    unsigned FirstDefIdx = Itineraries[DefClass].FirstOperandCycle;
+    unsigned LastDefIdx = Itineraries[DefClass].LastOperandCycle;
+    if ((FirstDefIdx + DefIdx) >= LastDefIdx)
+      return false;
+    if (Forwardings[FirstDefIdx + DefIdx] == 0)
+      return false;
+
+    unsigned FirstUseIdx = Itineraries[UseClass].FirstOperandCycle;
+    unsigned LastUseIdx = Itineraries[UseClass].LastOperandCycle;
+    if ((FirstUseIdx + UseIdx) >= LastUseIdx)
+      return false;
+
+    return Forwardings[FirstDefIdx + DefIdx] ==
+      Forwardings[FirstUseIdx + UseIdx];
+  }
+
+  /// getOperandLatency - Compute and return the use operand latency of a given
+  /// itinerary class and operand index if the value is produced by an
+  /// instruction of the specified itinerary class and def operand index.
+  int getOperandLatency(unsigned DefClass, unsigned DefIdx,
+                        unsigned UseClass, unsigned UseIdx) const {
+    if (isEmpty())
+      return -1;
+
+    int DefCycle = getOperandCycle(DefClass, DefIdx);
+    if (DefCycle == -1)
+      return -1;
+
+    int UseCycle = getOperandCycle(UseClass, UseIdx);
+    if (UseCycle == -1)
+      return -1;
+
+    UseCycle = DefCycle - UseCycle + 1;
+    if (UseCycle > 0 &&
+        hasPipelineForwarding(DefClass, DefIdx, UseClass, UseIdx))
+      // FIXME: This assumes one cycle benefit for every pipeline forwarding.
+      --UseCycle;
+    return UseCycle;
+  }
+
+  /// isMicroCoded - Return true if the instructions in the given class decode
+  /// to more than one micro-op.
+  bool isMicroCoded(unsigned ItinClassIndx) const {
+    if (isEmpty())
+      return false;
+    return Itineraries[ItinClassIndx].NumMicroOps != 1;
+  }
 };
 
 

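To make the new latency arithmetic concrete: with a def-operand cycle of 3 and a use-operand cycle of 1, getOperandLatency yields 3 - 1 + 1 = 3 cycles, dropping to 2 when hasPipelineForwarding finds matching forwarding entries (the one-cycle credit noted in the FIXME). Callers only see the top-level query (sketch; the indices are assumed to come from the instructions' itinerary classes and operand positions):

    // Itins is an InstrItineraryData*.
    int Latency = Itins->getOperandLatency(DefClass, DefIdx,
                                           UseClass, UseIdx);
    if (Latency < 0) {
      // No itinerary data for this pair: fall back to a target default.
    }
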
Modified: llvm/branches/wendling/eh/include/llvm/Target/TargetLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Target/TargetLowering.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Target/TargetLowering.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Target/TargetLowering.h Tue Oct 26 19:48:03 2010
@@ -45,6 +45,7 @@
   class Function;
   class FastISel;
   class FunctionLoweringInfo;
+  class ImmutableCallSite;
   class MachineBasicBlock;
   class MachineFunction;
   class MachineFrameInfo;
@@ -203,35 +204,57 @@
     return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != 0;
   }
 
-  /// isTypeSynthesizable - Return true if it's OK for the compiler to create
-  /// new operations of this type.  All Legal types are synthesizable except
-  /// MMX vector types on X86.  Non-Legal types are not synthesizable.
-  bool isTypeSynthesizable(EVT VT) const {
-    return isTypeLegal(VT) && Synthesizable[VT.getSimpleVT().SimpleTy];
-  }
-
   class ValueTypeActionImpl {
     /// ValueTypeActions - For each value type, keep a LegalizeAction enum
     /// that indicates how instruction selection should deal with the type.
     uint8_t ValueTypeActions[MVT::LAST_VALUETYPE];
+    
+    LegalizeAction getExtendedTypeAction(EVT VT) const {
+      // Handle non-vector integers.
+      if (!VT.isVector()) {
+        assert(VT.isInteger() && "Unsupported extended type!");
+        unsigned BitSize = VT.getSizeInBits();
+        // First promote to a power-of-two size, then expand if necessary.
+        if (BitSize < 8 || !isPowerOf2_32(BitSize))
+          return Promote;
+        return Expand;
+      }
+      
+      // If this is a type smaller than a legal vector type, promote to that
+      // type, e.g. <2 x float> -> <4 x float>.
+      if (VT.getVectorElementType().isSimple() &&
+          VT.getVectorNumElements() != 1) {
+        MVT EltType = VT.getVectorElementType().getSimpleVT();
+        unsigned NumElts = VT.getVectorNumElements();
+        while (1) {
+          // Round up to the nearest power of 2.
+          NumElts = (unsigned)NextPowerOf2(NumElts);
+          
+          MVT LargerVector = MVT::getVectorVT(EltType, NumElts);
+          if (LargerVector == MVT()) break;
+          
+          // If the larger type is legal, promote to it.
+          if (getTypeAction(LargerVector) == Legal) return Promote;
+        }
+      }
+      
+      return VT.isPow2VectorType() ? Expand : Promote;
+    }      
   public:
     ValueTypeActionImpl() {
       std::fill(ValueTypeActions, array_endof(ValueTypeActions), 0);
     }
-    LegalizeAction getTypeAction(LLVMContext &Context, EVT VT) const {
-      if (VT.isExtended()) {
-        if (VT.isVector()) {
-          return VT.isPow2VectorType() ? Expand : Promote;
-        }
-        if (VT.isInteger())
-          // First promote to a power-of-two size, then expand if necessary.
-          return VT == VT.getRoundIntegerType(Context) ? Expand : Promote;
-        assert(0 && "Unsupported extended type!");
-        return Legal;
-      }
-      unsigned I = VT.getSimpleVT().SimpleTy;
-      return (LegalizeAction)ValueTypeActions[I];
+    
+    LegalizeAction getTypeAction(EVT VT) const {
+      if (!VT.isExtended())
+        return getTypeAction(VT.getSimpleVT());
+      return getExtendedTypeAction(VT);
     }
+    
+    LegalizeAction getTypeAction(MVT VT) const {
+      return (LegalizeAction)ValueTypeActions[VT.SimpleTy];
+    }
+    
     void setTypeAction(EVT VT, LegalizeAction Action) {
       unsigned I = VT.getSimpleVT().SimpleTy;
       ValueTypeActions[I] = Action;
@@ -246,10 +269,13 @@
   /// it is already legal (return 'Legal') or we need to promote it to a larger
   /// type (return 'Promote'), or we need to expand it into multiple registers
   /// of smaller integer type (return 'Expand').  'Custom' is not an option.
-  LegalizeAction getTypeAction(LLVMContext &Context, EVT VT) const {
-    return ValueTypeActions.getTypeAction(Context, VT);
+  LegalizeAction getTypeAction(EVT VT) const {
+    return ValueTypeActions.getTypeAction(VT);
   }
-
+  LegalizeAction getTypeAction(MVT VT) const {
+    return ValueTypeActions.getTypeAction(VT);
+  }
+  
   /// getTypeToTransformTo - For types supported by the target, this is an
   /// identity function.  For types that must be promoted to larger types, this
   /// returns the larger type to promote to.  For integer types that are larger
@@ -261,7 +287,7 @@
       assert((unsigned)VT.getSimpleVT().SimpleTy <
              array_lengthof(TransformToType));
       EVT NVT = TransformToType[VT.getSimpleVT().SimpleTy];
-      assert(getTypeAction(Context, NVT) != Promote &&
+      assert(getTypeAction(NVT) != Promote &&
              "Promote may not follow Expand or Promote");
       return NVT;
     }
@@ -276,17 +302,16 @@
           EltVT : EVT::getVectorVT(Context, EltVT, NumElts / 2);
       }
       // Promote to a power of two size, avoiding multi-step promotion.
-      return getTypeAction(Context, NVT) == Promote ?
+      return getTypeAction(NVT) == Promote ?
         getTypeToTransformTo(Context, NVT) : NVT;
     } else if (VT.isInteger()) {
       EVT NVT = VT.getRoundIntegerType(Context);
-      if (NVT == VT)
-        // Size is a power of two - expand to half the size.
+      if (NVT == VT)      // Size is a power of two - expand to half the size.
         return EVT::getIntegerVT(Context, VT.getSizeInBits() / 2);
-      else
-        // Promote to a power of two size, avoiding multi-step promotion.
-        return getTypeAction(Context, NVT) == Promote ?
-          getTypeToTransformTo(Context, NVT) : NVT;
+      
+      // Promote to a power of two size, avoiding multi-step promotion.
+      return getTypeAction(NVT) == Promote ?
+        getTypeToTransformTo(Context, NVT) : NVT;
     }
     assert(0 && "Unsupported extended type!");
     return MVT(MVT::Other); // Not reached
@@ -299,7 +324,7 @@
   EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
     assert(!VT.isVector());
     while (true) {
-      switch (getTypeAction(Context, VT)) {
+      switch (getTypeAction(VT)) {
       case Legal:
         return VT;
       case Expand:
@@ -1005,12 +1030,10 @@
   /// addRegisterClass - Add the specified register class as an available
   /// regclass for the specified value type.  This indicates the selector can
   /// handle values of that class natively.
-  void addRegisterClass(EVT VT, TargetRegisterClass *RC,
-                        bool isSynthesizable = true) {
+  void addRegisterClass(EVT VT, TargetRegisterClass *RC) {
     assert((unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
     AvailableRegClasses.push_back(std::make_pair(VT, RC));
     RegClassForVT[VT.getSimpleVT().SimpleTy] = RC;
-    Synthesizable[VT.getSimpleVT().SimpleTy] = isSynthesizable;
   }
 
   /// findRepresentativeClass - Return the largest legal super-reg register class
@@ -1325,12 +1348,42 @@
     /// returns the output operand it matches.
     unsigned getMatchedOperand() const;
 
+    /// Copy constructor for copying from an AsmOperandInfo.
+    AsmOperandInfo(const AsmOperandInfo &info)
+      : InlineAsm::ConstraintInfo(info),
+        ConstraintCode(info.ConstraintCode),
+        ConstraintType(info.ConstraintType),
+        CallOperandVal(info.CallOperandVal),
+        ConstraintVT(info.ConstraintVT) {
+    }
+
+    /// Copy constructor for copying from a ConstraintInfo.
     AsmOperandInfo(const InlineAsm::ConstraintInfo &info)
       : InlineAsm::ConstraintInfo(info),
         ConstraintType(TargetLowering::C_Unknown),
         CallOperandVal(0), ConstraintVT(MVT::Other) {
     }
   };
+  
+  /// ParseConstraints - Split up the constraint string from the inline
+  /// assembly value into the specific constraints and their prefixes,
+  /// and also tie in the associated operand values.
+  /// If this returns an empty vector, and if the constraint string itself
+  /// isn't empty, there was an error parsing.
+  virtual std::vector<AsmOperandInfo> ParseConstraints(
+    ImmutableCallSite CS) const;
+  
+  /// Examine constraint type and operand type and determine a weight value,
+  /// where: -1 = invalid match, and 0 = so-so match to 5 = good match.
+  /// The operand object must already have been set up with the operand type.
+  virtual int getMultipleConstraintMatchWeight(
+      AsmOperandInfo &info, int maIndex) const;
+  
+  /// Examine constraint string and operand type and determine a weight value,
+  /// where: -1 = invalid match, and 0 = so-so match to 3 = good match.
+  /// The operand object must already have been set up with the operand type.
+  virtual int getSingleConstraintMatchWeight(
+      AsmOperandInfo &info, const char *constraint) const;
 
   /// ComputeConstraintToUse - Determines the constraint code and constraint
   /// type to use for the specific AsmOperandInfo, setting
@@ -1612,11 +1665,6 @@
   /// approximate register pressure.
   uint8_t RepRegClassCostForVT[MVT::LAST_VALUETYPE];
 
-  /// Synthesizable indicates whether it is OK for the compiler to create new
-  /// operations using this type.  All Legal types are Synthesizable except
-  /// MMX types on X86.  Non-Legal types are not Synthesizable.
-  bool Synthesizable[MVT::LAST_VALUETYPE];
-
   /// TransformToType - For any value types we are promoting or expanding, this
   /// contains the value type that we are changing to.  For Expanded types, this
   /// contains one step of the expand (e.g. i64 -> i32), even if there are

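For code that legalizes types, the LLVMContext argument disappears from the query, and MVT queries now bypass the extended-type path entirely. A sketch of a call site after this change:

    // Before this change: TLI.getTypeAction(Context, VT).
    if (TLI.getTypeAction(VT) == TargetLowering::Promote) {
      // For vectors this may widen, e.g. <2 x float> toward <4 x float>,
      // via the power-of-two search in getExtendedTypeAction().
      EVT NVT = TLI.getTypeToTransformTo(Context, VT);
      // ... legalize using NVT ...
    }
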
Modified: llvm/branches/wendling/eh/include/llvm/Target/TargetLoweringObjectFile.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Target/TargetLoweringObjectFile.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Target/TargetLoweringObjectFile.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Target/TargetLoweringObjectFile.h Tue Oct 26 19:48:03 2010
@@ -92,6 +92,11 @@
   // information for a TLS variable, it'll go here.
   const MCSection *TLSExtraDataSection;
   
+  /// CommDirectiveSupportsAlignment - True if .comm supports alignment.  This
+  /// is a hack for as long as we support 10.4 Tiger, whose assembler doesn't
+  /// support alignment on .comm.
+  bool CommDirectiveSupportsAlignment;
+  
   /// SupportsWeakEmptyEHFrame - True if target object file supports a
   /// weak_definition of constant 0 for an omitted EH frame.
   bool SupportsWeakOmittedEHFrame;
@@ -128,6 +133,10 @@
     return SupportsWeakOmittedEHFrame;
   }
   
+  bool getCommDirectiveSupportsAlignment() const {
+    return CommDirectiveSupportsAlignment;
+  }
+
   const MCSection *getTextSection() const { return TextSection; }
   const MCSection *getDataSection() const { return DataSection; }
   const MCSection *getBSSSection() const { return BSSSection; }

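An asm printer would consult the new flag before attaching an alignment to .comm; a minimal sketch, assuming an AsmPrinter-style context where GVSym, Size, and AlignLog are already computed:

    // Emit .comm, attaching the alignment only when the directive supports it.
    const bool SupportsAlignment =
      getObjFileLowering().getCommDirectiveSupportsAlignment();
    OutStreamer.EmitCommonSymbol(GVSym, Size,
                                 SupportsAlignment ? 1 << AlignLog : 0);
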
Modified: llvm/branches/wendling/eh/include/llvm/Target/TargetMachine.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Target/TargetMachine.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Target/TargetMachine.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Target/TargetMachine.h Tue Oct 26 19:48:03 2010
@@ -98,7 +98,7 @@
 
   /// TheTarget - The Target that this machine was created for.
   const Target &TheTarget;
-  
+
   /// AsmInfo - Contains target specific asm information.
   ///
   const MCAsmInfo *AsmInfo;
@@ -121,11 +121,11 @@
   virtual const TargetLowering    *getTargetLowering() const { return 0; }
   virtual const TargetSelectionDAGInfo *getSelectionDAGInfo() const{ return 0; }
   virtual const TargetData            *getTargetData() const { return 0; }
-  
+
   /// getMCAsmInfo - Return target specific asm information.
   ///
   const MCAsmInfo *getMCAsmInfo() const { return AsmInfo; }
-  
+
   /// getSubtarget - This method returns a pointer to the specified type of
   /// TargetSubtarget.  In debug builds, it verifies that the object being
   /// returned is of the correct type.
@@ -138,7 +138,7 @@
   /// details of graph coloring register allocation removed from it.
   ///
   virtual const TargetRegisterInfo *getRegisterInfo() const { return 0; }
-  
+
   /// getIntrinsicInfo - If intrinsic information is available, return it.  If
   /// not, return null.
   ///
@@ -148,17 +148,17 @@
   /// otherwise return null.
   ///
   virtual TargetJITInfo *getJITInfo() { return 0; }
-  
+
   /// getInstrItineraryData - Returns instruction itinerary data for the target
   /// or specific subtarget.
   ///
-  virtual const InstrItineraryData getInstrItineraryData() const {  
-    return InstrItineraryData();
+  virtual const InstrItineraryData *getInstrItineraryData() const {
+    return 0;
   }
 
   /// getELFWriterInfo - If this target supports an ELF writer, return
   /// information for it, otherwise return null.
-  /// 
+  ///
   virtual const TargetELFWriterInfo *getELFWriterInfo() const { return 0; }
 
   /// hasMCRelaxAll - Check whether all machine code instructions should be
@@ -267,7 +267,7 @@
 
 protected: // Can only create subclasses.
   LLVMTargetMachine(const Target &T, const std::string &TargetTriple);
-  
+
 private:
   /// addCommonCodeGenPasses - Add standard LLVM codegen passes used for
   /// both emitting to assembly files or machine code output.
@@ -277,9 +277,11 @@
 
   virtual void setCodeModelForJIT();
   virtual void setCodeModelForStatic();
-  
+
 public:
-  
+
+  const std::string &getTargetTriple() const { return TargetTriple; }
+
   /// addPassesToEmitFile - Add passes to the specified pass manager to get the
   /// specified file emitted.  Typically this will involve several steps of code
   /// generation.  If OptLevel is None, the code generator should emit code as
@@ -289,7 +291,7 @@
                                    CodeGenFileType FileType,
                                    CodeGenOpt::Level,
                                    bool DisableVerify = true);
-  
+
   /// addPassesToEmitMachineCode - Add passes to the specified pass manager to
   /// get machine code emitted.  This uses a JITCodeEmitter object to handle
   /// actually outputting the machine code and resolving things like the address
@@ -310,7 +312,7 @@
                                  MCContext *&Ctx,
                                  CodeGenOpt::Level OptLevel,
                                  bool DisableVerify = true);
-  
+
   /// Target-Independent Code Generator Pass Configuration Options.
 
   /// addPreISelPasses - This method should add any "last minute" LLVM->LLVM
@@ -347,15 +349,15 @@
   virtual bool addPreSched2(PassManagerBase &, CodeGenOpt::Level) {
     return false;
   }
-  
+
   /// addPreEmitPass - This pass may be implemented by targets that want to run
   /// passes immediately before machine code is emitted.  This should return
   /// true if -print-machineinstrs should print out the code after the passes.
   virtual bool addPreEmitPass(PassManagerBase &, CodeGenOpt::Level) {
     return false;
   }
-  
-  
+
+
   /// addCodeEmitter - This pass should be overridden by the target to add a
   /// code emitter, if supported.  If this is not supported, 'true' should be
   /// returned.

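Because getInstrItineraryData now returns a pointer (null when the target provides none) instead of a by-value object, callers must check it before use; a minimal sketch, assuming a TargetMachine &TM in scope:

    const InstrItineraryData *IID = TM.getInstrItineraryData();
    if (IID && !IID->isEmpty()) {
      // Itinerary-based latency information is available to the scheduler.
    }
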
Modified: llvm/branches/wendling/eh/include/llvm/Target/TargetRegisterInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Target/TargetRegisterInfo.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Target/TargetRegisterInfo.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Target/TargetRegisterInfo.h Tue Oct 26 19:48:03 2010
@@ -227,9 +227,12 @@
   /// cheaper to allocate caller saved registers.
   ///
   /// These methods take a MachineFunction argument, which can be used to tune
-  /// the allocatable registers based on the characteristics of the function.
-  /// One simple example is that the frame pointer register can be used if
-  /// frame-pointer-elimination is performed.
+  /// the allocatable registers based on the characteristics of the function,
+  /// subtarget, or other criteria.
+  ///
+  /// Register allocators should account for the fact that an allocation
+  /// order iterator may return a reserved register, and should always check
+  /// whether the register is allocatable (getAllocatableSet()) before using it.
   ///
   /// By default, these methods return all registers in the class.
   ///
@@ -301,7 +304,7 @@
     /// considered to be a 'virtual' register, which is part of the SSA
     /// namespace.  This must be the same for all targets, which means that each
     /// target is limited to this fixed number of registers.
-    FirstVirtualRegister = 1024
+    FirstVirtualRegister = 16384
   };
 
   /// isPhysicalRegister - Return true if the specified register number is in
@@ -593,6 +596,13 @@
     return false;
   }
 
+  /// requiresVirtualBaseRegisters - Returns true if the target wants the
+  /// LocalStackAllocation pass to be run and virtual base registers
+  /// used for more efficient stack access.
+  virtual bool requiresVirtualBaseRegisters(const MachineFunction &MF) const {
+    return false;
+  }
+
   /// hasFP - Return true if the specified function should have a dedicated
   /// frame pointer register. For most targets this is true only if the function
   /// has variable sized allocas or if frame pointer elimination is disabled.
@@ -636,6 +646,44 @@
     return false;
   }
 
+  /// getFrameIndexInstrOffset - Get the offset from the referenced frame
+  /// index in the instruction, if there is one.
+  virtual int64_t getFrameIndexInstrOffset(const MachineInstr *MI,
+                                           int Idx) const {
+    return 0;
+  }
+
+  /// needsFrameBaseReg - Returns true if the instruction's frame index
+  /// reference would be better served by a base register other than FP
+  /// or SP. Used by LocalStackFrameAllocation to determine which frame index
+  /// references it should create new base registers for.
+  virtual bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
+    return false;
+  }
+
+  /// materializeFrameBaseRegister - Insert defining instruction(s) for
+  /// BaseReg to be a pointer to FrameIdx before insertion point I.
+  virtual void materializeFrameBaseRegister(MachineBasicBlock::iterator I,
+                                            unsigned BaseReg, int FrameIdx,
+                                            int64_t Offset) const {
+    assert(0 && "materializeFrameBaseRegister does not exist on this target");
+  }
+
+  /// resolveFrameIndex - Resolve a frame index operand of an instruction
+  /// to reference the indicated base register plus offset instead.
+  virtual void resolveFrameIndex(MachineBasicBlock::iterator I,
+                                 unsigned BaseReg, int64_t Offset) const {
+    assert(0 && "resolveFrameIndex does not exist on this target");
+  }
+
+  /// isFrameOffsetLegal - Determine whether a given offset immediate is
+  /// encodable to resolve a frame index.
+  virtual bool isFrameOffsetLegal(const MachineInstr *MI,
+                                  int64_t Offset) const {
+    assert(0 && "isFrameOffsetLegal does not exist on this target");
+    return false; // Must return a value in order to compile with VS 2005
+  }
+
   /// getCallFrameSetup/DestroyOpcode - These methods return the opcode of the
   /// frame setup/destroy instructions if they exist (-1 otherwise).  Some
   /// targets use pseudo instructions in order to abstract away the difference
@@ -671,7 +719,7 @@
   }
 
   /// processFunctionBeforeFrameFinalized - This method is called immediately
-  /// before the specified functions frame layout (MF.getFrameInfo()) is
+  /// before the specified function's frame layout (MF.getFrameInfo()) is
   /// finalized.  Once the frame is finalized, MO_FrameIndex operands are
   /// replaced with direct constants.  This method is optional.
   ///
@@ -698,14 +746,8 @@
   /// specified instruction, as long as it keeps the iterator pointing at the
   /// finished product. SPAdj is the SP adjustment due to call frame setup
   /// instruction.
-  ///
-  /// When -enable-frame-index-scavenging is enabled, the virtual register
-  /// allocated for this frame index is returned and its value is stored in
-  /// *Value.
-  typedef std::pair<unsigned, int> FrameIndexValue;
-  virtual unsigned eliminateFrameIndex(MachineBasicBlock::iterator MI,
-                                       int SPAdj, FrameIndexValue *Value = NULL,
-                                       RegScavenger *RS=NULL) const = 0;
+  virtual void eliminateFrameIndex(MachineBasicBlock::iterator MI,
+                                   int SPAdj, RegScavenger *RS=NULL) const = 0;
 
   /// emitProlog/emitEpilog - These methods insert prolog and epilog code into
   /// the function.

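Targets picking up these hooks now use the simplified eliminateFrameIndex signature (no return value, no FrameIndexValue out-parameter) and opt in to virtual base registers explicitly; a sketch for a hypothetical MyRegisterInfo:

    bool MyRegisterInfo::requiresVirtualBaseRegisters(
        const MachineFunction &MF) const {
      return true;  // ask for the LocalStackAllocation pass to run
    }

    void MyRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                             int SPAdj,
                                             RegScavenger *RS) const {
      // Rewrite the MO_FrameIndex operand of *MI in place; nothing is
      // returned any more and no scavenged value is reported back.
    }
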
Modified: llvm/branches/wendling/eh/include/llvm/Target/TargetSchedule.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Target/TargetSchedule.td?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Target/TargetSchedule.td (original)
+++ llvm/branches/wendling/eh/include/llvm/Target/TargetSchedule.td Tue Oct 26 19:48:03 2010
@@ -22,6 +22,13 @@
 //  
 class FuncUnit;
 
+//===----------------------------------------------------------------------===//
+// Pipeline bypass / forwarding - These values specify the symbolic names of
+// pipeline bypasses which can be used to forward the results of instructions
+// to their uses.
+class Bypass;
+def NoBypass : Bypass;
+
 class ReservationKind<bits<1> val> {
   int Value = val;
 }
@@ -66,30 +73,58 @@
 // across all chip sets.  Thus a new chip set can be added without modifying
 // instruction information.
 //
-class InstrItinClass;
+// NumMicroOps represents the number of micro-operations that each instruction
+// in the class is decoded to. If the number is zero, then it means the
+// instruction can decode into a variable number of micro-ops and it must be
+// determined dynamically.
+//
+class InstrItinClass<int ops = 1> {
+  int NumMicroOps = ops;
+}
 def NoItinerary : InstrItinClass;
 
 //===----------------------------------------------------------------------===//
 // Instruction itinerary data - These values provide a runtime map of an 
 // instruction itinerary class (name) to its itinerary data.
 //
+// OperandCycles are optional "cycle counts" that specify the cycle after
+// instruction issue at which the values corresponding to specific operand
+// indices are defined or read. Bypasses are optional "pipeline forwarding
+// paths": if a def by an instruction is available on a specific bypass and
+// the use can read from the same bypass, the operand latency is reduced by one.
+//
+//  InstrItinData<IIC_iLoad_i , [InstrStage<1, [A9_Pipe1]>,
+//                               InstrStage<1, [A9_AGU]>],
+//                              [3, 1], [A9_LdBypass]>,
+//  InstrItinData<IIC_iMVNr   , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>],
+//                              [1, 1], [NoBypass, A9_LdBypass]>,
+//
+// In this example, an instruction of class IIC_iLoad_i reads its input on
+// cycle 1 (after issue) and the result of the load is available on cycle 3.
+// The result is available via the forwarding path A9_LdBypass. If it's used
+// by the first source operand of an instruction of the IIC_iMVNr class, then
+// the operand latency is reduced by 1.
 class InstrItinData<InstrItinClass Class, list<InstrStage> stages,
-                    list<int> operandcycles = []> {
+                    list<int> operandcycles = [],
+                    list<Bypass> bypasses = []> {
   InstrItinClass TheClass = Class;
   list<InstrStage> Stages = stages;
   list<int> OperandCycles = operandcycles;
+  list<Bypass> Bypasses = bypasses;
 }
 
 //===----------------------------------------------------------------------===//
 // Processor itineraries - These values represent the set of all itinerary
 // classes for a given chip set.
 //
-class ProcessorItineraries<list<FuncUnit> fu, list<InstrItinData> iid> {
+class ProcessorItineraries<list<FuncUnit> fu, list<Bypass> bp,
+                           list<InstrItinData> iid> {
   list<FuncUnit> FU = fu;
+  list<Bypass> BP = bp;
   list<InstrItinData> IID = iid;
 }
 
 // NoItineraries - A marker that can be used by processors without schedule
 // info.
-def NoItineraries : ProcessorItineraries<[], []>;
+def NoItineraries : ProcessorItineraries<[], [], []>;
 

Modified: llvm/branches/wendling/eh/include/llvm/Target/TargetSelect.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Target/TargetSelect.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Target/TargetSelect.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Target/TargetSelect.h Tue Oct 26 19:48:03 2010
@@ -16,7 +16,7 @@
 #ifndef LLVM_TARGET_TARGETSELECT_H
 #define LLVM_TARGET_TARGETSELECT_H
 
-#include "llvm/Config/config.h"
+#include "llvm/Config/llvm-config.h"
 
 extern "C" {
   // Declare all of the target-initialization functions that are available.
@@ -100,15 +100,22 @@
   /// It is legal for a client to make multiple calls to this function.
   inline bool InitializeNativeTarget() {
   // If we have a native target, initialize it to ensure it is linked in.
-#ifdef LLVM_NATIVE_ARCH
-#define DoInit2(TARG) \
-    LLVMInitialize ## TARG ## Info ();          \
-    LLVMInitialize ## TARG ()
-#define DoInit(T) DoInit2(T)
-    DoInit(LLVM_NATIVE_ARCH);
+#ifdef LLVM_NATIVE_TARGET
+    LLVM_NATIVE_TARGETINFO();
+    LLVM_NATIVE_TARGET();
+    return false;
+#else
+    return true;
+#endif
+  }  
+
+  /// InitializeNativeTargetAsmPrinter - The main program should call
+  /// this function to initialize the native target asm printer.
+  inline bool InitializeNativeTargetAsmPrinter() {
+  // If we have a native target, initialize the corresponding asm printer.
+#ifdef LLVM_NATIVE_ASMPRINTER
+    LLVM_NATIVE_ASMPRINTER();
     return false;
-#undef DoInit
-#undef DoInit2
 #else
     return true;
 #endif

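A client that emits native code now initializes the asm printer as a separate step; a minimal sketch of the updated usage:

    #include "llvm/Target/TargetSelect.h"

    int main() {
      llvm::InitializeNativeTarget();
      // New: needed by clients that emit machine code through the MC layer.
      llvm::InitializeNativeTargetAsmPrinter();
      // ... build a Module, ExecutionEngine, etc. as before ...
      return 0;
    }
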
Modified: llvm/branches/wendling/eh/include/llvm/Target/TargetSelectionDAG.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Target/TargetSelectionDAG.td?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Target/TargetSelectionDAG.td (original)
+++ llvm/branches/wendling/eh/include/llvm/Target/TargetSelectionDAG.td Tue Oct 26 19:48:03 2010
@@ -224,12 +224,19 @@
 def SDNPSideEffect  : SDNodeProperty;   // Sets 'HasUnmodelledSideEffects'.
 def SDNPMemOperand  : SDNodeProperty;   // Touches memory, has assoc MemOperand
 def SDNPVariadic    : SDNodeProperty;   // Node has variable arguments.
+def SDNPWantRoot    : SDNodeProperty;   // ComplexPattern gets the root of match
+def SDNPWantParent  : SDNodeProperty;   // ComplexPattern gets the parent
+
+//===----------------------------------------------------------------------===//
+// Selection DAG Pattern Operations
+class SDPatternOperator;
 
 //===----------------------------------------------------------------------===//
 // Selection DAG Node definitions.
 //
 class SDNode<string opcode, SDTypeProfile typeprof,
-             list<SDNodeProperty> props = [], string sdclass = "SDNode"> {
+             list<SDNodeProperty> props = [], string sdclass = "SDNode"> 
+             : SDPatternOperator {
   string Opcode  = opcode;
   string SDClass = sdclass;
   list<SDNodeProperty> Properties = props;
@@ -469,10 +476,10 @@
 //
 
 /// PatFrag - Represents a pattern fragment.  This can match something on the
-/// DAG, frame a single node to multiply nested other fragments.
+/// DAG, from a single node to multiple nested other fragments.
 ///
 class PatFrag<dag ops, dag frag, code pred = [{}],
-              SDNodeXForm xform = NOOP_SDNodeXForm> {
+              SDNodeXForm xform = NOOP_SDNodeXForm> : SDPatternOperator {
   dag Operands = ops;
   dag Fragment = frag;
   code Predicate = pred;
@@ -822,7 +829,7 @@
 //===----------------------------------------------------------------------===//
 // Selection DAG Pattern Support.
 //
-// Patterns are what are actually matched against the target-flavored
+// Patterns are what are actually matched against by the target-flavored
 // instruction selection DAG.  Instructions defined by the target implicitly
 // define patterns in most cases, but patterns can also be explicitly added when
 // an operation is defined by a sequence of instructions (e.g. loading a large
@@ -834,7 +841,7 @@
   dag             PatternToMatch  = patternToMatch;
   list<dag>       ResultInstrs    = resultInstrs;
   list<Predicate> Predicates      = [];  // See class Instruction in Target.td.
-  int             AddedComplexity = 0;  // See class Instruction in Target.td.
+  int             AddedComplexity = 0;   // See class Instruction in Target.td.
 }
 
 // Pat - A simple (but common) form of a pattern, which produces a simple result

Modified: llvm/branches/wendling/eh/include/llvm/Target/TargetSelectionDAGInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Target/TargetSelectionDAGInfo.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Target/TargetSelectionDAGInfo.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Target/TargetSelectionDAGInfo.h Tue Oct 26 19:48:03 2010
@@ -59,8 +59,8 @@
                           SDValue Op1, SDValue Op2,
                           SDValue Op3, unsigned Align, bool isVolatile,
                           bool AlwaysInline,
-                          const Value *DstSV, uint64_t DstOff,
-                          const Value *SrcSV, uint64_t SrcOff) const {
+                          MachinePointerInfo DstPtrInfo,
+                          MachinePointerInfo SrcPtrInfo) const {
     return SDValue();
   }
 
@@ -75,8 +75,8 @@
                            SDValue Chain,
                            SDValue Op1, SDValue Op2,
                            SDValue Op3, unsigned Align, bool isVolatile,
-                           const Value *DstSV, uint64_t DstOff,
-                           const Value *SrcSV, uint64_t SrcOff) const {
+                           MachinePointerInfo DstPtrInfo,
+                           MachinePointerInfo SrcPtrInfo) const {
     return SDValue();
   }
 
@@ -91,7 +91,7 @@
                           SDValue Chain,
                           SDValue Op1, SDValue Op2,
                           SDValue Op3, unsigned Align, bool isVolatile,
-                          const Value *DstSV, uint64_t DstOff) const {
+                          MachinePointerInfo DstPtrInfo) const {
     return SDValue();
   }
 };

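Callers of these hooks now package the pointer and offset into a MachinePointerInfo rather than passing raw Value*/offset pairs; a sketch, assuming the old DstSV/DstOff and SrcSV/SrcOff values are at hand:

    SDValue Result =
      TSI->EmitTargetCodeForMemcpy(DAG, dl, Chain, Op1, Op2, Op3,
                                   Align, isVolatile, AlwaysInline,
                                   MachinePointerInfo(DstSV, DstOff),
                                   MachinePointerInfo(SrcSV, SrcOff));
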
Modified: llvm/branches/wendling/eh/include/llvm/Transforms/IPO.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Transforms/IPO.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Transforms/IPO.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Transforms/IPO.h Tue Oct 26 19:48:03 2010
@@ -93,8 +93,7 @@
 /// possible, except for the global values specified.
 ///
 ModulePass *createGVExtractionPass(std::vector<GlobalValue*>& GVs, bool 
-                                   deleteFn = false, 
-                                   bool relinkCallees = false);
+                                   deleteFn = false);
 
 //===----------------------------------------------------------------------===//
 /// createFunctionInliningPass - Return a new pass object that uses a heuristic

Modified: llvm/branches/wendling/eh/include/llvm/Transforms/IPO/InlinerPass.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Transforms/IPO/InlinerPass.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Transforms/IPO/InlinerPass.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Transforms/IPO/InlinerPass.h Tue Oct 26 19:48:03 2010
@@ -30,8 +30,8 @@
 /// perform the inlining operations that do not depend on the policy.
 ///
 struct Inliner : public CallGraphSCCPass {
-  explicit Inliner(void *ID);
-  explicit Inliner(void *ID, int Threshold);
+  explicit Inliner(char &ID);
+  explicit Inliner(char &ID, int Threshold);
 
   /// getAnalysisUsage - For this class, we declare that we require and preserve
   /// the call graph.  If the derived class implements this method, it should

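A derived inliner now hands its identifier to the base class by reference rather than by address; a sketch mirroring the usual pattern:

    class SimpleInliner : public Inliner {
    public:
      static char ID;  // Pass identification.
      SimpleInliner() : Inliner(ID) {}
      explicit SimpleInliner(int Threshold) : Inliner(ID, Threshold) {}
    };
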
Modified: llvm/branches/wendling/eh/include/llvm/Transforms/Scalar.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Transforms/Scalar.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Transforms/Scalar.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Transforms/Scalar.h Tue Oct 26 19:48:03 2010
@@ -131,13 +131,6 @@
 
 //===----------------------------------------------------------------------===//
 //
-// LoopIndexSplit - This pass divides loop's iteration range by spliting loop
-// such that each individual loop is executed efficiently.
-//
-Pass *createLoopIndexSplitPass();
-
-//===----------------------------------------------------------------------===//
-//
 // PromoteMemoryToRegister - This pass is used to promote memory references to
 // be register references. A simple example of the transformation performed by
 // this pass is:
@@ -149,7 +142,6 @@
 //   ret i32 %Y
 //
 FunctionPass *createPromoteMemoryToRegisterPass();
-extern const PassInfo *const PromoteMemoryToRegisterID;
 
 //===----------------------------------------------------------------------===//
 //
@@ -158,7 +150,7 @@
 // hacking easier.
 //
 FunctionPass *createDemoteRegisterToMemoryPass();
-extern const PassInfo *const DemoteRegisterToMemoryID;
+extern char &DemoteRegisterToMemoryID;
 
 //===----------------------------------------------------------------------===//
 //
@@ -202,7 +194,7 @@
 // (set, immediate dominators, tree, and frontier) information.
 //
 FunctionPass *createBreakCriticalEdgesPass();
-extern const PassInfo *const BreakCriticalEdgesID;
+extern char &BreakCriticalEdgesID;
 
 //===----------------------------------------------------------------------===//
 //
@@ -213,7 +205,7 @@
 //   AU.addRequiredID(LoopSimplifyID);
 //
 Pass *createLoopSimplifyPass();
-extern const PassInfo *const LoopSimplifyID;
+extern char &LoopSimplifyID;
 
 //===----------------------------------------------------------------------===//
 //
@@ -228,7 +220,7 @@
 // chained binary branch instructions.
 //
 FunctionPass *createLowerSwitchPass();
-extern const PassInfo *const LowerSwitchID;
+extern char &LowerSwitchID;
 
 //===----------------------------------------------------------------------===//
 //
@@ -243,7 +235,7 @@
 FunctionPass *createLowerInvokePass(const TargetLowering *TLI = 0);
 FunctionPass *createLowerInvokePass(const TargetLowering *TLI,
                                     bool useExpensiveEHSupport);
-extern const PassInfo *const LowerInvokePassID;
+extern char &LowerInvokePassID;
 
 //===----------------------------------------------------------------------===//
 //
@@ -258,7 +250,7 @@
 // optimizations.
 //
 Pass *createLCSSAPass();
-extern const PassInfo *const LCSSAID;
+extern char &LCSSAID;
 
 //===----------------------------------------------------------------------===//
 //
@@ -304,39 +296,31 @@
 // InstructionNamer - Give any unnamed non-void instructions "tmp" names.
 //
 FunctionPass *createInstructionNamerPass();
-extern const PassInfo *const InstructionNamerID;
+extern char &InstructionNamerID;
   
 //===----------------------------------------------------------------------===//
 //
-// SSI - This pass converts instructions to Static Single Information form
-// on demand.
-//
-FunctionPass *createSSIPass();
-
-//===----------------------------------------------------------------------===//
-//
-// SSI - This pass converts every non-void instuction to Static Single
-// Information form.
+// GEPSplitter - Split complex GEPs into simple ones
 //
-FunctionPass *createSSIEverythingPass();
+FunctionPass *createGEPSplitterPass();
 
 //===----------------------------------------------------------------------===//
 //
-// GEPSplitter - Split complex GEPs into simple ones
+// Sink - Code Sinking
 //
-FunctionPass *createGEPSplitterPass();
+FunctionPass *createSinkingPass();
 
 //===----------------------------------------------------------------------===//
 //
-// ABCD - Elimination of Array Bounds Checks on Demand
+// LowerAtomic - Lower atomic intrinsics to non-atomic form
 //
-FunctionPass *createABCDPass();
+Pass *createLowerAtomicPass();
 
 //===----------------------------------------------------------------------===//
 //
-// Sink - Code Sinking
+// ValuePropagation - Propagate CFG-derived value information
 //
-FunctionPass *createSinkingPass();
+Pass *createCorrelatedValuePropagationPass();
 
 } // End llvm namespace
 

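Clients keep the same addRequiredID idiom with the char-based IDs; for example, in a hypothetical loop pass:

    void MyLoopPass::getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequiredID(LoopSimplifyID);  // now an extern char, not a PassInfo*
      AU.addRequiredID(LCSSAID);
    }
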
Modified: llvm/branches/wendling/eh/include/llvm/Transforms/Utils/Cloning.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Transforms/Utils/Cloning.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Transforms/Utils/Cloning.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Transforms/Utils/Cloning.h Tue Oct 26 19:48:03 2010
@@ -22,6 +22,7 @@
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/Twine.h"
 #include "llvm/Support/ValueHandle.h"
+#include "llvm/Transforms/Utils/ValueMapper.h"
 
 namespace llvm {
 
@@ -46,7 +47,7 @@
 /// CloneModule - Return an exact copy of the specified module
 ///
 Module *CloneModule(const Module *M);
-Module *CloneModule(const Module *M, ValueMap<const Value*, Value*> &VMap);
+Module *CloneModule(const Module *M, ValueToValueMapTy &VMap);
 
 /// ClonedCodeInfo - This struct can be used to capture information about code
 /// being cloned, while it is being cloned.
@@ -102,7 +103,7 @@
 /// parameter.
 ///
 BasicBlock *CloneBasicBlock(const BasicBlock *BB,
-                            ValueMap<const Value*, Value*> &VMap,
+                            ValueToValueMapTy &VMap,
                             const Twine &NameSuffix = "", Function *F = 0,
                             ClonedCodeInfo *CodeInfo = 0);
 
@@ -110,7 +111,7 @@
 /// CloneLoop - Clone Loop. Clone dominator info for loop insiders. Populate
 /// VMap using old blocks to new blocks mapping.
 Loop *CloneLoop(Loop *L, LPPassManager *LPM, LoopInfo *LI, 
-                ValueMap<const Value *, Value *> &VMap, Pass *P);
+                ValueToValueMapTy &VMap, Pass *P);
 
 /// CloneFunction - Return a copy of the specified function, but without
 /// embedding the function into another module.  Also, any references specified
@@ -121,25 +122,33 @@
 /// the function from their old to new values.  The final argument captures
 /// information about the cloned code if non-null.
 ///
+/// If ModuleLevelChanges is false, VMap contains no non-identity GlobalValue
+/// mappings.
+///
 Function *CloneFunction(const Function *F,
-                        ValueMap<const Value*, Value*> &VMap,
+                        ValueToValueMapTy &VMap,
+                        bool ModuleLevelChanges,
                         ClonedCodeInfo *CodeInfo = 0);
 
 /// CloneFunction - Version of the function that doesn't need the VMap.
 ///
 inline Function *CloneFunction(const Function *F, ClonedCodeInfo *CodeInfo = 0){
-  ValueMap<const Value*, Value*> VMap;
+  ValueToValueMapTy VMap;
   return CloneFunction(F, VMap, /*ModuleLevelChanges=*/false, CodeInfo);
 }
 
 /// Clone OldFunc into NewFunc, transforming the old arguments into references
-/// to ArgMap values.  Note that if NewFunc already has basic blocks, the ones
+/// to VMap values.  Note that if NewFunc already has basic blocks, the ones
 /// cloned into it will be added to the end of the function.  This function
 /// fills in a list of return instructions, and can optionally append the
 /// specified suffix to all values cloned.
 ///
+/// If ModuleLevelChanges is false, VMap contains no non-identity GlobalValue
+/// mappings.
+///
 void CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
-                       ValueMap<const Value*, Value*> &VMap,
+                       ValueToValueMapTy &VMap,
+                       bool ModuleLevelChanges,
                        SmallVectorImpl<ReturnInst*> &Returns,
                        const char *NameSuffix = "", 
                        ClonedCodeInfo *CodeInfo = 0);
@@ -151,8 +160,13 @@
 /// constant arguments cause a significant amount of code in the callee to be
 /// dead.  Since this doesn't produce an exact copy of the input, it can't be
 /// used for things like CloneFunction or CloneModule.
+///
+/// If ModuleLevelChanges is false, VMap contains no non-identity GlobalValue
+/// mappings.
+///
 void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
-                               ValueMap<const Value*, Value*> &VMap,
+                               ValueToValueMapTy &VMap,
+                               bool ModuleLevelChanges,
                                SmallVectorImpl<ReturnInst*> &Returns,
                                const char *NameSuffix = "", 
                                ClonedCodeInfo *CodeInfo = 0,

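With the new flag, intra-module cloning typically seeds VMap with the argument mapping and passes ModuleLevelChanges = false; a sketch, with OldF and NewF assumed in scope:

    ValueToValueMapTy VMap;
    Function::arg_iterator NewArg = NewF->arg_begin();
    for (Function::const_arg_iterator I = OldF->arg_begin(),
         E = OldF->arg_end(); I != E; ++I, ++NewArg)
      VMap[I] = NewArg;  // map each old argument to its new counterpart

    SmallVector<ReturnInst*, 8> Returns;
    CloneFunctionInto(NewF, OldF, VMap, /*ModuleLevelChanges=*/false, Returns);
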
Modified: llvm/branches/wendling/eh/include/llvm/Transforms/Utils/Local.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Transforms/Utils/Local.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Transforms/Utils/Local.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Transforms/Utils/Local.h Tue Oct 26 19:48:03 2010
@@ -69,6 +69,10 @@
 ///
 /// This returns true if it changed the code, note that it can delete
 /// instructions in other blocks as well in this block.
+///
+/// WARNING: Do not use this function on unreachable blocks, as recursive
+/// simplification is not able to handle corner-case scenarios that can
+/// arise in them.
 bool SimplifyInstructionsInBlock(BasicBlock *BB, const TargetData *TD = 0);
     
 //===----------------------------------------------------------------------===//
@@ -118,8 +122,6 @@
 /// of the CFG.  It returns true if a modification was made, possibly deleting
 /// the basic block that was pointed to.
 ///
-/// WARNING:  The entry node of a method may not be simplified.
-///
 bool SimplifyCFG(BasicBlock *BB, const TargetData *TD = 0);
 
 /// FoldBranchToCommonDest - If this basic block is ONLY a setcc and a branch,

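Per the new warning, a caller would skip unreachable blocks first, e.g. via the dominator tree; a sketch, assuming a DominatorTree *DT is available:

    if (DT->isReachableFromEntry(BB))
      SimplifyInstructionsInBlock(BB, TD);
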
Modified: llvm/branches/wendling/eh/include/llvm/Transforms/Utils/SSAUpdater.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Transforms/Utils/SSAUpdater.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Transforms/Utils/SSAUpdater.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Transforms/Utils/SSAUpdater.h Tue Oct 26 19:48:03 2010
@@ -36,9 +36,11 @@
   //typedef DenseMap<BasicBlock*, Value*> AvailableValsTy;
   void *AV;
 
-  /// PrototypeValue is an arbitrary representative value, which we derive names
-  /// and a type for PHI nodes.
-  Value *PrototypeValue;
+  /// ProtoType holds the type of the values being rewritten.
+  const Type *ProtoType;
+
+  /// PHI nodes are given a name based on ProtoName.
+  std::string ProtoName;
 
   /// InsertedPHIs - If this is non-null, the SSAUpdater adds all PHI nodes that
   /// it creates to the vector.
@@ -51,8 +53,8 @@
   ~SSAUpdater();
 
   /// Initialize - Reset this object to get ready for a new set of SSA
-  /// updates.  ProtoValue is the value used to name PHI nodes.
-  void Initialize(Value *ProtoValue);
+  /// updates with type 'Ty'.  PHI nodes get a name based on 'Name'.
+  void Initialize(const Type *Ty, StringRef Name);
 
   /// AddAvailableValue - Indicate that a rewritten value is available at the
   /// end of the specified block with the specified value.
@@ -94,6 +96,12 @@
   /// for the use's block will be considered to be below it.
   void RewriteUse(Use &U);
 
+  /// RewriteUseAfterInsertions - Rewrite a use, just like RewriteUse.  However,
+  /// this version of the method can rewrite uses in the same block as a
+  /// definition, because it assumes that all uses of a value are below any
+  /// inserted values.
+  void RewriteUseAfterInsertions(Use &U);
+
 private:
   Value *GetValueAtEndOfBlockInternal(BasicBlock *BB);
 

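The reworked Initialize takes the type and name directly instead of deriving them from a prototype value; a minimal sketch of the updated usage:

    SSAUpdater SSA;
    SSA.Initialize(Inst->getType(), Inst->getName());  // was: Initialize(Inst)
    SSA.AddAvailableValue(SomeBB, NewVal);
    SSA.RewriteUse(U);
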
Removed: llvm/branches/wendling/eh/include/llvm/Transforms/Utils/SSI.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Transforms/Utils/SSI.h?rev=117424&view=auto
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Transforms/Utils/SSI.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Transforms/Utils/SSI.h (removed)
@@ -1,93 +0,0 @@
-//===------------------- SSI.h - Creates SSI Representation -----*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This pass converts a list of variables to the Static Single Information
-// form. This is a program representation described by Scott Ananian in his
-// Master Thesis: "The Static Single Information Form (1999)".
-// We are building an on-demand representation, that is, we do not convert
-// every single variable in the target function to SSI form. Rather, we receive
-// a list of target variables that must be converted. We also do not
-// completely convert a target variable to the SSI format. Instead, we only
-// change the variable in the points where new information can be attached
-// to its live range, that is, at branch points.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TRANSFORMS_UTILS_SSI_H
-#define LLVM_TRANSFORMS_UTILS_SSI_H
-
-#include "llvm/InstrTypes.h"
-#include "llvm/Pass.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/SmallVector.h"
-
-namespace llvm {
-
-  class DominatorTree;
-  class PHINode;
-  class Instruction;
-  class CmpInst;
-
-  class SSI : public FunctionPass {
-    public:
-      static char ID; // Pass identification, replacement for typeid.
-      SSI() :
-        FunctionPass(&ID) {
-      }
-
-      void getAnalysisUsage(AnalysisUsage &AU) const;
-
-      bool runOnFunction(Function&);
-
-      void createSSI(SmallVectorImpl<Instruction *> &value);
-
-    private:
-      // Variables always live
-      DominatorTree *DT_;
-
-      // Stores variables created by SSI
-      SmallPtrSet<Instruction *, 16> created;
-
-      // Phis created by SSI
-      DenseMap<PHINode *, Instruction*> phis;
-
-      // Sigmas created by SSI
-      DenseMap<PHINode *, Instruction*> sigmas;
-
-      // Phi nodes that have a phi as operand and has to be fixed
-      SmallPtrSet<PHINode *, 1> phisToFix;
-
-      // List of definition points for every variable
-      DenseMap<Instruction*, SmallVector<BasicBlock*, 4> > defsites;
-
-      // Basic Block of the original definition of each variable
-      DenseMap<Instruction*, BasicBlock*> value_original;
-
-      // Stack of last seen definition of a variable
-      DenseMap<Instruction*, SmallVector<Instruction *, 1> > value_stack;
-
-      void insertSigmaFunctions(SmallPtrSet<Instruction*, 4> &value);
-      void insertSigma(TerminatorInst *TI, Instruction *I);
-      void insertPhiFunctions(SmallPtrSet<Instruction*, 4> &value);
-      void renameInit(SmallPtrSet<Instruction*, 4> &value);
-      void rename(BasicBlock *BB);
-
-      void substituteUse(Instruction *I);
-      bool dominateAny(BasicBlock *BB, Instruction *value);
-      void fixPhis();
-
-      Instruction* getPositionPhi(PHINode *PN);
-      Instruction* getPositionSigma(PHINode *PN);
-
-      void init(SmallVectorImpl<Instruction *> &value);
-      void clean();
-  };
-} // end namespace
-#endif

Modified: llvm/branches/wendling/eh/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h Tue Oct 26 19:48:03 2010
@@ -26,8 +26,10 @@
   BasicBlock *ReturnBlock, *UnwindBlock, *UnreachableBlock;
 public:
   static char ID; // Pass identification, replacement for typeid
-  UnifyFunctionExitNodes() : FunctionPass(&ID),
-                             ReturnBlock(0), UnwindBlock(0) {}
+  UnifyFunctionExitNodes() : FunctionPass(ID),
+                             ReturnBlock(0), UnwindBlock(0) {
+    initializeUnifyFunctionExitNodesPass(*PassRegistry::getPassRegistry());
+  }
 
   // We can preserve non-critical-edgeness when we unify function exit nodes
   virtual void getAnalysisUsage(AnalysisUsage &AU) const;

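The constructor change above follows the new registration idiom: the ID is passed by value and the pass registers itself explicitly; the general shape, for a hypothetical MyPass (initializeMyPassPass is the generated initializer for that name):

    struct MyPass : public FunctionPass {
      static char ID;
      MyPass() : FunctionPass(ID) {
        initializeMyPassPass(*PassRegistry::getPassRegistry());
      }
    };
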
Modified: llvm/branches/wendling/eh/include/llvm/Type.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Type.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Type.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Type.h Tue Oct 26 19:48:03 2010
@@ -12,7 +12,6 @@
 
 #include "llvm/AbstractTypeUser.h"
 #include "llvm/Support/Casting.h"
-#include "llvm/System/DataTypes.h"
 #include "llvm/ADT/GraphTraits.h"
 #include <string>
 #include <vector>
@@ -76,20 +75,20 @@
     PPC_FP128TyID,   ///<  5: 128 bit floating point type (two 64-bits)
     LabelTyID,       ///<  6: Labels
     MetadataTyID,    ///<  7: Metadata
+    X86_MMXTyID,     ///<  8: MMX vectors (64 bits)
 
     // Derived types... see DerivedTypes.h file...
     // Make sure FirstDerivedTyID stays up to date!!!
-    IntegerTyID,     ///<  8: Arbitrary bit width integers
-    FunctionTyID,    ///<  9: Functions
-    StructTyID,      ///< 10: Structures
-    UnionTyID,       ///< 11: Unions
+    IntegerTyID,     ///<  9: Arbitrary bit width integers
+    FunctionTyID,    ///< 10: Functions
+    StructTyID,      ///< 11: Structures
     ArrayTyID,       ///< 12: Arrays
     PointerTyID,     ///< 13: Pointers
     OpaqueTyID,      ///< 14: Opaque: type with unknown structure
     VectorTyID,      ///< 15: SIMD 'packed' format, or other vector type
 
     NumTypeIDs,                         // Must remain as last defined ID
-    LastPrimitiveTyID = MetadataTyID,
+    LastPrimitiveTyID = X86_MMXTyID,
     FirstDerivedTyID = IntegerTyID
   };
 
@@ -213,6 +212,9 @@
   bool isFloatingPointTy() const { return ID == FloatTyID || ID == DoubleTyID ||
       ID == X86_FP80TyID || ID == FP128TyID || ID == PPC_FP128TyID; }
 
+  /// isX86_MMXTy - Return true if this is X86 MMX.
+  bool isX86_MMXTy() const { return ID == X86_MMXTyID; }
+
   /// isFPOrFPVectorTy - Return true if this is a FP type or a vector of FP.
   ///
   bool isFPOrFPVectorTy() const;
@@ -243,10 +245,6 @@
   ///
   bool isStructTy() const { return ID == StructTyID; }
 
-  /// isUnionTy - True if this is an instance of UnionType.
-  ///
-  bool isUnionTy() const { return ID == UnionTyID; }
-
   /// isArrayTy - True if this is an instance of ArrayType.
   ///
   bool isArrayTy() const { return ID == ArrayTyID; }
@@ -306,7 +304,7 @@
   /// does not include vector types.
   ///
   inline bool isAggregateType() const {
-    return ID == StructTyID || ID == ArrayTyID || ID == UnionTyID;
+    return ID == StructTyID || ID == ArrayTyID;
   }
 
   /// isSized - Return true if it makes sense to take the size of this type.  To
@@ -315,12 +313,12 @@
   ///
   bool isSized() const {
     // If it's a primitive, it is always sized.
-    if (ID == IntegerTyID || isFloatingPointTy() || ID == PointerTyID)
+    if (ID == IntegerTyID || isFloatingPointTy() || ID == PointerTyID ||
+        ID == X86_MMXTyID)
       return true;
     // If it is not something that can have a size (e.g. a function or label),
     // it doesn't have a size.
-    if (ID != StructTyID && ID != ArrayTyID && ID != VectorTyID &&
-        ID != UnionTyID)
+    if (ID != StructTyID && ID != ArrayTyID && ID != VectorTyID)
       return false;
     // If it is something that can have a size and it's concrete, it definitely
     // has a size, otherwise we have to try harder to decide.
@@ -406,6 +404,7 @@
   static const Type *getX86_FP80Ty(LLVMContext &C);
   static const Type *getFP128Ty(LLVMContext &C);
   static const Type *getPPC_FP128Ty(LLVMContext &C);
+  static const Type *getX86_MMXTy(LLVMContext &C);
   static const IntegerType *getIntNTy(LLVMContext &C, unsigned N);
   static const IntegerType *getInt1Ty(LLVMContext &C);
   static const IntegerType *getInt8Ty(LLVMContext &C);
@@ -422,6 +421,7 @@
   static const PointerType *getX86_FP80PtrTy(LLVMContext &C, unsigned AS = 0);
   static const PointerType *getFP128PtrTy(LLVMContext &C, unsigned AS = 0);
   static const PointerType *getPPC_FP128PtrTy(LLVMContext &C, unsigned AS = 0);
+  static const PointerType *getX86_MMXPtrTy(LLVMContext &C, unsigned AS = 0);
   static const PointerType *getIntNPtrTy(LLVMContext &C, unsigned N,
                                          unsigned AS = 0);
   static const PointerType *getInt1PtrTy(LLVMContext &C, unsigned AS = 0);

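With MMX as a first-class type, code can test for it and construct it through the usual accessors; for example (a sketch; Context and V assumed in scope):

    const Type *MMXTy = Type::getX86_MMXTy(Context);
    if (V->getType()->isX86_MMXTy()) {
      // V is an MMX value; avoid synthesizing new operations on it.
    }
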
Modified: llvm/branches/wendling/eh/include/llvm/TypeSymbolTable.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/TypeSymbolTable.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/TypeSymbolTable.h (original)
+++ llvm/branches/wendling/eh/include/llvm/TypeSymbolTable.h Tue Oct 26 19:48:03 2010
@@ -16,12 +16,11 @@
 
 #include "llvm/Type.h"
 #include "llvm/ADT/StringRef.h"
+#include "llvm/System/DataTypes.h"
 #include <map>
 
 namespace llvm {
 
-class StringRef;
-
 /// This class provides a symbol table of name/type pairs with operations to
 /// support constructing, searching and iterating over the symbol table. The
 /// class derives from AbstractTypeUser so that the contents of the symbol

Modified: llvm/branches/wendling/eh/include/llvm/Value.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Value.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Value.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Value.h Tue Oct 26 19:48:03 2010
@@ -215,7 +215,6 @@
     ConstantFPVal,            // This is an instance of ConstantFP
     ConstantArrayVal,         // This is an instance of ConstantArray
     ConstantStructVal,        // This is an instance of ConstantStruct
-    ConstantUnionVal,         // This is an instance of ConstantUnion
     ConstantVectorVal,        // This is an instance of ConstantVector
     ConstantPointerNullVal,   // This is an instance of ConstantPointerNull
     MDNodeVal,                // This is an instance of MDNode

Modified: llvm/branches/wendling/eh/lib/Analysis/AliasAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/AliasAnalysis.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/AliasAnalysis.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/AliasAnalysis.cpp Tue Oct 26 19:48:03 2010
@@ -30,12 +30,13 @@
 #include "llvm/Function.h"
 #include "llvm/IntrinsicInst.h"
 #include "llvm/Instructions.h"
+#include "llvm/LLVMContext.h"
 #include "llvm/Type.h"
 #include "llvm/Target/TargetData.h"
 using namespace llvm;
 
 // Register the AliasAnalysis interface, providing a nice name to refer to.
-static RegisterAnalysisGroup<AliasAnalysis> Z("Alias Analysis");
+INITIALIZE_ANALYSIS_GROUP(AliasAnalysis, "Alias Analysis", NoAA)
 char AliasAnalysis::ID = 0;
 
 //===----------------------------------------------------------------------===//
@@ -43,15 +44,14 @@
 //===----------------------------------------------------------------------===//
 
 AliasAnalysis::AliasResult
-AliasAnalysis::alias(const Value *V1, unsigned V1Size,
-                     const Value *V2, unsigned V2Size) {
+AliasAnalysis::alias(const Location &LocA, const Location &LocB) {
   assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
-  return AA->alias(V1, V1Size, V2, V2Size);
+  return AA->alias(LocA, LocB);
 }
 
-bool AliasAnalysis::pointsToConstantMemory(const Value *P) {
+bool AliasAnalysis::pointsToConstantMemory(const Location &Loc) {
   assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
-  return AA->pointsToConstantMemory(P);
+  return AA->pointsToConstantMemory(Loc);
 }
 
 void AliasAnalysis::deleteValue(Value *V) {
@@ -65,99 +65,195 @@
 }
 
 AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(CallSite CS1, CallSite CS2) {
-  // FIXME: we can do better.
+AliasAnalysis::getModRefInfo(ImmutableCallSite CS,
+                             const Location &Loc) {
   assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
-  return AA->getModRefInfo(CS1, CS2);
-}
 
+  ModRefBehavior MRB = getModRefBehavior(CS);
+  if (MRB == DoesNotAccessMemory)
+    return NoModRef;
 
-//===----------------------------------------------------------------------===//
-// AliasAnalysis non-virtual helper method implementation
-//===----------------------------------------------------------------------===//
+  ModRefResult Mask = ModRef;
+  if (MRB == OnlyReadsMemory)
+    Mask = Ref;
+  else if (MRB == AliasAnalysis::AccessesArguments) {
+    bool doesAlias = false;
+    for (ImmutableCallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
+         AI != AE; ++AI)
+      if (!isNoAlias(Location(*AI), Loc)) {
+        doesAlias = true;
+        break;
+      }
 
-AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(LoadInst *L, Value *P, unsigned Size) {
-  return alias(L->getOperand(0), getTypeStoreSize(L->getType()),
-               P, Size) ? Ref : NoModRef;
+    if (!doesAlias)
+      return NoModRef;
+  }
+
+  // If Loc is a constant memory location, the call definitely could not
+  // modify the memory location.
+  if ((Mask & Mod) && pointsToConstantMemory(Loc))
+    Mask = ModRefResult(Mask & ~Mod);
+
+  // If this is the end of the chain, don't forward.
+  if (!AA) return Mask;
+
+  // Otherwise, fall back to the next AA in the chain. But we can merge
+  // in any mask we've managed to compute.
+  return ModRefResult(AA->getModRefInfo(CS, Loc) & Mask);
 }
 
 AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(StoreInst *S, Value *P, unsigned Size) {
-  // If the stored address cannot alias the pointer in question, then the
-  // pointer cannot be modified by the store.
-  if (!alias(S->getOperand(1),
-             getTypeStoreSize(S->getOperand(0)->getType()), P, Size))
+AliasAnalysis::getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) {
+  assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
+
+  // If CS1 or CS2 are readnone, they don't interact.
+  ModRefBehavior CS1B = getModRefBehavior(CS1);
+  if (CS1B == DoesNotAccessMemory) return NoModRef;
+
+  ModRefBehavior CS2B = getModRefBehavior(CS2);
+  if (CS2B == DoesNotAccessMemory) return NoModRef;
+
+  // If they both only read from memory, there is no dependence.
+  if (CS1B == OnlyReadsMemory && CS2B == OnlyReadsMemory)
     return NoModRef;
 
-  // If the pointer is a pointer to constant memory, then it could not have been
-  // modified by this store.
-  return pointsToConstantMemory(P) ? NoModRef : Mod;
+  AliasAnalysis::ModRefResult Mask = ModRef;
+
+  // If CS1 only reads memory, the only dependence on CS2 can be
+  // from CS1 reading memory written by CS2.
+  if (CS1B == OnlyReadsMemory)
+    Mask = ModRefResult(Mask & Ref);
+
+  // If CS2 only accesses memory through arguments, accumulate the mod/ref
+  // information from CS1's references to the memory referenced by
+  // CS2's arguments.
+  if (CS2B == AccessesArguments) {
+    AliasAnalysis::ModRefResult R = NoModRef;
+    for (ImmutableCallSite::arg_iterator
+         I = CS2.arg_begin(), E = CS2.arg_end(); I != E; ++I) {
+      R = ModRefResult((R | getModRefInfo(CS1, *I, UnknownSize)) & Mask);
+      if (R == Mask)
+        break;
+    }
+    return R;
+  }
+
+  // If CS1 only accesses memory through arguments, check if CS2 references
+  // any of the memory referenced by CS1's arguments. If not, return NoModRef.
+  if (CS1B == AccessesArguments) {
+    AliasAnalysis::ModRefResult R = NoModRef;
+    for (ImmutableCallSite::arg_iterator
+         I = CS1.arg_begin(), E = CS1.arg_end(); I != E; ++I)
+      if (getModRefInfo(CS2, *I, UnknownSize) != NoModRef) {
+        R = Mask;
+        break;
+      }
+    if (R == NoModRef)
+      return R;
+  }
+
+  // If this is the end of the chain, don't forward.
+  if (!AA) return Mask;
+
+  // Otherwise, fall back to the next AA in the chain. But we can merge
+  // in any mask we've managed to compute.
+  return ModRefResult(AA->getModRefInfo(CS1, CS2) & Mask);
 }
 
 AliasAnalysis::ModRefBehavior
-AliasAnalysis::getModRefBehavior(CallSite CS,
-                                 std::vector<PointerAccessInfo> *Info) {
-  if (CS.doesNotAccessMemory())
-    // Can't do better than this.
-    return DoesNotAccessMemory;
-  ModRefBehavior MRB = getModRefBehavior(CS.getCalledFunction(), Info);
-  if (MRB != DoesNotAccessMemory && CS.onlyReadsMemory())
-    return OnlyReadsMemory;
-  return MRB;
+AliasAnalysis::getModRefBehavior(ImmutableCallSite CS) {
+  assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
+
+  ModRefBehavior Min = UnknownModRefBehavior;
+
+  // Call back into the alias analysis with the other form of getModRefBehavior
+  // to see if it can give a better response.
+  if (const Function *F = CS.getCalledFunction())
+    Min = getModRefBehavior(F);
+
+  // If this is the end of the chain, don't forward.
+  if (!AA) return Min;
+
+  // Otherwise, fall back to the next AA in the chain. But we can merge
+  // in any result we've managed to compute.
+  return std::min(AA->getModRefBehavior(CS), Min);
 }
 
 AliasAnalysis::ModRefBehavior
-AliasAnalysis::getModRefBehavior(Function *F,
-                                 std::vector<PointerAccessInfo> *Info) {
-  if (F) {
-    if (F->doesNotAccessMemory())
-      // Can't do better than this.
-      return DoesNotAccessMemory;
-    if (F->onlyReadsMemory())
-      return OnlyReadsMemory;
-    if (unsigned id = F->getIntrinsicID())
-      return getModRefBehavior(id);
-  }
-  return UnknownModRefBehavior;
+AliasAnalysis::getModRefBehavior(const Function *F) {
+  assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
+  return AA->getModRefBehavior(F);
 }
 
-AliasAnalysis::ModRefBehavior AliasAnalysis::getModRefBehavior(unsigned iid) {
-#define GET_INTRINSIC_MODREF_BEHAVIOR
-#include "llvm/Intrinsics.gen"
-#undef GET_INTRINSIC_MODREF_BEHAVIOR
+//===----------------------------------------------------------------------===//
+// AliasAnalysis non-virtual helper method implementation
+//===----------------------------------------------------------------------===//
+
+AliasAnalysis::ModRefResult
+AliasAnalysis::getModRefInfo(const LoadInst *L, const Location &Loc) {
+  // Be conservative in the face of volatile.
+  if (L->isVolatile())
+    return ModRef;
+
+  // If the load address doesn't alias the given address, it doesn't read
+  // or write the specified memory.
+  if (!alias(Location(L->getOperand(0),
+                      getTypeStoreSize(L->getType()),
+                      L->getMetadata(LLVMContext::MD_tbaa)),
+             Loc))
+    return NoModRef;
+
+  // Otherwise, a load just reads.
+  return Ref;
 }
 
 AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(CallSite CS, Value *P, unsigned Size) {
-  ModRefBehavior MRB = getModRefBehavior(CS);
-  if (MRB == DoesNotAccessMemory)
+AliasAnalysis::getModRefInfo(const StoreInst *S, const Location &Loc) {
+  // Be conservative in the face of volatile.
+  if (S->isVolatile())
+    return ModRef;
+
+  // If the store address cannot alias the pointer in question, then the
+  // specified memory cannot be modified by the store.
+  if (!alias(Location(S->getOperand(1),
+                      getTypeStoreSize(S->getOperand(0)->getType()),
+                      S->getMetadata(LLVMContext::MD_tbaa)),
+             Loc))
     return NoModRef;
-  
-  ModRefResult Mask = ModRef;
-  if (MRB == OnlyReadsMemory)
-    Mask = Ref;
-  else if (MRB == AliasAnalysis::AccessesArguments) {
-    bool doesAlias = false;
-    for (CallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
-         AI != AE; ++AI)
-      if (!isNoAlias(*AI, ~0U, P, Size)) {
-        doesAlias = true;
-        break;
-      }
 
-    if (!doesAlias)
-      return NoModRef;
-  }
+  // If the pointer is a pointer to constant memory, then it could not have been
+  // modified by this store.
+  if (pointsToConstantMemory(Loc))
+    return NoModRef;
 
-  if (!AA) return Mask;
+  // Otherwise, a store just writes.
+  return Mod;
+}
 
-  // If P points to a constant memory location, the call definitely could not
-  // modify the memory location.
-  if ((Mask & Mod) && AA->pointsToConstantMemory(P))
-    Mask = ModRefResult(Mask & ~Mod);
+AliasAnalysis::ModRefResult
+AliasAnalysis::getModRefInfo(const VAArgInst *V, const Location &Loc) {
+  // If the va_arg address cannot alias the pointer in question, then the
+  // specified memory cannot be accessed by the va_arg.
+  if (!alias(Location(V->getOperand(0),
+                      UnknownSize,
+                      V->getMetadata(LLVMContext::MD_tbaa)),
+             Loc))
+    return NoModRef;
+
+  // If the pointer is a pointer to constant memory, then it cannot be
+  // modified by this va_arg.
+  if (pointsToConstantMemory(Loc))
+    return NoModRef;
 
-  return ModRefResult(Mask & AA->getModRefInfo(CS, P, Size));
+  // Otherwise, a va_arg reads and writes.
+  return ModRef;
+}
+
+AliasAnalysis::ModRefBehavior
+AliasAnalysis::getIntrinsicModRefBehavior(unsigned iid) {
+#define GET_INTRINSIC_MODREF_BEHAVIOR
+#include "llvm/Intrinsics.gen"
+#undef GET_INTRINSIC_MODREF_BEHAVIOR
 }
 
 // AliasAnalysis destructor: DO NOT move this to the header file for
@@ -184,16 +280,16 @@
 /// getTypeStoreSize - Return the TargetData store size for the given type,
 /// if known, or a conservative value otherwise.
 ///
-unsigned AliasAnalysis::getTypeStoreSize(const Type *Ty) {
-  return TD ? TD->getTypeStoreSize(Ty) : ~0u;
+uint64_t AliasAnalysis::getTypeStoreSize(const Type *Ty) {
+  return TD ? TD->getTypeStoreSize(Ty) : UnknownSize;
 }
 
 /// canBasicBlockModify - Return true if it is possible for execution of the
 /// specified basic block to modify the value pointed to by Ptr.
 ///
 bool AliasAnalysis::canBasicBlockModify(const BasicBlock &BB,
-                                        const Value *Ptr, unsigned Size) {
-  return canInstructionRangeModify(BB.front(), BB.back(), Ptr, Size);
+                                        const Location &Loc) {
+  return canInstructionRangeModify(BB.front(), BB.back(), Loc);
 }
 
 /// canInstructionRangeModify - Return true if it is possible for the execution
@@ -203,15 +299,15 @@
 ///
 bool AliasAnalysis::canInstructionRangeModify(const Instruction &I1,
                                               const Instruction &I2,
-                                              const Value *Ptr, unsigned Size) {
+                                              const Location &Loc) {
   assert(I1.getParent() == I2.getParent() &&
          "Instructions not in same basic block!");
-  BasicBlock::iterator I = const_cast<Instruction*>(&I1);
-  BasicBlock::iterator E = const_cast<Instruction*>(&I2);
+  BasicBlock::const_iterator I = &I1;
+  BasicBlock::const_iterator E = &I2;
   ++E;  // Convert from inclusive to exclusive range.
 
   for (; I != E; ++I) // Check every instruction in range
-    if (getModRefInfo(I, const_cast<Value*>(Ptr), Size) & Mod)
+    if (getModRefInfo(I, Loc) & Mod)
       return true;
   return false;
 }
@@ -220,7 +316,7 @@
 /// function.
 bool llvm::isNoAliasCall(const Value *V) {
   if (isa<CallInst>(V) || isa<InvokeInst>(V))
-    return CallSite(const_cast<Instruction*>(cast<Instruction>(V)))
+    return ImmutableCallSite(cast<Instruction>(V))
       .paramHasAttr(0, Attribute::NoAlias);
   return false;
 }
@@ -243,9 +339,3 @@
     return A->hasNoAliasAttr() || A->hasByValAttr();
   return false;
 }
-
-// Because of the way .a files work, we must force the BasicAA implementation to
-// be pulled in if the AliasAnalysis classes are pulled in.  Otherwise we run
-// the risk of AliasAnalysis being used, but the default implementation not
-// being linked into the tool that uses it.
-DEFINING_FILE_FOR(AliasAnalysis)

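The hunks above replace the old (Value*, unsigned) queries with
Location-based ones that carry a size and a TBAA tag. A minimal sketch,
not part of the patch, of a client using only APIs visible in this diff
(AliasAnalysis::Location and the LoadInst overload of getModRefInfo); the
helper name is hypothetical:

    #include "llvm/Analysis/AliasAnalysis.h"
    #include "llvm/Instructions.h"
    using namespace llvm;

    // Ask whether a load may read the memory described by (Ptr, Size, Tag).
    static bool loadMayReadLocation(AliasAnalysis &AA, const LoadInst *LI,
                                    const Value *Ptr, uint64_t Size,
                                    const MDNode *TBAATag) {
      // Carrying the TBAA tag lets a TBAA-aware analysis in the chain
      // refine the answer.
      AliasAnalysis::Location Loc(Ptr, Size, TBAATag);
      // Ref (or ModRef) means the load may read the location.
      return AA.getModRefInfo(LI, Loc) & AliasAnalysis::Ref;
    }
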
Modified: llvm/branches/wendling/eh/lib/Analysis/AliasAnalysisCounter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/AliasAnalysisCounter.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/AliasAnalysisCounter.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/AliasAnalysisCounter.cpp Tue Oct 26 19:48:03 2010
@@ -34,7 +34,8 @@
     Module *M;
   public:
     static char ID; // Class identification, replacement for typeinfo
-    AliasAnalysisCounter() : ModulePass(&ID) {
+    AliasAnalysisCounter() : ModulePass(ID) {
+      initializeAliasAnalysisCounterPass(*PassRegistry::getPassRegistry());
       No = May = Must = 0;
       NoMR = JustRef = JustMod = MR = 0;
     }
@@ -87,24 +88,25 @@
     /// an analysis interface through multiple inheritance.  If needed, it
     /// should override this to adjust the this pointer as needed for the
     /// specified pass info.
-    virtual void *getAdjustedAnalysisPointer(const PassInfo *PI) {
-      if (PI->isPassID(&AliasAnalysis::ID))
+    virtual void *getAdjustedAnalysisPointer(AnalysisID PI) {
+      if (PI == &AliasAnalysis::ID)
         return (AliasAnalysis*)this;
       return this;
     }
     
     // FIXME: We could count these too...
-    bool pointsToConstantMemory(const Value *P) {
-      return getAnalysis<AliasAnalysis>().pointsToConstantMemory(P);
+    bool pointsToConstantMemory(const Location &Loc) {
+      return getAnalysis<AliasAnalysis>().pointsToConstantMemory(Loc);
     }
 
     // Forwarding functions: just delegate to a real AA implementation, counting
     // the number of responses...
-    AliasResult alias(const Value *V1, unsigned V1Size,
-                      const Value *V2, unsigned V2Size);
+    AliasResult alias(const Location &LocA, const Location &LocB);
 
-    ModRefResult getModRefInfo(CallSite CS, Value *P, unsigned Size);
-    ModRefResult getModRefInfo(CallSite CS1, CallSite CS2) {
+    ModRefResult getModRefInfo(ImmutableCallSite CS,
+                               const Location &Loc);
+    ModRefResult getModRefInfo(ImmutableCallSite CS1,
+                               ImmutableCallSite CS2) {
       return AliasAnalysis::getModRefInfo(CS1,CS2);
     }
   };
@@ -112,16 +114,15 @@
 
 char AliasAnalysisCounter::ID = 0;
 INITIALIZE_AG_PASS(AliasAnalysisCounter, AliasAnalysis, "count-aa",
-                   "Count Alias Analysis Query Responses", false, true, false);
+                   "Count Alias Analysis Query Responses", false, true, false)
 
 ModulePass *llvm::createAliasAnalysisCounterPass() {
   return new AliasAnalysisCounter();
 }
 
 AliasAnalysis::AliasResult
-AliasAnalysisCounter::alias(const Value *V1, unsigned V1Size,
-                            const Value *V2, unsigned V2Size) {
-  AliasResult R = getAnalysis<AliasAnalysis>().alias(V1, V1Size, V2, V2Size);
+AliasAnalysisCounter::alias(const Location &LocA, const Location &LocB) {
+  AliasResult R = getAnalysis<AliasAnalysis>().alias(LocA, LocB);
 
   const char *AliasString;
   switch (R) {
@@ -133,11 +134,11 @@
 
   if (PrintAll || (PrintAllFailures && R == MayAlias)) {
     errs() << AliasString << ":\t";
-    errs() << "[" << V1Size << "B] ";
-    WriteAsOperand(errs(), V1, true, M);
+    errs() << "[" << LocA.Size << "B] ";
+    WriteAsOperand(errs(), LocA.Ptr, true, M);
     errs() << ", ";
-    errs() << "[" << V2Size << "B] ";
-    WriteAsOperand(errs(), V2, true, M);
+    errs() << "[" << LocB.Size << "B] ";
+    WriteAsOperand(errs(), LocB.Ptr, true, M);
     errs() << "\n";
   }
 
@@ -145,8 +146,9 @@
 }
 
 AliasAnalysis::ModRefResult
-AliasAnalysisCounter::getModRefInfo(CallSite CS, Value *P, unsigned Size) {
-  ModRefResult R = getAnalysis<AliasAnalysis>().getModRefInfo(CS, P, Size);
+AliasAnalysisCounter::getModRefInfo(ImmutableCallSite CS,
+                                    const Location &Loc) {
+  ModRefResult R = getAnalysis<AliasAnalysis>().getModRefInfo(CS, Loc);
 
   const char *MRString;
   switch (R) {
@@ -159,8 +161,8 @@
 
   if (PrintAll || (PrintAllFailures && R == ModRef)) {
     errs() << MRString << ":  Ptr: ";
-    errs() << "[" << Size << "B] ";
-    WriteAsOperand(errs(), P, true, M);
+    errs() << "[" << Loc.Size << "B] ";
+    WriteAsOperand(errs(), Loc.Ptr, true, M);
     errs() << "\t<->" << *CS.getInstruction() << '\n';
   }
   return R;

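AliasAnalysisCounter interposes on the AliasAnalysis analysis group
(registered as -count-aa above), forwarding each query to the next
implementation in the chain and tallying the responses, e.g. under
"opt -basicaa -count-aa -aa-eval". A sketch, not part of the patch and
with a hypothetical pass body, of a client the counter would sit in
front of:

    #include "llvm/Analysis/AliasAnalysis.h"
    #include "llvm/Pass.h"
    using namespace llvm;

    namespace {
      struct AAClient : public FunctionPass {
        static char ID;
        AAClient() : FunctionPass(ID) {}
        virtual void getAnalysisUsage(AnalysisUsage &AU) const {
          AU.addRequired<AliasAnalysis>(); // resolved through the AA group
          AU.setPreservesAll();
        }
        virtual bool runOnFunction(Function &F) {
          // With -count-aa scheduled, this resolves to the counter, which
          // forwards to the underlying implementation and counts replies.
          AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
          (void)AA; // queries against AA would be tallied here
          return false;
        }
      };
    }

    char AAClient::ID = 0;
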
Modified: llvm/branches/wendling/eh/lib/Analysis/AliasAnalysisEvaluator.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/AliasAnalysisEvaluator.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/AliasAnalysisEvaluator.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/AliasAnalysisEvaluator.cpp Tue Oct 26 19:48:03 2010
@@ -50,7 +50,9 @@
 
   public:
     static char ID; // Pass identification, replacement for typeid
-    AAEval() : FunctionPass(&ID) {}
+    AAEval() : FunctionPass(ID) {
+      initializeAAEvalPass(*PassRegistry::getPassRegistry());
+    }
 
     virtual void getAnalysisUsage(AnalysisUsage &AU) const {
       AU.addRequired<AliasAnalysis>();
@@ -74,8 +76,11 @@
 }
 
 char AAEval::ID = 0;
-INITIALIZE_PASS(AAEval, "aa-eval",
-                "Exhaustive Alias Analysis Precision Evaluator", false, true);
+INITIALIZE_PASS_BEGIN(AAEval, "aa-eval",
+                "Exhaustive Alias Analysis Precision Evaluator", false, true)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_END(AAEval, "aa-eval",
+                "Exhaustive Alias Analysis Precision Evaluator", false, true)
 
 FunctionPass *llvm::createAAEvalPass() { return new AAEval(); }
 
@@ -107,6 +112,15 @@
   }
 }
 
+static inline void
+PrintModRefResults(const char *Msg, bool P, CallSite CSA, CallSite CSB,
+                   Module *M) {
+  if (P) {
+    errs() << "  " << Msg << ": " << *CSA.getInstruction()
+           << " <-> " << *CSB.getInstruction() << '\n';
+  }
+}
+
 static inline bool isInterestingPointer(Value *V) {
   return V->getType()->isPointerTy()
       && !isa<ConstantPointerNull>(V);
@@ -154,12 +168,12 @@
   // iterate over the worklist, and run the full (n^2)/2 disambiguations
   for (SetVector<Value *>::iterator I1 = Pointers.begin(), E = Pointers.end();
        I1 != E; ++I1) {
-    unsigned I1Size = ~0u;
+    uint64_t I1Size = AliasAnalysis::UnknownSize;
     const Type *I1ElTy = cast<PointerType>((*I1)->getType())->getElementType();
     if (I1ElTy->isSized()) I1Size = AA.getTypeStoreSize(I1ElTy);
 
     for (SetVector<Value *>::iterator I2 = Pointers.begin(); I2 != I1; ++I2) {
-      unsigned I2Size = ~0u;
+      uint64_t I2Size = AliasAnalysis::UnknownSize;
       const Type *I2ElTy =cast<PointerType>((*I2)->getType())->getElementType();
       if (I2ElTy->isSized()) I2Size = AA.getTypeStoreSize(I2ElTy);
 
@@ -186,7 +200,7 @@
 
     for (SetVector<Value *>::iterator V = Pointers.begin(), Ve = Pointers.end();
          V != Ve; ++V) {
-      unsigned Size = ~0u;
+      uint64_t Size = AliasAnalysis::UnknownSize;
       const Type *ElTy = cast<PointerType>((*V)->getType())->getElementType();
       if (ElTy->isSized()) Size = AA.getTypeStoreSize(ElTy);
 
@@ -195,13 +209,13 @@
         PrintModRefResults("NoModRef", PrintNoModRef, I, *V, F.getParent());
         ++NoModRef; break;
       case AliasAnalysis::Mod:
-        PrintModRefResults("     Mod", PrintMod, I, *V, F.getParent());
+        PrintModRefResults("Just Mod", PrintMod, I, *V, F.getParent());
         ++Mod; break;
       case AliasAnalysis::Ref:
-        PrintModRefResults("     Ref", PrintRef, I, *V, F.getParent());
+        PrintModRefResults("Just Ref", PrintRef, I, *V, F.getParent());
         ++Ref; break;
       case AliasAnalysis::ModRef:
-        PrintModRefResults("  ModRef", PrintModRef, I, *V, F.getParent());
+        PrintModRefResults("Both ModRef", PrintModRef, I, *V, F.getParent());
         ++ModRef; break;
       default:
         errs() << "Unknown alias query result!\n";
@@ -209,6 +223,29 @@
     }
   }
 
+  // Mod/ref alias analysis: compare all pairs of calls
+  for (SetVector<CallSite>::iterator C = CallSites.begin(),
+         Ce = CallSites.end(); C != Ce; ++C) {
+    for (SetVector<CallSite>::iterator D = CallSites.begin(); D != Ce; ++D) {
+      if (D == C)
+        continue;
+      switch (AA.getModRefInfo(*C, *D)) {
+      case AliasAnalysis::NoModRef:
+        PrintModRefResults("NoModRef", PrintNoModRef, *C, *D, F.getParent());
+        ++NoModRef; break;
+      case AliasAnalysis::Mod:
+        PrintModRefResults("Just Mod", PrintMod, *C, *D, F.getParent());
+        ++Mod; break;
+      case AliasAnalysis::Ref:
+        PrintModRefResults("Just Ref", PrintRef, *C, *D, F.getParent());
+        ++Ref; break;
+      case AliasAnalysis::ModRef:
+        PrintModRefResults("Both ModRef", PrintModRef, *C, *D, F.getParent());
+        ++ModRef; break;
+      }
+    }
+  }
+
   return false;
 }
 

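The new loop above rounds out the evaluator: besides pointer/pointer and
call-site/pointer queries, it now exercises every ordered pair of call
sites. getModRefInfo(CS1, CS2) is directional, reporting whether CS1 may
read or write memory accessed by CS2, so (C, D) and (D, C) are distinct
queries. A sketch, not part of the patch and with a hypothetical helper
name, of interpreting the four possible results:

    #include "llvm/Analysis/AliasAnalysis.h"
    #include "llvm/Support/CallSite.h"
    using namespace llvm;

    static const char *describeCallPair(AliasAnalysis &AA,
                                        ImmutableCallSite CS1,
                                        ImmutableCallSite CS2) {
      switch (AA.getModRefInfo(CS1, CS2)) {
      case AliasAnalysis::NoModRef:
        return "CS1 neither reads nor writes memory accessed by CS2";
      case AliasAnalysis::Ref:
        return "CS1 may read memory accessed by CS2";
      case AliasAnalysis::Mod:
        return "CS1 may write memory accessed by CS2";
      default: // ModRef
        return "CS1 may both read and write memory accessed by CS2";
      }
    }
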
Modified: llvm/branches/wendling/eh/lib/Analysis/AliasDebugger.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/AliasDebugger.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/AliasDebugger.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/AliasDebugger.cpp Tue Oct 26 19:48:03 2010
@@ -39,7 +39,9 @@
     
   public:
     static char ID; // Class identification, replacement for typeinfo
-    AliasDebugger() : ModulePass(&ID) {}
+    AliasDebugger() : ModulePass(ID) {
+      initializeAliasDebuggerPass(*PassRegistry::getPassRegistry());
+    }
 
     bool runOnModule(Module &M) {
       InitializeAliasAnalysis(this);                 // set up super class
@@ -83,8 +85,8 @@
     /// an analysis interface through multiple inheritance.  If needed, it
     /// should override this to adjust the this pointer as needed for the
     /// specified pass info.
-    virtual void *getAdjustedAnalysisPointer(const PassInfo *PI) {
-      if (PI->isPassID(&AliasAnalysis::ID))
+    virtual void *getAdjustedAnalysisPointer(AnalysisID PI) {
+      if (PI == &AliasAnalysis::ID)
         return (AliasAnalysis*)this;
       return this;
     }
@@ -92,25 +94,28 @@
     //------------------------------------------------
     // Implement the AliasAnalysis API
     //
-    AliasResult alias(const Value *V1, unsigned V1Size,
-                      const Value *V2, unsigned V2Size) {
-      assert(Vals.find(V1) != Vals.end() && "Never seen value in AA before");
-      assert(Vals.find(V2) != Vals.end() && "Never seen value in AA before");    
-      return AliasAnalysis::alias(V1, V1Size, V2, V2Size);
-    }
-
-    ModRefResult getModRefInfo(CallSite CS, Value *P, unsigned Size) {
-      assert(Vals.find(P) != Vals.end() && "Never seen value in AA before");
-      return AliasAnalysis::getModRefInfo(CS, P, Size);
+    AliasResult alias(const Location &LocA, const Location &LocB) {
+      assert(Vals.find(LocA.Ptr) != Vals.end() &&
+             "Never seen value in AA before");
+      assert(Vals.find(LocB.Ptr) != Vals.end() &&
+             "Never seen value in AA before");
+      return AliasAnalysis::alias(LocA, LocB);
+    }
+
+    ModRefResult getModRefInfo(ImmutableCallSite CS,
+                               const Location &Loc) {
+      assert(Vals.find(Loc.Ptr) != Vals.end() && "Never seen value in AA before");
+      return AliasAnalysis::getModRefInfo(CS, Loc);
     }
 
-    ModRefResult getModRefInfo(CallSite CS1, CallSite CS2) {
+    ModRefResult getModRefInfo(ImmutableCallSite CS1,
+                               ImmutableCallSite CS2) {
       return AliasAnalysis::getModRefInfo(CS1,CS2);
     }
     
-    bool pointsToConstantMemory(const Value *P) {
-      assert(Vals.find(P) != Vals.end() && "Never seen value in AA before");
-      return AliasAnalysis::pointsToConstantMemory(P);
+    bool pointsToConstantMemory(const Location &Loc) {
+      assert(Vals.find(Loc.Ptr) != Vals.end() && "Never seen value in AA before");
+      return AliasAnalysis::pointsToConstantMemory(Loc);
     }
 
     virtual void deleteValue(Value *V) {
@@ -127,7 +132,7 @@
 
 char AliasDebugger::ID = 0;
 INITIALIZE_AG_PASS(AliasDebugger, AliasAnalysis, "debug-aa",
-                   "AA use debugger", false, true, false);
+                   "AA use debugger", false, true, false)
 
 Pass *llvm::createAliasDebugger() { return new AliasDebugger(); }
 

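-debug-aa asserts that every pointer reaching an alias query was already
known when the pass scanned the module, which catches transforms that
create or delete values without telling AliasAnalysis. A sketch, not part
of the patch and with a hypothetical helper, of the bookkeeping calls a
transform should make:

    #include "llvm/Analysis/AliasAnalysis.h"
    #include "llvm/Instructions.h"
    using namespace llvm;

    // Replace Old with New (assumed to have the same type) while keeping
    // chained alias analyses consistent.
    static void replaceAndForget(AliasAnalysis &AA, Instruction *Old,
                                 Instruction *New) {
      AA.copyValue(Old, New);   // New inherits Old's alias information.
      Old->replaceAllUsesWith(New);
      AA.deleteValue(Old);      // Forget Old before erasing it.
      Old->eraseFromParent();
    }
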
Modified: llvm/branches/wendling/eh/lib/Analysis/AliasSetTracker.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/AliasSetTracker.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/AliasSetTracker.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/AliasSetTracker.cpp Tue Oct 26 19:48:03 2010
@@ -15,6 +15,7 @@
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Instructions.h"
 #include "llvm/IntrinsicInst.h"
+#include "llvm/LLVMContext.h"
 #include "llvm/Pass.h"
 #include "llvm/Type.h"
 #include "llvm/Target/TargetData.h"
@@ -22,7 +23,6 @@
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/InstIterator.h"
-#include "llvm/Support/Format.h"
 #include "llvm/Support/raw_ostream.h"
 using namespace llvm;
 
@@ -35,6 +35,7 @@
   // Update the alias and access types of this set...
   AccessTy |= AS.AccessTy;
   AliasTy  |= AS.AliasTy;
+  Volatile |= AS.Volatile;
 
   if (AliasTy == MustAlias) {
     // Check that these two merged sets really are must aliases.  Since both
@@ -87,7 +88,8 @@
 }
 
 void AliasSet::addPointer(AliasSetTracker &AST, PointerRec &Entry,
-                          unsigned Size, bool KnownMustAlias) {
+                          uint64_t Size, const MDNode *TBAAInfo,
+                          bool KnownMustAlias) {
   assert(!Entry.hasAliasSet() && "Entry already in set!");
 
   // Check to see if we have to downgrade to _may_ alias.
@@ -95,27 +97,29 @@
     if (PointerRec *P = getSomePointer()) {
       AliasAnalysis &AA = AST.getAliasAnalysis();
       AliasAnalysis::AliasResult Result =
-        AA.alias(P->getValue(), P->getSize(), Entry.getValue(), Size);
+        AA.alias(AliasAnalysis::Location(P->getValue(), P->getSize(),
+                                         P->getTBAAInfo()),
+                 AliasAnalysis::Location(Entry.getValue(), Size, TBAAInfo));
       if (Result == AliasAnalysis::MayAlias)
         AliasTy = MayAlias;
       else                  // First entry of must alias must have maximum size!
-        P->updateSize(Size);
+        P->updateSizeAndTBAAInfo(Size, TBAAInfo);
       assert(Result != AliasAnalysis::NoAlias && "Cannot be part of must set!");
     }
 
   Entry.setAliasSet(this);
-  Entry.updateSize(Size);
+  Entry.updateSizeAndTBAAInfo(Size, TBAAInfo);
 
   // Add it to the end of the list...
   assert(*PtrListEnd == 0 && "End of list is not null?");
   *PtrListEnd = &Entry;
   PtrListEnd = Entry.setPrevInList(PtrListEnd);
   assert(*PtrListEnd == 0 && "End of list is not null?");
-  addRef();               // Entry points to alias set...
+  addRef();               // Entry points to alias set.
 }
 
 void AliasSet::addCallSite(CallSite CS, AliasAnalysis &AA) {
-  CallSites.push_back(CS);
+  CallSites.push_back(CS.getInstruction());
 
   AliasAnalysis::ModRefBehavior Behavior = AA.getModRefBehavior(CS);
   if (Behavior == AliasAnalysis::DoesNotAccessMemory)
@@ -134,29 +138,36 @@
 /// aliasesPointer - Return true if the specified pointer "may" (or must)
 /// alias one of the members in the set.
 ///
-bool AliasSet::aliasesPointer(const Value *Ptr, unsigned Size,
+bool AliasSet::aliasesPointer(const Value *Ptr, uint64_t Size,
+                              const MDNode *TBAAInfo,
                               AliasAnalysis &AA) const {
   if (AliasTy == MustAlias) {
     assert(CallSites.empty() && "Illegal must alias set!");
 
     // If this is a set of MustAliases, only check to see if the pointer aliases
-    // SOME value in the set...
+    // SOME value in the set.
     PointerRec *SomePtr = getSomePointer();
     assert(SomePtr && "Empty must-alias set??");
-    return AA.alias(SomePtr->getValue(), SomePtr->getSize(), Ptr, Size);
+    return AA.alias(AliasAnalysis::Location(SomePtr->getValue(),
+                                            SomePtr->getSize(),
+                                            SomePtr->getTBAAInfo()),
+                    AliasAnalysis::Location(Ptr, Size, TBAAInfo));
   }
 
   // If this is a may-alias set, we have to check all of the pointers in the set
   // to be sure it doesn't alias the set...
   for (iterator I = begin(), E = end(); I != E; ++I)
-    if (AA.alias(Ptr, Size, I.getPointer(), I.getSize()))
+    if (AA.alias(AliasAnalysis::Location(Ptr, Size, TBAAInfo),
+                 AliasAnalysis::Location(I.getPointer(), I.getSize(),
+                                         I.getTBAAInfo())))
       return true;
 
   // Check the call sites list and invoke list...
   if (!CallSites.empty()) {
     for (unsigned i = 0, e = CallSites.size(); i != e; ++i)
-      if (AA.getModRefInfo(CallSites[i], const_cast<Value*>(Ptr), Size)
-                   != AliasAnalysis::NoModRef)
+      if (AA.getModRefInfo(CallSites[i],
+                           AliasAnalysis::Location(Ptr, Size, TBAAInfo)) !=
+            AliasAnalysis::NoModRef)
         return true;
   }
 
@@ -167,10 +178,11 @@
   if (AA.doesNotAccessMemory(CS))
     return false;
 
-  for (unsigned i = 0, e = CallSites.size(); i != e; ++i)
-    if (AA.getModRefInfo(CallSites[i], CS) != AliasAnalysis::NoModRef ||
-        AA.getModRefInfo(CS, CallSites[i]) != AliasAnalysis::NoModRef)
+  for (unsigned i = 0, e = CallSites.size(); i != e; ++i) {
+    if (AA.getModRefInfo(getCallSite(i), CS) != AliasAnalysis::NoModRef ||
+        AA.getModRefInfo(CS, getCallSite(i)) != AliasAnalysis::NoModRef)
       return true;
+  }
 
   for (iterator I = begin(), E = end(); I != E; ++I)
     if (AA.getModRefInfo(CS, I.getPointer(), I.getSize()) !=
@@ -198,16 +210,18 @@
 /// that may alias the pointer, merge them together and return the unified set.
 ///
 AliasSet *AliasSetTracker::findAliasSetForPointer(const Value *Ptr,
-                                                  unsigned Size) {
+                                                  uint64_t Size,
+                                                  const MDNode *TBAAInfo) {
   AliasSet *FoundSet = 0;
-  for (iterator I = begin(), E = end(); I != E; ++I)
-    if (!I->Forward && I->aliasesPointer(Ptr, Size, AA)) {
-      if (FoundSet == 0) {  // If this is the first alias set ptr can go into.
-        FoundSet = I;       // Remember it.
-      } else {              // Otherwise, we must merge the sets.
-        FoundSet->mergeSetIn(*I, *this);     // Merge in contents.
-      }
+  for (iterator I = begin(), E = end(); I != E; ++I) {
+    if (I->Forward || !I->aliasesPointer(Ptr, Size, TBAAInfo, AA)) continue;
+    
+    if (FoundSet == 0) {  // If this is the first alias set that ptr can go into.
+      FoundSet = I;       // Remember it.
+    } else {              // Otherwise, we must merge the sets.
+      FoundSet->mergeSetIn(*I, *this);     // Merge in contents.
     }
+  }
 
   return FoundSet;
 }
@@ -215,9 +229,10 @@
 /// containsPointer - Return true if the specified location is represented by
 /// this alias set, false otherwise.  This does not modify the AST object or
 /// alias sets.
-bool AliasSetTracker::containsPointer(Value *Ptr, unsigned Size) const {
+bool AliasSetTracker::containsPointer(Value *Ptr, uint64_t Size,
+                                      const MDNode *TBAAInfo) const {
   for (const_iterator I = begin(), E = end(); I != E; ++I)
-    if (!I->Forward && I->aliasesPointer(Ptr, Size, AA))
+    if (!I->Forward && I->aliasesPointer(Ptr, Size, TBAAInfo, AA))
       return true;
   return false;
 }
@@ -226,15 +241,15 @@
 
 AliasSet *AliasSetTracker::findAliasSetForCallSite(CallSite CS) {
   AliasSet *FoundSet = 0;
-  for (iterator I = begin(), E = end(); I != E; ++I)
-    if (!I->Forward && I->aliasesCallSite(CS, AA)) {
-      if (FoundSet == 0) {  // If this is the first alias set ptr can go into.
-        FoundSet = I;       // Remember it.
-      } else if (!I->Forward) {     // Otherwise, we must merge the sets.
-        FoundSet->mergeSetIn(*I, *this);     // Merge in contents.
-      }
-    }
-
+  for (iterator I = begin(), E = end(); I != E; ++I) {
+    if (I->Forward || !I->aliasesCallSite(CS, AA))
+      continue;
+    
+    if (FoundSet == 0)        // If this is the first alias set that ptr can go into.
+      FoundSet = I;           // Remember it.
+    else if (!I->Forward)     // Otherwise, we must merge the sets.
+      FoundSet->mergeSetIn(*I, *this);     // Merge in contents.
+  }
   return FoundSet;
 }
 
@@ -243,31 +258,34 @@
 
 /// getAliasSetForPointer - Return the alias set that the specified pointer
 /// lives in.
-AliasSet &AliasSetTracker::getAliasSetForPointer(Value *Pointer, unsigned Size,
+AliasSet &AliasSetTracker::getAliasSetForPointer(Value *Pointer, uint64_t Size,
+                                                 const MDNode *TBAAInfo,
                                                  bool *New) {
   AliasSet::PointerRec &Entry = getEntryFor(Pointer);
 
-  // Check to see if the pointer is already known...
+  // Check to see if the pointer is already known.
   if (Entry.hasAliasSet()) {
-    Entry.updateSize(Size);
+    Entry.updateSizeAndTBAAInfo(Size, TBAAInfo);
     // Return the set!
     return *Entry.getAliasSet(*this)->getForwardedTarget(*this);
-  } else if (AliasSet *AS = findAliasSetForPointer(Pointer, Size)) {
-    // Add it to the alias set it aliases...
-    AS->addPointer(*this, Entry, Size);
+  }
+  
+  if (AliasSet *AS = findAliasSetForPointer(Pointer, Size, TBAAInfo)) {
+    // Add it to the alias set it aliases.
+    AS->addPointer(*this, Entry, Size, TBAAInfo);
     return *AS;
-  } else {
-    if (New) *New = true;
-    // Otherwise create a new alias set to hold the loaded pointer...
-    AliasSets.push_back(new AliasSet());
-    AliasSets.back().addPointer(*this, Entry, Size);
-    return AliasSets.back();
   }
+  
+  if (New) *New = true;
+  // Otherwise create a new alias set to hold the loaded pointer.
+  AliasSets.push_back(new AliasSet());
+  AliasSets.back().addPointer(*this, Entry, Size, TBAAInfo);
+  return AliasSets.back();
 }
 
-bool AliasSetTracker::add(Value *Ptr, unsigned Size) {
+bool AliasSetTracker::add(Value *Ptr, uint64_t Size, const MDNode *TBAAInfo) {
   bool NewPtr;
-  addPointer(Ptr, Size, AliasSet::NoModRef, NewPtr);
+  addPointer(Ptr, Size, TBAAInfo, AliasSet::NoModRef, NewPtr);
   return NewPtr;
 }
 
@@ -276,6 +294,7 @@
   bool NewPtr;
   AliasSet &AS = addPointer(LI->getOperand(0),
                             AA.getTypeStoreSize(LI->getType()),
+                            LI->getMetadata(LLVMContext::MD_tbaa),
                             AliasSet::Refs, NewPtr);
   if (LI->isVolatile()) AS.setVolatile();
   return NewPtr;
@@ -286,6 +305,7 @@
   Value *Val = SI->getOperand(0);
   AliasSet &AS = addPointer(SI->getOperand(1),
                             AA.getTypeStoreSize(Val->getType()),
+                            SI->getMetadata(LLVMContext::MD_tbaa),
                             AliasSet::Mods, NewPtr);
   if (SI->isVolatile()) AS.setVolatile();
   return NewPtr;
@@ -293,7 +313,9 @@
 
 bool AliasSetTracker::add(VAArgInst *VAAI) {
   bool NewPtr;
-  addPointer(VAAI->getOperand(0), ~0, AliasSet::ModRef, NewPtr);
+  addPointer(VAAI->getOperand(0), AliasAnalysis::UnknownSize, 
+             VAAI->getMetadata(LLVMContext::MD_tbaa),
+             AliasSet::ModRef, NewPtr);
   return NewPtr;
 }
 
@@ -305,28 +327,27 @@
     return true; // doesn't alias anything
 
   AliasSet *AS = findAliasSetForCallSite(CS);
-  if (!AS) {
-    AliasSets.push_back(new AliasSet());
-    AS = &AliasSets.back();
-    AS->addCallSite(CS, AA);
-    return true;
-  } else {
+  if (AS) {
     AS->addCallSite(CS, AA);
     return false;
   }
+  AliasSets.push_back(new AliasSet());
+  AS = &AliasSets.back();
+  AS->addCallSite(CS, AA);
+  return true;
 }
 
 bool AliasSetTracker::add(Instruction *I) {
-  // Dispatch to one of the other add methods...
+  // Dispatch to one of the other add methods.
   if (LoadInst *LI = dyn_cast<LoadInst>(I))
     return add(LI);
-  else if (StoreInst *SI = dyn_cast<StoreInst>(I))
+  if (StoreInst *SI = dyn_cast<StoreInst>(I))
     return add(SI);
-  else if (CallInst *CI = dyn_cast<CallInst>(I))
+  if (CallInst *CI = dyn_cast<CallInst>(I))
     return add(CI);
-  else if (InvokeInst *II = dyn_cast<InvokeInst>(I))
+  if (InvokeInst *II = dyn_cast<InvokeInst>(I))
     return add(II);
-  else if (VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
+  if (VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
     return add(VAAI);
   return true;
 }
@@ -343,23 +364,24 @@
   // Loop over all of the alias sets in AST, adding the pointers contained
   // therein into the current alias sets.  This can cause alias sets to be
   // merged together in the current AST.
-  for (const_iterator I = AST.begin(), E = AST.end(); I != E; ++I)
-    if (!I->Forward) {   // Ignore forwarding alias sets
-      AliasSet &AS = const_cast<AliasSet&>(*I);
-
-      // If there are any call sites in the alias set, add them to this AST.
-      for (unsigned i = 0, e = AS.CallSites.size(); i != e; ++i)
-        add(AS.CallSites[i]);
-
-      // Loop over all of the pointers in this alias set...
-      AliasSet::iterator I = AS.begin(), E = AS.end();
-      bool X;
-      for (; I != E; ++I) {
-        AliasSet &NewAS = addPointer(I.getPointer(), I.getSize(),
-                                     (AliasSet::AccessType)AS.AccessTy, X);
-        if (AS.isVolatile()) NewAS.setVolatile();
-      }
+  for (const_iterator I = AST.begin(), E = AST.end(); I != E; ++I) {
+    if (I->Forward) continue;   // Ignore forwarding alias sets
+    
+    AliasSet &AS = const_cast<AliasSet&>(*I);
+
+    // If there are any call sites in the alias set, add them to this AST.
+    for (unsigned i = 0, e = AS.CallSites.size(); i != e; ++i)
+      add(AS.CallSites[i]);
+
+    // Loop over all of the pointers in this alias set.
+    bool X;
+    for (AliasSet::iterator ASI = AS.begin(), E = AS.end(); ASI != E; ++ASI) {
+      AliasSet &NewAS = addPointer(ASI.getPointer(), ASI.getSize(),
+                                   ASI.getTBAAInfo(),
+                                   (AliasSet::AccessType)AS.AccessTy, X);
+      if (AS.isVolatile()) NewAS.setVolatile();
     }
+  }
 }
 
 /// remove - Remove the specified (potentially non-empty) alias set from the
@@ -391,31 +413,36 @@
     AS.removeFromTracker(*this);
 }
 
-bool AliasSetTracker::remove(Value *Ptr, unsigned Size) {
-  AliasSet *AS = findAliasSetForPointer(Ptr, Size);
+bool
+AliasSetTracker::remove(Value *Ptr, uint64_t Size, const MDNode *TBAAInfo) {
+  AliasSet *AS = findAliasSetForPointer(Ptr, Size, TBAAInfo);
   if (!AS) return false;
   remove(*AS);
   return true;
 }
 
 bool AliasSetTracker::remove(LoadInst *LI) {
-  unsigned Size = AA.getTypeStoreSize(LI->getType());
-  AliasSet *AS = findAliasSetForPointer(LI->getOperand(0), Size);
+  uint64_t Size = AA.getTypeStoreSize(LI->getType());
+  const MDNode *TBAAInfo = LI->getMetadata(LLVMContext::MD_tbaa);
+  AliasSet *AS = findAliasSetForPointer(LI->getOperand(0), Size, TBAAInfo);
   if (!AS) return false;
   remove(*AS);
   return true;
 }
 
 bool AliasSetTracker::remove(StoreInst *SI) {
-  unsigned Size = AA.getTypeStoreSize(SI->getOperand(0)->getType());
-  AliasSet *AS = findAliasSetForPointer(SI->getOperand(1), Size);
+  uint64_t Size = AA.getTypeStoreSize(SI->getOperand(0)->getType());
+  const MDNode *TBAAInfo = SI->getMetadata(LLVMContext::MD_tbaa);
+  AliasSet *AS = findAliasSetForPointer(SI->getOperand(1), Size, TBAAInfo);
   if (!AS) return false;
   remove(*AS);
   return true;
 }
 
 bool AliasSetTracker::remove(VAArgInst *VAAI) {
-  AliasSet *AS = findAliasSetForPointer(VAAI->getOperand(0), ~0);
+  AliasSet *AS = findAliasSetForPointer(VAAI->getOperand(0),
+                                        AliasAnalysis::UnknownSize,
+                                        VAAI->getMetadata(LLVMContext::MD_tbaa));
   if (!AS) return false;
   remove(*AS);
   return true;
@@ -435,11 +462,11 @@
   // Dispatch to one of the other remove methods...
   if (LoadInst *LI = dyn_cast<LoadInst>(I))
     return remove(LI);
-  else if (StoreInst *SI = dyn_cast<StoreInst>(I))
+  if (StoreInst *SI = dyn_cast<StoreInst>(I))
     return remove(SI);
-  else if (CallInst *CI = dyn_cast<CallInst>(I))
+  if (CallInst *CI = dyn_cast<CallInst>(I))
     return remove(CI);
-  else if (VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
+  if (VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
     return remove(VAAI);
   return true;
 }
@@ -455,11 +482,17 @@
   AA.deleteValue(PtrVal);
 
   // If this is a call instruction, remove the callsite from the appropriate
-  // AliasSet.
-  if (CallSite CS = PtrVal)
-    if (!AA.doesNotAccessMemory(CS))
-      if (AliasSet *AS = findAliasSetForCallSite(CS))
-        AS->removeCallSite(CS);
+  // AliasSet (if present).
+  if (CallSite CS = PtrVal) {
+    if (!AA.doesNotAccessMemory(CS)) {
+      // Scan all the alias sets to see if this call site is contained.
+      for (iterator I = begin(), E = end(); I != E; ++I) {
+        if (I->Forward) continue;
+        
+        I->removeCallSite(CS);
+      }
+    }
+  }
 
   // First, look up the PointerRec for this pointer.
   PointerMapType::iterator I = PointerMap.find(PtrVal);
@@ -499,7 +532,9 @@
   // Add it to the alias set it aliases...
   I = PointerMap.find(From);
   AliasSet *AS = I->second->getAliasSet(*this);
-  AS->addPointer(*this, Entry, I->second->getSize(), true);
+  AS->addPointer(*this, Entry, I->second->getSize(),
+                 I->second->getTBAAInfo(),
+                 true);
 }
 
 
@@ -509,7 +544,7 @@
 //===----------------------------------------------------------------------===//
 
 void AliasSet::print(raw_ostream &OS) const {
-  OS << "  AliasSet[" << format("0x%p", (void*)this) << "," << RefCount << "] ";
+  OS << "  AliasSet[" << (void*)this << ", " << RefCount << "] ";
   OS << (AliasTy == MustAlias ? "must" : "may") << " alias, ";
   switch (AccessTy) {
   case NoModRef: OS << "No access "; break;
@@ -535,7 +570,7 @@
     OS << "\n    " << CallSites.size() << " Call Sites: ";
     for (unsigned i = 0, e = CallSites.size(); i != e; ++i) {
       if (i) OS << ", ";
-      WriteAsOperand(OS, CallSites[i].getCalledValue());
+      WriteAsOperand(OS, CallSites[i]);
     }
   }
   OS << "\n";
@@ -579,7 +614,9 @@
     AliasSetTracker *Tracker;
   public:
     static char ID; // Pass identification, replacement for typeid
-    AliasSetPrinter() : FunctionPass(&ID) {}
+    AliasSetPrinter() : FunctionPass(ID) {
+      initializeAliasSetPrinterPass(*PassRegistry::getPassRegistry());
+    }
 
     virtual void getAnalysisUsage(AnalysisUsage &AU) const {
       AU.setPreservesAll();
@@ -599,5 +636,8 @@
 }
 
 char AliasSetPrinter::ID = 0;
-INITIALIZE_PASS(AliasSetPrinter, "print-alias-sets",
-                "Alias Set Printer", false, true);
+INITIALIZE_PASS_BEGIN(AliasSetPrinter, "print-alias-sets",
+                "Alias Set Printer", false, true)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_END(AliasSetPrinter, "print-alias-sets",
+                "Alias Set Printer", false, true)

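With this change every AliasSetTracker entry records a (pointer, size,
TBAA tag) triple, so accesses with provably disjoint TBAA types can stay
in separate alias sets when a TBAA-aware analysis is in the chain. A
sketch, not part of the patch, with a hypothetical helper; it assumes the
getAliasSetForPointer signature shown above with the trailing bool*
defaulted:

    #include "llvm/Analysis/AliasAnalysis.h"
    #include "llvm/Analysis/AliasSetTracker.h"
    #include "llvm/Instructions.h"
    #include "llvm/LLVMContext.h"
    using namespace llvm;

    // Do two loads land in the same alias set once TBAA tags are tracked?
    static bool inSameAliasSet(AliasSetTracker &AST, AliasAnalysis &AA,
                               LoadInst *A, LoadInst *B) {
      AliasSet &SA = AST.getAliasSetForPointer(
          A->getOperand(0), AA.getTypeStoreSize(A->getType()),
          A->getMetadata(LLVMContext::MD_tbaa));
      AliasSet &SB = AST.getAliasSetForPointer(
          B->getOperand(0), AA.getTypeStoreSize(B->getType()),
          B->getMetadata(LLVMContext::MD_tbaa));
      return &SA == &SB;
    }
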
Modified: llvm/branches/wendling/eh/lib/Analysis/Analysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/Analysis.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/Analysis.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/Analysis.cpp Tue Oct 26 19:48:03 2010
@@ -8,11 +8,67 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm-c/Analysis.h"
+#include "llvm/InitializePasses.h"
 #include "llvm/Analysis/Verifier.h"
 #include <cstring>
 
 using namespace llvm;
 
+/// initializeAnalysis - Initialize all passes linked into the Analysis library.
+void llvm::initializeAnalysis(PassRegistry &Registry) {
+  initializeAliasAnalysisAnalysisGroup(Registry);
+  initializeAliasAnalysisCounterPass(Registry);
+  initializeAAEvalPass(Registry);
+  initializeAliasDebuggerPass(Registry);
+  initializeAliasSetPrinterPass(Registry);
+  initializeNoAAPass(Registry);
+  initializeBasicAliasAnalysisPass(Registry);
+  initializeCFGViewerPass(Registry);
+  initializeCFGPrinterPass(Registry);
+  initializeCFGOnlyViewerPass(Registry);
+  initializeCFGOnlyPrinterPass(Registry);
+  initializePrintDbgInfoPass(Registry);
+  initializeDomViewerPass(Registry);
+  initializeDomPrinterPass(Registry);
+  initializeDomOnlyViewerPass(Registry);
+  initializePostDomViewerPass(Registry);
+  initializeDomOnlyPrinterPass(Registry);
+  initializePostDomPrinterPass(Registry);
+  initializePostDomOnlyViewerPass(Registry);
+  initializePostDomOnlyPrinterPass(Registry);
+  initializeIVUsersPass(Registry);
+  initializeInstCountPass(Registry);
+  initializeIntervalPartitionPass(Registry);
+  initializeLazyValueInfoPass(Registry);
+  initializeLibCallAliasAnalysisPass(Registry);
+  initializeLintPass(Registry);
+  initializeLiveValuesPass(Registry);
+  initializeLoopDependenceAnalysisPass(Registry);
+  initializeLoopInfoPass(Registry);
+  initializeMemDepPrinterPass(Registry);
+  initializeMemoryDependenceAnalysisPass(Registry);
+  initializeModuleDebugInfoPrinterPass(Registry);
+  initializePostDominatorTreePass(Registry);
+  initializePostDominanceFrontierPass(Registry);
+  initializeProfileEstimatorPassPass(Registry);
+  initializeNoProfileInfoPass(Registry);
+  initializeProfileInfoAnalysisGroup(Registry);
+  initializeLoaderPassPass(Registry);
+  initializeProfileVerifierPassPass(Registry);
+  initializeRegionInfoPass(Registry);
+  initializeRegionViewerPass(Registry);
+  initializeRegionPrinterPass(Registry);
+  initializeRegionOnlyViewerPass(Registry);
+  initializeRegionOnlyPrinterPass(Registry);
+  initializeScalarEvolutionPass(Registry);
+  initializeScalarEvolutionAliasAnalysisPass(Registry);
+  initializeTypeBasedAliasAnalysisPass(Registry);
+}
+
+void LLVMInitializeAnalysis(LLVMPassRegistryRef R) {
+  initializeAnalysis(*unwrap(R));
+}
+
 LLVMBool LLVMVerifyModule(LLVMModuleRef M, LLVMVerifierFailureAction Action,
                           char **OutMessages) {
   std::string Messages;

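initializeAnalysis replaces static-initializer registration: a tool that
constructs these passes by name must call it, directly or through the C
binding above, before building a pipeline. A minimal sketch, not part of
the patch:

    #include "llvm/InitializePasses.h"
    #include "llvm/PassRegistry.h"

    int main() {
      // Register all Analysis-library passes with the global registry,
      // mirroring what LLVMInitializeAnalysis does for C API clients.
      llvm::PassRegistry &Registry = *llvm::PassRegistry::getPassRegistry();
      llvm::initializeAnalysis(Registry);
      // ... build a PassManager and add passes as usual ...
      return 0;
    }
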
Modified: llvm/branches/wendling/eh/lib/Analysis/BasicAliasAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/BasicAliasAnalysis.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/BasicAliasAnalysis.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/BasicAliasAnalysis.cpp Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-//===- BasicAliasAnalysis.cpp - Local Alias Analysis Impl -----------------===//
+//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
 //
 //                     The LLVM Compiler Infrastructure
 //
@@ -7,9 +7,9 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// This file defines the default implementation of the Alias Analysis interface
-// that simply implements a few identities (two different globals cannot alias,
-// etc), but otherwise does no analysis.
+// This file defines the primary stateless implementation of the
+// Alias Analysis interface that implements identities (two different
+// globals cannot alias, etc), but does no stateful analysis.
 //
 //===----------------------------------------------------------------------===//
 
@@ -18,9 +18,11 @@
 #include "llvm/Constants.h"
 #include "llvm/DerivedTypes.h"
 #include "llvm/Function.h"
+#include "llvm/GlobalAlias.h"
 #include "llvm/GlobalVariable.h"
 #include "llvm/Instructions.h"
 #include "llvm/IntrinsicInst.h"
+#include "llvm/LLVMContext.h"
 #include "llvm/Operator.h"
 #include "llvm/Pass.h"
 #include "llvm/Analysis/CaptureTracking.h"
@@ -30,6 +32,7 @@
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/GetElementPtrTypeIterator.h"
 #include <algorithm>
 using namespace llvm;
 
@@ -95,7 +98,7 @@
 
 /// isObjectSmallerThan - Return true if we can prove that the object specified
 /// by V is smaller than Size.
-static bool isObjectSmallerThan(const Value *V, unsigned Size,
+static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                 const TargetData &TD) {
   const Type *AccessTy;
   if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
@@ -126,67 +129,270 @@
 }
 
 //===----------------------------------------------------------------------===//
-// NoAA Pass
+// GetElementPtr Instruction Decomposition and Analysis
 //===----------------------------------------------------------------------===//
 
 namespace {
-  /// NoAA - This class implements the -no-aa pass, which always returns "I
-  /// don't know" for alias queries.  NoAA is unlike other alias analysis
-  /// implementations, in that it does not chain to a previous analysis.  As
-  /// such it doesn't follow many of the rules that other alias analyses must.
-  ///
-  struct NoAA : public ImmutablePass, public AliasAnalysis {
-    static char ID; // Class identification, replacement for typeinfo
-    NoAA() : ImmutablePass(&ID) {}
-    explicit NoAA(void *PID) : ImmutablePass(PID) { }
-
-    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
-    }
+  enum ExtensionKind {
+    EK_NotExtended,
+    EK_SignExt,
+    EK_ZeroExt
+  };
+  
+  struct VariableGEPIndex {
+    const Value *V;
+    ExtensionKind Extension;
+    int64_t Scale;
+  };
+}
 
-    virtual void initializePass() {
-      TD = getAnalysisIfAvailable<TargetData>();
-    }
 
-    virtual AliasResult alias(const Value *V1, unsigned V1Size,
-                              const Value *V2, unsigned V2Size) {
-      return MayAlias;
+/// GetLinearExpression - Analyze the specified value as a linear expression:
+/// "A*V + B", where A and B are constant integers.  Return the scale and
+/// offset values as APInts, return V as a Value*, and record in Extension
+/// whether we looked through any sign or zero extends.  The incoming Value
+/// is known to have IntegerType and it may already be sign or zero extended.
+///
+/// Note that this looks through extends, so the high bits may not be
+/// represented in the result.
+static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
+                                  ExtensionKind &Extension,
+                                  const TargetData &TD, unsigned Depth) {
+  assert(V->getType()->isIntegerTy() && "Not an integer value");
+
+  // Limit our recursion depth.
+  if (Depth == 6) {
+    Scale = 1;
+    Offset = 0;
+    return V;
+  }
+  
+  if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
+    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
+      switch (BOp->getOpcode()) {
+      default: break;
+      case Instruction::Or:
+        // X|C == X+C if all the bits in C are unset in X.  Otherwise we can't
+        // analyze it.
+        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), &TD))
+          break;
+        // FALL THROUGH.
+      case Instruction::Add:
+        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
+                                TD, Depth+1);
+        Offset += RHSC->getValue();
+        return V;
+      case Instruction::Mul:
+        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
+                                TD, Depth+1);
+        Offset *= RHSC->getValue();
+        Scale *= RHSC->getValue();
+        return V;
+      case Instruction::Shl:
+        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
+                                TD, Depth+1);
+        Offset <<= RHSC->getValue().getLimitedValue();
+        Scale <<= RHSC->getValue().getLimitedValue();
+        return V;
+      }
     }
+  }
+  
+  // Since GEP indices are sign extended anyway, we don't care about the high
+  // bits of a sign or zero extended value - just scales and offsets.  The
+  // extensions have to be consistent though.
+  if ((isa<SExtInst>(V) && Extension != EK_ZeroExt) ||
+      (isa<ZExtInst>(V) && Extension != EK_SignExt)) {
+    Value *CastOp = cast<CastInst>(V)->getOperand(0);
+    unsigned OldWidth = Scale.getBitWidth();
+    unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
+    Scale.trunc(SmallWidth);
+    Offset.trunc(SmallWidth);
+    Extension = isa<SExtInst>(V) ? EK_SignExt : EK_ZeroExt;
+
+    Value *Result = GetLinearExpression(CastOp, Scale, Offset, Extension,
+                                        TD, Depth+1);
+    Scale.zext(OldWidth);
+    Offset.zext(OldWidth);
+    
+    return Result;
+  }
+  
+  Scale = 1;
+  Offset = 0;
+  return V;
+}
 
-    virtual void getArgumentAccesses(Function *F, CallSite CS,
-                                     std::vector<PointerAccessInfo> &Info) {
-      llvm_unreachable("This method may not be called on this function!");
+/// DecomposeGEPExpression - If V is a symbolic pointer expression, decompose it
+/// into a base pointer with a constant offset and a number of scaled symbolic
+/// offsets.
+///
+/// The scaled symbolic offsets (represented by pairs of a Value* and a scale in
+/// the VarIndices vector) are Value*'s that are known to be scaled by the
+/// specified amount, but which may have other unrepresented high bits. As such,
+/// the gep cannot necessarily be reconstructed from its decomposed form.
+///
+/// When TargetData is around, this function is capable of analyzing everything
+/// that Value::getUnderlyingObject() can look through.  When not, it just looks
+/// through pointer casts.
+///
+static const Value *
+DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
+                       SmallVectorImpl<VariableGEPIndex> &VarIndices,
+                       const TargetData *TD) {
+  // Limit recursion depth to limit compile time in crazy cases.
+  unsigned MaxLookup = 6;
+  
+  BaseOffs = 0;
+  do {
+    // See if this is a bitcast or GEP.
+    const Operator *Op = dyn_cast<Operator>(V);
+    if (Op == 0) {
+      // The only non-operator case we can handle are GlobalAliases.
+      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
+        if (!GA->mayBeOverridden()) {
+          V = GA->getAliasee();
+          continue;
+        }
+      }
+      return V;
     }
-
-    virtual bool pointsToConstantMemory(const Value *P) { return false; }
-    virtual ModRefResult getModRefInfo(CallSite CS, Value *P, unsigned Size) {
-      return ModRef;
+    
+    if (Op->getOpcode() == Instruction::BitCast) {
+      V = Op->getOperand(0);
+      continue;
     }
-    virtual ModRefResult getModRefInfo(CallSite CS1, CallSite CS2) {
-      return ModRef;
+    
+    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
+    if (GEPOp == 0)
+      return V;
+    
+    // Don't attempt to analyze GEPs over unsized objects.
+    if (!cast<PointerType>(GEPOp->getOperand(0)->getType())
+        ->getElementType()->isSized())
+      return V;
+    
+    // If we are lacking TargetData information, we can't compute the offsets of
+    // elements computed by GEPs.  However, we can handle bitcast equivalent
+    // GEPs.
+    if (TD == 0) {
+      if (!GEPOp->hasAllZeroIndices())
+        return V;
+      V = GEPOp->getOperand(0);
+      continue;
     }
-
-    virtual void deleteValue(Value *V) {}
-    virtual void copyValue(Value *From, Value *To) {}
     
-    /// getAdjustedAnalysisPointer - This method is used when a pass implements
-    /// an analysis interface through multiple inheritance.  If needed, it should
-    /// override this to adjust the this pointer as needed for the specified pass
-    /// info.
-    virtual void *getAdjustedAnalysisPointer(const PassInfo *PI) {
-      if (PI->isPassID(&AliasAnalysis::ID))
-        return (AliasAnalysis*)this;
-      return this;
+    // Walk the indices of the GEP, accumulating them into BaseOffs/VarIndices.
+    gep_type_iterator GTI = gep_type_begin(GEPOp);
+    for (User::const_op_iterator I = GEPOp->op_begin()+1,
+         E = GEPOp->op_end(); I != E; ++I) {
+      Value *Index = *I;
+      // Compute the (potentially symbolic) offset in bytes for this index.
+      if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
+        // For a struct, add the member offset.
+        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
+        if (FieldNo == 0) continue;
+        
+        BaseOffs += TD->getStructLayout(STy)->getElementOffset(FieldNo);
+        continue;
+      }
+      
+      // For an array/pointer, add the element offset, explicitly scaled.
+      if (ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
+        if (CIdx->isZero()) continue;
+        BaseOffs += TD->getTypeAllocSize(*GTI)*CIdx->getSExtValue();
+        continue;
+      }
+      
+      uint64_t Scale = TD->getTypeAllocSize(*GTI);
+      ExtensionKind Extension = EK_NotExtended;
+      
+      // If the integer type is smaller than the pointer size, it is implicitly
+      // sign extended to pointer size.
+      unsigned Width = cast<IntegerType>(Index->getType())->getBitWidth();
+      if (TD->getPointerSizeInBits() > Width)
+        Extension = EK_SignExt;
+      
+      // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
+      APInt IndexScale(Width, 0), IndexOffset(Width, 0);
+      Index = GetLinearExpression(Index, IndexScale, IndexOffset, Extension,
+                                  *TD, 0);
+      
+      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
+      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
+      BaseOffs += IndexOffset.getSExtValue()*Scale;
+      Scale *= IndexScale.getSExtValue();
+      
+      
+      // If we already had an occurrence of this index variable, merge this
+      // scale into it.  For example, we want to handle:
+      //   A[x][x] -> x*16 + x*4 -> x*20
+      // This also ensures that 'x' only appears in the index list once.
+      for (unsigned i = 0, e = VarIndices.size(); i != e; ++i) {
+        if (VarIndices[i].V == Index &&
+            VarIndices[i].Extension == Extension) {
+          Scale += VarIndices[i].Scale;
+          VarIndices.erase(VarIndices.begin()+i);
+          break;
+        }
+      }
+      
+      // Make sure that we have a scale that makes sense for this target's
+      // pointer size.
+      if (unsigned ShiftBits = 64-TD->getPointerSizeInBits()) {
+        Scale <<= ShiftBits;
+        Scale = (int64_t)Scale >> ShiftBits;
+      }
+      
+      if (Scale) {
+        VariableGEPIndex Entry = {Index, Extension, Scale};
+        VarIndices.push_back(Entry);
+      }
     }
-  };
-}  // End of anonymous namespace
+    
+    // Analyze the base pointer next.
+    V = GEPOp->getOperand(0);
+  } while (--MaxLookup);
+  
+  // If the chain of expressions is too deep, just return early.
+  return V;
+}
 
-// Register this pass...
-char NoAA::ID = 0;
-INITIALIZE_AG_PASS(NoAA, AliasAnalysis, "no-aa",
-                   "No Alias Analysis (always returns 'may' alias)",
-                   true, true, false);
+/// GetIndexDifference - Dest and Src are the variable indices from two
+/// decomposed GetElementPtr instructions GEP1 and GEP2 which have common base
+/// pointers.  Subtract the GEP2 indices from GEP1 to find the symbolic
+/// difference between the two pointers. 
+static void GetIndexDifference(SmallVectorImpl<VariableGEPIndex> &Dest,
+                               const SmallVectorImpl<VariableGEPIndex> &Src) {
+  if (Src.empty()) return;
 
-ImmutablePass *llvm::createNoAAPass() { return new NoAA(); }
+  for (unsigned i = 0, e = Src.size(); i != e; ++i) {
+    const Value *V = Src[i].V;
+    ExtensionKind Extension = Src[i].Extension;
+    int64_t Scale = Src[i].Scale;
+    
+    // Find V in Dest.  This is N^2, but pointer indices almost never have more
+    // than a few variable indices.
+    for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
+      if (Dest[j].V != V || Dest[j].Extension != Extension) continue;
+      
+      // If we found it, subtract off Scale V's from the entry in Dest.  If it
+      // goes to zero, remove the entry.
+      if (Dest[j].Scale != Scale)
+        Dest[j].Scale -= Scale;
+      else
+        Dest.erase(Dest.begin()+j);
+      Scale = 0;
+      break;
+    }
+    
+    // If we didn't consume this entry, add it to the end of the Dest list.
+    if (Scale) {
+      VariableGEPIndex Entry = { V, Extension, -Scale };
+      Dest.push_back(Entry);
+    }
+  }
+}
 
 //===----------------------------------------------------------------------===//
 // BasicAliasAnalysis Pass
@@ -213,36 +419,59 @@
 #endif
 
 namespace {
-  /// BasicAliasAnalysis - This is the default alias analysis implementation.
-  /// Because it doesn't chain to a previous alias analysis (like -no-aa), it
-  /// derives from the NoAA class.
-  struct BasicAliasAnalysis : public NoAA {
+  /// BasicAliasAnalysis - This is the primary alias analysis implementation.
+  struct BasicAliasAnalysis : public ImmutablePass, public AliasAnalysis {
     static char ID; // Class identification, replacement for typeinfo
-    BasicAliasAnalysis() : NoAA(&ID) {}
+    BasicAliasAnalysis() : ImmutablePass(ID) {
+      initializeBasicAliasAnalysisPass(*PassRegistry::getPassRegistry());
+    }
 
-    AliasResult alias(const Value *V1, unsigned V1Size,
-                      const Value *V2, unsigned V2Size) {
+    virtual void initializePass() {
+      InitializeAliasAnalysis(this);
+    }
+
+    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+      AU.addRequired<AliasAnalysis>();
+    }
+
+    virtual AliasResult alias(const Location &LocA,
+                              const Location &LocB) {
       assert(Visited.empty() && "Visited must be cleared after use!");
-      assert(notDifferentParent(V1, V2) &&
+      assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
              "BasicAliasAnalysis doesn't support interprocedural queries.");
-      AliasResult Alias = aliasCheck(V1, V1Size, V2, V2Size);
+      AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.TBAATag,
+                                     LocB.Ptr, LocB.Size, LocB.TBAATag);
       Visited.clear();
       return Alias;
     }
 
-    ModRefResult getModRefInfo(CallSite CS, Value *P, unsigned Size);
-    ModRefResult getModRefInfo(CallSite CS1, CallSite CS2);
+    virtual ModRefResult getModRefInfo(ImmutableCallSite CS,
+                                       const Location &Loc);
+
+    virtual ModRefResult getModRefInfo(ImmutableCallSite CS1,
+                                       ImmutableCallSite CS2) {
+      // The AliasAnalysis base class has some smarts, let's use them.
+      return AliasAnalysis::getModRefInfo(CS1, CS2);
+    }
 
     /// pointsToConstantMemory - Chase pointers until we find a (constant
     /// global) or not.
-    bool pointsToConstantMemory(const Value *P);
+    virtual bool pointsToConstantMemory(const Location &Loc);
+
+    /// getModRefBehavior - Return the behavior when calling the given
+    /// call site.
+    virtual ModRefBehavior getModRefBehavior(ImmutableCallSite CS);
+
+    /// getModRefBehavior - Return the behavior when calling the given function.
+    /// For use when the call site is not known.
+    virtual ModRefBehavior getModRefBehavior(const Function *F);
 
     /// getAdjustedAnalysisPointer - This method is used when a pass implements
-    /// an analysis interface through multiple inheritance.  If needed, it should
-    /// override this to adjust the this pointer as needed for the specified pass
-    /// info.
-    virtual void *getAdjustedAnalysisPointer(const PassInfo *PI) {
-      if (PI->isPassID(&AliasAnalysis::ID))
+    /// an analysis interface through multiple inheritance.  If needed, it
+    /// should override this to adjust the this pointer as needed for the
+    /// specified pass info.
+    virtual void *getAdjustedAnalysisPointer(const void *ID) {
+      if (ID == &AliasAnalysis::ID)
         return (AliasAnalysis*)this;
       return this;
     }
@@ -253,29 +482,36 @@
 
     // aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP
     // instruction against another.
-    AliasResult aliasGEP(const GEPOperator *V1, unsigned V1Size,
-                         const Value *V2, unsigned V2Size,
+    AliasResult aliasGEP(const GEPOperator *V1, uint64_t V1Size,
+                         const Value *V2, uint64_t V2Size,
+                         const MDNode *V2TBAAInfo,
                          const Value *UnderlyingV1, const Value *UnderlyingV2);
 
     // aliasPHI - Provide a bunch of ad-hoc rules to disambiguate a PHI
     // instruction against another.
-    AliasResult aliasPHI(const PHINode *PN, unsigned PNSize,
-                         const Value *V2, unsigned V2Size);
+    AliasResult aliasPHI(const PHINode *PN, uint64_t PNSize,
+                         const MDNode *PNTBAAInfo,
+                         const Value *V2, uint64_t V2Size,
+                         const MDNode *V2TBAAInfo);
 
     /// aliasSelect - Disambiguate a Select instruction against another value.
-    AliasResult aliasSelect(const SelectInst *SI, unsigned SISize,
-                            const Value *V2, unsigned V2Size);
-
-    AliasResult aliasCheck(const Value *V1, unsigned V1Size,
-                           const Value *V2, unsigned V2Size);
+    AliasResult aliasSelect(const SelectInst *SI, uint64_t SISize,
+                            const MDNode *SITBAAInfo,
+                            const Value *V2, uint64_t V2Size,
+                            const MDNode *V2TBAAInfo);
+
+    AliasResult aliasCheck(const Value *V1, uint64_t V1Size,
+                           const MDNode *V1TBAATag,
+                           const Value *V2, uint64_t V2Size,
+                           const MDNode *V2TBAATag);
   };
 }  // End of anonymous namespace
 
 // Register this pass...
 char BasicAliasAnalysis::ID = 0;
 INITIALIZE_AG_PASS(BasicAliasAnalysis, AliasAnalysis, "basicaa",
-                   "Basic Alias Analysis (default AA impl)",
-                   false, true, true);
+                   "Basic Alias Analysis (stateless AA impl)",
+                   false, true, false)
 
 ImmutablePass *llvm::createBasicAliasAnalysisPass() {
   return new BasicAliasAnalysis();
@@ -284,35 +520,69 @@
 
 /// pointsToConstantMemory - Chase pointers until we find a constant
 /// global or give up.
-bool BasicAliasAnalysis::pointsToConstantMemory(const Value *P) {
+bool BasicAliasAnalysis::pointsToConstantMemory(const Location &Loc) {
   if (const GlobalVariable *GV = 
-        dyn_cast<GlobalVariable>(P->getUnderlyingObject()))
+        dyn_cast<GlobalVariable>(Loc.Ptr->getUnderlyingObject()))
     // Note: this doesn't require GV to be "ODR" because it isn't legal for a
     // global to be marked constant in some modules and non-constant in others.
     // GV may even be a declaration, not a definition.
     return GV->isConstant();
-  return false;
+
+  return AliasAnalysis::pointsToConstantMemory(Loc);
+}
+
+/// getModRefBehavior - Return the behavior when calling the given call site.
+AliasAnalysis::ModRefBehavior
+BasicAliasAnalysis::getModRefBehavior(ImmutableCallSite CS) {
+  if (CS.doesNotAccessMemory())
+    // Can't do better than this.
+    return DoesNotAccessMemory;
+
+  ModRefBehavior Min = UnknownModRefBehavior;
+
+  // If the callsite knows it only reads memory, don't return worse
+  // than that.
+  if (CS.onlyReadsMemory())
+    Min = OnlyReadsMemory;
+
+  // The AliasAnalysis base class has some smarts; let's use them.
+  return std::min(AliasAnalysis::getModRefBehavior(CS), Min);
 }
 
+/// getModRefBehavior - Return the behavior when calling the given function.
+/// For use when the call site is not known.
+AliasAnalysis::ModRefBehavior
+BasicAliasAnalysis::getModRefBehavior(const Function *F) {
+  if (F->doesNotAccessMemory())
+    // Can't do better than this.
+    return DoesNotAccessMemory;
+  if (F->onlyReadsMemory())
+    return OnlyReadsMemory;
+  if (unsigned id = F->getIntrinsicID())
+    return getIntrinsicModRefBehavior(id);
+
+  return AliasAnalysis::getModRefBehavior(F);
+}
 
 /// getModRefInfo - Check to see if the specified callsite can clobber the
 /// specified memory object.  Since we only look at local properties of this
 /// function, we really can't say much about this query.  We do, however, use
 /// simple "address taken" analysis on local objects.
 AliasAnalysis::ModRefResult
-BasicAliasAnalysis::getModRefInfo(CallSite CS, Value *P, unsigned Size) {
-  assert(notDifferentParent(CS.getInstruction(), P) &&
+BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
+                                  const Location &Loc) {
+  assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) &&
          "AliasAnalysis query involving multiple functions!");
 
-  const Value *Object = P->getUnderlyingObject();
+  const Value *Object = Loc.Ptr->getUnderlyingObject();
   
-  // If this is a tail call and P points to a stack location, we know that
+  // If this is a tail call and Loc.Ptr points to a stack location, we know that
   // the tail call cannot access or modify the local stack.
   // We cannot exclude byval arguments here; these belong to the caller of
   // the current function not to the current function, and a tail callee
   // may reference them.
   if (isa<AllocaInst>(Object))
-    if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
+    if (const CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
       if (CI->isTailCall())
         return NoModRef;
   
@@ -323,18 +593,18 @@
       isNonEscapingLocalObject(Object)) {
     bool PassedAsArg = false;
     unsigned ArgNo = 0;
-    for (CallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
+    for (ImmutableCallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
          CI != CE; ++CI, ++ArgNo) {
       // Only look at the no-capture pointer arguments.
       if (!(*CI)->getType()->isPointerTy() ||
           !CS.paramHasAttr(ArgNo+1, Attribute::NoCapture))
         continue;
       
-      // If  this is a no-capture pointer argument, see if we can tell that it
+      // If this is a no-capture pointer argument, see if we can tell that it
       // is impossible to alias the pointer we're checking.  If not, we have to
       // assume that the call could touch the pointer, even though it doesn't
       // escape.
-      if (!isNoAlias(cast<Value>(CI), ~0U, P, ~0U)) {
+      if (!isNoAlias(Location(cast<Value>(CI)), Loc)) {
         PassedAsArg = true;
         break;
       }
@@ -345,125 +615,80 @@
   }
 
   // Finally, handle specific knowledge of intrinsics.
-  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
-  if (II == 0)
-    return AliasAnalysis::getModRefInfo(CS, P, Size);
-
-  switch (II->getIntrinsicID()) {
-  default: break;
-  case Intrinsic::memcpy:
-  case Intrinsic::memmove: {
-    unsigned Len = ~0U;
-    if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2)))
-      Len = LenCI->getZExtValue();
-    Value *Dest = II->getArgOperand(0);
-    Value *Src = II->getArgOperand(1);
-    if (isNoAlias(Dest, Len, P, Size)) {
-      if (isNoAlias(Src, Len, P, Size))
-        return NoModRef;
-      return Ref;
-    }
-    break;
-  }
-  case Intrinsic::memset:
-    // Since memset is 'accesses arguments' only, the AliasAnalysis base class
-    // will handle it for the variable length case.
-    if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
-      unsigned Len = LenCI->getZExtValue();
+  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
+  if (II != 0)
+    switch (II->getIntrinsicID()) {
+    default: break;
+    case Intrinsic::memcpy:
+    case Intrinsic::memmove: {
+      uint64_t Len = UnknownSize;
+      if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2)))
+        Len = LenCI->getZExtValue();
       Value *Dest = II->getArgOperand(0);
-      if (isNoAlias(Dest, Len, P, Size))
+      Value *Src = II->getArgOperand(1);
+      if (isNoAlias(Location(Dest, Len), Loc)) {
+        if (isNoAlias(Location(Src, Len), Loc))
+          return NoModRef;
+        return Ref;
+      }
+      break;
+    }
+    case Intrinsic::memset:
+      // Since memset is 'accesses arguments' only, the AliasAnalysis base class
+      // will handle it for the variable length case.
+      if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
+        uint64_t Len = LenCI->getZExtValue();
+        Value *Dest = II->getArgOperand(0);
+        if (isNoAlias(Location(Dest, Len), Loc))
+          return NoModRef;
+      }
+      break;
+    case Intrinsic::atomic_cmp_swap:
+    case Intrinsic::atomic_swap:
+    case Intrinsic::atomic_load_add:
+    case Intrinsic::atomic_load_sub:
+    case Intrinsic::atomic_load_and:
+    case Intrinsic::atomic_load_nand:
+    case Intrinsic::atomic_load_or:
+    case Intrinsic::atomic_load_xor:
+    case Intrinsic::atomic_load_max:
+    case Intrinsic::atomic_load_min:
+    case Intrinsic::atomic_load_umax:
+    case Intrinsic::atomic_load_umin:
+      if (TD) {
+        Value *Op1 = II->getArgOperand(0);
+        uint64_t Op1Size = TD->getTypeStoreSize(Op1->getType());
+        MDNode *Tag = II->getMetadata(LLVMContext::MD_tbaa);
+        if (isNoAlias(Location(Op1, Op1Size, Tag), Loc))
+          return NoModRef;
+      }
+      break;
+    case Intrinsic::lifetime_start:
+    case Intrinsic::lifetime_end:
+    case Intrinsic::invariant_start: {
+      uint64_t PtrSize =
+        cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
+      if (isNoAlias(Location(II->getArgOperand(1),
+                             PtrSize,
+                             II->getMetadata(LLVMContext::MD_tbaa)),
+                    Loc))
         return NoModRef;
+      break;
     }
-    break;
-  case Intrinsic::atomic_cmp_swap:
-  case Intrinsic::atomic_swap:
-  case Intrinsic::atomic_load_add:
-  case Intrinsic::atomic_load_sub:
-  case Intrinsic::atomic_load_and:
-  case Intrinsic::atomic_load_nand:
-  case Intrinsic::atomic_load_or:
-  case Intrinsic::atomic_load_xor:
-  case Intrinsic::atomic_load_max:
-  case Intrinsic::atomic_load_min:
-  case Intrinsic::atomic_load_umax:
-  case Intrinsic::atomic_load_umin:
-    if (TD) {
-      Value *Op1 = II->getArgOperand(0);
-      unsigned Op1Size = TD->getTypeStoreSize(Op1->getType());
-      if (isNoAlias(Op1, Op1Size, P, Size))
+    case Intrinsic::invariant_end: {
+      uint64_t PtrSize =
+        cast<ConstantInt>(II->getArgOperand(1))->getZExtValue();
+      if (isNoAlias(Location(II->getArgOperand(2),
+                             PtrSize,
+                             II->getMetadata(LLVMContext::MD_tbaa)),
+                    Loc))
         return NoModRef;
+      break;
+    }
     }
-    break;
-  case Intrinsic::lifetime_start:
-  case Intrinsic::lifetime_end:
-  case Intrinsic::invariant_start: {
-    unsigned PtrSize = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
-    if (isNoAlias(II->getArgOperand(1), PtrSize, P, Size))
-      return NoModRef;
-    break;
-  }
-  case Intrinsic::invariant_end: {
-    unsigned PtrSize = cast<ConstantInt>(II->getArgOperand(1))->getZExtValue();
-    if (isNoAlias(II->getArgOperand(2), PtrSize, P, Size))
-      return NoModRef;
-    break;
-  }
-  }
 
   // The AliasAnalysis base class has some smarts; let's use them.
-  return AliasAnalysis::getModRefInfo(CS, P, Size);
-}
-
-
-AliasAnalysis::ModRefResult 
-BasicAliasAnalysis::getModRefInfo(CallSite CS1, CallSite CS2) {
-  // If CS1 or CS2 are readnone, they don't interact.
-  ModRefBehavior CS1B = AliasAnalysis::getModRefBehavior(CS1);
-  if (CS1B == DoesNotAccessMemory) return NoModRef;
-  
-  ModRefBehavior CS2B = AliasAnalysis::getModRefBehavior(CS2);
-  if (CS2B == DoesNotAccessMemory) return NoModRef;
-  
-  // If they both only read from memory, just return ref.
-  if (CS1B == OnlyReadsMemory && CS2B == OnlyReadsMemory)
-    return Ref;
-  
-  // Otherwise, fall back to NoAA (mod+ref).
-  return NoAA::getModRefInfo(CS1, CS2);
-}
-
-/// GetIndiceDifference - Dest and Src are the variable indices from two
-/// decomposed GetElementPtr instructions GEP1 and GEP2 which have common base
-/// pointers.  Subtract the GEP2 indices from GEP1 to find the symbolic
-/// difference between the two pointers. 
-static void GetIndiceDifference(
-                      SmallVectorImpl<std::pair<const Value*, int64_t> > &Dest,
-                const SmallVectorImpl<std::pair<const Value*, int64_t> > &Src) {
-  if (Src.empty()) return;
-
-  for (unsigned i = 0, e = Src.size(); i != e; ++i) {
-    const Value *V = Src[i].first;
-    int64_t Scale = Src[i].second;
-    
-    // Find V in Dest.  This is N^2, but pointer indices almost never have more
-    // than a few variable indexes.
-    for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
-      if (Dest[j].first != V) continue;
-      
-      // If we found it, subtract off Scale V's from the entry in Dest.  If it
-      // goes to zero, remove the entry.
-      if (Dest[j].second != Scale)
-        Dest[j].second -= Scale;
-      else
-        Dest.erase(Dest.begin()+j);
-      Scale = 0;
-      break;
-    }
-    
-    // If we didn't consume this entry, add it to the end of the Dest list.
-    if (Scale)
-      Dest.push_back(std::make_pair(V, -Scale));
-  }
+  return AliasAnalysis::getModRefInfo(CS, Loc);
 }
 
 /// aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP instruction
@@ -472,8 +697,9 @@
 /// UnderlyingV2 is the same for V2.
 ///
 AliasAnalysis::AliasResult
-BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, unsigned V1Size,
-                             const Value *V2, unsigned V2Size,
+BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
+                             const Value *V2, uint64_t V2Size,
+                             const MDNode *V2TBAAInfo,
                              const Value *UnderlyingV1,
                              const Value *UnderlyingV2) {
   // If this GEP has been visited before, we're on a use-def cycle.
@@ -484,13 +710,14 @@
     return MayAlias;
 
   int64_t GEP1BaseOffset;
-  SmallVector<std::pair<const Value*, int64_t>, 4> GEP1VariableIndices;
+  SmallVector<VariableGEPIndex, 4> GEP1VariableIndices;
 
   // If we have two gep instructions with must-alias'ing base pointers, figure
   // out if the indexes to the GEP tell us anything about the derived pointer.
   if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
     // Do the base pointers alias?
-    AliasResult BaseAlias = aliasCheck(UnderlyingV1, ~0U, UnderlyingV2, ~0U);
+    AliasResult BaseAlias = aliasCheck(UnderlyingV1, UnknownSize, 0,
+                                       UnderlyingV2, UnknownSize, 0);
     
     // If we get a No or May, then return it immediately, no amount of analysis
     // will improve this situation.
@@ -503,7 +730,7 @@
       DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);
     
     int64_t GEP2BaseOffset;
-    SmallVector<std::pair<const Value*, int64_t>, 4> GEP2VariableIndices;
+    SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
     const Value *GEP2BasePtr =
       DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, TD);
     
@@ -519,7 +746,7 @@
     // Subtract the GEP2 pointer from the GEP1 pointer to find out their
     // symbolic difference.
     GEP1BaseOffset -= GEP2BaseOffset;
-    GetIndiceDifference(GEP1VariableIndices, GEP2VariableIndices);
+    GetIndexDifference(GEP1VariableIndices, GEP2VariableIndices);
     
   } else {
     // Check to see if these two pointers are related by the getelementptr
@@ -527,10 +754,11 @@
     // pointer, we know they cannot alias.
 
     // If both accesses are unknown size, we can't do anything useful here.
-    if (V1Size == ~0U && V2Size == ~0U)
+    if (V1Size == UnknownSize && V2Size == UnknownSize)
       return MayAlias;
 
-    AliasResult R = aliasCheck(UnderlyingV1, ~0U, V2, V2Size);
+    AliasResult R = aliasCheck(UnderlyingV1, UnknownSize, 0,
+                               V2, V2Size, V2TBAAInfo);
     if (R != MustAlias)
       // If V2 may alias GEP base pointer, conservatively returns MayAlias.
       // If V2 is known not to alias GEP base pointer, then the two values
@@ -574,14 +802,16 @@
   // provides an offset of 4 bytes (assuming a <= 4 byte access).
   for (unsigned i = 0, e = GEP1VariableIndices.size();
        i != e && GEP1BaseOffset;++i)
-    if (int64_t RemovedOffset = GEP1BaseOffset/GEP1VariableIndices[i].second)
-      GEP1BaseOffset -= RemovedOffset*GEP1VariableIndices[i].second;
+    if (int64_t RemovedOffset = GEP1BaseOffset/GEP1VariableIndices[i].Scale)
+      GEP1BaseOffset -= RemovedOffset*GEP1VariableIndices[i].Scale;
   
   // If our known offset is bigger than the access size, we know we don't have
   // an alias.
   if (GEP1BaseOffset) {
-    if (GEP1BaseOffset >= (int64_t)V2Size ||
-        GEP1BaseOffset <= -(int64_t)V1Size)
+    if (GEP1BaseOffset >= 0 ?
+        (V2Size != UnknownSize && (uint64_t)GEP1BaseOffset >= V2Size) :
+        (V1Size != UnknownSize && -(uint64_t)GEP1BaseOffset >= V1Size &&
+         GEP1BaseOffset != INT64_MIN))
       return NoAlias;
   }
   
@@ -591,8 +821,10 @@
 /// aliasSelect - Provide a bunch of ad-hoc rules to disambiguate a Select
 /// instruction against another.
 AliasAnalysis::AliasResult
-BasicAliasAnalysis::aliasSelect(const SelectInst *SI, unsigned SISize,
-                                const Value *V2, unsigned V2Size) {
+BasicAliasAnalysis::aliasSelect(const SelectInst *SI, uint64_t SISize,
+                                const MDNode *SITBAAInfo,
+                                const Value *V2, uint64_t V2Size,
+                                const MDNode *V2TBAAInfo) {
   // If this select has been visited before, we're on a use-def cycle.
   // Such cycles are only valid when PHI nodes are involved or in unreachable
   // code. The visitPHI function catches cycles containing PHIs, but there
@@ -605,13 +837,13 @@
   if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
     if (SI->getCondition() == SI2->getCondition()) {
       AliasResult Alias =
-        aliasCheck(SI->getTrueValue(), SISize,
-                   SI2->getTrueValue(), V2Size);
+        aliasCheck(SI->getTrueValue(), SISize, SITBAAInfo,
+                   SI2->getTrueValue(), V2Size, V2TBAAInfo);
       if (Alias == MayAlias)
         return MayAlias;
       AliasResult ThisAlias =
-        aliasCheck(SI->getFalseValue(), SISize,
-                   SI2->getFalseValue(), V2Size);
+        aliasCheck(SI->getFalseValue(), SISize, SITBAAInfo,
+                   SI2->getFalseValue(), V2Size, V2TBAAInfo);
       if (ThisAlias != Alias)
         return MayAlias;
       return Alias;
@@ -620,7 +852,7 @@
   // If both arms of the Select node NoAlias or MustAlias V2, then returns
   // NoAlias / MustAlias. Otherwise, returns MayAlias.
   AliasResult Alias =
-    aliasCheck(V2, V2Size, SI->getTrueValue(), SISize);
+    aliasCheck(V2, V2Size, V2TBAAInfo, SI->getTrueValue(), SISize, SITBAAInfo);
   if (Alias == MayAlias)
     return MayAlias;
 
@@ -630,7 +862,7 @@
   Visited.erase(V2);
 
   AliasResult ThisAlias =
-    aliasCheck(V2, V2Size, SI->getFalseValue(), SISize);
+    aliasCheck(V2, V2Size, V2TBAAInfo, SI->getFalseValue(), SISize, SITBAAInfo);
   if (ThisAlias != Alias)
     return MayAlias;
   return Alias;
@@ -639,8 +871,10 @@
 // aliasPHI - Provide a bunch of ad-hoc rules to disambiguate a PHI instruction
 // against another.
 AliasAnalysis::AliasResult
-BasicAliasAnalysis::aliasPHI(const PHINode *PN, unsigned PNSize,
-                             const Value *V2, unsigned V2Size) {
+BasicAliasAnalysis::aliasPHI(const PHINode *PN, uint64_t PNSize,
+                             const MDNode *PNTBAAInfo,
+                             const Value *V2, uint64_t V2Size,
+                             const MDNode *V2TBAAInfo) {
   // The PHI node has already been visited, avoid recursion any further.
   if (!Visited.insert(PN))
     return MayAlias;
@@ -651,16 +885,16 @@
   if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
     if (PN2->getParent() == PN->getParent()) {
       AliasResult Alias =
-        aliasCheck(PN->getIncomingValue(0), PNSize,
+        aliasCheck(PN->getIncomingValue(0), PNSize, PNTBAAInfo,
                    PN2->getIncomingValueForBlock(PN->getIncomingBlock(0)),
-                   V2Size);
+                   V2Size, V2TBAAInfo);
       if (Alias == MayAlias)
         return MayAlias;
       for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) {
         AliasResult ThisAlias =
-          aliasCheck(PN->getIncomingValue(i), PNSize,
+          aliasCheck(PN->getIncomingValue(i), PNSize, PNTBAAInfo,
                      PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
-                     V2Size);
+                     V2Size, V2TBAAInfo);
         if (ThisAlias != Alias)
           return MayAlias;
       }
@@ -681,7 +915,8 @@
       V1Srcs.push_back(PV1);
   }
 
-  AliasResult Alias = aliasCheck(V2, V2Size, V1Srcs[0], PNSize);
+  AliasResult Alias = aliasCheck(V2, V2Size, V2TBAAInfo,
+                                 V1Srcs[0], PNSize, PNTBAAInfo);
   // Early exit if the check of the first PHI source against V2 is MayAlias.
   // Other results are not possible.
   if (Alias == MayAlias)
@@ -697,7 +932,8 @@
     // don't need to assume that V2 is being visited recursively.
     Visited.erase(V2);
 
-    AliasResult ThisAlias = aliasCheck(V2, V2Size, V, PNSize);
+    AliasResult ThisAlias = aliasCheck(V2, V2Size, V2TBAAInfo,
+                                       V, PNSize, PNTBAAInfo);
     if (ThisAlias != Alias || ThisAlias == MayAlias)
       return MayAlias;
   }
@@ -709,8 +945,10 @@
 // such as array references.
 //
 AliasAnalysis::AliasResult
-BasicAliasAnalysis::aliasCheck(const Value *V1, unsigned V1Size,
-                               const Value *V2, unsigned V2Size) {
+BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
+                               const MDNode *V1TBAAInfo,
+                               const Value *V2, uint64_t V2Size,
+                               const MDNode *V2TBAAInfo) {
   // If either of the memory references is empty, it doesn't matter what the
   // pointer values are.
   if (V1Size == 0 || V2Size == 0)
@@ -778,8 +1016,8 @@
   // If the size of one access is larger than the entire object on the other
   // side, then we know such behavior is undefined and can assume no alias.
   if (TD)
-    if ((V1Size != ~0U && isObjectSmallerThan(O2, V1Size, *TD)) ||
-        (V2Size != ~0U && isObjectSmallerThan(O1, V2Size, *TD)))
+    if ((V1Size != UnknownSize && isObjectSmallerThan(O2, V1Size, *TD)) ||
+        (V2Size != UnknownSize && isObjectSmallerThan(O1, V2Size, *TD)))
       return NoAlias;
   
   // FIXME: This isn't aggressively handling alias(GEP, PHI) for example: if the
@@ -789,25 +1027,31 @@
     std::swap(V1Size, V2Size);
     std::swap(O1, O2);
   }
-  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1))
-    return aliasGEP(GV1, V1Size, V2, V2Size, O1, O2);
+  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
+    AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, V2TBAAInfo, O1, O2);
+    if (Result != MayAlias) return Result;
+  }
 
   if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
     std::swap(V1, V2);
     std::swap(V1Size, V2Size);
   }
-  if (const PHINode *PN = dyn_cast<PHINode>(V1))
-    return aliasPHI(PN, V1Size, V2, V2Size);
+  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
+    AliasResult Result = aliasPHI(PN, V1Size, V1TBAAInfo,
+                                  V2, V2Size, V2TBAAInfo);
+    if (Result != MayAlias) return Result;
+  }
 
   if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
     std::swap(V1, V2);
     std::swap(V1Size, V2Size);
   }
-  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1))
-    return aliasSelect(S1, V1Size, V2, V2Size);
+  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
+    AliasResult Result = aliasSelect(S1, V1Size, V1TBAAInfo,
+                                     V2, V2Size, V2TBAAInfo);
+    if (Result != MayAlias) return Result;
+  }
 
-  return MayAlias;
+  return AliasAnalysis::alias(Location(V1, V1Size, V1TBAAInfo),
+                              Location(V2, V2Size, V2TBAAInfo));
 }
-
-// Make sure that anything that uses AliasAnalysis pulls in this file.
-DEFINING_FILE_FOR(BasicAliasAnalysis)
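
A note on the interface shape above: alias queries now travel as
AliasAnalysis::Location triples (pointer, access size, TBAA tag) rather than
bare pointer/size pairs. As a rough illustration, a client pass might build
Locations like this; this is a hypothetical sketch mirroring the signatures
in the diff, with "AA" and "TD" assumed to come from the enclosing pass's
getAnalysis<>() calls, not code from this patch:

  #include "llvm/Analysis/AliasAnalysis.h"
  #include "llvm/Instructions.h"
  #include "llvm/LLVMContext.h"
  #include "llvm/Target/TargetData.h"
  using namespace llvm;

  static bool storesAreIndependent(AliasAnalysis &AA, const TargetData *TD,
                                   StoreInst *S1, StoreInst *S2) {
    uint64_t Sz1 = TD ? TD->getTypeStoreSize(S1->getValueOperand()->getType())
                      : AliasAnalysis::UnknownSize;
    uint64_t Sz2 = TD ? TD->getTypeStoreSize(S2->getValueOperand()->getType())
                      : AliasAnalysis::UnknownSize;
    // The size and the TBAA tag ride along with the pointer in a Location.
    AliasAnalysis::Location L1(S1->getPointerOperand(), Sz1,
                               S1->getMetadata(LLVMContext::MD_tbaa));
    AliasAnalysis::Location L2(S2->getPointerOperand(), Sz2,
                               S2->getMetadata(LLVMContext::MD_tbaa));
    return AA.alias(L1, L2) == AliasAnalysis::NoAlias;
  }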

Modified: llvm/branches/wendling/eh/lib/Analysis/CFGPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/CFGPrinter.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/CFGPrinter.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/CFGPrinter.cpp Tue Oct 26 19:48:03 2010
@@ -25,7 +25,9 @@
 namespace {
   struct CFGViewer : public FunctionPass {
     static char ID; // Pass identification, replacement for typeid
-    CFGViewer() : FunctionPass(&ID) {}
+    CFGViewer() : FunctionPass(ID) {
+      initializeCFGViewerPass(*PassRegistry::getPassRegistry());
+    }
 
     virtual bool runOnFunction(Function &F) {
       F.viewCFG();
@@ -41,12 +43,14 @@
 }
 
 char CFGViewer::ID = 0;
-INITIALIZE_PASS(CFGViewer, "view-cfg", "View CFG of function", false, true);
+INITIALIZE_PASS(CFGViewer, "view-cfg", "View CFG of function", false, true)
 
 namespace {
   struct CFGOnlyViewer : public FunctionPass {
     static char ID; // Pass identification, replacement for typeid
-    CFGOnlyViewer() : FunctionPass(&ID) {}
+    CFGOnlyViewer() : FunctionPass(ID) {
+      initializeCFGOnlyViewerPass(*PassRegistry::getPassRegistry());
+    }
 
     virtual bool runOnFunction(Function &F) {
       F.viewCFGOnly();
@@ -63,13 +67,14 @@
 
 char CFGOnlyViewer::ID = 0;
 INITIALIZE_PASS(CFGOnlyViewer, "view-cfg-only",
-                "View CFG of function (with no function bodies)", false, true);
+                "View CFG of function (with no function bodies)", false, true)
 
 namespace {
   struct CFGPrinter : public FunctionPass {
     static char ID; // Pass identification, replacement for typeid
-    CFGPrinter() : FunctionPass(&ID) {}
-    explicit CFGPrinter(void *pid) : FunctionPass(pid) {}
+    CFGPrinter() : FunctionPass(ID) {
+      initializeCFGPrinterPass(*PassRegistry::getPassRegistry());
+    }
 
     virtual bool runOnFunction(Function &F) {
       std::string Filename = "cfg." + F.getNameStr() + ".dot";
@@ -95,14 +100,16 @@
 }
 
 char CFGPrinter::ID = 0;
-static RegisterPass<CFGPrinter>
-P1("dot-cfg", "Print CFG of function to 'dot' file", false, true);
+INITIALIZE_PASS(CFGPrinter, "dot-cfg", "Print CFG of function to 'dot' file", 
+                false, true)
 
 namespace {
   struct CFGOnlyPrinter : public FunctionPass {
     static char ID; // Pass identification, replacement for typeid
-    CFGOnlyPrinter() : FunctionPass(&ID) {}
-    explicit CFGOnlyPrinter(void *pid) : FunctionPass(pid) {}
+    CFGOnlyPrinter() : FunctionPass(ID) {
+      initializeCFGOnlyPrinterPass(*PassRegistry::getPassRegistry());
+    }
+    
     virtual bool runOnFunction(Function &F) {
       std::string Filename = "cfg." + F.getNameStr() + ".dot";
       errs() << "Writing '" << Filename << "'...";
@@ -126,9 +133,9 @@
 }
 
 char CFGOnlyPrinter::ID = 0;
-static RegisterPass<CFGOnlyPrinter>
-P2("dot-cfg-only",
-   "Print CFG of function to 'dot' file (with no function bodies)", false, true);
+INITIALIZE_PASS(CFGOnlyPrinter, "dot-cfg-only",
+   "Print CFG of function to 'dot' file (with no function bodies)",
+   false, true)
 
 /// viewCFG - This function is meant for use from the debugger.  You can just
 /// say 'call F->viewCFG()' and a ghostview window should pop up from the
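
The CFGPrinter/CFGViewer edits above are one instance of the registration
migration this commit applies everywhere: the pass ID is now handed to the
FunctionPass constructor by reference, the constructor eagerly calls the
generated initialize<Name>Pass() hook on the global PassRegistry, and
INITIALIZE_PASS no longer takes a trailing semicolon. A minimal sketch of the
resulting boilerplate for a hypothetical pass (MyPrinter and its "my-printer"
argument are invented; the initializer declaration normally comes from
llvm/InitializePasses.h):

  #include "llvm/Pass.h"
  #include "llvm/PassRegistry.h"
  #include "llvm/Function.h"
  using namespace llvm;

  namespace llvm { void initializeMyPrinterPass(PassRegistry&); }

  namespace {
    struct MyPrinter : public FunctionPass {
      static char ID; // Pass identification, replacement for typeid
      MyPrinter() : FunctionPass(ID) {   // ID passed by reference now
        initializeMyPrinterPass(*PassRegistry::getPassRegistry());
      }
      virtual bool runOnFunction(Function &) { return false; }
    };
  }

  char MyPrinter::ID = 0;
  INITIALIZE_PASS(MyPrinter, "my-printer", "Example printer pass", false, true)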

Modified: llvm/branches/wendling/eh/lib/Analysis/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/CMakeLists.txt?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/CMakeLists.txt (original)
+++ llvm/branches/wendling/eh/lib/Analysis/CMakeLists.txt Tue Oct 26 19:48:03 2010
@@ -27,11 +27,12 @@
   LoopDependenceAnalysis.cpp
   LoopInfo.cpp
   LoopPass.cpp
+  MemDepPrinter.cpp
   MemoryBuiltins.cpp
   MemoryDependenceAnalysis.cpp
   ModuleDebugInfoPrinter.cpp
+  NoAliasAnalysis.cpp
   PHITransAddr.cpp
-  PointerTracking.cpp
   PostDominators.cpp
   ProfileEstimatorPass.cpp
   ProfileInfo.cpp
@@ -39,6 +40,7 @@
   ProfileInfoLoaderPass.cpp
   ProfileVerifierPass.cpp
   RegionInfo.cpp
+  RegionPass.cpp
   RegionPrinter.cpp
   ScalarEvolution.cpp
   ScalarEvolutionAliasAnalysis.cpp
@@ -46,7 +48,6 @@
   ScalarEvolutionNormalization.cpp
   SparsePropagation.cpp
   Trace.cpp
+  TypeBasedAliasAnalysis.cpp
   ValueTracking.cpp
   )
-
-target_link_libraries (LLVMAnalysis LLVMSupport)

Modified: llvm/branches/wendling/eh/lib/Analysis/ConstantFolding.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/ConstantFolding.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/ConstantFolding.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/ConstantFolding.cpp Tue Oct 26 19:48:03 2010
@@ -30,6 +30,7 @@
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/GetElementPtrTypeIterator.h"
 #include "llvm/Support/MathExtras.h"
+#include "llvm/System/FEnv.h"
 #include <cerrno>
 #include <cmath>
 using namespace llvm;
@@ -1000,6 +1001,7 @@
   case Intrinsic::usub_with_overflow:
   case Intrinsic::sadd_with_overflow:
   case Intrinsic::ssub_with_overflow:
+  case Intrinsic::smul_with_overflow:
   case Intrinsic::convert_from_fp16:
   case Intrinsic::convert_to_fp16:
     return true;
@@ -1039,10 +1041,10 @@
 
 static Constant *ConstantFoldFP(double (*NativeFP)(double), double V, 
                                 const Type *Ty) {
-  errno = 0;
+  sys::llvm_fenv_clearexcept();
   V = NativeFP(V);
-  if (errno != 0) {
-    errno = 0;
+  if (sys::llvm_fenv_testexcept()) {
+    sys::llvm_fenv_clearexcept();
     return 0;
   }
   
@@ -1056,10 +1058,10 @@
 
 static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
                                       double V, double W, const Type *Ty) {
-  errno = 0;
+  sys::llvm_fenv_clearexcept();
   V = NativeFP(V, W);
-  if (errno != 0) {
-    errno = 0;
+  if (sys::llvm_fenv_testexcept()) {
+    sys::llvm_fenv_clearexcept();
     return 0;
   }
   
@@ -1093,6 +1095,13 @@
 
       if (!Ty->isFloatTy() && !Ty->isDoubleTy())
         return 0;
+
+      /// We only fold functions with finite arguments. Folding NaN and inf is
+      /// likely to be aborted with an exception anyway, and some host libms
+      /// have known errors raising exceptions.
+      if (Op->getValueAPF().isNaN() || Op->getValueAPF().isInfinity())
+        return 0;
+
       /// Currently APFloat versions of these functions do not exist, so we use
       /// the host native double versions.  Float versions are not called
       /// directly but for all these it is true (float)(f((double)arg)) ==
@@ -1240,42 +1249,37 @@
       if (ConstantInt *Op2 = dyn_cast<ConstantInt>(Operands[1])) {
         switch (F->getIntrinsicID()) {
         default: break;
-        case Intrinsic::uadd_with_overflow: {
-          Constant *Res = ConstantExpr::getAdd(Op1, Op2);           // result.
-          Constant *Ops[] = {
-            Res, ConstantExpr::getICmp(CmpInst::ICMP_ULT, Res, Op1) // overflow.
-          };
-          return ConstantStruct::get(F->getContext(), Ops, 2, false);
-        }
-        case Intrinsic::usub_with_overflow: {
-          Constant *Res = ConstantExpr::getSub(Op1, Op2);           // result.
+        case Intrinsic::sadd_with_overflow:
+        case Intrinsic::uadd_with_overflow:
+        case Intrinsic::ssub_with_overflow:
+        case Intrinsic::usub_with_overflow:
+        case Intrinsic::smul_with_overflow: {
+          APInt Res;
+          bool Overflow;
+          switch (F->getIntrinsicID()) {
+          default: assert(0 && "Invalid case");
+          case Intrinsic::sadd_with_overflow:
+            Res = Op1->getValue().sadd_ov(Op2->getValue(), Overflow);
+            break;
+          case Intrinsic::uadd_with_overflow:
+            Res = Op1->getValue().uadd_ov(Op2->getValue(), Overflow);
+            break;
+          case Intrinsic::ssub_with_overflow:
+            Res = Op1->getValue().ssub_ov(Op2->getValue(), Overflow);
+            break;
+          case Intrinsic::usub_with_overflow:
+            Res = Op1->getValue().usub_ov(Op2->getValue(), Overflow);
+            break;
+          case Intrinsic::smul_with_overflow:
+            Res = Op1->getValue().smul_ov(Op2->getValue(), Overflow);
+            break;
+          }
           Constant *Ops[] = {
-            Res, ConstantExpr::getICmp(CmpInst::ICMP_UGT, Res, Op1) // overflow.
+            ConstantInt::get(F->getContext(), Res),
+            ConstantInt::get(Type::getInt1Ty(F->getContext()), Overflow)
           };
           return ConstantStruct::get(F->getContext(), Ops, 2, false);
         }
-        case Intrinsic::sadd_with_overflow: {
-          Constant *Res = ConstantExpr::getAdd(Op1, Op2);           // result.
-          Constant *Overflow = ConstantExpr::getSelect(
-              ConstantExpr::getICmp(CmpInst::ICMP_SGT,
-                ConstantInt::get(Op1->getType(), 0), Op1),
-              ConstantExpr::getICmp(CmpInst::ICMP_SGT, Res, Op2), 
-              ConstantExpr::getICmp(CmpInst::ICMP_SLT, Res, Op2)); // overflow.
-
-          Constant *Ops[] = { Res, Overflow };
-          return ConstantStruct::get(F->getContext(), Ops, 2, false);
-        }
-        case Intrinsic::ssub_with_overflow: {
-          Constant *Res = ConstantExpr::getSub(Op1, Op2);           // result.
-          Constant *Overflow = ConstantExpr::getSelect(
-              ConstantExpr::getICmp(CmpInst::ICMP_SGT,
-                ConstantInt::get(Op2->getType(), 0), Op2),
-              ConstantExpr::getICmp(CmpInst::ICMP_SLT, Res, Op1), 
-              ConstantExpr::getICmp(CmpInst::ICMP_SGT, Res, Op1)); // overflow.
-
-          Constant *Ops[] = { Res, Overflow };
-          return ConstantStruct::get(F->getContext(), Ops, 2, false);
-        }
         }
       }
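
The rewrite above folds four hand-rolled overflow computations into the APInt
*_ov helpers, which return the wrapped result and report overflow through an
out-parameter. A small standalone sketch of their behavior (toy code for
illustration, not part of the patch):

  #include "llvm/ADT/APInt.h"
  #include <cassert>
  using namespace llvm;

  void overflowDemo() {
    bool Overflow = false;
    APInt A(8, 100), B(8, 100);          // two i8 values
    APInt Sum = A.sadd_ov(B, Overflow);  // signed 100 + 100 exceeds i8 range
    assert(Overflow && Sum.getSExtValue() == -56); // wrapped result
    APInt C(8, 2), D(8, 3);
    APInt Prod = C.smul_ov(D, Overflow); // 2 * 3 fits, no overflow
    assert(!Overflow && Prod == 6);
  }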
       

Modified: llvm/branches/wendling/eh/lib/Analysis/DbgInfoPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/DbgInfoPrinter.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/DbgInfoPrinter.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/DbgInfoPrinter.cpp Tue Oct 26 19:48:03 2010
@@ -40,7 +40,9 @@
     void printVariableDeclaration(const Value *V);
   public:
     static char ID; // Pass identification
-    PrintDbgInfo() : FunctionPass(&ID), Out(outs()) {}
+    PrintDbgInfo() : FunctionPass(ID), Out(errs()) {
+      initializePrintDbgInfoPass(*PassRegistry::getPassRegistry());
+    }
 
     virtual bool runOnFunction(Function &F);
     virtual void getAnalysisUsage(AnalysisUsage &AU) const {
@@ -48,10 +50,11 @@
     }
   };
   char PrintDbgInfo::ID = 0;
-  INITIALIZE_PASS(PrintDbgInfo, "print-dbginfo",
-                  "Print debug info in human readable form", false, false);
 }
 
+INITIALIZE_PASS(PrintDbgInfo, "print-dbginfo",
+                "Print debug info in human readable form", false, false)
+
 FunctionPass *llvm::createDbgInfoPrinterPass() { return new PrintDbgInfo(); }
 
 void PrintDbgInfo::printVariableDeclaration(const Value *V) {

Modified: llvm/branches/wendling/eh/lib/Analysis/DebugInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/DebugInfo.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/DebugInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/DebugInfo.cpp Tue Oct 26 19:48:03 2010
@@ -22,6 +22,7 @@
 #include "llvm/Analysis/ValueTracking.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/STLExtras.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/Dwarf.h"
 #include "llvm/Support/raw_ostream.h"
@@ -32,6 +33,21 @@
 // DIDescriptor
 //===----------------------------------------------------------------------===//
 
+DIDescriptor::DIDescriptor(const DIFile F) : DbgNode(F.DbgNode) {
+}
+
+DIDescriptor::DIDescriptor(const DISubprogram F) : DbgNode(F.DbgNode) {
+}
+
+DIDescriptor::DIDescriptor(const DILexicalBlock F) : DbgNode(F.DbgNode) {
+}
+
+DIDescriptor::DIDescriptor(const DIVariable F) : DbgNode(F.DbgNode) {
+}
+
+DIDescriptor::DIDescriptor(const DIType F) : DbgNode(F.DbgNode) {
+}
+
 StringRef
 DIDescriptor::getStringField(unsigned Elt) const {
   if (DbgNode == 0)
@@ -74,6 +90,15 @@
   return 0;
 }
 
+Constant *DIDescriptor::getConstantField(unsigned Elt) const {
+  if (DbgNode == 0)
+    return 0;
+
+  if (Elt < DbgNode->getNumOperands())
+      return dyn_cast_or_null<Constant>(DbgNode->getOperand(Elt));
+  return 0;
+}
+
 Function *DIDescriptor::getFunctionField(unsigned Elt) const {
   if (DbgNode == 0)
     return 0;
@@ -84,7 +109,9 @@
 }
 
 unsigned DIVariable::getNumAddrElements() const {
-  return DbgNode->getNumOperands()-6;
+  if (getVersion() <= llvm::LLVMDebugVersion8)
+    return DbgNode->getNumOperands()-6;
+  return DbgNode->getNumOperands()-7;
 }
 
 
@@ -110,6 +137,7 @@
   case dwarf::DW_TAG_restrict_type:
   case dwarf::DW_TAG_member:
   case dwarf::DW_TAG_inheritance:
+  case dwarf::DW_TAG_friend:
     return true;
   default:
     // CompositeTypes are currently modelled as DerivedTypes.
@@ -162,7 +190,8 @@
 /// isGlobalVariable - Return true if the specified tag is legal for
 /// DIGlobalVariable.
 bool DIDescriptor::isGlobalVariable() const {
-  return DbgNode && getTag() == dwarf::DW_TAG_variable;
+  return DbgNode && (getTag() == dwarf::DW_TAG_variable ||
+                     getTag() == dwarf::DW_TAG_constant);
 }
 
 /// isGlobal - Return true if the specified tag is legal for DIGlobal.
@@ -170,6 +199,12 @@
   return isGlobalVariable();
 }
 
+/// isUnspecifiedParameter - Return true if the specified tag is
+/// DW_TAG_unspecified_parameters.
+bool DIDescriptor::isUnspecifiedParameter() const {
+  return DbgNode && getTag() == dwarf::DW_TAG_unspecified_parameters;
+}
+
 /// isScope - Return true if the specified tag is one of the scope
 /// related tag.
 bool DIDescriptor::isScope() const {
@@ -235,7 +270,7 @@
 
 /// replaceAllUsesWith - Replace all uses of debug info referenced by
 /// this descriptor.
-void DIDerivedType::replaceAllUsesWith(DIDescriptor &D) {
+void DIType::replaceAllUsesWith(DIDescriptor &D) {
   if (!DbgNode)
     return;
 
@@ -249,6 +284,7 @@
     const MDNode *DN = D;
     const Value *V = cast_or_null<Value>(DN);
     Node->replaceAllUsesWith(const_cast<Value*>(V));
+    MDNode::deleteTemporary(Node);
   }
 }
 
@@ -276,6 +312,16 @@
   return true;
 }
 
+/// Verify - Verify that a basic type descriptor is well formed.
+bool DIBasicType::Verify() const {
+  return isBasicType();
+}
+
+/// Verify - Verify that a derived type descriptor is well formed.
+bool DIDerivedType::Verify() const {
+  return isDerivedType();
+}
+
 /// Verify - Verify that a composite type descriptor is well formed.
 bool DICompositeType::Verify() const {
   if (!DbgNode)
@@ -326,7 +372,7 @@
   if (!Ty.Verify())
     return false;
 
-  if (!getGlobal())
+  if (!getGlobal() && !getConstant())
     return false;
 
   return true;
@@ -663,15 +709,13 @@
 /// GetOrCreateArray - Create a descriptor for an array of descriptors.
 /// This implicitly uniques the arrays created.
 DIArray DIFactory::GetOrCreateArray(DIDescriptor *Tys, unsigned NumTys) {
-  SmallVector<Value*, 16> Elts;
-
-  if (NumTys == 0)
-    Elts.push_back(llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)));
-  else
-    for (unsigned i = 0; i != NumTys; ++i)
-      Elts.push_back(Tys[i]);
+  if (NumTys == 0) {
+    Value *Null = llvm::Constant::getNullValue(Type::getInt32Ty(VMContext));
+    return DIArray(MDNode::get(VMContext, &Null, 1));
+  }
 
-  return DIArray(MDNode::get(VMContext,Elts.data(), Elts.size()));
+  SmallVector<Value *, 16> Elts(Tys, Tys+NumTys);
+  return DIArray(MDNode::get(VMContext, Elts.data(), Elts.size()));
 }
 
 /// GetOrCreateSubrange - Create a descriptor for a value range.  This
@@ -686,7 +730,14 @@
   return DISubrange(MDNode::get(VMContext, &Elts[0], 3));
 }
 
-
+/// CreateUnspecifiedParameter - Create an unspecified parameter descriptor
+/// for the subroutine type.
+DIDescriptor DIFactory::CreateUnspecifiedParameter() {
+  Value *Elts[] = {
+    GetTagConstant(dwarf::DW_TAG_unspecified_parameters)
+  };
+  return DIDescriptor(MDNode::get(VMContext, &Elts[0], 1));
+}
 
 /// CreateCompileUnit - Create a new descriptor for the specified compile
 /// unit.  Note that this does not unique compile units within the module.
@@ -898,7 +949,26 @@
     ConstantInt::get(Type::getInt32Ty(VMContext), RuntimeLang),
     ContainingType
   };
-  return DICompositeType(MDNode::get(VMContext, &Elts[0], 13));
+
+  MDNode *Node = MDNode::get(VMContext, &Elts[0], 13);
+  // Create a named metadata so that we do not lose this enum info.
+  if (Tag == dwarf::DW_TAG_enumeration_type) {
+    NamedMDNode *NMD = M.getOrInsertNamedMetadata("llvm.dbg.enum");
+    NMD->addOperand(Node);
+  }
+  return DICompositeType(Node);
+}
+
+
+/// CreateTemporaryType - Create a temporary forward-declared type.
+DIType DIFactory::CreateTemporaryType() {
+  // Give the temporary MDNode a tag. It doesn't matter what tag we
+  // use here as long as DIType accepts it.
+  Value *Elts[] = {
+    GetTagConstant(DW_TAG_base_type)
+  };
+  MDNode *Node = MDNode::getTemporary(VMContext, Elts, array_lengthof(Elts));
+  return DIType(Node);
 }
 
 
@@ -914,8 +984,8 @@
                                                  unsigned Flags,
                                                  DIType DerivedFrom,
                                                  DIArray Elements,
-                                                 unsigned RuntimeLang) {
-
+                                                 unsigned RuntimeLang,
+                                                 MDNode *ContainingType) {
   Value *Elts[] = {
     GetTagConstant(Tag),
     Context,
@@ -928,9 +998,16 @@
     ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
     DerivedFrom,
     Elements,
-    ConstantInt::get(Type::getInt32Ty(VMContext), RuntimeLang)
+    ConstantInt::get(Type::getInt32Ty(VMContext), RuntimeLang),
+    ContainingType
   };
-  return DICompositeType(MDNode::get(VMContext, &Elts[0], 12));
+  MDNode *Node = MDNode::get(VMContext, &Elts[0], 13);
+  // Create a named metadata so that we do not lose this enum info.
+  if (Tag == dwarf::DW_TAG_enumeration_type) {
+    NamedMDNode *NMD = M.getOrInsertNamedMetadata("llvm.dbg.enum");
+    NMD->addOperand(Node);
+  }
+  return DICompositeType(Node);
 }
 
 
@@ -947,7 +1024,7 @@
                                          bool isDefinition,
                                          unsigned VK, unsigned VIndex,
                                          DIType ContainingType,
-                                         bool isArtificial,
+                                         unsigned Flags,
                                          bool isOptimized,
                                          Function *Fn) {
 
@@ -966,7 +1043,7 @@
     ConstantInt::get(Type::getInt32Ty(VMContext), (unsigned)VK),
     ConstantInt::get(Type::getInt32Ty(VMContext), VIndex),
     ContainingType,
-    ConstantInt::get(Type::getInt1Ty(VMContext), isArtificial),
+    ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
     ConstantInt::get(Type::getInt1Ty(VMContext), isOptimized),
     Fn
   };
@@ -1000,7 +1077,7 @@
     DeclNode->getOperand(11), // Virtuality
     DeclNode->getOperand(12), // VIndex
     DeclNode->getOperand(13), // Containing Type
-    DeclNode->getOperand(14), // isArtificial
+    DeclNode->getOperand(14), // Flags
     DeclNode->getOperand(15), // isOptimized
     SPDeclaration.getFunction()
   };
@@ -1045,13 +1122,46 @@
   return DIGlobalVariable(Node);
 }
 
+/// CreateGlobalVariable - Create a new descriptor for the specified constant.
+DIGlobalVariable
+DIFactory::CreateGlobalVariable(DIDescriptor Context, StringRef Name,
+                                StringRef DisplayName,
+                                StringRef LinkageName,
+                                DIFile F,
+                                unsigned LineNo, DIType Ty, bool isLocalToUnit,
+                                bool isDefinition, llvm::Constant *Val) {
+  Value *Elts[] = {
+    GetTagConstant(dwarf::DW_TAG_variable),
+    llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+    Context,
+    MDString::get(VMContext, Name),
+    MDString::get(VMContext, DisplayName),
+    MDString::get(VMContext, LinkageName),
+    F,
+    ConstantInt::get(Type::getInt32Ty(VMContext), LineNo),
+    Ty,
+    ConstantInt::get(Type::getInt1Ty(VMContext), isLocalToUnit),
+    ConstantInt::get(Type::getInt1Ty(VMContext), isDefinition),
+    Val
+  };
+
+  Value *const *Vs = &Elts[0];
+  MDNode *Node = MDNode::get(VMContext, Vs, 12);
+
+  // Create a named metadata so that we do not lose this mdnode.
+  NamedMDNode *NMD = M.getOrInsertNamedMetadata("llvm.dbg.gv");
+  NMD->addOperand(Node);
+
+  return DIGlobalVariable(Node);
+}
 
 /// CreateVariable - Create a new descriptor for the specified variable.
 DIVariable DIFactory::CreateVariable(unsigned Tag, DIDescriptor Context,
                                      StringRef Name,
                                      DIFile F,
                                      unsigned LineNo,
-                                     DIType Ty, bool AlwaysPreserve) {
+                                     DIType Ty, bool AlwaysPreserve,
+                                     unsigned Flags) {
   Value *Elts[] = {
     GetTagConstant(Tag),
     Context,
@@ -1059,8 +1169,9 @@
     F,
     ConstantInt::get(Type::getInt32Ty(VMContext), LineNo),
     Ty,
+    ConstantInt::get(Type::getInt32Ty(VMContext), Flags)
   };
-  MDNode *Node = MDNode::get(VMContext, &Elts[0], 6);
+  MDNode *Node = MDNode::get(VMContext, &Elts[0], 7);
   if (AlwaysPreserve) {
     // The optimizer may remove local variable. If there is an interest
     // to preserve variable info in such situation then stash it in a
@@ -1085,21 +1196,20 @@
 /// CreateComplexVariable - Create a new descriptor for the specified variable
 /// which has a complex address expression for its address.
 DIVariable DIFactory::CreateComplexVariable(unsigned Tag, DIDescriptor Context,
-                                            const std::string &Name,
-                                            DIFile F,
+                                            StringRef Name, DIFile F,
                                             unsigned LineNo,
-                                            DIType Ty,
-                                            SmallVector<Value *, 9> &addr) {
-  SmallVector<Value *, 9> Elts;
+                                            DIType Ty, Value *const *Addr,
+                                            unsigned NumAddr) {
+  SmallVector<Value *, 15> Elts;
   Elts.push_back(GetTagConstant(Tag));
   Elts.push_back(Context);
   Elts.push_back(MDString::get(VMContext, Name));
   Elts.push_back(F);
   Elts.push_back(ConstantInt::get(Type::getInt32Ty(VMContext), LineNo));
   Elts.push_back(Ty);
-  Elts.insert(Elts.end(), addr.begin(), addr.end());
+  Elts.append(Addr, Addr+NumAddr);
 
-  return DIVariable(MDNode::get(VMContext, &Elts[0], 6+addr.size()));
+  return DIVariable(MDNode::get(VMContext, Elts.data(), Elts.size()));
 }
 
 
@@ -1213,6 +1323,14 @@
   return CallInst::Create(ValueFn, Args, Args+3, "", InsertAtEnd);
 }
 
+// RecordType - Record DIType in a module such that it is not lost even if
+// it is not referenced through debug info anchors.
+void DIFactory::RecordType(DIType T) {
+  NamedMDNode *NMD = M.getOrInsertNamedMetadata("llvm.dbg.ty");
+  NMD->addOperand(T);
+}
+
+
 //===----------------------------------------------------------------------===//
 // DebugInfoFinder implementations.
 //===----------------------------------------------------------------------===//
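
CreateTemporaryType and the MDNode::deleteTemporary call added to
DIType::replaceAllUsesWith above follow the temporary-metadata protocol:
create a placeholder node for a forward-declared type, let other nodes
reference it while the real definition is built, then RAUW in the real node
and free the temporary. A hedged standalone sketch of that cycle, using toy
operands rather than real debug info fields:

  #include "llvm/Constants.h"
  #include "llvm/LLVMContext.h"
  #include "llvm/Metadata.h"
  #include "llvm/Type.h"
  using namespace llvm;

  MDNode *buildWithForwardDecl(LLVMContext &Ctx) {
    Value *Tag = ConstantInt::get(Type::getInt32Ty(Ctx), 0);
    // Placeholder standing in for a type node that isn't built yet.
    MDNode *Temp = MDNode::getTemporary(Ctx, &Tag, 1);
    Value *OuterElts[] = { Tag, Temp };
    MDNode *Outer = MDNode::get(Ctx, OuterElts, 2); // points at the temporary
    Value *RealElts[] = { Tag };
    MDNode *Real = MDNode::get(Ctx, RealElts, 1);   // the real definition
    Temp->replaceAllUsesWith(Real); // patch every reference to the placeholder
    MDNode::deleteTemporary(Temp);  // temporaries are never freed automatically
    return Outer;
  }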

Modified: llvm/branches/wendling/eh/lib/Analysis/DomPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/DomPrinter.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/DomPrinter.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/DomPrinter.cpp Tue Oct 26 19:48:03 2010
@@ -86,74 +86,90 @@
 struct DomViewer
   : public DOTGraphTraitsViewer<DominatorTree, false> {
   static char ID;
-  DomViewer() : DOTGraphTraitsViewer<DominatorTree, false>("dom", &ID){}
+  DomViewer() : DOTGraphTraitsViewer<DominatorTree, false>("dom", ID){
+    initializeDomViewerPass(*PassRegistry::getPassRegistry());
+  }
 };
 
 struct DomOnlyViewer
   : public DOTGraphTraitsViewer<DominatorTree, true> {
   static char ID;
-  DomOnlyViewer() : DOTGraphTraitsViewer<DominatorTree, true>("domonly", &ID){}
+  DomOnlyViewer() : DOTGraphTraitsViewer<DominatorTree, true>("domonly", ID){
+    initializeDomOnlyViewerPass(*PassRegistry::getPassRegistry());
+  }
 };
 
 struct PostDomViewer
   : public DOTGraphTraitsViewer<PostDominatorTree, false> {
   static char ID;
   PostDomViewer() :
-    DOTGraphTraitsViewer<PostDominatorTree, false>("postdom", &ID){}
+    DOTGraphTraitsViewer<PostDominatorTree, false>("postdom", ID){
+      initializePostDomViewerPass(*PassRegistry::getPassRegistry());
+    }
 };
 
 struct PostDomOnlyViewer
   : public DOTGraphTraitsViewer<PostDominatorTree, true> {
   static char ID;
   PostDomOnlyViewer() :
-    DOTGraphTraitsViewer<PostDominatorTree, true>("postdomonly", &ID){}
+    DOTGraphTraitsViewer<PostDominatorTree, true>("postdomonly", ID){
+      initializePostDomOnlyViewerPass(*PassRegistry::getPassRegistry());
+    }
 };
 } // end anonymous namespace
 
 char DomViewer::ID = 0;
 INITIALIZE_PASS(DomViewer, "view-dom",
-                "View dominance tree of function", false, false);
+                "View dominance tree of function", false, false)
 
 char DomOnlyViewer::ID = 0;
 INITIALIZE_PASS(DomOnlyViewer, "view-dom-only",
                 "View dominance tree of function (with no function bodies)",
-                false, false);
+                false, false)
 
 char PostDomViewer::ID = 0;
 INITIALIZE_PASS(PostDomViewer, "view-postdom",
-                "View postdominance tree of function", false, false);
+                "View postdominance tree of function", false, false)
 
 char PostDomOnlyViewer::ID = 0;
 INITIALIZE_PASS(PostDomOnlyViewer, "view-postdom-only",
                 "View postdominance tree of function "
                 "(with no function bodies)",
-                false, false);
+                false, false)
 
 namespace {
 struct DomPrinter
   : public DOTGraphTraitsPrinter<DominatorTree, false> {
   static char ID;
-  DomPrinter() : DOTGraphTraitsPrinter<DominatorTree, false>("dom", &ID) {}
+  DomPrinter() : DOTGraphTraitsPrinter<DominatorTree, false>("dom", ID) {
+    initializeDomPrinterPass(*PassRegistry::getPassRegistry());
+  }
 };
 
 struct DomOnlyPrinter
   : public DOTGraphTraitsPrinter<DominatorTree, true> {
   static char ID;
-  DomOnlyPrinter() : DOTGraphTraitsPrinter<DominatorTree, true>("domonly", &ID) {}
+  DomOnlyPrinter() : DOTGraphTraitsPrinter<DominatorTree, true>("domonly", ID) {
+    initializeDomOnlyPrinterPass(*PassRegistry::getPassRegistry());
+  }
 };
 
 struct PostDomPrinter
   : public DOTGraphTraitsPrinter<PostDominatorTree, false> {
   static char ID;
   PostDomPrinter() :
-    DOTGraphTraitsPrinter<PostDominatorTree, false>("postdom", &ID) {}
+    DOTGraphTraitsPrinter<PostDominatorTree, false>("postdom", ID) {
+      initializePostDomPrinterPass(*PassRegistry::getPassRegistry());
+    }
 };
 
 struct PostDomOnlyPrinter
   : public DOTGraphTraitsPrinter<PostDominatorTree, true> {
   static char ID;
   PostDomOnlyPrinter() :
-    DOTGraphTraitsPrinter<PostDominatorTree, true>("postdomonly", &ID) {}
+    DOTGraphTraitsPrinter<PostDominatorTree, true>("postdomonly", ID) {
+      initializePostDomOnlyPrinterPass(*PassRegistry::getPassRegistry());
+    }
 };
 } // end anonymous namespace
 
@@ -162,24 +178,24 @@
 char DomPrinter::ID = 0;
 INITIALIZE_PASS(DomPrinter, "dot-dom",
                 "Print dominance tree of function to 'dot' file",
-                false, false);
+                false, false)
 
 char DomOnlyPrinter::ID = 0;
 INITIALIZE_PASS(DomOnlyPrinter, "dot-dom-only",
                 "Print dominance tree of function to 'dot' file "
                 "(with no function bodies)",
-                false, false);
+                false, false)
 
 char PostDomPrinter::ID = 0;
 INITIALIZE_PASS(PostDomPrinter, "dot-postdom",
                 "Print postdominance tree of function to 'dot' file",
-                false, false);
+                false, false)
 
 char PostDomOnlyPrinter::ID = 0;
 INITIALIZE_PASS(PostDomOnlyPrinter, "dot-postdom-only",
                 "Print postdominance tree of function to 'dot' file "
                 "(with no function bodies)",
-                false, false);
+                false, false)
 
 // Create methods available outside of this file, to use them in
 // "include/llvm/LinkAllPasses.h". Otherwise the pass would be deleted by
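
All eight viewer/printer passes in this file are thin instantiations of the
shared DOTGraphTraitsViewer and DOTGraphTraitsPrinter templates (the second
template argument selects the "no function bodies" rendering), so the
migration is the same constructor-and-macro change in each. For some new
graph-backed analysis the pattern would look like this sketch, where
MyAnalysis, initializeMyAnalysisViewerPass, and "view-myanalysis" are
hypothetical and a DOTGraphTraits specialization for the analysis is assumed
to exist:

  struct MyAnalysisViewer
    : public DOTGraphTraitsViewer<MyAnalysis, false> {
    static char ID;
    MyAnalysisViewer()
      : DOTGraphTraitsViewer<MyAnalysis, false>("myanalysis", ID) {
      initializeMyAnalysisViewerPass(*PassRegistry::getPassRegistry());
    }
  };

  char MyAnalysisViewer::ID = 0;
  INITIALIZE_PASS(MyAnalysisViewer, "view-myanalysis",
                  "View MyAnalysis graph of function", false, false)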

Modified: llvm/branches/wendling/eh/lib/Analysis/IPA/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/IPA/CMakeLists.txt?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/IPA/CMakeLists.txt (original)
+++ llvm/branches/wendling/eh/lib/Analysis/IPA/CMakeLists.txt Tue Oct 26 19:48:03 2010
@@ -3,4 +3,5 @@
   CallGraphSCCPass.cpp
   FindUsedTypes.cpp
   GlobalsModRef.cpp
+  IPA.cpp
   )

Modified: llvm/branches/wendling/eh/lib/Analysis/IPA/CallGraph.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/IPA/CallGraph.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/IPA/CallGraph.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/IPA/CallGraph.cpp Tue Oct 26 19:48:03 2010
@@ -42,8 +42,10 @@
 
 public:
   static char ID; // Class identification, replacement for typeinfo
-  BasicCallGraph() : ModulePass(&ID), Root(0), 
-    ExternalCallingNode(0), CallsExternalNode(0) {}
+  BasicCallGraph() : ModulePass(ID), Root(0), 
+    ExternalCallingNode(0), CallsExternalNode(0) {
+      initializeBasicCallGraphPass(*PassRegistry::getPassRegistry());
+    }
 
   // runOnModule - Compute the call graph for the specified module.
   virtual bool runOnModule(Module &M) {
@@ -86,8 +88,8 @@
   /// an analysis interface through multiple inheritance.  If needed, it should
   /// override this to adjust the this pointer as needed for the specified pass
   /// info.
-  virtual void *getAdjustedAnalysisPointer(const PassInfo *PI) {
-    if (PI->isPassID(&CallGraph::ID))
+  virtual void *getAdjustedAnalysisPointer(AnalysisID PI) {
+    if (PI == &CallGraph::ID)
       return (CallGraph*)this;
     return this;
   }
@@ -171,9 +173,9 @@
 
 } //End anonymous namespace
 
-static RegisterAnalysisGroup<CallGraph> X("Call Graph");
+INITIALIZE_ANALYSIS_GROUP(CallGraph, "Call Graph", BasicCallGraph)
 INITIALIZE_AG_PASS(BasicCallGraph, CallGraph, "basiccg",
-                   "Basic CallGraph Construction", false, true, true);
+                   "Basic CallGraph Construction", false, true, true)
 
 char CallGraph::ID = 0;
 char BasicCallGraph::ID = 0;
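
Two idioms meet in the CallGraph change: analysis groups are now registered
with INITIALIZE_ANALYSIS_GROUP, whose third argument names the group's
default implementation, and an implementation that inherits the interface
through multiple inheritance answers getAdjustedAnalysisPointer with an
AnalysisID (the address of the interface's ID) instead of a PassInfo. A
compressed sketch with hypothetical names (MyGroup as the interface, MyImpl
as its default implementation):

  struct MyImpl : public ModulePass, public MyGroup {
    static char ID;
    MyImpl() : ModulePass(ID) {}
    // Queried by interface ID; adjust 'this' across multiple inheritance.
    virtual void *getAdjustedAnalysisPointer(AnalysisID PI) {
      if (PI == &MyGroup::ID)
        return (MyGroup*)this;
      return this;
    }
  };

  char MyImpl::ID = 0;
  INITIALIZE_ANALYSIS_GROUP(MyGroup, "My Analysis Group", MyImpl)
  INITIALIZE_AG_PASS(MyImpl, MyGroup, "my-impl", "Default MyGroup Impl",
                     false, true, true) // cfg-only, is-analysis, default impl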

Modified: llvm/branches/wendling/eh/lib/Analysis/IPA/CallGraphSCCPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/IPA/CallGraphSCCPass.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/IPA/CallGraphSCCPass.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/IPA/CallGraphSCCPass.cpp Tue Oct 26 19:48:03 2010
@@ -45,7 +45,7 @@
 public:
   static char ID;
   explicit CGPassManager(int Depth) 
-    : ModulePass(&ID), PMDataManager(Depth) { }
+    : ModulePass(ID), PMDataManager(Depth) { }
 
   /// run - Execute all of the passes scheduled for execution.  Keep track of
   /// whether any of the passes modifies the module, and if so, return true.
@@ -582,9 +582,8 @@
     
   public:
     static char ID;
-    PrintCallGraphPass() : CallGraphSCCPass(&ID), Out(dbgs()) {}
     PrintCallGraphPass(const std::string &B, raw_ostream &o)
-      : CallGraphSCCPass(&ID), Banner(B), Out(o) {}
+      : CallGraphSCCPass(ID), Banner(B), Out(o) {}
     
     virtual void getAnalysisUsage(AnalysisUsage &AU) const {
       AU.setPreservesAll();

Modified: llvm/branches/wendling/eh/lib/Analysis/IPA/FindUsedTypes.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/IPA/FindUsedTypes.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/IPA/FindUsedTypes.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/IPA/FindUsedTypes.cpp Tue Oct 26 19:48:03 2010
@@ -24,7 +24,7 @@
 
 char FindUsedTypes::ID = 0;
 INITIALIZE_PASS(FindUsedTypes, "print-used-types",
-                "Find Used Types", false, true);
+                "Find Used Types", false, true)
 
 // IncorporateType - Incorporate one type and all of its subtypes into the
 // collection of used types.

Modified: llvm/branches/wendling/eh/lib/Analysis/IPA/GlobalsModRef.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/IPA/GlobalsModRef.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/IPA/GlobalsModRef.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/IPA/GlobalsModRef.cpp Tue Oct 26 19:48:03 2010
@@ -47,14 +47,15 @@
     /// GlobalInfo - Maintain mod/ref info for all of the globals without
     /// addresses taken that are read or written (transitively) by this
     /// function.
-    std::map<GlobalValue*, unsigned> GlobalInfo;
+    std::map<const GlobalValue*, unsigned> GlobalInfo;
 
     /// MayReadAnyGlobal - May read global variables, but it is not known which.
     bool MayReadAnyGlobal;
 
-    unsigned getInfoForGlobal(GlobalValue *GV) const {
+    unsigned getInfoForGlobal(const GlobalValue *GV) const {
       unsigned Effect = MayReadAnyGlobal ? AliasAnalysis::Ref : 0;
-      std::map<GlobalValue*, unsigned>::const_iterator I = GlobalInfo.find(GV);
+      std::map<const GlobalValue*, unsigned>::const_iterator I =
+        GlobalInfo.find(GV);
       if (I != GlobalInfo.end())
         Effect |= I->second;
       return Effect;
@@ -71,23 +72,25 @@
   class GlobalsModRef : public ModulePass, public AliasAnalysis {
     /// NonAddressTakenGlobals - The globals that do not have their addresses
     /// taken.
-    std::set<GlobalValue*> NonAddressTakenGlobals;
+    std::set<const GlobalValue*> NonAddressTakenGlobals;
 
     /// IndirectGlobals - The memory pointed to by this global is known to be
     /// 'owned' by the global.
-    std::set<GlobalValue*> IndirectGlobals;
+    std::set<const GlobalValue*> IndirectGlobals;
 
     /// AllocsForIndirectGlobals - If an instruction allocates memory for an
     /// indirect global, this map indicates which one.
-    std::map<Value*, GlobalValue*> AllocsForIndirectGlobals;
+    std::map<const Value*, const GlobalValue*> AllocsForIndirectGlobals;
 
     /// FunctionInfo - For each function, keep track of what globals are
     /// modified or read.
-    std::map<Function*, FunctionRecord> FunctionInfo;
+    std::map<const Function*, FunctionRecord> FunctionInfo;
 
   public:
     static char ID;
-    GlobalsModRef() : ModulePass(&ID) {}
+    GlobalsModRef() : ModulePass(ID) {
+      initializeGlobalsModRefPass(*PassRegistry::getPassRegistry());
+    }
 
     bool runOnModule(Module &M) {
       InitializeAliasAnalysis(this);                 // set up super class
@@ -105,41 +108,40 @@
     //------------------------------------------------
     // Implement the AliasAnalysis API
     //
-    AliasResult alias(const Value *V1, unsigned V1Size,
-                      const Value *V2, unsigned V2Size);
-    ModRefResult getModRefInfo(CallSite CS, Value *P, unsigned Size);
-    ModRefResult getModRefInfo(CallSite CS1, CallSite CS2) {
-      return AliasAnalysis::getModRefInfo(CS1,CS2);
+    AliasResult alias(const Location &LocA, const Location &LocB);
+    ModRefResult getModRefInfo(ImmutableCallSite CS,
+                               const Location &Loc);
+    ModRefResult getModRefInfo(ImmutableCallSite CS1,
+                               ImmutableCallSite CS2) {
+      return AliasAnalysis::getModRefInfo(CS1, CS2);
     }
 
     /// getModRefBehavior - Return the behavior of the specified function if
     /// called from the specified call site.  The call site may be null in which
     /// case the most generic behavior of this function should be returned.
-    ModRefBehavior getModRefBehavior(Function *F,
-                                         std::vector<PointerAccessInfo> *Info) {
+    ModRefBehavior getModRefBehavior(const Function *F) {
       if (FunctionRecord *FR = getFunctionInfo(F)) {
         if (FR->FunctionEffect == 0)
           return DoesNotAccessMemory;
         else if ((FR->FunctionEffect & Mod) == 0)
           return OnlyReadsMemory;
       }
-      return AliasAnalysis::getModRefBehavior(F, Info);
+      return AliasAnalysis::getModRefBehavior(F);
     }
     
     /// getModRefBehavior - Return the behavior of the specified function if
     /// called from the specified call site.  The call site may be null in which
     /// case the most generic behavior of this function should be returned.
-    ModRefBehavior getModRefBehavior(CallSite CS,
-                                         std::vector<PointerAccessInfo> *Info) {
-      Function* F = CS.getCalledFunction();
-      if (!F) return AliasAnalysis::getModRefBehavior(CS, Info);
+    ModRefBehavior getModRefBehavior(ImmutableCallSite CS) {
+      const Function* F = CS.getCalledFunction();
+      if (!F) return AliasAnalysis::getModRefBehavior(CS);
       if (FunctionRecord *FR = getFunctionInfo(F)) {
         if (FR->FunctionEffect == 0)
           return DoesNotAccessMemory;
         else if ((FR->FunctionEffect & Mod) == 0)
           return OnlyReadsMemory;
       }
-      return AliasAnalysis::getModRefBehavior(CS, Info);
+      return AliasAnalysis::getModRefBehavior(CS);
     }
 
     virtual void deleteValue(Value *V);
@@ -149,8 +151,8 @@
     /// an analysis interface through multiple inheritance.  If needed, it
     /// should override this to adjust the this pointer as needed for the
     /// specified pass info.
-    virtual void *getAdjustedAnalysisPointer(const PassInfo *PI) {
-      if (PI->isPassID(&AliasAnalysis::ID))
+    virtual void *getAdjustedAnalysisPointer(AnalysisID PI) {
+      if (PI == &AliasAnalysis::ID)
         return (AliasAnalysis*)this;
       return this;
     }
@@ -158,8 +160,9 @@
   private:
     /// getFunctionInfo - Return the function info for the function, or null if
     /// we don't have anything useful to say about it.
-    FunctionRecord *getFunctionInfo(Function *F) {
-      std::map<Function*, FunctionRecord>::iterator I = FunctionInfo.find(F);
+    FunctionRecord *getFunctionInfo(const Function *F) {
+      std::map<const Function*, FunctionRecord>::iterator I =
+        FunctionInfo.find(F);
       if (I != FunctionInfo.end())
         return &I->second;
       return 0;
@@ -175,9 +178,13 @@
 }
 
 char GlobalsModRef::ID = 0;
-static RegisterPass<GlobalsModRef>
-X("globalsmodref-aa", "Simple mod/ref analysis for globals", false, true);
-static RegisterAnalysisGroup<AliasAnalysis> Y(X);
+INITIALIZE_AG_PASS_BEGIN(GlobalsModRef, AliasAnalysis,
+                "globalsmodref-aa", "Simple mod/ref analysis for globals",    
+                false, true, false)
+INITIALIZE_AG_DEPENDENCY(CallGraph)
+INITIALIZE_AG_PASS_END(GlobalsModRef, AliasAnalysis,
+                "globalsmodref-aa", "Simple mod/ref analysis for globals",    
+                false, true, false)
 
 Pass *llvm::createGlobalsModRefPass() { return new GlobalsModRef(); }
 
@@ -409,7 +416,7 @@
             FunctionEffect |= CalleeFR->FunctionEffect;
 
             // Incorporate callee's effects on globals into our info.
-            for (std::map<GlobalValue*, unsigned>::iterator GI =
+            for (std::map<const GlobalValue*, unsigned>::iterator GI =
                    CalleeFR->GlobalInfo.begin(), E = CalleeFR->GlobalInfo.end();
                  GI != E; ++GI)
               FR.GlobalInfo[GI->first] |= GI->second;
@@ -474,16 +481,16 @@
 /// other is some random pointer, we know there cannot be an alias, because the
 /// address of the global isn't taken.
 AliasAnalysis::AliasResult
-GlobalsModRef::alias(const Value *V1, unsigned V1Size,
-                     const Value *V2, unsigned V2Size) {
+GlobalsModRef::alias(const Location &LocA,
+                     const Location &LocB) {
   // Get the base object these pointers point to.
-  Value *UV1 = const_cast<Value*>(V1->getUnderlyingObject());
-  Value *UV2 = const_cast<Value*>(V2->getUnderlyingObject());
+  const Value *UV1 = LocA.Ptr->getUnderlyingObject();
+  const Value *UV2 = LocB.Ptr->getUnderlyingObject();
 
   // If either of the underlying values is a global, they may be non-addr-taken
   // globals, which we can answer queries about.
-  GlobalValue *GV1 = dyn_cast<GlobalValue>(UV1);
-  GlobalValue *GV2 = dyn_cast<GlobalValue>(UV2);
+  const GlobalValue *GV1 = dyn_cast<GlobalValue>(UV1);
+  const GlobalValue *GV2 = dyn_cast<GlobalValue>(UV2);
   if (GV1 || GV2) {
     // If the global's address is taken, pretend we don't know it's a pointer to
     // the global.
@@ -503,12 +510,12 @@
   // so, we may be able to handle this.  First check to see if the base pointer
   // is a direct load from an indirect global.
   GV1 = GV2 = 0;
-  if (LoadInst *LI = dyn_cast<LoadInst>(UV1))
+  if (const LoadInst *LI = dyn_cast<LoadInst>(UV1))
     if (GlobalVariable *GV = dyn_cast<GlobalVariable>(LI->getOperand(0)))
       if (IndirectGlobals.count(GV))
         GV1 = GV;
-  if (LoadInst *LI = dyn_cast<LoadInst>(UV2))
-    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(LI->getOperand(0)))
+  if (const LoadInst *LI = dyn_cast<LoadInst>(UV2))
+    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(LI->getOperand(0)))
       if (IndirectGlobals.count(GV))
         GV2 = GV;
 
@@ -526,25 +533,27 @@
   if ((GV1 || GV2) && GV1 != GV2)
     return NoAlias;
 
-  return AliasAnalysis::alias(V1, V1Size, V2, V2Size);
+  return AliasAnalysis::alias(LocA, LocB);
 }
 
 AliasAnalysis::ModRefResult
-GlobalsModRef::getModRefInfo(CallSite CS, Value *P, unsigned Size) {
+GlobalsModRef::getModRefInfo(ImmutableCallSite CS,
+                             const Location &Loc) {
   unsigned Known = ModRef;
 
   // If we are asking for mod/ref info of a direct call with a pointer to a
   // global we are tracking, return information if we have it.
-  if (GlobalValue *GV = dyn_cast<GlobalValue>(P->getUnderlyingObject()))
+  if (const GlobalValue *GV =
+        dyn_cast<GlobalValue>(Loc.Ptr->getUnderlyingObject()))
     if (GV->hasLocalLinkage())
-      if (Function *F = CS.getCalledFunction())
+      if (const Function *F = CS.getCalledFunction())
         if (NonAddressTakenGlobals.count(GV))
-          if (FunctionRecord *FR = getFunctionInfo(F))
+          if (const FunctionRecord *FR = getFunctionInfo(F))
             Known = FR->getInfoForGlobal(GV);
 
   if (Known == NoModRef)
     return NoModRef; // No need to query other mod/ref analyses
-  return ModRefResult(Known & AliasAnalysis::getModRefInfo(CS, P, Size));
+  return ModRefResult(Known & AliasAnalysis::getModRefInfo(CS, Loc));
 }
 
 
@@ -558,7 +567,7 @@
       // any AllocRelatedValues for it.
       if (IndirectGlobals.erase(GV)) {
         // Remove any entries in AllocsForIndirectGlobals for this global.
-        for (std::map<Value*, GlobalValue*>::iterator
+        for (std::map<const Value*, const GlobalValue*>::iterator
              I = AllocsForIndirectGlobals.begin(),
              E = AllocsForIndirectGlobals.end(); I != E; ) {
           if (I->second == GV) {
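
The interface migration in this file (alias(const Value*, unsigned, ...)
becomes alias(const Location&, const Location&), and CallSite becomes
ImmutableCallSite) means clients now bundle a pointer and its access size
into one value. A small sketch of how a query reads under the new API
(Location's exact field set is taken on trust from this commit's usage;
treat the details as an approximation):

    // Ask whether two 4-byte accesses through P1 and P2 can overlap.
    AliasAnalysis::AliasResult R =
      AA.alias(AliasAnalysis::Location(P1, 4),
               AliasAnalysis::Location(P2, 4));
    if (R == AliasAnalysis::NoAlias) {
      // Safe to reorder the two accesses.
    }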

Modified: llvm/branches/wendling/eh/lib/Analysis/IVUsers.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/IVUsers.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/IVUsers.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/IVUsers.cpp Tue Oct 26 19:48:03 2010
@@ -21,7 +21,6 @@
 #include "llvm/Analysis/Dominators.h"
 #include "llvm/Analysis/LoopPass.h"
 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
-#include "llvm/Assembly/AsmAnnotationWriter.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/raw_ostream.h"
@@ -29,7 +28,13 @@
 using namespace llvm;
 
 char IVUsers::ID = 0;
-INITIALIZE_PASS(IVUsers, "iv-users", "Induction Variable Users", false, true);
+INITIALIZE_PASS_BEGIN(IVUsers, "iv-users",
+                      "Induction Variable Users", false, true)
+INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_END(IVUsers, "iv-users",
+                      "Induction Variable Users", false, true)
 
 Pass *llvm::createIVUsersPass() {
   return new IVUsers();
@@ -38,27 +43,31 @@
 /// isInteresting - Test whether the given expression is "interesting" when
 /// used by the given expression, within the context of analyzing the
 /// given loop.
-static bool isInteresting(const SCEV *S, const Instruction *I, const Loop *L) {
-  // Anything loop-invariant is interesting.
-  if (!isa<SCEVUnknown>(S) && S->isLoopInvariant(L))
-    return true;
-
+static bool isInteresting(const SCEV *S, const Instruction *I, const Loop *L,
+                          ScalarEvolution *SE) {
   // An addrec is interesting if it's affine or if it has an interesting start.
   if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
     // Keep things simple. Don't touch loop-variant strides.
     if (AR->getLoop() == L)
       return AR->isAffine() || !L->contains(I);
-    // Otherwise recurse to see if the start value is interesting.
-    return isInteresting(AR->getStart(), I, L);
+    // Otherwise recurse to see if the start value is interesting and the
+    // step value is not, since we don't yet know how to do effective SCEV
+    // expansions for addrecs with interesting steps.
+    return isInteresting(AR->getStart(), I, L, SE) &&
+          !isInteresting(AR->getStepRecurrence(*SE), I, L, SE);
   }
 
-  // An add is interesting if any of its operands is.
+  // An add is interesting if exactly one of its operands is interesting.
   if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
+    bool AnyInterestingYet = false;
     for (SCEVAddExpr::op_iterator OI = Add->op_begin(), OE = Add->op_end();
          OI != OE; ++OI)
-      if (isInteresting(*OI, I, L))
-        return true;
-    return false;
+      if (isInteresting(*OI, I, L, SE)) {
+        if (AnyInterestingYet)
+          return false;
+        AnyInterestingYet = true;
+      }
+    return AnyInterestingYet;
   }
 
   // Nothing else is interesting here.
@@ -84,7 +93,7 @@
 
   // If we've come to an uninteresting expression, stop the traversal and
   // call this a user.
-  if (!isInteresting(ISE, I, L))
+  if (!isInteresting(ISE, I, L, SE))
     return false;
 
   SmallPtrSet<Instruction *, 4> UniqueUsers;
@@ -140,7 +149,8 @@
 }
 
 IVUsers::IVUsers()
- : LoopPass(&ID) {
+    : LoopPass(ID) {
+  initializeIVUsersPass(*PassRegistry::getPassRegistry());
 }
 
 void IVUsers::getAnalysisUsage(AnalysisUsage &AU) const {
@@ -175,9 +185,6 @@
   }
   OS << ":\n";
 
-  // Use a default AssemblyAnnotationWriter to suppress the default info
-  // comments, which aren't relevant here.
-  AssemblyAnnotationWriter Annotator;
   for (ilist<IVStrideUse>::const_iterator UI = IVUses.begin(),
        E = IVUses.end(); UI != E; ++UI) {
     OS << "  ";
@@ -191,7 +198,7 @@
       OS << ")";
     }
     OS << " in  ";
-    UI->getUser()->print(OS, &Annotator);
+    UI->getUser()->print(OS);
     OS << '\n';
   }
 }
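
The reworked isInteresting above tightens the add case from "any operand is
interesting" to "exactly one operand is interesting", presumably because
with two or more interesting operands there is no single strided piece to
rewrite the user around. A standalone illustration of the counting logic,
in plain C++ with the SCEV machinery abstracted away:

    #include <vector>

    // Returns true iff exactly one flag in Ops is set, mirroring the
    // AnyInterestingYet loop in isInteresting().
    static bool exactlyOneInteresting(const std::vector<bool> &Ops) {
      bool AnyYet = false;
      for (size_t i = 0, e = Ops.size(); i != e; ++i)
        if (Ops[i]) {
          if (AnyYet)
            return false; // a second interesting operand disqualifies
          AnyYet = true;
        }
      return AnyYet;
    }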

Modified: llvm/branches/wendling/eh/lib/Analysis/InlineCost.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/InlineCost.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/InlineCost.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/InlineCost.cpp Tue Oct 26 19:48:03 2010
@@ -18,96 +18,6 @@
 #include "llvm/ADT/SmallPtrSet.h"
 using namespace llvm;
 
-// CountCodeReductionForConstant - Figure out an approximation for how many
-// instructions will be constant folded if the specified value is constant.
-//
-unsigned InlineCostAnalyzer::FunctionInfo::
-CountCodeReductionForConstant(Value *V) {
-  unsigned Reduction = 0;
-  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
-    User *U = *UI;
-    if (isa<BranchInst>(U) || isa<SwitchInst>(U)) {
-      // We will be able to eliminate all but one of the successors.
-      const TerminatorInst &TI = cast<TerminatorInst>(*U);
-      const unsigned NumSucc = TI.getNumSuccessors();
-      unsigned Instrs = 0;
-      for (unsigned I = 0; I != NumSucc; ++I)
-        Instrs += Metrics.NumBBInsts[TI.getSuccessor(I)];
-      // We don't know which blocks will be eliminated, so use the average size.
-      Reduction += InlineConstants::InstrCost*Instrs*(NumSucc-1)/NumSucc;
-    } else if (CallInst *CI = dyn_cast<CallInst>(U)) {
-      // Turning an indirect call into a direct call is a BIG win
-      if (CI->getCalledValue() == V)
-        Reduction += InlineConstants::IndirectCallBonus;
-    } else if (InvokeInst *II = dyn_cast<InvokeInst>(U)) {
-      // Turning an indirect call into a direct call is a BIG win
-      if (II->getCalledValue() == V)
-        Reduction += InlineConstants::IndirectCallBonus;
-    } else {
-      // Figure out if this instruction will be removed due to simple constant
-      // propagation.
-      Instruction &Inst = cast<Instruction>(*U);
-
-      // We can't constant propagate instructions which have effects or
-      // read memory.
-      //
-      // FIXME: It would be nice to capture the fact that a load from a
-      // pointer-to-constant-global is actually a *really* good thing to zap.
-      // Unfortunately, we don't know the pointer that may get propagated here,
-      // so we can't make this decision.
-      if (Inst.mayReadFromMemory() || Inst.mayHaveSideEffects() ||
-          isa<AllocaInst>(Inst))
-        continue;
-
-      bool AllOperandsConstant = true;
-      for (unsigned i = 0, e = Inst.getNumOperands(); i != e; ++i)
-        if (!isa<Constant>(Inst.getOperand(i)) && Inst.getOperand(i) != V) {
-          AllOperandsConstant = false;
-          break;
-        }
-
-      if (AllOperandsConstant) {
-        // We will get to remove this instruction...
-        Reduction += InlineConstants::InstrCost;
-
-        // And any other instructions that use it which become constants
-        // themselves.
-        Reduction += CountCodeReductionForConstant(&Inst);
-      }
-    }
-  }
-  return Reduction;
-}
-
-// CountCodeReductionForAlloca - Figure out an approximation of how much smaller
-// the function will be if it is inlined into a context where an argument
-// becomes an alloca.
-//
-unsigned InlineCostAnalyzer::FunctionInfo::
-         CountCodeReductionForAlloca(Value *V) {
-  if (!V->getType()->isPointerTy()) return 0;  // Not a pointer
-  unsigned Reduction = 0;
-  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
-    Instruction *I = cast<Instruction>(*UI);
-    if (isa<LoadInst>(I) || isa<StoreInst>(I))
-      Reduction += InlineConstants::InstrCost;
-    else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
-      // If the GEP has variable indices, we won't be able to do much with it.
-      if (GEP->hasAllConstantIndices())
-        Reduction += CountCodeReductionForAlloca(GEP);
-    } else if (BitCastInst *BCI = dyn_cast<BitCastInst>(I)) {
-      // Track pointer through bitcasts.
-      Reduction += CountCodeReductionForAlloca(BCI);
-    } else {
-      // If there is some other strange instruction, we're not going to be able
-      // to do much if we inline this.
-      return 0;
-    }
-  }
-
-  return Reduction;
-}
-
 /// callIsSmall - If a call is likely to lower to a single target instruction,
 /// or is otherwise deemed small return true.
 /// TODO: Perhaps calls like memcpy, strcpy, etc?
@@ -160,6 +70,12 @@
       // variables as volatile if they are live across a setjmp call, and they
       // probably won't do this in callers.
       if (const Function *F = CS.getCalledFunction()) {
+        // If a function is both internal and has a single use, then it is 
+        // extremely likely to get inlined in the future (it was probably 
+        // exposed by an interleaved devirtualization pass).
+        if (F->hasInternalLinkage() && F->hasOneUse())
+          ++NumInlineCandidates;
+        
         if (F->isDeclaration() && 
             (F->getName() == "setjmp" || F->getName() == "_setjmp"))
           callsSetJmp = true;
@@ -226,6 +142,135 @@
   NumBBInsts[BB] = NumInsts - NumInstsBeforeThisBB;
 }
 
+// CountBonusForConstant - Figure out an approximation for how much per-call
+// performance boost we can expect if the specified value is constant.
+unsigned CodeMetrics::CountBonusForConstant(Value *V) {
+  unsigned Bonus = 0;
+  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
+    User *U = *UI;
+    if (CallInst *CI = dyn_cast<CallInst>(U)) {
+      // Turning an indirect call into a direct call is a BIG win
+      if (CI->getCalledValue() == V)
+        Bonus += InlineConstants::IndirectCallBonus;
+    }
+    else if (InvokeInst *II = dyn_cast<InvokeInst>(U)) {
+      // Turning an indirect call into a direct call is a BIG win
+      if (II->getCalledValue() == V)
+        Bonus += InlineConstants::IndirectCallBonus;
+    }
+    // FIXME: Eliminating conditional branches and switches should
+    // also yield a per-call performance boost.
+    else {
+      // Figure out the bonuses that will accrue due to simple constant
+      // propagation.
+      Instruction &Inst = cast<Instruction>(*U);
+
+      // We can't constant propagate instructions which have effects or
+      // read memory.
+      //
+      // FIXME: It would be nice to capture the fact that a load from a
+      // pointer-to-constant-global is actually a *really* good thing to zap.
+      // Unfortunately, we don't know the pointer that may get propagated here,
+      // so we can't make this decision.
+      if (Inst.mayReadFromMemory() || Inst.mayHaveSideEffects() ||
+          isa<AllocaInst>(Inst))
+        continue;
+
+      bool AllOperandsConstant = true;
+      for (unsigned i = 0, e = Inst.getNumOperands(); i != e; ++i)
+        if (!isa<Constant>(Inst.getOperand(i)) && Inst.getOperand(i) != V) {
+          AllOperandsConstant = false;
+          break;
+        }
+
+      if (AllOperandsConstant)
+        Bonus += CountBonusForConstant(&Inst);
+    }
+  }
+  return Bonus;
+}
+
+
+// CountCodeReductionForConstant - Figure out an approximation for how many
+// instructions will be constant folded if the specified value is constant.
+//
+unsigned CodeMetrics::CountCodeReductionForConstant(Value *V) {
+  unsigned Reduction = 0;
+  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
+    User *U = *UI;
+    if (isa<BranchInst>(U) || isa<SwitchInst>(U)) {
+      // We will be able to eliminate all but one of the successors.
+      const TerminatorInst &TI = cast<TerminatorInst>(*U);
+      const unsigned NumSucc = TI.getNumSuccessors();
+      unsigned Instrs = 0;
+      for (unsigned I = 0; I != NumSucc; ++I)
+        Instrs += NumBBInsts[TI.getSuccessor(I)];
+      // We don't know which blocks will be eliminated, so use the average size.
+      Reduction += InlineConstants::InstrCost*Instrs*(NumSucc-1)/NumSucc;
+    } else {
+      // Figure out if this instruction will be removed due to simple constant
+      // propagation.
+      Instruction &Inst = cast<Instruction>(*U);
+
+      // We can't constant propagate instructions which have effects or
+      // read memory.
+      //
+      // FIXME: It would be nice to capture the fact that a load from a
+      // pointer-to-constant-global is actually a *really* good thing to zap.
+      // Unfortunately, we don't know the pointer that may get propagated here,
+      // so we can't make this decision.
+      if (Inst.mayReadFromMemory() || Inst.mayHaveSideEffects() ||
+          isa<AllocaInst>(Inst))
+        continue;
+
+      bool AllOperandsConstant = true;
+      for (unsigned i = 0, e = Inst.getNumOperands(); i != e; ++i)
+        if (!isa<Constant>(Inst.getOperand(i)) && Inst.getOperand(i) != V) {
+          AllOperandsConstant = false;
+          break;
+        }
+
+      if (AllOperandsConstant) {
+        // We will get to remove this instruction...
+        Reduction += InlineConstants::InstrCost;
+
+        // And any other instructions that use it which become constants
+        // themselves.
+        Reduction += CountCodeReductionForConstant(&Inst);
+      }
+    }
+  }
+  return Reduction;
+}
+
+// CountCodeReductionForAlloca - Figure out an approximation of how much smaller
+// the function will be if it is inlined into a context where an argument
+// becomes an alloca.
+//
+unsigned CodeMetrics::CountCodeReductionForAlloca(Value *V) {
+  if (!V->getType()->isPointerTy()) return 0;  // Not a pointer
+  unsigned Reduction = 0;
+  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
+    Instruction *I = cast<Instruction>(*UI);
+    if (isa<LoadInst>(I) || isa<StoreInst>(I))
+      Reduction += InlineConstants::InstrCost;
+    else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
+      // If the GEP has variable indices, we won't be able to do much with it.
+      if (GEP->hasAllConstantIndices())
+        Reduction += CountCodeReductionForAlloca(GEP);
+    } else if (BitCastInst *BCI = dyn_cast<BitCastInst>(I)) {
+      // Track pointer through bitcasts.
+      Reduction += CountCodeReductionForAlloca(BCI);
+    } else {
+      // If there is some other strange instruction, we're not going to be able
+      // to do much if we inline this.
+      return 0;
+    }
+  }
+
+  return Reduction;
+}
+
 /// analyzeFunction - Fill in the current structure with information gleaned
 /// from the specified function.
 void CodeMetrics::analyzeFunction(Function *F) {
@@ -254,8 +299,9 @@
   // code can be eliminated if one of the arguments is a constant.
   ArgumentWeights.reserve(F->arg_size());
   for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E; ++I)
-    ArgumentWeights.push_back(ArgInfo(CountCodeReductionForConstant(I),
-                                      CountCodeReductionForAlloca(I)));
+    ArgumentWeights.push_back(ArgInfo(Metrics.CountCodeReductionForConstant(I),
+                                      Metrics.CountCodeReductionForAlloca(I),
+                                      Metrics.CountBonusForConstant(I)));
 }
 
 /// NeverInline - returns true if the function should never be inlined into
@@ -266,6 +312,42 @@
           Metrics.containsIndirectBr);
 
 }
+// getSpecializationBonus - The heuristic used to determine the per-call
+// performance boost for using a specialization of Callee with the
+// arguments in SpecializedArgNos replaced by constants.
+int InlineCostAnalyzer::getSpecializationBonus(Function *Callee,
+         SmallVectorImpl<unsigned> &SpecializedArgNos)
+{
+  if (Callee->mayBeOverridden())
+    return 0;
+  
+  int Bonus = 0;
+  // If this function uses the coldcc calling convention, prefer not to
+  // specialize it.
+  if (Callee->getCallingConv() == CallingConv::Cold)
+    Bonus -= InlineConstants::ColdccPenalty;
+  
+  // Get information about the callee.
+  FunctionInfo *CalleeFI = &CachedFunctionInfo[Callee];
+  
+  // If we haven't calculated this information yet, do so now.
+  if (CalleeFI->Metrics.NumBlocks == 0)
+    CalleeFI->analyzeFunction(Callee);
+
+
+  for (unsigned i = 0, s = SpecializedArgNos.size();
+       i < s; ++i)
+  {
+    Bonus += CalleeFI->ArgumentWeights[SpecializedArgNos[i]].ConstantBonus;
+  }
+  // Calls usually take a long time, so they make the specialization gain 
+  // smaller.
+  Bonus -= CalleeFI->Metrics.NumCalls * InlineConstants::CallPenalty;
+
+  return Bonus;
+}
+
+
 // getInlineCost - The heuristic used to determine if we should inline the
 // function call or not.
 //
@@ -379,7 +461,8 @@
       // away with this information.
     } else if (isa<Constant>(I)) {
       if (ArgNo < CalleeFI->ArgumentWeights.size())
-        InlineCost -= CalleeFI->ArgumentWeights[ArgNo].ConstantWeight;
+        InlineCost -= (CalleeFI->ArgumentWeights[ArgNo].ConstantWeight +
+                       CalleeFI->ArgumentWeights[ArgNo].ConstantBonus);
     }
   }
   
@@ -395,6 +478,40 @@
   return llvm::InlineCost::get(InlineCost);
 }
 
+// getSpecializationCost - The heuristic used to determine the code-size
+// impact of creating a specialized version of Callee with the arguments
+// in SpecializedArgNos replaced by constants.
+InlineCost InlineCostAnalyzer::getSpecializationCost(Function *Callee,
+                               SmallVectorImpl<unsigned> &SpecializedArgNos)
+{
+  // Don't specialize functions which can be redefined at link-time to mean
+  // something else.
+  if (Callee->mayBeOverridden())
+    return llvm::InlineCost::getNever();
+  
+  // Get information about the callee.
+  FunctionInfo *CalleeFI = &CachedFunctionInfo[Callee];
+  
+  // If we haven't calculated this information yet, do so now.
+  if (CalleeFI->Metrics.NumBlocks == 0)
+    CalleeFI->analyzeFunction(Callee);
+
+  int Cost = 0;
+  
+  // Look at the original size of the callee.  Each instruction counts as 5.
+  Cost += CalleeFI->Metrics.NumInsts * InlineConstants::InstrCost;
+
+  // Offset that with the amount of code that can be constant-folded
+  // away with the given arguments replaced by constants.
+  for (SmallVectorImpl<unsigned>::iterator an = SpecializedArgNos.begin(),
+       ae = SpecializedArgNos.end(); an != ae; ++an)
+  {
+    Cost -= CalleeFI->ArgumentWeights[*an].ConstantWeight;
+  }
+
+  return llvm::InlineCost::get(Cost);
+}
+
 // getInlineFudgeFactor - Return a > 1.0 factor if the inliner should use a
 // higher threshold to determine if the function call should be inlined.
 float InlineCostAnalyzer::getInlineFudgeFactor(CallSite CS) {
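
Taken together, getSpecializationCost and getSpecializationBonus above weigh
a one-time code-size cost against a recurring per-call gain. Written out as
pseudo-arithmetic (symbols only, all taken from the diff; no concrete
InlineConstants values are implied):

    // Cost  = NumInsts * InstrCost
    //         - sum over specialized args of ConstantWeight(arg)
    // Bonus = sum over specialized args of ConstantBonus(arg)
    //         - NumCalls * CallPenalty
    //         - (callee is coldcc ? ColdccPenalty : 0)

A caller of these two heuristics would presumably specialize only when the
expected per-call Bonus, scaled by how often the call executes, outweighs
the one-time Cost.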

Modified: llvm/branches/wendling/eh/lib/Analysis/InstCount.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/InstCount.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/InstCount.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/InstCount.cpp Tue Oct 26 19:48:03 2010
@@ -51,7 +51,9 @@
     }
   public:
     static char ID; // Pass identification, replacement for typeid
-    InstCount() : FunctionPass(&ID) {}
+    InstCount() : FunctionPass(ID) {
+      initializeInstCountPass(*PassRegistry::getPassRegistry());
+    }
 
     virtual bool runOnFunction(Function &F);
 
@@ -65,7 +67,7 @@
 
 char InstCount::ID = 0;
 INITIALIZE_PASS(InstCount, "instcount",
-                "Counts the various types of Instructions", false, true);
+                "Counts the various types of Instructions", false, true)
 
 FunctionPass *llvm::createInstCountPass() { return new InstCount(); }
 

Modified: llvm/branches/wendling/eh/lib/Analysis/InstructionSimplify.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/InstructionSimplify.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/InstructionSimplify.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/InstructionSimplify.cpp Tue Oct 26 19:48:03 2010
@@ -106,6 +106,16 @@
       (A == Op0 || B == Op0))
     return Op0;
   
+  // (A & B) & A -> A & B
+  if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
+      (A == Op1 || B == Op1))
+    return Op0;
+
+  // A & (A & B) -> A & B
+  if (match(Op1, m_And(m_Value(A), m_Value(B))) &&
+      (A == Op0 || B == Op0))
+    return Op1;
+
   return 0;
 }
 
@@ -165,6 +175,16 @@
       (A == Op0 || B == Op0))
     return Op0;
   
+  // (A | B) | A -> A | B
+  if (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
+      (A == Op1 || B == Op1))
+    return Op0;
+
+  // A | (A | B) -> A | B
+  if (match(Op1, m_Or(m_Value(A), m_Value(B))) &&
+      (A == Op0 || B == Op0))
+    return Op1;
+
   return 0;
 }
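
The four new cases in SimplifyAndInst and SimplifyOrInst follow from
commutativity, associativity, and idempotence of the bitwise operators:
(A & B) & A == A & B and the dual (A | B) | A == A | B, each in both
operand orders. A self-contained check of the identities on concrete bit
patterns (plain C++, independent of LLVM):

    #include <cassert>

    int main() {
      unsigned A = 0xF0F0, B = 0x3C3C;
      assert(((A & B) & A) == (A & B));  // (A & B) & A -> A & B
      assert((A & (A & B)) == (A & B));  // A & (A & B) -> A & B
      assert(((A | B) | A) == (A | B));  // (A | B) | A -> A | B
      assert((A | (A | B)) == (A | B));  // A | (A | B) -> A | B
      return 0;
    }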
 

Modified: llvm/branches/wendling/eh/lib/Analysis/IntervalPartition.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/IntervalPartition.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/IntervalPartition.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/IntervalPartition.cpp Tue Oct 26 19:48:03 2010
@@ -17,7 +17,7 @@
 
 char IntervalPartition::ID = 0;
 INITIALIZE_PASS(IntervalPartition, "intervals",
-                "Interval Partition Construction", true, true);
+                "Interval Partition Construction", true, true)
 
 //===----------------------------------------------------------------------===//
 // IntervalPartition Implementation
@@ -91,7 +91,7 @@
 // distinguish it from a copy constructor.  Always pass in false for now.
 //
 IntervalPartition::IntervalPartition(IntervalPartition &IP, bool)
-  : FunctionPass(&ID) {
+  : FunctionPass(ID) {
   assert(IP.getRootInterval() && "Cannot operate on empty IntervalPartitions!");
 
  // Pass false to intervals_begin because we take ownership of its memory

Modified: llvm/branches/wendling/eh/lib/Analysis/LazyValueInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/LazyValueInfo.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/LazyValueInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/LazyValueInfo.cpp Tue Oct 26 19:48:03 2010
@@ -19,18 +19,20 @@
 #include "llvm/Analysis/ConstantFolding.h"
 #include "llvm/Target/TargetData.h"
 #include "llvm/Support/CFG.h"
+#include "llvm/Support/ConstantRange.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Support/ValueHandle.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/PointerIntPair.h"
 #include "llvm/ADT/STLExtras.h"
+#include <map>
+#include <set>
 using namespace llvm;
 
 char LazyValueInfo::ID = 0;
 INITIALIZE_PASS(LazyValueInfo, "lazy-value-info",
-                "Lazy Value Information Analysis", false, true);
+                "Lazy Value Information Analysis", false, true)
 
 namespace llvm {
   FunctionPass *createLazyValueInfoPass() { return new LazyValueInfo(); }
@@ -52,12 +54,15 @@
   enum LatticeValueTy {
     /// undefined - This LLVM Value has no known value yet.
     undefined,
+    
     /// constant - This LLVM Value has a specific constant value.
     constant,
-    
     /// notconstant - This LLVM value is known to not have the specified value.
     notconstant,
     
+    /// constantrange - The Value falls within this range.
+    constantrange,
+    
     /// overdefined - This instruction is not known to be constant, and we know
     /// it has a value.
     overdefined
@@ -65,42 +70,62 @@
   
   /// Val: This stores the current lattice value along with the Constant* for
   /// the constant if this is a 'constant' or 'notconstant' value.
-  PointerIntPair<Constant *, 2, LatticeValueTy> Val;
+  LatticeValueTy Tag;
+  Constant *Val;
+  ConstantRange Range;
   
 public:
-  LVILatticeVal() : Val(0, undefined) {}
+  LVILatticeVal() : Tag(undefined), Val(0), Range(1, true) {}
 
   static LVILatticeVal get(Constant *C) {
     LVILatticeVal Res;
-    Res.markConstant(C);
+    if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
+      Res.markConstantRange(ConstantRange(CI->getValue(), CI->getValue()+1));
+    else if (!isa<UndefValue>(C))
+      Res.markConstant(C);
     return Res;
   }
   static LVILatticeVal getNot(Constant *C) {
     LVILatticeVal Res;
-    Res.markNotConstant(C);
+    if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
+      Res.markConstantRange(ConstantRange(CI->getValue()+1, CI->getValue()));
+    else
+      Res.markNotConstant(C);
+    return Res;
+  }
+  static LVILatticeVal getRange(ConstantRange CR) {
+    LVILatticeVal Res;
+    Res.markConstantRange(CR);
     return Res;
   }
   
-  bool isUndefined() const   { return Val.getInt() == undefined; }
-  bool isConstant() const    { return Val.getInt() == constant; }
-  bool isNotConstant() const { return Val.getInt() == notconstant; }
-  bool isOverdefined() const { return Val.getInt() == overdefined; }
+  bool isUndefined() const     { return Tag == undefined; }
+  bool isConstant() const      { return Tag == constant; }
+  bool isNotConstant() const   { return Tag == notconstant; }
+  bool isConstantRange() const { return Tag == constantrange; }
+  bool isOverdefined() const   { return Tag == overdefined; }
   
   Constant *getConstant() const {
     assert(isConstant() && "Cannot get the constant of a non-constant!");
-    return Val.getPointer();
+    return Val;
   }
   
   Constant *getNotConstant() const {
     assert(isNotConstant() && "Cannot get the constant of a non-notconstant!");
-    return Val.getPointer();
+    return Val;
+  }
+  
+  ConstantRange getConstantRange() const {
+    assert(isConstantRange() &&
+           "Cannot get the constant-range of a non-constant-range!");
+    return Range;
   }
   
   /// markOverdefined - Return true if this is a change in status.
   bool markOverdefined() {
     if (isOverdefined())
       return false;
-    Val.setInt(overdefined);
+    Tag = overdefined;
     return true;
   }
 
@@ -112,9 +137,9 @@
     }
     
     assert(isUndefined());
-    Val.setInt(constant);
+    Tag = constant;
     assert(V && "Marking constant with NULL");
-    Val.setPointer(V);
+    Val = V;
     return true;
   }
   
@@ -130,9 +155,29 @@
     else
       assert(isUndefined());
 
-    Val.setInt(notconstant);
+    Tag = notconstant;
     assert(V && "Marking constant with NULL");
-    Val.setPointer(V);
+    Val = V;
+    return true;
+  }
+  
+  /// markConstantRange - Return true if this is a change in status.
+  bool markConstantRange(const ConstantRange NewR) {
+    if (isConstantRange()) {
+      if (NewR.isEmptySet())
+        return markOverdefined();
+      
+      bool changed = Range != NewR;
+      Range = NewR;
+      return changed;
+    }
+    
+    assert(isUndefined());
+    if (NewR.isEmptySet())
+      return markOverdefined();
+    
+    Tag = constantrange;
+    Range = NewR;
     return true;
   }
   
@@ -149,20 +194,43 @@
             isa<ConstantExpr>(RHS.getNotConstant()))
           return markOverdefined();
         return false;
-      }
-      if (isConstant()) {
+      } else if (isConstant()) {
         if (getConstant() == RHS.getNotConstant() ||
             isa<ConstantExpr>(RHS.getNotConstant()) ||
             isa<ConstantExpr>(getConstant()))
           return markOverdefined();
         return markNotConstant(RHS.getNotConstant());
+      } else if (isConstantRange()) {
+         // FIXME: This could be made more precise.
+        return markOverdefined();
       }
       
       assert(isUndefined() && "Unexpected lattice");
       return markNotConstant(RHS.getNotConstant());
     }
     
-    // RHS must be a constant, we must be undef, constant, or notconstant.
+    if (RHS.isConstantRange()) {
+      if (isConstantRange()) {
+        ConstantRange NewR = Range.unionWith(RHS.getConstantRange());
+        if (NewR.isFullSet())
+          return markOverdefined();
+        else
+          return markConstantRange(NewR);
+      } else if (!isUndefined()) {
+        return markOverdefined();
+      }
+      
+      assert(isUndefined() && "Unexpected lattice");
+      return markConstantRange(RHS.getConstantRange());
+    }
+    
+    // RHS must be a constant; we must be constantrange, undef, constant,
+    // or notconstant.
+    if (isConstantRange()) {
+      // FIXME: This could be made more precise.
+      return markOverdefined();
+    }
+    
     if (isUndefined())
       return markConstant(RHS.getConstant());
     
@@ -193,6 +261,9 @@
 
   if (Val.isNotConstant())
     return OS << "notconstant<" << *Val.getNotConstant() << '>';
+  else if (Val.isConstantRange())
+    return OS << "constantrange<" << Val.getConstantRange().getLower() << ", "
+              << Val.getConstantRange().getUpper() << '>';
   return OS << "constant<" << *Val.getConstant() << '>';
 }
 }
@@ -208,12 +279,12 @@
   public:
     /// BlockCacheEntryTy - This is a computed lattice value at the end of the
     /// specified basic block for a Value* that depends on context.
-    typedef std::pair<BasicBlock*, LVILatticeVal> BlockCacheEntryTy;
+    typedef std::pair<AssertingVH<BasicBlock>, LVILatticeVal> BlockCacheEntryTy;
     
     /// ValueCacheEntryTy - This is all of the cached block information for
     /// exactly one Value*.  The entries are sorted by the BasicBlock* of the
     /// entries, allowing us to do a lookup with a binary search.
-    typedef std::map<BasicBlock*, LVILatticeVal> ValueCacheEntryTy;
+    typedef std::map<AssertingVH<BasicBlock>, LVILatticeVal> ValueCacheEntryTy;
 
   private:
      /// LVIValueHandle - A callback value handle update the cache when
@@ -228,10 +299,6 @@
       void allUsesReplacedWith(Value* V) {
         deleted();
       }
-
-      LVIValueHandle &operator=(Value *V) {
-        return *this = LVIValueHandle(V, Parent);
-      }
     };
 
     /// ValueCache - This is all of the cached information for all values,
@@ -241,7 +308,7 @@
     /// OverDefinedCache - This tracks, on a per-block basis, the set of 
     /// values that are over-defined at the end of that block.  This is required
     /// for cache updating.
-    std::set<std::pair<BasicBlock*, Value*> > OverDefinedCache;
+    std::set<std::pair<AssertingVH<BasicBlock>, Value*> > OverDefinedCache;
 
   public:
     
@@ -257,6 +324,16 @@
     /// edge from PredBB to OldSucc has been threaded to be from PredBB to
     /// NewSucc.
     void threadEdge(BasicBlock *PredBB,BasicBlock *OldSucc,BasicBlock *NewSucc);
+    
+    /// eraseBlock - This is part of the update interface to inform the cache
+    /// that a block has been deleted.
+    void eraseBlock(BasicBlock *BB);
+    
+    /// clear - Empty the cache.
+    void clear() {
+      ValueCache.clear();
+      OverDefinedCache.clear();
+    }
   };
 } // end anonymous namespace
 
@@ -284,16 +361,17 @@
     ValueCacheEntryTy &Cache;
     
     /// This tracks, for each block, what values are overdefined.
-    std::set<std::pair<BasicBlock*, Value*> > &OverDefinedCache;
+    std::set<std::pair<AssertingVH<BasicBlock>, Value*> > &OverDefinedCache;
     
     ///  NewBlocks - This is a mapping of the new BasicBlocks which have been
     /// added to cache but that are not in sorted order.
     DenseSet<BasicBlock*> NewBlockInfo;
+    
   public:
     
     LVIQuery(Value *V, LazyValueInfoCache &P,
              ValueCacheEntryTy &VC,
-             std::set<std::pair<BasicBlock*, Value*> > &ODC)
+             std::set<std::pair<AssertingVH<BasicBlock>, Value*> > &ODC)
       : Val(V), Parent(P), Cache(VC), OverDefinedCache(ODC) {
     }
 
@@ -313,34 +391,50 @@
     LVILatticeVal getEdgeValue(BasicBlock *FromBB, BasicBlock *ToBB);
 
   private:
-    LVILatticeVal &getCachedEntryForBlock(BasicBlock *BB);
+    LVILatticeVal getCachedEntryForBlock(BasicBlock *BB);
   };
 } // end anonymous namespace
 
 void LazyValueInfoCache::LVIValueHandle::deleted() {
-  Parent->ValueCache.erase(*this);
-  for (std::set<std::pair<BasicBlock*, Value*> >::iterator
+  for (std::set<std::pair<AssertingVH<BasicBlock>, Value*> >::iterator
        I = Parent->OverDefinedCache.begin(),
        E = Parent->OverDefinedCache.end();
        I != E; ) {
-    std::set<std::pair<BasicBlock*, Value*> >::iterator tmp = I;
+    std::set<std::pair<AssertingVH<BasicBlock>, Value*> >::iterator tmp = I;
     ++I;
     if (tmp->second == getValPtr())
       Parent->OverDefinedCache.erase(tmp);
   }
+  
+  // This erasure deallocates *this, so it MUST happen after we're done
+  // using any and all members of *this.
+  Parent->ValueCache.erase(*this);
 }
 
+void LazyValueInfoCache::eraseBlock(BasicBlock *BB) {
+  for (std::set<std::pair<AssertingVH<BasicBlock>, Value*> >::iterator
+       I = OverDefinedCache.begin(), E = OverDefinedCache.end(); I != E; ) {
+    std::set<std::pair<AssertingVH<BasicBlock>, Value*> >::iterator tmp = I;
+    ++I;
+    if (tmp->first == BB)
+      OverDefinedCache.erase(tmp);
+  }
+
+  for (std::map<LVIValueHandle, ValueCacheEntryTy>::iterator
+       I = ValueCache.begin(), E = ValueCache.end(); I != E; ++I)
+    I->second.erase(BB);
+}
 
 /// getCachedEntryForBlock - See if we already have a value for this block.  If
 /// so, return it, otherwise create a new entry in the Cache map to use.
-LVILatticeVal &LVIQuery::getCachedEntryForBlock(BasicBlock *BB) {
+LVILatticeVal LVIQuery::getCachedEntryForBlock(BasicBlock *BB) {
   NewBlockInfo.insert(BB);
   return Cache[BB];
 }
 
 LVILatticeVal LVIQuery::getBlockValue(BasicBlock *BB) {
   // See if we already have a value for this block.
-  LVILatticeVal &BBLV = getCachedEntryForBlock(BB);
+  LVILatticeVal BBLV = getCachedEntryForBlock(BB);
   
   // If we've already computed this block's value, return it.
   if (!BBLV.isUndefined()) {
@@ -352,13 +446,28 @@
   // lattice value to overdefined, so that cycles will terminate and be
   // conservatively correct.
   BBLV.markOverdefined();
+  Cache[BB] = BBLV;
   
-  // If V is live into BB, see if our predecessors know anything about it.
   Instruction *BBI = dyn_cast<Instruction>(Val);
   if (BBI == 0 || BBI->getParent() != BB) {
     LVILatticeVal Result;  // Start Undefined.
-    unsigned NumPreds = 0;
     
+    // If this is a pointer, and there's a load from that pointer in this BB,
+    // then we know that the pointer can't be NULL.
+    bool NotNull = false;
+    if (Val->getType()->isPointerTy()) {
+      for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();BI != BE;++BI){
+        LoadInst *L = dyn_cast<LoadInst>(BI);
+        if (L && L->getPointerAddressSpace() == 0 &&
+            L->getPointerOperand()->getUnderlyingObject() ==
+              Val->getUnderlyingObject()) {
+          NotNull = true;
+          break;
+        }
+      }
+    }
+    
+    unsigned NumPreds = 0;    
     // Loop over all of our predecessors, merging what we know from them into
     // result.
     for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
@@ -369,11 +478,19 @@
       if (Result.isOverdefined()) {
         DEBUG(dbgs() << " compute BB '" << BB->getName()
                      << "' - overdefined because of pred.\n");
+        // If we previously determined that this is a pointer that can't be null
+        // then return that rather than giving up entirely.
+        if (NotNull) {
+          const PointerType *PTy = cast<PointerType>(Val->getType());
+          Result = LVILatticeVal::getNot(ConstantPointerNull::get(PTy));
+        }
+        
         return Result;
       }
       ++NumPreds;
     }
     
+    
     // If this is the entry block, we must be asking about an argument.  The
     // value is overdefined.
     if (NumPreds == 0 && BB == &BB->getParent()->front()) {
@@ -384,7 +501,7 @@
     
     // Return the merged value, which is more precise than 'overdefined'.
     assert(!Result.isOverdefined());
-    return getCachedEntryForBlock(BB) = Result;
+    return Cache[BB] = Result;
   }
   
   // If this value is defined by an instruction in this block, we have to
@@ -409,18 +526,104 @@
     
     // Return the merged value, which is more precise than 'overdefined'.
     assert(!Result.isOverdefined());
-    return getCachedEntryForBlock(BB) = Result;
+    return Cache[BB] = Result;
+  }
+
+  assert(Cache[BB].isOverdefined() && "Recursive query changed our cache?");
 
-  } else {
+  // We can only analyze the definitions of certain classes of instructions
+  // (integral binops and casts at the moment), so bail if this isn't one.
+  LVILatticeVal Result;
+  if ((!isa<BinaryOperator>(BBI) && !isa<CastInst>(BBI)) ||
+     !BBI->getType()->isIntegerTy()) {
+    DEBUG(dbgs() << " compute BB '" << BB->getName()
+                 << "' - overdefined because inst def found.\n");
+    Result.markOverdefined();
+    return Result;
+  }
+   
+  // FIXME: We're currently limited to binops with a constant RHS.  This should
+  // be improved.
+  BinaryOperator *BO = dyn_cast<BinaryOperator>(BBI);
+  if (BO && !isa<ConstantInt>(BO->getOperand(1))) { 
+    DEBUG(dbgs() << " compute BB '" << BB->getName()
+                 << "' - overdefined because inst def found.\n");
+
+    Result.markOverdefined();
+    return Result;
+  }  
+
+  // Figure out the range of the LHS.  If that fails, bail.
+  LVILatticeVal LHSVal = Parent.getValueInBlock(BBI->getOperand(0), BB);
+  if (!LHSVal.isConstantRange()) {
+    Result.markOverdefined();
+    return Result;
+  }
+  
+  ConstantInt *RHS = 0;
+  ConstantRange LHSRange = LHSVal.getConstantRange();
+  ConstantRange RHSRange(1);
+  const IntegerType *ResultTy = cast<IntegerType>(BBI->getType());
+  if (isa<BinaryOperator>(BBI)) {
+    RHS = dyn_cast<ConstantInt>(BBI->getOperand(1));
+    if (!RHS) {
+      Result.markOverdefined();
+      return Result;
+    }
     
+    RHSRange = ConstantRange(RHS->getValue(), RHS->getValue()+1);
+  }
+      
+  // NOTE: We're currently limited by the set of operations that ConstantRange
+  // can evaluate symbolically.  Enhancing that set will allow us to analyze
+  // more definitions.
+  switch (BBI->getOpcode()) {
+  case Instruction::Add:
+    Result.markConstantRange(LHSRange.add(RHSRange));
+    break;
+  case Instruction::Sub:
+    Result.markConstantRange(LHSRange.sub(RHSRange));
+    break;
+  case Instruction::Mul:
+    Result.markConstantRange(LHSRange.multiply(RHSRange));
+    break;
+  case Instruction::UDiv:
+    Result.markConstantRange(LHSRange.udiv(RHSRange));
+    break;
+  case Instruction::Shl:
+    Result.markConstantRange(LHSRange.shl(RHSRange));
+    break;
+  case Instruction::LShr:
+    Result.markConstantRange(LHSRange.lshr(RHSRange));
+    break;
+  case Instruction::Trunc:
+    Result.markConstantRange(LHSRange.truncate(ResultTy->getBitWidth()));
+    break;
+  case Instruction::SExt:
+    Result.markConstantRange(LHSRange.signExtend(ResultTy->getBitWidth()));
+    break;
+  case Instruction::ZExt:
+    Result.markConstantRange(LHSRange.zeroExtend(ResultTy->getBitWidth()));
+    break;
+  case Instruction::BitCast:
+    Result.markConstantRange(LHSRange);
+    break;
+  case Instruction::And:
+    Result.markConstantRange(LHSRange.binaryAnd(RHSRange));
+    break;
+  case Instruction::Or:
+    Result.markConstantRange(LHSRange.binaryOr(RHSRange));
+    break;
+  
+  // Unhandled instructions are overdefined.
+  default:
+    DEBUG(dbgs() << " compute BB '" << BB->getName()
+                 << "' - overdefined because inst def found.\n");
+    Result.markOverdefined();
+    break;
   }
   
-  DEBUG(dbgs() << " compute BB '" << BB->getName()
-               << "' - overdefined because inst def found.\n");
-
-  LVILatticeVal Result;
-  Result.markOverdefined();
-  return getCachedEntryForBlock(BB) = Result;
+  return Cache[BB] = Result;
 }
 
 
@@ -441,28 +644,57 @@
       // it is.
       if (BI->getCondition() == Val)
         return LVILatticeVal::get(ConstantInt::get(
-                               Type::getInt1Ty(Val->getContext()), isTrueDest));
+                              Type::getInt1Ty(Val->getContext()), isTrueDest));
       
       // If the condition of the branch is an equality comparison, we may be
       // able to infer the value.
-      if (ICmpInst *ICI = dyn_cast<ICmpInst>(BI->getCondition()))
-        if (ICI->isEquality() && ICI->getOperand(0) == Val &&
-            isa<Constant>(ICI->getOperand(1))) {
+      ICmpInst *ICI = dyn_cast<ICmpInst>(BI->getCondition());
+      if (ICI && ICI->getOperand(0) == Val &&
+          isa<Constant>(ICI->getOperand(1))) {
+        if (ICI->isEquality()) {
           // We know that V has the RHS constant if this is a true SETEQ or
           // false SETNE. 
           if (isTrueDest == (ICI->getPredicate() == ICmpInst::ICMP_EQ))
             return LVILatticeVal::get(cast<Constant>(ICI->getOperand(1)));
           return LVILatticeVal::getNot(cast<Constant>(ICI->getOperand(1)));
         }
+          
+        if (ConstantInt *CI = dyn_cast<ConstantInt>(ICI->getOperand(1))) {
+          // Calculate the range of values that would satisfy the comparison.
+          ConstantRange CmpRange(CI->getValue(), CI->getValue()+1);
+          ConstantRange TrueValues =
+            ConstantRange::makeICmpRegion(ICI->getPredicate(), CmpRange);
+            
+          // If we're interested in the false dest, invert the condition.
+          if (!isTrueDest) TrueValues = TrueValues.inverse();
+          
+          // Figure out the possible values of the query BEFORE this branch.  
+          LVILatticeVal InBlock = getBlockValue(BBFrom);
+          if (!InBlock.isConstantRange())
+            return LVILatticeVal::getRange(TrueValues);
+            
+          // Find all potential values that satisfy both the input and output
+          // conditions.
+          ConstantRange PossibleValues =
+            TrueValues.intersectWith(InBlock.getConstantRange());
+            
+          return LVILatticeVal::getRange(PossibleValues);
+        }
+      }
     }
   }
 
   // If the edge was formed by a switch on the value, then we may know exactly
   // what it is.
   if (SwitchInst *SI = dyn_cast<SwitchInst>(BBFrom->getTerminator())) {
-    // If BBTo is the default destination of the switch, we don't know anything.
-    // Given a more powerful range analysis we could know stuff.
-    if (SI->getCondition() == Val && SI->getDefaultDest() != BBTo) {
+    if (SI->getCondition() == Val) {
+      // We don't know anything in the default case.
+      if (SI->getDefaultDest() == BBTo) {
+        LVILatticeVal Result;
+        Result.markOverdefined();
+        return Result;
+      }
+      
       // We only know something if there is exactly one value that goes from
       // BBFrom to BBTo.
       unsigned NumEdges = 0;
@@ -496,8 +728,8 @@
         << BB->getName() << "'\n");
   
   LVILatticeVal Result = LVIQuery(V, *this,
-                                  ValueCache[LVIValueHandle(V, this)], 
-                                  OverDefinedCache).getBlockValue(BB);
+                                ValueCache[LVIValueHandle(V, this)], 
+                                OverDefinedCache).getBlockValue(BB);
   
   DEBUG(dbgs() << "  Result = " << Result << "\n");
   return Result;
@@ -537,7 +769,7 @@
   worklist.push_back(OldSucc);
   
   DenseSet<Value*> ClearSet;
-  for (std::set<std::pair<BasicBlock*, Value*> >::iterator
+  for (std::set<std::pair<AssertingVH<BasicBlock>, Value*> >::iterator
        I = OverDefinedCache.begin(), E = OverDefinedCache.end(); I != E; ++I) {
     if (I->first == OldSucc)
       ClearSet.insert(I->second);
@@ -558,7 +790,7 @@
     for (DenseSet<Value*>::iterator I = ClearSet.begin(),E = ClearSet.end();
          I != E; ++I) {
       // If a value was marked overdefined in OldSucc, and is here too...
-      std::set<std::pair<BasicBlock*, Value*> >::iterator OI =
+      std::set<std::pair<AssertingVH<BasicBlock>, Value*> >::iterator OI =
         OverDefinedCache.find(std::make_pair(ToUpdate, *I));
       if (OI == OverDefinedCache.end()) continue;
 
@@ -585,12 +817,6 @@
 //                            LazyValueInfo Impl
 //===----------------------------------------------------------------------===//
 
-bool LazyValueInfo::runOnFunction(Function &F) {
-  TD = getAnalysisIfAvailable<TargetData>();
-  // Fully lazy.
-  return false;
-}
-
 /// getCache - This lazily constructs the LazyValueInfoCache.
 static LazyValueInfoCache &getCache(void *&PImpl) {
   if (!PImpl)
@@ -598,6 +824,15 @@
   return *static_cast<LazyValueInfoCache*>(PImpl);
 }
 
+bool LazyValueInfo::runOnFunction(Function &F) {
+  if (PImpl)
+    getCache(PImpl).clear();
+  
+  TD = getAnalysisIfAvailable<TargetData>();
+  // Fully lazy.
+  return false;
+}
+
 void LazyValueInfo::releaseMemory() {
   // If the cache was allocated, free it.
   if (PImpl) {
@@ -611,6 +846,11 @@
   
   if (Result.isConstant())
     return Result.getConstant();
+  else if (Result.isConstantRange()) {
+    ConstantRange CR = Result.getConstantRange();
+    if (const APInt *SingleVal = CR.getSingleElement())
+      return ConstantInt::get(V->getContext(), *SingleVal);
+  }
   return 0;
 }
 
@@ -622,6 +862,11 @@
   
   if (Result.isConstant())
     return Result.getConstant();
+  else if (Result.isConstantRange()) {
+    ConstantRange CR = Result.getConstantRange();
+    if (const APInt *SingleVal = CR.getSingleElement())
+      return ConstantInt::get(V->getContext(), *SingleVal);
+  }
   return 0;
 }
 
@@ -642,6 +887,36 @@
     return Unknown;
   }
   
+  if (Result.isConstantRange()) {
+    ConstantInt *CI = dyn_cast<ConstantInt>(C);
+    if (!CI) return Unknown;
+    
+    ConstantRange CR = Result.getConstantRange();
+    if (Pred == ICmpInst::ICMP_EQ) {
+      if (!CR.contains(CI->getValue()))
+        return False;
+      
+      if (CR.isSingleElement() && CR.contains(CI->getValue()))
+        return True;
+    } else if (Pred == ICmpInst::ICMP_NE) {
+      if (!CR.contains(CI->getValue()))
+        return True;
+      
+      if (CR.isSingleElement() && CR.contains(CI->getValue()))
+        return False;
+    }
+    
+    // Handle more complex predicates.
+    ConstantRange RHS(CI->getValue(), CI->getValue()+1);
+    ConstantRange TrueValues = ConstantRange::makeICmpRegion(Pred, RHS);
+    if (CR.intersectWith(TrueValues).isEmptySet())
+      return False;
+    else if (TrueValues.contains(CR))
+      return True;
+    
+    return Unknown;
+  }
+  
   if (Result.isNotConstant()) {
     // If this is an equality comparison, we can try to fold it knowing that
     // "V != C1".
@@ -666,5 +941,9 @@
 
 void LazyValueInfo::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
                                BasicBlock* NewSucc) {
-  getCache(PImpl).threadEdge(PredBB, OldSucc, NewSucc);
+  if (PImpl) getCache(PImpl).threadEdge(PredBB, OldSucc, NewSucc);
+}
+
+void LazyValueInfo::eraseBlock(BasicBlock *BB) {
+  if (PImpl) getCache(PImpl).eraseBlock(BB);
 }
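
A note on the LazyValueInfo hunks above: the new edge code asks ConstantRange::makeICmpRegion for the set of values the branch condition admits, inverts that set on the false edge, and intersects it with whatever was already known about the value in the predecessor block; the same ranges later answer getPredicateAt via contains/intersect tests. Below is a minimal standalone sketch of that idea using non-wrapping half-open unsigned intervals in place of LLVM's wrapping ConstantRange; the names (Range, regionFor, and so on) are illustrative, not LLVM API.

  #include <algorithm>
  #include <cstdint>
  #include <iostream>

  // Half-open interval [Lo, Hi); Lo == Hi encodes the empty set.  Real
  // ConstantRange also models wrapped intervals (needed e.g. for NE).
  struct Range {
    uint64_t Lo, Hi;
    bool contains(uint64_t V) const { return V >= Lo && V < Hi; }
    Range intersect(const Range &R) const {
      uint64_t L = std::max(Lo, R.Lo), H = std::min(Hi, R.Hi);
      return L < H ? Range{L, H} : Range{0, 0}; // {0,0} = empty
    }
  };

  enum Pred { EQ, ULT, UGE };

  // Analogue of makeICmpRegion: all V for which "V Pred C" holds.
  Range regionFor(Pred P, uint64_t C) {
    switch (P) {
    case EQ:  return {C, C + 1};
    case ULT: return {0, C};
    case UGE: return {C, ~UINT64_C(0)};
    }
    return {0, ~UINT64_C(0)}; // unreachable; keeps compilers quiet
  }

  int main() {
    Range InBlock = {0, 100};              // known before the branch
    Range TrueEdge = regionFor(ULT, 10);   // "if (x < 10)" taken
    Range FalseEdge = regionFor(UGE, 10);  // the inverse() of the above

    Range OnTrue = InBlock.intersect(TrueEdge);   // [0, 10)
    Range OnFalse = InBlock.intersect(FalseEdge); // [10, 100)
    std::cout << "false edge: [" << OnFalse.Lo << ", "
              << OnFalse.Hi << ")\n";

    // getPredicateAt-style query: can "x == 42" hold on the true edge?
    std::cout << (OnTrue.contains(42) ? "maybe" : "provably false")
              << "\n";                     // provably false
  }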

Modified: llvm/branches/wendling/eh/lib/Analysis/LibCallAliasAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/LibCallAliasAnalysis.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/LibCallAliasAnalysis.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/LibCallAliasAnalysis.cpp Tue Oct 26 19:48:03 2010
@@ -21,7 +21,7 @@
 // Register this pass...
 char LibCallAliasAnalysis::ID = 0;
 INITIALIZE_AG_PASS(LibCallAliasAnalysis, AliasAnalysis, "libcall-aa",
-                   "LibCall Alias Analysis", false, true, false);
+                   "LibCall Alias Analysis", false, true, false)
 
 FunctionPass *llvm::createLibCallAliasAnalysisPass(LibCallInfo *LCI) {
   return new LibCallAliasAnalysis(LCI);
@@ -43,8 +43,8 @@
 /// vs the specified pointer/size.
 AliasAnalysis::ModRefResult
 LibCallAliasAnalysis::AnalyzeLibCallDetails(const LibCallFunctionInfo *FI,
-                                            CallSite CS, Value *P,
-                                            unsigned Size) {
+                                            ImmutableCallSite CS,
+                                            const Location &Loc) {
   // If we have a function, check to see what kind of mod/ref effects it
   // has.  Start by including any info globally known about the function.
   AliasAnalysis::ModRefResult MRInfo = FI->UniversalBehavior;
@@ -64,9 +64,9 @@
   if (FI->DetailsType == LibCallFunctionInfo::DoesNot) {
     // Find out if the pointer refers to a known location.
     for (unsigned i = 0; Details[i].LocationID != ~0U; ++i) {
-      const LibCallLocationInfo &Loc =
+      const LibCallLocationInfo &LocInfo =
       LCI->getLocationInfo(Details[i].LocationID);
-      LibCallLocationInfo::LocResult Res = Loc.isLocation(CS, P, Size);
+      LibCallLocationInfo::LocResult Res = LocInfo.isLocation(CS, Loc);
       if (Res != LibCallLocationInfo::Yes) continue;
       
       // If we find a match against a location that we 'do not' interact with,
@@ -85,9 +85,9 @@
   // Find out if the pointer refers to a known location.
   bool NoneMatch = true;
   for (unsigned i = 0; Details[i].LocationID != ~0U; ++i) {
-    const LibCallLocationInfo &Loc =
+    const LibCallLocationInfo &LocInfo =
     LCI->getLocationInfo(Details[i].LocationID);
-    LibCallLocationInfo::LocResult Res = Loc.isLocation(CS, P, Size);
+    LibCallLocationInfo::LocResult Res = LocInfo.isLocation(CS, Loc);
     if (Res == LibCallLocationInfo::No) continue;
     
     // If we don't know if this pointer points to the location, then we have to
@@ -117,20 +117,21 @@
 // specified memory object.
 //
 AliasAnalysis::ModRefResult
-LibCallAliasAnalysis::getModRefInfo(CallSite CS, Value *P, unsigned Size) {
+LibCallAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
+                                    const Location &Loc) {
   ModRefResult MRInfo = ModRef;
   
   // If this is a direct call to a function that LCI knows about, get the
   // information about the runtime function.
   if (LCI) {
-    if (Function *F = CS.getCalledFunction()) {
+    if (const Function *F = CS.getCalledFunction()) {
       if (const LibCallFunctionInfo *FI = LCI->getFunctionInfo(F)) {
-        MRInfo = ModRefResult(MRInfo & AnalyzeLibCallDetails(FI, CS, P, Size));
+        MRInfo = ModRefResult(MRInfo & AnalyzeLibCallDetails(FI, CS, Loc));
         if (MRInfo == NoModRef) return NoModRef;
       }
     }
   }
   
   // The AliasAnalysis base class has some smarts, let's use them.
-  return (ModRefResult)(MRInfo | AliasAnalysis::getModRefInfo(CS, P, Size));
+  return (ModRefResult)(MRInfo | AliasAnalysis::getModRefInfo(CS, Loc));
 }
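
For the getModRefInfo flow above, the pattern is worth calling out: start from the fully conservative ModRef, bitwise-AND in each more precise source of information, and return early once the result reaches NoModRef, since nothing can refine it further. A tiny standalone model of that lattice walk follows; the two oracle functions stand in for the LibCallInfo details and the other available analyses, and are assumptions of the sketch, not real API.

  #include <cstdio>

  // Mod/Ref as a 2-bit mask, mirroring AliasAnalysis::ModRefResult.
  enum ModRefResult { NoModRef = 0, Ref = 1, Mod = 2, ModRef = Ref | Mod };

  // Hypothetical oracles for this sketch only.
  ModRefResult fromLibCallDetails() { return Ref; }    // "only reads"
  ModRefResult fromSomethingElse()  { return ModRef; } // "no idea"

  ModRefResult query() {
    ModRefResult MR = ModRef;                     // start conservative
    MR = ModRefResult(MR & fromLibCallDetails()); // AND = intersection
    if (MR == NoModRef)
      return NoModRef;                            // bottom: stop early
    return ModRefResult(MR & fromSomethingElse());
  }

  int main() { std::printf("result = %d\n", query()); } // prints 1 (Ref)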

Modified: llvm/branches/wendling/eh/lib/Analysis/LibCallSemantics.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/LibCallSemantics.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/LibCallSemantics.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/LibCallSemantics.cpp Tue Oct 26 19:48:03 2010
@@ -40,7 +40,8 @@
 
 /// getFunctionInfo - Return the LibCallFunctionInfo object corresponding to
 /// the specified function if we have it.  If not, return null.
-const LibCallFunctionInfo *LibCallInfo::getFunctionInfo(Function *F) const {
+const LibCallFunctionInfo *
+LibCallInfo::getFunctionInfo(const Function *F) const {
   StringMap<const LibCallFunctionInfo*> *Map = getMap(Impl);
   
   /// If this is the first time we are querying for this info, lazily construct

Modified: llvm/branches/wendling/eh/lib/Analysis/Lint.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/Lint.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/Lint.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/Lint.cpp Tue Oct 26 19:48:03 2010
@@ -70,7 +70,7 @@
 
     void visitCallSite(CallSite CS);
     void visitMemoryReference(Instruction &I, Value *Ptr,
-                              unsigned Size, unsigned Align,
+                              uint64_t Size, unsigned Align,
                               const Type *Ty, unsigned Flags);
 
     void visitCallInst(CallInst &I);
@@ -108,7 +108,9 @@
     raw_string_ostream MessagesStr;
 
     static char ID; // Pass identification, replacement for typeid
-    Lint() : FunctionPass(&ID), MessagesStr(Messages) {}
+    Lint() : FunctionPass(ID), MessagesStr(Messages) {
+      initializeLintPass(*PassRegistry::getPassRegistry());
+    }
 
     virtual bool runOnFunction(Function &F);
 
@@ -129,12 +131,6 @@
       }
     }
 
-    void WriteType(const Type *T) {
-      if (!T) return;
-      MessagesStr << ' ';
-      WriteTypeSymbolic(MessagesStr, T, Mod);
-    }
-
     // CheckFailed - A check failed, so print out the condition and the message
     // that failed.  This provides a nice place to put a breakpoint if you want
     // to see why something is not correct.
@@ -147,27 +143,16 @@
       WriteValue(V3);
       WriteValue(V4);
     }
-
-    void CheckFailed(const Twine &Message, const Value *V1,
-                     const Type *T2, const Value *V3 = 0) {
-      MessagesStr << Message.str() << "\n";
-      WriteValue(V1);
-      WriteType(T2);
-      WriteValue(V3);
-    }
-
-    void CheckFailed(const Twine &Message, const Type *T1,
-                     const Type *T2 = 0, const Type *T3 = 0) {
-      MessagesStr << Message.str() << "\n";
-      WriteType(T1);
-      WriteType(T2);
-      WriteType(T3);
-    }
   };
 }
 
 char Lint::ID = 0;
-INITIALIZE_PASS(Lint, "lint", "Statically lint-checks LLVM IR", false, true);
+INITIALIZE_PASS_BEGIN(Lint, "lint", "Statically lint-checks LLVM IR",
+                      false, true)
+INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_END(Lint, "lint", "Statically lint-checks LLVM IR",
+                    false, true)
 
 // Assert - We know that cond should be true, if not print an error message.
 #define Assert(C, M) \
@@ -208,7 +193,8 @@
   Instruction &I = *CS.getInstruction();
   Value *Callee = CS.getCalledValue();
 
-  visitMemoryReference(I, Callee, ~0u, 0, 0, MemRef::Callee);
+  visitMemoryReference(I, Callee, AliasAnalysis::UnknownSize,
+                       0, 0, MemRef::Callee);
 
   if (Function *F = dyn_cast<Function>(findValue(Callee, /*OffsetOk=*/false))) {
     Assert1(CS.getCallingConv() == F->getCallingConv(),
@@ -246,8 +232,7 @@
         // where nothing is known.
         if (Formal->hasNoAliasAttr() && Actual->getType()->isPointerTy())
           for (CallSite::arg_iterator BI = CS.arg_begin(); BI != AE; ++BI) {
-            Assert1(AI == BI ||
-                    AA->alias(*AI, ~0u, *BI, ~0u) != AliasAnalysis::MustAlias,
+            Assert1(AI == BI || AA->alias(*AI, *BI) != AliasAnalysis::MustAlias,
                     "Unusual: noalias argument aliases another argument", &I);
           }
 
@@ -282,15 +267,17 @@
     case Intrinsic::memcpy: {
       MemCpyInst *MCI = cast<MemCpyInst>(&I);
       // TODO: If the size is known, use it.
-      visitMemoryReference(I, MCI->getDest(), ~0u, MCI->getAlignment(), 0,
+      visitMemoryReference(I, MCI->getDest(), AliasAnalysis::UnknownSize,
+                           MCI->getAlignment(), 0,
                            MemRef::Write);
-      visitMemoryReference(I, MCI->getSource(), ~0u, MCI->getAlignment(), 0,
+      visitMemoryReference(I, MCI->getSource(), AliasAnalysis::UnknownSize,
+                           MCI->getAlignment(), 0,
                            MemRef::Read);
 
       // Check that the memcpy arguments don't overlap. The AliasAnalysis API
       // isn't expressive enough for what we really want to do. Known partial
       // overlap is not distinguished from the case where nothing is known.
-      unsigned Size = 0;
+      uint64_t Size = 0;
       if (const ConstantInt *Len =
             dyn_cast<ConstantInt>(findValue(MCI->getLength(),
                                             /*OffsetOk=*/false)))
@@ -304,16 +291,19 @@
     case Intrinsic::memmove: {
       MemMoveInst *MMI = cast<MemMoveInst>(&I);
       // TODO: If the size is known, use it.
-      visitMemoryReference(I, MMI->getDest(), ~0u, MMI->getAlignment(), 0,
+      visitMemoryReference(I, MMI->getDest(), AliasAnalysis::UnknownSize,
+                           MMI->getAlignment(), 0,
                            MemRef::Write);
-      visitMemoryReference(I, MMI->getSource(), ~0u, MMI->getAlignment(), 0,
+      visitMemoryReference(I, MMI->getSource(), AliasAnalysis::UnknownSize,
+                           MMI->getAlignment(), 0,
                            MemRef::Read);
       break;
     }
     case Intrinsic::memset: {
       MemSetInst *MSI = cast<MemSetInst>(&I);
       // TODO: If the size is known, use it.
-      visitMemoryReference(I, MSI->getDest(), ~0u, MSI->getAlignment(), 0,
+      visitMemoryReference(I, MSI->getDest(), AliasAnalysis::UnknownSize,
+                           MSI->getAlignment(), 0,
                            MemRef::Write);
       break;
     }
@@ -323,24 +313,26 @@
               "Undefined behavior: va_start called in a non-varargs function",
               &I);
 
-      visitMemoryReference(I, CS.getArgument(0), ~0u, 0, 0,
-                           MemRef::Read | MemRef::Write);
+      visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
+                           0, 0, MemRef::Read | MemRef::Write);
       break;
     case Intrinsic::vacopy:
-      visitMemoryReference(I, CS.getArgument(0), ~0u, 0, 0, MemRef::Write);
-      visitMemoryReference(I, CS.getArgument(1), ~0u, 0, 0, MemRef::Read);
+      visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
+                           0, 0, MemRef::Write);
+      visitMemoryReference(I, CS.getArgument(1), AliasAnalysis::UnknownSize,
+                           0, 0, MemRef::Read);
       break;
     case Intrinsic::vaend:
-      visitMemoryReference(I, CS.getArgument(0), ~0u, 0, 0,
-                           MemRef::Read | MemRef::Write);
+      visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
+                           0, 0, MemRef::Read | MemRef::Write);
       break;
 
     case Intrinsic::stackrestore:
       // Stackrestore doesn't read or write memory, but it sets the
       // stack pointer, which the compiler may read from or write to
       // at any time, so check it for both readability and writeability.
-      visitMemoryReference(I, CS.getArgument(0), ~0u, 0, 0,
-                           MemRef::Read | MemRef::Write);
+      visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
+                           0, 0, MemRef::Read | MemRef::Write);
       break;
     }
 }
@@ -369,7 +361,7 @@
 // TODO: Check that the reference is in bounds.
 // TODO: Check readnone/readonly function attributes.
 void Lint::visitMemoryReference(Instruction &I,
-                                Value *Ptr, unsigned Size, unsigned Align,
+                                Value *Ptr, uint64_t Size, unsigned Align,
                                 const Type *Ty, unsigned Flags) {
   // If no memory is being referenced, it doesn't matter if the pointer
   // is valid.
@@ -513,12 +505,16 @@
 }
 
 void Lint::visitVAArgInst(VAArgInst &I) {
-  visitMemoryReference(I, I.getOperand(0), ~0u, 0, 0,
+  visitMemoryReference(I, I.getOperand(0), AliasAnalysis::UnknownSize, 0, 0,
                        MemRef::Read | MemRef::Write);
 }
 
 void Lint::visitIndirectBrInst(IndirectBrInst &I) {
-  visitMemoryReference(I, I.getAddress(), ~0u, 0, 0, MemRef::Branchee);
+  visitMemoryReference(I, I.getAddress(), AliasAnalysis::UnknownSize, 0, 0,
+                       MemRef::Branchee);
+
+  Assert1(I.getNumDestinations() != 0,
+          "Undefined behavior: indirectbr with no destinations", &I);
 }
 
 void Lint::visitExtractElementInst(ExtractElementInst &I) {
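
Most of the Lint churn above swaps the magic ~0u for the named sentinel AliasAnalysis::UnknownSize while widening sizes to uint64_t. The point of pairing the two changes: once the parameter is 64 bits wide, a 32-bit ~0u silently becomes 0xFFFFFFFF, a plausible real size, instead of staying "unknown". A small standalone illustration; UnknownSize is defined locally here on the assumption that it is the all-ones 64-bit value.

  #include <cstdint>
  #include <cstdio>

  // Named 64-bit sentinel; documents intent and survives widening.
  static const uint64_t UnknownSize = ~UINT64_C(0);

  void visitMemoryReference(uint64_t Size) {
    if (Size == UnknownSize)
      std::puts("size unknown: skip bounds checking");
    else
      std::printf("check %llu bytes\n", (unsigned long long)Size);
  }

  int main() {
    visitMemoryReference(UnknownSize); // still unknown
    visitMemoryReference(~0u);         // oops: now just 4294967295 bytes
  }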

Modified: llvm/branches/wendling/eh/lib/Analysis/LiveValues.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/LiveValues.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/LiveValues.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/LiveValues.cpp Tue Oct 26 19:48:03 2010
@@ -22,10 +22,16 @@
 }
 
 char LiveValues::ID = 0;
-INITIALIZE_PASS(LiveValues, "live-values",
-                "Value Liveness Analysis", false, true);
+INITIALIZE_PASS_BEGIN(LiveValues, "live-values",
+                "Value Liveness Analysis", false, true)
+INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_PASS_END(LiveValues, "live-values",
+                "Value Liveness Analysis", false, true)
 
-LiveValues::LiveValues() : FunctionPass(&ID) {}
+LiveValues::LiveValues() : FunctionPass(ID) {
+  initializeLiveValuesPass(*PassRegistry::getPassRegistry());
+}
 
 void LiveValues::getAnalysisUsage(AnalysisUsage &AU) const {
   AU.addRequired<DominatorTree>();

Modified: llvm/branches/wendling/eh/lib/Analysis/Loads.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/Loads.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/Loads.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/Loads.cpp Tue Oct 26 19:48:03 2010
@@ -166,7 +166,7 @@
   if (MaxInstsToScan == 0) MaxInstsToScan = ~0U;
 
   // If we're using alias analysis to disambiguate get the size of *Ptr.
-  unsigned AccessSize = 0;
+  uint64_t AccessSize = 0;
   if (AA) {
     const Type *AccessTy = cast<PointerType>(Ptr->getType())->getElementType();
     AccessSize = AA->getTypeStoreSize(AccessTy);

Modified: llvm/branches/wendling/eh/lib/Analysis/LoopDependenceAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/LoopDependenceAnalysis.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/LoopDependenceAnalysis.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/LoopDependenceAnalysis.cpp Tue Oct 26 19:48:03 2010
@@ -46,8 +46,12 @@
   return new LoopDependenceAnalysis();
 }
 
-INITIALIZE_PASS(LoopDependenceAnalysis, "lda",
-                "Loop Dependence Analysis", false, true);
+INITIALIZE_PASS_BEGIN(LoopDependenceAnalysis, "lda",
+                "Loop Dependence Analysis", false, true)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_END(LoopDependenceAnalysis, "lda",
+                "Loop Dependence Analysis", false, true)
 char LoopDependenceAnalysis::ID = 0;
 
 //===----------------------------------------------------------------------===//

Modified: llvm/branches/wendling/eh/lib/Analysis/LoopInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/LoopInfo.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/LoopInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/LoopInfo.cpp Tue Oct 26 19:48:03 2010
@@ -38,7 +38,9 @@
                 cl::desc("Verify loop info (time consuming)"));
 
 char LoopInfo::ID = 0;
-INITIALIZE_PASS(LoopInfo, "loops", "Natural Loop Information", true, true);
+INITIALIZE_PASS_BEGIN(LoopInfo, "loops", "Natural Loop Information", true, true)
+INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_END(LoopInfo, "loops", "Natural Loop Information", true, true)
 
 //===----------------------------------------------------------------------===//
 // Loop implementation
@@ -48,15 +50,18 @@
 ///
 bool Loop::isLoopInvariant(Value *V) const {
   if (Instruction *I = dyn_cast<Instruction>(V))
-    return isLoopInvariant(I);
+    return !contains(I);
   return true;  // All non-instructions are loop invariant
 }
 
-/// isLoopInvariant - Return true if the specified instruction is
-/// loop-invariant.
-///
-bool Loop::isLoopInvariant(Instruction *I) const {
-  return !contains(I);
+/// hasLoopInvariantOperands - Return true if all the operands of the
+/// specified instruction are loop invariant. 
+bool Loop::hasLoopInvariantOperands(Instruction *I) const {
+  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
+    if (!isLoopInvariant(I->getOperand(i)))
+      return false;
+  
+  return true;
 }
 
 /// makeLoopInvariant - If the given value is an instruction inside of the
@@ -105,6 +110,7 @@
   for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
     if (!makeLoopInvariant(I->getOperand(i), Changed, InsertPt))
       return false;
+  
   // Hoist.
   I->moveBefore(InsertPt);
   Changed = true;
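
The new Loop::hasLoopInvariantOperands above encodes the standard hoisting precondition: an instruction may move out of a loop only if every operand is computed outside it. A minimal standalone model of the containment test it reduces to; the types here are stand-ins, not LLVM's.

  #include <set>
  #include <vector>

  struct Instr;
  struct Value { const Instr *AsInstr; }; // null: constant or argument

  struct Instr {
    int Block;                    // id of the enclosing basic block
    std::vector<Value> Operands;
  };

  struct Loop {
    std::set<int> Blocks;         // blocks contained in the loop
    bool contains(const Instr *I) const { return Blocks.count(I->Block); }

    // Non-instructions are invariant; an instruction is invariant
    // exactly when it lives outside the loop.
    bool isLoopInvariant(const Value &V) const {
      return !V.AsInstr || !contains(V.AsInstr);
    }
    bool hasLoopInvariantOperands(const Instr *I) const {
      for (const Value &Op : I->Operands)
        if (!isLoopInvariant(Op))
          return false;
      return true;
    }
  };

  int main() {
    Loop L; L.Blocks = {1, 2};
    Instr Outside{0, {}};
    Instr Inside{1, {Value{&Outside}}};
    return L.hasLoopInvariantOperands(&Inside) ? 0 : 1; // 0: hoistable
  }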

Modified: llvm/branches/wendling/eh/lib/Analysis/LoopPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/LoopPass.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/LoopPass.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/LoopPass.cpp Tue Oct 26 19:48:03 2010
@@ -30,9 +30,8 @@
 
 public:
   static char ID;
-  PrintLoopPass() : LoopPass(&ID), Out(dbgs()) {}
   PrintLoopPass(const std::string &B, raw_ostream &o)
-      : LoopPass(&ID), Banner(B), Out(o) {}
+      : LoopPass(ID), Banner(B), Out(o) {}
 
   virtual void getAnalysisUsage(AnalysisUsage &AU) const {
     AU.setPreservesAll();
@@ -59,7 +58,7 @@
 char LPPassManager::ID = 0;
 
 LPPassManager::LPPassManager(int Depth) 
-  : FunctionPass(&ID), PMDataManager(Depth) { 
+  : FunctionPass(ID), PMDataManager(Depth) { 
   skipThisLoop = false;
   redoThisLoop = false;
   LI = NULL;
@@ -183,7 +182,7 @@
 void LPPassManager::cloneBasicBlockSimpleAnalysis(BasicBlock *From, 
                                                   BasicBlock *To, Loop *L) {
   for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {  
-    LoopPass *LP = (LoopPass *)getContainedPass(Index);
+    LoopPass *LP = getContainedPass(Index);
     LP->cloneBasicBlockAnalysis(From, To, L);
   }
 }
@@ -198,7 +197,7 @@
     }
   }
   for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {  
-    LoopPass *LP = (LoopPass *)getContainedPass(Index);
+    LoopPass *LP = getContainedPass(Index);
     LP->deleteAnalysisValue(V, L);
   }
 }
@@ -240,7 +239,7 @@
        I != E; ++I) {
     Loop *L = *I;
     for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {  
-      LoopPass *P = (LoopPass*)getContainedPass(Index);
+      LoopPass *P = getContainedPass(Index);
       Changed |= P->doInitialization(L, *this);
     }
   }
@@ -254,7 +253,7 @@
 
     // Run all passes on the current Loop.
     for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {  
-      LoopPass *P = (LoopPass*)getContainedPass(Index);
+      LoopPass *P = getContainedPass(Index);
 
       dumpPassInfo(P, EXECUTION_MSG, ON_LOOP_MSG,
                    CurrentLoop->getHeader()->getName());
@@ -320,7 +319,7 @@
   
   // Finalization
   for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
-    LoopPass *P = (LoopPass *)getContainedPass(Index);
+    LoopPass *P = getContainedPass(Index);
     Changed |= P->doFinalization();
   }
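
The repeated change in the LoopPass.cpp hunks above, dropping the C-style (LoopPass*) casts, relies on giving LPPassManager an accessor that performs the downcast once, in one place. A sketch of that pattern with stand-in types:

  #include <vector>

  struct Pass { virtual ~Pass() {} };
  struct LoopPass : Pass { virtual bool runOnLoop() { return false; } };

  struct PMDataManager {
    std::vector<Pass *> PassVector;   // only LoopPasses are ever added
    unsigned getNumContainedPasses() const {
      return static_cast<unsigned>(PassVector.size());
    }
  };

  struct LPPassManager : PMDataManager {
    // One downcast here instead of a C-style cast at every use site.
    LoopPass *getContainedPass(unsigned N) {
      return static_cast<LoopPass *>(PassVector[N]);
    }
  };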
 

Modified: llvm/branches/wendling/eh/lib/Analysis/MemoryDependenceAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/MemoryDependenceAnalysis.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/MemoryDependenceAnalysis.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/MemoryDependenceAnalysis.cpp Tue Oct 26 19:48:03 2010
@@ -19,6 +19,7 @@
 #include "llvm/Instructions.h"
 #include "llvm/IntrinsicInst.h"
 #include "llvm/Function.h"
+#include "llvm/LLVMContext.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Analysis/Dominators.h"
 #include "llvm/Analysis/InstructionSimplify.h"
@@ -46,11 +47,15 @@
 char MemoryDependenceAnalysis::ID = 0;
   
 // Register this pass...
-INITIALIZE_PASS(MemoryDependenceAnalysis, "memdep",
-                "Memory Dependence Analysis", false, true);
+INITIALIZE_PASS_BEGIN(MemoryDependenceAnalysis, "memdep",
+                "Memory Dependence Analysis", false, true)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_END(MemoryDependenceAnalysis, "memdep",
+                      "Memory Dependence Analysis", false, true)
 
 MemoryDependenceAnalysis::MemoryDependenceAnalysis()
-: FunctionPass(&ID), PredCache(0) {
+: FunctionPass(ID), PredCache(0) {
+  initializeMemoryDependenceAnalysisPass(*PassRegistry::getPassRegistry());
 }
 MemoryDependenceAnalysis::~MemoryDependenceAnalysis() {
 }
@@ -108,44 +113,34 @@
     Instruction *Inst = --ScanIt;
     
     // If this inst is a memory op, get the pointer it accessed
-    Value *Pointer = 0;
-    uint64_t PointerSize = 0;
+    AliasAnalysis::Location Loc;
     if (StoreInst *S = dyn_cast<StoreInst>(Inst)) {
-      Pointer = S->getPointerOperand();
-      PointerSize = AA->getTypeStoreSize(S->getOperand(0)->getType());
+      Loc = AliasAnalysis::Location(S->getPointerOperand(),
+                                    AA->getTypeStoreSize(S->getValueOperand()
+                                                           ->getType()),
+                                    S->getMetadata(LLVMContext::MD_tbaa));
     } else if (VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
-      Pointer = V->getOperand(0);
-      PointerSize = AA->getTypeStoreSize(V->getType());
+      Loc = AliasAnalysis::Location(V->getPointerOperand(),
+                                    AA->getTypeStoreSize(V->getType()),
+                                    V->getMetadata(LLVMContext::MD_tbaa));
     } else if (const CallInst *CI = isFreeCall(Inst)) {
-      Pointer = CI->getArgOperand(0);
       // calls to free() erase the entire structure
-      PointerSize = ~0ULL;
+      Loc = AliasAnalysis::Location(CI->getArgOperand(0));
     } else if (CallSite InstCS = cast<Value>(Inst)) {
       // Debug intrinsics don't cause dependences.
       if (isa<DbgInfoIntrinsic>(Inst)) continue;
       // If these two calls do not interfere, look past it.
       switch (AA->getModRefInfo(CS, InstCS)) {
       case AliasAnalysis::NoModRef:
-        // If the two calls don't interact (e.g. InstCS is readnone) keep
-        // scanning.
+        // If the two calls are the same, return InstCS as a Def, so that
+        // CS can be found redundant and eliminated.
+        if (isReadOnlyCall && InstCS.onlyReadsMemory() &&
+            CS.getInstruction()->isIdenticalToWhenDefined(Inst))
+          return MemDepResult::getDef(Inst);
+
+        // Otherwise if the two calls don't interact (e.g. InstCS is readnone)
+        // keep scanning.
         continue;
-      case AliasAnalysis::Ref:
-        // If the two calls read the same memory locations and CS is a readonly
-        // function, then we have two cases: 1) the calls may not interfere with
-        // each other at all.  2) the calls may produce the same value.  In case
-        // #1 we want to ignore the values, in case #2, we want to return Inst
-        // as a Def dependence.  This allows us to CSE in cases like:
-        //   X = strlen(P);
-        //    memchr(...);
-        //   Y = strlen(P);  // Y = X
-        if (isReadOnlyCall) {
-          if (CS.getCalledFunction() != 0 &&
-              CS.getCalledFunction() == InstCS.getCalledFunction())
-            return MemDepResult::getDef(Inst);
-          // Ignore unrelated read/read call dependences.
-          continue;
-        }
-        // FALL THROUGH
       default:
         return MemDepResult::getClobber(Inst);
       }
@@ -154,7 +149,7 @@
       continue;
     }
     
-    if (AA->getModRefInfo(CS, Pointer, PointerSize) != AliasAnalysis::NoModRef)
+    if (AA->getModRefInfo(CS, Loc) != AliasAnalysis::NoModRef)
       return MemDepResult::getClobber(Inst);
   }
   
@@ -169,7 +164,7 @@
 /// location depends.  If isLoad is true, this routine ignore may-aliases with
 /// read-only operations.
 MemDepResult MemoryDependenceAnalysis::
-getPointerDependencyFrom(Value *MemPtr, uint64_t MemSize, bool isLoad, 
+getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad, 
                          BasicBlock::iterator ScanIt, BasicBlock *BB) {
 
   Value *InvariantTag = 0;
@@ -186,8 +181,8 @@
     }
     
     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
-      // Debug intrinsics don't cause dependences.
-      if (isa<DbgInfoIntrinsic>(Inst)) continue;
+      // Debug intrinsics don't (and can't) cause dependences.
+      if (isa<DbgInfoIntrinsic>(II)) continue;
       
       // If we pass an invariant-end marker, then we've just entered an
       // invariant region and can start ignoring dependencies.
@@ -195,28 +190,33 @@
         // FIXME: This only considers queries directly on the invariant-tagged
         // pointer, not on query pointers that are indexed off of them.  It'd
         // be nice to handle that at some point.
-        AliasAnalysis::AliasResult R = 
-          AA->alias(II->getArgOperand(2), ~0U, MemPtr, ~0U);
-        if (R == AliasAnalysis::MustAlias) {
+        AliasAnalysis::AliasResult R =
+          AA->alias(AliasAnalysis::Location(II->getArgOperand(2)), MemLoc);
+        if (R == AliasAnalysis::MustAlias)
           InvariantTag = II->getArgOperand(0);
-          continue;
-        }
-      
+
+        continue;
+      }
+
       // If we reach a lifetime begin or end marker, then the query ends here
       // because the value is undefined.
-      } else if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
+      if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
         // FIXME: This only considers queries directly on the invariant-tagged
         // pointer, not on query pointers that are indexed off of them.  It'd
         // be nice to handle that at some point.
         AliasAnalysis::AliasResult R =
-          AA->alias(II->getArgOperand(1), ~0U, MemPtr, ~0U);
+          AA->alias(AliasAnalysis::Location(II->getArgOperand(1)), MemLoc);
         if (R == AliasAnalysis::MustAlias)
           return MemDepResult::getDef(II);
+        continue;
       }
     }
 
     // If we're querying on a load and we're in an invariant region, we're done
     // at this point. Nothing a load depends on can live in an invariant region.
+    //
+    // FIXME: this will prevent us from returning load/load must-aliases, so GVN
+    // won't remove redundant loads.
     if (isLoad && InvariantTag) continue;
 
     // Values depend on loads if the pointers are must aliased.  This means that
@@ -224,10 +224,12 @@
     if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
       Value *Pointer = LI->getPointerOperand();
       uint64_t PointerSize = AA->getTypeStoreSize(LI->getType());
+      MDNode *TBAATag = LI->getMetadata(LLVMContext::MD_tbaa);
       
       // If we found a pointer, check if it could be the same as our pointer.
       AliasAnalysis::AliasResult R =
-        AA->alias(Pointer, PointerSize, MemPtr, MemSize);
+        AA->alias(AliasAnalysis::Location(Pointer, PointerSize, TBAATag),
+                  MemLoc);
       if (R == AliasAnalysis::NoAlias)
         continue;
       
@@ -247,17 +249,19 @@
       // If alias analysis can tell that this store is guaranteed to not modify
       // the query pointer, ignore it.  Use getModRefInfo to handle cases where
       // the query pointer points to constant memory etc.
-      if (AA->getModRefInfo(SI, MemPtr, MemSize) == AliasAnalysis::NoModRef)
+      if (AA->getModRefInfo(SI, MemLoc) == AliasAnalysis::NoModRef)
         continue;
 
       // Ok, this store might clobber the query pointer.  Check to see if it is
       // a must alias: in this case, we want to return this as a def.
       Value *Pointer = SI->getPointerOperand();
       uint64_t PointerSize = AA->getTypeStoreSize(SI->getOperand(0)->getType());
+      MDNode *TBAATag = SI->getMetadata(LLVMContext::MD_tbaa);
       
       // If we found a pointer, check if it could be the same as our pointer.
       AliasAnalysis::AliasResult R =
-        AA->alias(Pointer, PointerSize, MemPtr, MemSize);
+        AA->alias(AliasAnalysis::Location(Pointer, PointerSize, TBAATag),
+                  MemLoc);
       
       if (R == AliasAnalysis::NoAlias)
         continue;
@@ -276,7 +280,7 @@
     // need to continue scanning until the malloc call.
     if (isa<AllocaInst>(Inst) ||
         (isa<CallInst>(Inst) && extractMallocCall(Inst))) {
-      Value *AccessPtr = MemPtr->getUnderlyingObject();
+      const Value *AccessPtr = MemLoc.Ptr->getUnderlyingObject();
       
       if (AccessPtr == Inst ||
           AA->alias(Inst, 1, AccessPtr, 1) == AliasAnalysis::MustAlias)
@@ -285,7 +289,7 @@
     }
 
     // See if this instruction (e.g. a call or vaarg) mod/ref's the pointer.
-    switch (AA->getModRefInfo(Inst, MemPtr, MemSize)) {
+    switch (AA->getModRefInfo(Inst, MemLoc)) {
     case AliasAnalysis::NoModRef:
       // If the call has no effect on the queried pointer, just ignore it.
       continue;
@@ -335,8 +339,7 @@
   
   BasicBlock *QueryParent = QueryInst->getParent();
   
-  Value *MemPtr = 0;
-  uint64_t MemSize = 0;
+  AliasAnalysis::Location MemLoc;
   
   // Do the scan.
   if (BasicBlock::iterator(QueryInst) == QueryParent->begin()) {
@@ -351,23 +354,23 @@
     // previous instruction as a clobber.
     if (SI->isVolatile())
       LocalCache = MemDepResult::getClobber(--BasicBlock::iterator(ScanPos));
-    else {
-      MemPtr = SI->getPointerOperand();
-      MemSize = AA->getTypeStoreSize(SI->getOperand(0)->getType());
-    }
+    else
+      MemLoc = AliasAnalysis::Location(SI->getPointerOperand(),
+                                       AA->getTypeStoreSize(SI->getOperand(0)
+                                                              ->getType()),
+                                       SI->getMetadata(LLVMContext::MD_tbaa));
   } else if (LoadInst *LI = dyn_cast<LoadInst>(QueryInst)) {
     // If this is a volatile load, don't mess around with it.  Just return the
     // previous instruction as a clobber.
     if (LI->isVolatile())
       LocalCache = MemDepResult::getClobber(--BasicBlock::iterator(ScanPos));
-    else {
-      MemPtr = LI->getPointerOperand();
-      MemSize = AA->getTypeStoreSize(LI->getType());
-    }
+    else
+      MemLoc = AliasAnalysis::Location(LI->getPointerOperand(),
+                                       AA->getTypeStoreSize(LI->getType()),
+                                       LI->getMetadata(LLVMContext::MD_tbaa));
   } else if (const CallInst *CI = isFreeCall(QueryInst)) {
-    MemPtr = CI->getArgOperand(0);
     // calls to free() erase the entire structure, not just a field.
-    MemSize = ~0UL;
+    MemLoc = AliasAnalysis::Location(CI->getArgOperand(0));
   } else if (isa<CallInst>(QueryInst) || isa<InvokeInst>(QueryInst)) {
     int IntrinsicID = 0;  // Intrinsic IDs start at 1.
     IntrinsicInst *II = dyn_cast<IntrinsicInst>(QueryInst);
@@ -378,12 +381,16 @@
     case Intrinsic::lifetime_start:
     case Intrinsic::lifetime_end:
     case Intrinsic::invariant_start:
-      MemPtr = II->getArgOperand(1);
-      MemSize = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
+      MemLoc = AliasAnalysis::Location(II->getArgOperand(1),
+                                       cast<ConstantInt>(II->getArgOperand(0))
+                                         ->getZExtValue(),
+                                       II->getMetadata(LLVMContext::MD_tbaa));
       break;
     case Intrinsic::invariant_end:
-      MemPtr = II->getArgOperand(2);
-      MemSize = cast<ConstantInt>(II->getArgOperand(1))->getZExtValue();
+      MemLoc = AliasAnalysis::Location(II->getArgOperand(2),
+                                       cast<ConstantInt>(II->getArgOperand(1))
+                                         ->getZExtValue(),
+                                       II->getMetadata(LLVMContext::MD_tbaa));
       break;
     default:
       CallSite QueryCS(QueryInst);
@@ -398,12 +405,12 @@
   }
   
   // If we need to do a pointer scan, make it happen.
-  if (MemPtr) {
+  if (MemLoc.Ptr) {
     bool isLoad = !QueryInst->mayWriteToMemory();
     if (IntrinsicInst *II = dyn_cast<MemoryUseIntrinsic>(QueryInst)) {
       isLoad |= II->getIntrinsicID() == Intrinsic::lifetime_end;
     }
-    LocalCache = getPointerDependencyFrom(MemPtr, MemSize, isLoad, ScanPos,
+    LocalCache = getPointerDependencyFrom(MemLoc, isLoad, ScanPos,
                                           QueryParent);
   }
   
@@ -578,31 +585,27 @@
 /// own block.
 ///
 void MemoryDependenceAnalysis::
-getNonLocalPointerDependency(Value *Pointer, bool isLoad, BasicBlock *FromBB,
+getNonLocalPointerDependency(const AliasAnalysis::Location &Loc, bool isLoad,
+                             BasicBlock *FromBB,
                              SmallVectorImpl<NonLocalDepResult> &Result) {
-  assert(Pointer->getType()->isPointerTy() &&
+  assert(Loc.Ptr->getType()->isPointerTy() &&
          "Can't get pointer deps of a non-pointer!");
   Result.clear();
   
-  // We know that the pointer value is live into FromBB find the def/clobbers
-  // from presecessors.
-  const Type *EltTy = cast<PointerType>(Pointer->getType())->getElementType();
-  uint64_t PointeeSize = AA->getTypeStoreSize(EltTy);
-  
-  PHITransAddr Address(Pointer, TD);
+  PHITransAddr Address(const_cast<Value *>(Loc.Ptr), TD);
   
   // This is the set of blocks we've inspected, and the pointer we consider in
   // each block.  Because of critical edges, we currently bail out if querying
   // a block with multiple different pointers.  This can happen during PHI
   // translation.
   DenseMap<BasicBlock*, Value*> Visited;
-  if (!getNonLocalPointerDepFromBB(Address, PointeeSize, isLoad, FromBB,
+  if (!getNonLocalPointerDepFromBB(Address, Loc, isLoad, FromBB,
                                    Result, Visited, true))
     return;
   Result.clear();
   Result.push_back(NonLocalDepResult(FromBB,
                                      MemDepResult::getClobber(FromBB->begin()),
-                                     Pointer));
+                                     const_cast<Value *>(Loc.Ptr)));
 }
 
 /// GetNonLocalInfoForBlock - Compute the memdep value for BB with
@@ -610,7 +613,7 @@
 /// lookup (which may use dirty cache info if available).  If we do a lookup,
 /// add the result to the cache.
 MemDepResult MemoryDependenceAnalysis::
-GetNonLocalInfoForBlock(Value *Pointer, uint64_t PointeeSize,
+GetNonLocalInfoForBlock(const AliasAnalysis::Location &Loc,
                         bool isLoad, BasicBlock *BB,
                         NonLocalDepInfo *Cache, unsigned NumSortedEntries) {
   
@@ -644,15 +647,14 @@
     ScanPos = ExistingResult->getResult().getInst();
     
     // Eliminating the dirty entry from 'Cache', so update the reverse info.
-    ValueIsLoadPair CacheKey(Pointer, isLoad);
+    ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
     RemoveFromReverseMap(ReverseNonLocalPtrDeps, ScanPos, CacheKey);
   } else {
     ++NumUncacheNonLocalPtr;
   }
   
   // Scan the block for the dependency.
-  MemDepResult Dep = getPointerDependencyFrom(Pointer, PointeeSize, isLoad, 
-                                              ScanPos, BB);
+  MemDepResult Dep = getPointerDependencyFrom(Loc, isLoad, ScanPos, BB);
   
   // If we had a dirty entry for the block, update it.  Otherwise, just add
   // a new entry.
@@ -671,7 +673,7 @@
   // update MemDep when we remove instructions.
   Instruction *Inst = Dep.getInst();
   assert(Inst && "Didn't depend on anything?");
-  ValueIsLoadPair CacheKey(Pointer, isLoad);
+  ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
   ReverseNonLocalPtrDeps[Inst].insert(CacheKey);
   return Dep;
 }
@@ -725,7 +727,8 @@
 /// not compute dependence information for some reason.  This should be treated
 /// as a clobber dependence on the first instruction in the predecessor block.
 bool MemoryDependenceAnalysis::
-getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, uint64_t PointeeSize,
+getNonLocalPointerDepFromBB(const PHITransAddr &Pointer,
+                            const AliasAnalysis::Location &Loc,
                             bool isLoad, BasicBlock *StartBB,
                             SmallVectorImpl<NonLocalDepResult> &Result,
                             DenseMap<BasicBlock*, Value*> &Visited,
@@ -733,14 +736,23 @@
   
   // Look up the cached info for Pointer.
   ValueIsLoadPair CacheKey(Pointer.getAddr(), isLoad);
-  
-  std::pair<BBSkipFirstBlockPair, NonLocalDepInfo> *CacheInfo =
-    &NonLocalPointerDeps[CacheKey];
-  NonLocalDepInfo *Cache = &CacheInfo->second;
+  NonLocalPointerInfo *CacheInfo = &NonLocalPointerDeps[CacheKey];
+
+  // If this query's TBAATag is inconsistent with the cached one, discard the
+  // tag and restart the query.
+  if (CacheInfo->TBAATag != Loc.TBAATag) {
+    CacheInfo->TBAATag = 0;
+    NonLocalPointerDeps.erase(CacheKey);
+    return getNonLocalPointerDepFromBB(Pointer, Loc.getWithoutTBAATag(),
+                                       isLoad, StartBB, Result, Visited,
+                                       SkipFirstBlock);
+  }
+
+  NonLocalDepInfo *Cache = &CacheInfo->NonLocalDeps;
 
   // If we have valid cached information for exactly the block we are
   // investigating, just return it with no recomputation.
-  if (CacheInfo->first == BBSkipFirstBlockPair(StartBB, SkipFirstBlock)) {
+  if (CacheInfo->Pair == BBSkipFirstBlockPair(StartBB, SkipFirstBlock)) {
     // We have a fully cached result for this query then we can just return the
     // cached results and populate the visited set.  However, we have to verify
     // that we don't already have conflicting results for these blocks.  Check
@@ -776,9 +788,11 @@
   // than its valid cache info.  If empty, the result will be valid cache info,
   // otherwise it isn't.
   if (Cache->empty())
-    CacheInfo->first = BBSkipFirstBlockPair(StartBB, SkipFirstBlock);
-  else
-    CacheInfo->first = BBSkipFirstBlockPair();
+    CacheInfo->Pair = BBSkipFirstBlockPair(StartBB, SkipFirstBlock);
+  else {
+    CacheInfo->Pair = BBSkipFirstBlockPair();
+    CacheInfo->TBAATag = 0;
+  }
   
   SmallVector<BasicBlock*, 32> Worklist;
   Worklist.push_back(StartBB);
@@ -803,8 +817,7 @@
       // Get the dependency info for Pointer in BB.  If we have cached
       // information, we will use it, otherwise we compute it.
       DEBUG(AssertSorted(*Cache, NumSortedEntries));
-      MemDepResult Dep = GetNonLocalInfoForBlock(Pointer.getAddr(), PointeeSize,
-                                                 isLoad, BB, Cache,
+      MemDepResult Dep = GetNonLocalInfoForBlock(Loc, isLoad, BB, Cache,
                                                  NumSortedEntries);
       
       // If we got a Def or Clobber, add this to the list of results.
@@ -901,7 +914,9 @@
         // queries.  Mark this in NonLocalPointerDeps by setting the
         // BBSkipFirstBlockPair pointer to null.  This requires reuse of the
         // cached value to do more work but not miss the phi trans failure.
-        NonLocalPointerDeps[CacheKey].first = BBSkipFirstBlockPair();
+        NonLocalPointerInfo &NLPI = NonLocalPointerDeps[CacheKey];
+        NLPI.Pair = BBSkipFirstBlockPair();
+        NLPI.TBAATag = 0;
         continue;
       }
 
@@ -912,21 +927,24 @@
       
       // If we have a problem phi translating, fall through to the code below
       // to handle the failure condition.
-      if (getNonLocalPointerDepFromBB(PredPointer, PointeeSize, isLoad, Pred,
+      if (getNonLocalPointerDepFromBB(PredPointer,
+                                      Loc.getWithNewPtr(PredPointer.getAddr()),
+                                      isLoad, Pred,
                                       Result, Visited))
         goto PredTranslationFailure;
     }
     
     // Refresh the CacheInfo/Cache pointer so that it isn't invalidated.
     CacheInfo = &NonLocalPointerDeps[CacheKey];
-    Cache = &CacheInfo->second;
+    Cache = &CacheInfo->NonLocalDeps;
     NumSortedEntries = Cache->size();
     
     // Since we did phi translation, the "Cache" set won't contain all of the
     // results for the query.  This is ok (we can still use it to accelerate
     // specific block queries) but we can't do the fastpath "return all
    // results from the set".  Clear out the indicator for this.
-    CacheInfo->first = BBSkipFirstBlockPair();
+    CacheInfo->Pair = BBSkipFirstBlockPair();
+    CacheInfo->TBAATag = 0;
     SkipFirstBlock = false;
     continue;
 
@@ -935,7 +953,7 @@
     if (Cache == 0) {
       // Refresh the CacheInfo/Cache pointer if it got invalidated.
       CacheInfo = &NonLocalPointerDeps[CacheKey];
-      Cache = &CacheInfo->second;
+      Cache = &CacheInfo->NonLocalDeps;
       NumSortedEntries = Cache->size();
     }
     
@@ -943,7 +961,8 @@
     // results for the query.  This is ok (we can still use it to accelerate
     // specific block queries) but we can't do the fastpath "return all
     // results from the set".  Clear out the indicator for this.
-    CacheInfo->first = BBSkipFirstBlockPair();
+    CacheInfo->Pair = BBSkipFirstBlockPair();
+    CacheInfo->TBAATag = 0;
     
     // If *nothing* works, mark the pointer as being clobbered by the first
     // instruction in this block.
@@ -985,7 +1004,7 @@
   
   // Remove all of the entries in the BB->val map.  This involves removing
   // instructions from the reverse map.
-  NonLocalDepInfo &PInfo = It->second.second;
+  NonLocalDepInfo &PInfo = It->second.NonLocalDeps;
   
   for (unsigned i = 0, e = PInfo.size(); i != e; ++i) {
     Instruction *Target = PInfo[i].getResult().getInst();
@@ -1156,10 +1175,11 @@
       assert(P.getPointer() != RemInst &&
              "Already removed NonLocalPointerDeps info for RemInst");
       
-      NonLocalDepInfo &NLPDI = NonLocalPointerDeps[P].second;
+      NonLocalDepInfo &NLPDI = NonLocalPointerDeps[P].NonLocalDeps;
       
       // The cache is not valid for any specific block anymore.
-      NonLocalPointerDeps[P].first = BBSkipFirstBlockPair();
+      NonLocalPointerDeps[P].Pair = BBSkipFirstBlockPair();
+      NonLocalPointerDeps[P].TBAATag = 0;
       
       // Update any entries for RemInst to use the instruction after it.
       for (NonLocalDepInfo::iterator DI = NLPDI.begin(), DE = NLPDI.end();
@@ -1205,7 +1225,7 @@
   for (CachedNonLocalPointerInfo::const_iterator I =NonLocalPointerDeps.begin(),
        E = NonLocalPointerDeps.end(); I != E; ++I) {
     assert(I->first.getPointer() != D && "Inst occurs in NLPD map key");
-    const NonLocalDepInfo &Val = I->second.second;
+    const NonLocalDepInfo &Val = I->second.NonLocalDeps;
     for (NonLocalDepInfo::const_iterator II = Val.begin(), E = Val.end();
          II != E; ++II)
       assert(II->getResult().getInst() != D && "Inst occurs as NLPD value");
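
One subtlety in the MemoryDependenceAnalysis changes above: the non-local pointer cache is keyed without the TBAA tag, so when a query arrives whose tag disagrees with the one the cached entry was computed under, the code discards the entry and restarts the query with no tag at all, keeping the shared entry conservatively correct for every caller. A standalone model of that invalidate-and-retry policy; the types and names are this sketch's, not LLVM's.

  #include <iostream>
  #include <map>
  #include <string>

  struct CachedInfo {
    const char *TBAATag = nullptr; // tag the entry was computed with
    std::string Deps;              // stand-in for the dependence list
  };

  std::map<int, CachedInfo> Cache; // keyed by pointer id, tag NOT in key

  std::string compute(int Ptr, const char *Tag) {
    return Tag ? "deps (tag-aware)" : "deps (tag-free)";
  }

  std::string query(int Ptr, const char *Tag) {
    auto It = Cache.find(Ptr);
    if (It != Cache.end() && It->second.TBAATag != Tag) {
      // Mismatch: drop the entry and re-query without any tag, as the
      // patch does, so one entry never serves two different tags.
      Cache.erase(It);
      return query(Ptr, nullptr);
    }
    CachedInfo &CI = Cache[Ptr];
    if (CI.Deps.empty()) {
      CI.TBAATag = Tag;
      CI.Deps = compute(Ptr, Tag);
    }
    return CI.Deps;
  }

  int main() {
    std::cout << query(1, "tbaa-int") << "\n";   // computes tag-aware
    std::cout << query(1, "tbaa-float") << "\n"; // mismatch: tag-free
  }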

Modified: llvm/branches/wendling/eh/lib/Analysis/ModuleDebugInfoPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/ModuleDebugInfoPrinter.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/ModuleDebugInfoPrinter.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/ModuleDebugInfoPrinter.cpp Tue Oct 26 19:48:03 2010
@@ -30,7 +30,9 @@
     DebugInfoFinder Finder;
   public:
     static char ID; // Pass identification, replacement for typeid
-    ModuleDebugInfoPrinter() : ModulePass(&ID) {}
+    ModuleDebugInfoPrinter() : ModulePass(ID) {
+      initializeModuleDebugInfoPrinterPass(*PassRegistry::getPassRegistry());
+    }
 
     virtual bool runOnModule(Module &M);
 
@@ -43,7 +45,7 @@
 
 char ModuleDebugInfoPrinter::ID = 0;
 INITIALIZE_PASS(ModuleDebugInfoPrinter, "module-debuginfo",
-                "Decodes module-level debug info", false, true);
+                "Decodes module-level debug info", false, true)
 
 ModulePass *llvm::createModuleDebugInfoPrinterPass() {
   return new ModuleDebugInfoPrinter();

Removed: llvm/branches/wendling/eh/lib/Analysis/PointerTracking.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/PointerTracking.cpp?rev=117424&view=auto
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/PointerTracking.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/PointerTracking.cpp (removed)
@@ -1,267 +0,0 @@
-//===- PointerTracking.cpp - Pointer Bounds Tracking ------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements tracking of pointer bounds.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/Analysis/ConstantFolding.h"
-#include "llvm/Analysis/Dominators.h"
-#include "llvm/Analysis/LoopInfo.h"
-#include "llvm/Analysis/MemoryBuiltins.h"
-#include "llvm/Analysis/PointerTracking.h"
-#include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Analysis/ScalarEvolutionExpressions.h"
-#include "llvm/Constants.h"
-#include "llvm/Module.h"
-#include "llvm/Value.h"
-#include "llvm/Support/CallSite.h"
-#include "llvm/Support/InstIterator.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
-using namespace llvm;
-
-char PointerTracking::ID = 0;
-PointerTracking::PointerTracking() : FunctionPass(&ID) {}
-
-bool PointerTracking::runOnFunction(Function &F) {
-  predCache.clear();
-  assert(analyzing.empty());
-  FF = &F;
-  TD = getAnalysisIfAvailable<TargetData>();
-  SE = &getAnalysis<ScalarEvolution>();
-  LI = &getAnalysis<LoopInfo>();
-  DT = &getAnalysis<DominatorTree>();
-  return false;
-}
-
-void PointerTracking::getAnalysisUsage(AnalysisUsage &AU) const {
-  AU.addRequiredTransitive<DominatorTree>();
-  AU.addRequiredTransitive<LoopInfo>();
-  AU.addRequiredTransitive<ScalarEvolution>();
-  AU.setPreservesAll();
-}
-
-bool PointerTracking::doInitialization(Module &M) {
-  const Type *PTy = Type::getInt8PtrTy(M.getContext());
-
-  // Find calloc(i64, i64) or calloc(i32, i32).
-  callocFunc = M.getFunction("calloc");
-  if (callocFunc) {
-    const FunctionType *Ty = callocFunc->getFunctionType();
-
-    std::vector<const Type*> args, args2;
-    args.push_back(Type::getInt64Ty(M.getContext()));
-    args.push_back(Type::getInt64Ty(M.getContext()));
-    args2.push_back(Type::getInt32Ty(M.getContext()));
-    args2.push_back(Type::getInt32Ty(M.getContext()));
-    const FunctionType *Calloc1Type =
-      FunctionType::get(PTy, args, false);
-    const FunctionType *Calloc2Type =
-      FunctionType::get(PTy, args2, false);
-    if (Ty != Calloc1Type && Ty != Calloc2Type)
-      callocFunc = 0; // Give up
-  }
-
-  // Find realloc(i8*, i64) or realloc(i8*, i32).
-  reallocFunc = M.getFunction("realloc");
-  if (reallocFunc) {
-    const FunctionType *Ty = reallocFunc->getFunctionType();
-    std::vector<const Type*> args, args2;
-    args.push_back(PTy);
-    args.push_back(Type::getInt64Ty(M.getContext()));
-    args2.push_back(PTy);
-    args2.push_back(Type::getInt32Ty(M.getContext()));
-
-    const FunctionType *Realloc1Type =
-      FunctionType::get(PTy, args, false);
-    const FunctionType *Realloc2Type =
-      FunctionType::get(PTy, args2, false);
-    if (Ty != Realloc1Type && Ty != Realloc2Type)
-      reallocFunc = 0; // Give up
-  }
-  return false;
-}
-
-// Calculates the number of elements allocated for pointer P,
-// the type of the element is stored in Ty.
-const SCEV *PointerTracking::computeAllocationCount(Value *P,
-                                                    const Type *&Ty) const {
-  Value *V = P->stripPointerCasts();
-  if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
-    Value *arraySize = AI->getArraySize();
-    Ty = AI->getAllocatedType();
-    // arraySize elements of type Ty.
-    return SE->getSCEV(arraySize);
-  }
-
-  if (CallInst *CI = extractMallocCall(V)) {
-    Value *arraySize = getMallocArraySize(CI, TD);
-    const Type* AllocTy = getMallocAllocatedType(CI);
-    if (!AllocTy || !arraySize) return SE->getCouldNotCompute();
-    Ty = AllocTy;
-    // arraySize elements of type Ty.
-    return SE->getSCEV(arraySize);
-  }
-
-  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
-    if (GV->hasDefinitiveInitializer()) {
-      Constant *C = GV->getInitializer();
-      if (const ArrayType *ATy = dyn_cast<ArrayType>(C->getType())) {
-        Ty = ATy->getElementType();
-        return SE->getConstant(Type::getInt32Ty(P->getContext()),
-                               ATy->getNumElements());
-      }
-    }
-    Ty = GV->getType();
-    return SE->getConstant(Type::getInt32Ty(P->getContext()), 1);
-    //TODO: implement more tracking for globals
-  }
-
-  if (CallInst *CI = dyn_cast<CallInst>(V)) {
-    CallSite CS(CI);
-    Function *F = dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
-    const Loop *L = LI->getLoopFor(CI->getParent());
-    if (F == callocFunc) {
-      Ty = Type::getInt8Ty(P->getContext());
-      // calloc allocates arg0*arg1 bytes.
-      return SE->getSCEVAtScope(SE->getMulExpr(SE->getSCEV(CS.getArgument(0)),
-                                               SE->getSCEV(CS.getArgument(1))),
-                                L);
-    } else if (F == reallocFunc) {
-      Ty = Type::getInt8Ty(P->getContext());
-      // realloc allocates arg1 bytes.
-      return SE->getSCEVAtScope(CS.getArgument(1), L);
-    }
-  }
-
-  return SE->getCouldNotCompute();
-}
-
-// Calculates the number of elements of type Ty allocated for P.
-const SCEV *PointerTracking::computeAllocationCountForType(Value *P,
-                                                           const Type *Ty)
-  const {
-    const Type *elementTy;
-    const SCEV *Count = computeAllocationCount(P, elementTy);
-    if (isa<SCEVCouldNotCompute>(Count))
-      return Count;
-    if (elementTy == Ty)
-      return Count;
-
-    if (!TD) // need TargetData from this point forward
-      return SE->getCouldNotCompute();
-
-    uint64_t elementSize = TD->getTypeAllocSize(elementTy);
-    uint64_t wantSize = TD->getTypeAllocSize(Ty);
-    if (elementSize == wantSize)
-      return Count;
-    if (elementSize % wantSize) //fractional counts not possible
-      return SE->getCouldNotCompute();
-    return SE->getMulExpr(Count, SE->getConstant(Count->getType(),
-                                                 elementSize/wantSize));
-}
-
-const SCEV *PointerTracking::getAllocationElementCount(Value *V) const {
-  // We only deal with pointers.
-  const PointerType *PTy = cast<PointerType>(V->getType());
-  return computeAllocationCountForType(V, PTy->getElementType());
-}
-
-const SCEV *PointerTracking::getAllocationSizeInBytes(Value *V) const {
-  return computeAllocationCountForType(V, Type::getInt8Ty(V->getContext()));
-}
-
-// Helper for isLoopGuardedBy that checks the swapped and inverted predicate too
-enum SolverResult PointerTracking::isLoopGuardedBy(const Loop *L,
-                                                   Predicate Pred,
-                                                   const SCEV *A,
-                                                   const SCEV *B) const {
-  if (SE->isLoopEntryGuardedByCond(L, Pred, A, B))
-    return AlwaysTrue;
-  Pred = ICmpInst::getSwappedPredicate(Pred);
-  if (SE->isLoopEntryGuardedByCond(L, Pred, B, A))
-    return AlwaysTrue;
-
-  Pred = ICmpInst::getInversePredicate(Pred);
-  if (SE->isLoopEntryGuardedByCond(L, Pred, B, A))
-    return AlwaysFalse;
-  Pred = ICmpInst::getSwappedPredicate(Pred);
-  if (SE->isLoopEntryGuardedByCond(L, Pred, A, B))
-    return AlwaysTrue;
-  return Unknown;
-}
-
-enum SolverResult PointerTracking::checkLimits(const SCEV *Offset,
-                                               const SCEV *Limit,
-                                               BasicBlock *BB)
-{
-  //FIXME: merge implementation
-  return Unknown;
-}
-
-void PointerTracking::getPointerOffset(Value *Pointer, Value *&Base,
-                                       const SCEV *&Limit,
-                                       const SCEV *&Offset) const
-{
-    Pointer = Pointer->stripPointerCasts();
-    Base = Pointer->getUnderlyingObject();
-    Limit = getAllocationSizeInBytes(Base);
-    if (isa<SCEVCouldNotCompute>(Limit)) {
-      Base = 0;
-      Offset = Limit;
-      return;
-    }
-
-    Offset = SE->getMinusSCEV(SE->getSCEV(Pointer), SE->getSCEV(Base));
-    if (isa<SCEVCouldNotCompute>(Offset)) {
-      Base = 0;
-      Limit = Offset;
-    }
-}
-
-void PointerTracking::print(raw_ostream &OS, const Module* M) const {
-  // Calling some PT methods may cause caches to be updated; however,
-  // this should be safe for the same reason it's safe for SCEV.
-  PointerTracking &PT = *const_cast<PointerTracking*>(this);
-  for (inst_iterator I=inst_begin(*FF), E=inst_end(*FF); I != E; ++I) {
-    if (!I->getType()->isPointerTy())
-      continue;
-    Value *Base;
-    const SCEV *Limit, *Offset;
-    getPointerOffset(&*I, Base, Limit, Offset);
-    if (!Base)
-      continue;
-
-    if (Base == &*I) {
-      const SCEV *S = getAllocationElementCount(Base);
-      OS << *Base << " ==> " << *S << " elements, ";
-      OS << *Limit << " bytes allocated\n";
-      continue;
-    }
-    OS << &*I << " -- base: " << *Base;
-    OS << " offset: " << *Offset;
-
-    enum SolverResult res = PT.checkLimits(Offset, Limit, I->getParent());
-    switch (res) {
-    case AlwaysTrue:
-      OS << " always safe\n";
-      break;
-    case AlwaysFalse:
-      OS << " always unsafe\n";
-      break;
-    case Unknown:
-      OS << " <<unknown>>\n";
-      break;
-    }
-  }
-}
-
-INITIALIZE_PASS(PointerTracking, "pointertracking",
-                "Track pointer bounds", false, true);

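The deleted PointerTracking code above rescales an allocation count between element types using their byte sizes. A minimal standalone sketch of that arithmetic, with plain integers standing in for the SCEV expressions and TargetData queries (the function name and the Valid out-parameter are illustrative, not from the patch):

#include <cstdint>

// Given Count elements of ElementSize bytes each, re-express the
// allocation as a count of WantSize-byte elements. Mirrors the checks
// in the deleted computeAllocationCountForType; Valid = false plays
// the role of SE->getCouldNotCompute().
uint64_t convertAllocationCount(uint64_t Count, uint64_t ElementSize,
                                uint64_t WantSize, bool &Valid) {
  Valid = true;
  if (ElementSize == WantSize)
    return Count;
  // Conservative, like the original: rescale only when each element
  // divides evenly into WantSize units, so no fractional count arises.
  if (ElementSize % WantSize != 0) {
    Valid = false;
    return 0;
  }
  return Count * (ElementSize / WantSize);
}
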
Modified: llvm/branches/wendling/eh/lib/Analysis/PostDominators.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/PostDominators.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/PostDominators.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/PostDominators.cpp Tue Oct 26 19:48:03 2010
@@ -29,7 +29,7 @@
 char PostDominatorTree::ID = 0;
 char PostDominanceFrontier::ID = 0;
 INITIALIZE_PASS(PostDominatorTree, "postdomtree",
-                "Post-Dominator Tree Construction", true, true);
+                "Post-Dominator Tree Construction", true, true)
 
 bool PostDominatorTree::runOnFunction(Function &F) {
   DT->recalculate(F);
@@ -53,8 +53,11 @@
 //  PostDominanceFrontier Implementation
 //===----------------------------------------------------------------------===//
 
-INITIALIZE_PASS(PostDominanceFrontier, "postdomfrontier",
-                "Post-Dominance Frontier Construction", true, true);
+INITIALIZE_PASS_BEGIN(PostDominanceFrontier, "postdomfrontier",
+                "Post-Dominance Frontier Construction", true, true)
+INITIALIZE_PASS_DEPENDENCY(PostDominatorTree)
+INITIALIZE_PASS_END(PostDominanceFrontier, "postdomfrontier",
+                "Post-Dominance Frontier Construction", true, true)
 
 const DominanceFrontier::DomSetType &
 PostDominanceFrontier::calculate(const PostDominatorTree &DT,

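This hunk shows the registration migration that recurs throughout the patch: a pass that transparently uses another analysis now declares it between INITIALIZE_PASS_BEGIN and INITIALIZE_PASS_END, so initializing the pass transitively initializes its dependencies. A sketch of the idiom for a hypothetical pass against the LLVM API of this era (the pass name, description, and initializer declaration are invented for illustration):

#include "llvm/Pass.h"
#include "llvm/Analysis/PostDominators.h"
using namespace llvm;

namespace llvm {
// Normally declared in InitializePasses.h; defined by the
// INITIALIZE_PASS_BEGIN/END expansion below.
void initializeFrontierUserPass(PassRegistry &);
}

namespace {
// Hypothetical pass, present only to illustrate the idiom.
struct FrontierUser : public FunctionPass {
  static char ID;
  FrontierUser() : FunctionPass(ID) {
    initializeFrontierUserPass(*PassRegistry::getPassRegistry());
  }
  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    AU.setPreservesAll();
    AU.addRequired<PostDominatorTree>();
  }
  virtual bool runOnFunction(Function &) { return false; }
};
}
char FrontierUser::ID = 0;

// The dependency listed here is registered before FrontierUser
// finishes initializing, which a single INITIALIZE_PASS could not
// guarantee.
INITIALIZE_PASS_BEGIN(FrontierUser, "frontier-user",
                      "Illustrative dependent pass", true, true)
INITIALIZE_PASS_DEPENDENCY(PostDominatorTree)
INITIALIZE_PASS_END(FrontierUser, "frontier-user",
                    "Illustrative dependent pass", true, true)
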
Modified: llvm/branches/wendling/eh/lib/Analysis/ProfileEstimatorPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/ProfileEstimatorPass.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/ProfileEstimatorPass.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/ProfileEstimatorPass.cpp Tue Oct 26 19:48:03 2010
@@ -39,7 +39,8 @@
   public:
     static char ID; // Class identification, replacement for typeinfo
     explicit ProfileEstimatorPass(const double execcount = 0)
-      : FunctionPass(&ID), ExecCount(execcount) {
+        : FunctionPass(ID), ExecCount(execcount) {
+      initializeProfileEstimatorPassPass(*PassRegistry::getPassRegistry());
       if (execcount == 0) ExecCount = LoopWeight;
     }
 
@@ -59,8 +60,8 @@
     /// an analysis interface through multiple inheritance.  If needed, it
     /// should override this to adjust the this pointer as needed for the
     /// specified pass info.
-    virtual void *getAdjustedAnalysisPointer(const PassInfo *PI) {
-      if (PI->isPassID(&ProfileInfo::ID))
+    virtual void *getAdjustedAnalysisPointer(AnalysisID PI) {
+      if (PI == &ProfileInfo::ID)
         return (ProfileInfo*)this;
       return this;
     }
@@ -72,13 +73,14 @@
 }  // End of anonymous namespace
 
 char ProfileEstimatorPass::ID = 0;
-static RegisterPass<ProfileEstimatorPass>
-X("profile-estimator", "Estimate profiling information", false, true);
-
-static RegisterAnalysisGroup<ProfileInfo> Y(X);
+INITIALIZE_AG_PASS_BEGIN(ProfileEstimatorPass, ProfileInfo, "profile-estimator",
+                "Estimate profiling information", false, true, false)
+INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_AG_PASS_END(ProfileEstimatorPass, ProfileInfo, "profile-estimator",
+                "Estimate profiling information", false, true, false)
 
 namespace llvm {
-  const PassInfo *ProfileEstimatorPassID = &X;
+  char &ProfileEstimatorPassID = ProfileEstimatorPass::ID;
 
   FunctionPass *createProfileEstimatorPass() {
     return new ProfileEstimatorPass();

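The getAdjustedAnalysisPointer change above keeps the same multiple-inheritance contract while switching the key type to AnalysisID. The reason the hook exists at all is ordinary C++ pointer adjustment; a self-contained sketch (all names invented):

#include <cassert>
#include <cstdio>

// With multiple inheritance, the interface subobject generally lives
// at a nonzero offset inside the complete object, so returning an
// unadjusted 'this' for the interface would hand out a bad pointer.
struct PassBase  { virtual ~PassBase() {}  int PassData; };
struct Interface { virtual ~Interface() {} int IfaceData; };

struct ConcretePass : PassBase, Interface {
  // Analogue of getAdjustedAnalysisPointer: pick which subobject to
  // expose for the requested analysis.
  void *getAdjusted(bool WantInterface) {
    if (WantInterface)
      return (Interface *)this;   // the cast adjusts the address
    return this;
  }
};

int main() {
  ConcretePass P;
  void *Raw = &P;
  void *Adj = P.getAdjusted(true);
  std::printf("raw=%p adjusted=%p\n", Raw, Adj); // typically differ
  assert(Adj == static_cast<void *>(static_cast<Interface *>(&P)));
  return 0;
}
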
Modified: llvm/branches/wendling/eh/lib/Analysis/ProfileInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/ProfileInfo.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/ProfileInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/ProfileInfo.cpp Tue Oct 26 19:48:03 2010
@@ -24,8 +24,12 @@
 #include <limits>
 using namespace llvm;
 
+namespace llvm {
+  template<> char ProfileInfoT<Function,BasicBlock>::ID = 0;
+}
+
 // Register the ProfileInfo interface, providing a nice name to refer to.
-static RegisterAnalysisGroup<ProfileInfo> Z("Profile Information");
+INITIALIZE_ANALYSIS_GROUP(ProfileInfo, "Profile Information", NoProfileInfo)
 
 namespace llvm {
 
@@ -44,9 +48,6 @@
 }
 
 template<>
-char ProfileInfoT<Function,BasicBlock>::ID = 0;
-
-template<>
 char ProfileInfoT<MachineFunction, MachineBasicBlock>::ID = 0;
 
 template<>
@@ -1076,14 +1077,16 @@
 namespace {
   struct NoProfileInfo : public ImmutablePass, public ProfileInfo {
     static char ID; // Class identification, replacement for typeinfo
-    NoProfileInfo() : ImmutablePass(&ID) {}
+    NoProfileInfo() : ImmutablePass(ID) {
+      initializeNoProfileInfoPass(*PassRegistry::getPassRegistry());
+    }
     
     /// getAdjustedAnalysisPointer - This method is used when a pass implements
     /// an analysis interface through multiple inheritance.  If needed, it
     /// should override this to adjust the this pointer as needed for the
     /// specified pass info.
-    virtual void *getAdjustedAnalysisPointer(const PassInfo *PI) {
-      if (PI->isPassID(&ProfileInfo::ID))
+    virtual void *getAdjustedAnalysisPointer(AnalysisID PI) {
+      if (PI == &ProfileInfo::ID)
         return (ProfileInfo*)this;
       return this;
     }
@@ -1097,6 +1100,6 @@
 char NoProfileInfo::ID = 0;
 // Register this pass...
 INITIALIZE_AG_PASS(NoProfileInfo, ProfileInfo, "no-profile",
-                   "No Profile Information", false, true, true);
+                   "No Profile Information", false, true, true)
 
 ImmutablePass *llvm::createNoProfileInfoPass() { return new NoProfileInfo(); }

Modified: llvm/branches/wendling/eh/lib/Analysis/ProfileInfoLoaderPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/ProfileInfoLoaderPass.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/ProfileInfoLoaderPass.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/ProfileInfoLoaderPass.cpp Tue Oct 26 19:48:03 2010
@@ -45,7 +45,8 @@
   public:
     static char ID; // Class identification, replacement for typeinfo
     explicit LoaderPass(const std::string &filename = "")
-      : ModulePass(&ID), Filename(filename) {
+      : ModulePass(ID), Filename(filename) {
+      initializeLoaderPassPass(*PassRegistry::getPassRegistry());
       if (filename.empty()) Filename = ProfileInfoFilename;
     }
 
@@ -67,8 +68,8 @@
     /// an analysis interface through multiple inheritance.  If needed, it
     /// should override this to adjust the this pointer as needed for the
     /// specified pass info.
-    virtual void *getAdjustedAnalysisPointer(const PassInfo *PI) {
-      if (PI->isPassID(&ProfileInfo::ID))
+    virtual void *getAdjustedAnalysisPointer(AnalysisID PI) {
+      if (PI == &ProfileInfo::ID)
         return (ProfileInfo*)this;
       return this;
     }
@@ -79,12 +80,10 @@
 }  // End of anonymous namespace
 
 char LoaderPass::ID = 0;
-static RegisterPass<LoaderPass>
-X("profile-loader", "Load profile information from llvmprof.out", false, true);
+INITIALIZE_AG_PASS(LoaderPass, ProfileInfo, "profile-loader",
+              "Load profile information from llvmprof.out", false, true, false)
 
-static RegisterAnalysisGroup<ProfileInfo> Y(X);
-
-const PassInfo *llvm::ProfileLoaderPassID = &X;
+char &llvm::ProfileLoaderPassID = LoaderPass::ID;
 
 ModulePass *llvm::createProfileLoaderPass() { return new LoaderPass(); }
 

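The `char &llvm::ProfileLoaderPassID = LoaderPass::ID;` line above relies on passes being identified by the address of a static char, never by its value. A tiny standalone demonstration of that identity scheme:

#include <cassert>

// Both IDs hold the value 0; only their addresses distinguish them.
struct PassA { static char ID; };
struct PassB { static char ID; };
char PassA::ID = 0;
char PassB::ID = 0;

// Analogue of ProfileLoaderPassID: a stable, linkable handle to the
// pass identity without exposing the (anonymous-namespace) class.
char &ExportedAID = PassA::ID;

int main() {
  assert(&PassA::ID != &PassB::ID); // distinct identities
  assert(&ExportedAID == &PassA::ID);
  return 0;
}
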
Modified: llvm/branches/wendling/eh/lib/Analysis/ProfileVerifierPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/ProfileVerifierPass.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/ProfileVerifierPass.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/ProfileVerifierPass.cpp Tue Oct 26 19:48:03 2010
@@ -59,11 +59,13 @@
   public:
     static char ID; // Class identification, replacement for typeinfo
 
-    explicit ProfileVerifierPassT () : FunctionPass(&ID) {
+    explicit ProfileVerifierPassT () : FunctionPass(ID) {
+      initializeProfileVerifierPassPass(*PassRegistry::getPassRegistry());
       DisableAssertions = ProfileVerifierDisableAssertions;
     }
-    explicit ProfileVerifierPassT (bool da) : FunctionPass(&ID), 
+    explicit ProfileVerifierPassT (bool da) : FunctionPass(ID), 
                                               DisableAssertions(da) {
+      initializeProfileVerifierPassPass(*PassRegistry::getPassRegistry());
     }
 
     void getAnalysisUsage(AnalysisUsage &AU) const {
@@ -287,7 +289,7 @@
            i != ie; ++i) {
         if (const CallInst *CI = dyn_cast<CallInst>(&*i)) {
           FType *F = CI->getCalledFunction();
-          if (F && (F->getNameStr() == "_setjmp")) {
+          if (F && (F->getName() == "_setjmp")) {
             isSetJmpTarget = true; break;
           }
         }
@@ -366,8 +368,11 @@
   char ProfileVerifierPassT<FType, BType>::ID = 0;
 }
 
-INITIALIZE_PASS(ProfileVerifierPass, "profile-verifier",
-                "Verify profiling information", false, true);
+INITIALIZE_PASS_BEGIN(ProfileVerifierPass, "profile-verifier",
+                "Verify profiling information", false, true)
+INITIALIZE_AG_DEPENDENCY(ProfileInfo)
+INITIALIZE_PASS_END(ProfileVerifierPass, "profile-verifier",
+                "Verify profiling information", false, true)
 
 namespace llvm {
   FunctionPass *createProfileVerifierPass() {

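The getNameStr() to getName() change above replaces a std::string copy with a string-reference view; the equality test then touches only the existing bytes. A toy model of that comparison (this is not LLVM's actual StringRef, just the shape of the idea):

#include <cassert>
#include <cstring>

struct MiniStringRef {
  const char *Data;
  size_t Length;
  MiniStringRef(const char *S) : Data(S), Length(std::strlen(S)) {}
  // Compare against a literal without allocating a temporary string.
  bool equals(const char *RHS) const {
    return std::strlen(RHS) == Length &&
           std::memcmp(Data, RHS, Length) == 0;
  }
};

int main() {
  MiniStringRef Name("_setjmp");   // a view, not a copy
  assert(Name.equals("_setjmp"));
  assert(!Name.equals("setjmp"));
  return 0;
}
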
Modified: llvm/branches/wendling/eh/lib/Analysis/RegionInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/RegionInfo.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/RegionInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/RegionInfo.cpp Tue Oct 26 19:48:03 2010
@@ -29,9 +29,9 @@
 
 // Always verify if expensive checking is enabled.
 #ifdef XDEBUG
-bool VerifyRegionInfo = true;
+static bool VerifyRegionInfo = true;
 #else
-bool VerifyRegionInfo = false;
+static bool VerifyRegionInfo = false;
 #endif
 
 static cl::opt<bool,true>
@@ -45,7 +45,7 @@
 /// PrintStyle - Print region in different ways.
 enum PrintStyle { PrintNone, PrintBB, PrintRN  };
 
-cl::opt<enum PrintStyle> printStyle("print-region-style", cl::Hidden,
+static cl::opt<enum PrintStyle> printStyle("print-region-style", cl::Hidden,
   cl::desc("style of printing regions"),
   cl::values(
     clEnumValN(PrintNone, "none",  "print no details"),
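
The two hunks above add `static` for a conventional reason: a namespace-scope variable otherwise has external linkage and can collide with a like-named definition in another translation unit. In sketch form:

// file1.cpp
//   bool VerifyRegionInfo = true;   // external linkage: the linker
//                                   // sees one global 'VerifyRegionInfo'
// file2.cpp
//   bool VerifyRegionInfo = false;  // duplicate symbol: link error
//
// With internal linkage each file keeps a private copy and neither
// name escapes to the linker:
static bool VerifyRegionInfo = true; // private to this file
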
@@ -72,6 +72,15 @@
     delete *I;
 }
 
+void Region::replaceEntry(BasicBlock *BB) {
+  entry.setPointer(BB);
+}
+
+void Region::replaceExit(BasicBlock *BB) {
+  assert(exit && "No exit to replace!");
+  exit = BB;
+}
+
 bool Region::contains(const BasicBlock *B) const {
   BasicBlock *BB = const_cast<BasicBlock*>(B);
 
@@ -131,19 +140,20 @@
 
   BasicBlock *entry = getEntry(), *exit = getExit();
 
-  // TopLevelRegion
-  if (!exit)
+  if (isTopLevelRegion())
     return false;
 
   for (pred_iterator PI = pred_begin(entry), PE = pred_end(entry); PI != PE;
-       ++PI)
-    if (!contains(*PI)) {
+       ++PI) {
+    BasicBlock *Pred = *PI;
+    if (DT->getNode(Pred) && !contains(Pred)) {
       if (found) {
         isSimple = false;
         break;
       }
       found = true;
     }
+  }
 
   found = false;
 
@@ -309,13 +319,38 @@
   children.clear();
 }
 
-void Region::addSubRegion(Region *SubRegion) {
+void Region::addSubRegion(Region *SubRegion, bool moveChildren) {
   assert(SubRegion->parent == 0 && "SubRegion already has a parent!");
+  assert(std::find(begin(), end(), SubRegion) == children.end()
+         && "Subregion already exists!");
+
   SubRegion->parent = this;
-  // Set up the region node.
-  assert(std::find(children.begin(), children.end(), SubRegion) == children.end()
-         && "Node already exist!");
   children.push_back(SubRegion);
+
+  if (!moveChildren)
+    return;
+
+  assert(SubRegion->children.size() == 0
+         && "SubRegions that contain children are not supported");
+
+  for (element_iterator I = element_begin(), E = element_end(); I != E; ++I)
+    if (!(*I)->isSubRegion()) {
+      BasicBlock *BB = (*I)->getNodeAs<BasicBlock>();
+
+      if (SubRegion->contains(BB))
+        RI->setRegionFor(BB, SubRegion);
+    }
+
+  std::vector<Region*> Keep;
+  for (iterator I = begin(), E = end(); I != E; ++I)
+    if (SubRegion->contains(*I) && *I != SubRegion) {
+      SubRegion->children.push_back(*I);
+      (*I)->parent = SubRegion;
+    } else
+      Keep.push_back(*I);
+
+  children.clear();
+  children.insert(children.begin(), Keep.begin(), Keep.end());
 }
 
 
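The extended addSubRegion above, when moveChildren is set, re-parents any existing children that now fall inside the new subregion and keeps the rest. A toy model of that partitioning, where interval nodes stand in for regions and containment is a simple range test (all names here are illustrative):

#include <cstddef>
#include <vector>

struct Node {
  int Lo, Hi;                        // the range this region covers
  std::vector<Node *> Children;
  Node *Parent;
  bool contains(const Node *N) const {
    return Lo <= N->Lo && N->Hi <= Hi;
  }
};

void addSubRegion(Node *Parent, Node *Sub) {
  Sub->Parent = Parent;
  std::vector<Node *> Keep;
  for (size_t i = 0, e = Parent->Children.size(); i != e; ++i) {
    Node *C = Parent->Children[i];
    if (Sub->contains(C) && C != Sub) {
      Sub->Children.push_back(C);    // re-parent under the subregion
      C->Parent = Sub;
    } else {
      Keep.push_back(C);             // stays directly under Parent
    }
  }
  Parent->Children = Keep;
  Parent->Children.push_back(Sub);
}
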
@@ -337,6 +372,38 @@
   return Depth;
 }
 
+Region *Region::getExpandedRegion() const {
+  unsigned NumSuccessors = exit->getTerminator()->getNumSuccessors();
+
+  if (NumSuccessors == 0)
+    return NULL;
+
+  for (pred_iterator PI = pred_begin(getExit()), PE = pred_end(getExit());
+       PI != PE; ++PI)
+    if (!DT->dominates(getEntry(), *PI))
+      return NULL;
+
+  Region *R = RI->getRegionFor(exit);
+
+  if (R->getEntry() != exit) {
+    if (exit->getTerminator()->getNumSuccessors() == 1)
+      return new Region(getEntry(), *succ_begin(exit), RI, DT);
+    else
+      return NULL;
+  }
+
+  while (R->getParent() && R->getParent()->getEntry() == exit)
+    R = R->getParent();
+
+  if (!DT->dominates(getEntry(), R->getExit()))
+    for (pred_iterator PI = pred_begin(getExit()), PE = pred_end(getExit());
+         PI != PE; ++PI)
+    if (!DT->dominates(R->getExit(), *PI))
+      return NULL;
+
+  return new Region(getEntry(), R->getExit(), RI, DT);
+}
+
 void Region::print(raw_ostream &OS, bool print_tree, unsigned level) const {
   if (print_tree)
     OS.indent(level*2) << "[" << level << "] " << getNameStr();
@@ -374,6 +441,11 @@
 }
 
 void Region::clearNodeCache() {
+  // Free the cached nodes.
+  for (BBNodeMapT::iterator I = BBNodeMap.begin(),
+       IE = BBNodeMap.end(); I != IE; ++I)
+    delete I->second;
+
   BBNodeMap.clear();
   for (Region::iterator RI = begin(), RE = end(); RI != RE; ++RI)
     (*RI)->clearNodeCache();
@@ -589,7 +661,8 @@
   TopLevelRegion = 0;
 }
 
-RegionInfo::RegionInfo() : FunctionPass(&ID) {
+RegionInfo::RegionInfo() : FunctionPass(ID) {
+  initializeRegionInfoPass(*PassRegistry::getPassRegistry());
   TopLevelRegion = 0;
 }
 
@@ -652,11 +725,14 @@
   return I != BBtoRegion.end() ? I->second : 0;
 }
 
+void RegionInfo::setRegionFor(BasicBlock *BB, Region *R) {
+  BBtoRegion[BB] = R;
+}
+
 Region *RegionInfo::operator[](BasicBlock *BB) const {
   return getRegionFor(BB);
 }
 
-
 BasicBlock *RegionInfo::getMaxRegionExit(BasicBlock *BB) const {
   BasicBlock *Exit = NULL;
 
@@ -731,9 +807,28 @@
   return ret;
 }
 
+void RegionInfo::splitBlock(BasicBlock* NewBB, BasicBlock *OldBB)
+{
+  Region *R = getRegionFor(OldBB);
+
+  setRegionFor(NewBB, R);
+
+  while (R->getEntry() == OldBB && !R->isTopLevelRegion()) {
+    R->replaceEntry(NewBB);
+    R = R->getParent();
+  }
+
+  setRegionFor(OldBB, R);
+}
+
 char RegionInfo::ID = 0;
-INITIALIZE_PASS(RegionInfo, "regions",
-                "Detect single entry single exit regions", true, true);
+INITIALIZE_PASS_BEGIN(RegionInfo, "regions",
+                "Detect single entry single exit regions", true, true)
+INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(PostDominatorTree)
+INITIALIZE_PASS_DEPENDENCY(DominanceFrontier)
+INITIALIZE_PASS_END(RegionInfo, "regions",
+                "Detect single entry single exit regions", true, true)
 
 // Create methods available outside of this file, to use them from
 // "include/llvm/LinkAllPasses.h". Otherwise the pass would be deleted by

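RegionInfo::splitBlock above handles the case where a block that used to head one or more nested regions is split: the new predecessor block takes over as entry of every region that started at the old block, and the old block is reassigned to the first enclosing region that did not start at it. A toy model (plain structs stand in for the LLVM types):

#include <map>

struct Block;
struct Region {
  Block *Entry;
  Region *Parent;                    // 0 only for the top-level region
  bool isTopLevel() const { return Parent == 0; }
};

void splitBlock(std::map<Block *, Region *> &RegionFor,
                Block *NewBB, Block *OldBB) {
  Region *R = RegionFor[OldBB];
  RegionFor[NewBB] = R;
  while (R->Entry == OldBB && !R->isTopLevel()) {
    R->Entry = NewBB;                // analogue of R->replaceEntry(NewBB)
    R = R->Parent;
  }
  RegionFor[OldBB] = R;              // OldBB now belongs further out
}
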
Modified: llvm/branches/wendling/eh/lib/Analysis/RegionPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/RegionPrinter.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/RegionPrinter.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/RegionPrinter.cpp Tue Oct 26 19:48:03 2010
@@ -121,48 +121,60 @@
 struct RegionViewer
   : public DOTGraphTraitsViewer<RegionInfo, false> {
   static char ID;
-  RegionViewer() : DOTGraphTraitsViewer<RegionInfo, false>("reg", &ID){}
+  RegionViewer() : DOTGraphTraitsViewer<RegionInfo, false>("reg", ID){
+    initializeRegionViewerPass(*PassRegistry::getPassRegistry());
+  }
 };
-
 char RegionViewer::ID = 0;
-INITIALIZE_PASS(RegionViewer, "view-regions", "View regions of function",
-                true, true);
 
 struct RegionOnlyViewer
   : public DOTGraphTraitsViewer<RegionInfo, true> {
   static char ID;
-  RegionOnlyViewer() : DOTGraphTraitsViewer<RegionInfo, true>("regonly", &ID){}
+  RegionOnlyViewer() : DOTGraphTraitsViewer<RegionInfo, true>("regonly", ID) {
+    initializeRegionOnlyViewerPass(*PassRegistry::getPassRegistry());
+  }
 };
-
 char RegionOnlyViewer::ID = 0;
-INITIALIZE_PASS(RegionOnlyViewer, "view-regions-only",
-                "View regions of function (with no function bodies)",
-                true, true);
 
 struct RegionPrinter
   : public DOTGraphTraitsPrinter<RegionInfo, false> {
   static char ID;
   RegionPrinter() :
-    DOTGraphTraitsPrinter<RegionInfo, false>("reg", &ID) {}
+    DOTGraphTraitsPrinter<RegionInfo, false>("reg", ID) {
+      initializeRegionPrinterPass(*PassRegistry::getPassRegistry());
+    }
 };
+char RegionPrinter::ID = 0;
 } //end anonymous namespace
 
-char RegionPrinter::ID = 0;
 INITIALIZE_PASS(RegionPrinter, "dot-regions",
-                "Print regions of function to 'dot' file", true, true);
+                "Print regions of function to 'dot' file", true, true)
+
+INITIALIZE_PASS(RegionViewer, "view-regions", "View regions of function",
+                true, true)
+                
+INITIALIZE_PASS(RegionOnlyViewer, "view-regions-only",
+                "View regions of function (with no function bodies)",
+                true, true)
+
+namespace {
 
 struct RegionOnlyPrinter
   : public DOTGraphTraitsPrinter<RegionInfo, true> {
   static char ID;
   RegionOnlyPrinter() :
-    DOTGraphTraitsPrinter<RegionInfo, true>("reg", &ID) {}
+    DOTGraphTraitsPrinter<RegionInfo, true>("reg", ID) {
+      initializeRegionOnlyPrinterPass(*PassRegistry::getPassRegistry());
+    }
 };
 
+}
+
 char RegionOnlyPrinter::ID = 0;
 INITIALIZE_PASS(RegionOnlyPrinter, "dot-regions-only",
                 "Print regions of function to 'dot' file "
                 "(with no function bodies)",
-                true, true);
+                true, true)
 
 FunctionPass* llvm::createRegionViewerPass() {
   return new RegionViewer();

Modified: llvm/branches/wendling/eh/lib/Analysis/ScalarEvolution.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/ScalarEvolution.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/ScalarEvolution.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/ScalarEvolution.cpp Tue Oct 26 19:48:03 2010
@@ -103,8 +103,12 @@
                                  "derived loop"),
                         cl::init(100));
 
-INITIALIZE_PASS(ScalarEvolution, "scalar-evolution",
-                "Scalar Evolution Analysis", false, true);
+INITIALIZE_PASS_BEGIN(ScalarEvolution, "scalar-evolution",
+                "Scalar Evolution Analysis", false, true)
+INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_END(ScalarEvolution, "scalar-evolution",
+                "Scalar Evolution Analysis", false, true)
 char ScalarEvolution::ID = 0;
 
 //===----------------------------------------------------------------------===//
@@ -251,28 +255,59 @@
   OS << "(";
   for (op_iterator I = op_begin(), E = op_end(); I != E; ++I) {
     OS << **I;
-    if (next(I) != E)
+    if (llvm::next(I) != E)
       OS << OpStr;
   }
   OS << ")";
 }
 
 bool SCEVNAryExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
-  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
-    if (!getOperand(i)->dominates(BB, DT))
+  for (op_iterator I = op_begin(), E = op_end(); I != E; ++I)
+    if (!(*I)->dominates(BB, DT))
       return false;
-  }
   return true;
 }
 
 bool SCEVNAryExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
-  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
-    if (!getOperand(i)->properlyDominates(BB, DT))
+  for (op_iterator I = op_begin(), E = op_end(); I != E; ++I)
+    if (!(*I)->properlyDominates(BB, DT))
+      return false;
+  return true;
+}
+
+bool SCEVNAryExpr::isLoopInvariant(const Loop *L) const {
+  for (op_iterator I = op_begin(), E = op_end(); I != E; ++I)
+    if (!(*I)->isLoopInvariant(L))
       return false;
-  }
   return true;
 }
 
+// hasComputableLoopEvolution - N-ary expressions have a computable loop
+// evolution iff at least one operand varies with the loop and every
+// varying operand is itself computable.
+bool SCEVNAryExpr::hasComputableLoopEvolution(const Loop *L) const {
+  bool HasVarying = false;
+  for (op_iterator I = op_begin(), E = op_end(); I != E; ++I) {
+    const SCEV *S = *I;
+    if (!S->isLoopInvariant(L)) {
+      if (S->hasComputableLoopEvolution(L))
+        HasVarying = true;
+      else
+        return false;
+    }
+  }
+  return HasVarying;
+}
+
+bool SCEVNAryExpr::hasOperand(const SCEV *O) const {
+  for (op_iterator I = op_begin(), E = op_end(); I != E; ++I) {
+    const SCEV *S = *I;
+    if (O == S || S->hasOperand(O))
+      return true;
+  }
+  return false;
+}
+
 bool SCEVUDivExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
   return LHS->dominates(BB, DT) && RHS->dominates(BB, DT);
 }
@@ -303,10 +338,14 @@
   if (QueryLoop->contains(L))
     return false;
 
+  // This recurrence is invariant w.r.t. QueryLoop if L contains QueryLoop.
+  if (L->contains(QueryLoop))
+    return true;
+
   // This recurrence is variant w.r.t. QueryLoop if any of its operands
   // are variant.
-  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
-    if (!getOperand(i)->isLoopInvariant(QueryLoop))
+  for (op_iterator I = op_begin(), E = op_end(); I != E; ++I)
+    if (!(*I)->isLoopInvariant(QueryLoop))
       return false;
 
   // Otherwise it's loop-invariant.
@@ -337,12 +376,36 @@
   OS << ">";
 }
 
+void SCEVUnknown::deleted() {
+  // Clear this SCEVUnknown from ValuesAtScopes.
+  SE->ValuesAtScopes.erase(this);
+
+  // Remove this SCEVUnknown from the uniquing map.
+  SE->UniqueSCEVs.RemoveNode(this);
+
+  // Release the value.
+  setValPtr(0);
+}
+
+void SCEVUnknown::allUsesReplacedWith(Value *New) {
+  // Clear this SCEVUnknown from ValuesAtScopes.
+  SE->ValuesAtScopes.erase(this);
+
+  // Remove this SCEVUnknown from the uniquing map.
+  SE->UniqueSCEVs.RemoveNode(this);
+
+  // Update this SCEVUnknown to point to the new value. This is needed
+  // because there may still be outstanding SCEVs which still point to
+  // this SCEVUnknown.
+  setValPtr(New);
+}
+
 bool SCEVUnknown::isLoopInvariant(const Loop *L) const {
   // All non-instruction values are loop invariant.  All instructions are loop
   // invariant if they are not contained in the specified loop.
   // Instructions are never considered invariant in the function body
   // (null loop) because they are defined within the "loop".
-  if (Instruction *I = dyn_cast<Instruction>(V))
+  if (Instruction *I = dyn_cast<Instruction>(getValue()))
     return L && !L->contains(I);
   return true;
 }
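
The deleted() and allUsesReplacedWith() bodies above are the callbacks of a value handle: SCEVUnknown now reacts when its underlying IR value dies or is replaced, purging or retargeting the cached expression instead of leaving a dangling pointer. A conceptual sketch of the handle contract (types and names invented; this is not LLVM's CallbackVH):

#include <cstdio>

struct Value;

struct CallbackHandle {
  Value *V;
  explicit CallbackHandle(Value *V) : V(V) {}
  virtual ~CallbackHandle() {}
  virtual void deleted() { V = 0; }                         // value destroyed
  virtual void allUsesReplacedWith(Value *New) { V = New; } // RAUW
};

struct UnknownExpr : CallbackHandle {
  explicit UnknownExpr(Value *V) : CallbackHandle(V) {}
  virtual void deleted() {
    // The real code also erases itself from ValuesAtScopes and the
    // uniquing map before clearing the pointer.
    std::puts("purging caches for a deleted value");
    CallbackHandle::deleted();
  }
  virtual void allUsesReplacedWith(Value *New) {
    std::puts("retargeting caches to the replacement value");
    CallbackHandle::allUsesReplacedWith(New);
  }
};
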
@@ -360,11 +423,11 @@
 }
 
 const Type *SCEVUnknown::getType() const {
-  return V->getType();
+  return getValue()->getType();
 }
 
 bool SCEVUnknown::isSizeOf(const Type *&AllocTy) const {
-  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(V))
+  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
     if (VCE->getOpcode() == Instruction::PtrToInt)
       if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
         if (CE->getOpcode() == Instruction::GetElementPtr &&
@@ -381,7 +444,7 @@
 }
 
 bool SCEVUnknown::isAlignOf(const Type *&AllocTy) const {
-  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(V))
+  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
     if (VCE->getOpcode() == Instruction::PtrToInt)
       if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
         if (CE->getOpcode() == Instruction::GetElementPtr &&
@@ -406,7 +469,7 @@
 }
 
 bool SCEVUnknown::isOffsetOf(const Type *&CTy, Constant *&FieldNo) const {
-  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(V))
+  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
     if (VCE->getOpcode() == Instruction::PtrToInt)
       if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
         if (CE->getOpcode() == Instruction::GetElementPtr &&
@@ -448,176 +511,183 @@
   }
 
   // Otherwise just print it normally.
-  WriteAsOperand(OS, V, false);
+  WriteAsOperand(OS, getValue(), false);
 }
 
 //===----------------------------------------------------------------------===//
 //                               SCEV Utilities
 //===----------------------------------------------------------------------===//
 
-static bool CompareTypes(const Type *A, const Type *B) {
-  if (A->getTypeID() != B->getTypeID())
-    return A->getTypeID() < B->getTypeID();
-  if (const IntegerType *AI = dyn_cast<IntegerType>(A)) {
-    const IntegerType *BI = cast<IntegerType>(B);
-    return AI->getBitWidth() < BI->getBitWidth();
-  }
-  if (const PointerType *AI = dyn_cast<PointerType>(A)) {
-    const PointerType *BI = cast<PointerType>(B);
-    return CompareTypes(AI->getElementType(), BI->getElementType());
-  }
-  if (const ArrayType *AI = dyn_cast<ArrayType>(A)) {
-    const ArrayType *BI = cast<ArrayType>(B);
-    if (AI->getNumElements() != BI->getNumElements())
-      return AI->getNumElements() < BI->getNumElements();
-    return CompareTypes(AI->getElementType(), BI->getElementType());
-  }
-  if (const VectorType *AI = dyn_cast<VectorType>(A)) {
-    const VectorType *BI = cast<VectorType>(B);
-    if (AI->getNumElements() != BI->getNumElements())
-      return AI->getNumElements() < BI->getNumElements();
-    return CompareTypes(AI->getElementType(), BI->getElementType());
-  }
-  if (const StructType *AI = dyn_cast<StructType>(A)) {
-    const StructType *BI = cast<StructType>(B);
-    if (AI->getNumElements() != BI->getNumElements())
-      return AI->getNumElements() < BI->getNumElements();
-    for (unsigned i = 0, e = AI->getNumElements(); i != e; ++i)
-      if (CompareTypes(AI->getElementType(i), BI->getElementType(i)) ||
-          CompareTypes(BI->getElementType(i), AI->getElementType(i)))
-        return CompareTypes(AI->getElementType(i), BI->getElementType(i));
-  }
-  return false;
-}
-
 namespace {
   /// SCEVComplexityCompare - Return true if the complexity of the LHS is less
   /// than the complexity of the RHS.  This comparator is used to canonicalize
   /// expressions.
   class SCEVComplexityCompare {
-    const LoopInfo *LI;
+    const LoopInfo *const LI;
   public:
     explicit SCEVComplexityCompare(const LoopInfo *li) : LI(li) {}
 
+    // Return true if LHS is less than RHS, and false if LHS is at least RHS.
     bool operator()(const SCEV *LHS, const SCEV *RHS) const {
+      return compare(LHS, RHS) < 0;
+    }
+
+    // Return negative, zero, or positive, if LHS is less than, equal to, or
+    // greater than RHS, respectively. A three-way result allows recursive
+    // comparisons to be more efficient.
+    int compare(const SCEV *LHS, const SCEV *RHS) const {
       // Fast-path: SCEVs are uniqued so we can do a quick equality check.
       if (LHS == RHS)
-        return false;
+        return 0;
 
       // Primarily, sort the SCEVs by their getSCEVType().
       unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
       if (LType != RType)
-        return LType < RType;
+        return (int)LType - (int)RType;
 
       // Aside from the getSCEVType() ordering, the particular ordering
       // isn't very important except that it's beneficial to be consistent,
       // so that (a + b) and (b + a) don't end up as different expressions.
-
-      // Sort SCEVUnknown values with some loose heuristics. TODO: This is
-      // not as complete as it could be.
-      if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS)) {
+      switch (LType) {
+      case scUnknown: {
+        const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
         const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);
 
+        // Sort SCEVUnknown values with some loose heuristics. TODO: This is
+        // not as complete as it could be.
+        const Value *LV = LU->getValue(), *RV = RU->getValue();
+
         // Order pointer values after integer values. This helps SCEVExpander
         // form GEPs.
-        bool LIsPointer = LU->getType()->isPointerTy(),
-             RIsPointer = RU->getType()->isPointerTy();
+        bool LIsPointer = LV->getType()->isPointerTy(),
+             RIsPointer = RV->getType()->isPointerTy();
         if (LIsPointer != RIsPointer)
-          return RIsPointer;
+          return (int)LIsPointer - (int)RIsPointer;
 
         // Compare getValueID values.
-        unsigned LID = LU->getValue()->getValueID(),
-                 RID = RU->getValue()->getValueID();
+        unsigned LID = LV->getValueID(),
+                 RID = RV->getValueID();
         if (LID != RID)
-          return LID < RID;
+          return (int)LID - (int)RID;
 
         // Sort arguments by their position.
-        if (const Argument *LA = dyn_cast<Argument>(LU->getValue())) {
-          const Argument *RA = cast<Argument>(RU->getValue());
-          return LA->getArgNo() < RA->getArgNo();
+        if (const Argument *LA = dyn_cast<Argument>(LV)) {
+          const Argument *RA = cast<Argument>(RV);
+          unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
+          return (int)LArgNo - (int)RArgNo;
         }
 
-        // For instructions, compare their loop depth, and their opcode.
-        // This is pretty loose.
-        if (const Instruction *LV = dyn_cast<Instruction>(LU->getValue())) {
-          const Instruction *RV = cast<Instruction>(RU->getValue());
+        // For instructions, compare their loop depth, and their operand
+        // count.  This is pretty loose.
+        if (const Instruction *LInst = dyn_cast<Instruction>(LV)) {
+          const Instruction *RInst = cast<Instruction>(RV);
 
           // Compare loop depths.
-          unsigned LDepth = LI->getLoopDepth(LV->getParent()),
-                   RDepth = LI->getLoopDepth(RV->getParent());
-          if (LDepth != RDepth)
-            return LDepth < RDepth;
+          const BasicBlock *LParent = LInst->getParent(),
+                           *RParent = RInst->getParent();
+          if (LParent != RParent) {
+            unsigned LDepth = LI->getLoopDepth(LParent),
+                     RDepth = LI->getLoopDepth(RParent);
+            if (LDepth != RDepth)
+              return (int)LDepth - (int)RDepth;
+          }
 
           // Compare the number of operands.
-          unsigned LNumOps = LV->getNumOperands(),
-                   RNumOps = RV->getNumOperands();
-          if (LNumOps != RNumOps)
-            return LNumOps < RNumOps;
+          unsigned LNumOps = LInst->getNumOperands(),
+                   RNumOps = RInst->getNumOperands();
+          return (int)LNumOps - (int)RNumOps;
         }
 
-        return false;
+        return 0;
       }
 
-      // Compare constant values.
-      if (const SCEVConstant *LC = dyn_cast<SCEVConstant>(LHS)) {
+      case scConstant: {
+        const SCEVConstant *LC = cast<SCEVConstant>(LHS);
         const SCEVConstant *RC = cast<SCEVConstant>(RHS);
-        const ConstantInt *LCC = LC->getValue();
-        const ConstantInt *RCC = RC->getValue();
-        unsigned LBitWidth = LCC->getBitWidth(), RBitWidth = RCC->getBitWidth();
+
+        // Compare constant values.
+        const APInt &LA = LC->getValue()->getValue();
+        const APInt &RA = RC->getValue()->getValue();
+        unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
         if (LBitWidth != RBitWidth)
-          return LBitWidth < RBitWidth;
-        return LCC->getValue().ult(RCC->getValue());
+          return (int)LBitWidth - (int)RBitWidth;
+        return LA.ult(RA) ? -1 : 1;
       }
 
-      // Compare addrec loop depths.
-      if (const SCEVAddRecExpr *LA = dyn_cast<SCEVAddRecExpr>(LHS)) {
+      case scAddRecExpr: {
+        const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
         const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);
-        unsigned LDepth = LA->getLoop()->getLoopDepth(),
-                 RDepth = RA->getLoop()->getLoopDepth();
-        if (LDepth != RDepth)
-          return LDepth < RDepth;
+
+        // Compare addrec loop depths.
+        const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
+        if (LLoop != RLoop) {
+          unsigned LDepth = LLoop->getLoopDepth(),
+                   RDepth = RLoop->getLoopDepth();
+          if (LDepth != RDepth)
+            return (int)LDepth - (int)RDepth;
+        }
+
+        // Addrec complexity grows with operand count.
+        unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
+        if (LNumOps != RNumOps)
+          return (int)LNumOps - (int)RNumOps;
+
+        // Lexicographically compare.
+        for (unsigned i = 0; i != LNumOps; ++i) {
+          long X = compare(LA->getOperand(i), RA->getOperand(i));
+          if (X != 0)
+            return X;
+        }
+
+        return 0;
       }
 
-      // Lexicographically compare n-ary expressions.
-      if (const SCEVNAryExpr *LC = dyn_cast<SCEVNAryExpr>(LHS)) {
+      case scAddExpr:
+      case scMulExpr:
+      case scSMaxExpr:
+      case scUMaxExpr: {
+        const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
         const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);
+
+        // Lexicographically compare n-ary expressions.
         unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
         for (unsigned i = 0; i != LNumOps; ++i) {
           if (i >= RNumOps)
-            return false;
-          const SCEV *LOp = LC->getOperand(i), *ROp = RC->getOperand(i);
-          if (operator()(LOp, ROp))
-            return true;
-          if (operator()(ROp, LOp))
-            return false;
+            return 1;
+          long X = compare(LC->getOperand(i), RC->getOperand(i));
+          if (X != 0)
+            return X;
         }
-        return LNumOps < RNumOps;
+        return (int)LNumOps - (int)RNumOps;
       }
 
-      // Lexicographically compare udiv expressions.
-      if (const SCEVUDivExpr *LC = dyn_cast<SCEVUDivExpr>(LHS)) {
+      case scUDivExpr: {
+        const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
         const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);
-        const SCEV *LL = LC->getLHS(), *LR = LC->getRHS(),
-                   *RL = RC->getLHS(), *RR = RC->getRHS();
-        if (operator()(LL, RL))
-          return true;
-        if (operator()(RL, LL))
-          return false;
-        if (operator()(LR, RR))
-          return true;
-        if (operator()(RR, LR))
-          return false;
-        return false;
+
+        // Lexicographically compare udiv expressions.
+        long X = compare(LC->getLHS(), RC->getLHS());
+        if (X != 0)
+          return X;
+        return compare(LC->getRHS(), RC->getRHS());
       }
 
-      // Compare cast expressions by operand.
-      if (const SCEVCastExpr *LC = dyn_cast<SCEVCastExpr>(LHS)) {
+      case scTruncate:
+      case scZeroExtend:
+      case scSignExtend: {
+        const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
         const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);
-        return operator()(LC->getOperand(), RC->getOperand());
+
+        // Compare cast expressions by operand.
+        return compare(LC->getOperand(), RC->getOperand());
+      }
+
+      default:
+        break;
       }
 
       llvm_unreachable("Unknown SCEV kind!");
-      return false;
+      return 0;
     }
   };
 }
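
The comparator rewrite above is the classic move from a boolean less-than, which recursion must call twice per operand pair, to a single three-way compare whose sign is tested once at the top. The same shape on ordinary strings (names invented):

#include <algorithm>
#include <string>
#include <vector>

// Three-way result, like the new SCEVComplexityCompare::compare:
// negative, zero, or positive. One call decides a pair.
static int compare3(const std::string &L, const std::string &R) {
  if (L.size() != R.size())
    return (int)L.size() - (int)R.size();
  return L.compare(R);               // already three-way
}

// The strict weak ordering std::sort needs falls out by testing the
// sign once, exactly as operator() now does.
struct LessByComplexity {
  bool operator()(const std::string &L, const std::string &R) const {
    return compare3(L, R) < 0;
  }
};

int main() {
  std::vector<std::string> V;
  V.push_back("bb");
  V.push_back("a");
  V.push_back("ccc");
  std::sort(V.begin(), V.end(), LessByComplexity()); // a, bb, ccc
  return 0;
}
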
@@ -638,8 +708,9 @@
   if (Ops.size() == 2) {
     // This is the common case, which also happens to be trivially simple.
     // Special case it.
-    if (SCEVComplexityCompare(LI)(Ops[1], Ops[0]))
-      std::swap(Ops[0], Ops[1]);
+    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
+    if (SCEVComplexityCompare(LI)(RHS, LHS))
+      std::swap(LHS, RHS);
     return;
   }
 
@@ -1311,8 +1382,9 @@
   // If HasNSW is true and all the operands are non-negative, infer HasNUW.
   if (!HasNUW && HasNSW) {
     bool All = true;
-    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
-      if (!isKnownNonNegative(Ops[i])) {
+    for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(),
+         E = Ops.end(); I != E; ++I)
+      if (!isKnownNonNegative(*I)) {
         All = false;
         break;
       }
@@ -1345,22 +1417,29 @@
     if (Ops.size() == 1) return Ops[0];
   }
 
-  // Okay, check to see if the same value occurs in the operand list twice.  If
-  // so, merge them together into an multiply expression.  Since we sorted the
-  // list, these values are required to be adjacent.
+  // Okay, check to see if the same value occurs in the operand list more than
+  // once.  If so, merge them together into a multiply expression.  Since we
+  // sorted the list, these values are required to be adjacent.
   const Type *Ty = Ops[0]->getType();
-  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
+  bool FoundMatch = false;
+  for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
     if (Ops[i] == Ops[i+1]) {      //  X + Y + Y  -->  X + Y*2
-      // Found a match, merge the two values into a multiply, and add any
-      // remaining values to the result.
-      const SCEV *Two = getConstant(Ty, 2);
-      const SCEV *Mul = getMulExpr(Ops[i], Two);
-      if (Ops.size() == 2)
+      // Scan ahead to count how many equal operands there are.
+      unsigned Count = 2;
+      while (i+Count != e && Ops[i+Count] == Ops[i])
+        ++Count;
+      // Merge the values into a multiply.
+      const SCEV *Scale = getConstant(Ty, Count);
+      const SCEV *Mul = getMulExpr(Scale, Ops[i]);
+      if (Ops.size() == Count)
         return Mul;
-      Ops.erase(Ops.begin()+i, Ops.begin()+i+2);
-      Ops.push_back(Mul);
-      return getAddExpr(Ops, HasNUW, HasNSW);
+      Ops[i] = Mul;
+      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
+      --i; e -= Count - 1;
+      FoundMatch = true;
     }
+  if (FoundMatch)
+    return getAddExpr(Ops, HasNUW, HasNSW);
 
   // Check for truncates. If all the operands are truncated from the same
   // type, see if factoring out the truncate would permit the result to be
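
The FoundMatch loop above generalizes the old pairwise fold: instead of turning X + Y + Y into X + Y*2 and restarting, it counts a whole run of equal (sorted, hence adjacent) operands and scales once. The core scan, sketched on plain integers:

#include <cstddef>
#include <utility>
#include <vector>

// Collapse each run of identical operands in a sorted list into a
// single (count, value) term, in one pass.
std::vector<std::pair<unsigned, int> >
collapseRuns(const std::vector<int> &Ops) {
  std::vector<std::pair<unsigned, int> > Terms;
  for (size_t i = 0, e = Ops.size(); i != e; ) {
    size_t Count = 1;
    while (i + Count != e && Ops[i + Count] == Ops[i])
      ++Count;                       // scan ahead, like the new loop
    Terms.push_back(std::make_pair((unsigned)Count, Ops[i]));
    i += Count;
  }
  return Terms;                      // {X, Y, Y, Y} -> {(1,X), (3,Y)}
}
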
@@ -1457,7 +1536,7 @@
       // re-generate the operands list. Group the operands by constant scale,
       // to avoid multiplying by the same constant scale multiple times.
       std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
-      for (SmallVector<const SCEV *, 8>::iterator I = NewOps.begin(),
+      for (SmallVector<const SCEV *, 8>::const_iterator I = NewOps.begin(),
            E = NewOps.end(); I != E; ++I)
         MulOpLists[M.find(*I)->second].push_back(*I);
       // Re-generate the operands list.
@@ -1484,20 +1563,23 @@
     const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
     for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
       const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
+      if (isa<SCEVConstant>(MulOpSCEV))
+        continue;
       for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
-        if (MulOpSCEV == Ops[AddOp] && !isa<SCEVConstant>(Ops[AddOp])) {
+        if (MulOpSCEV == Ops[AddOp]) {
           // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
           const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
           if (Mul->getNumOperands() != 2) {
             // If the multiply has more than two operands, we must get the
             // Y*Z term.
-            SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), Mul->op_end());
-            MulOps.erase(MulOps.begin()+MulOp);
+            SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
+                                                Mul->op_begin()+MulOp);
+            MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
             InnerMul = getMulExpr(MulOps);
           }
           const SCEV *One = getConstant(Ty, 1);
-          const SCEV *AddOne = getAddExpr(InnerMul, One);
-          const SCEV *OuterMul = getMulExpr(AddOne, Ops[AddOp]);
+          const SCEV *AddOne = getAddExpr(One, InnerMul);
+          const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV);
           if (Ops.size() == 2) return OuterMul;
           if (AddOp < Idx) {
             Ops.erase(Ops.begin()+AddOp);
@@ -1524,15 +1606,15 @@
             const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
             if (Mul->getNumOperands() != 2) {
               SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
-                                                  Mul->op_end());
-              MulOps.erase(MulOps.begin()+MulOp);
+                                                  Mul->op_begin()+MulOp);
+              MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
               InnerMul1 = getMulExpr(MulOps);
             }
             const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
             if (OtherMul->getNumOperands() != 2) {
               SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
-                                                  OtherMul->op_end());
-              MulOps.erase(MulOps.begin()+OMulOp);
+                                                  OtherMul->op_begin()+OMulOp);
+              MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end());
               InnerMul2 = getMulExpr(MulOps);
             }
             const SCEV *InnerMulSum = getAddExpr(InnerMul1,InnerMul2);
@@ -1598,30 +1680,31 @@
     // there are multiple AddRec's with the same loop induction variable being
     // added together.  If so, we can fold them.
     for (unsigned OtherIdx = Idx+1;
-         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
-      if (OtherIdx != Idx) {
-        const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
-        if (AddRecLoop == OtherAddRec->getLoop()) {
-          // Other + {A,+,B} + {C,+,D}  -->  Other + {A+C,+,B+D}
-          SmallVector<const SCEV *, 4> NewOps(AddRec->op_begin(),
-                                              AddRec->op_end());
-          for (unsigned i = 0, e = OtherAddRec->getNumOperands(); i != e; ++i) {
-            if (i >= NewOps.size()) {
-              NewOps.append(OtherAddRec->op_begin()+i,
-                            OtherAddRec->op_end());
-              break;
+         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
+         ++OtherIdx)
+      if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
+        // Other + {A,+,B}<L> + {C,+,D}<L>  -->  Other + {A+C,+,B+D}<L>
+        SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
+                                               AddRec->op_end());
+        for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
+             ++OtherIdx)
+          if (const SCEVAddRecExpr *OtherAddRec =
+                dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]))
+            if (OtherAddRec->getLoop() == AddRecLoop) {
+              for (unsigned i = 0, e = OtherAddRec->getNumOperands();
+                   i != e; ++i) {
+                if (i >= AddRecOps.size()) {
+                  AddRecOps.append(OtherAddRec->op_begin()+i,
+                                   OtherAddRec->op_end());
+                  break;
+                }
+                AddRecOps[i] = getAddExpr(AddRecOps[i],
+                                          OtherAddRec->getOperand(i));
+              }
+              Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
             }
-            NewOps[i] = getAddExpr(NewOps[i], OtherAddRec->getOperand(i));
-          }
-          const SCEV *NewAddRec = getAddRecExpr(NewOps, AddRecLoop);
-
-          if (Ops.size() == 2) return NewAddRec;
-
-          Ops.erase(Ops.begin()+Idx);
-          Ops.erase(Ops.begin()+OtherIdx-1);
-          Ops.push_back(NewAddRec);
-          return getAddExpr(Ops);
-        }
+        Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop);
+        return getAddExpr(Ops);
       }
 
     // Otherwise couldn't fold anything into this recurrence.  Move onto the
@@ -1632,7 +1715,6 @@
   // already have one, otherwise create a new one.
   FoldingSetNodeID ID;
   ID.AddInteger(scAddExpr);
-  ID.AddInteger(Ops.size());
   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
     ID.AddPointer(Ops[i]);
   void *IP = 0;
@@ -1657,17 +1739,18 @@
   assert(!Ops.empty() && "Cannot get empty mul!");
   if (Ops.size() == 1) return Ops[0];
 #ifndef NDEBUG
+  const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
   for (unsigned i = 1, e = Ops.size(); i != e; ++i)
-    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
-           getEffectiveSCEVType(Ops[0]->getType()) &&
+    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
            "SCEVMulExpr operand types don't match!");
 #endif
 
   // If HasNSW is true and all the operands are non-negative, infer HasNUW.
   if (!HasNUW && HasNSW) {
     bool All = true;
-    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
-      if (!isKnownNonNegative(Ops[i])) {
+    for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(),
+         E = Ops.end(); I != E; ++I)
+      if (!isKnownNonNegative(*I)) {
         All = false;
         break;
       }
@@ -1764,8 +1847,9 @@
     // they are loop invariant w.r.t. the recurrence.
     SmallVector<const SCEV *, 8> LIOps;
     const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
+    const Loop *AddRecLoop = AddRec->getLoop();
     for (unsigned i = 0, e = Ops.size(); i != e; ++i)
-      if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
+      if (Ops[i]->isLoopInvariant(AddRecLoop)) {
         LIOps.push_back(Ops[i]);
         Ops.erase(Ops.begin()+i);
         --i; --e;
@@ -1782,7 +1866,7 @@
 
       // Build the new addrec. Propagate the NUW and NSW flags if both the
       // outer mul and the inner addrec are guaranteed to have no overflow.
-      const SCEV *NewRec = getAddRecExpr(NewOps, AddRec->getLoop(),
+      const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop,
                                          HasNUW && AddRec->hasNoUnsignedWrap(),
                                          HasNSW && AddRec->hasNoSignedWrap());
 
@@ -1802,28 +1886,30 @@
     // there are multiple AddRec's with the same loop induction variable being
     // multiplied together.  If so, we can fold them.
     for (unsigned OtherIdx = Idx+1;
-         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
-      if (OtherIdx != Idx) {
-        const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
-        if (AddRec->getLoop() == OtherAddRec->getLoop()) {
-          // F * G  -->  {A,+,B} * {C,+,D}  -->  {A*C,+,F*D + G*B + B*D}
-          const SCEVAddRecExpr *F = AddRec, *G = OtherAddRec;
-          const SCEV *NewStart = getMulExpr(F->getStart(),
-                                                 G->getStart());
-          const SCEV *B = F->getStepRecurrence(*this);
-          const SCEV *D = G->getStepRecurrence(*this);
-          const SCEV *NewStep = getAddExpr(getMulExpr(F, D),
-                                          getMulExpr(G, B),
-                                          getMulExpr(B, D));
-          const SCEV *NewAddRec = getAddRecExpr(NewStart, NewStep,
-                                               F->getLoop());
-          if (Ops.size() == 2) return NewAddRec;
-
-          Ops.erase(Ops.begin()+Idx);
-          Ops.erase(Ops.begin()+OtherIdx-1);
-          Ops.push_back(NewAddRec);
-          return getMulExpr(Ops);
-        }
+         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
+         ++OtherIdx)
+      if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
+        // F * G, where F = {A,+,B}<L> and G = {C,+,D}<L>  -->
+        // {A*C,+,F*D + G*B + B*D}<L>
+        for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
+             ++OtherIdx)
+          if (const SCEVAddRecExpr *OtherAddRec =
+                dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]))
+            if (OtherAddRec->getLoop() == AddRecLoop) {
+              const SCEVAddRecExpr *F = AddRec, *G = OtherAddRec;
+              const SCEV *NewStart = getMulExpr(F->getStart(), G->getStart());
+              const SCEV *B = F->getStepRecurrence(*this);
+              const SCEV *D = G->getStepRecurrence(*this);
+              const SCEV *NewStep = getAddExpr(getMulExpr(F, D),
+                                               getMulExpr(G, B),
+                                               getMulExpr(B, D));
+              const SCEV *NewAddRec = getAddRecExpr(NewStart, NewStep,
+                                                    F->getLoop());
+              if (Ops.size() == 2) return NewAddRec;
+              Ops[Idx] = AddRec = cast<SCEVAddRecExpr>(NewAddRec);
+              Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
+            }
+        return getMulExpr(Ops);
       }
 
     // Otherwise couldn't fold anything into this recurrence.  Move onto the
@@ -1834,7 +1920,6 @@
   // already have one, otherwise create a new one.
   FoldingSetNodeID ID;
   ID.AddInteger(scMulExpr);
-  ID.AddInteger(Ops.size());
   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
     ID.AddPointer(Ops[i]);
   void *IP = 0;
@@ -1872,7 +1957,7 @@
       // TODO: Generalize this to non-constants by using known-bits information.
       const Type *Ty = LHS->getType();
       unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
-      unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ;
+      unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
       // For non-power-of-two values, effectively round the value up to the
       // nearest power of two.
       if (!RHSC->getValue()->getValue().isPowerOf2())
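
The `- LZ - 1` above corrects an off-by-one: in a W-bit type, a value whose leading-zero count is LZ has its highest set bit at index W - LZ - 1, so that index, not W - LZ, is the exponent of a power-of-two divisor. A quick standalone check (uses the GCC/Clang __builtin_clz intrinsic):

#include <cassert>
#include <cstdint>

int main() {
  const unsigned W = 32;
  uint32_t X = 8;                    // a power of two: 2^3
  unsigned LZ = __builtin_clz(X);    // 28 leading zeros in 32 bits
  // The highest set bit sits at index W - LZ - 1, not W - LZ.
  assert(W - LZ - 1 == 3);
  assert((uint32_t(1) << (W - LZ - 1)) == X);
  assert((1024u >> (W - LZ - 1)) == 1024u / X);
  return 0;
}
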
@@ -1979,9 +2064,9 @@
                                bool HasNUW, bool HasNSW) {
   if (Operands.size() == 1) return Operands[0];
 #ifndef NDEBUG
+  const Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
   for (unsigned i = 1, e = Operands.size(); i != e; ++i)
-    assert(getEffectiveSCEVType(Operands[i]->getType()) ==
-           getEffectiveSCEVType(Operands[0]->getType()) &&
+    assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
            "SCEVAddRecExpr operand types don't match!");
 #endif
 
@@ -1999,8 +2084,9 @@
   // If HasNSW is true and all the operands are non-negative, infer HasNUW.
   if (!HasNUW && HasNSW) {
     bool All = true;
-    for (unsigned i = 0, e = Operands.size(); i != e; ++i)
-      if (!isKnownNonNegative(Operands[i])) {
+    for (SmallVectorImpl<const SCEV *>::const_iterator I = Operands.begin(),
+         E = Operands.end(); I != E; ++I)
+      if (!isKnownNonNegative(*I)) {
         All = false;
         break;
       }
@@ -2010,9 +2096,9 @@
   // Canonicalize nested AddRecs in by nesting them in order of loop depth.
   if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
     const Loop *NestedLoop = NestedAR->getLoop();
-    if (L->contains(NestedLoop->getHeader()) ?
+    if (L->contains(NestedLoop) ?
         (L->getLoopDepth() < NestedLoop->getLoopDepth()) :
-        (!NestedLoop->contains(L->getHeader()) &&
+        (!NestedLoop->contains(L) &&
          DT->dominates(L->getHeader(), NestedLoop->getHeader()))) {
       SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
                                                   NestedAR->op_end());
@@ -2047,7 +2133,6 @@
   // already have one, otherwise create a new one.
   FoldingSetNodeID ID;
   ID.AddInteger(scAddRecExpr);
-  ID.AddInteger(Operands.size());
   for (unsigned i = 0, e = Operands.size(); i != e; ++i)
     ID.AddPointer(Operands[i]);
   ID.AddPointer(L);
@@ -2079,9 +2164,9 @@
   assert(!Ops.empty() && "Cannot get empty smax!");
   if (Ops.size() == 1) return Ops[0];
 #ifndef NDEBUG
+  const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
   for (unsigned i = 1, e = Ops.size(); i != e; ++i)
-    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
-           getEffectiveSCEVType(Ops[0]->getType()) &&
+    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
            "SCEVSMaxExpr operand types don't match!");
 #endif
 
@@ -2158,7 +2243,6 @@
   // already have one, otherwise create a new one.
   FoldingSetNodeID ID;
   ID.AddInteger(scSMaxExpr);
-  ID.AddInteger(Ops.size());
   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
     ID.AddPointer(Ops[i]);
   void *IP = 0;
@@ -2184,9 +2268,9 @@
   assert(!Ops.empty() && "Cannot get empty umax!");
   if (Ops.size() == 1) return Ops[0];
 #ifndef NDEBUG
+  const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
   for (unsigned i = 1, e = Ops.size(); i != e; ++i)
-    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
-           getEffectiveSCEVType(Ops[0]->getType()) &&
+    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
            "SCEVUMaxExpr operand types don't match!");
 #endif
 
@@ -2263,7 +2347,6 @@
   // already have one, otherwise create a new one.
   FoldingSetNodeID ID;
   ID.AddInteger(scUMaxExpr);
-  ID.AddInteger(Ops.size());
   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
     ID.AddPointer(Ops[i]);
   void *IP = 0;
@@ -2350,8 +2433,14 @@
   ID.AddInteger(scUnknown);
   ID.AddPointer(V);
   void *IP = 0;
-  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
-  SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V);
+  if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
+    assert(cast<SCEVUnknown>(S)->getValue() == V &&
+           "Stale SCEVUnknown in uniquing map!");
+    return S;
+  }
+  SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
+                                            FirstUnknown);
+  FirstUnknown = cast<SCEVUnknown>(S);
   UniqueSCEVs.InsertNode(S, IP);
   return S;
 }
@@ -2415,10 +2504,15 @@
 const SCEV *ScalarEvolution::getSCEV(Value *V) {
   assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
 
-  std::map<SCEVCallbackVH, const SCEV *>::iterator I = Scalars.find(V);
-  if (I != Scalars.end()) return I->second;
+  ValueExprMapType::const_iterator I = ValueExprMap.find(V);
+  if (I != ValueExprMap.end()) return I->second;
   const SCEV *S = createSCEV(V);
-  Scalars.insert(std::make_pair(SCEVCallbackVH(V, this), S));
+
+  // The process of creating a SCEV for V may have caused other SCEVs
+  // to have been created, so it's necessary to insert the new entry
+  // from scratch, rather than trying to remember the insert position
+  // above.
+  ValueExprMap.insert(std::make_pair(SCEVCallbackVH(V, this), S));
   return S;
 }
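
The comment above is the crux: ValueExprMap is a DenseMap-style hash table, so the recursive createSCEV call can grow it and force a rehash, invalidating any iterator or insertion hint saved before the recursion. The same hazard in self-contained form (std::unordered_map as a stand-in for the real map type):

    #include <unordered_map>
    #include <utility>

    std::unordered_map<int, long> Cache;

    long compute(int V) {
      std::unordered_map<int, long>::iterator It = Cache.find(V);
      if (It != Cache.end()) return It->second;
      // The recursion below may insert into Cache and trigger a rehash,
      // so It must not be reused as an insertion hint afterwards.
      long R = (V <= 1) ? V : compute(V - 1) + compute(V - 2);
      Cache.insert(std::make_pair(V, R));  // insert from scratch
      return R;
    }
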
 
@@ -2603,7 +2697,7 @@
 
 /// ForgetSymbolicValue - This looks up computed SCEV values for all
 /// instructions that depend on the given instruction and removes them from
-/// the Scalars map if they reference SymName. This is used during PHI
+/// the ValueExprMap map if they reference SymName. This is used during PHI
 /// resolution.
 void
 ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) {
@@ -2616,9 +2710,9 @@
     Instruction *I = Worklist.pop_back_val();
     if (!Visited.insert(I)) continue;
 
-    std::map<SCEVCallbackVH, const SCEV *>::iterator It =
-      Scalars.find(static_cast<Value *>(I));
-    if (It != Scalars.end()) {
+    ValueExprMapType::iterator It =
+      ValueExprMap.find(static_cast<Value *>(I));
+    if (It != ValueExprMap.end()) {
       // Short-circuit the def-use traversal if the symbolic name
       // ceases to appear in expressions.
       if (It->second != SymName && !It->second->hasOperand(SymName))
@@ -2635,7 +2729,7 @@
           !isa<SCEVUnknown>(It->second) ||
           (I != PN && It->second == SymName)) {
         ValuesAtScopes.erase(It->second);
-        Scalars.erase(It);
+        ValueExprMap.erase(It);
       }
     }
 
@@ -2672,9 +2766,9 @@
       if (BEValueV && StartValueV) {
         // While we are analyzing this PHI node, handle its value symbolically.
         const SCEV *SymbolicName = getUnknown(PN);
-        assert(Scalars.find(PN) == Scalars.end() &&
+        assert(ValueExprMap.find(PN) == ValueExprMap.end() &&
                "PHI node already processed?");
-        Scalars.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName));
+        ValueExprMap.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName));
 
         // Using this symbolic name for the PHI, analyze the value coming around
         // the back-edge.
@@ -2735,7 +2829,7 @@
               // to be symbolic.  We now need to go back and purge all of the
               // entries for the scalars that use the symbolic expression.
               ForgetSymbolicName(PN, SymbolicName);
-              Scalars[SCEVCallbackVH(PN, this)] = PHISCEV;
+              ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
               return PHISCEV;
             }
           }
@@ -2760,7 +2854,7 @@
               // to be symbolic.  We now need to go back and purge all of the
               // entries for the scalars that use the symbolic expression.
               ForgetSymbolicName(PN, SymbolicName);
-              Scalars[SCEVCallbackVH(PN, this)] = PHISCEV;
+              ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
               return PHISCEV;
             }
           }
@@ -2805,7 +2899,7 @@
     return getUnknown(GEP);
   const SCEV *TotalOffset = getConstant(IntPtrTy, 0);
   gep_type_iterator GTI = gep_type_begin(GEP);
-  for (GetElementPtrInst::op_iterator I = next(GEP->op_begin()),
+  for (GetElementPtrInst::op_iterator I = llvm::next(GEP->op_begin()),
                                       E = GEP->op_end();
        I != E; ++I) {
     Value *Index = *I;
@@ -3228,12 +3322,42 @@
 
   Operator *U = cast<Operator>(V);
   switch (Opcode) {
-  case Instruction::Add:
-    return getAddExpr(getSCEV(U->getOperand(0)),
-                      getSCEV(U->getOperand(1)));
-  case Instruction::Mul:
-    return getMulExpr(getSCEV(U->getOperand(0)),
-                      getSCEV(U->getOperand(1)));
+  case Instruction::Add: {
+    // The simple thing to do would be to just call getSCEV on both operands
+    // and call getAddExpr with the result. However if we're looking at a
+    // bunch of things all added together, this can be quite inefficient,
+    // because it leads to N-1 getAddExpr calls for N ultimate operands.
+    // Instead, gather up all the operands and make a single getAddExpr call.
+    // LLVM IR canonical form means we need only traverse the left operands.
+    SmallVector<const SCEV *, 4> AddOps;
+    AddOps.push_back(getSCEV(U->getOperand(1)));
+    for (Value *Op = U->getOperand(0); ; Op = U->getOperand(0)) {
+      unsigned Opcode = Op->getValueID() - Value::InstructionVal;
+      if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
+        break;
+      U = cast<Operator>(Op);
+      const SCEV *Op1 = getSCEV(U->getOperand(1));
+      if (Opcode == Instruction::Sub)
+        AddOps.push_back(getNegativeSCEV(Op1));
+      else
+        AddOps.push_back(Op1);
+    }
+    AddOps.push_back(getSCEV(U->getOperand(0)));
+    return getAddExpr(AddOps);
+  }
+  case Instruction::Mul: {
+    // See the Add code above.
+    SmallVector<const SCEV *, 4> MulOps;
+    MulOps.push_back(getSCEV(U->getOperand(1)));
+    for (Value *Op = U->getOperand(0);
+         Op->getValueID() == Instruction::Mul + Value::InstructionVal; 
+         Op = U->getOperand(0)) {
+      U = cast<Operator>(Op);
+      MulOps.push_back(getSCEV(U->getOperand(1)));
+    }
+    MulOps.push_back(getSCEV(U->getOperand(0)));
+    return getMulExpr(MulOps);
+  }
   case Instruction::UDiv:
     return getUDivExpr(getSCEV(U->getOperand(0)),
                        getSCEV(U->getOperand(1)));
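
The gathering loops above lean on LLVM IR canonical form: instcombine reassociates chains of adds and muls so that nesting occurs only through the left operand, which lets a single walk collect every ultimate operand. A minimal standalone sketch of the traversal (toy Node type, not the IR classes; right operands are assumed to be leaves for brevity):

    #include <vector>

    struct Node {
      char Op;          // '+', '-', or 'v' for a leaf
      Node *LHS, *RHS;  // used when Op is '+' or '-'
      int Val;          // used when Op is 'v'
    };

    // One pass and one combined add at the end, instead of N-1 nested adds.
    void gatherAddOperands(Node *N, std::vector<int> &Ops) {
      while (N->Op == '+' || N->Op == '-') {
        int R = N->RHS->Val;                  // assumed to be a leaf
        Ops.push_back(N->Op == '-' ? -R : R); // negate subtrahends
        N = N->LHS;                           // nesting is left-only
      }
      Ops.push_back(N->Val);
    }
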
@@ -3495,7 +3619,7 @@
           const SCEV *LDiff = getMinusSCEV(LA, LS);
           const SCEV *RDiff = getMinusSCEV(RA, One);
           if (LDiff == RDiff)
-            return getAddExpr(getUMaxExpr(LS, One), LDiff);
+            return getAddExpr(getUMaxExpr(One, LS), LDiff);
         }
         break;
       case ICmpInst::ICMP_EQ:
@@ -3510,7 +3634,7 @@
           const SCEV *LDiff = getMinusSCEV(LA, One);
           const SCEV *RDiff = getMinusSCEV(RA, LS);
           if (LDiff == RDiff)
-            return getAddExpr(getUMaxExpr(LS, One), LDiff);
+            return getAddExpr(getUMaxExpr(One, LS), LDiff);
         }
         break;
       default:
@@ -3607,9 +3731,9 @@
         Instruction *I = Worklist.pop_back_val();
         if (!Visited.insert(I)) continue;
 
-        std::map<SCEVCallbackVH, const SCEV *>::iterator It =
-          Scalars.find(static_cast<Value *>(I));
-        if (It != Scalars.end()) {
+        ValueExprMapType::iterator It =
+          ValueExprMap.find(static_cast<Value *>(I));
+        if (It != ValueExprMap.end()) {
           // SCEVUnknown for a PHI either means that it has an unrecognized
           // structure, or it's a PHI that's in the progress of being computed
           // by createNodeForPHI.  In the former case, additional loop trip
@@ -3618,7 +3742,7 @@
           // own when it gets to that point.
           if (!isa<PHINode>(I) || !isa<SCEVUnknown>(It->second)) {
             ValuesAtScopes.erase(It->second);
-            Scalars.erase(It);
+            ValueExprMap.erase(It);
           }
           if (PHINode *PN = dyn_cast<PHINode>(I))
             ConstantEvolutionLoopExitValue.erase(PN);
@@ -3647,11 +3771,10 @@
     Instruction *I = Worklist.pop_back_val();
     if (!Visited.insert(I)) continue;
 
-    std::map<SCEVCallbackVH, const SCEV *>::iterator It =
-      Scalars.find(static_cast<Value *>(I));
-    if (It != Scalars.end()) {
+    ValueExprMapType::iterator It = ValueExprMap.find(static_cast<Value *>(I));
+    if (It != ValueExprMap.end()) {
       ValuesAtScopes.erase(It->second);
-      Scalars.erase(It);
+      ValueExprMap.erase(It);
       if (PHINode *PN = dyn_cast<PHINode>(I))
         ConstantEvolutionLoopExitValue.erase(PN);
     }
@@ -3664,26 +3787,6 @@
 /// changed a value in a way that may affect its value, or which may
 /// disconnect it from a def-use chain linking it to a loop.
 void ScalarEvolution::forgetValue(Value *V) {
-  // If there's a SCEVUnknown tying this value into the SCEV
-  // space, remove it from the folding set map. The SCEVUnknown
-  // object and any other SCEV objects which reference it
-  // (transitively) remain allocated, effectively leaked until
-  // the underlying BumpPtrAllocator is freed.
-  //
-  // This permits SCEV pointers to be used as keys in maps
-  // such as the ValuesAtScopes map.
-  FoldingSetNodeID ID;
-  ID.AddInteger(scUnknown);
-  ID.AddPointer(V);
-  void *IP;
-  if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
-    UniqueSCEVs.RemoveNode(S);
-
-    // This isn't necessary, but we might as well remove the
-    // value from the ValuesAtScopes map too.
-    ValuesAtScopes.erase(S);
-  }
-
   Instruction *I = dyn_cast<Instruction>(V);
   if (!I) return;
 
@@ -3696,11 +3799,10 @@
     I = Worklist.pop_back_val();
     if (!Visited.insert(I)) continue;
 
-    std::map<SCEVCallbackVH, const SCEV *>::iterator It =
-      Scalars.find(static_cast<Value *>(I));
-    if (It != Scalars.end()) {
+    ValueExprMapType::iterator It = ValueExprMap.find(static_cast<Value *>(I));
+    if (It != ValueExprMap.end()) {
       ValuesAtScopes.erase(It->second);
-      Scalars.erase(It);
+      ValueExprMap.erase(It);
       if (PHINode *PN = dyn_cast<PHINode>(I))
         ConstantEvolutionLoopExitValue.erase(PN);
     }
@@ -3844,14 +3946,13 @@
         else
           MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
       } else {
-        // Both conditions must be true for the loop to exit.
+        // Both conditions must be true at the same time for the loop to exit.
+        // For now, be conservative.
         assert(L->contains(FBB) && "Loop block has no successor in loop!");
-        if (BTI0.Exact != getCouldNotCompute() &&
-            BTI1.Exact != getCouldNotCompute())
-          BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
-        if (BTI0.Max != getCouldNotCompute() &&
-            BTI1.Max != getCouldNotCompute())
-          MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
+        if (BTI0.Max == BTI1.Max)
+          MaxBECount = BTI0.Max;
+        if (BTI0.Exact == BTI1.Exact)
+          BECount = BTI0.Exact;
       }
 
       return BackedgeTakenInfo(BECount, MaxBECount);
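
The replaced umax combination was unsound rather than merely imprecise, because the two subconditions need not become true on the same iteration; the Or case in the next hunk gets the same fix. A small hypothetical loop showing the failure:

    // Exit condition is A && B. Treating each subcondition alone:
    //   A: "i == 2" would exit after 2 backedges
    //   B: "i != 2" would exit after 0 backedges
    // umax reports 2, yet this loop never exits, so an exact count can
    // only be kept when both subconditions yield the same value.
    for (int i = 0; ; ++i)
      if (i == 2 && i != 2)  // never true on any single iteration
        break;
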
@@ -3879,14 +3980,13 @@
         else
           MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
       } else {
-        // Both conditions must be false for the loop to exit.
+        // Both conditions must be false at the same time for the loop to exit.
+        // For now, be conservative.
         assert(L->contains(TBB) && "Loop block has no successor in loop!");
-        if (BTI0.Exact != getCouldNotCompute() &&
-            BTI1.Exact != getCouldNotCompute())
-          BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
-        if (BTI0.Max != getCouldNotCompute() &&
-            BTI1.Max != getCouldNotCompute())
-          MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
+        if (BTI0.Max == BTI1.Max)
+          MaxBECount = BTI0.Max;
+        if (BTI0.Exact == BTI1.Exact)
+          BECount = BTI0.Exact;
       }
 
       return BackedgeTakenInfo(BECount, MaxBECount);
@@ -4231,7 +4331,7 @@
 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
                                                    const APInt &BEs,
                                                    const Loop *L) {
-  std::map<PHINode*, Constant*>::iterator I =
+  std::map<PHINode*, Constant*>::const_iterator I =
     ConstantEvolutionLoopExitValue.find(PN);
   if (I != ConstantEvolutionLoopExitValue.end())
     return I->second;
@@ -5213,7 +5313,8 @@
       LoopContinuePredicate->isUnconditional())
     return false;
 
-  return isImpliedCond(LoopContinuePredicate->getCondition(), Pred, LHS, RHS,
+  return isImpliedCond(Pred, LHS, RHS,
+                       LoopContinuePredicate->getCondition(),
                        LoopContinuePredicate->getSuccessor(0) != L->getHeader());
 }
 
@@ -5242,7 +5343,8 @@
         LoopEntryPredicate->isUnconditional())
       continue;
 
-    if (isImpliedCond(LoopEntryPredicate->getCondition(), Pred, LHS, RHS,
+    if (isImpliedCond(Pred, LHS, RHS,
+                      LoopEntryPredicate->getCondition(),
                       LoopEntryPredicate->getSuccessor(0) != Pair.second))
       return true;
   }
@@ -5252,24 +5354,24 @@
 
 /// isImpliedCond - Test whether the condition described by Pred, LHS,
 /// and RHS is true whenever the given Cond value evaluates to true.
-bool ScalarEvolution::isImpliedCond(Value *CondValue,
-                                    ICmpInst::Predicate Pred,
+bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred,
                                     const SCEV *LHS, const SCEV *RHS,
+                                    Value *FoundCondValue,
                                     bool Inverse) {
   // Recursively handle And and Or conditions.
-  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CondValue)) {
+  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
     if (BO->getOpcode() == Instruction::And) {
       if (!Inverse)
-        return isImpliedCond(BO->getOperand(0), Pred, LHS, RHS, Inverse) ||
-               isImpliedCond(BO->getOperand(1), Pred, LHS, RHS, Inverse);
+        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
+               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
     } else if (BO->getOpcode() == Instruction::Or) {
       if (Inverse)
-        return isImpliedCond(BO->getOperand(0), Pred, LHS, RHS, Inverse) ||
-               isImpliedCond(BO->getOperand(1), Pred, LHS, RHS, Inverse);
+        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
+               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
     }
   }
 
-  ICmpInst *ICI = dyn_cast<ICmpInst>(CondValue);
+  ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
   if (!ICI) return false;
 
   // Bail if the ICmp's operands' types are wider than the needed type
@@ -5686,34 +5788,17 @@
   assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
   if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
     SE->ConstantEvolutionLoopExitValue.erase(PN);
-  SE->Scalars.erase(getValPtr());
+  SE->ValueExprMap.erase(getValPtr());
   // this now dangles!
 }
 
 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
   assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
 
-  Value *Old = getValPtr();
-
-  // If there's a SCEVUnknown tying this value into the SCEV
-  // space, replace the SCEVUnknown's value with the new value
-  // for the benefit of any SCEVs still referencing it, and
-  // and remove it from the folding set map so that new scevs
-  // don't reference it.
-  FoldingSetNodeID ID;
-  ID.AddInteger(scUnknown);
-  ID.AddPointer(Old);
-  void *IP;
-  if (SCEVUnknown *S = cast_or_null<SCEVUnknown>(
-        SE->UniqueSCEVs.FindNodeOrInsertPos(ID, IP))) {
-    S->V = V;
-    SE->UniqueSCEVs.RemoveNode(S);
-    SE->ValuesAtScopes.erase(S);
-  }
-
   // Forget all the expressions associated with users of the old value,
   // so that future queries will recompute the expressions using the new
   // value.
+  Value *Old = getValPtr();
   SmallVector<User *, 16> Worklist;
   SmallPtrSet<User *, 8> Visited;
   for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
@@ -5729,7 +5814,7 @@
       continue;
     if (PHINode *PN = dyn_cast<PHINode>(U))
       SE->ConstantEvolutionLoopExitValue.erase(PN);
-    SE->Scalars.erase(U);
+    SE->ValueExprMap.erase(U);
     for (Value::use_iterator UI = U->use_begin(), UE = U->use_end();
          UI != UE; ++UI)
       Worklist.push_back(*UI);
@@ -5737,7 +5822,7 @@
   // Delete the Old value.
   if (PHINode *PN = dyn_cast<PHINode>(Old))
     SE->ConstantEvolutionLoopExitValue.erase(PN);
-  SE->Scalars.erase(Old);
+  SE->ValueExprMap.erase(Old);
   // this now dangles!
 }
 
@@ -5749,7 +5834,8 @@
 //===----------------------------------------------------------------------===//
 
 ScalarEvolution::ScalarEvolution()
-  : FunctionPass(&ID) {
+  : FunctionPass(ID), FirstUnknown(0) {
+  initializeScalarEvolutionPass(*PassRegistry::getPassRegistry());
 }
 
 bool ScalarEvolution::runOnFunction(Function &F) {
@@ -5761,7 +5847,13 @@
 }
 
 void ScalarEvolution::releaseMemory() {
-  Scalars.clear();
+  // Iterate through all the SCEVUnknown instances and call their
+  // destructors, so that they release their references to their values.
+  for (SCEVUnknown *U = FirstUnknown; U; U = U->Next)
+    U->~SCEVUnknown();
+  FirstUnknown = 0;
+
+  ValueExprMap.clear();
   BackedgeTakenCounts.clear();
   ConstantEvolutionLoopExitValue.clear();
   ValuesAtScopes.clear();
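
SCEV objects live in a BumpPtrAllocator, which never runs destructors, so SCEVUnknown nodes, which now hold a reference that must be dropped, are threaded onto the FirstUnknown intrusive list and destroyed by hand here. The pattern in a self-contained sketch (toy pool, not llvm::BumpPtrAllocator):

    #include <cstddef>
    #include <new>

    struct Unknown {
      Unknown *Next;                      // intrusive list, newest first
      explicit Unknown(Unknown *N) : Next(N) {}
      ~Unknown() { /* drop the value-handle reference here */ }
    };

    struct BumpPool {
      char Buf[4096];
      std::size_t Used;
      BumpPool() : Used(0) {}
      void *alloc(std::size_t N) { void *P = Buf + Used; Used += N; return P; }
      // No per-object free, so destructors must be invoked explicitly.
    };

    void releaseAll(Unknown *First) {
      for (Unknown *U = First; U;) {
        Unknown *N = U->Next;             // read the link before destroying
        U->~Unknown();
        U = N;
      }
    }
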

Modified: llvm/branches/wendling/eh/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp Tue Oct 26 19:48:03 2010
@@ -34,14 +34,17 @@
 
   public:
     static char ID; // Class identification, replacement for typeinfo
-    ScalarEvolutionAliasAnalysis() : FunctionPass(&ID), SE(0) {}
+    ScalarEvolutionAliasAnalysis() : FunctionPass(ID), SE(0) {
+      initializeScalarEvolutionAliasAnalysisPass(
+        *PassRegistry::getPassRegistry());
+    }
 
     /// getAdjustedAnalysisPointer - This method is used when a pass implements
     /// an analysis interface through multiple inheritance.  If needed, it
     /// should override this to adjust the this pointer as needed for the
     /// specified pass info.
-    virtual void *getAdjustedAnalysisPointer(const PassInfo *PI) {
-      if (PI->isPassID(&AliasAnalysis::ID))
+    virtual void *getAdjustedAnalysisPointer(AnalysisID PI) {
+      if (PI == &AliasAnalysis::ID)
         return (AliasAnalysis*)this;
       return this;
     }
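
getAdjustedAnalysisPointer is needed because, under multiple inheritance, the AliasAnalysis subobject generally sits at a nonzero offset inside the pass object, so the cast in the body above changes the pointer's value as well as its type. A standalone illustration (toy class names):

    #include <cassert>

    struct Pass          { virtual ~Pass() {} };
    struct AliasAnalysis { virtual ~AliasAnalysis() {} };
    struct ToySCEVAA : Pass, AliasAnalysis {};

    int main() {
      ToySCEVAA S;
      void *AsPass = static_cast<Pass *>(&S);
      void *AsAA   = static_cast<AliasAnalysis *>(&S);
      // On common ABIs the second base lives at a nonzero offset, so the
      // two views of the same object have different addresses.
      assert(AsPass != AsAA);
      return 0;
    }
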
@@ -49,8 +52,7 @@
   private:
     virtual void getAnalysisUsage(AnalysisUsage &AU) const;
     virtual bool runOnFunction(Function &F);
-    virtual AliasResult alias(const Value *V1, unsigned V1Size,
-                              const Value *V2, unsigned V2Size);
+    virtual AliasResult alias(const Location &LocA, const Location &LocB);
 
     Value *GetBaseValue(const SCEV *S);
   };
@@ -58,8 +60,11 @@
 
 // Register this pass...
 char ScalarEvolutionAliasAnalysis::ID = 0;
-INITIALIZE_AG_PASS(ScalarEvolutionAliasAnalysis, AliasAnalysis, "scev-aa",
-                   "ScalarEvolution-based Alias Analysis", false, true, false);
+INITIALIZE_AG_PASS_BEGIN(ScalarEvolutionAliasAnalysis, AliasAnalysis, "scev-aa",
+                   "ScalarEvolution-based Alias Analysis", false, true, false)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_AG_PASS_END(ScalarEvolutionAliasAnalysis, AliasAnalysis, "scev-aa",
+                    "ScalarEvolution-based Alias Analysis", false, true, false)
 
 FunctionPass *llvm::createScalarEvolutionAliasAnalysisPass() {
   return new ScalarEvolutionAliasAnalysis();
@@ -101,17 +106,17 @@
 }
 
 AliasAnalysis::AliasResult
-ScalarEvolutionAliasAnalysis::alias(const Value *A, unsigned ASize,
-                                    const Value *B, unsigned BSize) {
+ScalarEvolutionAliasAnalysis::alias(const Location &LocA,
+                                    const Location &LocB) {
   // If either of the memory references is empty, it doesn't matter what the
   // pointer values are. This allows the code below to ignore this special
   // case.
-  if (ASize == 0 || BSize == 0)
+  if (LocA.Size == 0 || LocB.Size == 0)
     return NoAlias;
 
   // This is ScalarEvolutionAliasAnalysis. Get the SCEVs!
-  const SCEV *AS = SE->getSCEV(const_cast<Value *>(A));
-  const SCEV *BS = SE->getSCEV(const_cast<Value *>(B));
+  const SCEV *AS = SE->getSCEV(const_cast<Value *>(LocA.Ptr));
+  const SCEV *BS = SE->getSCEV(const_cast<Value *>(LocB.Ptr));
 
   // If they evaluate to the same expression, it's a MustAlias.
   if (AS == BS) return MustAlias;
@@ -121,8 +126,8 @@
   if (SE->getEffectiveSCEVType(AS->getType()) ==
       SE->getEffectiveSCEVType(BS->getType())) {
     unsigned BitWidth = SE->getTypeSizeInBits(AS->getType());
-    APInt ASizeInt(BitWidth, ASize);
-    APInt BSizeInt(BitWidth, BSize);
+    APInt ASizeInt(BitWidth, LocA.Size);
+    APInt BSizeInt(BitWidth, LocB.Size);
 
     // Compute the difference between the two pointers.
     const SCEV *BA = SE->getMinusSCEV(BS, AS);
@@ -154,11 +159,15 @@
   // inttoptr and ptrtoint operators.
   Value *AO = GetBaseValue(AS);
   Value *BO = GetBaseValue(BS);
-  if ((AO && AO != A) || (BO && BO != B))
-    if (alias(AO ? AO : A, AO ? ~0u : ASize,
-              BO ? BO : B, BO ? ~0u : BSize) == NoAlias)
+  if ((AO && AO != LocA.Ptr) || (BO && BO != LocB.Ptr))
+    if (alias(Location(AO ? AO : LocA.Ptr,
+                       AO ? +UnknownSize : LocA.Size,
+                       AO ? 0 : LocA.TBAATag),
+              Location(BO ? BO : LocB.Ptr,
+                       BO ? +UnknownSize : LocB.Size,
+                       BO ? 0 : LocB.TBAATag)) == NoAlias)
       return NoAlias;
 
   // Forward the query to the next analysis.
-  return AliasAnalysis::alias(A, ASize, B, BSize);
+  return AliasAnalysis::alias(LocA, LocB);
 }

Modified: llvm/branches/wendling/eh/lib/Analysis/ScalarEvolutionNormalization.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/ScalarEvolutionNormalization.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/ScalarEvolutionNormalization.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/ScalarEvolutionNormalization.cpp Tue Oct 26 19:48:03 2010
@@ -108,7 +108,11 @@
         Result = SE.getMinusSCEV(Result, TransformedStep);
         Loops.insert(L);
       }
-#ifdef XDEBUG
+#if 0
+      // This assert is conceptually correct, but ScalarEvolution currently
+      // sometimes fails to canonicalize two equal SCEVs to exactly the same
+      // form. It's possibly a pessimization when this happens, but it isn't a
+      // correctness problem, so disable this assert for now.
       assert(S == TransformForPostIncUse(Denormalize, Result,
                                          User, OperandValToReplace,
                                          Loops, SE, DT) &&
@@ -122,7 +126,8 @@
                                  User, OperandValToReplace, Loops, SE, DT);
         Result = SE.getMinusSCEV(Result, TransformedStep);
       }
-#ifdef XDEBUG
+#if 0
+      // See the comment on the assert above.
       assert(S == TransformForPostIncUse(Denormalize, Result,
                                          User, OperandValToReplace,
                                          Loops, SE, DT) &&
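
The disabled asserts check a round-trip property: denormalizing a normalized expression should give back the original. The check could live in a helper and be re-enabled once ScalarEvolution folds equal expressions to a single pointer (a sketch; the transform arguments stand for TransformForPostIncUse with its context fixed):

    // Pointer equality is the intended test, but it is too strict while
    // two semantically equal SCEVs can still be distinct objects.
    template <typename T, typename Fn>
    bool roundTrips(T S, Fn Normalize, Fn Denormalize) {
      return Denormalize(Normalize(S)) == S;
    }
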

Modified: llvm/branches/wendling/eh/lib/Analysis/ValueTracking.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/ValueTracking.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/ValueTracking.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/ValueTracking.cpp Tue Oct 26 19:48:03 2010
@@ -880,19 +880,20 @@
     }
 
     Value *Mul0 = NULL;
-    Value *Mul1 = NULL;
-    bool M0 = ComputeMultiple(Op0, Base, Mul0,
-                              LookThroughSExt, Depth+1);
-    bool M1 = ComputeMultiple(Op1, Base, Mul1,
-                              LookThroughSExt, Depth+1);
-
-    if (M0) {
-      if (isa<Constant>(Op1) && isa<Constant>(Mul0)) {
-        // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
-        Multiple = ConstantExpr::getMul(cast<Constant>(Mul0),
-                                        cast<Constant>(Op1));
-        return true;
-      }
+    if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
+      if (Constant *Op1C = dyn_cast<Constant>(Op1))
+        if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
+          if (Op1C->getType()->getPrimitiveSizeInBits() < 
+              MulC->getType()->getPrimitiveSizeInBits())
+            Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
+          if (Op1C->getType()->getPrimitiveSizeInBits() > 
+              MulC->getType()->getPrimitiveSizeInBits())
+            MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
+          
+          // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
+          Multiple = ConstantExpr::getMul(MulC, Op1C);
+          return true;
+        }
 
       if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
         if (Mul0CI->getValue() == 1) {
@@ -902,13 +903,21 @@
         }
     }
 
-    if (M1) {
-      if (isa<Constant>(Op0) && isa<Constant>(Mul1)) {
-        // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
-        Multiple = ConstantExpr::getMul(cast<Constant>(Mul1),
-                                        cast<Constant>(Op0));
-        return true;
-      }
+    Value *Mul1 = NULL;
+    if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
+      if (Constant *Op0C = dyn_cast<Constant>(Op0))
+        if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
+          if (Op0C->getType()->getPrimitiveSizeInBits() < 
+              MulC->getType()->getPrimitiveSizeInBits())
+            Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
+          if (Op0C->getType()->getPrimitiveSizeInBits() > 
+              MulC->getType()->getPrimitiveSizeInBits())
+            MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
+          
+          // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
+          Multiple = ConstantExpr::getMul(MulC, Op0C);
+          return true;
+        }
 
       if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
         if (Mul1CI->getValue() == 1) {
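
The rewritten ComputeMultiple hunks above guard ConstantExpr::getMul, which requires both operands to have the same type: with LookThroughSExt the recursion can hand back a multiple narrower than the other operand, so the narrower constant is zero-extended first, mirroring the patch's getZExt calls. The rule in plain integers (toy Const struct, not llvm::Constant):

    #include <cassert>
    #include <stdint.h>

    struct Const { unsigned Bits; uint64_t Val; };

    Const zextTo(Const C, unsigned Bits) {
      assert(Bits >= C.Bits && "zext only widens");
      Const R = { Bits, C.Val };            // zero-extension keeps the value
      return R;
    }

    Const mulMatched(Const A, Const B) {
      if (A.Bits < B.Bits) A = zextTo(A, B.Bits);
      if (B.Bits < A.Bits) B = zextTo(B, A.Bits);
      uint64_t Mask = A.Bits >= 64 ? ~0ULL : ((1ULL << A.Bits) - 1);
      Const R = { A.Bits, (A.Val * B.Val) & Mask };  // same-width multiply
      return R;
    }
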
@@ -973,195 +982,6 @@
   return false;
 }
 
-
-/// GetLinearExpression - Analyze the specified value as a linear expression:
-/// "A*V + B", where A and B are constant integers.  Return the scale and offset
-/// values as APInts and return V as a Value*.  The incoming Value is known to
-/// have IntegerType.  Note that this looks through extends, so the high bits
-/// may not be represented in the result.
-static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
-                                  const TargetData *TD, unsigned Depth) {
-  assert(V->getType()->isIntegerTy() && "Not an integer value");
-
-  // Limit our recursion depth.
-  if (Depth == 6) {
-    Scale = 1;
-    Offset = 0;
-    return V;
-  }
-  
-  if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
-    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
-      switch (BOp->getOpcode()) {
-      default: break;
-      case Instruction::Or:
-        // X|C == X+C if all the bits in C are unset in X.  Otherwise we can't
-        // analyze it.
-        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), TD))
-          break;
-        // FALL THROUGH.
-      case Instruction::Add:
-        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, TD, Depth+1);
-        Offset += RHSC->getValue();
-        return V;
-      case Instruction::Mul:
-        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, TD, Depth+1);
-        Offset *= RHSC->getValue();
-        Scale *= RHSC->getValue();
-        return V;
-      case Instruction::Shl:
-        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, TD, Depth+1);
-        Offset <<= RHSC->getValue().getLimitedValue();
-        Scale <<= RHSC->getValue().getLimitedValue();
-        return V;
-      }
-    }
-  }
-  
-  // Since clients don't care about the high bits of the value, just scales and
-  // offsets, we can look through extensions.
-  if (isa<SExtInst>(V) || isa<ZExtInst>(V)) {
-    Value *CastOp = cast<CastInst>(V)->getOperand(0);
-    unsigned OldWidth = Scale.getBitWidth();
-    unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
-    Scale.trunc(SmallWidth);
-    Offset.trunc(SmallWidth);
-    Value *Result = GetLinearExpression(CastOp, Scale, Offset, TD, Depth+1);
-    Scale.zext(OldWidth);
-    Offset.zext(OldWidth);
-    return Result;
-  }
-  
-  Scale = 1;
-  Offset = 0;
-  return V;
-}
-
-/// DecomposeGEPExpression - If V is a symbolic pointer expression, decompose it
-/// into a base pointer with a constant offset and a number of scaled symbolic
-/// offsets.
-///
-/// The scaled symbolic offsets (represented by pairs of a Value* and a scale in
-/// the VarIndices vector) are Value*'s that are known to be scaled by the
-/// specified amount, but which may have other unrepresented high bits. As such,
-/// the gep cannot necessarily be reconstructed from its decomposed form.
-///
-/// When TargetData is around, this function is capable of analyzing everything
-/// that Value::getUnderlyingObject() can look through.  When not, it just looks
-/// through pointer casts.
-///
-const Value *llvm::DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
-                 SmallVectorImpl<std::pair<const Value*, int64_t> > &VarIndices,
-                                          const TargetData *TD) {
-  // Limit recursion depth to limit compile time in crazy cases.
-  unsigned MaxLookup = 6;
-  
-  BaseOffs = 0;
-  do {
-    // See if this is a bitcast or GEP.
-    const Operator *Op = dyn_cast<Operator>(V);
-    if (Op == 0) {
-      // The only non-operator case we can handle are GlobalAliases.
-      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
-        if (!GA->mayBeOverridden()) {
-          V = GA->getAliasee();
-          continue;
-        }
-      }
-      return V;
-    }
-    
-    if (Op->getOpcode() == Instruction::BitCast) {
-      V = Op->getOperand(0);
-      continue;
-    }
-    
-    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
-    if (GEPOp == 0)
-      return V;
-    
-    // Don't attempt to analyze GEPs over unsized objects.
-    if (!cast<PointerType>(GEPOp->getOperand(0)->getType())
-        ->getElementType()->isSized())
-      return V;
-    
-    // If we are lacking TargetData information, we can't compute the offets of
-    // elements computed by GEPs.  However, we can handle bitcast equivalent
-    // GEPs.
-    if (!TD) {
-      if (!GEPOp->hasAllZeroIndices())
-        return V;
-      V = GEPOp->getOperand(0);
-      continue;
-    }
-    
-    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
-    gep_type_iterator GTI = gep_type_begin(GEPOp);
-    for (User::const_op_iterator I = GEPOp->op_begin()+1,
-         E = GEPOp->op_end(); I != E; ++I) {
-      Value *Index = *I;
-      // Compute the (potentially symbolic) offset in bytes for this index.
-      if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
-        // For a struct, add the member offset.
-        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
-        if (FieldNo == 0) continue;
-        
-        BaseOffs += TD->getStructLayout(STy)->getElementOffset(FieldNo);
-        continue;
-      }
-      
-      // For an array/pointer, add the element offset, explicitly scaled.
-      if (ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
-        if (CIdx->isZero()) continue;
-        BaseOffs += TD->getTypeAllocSize(*GTI)*CIdx->getSExtValue();
-        continue;
-      }
-      
-      uint64_t Scale = TD->getTypeAllocSize(*GTI);
-      
-      // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
-      unsigned Width = cast<IntegerType>(Index->getType())->getBitWidth();
-      APInt IndexScale(Width, 0), IndexOffset(Width, 0);
-      Index = GetLinearExpression(Index, IndexScale, IndexOffset, TD, 0);
-      
-      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
-      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
-      BaseOffs += IndexOffset.getZExtValue()*Scale;
-      Scale *= IndexScale.getZExtValue();
-      
-      
-      // If we already had an occurrance of this index variable, merge this
-      // scale into it.  For example, we want to handle:
-      //   A[x][x] -> x*16 + x*4 -> x*20
-      // This also ensures that 'x' only appears in the index list once.
-      for (unsigned i = 0, e = VarIndices.size(); i != e; ++i) {
-        if (VarIndices[i].first == Index) {
-          Scale += VarIndices[i].second;
-          VarIndices.erase(VarIndices.begin()+i);
-          break;
-        }
-      }
-      
-      // Make sure that we have a scale that makes sense for this target's
-      // pointer size.
-      if (unsigned ShiftBits = 64-TD->getPointerSizeInBits()) {
-        Scale <<= ShiftBits;
-        Scale >>= ShiftBits;
-      }
-      
-      if (Scale)
-        VarIndices.push_back(std::make_pair(Index, Scale));
-    }
-    
-    // Analyze the base pointer next.
-    V = GEPOp->getOperand(0);
-  } while (--MaxLookup);
-  
-  // If the chain of expressions is too deep, just return early.
-  return V;
-}
-
-
 // This is the recursive version of BuildSubAggregate. It takes a few different
 // arguments. Idxs is the index within the nested struct From that we are
 // looking at now (which is of type IndexedType). IdxSkip is the number of

Modified: llvm/branches/wendling/eh/lib/AsmParser/LLLexer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/AsmParser/LLLexer.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/AsmParser/LLLexer.cpp (original)
+++ llvm/branches/wendling/eh/lib/AsmParser/LLLexer.cpp Tue Oct 26 19:48:03 2010
@@ -15,18 +15,19 @@
 #include "llvm/DerivedTypes.h"
 #include "llvm/Instruction.h"
 #include "llvm/LLVMContext.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Assembly/Parser.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/MemoryBuffer.h"
 #include "llvm/Support/MathExtras.h"
 #include "llvm/Support/SourceMgr.h"
 #include "llvm/Support/raw_ostream.h"
-#include "llvm/Assembly/Parser.h"
 #include <cstdio>
 #include <cstdlib>
 #include <cstring>
 using namespace llvm;
 
-bool LLLexer::Error(LocTy ErrorLoc, const std::string &Msg) const {
+bool LLLexer::Error(LocTy ErrorLoc, const Twine &Msg) const {
   ErrorInfo = SM.GetMessage(ErrorLoc, Msg, "error");
   return true;
 }
@@ -493,6 +494,7 @@
   KEYWORD(private);
   KEYWORD(linker_private);
   KEYWORD(linker_private_weak);
+  KEYWORD(linker_private_weak_def_auto);
   KEYWORD(internal);
   KEYWORD(available_externally);
   KEYWORD(linkonce);
@@ -548,6 +550,8 @@
   KEYWORD(arm_aapcscc);
   KEYWORD(arm_aapcs_vfpcc);
   KEYWORD(msp430_intrcc);
+  KEYWORD(ptx_kernel);
+  KEYWORD(ptx_device);
 
   KEYWORD(cc);
   KEYWORD(c);
@@ -574,10 +578,10 @@
   KEYWORD(noredzone);
   KEYWORD(noimplicitfloat);
   KEYWORD(naked);
+  KEYWORD(hotpatch);
 
   KEYWORD(type);
   KEYWORD(opaque);
-  KEYWORD(union);
 
   KEYWORD(eq); KEYWORD(ne); KEYWORD(slt); KEYWORD(sgt); KEYWORD(sle);
   KEYWORD(sge); KEYWORD(ult); KEYWORD(ugt); KEYWORD(ule); KEYWORD(uge);
@@ -600,6 +604,7 @@
   TYPEKEYWORD("ppc_fp128", Type::getPPC_FP128Ty(Context));
   TYPEKEYWORD("label",     Type::getLabelTy(Context));
   TYPEKEYWORD("metadata",  Type::getMetadataTy(Context));
+  TYPEKEYWORD("x86_mmx",   Type::getX86_MMXTy(Context));
 #undef TYPEKEYWORD
 
   // Handle special forms for autoupgrading.  Drop these in LLVM 3.0.  This is

Modified: llvm/branches/wendling/eh/lib/AsmParser/LLLexer.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/AsmParser/LLLexer.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/AsmParser/LLLexer.h (original)
+++ llvm/branches/wendling/eh/lib/AsmParser/LLLexer.h Tue Oct 26 19:48:03 2010
@@ -62,8 +62,8 @@
     const APFloat &getAPFloatVal() const { return APFloatVal; }
 
 
-    bool Error(LocTy L, const std::string &Msg) const;
-    bool Error(const std::string &Msg) const { return Error(getLoc(), Msg); }
+    bool Error(LocTy L, const Twine &Msg) const;
+    bool Error(const Twine &Msg) const { return Error(getLoc(), Msg); }
     std::string getFilename() const;
 
   private:
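
Taking const Twine& instead of const std::string& lets the callers updated below concatenate string literals, std::strings, and integers with no utostr() temporaries; a Twine is a lazy concatenation tree that is flattened exactly once, at the point of use. The new shape in miniature (reportError is a hypothetical stand-in for the Error methods):

    #include "llvm/ADT/Twine.h"
    #include <string>

    static bool reportError(const llvm::Twine &Msg) {
      std::string S = Msg.str();  // the single flattening and allocation
      // ... hand S to the diagnostic machinery ...
      (void)S;
      return true;
    }

    // Callers mix pieces freely, for example:
    //   reportError("use of undefined value '%" + llvm::Twine(ID) + "'");
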

Modified: llvm/branches/wendling/eh/lib/AsmParser/LLParser.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/AsmParser/LLParser.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/AsmParser/LLParser.cpp (original)
+++ llvm/branches/wendling/eh/lib/AsmParser/LLParser.cpp Tue Oct 26 19:48:03 2010
@@ -22,7 +22,6 @@
 #include "llvm/Operator.h"
 #include "llvm/ValueSymbolTable.h"
 #include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/StringExtras.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/raw_ostream.h"
 using namespace llvm;
@@ -52,7 +51,7 @@
         
         if (SlotNo >= NumberedMetadata.size() || NumberedMetadata[SlotNo] == 0)
           return Error(MDList[i].Loc, "use of undefined metadata '!" +
-                       utostr(SlotNo) + "'");
+                       Twine(SlotNo) + "'");
         Inst->setMetadata(MDList[i].MDKind, NumberedMetadata[SlotNo]);
       }
     }
@@ -109,7 +108,7 @@
   if (!ForwardRefTypeIDs.empty())
     return Error(ForwardRefTypeIDs.begin()->second.second,
                  "use of undefined type '%" +
-                 utostr(ForwardRefTypeIDs.begin()->first) + "'");
+                 Twine(ForwardRefTypeIDs.begin()->first) + "'");
 
   if (!ForwardRefVals.empty())
     return Error(ForwardRefVals.begin()->second.second,
@@ -119,12 +118,12 @@
   if (!ForwardRefValIDs.empty())
     return Error(ForwardRefValIDs.begin()->second.second,
                  "use of undefined value '@" +
-                 utostr(ForwardRefValIDs.begin()->first) + "'");
+                 Twine(ForwardRefValIDs.begin()->first) + "'");
 
   if (!ForwardRefMDNodes.empty())
     return Error(ForwardRefMDNodes.begin()->second.second,
                  "use of undefined metadata '!" +
-                 utostr(ForwardRefMDNodes.begin()->first) + "'");
+                 Twine(ForwardRefMDNodes.begin()->first) + "'");
 
 
   // Look for intrinsic functions and CallInst that need to be upgraded
@@ -199,6 +198,7 @@
     case lltok::kw_private:             // OptionalLinkage
     case lltok::kw_linker_private:      // OptionalLinkage
     case lltok::kw_linker_private_weak: // OptionalLinkage
+    case lltok::kw_linker_private_weak_def_auto: // OptionalLinkage
     case lltok::kw_internal:            // OptionalLinkage
     case lltok::kw_weak:                // OptionalLinkage
     case lltok::kw_weak_odr:            // OptionalLinkage
@@ -316,7 +316,7 @@
   if (Lex.getKind() == lltok::LocalVarID) {
     if (Lex.getUIntVal() != TypeID)
       return Error(Lex.getLoc(), "type expected to be numbered '%" +
-                   utostr(TypeID) + "'");
+                   Twine(TypeID) + "'");
     Lex.Lex(); // eat LocalVarID;
 
     if (ParseToken(lltok::equal, "expected '=' after name"))
@@ -443,7 +443,7 @@
   if (Lex.getKind() == lltok::GlobalID) {
     if (Lex.getUIntVal() != VarID)
       return Error(Lex.getLoc(), "variable expected to be numbered '%" +
-                   utostr(VarID) + "'");
+                   Twine(VarID) + "'");
     Lex.Lex(); // eat GlobalID;
 
     if (ParseToken(lltok::equal, "expected '=' after name"))
@@ -517,11 +517,7 @@
   if (Result) return false;
 
   // Otherwise, create MDNode forward reference.
-
-  // FIXME: This is not unique enough!
-  std::string FwdRefName = "llvm.mdnode.fwdref." + utostr(MID);
-  Value *V = MDString::get(Context, FwdRefName);
-  MDNode *FwdNode = MDNode::get(Context, &V, 1);
+  MDNode *FwdNode = MDNode::getTemporary(Context, 0, 0);
   ForwardRefMDNodes[MID] = std::make_pair(FwdNode, Lex.getLoc());
   
   if (NumberedMetadata.size() <= MID)
@@ -585,7 +581,9 @@
   std::map<unsigned, std::pair<TrackingVH<MDNode>, LocTy> >::iterator
     FI = ForwardRefMDNodes.find(MetadataID);
   if (FI != ForwardRefMDNodes.end()) {
-    FI->second.first->replaceAllUsesWith(Init);
+    MDNode *Temp = FI->second.first;
+    Temp->replaceAllUsesWith(Init);
+    MDNode::deleteTemporary(Temp);
     ForwardRefMDNodes.erase(FI);
     
     assert(NumberedMetadata[MetadataID] == Init && "Tracking VH didn't work");
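
The old placeholder, an MDString named "llvm.mdnode.fwdref.N", could collide with user strings and was never a real node; MDNode::getTemporary creates a genuine empty node that forward references can point at, and once the definition arrives the code above patches every use with replaceAllUsesWith and retires the placeholder with deleteTemporary.
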
@@ -625,7 +623,8 @@
       Linkage != GlobalValue::InternalLinkage &&
       Linkage != GlobalValue::PrivateLinkage &&
       Linkage != GlobalValue::LinkerPrivateLinkage &&
-      Linkage != GlobalValue::LinkerPrivateWeakLinkage)
+      Linkage != GlobalValue::LinkerPrivateWeakLinkage &&
+      Linkage != GlobalValue::LinkerPrivateWeakDefAutoLinkage)
     return Error(LinkageLoc, "invalid linkage type for alias");
 
   Constant *Aliasee;
@@ -676,7 +675,7 @@
 
   // Insert into the module, we know its name won't collide now.
   M->getAliasList().push_back(GA);
-  assert(GA->getNameStr() == Name && "Should not be a name conflict!");
+  assert(GA->getName() == Name && "Should not be a name conflict!");
 
   return false;
 }
@@ -855,7 +854,7 @@
   // If we have the value in the symbol table or fwd-ref table, return it.
   if (Val) {
     if (Val->getType() == Ty) return Val;
-    Error(Loc, "'@" + utostr(ID) + "' defined with type '" +
+    Error(Loc, "'@" + Twine(ID) + "' defined with type '" +
           Val->getType()->getDescription() + "'");
     return 0;
   }
@@ -983,6 +982,7 @@
     case lltok::kw_noredzone:       Attrs |= Attribute::NoRedZone; break;
     case lltok::kw_noimplicitfloat: Attrs |= Attribute::NoImplicitFloat; break;
     case lltok::kw_naked:           Attrs |= Attribute::Naked; break;
+    case lltok::kw_hotpatch:        Attrs |= Attribute::Hotpatch; break;
 
     case lltok::kw_alignstack: {
       unsigned Alignment;
@@ -1010,6 +1010,7 @@
 ///   ::= 'private'
 ///   ::= 'linker_private'
 ///   ::= 'linker_private_weak'
+///   ::= 'linker_private_weak_def_auto'
 ///   ::= 'internal'
 ///   ::= 'weak'
 ///   ::= 'weak_odr'
@@ -1031,6 +1032,9 @@
   case lltok::kw_linker_private_weak:
     Res = GlobalValue::LinkerPrivateWeakLinkage;
     break;
+  case lltok::kw_linker_private_weak_def_auto:
+    Res = GlobalValue::LinkerPrivateWeakDefAutoLinkage;
+    break;
   case lltok::kw_internal:       Res = GlobalValue::InternalLinkage;      break;
   case lltok::kw_weak:           Res = GlobalValue::WeakAnyLinkage;       break;
   case lltok::kw_weak_odr:       Res = GlobalValue::WeakODRLinkage;       break;
@@ -1080,6 +1084,8 @@
 ///   ::= 'arm_aapcscc'
 ///   ::= 'arm_aapcs_vfpcc'
 ///   ::= 'msp430_intrcc'
+///   ::= 'ptx_kernel'
+///   ::= 'ptx_device'
 ///   ::= 'cc' UINT
 ///
 bool LLParser::ParseOptionalCallingConv(CallingConv::ID &CC) {
@@ -1095,6 +1101,8 @@
   case lltok::kw_arm_aapcscc:    CC = CallingConv::ARM_AAPCS; break;
   case lltok::kw_arm_aapcs_vfpcc:CC = CallingConv::ARM_AAPCS_VFP; break;
   case lltok::kw_msp430_intrcc:  CC = CallingConv::MSP430_INTR; break;
+  case lltok::kw_ptx_kernel:     CC = CallingConv::PTX_Kernel; break;
+  case lltok::kw_ptx_device:     CC = CallingConv::PTX_Device; break;
   case lltok::kw_cc: {
       unsigned ArbitraryCC;
       Lex.Lex();
@@ -1113,29 +1121,44 @@
 
 /// ParseInstructionMetadata
 ///   ::= !dbg !42 (',' !dbg !57)*
-bool LLParser::ParseInstructionMetadata(Instruction *Inst) {
+bool LLParser::ParseInstructionMetadata(Instruction *Inst,
+                                        PerFunctionState *PFS) {
   do {
     if (Lex.getKind() != lltok::MetadataVar)
       return TokError("expected metadata after comma");
 
     std::string Name = Lex.getStrVal();
+    unsigned MDK = M->getMDKindID(Name.c_str());
     Lex.Lex();
 
     MDNode *Node;
-    unsigned NodeID;
     SMLoc Loc = Lex.getLoc();
-    if (ParseToken(lltok::exclaim, "expected '!' here") ||
-        ParseMDNodeID(Node, NodeID))
+
+    if (ParseToken(lltok::exclaim, "expected '!' here"))
       return true;
 
-    unsigned MDK = M->getMDKindID(Name.c_str());
-    if (Node) {
-      // If we got the node, add it to the instruction.
-      Inst->setMetadata(MDK, Node);
+    // This code is similar to that of ParseMetadataValue; however, it needs to
+    // have special-case code for a forward reference; see the comments on
+    // ForwardRefInstMetadata for details. Also, MDStrings are not supported
+    // at the top level here.
+    if (Lex.getKind() == lltok::lbrace) {
+      ValID ID;
+      if (ParseMetadataListValue(ID, PFS))
+        return true;
+      assert(ID.Kind == ValID::t_MDNode);
+      Inst->setMetadata(MDK, ID.MDNodeVal);
     } else {
-      MDRef R = { Loc, MDK, NodeID };
-      // Otherwise, remember that this should be resolved later.
-      ForwardRefInstMetadata[Inst].push_back(R);
+      unsigned NodeID = 0;
+      if (ParseMDNodeID(Node, NodeID))
+        return true;
+      if (Node) {
+        // If we got the node, add it to the instruction.
+        Inst->setMetadata(MDK, Node);
+      } else {
+        MDRef R = { Loc, MDK, NodeID };
+        // Otherwise, remember that this should be resolved later.
+        ForwardRefInstMetadata[Inst].push_back(R);
+      }
     }
 
     // If this is the end of the list, we're done.
@@ -1177,8 +1200,7 @@
     
     if (Lex.getKind() != lltok::kw_align)
       return Error(Lex.getLoc(), "expected metadata or 'align'");
-    
-    LocTy AlignLoc = Lex.getLoc();
+
     if (ParseOptionalAlignment(Alignment)) return true;
   }
 
@@ -1226,7 +1248,7 @@
       AteExtraComma = true;
       return false;
     }
-    unsigned Idx;
+    unsigned Idx = 0;
     if (ParseUInt32(Idx)) return true;
     Indices.push_back(Idx);
   }
@@ -1340,11 +1362,6 @@
     if (ParseStructType(Result, false))
       return true;
     break;
-  case lltok::kw_union:
-    // TypeRec ::= 'union' '{' ... '}'
-    if (ParseUnionType(Result))
-      return true;
-    break;
   case lltok::lsquare:
     // TypeRec ::= '[' ... ']'
     Lex.Lex(); // eat the lsquare.
@@ -1654,38 +1671,6 @@
   return false;
 }
 
-/// ParseUnionType
-///   TypeRec
-///     ::= 'union' '{' TypeRec (',' TypeRec)* '}'
-bool LLParser::ParseUnionType(PATypeHolder &Result) {
-  assert(Lex.getKind() == lltok::kw_union);
-  Lex.Lex(); // Consume the 'union'
-
-  if (ParseToken(lltok::lbrace, "'{' expected after 'union'")) return true;
-
-  SmallVector<PATypeHolder, 8> ParamsList;
-  do {
-    LocTy EltTyLoc = Lex.getLoc();
-    if (ParseTypeRec(Result)) return true;
-    ParamsList.push_back(Result);
-
-    if (Result->isVoidTy())
-      return Error(EltTyLoc, "union element can not have void type");
-    if (!UnionType::isValidElementType(Result))
-      return Error(EltTyLoc, "invalid element type for union");
-
-  } while (EatIfPresent(lltok::comma)) ;
-
-  if (ParseToken(lltok::rbrace, "expected '}' at end of union"))
-    return true;
-
-  SmallVector<const Type*, 8> ParamsListTy;
-  for (unsigned i = 0, e = ParamsList.size(); i != e; ++i)
-    ParamsListTy.push_back(ParamsList[i].get());
-  Result = HandleUpRefs(UnionType::get(&ParamsListTy[0], ParamsListTy.size()));
-  return false;
-}
-
 /// ParseArrayVectorType - Parse an array or vector type, assuming the first
 /// token has already been consumed.
 ///   TypeRec
@@ -1796,7 +1781,7 @@
   if (!ForwardRefValIDs.empty())
     return P.Error(ForwardRefValIDs.begin()->second.second,
                    "use of undefined value '%" +
-                   utostr(ForwardRefValIDs.begin()->first) + "'");
+                   Twine(ForwardRefValIDs.begin()->first) + "'");
   return false;
 }
 
@@ -1864,9 +1849,9 @@
   if (Val) {
     if (Val->getType() == Ty) return Val;
     if (Ty->isLabelTy())
-      P.Error(Loc, "'%" + utostr(ID) + "' is not a basic block");
+      P.Error(Loc, "'%" + Twine(ID) + "' is not a basic block");
     else
-      P.Error(Loc, "'%" + utostr(ID) + "' defined with type '" +
+      P.Error(Loc, "'%" + Twine(ID) + "' defined with type '" +
               Val->getType()->getDescription() + "'");
     return 0;
   }
@@ -1908,7 +1893,7 @@
 
     if (unsigned(NameID) != NumberedVals.size())
       return P.Error(NameLoc, "instruction expected to be numbered '%" +
-                     utostr(NumberedVals.size()) + "'");
+                     Twine(NumberedVals.size()) + "'");
 
     std::map<unsigned, std::pair<Value*, LocTy> >::iterator FI =
       ForwardRefValIDs.find(NameID);
@@ -1940,7 +1925,7 @@
   // Set the name on the instruction.
   Inst->setName(NameStr);
 
-  if (Inst->getNameStr() != NameStr)
+  if (Inst->getName() != NameStr)
     return P.Error(NameLoc, "multiple definition of local value named '" +
                    NameStr + "'");
   return false;
@@ -2086,7 +2071,7 @@
     for (unsigned i = 1, e = Elts.size(); i != e; ++i)
       if (Elts[i]->getType() != Elts[0]->getType())
         return Error(FirstEltLoc,
-                     "vector element #" + utostr(i) +
+                     "vector element #" + Twine(i) +
                     " is not of type '" + Elts[0]->getType()->getDescription());
 
     ID.ConstantVal = ConstantVector::get(Elts.data(), Elts.size());
@@ -2119,7 +2104,7 @@
     for (unsigned i = 0, e = Elts.size(); i != e; ++i) {
       if (Elts[i]->getType() != Elts[0]->getType())
         return Error(FirstEltLoc,
-                     "array element #" + utostr(i) +
+                     "array element #" + Twine(i) +
                      " is not of type '" +Elts[0]->getType()->getDescription());
     }
 
@@ -2500,6 +2485,20 @@
   return false;
 }
 
+bool LLParser::ParseMetadataListValue(ValID &ID, PerFunctionState *PFS) {
+  assert(Lex.getKind() == lltok::lbrace);
+  Lex.Lex();
+
+  SmallVector<Value*, 16> Elts;
+  if (ParseMDNodeVector(Elts, PFS) ||
+      ParseToken(lltok::rbrace, "expected end of metadata node"))
+    return true;
+
+  ID.MDNodeVal = MDNode::get(Context, Elts.data(), Elts.size());
+  ID.Kind = ValID::t_MDNode;
+  return false;
+}
+
 /// ParseMetadataValue
 ///  ::= !42
 ///  ::= !{...}
@@ -2510,16 +2509,8 @@
 
   // MDNode:
   // !{ ... }
-  if (EatIfPresent(lltok::lbrace)) {
-    SmallVector<Value*, 16> Elts;
-    if (ParseMDNodeVector(Elts, PFS) ||
-        ParseToken(lltok::rbrace, "expected end of metadata node"))
-      return true;
-
-    ID.MDNodeVal = MDNode::get(Context, Elts.data(), Elts.size());
-    ID.Kind = ValID::t_MDNode;
-    return false;
-  }
+  if (Lex.getKind() == lltok::lbrace)
+    return ParseMetadataListValue(ID, PFS);
 
   // Standalone metadata reference
   // !42
@@ -2631,16 +2622,8 @@
     V = Constant::getNullValue(Ty);
     return false;
   case ValID::t_Constant:
-    if (ID.ConstantVal->getType() != Ty) {
-      // Allow a constant struct with a single member to be converted
-      // to a union, if the union has a member which is the same type
-      // as the struct member.
-      if (const UnionType* utype = dyn_cast<UnionType>(Ty)) {
-        return ParseUnionValue(utype, ID, V);
-      }
-
+    if (ID.ConstantVal->getType() != Ty)
       return Error(ID.Loc, "constant expression type mismatch");
-    }
 
     V = ID.ConstantVal;
     return false;
@@ -2671,22 +2654,6 @@
   return false;
 }
 
-bool LLParser::ParseUnionValue(const UnionType* utype, ValID &ID, Value *&V) {
-  if (const StructType* stype = dyn_cast<StructType>(ID.ConstantVal->getType())) {
-    if (stype->getNumContainedTypes() != 1)
-      return Error(ID.Loc, "constant expression type mismatch");
-    int index = utype->getElementTypeIndex(stype->getContainedType(0));
-    if (index < 0)
-      return Error(ID.Loc, "initializer type is not a member of the union");
-
-    V = ConstantUnion::get(
-        utype, cast<Constant>(ID.ConstantVal->getOperand(0)));
-    return false;
-  }
-
-  return Error(ID.Loc, "constant expression type mismatch");
-}
-
 
 /// FunctionHeader
 ///   ::= OptionalLinkage OptionalVisibility OptionalCallingConv OptRetAttrs
@@ -2720,6 +2687,7 @@
   case GlobalValue::PrivateLinkage:
   case GlobalValue::LinkerPrivateLinkage:
   case GlobalValue::LinkerPrivateWeakLinkage:
+  case GlobalValue::LinkerPrivateWeakDefAutoLinkage:
   case GlobalValue::InternalLinkage:
   case GlobalValue::AvailableExternallyLinkage:
   case GlobalValue::LinkOnceAnyLinkage:
@@ -2749,7 +2717,7 @@
 
     if (NameID != NumberedVals.size())
       return TokError("function expected to be numbered '%" +
-                      utostr(NumberedVals.size()) + "'");
+                      Twine(NumberedVals.size()) + "'");
   } else {
     return TokError("expected function name");
   }
@@ -2856,7 +2824,7 @@
       Fn = cast<Function>(I->second.first);
       if (Fn->getType() != PFT)
         return Error(NameLoc, "type of definition and forward reference of '@" +
-                     utostr(NumberedVals.size()) +"' disagree");
+                     Twine(NumberedVals.size()) + "' disagree");
       ForwardRefValIDs.erase(I);
     }
   }
@@ -2890,7 +2858,7 @@
     // Set the name, if it conflicted, it will be auto-renamed.
     ArgIt->setName(ArgList[i].Name);
 
-    if (ArgIt->getNameStr() != ArgList[i].Name)
+    if (ArgIt->getName() != ArgList[i].Name)
       return Error(ArgList[i].Loc, "redefinition of argument '%" +
                    ArgList[i].Name + "'");
   }
@@ -2976,7 +2944,7 @@
       // With a normal result, we check to see if the instruction is followed by
       // a comma and metadata.
       if (EatIfPresent(lltok::comma))
-        if (ParseInstructionMetadata(Inst))
+        if (ParseInstructionMetadata(Inst, &PFS))
           return true;
       break;
     case InstExtraComma:
@@ -2984,7 +2952,7 @@
 
       // If the instruction parser ate an extra comma at the end of it, it
       // *must* be followed by metadata.
-      if (ParseInstructionMetadata(Inst))
+      if (ParseInstructionMetadata(Inst, &PFS))
         return true;
       break;        
     }

Modified: llvm/branches/wendling/eh/lib/AsmParser/LLParser.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/AsmParser/LLParser.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/AsmParser/LLParser.h (original)
+++ llvm/branches/wendling/eh/lib/AsmParser/LLParser.h Tue Oct 26 19:48:03 2010
@@ -32,7 +32,6 @@
   class GlobalValue;
   class MDString;
   class MDNode;
-  class UnionType;
 
   /// ValID - Represents a reference of a definition of some sort with no type.
   /// There are several cases where we have to parse the value but where the
@@ -80,6 +79,14 @@
     
     // Instruction metadata resolution.  Each instruction can have a list of
     // MDRef info associated with them.
+    //
+    // The simpler approach of just creating temporary MDNodes and then calling
+    // RAUW on them when the definition is processed doesn't work because some
+    // instruction metadata kinds, such as dbg, get stored in the IR in an
+    // "optimized" format which doesn't participate in the normal value use
+    // lists. This means that RAUW doesn't work, even on temporary MDNodes
+    // which otherwise support RAUW. Instead, we defer resolving MDNode
+    // references until the definitions have been processed.
     struct MDRef {
       SMLoc Loc;
       unsigned MDKind, MDSlot;
@@ -135,10 +142,10 @@
 
   private:
 
-    bool Error(LocTy L, const std::string &Msg) const {
+    bool Error(LocTy L, const Twine &Msg) const {
       return Lex.Error(L, Msg);
     }
-    bool TokError(const std::string &Msg) const {
+    bool TokError(const Twine &Msg) const {
       return Error(Lex.getLoc(), Msg);
     }
 
@@ -180,7 +187,6 @@
     bool ParseOptionalCallingConv(CallingConv::ID &CC);
     bool ParseOptionalAlignment(unsigned &Alignment);
     bool ParseOptionalStackAlignment(unsigned &Alignment);
-    bool ParseInstructionMetadata(Instruction *Inst);
     bool ParseOptionalCommaAlign(unsigned &Alignment, bool &AteExtraComma);
     bool ParseIndexList(SmallVectorImpl<unsigned> &Indices,bool &AteExtraComma);
     bool ParseIndexList(SmallVectorImpl<unsigned> &Indices) {
@@ -222,7 +228,6 @@
     }
     bool ParseTypeRec(PATypeHolder &H);
     bool ParseStructType(PATypeHolder &H, bool Packed);
-    bool ParseUnionType(PATypeHolder &H);
     bool ParseArrayVectorType(PATypeHolder &H, bool isVector);
     bool ParseFunctionType(PATypeHolder &Result);
     PATypeHolder HandleUpRefs(const Type *Ty);
@@ -291,7 +296,6 @@
       return ParseTypeAndBasicBlock(BB, Loc, PFS);
     }
 
-    bool ParseUnionValue(const UnionType* utype, ValID &ID, Value *&V);
 
     struct ParamInfo {
       LocTy Loc;
@@ -315,8 +319,10 @@
     bool ParseGlobalValue(const Type *Ty, Constant *&V);
     bool ParseGlobalTypeAndValue(Constant *&V);
     bool ParseGlobalValueVector(SmallVectorImpl<Constant*> &Elts);
+    bool ParseMetadataListValue(ValID &ID, PerFunctionState *PFS);
     bool ParseMetadataValue(ValID &ID, PerFunctionState *PFS);
     bool ParseMDNodeVector(SmallVectorImpl<Value*> &, PerFunctionState *PFS);
+    bool ParseInstructionMetadata(Instruction *Inst, PerFunctionState *PFS);
 
     // Function Parsing.
     struct ArgInfo {
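
The MDRef comment in the hunk above explains why the parser defers metadata
resolution instead of RAUW'ing temporary MDNodes. For readers unfamiliar with
the pattern, here is a minimal defer-and-backpatch sketch; it is illustrative
only (PendingRefs, Defs, noteRef and resolveAll are hypothetical names, not
the parser's real API):

    #include <map>
    #include <vector>

    struct Node { /* stand-in for a metadata definition */ };
    struct Ref  { unsigned Slot; Node **User; };  // who wants which MD slot

    struct DeferredResolver {
      std::vector<Ref> PendingRefs;       // references seen before their defs
      std::map<unsigned, Node*> Defs;     // slot -> resolved definition

      // While parsing an instruction: record the reference, resolve later.
      void noteRef(unsigned Slot, Node **User) {
        Ref R = { Slot, User };
        PendingRefs.push_back(R);
      }

      // When '!N = !{...}' is finally parsed: remember the definition.
      void noteDef(unsigned Slot, Node *N) { Defs[Slot] = N; }

      // After the function/module is parsed: backpatch every reference.
      // Returns false if some slot was never defined (a parse error).
      bool resolveAll() {
        for (std::vector<Ref>::iterator I = PendingRefs.begin(),
             E = PendingRefs.end(); I != E; ++I) {
          std::map<unsigned, Node*>::iterator D = Defs.find(I->Slot);
          if (D == Defs.end())
            return false;                 // forward reference never defined
          *I->User = D->second;           // wire the node to its user
        }
        PendingRefs.clear();
        return true;
      }
    };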

Modified: llvm/branches/wendling/eh/lib/AsmParser/LLToken.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/AsmParser/LLToken.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/AsmParser/LLToken.h (original)
+++ llvm/branches/wendling/eh/lib/AsmParser/LLToken.h Tue Oct 26 19:48:03 2010
@@ -48,6 +48,7 @@
     kw_internal,
     kw_linker_private,
     kw_linker_private_weak,
+    kw_linker_private_weak_def_auto,
     kw_linkonce,
     kw_linkonce_odr,
     kw_private,
@@ -99,6 +100,8 @@
     kw_coldcc,
     kw_fastcc,
     kw_msp430_intrcc,
+    kw_ptx_kernel,
+    kw_ptx_device,
     kw_x86_fastcallcc,
     kw_x86_stdcallcc,
     kw_x86_thiscallcc,
@@ -125,11 +128,11 @@
     kw_ssp,
     kw_sspreq,
     kw_zeroext,
+    kw_hotpatch,
 
     // Type tokens.
     kw_opaque,
     kw_type,
-    kw_union,
 
     // Binary operator tokens.
     kw_eq,  kw_ne,  kw_slt, kw_sgt, kw_sle, kw_sge, kw_ult, kw_ugt, kw_ule,

Modified: llvm/branches/wendling/eh/lib/AsmParser/Parser.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/AsmParser/Parser.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/AsmParser/Parser.cpp (original)
+++ llvm/branches/wendling/eh/lib/AsmParser/Parser.cpp Tue Oct 26 19:48:03 2010
@@ -45,8 +45,7 @@
   MemoryBuffer *F = MemoryBuffer::getFileOrSTDIN(Filename.c_str(), &ErrorStr);
   if (F == 0) {
     Err = SMDiagnostic(Filename,
-                       "Could not open input file '" + Filename + "': " +
-                       ErrorStr);
+                       "Could not open input file: " + ErrorStr);
     return 0;
   }
 

Modified: llvm/branches/wendling/eh/lib/Bitcode/Reader/BitcodeReader.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Bitcode/Reader/BitcodeReader.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Bitcode/Reader/BitcodeReader.cpp (original)
+++ llvm/branches/wendling/eh/lib/Bitcode/Reader/BitcodeReader.cpp Tue Oct 26 19:48:03 2010
@@ -77,6 +77,7 @@
   case 12: return GlobalValue::AvailableExternallyLinkage;
   case 13: return GlobalValue::LinkerPrivateLinkage;
   case 14: return GlobalValue::LinkerPrivateWeakLinkage;
+  case 15: return GlobalValue::LinkerPrivateWeakDefAutoLinkage;
   }
 }
 
@@ -135,7 +136,6 @@
   /// @brief A class for maintaining the slot number definition
   /// as a placeholder for the actual definition for forward constants defs.
   class ConstantPlaceHolder : public ConstantExpr {
-    ConstantPlaceHolder();                       // DO NOT IMPLEMENT
     void operator=(const ConstantPlaceHolder &); // DO NOT IMPLEMENT
   public:
     // allocate space for exactly one operand
@@ -148,7 +148,7 @@
     }
 
     /// @brief Methods to support type inquiry through isa, cast, and dyn_cast.
-    static inline bool classof(const ConstantPlaceHolder *) { return true; }
+    //static inline bool classof(const ConstantPlaceHolder *) { return true; }
     static bool classof(const Value *V) {
       return isa<ConstantExpr>(V) &&
              cast<ConstantExpr>(V)->getOpcode() == Instruction::UserOp1;
@@ -296,8 +296,6 @@
       } else if (ConstantStruct *UserCS = dyn_cast<ConstantStruct>(UserC)) {
         NewC = ConstantStruct::get(Context, &NewOps[0], NewOps.size(),
                                          UserCS->getType()->isPacked());
-      } else if (ConstantUnion *UserCU = dyn_cast<ConstantUnion>(UserC)) {
-        NewC = ConstantUnion::get(UserCU->getType(), NewOps[0]);
       } else if (isa<ConstantVector>(UserC)) {
         NewC = ConstantVector::get(&NewOps[0], NewOps.size());
       } else {
@@ -333,9 +331,9 @@
   }
 
   // If there was a forward reference to this value, replace it.
-  Value *PrevVal = OldV;
+  MDNode *PrevVal = cast<MDNode>(OldV);
   OldV->replaceAllUsesWith(V);
-  delete PrevVal;
+  MDNode::deleteTemporary(PrevVal);
   // Deleting PrevVal sets Idx value in MDValuePtrs to null. Set new
   // value for Idx.
   MDValuePtrs[Idx] = V;
@@ -351,7 +349,7 @@
   }
 
   // Create and return a placeholder, which will later be RAUW'd.
-  Value *V = new Argument(Type::getMetadataTy(Context));
+  Value *V = MDNode::getTemporary(Context, 0, 0);
   MDValuePtrs[Idx] = V;
   return V;
 }
@@ -551,6 +549,9 @@
     case bitc::TYPE_CODE_METADATA:  // METADATA
       ResultTy = Type::getMetadataTy(Context);
       break;
+    case bitc::TYPE_CODE_X86_MMX:   // X86_MMX
+      ResultTy = Type::getX86_MMXTy(Context);
+      break;
     case bitc::TYPE_CODE_INTEGER:   // INTEGER: [width]
       if (Record.size() < 1)
         return Error("Invalid Integer type record");
@@ -590,13 +591,6 @@
       ResultTy = StructType::get(Context, EltTys, Record[0]);
       break;
     }
-    case bitc::TYPE_CODE_UNION: {  // UNION: [eltty x N]
-      SmallVector<const Type*, 8> EltTys;
-      for (unsigned i = 0, e = Record.size(); i != e; ++i)
-        EltTys.push_back(getTypeByID(Record[i], true));
-      ResultTy = UnionType::get(&EltTys[0], EltTys.size());
-      break;
-    }
     case bitc::TYPE_CODE_ARRAY:     // ARRAY: [numelts, eltty]
       if (Record.size() < 2)
         return Error("Invalid ARRAY type record");
@@ -782,7 +776,8 @@
     bool IsFunctionLocal = false;
     // Read a record.
     Record.clear();
-    switch (Stream.ReadRecord(Code, Record)) {
+    Code = Stream.ReadRecord(Code, Record);
+    switch (Code) {
     default:  // Default behavior: ignore.
       break;
     case bitc::METADATA_NAME: {
@@ -795,9 +790,13 @@
       Record.clear();
       Code = Stream.ReadCode();
 
-      // METADATA_NAME is always followed by METADATA_NAMED_NODE.
-      if (Stream.ReadRecord(Code, Record) != bitc::METADATA_NAMED_NODE)
-        assert ( 0 && "Inavlid Named Metadata record");
+      // METADATA_NAME is always followed by METADATA_NAMED_NODE2.
+      // Or METADATA_NAMED_NODE in LLVM 2.7. FIXME: Remove this in LLVM 3.0.
+      unsigned NextBitCode = Stream.ReadRecord(Code, Record);
+      if (NextBitCode == bitc::METADATA_NAMED_NODE) {
+        LLVM2_7MetadataDetected = true;
+      } else if (NextBitCode != bitc::METADATA_NAMED_NODE2)
+        assert(0 && "Invalid Named Metadata record");
 
       // Read named metadata elements.
       unsigned Size = Record.size();
@@ -808,14 +807,29 @@
           return Error("Malformed metadata record");
         NMD->addOperand(MD);
       }
+      // Backwards compatibility hack: NamedMDValues used to be Values,
+      // and they got their own slots in the value numbering. They are no
+      // longer Values; however, we still need to account for them in the
+      // numbering in order to be able to read old bitcode files.
+      // FIXME: Remove this in LLVM 3.0.
+      if (LLVM2_7MetadataDetected)
+        MDValueList.AssignValue(0, NextMDValueNo++);
       break;
     }
-    case bitc::METADATA_FN_NODE:
+    case bitc::METADATA_FN_NODE: // FIXME: Remove in LLVM 3.0.
+    case bitc::METADATA_FN_NODE2:
       IsFunctionLocal = true;
       // fall-through
-    case bitc::METADATA_NODE: {
+    case bitc::METADATA_NODE:    // FIXME: Remove in LLVM 3.0.
+    case bitc::METADATA_NODE2: {
+
+      // Detect 2.7-era metadata.
+      // FIXME: Remove in LLVM 3.0.
+      if (Code == bitc::METADATA_FN_NODE || Code == bitc::METADATA_NODE)
+        LLVM2_7MetadataDetected = true;
+
       if (Record.size() % 2 == 1)
-        return Error("Invalid METADATA_NODE record");
+        return Error("Invalid METADATA_NODE2 record");
 
       unsigned Size = Record.size();
       SmallVector<Value*, 8> Elts;
@@ -1013,11 +1027,6 @@
           Elts.push_back(ValueList.getConstantFwdRef(Record[i],
                                                      STy->getElementType(i)));
         V = ConstantStruct::get(STy, Elts);
-      } else if (const UnionType *UnTy = dyn_cast<UnionType>(CurTy)) {
-        uint64_t Index = Record[0];
-        Constant *Val = ValueList.getConstantFwdRef(Record[1],
-                                        UnTy->getElementType(Index));
-        V = ConstantUnion::get(UnTy, Val);
       } else if (const ArrayType *ATy = dyn_cast<ArrayType>(CurTy)) {
         const Type *EltTy = ATy->getElementType();
         for (unsigned i = 0; i != Size; ++i)
@@ -1290,6 +1299,12 @@
           UpgradedIntrinsics.push_back(std::make_pair(FI, NewFn));
       }
 
+      // Look for global variables which need to be renamed.
+      for (Module::global_iterator
+             GI = TheModule->global_begin(), GE = TheModule->global_end();
+           GI != GE; ++GI)
+        UpgradeGlobalVariable(GI);
+
       // Force deallocation of memory for these vectors to favor the client that
       // want lazy deserialization.
       std::vector<std::pair<GlobalVariable*, unsigned> >().swap(GlobalInits);
@@ -1585,6 +1600,112 @@
   return false;
 }
 
+bool BitcodeReader::ParseModuleTriple(std::string &Triple) {
+  if (Stream.EnterSubBlock(bitc::MODULE_BLOCK_ID))
+    return Error("Malformed block record");
+
+  SmallVector<uint64_t, 64> Record;
+
+  // Read all the records for this module.
+  while (!Stream.AtEndOfStream()) {
+    unsigned Code = Stream.ReadCode();
+    if (Code == bitc::END_BLOCK) {
+      if (Stream.ReadBlockEnd())
+        return Error("Error at end of module block");
+
+      return false;
+    }
+
+    if (Code == bitc::ENTER_SUBBLOCK) {
+      switch (Stream.ReadSubBlockID()) {
+      default:  // Skip unknown content.
+        if (Stream.SkipBlock())
+          return Error("Malformed block record");
+        break;
+      }
+      continue;
+    }
+
+    if (Code == bitc::DEFINE_ABBREV) {
+      Stream.ReadAbbrevRecord();
+      continue;
+    }
+
+    // Read a record.
+    switch (Stream.ReadRecord(Code, Record)) {
+    default: break;  // Default behavior, ignore unknown content.
+    case bitc::MODULE_CODE_VERSION:  // VERSION: [version#]
+      if (Record.size() < 1)
+        return Error("Malformed MODULE_CODE_VERSION");
+      // Only version #0 is supported so far.
+      if (Record[0] != 0)
+        return Error("Unknown bitstream version!");
+      break;
+    case bitc::MODULE_CODE_TRIPLE: {  // TRIPLE: [strchr x N]
+      std::string S;
+      if (ConvertToString(Record, 0, S))
+        return Error("Invalid MODULE_CODE_TRIPLE record");
+      Triple = S;
+      break;
+    }
+    }
+    Record.clear();
+  }
+
+  return Error("Premature end of bitstream");
+}
+
+bool BitcodeReader::ParseTriple(std::string &Triple) {
+  if (Buffer->getBufferSize() & 3)
+    return Error("Bitcode stream should be a multiple of 4 bytes in length");
+
+  unsigned char *BufPtr = (unsigned char *)Buffer->getBufferStart();
+  unsigned char *BufEnd = BufPtr+Buffer->getBufferSize();
+
+  // If we have a wrapper header, parse it and ignore the non-bc file contents.
+  // The magic number is 0x0B17C0DE stored in little endian.
+  if (isBitcodeWrapper(BufPtr, BufEnd))
+    if (SkipBitcodeWrapperHeader(BufPtr, BufEnd))
+      return Error("Invalid bitcode wrapper header");
+
+  StreamFile.init(BufPtr, BufEnd);
+  Stream.init(StreamFile);
+
+  // Sniff for the signature.
+  if (Stream.Read(8) != 'B' ||
+      Stream.Read(8) != 'C' ||
+      Stream.Read(4) != 0x0 ||
+      Stream.Read(4) != 0xC ||
+      Stream.Read(4) != 0xE ||
+      Stream.Read(4) != 0xD)
+    return Error("Invalid bitcode signature");
+
+  // We expect a number of well-defined blocks, though we don't necessarily
+  // need to understand them all.
+  while (!Stream.AtEndOfStream()) {
+    unsigned Code = Stream.ReadCode();
+
+    if (Code != bitc::ENTER_SUBBLOCK)
+      return Error("Invalid record at top-level");
+
+    unsigned BlockID = Stream.ReadSubBlockID();
+
+    // We only know the MODULE subblock ID.
+    switch (BlockID) {
+    case bitc::MODULE_BLOCK_ID:
+      if (ParseModuleTriple(Triple))
+        return true;
+      break;
+    default:
+      if (Stream.SkipBlock())
+        return Error("Malformed block record");
+      break;
+    }
+  }
+
+  return false;
+}
+
 /// ParseMetadataAttachment - Parse metadata attachments.
 bool BitcodeReader::ParseMetadataAttachment() {
   if (Stream.EnterSubBlock(bitc::METADATA_ATTACHMENT_ID))
@@ -1607,7 +1728,10 @@
     switch (Stream.ReadRecord(Code, Record)) {
     default:  // Default behavior: ignore.
       break;
-    case bitc::METADATA_ATTACHMENT: {
+    // FIXME: Remove in LLVM 3.0.
+    case bitc::METADATA_ATTACHMENT:
+      LLVM2_7MetadataDetected = true;
+    case bitc::METADATA_ATTACHMENT2: {
       unsigned RecordLength = Record.size();
       if (Record.empty() || (RecordLength - 1) % 2 == 1)
         return Error ("Invalid METADATA_ATTACHMENT reader!");
@@ -1635,6 +1759,7 @@
 
   InstructionList.clear();
   unsigned ModuleValueListSize = ValueList.size();
+  unsigned ModuleMDValueListSize = MDValueList.size();
 
   // Add all the function arguments to the value table.
   for(Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E; ++I)
@@ -1719,7 +1844,10 @@
       I = 0;
       continue;
         
-    case bitc::FUNC_CODE_DEBUG_LOC: {      // DEBUG_LOC: [line, col, scope, ia]
+    // FIXME: Remove this in LLVM 3.0.
+    case bitc::FUNC_CODE_DEBUG_LOC:
+      LLVM2_7MetadataDetected = true;
+    case bitc::FUNC_CODE_DEBUG_LOC2: {      // DEBUG_LOC: [line, col, scope, ia]
       I = 0;     // Get the last instruction emitted.
       if (CurBB && !CurBB->empty())
         I = &CurBB->back();
@@ -1985,6 +2113,7 @@
         } while(OpNum != Record.size());
 
         const Type *ReturnType = F->getReturnType();
+        // Handle multiple return values. FIXME: Remove in LLVM 3.0.
         if (Vs.size() > 1 ||
             (ReturnType->isStructTy() &&
              (Vs.empty() || Vs[0]->getType() != ReturnType))) {
@@ -2216,7 +2345,7 @@
     }
     case bitc::FUNC_CODE_INST_ALLOCA: { // ALLOCA: [instty, opty, op, align]
       // For backward compatibility, tolerate a lack of an opty, and use i32.
-      // LLVM 3.0: Remove this.
+      // Remove this in LLVM 3.0.
       if (Record.size() < 3 || Record.size() > 4)
         return Error("Invalid ALLOCA record");
       unsigned OpNum = 0;
@@ -2269,7 +2398,10 @@
       InstructionList.push_back(I);
       break;
     }
-    case bitc::FUNC_CODE_INST_CALL: {
+    // FIXME: Remove this in LLVM 3.0.
+    case bitc::FUNC_CODE_INST_CALL:
+      LLVM2_7MetadataDetected = true;
+    case bitc::FUNC_CODE_INST_CALL2: {
       // CALL: [paramattrs, cc, fnty, fnid, arg0, arg1...]
       if (Record.size() < 3)
         return Error("Invalid CALL record");
@@ -2357,7 +2489,7 @@
     if (A->getParent() == 0) {
       // We found at least one unresolved value.  Nuke them all to avoid leaks.
       for (unsigned i = ModuleValueListSize, e = ValueList.size(); i != e; ++i){
-        if ((A = dyn_cast<Argument>(ValueList.back())) && A->getParent() == 0) {
+        if ((A = dyn_cast<Argument>(ValueList[i])) && A->getParent() == 0) {
           A->replaceAllUsesWith(UndefValue::get(A->getType()));
           delete A;
         }
@@ -2366,6 +2498,9 @@
     }
   }
 
+  // FIXME: Check for unresolved forward-declared metadata references
+  // and clean up leaks.
+
   // See if anything took the address of blocks in this function.  If so,
   // resolve them now.
   DenseMap<Function*, std::vector<BlockAddrRefTy> >::iterator BAFRI =
@@ -2385,8 +2520,21 @@
     BlockAddrFwdRefs.erase(BAFRI);
   }
   
+  // FIXME: Remove this in LLVM 3.0.
+  unsigned NewMDValueListSize = MDValueList.size();
+
   // Trim the value list down to the size it was before we parsed this function.
   ValueList.shrinkTo(ModuleValueListSize);
+  MDValueList.shrinkTo(ModuleMDValueListSize);
+
+  // Backwards compatibility hack: Function-local metadata numbers
+  // were previously not reset between functions. This is now fixed;
+  // however, we still need to understand the old numbering in order
+  // to be able to read old bitcode files.
+  // FIXME: Remove this in LLVM 3.0.
+  if (LLVM2_7MetadataDetected)
+    MDValueList.resize(NewMDValueListSize);
+
   std::vector<BasicBlock*>().swap(FunctionBBs);
 
   return false;
@@ -2531,7 +2679,24 @@
   // Read in the entire module, and destroy the BitcodeReader.
   if (M->MaterializeAllPermanently(ErrMsg)) {
     delete M;
-    return NULL;
+    return 0;
   }
+
   return M;
 }
+
+std::string llvm::getBitcodeTargetTriple(MemoryBuffer *Buffer,
+                                         LLVMContext& Context,
+                                         std::string *ErrMsg) {
+  BitcodeReader *R = new BitcodeReader(Buffer, Context);
+  // Don't let the BitcodeReader dtor delete 'Buffer'.
+  R->setBufferOwned(false);
+
+  std::string Triple("");
+  if (R->ParseTriple(Triple))
+    if (ErrMsg)
+      *ErrMsg = R->getErrorString();
+
+  delete R;
+  return Triple;
+}
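
The new getBitcodeTargetTriple entry point gives clients a cheap way to read
just the target triple without materializing the module. A sketch of a caller,
assuming the declaration is exported from llvm/Bitcode/ReaderWriter.h (error
handling abbreviated):

    #include "llvm/Bitcode/ReaderWriter.h"
    #include "llvm/LLVMContext.h"
    #include "llvm/Support/MemoryBuffer.h"
    #include <string>

    // Returns the triple recorded in a bitcode file, or "" on error.
    std::string readTriple(const std::string &Path, llvm::LLVMContext &Ctx) {
      std::string Err;
      llvm::MemoryBuffer *Buf =
          llvm::MemoryBuffer::getFileOrSTDIN(Path.c_str(), &Err);
      if (Buf == 0)
        return "";                 // could not open the file

      // Only MODULE_BLOCK records are scanned, up to MODULE_CODE_TRIPLE.
      std::string Triple = llvm::getBitcodeTargetTriple(Buf, Ctx, &Err);
      delete Buf;                  // the reader leaves buffer ownership here
      return Triple;
    }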

Modified: llvm/branches/wendling/eh/lib/Bitcode/Reader/BitcodeReader.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Bitcode/Reader/BitcodeReader.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Bitcode/Reader/BitcodeReader.h (original)
+++ llvm/branches/wendling/eh/lib/Bitcode/Reader/BitcodeReader.h Tue Oct 26 19:48:03 2010
@@ -173,11 +173,18 @@
   /// are resolved lazily when functions are loaded.
   typedef std::pair<unsigned, GlobalVariable*> BlockAddrRefTy;
   DenseMap<Function*, std::vector<BlockAddrRefTy> > BlockAddrFwdRefs;
+
+  /// LLVM2_7MetadataDetected - True if metadata produced by LLVM 2.7 or
+  /// earlier was detected, in which case we behave slightly differently,
+  /// for compatibility.
+  /// FIXME: Remove in LLVM 3.0.
+  bool LLVM2_7MetadataDetected;
   
 public:
   explicit BitcodeReader(MemoryBuffer *buffer, LLVMContext &C)
     : Context(C), TheModule(0), Buffer(buffer), BufferOwned(false),
-      ErrorString(0), ValueList(C), MDValueList(C) {
+      ErrorString(0), ValueList(C), MDValueList(C),
+      LLVM2_7MetadataDetected(false) {
     HasReversedFunctionsWithBodies = false;
   }
   ~BitcodeReader() {
@@ -205,6 +212,10 @@
   /// @brief Main interface to parsing a bitcode buffer.
   /// @returns true if an error occurred.
   bool ParseBitcodeInto(Module *M);
+
+  /// @brief Cheap mechanism to extract just the module triple.
+  /// @returns true if an error occurred.
+  bool ParseTriple(std::string &Triple);
 private:
   const Type *getTypeByID(unsigned ID, bool isTypeTable = false);
   Value *getFnValueByID(unsigned ID, const Type *Ty) {
@@ -263,6 +274,7 @@
   bool ResolveGlobalAndAliasInits();
   bool ParseMetadata();
   bool ParseMetadataAttachment();
+  bool ParseModuleTriple(std::string &Triple);
 };
   
 } // End llvm namespace
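
Pulling the scattered LLVM2_7MetadataDetected hunks together, the per-function
bookkeeping in the reader reduces to the following shape (condensed from the
BitcodeReader.cpp hunks above; the actual parsing is elided):

    // On entry to a function body, snapshot the module-level MD count.
    unsigned ModuleMDValueListSize = MDValueList.size();

    // ... parse the body; function-local metadata may be appended ...

    unsigned NewMDValueListSize = MDValueList.size();

    // Normal case: function-local metadata numbering resets per function.
    MDValueList.shrinkTo(ModuleMDValueListSize);

    // 2.7-era bitcode numbered function-local metadata globally, so keep
    // the slots occupied to preserve the old numbering.
    if (LLVM2_7MetadataDetected)
      MDValueList.resize(NewMDValueListSize);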

Modified: llvm/branches/wendling/eh/lib/Bitcode/Writer/BitcodeWriter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Bitcode/Writer/BitcodeWriter.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Bitcode/Writer/BitcodeWriter.cpp (original)
+++ llvm/branches/wendling/eh/lib/Bitcode/Writer/BitcodeWriter.cpp Tue Oct 26 19:48:03 2010
@@ -181,14 +181,6 @@
                             Log2_32_Ceil(VE.getTypes().size()+1)));
   unsigned StructAbbrev = Stream.EmitAbbrev(Abbv);
 
-  // Abbrev for TYPE_CODE_UNION.
-  Abbv = new BitCodeAbbrev();
-  Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_UNION));
-  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
-  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
-                            Log2_32_Ceil(VE.getTypes().size()+1)));
-  unsigned UnionAbbrev = Stream.EmitAbbrev(Abbv);
-
   // Abbrev for TYPE_CODE_ARRAY.
   Abbv = new BitCodeAbbrev();
   Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_ARRAY));
@@ -219,6 +211,7 @@
     case Type::LabelTyID:  Code = bitc::TYPE_CODE_LABEL;  break;
     case Type::OpaqueTyID: Code = bitc::TYPE_CODE_OPAQUE; break;
     case Type::MetadataTyID: Code = bitc::TYPE_CODE_METADATA; break;
+    case Type::X86_MMXTyID: Code = bitc::TYPE_CODE_X86_MMX; break;
     case Type::IntegerTyID:
       // INTEGER: [width]
       Code = bitc::TYPE_CODE_INTEGER;
@@ -258,17 +251,6 @@
       AbbrevToUse = StructAbbrev;
       break;
     }
-    case Type::UnionTyID: {
-      const UnionType *UT = cast<UnionType>(T);
-      // UNION: [eltty x N]
-      Code = bitc::TYPE_CODE_UNION;
-      // Output all of the element types.
-      for (UnionType::element_iterator I = UT->element_begin(),
-           E = UT->element_end(); I != E; ++I)
-        TypeVals.push_back(VE.getTypeID(*I));
-      AbbrevToUse = UnionAbbrev;
-      break;
-    }
     case Type::ArrayTyID: {
       const ArrayType *AT = cast<ArrayType>(T);
       // ARRAY: [numelts, eltty]
@@ -299,21 +281,22 @@
 static unsigned getEncodedLinkage(const GlobalValue *GV) {
   switch (GV->getLinkage()) {
   default: llvm_unreachable("Invalid linkage!");
-  case GlobalValue::ExternalLinkage:            return 0;
-  case GlobalValue::WeakAnyLinkage:             return 1;
-  case GlobalValue::AppendingLinkage:           return 2;
-  case GlobalValue::InternalLinkage:            return 3;
-  case GlobalValue::LinkOnceAnyLinkage:         return 4;
-  case GlobalValue::DLLImportLinkage:           return 5;
-  case GlobalValue::DLLExportLinkage:           return 6;
-  case GlobalValue::ExternalWeakLinkage:        return 7;
-  case GlobalValue::CommonLinkage:              return 8;
-  case GlobalValue::PrivateLinkage:             return 9;
-  case GlobalValue::WeakODRLinkage:             return 10;
-  case GlobalValue::LinkOnceODRLinkage:         return 11;
-  case GlobalValue::AvailableExternallyLinkage: return 12;
-  case GlobalValue::LinkerPrivateLinkage:       return 13;
-  case GlobalValue::LinkerPrivateWeakLinkage:   return 14;
+  case GlobalValue::ExternalLinkage:                 return 0;
+  case GlobalValue::WeakAnyLinkage:                  return 1;
+  case GlobalValue::AppendingLinkage:                return 2;
+  case GlobalValue::InternalLinkage:                 return 3;
+  case GlobalValue::LinkOnceAnyLinkage:              return 4;
+  case GlobalValue::DLLImportLinkage:                return 5;
+  case GlobalValue::DLLExportLinkage:                return 6;
+  case GlobalValue::ExternalWeakLinkage:             return 7;
+  case GlobalValue::CommonLinkage:                   return 8;
+  case GlobalValue::PrivateLinkage:                  return 9;
+  case GlobalValue::WeakODRLinkage:                  return 10;
+  case GlobalValue::LinkOnceODRLinkage:              return 11;
+  case GlobalValue::AvailableExternallyLinkage:      return 12;
+  case GlobalValue::LinkerPrivateLinkage:            return 13;
+  case GlobalValue::LinkerPrivateWeakLinkage:        return 14;
+  case GlobalValue::LinkerPrivateWeakDefAutoLinkage: return 15;
   }
 }
 
@@ -503,8 +486,8 @@
       Record.push_back(0);
     }
   }
-  unsigned MDCode = N->isFunctionLocal() ? bitc::METADATA_FN_NODE :
-                                           bitc::METADATA_NODE;
+  unsigned MDCode = N->isFunctionLocal() ? bitc::METADATA_FN_NODE2 :
+                                           bitc::METADATA_NODE2;
   Stream.EmitRecord(MDCode, Record, 0);
   Record.clear();
 }
@@ -567,7 +550,7 @@
     // Write named metadata operands.
     for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i)
       Record.push_back(VE.getValueID(NMD->getOperand(i)));
-    Stream.EmitRecord(bitc::METADATA_NAMED_NODE, Record, 0);
+    Stream.EmitRecord(bitc::METADATA_NAMED_NODE2, Record, 0);
     Record.clear();
   }
 
@@ -603,7 +586,7 @@
   SmallVector<uint64_t, 64> Record;
 
   // Write metadata attachments
-  // METADATA_ATTACHMENT - [m x [value, [n x [id, mdnode]]]
+  // METADATA_ATTACHMENT2 - [m x [value, [n x [id, mdnode]]]
   SmallVector<std::pair<unsigned, MDNode*>, 4> MDs;
   
   for (Function::const_iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
@@ -621,7 +604,7 @@
         Record.push_back(MDs[i].first);
         Record.push_back(VE.getValueID(MDs[i].second));
       }
-      Stream.EmitRecord(bitc::METADATA_ATTACHMENT, Record, 0);
+      Stream.EmitRecord(bitc::METADATA_ATTACHMENT2, Record, 0);
       Record.clear();
     }
 
@@ -735,8 +718,8 @@
       Code = bitc::CST_CODE_UNDEF;
     } else if (const ConstantInt *IV = dyn_cast<ConstantInt>(C)) {
       if (IV->getBitWidth() <= 64) {
-        int64_t V = IV->getSExtValue();
-        if (V >= 0)
+        uint64_t V = IV->getSExtValue();
+        if ((int64_t)V >= 0)
           Record.push_back(V << 1);
         else
           Record.push_back((-V << 1) | 1);
@@ -810,20 +793,6 @@
       for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i)
         Record.push_back(VE.getValueID(C->getOperand(i)));
       AbbrevToUse = AggregateAbbrev;
-    } else if (isa<ConstantUnion>(C)) {
-      Code = bitc::CST_CODE_AGGREGATE;
-
-      // Unions only have one entry but we must send type along with it.
-      const Type *EntryKind = C->getOperand(0)->getType();
-
-      const UnionType *UnTy = cast<UnionType>(C->getType());
-      int UnionIndex = UnTy->getElementTypeIndex(EntryKind);
-      assert(UnionIndex != -1 && "Constant union contains invalid entry");
-
-      Record.push_back(UnionIndex);
-      Record.push_back(VE.getValueID(C->getOperand(0)));
-
-      AbbrevToUse = AggregateAbbrev;
     } else if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
       switch (CE->getOpcode()) {
       default:
@@ -1160,7 +1129,7 @@
     const PointerType *PTy = cast<PointerType>(CI.getCalledValue()->getType());
     const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
 
-    Code = bitc::FUNC_CODE_INST_CALL;
+    Code = bitc::FUNC_CODE_INST_CALL2;
 
     Vals.push_back(VE.getAttributeID(CI.getAttributes()));
     Vals.push_back((CI.getCallingConv() << 1) | unsigned(CI.isTailCall()));
@@ -1304,7 +1273,7 @@
         Vals.push_back(DL.getCol());
         Vals.push_back(Scope ? VE.getValueID(Scope)+1 : 0);
         Vals.push_back(IA ? VE.getValueID(IA)+1 : 0);
-        Stream.EmitRecord(bitc::FUNC_CODE_DEBUG_LOC, Vals);
+        Stream.EmitRecord(bitc::FUNC_CODE_DEBUG_LOC2, Vals);
         Vals.clear();
         
         LastDL = DL;
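
The ConstantInt hunk above switches the shifted value to uint64_t because
left-shifting a negative int64_t is undefined behavior in C++; the on-disk
encoding is unchanged sign-magnitude VBR: n >= 0 encodes as 2n, n < 0 as
2|n|+1. A standalone round-trip sketch (helper names are hypothetical):

    #include <cassert>
    #include <cstdint>

    // n >= 0  ->  (n << 1)          (even)
    // n <  0  ->  ((-n) << 1) | 1   (odd)
    // Shifts are done on uint64_t, so negative inputs cause no UB.
    // Note: INT64_MIN does not round-trip through this 64-bit form.
    static uint64_t encodeSignedVBR(int64_t N) {
      uint64_t V = static_cast<uint64_t>(N);
      if (N >= 0)
        return V << 1;
      return (-V << 1) | 1;        // -V is modular negation, yielding |N|
    }

    static int64_t decodeSignedVBR(uint64_t V) {
      if ((V & 1) == 0)
        return static_cast<int64_t>(V >> 1);
      return -static_cast<int64_t>(V >> 1);
    }

    int main() {
      assert(encodeSignedVBR(5)  == 10);
      assert(encodeSignedVBR(-5) == 11);
      assert(decodeSignedVBR(encodeSignedVBR(-123456789)) == -123456789);
      return 0;
    }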

Modified: llvm/branches/wendling/eh/lib/Bitcode/Writer/BitcodeWriterPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Bitcode/Writer/BitcodeWriterPass.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Bitcode/Writer/BitcodeWriterPass.cpp (original)
+++ llvm/branches/wendling/eh/lib/Bitcode/Writer/BitcodeWriterPass.cpp Tue Oct 26 19:48:03 2010
@@ -21,7 +21,7 @@
   public:
     static char ID; // Pass identification, replacement for typeid
     explicit WriteBitcodePass(raw_ostream &o)
-      : ModulePass(&ID), OS(o) {}
+      : ModulePass(ID), OS(o) {}
     
     const char *getPassName() const { return "Bitcode Writer"; }
     

Modified: llvm/branches/wendling/eh/lib/Bitcode/Writer/ValueEnumerator.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Bitcode/Writer/ValueEnumerator.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Bitcode/Writer/ValueEnumerator.cpp (original)
+++ llvm/branches/wendling/eh/lib/Bitcode/Writer/ValueEnumerator.cpp Tue Oct 26 19:48:03 2010
@@ -137,7 +137,7 @@
 unsigned ValueEnumerator::getInstructionID(const Instruction *Inst) const {
   InstructionMapType::const_iterator I = InstructionMap.find(Inst);
   assert (I != InstructionMap.end() && "Instruction is not mapped!");
-    return I->second;
+  return I->second;
 }
 
 void ValueEnumerator::setInstructionID(const Instruction *I) {
@@ -217,12 +217,38 @@
 
 void ValueEnumerator::EnumerateNamedMDNode(const NamedMDNode *MD) {
   for (unsigned i = 0, e = MD->getNumOperands(); i != e; ++i)
-    if (MDNode *E = MD->getOperand(i))
-      EnumerateValue(E);
+    EnumerateMetadata(MD->getOperand(i));
+}
+
+/// EnumerateMDNodeOperands - Enumerate all non-function-local values
+/// and types referenced by the given MDNode.
+void ValueEnumerator::EnumerateMDNodeOperands(const MDNode *N) {
+  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
+    if (Value *V = N->getOperand(i)) {
+      if (isa<MDNode>(V) || isa<MDString>(V))
+        EnumerateMetadata(V);
+      else if (!isa<Instruction>(V) && !isa<Argument>(V))
+        EnumerateValue(V);
+    } else
+      EnumerateType(Type::getVoidTy(N->getContext()));
+  }
 }
 
 void ValueEnumerator::EnumerateMetadata(const Value *MD) {
   assert((isa<MDNode>(MD) || isa<MDString>(MD)) && "Invalid metadata kind");
+
+  // Enumerate the type of this value.
+  EnumerateType(MD->getType());
+
+  const MDNode *N = dyn_cast<MDNode>(MD);
+
+  // In the module-level pass, skip function-local nodes themselves, but
+  // do walk their operands.
+  if (N && N->isFunctionLocal() && N->getFunction()) {
+    EnumerateMDNodeOperands(N);
+    return;
+  }
+
   // Check to see if it's already in!
   unsigned &MDValueID = MDValueMap[MD];
   if (MDValueID) {
@@ -230,35 +256,52 @@
     MDValues[MDValueID-1].second++;
     return;
   }
+  MDValues.push_back(std::make_pair(MD, 1U));
+  MDValueID = MDValues.size();
+
+  // Enumerate all non-function-local operands.
+  if (N)
+    EnumerateMDNodeOperands(N);
+}
+
+/// EnumerateFunctionLocalMetadata - Incorporate function-local metadata
+/// information reachable from the given MDNode.
+void ValueEnumerator::EnumerateFunctionLocalMetadata(const MDNode *N) {
+  assert(N->isFunctionLocal() && N->getFunction() &&
+         "EnumerateFunctionLocalMetadata called on non-function-local mdnode!");
 
   // Enumerate the type of this value.
-  EnumerateType(MD->getType());
+  EnumerateType(N->getType());
 
-  if (const MDNode *N = dyn_cast<MDNode>(MD)) {
-    MDValues.push_back(std::make_pair(MD, 1U));
-    MDValueMap[MD] = MDValues.size();
-    MDValueID = MDValues.size();
-    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
-      if (Value *V = N->getOperand(i))
-        EnumerateValue(V);
-      else
-        EnumerateType(Type::getVoidTy(MD->getContext()));
-    }
-    if (N->isFunctionLocal() && N->getFunction())
-      FunctionLocalMDs.push_back(N);
+  // Check to see if it's already in!
+  unsigned &MDValueID = MDValueMap[N];
+  if (MDValueID) {
+    // Increment use count.
+    MDValues[MDValueID-1].second++;
     return;
   }
-  
-  // Add the value.
-  assert(isa<MDString>(MD) && "Unknown metadata kind");
-  MDValues.push_back(std::make_pair(MD, 1U));
+  MDValues.push_back(std::make_pair(N, 1U));
   MDValueID = MDValues.size();
+
+  // To incorporate function-local information, visit all function-local
+  // MDNodes and all function-local values they reference.
+  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
+    if (Value *V = N->getOperand(i)) {
+      if (MDNode *O = dyn_cast<MDNode>(V)) {
+        if (O->isFunctionLocal() && O->getFunction())
+          EnumerateFunctionLocalMetadata(O);
+      } else if (isa<Instruction>(V) || isa<Argument>(V))
+        EnumerateValue(V);
+    }
+
+  // Also, collect all function-local MDNodes for easy access.
+  FunctionLocalMDs.push_back(N);
 }
 
 void ValueEnumerator::EnumerateValue(const Value *V) {
   assert(!V->getType()->isVoidTy() && "Can't insert void values!");
-  if (isa<MDNode>(V) || isa<MDString>(V))
-    return EnumerateMetadata(V);
+  assert(!isa<MDNode>(V) && !isa<MDString>(V) &&
+         "EnumerateValue doesn't handle Metadata!");
 
   // Check to see if it's already in!
   unsigned &ValueID = ValueMap[V];
@@ -344,7 +387,7 @@
       // blockaddress.
       if (isa<BasicBlock>(Op)) continue;
       
-      EnumerateOperandType(cast<Constant>(Op));
+      EnumerateOperandType(Op);
     }
 
     if (const MDNode *N = dyn_cast<MDNode>(V)) {
@@ -353,7 +396,7 @@
           EnumerateOperandType(Elem);
     }
   } else if (isa<MDString>(V) || isa<MDNode>(V))
-    EnumerateValue(V);
+    EnumerateMetadata(V);
 }
 
 void ValueEnumerator::EnumerateAttributes(const AttrListPtr &PAL) {
@@ -371,6 +414,7 @@
 void ValueEnumerator::incorporateFunction(const Function &F) {
   InstructionCount = 0;
   NumModuleValues = Values.size();
+  NumModuleMDValues = MDValues.size();
 
   // Adding function arguments to the value table.
   for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end();
@@ -401,7 +445,6 @@
 
   FirstInstID = Values.size();
 
-  FunctionLocalMDs.clear();
   SmallVector<MDNode *, 8> FnLocalMDVector;
   // Add all of the instructions.
   for (Function::const_iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
@@ -413,6 +456,15 @@
             // Enumerate metadata after the instructions they might refer to.
             FnLocalMDVector.push_back(MD);
       }
+
+      SmallVector<std::pair<unsigned, MDNode*>, 8> MDs;
+      I->getAllMetadataOtherThanDebugLoc(MDs);
+      for (unsigned i = 0, e = MDs.size(); i != e; ++i) {
+        MDNode *N = MDs[i].second;
+        if (N->isFunctionLocal() && N->getFunction())
+          FnLocalMDVector.push_back(N);
+      }
+        
       if (!I->getType()->isVoidTy())
         EnumerateValue(I);
     }
@@ -420,18 +472,22 @@
 
   // Add all of the function-local metadata.
   for (unsigned i = 0, e = FnLocalMDVector.size(); i != e; ++i)
-    EnumerateOperandType(FnLocalMDVector[i]);
+    EnumerateFunctionLocalMetadata(FnLocalMDVector[i]);
 }
 
 void ValueEnumerator::purgeFunction() {
   /// Remove purged values from the ValueMap.
   for (unsigned i = NumModuleValues, e = Values.size(); i != e; ++i)
     ValueMap.erase(Values[i].first);
+  for (unsigned i = NumModuleMDValues, e = MDValues.size(); i != e; ++i)
+    MDValueMap.erase(MDValues[i].first);
   for (unsigned i = 0, e = BasicBlocks.size(); i != e; ++i)
     ValueMap.erase(BasicBlocks[i]);
 
   Values.resize(NumModuleValues);
+  MDValues.resize(NumModuleMDValues);
   BasicBlocks.clear();
+  FunctionLocalMDs.clear();
 }
 
 static void IncorporateFunctionInfoGlobalBBIDs(const Function *F,
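
The MDValues handling now mirrors the save-restore the enumerator already did
for Values across function incorporation. Condensed from the hunks above
(BasicBlocks handling and the enumeration bodies elided):

    void ValueEnumerator::incorporateFunction(const Function &F) {
      NumModuleValues   = Values.size();    // snapshot module-level state
      NumModuleMDValues = MDValues.size();
      // ... enumerate arguments, constants, instructions, and any
      // function-local metadata they reference ...
    }

    void ValueEnumerator::purgeFunction() {
      // Erase only the entries added for this function, then truncate
      // the tables back to their module-level snapshots.
      for (unsigned i = NumModuleValues, e = Values.size(); i != e; ++i)
        ValueMap.erase(Values[i].first);
      for (unsigned i = NumModuleMDValues, e = MDValues.size(); i != e; ++i)
        MDValueMap.erase(MDValues[i].first);
      Values.resize(NumModuleValues);
      MDValues.resize(NumModuleMDValues);
      FunctionLocalMDs.clear();
    }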

Modified: llvm/branches/wendling/eh/lib/Bitcode/Writer/ValueEnumerator.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Bitcode/Writer/ValueEnumerator.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Bitcode/Writer/ValueEnumerator.h (original)
+++ llvm/branches/wendling/eh/lib/Bitcode/Writer/ValueEnumerator.h Tue Oct 26 19:48:03 2010
@@ -72,6 +72,11 @@
   /// When a function is incorporated, this is the size of the Values list
   /// before incorporation.
   unsigned NumModuleValues;
+
+  /// When a function is incorporated, this is the size of the MDValues list
+  /// before incorporation.
+  unsigned NumModuleMDValues;
+
   unsigned FirstFuncConstantID;
   unsigned FirstInstID;
   
@@ -132,7 +137,9 @@
 private:
   void OptimizeConstants(unsigned CstStart, unsigned CstEnd);
     
+  void EnumerateMDNodeOperands(const MDNode *N);
   void EnumerateMetadata(const Value *MD);
+  void EnumerateFunctionLocalMetadata(const MDNode *N);
   void EnumerateNamedMDNode(const NamedMDNode *NMD);
   void EnumerateValue(const Value *V);
   void EnumerateType(const Type *T);

Modified: llvm/branches/wendling/eh/lib/CodeGen/AggressiveAntiDepBreaker.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/AggressiveAntiDepBreaker.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/AggressiveAntiDepBreaker.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/AggressiveAntiDepBreaker.cpp Tue Oct 26 19:48:03 2010
@@ -654,6 +654,8 @@
     if (R == RB) R = RE;
     --R;
     const unsigned NewSuperReg = *R;
+    // Don't consider non-allocatable registers
+    if (!AllocatableSet.test(NewSuperReg)) continue;
     // Don't replace a register with itself.
     if (NewSuperReg == SuperReg) continue;
 

Modified: llvm/branches/wendling/eh/lib/CodeGen/Analysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/Analysis.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/Analysis.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/Analysis.cpp Tue Oct 26 19:48:03 2010
@@ -30,7 +30,7 @@
 /// of insertvalue or extractvalue indices that identify a member, return
 /// the linearized index of the start of the member.
 ///
-unsigned llvm::ComputeLinearIndex(const TargetLowering &TLI, const Type *Ty,
+unsigned llvm::ComputeLinearIndex(const Type *Ty,
                                   const unsigned *Indices,
                                   const unsigned *IndicesEnd,
                                   unsigned CurIndex) {
@@ -45,8 +45,8 @@
                                       EE = STy->element_end();
         EI != EE; ++EI) {
       if (Indices && *Indices == unsigned(EI - EB))
-        return ComputeLinearIndex(TLI, *EI, Indices+1, IndicesEnd, CurIndex);
-      CurIndex = ComputeLinearIndex(TLI, *EI, 0, 0, CurIndex);
+        return ComputeLinearIndex(*EI, Indices+1, IndicesEnd, CurIndex);
+      CurIndex = ComputeLinearIndex(*EI, 0, 0, CurIndex);
     }
     return CurIndex;
   }
@@ -55,8 +55,8 @@
     const Type *EltTy = ATy->getElementType();
     for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
       if (Indices && *Indices == i)
-        return ComputeLinearIndex(TLI, EltTy, Indices+1, IndicesEnd, CurIndex);
-      CurIndex = ComputeLinearIndex(TLI, EltTy, 0, 0, CurIndex);
+        return ComputeLinearIndex(EltTy, Indices+1, IndicesEnd, CurIndex);
+      CurIndex = ComputeLinearIndex(EltTy, 0, 0, CurIndex);
     }
     return CurIndex;
   }
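
ComputeLinearIndex flattens nested insertvalue/extractvalue indices into a
single left-to-right leaf position; dropping the unused TargetLowering
argument changes no behavior. For { i32, { float, double }, [2 x i8] } the
leaves number i32=0, float=1, double=2, i8=3, i8=4, so indices {1, 1} (the
double) yield 2 and {2, 1} (the second i8) yield 4. A minimal call sketch
against the post-patch signature (header location assumed to be
llvm/CodeGen/Analysis.h):

    #include "llvm/CodeGen/Analysis.h"

    // Indices as they would appear on an extractvalue instruction.
    static const unsigned Idx[] = { 1, 1 };

    unsigned linearOfDouble(const llvm::Type *Aggregate) {
      // No TargetLowering needed after this patch.
      return llvm::ComputeLinearIndex(Aggregate, Idx, Idx + 2, 0);
    }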

Modified: llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/AsmPrinter.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/AsmPrinter.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/AsmPrinter.cpp Tue Oct 26 19:48:03 2010
@@ -91,7 +91,7 @@
 
 
 AsmPrinter::AsmPrinter(TargetMachine &tm, MCStreamer &Streamer)
-  : MachineFunctionPass(&ID),
+  : MachineFunctionPass(ID),
     TM(tm), MAI(tm.getMCAsmInfo()),
     OutContext(Streamer.getContext()),
     OutStreamer(Streamer),
@@ -200,11 +200,17 @@
   case GlobalValue::WeakAnyLinkage:
   case GlobalValue::WeakODRLinkage:
   case GlobalValue::LinkerPrivateWeakLinkage:
+  case GlobalValue::LinkerPrivateWeakDefAutoLinkage:
     if (MAI->getWeakDefDirective() != 0) {
       // .globl _foo
       OutStreamer.EmitSymbolAttribute(GVSym, MCSA_Global);
-      // .weak_definition _foo
-      OutStreamer.EmitSymbolAttribute(GVSym, MCSA_WeakDefinition);
+
+      if ((GlobalValue::LinkageTypes)Linkage !=
+          GlobalValue::LinkerPrivateWeakDefAutoLinkage)
+        // .weak_definition _foo
+        OutStreamer.EmitSymbolAttribute(GVSym, MCSA_WeakDefinition);
+      else
+        OutStreamer.EmitSymbolAttribute(GVSym, MCSA_WeakDefAutoPrivate);
     } else if (MAI->getLinkOnceDirective() != 0) {
       // .globl _foo
       OutStreamer.EmitSymbolAttribute(GVSym, MCSA_Global);
@@ -276,8 +282,12 @@
     
     // Handle common symbols.
     if (GVKind.isCommon()) {
+      unsigned Align = 1 << AlignLog;
+      if (!getObjFileLowering().getCommDirectiveSupportsAlignment())
+        Align = 0;
+          
       // .comm _foo, 42, 4
-      OutStreamer.EmitCommonSymbol(GVSym, Size, 1 << AlignLog);
+      OutStreamer.EmitCommonSymbol(GVSym, Size, Align);
       return;
     }
     
@@ -295,11 +305,15 @@
       OutStreamer.EmitLocalCommonSymbol(GVSym, Size);
       return;
     }
+
+    unsigned Align = 1 << AlignLog;
+    if (!getObjFileLowering().getCommDirectiveSupportsAlignment())
+      Align = 0;
     
     // .local _foo
     OutStreamer.EmitSymbolAttribute(GVSym, MCSA_Local);
     // .comm _foo, 42, 4
-    OutStreamer.EmitCommonSymbol(GVSym, Size, 1 << AlignLog);
+    OutStreamer.EmitCommonSymbol(GVSym, Size, Align);
     return;
   }
   
@@ -321,6 +335,13 @@
   // Handle thread local data for mach-o which requires us to output an
   // additional structure of data and mangle the original symbol so that we
   // can reference it later.
+  //
+  // TODO: This should become an "emit thread local global" method on TLOF.
+  // All of this macho specific stuff should be sunk down into TLOFMachO and
+  // stuff like "TLSExtraDataSection" should no longer be part of the parent
+  // TLOF class.  This will also make it more obvious that stuff like
+  // MCStreamer::EmitTBSSSymbol is macho specific and only called from macho
+  // specific code.
   if (GVKind.isThreadLocal() && MAI->hasMachoTBSSDirective()) {
     // Emit the .tbss symbol
     MCSymbol *MangSym = 
@@ -617,7 +638,7 @@
 
       if (ShouldPrintDebugScopes) {
         NamedRegionTimer T(DbgTimerName, DWARFGroupName, TimePassesIsEnabled);
-        DD->beginScope(II);
+        DD->beginInstruction(II);
       }
       
       if (isVerbose())
@@ -651,7 +672,7 @@
       
       if (ShouldPrintDebugScopes) {
         NamedRegionTimer T(DbgTimerName, DWARFGroupName, TimePassesIsEnabled);
-        DD->endScope(II);
+        DD->endInstruction(II);
       }
     }
   }
@@ -1212,6 +1233,22 @@
     OutStreamer.EmitSymbolValue(SetLabel, 4, 0/*AddrSpace*/);
   }
 }
+
+/// EmitLabelPlusOffset - Emit something like ".long Label+Offset", where
+/// the size in bytes of the directive is specified by Size and Label
+/// specifies the label.
+void AsmPrinter::EmitLabelPlusOffset(const MCSymbol *Label, uint64_t Offset,
+                                     unsigned Size) const {
+
+  // Emit Label+Offset with the directive size requested by the caller.
+  const MCExpr *Plus =
+    MCBinaryExpr::CreateAdd(MCSymbolRefExpr::Create(Label, OutContext),
+                            MCConstantExpr::Create(Offset, OutContext),
+                            OutContext);
+
+  OutStreamer.EmitValue(Plus, Size, 0/*AddrSpace*/);
+}
     
 
 //===----------------------------------------------------------------------===//
@@ -1250,6 +1287,7 @@
   
   if (const GlobalValue *GV = dyn_cast<GlobalValue>(CV))
     return MCSymbolRefExpr::Create(AP.Mang->getSymbol(GV), Ctx);
+
   if (const BlockAddress *BA = dyn_cast<BlockAddress>(CV))
     return MCSymbolRefExpr::Create(AP.GetBlockAddressSymbol(BA), Ctx);
   
@@ -1268,10 +1306,17 @@
           ConstantFoldConstantExpression(CE, AP.TM.getTargetData()))
       if (C != CE)
         return LowerConstant(C, AP);
-#ifndef NDEBUG
-    CE->dump();
-#endif
-    llvm_unreachable("FIXME: Don't support this constant expr");
+
+    // Otherwise report the problem to the user.
+    {
+      std::string S;
+      raw_string_ostream OS(S);
+      OS << "Unsupported expression in static initializer: ";
+      WriteAsOperand(OS, CE, /*PrintType=*/false,
+                     !AP.MF ? 0 : AP.MF->getFunction()->getParent());
+      report_fatal_error(OS.str());
+    }
+    return MCConstantExpr::Create(0, Ctx);
   case Instruction::GetElementPtr: {
     const TargetData &TD = *AP.TM.getTargetData();
     // Generate a symbolic expression for the byte address
@@ -1419,21 +1464,6 @@
          "Layout of constant struct may be incorrect!");
 }
 
-static void EmitGlobalConstantUnion(const ConstantUnion *CU, 
-                                    unsigned AddrSpace, AsmPrinter &AP) {
-  const TargetData *TD = AP.TM.getTargetData();
-  unsigned Size = TD->getTypeAllocSize(CU->getType());
-
-  const Constant *Contents = CU->getOperand(0);
-  unsigned FilledSize = TD->getTypeAllocSize(Contents->getType());
-    
-  // Print the actually filled part
-  EmitGlobalConstantImpl(Contents, AddrSpace, AP);
-
-  // And pad with enough zeroes
-  AP.OutStreamer.EmitZeros(Size-FilledSize, AddrSpace);
-}
-
 static void EmitGlobalConstantFP(const ConstantFP *CFP, unsigned AddrSpace,
                                  AsmPrinter &AP) {
   // FP Constants are printed as integer constants to avoid losing
@@ -1536,7 +1566,7 @@
     case 8:
       if (AP.isVerbose())
         AP.OutStreamer.GetCommentOS() << format("0x%llx\n", CI->getZExtValue());
-        AP.OutStreamer.EmitIntValue(CI->getZExtValue(), Size, AddrSpace);
+      AP.OutStreamer.EmitIntValue(CI->getZExtValue(), Size, AddrSpace);
       return;
     default:
       EmitGlobalConstantLargeInt(CI, AddrSpace, AP);
@@ -1559,9 +1589,6 @@
     return;
   }
   
-  if (const ConstantUnion *CVU = dyn_cast<ConstantUnion>(CV))
-    return EmitGlobalConstantUnion(CVU, AddrSpace, AP);
-  
   if (const ConstantVector *V = dyn_cast<ConstantVector>(CV))
     return EmitGlobalConstantVector(V, AddrSpace, AP);
   

Modified: llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp Tue Oct 26 19:48:03 2010
@@ -36,7 +36,7 @@
   if (isVerbose() && Desc)
     OutStreamer.AddComment(Desc);
     
-  if (MAI->hasLEB128()) {
+  if (MAI->hasLEB128() && OutStreamer.hasRawTextSupport()) {
     // FIXME: MCize.
     OutStreamer.EmitRawText("\t.sleb128\t" + Twine(Value));
     return;
@@ -61,7 +61,7 @@
   if (isVerbose() && Desc)
     OutStreamer.AddComment(Desc);
  
-  if (MAI->hasLEB128() && PadTo == 0) {
+  if (MAI->hasLEB128() && PadTo == 0 && OutStreamer.hasRawTextSupport()) {
     // FIXME: MCize.
     OutStreamer.EmitRawText("\t.uleb128\t" + Twine(Value));
     return;
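
The new hasRawTextSupport() guards matter because the ".sleb128"/".uleb128"
fast path emits raw text, which only the textual streamer supports; object
streamers now fall through to the generic byte-at-a-time emission below the
guard. For reference, ULEB128 packs 7 data bits per byte, least significant
first, with the top bit as a continuation flag, so 624485 encodes as
0xE5 0x8E 0x26. A minimal standalone encoder (not the streamer's code):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // 7 data bits per byte; MSB set on every byte except the last.
    static std::vector<uint8_t> encodeULEB128(uint64_t Value) {
      std::vector<uint8_t> Out;
      do {
        uint8_t Byte = Value & 0x7f;
        Value >>= 7;
        if (Value != 0)
          Byte |= 0x80;            // more bytes follow
        Out.push_back(Byte);
      } while (Value != 0);
      return Out;
    }

    int main() {
      std::vector<uint8_t> E = encodeULEB128(624485);
      assert(E.size() == 3 && E[0] == 0xE5 && E[1] == 0x8E && E[2] == 0x26);
      return 0;
    }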

Modified: llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp Tue Oct 26 19:48:03 2010
@@ -37,12 +37,12 @@
 /// EmitInlineAsm - Emit a blob of inline asm to the output streamer.
 void AsmPrinter::EmitInlineAsm(StringRef Str, unsigned LocCookie) const {
   assert(!Str.empty() && "Can't emit empty inline asm block");
-  
+
   // Remember if the buffer is nul terminated or not so we can avoid a copy.
   bool isNullTerminated = Str.back() == 0;
   if (isNullTerminated)
     Str = Str.substr(0, Str.size()-1);
-  
+
   // If the output streamer is actually a .s file, just emit the blob textually.
   // This is useful in case the asm parser doesn't handle something but the
   // system assembler does.
@@ -50,9 +50,9 @@
     OutStreamer.EmitRawText(Str);
     return;
   }
-  
+
   SourceMgr SrcMgr;
-  
+
   // If the current LLVMContext has an inline asm handler, set it in SourceMgr.
   LLVMContext &LLVMCtx = MMI->getModule()->getContext();
   bool HasDiagHandler = false;
@@ -61,7 +61,7 @@
                           LLVMCtx.getInlineAsmDiagnosticContext(), LocCookie);
     HasDiagHandler = true;
   }
-  
+
   MemoryBuffer *Buffer;
   if (isNullTerminated)
     Buffer = MemoryBuffer::getMemBuffer(Str, "<inline asm>");
@@ -70,7 +70,7 @@
 
   // Tell SrcMgr about this buffer, it takes ownership of the buffer.
   SrcMgr.AddNewSourceBuffer(Buffer, SMLoc());
-  
+
   OwningPtr<MCAsmParser> Parser(createMCAsmParser(TM.getTarget(), SrcMgr,
                                                   OutContext, OutStreamer,
                                                   *MAI));
@@ -92,15 +92,15 @@
 /// instruction that is an inline asm.
 void AsmPrinter::EmitInlineAsm(const MachineInstr *MI) const {
   assert(MI->isInlineAsm() && "printInlineAsm only works on inline asms");
-  
+
   unsigned NumOperands = MI->getNumOperands();
-  
+
   // Count the number of register definitions to find the asm string.
   unsigned NumDefs = 0;
   for (; MI->getOperand(NumDefs).isReg() && MI->getOperand(NumDefs).isDef();
        ++NumDefs)
     assert(NumDefs != NumOperands-2 && "No asm string?");
-  
+
   assert(MI->getOperand(NumDefs).isSymbol() && "No asm string?");
 
   // Disassemble the AsmStr, printing out the literal pieces, the operands, etc.
@@ -138,12 +138,12 @@
             break;
           }
   }
-  
+
   // Emit the inline asm to a temporary string so we can emit it through
   // EmitInlineAsm.
   SmallString<256> StringData;
   raw_svector_ostream OS(StringData);
-  
+
   OS << '\t';
 
   // The variant of the current asmprinter.
@@ -151,7 +151,7 @@
 
   int CurVariant = -1;            // The number of the {.|.|.} region we are in.
   const char *LastEmitted = AsmStr; // One past the last character emitted.
-  
+
   while (*LastEmitted) {
     switch (*LastEmitted) {
     default: {
@@ -199,18 +199,18 @@
         ++LastEmitted;  // consume ')' character.
         if (CurVariant == -1)
           OS << '}';     // this is gcc's behavior for } outside a variant
-        else 
+        else
           CurVariant = -1;
         break;
       }
       if (Done) break;
-      
+
       bool HasCurlyBraces = false;
       if (*LastEmitted == '{') {     // ${variable}
         ++LastEmitted;               // Consume '{' character.
         HasCurlyBraces = true;
       }
-      
+
       // If we have ${:foo}, then this is not a real operand reference, it is a
       // "magic" string reference, just like in .td files.  Arrange to call
       // PrintSpecial.
@@ -221,25 +221,25 @@
         if (StrEnd == 0)
           report_fatal_error("Unterminated ${:foo} operand in inline asm"
                              " string: '" + Twine(AsmStr) + "'");
-        
+
         std::string Val(StrStart, StrEnd);
         PrintSpecial(MI, OS, Val.c_str());
         LastEmitted = StrEnd+1;
         break;
       }
-            
+
       const char *IDStart = LastEmitted;
       const char *IDEnd = IDStart;
-      while (*IDEnd >= '0' && *IDEnd <= '9') ++IDEnd;      
-      
+      while (*IDEnd >= '0' && *IDEnd <= '9') ++IDEnd;
+
       unsigned Val;
       if (StringRef(IDStart, IDEnd-IDStart).getAsInteger(10, Val))
         report_fatal_error("Bad $ operand number in inline asm string: '" +
                            Twine(AsmStr) + "'");
       LastEmitted = IDEnd;
-      
+
       char Modifier[2] = { 0, 0 };
-      
+
       if (HasCurlyBraces) {
         // If we have curly braces, check for a modifier character.  This
         // supports syntax like ${0:u}, which correspond to "%u0" in GCC asm.
@@ -248,21 +248,21 @@
           if (*LastEmitted == 0)
             report_fatal_error("Bad ${:} expression in inline asm string: '" +
                                Twine(AsmStr) + "'");
-          
+
           Modifier[0] = *LastEmitted;
           ++LastEmitted;    // Consume modifier character.
         }
-        
+
         if (*LastEmitted != '}')
           report_fatal_error("Bad ${} expression in inline asm string: '" +
                              Twine(AsmStr) + "'");
         ++LastEmitted;    // Consume '}' character.
       }
-      
+
       if (Val >= NumOperands-1)
         report_fatal_error("Invalid $ operand number in inline asm string: '" +
                            Twine(AsmStr) + "'");
-      
+
       // Okay, we finally have a value number.  Ask the target to print this
       // operand!
       if (CurVariant == -1 || CurVariant == AsmPrinterVariant) {
@@ -311,7 +311,7 @@
   }
   OS << '\n' << (char)0;  // null terminate string.
   EmitInlineAsm(OS.str(), LocCookie);
-  
+
   // Emit the #NOAPP end marker.  This has to happen even if verbose-asm isn't
   // enabled, so we use EmitRawText.
   if (OutStreamer.hasRawTextSupport())
@@ -335,7 +335,7 @@
   } else if (!strcmp(Code, "uid")) {
     // Comparing the address of MI isn't sufficient, because machineinstrs may
     // be allocated to the same address across functions.
-    
+
     // If this is a new LastFn instruction, bump the counter.
     if (LastMI != MI || LastFn != getFunctionNumber()) {
       ++Counter;
@@ -349,7 +349,7 @@
     Msg << "Unknown special formatter '" << Code
          << "' for machine instr: " << *MI;
     report_fatal_error(Msg.str());
-  }    
+  }
 }
 
 /// PrintAsmOperand - Print the specified operand of MI, an INLINEASM

Modified: llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/CMakeLists.txt?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/CMakeLists.txt (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/CMakeLists.txt Tue Oct 26 19:48:03 2010
@@ -8,4 +8,3 @@
   OcamlGCPrinter.cpp
   )
 
-target_link_libraries (LLVMAsmPrinter LLVMMCParser)

Modified: llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/DwarfDebug.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/DwarfDebug.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/DwarfDebug.cpp Tue Oct 26 19:48:03 2010
@@ -30,6 +30,7 @@
 #include "llvm/Target/TargetRegisterInfo.h"
 #include "llvm/Target/TargetOptions.h"
 #include "llvm/Analysis/DebugInfo.h"
+#include "llvm/ADT/Statistic.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/Support/CommandLine.h"
@@ -52,6 +53,10 @@
      cl::desc("Make an absense of debug location information explicit."),
      cl::init(false));
 
+#ifndef NDEBUG
+STATISTIC(BlocksWithoutLineNo, "Number of blocks without any line number");
+#endif
+
 namespace {
   const char *DWARFGroupName = "DWARF Emission";
   const char *DbgTimerName = "DWARF Debug Writer";
@@ -180,6 +185,73 @@
   DIE *getDIE()                      const { return TheDIE; }
   void setDotDebugLocOffset(unsigned O)    { DotDebugLocOffset = O; }
   unsigned getDotDebugLocOffset()    const { return DotDebugLocOffset; }
+  StringRef getName()                const { return Var.getName(); }
+  unsigned getTag()                  const { return Var.getTag(); }
+  bool variableHasComplexAddress()   const {
+    assert(Var.Verify() && "Invalid complex DbgVariable!");
+    return Var.hasComplexAddress();
+  }
+  bool isBlockByrefVariable()        const {
+    assert(Var.Verify() && "Invalid complex DbgVariable!");
+    return Var.isBlockByrefVariable();
+  }
+  unsigned getNumAddrElements()      const { 
+    assert(Var.Verify() && "Invalid complex DbgVariable!");
+    return Var.getNumAddrElements();
+  }
+  uint64_t getAddrElement(unsigned i) const {
+    return Var.getAddrElement(i);
+  }
+  DIType getType()               const {
+    DIType Ty = Var.getType();
+    // FIXME: isBlockByrefVariable should be reformulated in terms of complex
+    // addresses instead.
+    if (Var.isBlockByrefVariable()) {
+      /* Byref variables, in Blocks, are declared by the programmer as
+         "SomeType VarName;", but the compiler creates a
+         __Block_byref_x_VarName struct, and gives the variable VarName
+         either the struct, or a pointer to the struct, as its type.  This
+         is necessary for various behind-the-scenes things the compiler
+         needs to do with by-reference variables in blocks.
+         
+         However, as far as the original *programmer* is concerned, the
+         variable should still have type 'SomeType', as originally declared.
+         
+         The following function dives into the __Block_byref_x_VarName
+         struct to find the original type of the variable.  This will be
+         passed back to the code generating the type for the Debug
+         Information Entry for the variable 'VarName'.  'VarName' will then
+         have the original type 'SomeType' in its debug information.
+         
+         The original type 'SomeType' will be the type of the field named
+         'VarName' inside the __Block_byref_x_VarName struct.
+         
+         NOTE: In order for this to not completely fail on the debugger
+         side, the Debug Information Entry for the variable VarName needs to
+         have a DW_AT_location that tells the debugger how to unwind through
+         the pointers and __Block_byref_x_VarName struct to find the actual
+         value of the variable.  The function addBlockByrefType does this.  */
+      DIType subType = Ty;
+      unsigned tag = Ty.getTag();
+      
+      if (tag == dwarf::DW_TAG_pointer_type) {
+        DIDerivedType DTy = DIDerivedType(Ty);
+        subType = DTy.getTypeDerivedFrom();
+      }
+      
+      DICompositeType blockStruct = DICompositeType(subType);
+      DIArray Elements = blockStruct.getTypeArray();
+      
+      for (unsigned i = 0, N = Elements.getNumElements(); i < N; ++i) {
+        DIDescriptor Element = Elements.getElement(i);
+        DIDerivedType DT = DIDerivedType(Element);
+        if (getName() == DT.getName())
+          return (DT.getTypeDerivedFrom());
+      }
+      return Ty;
+    }
+    return Ty;
+  }
 };
 
 //===----------------------------------------------------------------------===//
@@ -221,7 +293,7 @@
   const MDNode *getInlinedAt()         const { return InlinedAtLocation; }
   const MDNode *getScopeNode()         const { return Desc; }
   const SmallVector<DbgScope *, 4> &getScopes() { return Scopes; }
-  const SmallVector<DbgVariable *, 8> &getVariables() { return Variables; }
+  const SmallVector<DbgVariable *, 8> &getDbgVariables() { return Variables; }
   const SmallVector<DbgRange, 4> &getRanges() { return Ranges; }
 
   /// openInsnRange - This scope covers instruction range starting from MI.
@@ -321,7 +393,6 @@
   DwarfFrameSectionSym = DwarfInfoSectionSym = DwarfAbbrevSectionSym = 0;
   DwarfStrSectionSym = TextSectionSym = 0;
   DwarfDebugRangeSectionSym = DwarfDebugLocSectionSym = 0;
-  DwarfDebugLineSectionSym = CurrentLineSectionSym = 0;
   FunctionBeginSym = FunctionEndSym = 0;
   DIEIntegerOne = new (DIEValueAllocator) DIEInteger(1);
   {
@@ -435,14 +506,16 @@
 
 /// addSourceLine - Add location information to specified debug information
 /// entry.
-void DwarfDebug::addSourceLine(DIE *Die, const DIVariable *V) {
+void DwarfDebug::addSourceLine(DIE *Die, DIVariable V) {
   // Verify variable.
-  if (!V->Verify())
+  if (!V.Verify())
     return;
 
-  unsigned Line = V->getLineNumber();
-  unsigned FileID = GetOrCreateSourceID(V->getContext().getDirectory(),
-                                        V->getContext().getFilename());
+  unsigned Line = V.getLineNumber();
+  if (Line == 0)
+    return;
+  unsigned FileID = GetOrCreateSourceID(V.getContext().getDirectory(),
+                                        V.getContext().getFilename());
   assert(FileID && "Invalid file id");
   addUInt(Die, dwarf::DW_AT_decl_file, 0, FileID);
   addUInt(Die, dwarf::DW_AT_decl_line, 0, Line);
@@ -450,14 +523,16 @@
 
 /// addSourceLine - Add location information to specified debug information
 /// entry.
-void DwarfDebug::addSourceLine(DIE *Die, const DIGlobalVariable *G) {
+void DwarfDebug::addSourceLine(DIE *Die, DIGlobalVariable G) {
   // Verify global variable.
-  if (!G->Verify())
+  if (!G.Verify())
     return;
 
-  unsigned Line = G->getLineNumber();
-  unsigned FileID = GetOrCreateSourceID(G->getContext().getDirectory(),
-                                        G->getContext().getFilename());
+  unsigned Line = G.getLineNumber();
+  if (Line == 0)
+    return;
+  unsigned FileID = GetOrCreateSourceID(G.getContext().getDirectory(),
+                                        G.getContext().getFilename());
   assert(FileID && "Invalid file id");
   addUInt(Die, dwarf::DW_AT_decl_file, 0, FileID);
   addUInt(Die, dwarf::DW_AT_decl_line, 0, Line);
@@ -465,19 +540,19 @@
 
 /// addSourceLine - Add location information to specified debug information
 /// entry.
-void DwarfDebug::addSourceLine(DIE *Die, const DISubprogram *SP) {
+void DwarfDebug::addSourceLine(DIE *Die, DISubprogram SP) {
   // Verify subprogram.
-  if (!SP->Verify())
+  if (!SP.Verify())
     return;
   // If the line number is 0, don't add it.
-  if (SP->getLineNumber() == 0)
+  if (SP.getLineNumber() == 0)
     return;
 
-  unsigned Line = SP->getLineNumber();
-  if (!SP->getContext().Verify())
+  unsigned Line = SP.getLineNumber();
+  if (!SP.getContext().Verify())
     return;
-  unsigned FileID = GetOrCreateSourceID(SP->getDirectory(),
-                                        SP->getFilename());
+  unsigned FileID = GetOrCreateSourceID(SP.getDirectory(),
+                                        SP.getFilename());
   assert(FileID && "Invalid file id");
   addUInt(Die, dwarf::DW_AT_decl_file, 0, FileID);
   addUInt(Die, dwarf::DW_AT_decl_line, 0, Line);
@@ -485,16 +560,16 @@
 
 /// addSourceLine - Add location information to specified debug information
 /// entry.
-void DwarfDebug::addSourceLine(DIE *Die, const DIType *Ty) {
+void DwarfDebug::addSourceLine(DIE *Die, DIType Ty) {
   // Verify type.
-  if (!Ty->Verify())
+  if (!Ty.Verify())
     return;
 
-  unsigned Line = Ty->getLineNumber();
-  if (!Ty->getContext().Verify())
+  unsigned Line = Ty.getLineNumber();
+  if (Line == 0 || !Ty.getContext().Verify())
     return;
-  unsigned FileID = GetOrCreateSourceID(Ty->getContext().getDirectory(),
-                                        Ty->getContext().getFilename());
+  unsigned FileID = GetOrCreateSourceID(Ty.getContext().getDirectory(),
+                                        Ty.getContext().getFilename());
   assert(FileID && "Invalid file id");
   addUInt(Die, dwarf::DW_AT_decl_file, 0, FileID);
   addUInt(Die, dwarf::DW_AT_decl_line, 0, Line);
@@ -502,14 +577,16 @@
 
 /// addSourceLine - Add location information to specified debug information
 /// entry.
-void DwarfDebug::addSourceLine(DIE *Die, const DINameSpace *NS) {
+void DwarfDebug::addSourceLine(DIE *Die, DINameSpace NS) {
   // Verify namespace.
-  if (!NS->Verify())
+  if (!NS.Verify())
     return;
 
-  unsigned Line = NS->getLineNumber();
-  StringRef FN = NS->getFilename();
-  StringRef Dir = NS->getDirectory();
+  unsigned Line = NS.getLineNumber();
+  if (Line == 0)
+    return;
+  StringRef FN = NS.getFilename();
+  StringRef Dir = NS.getDirectory();
 
   unsigned FileID = GetOrCreateSourceID(Dir, FN);
   assert(FileID && "Invalid file id");
@@ -517,55 +594,21 @@
   addUInt(Die, dwarf::DW_AT_decl_line, 0, Line);
 }
 
-/* Byref variables, in Blocks, are declared by the programmer as
-   "SomeType VarName;", but the compiler creates a
-   __Block_byref_x_VarName struct, and gives the variable VarName
-   either the struct, or a pointer to the struct, as its type.  This
-   is necessary for various behind-the-scenes things the compiler
-   needs to do with by-reference variables in blocks.
-
-   However, as far as the original *programmer* is concerned, the
-   variable should still have type 'SomeType', as originally declared.
-
-   The following function dives into the __Block_byref_x_VarName
-   struct to find the original type of the variable.  This will be
-   passed back to the code generating the type for the Debug
-   Information Entry for the variable 'VarName'.  'VarName' will then
-   have the original type 'SomeType' in its debug information.
-
-   The original type 'SomeType' will be the type of the field named
-   'VarName' inside the __Block_byref_x_VarName struct.
-
-   NOTE: In order for this to not completely fail on the debugger
-   side, the Debug Information Entry for the variable VarName needs to
-   have a DW_AT_location that tells the debugger how to unwind through
-   the pointers and __Block_byref_x_VarName struct to find the actual
-   value of the variable.  The function addBlockByrefType does this.  */
-
-/// Find the type the programmer originally declared the variable to be
-/// and return that type.
-///
-DIType DwarfDebug::getBlockByrefType(DIType Ty, std::string Name) {
-
-  DIType subType = Ty;
-  unsigned tag = Ty.getTag();
-
-  if (tag == dwarf::DW_TAG_pointer_type) {
-    DIDerivedType DTy = DIDerivedType(Ty);
-    subType = DTy.getTypeDerivedFrom();
-  }
-
-  DICompositeType blockStruct = DICompositeType(subType);
-  DIArray Elements = blockStruct.getTypeArray();
-
-  for (unsigned i = 0, N = Elements.getNumElements(); i < N; ++i) {
-    DIDescriptor Element = Elements.getElement(i);
-    DIDerivedType DT = DIDerivedType(Element);
-    if (Name == DT.getName())
-      return (DT.getTypeDerivedFrom());
-  }
+/// addVariableAddress - Add DW_AT_location attribute for a DbgVariable based
+/// on provided frame index.
+void DwarfDebug::addVariableAddress(DbgVariable *&DV, DIE *Die, int64_t FI) {
+  MachineLocation Location;
+  unsigned FrameReg;
+  const TargetRegisterInfo *RI = Asm->TM.getRegisterInfo();
+  int Offset = RI->getFrameIndexReference(*Asm->MF, FI, FrameReg);
+  Location.set(FrameReg, Offset);
 
-  return Ty;
+  if (DV->variableHasComplexAddress())
+    addComplexAddress(DV, Die, dwarf::DW_AT_location, Location);
+  else if (DV->isBlockByrefVariable())
+    addBlockByrefAddress(DV, Die, dwarf::DW_AT_location, Location);
+  else
+    addAddress(Die, dwarf::DW_AT_location, Location);
 }
 
 /// addComplexAddress - Start with the address based on the location provided,
@@ -576,8 +619,7 @@
 void DwarfDebug::addComplexAddress(DbgVariable *&DV, DIE *Die,
                                    unsigned Attribute,
                                    const MachineLocation &Location) {
-  const DIVariable &VD = DV->getVariable();
-  DIType Ty = VD.getType();
+  DIType Ty = DV->getType();
 
   // Decode the original location, and use that as the start of the byref
   // variable's location.
@@ -604,12 +646,12 @@
     addUInt(Block, 0, dwarf::DW_FORM_sdata, Location.getOffset());
   }
 
-  for (unsigned i = 0, N = VD.getNumAddrElements(); i < N; ++i) {
-    uint64_t Element = VD.getAddrElement(i);
+  for (unsigned i = 0, N = DV->getNumAddrElements(); i < N; ++i) {
+    uint64_t Element = DV->getAddrElement(i);
 
     if (Element == DIFactory::OpPlus) {
       addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_plus_uconst);
-      addUInt(Block, 0, dwarf::DW_FORM_udata, VD.getAddrElement(++i));
+      addUInt(Block, 0, dwarf::DW_FORM_udata, DV->getAddrElement(++i));
     } else if (Element == DIFactory::OpDeref) {
       addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_deref);
     } else llvm_unreachable("unknown DIFactory Opcode");
@@ -682,13 +724,12 @@
 void DwarfDebug::addBlockByrefAddress(DbgVariable *&DV, DIE *Die,
                                       unsigned Attribute,
                                       const MachineLocation &Location) {
-  const DIVariable &VD = DV->getVariable();
-  DIType Ty = VD.getType();
+  DIType Ty = DV->getType();
   DIType TmpTy = Ty;
   unsigned Tag = Ty.getTag();
   bool isPointer = false;
 
-  StringRef varName = VD.getName();
+  StringRef varName = DV->getName();
 
   if (Tag == dwarf::DW_TAG_pointer_type) {
     DIDerivedType DTy = DIDerivedType(Ty);
@@ -781,6 +822,16 @@
   const TargetRegisterInfo *RI = Asm->TM.getRegisterInfo();
   unsigned Reg = RI->getDwarfRegNum(Location.getReg(), false);
   DIEBlock *Block = new (DIEValueAllocator) DIEBlock();
+  const TargetRegisterInfo *TRI = Asm->TM.getRegisterInfo();
+
+  if (TRI->getFrameRegister(*Asm->MF) == Location.getReg()
+      && Location.getOffset()) {
+    // If variable offset is based in frame register then use fbreg.
+    addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_fbreg);
+    addSInt(Block, 0, dwarf::DW_FORM_sdata, Location.getOffset());
+    addBlock(Die, Attribute, 0, Block);
+    return;
+  }
 
   if (Location.isReg()) {
     if (Reg < 32) {
@@ -868,8 +919,7 @@
     DIE *ContextDIE = getOrCreateNameSpace(DINameSpace(Context));
     ContextDIE->addChild(Die);
   } else if (Context.isSubprogram()) {
-    DIE *ContextDIE = createSubprogramDIE(DISubprogram(Context),
-                                          /*MakeDecl=*/false);
+    DIE *ContextDIE = createSubprogramDIE(DISubprogram(Context));
     ContextDIE->addChild(Die);
   } else if (DIE *ContextDIE = getCompileUnit(Context)->getDIE(Context))
     ContextDIE->addChild(Die);
@@ -966,7 +1016,7 @@
 
   // Add source line info if available and TyDesc is not a forward declaration.
   if (!DTy.isForwardDecl())
-    addSourceLine(&Buffer, &DTy);
+    addSourceLine(&Buffer, DTy);
 }
 
 /// constructTypeDIE - Construct type DIE from DICompositeType.
@@ -1003,16 +1053,23 @@
     DIDescriptor RTy = Elements.getElement(0);
     addType(&Buffer, DIType(RTy));
 
-    // Add prototype flag.
-    addUInt(&Buffer, dwarf::DW_AT_prototyped, dwarf::DW_FORM_flag, 1);
-
+    bool isPrototyped = true;
     // Add arguments.
     for (unsigned i = 1, N = Elements.getNumElements(); i < N; ++i) {
-      DIE *Arg = new DIE(dwarf::DW_TAG_formal_parameter);
       DIDescriptor Ty = Elements.getElement(i);
-      addType(Arg, DIType(Ty));
-      Buffer.addChild(Arg);
+      if (Ty.isUnspecifiedParameter()) {
+        DIE *Arg = new DIE(dwarf::DW_TAG_unspecified_parameters);
+        Buffer.addChild(Arg);
+        isPrototyped = false;
+      } else {
+        DIE *Arg = new DIE(dwarf::DW_TAG_formal_parameter);
+        addType(Arg, DIType(Ty));
+        Buffer.addChild(Arg);
+      }
     }
+    // Add prototype flag.
+    if (isPrototyped)
+      addUInt(&Buffer, dwarf::DW_AT_prototyped, dwarf::DW_FORM_flag, 1);
   }
     break;
   case dwarf::DW_TAG_structure_type:
@@ -1030,8 +1087,21 @@
     for (unsigned i = 0; i < N; ++i) {
       DIDescriptor Element = Elements.getElement(i);
       DIE *ElemDie = NULL;
-      if (Element.isSubprogram())
+      if (Element.isSubprogram()) {
+        DISubprogram SP(Element);
         ElemDie = createSubprogramDIE(DISubprogram(Element));
+        if (SP.isProtected())
+          addUInt(ElemDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_flag,
+                  dwarf::DW_ACCESS_protected);
+        else if (SP.isPrivate())
+          addUInt(ElemDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_flag,
+                  dwarf::DW_ACCESS_private);
+        else 
+          addUInt(ElemDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_flag,
+            dwarf::DW_ACCESS_public);
+        if (SP.isExplicit())
+          addUInt(ElemDie, dwarf::DW_AT_explicit, dwarf::DW_FORM_flag, 1);
+      }
       else if (Element.isVariable()) {
         DIVariable DV(Element);
         ElemDie = new DIE(dwarf::DW_TAG_variable);
@@ -1040,7 +1110,7 @@
         addType(ElemDie, DV.getType());
         addUInt(ElemDie, dwarf::DW_AT_declaration, dwarf::DW_FORM_flag, 1);
         addUInt(ElemDie, dwarf::DW_AT_external, dwarf::DW_FORM_flag, 1);
-        addSourceLine(ElemDie, &DV);
+        addSourceLine(ElemDie, DV);
       } else if (Element.isDerivedType())
         ElemDie = createMemberDIE(DIDerivedType(Element));
       else
@@ -1090,7 +1160,7 @@
 
     // Add source line info if available.
     if (!CTy.isForwardDecl())
-      addSourceLine(&Buffer, &CTy);
+      addSourceLine(&Buffer, CTy);
   }
 }
 
@@ -1160,32 +1230,8 @@
   return LinkageName;
 }
 
-/// createGlobalVariableDIE - Create new DIE using GV.
-DIE *DwarfDebug::createGlobalVariableDIE(const DIGlobalVariable &GV) {
-  // If the global variable was optmized out then no need to create debug info
-  // entry.
-  if (!GV.getGlobal()) return NULL;
-  if (GV.getDisplayName().empty()) return NULL;
-
-  DIE *GVDie = new DIE(dwarf::DW_TAG_variable);
-  addString(GVDie, dwarf::DW_AT_name, dwarf::DW_FORM_string,
-            GV.getDisplayName());
-
-  StringRef LinkageName = GV.getLinkageName();
-  if (!LinkageName.empty())
-    addString(GVDie, dwarf::DW_AT_MIPS_linkage_name, dwarf::DW_FORM_string,
-              getRealLinkageName(LinkageName));
-
-  addType(GVDie, GV.getType());
-  if (!GV.isLocalToUnit())
-    addUInt(GVDie, dwarf::DW_AT_external, dwarf::DW_FORM_flag, 1);
-  addSourceLine(GVDie, &GV);
-
-  return GVDie;
-}
-
 /// createMemberDIE - Create new member DIE.
-DIE *DwarfDebug::createMemberDIE(const DIDerivedType &DT) {
+DIE *DwarfDebug::createMemberDIE(DIDerivedType DT) {
   DIE *MemberDie = new DIE(DT.getTag());
   StringRef Name = DT.getName();
   if (!Name.empty())
@@ -1193,7 +1239,7 @@
 
   addType(MemberDie, DT.getTypeDerivedFrom());
 
-  addSourceLine(MemberDie, &DT);
+  addSourceLine(MemberDie, DT);
 
   DIEBlock *MemLocationDie = new (DIEValueAllocator) DIEBlock();
   addUInt(MemLocationDie, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_plus_uconst);
@@ -1252,7 +1298,8 @@
   else if (DT.isPrivate())
     addUInt(MemberDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_flag,
             dwarf::DW_ACCESS_private);
-  else if (DT.getTag() == dwarf::DW_TAG_inheritance)
+  // Otherwise C++ member and base classes are considered public.
+  else if (DT.getCompileUnit().getLanguage() == dwarf::DW_LANG_C_plus_plus)
     addUInt(MemberDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_flag,
             dwarf::DW_ACCESS_public);
   if (DT.isVirtual())
@@ -1262,7 +1309,7 @@
 }
 
 /// createSubprogramDIE - Create new DIE using SP.
-DIE *DwarfDebug::createSubprogramDIE(const DISubprogram &SP, bool MakeDecl) {
+DIE *DwarfDebug::createSubprogramDIE(DISubprogram SP) {
   CompileUnit *SPCU = getCompileUnit(SP);
   DIE *SPDie = SPCU->getDIE(SP);
   if (SPDie)
@@ -1278,12 +1325,9 @@
     addString(SPDie, dwarf::DW_AT_MIPS_linkage_name, dwarf::DW_FORM_string,
               getRealLinkageName(LinkageName));
 
-  addSourceLine(SPDie, &SP);
+  addSourceLine(SPDie, SP);
 
-  // Add prototyped tag, if C or ObjC.
-  unsigned Lang = SP.getCompileUnit().getLanguage();
-  if (Lang == dwarf::DW_LANG_C99 || Lang == dwarf::DW_LANG_C89 ||
-      Lang == dwarf::DW_LANG_ObjC)
+  if (SP.isPrototyped()) 
     addUInt(SPDie, dwarf::DW_AT_prototyped, dwarf::DW_FORM_flag, 1);
 
   // Add Return Type.
@@ -1307,7 +1351,7 @@
                                             SP.getContainingType()));
   }
 
-  if (MakeDecl || !SP.isDefinition()) {
+  if (!SP.isDefinition()) {
     addUInt(SPDie, dwarf::DW_AT_declaration, dwarf::DW_FORM_flag, 1);
 
     // Add arguments. Do not add arguments for subprogram definition. They will
@@ -1556,16 +1600,14 @@
 
 /// constructVariableDIE - Construct a DIE for the given DbgVariable.
 DIE *DwarfDebug::constructVariableDIE(DbgVariable *DV, DbgScope *Scope) {
-  // Get the descriptor.
-  const DIVariable &VD = DV->getVariable();
-  StringRef Name = VD.getName();
+  StringRef Name = DV->getName();
   if (Name.empty())
     return NULL;
 
   // Translate tag to proper Dwarf tag.  The result variable is dropped for
   // now.
   unsigned Tag;
-  switch (VD.getTag()) {
+  switch (DV->getTag()) {
   case dwarf::DW_TAG_return_variable:
     return NULL;
   case dwarf::DW_TAG_arg_variable:
@@ -1591,18 +1633,15 @@
                 dwarf::DW_FORM_ref4, AbsDIE);
   else {
     addString(VariableDie, dwarf::DW_AT_name, dwarf::DW_FORM_string, Name);
-    addSourceLine(VariableDie, &VD);
+    addSourceLine(VariableDie, DV->getVariable());
 
     // Add variable type.
-    // FIXME: isBlockByrefVariable should be reformulated in terms of complex
-    // addresses instead.
-    if (VD.isBlockByrefVariable())
-      addType(VariableDie, getBlockByrefType(VD.getType(), Name));
-    else
-      addType(VariableDie, VD.getType());
+    addType(VariableDie, DV->getType());
   }
 
-  if (Tag == dwarf::DW_TAG_formal_parameter && VD.getType().isArtificial())
+  if (Tag == dwarf::DW_TAG_formal_parameter && DV->getType().isArtificial())
+    addUInt(VariableDie, dwarf::DW_AT_artificial, dwarf::DW_FORM_flag, 1);
+  else if (DIVariable(DV->getVariable()).isArtificial())
     addUInt(VariableDie, dwarf::DW_AT_artificial, dwarf::DW_FORM_flag, 1);
 
   if (Scope->isAbstractScope()) {
@@ -1630,9 +1669,16 @@
     bool updated = false;
     // FIXME : Handle getNumOperands != 3
     if (DVInsn->getNumOperands() == 3) {
-      if (DVInsn->getOperand(0).isReg())
-        updated =
-          addRegisterAddress(VariableDie, DVLabel, DVInsn->getOperand(0));
+      if (DVInsn->getOperand(0).isReg()) {
+        const MachineOperand RegOp = DVInsn->getOperand(0);
+        const TargetRegisterInfo *TRI = Asm->TM.getRegisterInfo();
+        if (DVInsn->getOperand(1).isImm() &&
+            TRI->getFrameRegister(*Asm->MF) == RegOp.getReg()) {
+          addVariableAddress(DV, VariableDie, DVInsn->getOperand(1).getImm());
+          updated = true;
+        } else
+          updated = addRegisterAddress(VariableDie, DVLabel, RegOp);
+      }
       else if (DVInsn->getOperand(0).isImm())
         updated = addConstantValue(VariableDie, DVLabel, DVInsn->getOperand(0));
       else if (DVInsn->getOperand(0).isFPImm())
@@ -1659,21 +1705,10 @@
   }
 
   // .. else use frame index, if available.
-  MachineLocation Location;
-  unsigned FrameReg;
-  const TargetRegisterInfo *RI = Asm->TM.getRegisterInfo();
   int FI = 0;
-  if (findVariableFrameIndex(DV, &FI)) {
-    int Offset = RI->getFrameIndexReference(*Asm->MF, FI, FrameReg);
-    Location.set(FrameReg, Offset);
-
-    if (VD.hasComplexAddress())
-      addComplexAddress(DV, VariableDie, dwarf::DW_AT_location, Location);
-    else if (VD.isBlockByrefVariable())
-      addBlockByrefAddress(DV, VariableDie, dwarf::DW_AT_location, Location);
-    else
-      addAddress(VariableDie, dwarf::DW_AT_location, Location);
-  }
+  if (findVariableFrameIndex(DV, &FI))
+    addVariableAddress(DV, VariableDie, FI);
+  
   DV->setDIE(VariableDie);
   return VariableDie;
 
@@ -1725,7 +1760,7 @@
   if (!ScopeDIE) return NULL;
 
   // Add variables to scope.
-  const SmallVector<DbgVariable *, 8> &Variables = Scope->getVariables();
+  const SmallVector<DbgVariable *, 8> &Variables = Scope->getDbgVariables();
   for (unsigned i = 0, N = Variables.size(); i < N; ++i) {
     DIE *VariableDIE = constructVariableDIE(Variables[i], Scope);
     if (VariableDIE)
@@ -1755,6 +1790,10 @@
   unsigned DId;
   assert (DirName.empty() == false && "Invalid directory name!");
 
+  // If FE did not provide a file name, then assume stdin.
+  if (FileName.empty())
+    return GetOrCreateSourceID(DirName, "<stdin>");
+
   StringMap<unsigned>::iterator DI = DirectoryIdMap.find(DirName);
   if (DI != DirectoryIdMap.end()) {
     DId = DI->getValue();
@@ -1796,7 +1835,7 @@
   TheCU->insertDIE(NS, NDie);
   if (!NS.getName().empty())
     addString(NDie, dwarf::DW_AT_name, dwarf::DW_FORM_string, NS.getName());
-  addSourceLine(NDie, &NS);
+  addSourceLine(NDie, NS);
   addToContextOwner(NDie, NS.getContext());
   return NDie;
 }
@@ -1819,9 +1858,12 @@
   // simplifies debug range entries.
   addUInt(Die, dwarf::DW_AT_entry_pc, dwarf::DW_FORM_addr, 0);
   // DW_AT_stmt_list is an offset of line number information for this
-  // compile unit in debug_line section. This offset is calculated
-  // during endMoudle().
-  addLabel(Die, dwarf::DW_AT_stmt_list, dwarf::DW_FORM_data4, 0);
+  // compile unit in debug_line section.
+  if (Asm->MAI->doesDwarfUsesAbsoluteLabelForStmtList())
+    addLabel(Die, dwarf::DW_AT_stmt_list, dwarf::DW_FORM_addr,
+             Asm->GetTempSymbol("section_line"));
+  else
+    addUInt(Die, dwarf::DW_AT_stmt_list, dwarf::DW_FORM_data4, 0);
 
   if (!Dir.empty())
     addString(Die, dwarf::DW_AT_comp_dir, dwarf::DW_FORM_string, Dir);
@@ -1872,64 +1914,98 @@
   return I->second;
 }
 
+/// isUnsignedDIType - Return true if type encoding is unsigned.
+static bool isUnsignedDIType(DIType Ty) {
+  DIDerivedType DTy(Ty);
+  if (DTy.Verify())
+    return isUnsignedDIType(DTy.getTypeDerivedFrom());
+
+  DIBasicType BTy(Ty);
+  if (BTy.Verify()) {
+    unsigned Encoding = BTy.getEncoding();
+    if (Encoding == dwarf::DW_ATE_unsigned ||
+        Encoding == dwarf::DW_ATE_unsigned_char)
+      return true;
+  }
+  return false;
+}
 
 /// constructGlobalVariableDIE - Construct global variable DIE.
 void DwarfDebug::constructGlobalVariableDIE(const MDNode *N) {
-  DIGlobalVariable DI_GV(N);
+  DIGlobalVariable GV(N);
 
   // If debug information is malformed then ignore it.
-  if (DI_GV.Verify() == false)
+  if (GV.Verify() == false)
     return;
 
   // Check for pre-existence.
   CompileUnit *TheCU = getCompileUnit(N);
-  if (TheCU->getDIE(DI_GV))
+  if (TheCU->getDIE(GV))
     return;
 
-  DIE *VariableDie = createGlobalVariableDIE(DI_GV);
-  if (!VariableDie)
-    return;
+  DIType GTy = GV.getType();
+  DIE *VariableDIE = new DIE(GV.getTag());
 
-  // Add to map.
-  TheCU->insertDIE(N, VariableDie);
+  bool isGlobalVariable = GV.getGlobal() != NULL;
 
-  // Add to context owner.
-  DIDescriptor GVContext = DI_GV.getContext();
-  // Do not create specification DIE if context is either compile unit
-  // or a subprogram.
-  if (DI_GV.isDefinition() && !GVContext.isCompileUnit() &&
-      !GVContext.isFile() &&
-      !isSubprogramContext(GVContext)) {
-    // Create specification DIE.
-    DIE *VariableSpecDIE = new DIE(dwarf::DW_TAG_variable);
-    addDIEEntry(VariableSpecDIE, dwarf::DW_AT_specification,
-                dwarf::DW_FORM_ref4, VariableDie);
-    DIEBlock *Block = new (DIEValueAllocator) DIEBlock();
-    addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_addr);
-    addLabel(Block, 0, dwarf::DW_FORM_udata,
-             Asm->Mang->getSymbol(DI_GV.getGlobal()));
-    addBlock(VariableSpecDIE, dwarf::DW_AT_location, 0, Block);
-    addUInt(VariableDie, dwarf::DW_AT_declaration, dwarf::DW_FORM_flag, 1);
-    TheCU->addDie(VariableSpecDIE);
-  } else {
-    DIEBlock *Block = new (DIEValueAllocator) DIEBlock();
-    addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_addr);
-    addLabel(Block, 0, dwarf::DW_FORM_udata,
-             Asm->Mang->getSymbol(DI_GV.getGlobal()));
-    addBlock(VariableDie, dwarf::DW_AT_location, 0, Block);
-  }
-  addToContextOwner(VariableDie, GVContext);
-
-  // Expose as global. FIXME - need to check external flag.
-  TheCU->addGlobal(DI_GV.getName(), VariableDie);
-
-  DIType GTy = DI_GV.getType();
+  // Add name.
+  addString(VariableDIE, dwarf::DW_AT_name, dwarf::DW_FORM_string,
+            GV.getDisplayName());
+  StringRef LinkageName = GV.getLinkageName();
+  if (!LinkageName.empty() && isGlobalVariable)
+    addString(VariableDIE, dwarf::DW_AT_MIPS_linkage_name, dwarf::DW_FORM_string,
+              getRealLinkageName(LinkageName));
+  // Add type.
+  addType(VariableDIE, GTy);
   if (GTy.isCompositeType() && !GTy.getName().empty()
       && !GTy.isForwardDecl()) {
     DIEEntry *Entry = TheCU->getDIEEntry(GTy);
     assert(Entry && "Missing global type!");
     TheCU->addGlobalType(GTy.getName(), Entry->getEntry());
   }
+  // Add scoping info.
+  if (!GV.isLocalToUnit()) {
+    addUInt(VariableDIE, dwarf::DW_AT_external, dwarf::DW_FORM_flag, 1);
+    // Expose as global. 
+    TheCU->addGlobal(GV.getName(), VariableDIE);
+  }
+  // Add line number info.
+  addSourceLine(VariableDIE, GV);
+  // Add to map.
+  TheCU->insertDIE(N, VariableDIE);
+  // Add to context owner.
+  DIDescriptor GVContext = GV.getContext();
+  addToContextOwner(VariableDIE, GVContext);
+  // Add location.
+  if (isGlobalVariable) {
+    DIEBlock *Block = new (DIEValueAllocator) DIEBlock();
+    addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_addr);
+    addLabel(Block, 0, dwarf::DW_FORM_udata,
+             Asm->Mang->getSymbol(GV.getGlobal()));
+    // Do not create specification DIE if context is either compile unit
+    // or a subprogram.
+    if (GV.isDefinition() && !GVContext.isCompileUnit() &&
+        !GVContext.isFile() && !isSubprogramContext(GVContext)) {
+      // Create specification DIE.
+      DIE *VariableSpecDIE = new DIE(dwarf::DW_TAG_variable);
+      addDIEEntry(VariableSpecDIE, dwarf::DW_AT_specification,
+                  dwarf::DW_FORM_ref4, VariableDIE);
+      addBlock(VariableSpecDIE, dwarf::DW_AT_location, 0, Block);
+      addUInt(VariableDIE, dwarf::DW_AT_declaration, dwarf::DW_FORM_flag, 1);
+      TheCU->addDie(VariableSpecDIE);
+    } else {
+      addBlock(VariableDIE, dwarf::DW_AT_location, 0, Block);
+    } 
+  } else if (Constant *C = GV.getConstant()) {
+    if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
+      if (isUnsignedDIType(GTy))
+        addUInt(VariableDIE, dwarf::DW_AT_const_value, dwarf::DW_FORM_udata,
+                CI->getZExtValue());
+      else
+        addSInt(VariableDIE, dwarf::DW_AT_const_value, dwarf::DW_FORM_sdata,
+                CI->getSExtValue());
+    }
+  }
   return;
 }
 
@@ -2006,6 +2082,15 @@
          E = DbgFinder.global_variable_end(); I != E; ++I)
     constructGlobalVariableDIE(*I);
 
+  //getOrCreateTypeDIE
+  if (NamedMDNode *NMD = M->getNamedMetadata("llvm.dbg.enum"))
+    for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i)
+      getOrCreateTypeDIE(DIType(NMD->getOperand(i)));
+
+  if (NamedMDNode *NMD = M->getNamedMetadata("llvm.dbg.ty"))
+    for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i)
+      getOrCreateTypeDIE(DIType(NMD->getOperand(i)));
+
   // Prime section data.
   SectionMap.insert(Asm->getObjFileLowering().getTextSection());
 
@@ -2032,6 +2117,7 @@
 void DwarfDebug::endModule() {
   if (!FirstCU) return;
   const Module *M = MMI->getModule();
+  DenseMap<const MDNode *, DbgScope *> DeadFnScopeMap;
   if (NamedMDNode *AllSPs = M->getNamedMetadata("llvm.dbg.sp")) {
     for (unsigned SI = 0, SE = AllSPs->getNumOperands(); SI != SE; ++SI) {
       if (ProcessedSPNodes.count(AllSPs->getOperand(SI)) != 0) continue;
@@ -2049,6 +2135,7 @@
       unsigned E = NMD->getNumOperands();
       if (!E) continue;
       DbgScope *Scope = new DbgScope(NULL, DIDescriptor(SP), NULL);
+      DeadFnScopeMap[SP] = Scope;
       for (unsigned I = 0; I != E; ++I) {
         DIVariable DV(NMD->getOperand(I));
         if (!DV.Verify()) continue;
@@ -2058,7 +2145,7 @@
       // Construct subprogram DIE and add variables DIEs.
       constructSubprogramDIE(SP);
       DIE *ScopeDIE = getCompileUnit(SP)->getDIE(SP);
-      const SmallVector<DbgVariable *, 8> &Variables = Scope->getVariables();
+      const SmallVector<DbgVariable *, 8> &Variables = Scope->getDbgVariables();
       for (unsigned i = 0, N = Variables.size(); i < N; ++i) {
         DIE *VariableDIE = constructVariableDIE(Variables[i], Scope);
         if (VariableDIE)
@@ -2107,15 +2194,15 @@
   // Compute DIE offsets and sizes.
   computeSizeAndOffsets();
 
-  // Emit source line correspondence into a debug line section.
-  emitDebugLines();
-
   // Emit all the DIEs into a debug info section
   emitDebugInfo();
 
   // Corresponding abbreviations into a abbrev section.
   emitAbbreviations();
 
+  // Emit source line correspondence into a debug line section.
+  emitDebugLines();
+
   // Emit info into a debug pubnames section.
   emitDebugPubNames();
 
@@ -2140,6 +2227,8 @@
   // Emit info into a debug str section.
   emitDebugStr();
 
+  // clean up.
+  DeleteContainerSeconds(DeadFnScopeMap);
   for (DenseMap<const MDNode *, CompileUnit *>::iterator I = CUMap.begin(),
          E = CUMap.end(); I != E; ++I)
     delete I->second;
@@ -2303,17 +2392,30 @@
       }
       End = *MVI;
       MachineLocation MLoc;
-      MLoc.set(Begin->getOperand(0).getReg(), 0);
+      if (Begin->getNumOperands() == 3) {
+        if (Begin->getOperand(0).isReg() && Begin->getOperand(1).isImm())
+          MLoc.set(Begin->getOperand(0).getReg(), Begin->getOperand(1).getImm());
+      } else
+        MLoc = Asm->getDebugValueLocation(Begin);
+
       const MCSymbol *FLabel = getLabelBeforeInsn(Begin);
       const MCSymbol *SLabel = getLabelBeforeInsn(End);
-      DotDebugLocEntries.push_back(DotDebugLocEntry(FLabel, SLabel, MLoc));
+      if (MLoc.getReg())
+        DotDebugLocEntries.push_back(DotDebugLocEntry(FLabel, SLabel, MLoc));
+
       Begin = End;
       if (MVI + 1 == MVE) {
         // If End is the last instruction then its value is valid
         // until the end of the function.
-        MLoc.set(End->getOperand(0).getReg(), 0);
-        DotDebugLocEntries.
-          push_back(DotDebugLocEntry(SLabel, FunctionEndSym, MLoc));
+        MachineLocation EMLoc;
+        if (End->getNumOperands() == 3) {
+          if (End->getOperand(0).isReg() && End->getOperand(1).isImm())
+            EMLoc.set(End->getOperand(0).getReg(), End->getOperand(1).getImm());
+        } else
+          EMLoc = Asm->getDebugValueLocation(End);
+        if (EMLoc.getReg()) 
+          DotDebugLocEntries.
+            push_back(DotDebugLocEntry(SLabel, FunctionEndSym, EMLoc));
       }
     }
     DotDebugLocEntries.push_back(DotDebugLocEntry());
@@ -2355,8 +2457,8 @@
   return I->second;
 }
 
-/// beginScope - Process beginning of a scope.
-void DwarfDebug::beginScope(const MachineInstr *MI) {
+/// beginInstruction - Process beginning of an instruction.
+void DwarfDebug::beginInstruction(const MachineInstr *MI) {
   if (InsnNeedsLabel.count(MI) == 0) {
     LabelsBeforeInsn[MI] = PrevLabel;
     return;
@@ -2390,8 +2492,8 @@
   assert (0 && "Instruction is not processed!");
 }
 
-/// endScope - Process end of a scope.
-void DwarfDebug::endScope(const MachineInstr *MI) {
+/// endInstruction - Process end of an instruction.
+void DwarfDebug::endInstruction(const MachineInstr *MI) {
   if (InsnsEndScopeSet.count(MI) != 0) {
     // Emit a label if this instruction ends a scope.
     MCSymbol *Label = MMI->getContext().CreateTempSymbol();
@@ -2673,12 +2775,37 @@
   return DebugLoc();
 }
 
+#ifndef NDEBUG
+/// CheckLineNumbers - Count basicblocks whose instructions do not have any
+/// line number information.
+static void CheckLineNumbers(const MachineFunction *MF) {
+  for (MachineFunction::const_iterator I = MF->begin(), E = MF->end();
+       I != E; ++I) {
+    bool FoundLineNo = false;
+    for (MachineBasicBlock::const_iterator II = I->begin(), IE = I->end();
+         II != IE; ++II) {
+      const MachineInstr *MI = II;
+      if (!MI->getDebugLoc().isUnknown()) {
+        FoundLineNo = true;
+        break;
+      }
+    }
+    if (!FoundLineNo)
+      ++BlocksWithoutLineNo;      
+  }
+}
+#endif
+
 /// beginFunction - Gather pre-function debug information.  Assumes being
 /// emitted immediately after the function entry point.
 void DwarfDebug::beginFunction(const MachineFunction *MF) {
   if (!MMI->hasDebugInfo()) return;
   if (!extractScopeInformation()) return;
 
+#ifndef NDEBUG
+  CheckLineNumbers(MF);
+#endif
+
   FunctionBeginSym = Asm->GetTempSymbol("func_begin",
                                         Asm->getFunctionNumber());
   // Assumes in correct section after the entry point.
@@ -3019,8 +3146,7 @@
   if (const MCSection *MacroInfo = TLOF.getDwarfMacroInfoSection())
     EmitSectionSym(Asm, MacroInfo);
 
-  DwarfDebugLineSectionSym =
-    EmitSectionSym(Asm, TLOF.getDwarfLineSection(), "section_line");
+  EmitSectionSym(Asm, TLOF.getDwarfLineSection(), "section_line");
   EmitSectionSym(Asm, TLOF.getDwarfLocSection());
   EmitSectionSym(Asm, TLOF.getDwarfPubNamesSection());
   EmitSectionSym(Asm, TLOF.getDwarfPubTypesSection());
@@ -3077,15 +3203,17 @@
     case dwarf::DW_AT_ranges: {
       // DW_AT_range Value encodes offset in debug_range section.
       DIEInteger *V = cast<DIEInteger>(Values[i]);
-      Asm->EmitLabelOffsetDifference(DwarfDebugRangeSectionSym,
-                                     V->getValue(),
-                                     DwarfDebugRangeSectionSym,
-                                     4);
-      break;
-    }
-    case dwarf::DW_AT_stmt_list: {
-      Asm->EmitLabelDifference(CurrentLineSectionSym,
-                               DwarfDebugLineSectionSym, 4);
+
+      if (Asm->MAI->doesDwarfUsesLabelOffsetForRanges()) {
+        Asm->EmitLabelPlusOffset(DwarfDebugRangeSectionSym,
+                                 V->getValue(),
+                                 4);
+      } else {
+        Asm->EmitLabelOffsetDifference(DwarfDebugRangeSectionSym,
+                                       V->getValue(),
+                                       DwarfDebugRangeSectionSym,
+                                       4);
+      }
       break;
     }
     case dwarf::DW_AT_location: {
@@ -3096,6 +3224,14 @@
         Values[i]->EmitValue(Asm, Form);
       break;
     }
+    case dwarf::DW_AT_accessibility: {
+      if (Asm->isVerbose()) {
+        DIEInteger *V = cast<DIEInteger>(Values[i]);
+        Asm->OutStreamer.AddComment(dwarf::AccessibilityString(V->getValue()));
+      }
+      Values[i]->EmitValue(Asm, Form);
+      break;
+    }
     default:
       // Emit an attribute using the defined form.
       Values[i]->EmitValue(Asm, Form);
@@ -3233,8 +3369,6 @@
                             Asm->getObjFileLowering().getDwarfLineSection());
 
   // Construct the section header.
-  CurrentLineSectionSym = Asm->GetTempSymbol("section_line_begin");
-  Asm->OutStreamer.EmitLabel(CurrentLineSectionSym);
   Asm->OutStreamer.AddComment("Length of Source Line Info");
   Asm->EmitLabelDifference(Asm->GetTempSymbol("line_end"),
                            Asm->GetTempSymbol("line_begin"), 4);
@@ -3633,15 +3767,30 @@
       Asm->OutStreamer.EmitSymbolValue(Entry.End, Size, 0);
       const TargetRegisterInfo *RI = Asm->TM.getRegisterInfo();
       unsigned Reg = RI->getDwarfRegNum(Entry.Loc.getReg(), false);
-      if (Reg < 32) {
+      if (int Offset =  Entry.Loc.getOffset()) {
+        // If the value is at a certain offset from frame register then
+        // use DW_OP_fbreg.
+        unsigned OffsetSize = Offset ? MCAsmInfo::getSLEB128Size(Offset) : 1;
         Asm->OutStreamer.AddComment("Loc expr size");
-        Asm->EmitInt16(1);
-        Asm->EmitInt8(dwarf::DW_OP_reg0 + Reg);
+        Asm->EmitInt16(1 + OffsetSize);
+        Asm->OutStreamer.AddComment(
+          dwarf::OperationEncodingString(dwarf::DW_OP_fbreg));
+        Asm->EmitInt8(dwarf::DW_OP_fbreg);
+        Asm->OutStreamer.AddComment("Offset");
+        Asm->EmitSLEB128(Offset);
       } else {
-        Asm->OutStreamer.AddComment("Loc expr size");
-        Asm->EmitInt16(1+MCAsmInfo::getULEB128Size(Reg));
-        Asm->EmitInt8(dwarf::DW_OP_regx);
-        Asm->EmitULEB128(Reg);
+        if (Reg < 32) {
+          Asm->OutStreamer.AddComment("Loc expr size");
+          Asm->EmitInt16(1);
+          Asm->OutStreamer.AddComment(
+            dwarf::OperationEncodingString(dwarf::DW_OP_reg0 + Reg));
+          Asm->EmitInt8(dwarf::DW_OP_reg0 + Reg);
+        } else {
+          Asm->OutStreamer.AddComment("Loc expr size");
+          Asm->EmitInt16(1 + MCAsmInfo::getULEB128Size(Reg));
+          Asm->EmitInt8(dwarf::DW_OP_regx);
+          Asm->EmitULEB128(Reg);
+        }
       }
     }
   }

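The last hunk above switches .debug_loc emission over to DW_OP_fbreg with an
SLEB128-encoded offset whenever the location sits at a nonzero offset from the
frame register. A standalone sketch of that encoding follows -- not the
in-tree API; the opcode values come from the DWARF spec, everything else is
mocked up for illustration:

    // Sketch: encode a DWARF location expression the way the hunk above
    // does -- DW_OP_fbreg + SLEB128 offset for frame-relative values,
    // otherwise DW_OP_reg0+N (registers 0-31) or DW_OP_regx + ULEB128.
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    static const uint8_t DW_OP_reg0  = 0x50;  // DWARF opcode constants
    static const uint8_t DW_OP_regx  = 0x90;
    static const uint8_t DW_OP_fbreg = 0x91;

    // Signed LEB128, the form used for the DW_OP_fbreg operand.
    static void emitSLEB128(std::vector<uint8_t> &Out, int64_t V) {
      bool More;
      do {
        uint8_t Byte = V & 0x7f;
        V >>= 7;
        More = !((V == 0 && (Byte & 0x40) == 0) ||
                 (V == -1 && (Byte & 0x40) != 0));
        if (More) Byte |= 0x80;
        Out.push_back(Byte);
      } while (More);
    }

    // Unsigned LEB128, the form used for the DW_OP_regx operand.
    static void emitULEB128(std::vector<uint8_t> &Out, uint64_t V) {
      do {
        uint8_t Byte = V & 0x7f;
        V >>= 7;
        if (V) Byte |= 0x80;
        Out.push_back(Byte);
      } while (V);
    }

    static std::vector<uint8_t> encodeLoc(unsigned Reg, int64_t Offset) {
      std::vector<uint8_t> Expr;
      if (Offset) {            // frame-relative: use fbreg
        Expr.push_back(DW_OP_fbreg);
        emitSLEB128(Expr, Offset);
      } else if (Reg < 32) {   // small regs have dedicated opcodes
        Expr.push_back(DW_OP_reg0 + Reg);
      } else {                 // large regs take a ULEB128 operand
        Expr.push_back(DW_OP_regx);
        emitULEB128(Expr, Reg);
      }
      return Expr;
    }

    int main() {
      std::vector<uint8_t> E = encodeLoc(/*Reg=*/6, /*Offset=*/-8);
      printf("Loc expr size %zu:", E.size());
      for (size_t i = 0; i != E.size(); ++i) printf(" %02x", E[i]);
      printf("\n");
      return 0;
    }

For Reg=6, Offset=-8 this prints a two-byte expression (91 78), matching the
"1 + OffsetSize" size field the hunk computes via MCAsmInfo::getSLEB128Size.
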
Modified: llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/DwarfDebug.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/DwarfDebug.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/DwarfDebug.h (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/DwarfDebug.h Tue Oct 26 19:48:03 2010
@@ -23,6 +23,7 @@
 #include "llvm/ADT/StringMap.h"
 #include "llvm/ADT/UniqueVector.h"
 #include "llvm/Support/Allocator.h"
+#include "llvm/Support/DebugLoc.h"
 
 namespace llvm {
 
@@ -261,7 +262,6 @@
   MCSymbol *DwarfFrameSectionSym, *DwarfInfoSectionSym, *DwarfAbbrevSectionSym;
   MCSymbol *DwarfStrSectionSym, *TextSectionSym, *DwarfDebugRangeSectionSym;
   MCSymbol *DwarfDebugLocSectionSym;
-  MCSymbol *DwarfDebugLineSectionSym, *CurrentLineSectionSym;
   MCSymbol *FunctionBeginSym, *FunctionEndSym;
 
   DIEInteger *DIEIntegerOne;
@@ -338,11 +338,11 @@
 
   /// addSourceLine - Add location information to specified debug information
   /// entry.
-  void addSourceLine(DIE *Die, const DIVariable *V);
-  void addSourceLine(DIE *Die, const DIGlobalVariable *G);
-  void addSourceLine(DIE *Die, const DISubprogram *SP);
-  void addSourceLine(DIE *Die, const DIType *Ty);
-  void addSourceLine(DIE *Die, const DINameSpace *NS);
+  void addSourceLine(DIE *Die, DIVariable V);
+  void addSourceLine(DIE *Die, DIGlobalVariable G);
+  void addSourceLine(DIE *Die, DISubprogram SP);
+  void addSourceLine(DIE *Die, DIType Ty);
+  void addSourceLine(DIE *Die, DINameSpace NS);
 
   /// addAddress - Add an address attribute to a die based on the location
   /// provided.
@@ -376,6 +376,10 @@
   void addBlockByrefAddress(DbgVariable *&DV, DIE *Die, unsigned Attribute,
                             const MachineLocation &Location);
 
+  /// addVariableAddress - Add DW_AT_location attribute for a DbgVariable based
+  /// on provided frame index.
+  void addVariableAddress(DbgVariable *&DV, DIE *Die, int64_t FI);
+
   /// addToContextOwner - Add Die into the list of its context owner's children.
   void addToContextOwner(DIE *Die, DIDescriptor Context);
 
@@ -414,14 +418,11 @@
   /// constructEnumTypeDIE - Construct enum type DIE from DIEnumerator.
   DIE *constructEnumTypeDIE(DIEnumerator ETy);
 
-  /// createGlobalVariableDIE - Create new DIE using GV.
-  DIE *createGlobalVariableDIE(const DIGlobalVariable &GV);
-
   /// createMemberDIE - Create new member DIE.
-  DIE *createMemberDIE(const DIDerivedType &DT);
+  DIE *createMemberDIE(DIDerivedType DT);
 
   /// createSubprogramDIE - Create new DIE using SP.
-  DIE *createSubprogramDIE(const DISubprogram &SP, bool MakeDecl = false);
+  DIE *createSubprogramDIE(DISubprogram SP);
 
   /// getOrCreateDbgScope - Create DbgScope for the scope.
   DbgScope *getOrCreateDbgScope(const MDNode *Scope, const MDNode *InlinedAt);
@@ -560,12 +561,6 @@
   /// construct SubprogramDIE - Construct subprogram DIE.
   void constructSubprogramDIE(const MDNode *N);
 
-  // FIXME: This should go away in favor of complex addresses.
-  /// Find the type the programmer originally declared the variable to be
-  /// and return that type.  Obsolete, use GetComplexAddrType instead.
-  ///
-  DIType getBlockByrefType(DIType Ty, std::string Name);
-
   /// recordSourceLine - Register a source line with debug info. Returns the
   /// unique label that was emitted and which provides correspondence to
   /// the source line list.
@@ -636,11 +631,11 @@
   /// getLabelAfterInsn - Return Label immediately following the instruction.
   const MCSymbol *getLabelAfterInsn(const MachineInstr *MI);
 
-  /// beginScope - Process beginning of a scope.
-  void beginScope(const MachineInstr *MI);
+  /// beginInstruction - Process beginning of an instruction.
+  void beginInstruction(const MachineInstr *MI);
 
-  /// endScope - Prcess end of a scope.
-  void endScope(const MachineInstr *MI);
+  /// endInstruction - Process end of an instruction.
+  void endInstruction(const MachineInstr *MI);
 };
 } // End of namespace llvm
 

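To make the new addVariableAddress contract declared above concrete: it
resolves a frame index to a base register plus offset and then picks one of
three emission paths. A minimal standalone model of that dispatch -- mock
types and invented frame-slot numbers throughout, not the in-tree interfaces:

    #include <cstdio>

    struct MachineLocation {
      unsigned Reg;
      int Offset;
      MachineLocation() : Reg(0), Offset(0) {}
    };

    // Stand-in for TargetRegisterInfo::getFrameIndexReference: map an
    // abstract frame slot to base-register-relative storage. The register
    // number and slot layout here are invented for the example.
    static int getFrameIndexReference(int FI, unsigned &FrameReg) {
      FrameReg = 6;            // pretend: the frame pointer
      return -8 * (FI + 1);    // pretend: 8-byte slots below it
    }

    struct DbgVariable {
      bool Complex;            // has a complex address expression
      bool BlockByref;         // is a Blocks __block byref variable
      DbgVariable() : Complex(false), BlockByref(false) {}
    };

    static void addVariableAddress(const DbgVariable &DV, int FI) {
      MachineLocation Loc;
      Loc.Offset = getFrameIndexReference(FI, Loc.Reg);
      if (DV.Complex)
        printf("complex address from reg %u%+d\n", Loc.Reg, Loc.Offset);
      else if (DV.BlockByref)
        printf("byref unwind from reg %u%+d\n", Loc.Reg, Loc.Offset);
      else
        printf("plain DW_AT_location: reg %u%+d\n", Loc.Reg, Loc.Offset);
    }

    int main() {
      DbgVariable DV;          // a plain local variable
      addVariableAddress(DV, /*FI=*/2);
      return 0;
    }

The point of the refactoring is visible in the model: constructVariableDIE no
longer open-codes the complex/byref/plain choice, it hands the frame index to
one helper.
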
Modified: llvm/branches/wendling/eh/lib/CodeGen/BranchFolding.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/BranchFolding.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/BranchFolding.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/BranchFolding.cpp Tue Oct 26 19:48:03 2010
@@ -65,7 +65,7 @@
   public:
     static char ID;
     explicit BranchFolderPass(bool defaultEnableTailMerge)
-      : MachineFunctionPass(&ID), BranchFolder(defaultEnableTailMerge) {}
+      : MachineFunctionPass(ID), BranchFolder(defaultEnableTailMerge) {}
 
     virtual bool runOnMachineFunction(MachineFunction &MF);
     virtual const char *getPassName() const { return "Control Flow Optimizer"; }

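The "&ID" -> "ID" constructor change here (and in CodePlacementOpt.cpp below)
tracks the pass-registration cleanup: a pass's identity is still the address
of its static "char ID", but the base-class constructors now take the char by
reference rather than as a pointer. A self-contained mock of the pattern --
class names are stand-ins, not the real LLVM classes:

    #include <cstdio>

    // Mock of the base-class side: stores the address of the static char.
    struct MachineFunctionPassMock {
      const void *PassID;
      explicit MachineFunctionPassMock(char &ID) : PassID(&ID) {}
    };

    // Mock of a pass: the static char's storage supplies a unique address.
    struct BranchFolderLike : MachineFunctionPassMock {
      static char ID;
      BranchFolderLike() : MachineFunctionPassMock(ID) {}  // was: (&ID)
    };
    char BranchFolderLike::ID = 0;

    int main() {
      BranchFolderLike P;
      printf("pass identity: %p\n", P.PassID);
      return 0;
    }
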
Modified: llvm/branches/wendling/eh/lib/CodeGen/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/CMakeLists.txt?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/CMakeLists.txt (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/CMakeLists.txt Tue Oct 26 19:48:03 2010
@@ -4,6 +4,7 @@
   BranchFolding.cpp
   CalcSpillWeights.cpp
   CallingConvLower.cpp
+  CodeGen.cpp
   CodePlacementOpt.cpp
   CriticalAntiDepBreaker.cpp
   DeadMachineInstructionElim.cpp
@@ -20,8 +21,11 @@
   LatencyPriorityQueue.cpp
   LiveInterval.cpp
   LiveIntervalAnalysis.cpp
+  LiveIntervalUnion.cpp
   LiveStackAnalysis.cpp
   LiveVariables.cpp
+  LiveRangeEdit.cpp
+  LocalStackSlotAllocation.cpp
   LowerSubregs.cpp
   MachineBasicBlock.cpp
   MachineCSE.cpp
@@ -42,16 +46,17 @@
   MachineVerifier.cpp
   ObjectCodeEmitter.cpp
   OcamlGC.cpp
-  OptimizeExts.cpp
   OptimizePHIs.cpp
   PHIElimination.cpp
   Passes.cpp
+  PeepholeOptimizer.cpp
   PostRAHazardRecognizer.cpp
   PostRASchedulerList.cpp
   PreAllocSplitting.cpp
   ProcessImplicitDefs.cpp
   PrologEpilogInserter.cpp
   PseudoSourceValue.cpp
+  RegAllocBasic.cpp
   RegAllocFast.cpp
   RegAllocLinearScan.cpp
   RegAllocPBQP.cpp
@@ -81,5 +86,3 @@
   VirtRegMap.cpp
   VirtRegRewriter.cpp
   )
-
-target_link_libraries (LLVMCodeGen LLVMCore LLVMScalarOpts)

Modified: llvm/branches/wendling/eh/lib/CodeGen/CalcSpillWeights.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/CalcSpillWeights.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/CalcSpillWeights.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/CalcSpillWeights.cpp Tue Oct 26 19:48:03 2010
@@ -25,8 +25,12 @@
 using namespace llvm;
 
 char CalculateSpillWeights::ID = 0;
-INITIALIZE_PASS(CalculateSpillWeights, "calcspillweights",
-                "Calculate spill weights", false, false);
+INITIALIZE_PASS_BEGIN(CalculateSpillWeights, "calcspillweights",
+                "Calculate spill weights", false, false)
+INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
+INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
+INITIALIZE_PASS_END(CalculateSpillWeights, "calcspillweights",
+                "Calculate spill weights", false, false)
 
 void CalculateSpillWeights::getAnalysisUsage(AnalysisUsage &au) const {
   au.addRequired<LiveIntervals>();
@@ -41,107 +45,184 @@
                << "********** Function: "
                << fn.getFunction()->getName() << '\n');
 
-  LiveIntervals *lis = &getAnalysis<LiveIntervals>();
-  MachineLoopInfo *loopInfo = &getAnalysis<MachineLoopInfo>();
-  MachineRegisterInfo *mri = &fn.getRegInfo();
-
-  SmallSet<unsigned, 4> processed;
-  for (MachineFunction::iterator mbbi = fn.begin(), mbbe = fn.end();
-       mbbi != mbbe; ++mbbi) {
-    MachineBasicBlock* mbb = mbbi;
-    SlotIndex mbbEnd = lis->getMBBEndIdx(mbb);
-    MachineLoop* loop = loopInfo->getLoopFor(mbb);
-    unsigned loopDepth = loop ? loop->getLoopDepth() : 0;
-    bool isExiting = loop ? loop->isLoopExiting(mbb) : false;
-
-    for (MachineBasicBlock::const_iterator mii = mbb->begin(), mie = mbb->end();
-         mii != mie; ++mii) {
-      const MachineInstr *mi = mii;
-      if (mi->isIdentityCopy() || mi->isImplicitDef() || mi->isDebugValue())
-        continue;
-
-      for (unsigned i = 0, e = mi->getNumOperands(); i != e; ++i) {
-        const MachineOperand &mopi = mi->getOperand(i);
-        if (!mopi.isReg() || mopi.getReg() == 0)
-          continue;
-        unsigned reg = mopi.getReg();
-        if (!TargetRegisterInfo::isVirtualRegister(mopi.getReg()))
-          continue;
-        // Multiple uses of reg by the same instruction. It should not
-        // contribute to spill weight again.
-        if (!processed.insert(reg))
-          continue;
-
-        bool hasDef = mopi.isDef();
-        bool hasUse = !hasDef;
-        for (unsigned j = i+1; j != e; ++j) {
-          const MachineOperand &mopj = mi->getOperand(j);
-          if (!mopj.isReg() || mopj.getReg() != reg)
-            continue;
-          hasDef |= mopj.isDef();
-          hasUse |= mopj.isUse();
-          if (hasDef && hasUse)
-            break;
-        }
-
-        LiveInterval &regInt = lis->getInterval(reg);
-        float weight = lis->getSpillWeight(hasDef, hasUse, loopDepth);
-        if (hasDef && isExiting) {
-          // Looks like this is a loop count variable update.
-          SlotIndex defIdx = lis->getInstructionIndex(mi).getDefIndex();
-          const LiveRange *dlr =
-            lis->getInterval(reg).getLiveRangeContaining(defIdx);
-          if (dlr->end >= mbbEnd)
-            weight *= 3.0F;
-        }
-        regInt.weight += weight;
-      }
-      processed.clear();
-    }
+  LiveIntervals &lis = getAnalysis<LiveIntervals>();
+  VirtRegAuxInfo vrai(fn, lis, getAnalysis<MachineLoopInfo>());
+  for (LiveIntervals::iterator I = lis.begin(), E = lis.end(); I != E; ++I) {
+    LiveInterval &li = *I->second;
+    if (TargetRegisterInfo::isVirtualRegister(li.reg))
+      vrai.CalculateWeightAndHint(li);
+  }
+  return false;
+}
+
+// Return the preferred allocation register for reg, given a COPY instruction.
+static unsigned copyHint(const MachineInstr *mi, unsigned reg,
+                         const TargetRegisterInfo &tri,
+                         const MachineRegisterInfo &mri) {
+  unsigned sub, hreg, hsub;
+  if (mi->getOperand(0).getReg() == reg) {
+    sub = mi->getOperand(0).getSubReg();
+    hreg = mi->getOperand(1).getReg();
+    hsub = mi->getOperand(1).getSubReg();
+  } else {
+    sub = mi->getOperand(1).getSubReg();
+    hreg = mi->getOperand(0).getReg();
+    hsub = mi->getOperand(0).getSubReg();
   }
 
-  for (LiveIntervals::iterator I = lis->begin(), E = lis->end(); I != E; ++I) {
-    LiveInterval &li = *I->second;
-    if (TargetRegisterInfo::isVirtualRegister(li.reg)) {
-      // If the live interval length is essentially zero, i.e. in every live
-      // range the use follows def immediately, it doesn't make sense to spill
-      // it and hope it will be easier to allocate for this li.
-      if (isZeroLengthInterval(&li)) {
-        li.weight = HUGE_VALF;
-        continue;
-      }
-
-      bool isLoad = false;
-      SmallVector<LiveInterval*, 4> spillIs;
-      if (lis->isReMaterializable(li, spillIs, isLoad)) {
-        // If all of the definitions of the interval are re-materializable,
-        // it is a preferred candidate for spilling. If none of the defs are
-        // loads, then it's potentially very cheap to re-materialize.
-        // FIXME: this gets much more complicated once we support non-trivial
-        // re-materialization.
-        if (isLoad)
-          li.weight *= 0.9F;
-        else
-          li.weight *= 0.5F;
-      }
-
-      // Slightly prefer live interval that has been assigned a preferred reg.
-      std::pair<unsigned, unsigned> Hint = mri->getRegAllocationHint(li.reg);
-      if (Hint.first || Hint.second)
-        li.weight *= 1.01F;
+  if (!hreg)
+    return 0;
+
+  if (TargetRegisterInfo::isVirtualRegister(hreg))
+    return sub == hsub ? hreg : 0;
 
-      lis->normalizeSpillWeight(li);
+  const TargetRegisterClass *rc = mri.getRegClass(reg);
+
+  // Only allow physreg hints in rc.
+  if (sub == 0)
+    return rc->contains(hreg) ? hreg : 0;
+
+  // reg:sub should match the physreg hreg.
+  return tri.getMatchingSuperReg(hreg, sub, rc);
+}
+
+void VirtRegAuxInfo::CalculateWeightAndHint(LiveInterval &li) {
+  MachineRegisterInfo &mri = mf_.getRegInfo();
+  const TargetRegisterInfo &tri = *mf_.getTarget().getRegisterInfo();
+  MachineBasicBlock *mbb = 0;
+  MachineLoop *loop = 0;
+  unsigned loopDepth = 0;
+  bool isExiting = false;
+  float totalWeight = 0;
+  SmallPtrSet<MachineInstr*, 8> visited;
+
+  // Find the best physreg hint and the best virtreg hint.
+  float bestPhys = 0, bestVirt = 0;
+  unsigned hintPhys = 0, hintVirt = 0;
+
+  // Don't recompute a target specific hint.
+  bool noHint = mri.getRegAllocationHint(li.reg).first != 0;
+
+  for (MachineRegisterInfo::reg_iterator I = mri.reg_begin(li.reg);
+       MachineInstr *mi = I.skipInstruction();) {
+    if (mi->isIdentityCopy() || mi->isImplicitDef() || mi->isDebugValue())
+      continue;
+    if (!visited.insert(mi))
+      continue;
+
+    // Get loop info for mi.
+    if (mi->getParent() != mbb) {
+      mbb = mi->getParent();
+      loop = loops_.getLoopFor(mbb);
+      loopDepth = loop ? loop->getLoopDepth() : 0;
+      isExiting = loop ? loop->isLoopExiting(mbb) : false;
+    }
+
+    // Calculate instr weight.
+    bool reads, writes;
+    tie(reads, writes) = mi->readsWritesVirtualRegister(li.reg);
+    float weight = LiveIntervals::getSpillWeight(writes, reads, loopDepth);
+
+    // Give extra weight to what looks like a loop induction variable update.
+    if (writes && isExiting && lis_.isLiveOutOfMBB(li, mbb))
+      weight *= 3;
+
+    totalWeight += weight;
+
+    // Get allocation hints from copies.
+    if (noHint || !mi->isCopy())
+      continue;
+    unsigned hint = copyHint(mi, li.reg, tri, mri);
+    if (!hint)
+      continue;
+    float hweight = hint_[hint] += weight;
+    if (TargetRegisterInfo::isPhysicalRegister(hint)) {
+      if (hweight > bestPhys && lis_.isAllocatable(hint))
+        bestPhys = hweight, hintPhys = hint;
+    } else {
+      if (hweight > bestVirt)
+        bestVirt = hweight, hintVirt = hint;
     }
   }
-  
-  return false;
+
+  hint_.clear();
+
+  // Always prefer the physreg hint.
+  if (unsigned hint = hintPhys ? hintPhys : hintVirt) {
+    mri.setRegAllocationHint(li.reg, 0, hint);
+    // Weakly boost the spill weight of hinted registers.
+    totalWeight *= 1.01F;
+  }
+
+  // Mark li as unspillable if all live ranges are tiny.
+  if (li.isZeroLength()) {
+    li.markNotSpillable();
+    return;
+  }
+
+  // If all of the definitions of the interval are re-materializable,
+  // it is a preferred candidate for spilling. If none of the defs are
+  // loads, then it's potentially very cheap to re-materialize.
+  // FIXME: this gets much more complicated once we support non-trivial
+  // re-materialization.
+  bool isLoad = false;
+  SmallVector<LiveInterval*, 4> spillIs;
+  if (lis_.isReMaterializable(li, spillIs, isLoad)) {
+    if (isLoad)
+      totalWeight *= 0.9F;
+    else
+      totalWeight *= 0.5F;
+  }
+
+  li.weight = totalWeight;
+  lis_.normalizeSpillWeight(li);
 }
 
-/// Returns true if the given live interval is zero length.
-bool CalculateSpillWeights::isZeroLengthInterval(LiveInterval *li) const {
-  for (LiveInterval::Ranges::const_iterator
-       i = li->ranges.begin(), e = li->ranges.end(); i != e; ++i)
-    if (i->end.getPrevIndex() > i->start)
-      return false;
-  return true;
+void VirtRegAuxInfo::CalculateRegClass(unsigned reg) {
+  MachineRegisterInfo &mri = mf_.getRegInfo();
+  const TargetRegisterInfo *tri = mf_.getTarget().getRegisterInfo();
+  const TargetRegisterClass *orc = mri.getRegClass(reg);
+  SmallPtrSet<const TargetRegisterClass*,8> rcs;
+
+  for (MachineRegisterInfo::reg_nodbg_iterator I = mri.reg_nodbg_begin(reg),
+       E = mri.reg_nodbg_end(); I != E; ++I) {
+    // The targets' regclass descriptions are not accurate enough for us to
+    // handle subregs. We would need something similar to
+    // TRI::getMatchingSuperRegClass, but returning a super class instead of a
+    // sub class.
+    if (I.getOperand().getSubReg()) {
+      DEBUG(dbgs() << "Cannot handle subregs: " << I.getOperand() << '\n');
+      return;
+    }
+    if (const TargetRegisterClass *rc =
+                                I->getDesc().getRegClass(I.getOperandNo(), tri))
+      rcs.insert(rc);
+  }
+
+  // If we found no regclass constraints, just leave reg as is.
+  // In theory, we could inflate to the largest superclass of reg's existing
+  // class, but that might not be legal for the current cpu setting.
+  // This could happen if reg is only used by COPY instructions, so we may need
+  // to improve on this.
+  if (rcs.empty()) {
+    return;
+  }
+
+  // Compute the intersection of all classes in rcs.
+  // This ought to be independent of iteration order, but if the target register
+  // classes don't form a proper algebra, it is possible to get different
+  // results. The solution is to make sure the intersection of any two register
+  // classes is also a register class or the null set.
+  const TargetRegisterClass *rc = 0;
+  for (SmallPtrSet<const TargetRegisterClass*,8>::iterator I = rcs.begin(),
+         E = rcs.end(); I != E; ++I) {
+    rc = rc ? getCommonSubClass(rc, *I) : *I;
+    assert(rc && "Incompatible regclass constraints found");
+  }
+
+  if (rc == orc)
+    return;
+  DEBUG(dbgs() << "Inflating " << orc->getName() << ":%reg" << reg << " to "
+               << rc->getName() <<".\n");
+  mri.setRegClass(reg, rc);
 }

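The weight computed above is a product of independent factors: the per-use weights summed in the loop (tripled when a write in an exiting block is live out of that block), a 1.01x nudge when an allocation hint was recorded, and a 0.9x or 0.5x discount when every def is rematerializable, before normalizeSpillWeight is applied at the end. A standalone model of that composition, with hypothetical simplified inputs (the real code derives the base weights from loop depth and the read/write flags):

  #include <vector>

  // Hypothetical per-use record; 'boosted' stands in for the
  // writes && isExiting && live-out-of-MBB case above.
  struct UseWeight { float base; bool boosted; };

  float combineSpillWeight(const std::vector<UseWeight> &uses, bool hasHint,
                           bool rematable, bool rematIsLoad) {
    float total = 0;
    for (std::vector<UseWeight>::const_iterator I = uses.begin(),
         E = uses.end(); I != E; ++I)
      total += I->boosted ? I->base * 3 : I->base;
    if (hasHint)
      total *= 1.01F;                     // weakly prefer to keep hinted regs
    if (rematable)
      total *= rematIsLoad ? 0.9F : 0.5F; // remat defs are cheap to recompute
    return total;
  }
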
Modified: llvm/branches/wendling/eh/lib/CodeGen/CodePlacementOpt.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/CodePlacementOpt.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/CodePlacementOpt.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/CodePlacementOpt.cpp Tue Oct 26 19:48:03 2010
@@ -36,7 +36,7 @@
 
   public:
     static char ID;
-    CodePlacementOpt() : MachineFunctionPass(&ID) {}
+    CodePlacementOpt() : MachineFunctionPass(ID) {}
 
     virtual bool runOnMachineFunction(MachineFunction &MF);
     virtual const char *getPassName() const {

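This one-line change repeats across the whole commit: pass constructors now hand the ID char to the base class by reference instead of by address, matching the reworked pass registration machinery. Where a pass participates in the new registry, the constructor also calls its generated initializer, and the INITIALIZE_PASS macro loses its trailing semicolon. A sketch of the updated idiom with a hypothetical ExamplePass (initializeExamplePassPass is the symbol the macro machinery would generate for it, not an existing API):

  namespace {
    class ExamplePass : public MachineFunctionPass {
    public:
      static char ID;
      ExamplePass() : MachineFunctionPass(ID) { // was MachineFunctionPass(&ID)
        initializeExamplePassPass(*PassRegistry::getPassRegistry());
      }
    };
  }
  char ExamplePass::ID = 0;

  // Now an ordinary declaration, so no trailing semicolon:
  INITIALIZE_PASS(ExamplePass, "example-pass", "Example Pass", false, false)
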
Modified: llvm/branches/wendling/eh/lib/CodeGen/CriticalAntiDepBreaker.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/CriticalAntiDepBreaker.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/CriticalAntiDepBreaker.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/CriticalAntiDepBreaker.cpp Tue Oct 26 19:48:03 2010
@@ -130,21 +130,25 @@
     return;
   assert(Count < InsertPosIndex && "Instruction index out of expected range!");
 
-  // Any register which was defined within the previous scheduling region
-  // may have been rescheduled and its lifetime may overlap with registers
-  // in ways not reflected in our current liveness state. For each such
-  // register, adjust the liveness state to be conservatively correct.
-  for (unsigned Reg = 0; Reg != TRI->getNumRegs(); ++Reg)
-    if (DefIndices[Reg] < InsertPosIndex && DefIndices[Reg] >= Count) {
-      assert(KillIndices[Reg] == ~0u && "Clobbered register is live!");
-
-      // Mark this register to be non-renamable.
+  for (unsigned Reg = 0; Reg != TRI->getNumRegs(); ++Reg) {
+    if (KillIndices[Reg] != ~0u) {
+      // If Reg is currently live, then mark that it can't be renamed as
+      // we don't know the extent of its live-range anymore (now that it
+      // has been scheduled).
+      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
+      KillIndices[Reg] = Count;
+    } else if (DefIndices[Reg] < InsertPosIndex && DefIndices[Reg] >= Count) {
+      // Any register which was defined within the previous scheduling region
+      // may have been rescheduled and its lifetime may overlap with registers
+      // in ways not reflected in our current liveness state. For each such
+      // register, adjust the liveness state to be conservatively correct.
       Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
 
       // Move the def index to the end of the previous region, to reflect
       // that the def could theoretically have been scheduled at the end.
       DefIndices[Reg] = InsertPosIndex;
     }
+  }
 
   PrescanInstruction(MI);
   ScanInstruction(MI, Count);
@@ -177,7 +181,7 @@
   // that have special allocation requirements. Also assume all registers
   // used in a call must not be changed (ABI).
   // FIXME: The issue with predicated instructions is more complex. We are being
-  // conservatively here because the kill markers cannot be trusted after
+  // conservative here because the kill markers cannot be trusted after
   // if-conversion:
   // %R6<def> = LDR %SP, %reg0, 92, pred:14, pred:%reg0; mem:LD4[FixedStack14]
   // ...
@@ -330,6 +334,8 @@
   for (TargetRegisterClass::iterator R = RC->allocation_order_begin(MF),
        RE = RC->allocation_order_end(MF); R != RE; ++R) {
     unsigned NewReg = *R;
+    // Don't consider non-allocatable registers.
+    if (!AllocatableSet.test(NewReg)) continue;
     // Don't replace a register with itself.
     if (NewReg == AntiDepReg) continue;
     // Don't replace a register with one that was recently used to repair
@@ -578,7 +584,7 @@
         }
 
         // We just went back in time and modified history; the
-        // liveness information for the anti-depenence reg is now
+        // liveness information for the anti-dependence reg is now
         // inconsistent. Set the state as if it were dead.
         Classes[NewReg] = Classes[AntiDepReg];
         DefIndices[NewReg] = DefIndices[AntiDepReg];

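The reworked loop above splits the conservative fixup into two cases: a register still live at the boundary can no longer be renamed at all (its extent is unknown once the region has been scheduled), while a register merely defined inside the region only needs its def index pushed to the region's end. A toy model of that fixup with plain arrays (the register count and index encoding are illustrative):

  enum { NumRegs = 64 };
  unsigned KillIndices[NumRegs];   // ~0u means "not currently live"
  unsigned DefIndices[NumRegs];
  bool     Renamable[NumRegs];

  void observeRegionBoundary(unsigned Count, unsigned InsertPosIndex) {
    for (unsigned Reg = 0; Reg != NumRegs; ++Reg) {
      if (KillIndices[Reg] != ~0u) {
        // Live across the boundary: don't use it as a rename target.
        Renamable[Reg] = false;
        KillIndices[Reg] = Count;
      } else if (DefIndices[Reg] < InsertPosIndex &&
                 DefIndices[Reg] >= Count) {
        // Defined inside the just-scheduled region: the def may have moved,
        // so pretend it happened at the region's end.
        Renamable[Reg] = false;
        DefIndices[Reg] = InsertPosIndex;
      }
    }
  }
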
Modified: llvm/branches/wendling/eh/lib/CodeGen/DeadMachineInstructionElim.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/DeadMachineInstructionElim.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/DeadMachineInstructionElim.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/DeadMachineInstructionElim.cpp Tue Oct 26 19:48:03 2010
@@ -36,7 +36,9 @@
 
   public:
     static char ID; // Pass identification, replacement for typeid
-    DeadMachineInstructionElim() : MachineFunctionPass(&ID) {}
+    DeadMachineInstructionElim() : MachineFunctionPass(ID) {
+     initializeDeadMachineInstructionElimPass(*PassRegistry::getPassRegistry());
+    }
 
   private:
     bool isDead(const MachineInstr *MI) const;
@@ -45,7 +47,7 @@
 char DeadMachineInstructionElim::ID = 0;
 
 INITIALIZE_PASS(DeadMachineInstructionElim, "dead-mi-elimination",
-                "Remove dead machine instructions", false, false);
+                "Remove dead machine instructions", false, false)
 
 FunctionPass *llvm::createDeadMachineInstructionElimPass() {
   return new DeadMachineInstructionElim();
@@ -80,9 +82,8 @@
   TRI = MF.getTarget().getRegisterInfo();
   TII = MF.getTarget().getInstrInfo();
 
-  // Compute a bitvector to represent all non-allocatable physregs.
-  BitVector NonAllocatableRegs = TRI->getAllocatableSet(MF);
-  NonAllocatableRegs.flip();
+  // Treat reserved registers as always live.
+  BitVector ReservedRegs = TRI->getReservedRegs(MF);
 
   // Loop over all instructions in all blocks, from bottom to top, so that it's
   // more likely that chains of dependent but ultimately dead instructions will
@@ -91,9 +92,8 @@
        I != E; ++I) {
     MachineBasicBlock *MBB = &*I;
 
-    // Start out assuming that all non-allocatable registers are live
-    // out of this block.
-    LivePhysRegs = NonAllocatableRegs;
+    // Start out assuming that reserved registers are live out of this block.
+    LivePhysRegs = ReservedRegs;
 
     // Also add any explicit live-out physregs for this block.
     if (!MBB->empty() && MBB->back().getDesc().isReturn())
@@ -104,6 +104,10 @@
           LivePhysRegs.set(Reg);
       }
 
+    // FIXME: Add live-ins from successors to LivePhysRegs. Normally, physregs
+    // are not live across blocks, but some targets (x86) can have flags live
+    // out of a block.
+
     // Now scan the instructions and delete dead ones, tracking physreg
     // liveness as we go.
     for (MachineBasicBlock::reverse_iterator MII = MBB->rbegin(),

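The substantive change above is the liveness seed: reserved registers, which are always live, replace the complement of the allocatable set. The bottom-up scan itself is untouched; for reference, a self-contained miniature of it (toy instruction encoding on a hypothetical 64-register machine):

  #include <bitset>
  #include <vector>

  struct Inst {
    std::vector<unsigned> defs, uses;
    bool sideEffects;       // calls, stores, etc. are never dead
  };

  void elimDead(std::vector<Inst> &block, const std::bitset<64> &reserved) {
    std::bitset<64> live = reserved;  // reserved regs are always live
    for (int i = (int)block.size() - 1; i >= 0; --i) {
      Inst &MI = block[i];
      bool dead = !MI.sideEffects;
      for (size_t d = 0; d != MI.defs.size(); ++d)
        if (live.test(MI.defs[d]))
          dead = false;               // something below reads this def
      if (dead) {
        block.erase(block.begin() + i);
        continue;
      }
      for (size_t d = 0; d != MI.defs.size(); ++d)
        live.reset(MI.defs[d]);
      for (size_t u = 0; u != MI.uses.size(); ++u)
        live.set(MI.uses[u]);
    }
  }
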
Modified: llvm/branches/wendling/eh/lib/CodeGen/DwarfEHPrepare.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/DwarfEHPrepare.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/DwarfEHPrepare.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/DwarfEHPrepare.cpp Tue Oct 26 19:48:03 2010
@@ -25,19 +25,17 @@
 #include "llvm/Support/CallSite.h"
 #include "llvm/Target/TargetLowering.h"
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
-#include "llvm/Transforms/Utils/PromoteMemToReg.h"
+#include "llvm/Transforms/Utils/SSAUpdater.h"
 using namespace llvm;
 
 STATISTIC(NumLandingPadsSplit,     "Number of landing pads split");
 STATISTIC(NumUnwindsLowered,       "Number of unwind instructions lowered");
 STATISTIC(NumExceptionValuesMoved, "Number of eh.exception calls moved");
-STATISTIC(NumStackTempsIntroduced, "Number of stack temporaries introduced");
 
 namespace {
   class DwarfEHPrepare : public FunctionPass {
     const TargetMachine *TM;
     const TargetLowering *TLI;
-    bool CompileFast;
 
     // The eh.exception intrinsic.
     Function *ExceptionValueIntrinsic;
@@ -54,9 +52,8 @@
     // _Unwind_Resume or the target equivalent.
     Constant *RewindFunction;
 
-    // Dominator info is used when turning stack temporaries into registers.
+    // We both use and preserve dominator info.
     DominatorTree *DT;
-    DominanceFrontier *DF;
 
     // The function we are running on.
     Function *F;
@@ -65,25 +62,11 @@
     typedef SmallPtrSet<BasicBlock*, 8> BBSet;
     BBSet LandingPads;
 
-    // Stack temporary used to hold eh.exception values.
-    AllocaInst *ExceptionValueVar;
-
     bool NormalizeLandingPads();
     bool LowerUnwinds();
     bool MoveExceptionValueCalls();
-    bool FinishStackTemporaries();
-    bool PromoteStackTemporaries();
 
     Instruction *CreateExceptionValueCall(BasicBlock *BB);
-    Instruction *CreateValueLoad(BasicBlock *BB);
-
-    /// CreateReadOfExceptionValue - Return the result of the eh.exception
-    /// intrinsic by calling the intrinsic if in a landing pad, or loading it
-    /// from the exception value variable otherwise.
-    Instruction *CreateReadOfExceptionValue(BasicBlock *BB) {
-      return LandingPads.count(BB) ?
-        CreateExceptionValueCall(BB) : CreateValueLoad(BB);
-    }
 
     /// CleanupSelectors - Any remaining eh.selector intrinsic calls which still
     /// use the "llvm.eh.catch.all.value" call need to convert to using its
@@ -112,69 +95,21 @@
     bool FindSelectorAndURoR(Instruction *Inst, bool &URoRInvoke,
                              SmallPtrSet<IntrinsicInst*, 8> &SelCalls);
       
-    /// DoMem2RegPromotion - Take an alloca call and promote it from memory to a
-    /// register.
-    bool DoMem2RegPromotion(Value *V) {
-      AllocaInst *AI = dyn_cast<AllocaInst>(V);
-      if (!AI || !isAllocaPromotable(AI)) return false;
-
-      // Turn the alloca into a register.
-      std::vector<AllocaInst*> Allocas(1, AI);
-      PromoteMemToReg(Allocas, *DT, *DF);
-      return true;
-    }
-
-    /// PromoteStoreInst - Perform Mem2Reg on a StoreInst.
-    bool PromoteStoreInst(StoreInst *SI) {
-      if (!SI || !DT || !DF) return false;
-      if (DoMem2RegPromotion(SI->getOperand(1)))
-        return true;
-      return false;
-    }
-
-    /// PromoteEHPtrStore - Promote the storing of an EH pointer into a
-    /// register. This should get rid of the store and subsequent loads.
-    bool PromoteEHPtrStore(IntrinsicInst *II) {
-      if (!DT || !DF) return false;
-
-      bool Changed = false;
-      StoreInst *SI;
-
-      while (1) {
-        SI = 0;
-        for (Value::use_iterator
-               I = II->use_begin(), E = II->use_end(); I != E; ++I) {
-          SI = dyn_cast<StoreInst>(*I);
-          if (SI) break;
-        }
-
-        if (!PromoteStoreInst(SI))
-          break;
-
-        Changed = true;
-      }
-
-      return Changed;
-    }
-
   public:
     static char ID; // Pass identification, replacement for typeid.
-    DwarfEHPrepare(const TargetMachine *tm, bool fast) :
-      FunctionPass(&ID), TM(tm), TLI(TM->getTargetLowering()),
-      CompileFast(fast),
+    DwarfEHPrepare(const TargetMachine *tm) :
+      FunctionPass(ID), TM(tm), TLI(TM->getTargetLowering()),
       ExceptionValueIntrinsic(0), SelectorIntrinsic(0),
-      URoR(0), EHCatchAllValue(0), RewindFunction(0) {}
+      URoR(0), EHCatchAllValue(0), RewindFunction(0) {
+        initializeDominatorTreePass(*PassRegistry::getPassRegistry());
+      }
 
     virtual bool runOnFunction(Function &Fn);
 
-    // getAnalysisUsage - We need dominance frontiers for memory promotion.
+    // getAnalysisUsage - We need the dominator tree for handling URoR.
     virtual void getAnalysisUsage(AnalysisUsage &AU) const {
-      if (!CompileFast)
-        AU.addRequired<DominatorTree>();
+      AU.addRequired<DominatorTree>();
       AU.addPreserved<DominatorTree>();
-      if (!CompileFast)
-        AU.addRequired<DominanceFrontier>();
-      AU.addPreserved<DominanceFrontier>();
     }
 
     const char *getPassName() const {
@@ -186,8 +121,8 @@
 
 char DwarfEHPrepare::ID = 0;
 
-FunctionPass *llvm::createDwarfEHPass(const TargetMachine *tm, bool fast) {
-  return new DwarfEHPrepare(tm, fast);
+FunctionPass *llvm::createDwarfEHPass(const TargetMachine *tm) {
+  return new DwarfEHPrepare(tm);
 }
 
 /// HasCatchAllInSelector - Return true if the intrinsic instruction has a
@@ -268,7 +203,6 @@
   SmallPtrSet<PHINode*, 32> SeenPHIs;
   bool Changed = false;
 
- restart:
   for (Value::use_iterator
          I = Inst->use_begin(), E = Inst->use_end(); I != E; ++I) {
     Instruction *II = dyn_cast<Instruction>(*I);
@@ -282,11 +216,6 @@
         URoRInvoke = true;
     } else if (CastInst *CI = dyn_cast<CastInst>(II)) {
       Changed |= FindSelectorAndURoR(CI, URoRInvoke, SelCalls);
-    } else if (StoreInst *SI = dyn_cast<StoreInst>(II)) {
-      if (!PromoteStoreInst(SI)) continue;
-      Changed = true;
-      SeenPHIs.clear();
-      goto restart;             // Uses may have changed, restart loop.
     } else if (PHINode *PN = dyn_cast<PHINode>(II)) {
       if (SeenPHIs.insert(PN))
         // Don't process a PHI node more than once.
@@ -318,10 +247,6 @@
   SmallPtrSet<IntrinsicInst*, 32> CatchAllSels;
   FindAllCleanupSelectors(Sels, CatchAllSels);
 
-  if (!DT)
-    // We require DominatorTree information.
-    return CleanupSelectors(CatchAllSels);
-
   if (!URoR) {
     URoR = F->getParent()->getFunction("_Unwind_Resume_or_Rethrow");
     if (!URoR) return CleanupSelectors(CatchAllSels);
@@ -363,8 +288,6 @@
       IntrinsicInst *EHPtr = dyn_cast<IntrinsicInst>(*I);
       if (!EHPtr || EHPtr->getParent()->getParent() != F) continue;
 
-      Changed |= PromoteEHPtrStore(EHPtr);
-
       bool URoRInvoke = false;
       SmallPtrSet<IntrinsicInst*, 8> SelCalls;
       Changed |= FindSelectorAndURoR(EHPtr, URoRInvoke, SelCalls);
@@ -532,11 +455,8 @@
     // Add a fallthrough from NewBB to the original landing pad.
     BranchInst::Create(LPad, NewBB);
 
-    // Now update DominatorTree and DominanceFrontier analysis information.
-    if (DT)
-      DT->splitBlock(NewBB);
-    if (DF)
-      DF->splitBlock(NewBB);
+    // Now update DominatorTree analysis information.
+    DT->splitBlock(NewBB);
 
     // Remember the newly constructed landing pad.  The original landing pad
     // LPad is no longer a landing pad now that all unwind edges have been
@@ -586,7 +506,7 @@
 
     // Create the call...
     CallInst *CI = CallInst::Create(RewindFunction,
-                                    CreateReadOfExceptionValue(TI->getParent()),
+                                    CreateExceptionValueCall(TI->getParent()),
                                     "", TI);
     CI->setCallingConv(TLI->getLibcallCallingConv(RTLIB::UNWIND_RESUME));
     // ...followed by an UnreachableInst.
@@ -602,9 +522,11 @@
 }
 
 /// MoveExceptionValueCalls - Ensure that eh.exception is only ever called from
-/// landing pads by replacing calls outside of landing pads with loads from a
-/// stack temporary.  Move eh.exception calls inside landing pads to the start
-/// of the landing pad (optional, but may make things simpler for later passes).
+/// landing pads by replacing calls outside of landing pads with direct use of
+/// a register holding the appropriate value; this requires adding calls inside
+/// all landing pads to initialize the register.  Also, move eh.exception calls
+/// inside landing pads to the start of the landing pad (optional, but may make
+/// things simpler for later passes).
 bool DwarfEHPrepare::MoveExceptionValueCalls() {
   // If the eh.exception intrinsic is not declared in the module then there is
   // nothing to do.  Speed up compilation by checking for this common case.
@@ -614,61 +536,87 @@
 
   bool Changed = false;
 
-  for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
-    for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E;)
-      if (IntrinsicInst *CI = dyn_cast<IntrinsicInst>(II++))
-        if (CI->getIntrinsicID() == Intrinsic::eh_exception) {
-          if (!CI->use_empty()) {
-            Value *ExceptionValue = CreateReadOfExceptionValue(BB);
-            if (CI == ExceptionValue) {
-              // The call was at the start of a landing pad - leave it alone.
-              assert(LandingPads.count(BB) &&
-                     "Created eh.exception call outside landing pad!");
-              continue;
-            }
-            CI->replaceAllUsesWith(ExceptionValue);
-          }
-          CI->eraseFromParent();
-          ++NumExceptionValuesMoved;
-          Changed = true;
+  // Move calls to eh.exception that are inside a landing pad to the start of
+  // the landing pad.
+  for (BBSet::const_iterator LI = LandingPads.begin(), LE = LandingPads.end();
+       LI != LE; ++LI) {
+    BasicBlock *LP = *LI;
+    for (BasicBlock::iterator II = LP->getFirstNonPHIOrDbg(), IE = LP->end();
+         II != IE;)
+      if (EHExceptionInst *EI = dyn_cast<EHExceptionInst>(II++)) {
+        // Found a call to eh.exception.
+        if (!EI->use_empty()) {
+          // If there is already a call to eh.exception at the start of the
+          // landing pad, then get hold of it; otherwise create such a call.
+          Value *CallAtStart = CreateExceptionValueCall(LP);
+
+          // If the call was at the start of a landing pad then leave it alone.
+          if (EI == CallAtStart)
+            continue;
+          EI->replaceAllUsesWith(CallAtStart);
         }
+        EI->eraseFromParent();
+        ++NumExceptionValuesMoved;
+        Changed = true;
+      }
   }
 
-  return Changed;
-}
+  // Look for calls to eh.exception that are not in a landing pad.  If one is
+  // found, then a register that holds the exception value will be created in
+  // each landing pad, and the SSAUpdater will be used to compute the values
+  // returned by eh.exception calls outside of landing pads.
+  SSAUpdater SSA;
+
+  // Remember where we found the eh.exception call, to avoid rescanning earlier
+  // basic blocks which we already know contain no eh.exception calls.
+  bool FoundCallOutsideLandingPad = false;
+  Function::iterator BB = F->begin();
+  for (Function::iterator BE = F->end(); BB != BE; ++BB) {
+    // Skip over landing pads.
+    if (LandingPads.count(BB))
+      continue;
 
-/// FinishStackTemporaries - If we introduced a stack variable to hold the
-/// exception value then initialize it in each landing pad.
-bool DwarfEHPrepare::FinishStackTemporaries() {
-  if (!ExceptionValueVar)
-    // Nothing to do.
-    return false;
+    for (BasicBlock::iterator II = BB->getFirstNonPHIOrDbg(), IE = BB->end();
+         II != IE; ++II)
+      if (isa<EHExceptionInst>(II)) {
+        SSA.Initialize(II->getType(), II->getName());
+        FoundCallOutsideLandingPad = true;
+        break;
+      }
 
-  bool Changed = false;
+    if (FoundCallOutsideLandingPad)
+      break;
+  }
+
+  // If all calls to eh.exception are in landing pads then we are done.
+  if (!FoundCallOutsideLandingPad)
+    return Changed;
 
-  // Make sure that there is a store of the exception value at the start of
-  // each landing pad.
+  // Add a call to eh.exception at the start of each landing pad, and tell the
+  // SSAUpdater that this is the value produced by the landing pad.
   for (BBSet::iterator LI = LandingPads.begin(), LE = LandingPads.end();
-       LI != LE; ++LI) {
-    Instruction *ExceptionValue = CreateReadOfExceptionValue(*LI);
-    Instruction *Store = new StoreInst(ExceptionValue, ExceptionValueVar);
-    Store->insertAfter(ExceptionValue);
-    Changed = true;
-  }
+       LI != LE; ++LI)
+    SSA.AddAvailableValue(*LI, CreateExceptionValueCall(*LI));
 
-  return Changed;
-}
+  // Now turn all calls to eh.exception that are not in a landing pad into a use
+  // of the appropriate register.
+  for (Function::iterator BE = F->end(); BB != BE; ++BB) {
+    // Skip over landing pads.
+    if (LandingPads.count(BB))
+      continue;
 
-/// PromoteStackTemporaries - Turn any stack temporaries we introduced into
-/// registers if possible.
-bool DwarfEHPrepare::PromoteStackTemporaries() {
-  if (ExceptionValueVar && DT && DF && isAllocaPromotable(ExceptionValueVar)) {
-    // Turn the exception temporary into registers and phi nodes if possible.
-    std::vector<AllocaInst*> Allocas(1, ExceptionValueVar);
-    PromoteMemToReg(Allocas, *DT, *DF);
-    return true;
+    for (BasicBlock::iterator II = BB->getFirstNonPHIOrDbg(), IE = BB->end();
+         II != IE;)
+      if (EHExceptionInst *EI = dyn_cast<EHExceptionInst>(II++)) {
+        // Found a call to eh.exception, replace it with the value from any
+        // upstream landing pad(s).
+        EI->replaceAllUsesWith(SSA.GetValueAtEndOfBlock(BB));
+        EI->eraseFromParent();
+        ++NumExceptionValuesMoved;
+      }
   }
-  return false;
+
+  return true;
 }
 
 /// CreateExceptionValueCall - Insert a call to the eh.exception intrinsic at
@@ -691,36 +639,11 @@
   return CallInst::Create(ExceptionValueIntrinsic, "eh.value.call", Start);
 }
 
-/// CreateValueLoad - Insert a load of the exception value stack variable
-/// (creating it if necessary) at the start of the basic block (unless
-/// there already is a load, in which case the existing load is returned).
-Instruction *DwarfEHPrepare::CreateValueLoad(BasicBlock *BB) {
-  Instruction *Start = BB->getFirstNonPHIOrDbg();
-  // Is this a load of the exception temporary?
-  if (ExceptionValueVar)
-    if (LoadInst* LI = dyn_cast<LoadInst>(Start))
-      if (LI->getPointerOperand() == ExceptionValueVar)
-        // Reuse the existing load.
-        return Start;
-
-  // Create the temporary if we didn't already.
-  if (!ExceptionValueVar) {
-    ExceptionValueVar = new AllocaInst(PointerType::getUnqual(
-           Type::getInt8Ty(BB->getContext())), "eh.value", F->begin()->begin());
-    ++NumStackTempsIntroduced;
-  }
-
-  // Load the value.
-  return new LoadInst(ExceptionValueVar, "eh.value.load", Start);
-}
-
 bool DwarfEHPrepare::runOnFunction(Function &Fn) {
   bool Changed = false;
 
   // Initialize internal state.
-  DT = getAnalysisIfAvailable<DominatorTree>();
-  DF = getAnalysisIfAvailable<DominanceFrontier>();
-  ExceptionValueVar = 0;
+  DT = &getAnalysis<DominatorTree>();
   F = &Fn;
 
   // Ensure that only unwind edges end at landing pads (a landing pad is a
@@ -735,13 +658,6 @@
   // Move eh.exception calls to landing pads.
   Changed |= MoveExceptionValueCalls();
 
-  // Initialize any stack temporaries we introduced.
-  Changed |= FinishStackTemporaries();
-
-  // Turn any stack temporaries into registers if possible.
-  if (!CompileFast)
-    Changed |= PromoteStackTemporaries();
-
   Changed |= HandleURoRInvokes();
 
   LandingPads.clear();

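The structural change in this file is the switch from a stack temporary plus mem2reg (which needed DominanceFrontier) to the SSAUpdater, which inserts the necessary PHI nodes directly and only requires the dominator tree. Reduced to its core, the pattern used above looks like the sketch below, where DefBlocks stands in for the landing pads, UseInsts for the eh.exception calls found outside them, and defineValueIn for creating the per-block definition (all three are hypothetical names):

  SSAUpdater SSA;
  SSA.Initialize(ExnTy, "eh.value");

  // Register one available value per defining block (per landing pad).
  for (unsigned i = 0, e = DefBlocks.size(); i != e; ++i)
    SSA.AddAvailableValue(DefBlocks[i], defineValueIn(DefBlocks[i]));

  // Every use outside the defining blocks then reads the dominating value;
  // SSAUpdater creates whatever PHI nodes that requires.
  for (unsigned i = 0, e = UseInsts.size(); i != e; ++i) {
    Instruction *U = UseInsts[i];
    U->replaceAllUsesWith(SSA.GetValueAtEndOfBlock(U->getParent()));
    U->eraseFromParent();
  }
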
Modified: llvm/branches/wendling/eh/lib/CodeGen/ELFWriter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/ELFWriter.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/ELFWriter.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/ELFWriter.cpp Tue Oct 26 19:48:03 2010
@@ -63,7 +63,7 @@
 //===----------------------------------------------------------------------===//
 
 ELFWriter::ELFWriter(raw_ostream &o, TargetMachine &tm)
-  : MachineFunctionPass(&ID), O(o), TM(tm),
+  : MachineFunctionPass(ID), O(o), TM(tm),
     OutContext(*new MCContext(*TM.getMCAsmInfo())),
     TLOF(TM.getTargetLowering()->getObjFileLowering()),
     is64Bit(TM.getTargetData()->getPointerSizeInBits() == 64),

Modified: llvm/branches/wendling/eh/lib/CodeGen/ELFWriter.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/ELFWriter.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/ELFWriter.h (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/ELFWriter.h Tue Oct 26 19:48:03 2010
@@ -39,6 +39,7 @@
   class raw_ostream;
   class SectionKind;
   class MCContext;
+  class TargetMachine;
 
   typedef std::vector<ELFSym*>::iterator ELFSymIter;
   typedef std::vector<ELFSection*>::iterator ELFSectionIter;

Modified: llvm/branches/wendling/eh/lib/CodeGen/GCMetadata.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/GCMetadata.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/GCMetadata.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/GCMetadata.cpp Tue Oct 26 19:48:03 2010
@@ -30,8 +30,7 @@
     raw_ostream &OS;
     
   public:
-    Printer() : FunctionPass(&ID), OS(errs()) {}
-    explicit Printer(raw_ostream &OS) : FunctionPass(&ID), OS(OS) {}
+    explicit Printer(raw_ostream &OS) : FunctionPass(ID), OS(OS) {}
 
     
     const char *getPassName() const;
@@ -56,7 +55,7 @@
 }
 
 INITIALIZE_PASS(GCModuleInfo, "collector-metadata",
-                "Create Garbage Collector Module Metadata", false, false);
+                "Create Garbage Collector Module Metadata", false, false)
 
 // -----------------------------------------------------------------------------
 
@@ -70,7 +69,9 @@
 char GCModuleInfo::ID = 0;
 
 GCModuleInfo::GCModuleInfo()
-  : ImmutablePass(&ID) {}
+    : ImmutablePass(ID) {
+  initializeGCModuleInfoPass(*PassRegistry::getPassRegistry());
+}
 
 GCModuleInfo::~GCModuleInfo() {
   clear();
@@ -189,7 +190,7 @@
   return new Deleter();
 }
 
-Deleter::Deleter() : FunctionPass(&ID) {}
+Deleter::Deleter() : FunctionPass(ID) {}
 
 const char *Deleter::getPassName() const {
   return "Delete Garbage Collector Information";

Modified: llvm/branches/wendling/eh/lib/CodeGen/GCStrategy.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/GCStrategy.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/GCStrategy.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/GCStrategy.cpp Tue Oct 26 19:48:03 2010
@@ -123,6 +123,11 @@
 
 // -----------------------------------------------------------------------------
 
+INITIALIZE_PASS_BEGIN(LowerIntrinsics, "gc-lowering", "GC Lowering",
+                      false, false)
+INITIALIZE_PASS_DEPENDENCY(GCModuleInfo)
+INITIALIZE_PASS_END(LowerIntrinsics, "gc-lowering", "GC Lowering", false, false)
+
 FunctionPass *llvm::createGCLoweringPass() {
   return new LowerIntrinsics();
 }
@@ -130,7 +135,9 @@
 char LowerIntrinsics::ID = 0;
 
 LowerIntrinsics::LowerIntrinsics()
-  : FunctionPass(&ID) {}
+  : FunctionPass(ID) {
+    initializeLowerIntrinsicsPass(*PassRegistry::getPassRegistry());
+  }
 
 const char *LowerIntrinsics::getPassName() const {
   return "Lower Garbage Collection Instructions";
@@ -318,7 +325,7 @@
 char MachineCodeAnalysis::ID = 0;
 
 MachineCodeAnalysis::MachineCodeAnalysis()
-  : MachineFunctionPass(&ID) {}
+  : MachineFunctionPass(ID) {}
 
 const char *MachineCodeAnalysis::getPassName() const {
   return "Analyze Machine Code For Garbage Collection";
@@ -345,13 +352,15 @@
   MachineBasicBlock::iterator RAI = CI; 
   ++RAI;                                
   
-  if (FI->getStrategy().needsSafePoint(GC::PreCall))
-    FI->addSafePoint(GC::PreCall, InsertLabel(*CI->getParent(), CI,
-                                              CI->getDebugLoc()));
-  
-  if (FI->getStrategy().needsSafePoint(GC::PostCall))
-    FI->addSafePoint(GC::PostCall, InsertLabel(*CI->getParent(), RAI,
-                                               CI->getDebugLoc()));
+  if (FI->getStrategy().needsSafePoint(GC::PreCall)) {
+    MCSymbol* Label = InsertLabel(*CI->getParent(), CI, CI->getDebugLoc());
+    FI->addSafePoint(GC::PreCall, Label, CI->getDebugLoc());
+  }
+  
+  if (FI->getStrategy().needsSafePoint(GC::PostCall)) {
+    MCSymbol* Label = InsertLabel(*CI->getParent(), RAI, CI->getDebugLoc());
+    FI->addSafePoint(GC::PostCall, Label, CI->getDebugLoc());
+  }
 }
 
 void MachineCodeAnalysis::FindSafePoints(MachineFunction &MF) {

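LowerIntrinsics above, and the IfConverter below, both move to the three-part initializer macros so that a pass can declare the analyses it depends on; each dependency is then initialized before the pass itself. The shape, with a hypothetical pass:

  INITIALIZE_PASS_BEGIN(ExamplePass, "example-pass", "Example Pass",
                        false, false)
  INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
  INITIALIZE_PASS_END(ExamplePass, "example-pass", "Example Pass",
                      false, false)

This pairs with an addRequired<MachineLoopInfo>() in getAnalysisUsage, as the IfConverter hunk below shows.
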
Modified: llvm/branches/wendling/eh/lib/CodeGen/IfConversion.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/IfConversion.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/IfConversion.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/IfConversion.cpp Tue Oct 26 19:48:03 2010
@@ -17,7 +17,9 @@
 #include "llvm/CodeGen/Passes.h"
 #include "llvm/CodeGen/MachineModuleInfo.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
 #include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetInstrItineraries.h"
 #include "llvm/Target/TargetLowering.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetRegisterInfo.h"
@@ -91,6 +93,7 @@
     /// ClobbersPred    - True if BB could modify predicates (e.g. has
     ///                   cmp, call, etc.)
     /// NonPredSize     - Number of non-predicated instructions.
+    /// ExtraCost       - Extra cost for microcoded instructions.
     /// BB              - Corresponding MachineBasicBlock.
     /// TrueBB / FalseBB- See AnalyzeBranch().
     /// BrCond          - Conditions for end of block conditional branches.
@@ -106,6 +109,7 @@
       bool CannotBeCopied  : 1;
       bool ClobbersPred    : 1;
       unsigned NonPredSize;
+      unsigned ExtraCost;
       MachineBasicBlock *BB;
       MachineBasicBlock *TrueBB;
       MachineBasicBlock *FalseBB;
@@ -115,7 +119,7 @@
                  IsAnalyzed(false), IsEnqueued(false), IsBrAnalyzable(false),
                  HasFallThrough(false), IsUnpredicable(false),
                  CannotBeCopied(false), ClobbersPred(false), NonPredSize(0),
-                 BB(0), TrueBB(0), FalseBB(0) {}
+                 ExtraCost(0), BB(0), TrueBB(0), FalseBB(0) {}
     };
 
     /// IfcvtToken - Record information about pending if-conversions to attempt:
@@ -150,20 +154,31 @@
     const TargetLowering *TLI;
     const TargetInstrInfo *TII;
     const TargetRegisterInfo *TRI;
+    const InstrItineraryData *InstrItins;
+    const MachineLoopInfo *MLI;
     bool MadeChange;
     int FnNum;
   public:
     static char ID;
-    IfConverter() : MachineFunctionPass(&ID), FnNum(-1) {}
+    IfConverter() : MachineFunctionPass(ID), FnNum(-1) {
+      initializeIfConverterPass(*PassRegistry::getPassRegistry());
+    }
+    
+    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+      AU.addRequired<MachineLoopInfo>();
+      MachineFunctionPass::getAnalysisUsage(AU);
+    }
 
     virtual bool runOnMachineFunction(MachineFunction &MF);
     virtual const char *getPassName() const { return "If Converter"; }
 
   private:
     bool ReverseBranchCondition(BBInfo &BBI);
-    bool ValidSimple(BBInfo &TrueBBI, unsigned &Dups) const;
+    bool ValidSimple(BBInfo &TrueBBI, unsigned &Dups,
+                     float Prediction, float Confidence) const;
     bool ValidTriangle(BBInfo &TrueBBI, BBInfo &FalseBBI,
-                       bool FalseBranch, unsigned &Dups) const;
+                       bool FalseBranch, unsigned &Dups,
+                       float Prediction, float Confidence) const;
     bool ValidDiamond(BBInfo &TrueBBI, BBInfo &FalseBBI,
                       unsigned &Dups1, unsigned &Dups2) const;
     void ScanInstructions(BBInfo &BBI);
@@ -188,14 +203,18 @@
                                bool IgnoreBr = false);
     void MergeBlocks(BBInfo &ToBBI, BBInfo &FromBBI, bool AddEdges = true);
 
-    bool MeetIfcvtSizeLimit(MachineBasicBlock &BB, unsigned Size) const {
-      return Size > 0 && TII->isProfitableToIfCvt(BB, Size);
+    bool MeetIfcvtSizeLimit(MachineBasicBlock &BB, unsigned Size,
+                            float Prediction, float Confidence) const {
+      return Size > 0 && TII->isProfitableToIfCvt(BB, Size,
+                                                  Prediction, Confidence);
     }
 
     bool MeetIfcvtSizeLimit(MachineBasicBlock &TBB, unsigned TSize,
-                            MachineBasicBlock &FBB, unsigned FSize) const {
+                            MachineBasicBlock &FBB, unsigned FSize,
+                            float Prediction, float Confidence) const {
       return TSize > 0 && FSize > 0 &&
-        TII->isProfitableToIfCvt(TBB, TSize, FBB, FSize);
+        TII->isProfitableToIfCvt(TBB, TSize, FBB, FSize,
+                                 Prediction, Confidence);
     }
 
     // blockAlwaysFallThrough - Block ends without a terminator.
@@ -230,7 +249,9 @@
   char IfConverter::ID = 0;
 }
 
-INITIALIZE_PASS(IfConverter, "if-converter", "If Converter", false, false);
+INITIALIZE_PASS_BEGIN(IfConverter, "if-converter", "If Converter", false, false)
+INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
+INITIALIZE_PASS_END(IfConverter, "if-converter", "If Converter", false, false)
 
 FunctionPass *llvm::createIfConverterPass() { return new IfConverter(); }
 
@@ -238,6 +259,8 @@
   TLI = MF.getTarget().getTargetLowering();
   TII = MF.getTarget().getInstrInfo();
   TRI = MF.getTarget().getRegisterInfo();
+  MLI = &getAnalysis<MachineLoopInfo>();
+  InstrItins = MF.getTarget().getInstrItineraryData();
   if (!TII) return false;
 
   // Tail merge tend to expose more if-conversion opportunities.
@@ -431,7 +454,8 @@
 /// predecessor) forms a valid simple shape for ifcvt. It also returns the
 /// number of instructions that the ifcvt would need to duplicate if performed
 /// in Dups.
-bool IfConverter::ValidSimple(BBInfo &TrueBBI, unsigned &Dups) const {
+bool IfConverter::ValidSimple(BBInfo &TrueBBI, unsigned &Dups,
+                              float Prediction, float Confidence) const {
   Dups = 0;
   if (TrueBBI.IsBeingAnalyzed || TrueBBI.IsDone)
     return false;
@@ -441,7 +465,8 @@
 
   if (TrueBBI.BB->pred_size() > 1) {
     if (TrueBBI.CannotBeCopied ||
-        !TII->isProfitableToDupForIfCvt(*TrueBBI.BB, TrueBBI.NonPredSize))
+        !TII->isProfitableToDupForIfCvt(*TrueBBI.BB, TrueBBI.NonPredSize,
+                                        Prediction, Confidence))
       return false;
     Dups = TrueBBI.NonPredSize;
   }
@@ -456,7 +481,8 @@
 /// returns the number of instructions that the ifcvt would need to duplicate
 /// if performed in 'Dups'.
 bool IfConverter::ValidTriangle(BBInfo &TrueBBI, BBInfo &FalseBBI,
-                                bool FalseBranch, unsigned &Dups) const {
+                                bool FalseBranch, unsigned &Dups,
+                                float Prediction, float Confidence) const {
   Dups = 0;
   if (TrueBBI.IsBeingAnalyzed || TrueBBI.IsDone)
     return false;
@@ -478,7 +504,8 @@
           ++Size;
       }
     }
-    if (!TII->isProfitableToDupForIfCvt(*TrueBBI.BB, Size))
+    if (!TII->isProfitableToDupForIfCvt(*TrueBBI.BB, Size,
+                                        Prediction, Confidence))
       return false;
     Dups = Size;
   }
@@ -493,18 +520,6 @@
   return TExit && TExit == FalseBBI.BB;
 }
 
-static
-MachineBasicBlock::iterator firstNonBranchInst(MachineBasicBlock *BB,
-                                               const TargetInstrInfo *TII) {
-  MachineBasicBlock::iterator I = BB->end();
-  while (I != BB->begin()) {
-    --I;
-    if (!I->getDesc().isBranch())
-      break;
-  }
-  return I;
-}
-
 /// ValidDiamond - Returns true if the 'true' and 'false' blocks (along
 /// with their common predecessor) forms a valid diamond shape for ifcvt.
 bool IfConverter::ValidDiamond(BBInfo &TrueBBI, BBInfo &FalseBBI,
@@ -533,64 +548,70 @@
       (TrueBBI.ClobbersPred && FalseBBI.ClobbersPred))
     return false;
 
-  MachineBasicBlock::iterator TI = TrueBBI.BB->begin();
-  MachineBasicBlock::iterator FI = FalseBBI.BB->begin();
+  // Count duplicate instructions at the beginning of the true and false blocks.
+  MachineBasicBlock::iterator TIB = TrueBBI.BB->begin();
+  MachineBasicBlock::iterator FIB = FalseBBI.BB->begin();
   MachineBasicBlock::iterator TIE = TrueBBI.BB->end();
   MachineBasicBlock::iterator FIE = FalseBBI.BB->end();
-  // Skip dbg_value instructions
-  while (TI != TIE && TI->isDebugValue())
-    ++TI;
-  while (FI != FIE && FI->isDebugValue())
-    ++FI;
-  while (TI != TIE && FI != FIE) {
+  while (TIB != TIE && FIB != FIE) {
     // Skip dbg_value instructions. These do not count.
-    if (TI->isDebugValue()) {
-      while (TI != TIE && TI->isDebugValue())
-        ++TI;
-      if (TI == TIE)
+    if (TIB->isDebugValue()) {
+      while (TIB != TIE && TIB->isDebugValue())
+        ++TIB;
+      if (TIB == TIE)
         break;
     }
-    if (FI->isDebugValue()) {
-      while (FI != FIE && FI->isDebugValue())
-        ++FI;
-      if (FI == FIE)
+    if (FIB->isDebugValue()) {
+      while (FIB != FIE && FIB->isDebugValue())
+        ++FIB;
+      if (FIB == FIE)
         break;
     }
-    if (!TI->isIdenticalTo(FI))
+    if (!TIB->isIdenticalTo(FIB))
       break;
     ++Dups1;
-    ++TI;
-    ++FI;
+    ++TIB;
+    ++FIB;
   }
 
-  TI = firstNonBranchInst(TrueBBI.BB, TII);
-  FI = firstNonBranchInst(FalseBBI.BB, TII);
-  MachineBasicBlock::iterator TIB = TrueBBI.BB->begin();
-  MachineBasicBlock::iterator FIB = FalseBBI.BB->begin();
-  // Skip dbg_value instructions at end of the bb's.
-  while (TI != TIB && TI->isDebugValue())
-    --TI;
-  while (FI != FIB && FI->isDebugValue())
-    --FI;
-  while (TI != TIB && FI != FIB) {
+  // Now, in preparation for counting duplicate instructions at the ends of the
+  // blocks, move the end iterators up past any branch instructions.
+  while (TIE != TIB) {
+    --TIE;
+    if (!TIE->getDesc().isBranch())
+      break;
+  }
+  while (FIE != FIB) {
+    --FIE;
+    if (!FIE->getDesc().isBranch())
+      break;
+  }
+
+  // If Dups1 includes all of a block, then don't count duplicate
+  // instructions at the end of the blocks.
+  if (TIB == TIE || FIB == FIE)
+    return true;
+
+  // Count duplicate instructions at the ends of the blocks.
+  while (TIE != TIB && FIE != FIB) {
     // Skip dbg_value instructions. These do not count.
-    if (TI->isDebugValue()) {
-      while (TI != TIB && TI->isDebugValue())
-        --TI;
-      if (TI == TIB)
+    if (TIE->isDebugValue()) {
+      while (TIE != TIB && TIE->isDebugValue())
+        --TIE;
+      if (TIE == TIB)
         break;
     }
-    if (FI->isDebugValue()) {
-      while (FI != FIB && FI->isDebugValue())
-        --FI;
-      if (FI == FIB)
+    if (FIE->isDebugValue()) {
+      while (FIE != FIB && FIE->isDebugValue())
+        --FIE;
+      if (FIE == FIB)
         break;
     }
-    if (!TI->isIdenticalTo(FI))
+    if (!TIE->isIdenticalTo(FIE))
       break;
     ++Dups2;
-    --TI;
-    --FI;
+    --TIE;
+    --FIE;
   }
 
   return true;
@@ -627,6 +648,7 @@
 
   // Then scan all the instructions.
   BBI.NonPredSize = 0;
+  BBI.ExtraCost = 0;
   BBI.ClobbersPred = false;
   for (MachineBasicBlock::iterator I = BBI.BB->begin(), E = BBI.BB->end();
        I != E; ++I) {
@@ -641,9 +663,12 @@
     bool isCondBr = BBI.IsBrAnalyzable && TID.isConditionalBranch();
 
     if (!isCondBr) {
-      if (!isPredicated)
+      if (!isPredicated) {
         BBI.NonPredSize++;
-      else if (!AlreadyPredicated) {
+        unsigned NumOps = TII->getNumMicroOps(&*I, InstrItins);
+        if (NumOps > 1)
+          BBI.ExtraCost += NumOps-1;
+      } else if (!AlreadyPredicated) {
         // FIXME: This instruction is already predicated before the
         // if-conversion pass. It's probably something like a conditional move.
         // Mark this block unpredicable for now.
@@ -765,9 +790,35 @@
   bool TNeedSub = TrueBBI.Predicate.size() > 0;
   bool FNeedSub = FalseBBI.Predicate.size() > 0;
   bool Enqueued = false;
+  
+  // Try to predict the branch, using loop info to guide us.
+  // General heuristics are:
+  //   - backedge -> 90% taken
+  //   - early exit -> 20% taken
+  //   - branch predictor confidence -> 90%
+  float Prediction = 0.5f;
+  float Confidence = 0.9f;
+  MachineLoop *Loop = MLI->getLoopFor(BB);
+  if (Loop) {
+    if (TrueBBI.BB == Loop->getHeader())
+      Prediction = 0.9f;
+    else if (FalseBBI.BB == Loop->getHeader())
+      Prediction = 0.1f;
+
+    MachineLoop *TrueLoop = MLI->getLoopFor(TrueBBI.BB);
+    MachineLoop *FalseLoop = MLI->getLoopFor(FalseBBI.BB);
+    if (!TrueLoop || TrueLoop->getParentLoop() == Loop)
+      Prediction = 0.2f;
+    else if (!FalseLoop || FalseLoop->getParentLoop() == Loop)
+      Prediction = 0.8f;
+  }
+  
   if (CanRevCond && ValidDiamond(TrueBBI, FalseBBI, Dups, Dups2) &&
-      MeetIfcvtSizeLimit(*TrueBBI.BB, TrueBBI.NonPredSize - (Dups + Dups2),
-                         *FalseBBI.BB, FalseBBI.NonPredSize - (Dups + Dups2)) &&
+      MeetIfcvtSizeLimit(*TrueBBI.BB, (TrueBBI.NonPredSize - (Dups + Dups2) +
+                                       TrueBBI.ExtraCost),
+                         *FalseBBI.BB, (FalseBBI.NonPredSize - (Dups + Dups2) +
+                                        FalseBBI.ExtraCost),
+                         Prediction, Confidence) &&
       FeasibilityAnalysis(TrueBBI, BBI.BrCond) &&
       FeasibilityAnalysis(FalseBBI, RevCond)) {
     // Diamond:
@@ -783,8 +834,9 @@
     Enqueued = true;
   }
 
-  if (ValidTriangle(TrueBBI, FalseBBI, false, Dups) &&
-      MeetIfcvtSizeLimit(*TrueBBI.BB, TrueBBI.NonPredSize) &&
+  if (ValidTriangle(TrueBBI, FalseBBI, false, Dups, Prediction, Confidence) &&
+      MeetIfcvtSizeLimit(*TrueBBI.BB, TrueBBI.NonPredSize + TrueBBI.ExtraCost,
+                         Prediction, Confidence) &&
       FeasibilityAnalysis(TrueBBI, BBI.BrCond, true)) {
     // Triangle:
     //   EBB
@@ -797,15 +849,17 @@
     Enqueued = true;
   }
 
-  if (ValidTriangle(TrueBBI, FalseBBI, true, Dups) &&
-      MeetIfcvtSizeLimit(*TrueBBI.BB, TrueBBI.NonPredSize) &&
+  if (ValidTriangle(TrueBBI, FalseBBI, true, Dups, Prediction, Confidence) &&
+      MeetIfcvtSizeLimit(*TrueBBI.BB, TrueBBI.NonPredSize + TrueBBI.ExtraCost,
+                         Prediction, Confidence) &&
       FeasibilityAnalysis(TrueBBI, BBI.BrCond, true, true)) {
     Tokens.push_back(new IfcvtToken(BBI, ICTriangleRev, TNeedSub, Dups));
     Enqueued = true;
   }
 
-  if (ValidSimple(TrueBBI, Dups) &&
-      MeetIfcvtSizeLimit(*TrueBBI.BB, TrueBBI.NonPredSize) &&
+  if (ValidSimple(TrueBBI, Dups, Prediction, Confidence) &&
+      MeetIfcvtSizeLimit(*TrueBBI.BB, TrueBBI.NonPredSize + TrueBBI.ExtraCost,
+                         Prediction, Confidence) &&
       FeasibilityAnalysis(TrueBBI, BBI.BrCond)) {
     // Simple (split, no rejoin):
     //   EBB
@@ -820,22 +874,30 @@
 
   if (CanRevCond) {
     // Try the other path...
-    if (ValidTriangle(FalseBBI, TrueBBI, false, Dups) &&
-        MeetIfcvtSizeLimit(*FalseBBI.BB, FalseBBI.NonPredSize) &&
+    if (ValidTriangle(FalseBBI, TrueBBI, false, Dups,
+                      1.0-Prediction, Confidence) &&
+        MeetIfcvtSizeLimit(*FalseBBI.BB,
+                           FalseBBI.NonPredSize + FalseBBI.ExtraCost,
+                           1.0-Prediction, Confidence) &&
         FeasibilityAnalysis(FalseBBI, RevCond, true)) {
       Tokens.push_back(new IfcvtToken(BBI, ICTriangleFalse, FNeedSub, Dups));
       Enqueued = true;
     }
 
-    if (ValidTriangle(FalseBBI, TrueBBI, true, Dups) &&
-        MeetIfcvtSizeLimit(*FalseBBI.BB, FalseBBI.NonPredSize) &&
+    if (ValidTriangle(FalseBBI, TrueBBI, true, Dups,
+                      1.0-Prediction, Confidence) &&
+        MeetIfcvtSizeLimit(*FalseBBI.BB,
+                           FalseBBI.NonPredSize + FalseBBI.ExtraCost,
+                           1.0-Prediction, Confidence) &&
         FeasibilityAnalysis(FalseBBI, RevCond, true, true)) {
       Tokens.push_back(new IfcvtToken(BBI, ICTriangleFRev, FNeedSub, Dups));
       Enqueued = true;
     }
 
-    if (ValidSimple(FalseBBI, Dups) &&
-        MeetIfcvtSizeLimit(*FalseBBI.BB, FalseBBI.NonPredSize) &&
+    if (ValidSimple(FalseBBI, Dups, 1.0-Prediction, Confidence) &&
+        MeetIfcvtSizeLimit(*FalseBBI.BB,
+                           FalseBBI.NonPredSize + FalseBBI.ExtraCost,
+                           1.0-Prediction, Confidence) &&
         FeasibilityAnalysis(FalseBBI, RevCond)) {
       Tokens.push_back(new IfcvtToken(BBI, ICSimpleFalse, FNeedSub, Dups));
       Enqueued = true;
@@ -1365,6 +1427,9 @@
     MachineInstr *MI = MF.CloneMachineInstr(I);
     ToBBI.BB->insert(ToBBI.BB->end(), MI);
     ToBBI.NonPredSize++;
+    unsigned NumOps = TII->getNumMicroOps(MI, InstrItins);
+    if (NumOps > 1)
+      ToBBI.ExtraCost += NumOps-1;
 
     if (!TII->isPredicated(I) && !MI->isDebugValue()) {
       if (!TII->PredicateInstruction(MI, Cond)) {
@@ -1438,7 +1503,9 @@
   FromBBI.Predicate.clear();
 
   ToBBI.NonPredSize += FromBBI.NonPredSize;
+  ToBBI.ExtraCost += FromBBI.ExtraCost;
   FromBBI.NonPredSize = 0;
+  FromBBI.ExtraCost = 0;
 
   ToBBI.ClobbersPred |= FromBBI.ClobbersPred;
   ToBBI.HasFallThrough = FromBBI.HasFallThrough;

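Two new cost inputs thread through the profitability checks above: ExtraCost, which charges microcoded instructions for their extra micro-ops (so the predicated size becomes NonPredSize + ExtraCost), and a (Prediction, Confidence) pair derived from loop structure. A condensed model of the predictor, with illustrative names; note that, as in the code, the exit-edge tests run after the header tests and can override them:

  // Estimated probability that the branch into the 'true' block is taken.
  float predictBranch(bool trueIsHeader, bool falseIsHeader,
                      bool trueLeavesLoop, bool falseLeavesLoop) {
    float p = 0.5f;                       // no loop info: even odds
    if (trueIsHeader)       p = 0.9f;     // back edges are usually taken
    else if (falseIsHeader) p = 0.1f;
    if (trueLeavesLoop)       p = 0.2f;   // early exits usually aren't taken
    else if (falseLeavesLoop) p = 0.8f;
    return p;                             // used with a fixed 0.9 confidence
  }
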
Modified: llvm/branches/wendling/eh/lib/CodeGen/InlineSpiller.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/InlineSpiller.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/InlineSpiller.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/InlineSpiller.cpp Tue Oct 26 19:48:03 2010
@@ -14,9 +14,11 @@
 
 #define DEBUG_TYPE "spiller"
 #include "Spiller.h"
+#include "LiveRangeEdit.h"
 #include "SplitKit.h"
 #include "VirtRegMap.h"
 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/CodeGen/LiveStackAnalysis.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/MachineLoopInfo.h"
@@ -30,8 +32,10 @@
 
 namespace {
 class InlineSpiller : public Spiller {
+  MachineFunctionPass &pass_;
   MachineFunction &mf_;
   LiveIntervals &lis_;
+  LiveStacks &lss_;
   MachineLoopInfo &loops_;
   VirtRegMap &vrm_;
   MachineFrameInfo &mfi_;
@@ -43,16 +47,11 @@
   SplitAnalysis splitAnalysis_;
 
   // Variables that are valid during spill(), but used by multiple methods.
-  LiveInterval *li_;
-  std::vector<LiveInterval*> *newIntervals_;
+  LiveRangeEdit *edit_;
   const TargetRegisterClass *rc_;
   int stackSlot_;
-  const SmallVectorImpl<LiveInterval*> *spillIs_;
 
-  // Values of the current interval that can potentially remat.
-  SmallPtrSet<VNInfo*, 8> reMattable_;
-
-  // Values in reMattable_ that failed to remat at some point.
+  // Values that failed to remat at some point.
   SmallPtrSet<VNInfo*, 8> usedValues_;
 
   ~InlineSpiller() {}
@@ -61,8 +60,10 @@
   InlineSpiller(MachineFunctionPass &pass,
                 MachineFunction &mf,
                 VirtRegMap &vrm)
-    : mf_(mf),
+    : pass_(pass),
+      mf_(mf),
       lis_(pass.getAnalysis<LiveIntervals>()),
+      lss_(pass.getAnalysis<LiveStacks>()),
       loops_(pass.getAnalysis<MachineLoopInfo>()),
       vrm_(vrm),
       mfi_(*mf.getFrameInfo()),
@@ -73,18 +74,18 @@
       splitAnalysis_(mf, lis_, loops_) {}
 
   void spill(LiveInterval *li,
-             std::vector<LiveInterval*> &newIntervals,
-             SmallVectorImpl<LiveInterval*> &spillIs,
-             SlotIndex *earliestIndex);
+             SmallVectorImpl<LiveInterval*> &newIntervals,
+             SmallVectorImpl<LiveInterval*> &spillIs);
+
+  void spill(LiveRangeEdit &);
 
 private:
   bool split();
 
-  bool allUsesAvailableAt(const MachineInstr *OrigMI, SlotIndex OrigIdx,
-                          SlotIndex UseIdx);
   bool reMaterializeFor(MachineBasicBlock::iterator MI);
   void reMaterializeAll();
 
+  bool coalesceStackAccess(MachineInstr *MI);
   bool foldMemoryOperand(MachineBasicBlock::iterator MI,
                          const SmallVectorImpl<unsigned> &Ops);
   void insertReload(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
@@ -103,83 +104,63 @@
 /// split - try splitting the current interval into pieces that may allocate
 /// separately. Return true if successful.
 bool InlineSpiller::split() {
-  // FIXME: Add intra-MBB splitting.
-  if (lis_.intervalIsInOneMBB(*li_))
-    return false;
-
-  splitAnalysis_.analyze(li_);
+  splitAnalysis_.analyze(&edit_->getParent());
 
+  // Try splitting around loops.
   if (const MachineLoop *loop = splitAnalysis_.getBestSplitLoop()) {
-    SplitEditor(splitAnalysis_, lis_, vrm_).splitAroundLoop(loop);
+    SplitEditor(splitAnalysis_, lis_, vrm_, *edit_)
+      .splitAroundLoop(loop);
     return true;
   }
-  return false;
-}
 
-/// allUsesAvailableAt - Return true if all registers used by OrigMI at
-/// OrigIdx are also available with the same value at UseIdx.
-bool InlineSpiller::allUsesAvailableAt(const MachineInstr *OrigMI,
-                                       SlotIndex OrigIdx,
-                                       SlotIndex UseIdx) {
-  OrigIdx = OrigIdx.getUseIndex();
-  UseIdx = UseIdx.getUseIndex();
-  for (unsigned i = 0, e = OrigMI->getNumOperands(); i != e; ++i) {
-    const MachineOperand &MO = OrigMI->getOperand(i);
-    if (!MO.isReg() || !MO.getReg() || MO.getReg() == li_->reg)
-      continue;
-    // Reserved registers are OK.
-    if (MO.isUndef() || !lis_.hasInterval(MO.getReg()))
-      continue;
-    // We don't want to move any defs.
-    if (MO.isDef())
-      return false;
-    // We cannot depend on virtual registers in spillIs_. They will be spilled.
-    for (unsigned si = 0, se = spillIs_->size(); si != se; ++si)
-      if ((*spillIs_)[si]->reg == MO.getReg())
-        return false;
+  // Try splitting into single block intervals.
+  SplitAnalysis::BlockPtrSet blocks;
+  if (splitAnalysis_.getMultiUseBlocks(blocks)) {
+    SplitEditor(splitAnalysis_, lis_, vrm_, *edit_)
+      .splitSingleBlocks(blocks);
+    return true;
+  }
 
-    LiveInterval &LI = lis_.getInterval(MO.getReg());
-    const VNInfo *OVNI = LI.getVNInfoAt(OrigIdx);
-    if (!OVNI)
-      continue;
-    if (OVNI != LI.getVNInfoAt(UseIdx))
-      return false;
+  // Try splitting inside a basic block.
+  if (const MachineBasicBlock *MBB = splitAnalysis_.getBlockForInsideSplit()) {
+    SplitEditor(splitAnalysis_, lis_, vrm_, *edit_)
+      .splitInsideBlock(MBB);
+    return true;
   }
-  return true;
+
+  return false;
 }
 
-/// reMaterializeFor - Attempt to rematerialize li_->reg before MI instead of
+/// reMaterializeFor - Attempt to rematerialize edit_->getReg() before MI instead of
 /// reloading it.
 bool InlineSpiller::reMaterializeFor(MachineBasicBlock::iterator MI) {
   SlotIndex UseIdx = lis_.getInstructionIndex(MI).getUseIndex();
-  VNInfo *OrigVNI = li_->getVNInfoAt(UseIdx);
+  VNInfo *OrigVNI = edit_->getParent().getVNInfoAt(UseIdx);
+
   if (!OrigVNI) {
     DEBUG(dbgs() << "\tadding <undef> flags: ");
     for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
       MachineOperand &MO = MI->getOperand(i);
-      if (MO.isReg() && MO.isUse() && MO.getReg() == li_->reg)
+      if (MO.isReg() && MO.isUse() && MO.getReg() == edit_->getReg())
         MO.setIsUndef();
     }
     DEBUG(dbgs() << UseIdx << '\t' << *MI);
     return true;
   }
-  if (!reMattable_.count(OrigVNI)) {
-    DEBUG(dbgs() << "\tusing non-remat valno " << OrigVNI->id << ": "
-                 << UseIdx << '\t' << *MI);
-    return false;
-  }
-  MachineInstr *OrigMI = lis_.getInstructionFromIndex(OrigVNI->def);
-  if (!allUsesAvailableAt(OrigMI, OrigVNI->def, UseIdx)) {
+
+  LiveRangeEdit::Remat RM = edit_->canRematerializeAt(OrigVNI, UseIdx, false,
+                                                      lis_);
+  if (!RM) {
     usedValues_.insert(OrigVNI);
     DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << *MI);
     return false;
   }
 
-  // If the instruction also writes li_->reg, it had better not require the same
-  // register for uses and defs.
+  // If the instruction also writes edit_->getReg(), it had better not require
+  // the same register for uses and defs.
   bool Reads, Writes;
   SmallVector<unsigned, 8> Ops;
-  tie(Reads, Writes) = MI->readsWritesVirtualRegister(li_->reg, &Ops);
+  tie(Reads, Writes) = MI->readsWritesVirtualRegister(edit_->getReg(), &Ops);
   if (Writes) {
     for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
       MachineOperand &MO = MI->getOperand(Ops[i]);
@@ -192,61 +173,43 @@
   }
 
   // Allocate a new register for the remat.
-  unsigned NewVReg = mri_.createVirtualRegister(rc_);
-  vrm_.grow();
-  LiveInterval &NewLI = lis_.getOrCreateInterval(NewVReg);
+  LiveInterval &NewLI = edit_->create(mri_, lis_, vrm_);
   NewLI.markNotSpillable();
-  newIntervals_->push_back(&NewLI);
 
   // Finally we can rematerialize OrigMI before MI.
-  MachineBasicBlock &MBB = *MI->getParent();
-  tii_.reMaterialize(MBB, MI, NewLI.reg, 0, OrigMI, tri_);
-  MachineBasicBlock::iterator RematMI = MI;
-  SlotIndex DefIdx = lis_.InsertMachineInstrInMaps(--RematMI).getDefIndex();
-  DEBUG(dbgs() << "\tremat:  " << DefIdx << '\t' << *RematMI);
+  SlotIndex DefIdx = edit_->rematerializeAt(*MI->getParent(), MI, NewLI.reg, RM,
+                                            lis_, tii_, tri_);
+  DEBUG(dbgs() << "\tremat:  " << DefIdx << '\n');
 
   // Replace operands
   for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
     MachineOperand &MO = MI->getOperand(Ops[i]);
-    if (MO.isReg() && MO.isUse() && MO.getReg() == li_->reg) {
-      MO.setReg(NewVReg);
+    if (MO.isReg() && MO.isUse() && MO.getReg() == edit_->getReg()) {
+      MO.setReg(NewLI.reg);
       MO.setIsKill();
     }
   }
   DEBUG(dbgs() << "\t        " << UseIdx << '\t' << *MI);
 
-  VNInfo *DefVNI = NewLI.getNextValue(DefIdx, 0, true,
-                                       lis_.getVNInfoAllocator());
+  VNInfo *DefVNI = NewLI.getNextValue(DefIdx, 0, lis_.getVNInfoAllocator());
   NewLI.addRange(LiveRange(DefIdx, UseIdx.getDefIndex(), DefVNI));
   DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
   return true;
 }
 
-/// reMaterializeAll - Try to rematerialize as many uses of li_ as possible,
+/// reMaterializeAll - Try to rematerialize as many uses as possible,
 /// and trim the live ranges after.
 void InlineSpiller::reMaterializeAll() {
   // Do a quick scan of the interval values to find if any are remattable.
-  reMattable_.clear();
-  usedValues_.clear();
-  for (LiveInterval::const_vni_iterator I = li_->vni_begin(),
-       E = li_->vni_end(); I != E; ++I) {
-    VNInfo *VNI = *I;
-    if (VNI->isUnused() || !VNI->isDefAccurate())
-      continue;
-    MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
-    if (!DefMI || !tii_.isTriviallyReMaterializable(DefMI))
-      continue;
-    reMattable_.insert(VNI);
-  }
-
-  // Often, no defs are remattable.
-  if (reMattable_.empty())
+  if (!edit_->anyRematerializable(lis_, tii_, 0))
     return;
 
-  // Try to remat before all uses of li_->reg.
+  usedValues_.clear();
+
+  // Try to remat before all uses of edit_->getReg().
   bool anyRemat = false;
   for (MachineRegisterInfo::use_nodbg_iterator
-       RI = mri_.use_nodbg_begin(li_->reg);
+       RI = mri_.use_nodbg_begin(edit_->getReg());
        MachineInstr *MI = RI.skipInstruction();)
      anyRemat |= reMaterializeFor(MI);
 
@@ -255,34 +218,36 @@
 
   // Remove any values that were completely rematted.
   bool anyRemoved = false;
-  for (SmallPtrSet<VNInfo*, 8>::iterator I = reMattable_.begin(),
-       E = reMattable_.end(); I != E; ++I) {
+  for (LiveInterval::vni_iterator I = edit_->getParent().vni_begin(),
+       E = edit_->getParent().vni_end(); I != E; ++I) {
     VNInfo *VNI = *I;
-    if (VNI->hasPHIKill() || usedValues_.count(VNI))
+    if (VNI->hasPHIKill() || !edit_->didRematerialize(VNI) ||
+        usedValues_.count(VNI))
       continue;
     MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
     DEBUG(dbgs() << "\tremoving dead def: " << VNI->def << '\t' << *DefMI);
     lis_.RemoveMachineInstrFromMaps(DefMI);
     vrm_.RemoveMachineInstrFromMaps(DefMI);
     DefMI->eraseFromParent();
-    li_->removeValNo(VNI);
+    VNI->def = SlotIndex();
     anyRemoved = true;
   }
 
   if (!anyRemoved)
     return;
 
-  // Removing values may cause debug uses where li_ is not live.
-  for (MachineRegisterInfo::use_iterator RI = mri_.use_begin(li_->reg);
+  // Removing values may cause debug uses where parent is not live.
+  for (MachineRegisterInfo::use_iterator RI = mri_.use_begin(edit_->getReg());
        MachineInstr *MI = RI.skipInstruction();) {
     if (!MI->isDebugValue())
       continue;
-    // Try to preserve the debug value if li_ is live immediately after it.
+    // Try to preserve the debug value if parent is live immediately after it.
     MachineBasicBlock::iterator NextMI = MI;
     ++NextMI;
     if (NextMI != MI->getParent()->end() && !lis_.isNotInMIMap(NextMI)) {
-      SlotIndex NearIdx = lis_.getInstructionIndex(NextMI);
-      if (li_->liveAt(NearIdx))
+      SlotIndex Idx = lis_.getInstructionIndex(NextMI);
+      VNInfo *VNI = edit_->getParent().getVNInfoAt(Idx);
+      if (VNI && (VNI->hasPHIKill() || usedValues_.count(VNI)))
         continue;
     }
     DEBUG(dbgs() << "Removing debug info due to remat:" << "\t" << *MI);
@@ -290,6 +255,24 @@
   }
 }
 
+/// If MI is a load or store of stackSlot_, it can be removed.
+bool InlineSpiller::coalesceStackAccess(MachineInstr *MI) {
+  int FI = 0;
+  unsigned reg;
+  if (!(reg = tii_.isLoadFromStackSlot(MI, FI)) &&
+      !(reg = tii_.isStoreToStackSlot(MI, FI)))
+    return false;
+
+  // We have a stack access. Is it the right register and slot?
+  if (reg != edit_->getReg() || FI != stackSlot_)
+    return false;
+
+  DEBUG(dbgs() << "Coalescing stack access: " << *MI);
+  lis_.RemoveMachineInstrFromMaps(MI);
+  MI->eraseFromParent();
+  return true;
+}
+
 /// foldMemoryOperand - Try folding stack slot references in Ops into MI.
 /// Return true on success, and MI will be erased.
 bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
@@ -330,7 +313,7 @@
   SlotIndex LoadIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
   vrm_.addSpillSlotUse(stackSlot_, MI);
   DEBUG(dbgs() << "\treload:  " << LoadIdx << '\t' << *MI);
-  VNInfo *LoadVNI = NewLI.getNextValue(LoadIdx, 0, true,
+  VNInfo *LoadVNI = NewLI.getNextValue(LoadIdx, 0,
                                        lis_.getVNInfoAllocator());
   NewLI.addRange(LiveRange(LoadIdx, Idx, LoadVNI));
 }
@@ -345,23 +328,23 @@
   SlotIndex StoreIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
   vrm_.addSpillSlotUse(stackSlot_, MI);
   DEBUG(dbgs() << "\tspilled: " << StoreIdx << '\t' << *MI);
-  VNInfo *StoreVNI = NewLI.getNextValue(Idx, 0, true,
-                                        lis_.getVNInfoAllocator());
+  VNInfo *StoreVNI = NewLI.getNextValue(Idx, 0, lis_.getVNInfoAllocator());
   NewLI.addRange(LiveRange(Idx, StoreIdx, StoreVNI));
 }
 
 void InlineSpiller::spill(LiveInterval *li,
-                          std::vector<LiveInterval*> &newIntervals,
-                          SmallVectorImpl<LiveInterval*> &spillIs,
-                          SlotIndex *earliestIndex) {
-  DEBUG(dbgs() << "Inline spilling " << *li << "\n");
-  assert(li->isSpillable() && "Attempting to spill already spilled value.");
-  assert(!li->isStackSlot() && "Trying to spill a stack slot.");
-
-  li_ = li;
-  newIntervals_ = &newIntervals;
-  rc_ = mri_.getRegClass(li->reg);
-  spillIs_ = &spillIs;
+                          SmallVectorImpl<LiveInterval*> &newIntervals,
+                          SmallVectorImpl<LiveInterval*> &spillIs) {
+  LiveRangeEdit edit(*li, newIntervals, spillIs);
+  spill(edit);
+}
+
+void InlineSpiller::spill(LiveRangeEdit &edit) {
+  edit_ = &edit;
+  DEBUG(dbgs() << "Inline spilling " << edit.getParent() << "\n");
+  assert(edit.getParent().isSpillable() &&
+         "Attempting to spill already spilled value.");
+  assert(!edit.getParent().isStackSlot() && "Trying to spill a stack slot.");
 
   if (split())
     return;
@@ -369,13 +352,20 @@
   reMaterializeAll();
 
   // Remat may handle everything.
-  if (li_->empty())
+  if (edit_->getParent().empty())
     return;
 
-  stackSlot_ = vrm_.assignVirt2StackSlot(li->reg);
+  rc_ = mri_.getRegClass(edit.getReg());
+  stackSlot_ = edit.assignStackSlot(vrm_);
+
+  // Update LiveStacks now that we are committed to spilling.
+  LiveInterval &stacklvr = lss_.getOrCreateInterval(stackSlot_, rc_);
+  if (!stacklvr.hasAtLeastOneValue())
+    stacklvr.getNextValue(SlotIndex(), 0, lss_.getVNInfoAllocator());
+  stacklvr.MergeRangesInAsValue(edit_->getParent(), stacklvr.getValNumInfo(0));
 
   // Iterate over instructions using register.
-  for (MachineRegisterInfo::reg_iterator RI = mri_.reg_begin(li->reg);
+  for (MachineRegisterInfo::reg_iterator RI = mri_.reg_begin(edit.getReg());
        MachineInstr *MI = RI.skipInstruction();) {
 
     // Debug values are not allowed to affect codegen.
@@ -396,10 +386,14 @@
       continue;
     }
 
+    // Stack slot accesses may coalesce away.
+    if (coalesceStackAccess(MI))
+      continue;
+
     // Analyze instruction.
     bool Reads, Writes;
     SmallVector<unsigned, 8> Ops;
-    tie(Reads, Writes) = MI->readsWritesVirtualRegister(li->reg, &Ops);
+    tie(Reads, Writes) = MI->readsWritesVirtualRegister(edit.getReg(), &Ops);
 
     // Attempt to fold memory ops.
     if (foldMemoryOperand(MI, Ops))
@@ -407,9 +401,7 @@
 
     // Allocate interval around instruction.
     // FIXME: Infer regclass from instruction alone.
-    unsigned NewVReg = mri_.createVirtualRegister(rc_);
-    vrm_.grow();
-    LiveInterval &NewLI = lis_.getOrCreateInterval(NewVReg);
+    LiveInterval &NewLI = edit.create(mri_, lis_, vrm_);
     NewLI.markNotSpillable();
 
     if (Reads)
@@ -419,7 +411,7 @@
     bool hasLiveDef = false;
     for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
       MachineOperand &MO = MI->getOperand(Ops[i]);
-      MO.setReg(NewVReg);
+      MO.setReg(NewLI.reg);
       if (MO.isUse()) {
         if (!MI->isRegTiedToDefOperand(Ops[i]))
           MO.setIsKill();
@@ -434,6 +426,5 @@
       insertSpill(NewLI, MI);
 
     DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
-    newIntervals.push_back(&NewLI);
   }
 }
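
The "Update LiveStacks" step above folds every live range of the spilled
register into the stack slot's interval under a single value number: all
spills and reloads of one register share one slot, so the slot is live
wherever the register was. A toy model of that merge, deliberately outside
any LLVM API (every name below is illustrative only):

    #include <algorithm>
    #include <utility>
    #include <vector>
    typedef std::pair<int, int> Range;            // half-open [start, end)

    // Merge Reg's ranges into Slot's. Overlaps simply coalesce, because the
    // slot interval carries a single value number.
    static std::vector<Range> mergeInto(std::vector<Range> Slot,
                                        const std::vector<Range> &Reg) {
      Slot.insert(Slot.end(), Reg.begin(), Reg.end());
      std::sort(Slot.begin(), Slot.end());
      std::vector<Range> Out;
      for (size_t i = 0, e = Slot.size(); i != e; ++i) {
        if (!Out.empty() && Slot[i].first <= Out.back().second)
          Out.back().second = std::max(Out.back().second, Slot[i].second);
        else
          Out.push_back(Slot[i]);
      }
      return Out;
    }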

Modified: llvm/branches/wendling/eh/lib/CodeGen/IntrinsicLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/IntrinsicLowering.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/IntrinsicLowering.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/IntrinsicLowering.cpp Tue Oct 26 19:48:03 2010
@@ -85,9 +85,11 @@
 }
 
 // VisualStudio defines setjmp as _setjmp
-#if defined(_MSC_VER) && defined(setjmp)
-#define setjmp_undefined_for_visual_studio
-#undef setjmp
+#if defined(_MSC_VER) && defined(setjmp) && \
+                         !defined(setjmp_undefined_for_msvc)
+#  pragma push_macro("setjmp")
+#  undef setjmp
+#  define setjmp_undefined_for_msvc
 #endif
 
 void IntrinsicLowering::AddPrototypes(Module &M) {
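
A note on the idiom: push_macro preserves the original definition, where the
old plain #undef simply lost it. A minimal sketch of the complete pattern,
assuming the matching pop half sits at the end of this file (it is not part
of this hunk):

    #if defined(_MSC_VER) && defined(setjmp) && \
                             !defined(setjmp_undefined_for_msvc)
    #  pragma push_macro("setjmp")    // save the macro definition
    #  undef setjmp                   // so 'setjmp' works as an identifier
    #  define setjmp_undefined_for_msvc
    #endif

    /* ... declarations that use plain 'setjmp' as a name ... */

    #if defined(_MSC_VER) && defined(setjmp_undefined_for_msvc)
    #  pragma pop_macro("setjmp")     // restore the saved definition
    #  undef setjmp_undefined_for_msvc
    #endif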

Modified: llvm/branches/wendling/eh/lib/CodeGen/LLVMTargetMachine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/LLVMTargetMachine.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/LLVMTargetMachine.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/LLVMTargetMachine.cpp Tue Oct 26 19:48:03 2010
@@ -30,6 +30,7 @@
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/FormattedStream.h"
+#include "llvm/Support/StandardPasses.h"
 using namespace llvm;
 
 namespace llvm {
@@ -85,7 +86,7 @@
   case cl::BOU_UNSET: return TargetMachine::getAsmVerbosityDefault();
   case cl::BOU_TRUE:  return true;
   case cl::BOU_FALSE: return false;
-  }      
+  }
 }
 
 // Enable or disable FastISel. Both options are needed, because
@@ -160,6 +161,7 @@
     AsmStreamer.reset(getTarget().createObjectStreamer(TargetTriple, *Context,
                                                        *TAB, Out, MCE,
                                                        hasMCRelaxAll()));
+    AsmStreamer.get()->InitSections();
     break;
   }
   case CGFT_Null:
@@ -176,12 +178,12 @@
   FunctionPass *Printer = getTarget().createAsmPrinter(*this, *AsmStreamer);
   if (Printer == 0)
     return true;
-  
+
   // If successful, createAsmPrinter took ownership of AsmStreamer.
   AsmStreamer.take();
-  
+
   PM.add(Printer);
-  
+
   // Make sure the code model is set.
   setCodeModelForStatic();
   PM.add(createGCInfoDeleter());
@@ -200,7 +202,7 @@
                                                    bool DisableVerify) {
   // Make sure the code model is set.
   setCodeModelForJIT();
-  
+
   // Add common CodeGen passes.
   MCContext *Ctx = 0;
   if (addCommonCodeGenPasses(PM, OptLevel, DisableVerify, Ctx))
@@ -236,13 +238,12 @@
 }
 
 static void printAndVerify(PassManagerBase &PM,
-                           const char *Banner,
-                           bool allowDoubleDefs = false) {
+                           const char *Banner) {
   if (PrintMachineCode)
     PM.add(createMachineFunctionPrinterPass(dbgs(), Banner));
 
   if (VerifyMachineCode)
-    PM.add(createMachineVerifierPass(allowDoubleDefs));
+    PM.add(createMachineVerifierPass());
 }
 
 /// addCommonCodeGenPasses - Add standard LLVM codegen passes used for both
@@ -254,6 +255,9 @@
                                                MCContext *&OutContext) {
   // Standard LLVM-Level Passes.
 
+  // Basic AliasAnalysis support.
+  createStandardAliasAnalysisPasses(&PM);
+
   // Before running any passes, run the verifier to determine if the input
   // coming from the front-end and/or optimizer is valid.
   if (!DisableVerify)
@@ -272,6 +276,11 @@
       PM.add(createPrintFunctionPass("\n\n*** Code after LSR ***\n", &dbgs()));
   }
 
+  PM.add(createGCLoweringPass());
+
+  // Make sure that no unreachable blocks are instruction selected.
+  PM.add(createUnreachableBlockEliminationPass());
+
   // Turn exception handling constructs into something the code generators can
   // handle.
   switch (getMCAsmInfo()->getExceptionHandlingType()) {
@@ -283,22 +292,19 @@
     // pad is shared by multiple invokes and is also a target of a normal
     // edge from elsewhere.
     PM.add(createSjLjEHPass(getTargetLowering()));
-    PM.add(createDwarfEHPass(this, OptLevel==CodeGenOpt::None));
-    break;
+    // FALLTHROUGH
   case ExceptionHandling::Dwarf:
 ///EH-FIXME: Try to get by without the DwarfEHPass.
-///EH-FIXME:    PM.add(createDwarfEHPass(this, OptLevel==CodeGenOpt::None));
+///EH-FIXME:    PM.add(createDwarfEHPass(this));
     break;
   case ExceptionHandling::None:
     PM.add(createLowerInvokePass(getTargetLowering()));
+
+    // The lower invoke pass may create unreachable code. Remove it.
+    PM.add(createUnreachableBlockEliminationPass());
     break;
   }
 
-  PM.add(createGCLoweringPass());
-
-  // Make sure that no unreachable blocks are instruction selected.
-  PM.add(createUnreachableBlockEliminationPass());
-
   if (OptLevel != CodeGenOpt::None && !DisableCGP)
     PM.add(createCodeGenPreparePass(getTargetLowering()));
 
@@ -317,13 +323,12 @@
     PM.add(createVerifierPass());
 
   // Standard Lower-Level Passes.
-  
+
   // Install a MachineModuleInfo class, which is an immutable pass that holds
   // all the per-module stuff we're generating, including MCContext.
   MachineModuleInfo *MMI = new MachineModuleInfo(*getMCAsmInfo());
   PM.add(MMI);
   OutContext = &MMI->getContext(); // Return the MCContext specifically by-ref.
-  
 
   // Set up a MachineFunction for the rest of CodeGen to work on.
   PM.add(new MachineFunctionAnalysis(*this, OptLevel));
@@ -338,44 +343,43 @@
     return true;
 
   // Print the instruction selected machine code...
-  printAndVerify(PM, "After Instruction Selection",
-                 /* allowDoubleDefs= */ true);
+  printAndVerify(PM, "After Instruction Selection");
 
   // Optimize PHIs before DCE: removing dead PHI cycles may make more
   // instructions dead.
   if (OptLevel != CodeGenOpt::None)
     PM.add(createOptimizePHIsPass());
 
+  // If the target requests it, assign local variables to stack slots relative
+  // to one another and simplify frame index references where possible.
+  PM.add(createLocalStackSlotAllocationPass());
+
   if (OptLevel != CodeGenOpt::None) {
     // With optimization, dead code should already be eliminated. However
     // there is one known exception: lowered code for arguments that are only
     // used by tail calls, where the tail calls reuse the incoming stack
     // arguments directly (see t11 in test/CodeGen/X86/sibcall.ll).
     PM.add(createDeadMachineInstructionElimPass());
-    printAndVerify(PM, "After codegen DCE pass",
-                   /* allowDoubleDefs= */ true);
+    printAndVerify(PM, "After codegen DCE pass");
 
-    PM.add(createOptimizeExtsPass());
+    PM.add(createPeepholeOptimizerPass());
     if (!DisableMachineLICM)
       PM.add(createMachineLICMPass());
     PM.add(createMachineCSEPass());
     if (!DisableMachineSink)
       PM.add(createMachineSinkingPass());
-    printAndVerify(PM, "After Machine LICM, CSE and Sinking passes",
-                   /* allowDoubleDefs= */ true);
+    printAndVerify(PM, "After Machine LICM, CSE and Sinking passes");
   }
 
   // Pre-ra tail duplication.
   if (OptLevel != CodeGenOpt::None && !DisableEarlyTailDup) {
     PM.add(createTailDuplicatePass(true));
-    printAndVerify(PM, "After Pre-RegAlloc TailDuplicate",
-                   /* allowDoubleDefs= */ true);
+    printAndVerify(PM, "After Pre-RegAlloc TailDuplicate");
   }
 
   // Run pre-ra passes.
   if (addPreRegAlloc(PM, OptLevel))
-    printAndVerify(PM, "After PreRegAlloc passes",
-                   /* allowDoubleDefs= */ true);
+    printAndVerify(PM, "After PreRegAlloc passes");
 
   // Perform register allocation.
   PM.add(createRegisterAllocator(OptLevel));

Modified: llvm/branches/wendling/eh/lib/CodeGen/LiveInterval.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/LiveInterval.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/LiveInterval.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/LiveInterval.cpp Tue Oct 26 19:48:03 2010
@@ -30,58 +30,19 @@
 #include <algorithm>
 using namespace llvm;
 
-// An example for liveAt():
-//
-// this = [1,4), liveAt(0) will return false. The instruction defining this
-// spans slots [0,3]. The interval belongs to a spilled definition of the
-// variable it represents. This is because slot 1 is used (def slot) and spans
-// up to slot 3 (store slot).
-//
-bool LiveInterval::liveAt(SlotIndex I) const {
-  Ranges::const_iterator r = std::upper_bound(ranges.begin(), ranges.end(), I);
-
-  if (r == ranges.begin())
-    return false;
-
-  --r;
-  return r->contains(I);
+// CompEnd - Compare LiveRange ends.
+namespace {
+struct CompEnd {
+  bool operator()(const LiveRange &A, const LiveRange &B) const {
+    return A.end < B.end;
+  }
+};
 }
 
-// liveBeforeAndAt - Check if the interval is live at the index and the index
-// just before it. If index is liveAt, check if it starts a new live range.
-// If it does, then check if the previous live range ends at index-1.
-bool LiveInterval::liveBeforeAndAt(SlotIndex I) const {
-  Ranges::const_iterator r = std::upper_bound(ranges.begin(), ranges.end(), I);
-
-  if (r == ranges.begin())
-    return false;
-
-  --r;
-  if (!r->contains(I))
-    return false;
-  if (I != r->start)
-    return true;
-  // I is the start of a live range. Check if the previous live range ends
-  // at I-1.
-  if (r == ranges.begin())
-    return false;
-  return r->end == I;
-}
-
-/// killedAt - Return true if a live range ends at index. Note that the kill
-/// point is not contained in the half-open live range. It is usually the
-/// getDefIndex() slot following its last use.
-bool LiveInterval::killedAt(SlotIndex I) const {
-  Ranges::const_iterator r = std::lower_bound(ranges.begin(), ranges.end(), I);
-
-  // Now r points to the first interval with start >= I, or ranges.end().
-  if (r == ranges.begin())
-    return false;
-
-  --r;
-  // Now r points to the last interval with end <= I.
-  // r->end is the kill point.
-  return r->end == I;
+LiveInterval::iterator LiveInterval::find(SlotIndex Pos) {
+  assert(Pos.isValid() && "Cannot search for an invalid index");
+  return std::upper_bound(begin(), end(), LiveRange(SlotIndex(), Pos, 0),
+                          CompEnd());
 }
 
 /// killedInRange - Return true if the interval has kills in [Start,End).
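
The new find() above also replaces the hand-rolled upper_bound searches that
later hunks delete: with ranges sorted and non-overlapping, the first range
whose end is greater than Pos is the only candidate that can contain Pos. A
standalone illustration of the trick, plain STL rather than LLVM code:

    #include <algorithm>
    #include <vector>

    struct R { int start, end; };                 // half-open [start, end)
    struct LessEnd {
      bool operator()(const R &A, const R &B) const { return A.end < B.end; }
    };

    // Return the range containing Pos, or 0 if there is none.
    static const R *findContaining(const std::vector<R> &Rs, int Pos) {
      R Key = { 0, Pos };
      std::vector<R>::const_iterator I =
          std::upper_bound(Rs.begin(), Rs.end(), Key, LessEnd());
      return (I != Rs.end() && I->start <= Pos) ? &*I : 0;
    }
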
@@ -180,6 +141,42 @@
   }
 }
 
+/// RenumberValues - Renumber all values in order of appearance and delete the
+/// remaining unused values.
+void LiveInterval::RenumberValues(LiveIntervals &lis) {
+  SmallPtrSet<VNInfo*, 8> Seen;
+  bool seenPHIDef = false;
+  valnos.clear();
+  for (const_iterator I = begin(), E = end(); I != E; ++I) {
+    VNInfo *VNI = I->valno;
+    if (!Seen.insert(VNI))
+      continue;
+    assert(!VNI->isUnused() && "Unused valno used by live range");
+    VNI->id = (unsigned)valnos.size();
+    valnos.push_back(VNI);
+    VNI->setHasPHIKill(false);
+    if (VNI->isPHIDef())
+      seenPHIDef = true;
+  }
+
+  // Recompute phi kill flags.
+  if (!seenPHIDef)
+    return;
+  for (const_vni_iterator I = vni_begin(), E = vni_end(); I != E; ++I) {
+    VNInfo *VNI = *I;
+    if (!VNI->isPHIDef())
+      continue;
+    const MachineBasicBlock *PHIBB = lis.getMBBFromIndex(VNI->def);
+    assert(PHIBB && "No basic block for phi-def");
+    for (MachineBasicBlock::const_pred_iterator PI = PHIBB->pred_begin(),
+         PE = PHIBB->pred_end(); PI != PE; ++PI) {
+      VNInfo *KVNI = getVNInfoAt(lis.getMBBEndIdx(*PI).getPrevSlot());
+      if (KVNI)
+        KVNI->setHasPHIKill(true);
+    }
+  }
+}
+
 /// extendIntervalEndTo - This method is used when we want to extend the range
 /// specified by I to end at the specified endpoint.  To do this, we should
 /// merge and eliminate all ranges that this will overlap with.  The iterator is
@@ -189,7 +186,7 @@
   VNInfo *ValNo = I->valno;
 
   // Search for the first interval that we can't merge with.
-  Ranges::iterator MergeTo = next(I);
+  Ranges::iterator MergeTo = llvm::next(I);
   for (; MergeTo != ranges.end() && NewEnd >= MergeTo->end; ++MergeTo) {
     assert(MergeTo->valno == ValNo && "Cannot merge with differing values!");
   }
@@ -198,11 +195,11 @@
   I->end = std::max(NewEnd, prior(MergeTo)->end);
 
   // Erase any dead ranges.
-  ranges.erase(next(I), MergeTo);
+  ranges.erase(llvm::next(I), MergeTo);
 
   // If the newly formed range now touches the range after it and if they have
   // the same value number, merge the two ranges into one range.
-  Ranges::iterator Next = next(I);
+  Ranges::iterator Next = llvm::next(I);
   if (Next != ranges.end() && Next->start <= I->end && Next->valno == ValNo) {
     I->end = Next->end;
     ranges.erase(Next);
@@ -241,7 +238,7 @@
     MergeTo->end = I->end;
   }
 
-  ranges.erase(next(MergeTo), next(I));
+  ranges.erase(llvm::next(MergeTo), llvm::next(I));
   return MergeTo;
 }
 
@@ -294,25 +291,14 @@
   return ranges.insert(it, LR);
 }
 
-/// isInOneLiveRange - Return true if the range specified is entirely in 
-/// a single LiveRange of the live interval.
-bool LiveInterval::isInOneLiveRange(SlotIndex Start, SlotIndex End) {
-  Ranges::iterator I = std::upper_bound(ranges.begin(), ranges.end(), Start);
-  if (I == ranges.begin())
-    return false;
-  --I;
-  return I->containsRange(Start, End);
-}
-
 
 /// removeRange - Remove the specified range from this interval.  Note that
 /// the range must be in a single LiveRange in its entirety.
 void LiveInterval::removeRange(SlotIndex Start, SlotIndex End,
                                bool RemoveDeadValNo) {
   // Find the LiveRange containing this span.
-  Ranges::iterator I = std::upper_bound(ranges.begin(), ranges.end(), Start);
-  assert(I != ranges.begin() && "Range is not in interval!");
-  --I;
+  Ranges::iterator I = find(Start);
+  assert(I != ranges.end() && "Range is not in interval!");
   assert(I->containsRange(Start, End) && "Range is not entirely in interval!");
 
   // If the span we are removing is at the start of the LiveRange, adjust it.
@@ -351,7 +337,7 @@
   I->end = Start;   // Trim the old interval.
 
   // Insert the new one.
-  ranges.insert(next(I), LiveRange(End, OldEnd, ValNo));
+  ranges.insert(llvm::next(I), LiveRange(End, OldEnd, ValNo));
 }
 
 /// removeValNo - Remove all the ranges defined by the specified value#.
@@ -369,32 +355,6 @@
   markValNoForDeletion(ValNo);
 }
 
-/// getLiveRangeContaining - Return the live range that contains the
-/// specified index, or null if there is none.
-LiveInterval::const_iterator 
-LiveInterval::FindLiveRangeContaining(SlotIndex Idx) const {
-  const_iterator It = std::upper_bound(begin(), end(), Idx);
-  if (It != ranges.begin()) {
-    --It;
-    if (It->contains(Idx))
-      return It;
-  }
-
-  return end();
-}
-
-LiveInterval::iterator 
-LiveInterval::FindLiveRangeContaining(SlotIndex Idx) {
-  iterator It = std::upper_bound(begin(), end(), Idx);
-  if (It != begin()) {
-    --It;
-    if (It->contains(Idx))
-      return It;
-  }
-  
-  return end();
-}
-
 /// findDefinedVNInfo - Find the VNInfo defined by the specified
 /// index (register interval).
 VNInfo *LiveInterval::findDefinedVNInfoForRegInt(SlotIndex Idx) const {
@@ -407,27 +367,16 @@
   return 0;
 }
 
-/// findDefinedVNInfo - Find the VNInfo defined by the specified
-/// register (stack interval).
-VNInfo *LiveInterval::findDefinedVNInfoForStackInt(unsigned reg) const {
-  for (LiveInterval::const_vni_iterator i = vni_begin(), e = vni_end();
-       i != e; ++i) {
-    if ((*i)->getReg() == reg)
-      return *i;
-  }
-  return 0;
-}
-
 /// join - Join two live intervals (this, and other) together.  This applies
 /// mappings to the value numbers in the LHS/RHS intervals as specified.  If
 /// the intervals are not joinable, this aborts.
 void LiveInterval::join(LiveInterval &Other,
                         const int *LHSValNoAssignments,
-                        const int *RHSValNoAssignments, 
+                        const int *RHSValNoAssignments,
                         SmallVector<VNInfo*, 16> &NewVNInfo,
                         MachineRegisterInfo *MRI) {
   // Determine if any of our live range values are mapped.  This is uncommon, so
-  // we want to avoid the interval scan if not. 
+  // we want to avoid the interval scan if not.
   bool MustMapCurValNos = false;
   unsigned NumVals = getNumValNums();
   unsigned NumNewVals = NewVNInfo.size();
@@ -447,7 +396,7 @@
     ++OutIt;
     for (iterator I = OutIt, E = end(); I != E; ++I) {
       OutIt->valno = NewVNInfo[LHSValNoAssignments[I->valno->id]];
-      
+
       // If this live range has the same value # as its immediate predecessor,
       // and if they are neighbors, remove one LiveRange.  This happens when we
       // have [0,3:0)[4,7:1) and map 0/1 onto the same value #.
@@ -458,12 +407,12 @@
           OutIt->start = I->start;
           OutIt->end = I->end;
         }
-        
+
         // Didn't merge, on to the next one.
         ++OutIt;
       }
     }
-    
+
     // If we merge some live ranges, chop off the end.
     ranges.erase(OutIt, end());
   }
@@ -481,7 +430,7 @@
     if (VNI) {
       if (NumValNos >= NumVals)
         valnos.push_back(VNI);
-      else 
+      else
         valnos[NumValNos] = VNI;
       VNI->id = NumValNos++;  // Renumber val#.
     }
@@ -500,25 +449,13 @@
   }
 
   ComputeJoinedWeight(Other);
-
-  // Update regalloc hint if currently there isn't one.
-  if (TargetRegisterInfo::isVirtualRegister(reg) &&
-      TargetRegisterInfo::isVirtualRegister(Other.reg)) {
-    std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(reg);
-    if (Hint.first == 0 && Hint.second == 0) {
-      std::pair<unsigned, unsigned> OtherHint =
-        MRI->getRegAllocationHint(Other.reg);
-      if (OtherHint.first || OtherHint.second)
-        MRI->setRegAllocationHint(reg, OtherHint.first, OtherHint.second);
-    }
-  }
 }
 
 /// MergeRangesInAsValue - Merge all of the intervals in RHS into this live
 /// interval as the specified value number.  The LiveRanges in RHS are
 /// allowed to overlap with LiveRanges in the current interval, but only if
 /// the overlapping LiveRanges have the specified value number.
-void LiveInterval::MergeRangesInAsValue(const LiveInterval &RHS, 
+void LiveInterval::MergeRangesInAsValue(const LiveInterval &RHS,
                                         VNInfo *LHSValNo) {
   // TODO: Make this more efficient.
   iterator InsertPos = begin();
@@ -567,7 +504,7 @@
       // If this trimmed away the whole range, ignore it.
       if (Start == End) continue;
     }
-    
+
     // Map the valno in the other live range to the current live range.
     IP = addRangeFrom(LiveRange(Start, End, LHSValNo), IP);
   }
@@ -582,7 +519,7 @@
         if (I->valno == V1) {
           isDead = false;
           break;
-        }          
+        }
       if (isDead) {
         // Now that V1 is dead, remove it.
         markValNoForDeletion(V1);
@@ -592,103 +529,6 @@
 }
 
 
-/// MergeInClobberRanges - For any live ranges that are not defined in the
-/// current interval, but are defined in the Clobbers interval, mark them
-/// used with an unknown definition value.
-void LiveInterval::MergeInClobberRanges(LiveIntervals &li_,
-                                        const LiveInterval &Clobbers,
-                                        VNInfo::Allocator &VNInfoAllocator) {
-  if (Clobbers.empty()) return;
-  
-  DenseMap<VNInfo*, VNInfo*> ValNoMaps;
-  VNInfo *UnusedValNo = 0;
-  iterator IP = begin();
-  for (const_iterator I = Clobbers.begin(), E = Clobbers.end(); I != E; ++I) {
-    // For every val# in the Clobbers interval, create a new "unknown" val#.
-    VNInfo *ClobberValNo = 0;
-    DenseMap<VNInfo*, VNInfo*>::iterator VI = ValNoMaps.find(I->valno);
-    if (VI != ValNoMaps.end())
-      ClobberValNo = VI->second;
-    else if (UnusedValNo)
-      ClobberValNo = UnusedValNo;
-    else {
-      UnusedValNo = ClobberValNo =
-        getNextValue(li_.getInvalidIndex(), 0, false, VNInfoAllocator);
-      ValNoMaps.insert(std::make_pair(I->valno, ClobberValNo));
-    }
-
-    bool Done = false;
-    SlotIndex Start = I->start, End = I->end;
-    // If a clobber range starts before an existing range and ends after
-    // it, the clobber range will need to be split into multiple ranges.
-    // Loop until the entire clobber range is handled.
-    while (!Done) {
-      Done = true;
-      IP = std::upper_bound(IP, end(), Start);
-      SlotIndex SubRangeStart = Start;
-      SlotIndex SubRangeEnd = End;
-
-      // If the start of this range overlaps with an existing liverange, trim it.
-      if (IP != begin() && IP[-1].end > SubRangeStart) {
-        SubRangeStart = IP[-1].end;
-        // Trimmed away the whole range?
-        if (SubRangeStart >= SubRangeEnd) continue;
-      }
-      // If the end of this range overlaps with an existing liverange, trim it.
-      if (IP != end() && SubRangeEnd > IP->start) {
-        // If the clobber live range extends beyond the existing live range,
-        // it'll need at least another live range, so set the flag to keep
-        // iterating.
-        if (SubRangeEnd > IP->end) {
-          Start = IP->end;
-          Done = false;
-        }
-        SubRangeEnd = IP->start;
-        // If this trimmed away the whole range, ignore it.
-        if (SubRangeStart == SubRangeEnd) continue;
-      }
-
-      // Insert the clobber interval.
-      IP = addRangeFrom(LiveRange(SubRangeStart, SubRangeEnd, ClobberValNo),
-                        IP);
-      UnusedValNo = 0;
-    }
-  }
-
-  if (UnusedValNo) {
-    // Delete the last unused val#.
-    valnos.pop_back();
-  }
-}
-
-void LiveInterval::MergeInClobberRange(LiveIntervals &li_,
-                                       SlotIndex Start,
-                                       SlotIndex End,
-                                       VNInfo::Allocator &VNInfoAllocator) {
-  // Find a value # to use for the clobber ranges.  If there is already a value#
-  // for unknown values, use it.
-  VNInfo *ClobberValNo =
-    getNextValue(li_.getInvalidIndex(), 0, false, VNInfoAllocator);
-  
-  iterator IP = begin();
-  IP = std::upper_bound(IP, end(), Start);
-    
-  // If the start of this range overlaps with an existing liverange, trim it.
-  if (IP != begin() && IP[-1].end > Start) {
-    Start = IP[-1].end;
-    // Trimmed away the whole range?
-    if (Start >= End) return;
-  }
-  // If the end of this range overlaps with an existing liverange, trim it.
-  if (IP != end() && End > IP->start) {
-    End = IP->start;
-    // If this trimmed away the whole range, ignore it.
-    if (Start == End) return;
-  }
-    
-  // Insert the clobber interval.
-  addRangeFrom(LiveRange(Start, End, ClobberValNo), IP);
-}
 
 /// MergeValueNumberInto - This method is called when two value numbers
 /// are found to be equivalent.  This eliminates V1, replacing all
@@ -712,7 +552,7 @@
   for (iterator I = begin(); I != end(); ) {
     iterator LR = I++;
     if (LR->valno != V1) continue;  // Not a V1 LiveRange.
-    
+
     // Okay, we found a V1 live range.  If it had a previous, touching, V2 live
     // range, extend it.
     if (LR != begin()) {
@@ -726,11 +566,11 @@
         LR = Prev;
       }
     }
-    
+
     // Okay, now we have a V1 or V2 live range that is maximally merged forward.
     // Ensure that it is a V2 live-range.
     LR->valno = V2;
-    
+
     // If we can merge it into later V2 live ranges, do so now.  We ignore any
     // following V1 live ranges, as they will be merged in subsequent iterations
     // of the loop.
@@ -742,10 +582,13 @@
       }
     }
   }
-  
+
+  // Merge the relevant flags.
+  V2->mergeFlags(V1);
+
   // Now that V1 is dead, remove it.
   markValNoForDeletion(V1);
-  
+
   return V2;
 }
 
@@ -839,10 +682,9 @@
       if (vni->isUnused()) {
         OS << "x";
       } else {
-        if (!vni->isDefAccurate() && !vni->isPHIDef())
-          OS << "?";
-        else
-          OS << vni->def;
+        OS << vni->def;
+        if (vni->isPHIDef())
+          OS << "-phidef";
         if (vni->hasPHIKill())
           OS << "-phikill";
         if (vni->hasRedefByEC())
@@ -860,3 +702,105 @@
 void LiveRange::print(raw_ostream &os) const {
   os << *this;
 }
+
+/// ConnectedVNInfoEqClasses - Helper class that can divide VNInfos in a
+/// LiveInterval into equivalence clases of connected components. A
+/// LiveInterval that has multiple connected components can be broken into
+/// multiple LiveIntervals.
+
+void ConnectedVNInfoEqClasses::Connect(unsigned a, unsigned b) {
+  unsigned eqa = eqClass_[a];
+  unsigned eqb = eqClass_[b];
+  if (eqa == eqb)
+    return;
+  // Make sure a and b are in the same class while maintaining eqClass_[i] <= i.
+  if (eqa > eqb)
+    eqClass_[a] = eqb;
+  else
+    eqClass_[b] = eqa;
+}
+
+unsigned ConnectedVNInfoEqClasses::Renumber() {
+  // Assign final class numbers.
+  // We use the fact that eqClass_[i] == i for class leaders.
+  // For others, eqClass_[i] points to an earlier value in the same class.
+  unsigned count = 0;
+  for (unsigned i = 0, e = eqClass_.size(); i != e; ++i) {
+    unsigned q = eqClass_[i];
+    assert(q <= i && "Invariant broken");
+    eqClass_[i] = q == i ? count++ : eqClass_[q];
+  }
+
+  return count;
+}
+
+unsigned ConnectedVNInfoEqClasses::Classify(const LiveInterval *LI) {
+  // Create initial equivalence classes.
+  eqClass_.clear();
+  eqClass_.reserve(LI->getNumValNums());
+  for (unsigned i = 0, e = LI->getNumValNums(); i != e; ++i)
+    eqClass_.push_back(i);
+
+  // Determine connections.
+  for (LiveInterval::const_vni_iterator I = LI->vni_begin(), E = LI->vni_end();
+       I != E; ++I) {
+    const VNInfo *VNI = *I;
+    if (VNI->id == eqClass_.size())
+      eqClass_.push_back(VNI->id);
+    assert(!VNI->isUnused() && "Cannot handle unused values");
+    if (VNI->isPHIDef()) {
+      const MachineBasicBlock *MBB = lis_.getMBBFromIndex(VNI->def);
+      assert(MBB && "Phi-def has no defining MBB");
+      // Connect to values live out of predecessors.
+      for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
+           PE = MBB->pred_end(); PI != PE; ++PI)
+        if (const VNInfo *PVNI =
+              LI->getVNInfoAt(lis_.getMBBEndIdx(*PI).getPrevSlot()))
+          Connect(VNI->id, PVNI->id);
+    } else {
+      // Normal value defined by an instruction. Check for two-addr redef.
+      // FIXME: This could be coincidental. Should we really check for a tied
+      // operand constraint?
+      if (const VNInfo *UVNI = LI->getVNInfoAt(VNI->def.getUseIndex()))
+        Connect(VNI->id, UVNI->id);
+    }
+  }
+  return Renumber();
+}
+
+void ConnectedVNInfoEqClasses::Distribute(LiveInterval *LIV[]) {
+  assert(LIV[0] && "LIV[0] must be set");
+  LiveInterval &LI = *LIV[0];
+  // Check that Classify() was most likely run on LIV[0] first.
+  assert(eqClass_.size() == LI.getNumValNums() && "Bad classification data");
+
+  // First move runs to new intervals.
+  LiveInterval::iterator J = LI.begin(), E = LI.end();
+  while (J != E && eqClass_[J->valno->id] == 0)
+    ++J;
+  for (LiveInterval::iterator I = J; I != E; ++I) {
+    if (unsigned eq = eqClass_[I->valno->id]) {
+      assert((LIV[eq]->empty() || LIV[eq]->expiredAt(I->start)) &&
+             "New intervals should be empty");
+      LIV[eq]->ranges.push_back(*I);
+    } else
+      *J++ = *I;
+  }
+  LI.ranges.erase(J, E);
+
+  // Transfer VNInfos to their new owners and renumber them.
+  unsigned j = 0, e = LI.getNumValNums();
+  while (j != e && eqClass_[j] == 0)
+    ++j;
+  for (unsigned i = j; i != e; ++i) {
+    VNInfo *VNI = LI.getValNumInfo(i);
+    if (unsigned eq = eqClass_[i]) {
+      VNI->id = LIV[eq]->getNumValNums();
+      LIV[eq]->valnos.push_back(VNI);
+    } else {
+      VNI->id = j;
+      LI.valnos[j++] = VNI;
+    }
+  }
+  LI.valnos.resize(j);
+}
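
Connect() and Renumber() implement a compact leader scheme rather than a
general union-find: values may only be linked toward earlier values, and
Renumber() compresses the resulting chains in a single left-to-right pass.
A self-contained model of the same scheme (illustrative only, and it
inherits the same ordering assumption, so it is not a general-purpose
union-find):

    #include <cassert>
    #include <vector>

    struct EqClasses {
      // Invariant: eq[i] <= i; class leaders have eq[i] == i.
      std::vector<unsigned> eq;
      explicit EqClasses(unsigned n) : eq(n) {
        for (unsigned i = 0; i != n; ++i)
          eq[i] = i;
      }
      void connect(unsigned a, unsigned b) {
        unsigned ea = eq[a], eb = eq[b];
        if (ea == eb)
          return;
        if (ea > eb) eq[a] = eb; else eq[b] = ea;
      }
      // Compress chains into dense class numbers, returning the class count.
      unsigned renumber() {
        unsigned count = 0;
        for (size_t i = 0, e = eq.size(); i != e; ++i) {
          unsigned q = eq[i];
          assert(q <= i && "invariant broken");
          eq[i] = (q == i) ? count++ : eq[q];
        }
        return count;
      }
    };

For four values where connect(1, 0) and connect(3, 2) are the only links,
renumber() yields two classes: values 0 and 1 map to class 0, values 2 and
3 to class 1.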

Modified: llvm/branches/wendling/eh/lib/CodeGen/LiveIntervalAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/LiveIntervalAnalysis.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/LiveIntervalAnalysis.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/LiveIntervalAnalysis.cpp Tue Oct 26 19:48:03 2010
@@ -47,7 +47,7 @@
 using namespace llvm;
 
 // Hidden options for help debugging.
-static cl::opt<bool> DisableReMat("disable-rematerialization", 
+static cl::opt<bool> DisableReMat("disable-rematerialization",
                                   cl::init(false), cl::Hidden);
 
 STATISTIC(numIntervals , "Number of original intervals");
@@ -55,23 +55,33 @@
 STATISTIC(numSplits    , "Number of intervals split");
 
 char LiveIntervals::ID = 0;
-INITIALIZE_PASS(LiveIntervals, "liveintervals",
-                "Live Interval Analysis", false, false);
+INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
+                "Live Interval Analysis", false, false)
+INITIALIZE_PASS_DEPENDENCY(LiveVariables)
+INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
+INITIALIZE_PASS_DEPENDENCY(PHIElimination)
+INITIALIZE_PASS_DEPENDENCY(TwoAddressInstructionPass)
+INITIALIZE_PASS_DEPENDENCY(ProcessImplicitDefs)
+INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
+                "Live Interval Analysis", false, false)
 
 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
   AU.setPreservesCFG();
   AU.addRequired<AliasAnalysis>();
   AU.addPreserved<AliasAnalysis>();
-  AU.addPreserved<LiveVariables>();
   AU.addRequired<LiveVariables>();
-  AU.addPreservedID(MachineLoopInfoID);
+  AU.addPreserved<LiveVariables>();
+  AU.addRequired<MachineLoopInfo>();
+  AU.addPreserved<MachineLoopInfo>();
   AU.addPreservedID(MachineDominatorsID);
-  
+
   if (!StrongPHIElim) {
     AU.addPreservedID(PHIEliminationID);
     AU.addRequiredID(PHIEliminationID);
   }
-  
+
   AU.addRequiredID(TwoAddressInstructionPassID);
   AU.addPreserved<ProcessImplicitDefs>();
   AU.addRequired<ProcessImplicitDefs>();
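
The INITIALIZE_PASS_BEGIN/END form lets a pass spell out its dependencies so
the registry can initialize those first; INITIALIZE_AG_DEPENDENCY does the
same for an analysis group (compare the initializeMachineCSEPass() call added
to the MachineCSE constructor below). A generic sketch of the idiom, with
MyPass as a placeholder name:

    INITIALIZE_PASS_BEGIN(MyPass, "my-pass", "My Pass Description", false, false)
    INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
    INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
    INITIALIZE_PASS_END(MyPass, "my-pass", "My Pass Description", false, false)
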
@@ -85,7 +95,7 @@
   for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
        E = r2iMap_.end(); I != E; ++I)
     delete I->second;
-  
+
   r2iMap_.clear();
 
   // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
@@ -131,19 +141,7 @@
 
 void LiveIntervals::printInstrs(raw_ostream &OS) const {
   OS << "********** MACHINEINSTRS **********\n";
-
-  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
-       mbbi != mbbe; ++mbbi) {
-    OS << "BB#" << mbbi->getNumber()
-       << ":\t\t# derived from " << mbbi->getName() << "\n";
-    for (MachineBasicBlock::iterator mii = mbbi->begin(),
-           mie = mbbi->end(); mii != mie; ++mii) {
-      if (mii->isDebugValue())
-        OS << "    \t" << *mii;
-      else
-        OS << getInstructionIndex(mii) << '\t' << *mii;
-    }
-  }
+  mf_->print(OS, indexes_);
 }
 
 void LiveIntervals::dumpInstrs() const {
@@ -275,7 +273,7 @@
 
 /// isPartialRedef - Return true if the specified def at the specific index is
 /// partially re-defining the specified live interval. A common case of this is
-/// a definition of the sub-register. 
+/// a definition of the sub-register.
 bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
                                    LiveInterval &interval) {
   if (!MO.getSubReg() || MO.isEarlyClobber())
@@ -284,8 +282,8 @@
   SlotIndex RedefIndex = MIIdx.getDefIndex();
   const LiveRange *OldLR =
     interval.getLiveRangeContaining(RedefIndex.getUseIndex());
-  if (OldLR->valno->isDefAccurate()) {
-    MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
+  MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
+  if (DefMI != 0) {
     return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
   }
   return false;
@@ -325,8 +323,7 @@
       CopyMI = mi;
     }
 
-    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, true,
-                                          VNInfoAllocator);
+    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
     assert(ValNo->id == 0 && "First value in interval is not 0?");
 
     // Loop over all of the blocks that the vreg is defined in.  There are
@@ -392,8 +389,9 @@
       // Create interval with one of a NEW value number.  Note that this value
       // number isn't actually defined by an instruction, weird huh? :)
       if (PHIJoin) {
-        ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
-                                      VNInfoAllocator);
+        assert(getInstructionFromIndex(Start) == 0 &&
+               "PHI def index points at actual instruction.");
+        ValNo = interval.getNextValue(Start, 0, VNInfoAllocator);
         ValNo->setIsPHIDef(true);
       }
       LiveRange LR(Start, killIdx, ValNo);
@@ -415,8 +413,8 @@
     // def-and-use register operand.
 
     // It may also be partial redef like this:
-    // 80	%reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
-    // 120	%reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
+    // 80  %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
+    // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
     bool PartReDef = isPartialRedef(MIIdx, MO, interval);
     if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
       // If this is a two-address definition, then we have already processed
@@ -439,10 +437,7 @@
 
       // The new value number (#1) is defined by the instruction we claimed
       // defined value #0.
-      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
-                                            false, // update at *
-                                            VNInfoAllocator);
-      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
+      VNInfo *ValNo = interval.createValueCopy(OldValNo, VNInfoAllocator);
 
       // Value#0 is now defined by the 2-addr instruction.
       OldValNo->def  = RedefIndex;
@@ -451,7 +446,7 @@
       // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
       if (PartReDef && mi->isCopyLike())
         OldValNo->setCopy(&*mi);
-      
+
       // Add the new live interval which replaces the range for the input copy.
       LiveRange LR(DefIndex, RedefIndex, ValNo);
       DEBUG(dbgs() << " replace range with " << LR);
@@ -480,8 +475,8 @@
       MachineInstr *CopyMI = NULL;
       if (mi->isCopyLike())
         CopyMI = mi;
-      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
-      
+      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
+
       SlotIndex killIndex = getMBBEndIdx(mbb);
       LiveRange LR(defIndex, killIndex, ValNo);
       interval.addRange(LR);
@@ -558,10 +553,10 @@
         goto exit;
       }
     }
-    
+
     baseIndex = baseIndex.getNextIndex();
   }
-  
+
   // The only case we should have a dead physreg here without a killing or
   // instruction where we know it's dead is if it is live-in to the function
   // and never used. Another possible case is the implicit use of the
@@ -572,11 +567,11 @@
   assert(start < end && "did not find end of interval?");
 
   // Already exists? Extend old live interval.
-  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
-  bool Extend = OldLR != interval.end();
-  VNInfo *ValNo = Extend
-    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
-  if (MO.isEarlyClobber() && Extend)
+  VNInfo *ValNo = interval.getVNInfoAt(start);
+  bool Extend = ValNo != 0;
+  if (!Extend)
+    ValNo = interval.getNextValue(start, CopyMI, VNInfoAllocator);
+  if (Extend && MO.isEarlyClobber())
     ValNo->setHasRedefByEC(true);
   LiveRange LR(start, end, ValNo);
   interval.addRange(LR);
@@ -671,9 +666,11 @@
     }
   }
 
+  SlotIndex defIdx = getMBBStartIdx(MBB);
+  assert(getInstructionFromIndex(defIdx) == 0 &&
+         "PHI def index points at actual instruction.");
   VNInfo *vni =
-    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
-                          0, false, VNInfoAllocator);
+    interval.getNextValue(defIdx, 0, VNInfoAllocator);
   vni->setIsPHIDef(true);
   LiveRange LR(start, end, vni);
 
@@ -685,7 +682,7 @@
 /// registers. For some ordering of the machine instructions [1,N] a
 /// live interval is an interval [i, j) where 1 <= i <= j < N for
 /// which a variable is live.
-void LiveIntervals::computeIntervals() { 
+void LiveIntervals::computeIntervals() {
   DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
                << "********** Function: "
                << ((Value*)mf_->getFunction())->getName() << '\n');
@@ -712,11 +709,11 @@
           handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                                true);
     }
-    
+
     // Skip over empty initial indices.
     if (getInstructionFromIndex(MIIndex) == 0)
       MIIndex = indexes_->getNextNonNullIndex(MIIndex);
-    
+
     for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
          MI != miEnd; ++MI) {
       DEBUG(dbgs() << MIIndex << "\t" << *MI);
@@ -735,7 +732,7 @@
         else if (MO.isUndef())
           UndefUses.push_back(MO.getReg());
       }
-      
+
       // Move to the next instr slot.
       MIIndex = indexes_->getNextNonNullIndex(MIIndex);
     }
@@ -780,7 +777,7 @@
     unsigned Reg = MO.getReg();
     if (Reg == 0 || Reg == li.reg)
       continue;
-    
+
     if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
         !allocatableRegs_[Reg])
       continue;
@@ -799,10 +796,8 @@
 /// which reaches the given instruction also reaches the specified use index.
 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                        SlotIndex UseIdx) const {
-  SlotIndex Index = getInstructionIndex(MI);  
-  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
-  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
-  return UI != li.end() && UI->valno == ValNo;
+  VNInfo *UValNo = li.getVNInfoAt(UseIdx);
+  return UValNo && UValNo == li.getVNInfoAt(getInstructionIndex(MI));
 }
 
 /// isReMaterializable - Returns true if the definition MI of the specified
@@ -828,7 +823,7 @@
          ri != re; ++ri) {
       MachineInstr *UseMI = &*ri;
       SlotIndex UseIdx = getInstructionIndex(UseMI);
-      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
+      if (li.getVNInfoAt(UseIdx) != ValNo)
         continue;
       if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
         return false;
@@ -864,9 +859,9 @@
     if (VNI->isUnused())
       continue; // Dead val#.
     // Is the def for the val# rematerializable?
-    if (!VNI->isDefAccurate())
-      return false;
     MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
+    if (!ReMatDefMI)
+      return false;
     bool DefIsLoad = false;
     if (!ReMatDefMI ||
         !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
@@ -904,7 +899,7 @@
   }
   return false;
 }
-                           
+
 
 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
 /// slot / to reg or any rematerialized load into ith operand of specified
@@ -1024,7 +1019,7 @@
 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
 bool LiveIntervals::
 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
-                 bool TrySplit, SlotIndex index, SlotIndex end, 
+                 bool TrySplit, SlotIndex index, SlotIndex end,
                  MachineInstr *MI,
                  MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                  unsigned Slot, int LdSlot,
@@ -1083,7 +1078,7 @@
     //      keep the src/dst regs pinned.
     //
     // Keep track of whether we replace a use and/or def so that we can
-    // create the spill interval with the appropriate range. 
+    // create the spill interval with the appropriate range.
     SmallVector<unsigned, 2> Ops;
     tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);
 
@@ -1145,7 +1140,7 @@
       if (mopj.isImplicit())
         rewriteImplicitOps(li, MI, NewVReg, vrm);
     }
-            
+
     if (CreatedNewVReg) {
       if (DefIsReMat) {
         vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
@@ -1189,7 +1184,7 @@
     if (HasUse) {
       if (CreatedNewVReg) {
         LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
-                     nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
+                     nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
         DEBUG(dbgs() << " +" << LR);
         nI.addRange(LR);
       } else {
@@ -1203,7 +1198,7 @@
     }
     if (HasDef) {
       LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
-                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
+                   nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
       DEBUG(dbgs() << " +" << LR);
       nI.addRange(LR);
     }
@@ -1652,8 +1647,7 @@
     if (VNI->isUnused())
       continue; // Dead val#.
     // Is the def for the val# rematerializable?
-    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
-      ? getInstructionFromIndex(VNI->def) : 0;
+    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
     bool dummy;
     if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
       // Remember how to remat the def of this val#.
@@ -1685,7 +1679,7 @@
   if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
     if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
       Slot = vrm.assignVirt2StackSlot(li.reg);
-    
+
     // This case only occurs when the prealloc splitter has already assigned
     // a stack slot to this vreg.
     else
@@ -1742,7 +1736,7 @@
             Ops.push_back(j);
             if (MO.isDef())
               continue;
-            if (isReMat || 
+            if (isReMat ||
                 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                 RestoreMBBs, RestoreIdxes))) {
               // MI has two-address uses of the same register. If the use
@@ -1855,7 +1849,6 @@
   for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
     LiveInterval *LI = NewLIs[i];
     if (!LI->empty()) {
-      LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
       if (!AddedKill.count(LI)) {
         LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
         SlotIndex LastUseIdx = LR->end.getBaseIndex();
@@ -1888,7 +1881,7 @@
 /// getRepresentativeReg - Find the largest super register of the specified
 /// physical register.
 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
-  // Find the largest super-register that is allocatable. 
+  // Find the largest super-register that is allocatable.
   unsigned BestReg = Reg;
   for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
     unsigned SuperReg = *AS;
@@ -1996,13 +1989,13 @@
   LiveInterval& Interval = getOrCreateInterval(reg);
   VNInfo* VN = Interval.getNextValue(
     SlotIndex(getInstructionIndex(startInst).getDefIndex()),
-    startInst, true, getVNInfoAllocator());
+    startInst, getVNInfoAllocator());
   VN->setHasPHIKill(true);
   LiveRange LR(
      SlotIndex(getInstructionIndex(startInst).getDefIndex()),
      getMBBEndIdx(startInst->getParent()), VN);
   Interval.addRange(LR);
-  
+
   return LR;
 }
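
A pattern worth calling out: every hunk above that dropped isDefAccurate()
now asks the index map directly, relying on the invariant that only real
instructions carry slot indexes, so a PHI-def (which points at a block
boundary) maps to no instruction. The recurring shape, extracted as a
fragment (built only from calls shown above, not standalone code):

    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    if (!ReMatDefMI)
      return false;  // PHI-def or untracked def, nothing to inspect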
 

Modified: llvm/branches/wendling/eh/lib/CodeGen/LiveStackAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/LiveStackAnalysis.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/LiveStackAnalysis.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/LiveStackAnalysis.cpp Tue Oct 26 19:48:03 2010
@@ -26,7 +26,9 @@
 
 char LiveStacks::ID = 0;
 INITIALIZE_PASS(LiveStacks, "livestacks",
-                "Live Stack Slot Analysis", false, false);
+                "Live Stack Slot Analysis", false, false)
+
+char &llvm::LiveStacksID = LiveStacks::ID;
 
 void LiveStacks::getAnalysisUsage(AnalysisUsage &AU) const {
   AU.setPreservesAll();

Modified: llvm/branches/wendling/eh/lib/CodeGen/LiveVariables.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/LiveVariables.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/LiveVariables.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/LiveVariables.cpp Tue Oct 26 19:48:03 2010
@@ -42,8 +42,11 @@
 using namespace llvm;
 
 char LiveVariables::ID = 0;
-INITIALIZE_PASS(LiveVariables, "livevars",
-                "Live Variable Analysis", false, false);
+INITIALIZE_PASS_BEGIN(LiveVariables, "livevars",
+                "Live Variable Analysis", false, false)
+INITIALIZE_PASS_DEPENDENCY(UnreachableMachineBlockElim)
+INITIALIZE_PASS_END(LiveVariables, "livevars",
+                "Live Variable Analysis", false, false)
 
 
 void LiveVariables::getAnalysisUsage(AnalysisUsage &AU) const {
@@ -483,21 +486,6 @@
   }
 }
 
-namespace {
-  struct RegSorter {
-    const TargetRegisterInfo *TRI;
-
-    RegSorter(const TargetRegisterInfo *tri) : TRI(tri) { }
-    bool operator()(unsigned A, unsigned B) {
-      if (TRI->isSubRegister(A, B))
-        return true;
-      else if (TRI->isSubRegister(B, A))
-        return false;
-      return A < B;
-    }
-  };
-}
-
 bool LiveVariables::runOnMachineFunction(MachineFunction &mf) {
   MF = &mf;
   MRI = &mf.getRegInfo();

Modified: llvm/branches/wendling/eh/lib/CodeGen/LowerSubregs.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/LowerSubregs.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/LowerSubregs.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/LowerSubregs.cpp Tue Oct 26 19:48:03 2010
@@ -36,7 +36,7 @@
 
   public:
     static char ID; // Pass identification, replacement for typeid
-    LowerSubregsInstructionPass() : MachineFunctionPass(&ID) {}
+    LowerSubregsInstructionPass() : MachineFunctionPass(ID) {}
     
     const char *getPassName() const {
       return "Subregister lowering instruction pass";
@@ -58,9 +58,6 @@
 
     void TransferDeadFlag(MachineInstr *MI, unsigned DstReg,
                           const TargetRegisterInfo *TRI);
-    void TransferKillFlag(MachineInstr *MI, unsigned SrcReg,
-                          const TargetRegisterInfo *TRI,
-                          bool AddIfNotFound = false);
     void TransferImplicitDefs(MachineInstr *MI);
   };
 
@@ -87,23 +84,6 @@
   }
 }
 
-/// TransferKillFlag - MI is a pseudo-instruction with SrcReg killed,
-/// and the lowered replacement instructions immediately precede it.
-/// Mark the replacement instructions with the kill flag.
-void
-LowerSubregsInstructionPass::TransferKillFlag(MachineInstr *MI,
-                                              unsigned SrcReg,
-                                              const TargetRegisterInfo *TRI,
-                                              bool AddIfNotFound) {
-  for (MachineBasicBlock::iterator MII =
-        prior(MachineBasicBlock::iterator(MI)); ; --MII) {
-    if (MII->addRegisterKilled(SrcReg, TRI, AddIfNotFound))
-      break;
-    assert(MII != MI->getParent()->begin() &&
-           "copyPhysReg output doesn't reference source register!");
-  }
-}
-
 /// TransferImplicitDefs - MI is a pseudo-instruction, and the lowered
 /// replacement instructions immediately precede it.  Copy any implicit-def
 /// operands from MI to the replacement instruction.

Modified: llvm/branches/wendling/eh/lib/CodeGen/MachineBasicBlock.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/MachineBasicBlock.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/MachineBasicBlock.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/MachineBasicBlock.cpp Tue Oct 26 19:48:03 2010
@@ -17,6 +17,7 @@
 #include "llvm/CodeGen/MachineDominators.h"
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/SlotIndexes.h"
 #include "llvm/MC/MCAsmInfo.h"
 #include "llvm/MC/MCContext.h"
 #include "llvm/Target/TargetRegisterInfo.h"
@@ -176,7 +177,7 @@
     return "(null)";
 }
 
-void MachineBasicBlock::print(raw_ostream &OS) const {
+void MachineBasicBlock::print(raw_ostream &OS, SlotIndexes *Indexes) const {
   const MachineFunction *MF = getParent();
   if (!MF) {
     OS << "Can't print out MachineBasicBlock because parent MachineFunction"
@@ -186,6 +187,9 @@
 
   if (Alignment) { OS << "Alignment " << Alignment << "\n"; }
 
+  if (Indexes)
+    OS << Indexes->getMBBStartIdx(this) << '\t';
+
   OS << "BB#" << getNumber() << ": ";
 
   const char *Comma = "";
@@ -198,8 +202,9 @@
   if (hasAddressTaken()) { OS << Comma << "ADDRESS TAKEN"; Comma = ", "; }
   OS << '\n';
 
-  const TargetRegisterInfo *TRI = MF->getTarget().getRegisterInfo();  
+  const TargetRegisterInfo *TRI = MF->getTarget().getRegisterInfo();
   if (!livein_empty()) {
+    if (Indexes) OS << '\t';
     OS << "    Live Ins:";
     for (livein_iterator I = livein_begin(),E = livein_end(); I != E; ++I)
       OutputReg(OS, *I, TRI);
@@ -207,19 +212,26 @@
   }
   // Print the preds of this block according to the CFG.
   if (!pred_empty()) {
+    if (Indexes) OS << '\t';
     OS << "    Predecessors according to CFG:";
     for (const_pred_iterator PI = pred_begin(), E = pred_end(); PI != E; ++PI)
       OS << " BB#" << (*PI)->getNumber();
     OS << '\n';
   }
-  
+
   for (const_iterator I = begin(); I != end(); ++I) {
+    if (Indexes) {
+      if (Indexes->hasIndex(I))
+        OS << Indexes->getInstructionIndex(I);
+      OS << '\t';
+    }
     OS << '\t';
     I->print(OS, &getParent()->getTarget());
   }
 
   // Print the successors of this block according to the CFG.
   if (!succ_empty()) {
+    if (Indexes) OS << '\t';
     OS << "    Successors according to CFG:";
     for (const_succ_iterator SI = succ_begin(), E = succ_end(); SI != E; ++SI)
       OS << " BB#" << (*SI)->getNumber();
@@ -441,7 +453,7 @@
 
   MachineBasicBlock *NMBB = MF->CreateMachineBasicBlock();
   MF->insert(llvm::next(MachineFunction::iterator(this)), NMBB);
-  DEBUG(dbgs() << "PHIElimination splitting critical edge:"
+  DEBUG(dbgs() << "Splitting critical edge:"
         " BB#" << getNumber()
         << " -- BB#" << NMBB->getNumber()
         << " -- BB#" << Succ->getNumber() << '\n');
@@ -468,11 +480,33 @@
     LV->addNewBlock(NMBB, this, Succ);
 
   if (MachineDominatorTree *MDT =
-        P->getAnalysisIfAvailable<MachineDominatorTree>())
-    MDT->addNewBlock(NMBB, this);
+      P->getAnalysisIfAvailable<MachineDominatorTree>()) {
+    // Update dominator information.
+    MachineDomTreeNode *SuccDTNode = MDT->getNode(Succ);
+
+    bool IsNewIDom = true;
+    for (const_pred_iterator PI = Succ->pred_begin(), E = Succ->pred_end();
+         PI != E; ++PI) {
+      MachineBasicBlock *PredBB = *PI;
+      if (PredBB == NMBB)
+        continue;
+      if (!MDT->dominates(SuccDTNode, MDT->getNode(PredBB))) {
+        IsNewIDom = false;
+        break;
+      }
+    }
+
+    // We know "this" dominates the newly created basic block.
+    MachineDomTreeNode *NewDTNode = MDT->addNewBlock(NMBB, this);
+
+    // If all the other predecessors of "Succ" are dominated by "Succ" itself,
+    // then the new block is the new immediate dominator of "Succ". Otherwise,
+    // the new block doesn't dominate anything.
+    if (IsNewIDom)
+      MDT->changeImmediateDominator(SuccDTNode, NewDTNode);
+  }
 
-  if (MachineLoopInfo *MLI =
-        P->getAnalysisIfAvailable<MachineLoopInfo>())
+  if (MachineLoopInfo *MLI = P->getAnalysisIfAvailable<MachineLoopInfo>())
     if (MachineLoop *TIL = MLI->getLoopFor(this)) {
      // If one or the other block was not in a loop, the new block is not
       // either, and thus LI doesn't need to be updated.

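The dominator update in the hunk above reduces to one rule: after splitting the critical edge this->Succ with a new block NMBB, NMBB becomes Succ's new immediate dominator exactly when every other predecessor of Succ is itself dominated by Succ, i.e. reaches Succ only through back edges. A standalone sketch of that decision, with the dominance relation supplied by the caller (hypothetical types, not the MachineDominatorTree API):

  #include <functional>
  #include <vector>

  // True when the block inserted on the split edge should become Succ's
  // new immediate dominator.
  bool newBlockIsIdom(int Succ, int NMBB, const std::vector<int> &SuccPreds,
                      const std::function<bool(int, int)> &Dominates) {
    for (int Pred : SuccPreds) {
      if (Pred == NMBB)
        continue;                      // skip the freshly inserted block
      if (!Dominates(Succ, Pred))
        return false;                  // a side entry keeps the old idom
    }
    return true;
  }
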
Modified: llvm/branches/wendling/eh/lib/CodeGen/MachineCSE.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/MachineCSE.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/MachineCSE.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/MachineCSE.cpp Tue Oct 26 19:48:03 2010
@@ -41,7 +41,9 @@
     MachineRegisterInfo *MRI;
   public:
     static char ID; // Pass identification
-    MachineCSE() : MachineFunctionPass(&ID), LookAheadLimit(5), CurrVN(0) {}
+    MachineCSE() : MachineFunctionPass(ID), LookAheadLimit(5), CurrVN(0) {
+      initializeMachineCSEPass(*PassRegistry::getPassRegistry());
+    }
 
     virtual bool runOnMachineFunction(MachineFunction &MF);
     
@@ -49,10 +51,16 @@
       AU.setPreservesCFG();
       MachineFunctionPass::getAnalysisUsage(AU);
       AU.addRequired<AliasAnalysis>();
+      AU.addPreservedID(MachineLoopInfoID);
       AU.addRequired<MachineDominatorTree>();
       AU.addPreserved<MachineDominatorTree>();
     }
 
+    virtual void releaseMemory() {
+      ScopeMap.clear();
+      Exps.clear();
+    }
+
   private:
     const unsigned LookAheadLimit;
     typedef ScopedHashTableScope<MachineInstr*, unsigned,
@@ -85,8 +93,12 @@
 } // end anonymous namespace
 
 char MachineCSE::ID = 0;
-INITIALIZE_PASS(MachineCSE, "machine-cse",
-                "Machine Common Subexpression Elimination", false, false);
+INITIALIZE_PASS_BEGIN(MachineCSE, "machine-cse",
+                "Machine Common Subexpression Elimination", false, false)
+INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_END(MachineCSE, "machine-cse",
+                "Machine Common Subexpression Elimination", false, false)
 
 FunctionPass *llvm::createMachineCSEPass() { return new MachineCSE(); }
 
@@ -100,7 +112,7 @@
     unsigned Reg = MO.getReg();
     if (!Reg || TargetRegisterInfo::isPhysicalRegister(Reg))
       continue;
-    if (!MRI->hasOneUse(Reg))
+    if (!MRI->hasOneNonDBGUse(Reg))
      // Only coalesce single-use copies. This ensures the copy will be
       // deleted.
       continue;
@@ -114,17 +126,12 @@
       continue;
     if (DefMI->getOperand(0).getSubReg() || DefMI->getOperand(1).getSubReg())
       continue;
-    const TargetRegisterClass *SRC   = MRI->getRegClass(SrcReg);
-    const TargetRegisterClass *RC    = MRI->getRegClass(Reg);
-    const TargetRegisterClass *NewRC = getCommonSubClass(RC, SRC);
-    if (!NewRC)
+    if (!MRI->constrainRegClass(SrcReg, MRI->getRegClass(Reg)))
       continue;
     DEBUG(dbgs() << "Coalescing: " << *DefMI);
-    DEBUG(dbgs() << "*** to: " << *MI);
+    DEBUG(dbgs() << "***     to: " << *MI);
     MO.setReg(SrcReg);
     MRI->clearKillFlags(SrcReg);
-    if (NewRC != SRC)
-      MRI->setRegClass(SrcReg, NewRC);
     DefMI->eraseFromParent();
     ++NumCoalesces;
     Changed = true;
@@ -468,6 +475,8 @@
   DenseMap<MachineDomTreeNode*, MachineDomTreeNode*> ParentMap;
   DenseMap<MachineDomTreeNode*, unsigned> OpenChildren;
 
+  CurrVN = 0;
+
   // Perform a DFS walk to determine the order of visit.
   WorkList.push_back(Node);
   do {

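MachineCSE scopes its value-numbering table to the dominator tree: each node opens a scope on entry and closes it on exit, so an expression is reused only where its first occurrence dominates the reuse. A standalone sketch of that discipline, with a stack of plain maps standing in for ScopedHashTable (hypothetical Node type, not the pass's API):

  #include <string>
  #include <unordered_map>
  #include <vector>

  struct Node {
    std::vector<std::string> Exprs;  // expressions computed in this block
    std::vector<Node *> Kids;        // dominator-tree children
  };

  // Each recursion level is one hash-table scope; lookups fall back to
  // enclosing (dominating) scopes only.
  void cseWalk(Node *N,
               std::vector<std::unordered_map<std::string, unsigned>> &Scopes,
               unsigned &NextVN) {
    Scopes.emplace_back();                               // open scope
    for (const std::string &E : N->Exprs) {
      bool Reused = false;
      for (auto It = Scopes.rbegin(); It != Scopes.rend(); ++It)
        if (It->count(E)) { Reused = true; break; }      // CSE hit
      if (!Reused)
        Scopes.back()[E] = NextVN++;                     // first occurrence
    }
    for (Node *K : N->Kids)
      cseWalk(K, Scopes, NextVN);
    Scopes.pop_back();                                   // close scope
  }
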
Modified: llvm/branches/wendling/eh/lib/CodeGen/MachineDominators.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/MachineDominators.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/MachineDominators.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/MachineDominators.cpp Tue Oct 26 19:48:03 2010
@@ -24,10 +24,10 @@
 
 char MachineDominatorTree::ID = 0;
 
-static RegisterPass<MachineDominatorTree>
-E("machinedomtree", "MachineDominator Tree Construction", true);
+INITIALIZE_PASS(MachineDominatorTree, "machinedomtree",
+                "MachineDominator Tree Construction", true, true)
 
-const PassInfo *const llvm::MachineDominatorsID = &E;
+char &llvm::MachineDominatorsID = MachineDominatorTree::ID;
 
 void MachineDominatorTree::getAnalysisUsage(AnalysisUsage &AU) const {
   AU.setPreservesAll();
@@ -41,7 +41,8 @@
 }
 
 MachineDominatorTree::MachineDominatorTree()
-    : MachineFunctionPass(&ID) {
+    : MachineFunctionPass(ID) {
+  initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
   DT = new DominatorTreeBase<MachineBasicBlock>(false);
 }
 

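Replacing the exported const PassInfo* with char &MachineDominatorsID works because pass identity is the address of a static char: clients can name the pass through the reference (e.g. for addPreservedID) without including its header. A standalone model of address-as-identity (hypothetical names, not the PassRegistry machinery):

  #include <cassert>

  struct DomTreePass { static char ID; };
  char DomTreePass::ID = 0;

  // Exported opaque handle, analogous to char &llvm::MachineDominatorsID.
  char &MachineDominatorsID = DomTreePass::ID;

  int main() {
    // Identity is the address, so the handle and the static compare equal.
    assert(&MachineDominatorsID == &DomTreePass::ID);
  }
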
Modified: llvm/branches/wendling/eh/lib/CodeGen/MachineFunction.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/MachineFunction.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/MachineFunction.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/MachineFunction.cpp Tue Oct 26 19:48:03 2010
@@ -190,20 +190,21 @@
 }
 
 MachineMemOperand *
-MachineFunction::getMachineMemOperand(const Value *v, unsigned f,
-                                      int64_t o, uint64_t s,
-                                      unsigned base_alignment) {
-  return new (Allocator) MachineMemOperand(v, f, o, s, base_alignment);
+MachineFunction::getMachineMemOperand(MachinePointerInfo PtrInfo, unsigned f,
+                                      uint64_t s, unsigned base_alignment,
+                                      const MDNode *TBAAInfo) {
+  return new (Allocator) MachineMemOperand(PtrInfo, f, s, base_alignment,
+                                           TBAAInfo);
 }
 
 MachineMemOperand *
 MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
                                       int64_t Offset, uint64_t Size) {
   return new (Allocator)
-             MachineMemOperand(MMO->getValue(), MMO->getFlags(),
-                               int64_t(uint64_t(MMO->getOffset()) +
-                                       uint64_t(Offset)),
-                               Size, MMO->getBaseAlignment());
+             MachineMemOperand(MachinePointerInfo(MMO->getValue(),
+                                                  MMO->getOffset()+Offset),
+                               MMO->getFlags(), Size,
+                               MMO->getBaseAlignment(), 0);
 }
 
 MachineInstr::mmo_iterator
@@ -231,10 +232,10 @@
       else {
         // Clone the MMO and unset the store flag.
         MachineMemOperand *JustLoad =
-          getMachineMemOperand((*I)->getValue(),
+          getMachineMemOperand((*I)->getPointerInfo(),
                                (*I)->getFlags() & ~MachineMemOperand::MOStore,
-                               (*I)->getOffset(), (*I)->getSize(),
-                               (*I)->getBaseAlignment());
+                               (*I)->getSize(), (*I)->getBaseAlignment(),
+                               (*I)->getTBAAInfo());
         Result[Index] = JustLoad;
       }
       ++Index;
@@ -263,10 +264,10 @@
       else {
         // Clone the MMO and unset the load flag.
         MachineMemOperand *JustStore =
-          getMachineMemOperand((*I)->getValue(),
+          getMachineMemOperand((*I)->getPointerInfo(),
                                (*I)->getFlags() & ~MachineMemOperand::MOLoad,
-                               (*I)->getOffset(), (*I)->getSize(),
-                               (*I)->getBaseAlignment());
+                               (*I)->getSize(), (*I)->getBaseAlignment(),
+                               (*I)->getTBAAInfo());
         Result[Index] = JustStore;
       }
       ++Index;
@@ -279,7 +280,7 @@
   print(dbgs());
 }
 
-void MachineFunction::print(raw_ostream &OS) const {
+void MachineFunction::print(raw_ostream &OS, SlotIndexes *Indexes) const {
   OS << "# Machine code for function " << Fn->getName() << ":\n";
 
   // Print Frame Information
@@ -334,7 +335,7 @@
   
   for (const_iterator BB = begin(), E = end(); BB != E; ++BB) {
     OS << '\n';
-    BB->print(OS);
+    BB->print(OS, Indexes);
   }
 
   OS << "\n# End machine code for function " << Fn->getName() << ".\n\n";

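Folding the base value and offset into a single MachinePointerInfo makes the offset-cloning overload above a simple re-basing: build a new pointer record at the adjusted offset and keep everything else. A standalone model of that operation (plain C++; hypothetical MemOp struct, not the MachineMemOperand API):

  #include <cstdint>

  struct PtrInfo { const void *V; int64_t Offset; };
  struct MemOp   { PtrInfo PI; uint64_t Size; unsigned Flags; };

  // Clone M for a (possibly narrower) access Delta bytes further in,
  // preserving the underlying base value for later alias queries.
  MemOp cloneAtOffset(const MemOp &M, int64_t Delta, uint64_t NewSize) {
    return MemOp{{M.PI.V, M.PI.Offset + Delta}, NewSize, M.Flags};
  }
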
Modified: llvm/branches/wendling/eh/lib/CodeGen/MachineFunctionAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/MachineFunctionAnalysis.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/MachineFunctionAnalysis.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/MachineFunctionAnalysis.cpp Tue Oct 26 19:48:03 2010
@@ -20,14 +20,14 @@
 // a default constructor.
 static PassInfo
 X("Machine Function Analysis", "machine-function-analysis",
-  intptr_t(&MachineFunctionAnalysis::ID), 0,
+   &MachineFunctionAnalysis::ID, 0,
   /*CFGOnly=*/false, /*is_analysis=*/true);
 
 char MachineFunctionAnalysis::ID = 0;
 
 MachineFunctionAnalysis::MachineFunctionAnalysis(const TargetMachine &tm,
                                                  CodeGenOpt::Level OL) :
-  FunctionPass(&ID), TM(tm), OptLevel(OL), MF(0) {
+  FunctionPass(ID), TM(tm), OptLevel(OL), MF(0) {
 }
 
 MachineFunctionAnalysis::~MachineFunctionAnalysis() {

Modified: llvm/branches/wendling/eh/lib/CodeGen/MachineFunctionPrinterPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/MachineFunctionPrinterPass.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/MachineFunctionPrinterPass.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/MachineFunctionPrinterPass.cpp Tue Oct 26 19:48:03 2010
@@ -29,7 +29,7 @@
   const std::string Banner;
 
   MachineFunctionPrinterPass(raw_ostream &os, const std::string &banner) 
-      : MachineFunctionPass(&ID), OS(os), Banner(banner) {}
+      : MachineFunctionPass(ID), OS(os), Banner(banner) {}
 
   const char *getPassName() const { return "MachineFunction Printer"; }
 

Modified: llvm/branches/wendling/eh/lib/CodeGen/MachineInstr.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/MachineInstr.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/MachineInstr.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/MachineInstr.cpp Tue Oct 26 19:48:03 2010
@@ -102,13 +102,13 @@
     if (MachineBasicBlock *MBB = MI->getParent())
       if (MachineFunction *MF = MBB->getParent()) {
         RemoveRegOperandFromRegInfo();
-        Contents.Reg.RegNo = Reg;
+        SmallContents.RegNo = Reg;
         AddRegOperandToRegInfo(&MF->getRegInfo());
         return;
       }
         
   // Otherwise, just change the register, no problem.  :)
-  Contents.Reg.RegNo = Reg;
+  SmallContents.RegNo = Reg;
 }
 
 void MachineOperand::substVirtReg(unsigned Reg, unsigned SubIdx,
@@ -159,7 +159,7 @@
   } else {
     // Otherwise, change this to a register and set the reg#.
     OpKind = MO_Register;
-    Contents.Reg.RegNo = Reg;
+    SmallContents.RegNo = Reg;
 
     // If this operand is embedded in a function, add the operand to the
     // register's use/def list.
@@ -335,10 +335,45 @@
 // MachineMemOperand Implementation
 //===----------------------------------------------------------------------===//
 
-MachineMemOperand::MachineMemOperand(const Value *v, unsigned int f,
-                                     int64_t o, uint64_t s, unsigned int a)
-  : Offset(o), Size(s), V(v),
-    Flags((f & ((1 << MOMaxBits) - 1)) | ((Log2_32(a) + 1) << MOMaxBits)) {
+/// getAddrSpace - Return the LLVM IR address space number that this pointer
+/// points into.
+unsigned MachinePointerInfo::getAddrSpace() const {
+  if (V == 0) return 0;
+  return cast<PointerType>(V->getType())->getAddressSpace();
+}
+
+/// getConstantPool - Return a MachinePointerInfo record that refers to the
+/// constant pool.
+MachinePointerInfo MachinePointerInfo::getConstantPool() {
+  return MachinePointerInfo(PseudoSourceValue::getConstantPool());
+}
+
+/// getFixedStack - Return a MachinePointerInfo record that refers to the
+/// the specified FrameIndex.
+MachinePointerInfo MachinePointerInfo::getFixedStack(int FI, int64_t offset) {
+  return MachinePointerInfo(PseudoSourceValue::getFixedStack(FI), offset);
+}
+
+MachinePointerInfo MachinePointerInfo::getJumpTable() {
+  return MachinePointerInfo(PseudoSourceValue::getJumpTable());
+}
+
+MachinePointerInfo MachinePointerInfo::getGOT() {
+  return MachinePointerInfo(PseudoSourceValue::getGOT());
+}
+
+MachinePointerInfo MachinePointerInfo::getStack(int64_t Offset) {
+  return MachinePointerInfo(PseudoSourceValue::getStack(), Offset);
+}
+
+MachineMemOperand::MachineMemOperand(MachinePointerInfo ptrinfo, unsigned f,
+                                     uint64_t s, unsigned int a,
+                                     const MDNode *TBAAInfo)
+  : PtrInfo(ptrinfo), Size(s),
+    Flags((f & ((1 << MOMaxBits) - 1)) | ((Log2_32(a) + 1) << MOMaxBits)),
+    TBAAInfo(TBAAInfo) {
+  assert((PtrInfo.V == 0 || isa<PointerType>(PtrInfo.V->getType())) &&
+         "invalid pointer value");
   assert(getBaseAlignment() == a && "Alignment is not a power of 2!");
   assert((isLoad() || isStore()) && "Not a load/store!");
 }
@@ -346,9 +381,9 @@
 /// Profile - Gather unique data for the object.
 ///
 void MachineMemOperand::Profile(FoldingSetNodeID &ID) const {
-  ID.AddInteger(Offset);
+  ID.AddInteger(getOffset());
   ID.AddInteger(Size);
-  ID.AddPointer(V);
+  ID.AddPointer(getValue());
   ID.AddInteger(Flags);
 }
 
@@ -364,8 +399,7 @@
       ((Log2_32(MMO->getBaseAlignment()) + 1) << MOMaxBits);
     // Also update the base and offset, because the new alignment may
     // not be applicable with the old ones.
-    V = MMO->getValue();
-    Offset = MMO->getOffset();
+    PtrInfo = MMO->PtrInfo;
   }
 }
 
@@ -410,6 +444,16 @@
       MMO.getBaseAlignment() != MMO.getSize())
     OS << "(align=" << MMO.getAlignment() << ")";
 
+  // Print TBAA info.
+  if (const MDNode *TBAAInfo = MMO.getTBAAInfo()) {
+    OS << "(tbaa=";
+    if (TBAAInfo->getNumOperands() > 0)
+      WriteAsOperand(OS, TBAAInfo->getOperand(0), /*PrintType=*/false);
+    else
+      OS << "<unknown>";
+    OS << ")";
+  }
+
   return OS;
 }
 
@@ -1166,7 +1210,9 @@
         if (PSV->isConstant(MFI))
           continue;
       // If we have an AliasAnalysis, ask it whether the memory is constant.
-      if (AA && AA->pointsToConstantMemory(V))
+      if (AA && AA->pointsToConstantMemory(
+                      AliasAnalysis::Location(V, (*I)->getSize(),
+                                              (*I)->getTBAAInfo())))
         continue;
     }
 
@@ -1207,6 +1253,17 @@
   return true;
 }
 
+/// copyImplicitOps - Copy implicit register operands from the specified
+/// instruction to this instruction.
+void MachineInstr::copyImplicitOps(const MachineInstr *MI) {
+  for (unsigned i = MI->getDesc().getNumOperands(), e = MI->getNumOperands();
+       i != e; ++i) {
+    const MachineOperand &MO = MI->getOperand(i);
+    if (MO.isReg() && MO.isImplicit())
+      addOperand(MO);
+  }
+}
+
 void MachineInstr::dump() const {
   dbgs() << "  " << *this;
 }
@@ -1338,7 +1395,7 @@
     for (mmo_iterator i = memoperands_begin(), e = memoperands_end();
          i != e; ++i) {
       OS << **i;
-      if (next(i) != e)
+      if (llvm::next(i) != e)
         OS << " ";
     }
   }

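The pointsToConstantMemory call above now passes a (pointer, size, TBAA tag) triple rather than a bare Value*, so the analysis can answer precisely for the bytes actually accessed. A toy standalone model of a size-aware query (hypothetical oracle, not the AliasAnalysis interface):

  #include <cstdint>
  #include <map>

  struct Location {
    const void *Ptr;      // base value of the access
    uint64_t Size;        // access size in bytes
    const void *TBAATag;  // optional type-based aliasing metadata
  };

  // The oracle records how many leading bytes of each base are known
  // constant; the query succeeds only if the whole access fits.
  bool pointsToConstantMemory(const Location &Loc,
                              const std::map<const void *, uint64_t> &Known) {
    auto It = Known.find(Loc.Ptr);
    return It != Known.end() && Loc.Size <= It->second;
  }
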
Modified: llvm/branches/wendling/eh/lib/CodeGen/MachineLICM.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/MachineLICM.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/MachineLICM.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/MachineLICM.cpp Tue Oct 26 19:48:03 2010
@@ -28,8 +28,10 @@
 #include "llvm/CodeGen/MachineMemOperand.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/Target/TargetLowering.h"
 #include "llvm/Target/TargetRegisterInfo.h"
 #include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetInstrItineraries.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/ADT/DenseMap.h"
@@ -40,8 +42,14 @@
 
 using namespace llvm;
 
-STATISTIC(NumHoisted, "Number of machine instructions hoisted out of loops");
-STATISTIC(NumCSEed,   "Number of hoisted machine instructions CSEed");
+STATISTIC(NumHoisted,
+          "Number of machine instructions hoisted out of loops");
+STATISTIC(NumLowRP,
+          "Number of instructions hoisted in low reg pressure situation");
+STATISTIC(NumHighLatency,
+          "Number of high latency instructions hoisted");
+STATISTIC(NumCSEed,
+          "Number of hoisted machine instructions CSEed");
 STATISTIC(NumPostRAHoisted,
           "Number of machine instructions hoisted out of loops post regalloc");
 
@@ -51,9 +59,11 @@
 
     const TargetMachine   *TM;
     const TargetInstrInfo *TII;
+    const TargetLowering *TLI;
     const TargetRegisterInfo *TRI;
     const MachineFrameInfo *MFI;
-    MachineRegisterInfo *RegInfo;
+    MachineRegisterInfo *MRI;
+    const InstrItineraryData *InstrItins;
 
     // Various analyses that we use...
     AliasAnalysis        *AA;      // Alias analysis info.
@@ -68,16 +78,31 @@
 
     BitVector AllocatableSet;
 
+    // Track 'estimated' register pressure.
+    SmallSet<unsigned, 32> RegSeen;
+    SmallVector<unsigned, 8> RegPressure;
+
+    // Register pressure "limit" per register class. If the pressure
+    // is higher than the limit, then it's considered high.
+    SmallVector<unsigned, 8> RegLimit;
+
+    // Register pressure on path leading from loop preheader to current BB.
+    SmallVector<SmallVector<unsigned, 8>, 16> BackTrace;
+
     // For each opcode, keep a list of potential CSE instructions.
     DenseMap<unsigned, std::vector<const MachineInstr*> > CSEMap;
 
   public:
     static char ID; // Pass identification, replacement for typeid
     MachineLICM() :
-      MachineFunctionPass(&ID), PreRegAlloc(true) {}
+      MachineFunctionPass(ID), PreRegAlloc(true) {
+        initializeMachineLICMPass(*PassRegistry::getPassRegistry());
+      }
 
     explicit MachineLICM(bool PreRA) :
-      MachineFunctionPass(&ID), PreRegAlloc(PreRA) {}
+      MachineFunctionPass(ID), PreRegAlloc(PreRA) {
+        initializeMachineLICMPass(*PassRegistry::getPassRegistry());
+      }
 
     virtual bool runOnMachineFunction(MachineFunction &MF);
 
@@ -94,6 +119,13 @@
     }
 
     virtual void releaseMemory() {
+      RegSeen.clear();
+      RegPressure.clear();
+      RegLimit.clear();
+      BackTrace.clear();
+      for (DenseMap<unsigned,std::vector<const MachineInstr*> >::iterator
+             CI = CSEMap.begin(), CE = CSEMap.end(); CI != CE; ++CI)
+        CI->second.clear();
       CSEMap.clear();
     }
 
@@ -138,6 +170,24 @@
     /// 
     bool IsLoopInvariantInst(MachineInstr &I);
 
+    /// HasHighOperandLatency - Compute operand latency between a def of 'Reg'
+    /// and a use in the current loop; return true if the target considers
+    /// it 'high'.
+    bool HasHighOperandLatency(MachineInstr &MI, unsigned DefIdx,
+                               unsigned Reg) const;
+
+    bool IsCheapInstruction(MachineInstr &MI) const;
+
+    /// CanCauseHighRegPressure - Visit BBs from header to current BB,
+    /// check whether hoisting an instruction with the given cost can cause
+    /// high register pressure.
+    bool CanCauseHighRegPressure(DenseMap<unsigned, int> &Cost);
+
+    /// UpdateBackTraceRegPressure - Traverse the back trace from header to
+    /// the current block and update their register pressures to reflect the
+    /// effect of hoisting MI from the current block to the preheader.
+    void UpdateBackTraceRegPressure(const MachineInstr *MI);
+
     /// IsProfitableToHoist - Return true if it is potentially profitable to
     /// hoist the given loop invariant.
     bool IsProfitableToHoist(MachineInstr &MI);
@@ -148,7 +198,16 @@
     /// visit definitions before uses, allowing us to hoist a loop body in one
     /// pass without iteration.
     ///
-    void HoistRegion(MachineDomTreeNode *N);
+    void HoistRegion(MachineDomTreeNode *N, bool IsHeader = false);
+
+    /// InitRegPressure - Find all virtual register references that are liveout
+    /// of the preheader to initialize the starting "register pressure". Note
+    /// this does not count live through (livein but not used) registers.
+    void InitRegPressure(MachineBasicBlock *BB);
+
+    /// UpdateRegPressure - Update estimate of register pressure after the
+    /// specified instruction.
+    void UpdateRegPressure(const MachineInstr *MI);
 
     /// isLoadFromConstantMemory - Return true if the given instruction is a
     /// load from constant memory.
@@ -174,8 +233,8 @@
 
     /// Hoist - When an instruction is found to only use loop invariant operands
     /// that are safe to hoist, this function is called to do the dirty work.
-    ///
-    void Hoist(MachineInstr *MI);
+    /// It returns true if the instruction is hoisted.
+    bool Hoist(MachineInstr *MI, MachineBasicBlock *Preheader);
 
     /// InitCSEMap - Initialize the CSE map with instructions that are in the
     /// current loop preheader that may become duplicates of instructions that
@@ -189,8 +248,13 @@
 } // end anonymous namespace
 
 char MachineLICM::ID = 0;
-INITIALIZE_PASS(MachineLICM, "machinelicm",
-                "Machine Loop Invariant Code Motion", false, false);
+INITIALIZE_PASS_BEGIN(MachineLICM, "machinelicm",
+                "Machine Loop Invariant Code Motion", false, false)
+INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
+INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_END(MachineLICM, "machinelicm",
+                "Machine Loop Invariant Code Motion", false, false)
 
 FunctionPass *llvm::createMachineLICMPass(bool PreRegAlloc) {
   return new MachineLICM(PreRegAlloc);
@@ -212,18 +276,32 @@
 
 bool MachineLICM::runOnMachineFunction(MachineFunction &MF) {
   if (PreRegAlloc)
-    DEBUG(dbgs() << "******** Pre-regalloc Machine LICM ********\n");
+    DEBUG(dbgs() << "******** Pre-regalloc Machine LICM: ");
   else
-    DEBUG(dbgs() << "******** Post-regalloc Machine LICM ********\n");
+    DEBUG(dbgs() << "******** Post-regalloc Machine LICM: ");
+  DEBUG(dbgs() << MF.getFunction()->getName() << " ********\n");
 
   Changed = FirstInLoop = false;
   TM = &MF.getTarget();
   TII = TM->getInstrInfo();
+  TLI = TM->getTargetLowering();
   TRI = TM->getRegisterInfo();
   MFI = MF.getFrameInfo();
-  RegInfo = &MF.getRegInfo();
+  MRI = &MF.getRegInfo();
+  InstrItins = TM->getInstrItineraryData();
   AllocatableSet = TRI->getAllocatableSet(MF);
 
+  if (PreRegAlloc) {
+    // Estimate register pressure during pre-regalloc pass.
+    unsigned NumRC = TRI->getNumRegClasses();
+    RegPressure.resize(NumRC);
+    std::fill(RegPressure.begin(), RegPressure.end(), 0);
+    RegLimit.resize(NumRC);
+    for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
+           E = TRI->regclass_end(); I != E; ++I)
+      RegLimit[(*I)->getID()] = TLI->getRegPressureLimit(*I, MF);
+  }
+
   // Get our Loop information...
   MLI = &getAnalysis<MachineLoopInfo>();
   DT  = &getAnalysis<MachineDominatorTree>();
@@ -248,7 +326,7 @@
       // being hoisted.
       MachineDomTreeNode *N = DT->getNode(CurLoop->getHeader());
       FirstInLoop = true;
-      HoistRegion(N);
+      HoistRegion(N, true);
       CSEMap.clear();
     }
   }
@@ -474,17 +552,33 @@
 /// first order w.r.t the DominatorTree. This allows us to visit definitions
 /// before uses, allowing us to hoist a loop body in one pass without iteration.
 ///
-void MachineLICM::HoistRegion(MachineDomTreeNode *N) {
+void MachineLICM::HoistRegion(MachineDomTreeNode *N, bool IsHeader) {
   assert(N != 0 && "Null dominator tree node?");
   MachineBasicBlock *BB = N->getBlock();
 
   // If this subregion is not in the top level loop at all, exit.
   if (!CurLoop->contains(BB)) return;
 
+  MachineBasicBlock *Preheader = getCurPreheader();
+  if (!Preheader)
+    return;
+
+  if (IsHeader) {
+    // Compute registers which are livein into the loop headers.
+    RegSeen.clear();
+    BackTrace.clear();
+    InitRegPressure(Preheader);
+  }
+
+  // Remember livein register pressure.
+  BackTrace.push_back(RegPressure);
+
   for (MachineBasicBlock::iterator
          MII = BB->begin(), E = BB->end(); MII != E; ) {
     MachineBasicBlock::iterator NextMII = MII; ++NextMII;
-    Hoist(&*MII);
+    MachineInstr *MI = &*MII;
+    if (!Hoist(MI, Preheader))
+      UpdateRegPressure(MI);
     MII = NextMII;
   }
 
@@ -496,6 +590,99 @@
     for (unsigned I = 0, E = Children.size(); I != E; ++I)
       HoistRegion(Children[I]);
   }
+
+  BackTrace.pop_back();
+}
+
+static bool isOperandKill(const MachineOperand &MO, MachineRegisterInfo *MRI) {
+  return MO.isKill() || MRI->hasOneNonDBGUse(MO.getReg());
+}
+
+/// InitRegPressure - Find all virtual register references that are liveout of
+/// the preheader to initialize the starting "register pressure". Note this
+/// does not count live through (livein but not used) registers.
+void MachineLICM::InitRegPressure(MachineBasicBlock *BB) {
+  std::fill(RegPressure.begin(), RegPressure.end(), 0);
+
+  // If the preheader has only a single predecessor and it ends with a
+  // fallthrough or an unconditional branch, then scan its predecessor for live
+  // defs as well. This happens whenever the preheader is created by splitting
+  // the critical edge from the loop predecessor to the loop header.
+  if (BB->pred_size() == 1) {
+    MachineBasicBlock *TBB = 0, *FBB = 0;
+    SmallVector<MachineOperand, 4> Cond;
+    if (!TII->AnalyzeBranch(*BB, TBB, FBB, Cond, false) && Cond.empty())
+      InitRegPressure(*BB->pred_begin());
+  }
+
+  for (MachineBasicBlock::iterator MII = BB->begin(), E = BB->end();
+       MII != E; ++MII) {
+    MachineInstr *MI = &*MII;
+    for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
+      const MachineOperand &MO = MI->getOperand(i);
+      if (!MO.isReg() || MO.isImplicit())
+        continue;
+      unsigned Reg = MO.getReg();
+      if (!Reg || TargetRegisterInfo::isPhysicalRegister(Reg))
+        continue;
+
+      bool isNew = RegSeen.insert(Reg);
+      const TargetRegisterClass *RC = MRI->getRegClass(Reg);
+      EVT VT = *RC->vt_begin();
+      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+      if (MO.isDef())
+        RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
+      else {
+        bool isKill = isOperandKill(MO, MRI);
+        if (isNew && !isKill)
+          // Haven't seen this, it must be a livein.
+          RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
+        else if (!isNew && isKill)
+          RegPressure[RCId] -= TLI->getRepRegClassCostFor(VT);
+      }
+    }
+  }
+}
+
+/// UpdateRegPressure - Update estimate of register pressure after the
+/// specified instruction.
+void MachineLICM::UpdateRegPressure(const MachineInstr *MI) {
+  if (MI->isImplicitDef())
+    return;
+
+  SmallVector<unsigned, 4> Defs;
+  for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
+    const MachineOperand &MO = MI->getOperand(i);
+    if (!MO.isReg() || MO.isImplicit())
+      continue;
+    unsigned Reg = MO.getReg();
+    if (!Reg || TargetRegisterInfo::isPhysicalRegister(Reg))
+      continue;
+
+    bool isNew = RegSeen.insert(Reg);
+    if (MO.isDef())
+      Defs.push_back(Reg);
+    else if (!isNew && isOperandKill(MO, MRI)) {
+      const TargetRegisterClass *RC = MRI->getRegClass(Reg);
+      EVT VT = *RC->vt_begin();
+      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+      unsigned RCCost = TLI->getRepRegClassCostFor(VT);
+
+      if (RCCost > RegPressure[RCId])
+        RegPressure[RCId] = 0;
+      else
+        RegPressure[RCId] -= RCCost;
+    }
+  }
+
+  while (!Defs.empty()) {
+    unsigned Reg = Defs.pop_back_val();
+    const TargetRegisterClass *RC = MRI->getRegClass(Reg);
+    EVT VT = *RC->vt_begin();
+    unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+    unsigned RCCost = TLI->getRepRegClassCostFor(VT);
+    RegPressure[RCId] += RCCost;
+  }
 }
 
 /// IsLICMCandidate - Returns true if the instruction may be a suitable
@@ -535,14 +722,14 @@
         // If the physreg has no defs anywhere, it's just an ambient register
         // and we can freely move its uses. Alternatively, if it's allocatable,
         // it could get allocated to something with a def during allocation.
-        if (!RegInfo->def_empty(Reg))
+        if (!MRI->def_empty(Reg))
           return false;
         if (AllocatableSet.test(Reg))
           return false;
         // Check for a def among the register's aliases too.
         for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
           unsigned AliasReg = *Alias;
-          if (!RegInfo->def_empty(AliasReg))
+          if (!MRI->def_empty(AliasReg))
             return false;
           if (AllocatableSet.test(AliasReg))
             return false;
@@ -562,12 +749,12 @@
     if (!MO.isUse())
       continue;
 
-    assert(RegInfo->getVRegDef(Reg) &&
+    assert(MRI->getVRegDef(Reg) &&
            "Machine instr not mapped for this vreg?!");
 
     // If the loop contains the definition of an operand, then the instruction
     // isn't loop invariant.
-    if (CurLoop->contains(RegInfo->getVRegDef(Reg)))
+    if (CurLoop->contains(MRI->getVRegDef(Reg)))
       return false;
   }
 
@@ -577,9 +764,9 @@
 
 
 /// HasPHIUses - Return true if the specified register has any PHI use.
-static bool HasPHIUses(unsigned Reg, MachineRegisterInfo *RegInfo) {
-  for (MachineRegisterInfo::use_iterator UI = RegInfo->use_begin(Reg),
-         UE = RegInfo->use_end(); UI != UE; ++UI) {
+static bool HasPHIUses(unsigned Reg, MachineRegisterInfo *MRI) {
+  for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(Reg),
+         UE = MRI->use_end(); UI != UE; ++UI) {
     MachineInstr *UseMI = &*UI;
     if (UseMI->isPHI())
       return true;
@@ -601,23 +788,213 @@
     MachineFunction &MF = *MI->getParent()->getParent();
     return PSV->isConstant(MF.getFrameInfo());
   } else {
-    return AA->pointsToConstantMemory(MMO->getValue());
+    return AA->pointsToConstantMemory(AliasAnalysis::Location(MMO->getValue(),
+                                                              MMO->getSize(),
+                                                              MMO->getTBAAInfo()));
+  }
+}
+
+/// HasHighOperandLatency - Compute operand latency between a def of 'Reg'
+/// and a use in the current loop; return true if the target considers
+/// it 'high'.
+bool MachineLICM::HasHighOperandLatency(MachineInstr &MI,
+                                        unsigned DefIdx, unsigned Reg) const {
+  if (!InstrItins || InstrItins->isEmpty() || MRI->use_nodbg_empty(Reg))
+    return false;
+
+  for (MachineRegisterInfo::use_nodbg_iterator I = MRI->use_nodbg_begin(Reg),
+         E = MRI->use_nodbg_end(); I != E; ++I) {
+    MachineInstr *UseMI = &*I;
+    if (UseMI->isCopyLike())
+      continue;
+    if (!CurLoop->contains(UseMI->getParent()))
+      continue;
+    for (unsigned i = 0, e = UseMI->getNumOperands(); i != e; ++i) {
+      const MachineOperand &MO = UseMI->getOperand(i);
+      if (!MO.isReg() || !MO.isUse())
+        continue;
+      unsigned MOReg = MO.getReg();
+      if (MOReg != Reg)
+        continue;
+
+      if (TII->hasHighOperandLatency(InstrItins, MRI, &MI, DefIdx, UseMI, i))
+        return true;
+    }
+
+    // Only look at the first in-loop use.
+    break;
+  }
+
+  return false;
+}
+
+/// IsCheapInstruction - Return true if the instruction is marked "cheap" or
+/// the operand latency between its def and a use is one or less.
+bool MachineLICM::IsCheapInstruction(MachineInstr &MI) const {
+  if (MI.getDesc().isAsCheapAsAMove() || MI.isCopyLike())
+    return true;
+  if (!InstrItins || InstrItins->isEmpty())
+    return false;
+
+  bool isCheap = false;
+  unsigned NumDefs = MI.getDesc().getNumDefs();
+  for (unsigned i = 0, e = MI.getNumOperands(); NumDefs && i != e; ++i) {
+    MachineOperand &DefMO = MI.getOperand(i);
+    if (!DefMO.isReg() || !DefMO.isDef())
+      continue;
+    --NumDefs;
+    unsigned Reg = DefMO.getReg();
+    if (TargetRegisterInfo::isPhysicalRegister(Reg))
+      continue;
+
+    if (!TII->hasLowDefLatency(InstrItins, &MI, i))
+      return false;
+    isCheap = true;
+  }
+
+  return isCheap;
+}
+
+/// CanCauseHighRegPressure - Visit BBs from header to current BB, check
+/// whether hoisting an instruction with the given cost can cause high
+/// register pressure.
+bool MachineLICM::CanCauseHighRegPressure(DenseMap<unsigned, int> &Cost) {
+  for (DenseMap<unsigned, int>::iterator CI = Cost.begin(), CE = Cost.end();
+       CI != CE; ++CI) {
+    if (CI->second <= 0) 
+      continue;
+
+    unsigned RCId = CI->first;
+    for (unsigned i = BackTrace.size(); i != 0; --i) {
+      SmallVector<unsigned, 8> &RP = BackTrace[i-1];
+      if (RP[RCId] + CI->second >= RegLimit[RCId])
+        return true;
+    }
+  }
+
+  return false;
+}
+
+/// UpdateBackTraceRegPressure - Traverse the back trace from header to the
+/// current block and update their register pressures to reflect the effect
+/// of hoisting MI from the current block to the preheader.
+void MachineLICM::UpdateBackTraceRegPressure(const MachineInstr *MI) {
+  if (MI->isImplicitDef())
+    return;
+
+  // First compute the 'cost' of the instruction, i.e. its contribution
+  // to register pressure.
+  DenseMap<unsigned, int> Cost;
+  for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
+    const MachineOperand &MO = MI->getOperand(i);
+    if (!MO.isReg() || MO.isImplicit())
+      continue;
+    unsigned Reg = MO.getReg();
+    if (!Reg || TargetRegisterInfo::isPhysicalRegister(Reg))
+      continue;
+
+    const TargetRegisterClass *RC = MRI->getRegClass(Reg);
+    EVT VT = *RC->vt_begin();
+    unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+    unsigned RCCost = TLI->getRepRegClassCostFor(VT);
+    if (MO.isDef()) {
+      DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
+      if (CI != Cost.end())
+        CI->second += RCCost;
+      else
+        Cost.insert(std::make_pair(RCId, RCCost));
+    } else if (isOperandKill(MO, MRI)) {
+      DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
+      if (CI != Cost.end())
+        CI->second -= RCCost;
+      else
+        Cost.insert(std::make_pair(RCId, -RCCost));
+    }
+  }
+
+  // Update register pressure of blocks from loop header to current block.
+  for (unsigned i = 0, e = BackTrace.size(); i != e; ++i) {
+    SmallVector<unsigned, 8> &RP = BackTrace[i];
+    for (DenseMap<unsigned, int>::iterator CI = Cost.begin(), CE = Cost.end();
+         CI != CE; ++CI) {
+      unsigned RCId = CI->first;
+      RP[RCId] += CI->second;
+    }
   }
 }
 
 /// IsProfitableToHoist - Return true if it is potentially profitable to hoist
 /// the given loop invariant.
 bool MachineLICM::IsProfitableToHoist(MachineInstr &MI) {
-  // FIXME: For now, only hoist re-materilizable instructions. LICM will
-  // increase register pressure. We want to make sure it doesn't increase
-  // spilling.
+  if (MI.isImplicitDef())
+    return true;
+
+  // If the instruction is cheap, only hoist if it is re-materializable. LICM
+  // would otherwise increase register pressure, which is probably not worth
+  // it for a cheap instruction.
   // Also hoist loads from constant memory, e.g. loads from stubs or the GOT.
   // Hoisting these tends to help performance in low register pressure
   // situations. The trade-off is that it may cause spills in high pressure
   // situations, since it ends up adding a store in the loop preheader. But
   // the reload is no more expensive, and the side benefit is that these
   // loads are frequently CSE'ed.
-  if (!TII->isTriviallyReMaterializable(&MI, AA)) {
-    if (!isLoadFromConstantMemory(&MI))
+  if (IsCheapInstruction(MI)) {
+    if (!TII->isTriviallyReMaterializable(&MI, AA))
+      return false;
+  } else {
+    // Estimate register pressure to determine whether to LICM the instruction.
+    // In low register pressure situations, we can be more aggressive about
+    // hoisting. Also, favor hoisting long latency instructions even in
+    // moderately high pressure situations.
+    DenseMap<unsigned, int> Cost;
+    for (unsigned i = 0, e = MI.getDesc().getNumOperands(); i != e; ++i) {
+      const MachineOperand &MO = MI.getOperand(i);
+      if (!MO.isReg() || MO.isImplicit())
+        continue;
+      unsigned Reg = MO.getReg();
+      if (!Reg || TargetRegisterInfo::isPhysicalRegister(Reg))
+        continue;
+      if (MO.isDef()) {
+        if (HasHighOperandLatency(MI, i, Reg)) {
+          ++NumHighLatency;
+          return true;
+        }
+
+        const TargetRegisterClass *RC = MRI->getRegClass(Reg);
+        EVT VT = *RC->vt_begin();
+        unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+        unsigned RCCost = TLI->getRepRegClassCostFor(VT);
+        DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
+        if (CI != Cost.end())
+          CI->second += RCCost;
+        else
+          Cost.insert(std::make_pair(RCId, RCCost));
+      } else if (isOperandKill(MO, MRI)) {
+        // If a virtual register use is a kill, hoisting it out of the loop
+        // may actually reduce register pressure or be register pressure
+        // neutral.
+        const TargetRegisterClass *RC = MRI->getRegClass(Reg);
+        EVT VT = *RC->vt_begin();
+        unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+        unsigned RCCost = TLI->getRepRegClassCostFor(VT);
+        DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
+        if (CI != Cost.end())
+          CI->second -= RCCost;
+        else
+          Cost.insert(std::make_pair(RCId, -RCCost));
+      }
+    }
+
+    // Visit BBs from header to current BB, if hoisting this doesn't cause
+    // high register pressure, then it's safe to proceed.
+    if (!CanCauseHighRegPressure(Cost)) {
+      ++NumLowRP;
+      return true;
+    }
+
+    // In a high register pressure situation, only hoist if the instruction
+    // is going to be remat'ed.
+    if (!TII->isTriviallyReMaterializable(&MI, AA) &&
+        !isLoadFromConstantMemory(&MI))
       return false;
   }
 
@@ -628,7 +1005,7 @@
     const MachineOperand &MO = MI.getOperand(i);
     if (!MO.isReg() || !MO.isDef())
       continue;
-    if (HasPHIUses(MO.getReg(), RegInfo))
+    if (HasPHIUses(MO.getReg(), MRI))
       return false;
   }
 
@@ -636,6 +1013,10 @@
 }
 
 MachineInstr *MachineLICM::ExtractHoistableLoad(MachineInstr *MI) {
+  // Don't unfold simple loads.
+  if (MI->getDesc().canFoldAsLoad())
+    return 0;
+
   // If not, we may be able to unfold a load and hoist that.
   // First test whether the instruction is loading from an amenable
   // memory location.
@@ -654,7 +1035,7 @@
   if (TID.getNumDefs() != 1) return 0;
   const TargetRegisterClass *RC = TID.OpInfo[LoadRegIndex].getRegClass(TRI);
   // Ok, we're unfolding. Create a temporary register and do the unfold.
-  unsigned Reg = RegInfo->createVirtualRegister(RC);
+  unsigned Reg = MRI->createVirtualRegister(RC);
 
   MachineFunction &MF = *MI->getParent()->getParent();
   SmallVector<MachineInstr *, 2> NewMIs;
@@ -678,6 +1059,10 @@
     NewMIs[1]->eraseFromParent();
     return 0;
   }
+
+  // Update register pressure for the unfolded instruction.
+  UpdateRegPressure(NewMIs[1]);
+
   // Otherwise we successfully unfolded a load that we can hoist.
   MI->eraseFromParent();
   return NewMIs[0];
@@ -738,8 +1123,8 @@
 
       if (MO.isReg() && MO.isDef() &&
           !TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
-        RegInfo->replaceRegWith(MO.getReg(), Dup->getOperand(i).getReg());
-        RegInfo->clearKillFlags(Dup->getOperand(i).getReg());
+        MRI->replaceRegWith(MO.getReg(), Dup->getOperand(i).getReg());
+        MRI->clearKillFlags(Dup->getOperand(i).getReg());
       }
     }
     MI->eraseFromParent();
@@ -752,15 +1137,12 @@
 /// Hoist - When an instruction is found to use only loop invariant operands
 /// that are safe to hoist, this function is called to do the dirty work.
 ///
-void MachineLICM::Hoist(MachineInstr *MI) {
-  MachineBasicBlock *Preheader = getCurPreheader();
-  if (!Preheader) return;
-
+bool MachineLICM::Hoist(MachineInstr *MI, MachineBasicBlock *Preheader) {
   // First check whether we should hoist this instruction.
   if (!IsLoopInvariantInst(*MI) || !IsProfitableToHoist(*MI)) {
     // If not, try unfolding a hoistable load.
     MI = ExtractHoistableLoad(MI);
-    if (!MI) return;
+    if (!MI) return false;
   }
 
   // Now move the instructions to the predecessor, inserting it before any
@@ -791,13 +1173,16 @@
     // Otherwise, splice the instruction to the preheader.
     Preheader->splice(Preheader->getFirstTerminator(),MI->getParent(),MI);
 
+    // Update register pressure for BBs from header to this block.
+    UpdateBackTraceRegPressure(MI);
+
     // Clear the kill flags of any register this instruction defines,
     // since they may need to be live throughout the entire loop
     // rather than just live for part of it.
     for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
       MachineOperand &MO = MI->getOperand(i);
       if (MO.isReg() && MO.isDef() && !MO.isDead())
-        RegInfo->clearKillFlags(MO.getReg());
+        MRI->clearKillFlags(MO.getReg());
     }
 
     // Add to the CSE map.
@@ -812,6 +1197,8 @@
 
   ++NumHoisted;
   Changed = true;
+
+  return true;
 }
 
 MachineBasicBlock *MachineLICM::getCurPreheader() {

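The new profitability logic in MachineLICM comes down to two steps: build a per-register-class cost delta for the candidate (each def adds the class cost, each kill subtracts it), then scan the BackTrace of pressure snapshots from the loop header to the current block and refuse the hoist if any snapshot would reach the class limit. A standalone sketch of that final check, mirroring CanCauseHighRegPressure above (hypothetical class IDs used as vector indices):

  #include <map>
  #include <vector>

  // True if hoisting an instruction with these per-class cost deltas would
  // push some block on the header-to-here path to or past its limit.
  bool canCauseHighRegPressure(
      const std::map<unsigned, int> &Cost,
      const std::vector<std::vector<unsigned>> &BackTrace,
      const std::vector<unsigned> &RegLimit) {
    for (const auto &C : Cost) {
      if (C.second <= 0)
        continue;                      // net kills can only lower pressure
      for (const auto &Pressure : BackTrace)
        if (Pressure[C.first] + C.second >= RegLimit[C.first])
          return true;
    }
    return false;
  }
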
Modified: llvm/branches/wendling/eh/lib/CodeGen/MachineLoopInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/MachineLoopInfo.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/MachineLoopInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/MachineLoopInfo.cpp Tue Oct 26 19:48:03 2010
@@ -30,10 +30,13 @@
 }
 
 char MachineLoopInfo::ID = 0;
-static RegisterPass<MachineLoopInfo>
-X("machine-loops", "Machine Natural Loop Construction", true);
+INITIALIZE_PASS_BEGIN(MachineLoopInfo, "machine-loops",
+                "Machine Natural Loop Construction", true, true)
+INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
+INITIALIZE_PASS_END(MachineLoopInfo, "machine-loops",
+                "Machine Natural Loop Construction", true, true)
 
-const PassInfo *const llvm::MachineLoopInfoID = &X;
+char &llvm::MachineLoopInfoID = MachineLoopInfo::ID;
 
 bool MachineLoopInfo::runOnMachineFunction(MachineFunction &) {
   releaseMemory();

Modified: llvm/branches/wendling/eh/lib/CodeGen/MachineModuleInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/MachineModuleInfo.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/MachineModuleInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/MachineModuleInfo.cpp Tue Oct 26 19:48:03 2010
@@ -29,7 +29,7 @@
 
 // Handle the Pass registration stuff necessary to use TargetData's.
 INITIALIZE_PASS(MachineModuleInfo, "machinemoduleinfo",
-                "Machine Module Information", false, false);
+                "Machine Module Information", false, false)
 char MachineModuleInfo::ID = 0;
 
 // Out of line virtual method.
@@ -41,30 +41,30 @@
 public:
   MMIAddrLabelMapCallbackPtr() : Map(0) {}
   MMIAddrLabelMapCallbackPtr(Value *V) : CallbackVH(V), Map(0) {}
-  
+
   void setPtr(BasicBlock *BB) {
     ValueHandleBase::operator=(BB);
   }
-    
+
   void setMap(MMIAddrLabelMap *map) { Map = map; }
-  
+
   virtual void deleted();
   virtual void allUsesReplacedWith(Value *V2);
 };
-  
+
 class MMIAddrLabelMap {
   MCContext &Context;
   struct AddrLabelSymEntry {
     /// Symbols - The symbols for the label.  This is a pointer union that is
     /// either one symbol (the common case) or a list of symbols.
     PointerUnion<MCSymbol *, std::vector<MCSymbol*>*> Symbols;
-    
+
     Function *Fn;   // The containing function of the BasicBlock.
     unsigned Index; // The index in BBCallbacks for the BasicBlock.
   };
-  
+
   DenseMap<AssertingVH<BasicBlock>, AddrLabelSymEntry> AddrLabelSymbols;
-  
+
   /// BBCallbacks - Callbacks for the BasicBlocks that we have entries for.  We
   /// use this so we get notified if a block is deleted or RAUWd.
   std::vector<MMIAddrLabelMapCallbackPtr> BBCallbacks;
@@ -76,23 +76,23 @@
   DenseMap<AssertingVH<Function>, std::vector<MCSymbol*> >
     DeletedAddrLabelsNeedingEmission;
 public:
-  
+
   MMIAddrLabelMap(MCContext &context) : Context(context) {}
   ~MMIAddrLabelMap() {
     assert(DeletedAddrLabelsNeedingEmission.empty() &&
            "Some labels for deleted blocks never got emitted");
-    
+
     // Deallocate any of the 'list of symbols' case.
     for (DenseMap<AssertingVH<BasicBlock>, AddrLabelSymEntry>::iterator
          I = AddrLabelSymbols.begin(), E = AddrLabelSymbols.end(); I != E; ++I)
       if (I->second.Symbols.is<std::vector<MCSymbol*>*>())
         delete I->second.Symbols.get<std::vector<MCSymbol*>*>();
   }
-  
+
   MCSymbol *getAddrLabelSymbol(BasicBlock *BB);
   std::vector<MCSymbol*> getAddrLabelSymbolToEmit(BasicBlock *BB);
 
-  void takeDeletedSymbolsForFunction(Function *F, 
+  void takeDeletedSymbolsForFunction(Function *F,
                                      std::vector<MCSymbol*> &Result);
 
   void UpdateForDeletedBlock(BasicBlock *BB);
@@ -104,7 +104,7 @@
   assert(BB->hasAddressTaken() &&
          "Shouldn't get label for block without address taken");
   AddrLabelSymEntry &Entry = AddrLabelSymbols[BB];
-  
+
   // If we already had an entry for this block, just return it.
   if (!Entry.Symbols.isNull()) {
     assert(BB->getParent() == Entry.Fn && "Parent changed");
@@ -112,7 +112,7 @@
       return Entry.Symbols.get<MCSymbol*>();
     return (*Entry.Symbols.get<std::vector<MCSymbol*>*>())[0];
   }
-  
+
   // Otherwise, this is a new entry, create a new symbol for it and add an
   // entry to BBCallbacks so we can be notified if the BB is deleted or RAUWd.
   BBCallbacks.push_back(BB);
@@ -129,9 +129,9 @@
   assert(BB->hasAddressTaken() &&
          "Shouldn't get label for block without address taken");
   AddrLabelSymEntry &Entry = AddrLabelSymbols[BB];
-  
+
   std::vector<MCSymbol*> Result;
-  
+
   // If we already had an entry for this block, just return it.
   if (Entry.Symbols.isNull())
     Result.push_back(getAddrLabelSymbol(BB));
@@ -152,7 +152,7 @@
 
   // If there are no entries for the function, just return.
   if (I == DeletedAddrLabelsNeedingEmission.end()) return;
-  
+
   // Otherwise, take the list.
   std::swap(Result, I->second);
   DeletedAddrLabelsNeedingEmission.erase(I);
@@ -175,7 +175,7 @@
   if (MCSymbol *Sym = Entry.Symbols.dyn_cast<MCSymbol*>()) {
     if (Sym->isDefined())
       return;
-  
+
     // If the block is not yet defined, we need to emit it at the end of the
     // function.  Add the symbol to the DeletedAddrLabelsNeedingEmission list
     // for the containing Function.  Since the block is being deleted, its
@@ -187,7 +187,7 @@
     for (unsigned i = 0, e = Syms->size(); i != e; ++i) {
       MCSymbol *Sym = (*Syms)[i];
       if (Sym->isDefined()) continue;  // Ignore already emitted labels.
-      
+
       // If the block is not yet defined, we need to emit it at the end of the
       // function.  Add the symbol to the DeletedAddrLabelsNeedingEmission list
       // for the containing Function.  Since the block is being deleted, its
@@ -195,7 +195,7 @@
       // 'Entry'.
       DeletedAddrLabelsNeedingEmission[Entry.Fn].push_back(Sym);
     }
-    
+
     // The entry is deleted, free the memory associated with the symbol list.
     delete Syms;
   }
@@ -225,7 +225,7 @@
     SymList->push_back(PrevSym);
     NewEntry.Symbols = SymList;
   }
-      
+
   std::vector<MCSymbol*> *SymList =
     NewEntry.Symbols.get<std::vector<MCSymbol*>*>();
 
@@ -234,7 +234,7 @@
     SymList->push_back(Sym);
     return;
   }
-  
+
   // Otherwise, concatenate the list.
   std::vector<MCSymbol*> *Syms =OldEntry.Symbols.get<std::vector<MCSymbol*>*>();
   SymList->insert(SymList->end(), Syms->begin(), Syms->end());
@@ -254,9 +254,11 @@
 //===----------------------------------------------------------------------===//
 
 MachineModuleInfo::MachineModuleInfo(const MCAsmInfo &MAI)
-: ImmutablePass(&ID), Context(MAI),
+: ImmutablePass(ID), Context(MAI),
   ObjFileMMI(0),
-  CurCallSite(0), CallsEHReturn(0), CallsUnwindInit(0), DbgInfoAvailable(false){
+  CurCallSite(0), CallsEHReturn(0), CallsUnwindInit(0), DbgInfoAvailable(false),
+  CallsExternalVAFunctionWithFloatingPointArguments(false) {
+  initializeMachineModuleInfoPass(*PassRegistry::getPassRegistry());
   // Always emit some info, by default "no personality" info.
   Personalities.push_back(NULL);
   AddrLabelSymbols = 0;
@@ -264,7 +266,7 @@
 }
 
 MachineModuleInfo::MachineModuleInfo()
-: ImmutablePass(&ID), Context(*(MCAsmInfo*)0) {
+: ImmutablePass(ID), Context(*(MCAsmInfo*)0) {
   assert(0 && "This MachineModuleInfo constructor should never be called, MMI "
          "should always be explicitly constructed by LLVMTargetMachine");
   abort();
@@ -272,7 +274,7 @@
 
 MachineModuleInfo::~MachineModuleInfo() {
   delete ObjFileMMI;
-  
+
   // FIXME: Why isn't doFinalization being called??
   //assert(AddrLabelSymbols == 0 && "doFinalization not called");
   delete AddrLabelSymbols;
@@ -440,7 +442,7 @@
            (LPMap && (*LPMap)[BeginLabel] != 0)) &&
           (EndLabel->isDefined() ||
            (LPMap && (*LPMap)[EndLabel] != 0))) continue;
-      
+
       LandingPad.BeginLabels.erase(LandingPad.BeginLabels.begin() + j);
       LandingPad.EndLabels.erase(LandingPad.EndLabels.begin() + j);
       --j, --e;
@@ -523,20 +525,3 @@
   // in the zero index.
   return 0;
 }
-
-namespace {
-  /// VariableDebugSorter - Comparison to sort the VariableDbgInfo map
-  /// by source location, to avoid depending on the arbitrary order that
-  /// instruction selection visits variables in.
-  struct VariableDebugSorter {
-    bool operator()(const MachineModuleInfo::VariableDbgInfoMapTy::value_type &A,
-                    const MachineModuleInfo::VariableDbgInfoMapTy::value_type &B)
-                  const {
-       if (A.second.second.getLine() != B.second.second.getLine())
-         return A.second.second.getLine() < B.second.second.getLine();
-       if (A.second.second.getCol() != B.second.second.getCol())
-         return A.second.second.getCol() < B.second.second.getCol();
-       return false;
-    }
-  };
-}

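The AddrLabelSymEntry above packs either a single MCSymbol* (the common case) or an owned vector of them into one pointer-sized field. A standalone model of that one-or-many optimization, using std::variant in place of PointerUnion (hypothetical Sym type, not the MC API):

  #include <variant>
  #include <vector>

  struct Sym { int Id; };
  using SymList = std::vector<Sym *>;

  struct AddrLabelSyms {
    // Single symbol inline; spill to a list only when a second one arrives.
    std::variant<Sym *, SymList> Storage = static_cast<Sym *>(nullptr);

    void add(Sym *S) {
      if (auto *Single = std::get_if<Sym *>(&Storage)) {
        if (!*Single) { Storage = S; return; }  // first symbol, no allocation
        Storage = SymList{*Single, S};          // second symbol, spill
      } else {
        std::get<SymList>(Storage).push_back(S);
      }
    }
  };
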
Modified: llvm/branches/wendling/eh/lib/CodeGen/MachineRegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/MachineRegisterInfo.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/MachineRegisterInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/MachineRegisterInfo.cpp Tue Oct 26 19:48:03 2010
@@ -60,6 +60,20 @@
   RegClass2VRegMap[RC->getID()].push_back(VR);
 }
 
+const TargetRegisterClass *
+MachineRegisterInfo::constrainRegClass(unsigned Reg,
+                                       const TargetRegisterClass *RC) {
+  const TargetRegisterClass *OldRC = getRegClass(Reg);
+  if (OldRC == RC)
+    return RC;
+  const TargetRegisterClass *NewRC = getCommonSubClass(OldRC, RC);
+  if (!NewRC)
+    return 0;
+  if (NewRC != OldRC)
+    setRegClass(Reg, NewRC);
+  return NewRC;
+}
+
 /// createVirtualRegister - Create and return a new virtual register in the
 /// function with the specified register class.
 ///

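constrainRegClass packages the getCommonSubClass-and-setRegClass sequence that MachineCSE used to open-code: tighten a register's class toward a required one, or fail without changing anything. A standalone model with classes as explicit register sets and set intersection standing in for the subclass lattice (hypothetical RegClass type):

  #include <algorithm>
  #include <iterator>
  #include <set>
  #include <utility>

  using RegClass = std::set<int>;  // a class as its set of member registers

  // Tighten Current to the registers allowed by both constraints; return
  // false (leaving Current untouched) if no register satisfies both.
  bool constrainRegClass(RegClass &Current, const RegClass &Required) {
    RegClass Common;
    std::set_intersection(Current.begin(), Current.end(),
                          Required.begin(), Required.end(),
                          std::inserter(Common, Common.begin()));
    if (Common.empty())
      return false;
    Current = std::move(Common);
    return true;
  }
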
Modified: llvm/branches/wendling/eh/lib/CodeGen/MachineSink.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/MachineSink.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/MachineSink.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/MachineSink.cpp Tue Oct 26 19:48:03 2010
@@ -25,26 +25,41 @@
 #include "llvm/Target/TargetRegisterInfo.h"
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Target/TargetMachine.h"
+#include "llvm/ADT/SmallSet.h"
 #include "llvm/ADT/Statistic.h"
+#include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/raw_ostream.h"
 using namespace llvm;
 
-STATISTIC(NumSunk, "Number of machine instructions sunk");
+static cl::opt<bool> 
+SplitEdges("machine-sink-split",
+           cl::desc("Split critical edges during machine sinking"),
+           cl::init(true), cl::Hidden);
+
+STATISTIC(NumSunk,      "Number of machine instructions sunk");
+STATISTIC(NumSplit,     "Number of critical edges split");
+STATISTIC(NumCoalesces, "Number of copies coalesced");
 
 namespace {
   class MachineSinking : public MachineFunctionPass {
     const TargetInstrInfo *TII;
     const TargetRegisterInfo *TRI;
-    MachineRegisterInfo  *RegInfo; // Machine register information
+    MachineRegisterInfo  *MRI;  // Machine register information
     MachineDominatorTree *DT;   // Machine dominator tree
     MachineLoopInfo *LI;
     AliasAnalysis *AA;
     BitVector AllocatableSet;   // Which physregs are allocatable?
 
+    // Remember which edges have been considered for breaking.
+    SmallSet<std::pair<MachineBasicBlock*,MachineBasicBlock*>, 8>
+    CEBCandidates;
+
   public:
     static char ID; // Pass identification
-    MachineSinking() : MachineFunctionPass(&ID) {}
+    MachineSinking() : MachineFunctionPass(ID) {
+      initializeMachineSinkingPass(*PassRegistry::getPassRegistry());
+    }
 
     virtual bool runOnMachineFunction(MachineFunction &MF);
 
@@ -57,40 +72,132 @@
       AU.addPreserved<MachineDominatorTree>();
       AU.addPreserved<MachineLoopInfo>();
     }
+
+    virtual void releaseMemory() {
+      CEBCandidates.clear();
+    }
+
   private:
     bool ProcessBlock(MachineBasicBlock &MBB);
+    bool isWorthBreakingCriticalEdge(MachineInstr *MI,
+                                     MachineBasicBlock *From,
+                                     MachineBasicBlock *To);
+    MachineBasicBlock *SplitCriticalEdge(MachineInstr *MI,
+                                         MachineBasicBlock *From,
+                                         MachineBasicBlock *To,
+                                         bool BreakPHIEdge);
     bool SinkInstruction(MachineInstr *MI, bool &SawStore);
-    bool AllUsesDominatedByBlock(unsigned Reg, MachineBasicBlock *MBB) const;
+    bool AllUsesDominatedByBlock(unsigned Reg, MachineBasicBlock *MBB,
+                                 MachineBasicBlock *DefMBB,
+                                 bool &BreakPHIEdge, bool &LocalUse) const;
+    bool PerformTrivialForwardCoalescing(MachineInstr *MI,
+                                         MachineBasicBlock *MBB);
   };
 } // end anonymous namespace
 
 char MachineSinking::ID = 0;
-INITIALIZE_PASS(MachineSinking, "machine-sink",
-                "Machine code sinking", false, false);
+INITIALIZE_PASS_BEGIN(MachineSinking, "machine-sink",
+                "Machine code sinking", false, false)
+INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
+INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_END(MachineSinking, "machine-sink",
+                "Machine code sinking", false, false)
 
 FunctionPass *llvm::createMachineSinkingPass() { return new MachineSinking(); }
 
+bool MachineSinking::PerformTrivialForwardCoalescing(MachineInstr *MI,
+                                                     MachineBasicBlock *MBB) {
+  if (!MI->isCopy())
+    return false;
+
+  unsigned SrcReg = MI->getOperand(1).getReg();
+  unsigned DstReg = MI->getOperand(0).getReg();
+  if (!TargetRegisterInfo::isVirtualRegister(SrcReg) ||
+      !TargetRegisterInfo::isVirtualRegister(DstReg) ||
+      !MRI->hasOneNonDBGUse(SrcReg))
+    return false;
+
+  const TargetRegisterClass *SRC = MRI->getRegClass(SrcReg);
+  const TargetRegisterClass *DRC = MRI->getRegClass(DstReg);
+  if (SRC != DRC)
+    return false;
+
+  MachineInstr *DefMI = MRI->getVRegDef(SrcReg);
+  if (DefMI->isCopyLike())
+    return false;
+  DEBUG(dbgs() << "Coalescing: " << *DefMI);
+  DEBUG(dbgs() << "*** to: " << *MI);
+  MRI->replaceRegWith(DstReg, SrcReg);
+  MI->eraseFromParent();
+  ++NumCoalesces;
+  return true;
+}
+
 /// AllUsesDominatedByBlock - Return true if all uses of the specified register
-/// occur in blocks dominated by the specified block.
-bool MachineSinking::AllUsesDominatedByBlock(unsigned Reg,
-                                             MachineBasicBlock *MBB) const {
+/// occur in blocks dominated by the specified block. If any use is in the
+/// definition block, then return false, since it is never legal to move a
+/// def after its uses.
+bool
+MachineSinking::AllUsesDominatedByBlock(unsigned Reg,
+                                        MachineBasicBlock *MBB,
+                                        MachineBasicBlock *DefMBB,
+                                        bool &BreakPHIEdge,
+                                        bool &LocalUse) const {
   assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
          "Only makes sense for vregs");
+
+  if (MRI->use_nodbg_empty(Reg))
+    return true;
+
   // Ignoring debug uses is necessary so debug info doesn't affect the code.
   // This may leave a referencing dbg_value in the original block, before
   // the definition of the vreg.  Dwarf generator handles this although the
   // user might not get the right info at runtime.
+
+  // BreakPHIEdge is true if all the uses are in the successor MBB being sunk
+  // into and they are all PHI nodes. In this case, machine-sink must break
+  // the critical edge first. For example:
+  //
+  // BB#1: derived from LLVM BB %bb4.preheader
+  //   Predecessors according to CFG: BB#0
+  //     ...
+  //     %reg16385<def> = DEC64_32r %reg16437, %EFLAGS<imp-def,dead>
+  //     ...
+  //     JE_4 <BB#37>, %EFLAGS<imp-use>
+  //   Successors according to CFG: BB#37 BB#2
+  //
+  // BB#2: derived from LLVM BB %bb.nph
+  //   Predecessors according to CFG: BB#0 BB#1
+  //     %reg16386<def> = PHI %reg16434, <BB#0>, %reg16385, <BB#1>
+  BreakPHIEdge = true;
   for (MachineRegisterInfo::use_nodbg_iterator
-         I = RegInfo->use_nodbg_begin(Reg), E = RegInfo->use_nodbg_end();
+         I = MRI->use_nodbg_begin(Reg), E = MRI->use_nodbg_end();
        I != E; ++I) {
-    // Determine the block of the use.
     MachineInstr *UseInst = &*I;
     MachineBasicBlock *UseBlock = UseInst->getParent();
+    if (!(UseBlock == MBB && UseInst->isPHI() &&
+          UseInst->getOperand(I.getOperandNo()+1).getMBB() == DefMBB)) {
+      BreakPHIEdge = false;
+      break;
+    }
+  }
+  if (BreakPHIEdge)
+    return true;
 
+  for (MachineRegisterInfo::use_nodbg_iterator
+         I = MRI->use_nodbg_begin(Reg), E = MRI->use_nodbg_end();
+       I != E; ++I) {
+    // Determine the block of the use.
+    MachineInstr *UseInst = &*I;
+    MachineBasicBlock *UseBlock = UseInst->getParent();
     if (UseInst->isPHI()) {
       // PHI nodes use the operand in the predecessor block, not the block with
       // the PHI.
       UseBlock = UseInst->getOperand(I.getOperandNo()+1).getMBB();
+    } else if (UseBlock == DefMBB) {
+      LocalUse = true;
+      return false;
     }
 
     // Check that it dominates.
@@ -107,7 +214,7 @@
   const TargetMachine &TM = MF.getTarget();
   TII = TM.getInstrInfo();
   TRI = TM.getRegisterInfo();
-  RegInfo = &MF.getRegInfo();
+  MRI = &MF.getRegInfo();
   DT = &getAnalysis<MachineDominatorTree>();
   LI = &getAnalysis<MachineLoopInfo>();
   AA = &getAnalysis<AliasAnalysis>();
@@ -119,6 +226,7 @@
     bool MadeChange = false;
 
     // Process all basic blocks.
+    CEBCandidates.clear();
     for (MachineFunction::iterator I = MF.begin(), E = MF.end();
          I != E; ++I)
       MadeChange |= ProcessBlock(*I);
@@ -157,6 +265,9 @@
     if (MI->isDebugValue())
       continue;
 
+    if (PerformTrivialForwardCoalescing(MI, &MBB))
+      continue;
+
     if (SinkInstruction(MI, SawStore))
       ++NumSunk, MadeChange = true;
 
@@ -166,9 +277,116 @@
   return MadeChange;
 }
 
+bool MachineSinking::isWorthBreakingCriticalEdge(MachineInstr *MI,
+                                                 MachineBasicBlock *From,
+                                                 MachineBasicBlock *To) {
+  // FIXME: Need much better heuristics.
+
+  // If the pass has already considered breaking this edge (during this pass
+  // through the function), then let's go ahead and break it. This means
+  // sinking multiple "cheap" instructions into the same block.
+  if (!CEBCandidates.insert(std::make_pair(From, To)))
+    return true;
+
+  if (!MI->isCopy() && !MI->getDesc().isAsCheapAsAMove())
+    return true;
+
+  // MI is cheap, so we probably don't want to break the critical edge for it.
+  // However, if this would allow some definitions of its source operands
+  // to be sunk, then it's probably worth it.
+  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+    const MachineOperand &MO = MI->getOperand(i);
+    if (!MO.isReg()) continue;
+    unsigned Reg = MO.getReg();
+    if (Reg == 0 || !TargetRegisterInfo::isPhysicalRegister(Reg))
+      continue;
+    if (MRI->hasOneNonDBGUse(Reg))
+      return true;
+  }
+
+  return false;
+}
+
+MachineBasicBlock *MachineSinking::SplitCriticalEdge(MachineInstr *MI,
+                                                     MachineBasicBlock *FromBB,
+                                                     MachineBasicBlock *ToBB,
+                                                     bool BreakPHIEdge) {
+  if (!isWorthBreakingCriticalEdge(MI, FromBB, ToBB))
+    return 0;
+
+  // Avoid breaking a back edge. From == To means a single-BB loop backedge.
+  if (!SplitEdges || FromBB == ToBB)
+    return 0;
+
+  // Check for backedges of more "complex" loops.
+  if (LI->getLoopFor(FromBB) == LI->getLoopFor(ToBB) &&
+      LI->isLoopHeader(ToBB))
+    return 0;
+
+  // It's not always legal to break critical edges and sink the computation
+  // to the edge.
+  //
+  // BB#1:
+  // v1024
+  // Beq BB#3
+  // <fallthrough>
+  // BB#2:
+  // ... no uses of v1024
+  // <fallthrough>
+  // BB#3:
+  // ...
+  //       = v1024
+  //
+  // If BB#1 -> BB#3 edge is broken and computation of v1024 is inserted:
+  //
+  // BB#1:
+  // ...
+  // Bne BB#2
+  // BB#4:
+  // v1024 =
+  // B BB#3
+  // BB#2:
+  // ... no uses of v1024
+  // <fallthrough>
+  // BB#3:
+  // ...
+  //       = v1024
+  //
+  // This is incorrect since v1024 is not computed along the BB#1->BB#2->BB#3
+  // flow. We need to ensure that the new basic block the computation is
+  // sunk to dominates all the uses.
+  // It's only legal to break the critical edge and sink the computation to
+  // the new block if all the predecessors of "To", except for "From", are
+  // not dominated by "From". Given the SSA property, this means these
+  // predecessors are dominated by "To".
+  //
+  // There is no need to do this check if all the uses are PHI nodes. PHI
+  // sources are only defined on the specific predecessor edges.
+  if (!BreakPHIEdge) {
+    for (MachineBasicBlock::pred_iterator PI = ToBB->pred_begin(),
+           E = ToBB->pred_end(); PI != E; ++PI) {
+      if (*PI == FromBB)
+        continue;
+      if (!DT->dominates(ToBB, *PI))
+        return 0;
+    }
+  }
+
+  return FromBB->SplitCriticalEdge(ToBB, this);
+}
+
+static bool AvoidsSinking(MachineInstr *MI, MachineRegisterInfo *MRI) {
+  return MI->isInsertSubreg() || MI->isSubregToReg() || MI->isRegSequence();
+}
+
 /// SinkInstruction - Determine whether it is safe to sink the specified machine
 /// instruction out of its current block into a successor.
 bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore) {
+  // Don't sink insert_subreg, subreg_to_reg, reg_sequence. These are meant to
+  // be close to the source to make it easier to coalesce.
+  if (AvoidsSinking(MI, MRI))
+    return false;
+
   // Check if it's safe to move the instruction.
   if (!MI->isSafeToMove(TII, AA, SawStore))
     return false;
@@ -189,6 +407,7 @@
   // decide.
   MachineBasicBlock *SuccToSinkTo = 0;
 
+  bool BreakPHIEdge = false;
   for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
     const MachineOperand &MO = MI->getOperand(i);
     if (!MO.isReg()) continue;  // Ignore non-register operands.
@@ -201,7 +420,7 @@
         // If the physreg has no defs anywhere, it's just an ambient register
         // and we can freely move its uses. Alternatively, if it's allocatable,
         // it could get allocated to something with a def during allocation.
-        if (!RegInfo->def_empty(Reg))
+        if (!MRI->def_empty(Reg))
           return false;
 
         if (AllocatableSet.test(Reg))
@@ -210,7 +429,7 @@
         // Check for a def among the register's aliases too.
         for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
           unsigned AliasReg = *Alias;
-          if (!RegInfo->def_empty(AliasReg))
+          if (!MRI->def_empty(AliasReg))
             return false;
 
           if (AllocatableSet.test(AliasReg))
@@ -225,7 +444,7 @@
       if (MO.isUse()) continue;
 
       // If it's not safe to move defs of the register class, then abort.
-      if (!TII->isSafeToMoveRegClassDefs(RegInfo->getRegClass(Reg)))
+      if (!TII->isSafeToMoveRegClassDefs(MRI->getRegClass(Reg)))
         return false;
 
       // FIXME: This picks a successor to sink into based on having one
@@ -246,7 +465,9 @@
       if (SuccToSinkTo) {
         // If a previous operand picked a block to sink to, then this operand
         // must be sinkable to the same block.
-        if (!AllUsesDominatedByBlock(Reg, SuccToSinkTo))
+        bool LocalUse = false;
+        if (!AllUsesDominatedByBlock(Reg, SuccToSinkTo, ParentBlock,
+                                     BreakPHIEdge, LocalUse))
           return false;
 
         continue;
@@ -256,10 +477,15 @@
       // we should sink to.
       for (MachineBasicBlock::succ_iterator SI = ParentBlock->succ_begin(),
            E = ParentBlock->succ_end(); SI != E; ++SI) {
-        if (AllUsesDominatedByBlock(Reg, *SI)) {
+        bool LocalUse = false;
+        if (AllUsesDominatedByBlock(Reg, *SI, ParentBlock,
+                                    BreakPHIEdge, LocalUse)) {
           SuccToSinkTo = *SI;
           break;
         }
+        if (LocalUse)
+          // Def is used locally, it's never safe to move this def.
+          return false;
       }
 
       // If we couldn't find a block to sink to, ignore this instruction.
@@ -299,31 +525,69 @@
   // If the block has multiple predecessors, this would introduce computation on
   // a path where it doesn't already exist.  We could split the critical edge,
   // but for now we just punt.
-  // FIXME: Split critical edges if not backedges.
   if (SuccToSinkTo->pred_size() > 1) {
     // We cannot sink a load across a critical edge - there may be stores in
     // other code paths.
+    bool TryBreak = false;
     bool store = true;
     if (!MI->isSafeToMove(TII, AA, store)) {
-      DEBUG(dbgs() << " *** PUNTING: Wont sink load along critical edge.\n");
-      return false;
+      DEBUG(dbgs() << " *** NOTE: Won't sink load along critical edge.\n");
+      TryBreak = true;
     }
 
     // We don't want to sink across a critical edge if we don't dominate the
     // successor. We could be introducing calculations to new code paths.
-    if (!DT->dominates(ParentBlock, SuccToSinkTo)) {
-      DEBUG(dbgs() << " *** PUNTING: Critical edge found\n");
-      return false;
+    if (!TryBreak && !DT->dominates(ParentBlock, SuccToSinkTo)) {
+      DEBUG(dbgs() << " *** NOTE: Critical edge found\n");
+      TryBreak = true;
     }
 
     // Don't sink instructions into a loop.
-    if (LI->isLoopHeader(SuccToSinkTo)) {
-      DEBUG(dbgs() << " *** PUNTING: Loop header found\n");
-      return false;
+    if (!TryBreak && LI->isLoopHeader(SuccToSinkTo)) {
+      DEBUG(dbgs() << " *** NOTE: Loop header found\n");
+      TryBreak = true;
     }
 
     // Otherwise we are OK with sinking along a critical edge.
-    DEBUG(dbgs() << "Sinking along critical edge.\n");
+    if (!TryBreak)
+      DEBUG(dbgs() << "Sinking along critical edge.\n");
+    else {
+      MachineBasicBlock *NewSucc =
+        SplitCriticalEdge(MI, ParentBlock, SuccToSinkTo, BreakPHIEdge);
+      if (!NewSucc) {
+        DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "
+                        "break critical edge\n");
+        return false;
+      } else {
+        DEBUG(dbgs() << " *** Splitting critical edge:"
+              " BB#" << ParentBlock->getNumber()
+              << " -- BB#" << NewSucc->getNumber()
+              << " -- BB#" << SuccToSinkTo->getNumber() << '\n');
+        SuccToSinkTo = NewSucc;
+        ++NumSplit;
+        BreakPHIEdge = false;
+      }
+    }
+  }
+
+  if (BreakPHIEdge) {
+    // BreakPHIEdge is true if all the uses are in the successor MBB being
+    // sunk into and they are all PHI nodes. In this case, machine-sink must
+    // break the critical edge first.
+    MachineBasicBlock *NewSucc = SplitCriticalEdge(MI, ParentBlock,
+                                                   SuccToSinkTo, BreakPHIEdge);
+    if (!NewSucc) {
+      DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "
+            "break critical edge\n");
+      return false;
+    }
+
+    DEBUG(dbgs() << " *** Splitting critical edge:"
+          " BB#" << ParentBlock->getNumber()
+          << " -- BB#" << NewSucc->getNumber()
+          << " -- BB#" << SuccToSinkTo->getNumber() << '\n');
+    SuccToSinkTo = NewSucc;
+    ++NumSplit;
   }
 
   // Determine where to insert into. Skip phi nodes.

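The legality test in SplitCriticalEdge above reduces to a single dominance condition: sinking a computation into a new block on the From->To edge is safe only if every predecessor of To other than From is already dominated by To, so the sunk value still dominates all of its uses. A condensed sketch of just that check, assuming the same MachineDominatorTree *DT as in the patch:

  // Sketch: may a computation be placed on the From->To edge?  Every other
  // predecessor of To must be dominated by To itself; otherwise some path
  // would reach the uses without passing through the new edge block.
  static bool sunkValueWouldDominateUses(MachineBasicBlock *From,
                                         MachineBasicBlock *To,
                                         MachineDominatorTree *DT) {
    for (MachineBasicBlock::pred_iterator PI = To->pred_begin(),
           E = To->pred_end(); PI != E; ++PI)
      if (*PI != From && !DT->dominates(To, *PI))
        return false;
    return true;
  }
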
Modified: llvm/branches/wendling/eh/lib/CodeGen/MachineVerifier.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/MachineVerifier.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/MachineVerifier.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/MachineVerifier.cpp Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-//===-- MachineVerifier.cpp - Machine Code Verifier -------------*- C++ -*-===//
+//===-- MachineVerifier.cpp - Machine Code Verifier -----------------------===//
 //
 //                     The LLVM Compiler Infrastructure
 //
@@ -24,6 +24,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/Function.h"
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
 #include "llvm/CodeGen/LiveVariables.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
@@ -44,19 +45,14 @@
 namespace {
   struct MachineVerifier {
 
-    MachineVerifier(Pass *pass, bool allowDoubleDefs) :
+    MachineVerifier(Pass *pass) :
       PASS(pass),
-      allowVirtDoubleDefs(allowDoubleDefs),
-      allowPhysDoubleDefs(true),
       OutFileName(getenv("LLVM_VERIFY_MACHINEINSTRS"))
       {}
 
     bool runOnMachineFunction(MachineFunction &MF);
 
     Pass *const PASS;
-    const bool allowVirtDoubleDefs;
-    const bool allowPhysDoubleDefs;
-
     const char *const OutFileName;
     raw_ostream *OS;
     const MachineFunction *MF;
@@ -91,10 +87,6 @@
       // defined. Map value is the user.
       RegMap vregsLiveIn;
 
-      // Vregs that must be dead in because they are defined without being
-      // killed first. Map value is the defining instruction.
-      RegMap vregsDeadIn;
-
       // Regs killed in MBB. They may be defined again, and will then be in both
       // regsKilled and regsLiveOut.
       RegSet regsKilled;
@@ -175,6 +167,8 @@
 
     // Analysis information if available
     LiveVariables *LiveVars;
+    LiveIntervals *LiveInts;
+    SlotIndexes *Indexes;
 
     void visitMachineFunctionBefore();
     void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB);
@@ -195,15 +189,16 @@
 
     void calcRegsRequired();
     void verifyLiveVariables();
+    void verifyLiveIntervals();
   };
 
   struct MachineVerifierPass : public MachineFunctionPass {
     static char ID; // Pass ID, replacement for typeid
-    bool AllowDoubleDefs;
 
-    explicit MachineVerifierPass(bool allowDoubleDefs = false)
-      : MachineFunctionPass(&ID),
-        AllowDoubleDefs(allowDoubleDefs) {}
+    MachineVerifierPass()
+      : MachineFunctionPass(ID) {
+        initializeMachineVerifierPassPass(*PassRegistry::getPassRegistry());
+      }
 
     void getAnalysisUsage(AnalysisUsage &AU) const {
       AU.setPreservesAll();
@@ -211,7 +206,7 @@
     }
 
     bool runOnMachineFunction(MachineFunction &MF) {
-      MF.verify(this, AllowDoubleDefs);
+      MF.verify(this);
       return false;
     }
   };
@@ -219,17 +214,15 @@
 }
 
 char MachineVerifierPass::ID = 0;
-static RegisterPass<MachineVerifierPass>
-MachineVer("machineverifier", "Verify generated machine code");
-static const PassInfo *const MachineVerifyID = &MachineVer;
+INITIALIZE_PASS(MachineVerifierPass, "machineverifier",
+                "Verify generated machine code", false, false)
 
-FunctionPass *llvm::createMachineVerifierPass(bool allowPhysDoubleDefs) {
-  return new MachineVerifierPass(allowPhysDoubleDefs);
+FunctionPass *llvm::createMachineVerifierPass() {
+  return new MachineVerifierPass();
 }
 
-void MachineFunction::verify(Pass *p, bool allowDoubleDefs) const {
-  MachineVerifier(p, allowDoubleDefs)
-    .runOnMachineFunction(const_cast<MachineFunction&>(*this));
+void MachineFunction::verify(Pass *p) const {
+  MachineVerifier(p).runOnMachineFunction(const_cast<MachineFunction&>(*this));
 }
 
 bool MachineVerifier::runOnMachineFunction(MachineFunction &MF) {
@@ -255,10 +248,15 @@
   TRI = TM->getRegisterInfo();
   MRI = &MF.getRegInfo();
 
+  LiveVars = NULL;
+  LiveInts = NULL;
+  Indexes = NULL;
   if (PASS) {
-    LiveVars = PASS->getAnalysisIfAvailable<LiveVariables>();
-  } else {
-    LiveVars = NULL;
+    LiveInts = PASS->getAnalysisIfAvailable<LiveIntervals>();
+    // We don't want to verify LiveVariables if LiveIntervals is available.
+    if (!LiveInts)
+      LiveVars = PASS->getAnalysisIfAvailable<LiveVariables>();
+    Indexes = PASS->getAnalysisIfAvailable<SlotIndexes>();
   }
 
   visitMachineFunctionBefore();
@@ -296,7 +294,7 @@
   assert(MF);
   *OS << '\n';
   if (!foundErrors++)
-    MF->print(*OS);
+    MF->print(*OS, Indexes);
   *OS << "*** Bad machine code: " << msg << " ***\n"
       << "- function:    " << MF->getFunction()->getNameStr() << "\n";
 }
@@ -306,13 +304,19 @@
   report(msg, MBB->getParent());
   *OS << "- basic block: " << MBB->getName()
       << " " << (void*)MBB
-      << " (BB#" << MBB->getNumber() << ")\n";
+      << " (BB#" << MBB->getNumber() << ")";
+  if (Indexes)
+    *OS << " [" << Indexes->getMBBStartIdx(MBB)
+        << ';' <<  Indexes->getMBBEndIdx(MBB) << ')';
+  *OS << '\n';
 }
 
 void MachineVerifier::report(const char *msg, const MachineInstr *MI) {
   assert(MI);
   report(msg, MI->getParent());
   *OS << "- instruction: ";
+  if (Indexes && Indexes->hasIndex(MI))
+    *OS << Indexes->getInstructionIndex(MI) << '\t';
   MI->print(*OS, TM);
 }
 
@@ -364,6 +368,14 @@
 MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
   const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
 
+  // Count the number of landing pad successors.
+  unsigned LandingPadSuccs = 0;
+  for (MachineBasicBlock::const_succ_iterator I = MBB->succ_begin(),
+       E = MBB->succ_end(); I != E; ++I)
+    LandingPadSuccs += (*I)->isLandingPad();
+  if (LandingPadSuccs > 1)
+    report("MBB has more than one landing pad successor", MBB);
+
   // Call AnalyzeBranch. If it succeeds, there are several more conditions to check.
   MachineBasicBlock *TBB = 0, *FBB = 0;
   SmallVector<MachineOperand, 4> Cond;
@@ -379,14 +391,14 @@
         // It's possible that the block legitimately ends with a noreturn
         // call or an unreachable, in which case it won't actually fall
         // out the bottom of the function.
-      } else if (MBB->succ_empty()) {
+      } else if (MBB->succ_size() == LandingPadSuccs) {
         // It's possible that the block legitimately ends with a noreturn
         // call or an unreachable, in which case it won't actually fall
         // out of the block.
-      } else if (MBB->succ_size() != 1) {
+      } else if (MBB->succ_size() != 1+LandingPadSuccs) {
         report("MBB exits via unconditional fall-through but doesn't have "
                "exactly one CFG successor!", MBB);
-      } else if (MBB->succ_begin()[0] != MBBI) {
+      } else if (!MBB->isSuccessor(MBBI)) {
         report("MBB exits via unconditional fall-through but its successor "
                "differs from its CFG successor!", MBB);
       }
@@ -401,10 +413,10 @@
       }
     } else if (TBB && !FBB && Cond.empty()) {
       // Block unconditionally branches somewhere.
-      if (MBB->succ_size() != 1) {
+      if (MBB->succ_size() != 1+LandingPadSuccs) {
         report("MBB exits via unconditional branch but doesn't have "
                "exactly one CFG successor!", MBB);
-      } else if (MBB->succ_begin()[0] != TBB) {
+      } else if (!MBB->isSuccessor(TBB)) {
         report("MBB exits via unconditional branch but the CFG "
                "successor doesn't match the actual successor!", MBB);
       }
@@ -512,6 +524,20 @@
     if ((*I)->isStore() && !TI.mayStore())
       report("Missing mayStore flag", MI);
   }
+
+  // Debug values must not have a slot index.
+  // Other instructions must have one.
+  if (LiveInts) {
+    bool mapped = !LiveInts->isNotInMIMap(MI);
+    if (MI->isDebugValue()) {
+      if (mapped)
+        report("Debug instruction has a slot index", MI);
+    } else {
+      if (!mapped)
+        report("Missing slot index", MI);
+    }
+  }
 }
 
 void
@@ -570,15 +596,30 @@
       } else
         isKill = MO->isKill();
 
-      if (isKill) {
+      if (isKill)
         addRegWithSubRegs(regsKilled, Reg);
 
-        // Check that LiveVars knows this kill
-        if (LiveVars && TargetRegisterInfo::isVirtualRegister(Reg)) {
-          LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
-          if (std::find(VI.Kills.begin(),
-                        VI.Kills.end(), MI) == VI.Kills.end())
-            report("Kill missing from LiveVariables", MO, MONum);
+      // Check that LiveVars knows this kill.
+      if (LiveVars && TargetRegisterInfo::isVirtualRegister(Reg) &&
+          MO->isKill()) {
+        LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
+        if (std::find(VI.Kills.begin(),
+                      VI.Kills.end(), MI) == VI.Kills.end())
+          report("Kill missing from LiveVariables", MO, MONum);
+      }
+
+      // Check LiveInts liveness and kill.
+      if (LiveInts && !LiveInts->isNotInMIMap(MI)) {
+        SlotIndex UseIdx = LiveInts->getInstructionIndex(MI).getUseIndex();
+        if (LiveInts->hasInterval(Reg)) {
+          const LiveInterval &LI = LiveInts->getInterval(Reg);
+          if (!LI.liveAt(UseIdx)) {
+            report("No live range at use", MO, MONum);
+            *OS << UseIdx << " is not live in " << LI << '\n';
+          }
+          // TODO: Verify isKill == LI.killedAt.
+        } else if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+          report("Virtual register has no live interval", MO, MONum);
         }
       }
 
@@ -607,6 +648,28 @@
         addRegWithSubRegs(regsDead, Reg);
       else
         addRegWithSubRegs(regsDefined, Reg);
+
+      // Check LiveInts for a live range, but only for virtual registers.
+      if (LiveInts && TargetRegisterInfo::isVirtualRegister(Reg) &&
+          !LiveInts->isNotInMIMap(MI)) {
+        SlotIndex DefIdx = LiveInts->getInstructionIndex(MI).getDefIndex();
+        if (LiveInts->hasInterval(Reg)) {
+          const LiveInterval &LI = LiveInts->getInterval(Reg);
+          if (const VNInfo *VNI = LI.getVNInfoAt(DefIdx)) {
+            assert(VNI && "NULL valno is not allowed");
+            if (VNI->def != DefIdx) {
+              report("Inconsistent valno->def", MO, MONum);
+              *OS << "Valno " << VNI->id << " is not defined at "
+                  << DefIdx << " in " << LI << '\n';
+            }
+          } else {
+            report("No live range at def", MO, MONum);
+            *OS << DefIdx << " is not live in " << LI << '\n';
+          }
+        } else {
+          report("Virtual register has no live interval", MO, MONum);
+        }
+      }
     }
 
     // Check register classes.
@@ -670,40 +733,9 @@
 void MachineVerifier::visitMachineInstrAfter(const MachineInstr *MI) {
   BBInfo &MInfo = MBBInfoMap[MI->getParent()];
   set_union(MInfo.regsKilled, regsKilled);
-  set_subtract(regsLive, regsKilled);
-  regsKilled.clear();
-
-  // Verify that both <def> and <def,dead> operands refer to dead registers.
-  RegVector defs(regsDefined);
-  defs.append(regsDead.begin(), regsDead.end());
-
-  for (RegVector::const_iterator I = defs.begin(), E = defs.end();
-       I != E; ++I) {
-    if (regsLive.count(*I)) {
-      if (TargetRegisterInfo::isPhysicalRegister(*I)) {
-        if (!allowPhysDoubleDefs && !isReserved(*I) &&
-            !regsLiveInButUnused.count(*I)) {
-          report("Redefining a live physical register", MI);
-          *OS << "Register " << TRI->getName(*I)
-              << " was defined but already live.\n";
-        }
-      } else {
-        if (!allowVirtDoubleDefs) {
-          report("Redefining a live virtual register", MI);
-          *OS << "Virtual register %reg" << *I
-              << " was defined but already live.\n";
-        }
-      }
-    } else if (TargetRegisterInfo::isVirtualRegister(*I) &&
-               !MInfo.regsKilled.count(*I)) {
-      // Virtual register defined without being killed first must be dead on
-      // entry.
-      MInfo.vregsDeadIn.insert(std::make_pair(*I, MI));
-    }
-  }
-
-  set_subtract(regsLive, regsDead); regsDead.clear();
-  set_union(regsLive, regsDefined); regsDefined.clear();
+  set_subtract(regsLive, regsKilled); regsKilled.clear();
+  set_subtract(regsLive, regsDead);   regsDead.clear();
+  set_union(regsLive, regsDefined);   regsDefined.clear();
 }
 
 void
@@ -828,35 +860,15 @@
       continue;
 
     checkPHIOps(MFI);
-
-    // Verify dead-in virtual registers.
-    if (!allowVirtDoubleDefs) {
-      for (MachineBasicBlock::const_pred_iterator PrI = MFI->pred_begin(),
-             PrE = MFI->pred_end(); PrI != PrE; ++PrI) {
-        BBInfo &PrInfo = MBBInfoMap[*PrI];
-        if (!PrInfo.reachable)
-          continue;
-
-        for (RegMap::iterator I = MInfo.vregsDeadIn.begin(),
-               E = MInfo.vregsDeadIn.end(); I != E; ++I) {
-          // DeadIn register must be in neither regsLiveOut or vregsPassed of
-          // any predecessor.
-          if (PrInfo.isLiveOut(I->first)) {
-            report("Live-in virtual register redefined", I->second);
-            *OS << "Register %reg" << I->first
-                << " was live-out from predecessor MBB #"
-                << (*PrI)->getNumber() << ".\n";
-          }
-        }
-      }
-    }
   }
 
-  // Now check LiveVariables info if available
-  if (LiveVars) {
+  // Now check liveness info if available
+  if (LiveVars || LiveInts)
     calcRegsRequired();
+  if (LiveVars)
     verifyLiveVariables();
-  }
+  if (LiveInts)
+    verifyLiveIntervals();
 }
 
 void MachineVerifier::verifyLiveVariables() {
@@ -886,4 +898,183 @@
   }
 }
 
+void MachineVerifier::verifyLiveIntervals() {
+  assert(LiveInts && "Don't call verifyLiveIntervals without LiveInts");
+  for (LiveIntervals::const_iterator LVI = LiveInts->begin(),
+       LVE = LiveInts->end(); LVI != LVE; ++LVI) {
+    const LiveInterval &LI = *LVI->second;
+
+    // Spilling and splitting may leave unused registers around. Skip them.
+    if (MRI->use_empty(LI.reg))
+      continue;
+
+    assert(LVI->first == LI.reg && "Invalid reg to interval mapping");
+
+    for (LiveInterval::const_vni_iterator I = LI.vni_begin(), E = LI.vni_end();
+         I!=E; ++I) {
+      VNInfo *VNI = *I;
+      const VNInfo *DefVNI = LI.getVNInfoAt(VNI->def);
+
+      if (!DefVNI) {
+        if (!VNI->isUnused()) {
+          report("Valno not live at def and not marked unused", MF);
+          *OS << "Valno #" << VNI->id << " in " << LI << '\n';
+        }
+        continue;
+      }
+
+      if (VNI->isUnused())
+        continue;
+
+      if (DefVNI != VNI) {
+        report("Live range at def has different valno", MF);
+        *OS << "Valno #" << VNI->id << " is defined at " << VNI->def
+            << " where valno #" << DefVNI->id << " is live in " << LI << '\n';
+        continue;
+      }
+
+      const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(VNI->def);
+      if (!MBB) {
+        report("Invalid definition index", MF);
+        *OS << "Valno #" << VNI->id << " is defined at " << VNI->def
+            << " in " << LI << '\n';
+        continue;
+      }
+
+      if (VNI->isPHIDef()) {
+        if (VNI->def != LiveInts->getMBBStartIdx(MBB)) {
+          report("PHIDef value is not defined at MBB start", MF);
+          *OS << "Valno #" << VNI->id << " is defined at " << VNI->def
+              << ", not at the beginning of BB#" << MBB->getNumber()
+              << " in " << LI << '\n';
+        }
+      } else {
+        // Non-PHI def.
+        if (!VNI->def.isDef()) {
+          report("Non-PHI def must be at a DEF slot", MF);
+          *OS << "Valno #" << VNI->id << " is defined at " << VNI->def
+              << " in " << LI << '\n';
+        }
+        const MachineInstr *MI = LiveInts->getInstructionFromIndex(VNI->def);
+        if (!MI) {
+          report("No instruction at def index", MF);
+          *OS << "Valno #" << VNI->id << " is defined at " << VNI->def
+              << " in " << LI << '\n';
+        } else if (!MI->modifiesRegister(LI.reg, TRI)) {
+          report("Defining instruction does not modify register", MI);
+          *OS << "Valno #" << VNI->id << " in " << LI << '\n';
+        }
+      }
+    }
+
+    for (LiveInterval::const_iterator I = LI.begin(), E = LI.end(); I!=E; ++I) {
+      const VNInfo *VNI = I->valno;
+      assert(VNI && "Live range has no valno");
+
+      if (VNI->id >= LI.getNumValNums() || VNI != LI.getValNumInfo(VNI->id)) {
+        report("Foreign valno in live range", MF);
+        I->print(*OS);
+        *OS << " has a valno not in " << LI << '\n';
+      }
+
+      if (VNI->isUnused()) {
+        report("Live range valno is marked unused", MF);
+        I->print(*OS);
+        *OS << " in " << LI << '\n';
+      }
+
+      const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(I->start);
+      if (!MBB) {
+        report("Bad start of live segment, no basic block", MF);
+        I->print(*OS);
+        *OS << " in " << LI << '\n';
+        continue;
+      }
+      SlotIndex MBBStartIdx = LiveInts->getMBBStartIdx(MBB);
+      if (I->start != MBBStartIdx && I->start != VNI->def) {
+        report("Live segment must begin at MBB entry or valno def", MBB);
+        I->print(*OS);
+        *OS << " in " << LI << '\n' << "Basic block starts at "
+            << MBBStartIdx << '\n';
+      }
+
+      const MachineBasicBlock *EndMBB =
+                                LiveInts->getMBBFromIndex(I->end.getPrevSlot());
+      if (!EndMBB) {
+        report("Bad end of live segment, no basic block", MF);
+        I->print(*OS);
+        *OS << " in " << LI << '\n';
+        continue;
+      }
+      if (I->end != LiveInts->getMBBEndIdx(EndMBB)) {
+        // The live segment is ending inside EndMBB
+        const MachineInstr *MI =
+                        LiveInts->getInstructionFromIndex(I->end.getPrevSlot());
+        if (!MI) {
+          report("Live segment doesn't end at a valid instruction", EndMBB);
+          I->print(*OS);
+          *OS << " in " << LI << '\n' << "Basic block starts at "
+              << MBBStartIdx << '\n';
+        } else if (TargetRegisterInfo::isVirtualRegister(LI.reg) &&
+                   !MI->readsVirtualRegister(LI.reg)) {
+          // FIXME: Should we require a kill flag?
+          report("Instruction killing live segment doesn't read register", MI);
+          I->print(*OS);
+          *OS << " in " << LI << '\n';
+        }
+      }
+
+      // Now check all the basic blocks in this live segment.
+      MachineFunction::const_iterator MFI = MBB;
+      // Is LI live-in to MBB and not a PHIDef?
+      if (I->start == VNI->def) {
+        // Not live-in to any blocks.
+        if (MBB == EndMBB)
+          continue;
+        // Skip this block.
+        ++MFI;
+      }
+      for (;;) {
+        assert(LiveInts->isLiveInToMBB(LI, MFI));
+        // We don't know how to track physregs into a landing pad.
+        if (TargetRegisterInfo::isPhysicalRegister(LI.reg) &&
+            MFI->isLandingPad()) {
+          if (&*MFI == EndMBB)
+            break;
+          ++MFI;
+          continue;
+        }
+        // Check that VNI is live-out of all predecessors.
+        for (MachineBasicBlock::const_pred_iterator PI = MFI->pred_begin(),
+             PE = MFI->pred_end(); PI != PE; ++PI) {
+          SlotIndex PEnd = LiveInts->getMBBEndIdx(*PI).getPrevSlot();
+          const VNInfo *PVNI = LI.getVNInfoAt(PEnd);
+          if (!PVNI) {
+            report("Register not marked live out of predecessor", *PI);
+            *OS << "Valno #" << VNI->id << " live into BB#" << MFI->getNumber()
+                << '@' << LiveInts->getMBBStartIdx(MFI) << ", not live at "
+                << PEnd << " in " << LI << '\n';
+          } else if (PVNI != VNI) {
+            report("Different value live out of predecessor", *PI);
+            *OS << "Valno #" << PVNI->id << " live out of BB#"
+                << (*PI)->getNumber() << '@' << PEnd
+                << "\nValno #" << VNI->id << " live into BB#" << MFI->getNumber()
+                << '@' << LiveInts->getMBBStartIdx(MFI) << " in " << LI << '\n';
+          }
+        }
+        if (&*MFI == EndMBB)
+          break;
+        ++MFI;
+      }
+    }
+
+    // Check the LI only has one connected component.
+    ConnectedVNInfoEqClasses ConEQ(*LiveInts);
+    unsigned NumComp = ConEQ.Classify(&LI);
+    if (NumComp > 1) {
+      report("Multiple connected components in live interval", MF);
+      *OS << NumComp << " components in " << LI << '\n';
+    }
+  }
+}
 

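With the allowDoubleDefs parameters gone, the verifier now has a single parameterless entry point on each side. A short sketch of both call patterns (PM is assumed to be a PassManagerBase pointer; the direct form is called from inside another machine pass, hence `this`):

  // As a pipeline pass:
  PM->add(createMachineVerifierPass());

  // Or on demand from inside another machine function pass:
  MF.verify(this);

As before, the LLVM_VERIFY_MACHINEINSTRS environment variable, read into the OutFileName member shown above, selects where the verifier writes its output.
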
Removed: llvm/branches/wendling/eh/lib/CodeGen/OptimizeExts.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/OptimizeExts.cpp?rev=117424&view=auto
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/OptimizeExts.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/OptimizeExts.cpp (removed)
@@ -1,220 +0,0 @@
-//===-- OptimizeExts.cpp - Optimize sign / zero extension instrs -----===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This pass performs optimization of sign / zero extension instructions. It
-// may be extended to handle other instructions of similar property.
-//
-// On some targets, some instructions, e.g. X86 sign / zero extension, may
-// leave the source value in the lower part of the result. This pass will
-// replace (some) uses of the pre-extension value with uses of the sub-register
-// of the results.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "ext-opt"
-#include "llvm/CodeGen/Passes.h"
-#include "llvm/CodeGen/MachineDominators.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/Statistic.h"
-using namespace llvm;
-
-static cl::opt<bool> Aggressive("aggressive-ext-opt", cl::Hidden,
-                                cl::desc("Aggressive extension optimization"));
-
-STATISTIC(NumReuse, "Number of extension results reused");
-
-namespace {
-  class OptimizeExts : public MachineFunctionPass {
-    const TargetMachine   *TM;
-    const TargetInstrInfo *TII;
-    MachineRegisterInfo *MRI;
-    MachineDominatorTree *DT;   // Machine dominator tree
-
-  public:
-    static char ID; // Pass identification
-    OptimizeExts() : MachineFunctionPass(&ID) {}
-
-    virtual bool runOnMachineFunction(MachineFunction &MF);
-
-    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
-      AU.setPreservesCFG();
-      MachineFunctionPass::getAnalysisUsage(AU);
-      if (Aggressive) {
-        AU.addRequired<MachineDominatorTree>();
-        AU.addPreserved<MachineDominatorTree>();
-      }
-    }
-
-  private:
-    bool OptimizeInstr(MachineInstr *MI, MachineBasicBlock *MBB,
-                       SmallPtrSet<MachineInstr*, 8> &LocalMIs);
-  };
-}
-
-char OptimizeExts::ID = 0;
-INITIALIZE_PASS(OptimizeExts, "opt-exts",
-                "Optimize sign / zero extensions", false, false);
-
-FunctionPass *llvm::createOptimizeExtsPass() { return new OptimizeExts(); }
-
-/// OptimizeInstr - If instruction is a copy-like instruction, i.e. it reads
-/// a single register and writes a single register and it does not modify
-/// the source, and if the source value is preserved as a sub-register of
-/// the result, then replace all reachable uses of the source with the subreg
-/// of the result.
-/// Do not generate an EXTRACT that is used only in a debug use, as this
-/// changes the code.  Since this code does not currently share EXTRACTs, just
-/// ignore all debug uses.
-bool OptimizeExts::OptimizeInstr(MachineInstr *MI, MachineBasicBlock *MBB,
-                                 SmallPtrSet<MachineInstr*, 8> &LocalMIs) {
-  bool Changed = false;
-  LocalMIs.insert(MI);
-
-  unsigned SrcReg, DstReg, SubIdx;
-  if (TII->isCoalescableExtInstr(*MI, SrcReg, DstReg, SubIdx)) {
-    if (TargetRegisterInfo::isPhysicalRegister(DstReg) ||
-        TargetRegisterInfo::isPhysicalRegister(SrcReg))
-      return false;
-
-    MachineRegisterInfo::use_nodbg_iterator UI = MRI->use_nodbg_begin(SrcReg);
-    if (++UI == MRI->use_nodbg_end())
-      // No other uses.
-      return false;
-
-    // Ok, the source has other uses. See if we can replace the other uses
-    // with use of the result of the extension.
-    SmallPtrSet<MachineBasicBlock*, 4> ReachedBBs;
-    UI = MRI->use_nodbg_begin(DstReg);
-    for (MachineRegisterInfo::use_nodbg_iterator UE = MRI->use_nodbg_end();
-         UI != UE; ++UI)
-      ReachedBBs.insert(UI->getParent());
-
-    bool ExtendLife = true;
-    // Uses that are in the same BB of uses of the result of the instruction.
-    SmallVector<MachineOperand*, 8> Uses;
-    // Uses that the result of the instruction can reach.
-    SmallVector<MachineOperand*, 8> ExtendedUses;
-
-    UI = MRI->use_nodbg_begin(SrcReg);
-    for (MachineRegisterInfo::use_nodbg_iterator UE = MRI->use_nodbg_end();
-         UI != UE; ++UI) {
-      MachineOperand &UseMO = UI.getOperand();
-      MachineInstr *UseMI = &*UI;
-      if (UseMI == MI)
-        continue;
-      if (UseMI->isPHI()) {
-        ExtendLife = false;
-        continue;
-      }
-
-      // It's an error to translate this:
-      //
-      //    %reg1025 = <sext> %reg1024
-      //     ...
-      //    %reg1026 = SUBREG_TO_REG 0, %reg1024, 4
-      //
-      // into this:
-      //
-      //    %reg1025 = <sext> %reg1024
-      //     ...
-      //    %reg1027 = COPY %reg1025:4
-      //    %reg1026 = SUBREG_TO_REG 0, %reg1027, 4
-      //
-      // The problem here is that SUBREG_TO_REG is there to assert that an
-      // implicit zext occurs. It doesn't insert a zext instruction. If we allow
-      // the COPY here, it will give us the value after the <sext>,
-      // not the original value of %reg1024 before <sext>.
-      if (UseMI->getOpcode() == TargetOpcode::SUBREG_TO_REG)
-        continue;
-
-      MachineBasicBlock *UseMBB = UseMI->getParent();
-      if (UseMBB == MBB) {
-        // Local uses that come after the extension.
-        if (!LocalMIs.count(UseMI))
-          Uses.push_back(&UseMO);
-      } else if (ReachedBBs.count(UseMBB))
-        // Non-local uses where the result of extension is used. Always
-        // replace these unless it's a PHI.
-        Uses.push_back(&UseMO);
-      else if (Aggressive && DT->dominates(MBB, UseMBB))
-        // We may want to extend live range of the extension result in order
-        // to replace these uses.
-        ExtendedUses.push_back(&UseMO);
-      else {
-        // Both will be live out of the def MBB anyway. Don't extend live
-        // range of the extension result.
-        ExtendLife = false;
-        break;
-      }
-    }
-
-    if (ExtendLife && !ExtendedUses.empty())
-      // Ok, we'll extend the liveness of the extension result.
-      std::copy(ExtendedUses.begin(), ExtendedUses.end(),
-                std::back_inserter(Uses));
-
-    // Now replace all uses.
-    if (!Uses.empty()) {
-      SmallPtrSet<MachineBasicBlock*, 4> PHIBBs;
-      // Look for PHI uses of the extended result, we don't want to extend the
-      // liveness of a PHI input. It breaks all kinds of assumptions down
-      // stream. A PHI use is expected to be the kill of its source values.
-      UI = MRI->use_nodbg_begin(DstReg);
-      for (MachineRegisterInfo::use_nodbg_iterator UE = MRI->use_nodbg_end();
-           UI != UE; ++UI)
-        if (UI->isPHI())
-          PHIBBs.insert(UI->getParent());
-
-      const TargetRegisterClass *RC = MRI->getRegClass(SrcReg);
-      for (unsigned i = 0, e = Uses.size(); i != e; ++i) {
-        MachineOperand *UseMO = Uses[i];
-        MachineInstr *UseMI = UseMO->getParent();
-        MachineBasicBlock *UseMBB = UseMI->getParent();
-        if (PHIBBs.count(UseMBB))
-          continue;
-        unsigned NewVR = MRI->createVirtualRegister(RC);
-        BuildMI(*UseMBB, UseMI, UseMI->getDebugLoc(),
-                TII->get(TargetOpcode::COPY), NewVR)
-          .addReg(DstReg, 0, SubIdx);
-        UseMO->setReg(NewVR);
-        ++NumReuse;
-        Changed = true;
-      }
-    }
-  }
-
-  return Changed;
-}
-
-bool OptimizeExts::runOnMachineFunction(MachineFunction &MF) {
-  TM = &MF.getTarget();
-  TII = TM->getInstrInfo();
-  MRI = &MF.getRegInfo();
-  DT = Aggressive ? &getAnalysis<MachineDominatorTree>() : 0;
-
-  bool Changed = false;
-
-  SmallPtrSet<MachineInstr*, 8> LocalMIs;
-  for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
-    MachineBasicBlock *MBB = &*I;
-    LocalMIs.clear();
-    for (MachineBasicBlock::iterator MII = I->begin(), ME = I->end(); MII != ME;
-         ++MII) {
-      MachineInstr *MI = &*MII;
-      Changed |= OptimizeInstr(MI, MBB, LocalMIs);
-    }
-  }
-
-  return Changed;
-}

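For readers tracking the removal: the heart of the deleted pass was a single rewrite, routing later uses of the pre-extension value through a sub-register COPY of the extension result. A condensed restatement of that rewrite, using only names from the removed file above:

  // Replace one use (UseMO, inside UseMI) of the pre-extension value
  // SrcReg with a COPY of sub-register SubIdx of the extension result
  // DstReg, so the already-computed extension result is reused.
  unsigned NewVR = MRI->createVirtualRegister(MRI->getRegClass(SrcReg));
  BuildMI(*UseMBB, UseMI, UseMI->getDebugLoc(),
          TII->get(TargetOpcode::COPY), NewVR)
    .addReg(DstReg, 0, SubIdx);
  UseMO->setReg(NewVR);
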
Modified: llvm/branches/wendling/eh/lib/CodeGen/OptimizePHIs.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/OptimizePHIs.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/OptimizePHIs.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/OptimizePHIs.cpp Tue Oct 26 19:48:03 2010
@@ -33,7 +33,9 @@
 
   public:
     static char ID; // Pass identification
-    OptimizePHIs() : MachineFunctionPass(&ID) {}
+    OptimizePHIs() : MachineFunctionPass(ID) {
+      initializeOptimizePHIsPass(*PassRegistry::getPassRegistry());
+    }
 
     virtual bool runOnMachineFunction(MachineFunction &MF);
 
@@ -55,7 +57,7 @@
 
 char OptimizePHIs::ID = 0;
 INITIALIZE_PASS(OptimizePHIs, "opt-phis",
-                "Optimize machine instruction PHIs", false, false);
+                "Optimize machine instruction PHIs", false, false)
 
 FunctionPass *llvm::createOptimizePHIsPass() { return new OptimizePHIs(); }
 

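This hunk is the registration pattern applied throughout the commit: MachineFunctionPass now takes ID by value rather than &ID, each constructor calls the generated initializeXPass hook, and INITIALIZE_PASS no longer takes a trailing semicolon. A generic sketch of the idiom (MyPass and its strings are placeholders, not a pass in this commit):

  namespace {
    class MyPass : public MachineFunctionPass {
    public:
      static char ID;  // Pass identification
      MyPass() : MachineFunctionPass(ID) {  // ID passed by value, not &ID
        initializeMyPassPass(*PassRegistry::getPassRegistry());
      }
      virtual bool runOnMachineFunction(MachineFunction &MF);  // body elsewhere
    };
  }

  char MyPass::ID = 0;
  INITIALIZE_PASS(MyPass, "my-pass", "Example pass description", false, false)
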
Modified: llvm/branches/wendling/eh/lib/CodeGen/PHIElimination.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/PHIElimination.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/PHIElimination.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/PHIElimination.cpp Tue Oct 26 19:48:03 2010
@@ -20,6 +20,7 @@
 #include "llvm/CodeGen/MachineDominators.h"
 #include "llvm/CodeGen/MachineInstr.h"
 #include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Function.h"
@@ -37,16 +38,15 @@
 STATISTIC(NumReused, "Number of reused lowered phis");
 
 char PHIElimination::ID = 0;
-static RegisterPass<PHIElimination>
-X("phi-node-elimination", "Eliminate PHI nodes for register allocation");
+INITIALIZE_PASS(PHIElimination, "phi-node-elimination",
+                "Eliminate PHI nodes for register allocation", false, false)
 
-const PassInfo *const llvm::PHIEliminationID = &X;
+char &llvm::PHIEliminationID = PHIElimination::ID;
 
 void llvm::PHIElimination::getAnalysisUsage(AnalysisUsage &AU) const {
   AU.addPreserved<LiveVariables>();
   AU.addPreserved<MachineDominatorTree>();
-  // rdar://7401784 This would be nice:
-  // AU.addPreservedID(MachineLoopInfoID);
+  AU.addPreserved<MachineLoopInfo>();
   MachineFunctionPass::getAnalysisUsage(AU);
 }
 
@@ -56,9 +56,11 @@
   bool Changed = false;
 
   // Split critical edges to help the coalescer
-  if (LiveVariables *LV = getAnalysisIfAvailable<LiveVariables>())
+  if (LiveVariables *LV = getAnalysisIfAvailable<LiveVariables>()) {
+    MachineLoopInfo *MLI = getAnalysisIfAvailable<MachineLoopInfo>();
     for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I)
-      Changed |= SplitPHIEdges(MF, *I, *LV);
+      Changed |= SplitPHIEdges(MF, *I, *LV, MLI);
+  }
 
   // Populate VRegPHIUseCount
   analyzePHINodes(MF);
@@ -179,6 +181,7 @@
 
   unsigned NumSrcs = (MPhi->getNumOperands() - 1) / 2;
   unsigned DestReg = MPhi->getOperand(0).getReg();
+  assert(MPhi->getOperand(0).getSubReg() == 0 && "Can't handle sub-reg PHIs");
   bool isDead = MPhi->getOperand(0).isDead();
 
   // Create a new register for the incoming PHI arguments.
@@ -265,6 +268,8 @@
   SmallPtrSet<MachineBasicBlock*, 8> MBBsInsertedInto;
   for (int i = NumSrcs - 1; i >= 0; --i) {
     unsigned SrcReg = MPhi->getOperand(i*2+1).getReg();
+    unsigned SrcSubReg = MPhi->getOperand(i*2+1).getSubReg();
+
     assert(TargetRegisterInfo::isVirtualRegister(SrcReg) &&
            "Machine PHI Operands must all be virtual registers!");
 
@@ -294,7 +299,7 @@
     // Insert the copy.
     if (!reusedIncoming && IncomingReg)
       BuildMI(opBlock, InsertPos, MPhi->getDebugLoc(),
-              TII->get(TargetOpcode::COPY), IncomingReg).addReg(SrcReg);
+              TII->get(TargetOpcode::COPY), IncomingReg).addReg(SrcReg, 0, SrcSubReg);
 
     // Now update live variable information if we have it.  Otherwise we're done
     if (!LV) continue;
@@ -378,10 +383,12 @@
 
 bool llvm::PHIElimination::SplitPHIEdges(MachineFunction &MF,
                                          MachineBasicBlock &MBB,
-                                         LiveVariables &LV) {
+                                         LiveVariables &LV,
+                                         MachineLoopInfo *MLI) {
   if (MBB.empty() || !MBB.front().isPHI() || MBB.isLandingPad())
     return false;   // Quick exit for basic blocks without PHIs.
 
+  bool Changed = false;
   for (MachineBasicBlock::const_iterator BBI = MBB.begin(), BBE = MBB.end();
        BBI != BBE && BBI->isPHI(); ++BBI) {
     for (unsigned i = 1, e = BBI->getNumOperands(); i != e; i += 2) {
@@ -390,8 +397,15 @@
       // We break edges when registers are live out from the predecessor block
       // (not considering PHI nodes). If the register is live in to this block
       // anyway, we would gain nothing from splitting.
-      if (!LV.isLiveIn(Reg, MBB) && LV.isLiveOut(Reg, *PreMBB))
-        PreMBB->SplitCriticalEdge(&MBB, this);
+      // Avoid splitting backedges of loops. It would introduce small
+      // out-of-line blocks into the loop, which is very bad for code placement.
+      if (PreMBB != &MBB &&
+          !LV.isLiveIn(Reg, MBB) && LV.isLiveOut(Reg, *PreMBB)) {
+        if (!MLI ||
+            !(MLI->getLoopFor(PreMBB) == MLI->getLoopFor(&MBB) &&
+              MLI->isLoopHeader(&MBB)))
+          Changed |= PreMBB->SplitCriticalEdge(&MBB, this) != 0;
+      }
     }
   }
-  return true;
+  return Changed;

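The backedge guard added to SplitPHIEdges reads most clearly factored out: PreMBB -> MBB is a loop backedge exactly when both blocks belong to the same loop and MBB is that loop's header. A sketch under the same assumptions as the patch (when MLI is null, MachineLoopInfo was not computed and the edge is treated as splittable):

  // Sketch: detect the backedge case that SplitPHIEdges declines to split.
  // Splitting a backedge would drop a small out-of-line block into the
  // loop body, which is bad for code placement.
  static bool isLoopBackEdge(MachineBasicBlock *PreMBB,
                             MachineBasicBlock *MBB,
                             MachineLoopInfo *MLI) {
    return MLI && MLI->getLoopFor(PreMBB) == MLI->getLoopFor(MBB) &&
           MLI->isLoopHeader(MBB);
  }
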
Modified: llvm/branches/wendling/eh/lib/CodeGen/PHIElimination.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/PHIElimination.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/PHIElimination.h (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/PHIElimination.h Tue Oct 26 19:48:03 2010
@@ -13,19 +13,23 @@
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SmallSet.h"
 #include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
 
 namespace llvm {
   class LiveVariables;
+  class MachineRegisterInfo;
+  class MachineLoopInfo;
   
   /// Lower PHI instructions to copies.  
   class PHIElimination : public MachineFunctionPass {
-    MachineRegisterInfo  *MRI; // Machine register information
+    MachineRegisterInfo *MRI; // Machine register information
 
   public:
     static char ID; // Pass identification, replacement for typeid
-    PHIElimination() : MachineFunctionPass(&ID) {}
+    PHIElimination() : MachineFunctionPass(ID) {
+      initializePHIEliminationPass(*PassRegistry::getPassRegistry());
+    }
 
     virtual bool runOnMachineFunction(MachineFunction &Fn);
     
@@ -49,7 +53,7 @@
 
     /// Split critical edges where necessary for good coalescer performance.
     bool SplitPHIEdges(MachineFunction &MF, MachineBasicBlock &MBB,
-                       LiveVariables &LV);
+                       LiveVariables &LV, MachineLoopInfo *MLI);
 
     /// SplitCriticalEdge - Split a critical edge from A to B by
     /// inserting a new MBB. Update branches in A and PHI instructions

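The header change is standard include hygiene: types the header only names by pointer or reference are forward-declared, and the full includes move to the .cpp file (MachineLoopInfo.h is added to PHIElimination.cpp above). The idiom in isolation, as a sketch:

  // Header: pointers/references only, so forward declarations suffice.
  namespace llvm {
    class MachineRegisterInfo;
    class MachineLoopInfo;
  }

  // Implementation file: include the full definitions where members are
  // actually used.
  #include "llvm/CodeGen/MachineRegisterInfo.h"
  #include "llvm/CodeGen/MachineLoopInfo.h"
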
Modified: llvm/branches/wendling/eh/lib/CodeGen/PostRAHazardRecognizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/PostRAHazardRecognizer.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/PostRAHazardRecognizer.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/PostRAHazardRecognizer.cpp Tue Oct 26 19:48:03 2010
@@ -23,19 +23,19 @@
 using namespace llvm;
 
 PostRAHazardRecognizer::
-PostRAHazardRecognizer(const InstrItineraryData &LItinData) :
+PostRAHazardRecognizer(const InstrItineraryData *LItinData) :
   ScheduleHazardRecognizer(), ItinData(LItinData) {
   // Determine the maximum depth of any itinerary. This determines the
   // depth of the scoreboard. We always make the scoreboard at least 1
   // cycle deep to avoid dealing with the boundary condition.
   unsigned ScoreboardDepth = 1;
-  if (!ItinData.isEmpty()) {
+  if (ItinData && !ItinData->isEmpty()) {
     for (unsigned idx = 0; ; ++idx) {
-      if (ItinData.isEndMarker(idx))
+      if (ItinData->isEndMarker(idx))
         break;
 
-      const InstrStage *IS = ItinData.beginStage(idx);
-      const InstrStage *E = ItinData.endStage(idx);
+      const InstrStage *IS = ItinData->beginStage(idx);
+      const InstrStage *E = ItinData->endStage(idx);
       unsigned ItinDepth = 0;
       for (; IS != E; ++IS)
         ItinDepth += IS->getCycles();
@@ -74,7 +74,7 @@
 
 ScheduleHazardRecognizer::HazardType
 PostRAHazardRecognizer::getHazardType(SUnit *SU) {
-  if (ItinData.isEmpty())
+  if (!ItinData || ItinData->isEmpty())
     return NoHazard;
 
   unsigned cycle = 0;
@@ -82,8 +82,8 @@
   // Use the itinerary for the underlying instruction to check for
   // free FU's in the scoreboard at the appropriate future cycles.
   unsigned idx = SU->getInstr()->getDesc().getSchedClass();
-  for (const InstrStage *IS = ItinData.beginStage(idx),
-         *E = ItinData.endStage(idx); IS != E; ++IS) {
+  for (const InstrStage *IS = ItinData->beginStage(idx),
+         *E = ItinData->endStage(idx); IS != E; ++IS) {
     // We must find one of the stage's units free for every cycle the
     // stage is occupied. FIXME it would be more accurate to find the
     // same unit free in all the cycles.
@@ -121,7 +121,7 @@
 }
 
 void PostRAHazardRecognizer::EmitInstruction(SUnit *SU) {
-  if (ItinData.isEmpty())
+  if (!ItinData || ItinData->isEmpty())
     return;
 
   unsigned cycle = 0;
@@ -129,8 +129,8 @@
   // Use the itinerary for the underlying instruction to reserve FU's
   // in the scoreboard at the appropriate future cycles.
   unsigned idx = SU->getInstr()->getDesc().getSchedClass();
-  for (const InstrStage *IS = ItinData.beginStage(idx),
-         *E = ItinData.endStage(idx); IS != E; ++IS) {
+  for (const InstrStage *IS = ItinData->beginStage(idx),
+         *E = ItinData->endStage(idx); IS != E; ++IS) {
     // We must reserve one of the stage's units for every cycle the
     // stage is occupied. FIXME it would be more accurate to reserve
     // the same unit free in all the cycles.

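Passing the itinerary data by pointer lets targets without scheduling itineraries hand in null; every query then falls back to the minimal one-cycle scoreboard and reports NoHazard through the same two-part guard. On the target side the corresponding hook just forwards the pointer; a hypothetical sketch (MyTargetInstrInfo is a placeholder, and the hook's exact signature is assumed from the call site in PostRASchedulerList.cpp below):

  // Hypothetical target hook: forward a possibly-null itinerary to the
  // scoreboard recognizer, which checks for null before every use.
  ScheduleHazardRecognizer *MyTargetInstrInfo::
  CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II) const {
    return new PostRAHazardRecognizer(II);
  }
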
Modified: llvm/branches/wendling/eh/lib/CodeGen/PostRASchedulerList.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/PostRASchedulerList.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/PostRASchedulerList.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/PostRASchedulerList.cpp Tue Oct 26 19:48:03 2010
@@ -85,7 +85,7 @@
   public:
     static char ID;
     PostRAScheduler(CodeGenOpt::Level ol) :
-      MachineFunctionPass(&ID), OptLevel(ol) {}
+      MachineFunctionPass(ID), OptLevel(ol) {}
 
     void getAnalysisUsage(AnalysisUsage &AU) const {
       AU.setPreservesCFG();
@@ -213,7 +213,7 @@
   const MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
   const MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
   const TargetMachine &TM = Fn.getTarget();
-  const InstrItineraryData &InstrItins = TM.getInstrItineraryData();
+  const InstrItineraryData *InstrItins = TM.getInstrItineraryData();
   ScheduleHazardRecognizer *HR =
     TM.getInstrInfo()->CreateTargetPostRAHazardRecognizer(InstrItins);
   AntiDepBreaker *ADB =

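The MachineFunctionPass(&ID) to MachineFunctionPass(ID) change here (and in
every pass below) reflects the new convention of identifying a pass by a
char reference rather than a pointer; the address of the static char is
still the unique key. A standalone sketch of the idiom (hypothetical base
class, not the LLVM one):

  #include <cstdio>

  struct PassBase {
    const void *PassID;
    explicit PassBase(char &ID) : PassID(&ID) {} // identity is the address
  };

  struct MyPass : PassBase {
    static char ID;            // storage exists only to have a unique address
    MyPass() : PassBase(ID) {} // note: ID, not &ID, under the new signature
  };
  char MyPass::ID = 0;

  int main() {
    MyPass P;
    std::printf("pass id = %p\n", P.PassID);
  }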
Modified: llvm/branches/wendling/eh/lib/CodeGen/PreAllocSplitting.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/PreAllocSplitting.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/PreAllocSplitting.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/PreAllocSplitting.cpp Tue Oct 26 19:48:03 2010
@@ -91,8 +91,9 @@
 
   public:
     static char ID;
-    PreAllocSplitting()
-      : MachineFunctionPass(&ID) {}
+    PreAllocSplitting() : MachineFunctionPass(ID) {
+      initializePreAllocSplittingPass(*PassRegistry::getPassRegistry());
+    }
 
     virtual bool runOnMachineFunction(MachineFunction &MF);
 
@@ -203,10 +204,20 @@
 
 char PreAllocSplitting::ID = 0;
 
-static RegisterPass<PreAllocSplitting>
-X("pre-alloc-splitting", "Pre-Register Allocation Live Interval Splitting");
+INITIALIZE_PASS_BEGIN(PreAllocSplitting, "pre-alloc-splitting",
+                "Pre-Register Allocation Live Interval Splitting",
+                false, false)
+INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
+INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
+INITIALIZE_PASS_DEPENDENCY(LiveStacks)
+INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
+INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
+INITIALIZE_PASS_DEPENDENCY(VirtRegMap)
+INITIALIZE_PASS_END(PreAllocSplitting, "pre-alloc-splitting",
+                "Pre-Register Allocation Live Interval Splitting",
+                false, false)
 
-const PassInfo *const llvm::PreAllocSplittingID = &X;
+char &llvm::PreAllocSplittingID = PreAllocSplitting::ID;
 
 /// findSpillPoint - Find a gap as far away from the given MI that's suitable
 /// for spilling the current live interval. The index must be before any
@@ -323,7 +334,7 @@
   if (CurrSLI->hasAtLeastOneValue())
     CurrSValNo = CurrSLI->getValNumInfo(0);
   else
-    CurrSValNo = CurrSLI->getNextValue(SlotIndex(), 0, false,
+    CurrSValNo = CurrSLI->getNextValue(SlotIndex(), 0,
                                        LSs->getVNInfoAllocator());
   return SS;
 }
@@ -584,7 +595,7 @@
 
   SlotIndex StartIndex = LIs->getMBBStartIdx(MBB);
   VNInfo *RetVNI = Phis[MBB] =
-    LI->getNextValue(SlotIndex(), /*FIXME*/ 0, false,
+    LI->getNextValue(SlotIndex(), /*FIXME*/ 0,
                      LIs->getVNInfoAllocator());
 
   if (!IsIntraBlock) LiveOut[MBB] = RetVNI;
@@ -673,7 +684,7 @@
     DefIdx = DefIdx.getDefIndex();
     
     assert(!DI->isPHI() && "PHI instr in code during pre-alloc splitting.");
-    VNInfo* NewVN = LI->getNextValue(DefIdx, 0, true, Alloc);
+    VNInfo* NewVN = LI->getNextValue(DefIdx, 0, Alloc);
     
     // If the def is a move, set the copy field.
     if (DI->isCopyLike() && DI->getOperand(0).getReg() == LI->reg)
@@ -806,7 +817,7 @@
   MachineBasicBlock& MBB = *RestorePt->getParent();
   
   MachineBasicBlock::iterator KillPt = BarrierMBB->end();
-  if (!ValNo->isDefAccurate() || DefMI->getParent() == BarrierMBB)
+  if (!DefMI || DefMI->getParent() == BarrierMBB)
     KillPt = findSpillPoint(BarrierMBB, Barrier, NULL, RefsInMBB);
   else
     KillPt = llvm::next(MachineBasicBlock::iterator(DefMI));
@@ -871,7 +882,7 @@
     if (CurrSLI->hasAtLeastOneValue())
       CurrSValNo = CurrSLI->getValNumInfo(0);
     else
-      CurrSValNo = CurrSLI->getNextValue(SlotIndex(), 0, false,
+      CurrSValNo = CurrSLI->getNextValue(SlotIndex(), 0,
                                          LSs->getVNInfoAllocator());
   }
   
@@ -966,8 +977,7 @@
 
   assert(!ValNo->isUnused() && "Val# is defined by a dead def?");
 
-  MachineInstr *DefMI = ValNo->isDefAccurate()
-    ? LIs->getInstructionFromIndex(ValNo->def) : NULL;
+  MachineInstr *DefMI = LIs->getInstructionFromIndex(ValNo->def);
 
   // If this would create a new join point, do not split.
   if (DefMI && createsNewJoin(LR, DefMI->getParent(), Barrier->getParent())) {
@@ -1004,7 +1014,7 @@
   SlotIndex SpillIndex;
   MachineInstr *SpillMI = NULL;
   int SS = -1;
-  if (!ValNo->isDefAccurate()) {
+  if (!DefMI) {
     // If we don't know where the def is we must split just before the barrier.
     if ((SpillMI = FoldSpill(LI->reg, RC, 0, Barrier,
                             BarrierMBB, SS, RefsInMBB))) {
@@ -1198,12 +1208,12 @@
       
       // We also don't try to handle the results of PHI joins, since there's
       // no defining instruction to analyze.
-      if (!CurrVN->isDefAccurate() || CurrVN->isUnused()) continue;
+      MachineInstr* DefMI = LIs->getInstructionFromIndex(CurrVN->def);
+      if (!DefMI || CurrVN->isUnused()) continue;
     
       // We're only interested in eliminating cruft introduced by the splitter,
       // which is of the form load-use or load-use-store.  First, check that the
       // definition is a load, and remember what stack slot we loaded it from.
-      MachineInstr* DefMI = LIs->getInstructionFromIndex(CurrVN->def);
       int FrameIndex;
       if (!TII->isLoadFromStackSlot(DefMI, FrameIndex)) continue;
       

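PreAllocSplitting is the first of several passes in this patch converted
from a static RegisterPass<> object to the INITIALIZE_PASS_BEGIN /
INITIALIZE_PASS_DEPENDENCY / INITIALIZE_PASS_END macros plus an explicit
initializeXPass() call in the constructor. Conceptually (a hand-written
sketch, not the real macro expansion), the pattern produces an idempotent
function that registers dependencies before the pass itself, instead of
relying on static-constructor order:

  #include <cstdio>
  #include <set>

  static std::set<const void *> Done;
  static bool once(const void *ID) { return Done.insert(ID).second; }

  static char LiveIntervalsID, PreAllocSplittingID;

  void initializeLiveIntervalsPass() {
    if (once(&LiveIntervalsID))
      std::printf("registered LiveIntervals\n");
  }

  void initializePreAllocSplittingPass() {
    if (!once(&PreAllocSplittingID))
      return;                       // already registered; repeat calls are cheap
    initializeLiveIntervalsPass();  // INITIALIZE_PASS_DEPENDENCY(...)
    std::printf("registered PreAllocSplitting\n");
  }

  int main() {
    initializePreAllocSplittingPass(); // safe to call from every constructor
    initializePreAllocSplittingPass(); // second call is a no-op
  }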
Modified: llvm/branches/wendling/eh/lib/CodeGen/ProcessImplicitDefs.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/ProcessImplicitDefs.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/ProcessImplicitDefs.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/ProcessImplicitDefs.cpp Tue Oct 26 19:48:03 2010
@@ -26,8 +26,11 @@
 using namespace llvm;
 
 char ProcessImplicitDefs::ID = 0;
-INITIALIZE_PASS(ProcessImplicitDefs, "processimpdefs",
-                "Process Implicit Definitions.", false, false);
+INITIALIZE_PASS_BEGIN(ProcessImplicitDefs, "processimpdefs",
+                "Process Implicit Definitions.", false, false)
+INITIALIZE_PASS_DEPENDENCY(LiveVariables)
+INITIALIZE_PASS_END(ProcessImplicitDefs, "processimpdefs",
+                "Process Implicit Definitions.", false, false)
 
 void ProcessImplicitDefs::getAnalysisUsage(AnalysisUsage &AU) const {
   AU.setPreservesCFG();

Modified: llvm/branches/wendling/eh/lib/CodeGen/PrologEpilogInserter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/PrologEpilogInserter.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/PrologEpilogInserter.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/PrologEpilogInserter.cpp Tue Oct 26 19:48:03 2010
@@ -19,6 +19,7 @@
 //
 //===----------------------------------------------------------------------===//
 
+#define DEBUG_TYPE "pei"
 #include "PrologEpilogInserter.h"
 #include "llvm/CodeGen/MachineDominators.h"
 #include "llvm/CodeGen/MachineLoopInfo.h"
@@ -32,8 +33,10 @@
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
 #include "llvm/ADT/IndexedMap.h"
 #include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/Statistic.h"
 #include "llvm/ADT/STLExtras.h"
 #include <climits>
 
@@ -41,8 +44,15 @@
 
 char PEI::ID = 0;
 
-INITIALIZE_PASS(PEI, "prologepilog",
-                "Prologue/Epilogue Insertion", false, false);
+INITIALIZE_PASS_BEGIN(PEI, "prologepilog",
+                "Prologue/Epilogue Insertion", false, false)
+INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
+INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
+INITIALIZE_PASS_END(PEI, "prologepilog",
+                "Prologue/Epilogue Insertion", false, false)
+
+STATISTIC(NumVirtualFrameRegs, "Number of virtual frame regs encountered");
+STATISTIC(NumScavengedRegs, "Number of frame index regs scavenged");
 
 /// createPrologEpilogCodeInserter - This function returns a pass that inserts
 /// prolog and epilog code, and eliminates abstract frame references.
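The two STATISTIC counters follow the usual Statistic.h idiom, which is why
this hunk also defines DEBUG_TYPE before the includes: counters are bumped
with ++ where the events happen (see scavengeFrameVirtualRegs below) and are
printed by -stats, keyed by the "pei" debug type. Condensed:

  #define DEBUG_TYPE "pei"           // must precede llvm/ADT/Statistic.h
  #include "llvm/ADT/Statistic.h"

  STATISTIC(NumScavengedRegs, "Number of frame index regs scavenged");
  // ... later, at the point a register is actually scavenged:
  //   ++NumScavengedRegs;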
@@ -57,7 +67,6 @@
   const TargetRegisterInfo *TRI = Fn.getTarget().getRegisterInfo();
   RS = TRI->requiresRegisterScavenging(Fn) ? new RegScavenger() : NULL;
   FrameIndexVirtualScavenging = TRI->requiresFrameIndexScavenging(Fn);
-  FrameConstantRegMap.clear();
 
   // Calculate the MaxCallFrameSize and AdjustsStack variables for the
   // function's frame information. Also eliminates call frame pseudo
@@ -73,10 +82,10 @@
   calculateCalleeSavedRegisters(Fn);
 
   // Determine placement of CSR spill/restore code:
-  //  - with shrink wrapping, place spills and restores to tightly
+  //  - With shrink wrapping, place spills and restores to tightly
   //    enclose regions in the Machine CFG of the function where
-  //    they are used. Without shrink wrapping
-  //  - default (no shrink wrapping), place all spills in the
+  //    they are used.
+  //  - Without shrink wrapping (default), place all spills in the
   //    entry block, all restores in return blocks.
   placeCSRSpillsAndRestores(Fn);
 
@@ -462,8 +471,10 @@
   Offset = (Offset + Align - 1) / Align * Align;
 
   if (StackGrowsDown) {
+    DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << -Offset << "]\n");
     MFI->setObjectOffset(FrameIdx, -Offset); // Set the computed offset
   } else {
+    DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << Offset << "]\n");
     MFI->setObjectOffset(FrameIdx, Offset);
     Offset += MFI->getObjectSize(FrameIdx);
   }
@@ -548,6 +559,32 @@
       AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign);
   }
 
+  // FIXME: Once this is working, the enable flag will change to a target
+  // check for whether the frame is large enough to want to use virtual
+  // frame index registers. Functions which don't want/need this optimization
+  // will continue to use the existing code path.
+  if (MFI->getUseLocalStackAllocationBlock()) {
+    unsigned Align = MFI->getLocalFrameMaxAlign();
+
+    // Adjust to alignment boundary.
+    Offset = (Offset + Align - 1) / Align * Align;
+
+    DEBUG(dbgs() << "Local frame base offset: " << Offset << "\n");
+
+    // Resolve offsets for objects in the local block.
+    for (unsigned i = 0, e = MFI->getLocalFrameObjectCount(); i != e; ++i) {
+      std::pair<int, int64_t> Entry = MFI->getLocalFrameObjectMap(i);
+      int64_t FIOffset = (StackGrowsDown ? -Offset : Offset) + Entry.second;
+      DEBUG(dbgs() << "alloc FI(" << Entry.first << ") at SP[" <<
+            FIOffset << "]\n");
+      MFI->setObjectOffset(Entry.first, FIOffset);
+    }
+    // Allocate the local block
+    Offset += MFI->getLocalFrameSize();
+
+    MaxAlign = std::max(Align, MaxAlign);
+  }
+
   // Make sure that the stack protector comes before the local variables on the
   // stack.
   SmallSet<int, 16> LargeStackObjs;
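Both this hunk and the offset-assignment hunk above round with
Offset = (Offset + Align - 1) / Align * Align, the standard align-up
formula. A small worked example (plain function, any positive Align):

  #include <cassert>
  #include <cstdint>

  int64_t alignUp(int64_t Offset, int64_t Align) {
    assert(Align > 0);
    return (Offset + Align - 1) / Align * Align; // round up to a multiple
  }

  int main() {
    assert(alignUp(13, 8) == 16); // already past 8, rounds up to 16
    assert(alignUp(16, 8) == 16); // exact multiples are unchanged
  }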
@@ -557,6 +594,9 @@
 
     // Assign large stack objects first.
     for (unsigned i = 0, e = MFI->getObjectIndexEnd(); i != e; ++i) {
+      if (MFI->isObjectPreAllocated(i) &&
+          MFI->getUseLocalStackAllocationBlock())
+        continue;
       if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
         continue;
       if (RS && (int)i == RS->getScavengingFrameIndex())
@@ -576,6 +616,9 @@
   // Then assign frame offsets to stack objects that are not used to spill
   // callee saved registers.
   for (unsigned i = 0, e = MFI->getObjectIndexEnd(); i != e; ++i) {
+    if (MFI->isObjectPreAllocated(i) &&
+        MFI->getUseLocalStackAllocationBlock())
+      continue;
     if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
       continue;
     if (RS && (int)i == RS->getScavengingFrameIndex())
@@ -716,16 +759,8 @@
           // If this instruction has a FrameIndex operand, we need to
           // use that target machine register info object to eliminate
           // it.
-          TargetRegisterInfo::FrameIndexValue Value;
-          unsigned VReg =
-            TRI.eliminateFrameIndex(MI, SPAdj, &Value,
+            TRI.eliminateFrameIndex(MI, SPAdj,
                                     FrameIndexVirtualScavenging ?  NULL : RS);
-          if (VReg) {
-            assert (FrameIndexVirtualScavenging &&
-                    "Not scavenging, but virtual returned from "
-                    "eliminateFrameIndex()!");
-            FrameConstantRegMap[VReg] = FrameConstantEntry(Value, SPAdj);
-          }
 
           // Reset the iterator if we were at the beginning of the BB.
           if (AtBeginning) {
@@ -753,38 +788,6 @@
   }
 }
 
-/// findLastUseReg - find the killing use of the specified register within
-/// the instruciton range. Return the operand number of the kill in Operand.
-static MachineBasicBlock::iterator
-findLastUseReg(MachineBasicBlock::iterator I, MachineBasicBlock::iterator ME,
-               unsigned Reg) {
-  // Scan forward to find the last use of this virtual register
-  for (++I; I != ME; ++I) {
-    MachineInstr *MI = I;
-    bool isDefInsn = false;
-    bool isKillInsn = false;
-    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i)
-      if (MI->getOperand(i).isReg()) {
-        unsigned OpReg = MI->getOperand(i).getReg();
-        if (OpReg == 0 || !TargetRegisterInfo::isVirtualRegister(OpReg))
-          continue;
-        assert (OpReg == Reg
-                && "overlapping use of scavenged index register!");
-        // If this is the killing use, we have a candidate.
-        if (MI->getOperand(i).isKill())
-          isKillInsn = true;
-        else if (MI->getOperand(i).isDef())
-          isDefInsn = true;
-      }
-    if (isKillInsn && !isDefInsn)
-      return I;
-  }
-  // If we hit the end of the basic block, there was no kill of
-  // the virtual register, which is wrong.
-  assert (0 && "scavenged index register never killed!");
-  return ME;
-}
-
 /// scavengeFrameVirtualRegs - Replace all frame index virtual registers
 /// with physical registers. Use the register scavenger to find an
 /// appropriate register to use.
@@ -794,27 +797,14 @@
        E = Fn.end(); BB != E; ++BB) {
     RS->enterBasicBlock(BB);
 
-    // FIXME: The logic flow in this function is still too convoluted.
-    // It needs a cleanup refactoring. Do that in preparation for tracking
-    // more than one scratch register value and using ranges to find
-    // available scratch registers.
-    unsigned CurrentVirtReg = 0;
-    unsigned CurrentScratchReg = 0;
-    bool havePrevValue = false;
-    TargetRegisterInfo::FrameIndexValue PrevValue(0,0);
-    TargetRegisterInfo::FrameIndexValue Value(0,0);
-    MachineInstr *PrevLastUseMI = NULL;
-    unsigned PrevLastUseOp = 0;
-    bool trackingCurrentValue = false;
+    unsigned VirtReg = 0;
+    unsigned ScratchReg = 0;
     int SPAdj = 0;
 
     // The instruction stream may change in the loop, so check BB->end()
     // directly.
     for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ) {
       MachineInstr *MI = I;
-      bool isDefInsn = false;
-      bool isKillInsn = false;
-      bool clobbersScratchReg = false;
       bool DoIncr = true;
       for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
         if (MI->getOperand(i).isReg()) {
@@ -822,121 +812,30 @@
           unsigned Reg = MO.getReg();
           if (Reg == 0)
             continue;
-          if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
-            // If we have a previous scratch reg, check and see if anything
-            // here kills whatever value is in there.
-            if (Reg == CurrentScratchReg) {
-              if (MO.isUse()) {
-                // Two-address operands implicitly kill
-                if (MO.isKill() || MI->isRegTiedToDefOperand(i))
-                  clobbersScratchReg = true;
-              } else {
-                assert (MO.isDef());
-                clobbersScratchReg = true;
-              }
-            }
+          if (!TargetRegisterInfo::isVirtualRegister(Reg))
             continue;
-          }
-          // If this is a def, remember that this insn defines the value.
-          // This lets us properly consider insns which re-use the scratch
-          // register, such as r2 = sub r2, #imm, in the middle of the
-          // scratch range.
-          if (MO.isDef())
-            isDefInsn = true;
+
+          ++NumVirtualFrameRegs;
 
           // Have we already allocated a scratch register for this virtual?
-          if (Reg != CurrentVirtReg) {
+          if (Reg != VirtReg) {
             // When we first encounter a new virtual register, it
             // must be a definition.
             assert(MI->getOperand(i).isDef() &&
                    "frame index virtual missing def!");
-            // We can't have nested virtual register live ranges because
-            // there's only a guarantee of one scavenged register at a time.
-            assert (CurrentVirtReg == 0 &&
-                    "overlapping frame index virtual registers!");
-
-            // If the target gave us information about what's in the register,
-            // we can use that to re-use scratch regs.
-            DenseMap<unsigned, FrameConstantEntry>::iterator Entry =
-              FrameConstantRegMap.find(Reg);
-            trackingCurrentValue = Entry != FrameConstantRegMap.end();
-            if (trackingCurrentValue) {
-              SPAdj = (*Entry).second.second;
-              Value = (*Entry).second.first;
-            } else {
-              SPAdj = 0;
-              Value.first = 0;
-              Value.second = 0;
-            }
-
-            // If the scratch register from the last allocation is still
-            // available, see if the value matches. If it does, just re-use it.
-            if (trackingCurrentValue && havePrevValue && PrevValue == Value) {
-              // FIXME: This assumes that the instructions in the live range
-              // for the virtual register are exclusively for the purpose
-              // of populating the value in the register. That's reasonable
-              // for these frame index registers, but it's still a very, very
-              // strong assumption. rdar://7322732. Better would be to
-              // explicitly check each instruction in the range for references
-              // to the virtual register. Only delete those insns that
-              // touch the virtual register.
-
-              // Find the last use of the new virtual register. Remove all
-              // instruction between here and there, and update the current
-              // instruction to reference the last use insn instead.
-              MachineBasicBlock::iterator LastUseMI =
-                findLastUseReg(I, BB->end(), Reg);
-
-              // Remove all instructions up 'til the last use, since they're
-              // just calculating the value we already have.
-              BB->erase(I, LastUseMI);
-              I = LastUseMI;
-
-              // Extend the live range of the scratch register
-              PrevLastUseMI->getOperand(PrevLastUseOp).setIsKill(false);
-              RS->setUsed(CurrentScratchReg);
-              CurrentVirtReg = Reg;
-
-              // We deleted the instruction we were scanning the operands of.
-              // Jump back to the instruction iterator loop. Don't increment
-              // past this instruction since we updated the iterator already.
-              DoIncr = false;
-              break;
-            }
-
             // Scavenge a new scratch register
-            CurrentVirtReg = Reg;
+            VirtReg = Reg;
             const TargetRegisterClass *RC = Fn.getRegInfo().getRegClass(Reg);
-            CurrentScratchReg = RS->scavengeRegister(RC, I, SPAdj);
-            PrevValue = Value;
+            ScratchReg = RS->scavengeRegister(RC, I, SPAdj);
+            ++NumScavengedRegs;
           }
-          // replace this reference to the virtual register with the
+          // Replace this reference to the virtual register with the
           // scratch register.
-          assert (CurrentScratchReg && "Missing scratch register!");
-          MI->getOperand(i).setReg(CurrentScratchReg);
+          assert (ScratchReg && "Missing scratch register!");
+          MI->getOperand(i).setReg(ScratchReg);
 
-          if (MI->getOperand(i).isKill()) {
-            isKillInsn = true;
-            PrevLastUseOp = i;
-            PrevLastUseMI = MI;
-          }
         }
       }
-      // If this is the last use of the scratch, stop tracking it. The
-      // last use will be a kill operand in an instruction that does
-      // not also define the scratch register.
-      if (isKillInsn && !isDefInsn) {
-        CurrentVirtReg = 0;
-        havePrevValue = trackingCurrentValue;
-      }
-      // Similarly, notice if instruction clobbered the value in the
-      // register we're tracking for possible later reuse. This is noted
-      // above, but enforced here since the value is still live while we
-      // process the rest of the operands of the instruction.
-      if (clobbersScratchReg) {
-        havePrevValue = false;
-        CurrentScratchReg = 0;
-      }
       if (DoIncr) {
         RS->forward(I);
         ++I;

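The large deletion above removes the scratch-register value-tracking and
reuse machinery; what remains is a straight per-virtual scavenge. A
simplified standalone model of the surviving control flow (hypothetical
types, ignoring kills and the first-use-must-be-a-def assertion):

  #include <cstdio>
  #include <vector>

  struct Operand { unsigned Reg; bool IsDef; };
  struct Instr   { std::vector<Operand> Ops; };

  static bool isVirtual(unsigned Reg) { return Reg >= 1000; } // assumption
  static unsigned scavenge() { static unsigned N = 1; return N++; }

  void rewrite(std::vector<Instr> &Block) {
    unsigned VirtReg = 0, ScratchReg = 0;
    for (Instr &MI : Block)
      for (Operand &MO : MI.Ops) {
        if (!isVirtual(MO.Reg))
          continue;
        if (MO.Reg != VirtReg) { // first sighting of a new frame-index vreg
          VirtReg = MO.Reg;
          ScratchReg = scavenge(); // one scratch per virtual, no reuse logic
        }
        MO.Reg = ScratchReg;       // rewrite the reference in place
      }
  }

  int main() {
    std::vector<Instr> B{{{{1000, true}}}, {{{1000, false}}}};
    rewrite(B);
    std::printf("vreg 1000 rewritten to %u\n", B[1].Ops[0].Reg);
  }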
Modified: llvm/branches/wendling/eh/lib/CodeGen/PrologEpilogInserter.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/PrologEpilogInserter.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/PrologEpilogInserter.h (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/PrologEpilogInserter.h Tue Oct 26 19:48:03 2010
@@ -36,7 +36,9 @@
   class PEI : public MachineFunctionPass {
   public:
     static char ID;
-    PEI() : MachineFunctionPass(&ID) {}
+    PEI() : MachineFunctionPass(ID) {
+      initializePEIPass(*PassRegistry::getPassRegistry());
+    }
 
     const char *getPassName() const {
       return "Prolog/Epilog Insertion & Frame Finalization";
@@ -99,13 +101,6 @@
     // TRI->requiresFrameIndexScavenging() for the current function.
     bool FrameIndexVirtualScavenging;
 
-    // When using the scavenger post-pass to resolve frame reference
-    // materialization registers, maintain a map of the registers to
-    // the constant value and SP adjustment associated with it.
-    typedef std::pair<TargetRegisterInfo::FrameIndexValue, int>
-      FrameConstantEntry;
-    DenseMap<unsigned, FrameConstantEntry> FrameConstantRegMap;
-
 #ifndef NDEBUG
     // Machine function handle.
     MachineFunction* MF;

Modified: llvm/branches/wendling/eh/lib/CodeGen/RegAllocFast.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/RegAllocFast.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/RegAllocFast.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/RegAllocFast.cpp Tue Oct 26 19:48:03 2010
@@ -16,6 +16,7 @@
 #include "llvm/BasicBlock.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
 #include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/Passes.h"
@@ -46,8 +47,11 @@
   class RAFast : public MachineFunctionPass {
   public:
     static char ID;
-    RAFast() : MachineFunctionPass(&ID), StackSlotForVirtReg(-1),
-               isBulkSpilling(false) {}
+    RAFast() : MachineFunctionPass(ID), StackSlotForVirtReg(-1),
+               isBulkSpilling(false) {
+      initializePHIEliminationPass(*PassRegistry::getPassRegistry());
+      initializeTwoAddressInstructionPassPass(*PassRegistry::getPassRegistry());
+    }
   private:
     const TargetMachine *TM;
     MachineFunction *MF;
@@ -80,6 +84,8 @@
     // that is currently available in a physical register.
     LiveRegMap LiveVirtRegs;
 
+    DenseMap<unsigned, MachineInstr *> LiveDbgValueMap;
+
     // RegState - Track the state of a physical register.
     enum RegState {
       // A disabled register is not available for allocation, but an alias may
@@ -110,9 +116,9 @@
     // Allocatable - vector of allocatable physical registers.
     BitVector Allocatable;
 
-    // SkippedInstrs - Descriptors of instructions whose clobber list was ignored
-    // because all registers were spilled. It is still necessary to mark all the
-    // clobbered registers as used by the function.
+    // SkippedInstrs - Descriptors of instructions whose clobber list was
+    // ignored because all registers were spilled. It is still necessary to
+    // mark all the clobbered registers as used by the function.
     SmallPtrSet<const TargetInstrDesc*, 4> SkippedInstrs;
 
     // isBulkSpilling - This flag is set when LiveRegMap will be cleared
@@ -236,8 +242,7 @@
 }
 
 /// spillVirtReg - This method spills the value specified by VirtReg into the
-/// corresponding stack slot if needed. If isKill is set, the register is also
-/// killed.
+/// corresponding stack slot if needed.
 void RAFast::spillVirtReg(MachineBasicBlock::iterator MI, unsigned VirtReg) {
   assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
          "Spilling a physical register is illegal!");
@@ -265,6 +270,31 @@
     TII->storeRegToStackSlot(*MBB, MI, LR.PhysReg, SpillKill, FI, RC, TRI);
     ++NumStores;   // Update statistics
 
+    // If this register is used by a DBG_VALUE, insert a new DBG_VALUE that
+    // identifies the spilled location as the place to find the corresponding
+    // variable's value.
+    if (MachineInstr *DBG = LiveDbgValueMap.lookup(LRI->first)) {
+      const MDNode *MDPtr =
+        DBG->getOperand(DBG->getNumOperands()-1).getMetadata();
+      int64_t Offset = 0;
+      if (DBG->getOperand(1).isImm())
+        Offset = DBG->getOperand(1).getImm();
+      DebugLoc DL;
+      if (MI == MBB->end()) {
+        // If MI is at basic block end then use last instruction's location.
+        MachineBasicBlock::iterator EI = MI;
+        DL = (--EI)->getDebugLoc();
+      } else
+        DL = MI->getDebugLoc();
+      if (MachineInstr *NewDV =
+          TII->emitFrameIndexDebugValue(*MF, FI, Offset, MDPtr, DL)) {
+        MachineBasicBlock *MBB = DBG->getParent();
+        MBB->insert(MI, NewDV);
+        DEBUG(dbgs() << "Inserting debug info due to spill:" << "\n" << *NewDV);
+        LiveDbgValueMap[LRI->first] = NewDV;
+      }
+    }
     if (SpillKill)
       LR.LastUse = 0; // Don't kill register again
   }
@@ -471,7 +501,8 @@
   // First try to find a completely free register.
   for (TargetRegisterClass::iterator I = AOB; I != AOE; ++I) {
     unsigned PhysReg = *I;
-    if (PhysRegState[PhysReg] == regFree && !UsedInInstr.test(PhysReg))
+    if (PhysRegState[PhysReg] == regFree && !UsedInInstr.test(PhysReg) &&
+        Allocatable.test(PhysReg))
       return assignVirtToPhysReg(LRE, PhysReg);
   }
 
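The Allocatable guard added in this hunk and the next keeps the fast
allocator from handing out registers the target never made allocatable, even
when they look free. A toy standalone version of the three-way test
(hypothetical register numbering):

  #include <bitset>
  #include <cstdio>

  int main() {
    std::bitset<8> PhysRegFree("11111111");
    std::bitset<8> UsedInInstr("00000001"); // reg 0 already used in this insn
    std::bitset<8> Allocatable("11110000"); // regs 0-3 reserved by the target

    for (unsigned Reg = 0; Reg < 8; ++Reg)
      if (PhysRegFree[Reg] && !UsedInInstr[Reg] && Allocatable[Reg]) {
        std::printf("first candidate: %u\n", Reg); // prints 4
        break;
      }
  }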
@@ -480,6 +511,8 @@
 
   unsigned BestReg = 0, BestCost = spillImpossible;
   for (TargetRegisterClass::iterator I = AOB; I != AOE; ++I) {
+    if (!Allocatable.test(*I))
+      continue;
     unsigned Cost = calcSpillCost(*I);
     // Cost is 0 when all aliases are already disabled.
     if (Cost == 0)
@@ -709,7 +742,8 @@
   // Add live-in registers as live.
   for (MachineBasicBlock::livein_iterator I = MBB->livein_begin(),
          E = MBB->livein_end(); I != E; ++I)
-    definePhysReg(MII, *I, regReserved);
+    if (Allocatable.test(*I))
+      definePhysReg(MII, *I, regReserved);
 
   SmallVector<unsigned, 8> VirtDead;
   SmallVector<MachineInstr*, 32> Coalesced;
@@ -761,30 +795,38 @@
           if (!MO.isReg()) continue;
           unsigned Reg = MO.getReg();
           if (!Reg || TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
+          LiveDbgValueMap[Reg] = MI;
           LiveRegMap::iterator LRI = LiveVirtRegs.find(Reg);
           if (LRI != LiveVirtRegs.end())
             setPhysReg(MI, i, LRI->second.PhysReg);
           else {
             int SS = StackSlotForVirtReg[Reg];
-            if (SS == -1)
-              MO.setReg(0); // We can't allocate a physreg for a DebugValue, sorry!
+            if (SS == -1) {
+              // We can't allocate a physreg for a DebugValue, sorry!
+              DEBUG(dbgs() << "Unable to allocate vreg used by DBG_VALUE");
+              MO.setReg(0);
+            }
             else {
               // Modify DBG_VALUE now that the value is in a spill slot.
-              uint64_t Offset = MI->getOperand(1).getImm();
-              const MDNode *MDPtr = 
+              int64_t Offset = MI->getOperand(1).getImm();
+              const MDNode *MDPtr =
                 MI->getOperand(MI->getNumOperands()-1).getMetadata();
               DebugLoc DL = MI->getDebugLoc();
-              if (MachineInstr *NewDV = 
+              if (MachineInstr *NewDV =
                   TII->emitFrameIndexDebugValue(*MF, SS, Offset, MDPtr, DL)) {
-                DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
+                DEBUG(dbgs() << "Modifying debug info due to spill:" <<
+                      "\t" << *MI);
                 MachineBasicBlock *MBB = MI->getParent();
                 MBB->insert(MBB->erase(MI), NewDV);
                 // Scan NewDV operands from the beginning.
                 MI = NewDV;
                 ScanDbgValue = true;
                 break;
-              } else
-                MO.setReg(0); // We can't allocate a physreg for a DebugValue, sorry!
+              } else {
+                // We can't allocate a physreg for a DebugValue; sorry!
+                DEBUG(dbgs() << "Unable to allocate vreg used by DBG_VALUE");
+                MO.setReg(0);
+              }
             }
           }
         }
@@ -847,8 +889,8 @@
     // operands. If there are also physical defs, these registers must avoid
     // both physical defs and uses, making them more constrained than normal
     // operands.
-    // Similarly, if there are multiple defs and tied operands, we must make sure
-    // the same register is allocated to uses and defs.
+    // Similarly, if there are multiple defs and tied operands, we must make
+    // sure the same register is allocated to uses and defs.
     // We didn't detect inline asm tied operands above, so just make this extra
     // pass for all inline asm.
     if (MI->isInlineAsm() || hasEarlyClobbers || hasPartialRedefs ||
@@ -899,9 +941,9 @@
     unsigned DefOpEnd = MI->getNumOperands();
     if (TID.isCall()) {
       // Spill all virtregs before a call. This serves two purposes: 1. If an
-      // exception is thrown, the landing pad is going to expect to find registers
-      // in their spill slots, and 2. we don't have to wade through all the
-      // <imp-def> operands on the call instruction.
+      // exception is thrown, the landing pad is going to expect to find
+      // registers in their spill slots, and 2. we don't have to wade through
+      // all the <imp-def> operands on the call instruction.
       DefOpEnd = VirtOpEnd;
       DEBUG(dbgs() << "  Spilling remaining registers before call.\n");
       spillAll(MI);
@@ -1004,6 +1046,7 @@
 
   SkippedInstrs.clear();
   StackSlotForVirtReg.clear();
+  LiveDbgValueMap.clear();
   return true;
 }
 

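The LiveDbgValueMap additions above keep debug info alive across spills: the
allocator remembers the last DBG_VALUE referring to each virtual register
and, when that register is spilled, emits a frame-index DBG_VALUE so the
variable remains findable in its stack slot. A simplified standalone model
(hypothetical types, not the MachineInstr API):

  #include <cstdio>
  #include <map>

  struct DbgValue { int FrameIndex; long Offset; }; // FrameIndex<0: in a reg

  static std::map<unsigned, DbgValue> LiveDbgValueMap;

  void noteDbgValue(unsigned VReg, long Offset) {
    LiveDbgValueMap[VReg] = DbgValue{-1, Offset}; // value still in a register
  }

  void spill(unsigned VReg, int FI) {
    std::map<unsigned, DbgValue>::iterator It = LiveDbgValueMap.find(VReg);
    if (It == LiveDbgValueMap.end())
      return;                   // no debug user; nothing to re-emit
    It->second.FrameIndex = FI; // re-point the debug value at the stack slot
    std::printf("DBG_VALUE for vreg %u now at FI(%d)+%ld\n",
                VReg, FI, It->second.Offset);
  }

  int main() {
    noteDbgValue(7, 0);
    spill(7, 3);
  }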
Modified: llvm/branches/wendling/eh/lib/CodeGen/RegAllocLinearScan.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/RegAllocLinearScan.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/RegAllocLinearScan.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/RegAllocLinearScan.cpp Tue Oct 26 19:48:03 2010
@@ -18,7 +18,6 @@
 #include "llvm/Function.h"
 #include "llvm/CodeGen/CalcSpillWeights.h"
 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
-#include "llvm/CodeGen/LiveStackAnalysis.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
 #include "llvm/CodeGen/MachineInstr.h"
 #include "llvm/CodeGen/MachineLoopInfo.h"
@@ -87,10 +86,21 @@
                                "to skip."),
                       cl::init(0),
                       cl::Hidden);
- 
+
   struct RALinScan : public MachineFunctionPass {
     static char ID;
-    RALinScan() : MachineFunctionPass(&ID) {
+    RALinScan() : MachineFunctionPass(ID) {
+      initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
+      initializeStrongPHIEliminationPass(*PassRegistry::getPassRegistry());
+      initializeRegisterCoalescerAnalysisGroup(
+        *PassRegistry::getPassRegistry());
+      initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
+      initializePreAllocSplittingPass(*PassRegistry::getPassRegistry());
+      initializeLiveStacksPass(*PassRegistry::getPassRegistry());
+      initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
+      initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
+      initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
+      
       // Initialize the queue to record recently-used registers.
       if (NumRecentlyUsedRegs > 0)
         RecentRegs.resize(NumRecentlyUsedRegs, 0);
@@ -125,8 +135,8 @@
     const TargetRegisterInfo* tri_;
     const TargetInstrInfo* tii_;
     BitVector allocatableRegs_;
+    BitVector reservedRegs_;
     LiveIntervals* li_;
-    LiveStacks* ls_;
     MachineLoopInfo *loopInfo;
 
     /// handled_ - Intervals are added to the handled_ set in the order of their
@@ -192,8 +202,8 @@
       AU.addRequired<CalculateSpillWeights>();
       if (PreSplitIntervals)
         AU.addRequiredID(PreAllocSplittingID);
-      AU.addRequired<LiveStacks>();
-      AU.addPreserved<LiveStacks>();
+      AU.addRequiredID(LiveStacksID);
+      AU.addPreservedID(LiveStacksID);
       AU.addRequired<MachineLoopInfo>();
       AU.addPreserved<MachineLoopInfo>();
       AU.addRequired<VirtRegMap>();
@@ -335,6 +345,17 @@
                             SmallVector<unsigned, 256> &inactiveCounts,
                             bool SkipDGRegs);
 
+    /// getFirstNonReservedPhysReg - return the first non-reserved physical
+    /// register in the register class.
+    unsigned getFirstNonReservedPhysReg(const TargetRegisterClass *RC) {
+      TargetRegisterClass::iterator aoe = RC->allocation_order_end(*mf_);
+      TargetRegisterClass::iterator i = RC->allocation_order_begin(*mf_);
+      while (i != aoe && reservedRegs_.test(*i))
+        ++i;
+      assert(i != aoe && "All registers reserved?!");
+      return *i;
+    }
+
     void ComputeRelatedRegClasses();
 
     template <typename ItTy>
@@ -358,8 +379,18 @@
   char RALinScan::ID = 0;
 }
 
-INITIALIZE_PASS(RALinScan, "linearscan-regalloc",
-                "Linear Scan Register Allocator", false, false);
+INITIALIZE_PASS_BEGIN(RALinScan, "linearscan-regalloc",
+                "Linear Scan Register Allocator", false, false)
+INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
+INITIALIZE_PASS_DEPENDENCY(StrongPHIElimination)
+INITIALIZE_PASS_DEPENDENCY(CalculateSpillWeights)
+INITIALIZE_PASS_DEPENDENCY(PreAllocSplitting)
+INITIALIZE_PASS_DEPENDENCY(LiveStacks)
+INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
+INITIALIZE_PASS_DEPENDENCY(VirtRegMap)
+INITIALIZE_AG_DEPENDENCY(RegisterCoalescer)
+INITIALIZE_PASS_END(RALinScan, "linearscan-regalloc",
+                "Linear Scan Register Allocator", false, false)
 
 void RALinScan::ComputeRelatedRegClasses() {
   // First pass, add all reg classes to the union, and determine at least one
@@ -371,7 +402,7 @@
     for (TargetRegisterClass::iterator I = (*RCI)->begin(), E = (*RCI)->end();
          I != E; ++I) {
       HasAliases = HasAliases || *tri_->getAliasSet(*I) != 0;
-      
+
       const TargetRegisterClass *&PRC = OneClassForEachPhysReg[*I];
       if (PRC) {
         // Already processed this register.  Just make sure we know that
@@ -382,7 +413,7 @@
       }
     }
   }
-  
+
   // Second pass, now that we know conservatively what register classes each reg
   // belongs to, add info about aliases.  We don't need to do this for targets
   // without register aliases.
@@ -419,8 +450,7 @@
   unsigned CandReg;
   {
     MachineInstr *CopyMI;
-    if (vni->def != SlotIndex() && vni->isDefAccurate() &&
-        (CopyMI = li_->getInstructionFromIndex(vni->def)) && CopyMI->isCopy())
+    if ((CopyMI = li_->getInstructionFromIndex(vni->def)) && CopyMI->isCopy())
       // Defined by a copy, try to extend SrcReg forward
       CandReg = CopyMI->getOperand(1).getReg();
     else if (TrivCoalesceEnds &&
@@ -464,8 +494,8 @@
   tri_ = tm_->getRegisterInfo();
   tii_ = tm_->getInstrInfo();
   allocatableRegs_ = tri_->getAllocatableSet(fn);
+  reservedRegs_ = tri_->getReservedRegs(fn);
   li_ = &getAnalysis<LiveIntervals>();
-  ls_ = &getAnalysis<LiveStacks>();
   loopInfo = &getAnalysis<MachineLoopInfo>();
 
   // We don't run the coalescer here because we have no reason to
@@ -482,9 +512,9 @@
 
   vrm_ = &getAnalysis<VirtRegMap>();
   if (!rewriter_.get()) rewriter_.reset(createVirtRegRewriter());
-  
+
   spiller_.reset(createSpiller(*this, *mf_, *vrm_));
-  
+
   initIntervalSets();
 
   linearScan();
@@ -538,7 +568,7 @@
   // linear scan algorithm
   DEBUG({
       dbgs() << "********** LINEAR SCAN **********\n"
-             << "********** Function: " 
+             << "********** Function: "
              << mf_->getFunction()->getName() << '\n';
       printIntervals("fixed", fixed_.begin(), fixed_.end());
     });
@@ -625,8 +655,6 @@
 
   // Look for physical registers that end up not being allocated even though
   // register allocator had to spill other registers in its register class.
-  if (ls_->getNumIntervals() == 0)
-    return;
   if (!vrm_->FindUnusedRegisters(li_))
     return;
 }
@@ -760,7 +788,8 @@
   return IP.end();
 }
 
-static void RevertVectorIteratorsTo(RALinScan::IntervalPtrs &V, SlotIndex Point){
+static void RevertVectorIteratorsTo(RALinScan::IntervalPtrs &V,
+                                    SlotIndex Point) {
   for (unsigned i = 0, e = V.size(); i != e; ++i) {
     RALinScan::IntervalPtr &IP = V[i];
     LiveInterval::iterator I = std::upper_bound(IP.first->begin(),
@@ -770,30 +799,6 @@
   }
 }
 
-/// addStackInterval - Create a LiveInterval for stack if the specified live
-/// interval has been spilled.
-static void addStackInterval(LiveInterval *cur, LiveStacks *ls_,
-                             LiveIntervals *li_,
-                             MachineRegisterInfo* mri_, VirtRegMap &vrm_) {
-  int SS = vrm_.getStackSlot(cur->reg);
-  if (SS == VirtRegMap::NO_STACK_SLOT)
-    return;
-
-  const TargetRegisterClass *RC = mri_->getRegClass(cur->reg);
-  LiveInterval &SI = ls_->getOrCreateInterval(SS, RC);
-
-  VNInfo *VNI;
-  if (SI.hasAtLeastOneValue())
-    VNI = SI.getValNumInfo(0);
-  else
-    VNI = SI.getNextValue(SlotIndex(), 0, false,
-                          ls_->getVNInfoAllocator());
-
-  LiveInterval &RI = li_->getInterval(cur->reg);
-  // FIXME: This may be overly conservative.
-  SI.MergeRangesInAsValue(RI, VNI);
-}
-
 /// getConflictWeight - Return the number of conflicts between cur
 /// live interval and defs and uses of Reg weighted by loop depths.
 static
@@ -832,7 +837,7 @@
         dbgs() << tri_->getName(Candidates[i].first) << " ";
       dbgs() << "\n";
     });
-  
+
   // Calculate the number of conflicts of each candidate.
   for (IntervalPtrs::iterator i = active_.begin(); i != active_.end(); ++i) {
     unsigned Reg = i->first->reg;
@@ -950,7 +955,7 @@
   if (cur->empty()) {
     unsigned physReg = vrm_->getRegAllocPref(cur->reg);
     if (!physReg)
-      physReg = *RC->allocation_order_begin(*mf_);
+      physReg = getFirstNonReservedPhysReg(RC);
     DEBUG(dbgs() <<  tri_->getName(physReg) << '\n');
     // Note the register is not really in use.
     vrm_->assignVirt2Phys(cur->reg, physReg);
@@ -970,8 +975,7 @@
   // one, e.g. X86::mov32to32_. These move instructions are not coalescable.
   if (!vrm_->getRegAllocPref(cur->reg) && cur->hasAtLeastOneValue()) {
     VNInfo *vni = cur->begin()->valno;
-    if ((vni->def != SlotIndex()) && !vni->isUnused() &&
-         vni->isDefAccurate()) {
+    if (!vni->isUnused()) {
       MachineInstr *CopyMI = li_->getInstructionFromIndex(vni->def);
       if (CopyMI && CopyMI->isCopy()) {
         unsigned DstSubReg = CopyMI->getOperand(0).getSubReg();
@@ -1002,7 +1006,7 @@
     assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
            "Can only allocate virtual registers!");
     const TargetRegisterClass *RegRC = mri_->getRegClass(Reg);
-    // If this is not in a related reg class to the register we're allocating, 
+    // If this is not in a related reg class to the register we're allocating,
     // don't check it.
     if (RelatedRegClasses.getLeaderValue(RegRC) == RCLeader &&
         cur->overlapsFrom(*i->first, i->second-1)) {
@@ -1011,7 +1015,7 @@
       SpillWeightsToAdd.push_back(std::make_pair(Reg, i->first->weight));
     }
   }
-  
+
   // Speculatively check to see if we can get a register right now.  If not,
   // we know we won't be able to by adding more constraints.  If so, we can
   // check to see if it is valid.  Doing an exhaustive search of the fixed_ list
@@ -1026,7 +1030,7 @@
     SmallSet<unsigned, 8> RegAliases;
     for (const unsigned *AS = tri_->getAliasSet(physReg); *AS; ++AS)
       RegAliases.insert(*AS);
-    
+
     bool ConflictsWithFixed = false;
     for (unsigned i = 0, e = fixed_.size(); i != e; ++i) {
       IntervalPtr &IP = fixed_[i];
@@ -1046,7 +1050,7 @@
         }
       }
     }
-    
+
     // Okay, the register picked by our speculative getFreePhysReg call turned
     // out to be in use.  Actually add all of the conflicting fixed registers to
     // regUse_ so we can do an accurate query.
@@ -1058,7 +1062,7 @@
         LiveInterval *I = IP.first;
 
         const TargetRegisterClass *RegRC = OneClassForEachPhysReg[I->reg];
-        if (RelatedRegClasses.getLeaderValue(RegRC) == RCLeader &&       
+        if (RelatedRegClasses.getLeaderValue(RegRC) == RCLeader &&
             I->endIndex() > StartPosition) {
           LiveInterval::iterator II = I->advanceTo(IP.second, StartPosition);
           IP.second = II;
@@ -1077,11 +1081,11 @@
       physReg = getFreePhysReg(cur);
     }
   }
-    
+
   // Restore the physical register tracker, removing information about the
   // future.
   restoreRegUses();
-  
+
   // If we find a free register, we are done: assign this virtual to
   // the free physical register and add this interval to the active
   // list.
@@ -1096,7 +1100,7 @@
     UpgradeRegister(physReg);
     if (LiveInterval *NextReloadLI = hasNextReloadInterval(cur)) {
       // "Downgrade" physReg to try to keep physReg from being allocated until
-      // the next reload from the same SS is allocated. 
+      // the next reload from the same SS is allocated.
       mri_->setRegAllocationHint(NextReloadLI->reg, 0, physReg);
       DowngradeRegister(cur, physReg);
     }
@@ -1109,7 +1113,7 @@
   for (std::vector<std::pair<unsigned, float> >::iterator
        I = SpillWeightsToAdd.begin(), E = SpillWeightsToAdd.end(); I != E; ++I)
     updateSpillWeights(SpillWeights, I->first, I->second, RC);
-  
+
   // for each interval in active, update spill weights.
   for (IntervalPtrs::const_iterator i = active_.begin(), e = active_.end();
        i != e; ++i) {
@@ -1119,7 +1123,7 @@
     reg = vrm_->getPhys(reg);
     updateSpillWeights(SpillWeights, reg, i->first->weight, RC);
   }
- 
+
   DEBUG(dbgs() << "\tassigning stack slot at interval "<< *cur << ":\n");
 
   // Find a register to spill.
@@ -1133,17 +1137,22 @@
            e = RC->allocation_order_end(*mf_); i != e; ++i) {
       unsigned reg = *i;
       float regWeight = SpillWeights[reg];
-      // Skip recently allocated registers.
+      // Don't even consider reserved regs.
+      if (reservedRegs_.test(reg))
+        continue;
+      // Skip recently allocated registers.
       if (minWeight > regWeight && !isRecentlyUsed(reg))
         Found = true;
       RegsWeights.push_back(std::make_pair(reg, regWeight));
     }
-  
+
   // If we didn't find a register that is spillable, try aliases?
   if (!Found) {
     for (TargetRegisterClass::iterator i = RC->allocation_order_begin(*mf_),
            e = RC->allocation_order_end(*mf_); i != e; ++i) {
       unsigned reg = *i;
+      if (reservedRegs_.test(reg))
+        continue;
       // No need to worry about if the alias register size < regsize of RC.
       // We are going to spill all registers that alias it anyway.
       for (const unsigned* as = tri_->getAliasSet(reg); *as; ++as)
@@ -1157,7 +1166,7 @@
   minWeight = RegsWeights[0].second;
   if (minWeight == HUGE_VALF) {
     // All registers must have inf weight. Just grab one!
-    minReg = BestPhysReg ? BestPhysReg : *RC->allocation_order_begin(*mf_);
+    minReg = BestPhysReg ? BestPhysReg : getFirstNonReservedPhysReg(RC);
     if (cur->weight == HUGE_VALF ||
         li_->getApproximateInstructionCount(*cur) == 0) {
       // Spill a physical register around defs and uses.
@@ -1202,12 +1211,10 @@
   // linearscan.
   if (cur->weight != HUGE_VALF && cur->weight <= minWeight) {
     DEBUG(dbgs() << "\t\t\tspilling(c): " << *cur << '\n');
-    SmallVector<LiveInterval*, 8> spillIs;
-    std::vector<LiveInterval*> added;
+    SmallVector<LiveInterval*, 8> spillIs, added;
     spiller_->spill(cur, added, spillIs);
 
     std::sort(added.begin(), added.end(), LISorter());
-    addStackInterval(cur, ls_, li_, mri_, *vrm_);
     if (added.empty())
       return;  // Early exit if all spills were folded.
 
@@ -1266,27 +1273,32 @@
 
   // The earliest start of a Spilled interval indicates up to where
   // in handled we need to roll back
-  assert(!spillIs.empty() && "No spill intervals?"); 
+  assert(!spillIs.empty() && "No spill intervals?");
   SlotIndex earliestStart = spillIs[0]->beginIndex();
-  
+
   // Spill live intervals of virtual regs mapped to the physical register we
   // want to clear (and its aliases).  We only spill those that overlap with the
   // current interval as the rest do not affect its allocation. we also keep
   // track of the earliest start of all spilled live intervals since this will
   // mark our rollback point.
-  std::vector<LiveInterval*> added;
+  SmallVector<LiveInterval*, 8> added;
   while (!spillIs.empty()) {
     LiveInterval *sli = spillIs.back();
     spillIs.pop_back();
     DEBUG(dbgs() << "\t\t\tspilling(a): " << *sli << '\n');
     if (sli->beginIndex() < earliestStart)
       earliestStart = sli->beginIndex();
-       
-    spiller_->spill(sli, added, spillIs, &earliestStart);
-    addStackInterval(sli, ls_, li_, mri_, *vrm_);
+    spiller_->spill(sli, added, spillIs);
     spilled.insert(sli->reg);
   }
 
+  // Include any added intervals in earliestStart.
+  for (unsigned i = 0, e = added.size(); i != e; ++i) {
+    SlotIndex SI = added[i]->beginIndex();
+    if (SI < earliestStart)
+      earliestStart = SI;
+  }
+
   DEBUG(dbgs() << "\t\trolling back to: " << earliestStart << '\n');
 
   // Scan handled in reverse order up to the earliest start of a
@@ -1409,6 +1421,9 @@
     // Ignore "downgraded" registers.
     if (SkipDGRegs && DowngradedRegs.count(Reg))
       continue;
+    // Skip reserved registers.
+    if (reservedRegs_.test(Reg))
+      continue;
     // Skip recently allocated registers.
     if (isRegAvail(Reg) && !isRecentlyUsed(Reg)) {
       FreeReg = Reg;
@@ -1437,6 +1452,9 @@
     // Ignore "downgraded" registers.
     if (SkipDGRegs && DowngradedRegs.count(Reg))
       continue;
+    // Skip reserved registers.
+    if (reservedRegs_.test(Reg))
+      continue;
     if (isRegAvail(Reg) && Reg < inactiveCounts.size() &&
         FreeRegInactiveCount < inactiveCounts[Reg] && !isRecentlyUsed(Reg)) {
       FreeReg = Reg;
@@ -1457,17 +1475,17 @@
 unsigned RALinScan::getFreePhysReg(LiveInterval *cur) {
   SmallVector<unsigned, 256> inactiveCounts;
   unsigned MaxInactiveCount = 0;
-  
+
   const TargetRegisterClass *RC = mri_->getRegClass(cur->reg);
   const TargetRegisterClass *RCLeader = RelatedRegClasses.getLeaderValue(RC);
- 
+
   for (IntervalPtrs::iterator i = inactive_.begin(), e = inactive_.end();
        i != e; ++i) {
     unsigned reg = i->first->reg;
     assert(TargetRegisterInfo::isVirtualRegister(reg) &&
            "Can only allocate virtual registers!");
 
-    // If this is not in a related reg class to the register we're allocating, 
+    // If this is not in a related reg class to the register we're allocating,
     // don't check it.
     const TargetRegisterClass *RegRC = mri_->getRegClass(reg);
     if (RelatedRegClasses.getLeaderValue(RegRC) == RCLeader) {
@@ -1484,7 +1502,7 @@
   unsigned Preference = vrm_->getRegAllocPref(cur->reg);
   if (Preference) {
     DEBUG(dbgs() << "(preferred: " << tri_->getName(Preference) << ") ");
-    if (isRegAvail(Preference) && 
+    if (isRegAvail(Preference) &&
         RC->contains(Preference))
       return Preference;
   }

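A recurring change in RegAllocLinearScan.cpp above is filtering reserved
registers out of every walk over a class's allocation order, with
getFirstNonReservedPhysReg replacing bare allocation_order_begin() uses. The
helper reduces to this (standalone sketch with plain containers):

  #include <cassert>
  #include <vector>

  unsigned firstNonReserved(const std::vector<unsigned> &Order,
                            const std::vector<bool> &Reserved) {
    for (unsigned i = 0; i < Order.size(); ++i)
      if (!Reserved[Order[i]])
        return Order[i];       // first allocatable candidate wins
    assert(false && "All registers reserved?!");
    return 0;
  }

  int main() {
    std::vector<bool> Reserved(16, false);
    Reserved[13] = true;       // e.g. the stack pointer
    std::vector<unsigned> Order;
    Order.push_back(13);
    Order.push_back(7);
    assert(firstNonReserved(Order, Reserved) == 7);
  }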
Modified: llvm/branches/wendling/eh/lib/CodeGen/RegAllocPBQP.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/RegAllocPBQP.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/RegAllocPBQP.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/RegAllocPBQP.cpp Tue Oct 26 19:48:03 2010
@@ -31,9 +31,6 @@
 
 #define DEBUG_TYPE "regalloc"
 
-#include "PBQP/HeuristicSolver.h"
-#include "PBQP/Graph.h"
-#include "PBQP/Heuristics/Briggs.h"
 #include "RenderMachineFunction.h"
 #include "Splitter.h"
 #include "VirtRegMap.h"
@@ -41,9 +38,13 @@
 #include "llvm/CodeGen/CalcSpillWeights.h"
 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
 #include "llvm/CodeGen/LiveStackAnalysis.h"
+#include "llvm/CodeGen/RegAllocPBQP.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
 #include "llvm/CodeGen/MachineLoopInfo.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/PBQP/HeuristicSolver.h"
+#include "llvm/CodeGen/PBQP/Graph.h"
+#include "llvm/CodeGen/PBQP/Heuristics/Briggs.h"
 #include "llvm/CodeGen/RegAllocRegistry.h"
 #include "llvm/CodeGen/RegisterCoalescer.h"
 #include "llvm/Support/Debug.h"
@@ -51,7 +52,6 @@
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Target/TargetMachine.h"
 #include <limits>
-#include <map>
 #include <memory>
 #include <set>
 #include <vector>
@@ -60,7 +60,7 @@
 
 static RegisterRegAlloc
 registerPBQPRepAlloc("pbqp", "PBQP register allocator",
-                       llvm::createPBQPRegisterAllocator);
+                       createDefaultPBQPRegisterAllocator);
 
 static cl::opt<bool>
 pbqpCoalescing("pbqp-coalescing",
@@ -69,634 +69,418 @@
 
 static cl::opt<bool>
 pbqpPreSplitting("pbqp-pre-splitting",
-                 cl::desc("Pre-splite before PBQP register allocation."),
+                 cl::desc("Pre-split before PBQP register allocation."),
                  cl::init(false), cl::Hidden);
 
 namespace {
 
-  ///
-  /// PBQP based allocators solve the register allocation problem by mapping
-  /// register allocation problems to Partitioned Boolean Quadratic
-  /// Programming problems.
-  class PBQPRegAlloc : public MachineFunctionPass {
-  public:
-
-    static char ID;
-
-    /// Construct a PBQP register allocator.
-    PBQPRegAlloc() : MachineFunctionPass(&ID) {}
-
-    /// Return the pass name.
-    virtual const char* getPassName() const {
-      return "PBQP Register Allocator";
-    }
-
-    /// PBQP analysis usage.
-    virtual void getAnalysisUsage(AnalysisUsage &au) const {
-      au.addRequired<SlotIndexes>();
-      au.addPreserved<SlotIndexes>();
-      au.addRequired<LiveIntervals>();
-      //au.addRequiredID(SplitCriticalEdgesID);
-      au.addRequired<RegisterCoalescer>();
-      au.addRequired<CalculateSpillWeights>();
-      au.addRequired<LiveStacks>();
-      au.addPreserved<LiveStacks>();
-      au.addRequired<MachineLoopInfo>();
-      au.addPreserved<MachineLoopInfo>();
-      if (pbqpPreSplitting)
-        au.addRequired<LoopSplitter>();
-      au.addRequired<VirtRegMap>();
-      au.addRequired<RenderMachineFunction>();
-      MachineFunctionPass::getAnalysisUsage(au);
-    }
-
-    /// Perform register allocation
-    virtual bool runOnMachineFunction(MachineFunction &MF);
-
-  private:
-
-    class LIOrdering {
-    public:
-      bool operator()(const LiveInterval *li1, const LiveInterval *li2) const {
-        return li1->reg < li2->reg;
-      }
-    };
-
-    typedef std::map<const LiveInterval*, unsigned, LIOrdering> LI2NodeMap;
-    typedef std::vector<const LiveInterval*> Node2LIMap;
-    typedef std::vector<unsigned> AllowedSet;
-    typedef std::vector<AllowedSet> AllowedSetMap;
-    typedef std::set<unsigned> RegSet;
-    typedef std::pair<unsigned, unsigned> RegPair;
-    typedef std::map<RegPair, PBQP::PBQPNum> CoalesceMap;
-
-    typedef std::set<LiveInterval*, LIOrdering> LiveIntervalSet;
-
-    typedef std::vector<PBQP::Graph::NodeItr> NodeVector;
-
-    MachineFunction *mf;
-    const TargetMachine *tm;
-    const TargetRegisterInfo *tri;
-    const TargetInstrInfo *tii;
-    const MachineLoopInfo *loopInfo;
-    MachineRegisterInfo *mri;
-
-    LiveIntervals *lis;
-    LiveStacks *lss;
-    VirtRegMap *vrm;
-
-    LI2NodeMap li2Node;
-    Node2LIMap node2LI;
-    AllowedSetMap allowedSets;
-    LiveIntervalSet vregIntervalsToAlloc,
-                    emptyVRegIntervals;
-    NodeVector problemNodes;
-
-
-    /// Builds a PBQP cost vector.
-    template <typename RegContainer>
-    PBQP::Vector buildCostVector(unsigned vReg,
-                                 const RegContainer &allowed,
-                                 const CoalesceMap &cealesces,
-                                 PBQP::PBQPNum spillCost) const;
-
-    /// \brief Builds a PBQP interference matrix.
-    ///
-    /// @return Either a pointer to a non-zero PBQP matrix representing the
-    ///         allocation option costs, or a null pointer for a zero matrix.
-    ///
-    /// Expects allowed sets for two interfering LiveIntervals. These allowed
-    /// sets should contain only allocable registers from the LiveInterval's
-    /// register class, with any interfering pre-colored registers removed.
-    template <typename RegContainer>
-    PBQP::Matrix* buildInterferenceMatrix(const RegContainer &allowed1,
-                                          const RegContainer &allowed2) const;
-
-    ///
-    /// Expects allowed sets for two potentially coalescable LiveIntervals,
-    /// and an estimated benefit due to coalescing. The allowed sets should
-    /// contain only allocable registers from the LiveInterval's register
-    /// classes, with any interfering pre-colored registers removed.
-    template <typename RegContainer>
-    PBQP::Matrix* buildCoalescingMatrix(const RegContainer &allowed1,
-                                        const RegContainer &allowed2,
-                                        PBQP::PBQPNum cBenefit) const;
-
-    /// \brief Finds coalescing opportunities and returns them as a map.
-    ///
-    /// Any entries in the map are guaranteed coalescable, even if their
-    /// corresponding live intervals overlap.
-    CoalesceMap findCoalesces();
-
-    /// \brief Finds the initial set of vreg intervals to allocate.
-    void findVRegIntervalsToAlloc();
-
-    /// \brief Constructs a PBQP problem representation of the register
-    /// allocation problem for this function.
-    ///
-    /// @return a PBQP solver object for the register allocation problem.
-    PBQP::Graph constructPBQPProblem();
-
-    /// \brief Adds a stack interval if the given live interval has been
-    /// spilled. Used to support stack slot coloring.
-    void addStackInterval(const LiveInterval *spilled,MachineRegisterInfo* mri);
-
-    /// \brief Given a solved PBQP problem maps this solution back to a register
-    /// assignment.
-    bool mapPBQPToRegAlloc(const PBQP::Solution &solution);
-
-    /// \brief Postprocessing before final spilling. Sets basic block "live in"
-    /// variables.
-    void finalizeAlloc() const;
-
-  };
-
-  char PBQPRegAlloc::ID = 0;
-}
-
-
-template <typename RegContainer>
-PBQP::Vector PBQPRegAlloc::buildCostVector(unsigned vReg,
-                                           const RegContainer &allowed,
-                                           const CoalesceMap &coalesces,
-                                           PBQP::PBQPNum spillCost) const {
-
-  typedef typename RegContainer::const_iterator AllowedItr;
-
-  // Allocate vector. Additional element (0th) used for spill option
-  PBQP::Vector v(allowed.size() + 1, 0);
-
-  v[0] = spillCost;
-
-  // Iterate over the allowed registers inserting coalesce benefits if there
-  // are any.
-  unsigned ai = 0;
-  for (AllowedItr itr = allowed.begin(), end = allowed.end();
-       itr != end; ++itr, ++ai) {
-
-    unsigned pReg = *itr;
+///
+/// PBQP based allocators solve the register allocation problem by mapping
+/// register allocation problems to Partitioned Boolean Quadratic
+/// Programming problems.
+class RegAllocPBQP : public MachineFunctionPass {
+public:
 
-    CoalesceMap::const_iterator cmItr =
-      coalesces.find(RegPair(vReg, pReg));
-
-    // No coalesce - on to the next preg.
-    if (cmItr == coalesces.end())
-      continue;
+  static char ID;
 
-    // We have a coalesce - insert the benefit.
-    v[ai + 1] = -cmItr->second;
+  /// Construct a PBQP register allocator.
+  RegAllocPBQP(std::auto_ptr<PBQPBuilder> b)
+      : MachineFunctionPass(ID), builder(b) {
+    initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
+    initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
+    initializeRegisterCoalescerAnalysisGroup(*PassRegistry::getPassRegistry());
+    initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
+    initializeLiveStacksPass(*PassRegistry::getPassRegistry());
+    initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
+    initializeLoopSplitterPass(*PassRegistry::getPassRegistry());
+    initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
+    initializeRenderMachineFunctionPass(*PassRegistry::getPassRegistry());
   }
 
-  return v;
-}
+  /// Return the pass name.
+  virtual const char* getPassName() const {
+    return "PBQP Register Allocator";
+  }
 
-template <typename RegContainer>
-PBQP::Matrix* PBQPRegAlloc::buildInterferenceMatrix(
-      const RegContainer &allowed1, const RegContainer &allowed2) const {
+  /// PBQP analysis usage.
+  virtual void getAnalysisUsage(AnalysisUsage &au) const;
 
-  typedef typename RegContainer::const_iterator RegContainerIterator;
+  /// Perform register allocation
+  virtual bool runOnMachineFunction(MachineFunction &MF);
 
-  // Construct a PBQP matrix representing the cost of allocation options. The
-  // rows and columns correspond to the allocation options for the two live
-  // intervals.  Elements will be infinite where corresponding registers alias,
-  // since we cannot allocate aliasing registers to interfering live intervals.
-  // All other elements (non-aliasing combinations) will have zero cost. Note
-  // that the spill option (element 0,0) has zero cost, since we can allocate
-  // both intervals to memory safely (the cost for each individual allocation
-  // to memory is accounted for by the cost vectors for each live interval).
-  PBQP::Matrix *m =
-    new PBQP::Matrix(allowed1.size() + 1, allowed2.size() + 1, 0);
+private:
 
-  // Assume this is a zero matrix until proven otherwise.  Zero matrices occur
-  // between interfering live ranges with non-overlapping register sets (e.g.
-  // non-overlapping reg classes, or disjoint sets of allowed regs within the
-  // same class). The term "overlapping" is used advisedly: sets which do not
-  // intersect, but contain registers which alias, will have non-zero matrices.
-  // We optimize zero matrices away to improve solver speed.
-  bool isZeroMatrix = true;
+  typedef std::map<const LiveInterval*, unsigned> LI2NodeMap;
+  typedef std::vector<const LiveInterval*> Node2LIMap;
+  typedef std::vector<unsigned> AllowedSet;
+  typedef std::vector<AllowedSet> AllowedSetMap;
+  typedef std::pair<unsigned, unsigned> RegPair;
+  typedef std::map<RegPair, PBQP::PBQPNum> CoalesceMap;
+  typedef std::vector<PBQP::Graph::NodeItr> NodeVector;
+  typedef std::set<unsigned> RegSet;
 
 
-  // Row index. Starts at 1, since the 0th row is for the spill option, which
-  // is always zero.
-  unsigned ri = 1;
+  std::auto_ptr<PBQPBuilder> builder;
 
-  // Iterate over allowed sets, insert infinities where required.
-  for (RegContainerIterator a1Itr = allowed1.begin(), a1End = allowed1.end();
-       a1Itr != a1End; ++a1Itr) {
+  MachineFunction *mf;
+  const TargetMachine *tm;
+  const TargetRegisterInfo *tri;
+  const TargetInstrInfo *tii;
+  const MachineLoopInfo *loopInfo;
+  MachineRegisterInfo *mri;
+  RenderMachineFunction *rmf;
 
-    // Column index, starts at 1 as for row index.
-    unsigned ci = 1;
-    unsigned reg1 = *a1Itr;
+  LiveIntervals *lis;
+  LiveStacks *lss;
+  VirtRegMap *vrm;
 
-    for (RegContainerIterator a2Itr = allowed2.begin(), a2End = allowed2.end();
-         a2Itr != a2End; ++a2Itr) {
+  RegSet vregsToAlloc, emptyIntervalVRegs;
 
-      unsigned reg2 = *a2Itr;
+  /// \brief Finds the initial set of vreg intervals to allocate.
+  void findVRegIntervalsToAlloc();
 
-      // If the row/column regs are identical or alias, insert an infinity.
-      if (tri->regsOverlap(reg1, reg2)) {
-        (*m)[ri][ci] = std::numeric_limits<PBQP::PBQPNum>::infinity();
-        isZeroMatrix = false;
-      }
+  /// \brief Adds a stack interval if the given live interval has been
+  /// spilled. Used to support stack slot coloring.
+  void addStackInterval(const LiveInterval *spilled,
+                        MachineRegisterInfo *mri);
 
-      ++ci;
-    }
+  /// \brief Given a solved PBQP problem maps this solution back to a register
+  /// assignment.
+  bool mapPBQPToRegAlloc(const PBQPRAProblem &problem,
+                         const PBQP::Solution &solution);
 
-    ++ri;
-  }
+  /// \brief Postprocessing before final spilling. Sets basic block "live in"
+  /// variables.
+  void finalizeAlloc() const;
 
-  // If this turns out to be a zero matrix...
-  if (isZeroMatrix) {
-    // free it and return null.
-    delete m;
-    return 0;
-  }
+};
 
-  // ...otherwise return the cost matrix.
-  return m;
-}
+char RegAllocPBQP::ID = 0;
 
-template <typename RegContainer>
-PBQP::Matrix* PBQPRegAlloc::buildCoalescingMatrix(
-      const RegContainer &allowed1, const RegContainer &allowed2,
-      PBQP::PBQPNum cBenefit) const {
+} // End anonymous namespace.
 
-  typedef typename RegContainer::const_iterator RegContainerIterator;
+unsigned PBQPRAProblem::getVRegForNode(PBQP::Graph::ConstNodeItr node) const {
+  Node2VReg::const_iterator vregItr = node2VReg.find(node);
+  assert(vregItr != node2VReg.end() && "No vreg for node.");
+  return vregItr->second;
+}
 
-  // Construct a PBQP Matrix representing the benefits of coalescing. As with
-  // interference matrices the rows and columns represent allowed registers
-  // for the LiveIntervals which are (potentially) to be coalesced. The amount
-  // -cBenefit will be placed in any element representing the same register
-  // for both intervals.
-  PBQP::Matrix *m =
-    new PBQP::Matrix(allowed1.size() + 1, allowed2.size() + 1, 0);
+PBQP::Graph::NodeItr PBQPRAProblem::getNodeForVReg(unsigned vreg) const {
+  VReg2Node::const_iterator nodeItr = vreg2Node.find(vreg);
+  assert(nodeItr != vreg2Node.end() && "No node for vreg.");
+  return nodeItr->second;
+}
 
-  // Reset costs to zero.
-  m->reset(0);
+const PBQPRAProblem::AllowedSet&
+  PBQPRAProblem::getAllowedSet(unsigned vreg) const {
+  AllowedSetMap::const_iterator allowedSetItr = allowedSets.find(vreg);
+  assert(allowedSetItr != allowedSets.end() && "No pregs for vreg.");
+  const AllowedSet &allowedSet = allowedSetItr->second;
+  return allowedSet;
+}
 
-  // Assume the matrix is zero till proven otherwise. Zero matrices will be
-  // optimized away as in the interference case.
-  bool isZeroMatrix = true;
+unsigned PBQPRAProblem::getPRegForOption(unsigned vreg, unsigned option) const {
+  assert(isPRegOption(vreg, option) && "Not a preg option.");
 
-  // Row index. Starts at 1, since the 0th row is for the spill option, which
-  // is always zero.
-  unsigned ri = 1;
+  const AllowedSet& allowedSet = getAllowedSet(vreg);
+  assert(option <= allowedSet.size() && "Option outside allowed set.");
+  return allowedSet[option - 1];
+}
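
A note on the indexing above: within each node's cost vector, option 0 is
the spill choice and option k (k >= 1) selects allowedSet[k - 1]. A minimal
sketch of the decode step, with hypothetical names that are not part of this
patch:

    #include <vector>

    // Sketch only: decode one vreg's PBQP solution index.
    // 'allowed' stands in for the vreg's allowed physreg set.
    unsigned decodeOption(const std::vector<unsigned> &allowed,
                          unsigned option) {
      if (option == 0)
        return 0;                  // Option 0 denotes the spill choice.
      return allowed[option - 1];  // Options 1..N map to allowed[0..N-1].
    }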
 
-  // Iterate over the allowed sets, insert coalescing benefits where
-  // appropriate.
-  for (RegContainerIterator a1Itr = allowed1.begin(), a1End = allowed1.end();
-       a1Itr != a1End; ++a1Itr) {
+std::auto_ptr<PBQPRAProblem> PBQPBuilder::build(MachineFunction *mf,
+                                                const LiveIntervals *lis,
+                                                const MachineLoopInfo *loopInfo,
+                                                const RegSet &vregs) {
 
-    // Column index, starts at 1 as for row index.
-    unsigned ci = 1;
-    unsigned reg1 = *a1Itr;
+  typedef std::vector<const LiveInterval*> LIVector;
 
-    for (RegContainerIterator a2Itr = allowed2.begin(), a2End = allowed2.end();
-         a2Itr != a2End; ++a2Itr) {
+  MachineRegisterInfo *mri = &mf->getRegInfo();
+  const TargetRegisterInfo *tri = mf->getTarget().getRegisterInfo();  
 
-      // If the row and column represent the same register insert a beneficial
-      // cost to preference this allocation - it would allow us to eliminate a
-      // move instruction.
-      if (reg1 == *a2Itr) {
-        (*m)[ri][ci] = -cBenefit;
-        isZeroMatrix = false;
-      }
+  std::auto_ptr<PBQPRAProblem> p(new PBQPRAProblem());
+  PBQP::Graph &g = p->getGraph();
+  RegSet pregs;
 
-      ++ci;
+  // Collect the set of preg intervals, record that they're used in the MF.
+  for (LiveIntervals::const_iterator itr = lis->begin(), end = lis->end();
+       itr != end; ++itr) {
+    if (TargetRegisterInfo::isPhysicalRegister(itr->first)) {
+      pregs.insert(itr->first);
+      mri->setPhysRegUsed(itr->first);
     }
-
-    ++ri;
-  }
-
-  // If this turns out to be a zero matrix...
-  if (isZeroMatrix) {
-    // ...free it and return null.
-    delete m;
-    return 0;
   }
 
-  return m;
-}
-
-PBQPRegAlloc::CoalesceMap PBQPRegAlloc::findCoalesces() {
-
-  typedef MachineFunction::const_iterator MFIterator;
-  typedef MachineBasicBlock::const_iterator MBBIterator;
-  typedef LiveInterval::const_vni_iterator VNIIterator;
-
-  CoalesceMap coalescesFound;
+  BitVector reservedRegs = tri->getReservedRegs(*mf);
 
-  // To find coalesces we need to iterate over the function looking for
-  // copy instructions.
-  for (MFIterator bbItr = mf->begin(), bbEnd = mf->end();
-       bbItr != bbEnd; ++bbItr) {
+  // Iterate over vregs. 
+  for (RegSet::const_iterator vregItr = vregs.begin(), vregEnd = vregs.end();
+       vregItr != vregEnd; ++vregItr) {
+    unsigned vreg = *vregItr;
+    const TargetRegisterClass *trc = mri->getRegClass(vreg);
+    const LiveInterval *vregLI = &lis->getInterval(vreg);
 
-    const MachineBasicBlock *mbb = &*bbItr;
-
-    for (MBBIterator iItr = mbb->begin(), iEnd = mbb->end();
-         iItr != iEnd; ++iItr) {
-
-      const MachineInstr *instr = &*iItr;
-
-      // If this isn't a copy then continue to the next instruction.
-      if (!instr->isCopy())
-        continue;
+    // Compute an initial allowed set for the current vreg.
+    typedef std::vector<unsigned> VRAllowed;
+    VRAllowed vrAllowed;
+    for (TargetRegisterClass::iterator aoItr = trc->allocation_order_begin(*mf),
+                                       aoEnd = trc->allocation_order_end(*mf);
+         aoItr != aoEnd; ++aoItr) {
+      unsigned preg = *aoItr;
+      if (!reservedRegs.test(preg)) {
+        vrAllowed.push_back(preg);
+      }
+    }
 
-      unsigned srcReg = instr->getOperand(1).getReg();
-      unsigned dstReg = instr->getOperand(0).getReg();
+    // Remove any physical registers which overlap.
+    for (RegSet::const_iterator pregItr = pregs.begin(),
+                                pregEnd = pregs.end();
+         pregItr != pregEnd; ++pregItr) {
+      unsigned preg = *pregItr;
+      const LiveInterval *pregLI = &lis->getInterval(preg);
 
-      // If the registers are already the same our job is nice and easy.
-      if (dstReg == srcReg)
+      if (pregLI->empty())
         continue;
 
-      bool srcRegIsPhysical = TargetRegisterInfo::isPhysicalRegister(srcReg),
-           dstRegIsPhysical = TargetRegisterInfo::isPhysicalRegister(dstReg);
-
-      // If both registers are physical then we can't coalesce.
-      if (srcRegIsPhysical && dstRegIsPhysical)
+      if (!vregLI->overlaps(*pregLI))
         continue;
 
-      // If it's a copy that includes two virtual register but the source and
-      // destination classes differ then we can't coalesce.
-      if (!srcRegIsPhysical && !dstRegIsPhysical &&
-          mri->getRegClass(srcReg) != mri->getRegClass(dstReg))
-        continue;
+      // Remove the register from the allowed set.
+      VRAllowed::iterator eraseItr =
+        std::find(vrAllowed.begin(), vrAllowed.end(), preg);
 
-      // If one is physical and one is virtual, check that the physical is
-      // allocatable in the class of the virtual.
-      if (srcRegIsPhysical && !dstRegIsPhysical) {
-        const TargetRegisterClass *dstRegClass = mri->getRegClass(dstReg);
-        if (std::find(dstRegClass->allocation_order_begin(*mf),
-                      dstRegClass->allocation_order_end(*mf), srcReg) ==
-            dstRegClass->allocation_order_end(*mf))
-          continue;
-      }
-      if (!srcRegIsPhysical && dstRegIsPhysical) {
-        const TargetRegisterClass *srcRegClass = mri->getRegClass(srcReg);
-        if (std::find(srcRegClass->allocation_order_begin(*mf),
-                      srcRegClass->allocation_order_end(*mf), dstReg) ==
-            srcRegClass->allocation_order_end(*mf))
-          continue;
+      if (eraseItr != vrAllowed.end()) {
+        vrAllowed.erase(eraseItr);
       }
 
-      // If we've made it here we have a copy with compatible register classes.
-      // We can probably coalesce, but we need to consider overlap.
-      const LiveInterval *srcLI = &lis->getInterval(srcReg),
-                         *dstLI = &lis->getInterval(dstReg);
-
-      if (srcLI->overlaps(*dstLI)) {
-        // Even in the case of an overlap we might still be able to coalesce,
-        // but we need to make sure that no definition of either range occurs
-        // while the other range is live.
-
-        // Otherwise start by assuming we're ok.
-        bool badDef = false;
-
-        // Test all defs of the source range.
-        for (VNIIterator
-               vniItr = srcLI->vni_begin(), vniEnd = srcLI->vni_end();
-               vniItr != vniEnd; ++vniItr) {
-
-          // If we find a poorly defined def we err on the side of caution.
-          if (!(*vniItr)->def.isValid()) {
-            badDef = true;
-            break;
-          }
-
-          // If we find a def that kills the coalescing opportunity then
-          // record it and break from the loop.
-          if (dstLI->liveAt((*vniItr)->def)) {
-            badDef = true;
-            break;
-          }
-        }
-
-        // If we have a bad def give up, continue to the next instruction.
-        if (badDef)
-          continue;
-
-        // Otherwise test definitions of the destination range.
-        for (VNIIterator
-               vniItr = dstLI->vni_begin(), vniEnd = dstLI->vni_end();
-               vniItr != vniEnd; ++vniItr) {
-
-          // We want to make sure we skip the copy instruction itself.
-          if ((*vniItr)->getCopy() == instr)
-            continue;
-
-          if (!(*vniItr)->def.isValid()) {
-            badDef = true;
-            break;
-          }
+      // Also remove any aliases.
+      const unsigned *aliasItr = tri->getAliasSet(preg);
+      if (aliasItr != 0) {
+        for (; *aliasItr != 0; ++aliasItr) {
+          VRAllowed::iterator eraseItr =
+            std::find(vrAllowed.begin(), vrAllowed.end(), *aliasItr);
 
-          if (srcLI->liveAt((*vniItr)->def)) {
-            badDef = true;
-            break;
+          if (eraseItr != vrAllowed.end()) {
+            vrAllowed.erase(eraseItr);
           }
         }
-
-        // As before a bad def we give up and continue to the next instr.
-        if (badDef)
-          continue;
       }
+    }
 
-      // If we make it to here then either the ranges didn't overlap, or they
-      // did, but none of their definitions would prevent us from coalescing.
-      // We're good to go with the coalesce.
+    // Construct the node.
+    PBQP::Graph::NodeItr node = 
+      g.addNode(PBQP::Vector(vrAllowed.size() + 1, 0));
 
-      float cBenefit = std::pow(10.0f, (float)loopInfo->getLoopDepth(mbb)) / 5.0;
+    // Record the mapping and allowed set in the problem.
+    p->recordVReg(vreg, node, vrAllowed.begin(), vrAllowed.end());
 
-      coalescesFound[RegPair(srcReg, dstReg)] = cBenefit;
-      coalescesFound[RegPair(dstReg, srcReg)] = cBenefit;
-    }
+    PBQP::PBQPNum spillCost = (vregLI->weight != 0.0) ?
+        vregLI->weight : std::numeric_limits<PBQP::PBQPNum>::min();
 
+    addSpillCosts(g.getNodeCosts(node), spillCost);
   }
 
-  return coalescesFound;
-}
+  for (RegSet::const_iterator vr1Itr = vregs.begin(), vrEnd = vregs.end();
+         vr1Itr != vrEnd; ++vr1Itr) {
+    unsigned vr1 = *vr1Itr;
+    const LiveInterval &l1 = lis->getInterval(vr1);
+    const PBQPRAProblem::AllowedSet &vr1Allowed = p->getAllowedSet(vr1);
 
-void PBQPRegAlloc::findVRegIntervalsToAlloc() {
+    for (RegSet::const_iterator vr2Itr = llvm::next(vr1Itr);
+         vr2Itr != vrEnd; ++vr2Itr) {
+      unsigned vr2 = *vr2Itr;
+      const LiveInterval &l2 = lis->getInterval(vr2);
+      const PBQPRAProblem::AllowedSet &vr2Allowed = p->getAllowedSet(vr2);
 
-  // Iterate over all live ranges.
-  for (LiveIntervals::iterator itr = lis->begin(), end = lis->end();
-       itr != end; ++itr) {
+      assert(!l2.empty() && "Empty interval in vreg set?");
+      if (l1.overlaps(l2)) {
+        PBQP::Graph::EdgeItr edge =
+          g.addEdge(p->getNodeForVReg(vr1), p->getNodeForVReg(vr2),
+                    PBQP::Matrix(vr1Allowed.size()+1, vr2Allowed.size()+1, 0));
 
-    // Ignore physical ones.
-    if (TargetRegisterInfo::isPhysicalRegister(itr->first))
-      continue;
-
-    LiveInterval *li = itr->second;
-
-    // If this live interval is non-empty we will use pbqp to allocate it.
-    // Empty intervals we allocate in a simple post-processing stage in
-    // finalizeAlloc.
-    if (!li->empty()) {
-      vregIntervalsToAlloc.insert(li);
-    }
-    else {
-      emptyVRegIntervals.insert(li);
+        addInterferenceCosts(g.getEdgeCosts(edge), vr1Allowed, vr2Allowed, tri);
+      }
     }
   }
+
+  return p;
 }
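
One detail of build() worth spelling out: the spill option's cost is the
interval weight, with a tiny positive epsilon substituted when the weight is
zero, so spilling is never modelled as free. A sketch of the node-cost
construction using plain std:: types (hypothetical helper, not part of the
patch):

    #include <limits>
    #include <vector>

    // Initial cost vector for one vreg node: index 0 is the spill option,
    // indexes 1..N correspond to the N allowed physical registers.
    std::vector<double> makeNodeCosts(unsigned numAllowed, double weight) {
      std::vector<double> v(numAllowed + 1, 0.0);
      v[0] = (weight != 0.0) ? weight
                             : std::numeric_limits<double>::min();
      return v;
    }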
 
-PBQP::Graph PBQPRegAlloc::constructPBQPProblem() {
+void PBQPBuilder::addSpillCosts(PBQP::Vector &costVec,
+                                PBQP::PBQPNum spillCost) {
+  costVec[0] = spillCost;
+}
 
-  typedef std::vector<const LiveInterval*> LIVector;
-  typedef std::vector<unsigned> RegVector;
+void PBQPBuilder::addInterferenceCosts(
+                                    PBQP::Matrix &costMat,
+                                    const PBQPRAProblem::AllowedSet &vr1Allowed,
+                                    const PBQPRAProblem::AllowedSet &vr2Allowed,
+                                    const TargetRegisterInfo *tri) {
+  assert(costMat.getRows() == vr1Allowed.size() + 1 && "Matrix height mismatch.");
+  assert(costMat.getCols() == vr2Allowed.size() + 1 && "Matrix width mismatch.");
 
-  // This will store the physical intervals for easy reference.
-  LIVector physIntervals;
+  for (unsigned i = 0; i < vr1Allowed.size(); ++i) {
+    unsigned preg1 = vr1Allowed[i];
 
-  // Start by clearing the old node <-> live interval mappings & allowed sets
-  li2Node.clear();
-  node2LI.clear();
-  allowedSets.clear();
+    for (unsigned j = 0; j < vr2Allowed.size(); ++j) {
+      unsigned preg2 = vr2Allowed[j];
 
-  // Populate physIntervals, update preg use:
-  for (LiveIntervals::iterator itr = lis->begin(), end = lis->end();
-       itr != end; ++itr) {
-
-    if (TargetRegisterInfo::isPhysicalRegister(itr->first)) {
-      physIntervals.push_back(itr->second);
-      mri->setPhysRegUsed(itr->second->reg);
+      if (tri->regsOverlap(preg1, preg2)) {
+        costMat[i + 1][j + 1] = std::numeric_limits<PBQP::PBQPNum>::infinity();
+      }
     }
   }
+}
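
To make the matrix shape concrete: for two interfering vregs sharing the
allowed set {R0, R1}, and assuming R0 and R1 do not alias each other, the
edge costs built above come out as below (a sketch with plain doubles, not
the PBQP::Matrix type):

    #include <limits>

    const double inf = std::numeric_limits<double>::infinity();
    // Row/column 0 is the spill option and costs nothing here; cell (i, j)
    // is infinite exactly when the i-th and j-th allowed regs overlap.
    double costMat[3][3] = {
      { 0.0, 0.0, 0.0 },  // vr1 spills
      { 0.0, inf, 0.0 },  // vr1 = R0: forbidden only against vr2 = R0
      { 0.0, 0.0, inf },  // vr1 = R1: forbidden only against vr2 = R1
    };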
 
-  // Iterate over vreg intervals, construct live interval <-> node number
-  //  mappings.
-  for (LiveIntervalSet::const_iterator
-       itr = vregIntervalsToAlloc.begin(), end = vregIntervalsToAlloc.end();
-       itr != end; ++itr) {
-    const LiveInterval *li = *itr;
-
-    li2Node[li] = node2LI.size();
-    node2LI.push_back(li);
-  }
-
-  // Get the set of potential coalesces.
-  CoalesceMap coalesces;
-
-  if (pbqpCoalescing) {
-    coalesces = findCoalesces();
-  }
-
-  // Construct a PBQP solver for this problem
-  PBQP::Graph problem;
-  problemNodes.resize(vregIntervalsToAlloc.size());
-
-  // Resize allowedSets container appropriately.
-  allowedSets.resize(vregIntervalsToAlloc.size());
-
-  // Iterate over virtual register intervals to compute allowed sets...
-  for (unsigned node = 0; node < node2LI.size(); ++node) {
+std::auto_ptr<PBQPRAProblem> PBQPBuilderWithCoalescing::build(
+                                                MachineFunction *mf,
+                                                const LiveIntervals *lis,
+                                                const MachineLoopInfo *loopInfo,
+                                                const RegSet &vregs) {
 
-    // Grab pointers to the interval and its register class.
-    const LiveInterval *li = node2LI[node];
-    const TargetRegisterClass *liRC = mri->getRegClass(li->reg);
+  std::auto_ptr<PBQPRAProblem> p = PBQPBuilder::build(mf, lis, loopInfo, vregs);
+  PBQP::Graph &g = p->getGraph();
 
-    // Start by assuming all allocable registers in the class are allowed...
-    RegVector liAllowed(liRC->allocation_order_begin(*mf),
-                        liRC->allocation_order_end(*mf));
+  const TargetMachine &tm = mf->getTarget();
+  CoalescerPair cp(*tm.getInstrInfo(), *tm.getRegisterInfo());
 
-    // Eliminate the physical registers which overlap with this range, along
-    // with all their aliases.
-    for (LIVector::iterator pItr = physIntervals.begin(),
-       pEnd = physIntervals.end(); pItr != pEnd; ++pItr) {
+  // Scan the machine function and add a coalescing cost whenever the
+  // CoalescerPair check succeeds.
+  for (MachineFunction::const_iterator mbbItr = mf->begin(),
+                                       mbbEnd = mf->end();
+       mbbItr != mbbEnd; ++mbbItr) {
+    const MachineBasicBlock *mbb = &*mbbItr;
 
-      if (!li->overlaps(**pItr))
-        continue;
+    for (MachineBasicBlock::const_iterator miItr = mbb->begin(),
+                                           miEnd = mbb->end();
+         miItr != miEnd; ++miItr) {
+      const MachineInstr *mi = &*miItr;
 
-      unsigned pReg = (*pItr)->reg;
-
-      // If we get here then the live intervals overlap, but we're still ok
-      // if they're coalescable.
-      if (coalesces.find(RegPair(li->reg, pReg)) != coalesces.end())
-        continue;
+      if (!cp.setRegisters(mi))
+        continue; // Not coalescable.
 
-      // If we get here then we have a genuine exclusion.
+      if (cp.getSrcReg() == cp.getDstReg())
+        continue; // Already coalesced.
 
-      // Remove the overlapping reg...
-      RegVector::iterator eraseItr =
-        std::find(liAllowed.begin(), liAllowed.end(), pReg);
+      unsigned dst = cp.getDstReg(),
+               src = cp.getSrcReg();
 
-      if (eraseItr != liAllowed.end())
-        liAllowed.erase(eraseItr);
+      // Cost of a copy relative to a load. The current value is plucked
+      // randomly out of the air.
+      const float copyFactor = 0.5;
+      PBQP::PBQPNum cBenefit =
+        copyFactor * LiveIntervals::getSpillWeight(false, true,
+                                                   loopInfo->getLoopDepth(mbb));
 
-      const unsigned *aliasItr = tri->getAliasSet(pReg);
-
-      if (aliasItr != 0) {
-        // ...and its aliases.
-        for (; *aliasItr != 0; ++aliasItr) {
-          RegVector::iterator eraseItr =
-            std::find(liAllowed.begin(), liAllowed.end(), *aliasItr);
+      if (cp.isPhys()) {
+        if (!lis->isAllocatable(dst))
+          continue;
 
-          if (eraseItr != liAllowed.end()) {
-            liAllowed.erase(eraseItr);
+        const PBQPRAProblem::AllowedSet &allowed = p->getAllowedSet(src);
+        unsigned pregOpt = 0;  
+        while (pregOpt < allowed.size() && allowed[pregOpt] != dst)
+          ++pregOpt;
+        if (pregOpt < allowed.size()) {
+          ++pregOpt; // +1 to account for spill option.
+          PBQP::Graph::NodeItr node = p->getNodeForVReg(src);
+          addPhysRegCoalesce(g.getNodeCosts(node), pregOpt, cBenefit);
+        }
+      } else {
+        const PBQPRAProblem::AllowedSet *allowed1 = &p->getAllowedSet(dst);
+        const PBQPRAProblem::AllowedSet *allowed2 = &p->getAllowedSet(src);
+        PBQP::Graph::NodeItr node1 = p->getNodeForVReg(dst);
+        PBQP::Graph::NodeItr node2 = p->getNodeForVReg(src);
+        PBQP::Graph::EdgeItr edge = g.findEdge(node1, node2);
+        if (edge == g.edgesEnd()) {
+          edge = g.addEdge(node1, node2, PBQP::Matrix(allowed1->size() + 1,
+                                                      allowed2->size() + 1,
+                                                      0));
+        } else {
+          if (g.getEdgeNode1(edge) == node2) {
+            std::swap(node1, node2);
+            std::swap(allowed1, allowed2);
           }
         }
+
+        addVirtRegCoalesce(g.getEdgeCosts(edge), *allowed1, *allowed2,
+                           cBenefit);
       }
     }
+  }
+
+  return p;
+}
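
The while-loop in the cp.isPhys() branch above is a linear search for the
node option corresponding to the destination physreg. The same logic as a
free-standing sketch (hypothetical helper, not part of the patch):

    #include <vector>

    // Return the 1-based node option for physreg 'preg' within a vreg's
    // allowed set, or 0 if the vreg cannot take 'preg' at all. The +1
    // accounts for the spill option occupying index 0.
    unsigned findPRegOption(const std::vector<unsigned> &allowed,
                            unsigned preg) {
      for (unsigned i = 0; i < allowed.size(); ++i)
        if (allowed[i] == preg)
          return i + 1;
      return 0;
    }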
 
-    // Copy the allowed set into a member vector for use when constructing cost
-    // vectors & matrices, and mapping PBQP solutions back to assignments.
-    allowedSets[node] = AllowedSet(liAllowed.begin(), liAllowed.end());
+void PBQPBuilderWithCoalescing::addPhysRegCoalesce(PBQP::Vector &costVec,
+                                                   unsigned pregOption,
+                                                   PBQP::PBQPNum benefit) {
+  costVec[pregOption] += -benefit;
+}
 
-    // Set the spill cost to the interval weight, or epsilon if the
-    // interval weight is zero
-    PBQP::PBQPNum spillCost = (li->weight != 0.0) ?
-        li->weight : std::numeric_limits<PBQP::PBQPNum>::min();
+void PBQPBuilderWithCoalescing::addVirtRegCoalesce(
+                                    PBQP::Matrix &costMat,
+                                    const PBQPRAProblem::AllowedSet &vr1Allowed,
+                                    const PBQPRAProblem::AllowedSet &vr2Allowed,
+                                    PBQP::PBQPNum benefit) {
 
-    // Build a cost vector for this interval.
-    problemNodes[node] =
-      problem.addNode(
-        buildCostVector(li->reg, allowedSets[node], coalesces, spillCost));
+  assert(costMat.getRows() == vr1Allowed.size() + 1 && "Size mismatch.");
+  assert(costMat.getCols() == vr2Allowed.size() + 1 && "Size mismatch.");
 
-  }
+  for (unsigned i = 0; i < vr1Allowed.size(); ++i) {
+    unsigned preg1 = vr1Allowed[i];
+    for (unsigned j = 0; j < vr2Allowed.size(); ++j) {
+      unsigned preg2 = vr2Allowed[j];
 
+      if (preg1 == preg2) {
+        costMat[i + 1][j + 1] += -benefit;
+      } 
+    }
+  }
+}
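
Continuing the {R0, R1} example from the interference case: the benefit is
credited only on cells where both vregs land in the same physical register,
i.e. where the copy between them would vanish. Assuming the two vregs do not
also interfere (an existing infinite cell would stay infinite), the edge
costs after addVirtRegCoalesce look like this sketch, with b a hypothetical
benefit value:

    double b = 1.0;  // hypothetical coalescing benefit
    double costMat[3][3] = {
      { 0.0, 0.0, 0.0 },
      { 0.0,  -b, 0.0 },  // both choose R0: the copy disappears
      { 0.0, 0.0,  -b },  // both choose R1: the copy disappears
    };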
 
-  // Now add the cost matrices...
-  for (unsigned node1 = 0; node1 < node2LI.size(); ++node1) {
-    const LiveInterval *li = node2LI[node1];
 
-    // Test for live range overlaps and insert interference matrices.
-    for (unsigned node2 = node1 + 1; node2 < node2LI.size(); ++node2) {
-      const LiveInterval *li2 = node2LI[node2];
+void RegAllocPBQP::getAnalysisUsage(AnalysisUsage &au) const {
+  au.addRequired<SlotIndexes>();
+  au.addPreserved<SlotIndexes>();
+  au.addRequired<LiveIntervals>();
+  //au.addRequiredID(SplitCriticalEdgesID);
+  au.addRequired<RegisterCoalescer>();
+  au.addRequired<CalculateSpillWeights>();
+  au.addRequired<LiveStacks>();
+  au.addPreserved<LiveStacks>();
+  au.addRequired<MachineLoopInfo>();
+  au.addPreserved<MachineLoopInfo>();
+  if (pbqpPreSplitting)
+    au.addRequired<LoopSplitter>();
+  au.addRequired<VirtRegMap>();
+  au.addRequired<RenderMachineFunction>();
+  MachineFunctionPass::getAnalysisUsage(au);
+}
 
-      CoalesceMap::const_iterator cmItr =
-        coalesces.find(RegPair(li->reg, li2->reg));
+void RegAllocPBQP::findVRegIntervalsToAlloc() {
 
-      PBQP::Matrix *m = 0;
+  // Iterate over all live ranges.
+  for (LiveIntervals::iterator itr = lis->begin(), end = lis->end();
+       itr != end; ++itr) {
 
-      if (cmItr != coalesces.end()) {
-        m = buildCoalescingMatrix(allowedSets[node1], allowedSets[node2],
-                                  cmItr->second);
-      }
-      else if (li->overlaps(*li2)) {
-        m = buildInterferenceMatrix(allowedSets[node1], allowedSets[node2]);
-      }
+    // Ignore physical ones.
+    if (TargetRegisterInfo::isPhysicalRegister(itr->first))
+      continue;
 
-      if (m != 0) {
-        problem.addEdge(problemNodes[node1],
-                        problemNodes[node2],
-                        *m);
+    LiveInterval *li = itr->second;
 
-        delete m;
-      }
+    // If this live interval is non-empty we will use pbqp to allocate it.
+    // Empty intervals we allocate in a simple post-processing stage in
+    // finalizeAlloc.
+    if (!li->empty()) {
+      vregsToAlloc.insert(li->reg);
+    }
+    else {
+      emptyIntervalVRegs.insert(li->reg);
     }
   }
-
-  assert(problem.getNumNodes() == allowedSets.size());
-/*
-  std::cerr << "Allocating for " << problem.getNumNodes() << " nodes, "
-            << problem.getNumEdges() << " edges.\n";
-
-  problem.printDot(std::cerr);
-*/
-  // We're done, PBQP problem constructed - return it.
-  return problem;
 }
 
-void PBQPRegAlloc::addStackInterval(const LiveInterval *spilled,
+void RegAllocPBQP::addStackInterval(const LiveInterval *spilled,
                                     MachineRegisterInfo* mri) {
   int stackSlot = vrm->getStackSlot(spilled->reg);
 
@@ -711,90 +495,81 @@
     vni = stackInterval.getValNumInfo(0);
   else
     vni = stackInterval.getNextValue(
-      SlotIndex(), 0, false, lss->getVNInfoAllocator());
+      SlotIndex(), 0, lss->getVNInfoAllocator());
 
   LiveInterval &rhsInterval = lis->getInterval(spilled->reg);
   stackInterval.MergeRangesInAsValue(rhsInterval, vni);
 }
 
-bool PBQPRegAlloc::mapPBQPToRegAlloc(const PBQP::Solution &solution) {
-
+bool RegAllocPBQP::mapPBQPToRegAlloc(const PBQPRAProblem &problem,
+                                     const PBQP::Solution &solution) {
   // Set to true if we have any spills
   bool anotherRoundNeeded = false;
 
   // Clear the existing allocation.
   vrm->clearAllVirt();
 
-  // Iterate over the nodes mapping the PBQP solution to a register assignment.
-  for (unsigned node = 0; node < node2LI.size(); ++node) {
-    unsigned virtReg = node2LI[node]->reg,
-             allocSelection = solution.getSelection(problemNodes[node]);
-
-
-    // If the PBQP solution is non-zero it's a physical register...
-    if (allocSelection != 0) {
-      // Get the physical reg, subtracting 1 to account for the spill option.
-      unsigned physReg = allowedSets[node][allocSelection - 1];
-
-      DEBUG(dbgs() << "VREG " << virtReg << " -> "
-                   << tri->getName(physReg) << "\n");
-
-      assert(physReg != 0);
-
-      // Add to the virt reg map and update the used phys regs.
-      vrm->assignVirt2Phys(virtReg, physReg);
-    }
-    // ...Otherwise it's a spill.
-    else {
-
-      // Make sure we ignore this virtual reg on the next round
-      // of allocation
-      vregIntervalsToAlloc.erase(&lis->getInterval(virtReg));
-
-      // Insert spill ranges for this live range
-      const LiveInterval *spillInterval = node2LI[node];
-      double oldSpillWeight = spillInterval->weight;
+  const PBQP::Graph &g = problem.getGraph();
+  // Iterate over the nodes mapping the PBQP solution to a register
+  // assignment.
+  for (PBQP::Graph::ConstNodeItr node = g.nodesBegin(),
+                                 nodeEnd = g.nodesEnd();
+       node != nodeEnd; ++node) {
+    unsigned vreg = problem.getVRegForNode(node);
+    unsigned alloc = solution.getSelection(node);
+
+    if (problem.isPRegOption(vreg, alloc)) {
+      unsigned preg = problem.getPRegForOption(vreg, alloc);    
+      DEBUG(dbgs() << "VREG " << vreg << " -> " << tri->getName(preg) << "\n");
+      assert(preg != 0 && "Invalid preg selected.");
+      vrm->assignVirt2Phys(vreg, preg);      
+    } else if (problem.isSpillOption(vreg, alloc)) {
+      vregsToAlloc.erase(vreg);
+      const LiveInterval* spillInterval = &lis->getInterval(vreg);
+      double oldWeight = spillInterval->weight;
       SmallVector<LiveInterval*, 8> spillIs;
+      rmf->rememberUseDefs(spillInterval);
       std::vector<LiveInterval*> newSpills =
         lis->addIntervalsForSpills(*spillInterval, spillIs, loopInfo, *vrm);
       addStackInterval(spillInterval, mri);
+      rmf->rememberSpills(spillInterval, newSpills);
 
-      (void) oldSpillWeight;
-      DEBUG(dbgs() << "VREG " << virtReg << " -> SPILLED (Cost: "
-                   << oldSpillWeight << ", New vregs: ");
+      (void) oldWeight;
+      DEBUG(dbgs() << "VREG " << vreg << " -> SPILLED (Cost: "
+                   << oldWeight << ", New vregs: ");
 
       // Copy any newly inserted live intervals into the list of regs to
       // allocate.
       for (std::vector<LiveInterval*>::const_iterator
            itr = newSpills.begin(), end = newSpills.end();
            itr != end; ++itr) {
-
         assert(!(*itr)->empty() && "Empty spill range.");
-
         DEBUG(dbgs() << (*itr)->reg << " ");
-
-        vregIntervalsToAlloc.insert(*itr);
+        vregsToAlloc.insert((*itr)->reg);
       }
 
       DEBUG(dbgs() << ")\n");
 
       // We need another round if spill intervals were added.
       anotherRoundNeeded |= !newSpills.empty();
+    } else {
+      assert(false && "Unknown allocation option.");
     }
   }
 
   return !anotherRoundNeeded;
 }
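
The boolean returned here drives a rebuild-and-resolve cycle: every spill
decision introduces fresh intervals which must themselves be allocated. The
loop in runOnMachineFunction (shown further down in this patch) reduces to
the following sketch:

    // Keep constructing and solving PBQP problems until a round finishes
    // without adding new spill intervals.
    bool pbqpAllocComplete = false;
    while (!pbqpAllocComplete) {
      std::auto_ptr<PBQPRAProblem> problem =
        builder->build(mf, lis, loopInfo, vregsToAlloc);
      PBQP::Solution solution =
        PBQP::HeuristicSolver<PBQP::Heuristics::Briggs>::solve(
          problem->getGraph());
      pbqpAllocComplete = mapPBQPToRegAlloc(*problem, solution);
    }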
 
-void PBQPRegAlloc::finalizeAlloc() const {
+
+void RegAllocPBQP::finalizeAlloc() const {
   typedef LiveIntervals::iterator LIIterator;
   typedef LiveInterval::Ranges::const_iterator LRIterator;
 
   // First allocate registers for the empty intervals.
-  for (LiveIntervalSet::const_iterator
-         itr = emptyVRegIntervals.begin(), end = emptyVRegIntervals.end();
+  for (RegSet::const_iterator
+         itr = emptyIntervalVRegs.begin(), end = emptyIntervalVRegs.end();
          itr != end; ++itr) {
-    LiveInterval *li = *itr;
+    LiveInterval *li = &lis->getInterval(*itr);
 
     unsigned physReg = vrm->getRegAllocPref(li->reg);
 
@@ -854,7 +629,7 @@
 
 }
 
-bool PBQPRegAlloc::runOnMachineFunction(MachineFunction &MF) {
+bool RegAllocPBQP::runOnMachineFunction(MachineFunction &MF) {
 
   mf = &MF;
   tm = &mf->getTarget();
@@ -865,7 +640,7 @@
   lis = &getAnalysis<LiveIntervals>();
   lss = &getAnalysis<LiveStacks>();
   loopInfo = &getAnalysis<MachineLoopInfo>();
-  RenderMachineFunction *rmf = &getAnalysis<RenderMachineFunction>();
+  rmf = &getAnalysis<RenderMachineFunction>();
 
   vrm = &getAnalysis<VirtRegMap>();
 
@@ -885,7 +660,7 @@
   findVRegIntervalsToAlloc();
 
   // If there are non-empty intervals allocate them using pbqp.
-  if (!vregIntervalsToAlloc.empty()) {
+  if (!vregsToAlloc.empty()) {
 
     bool pbqpAllocComplete = false;
     unsigned round = 0;
@@ -893,11 +668,13 @@
     while (!pbqpAllocComplete) {
       DEBUG(dbgs() << "  PBQP Regalloc round " << round << ":\n");
 
-      PBQP::Graph problem = constructPBQPProblem();
+      std::auto_ptr<PBQPRAProblem> problem =
+        builder->build(mf, lis, loopInfo, vregsToAlloc);
       PBQP::Solution solution =
-        PBQP::HeuristicSolver<PBQP::Heuristics::Briggs>::solve(problem);
+        PBQP::HeuristicSolver<PBQP::Heuristics::Briggs>::solve(
+          problem->getGraph());
 
-      pbqpAllocComplete = mapPBQPToRegAlloc(solution);
+      pbqpAllocComplete = mapPBQPToRegAlloc(*problem, solution);
 
       ++round;
     }
@@ -908,12 +685,8 @@
 
   rmf->renderMachineFunction("After PBQP register allocation.", vrm);
 
-  vregIntervalsToAlloc.clear();
-  emptyVRegIntervals.clear();
-  li2Node.clear();
-  node2LI.clear();
-  allowedSets.clear();
-  problemNodes.clear();
+  vregsToAlloc.clear();
+  emptyIntervalVRegs.clear();
 
   DEBUG(dbgs() << "Post alloc VirtRegMap:\n" << *vrm << "\n");
 
@@ -925,9 +698,18 @@
   return true;
 }
 
-FunctionPass* llvm::createPBQPRegisterAllocator() {
-  return new PBQPRegAlloc();
+FunctionPass* llvm::createPBQPRegisterAllocator(
+                                           std::auto_ptr<PBQPBuilder> builder) {
+  return new RegAllocPBQP(builder);
 }
 
+FunctionPass* llvm::createDefaultPBQPRegisterAllocator() {
+  if (pbqpCoalescing) {
+    return createPBQPRegisterAllocator(
+             std::auto_ptr<PBQPBuilder>(new PBQPBuilderWithCoalescing()));
+  } // else
+  return createPBQPRegisterAllocator(
+           std::auto_ptr<PBQPBuilder>(new PBQPBuilder()));
+}
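
Since PBQPBuilderWithCoalescing overrides PBQPBuilder::build, the builder
hook is virtual, and clients can subclass PBQPBuilder to encode their own
constraints. A hypothetical caller that wants coalescing regardless of the
pbqp-coalescing flag:

    FunctionPass *RA = llvm::createPBQPRegisterAllocator(
        std::auto_ptr<PBQPBuilder>(new PBQPBuilderWithCoalescing()));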
 
 #undef DEBUG_TYPE

Modified: llvm/branches/wendling/eh/lib/CodeGen/RegisterCoalescer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/RegisterCoalescer.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/RegisterCoalescer.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/RegisterCoalescer.cpp Tue Oct 26 19:48:03 2010
@@ -24,7 +24,8 @@
 using namespace llvm;
 
 // Register the RegisterCoalescer interface, providing a nice name to refer to.
-static RegisterAnalysisGroup<RegisterCoalescer> Z("Register Coalescer");
+INITIALIZE_ANALYSIS_GROUP(RegisterCoalescer, "Register Coalescer", 
+                          SimpleRegisterCoalescing)
 char RegisterCoalescer::ID = 0;
 
 // RegisterCoalescer destructor: DO NOT move this to the header file

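The INITIALIZE_ANALYSIS_GROUP change above registers the analysis group and
names SimpleRegisterCoalescing as its default implementation. For reference,
an implementation joins the group via the matching INITIALIZE_AG_PASS macro;
the following is a sketch of the registration shape used elsewhere in the
tree around this time, not a line from this patch:

    INITIALIZE_AG_PASS(SimpleRegisterCoalescing, RegisterCoalescer,
                       "simple-register-coalescing",
                       "Simple Register Coalescing",
                       false, false, true)
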
Modified: llvm/branches/wendling/eh/lib/CodeGen/RegisterScavenging.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/RegisterScavenging.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/RegisterScavenging.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/RegisterScavenging.cpp Tue Oct 26 19:48:03 2010
@@ -21,7 +21,9 @@
 #include "llvm/CodeGen/MachineBasicBlock.h"
 #include "llvm/CodeGen/MachineInstr.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
 #include "llvm/Target/TargetRegisterInfo.h"
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Target/TargetMachine.h"
@@ -226,19 +228,14 @@
     used = ~RegsAvailable & ~ReservedRegs;
 }
 
-/// CreateRegClassMask - Set the bits that represent the registers in the
-/// TargetRegisterClass.
-static void CreateRegClassMask(const TargetRegisterClass *RC, BitVector &Mask) {
-  for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end(); I != E;
-       ++I)
-    Mask.set(*I);
-}
-
 unsigned RegScavenger::FindUnusedReg(const TargetRegisterClass *RC) const {
   for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
        I != E; ++I)
-    if (!isAliasUsed(*I))
+    if (!isAliasUsed(*I)) {
+      DEBUG(dbgs() << "Scavenger found unused reg: " << TRI->getName(*I) <<
+            "\n");
       return *I;
+    }
   return 0;
 }
 
@@ -325,11 +322,9 @@
 unsigned RegScavenger::scavengeRegister(const TargetRegisterClass *RC,
                                         MachineBasicBlock::iterator I,
                                         int SPAdj) {
-  // Mask off the registers which are not in the TargetRegisterClass.
-  BitVector Candidates(NumPhysRegs, false);
-  CreateRegClassMask(RC, Candidates);
-  // Do not include reserved registers.
-  Candidates ^= ReservedRegs & Candidates;
+  // Consider all allocatable registers in the register class initially
+  BitVector Candidates =
+    TRI->getAllocatableSet(*I->getParent()->getParent(), RC);
 
   // Exclude all the registers being used by the instruction.
   for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
@@ -349,8 +344,10 @@
   unsigned SReg = findSurvivorReg(I, Candidates, 25, UseMI);
 
   // If we found an unused register there is no reason to spill it.
-  if (!isAliasUsed(SReg))
+  if (!isAliasUsed(SReg)) {
+    DEBUG(dbgs() << "Scavenged register: " << TRI->getName(SReg) << "\n");
     return SReg;
+  }
 
   assert(ScavengedReg == 0 &&
          "Scavenger slot is live, unable to scavenge another register!");
@@ -366,12 +363,12 @@
            "Cannot scavenge register without an emergency spill slot!");
     TII->storeRegToStackSlot(*MBB, I, SReg, true, ScavengingFrameIndex, RC,TRI);
     MachineBasicBlock::iterator II = prior(I);
-    TRI->eliminateFrameIndex(II, SPAdj, NULL, this);
+    TRI->eliminateFrameIndex(II, SPAdj, this);
 
     // Restore the scavenged register before its use (or first terminator).
     TII->loadRegFromStackSlot(*MBB, UseMI, SReg, ScavengingFrameIndex, RC, TRI);
     II = prior(UseMI);
-    TRI->eliminateFrameIndex(II, SPAdj, NULL, this);
+    TRI->eliminateFrameIndex(II, SPAdj, this);
   }
 
   ScavengeRestore = prior(UseMI);
@@ -380,5 +377,8 @@
   // ScavengedReg = SReg;
   ScavengedRC = RC;
 
+  DEBUG(dbgs() << "Scavenged register (with spill): " << TRI->getName(SReg) <<
+        "\n");
+
   return SReg;
 }
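
For context on how these paths are reached: a typical caller (frame index
elimination, for instance) walks a block with the scavenger tracking
liveness and asks for a temporary register on demand. A rough sketch, with
MBB, I, RC and SPAdj assumed to be supplied by the caller (none of this is
taken from the patch):

    RegScavenger RS;
    RS.enterBasicBlock(MBB);  // start tracking liveness in this block
    RS.forward(I);            // advance to the instruction needing a temp
    unsigned Reg = RS.scavengeRegister(RC, I, SPAdj);
    // Reg is either a genuinely unused register, or one freed up by the
    // emergency spill/restore sequence shown above.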

Modified: llvm/branches/wendling/eh/lib/CodeGen/RenderMachineFunction.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/RenderMachineFunction.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/RenderMachineFunction.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/RenderMachineFunction.cpp Tue Oct 26 19:48:03 2010
@@ -30,9 +30,14 @@
 using namespace llvm;
 
 char RenderMachineFunction::ID = 0;
-INITIALIZE_PASS(RenderMachineFunction, "rendermf",
+INITIALIZE_PASS_BEGIN(RenderMachineFunction, "rendermf",
                 "Render machine functions (and related info) to HTML pages",
-                false, false);
+                false, false)
+INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
+INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
+INITIALIZE_PASS_END(RenderMachineFunction, "rendermf",
+                "Render machine functions (and related info) to HTML pages",
+                false, false)
 
 static cl::opt<std::string>
 outputFileSuffix("rmf-file-suffix",
@@ -57,6 +62,11 @@
               cl::init(""), cl::Hidden);
 
 static cl::opt<bool>
+filterEmpty("rmf-filter-empty-intervals",
+            cl::desc("Don't display empty intervals."),
+            cl::init(true), cl::Hidden);
+
+static cl::opt<bool>
 showEmptyIndexes("rmf-empty-indexes",
                  cl::desc("Render indexes not associated with instructions or "
                           "MBB starts."),
@@ -150,10 +160,14 @@
                                           const std::string &intervalRangeStr) {
     if (intervalRangeStr == "*") {
       intervalTypesToRender |= All;
+    } else if (intervalRangeStr == "virt-nospills*") {
+      intervalTypesToRender |= VirtNoSpills;
+    } else if (intervalRangeStr == "spills*") {
+      intervalTypesToRender |= VirtSpills;
     } else if (intervalRangeStr == "virt*") {
-      intervalTypesToRender |= VirtPlusExplicit;
+      intervalTypesToRender |= AllVirt;
     } else if (intervalRangeStr == "phys*") {
-      intervalTypesToRender |= PhysPlusExplicit;
+      intervalTypesToRender |= AllPhys;
     } else {
       std::istringstream iss(intervalRangeStr);
       unsigned reg1, reg2;
@@ -179,10 +193,12 @@
 
   void MFRenderingOptions::setup(MachineFunction *mf,
                                  const TargetRegisterInfo *tri,
-                                 LiveIntervals *lis) {
+                                 LiveIntervals *lis,
+                                 const RenderMachineFunction *rmf) {
     this->mf = mf;
     this->tri = tri;
     this->lis = lis;
+    this->rmf = rmf;
 
     clear();
   }
@@ -252,12 +268,19 @@
       if (intervalTypesToRender != ExplicitOnly) {
         for (LiveIntervals::iterator liItr = lis->begin(), liEnd = lis->end();
              liItr != liEnd; ++liItr) {
+          LiveInterval *li = liItr->second;
 
-          if ((TargetRegisterInfo::isPhysicalRegister(liItr->first) &&
-               (intervalTypesToRender & PhysPlusExplicit)) ||
-              (TargetRegisterInfo::isVirtualRegister(liItr->first) &&
-               (intervalTypesToRender & VirtPlusExplicit))) {
-            intervalSet.insert(liItr->second);
+          if (filterEmpty && li->empty())
+            continue;
+
+          if ((TargetRegisterInfo::isPhysicalRegister(li->reg) &&
+               (intervalTypesToRender & AllPhys))) {
+            intervalSet.insert(li);
+          } else if (TargetRegisterInfo::isVirtualRegister(li->reg)) {
+            if (((intervalTypesToRender & VirtNoSpills) && !rmf->isSpill(li)) || 
+                ((intervalTypesToRender & VirtSpills) && rmf->isSpill(li))) {
+              intervalSet.insert(li);
+            }
           }
         }
       }
@@ -440,14 +463,9 @@
          liItr != liEnd; ++liItr) {
       LiveInterval *li = liItr->second;
 
-      const TargetRegisterClass *liTRC;
-
       if (TargetRegisterInfo::isPhysicalRegister(li->reg))
         continue;
       
-      liTRC = mri->getRegClass(li->reg);
-     
-
       // For all ranges in the current interal.
       for (LiveInterval::iterator lrItr = li->begin(),
              lrEnd = li->end();
@@ -542,7 +560,26 @@
                                         SlotIndex i) const {
     const MachineInstr *mi = sis->getInstructionFromIndex(i);
 
+    // For uses/defs, recorded use/def indexes override current liveness and
+    // instruction operands (only for the interval which records the indexes).
+    if (i.isUse() || i.isDef()) {
+      UseDefs::const_iterator udItr = useDefs.find(li);
+      if (udItr != useDefs.end()) {
+        const SlotSet &slotSet = udItr->second;
+        if (slotSet.count(i)) {
+          if (i.isUse()) {
+            return Used;
+          }
+          // else
+          return Defined;
+        }
+      }
+    }
+
+    // If the slot is a load/store, or there's no info in the use/def set then
+    // use liveness and instruction operand info.
     if (li->liveAt(i)) {
+
       if (mi == 0) {
         if (vrm == 0 || 
             (vrm->getStackSlot(li->reg) == VirtRegMap::NO_STACK_SLOT)) {
@@ -551,11 +588,9 @@
           return AliveStack;
         }
       } else {
-        if (i.getSlot() == SlotIndex::DEF &&
-            mi->definesRegister(li->reg, tri)) {
+        if (i.isDef() && mi->definesRegister(li->reg, tri)) {
           return Defined;
-        } else if (i.getSlot() == SlotIndex::USE &&
-                   mi->readsRegister(li->reg)) {
+        } else if (i.isUse() && mi->readsRegister(li->reg)) {
           return Used;
         } else {
           if (vrm == 0 || 
@@ -771,7 +806,7 @@
       os << indent + s(2) << "<tr height=6ex>\n";
       
       // Render the code column.
-      if (i.getSlot() == SlotIndex::LOAD) {
+      if (i.isLoad()) {
         MachineBasicBlock *mbb = sis->getMBBFromIndex(i);
         mi = sis->getInstructionFromIndex(i);
 
@@ -882,6 +917,7 @@
   }
 
   bool RenderMachineFunction::runOnMachineFunction(MachineFunction &fn) {
+
     mf = &fn;
     mri = &mf->getRegInfo();
     tri = mf->getTarget().getRegisterInfo();
@@ -889,7 +925,10 @@
     sis = &getAnalysis<SlotIndexes>();
 
     trei.setup(mf, mri, tri, lis);
-    ro.setup(mf, tri, lis);
+    ro.setup(mf, tri, lis, this);
+    spillIntervals.clear();
+    spillFor.clear();
+    useDefs.clear();
 
     fqn = mf->getFunction()->getParent()->getModuleIdentifier() + "." +
           mf->getFunction()->getName().str();
@@ -900,6 +939,50 @@
   void RenderMachineFunction::releaseMemory() {
     trei.clear();
     ro.clear();
+    spillIntervals.clear();
+    spillFor.clear();
+    useDefs.clear();
+  }
+
+  void RenderMachineFunction::rememberUseDefs(const LiveInterval *li) {
+
+    if (!ro.shouldRenderCurrentMachineFunction())
+      return; 
+
+    for (MachineRegisterInfo::reg_iterator rItr = mri->reg_begin(li->reg),
+                                           rEnd = mri->reg_end();
+         rItr != rEnd; ++rItr) {
+      const MachineInstr *mi = &*rItr;
+      if (mi->readsRegister(li->reg)) {
+        useDefs[li].insert(lis->getInstructionIndex(mi).getUseIndex());
+      }
+      if (mi->definesRegister(li->reg)) {
+        useDefs[li].insert(lis->getInstructionIndex(mi).getDefIndex());
+      }
+    }
+  }
+
+  void RenderMachineFunction::rememberSpills(
+                                     const LiveInterval *li,
+                                     const std::vector<LiveInterval*> &spills) {
+
+    if (!ro.shouldRenderCurrentMachineFunction())
+      return; 
+
+    for (std::vector<LiveInterval*>::const_iterator siItr = spills.begin(),
+                                                    siEnd = spills.end();
+         siItr != siEnd; ++siItr) {
+      const LiveInterval *spill = *siItr;
+      spillIntervals[li].insert(spill);
+      spillFor[spill] = li;
+    }
+  }
+
+  bool RenderMachineFunction::isSpill(const LiveInterval *li) const {
+    SpillForMap::const_iterator sfItr = spillFor.find(li);
+    if (sfItr == spillFor.end())
+      return false;
+    return true;
   }
 
   void RenderMachineFunction::renderMachineFunction(

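A note on driving the new filters: together with the header changes below,
-rmf-intervals now accepts "virt-nospills*" and "spills*" alongside "virt*"
and "phys*", and -rmf-filter-empty-intervals (default true) hides empty
intervals. The selection is a plain bitmask; a sketch of the arithmetic,
using the enum values from the header diff below:

    // Mirrors the IntervalTypesToRender enum: AllPhys = 1,
    // VirtNoSpills = 2, VirtSpills = 4, AllVirt = 6, All = 7.
    unsigned typesToRender = 0;
    typesToRender |= 2;  // "virt-nospills*"
    typesToRender |= 4;  // "spills*"  -- together equivalent to "virt*" (6)
    typesToRender |= 1;  // "phys*"    -- now equivalent to "*" (7)
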
Modified: llvm/branches/wendling/eh/lib/CodeGen/RenderMachineFunction.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/RenderMachineFunction.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/RenderMachineFunction.h (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/RenderMachineFunction.h Tue Oct 26 19:48:03 2010
@@ -28,77 +28,12 @@
   class LiveIntervals;
   class MachineInstr;
   class MachineRegisterInfo;
+  class RenderMachineFunction;
   class TargetRegisterClass;
   class TargetRegisterInfo;
   class VirtRegMap;
   class raw_ostream;
 
-  /// \brief Provide extra information about the physical and virtual registers
-  ///        in the function being compiled.
-  class TargetRegisterExtraInfo {
-  public:
-    TargetRegisterExtraInfo();
-
-    /// \brief Set up TargetRegisterExtraInfo with pointers to necessary
-    ///        sources of information.
-    void setup(MachineFunction *mf, MachineRegisterInfo *mri,
-               const TargetRegisterInfo *tri, LiveIntervals *lis);
-
-    /// \brief Recompute tables for changed function.
-    void reset(); 
-
-    /// \brief Free all tables in TargetRegisterExtraInfo.
-    void clear();
-
-    /// \brief Maximum number of registers from trc which alias reg.
-    unsigned getWorst(unsigned reg, const TargetRegisterClass *trc) const;
-
-    /// \brief Returns the number of allocable registers in trc.
-    unsigned getCapacity(const TargetRegisterClass *trc) const;
-
-    /// \brief Return the number of registers of class trc that may be
-    ///        needed at slot i.
-    unsigned getPressureAtSlot(const TargetRegisterClass *trc,
-                               SlotIndex i) const;
-
-    /// \brief Return true if the number of registers of type trc that may be
-    ///        needed at slot i is greater than the capacity of trc.
-    bool classOverCapacityAtSlot(const TargetRegisterClass *trc,
-                                 SlotIndex i) const;
-
-  private:
-
-    MachineFunction *mf;
-    MachineRegisterInfo *mri;
-    const TargetRegisterInfo *tri;
-    LiveIntervals *lis;
-
-    typedef std::map<const TargetRegisterClass*, unsigned> WorstMapLine;
-    typedef std::map<const TargetRegisterClass*, WorstMapLine> VRWorstMap;
-    VRWorstMap vrWorst;
-
-    typedef std::map<unsigned, WorstMapLine> PRWorstMap;
-    PRWorstMap prWorst;
-
-    typedef std::map<const TargetRegisterClass*, unsigned> CapacityMap;
-    CapacityMap capacityMap;
-
-    typedef std::map<const TargetRegisterClass*, unsigned> PressureMapLine;
-    typedef std::map<SlotIndex, PressureMapLine> PressureMap;
-    PressureMap pressureMap;
-
-    bool mapsPopulated;
-
-    /// \brief Initialise the 'worst' table.
-    void initWorst();
- 
-    /// \brief Initialise the 'capacity' table.
-    void initCapacity();
-
-    /// \brief Initialise/Reset the 'pressure' and live states tables.
-    void resetPressureAndLiveStates();
-  };
-
   /// \brief Helper class to process rendering options. Tries to be as lazy as
   ///        possible.
   class MFRenderingOptions {
@@ -125,7 +60,7 @@
 
     /// Initialise the rendering options.
     void setup(MachineFunction *mf, const TargetRegisterInfo *tri,
-               LiveIntervals *lis);
+               LiveIntervals *lis, const RenderMachineFunction *rmf);
 
     /// Clear translations of options to the current function.
     void clear();
@@ -160,9 +95,11 @@
 
     static std::set<std::pair<unsigned, unsigned> > intervalNumsToRender;
     typedef enum { ExplicitOnly     = 0,
-                   VirtPlusExplicit = 1,
-                   PhysPlusExplicit = 2,
-                   All              = 3 }
+                   AllPhys          = 1,
+                   VirtNoSpills     = 2,
+                   VirtSpills       = 4,
+                   AllVirt          = 6,
+                   All              = 7 }
       IntervalTypesToRender;
     static unsigned intervalTypesToRender;
 
@@ -180,6 +117,7 @@
     MachineFunction *mf;
     const TargetRegisterInfo *tri;
     LiveIntervals *lis;
+    const RenderMachineFunction *rmf;
 
     mutable bool regClassesTranslatedToCurrentFunction;
     mutable RegClassSet regClassSet;
@@ -192,13 +130,81 @@
     void translateIntervalNumbersToCurrentFunction() const;
   };
 
+  /// \brief Provide extra information about the physical and virtual registers
+  ///        in the function being compiled.
+  class TargetRegisterExtraInfo {
+  public:
+    TargetRegisterExtraInfo();
+
+    /// \brief Set up TargetRegisterExtraInfo with pointers to necessary
+    ///        sources of information.
+    void setup(MachineFunction *mf, MachineRegisterInfo *mri,
+               const TargetRegisterInfo *tri, LiveIntervals *lis);
+
+    /// \brief Recompute tables for changed function.
+    void reset(); 
+
+    /// \brief Free all tables in TargetRegisterExtraInfo.
+    void clear();
+
+    /// \brief Maximum number of registers from trc which alias reg.
+    unsigned getWorst(unsigned reg, const TargetRegisterClass *trc) const;
+
+    /// \brief Returns the number of allocable registers in trc.
+    unsigned getCapacity(const TargetRegisterClass *trc) const;
+
+    /// \brief Return the number of registers of class trc that may be
+    ///        needed at slot i.
+    unsigned getPressureAtSlot(const TargetRegisterClass *trc,
+                               SlotIndex i) const;
+
+    /// \brief Return true if the number of registers of type trc that may be
+    ///        needed at slot i is greater than the capacity of trc.
+    bool classOverCapacityAtSlot(const TargetRegisterClass *trc,
+                                 SlotIndex i) const;
+
+  private:
+
+    MachineFunction *mf;
+    MachineRegisterInfo *mri;
+    const TargetRegisterInfo *tri;
+    LiveIntervals *lis;
+
+    typedef std::map<const TargetRegisterClass*, unsigned> WorstMapLine;
+    typedef std::map<const TargetRegisterClass*, WorstMapLine> VRWorstMap;
+    VRWorstMap vrWorst;
+
+    typedef std::map<unsigned, WorstMapLine> PRWorstMap;
+    PRWorstMap prWorst;
+
+    typedef std::map<const TargetRegisterClass*, unsigned> CapacityMap;
+    CapacityMap capacityMap;
+
+    typedef std::map<const TargetRegisterClass*, unsigned> PressureMapLine;
+    typedef std::map<SlotIndex, PressureMapLine> PressureMap;
+    PressureMap pressureMap;
+
+    bool mapsPopulated;
+
+    /// \brief Initialise the 'worst' table.
+    void initWorst();
+ 
+    /// \brief Initialise the 'capacity' table.
+    void initCapacity();
+
+    /// \brief Initialise/Reset the 'pressure' and live states tables.
+    void resetPressureAndLiveStates();
+  };
+
   /// \brief Render MachineFunction objects and related information to a HTML
   ///        page.
   class RenderMachineFunction : public MachineFunctionPass {
   public:
     static char ID;
 
-    RenderMachineFunction() : MachineFunctionPass(&ID) {}
+    RenderMachineFunction() : MachineFunctionPass(ID) {
+      initializeRenderMachineFunctionPass(*PassRegistry::getPassRegistry());
+    }
 
     virtual void getAnalysisUsage(AnalysisUsage &au) const;
 
@@ -206,6 +212,13 @@
 
     virtual void releaseMemory();
 
+    void rememberUseDefs(const LiveInterval *li);
+
+    void rememberSpills(const LiveInterval *li,
+                        const std::vector<LiveInterval*> &spills);
+
+    bool isSpill(const LiveInterval *li) const;
+
     /// \brief Render this machine function to HTML.
     /// 
     /// @param renderContextStr This parameter will be included in the top of
@@ -225,10 +238,8 @@
 
   private:
     class Spacer;
-
     friend raw_ostream& operator<<(raw_ostream &os, const Spacer &s);
 
-
     std::string fqn;
 
     MachineFunction *mf;
@@ -241,6 +252,8 @@
     TargetRegisterExtraInfo trei;
     MFRenderingOptions ro;
 
+    
+
     // Utilities.
     typedef enum { Dead, Defined, Used, AliveReg, AliveStack } LiveState;
     LiveState getLiveStateAt(const LiveInterval *li, SlotIndex i) const;
@@ -249,6 +262,17 @@
     PressureState getPressureStateAt(const TargetRegisterClass *trc,
                                      SlotIndex i) const;
 
+    typedef std::map<const LiveInterval*, std::set<const LiveInterval*> >
+      SpillIntervals;
+    SpillIntervals spillIntervals;
+
+    typedef std::map<const LiveInterval*, const LiveInterval*> SpillForMap;
+    SpillForMap spillFor;
+
+    typedef std::set<SlotIndex> SlotSet;
+    typedef std::map<const LiveInterval*, SlotSet> UseDefs;
+    UseDefs useDefs;
+
     // ---------- Rendering methods ----------
 
     /// For inserting spaces when pretty printing.

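An aside for readers of the hunk above: the new TargetRegisterExtraInfo class is essentially per-slot bookkeeping of register-class demand versus capacity. Below is a minimal standalone sketch of that query pattern; the types, names, and numbers are illustrative stand-ins, not the LLVM API.

#include <iostream>
#include <map>
#include <string>

// "RegClass" and "Slot" stand in for TargetRegisterClass* and SlotIndex.
typedef std::string RegClass;
typedef unsigned Slot;

struct PressureTable {
  std::map<RegClass, unsigned> capacity;                  // allocable regs
  std::map<Slot, std::map<RegClass, unsigned> > pressure; // demand per slot

  unsigned getCapacity(const RegClass &trc) const {
    std::map<RegClass, unsigned>::const_iterator i = capacity.find(trc);
    return i == capacity.end() ? 0 : i->second;
  }

  unsigned getPressureAtSlot(const RegClass &trc, Slot s) const {
    std::map<Slot, std::map<RegClass, unsigned> >::const_iterator i =
        pressure.find(s);
    if (i == pressure.end())
      return 0;
    std::map<RegClass, unsigned>::const_iterator j = i->second.find(trc);
    return j == i->second.end() ? 0 : j->second;
  }

  bool classOverCapacityAtSlot(const RegClass &trc, Slot s) const {
    return getPressureAtSlot(trc, s) > getCapacity(trc);
  }
};

int main() {
  PressureTable t;
  t.capacity["GR32"] = 6;          // six allocable GR32 registers
  t.pressure[42]["GR32"] = 8;      // eight GR32 values wanted at slot 42
  std::cout << t.classOverCapacityAtSlot("GR32", 42) << "\n"; // prints 1
  return 0;
}
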
Modified: llvm/branches/wendling/eh/lib/CodeGen/ScheduleDAGInstrs.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/ScheduleDAGInstrs.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/ScheduleDAGInstrs.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/ScheduleDAGInstrs.cpp Tue Oct 26 19:48:03 2010
@@ -32,9 +32,9 @@
 ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
                                      const MachineLoopInfo &mli,
                                      const MachineDominatorTree &mdt)
-  : ScheduleDAG(mf), MLI(mli), MDT(mdt), Defs(TRI->getNumRegs()),
-    Uses(TRI->getNumRegs()), LoopRegs(MLI, MDT) {
-  MFI = mf.getFrameInfo();
+  : ScheduleDAG(mf), MLI(mli), MDT(mdt), MFI(mf.getFrameInfo()),
+    InstrItins(mf.getTarget().getInstrItineraryData()),
+    Defs(TRI->getNumRegs()), Uses(TRI->getNumRegs()), LoopRegs(MLI, MDT) {
   DbgValueVec.clear();
 }
 
@@ -141,6 +141,39 @@
     }
 }
 
+/// AddSchedBarrierDeps - Add dependencies from instructions in the current
+/// list of instructions being scheduled to the scheduling barrier by adding
+/// the exit SU to the register def and use lists. This is because we want to
+/// make sure instructions which define registers that are either used by
+/// the terminator or are live-out are properly scheduled. This is
+/// especially important when the definition latency of the return value(s)
+/// is too high to be hidden by the branch or when the liveout registers
+/// are used by instructions in the fallthrough block.
+void ScheduleDAGInstrs::AddSchedBarrierDeps() {
+  MachineInstr *ExitMI = InsertPos != BB->end() ? &*InsertPos : 0;
+  ExitSU.setInstr(ExitMI);
+  bool AllDepKnown = ExitMI &&
+    (ExitMI->getDesc().isCall() || ExitMI->getDesc().isBarrier());
+  if (ExitMI && AllDepKnown) {
+    // If it's a call or a barrier, add dependencies on the defs and uses of
+    // the instruction.
+    for (unsigned i = 0, e = ExitMI->getNumOperands(); i != e; ++i) {
+      const MachineOperand &MO = ExitMI->getOperand(i);
+      if (!MO.isReg() || MO.isDef()) continue;
+      unsigned Reg = MO.getReg();
+      if (Reg == 0) continue;
+
+      assert(TRI->isPhysicalRegister(Reg) && "Virtual register encountered!");
+      Uses[Reg].push_back(&ExitSU);
+    }
+  } else {
+    // For others, e.g. fallthrough, conditional branch, assume the exit
+    // uses all the registers.
+    // FIXME: This causes too much compile time regression. We need to compute
+    // liveout instead.
+  }
+}
+
 void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
   // We'll be allocating one SUnit for each instruction, plus one for
   // the region exit node.
@@ -175,6 +208,10 @@
   // without emitting the info from the previous call.
   DbgValueVec.clear();
 
+  // Model data dependencies between instructions being scheduled and the
+  // ExitSU.
+  AddSchedBarrierDeps();
+
   // Walk the list of instructions, from bottom moving up.
   for (MachineBasicBlock::iterator MII = InsertPos, MIE = Begin;
        MII != MIE; --MII) {
@@ -228,6 +265,8 @@
       unsigned AOLatency = (Kind == SDep::Anti) ? 0 : 1;
       for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
         SUnit *DefSU = DefList[i];
+        if (DefSU == &ExitSU)
+          continue;
         if (DefSU != SU &&
             (Kind != SDep::Output || !MO.isDead() ||
              !DefSU->getInstr()->registerDefIsDead(Reg)))
@@ -237,6 +276,8 @@
         std::vector<SUnit *> &DefList = Defs[*Alias];
         for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
           SUnit *DefSU = DefList[i];
+          if (DefSU == &ExitSU)
+            continue;
           if (DefSU != SU &&
               (Kind != SDep::Output || !MO.isDead() ||
                !DefSU->getInstr()->registerDefIsDead(*Alias)))
@@ -258,12 +299,14 @@
           // TODO: Perhaps we should get rid of
           // SpecialAddressLatency and just move this into
           // adjustSchedDependency for the targets that care about it.
-          if (SpecialAddressLatency != 0 && !UnitLatencies) {
+          if (SpecialAddressLatency != 0 && !UnitLatencies &&
+              UseSU != &ExitSU) {
             MachineInstr *UseMI = UseSU->getInstr();
             const TargetInstrDesc &UseTID = UseMI->getDesc();
             int RegUseIndex = UseMI->findRegisterUseOperandIdx(Reg);
             assert(RegUseIndex >= 0 && "UseMI doesn't use register!");
-            if ((UseTID.mayLoad() || UseTID.mayStore()) &&
+            if (RegUseIndex >= 0 &&
+                (UseTID.mayLoad() || UseTID.mayStore()) &&
                 (unsigned)RegUseIndex < UseTID.getNumOperands() &&
                 UseTID.OpInfo[RegUseIndex].isLookupPtrRegClass())
               LDataLatency += SpecialAddressLatency;
@@ -446,6 +489,14 @@
         // Treat all other stores conservatively.
         goto new_alias_chain;
       }
+
+      if (!ExitSU.isPred(SU))
+        // Push stores up a bit to keep them from landing between the
+        // compare and the branch.
+        ExitSU.addPred(SDep(SU, SDep::Order, 0,
+                            /*Reg=*/0, /*isNormalMemory=*/false,
+                            /*isMustAlias=*/false,
+                            /*isArtificial=*/true));
     } else if (TID.mayLoad()) {
       bool MayAlias = true;
       TrueMemOrderLatency = 0;
@@ -498,23 +549,22 @@
 }
 
 void ScheduleDAGInstrs::ComputeLatency(SUnit *SU) {
-  const InstrItineraryData &InstrItins = TM.getInstrItineraryData();
-
   // Compute the latency for the node.
-  SU->Latency =
-    InstrItins.getStageLatency(SU->getInstr()->getDesc().getSchedClass());
+  if (!InstrItins || InstrItins->isEmpty()) {
+    SU->Latency = 1;
 
-  // Simplistic target-independent heuristic: assume that loads take
-  // extra time.
-  if (InstrItins.isEmpty())
+    // Simplistic target-independent heuristic: assume that loads take
+    // extra time.
     if (SU->getInstr()->getDesc().mayLoad())
       SU->Latency += 2;
+  } else
+    SU->Latency =
+      InstrItins->getStageLatency(SU->getInstr()->getDesc().getSchedClass());
 }
 
 void ScheduleDAGInstrs::ComputeOperandLatency(SUnit *Def, SUnit *Use, 
                                               SDep& dep) const {
-  const InstrItineraryData &InstrItins = TM.getInstrItineraryData();
-  if (InstrItins.isEmpty())
+  if (!InstrItins || InstrItins->isEmpty())
     return;
   
   // For a data dependency with a known register...
@@ -528,14 +578,21 @@
   MachineInstr *DefMI = Def->getInstr();
   int DefIdx = DefMI->findRegisterDefOperandIdx(Reg);
   if (DefIdx != -1) {
-    int DefCycle = InstrItins.getOperandCycle(DefMI->getDesc().getSchedClass(),
-                                              DefIdx);
-    if (DefCycle >= 0) {
-      MachineInstr *UseMI = Use->getInstr();
-      const unsigned UseClass = UseMI->getDesc().getSchedClass();
-
-      // For all uses of the register, calculate the maxmimum latency
-      int Latency = -1;
+    const MachineOperand &MO = DefMI->getOperand(DefIdx);
+    if (MO.isReg() && MO.isImplicit() &&
+        DefIdx >= (int)DefMI->getDesc().getNumOperands()) {
+      // This is an implicit def, getOperandLatency() won't return the correct
+      // latency. e.g.
+      //   %D6<def>, %D7<def> = VLD1q16 %R2<kill>, 0, ..., %Q3<imp-def>
+      //   %Q1<def> = VMULv8i16 %Q1<kill>, %Q3<kill>, ...
+      // What we want is to compute latency between def of %D6/%D7 and use of
+      // %Q3 instead.
+      DefIdx = DefMI->findRegisterDefOperandIdx(Reg, false, true, TRI);
+    }
+    MachineInstr *UseMI = Use->getInstr();
+    // For all uses of the register, calculate the maximum latency
+    int Latency = -1;
+    if (UseMI) {
       for (unsigned i = 0, e = UseMI->getNumOperands(); i != e; ++i) {
         const MachineOperand &MO = UseMI->getOperand(i);
         if (!MO.isReg() || !MO.isUse())
@@ -544,15 +601,21 @@
         if (MOReg != Reg)
           continue;
 
-        int UseCycle = InstrItins.getOperandCycle(UseClass, i);
-        if (UseCycle >= 0)
-          Latency = std::max(Latency, DefCycle - UseCycle + 1);
+        int UseCycle = TII->getOperandLatency(InstrItins, DefMI, DefIdx,
+                                              UseMI, i);
+        Latency = std::max(Latency, UseCycle);
       }
-
-      // If we found a latency, then replace the existing dependence latency.
-      if (Latency >= 0)
-        dep.setLatency(Latency);
+    } else {
+      // UseMI is null; this must be a scheduling barrier.
+      if (!InstrItins || InstrItins->isEmpty())
+        return;
+      unsigned DefClass = DefMI->getDesc().getSchedClass();
+      Latency = InstrItins->getOperandCycle(DefClass, DefIdx);
     }
+
+    // If we found a latency, then replace the existing dependence latency.
+    if (Latency >= 0)
+      dep.setLatency(Latency);
   }
 }
 

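The ComputeLatency rewrite above switches from always consulting the itinerary to a guarded fallback: unit latency (plus a small pad for loads) when no itinerary data is available. Here is a standalone sketch of that selection logic; "Itins" stands in for InstrItineraryData, and the latency values are made up.

#include <iostream>

struct Itins {
  bool Empty;
  unsigned getStageLatency(unsigned schedClass) const {
    (void)schedClass;
    return 4; // pretend the itinerary says four cycles
  }
};

unsigned computeLatency(const Itins *itins, bool mayLoad,
                        unsigned schedClass) {
  if (!itins || itins->Empty) {
    // No per-target tables: fall back to unit latency, padding loads a
    // bit, mirroring the restructured ComputeLatency above.
    unsigned latency = 1;
    if (mayLoad)
      latency += 2;
    return latency;
  }
  return itins->getStageLatency(schedClass);
}

int main() {
  Itins it = { false };
  std::cout << computeLatency(0, true, 0) << "\n";   // 3: default + load pad
  std::cout << computeLatency(&it, true, 0) << "\n"; // 4: from the fake table
  return 0;
}
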
Modified: llvm/branches/wendling/eh/lib/CodeGen/ScheduleDAGInstrs.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/ScheduleDAGInstrs.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/ScheduleDAGInstrs.h (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/ScheduleDAGInstrs.h Tue Oct 26 19:48:03 2010
@@ -101,6 +101,7 @@
     const MachineLoopInfo &MLI;
     const MachineDominatorTree &MDT;
     const MachineFrameInfo *MFI;
+    const InstrItineraryData *InstrItins;
 
     /// Defs, Uses - Remember where defs and uses of each physical register
     /// are as we iterate upward through the instructions. This is allocated
@@ -163,6 +164,15 @@
     /// input.
     virtual void BuildSchedGraph(AliasAnalysis *AA);
 
+    /// AddSchedBarrierDeps - Add dependencies from instructions in the current
+    /// list of instructions being scheduled to the scheduling barrier. We want
+    /// to make sure instructions which define registers that are either used
+    /// by the terminator or are live-out are properly scheduled. This is
+    /// especially important when the definition latency of the return value(s)
+    /// is too high to be hidden by the branch or when the liveout registers
+    /// are used by instructions in the fallthrough block.
+    void AddSchedBarrierDeps();
+
     /// ComputeLatency - Compute node latency.
     ///
     virtual void ComputeLatency(SUnit *SU);

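To make the AddSchedBarrierDeps idea concrete: the exit instruction gets its own scheduling unit, and that unit is appended to the use list of every register the exit instruction reads, so the defining instructions acquire data edges to the barrier. A toy sketch, with illustrative containers and register numbers rather than the scheduler's real types:

#include <iostream>
#include <map>
#include <vector>

// "SUnit" is a stand-in for the scheduler's node type.
struct SUnit {
  const char *Name;
};

int main() {
  SUnit ExitSU = { "exit (call/barrier)" };

  // Uses[reg] lists the units that read reg, mirroring the Uses table the
  // scheduler keeps while walking the block bottom-up.
  std::map<unsigned, std::vector<SUnit *> > Uses;

  // Pretend the exit instruction reads physical registers 5 and 7; any
  // instruction defining them now gets a data edge to the barrier.
  const unsigned ExitReads[] = { 5, 7 };
  for (unsigned i = 0; i != sizeof(ExitReads) / sizeof(ExitReads[0]); ++i)
    Uses[ExitReads[i]].push_back(&ExitSU);

  std::cout << "reg 5 has " << Uses[5].size() << " user(s)\n"; // 1
  return 0;
}
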
Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/CMakeLists.txt?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/CMakeLists.txt (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/CMakeLists.txt Tue Oct 26 19:48:03 2010
@@ -21,5 +21,3 @@
   TargetLowering.cpp
   TargetSelectionDAGInfo.cpp
   )
-
-target_link_libraries (LLVMSelectionDAG LLVMAnalysis LLVMAsmPrinter LLVMCodeGen)

Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/DAGCombiner.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/DAGCombiner.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/DAGCombiner.cpp Tue Oct 26 19:48:03 2010
@@ -248,16 +248,19 @@
     bool isAlias(SDValue Ptr1, int64_t Size1,
                  const Value *SrcValue1, int SrcValueOffset1,
                  unsigned SrcValueAlign1,
+                 const MDNode *TBAAInfo1,
                  SDValue Ptr2, int64_t Size2,
                  const Value *SrcValue2, int SrcValueOffset2,
-                 unsigned SrcValueAlign2) const;
+                 unsigned SrcValueAlign2,
+                 const MDNode *TBAAInfo2) const;
 
     /// FindAliasInfo - Extracts the relevant alias information from the memory
     /// node.  Returns true if the operand was a load.
     bool FindAliasInfo(SDNode *N,
                        SDValue &Ptr, int64_t &Size,
                        const Value *&SrcValue, int &SrcValueOffset,
-                       unsigned &SrcValueAlignment) const;
+                       unsigned &SrcValueAlignment,
+                       const MDNode *&TBAAInfo) const;
 
     /// FindBetterChain - Walk up chain skipping non-aliasing memory nodes,
     /// looking for a better chain (aliasing node.)
@@ -671,7 +674,7 @@
     Replace = true;
     return DAG.getExtLoad(ExtType, PVT, dl,
                           LD->getChain(), LD->getBasePtr(),
-                          LD->getSrcValue(), LD->getSrcValueOffset(),
+                          LD->getPointerInfo(),
                           MemVT, LD->isVolatile(),
                           LD->isNonTemporal(), LD->getAlignment());
   }
@@ -893,7 +896,7 @@
       : LD->getExtensionType();
     SDValue NewLD = DAG.getExtLoad(ExtType, PVT, dl,
                                    LD->getChain(), LD->getBasePtr(),
-                                   LD->getSrcValue(), LD->getSrcValueOffset(),
+                                   LD->getPointerInfo(),
                                    MemVT, LD->isVolatile(),
                                    LD->isNonTemporal(), LD->getAlignment());
     SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, VT, NewLD);
@@ -1424,6 +1427,20 @@
                                        N0.getOperand(0).getOperand(1),
                                        N0.getOperand(1)));
 
+  if (N1.getOpcode() == ISD::AND) {
+    SDValue AndOp0 = N1.getOperand(0);
+    ConstantSDNode *AndOp1 = dyn_cast<ConstantSDNode>(N1->getOperand(1)); 
+    unsigned NumSignBits = DAG.ComputeNumSignBits(AndOp0);
+    unsigned DestBits = VT.getScalarType().getSizeInBits();
+    
+    // (add z, (and (sbbl x, x), 1)) -> (sub z, (sbbl x, x))
+    // and similar xforms where the inner op is either ~0 or 0.
+    if (NumSignBits == DestBits && AndOp1 && AndOp1->isOne()) {
+      DebugLoc DL = N->getDebugLoc();
+      return DAG.getNode(ISD::SUB, DL, VT, N->getOperand(0), AndOp0);
+    }
+  }
+
   return SDValue();
 }
 
@@ -2200,8 +2217,7 @@
          TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT))) {
       SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, N0.getDebugLoc(),
                                        LN0->getChain(), LN0->getBasePtr(),
-                                       LN0->getSrcValue(),
-                                       LN0->getSrcValueOffset(), MemVT,
+                                       LN0->getPointerInfo(), MemVT,
                                        LN0->isVolatile(), LN0->isNonTemporal(),
                                        LN0->getAlignment());
       AddToWorkList(N);
@@ -2223,8 +2239,8 @@
          TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT))) {
       SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, N0.getDebugLoc(),
                                        LN0->getChain(),
-                                       LN0->getBasePtr(), LN0->getSrcValue(),
-                                       LN0->getSrcValueOffset(), MemVT,
+                                       LN0->getBasePtr(), LN0->getPointerInfo(),
+                                       MemVT,
                                        LN0->isVolatile(), LN0->isNonTemporal(),
                                        LN0->getAlignment());
       AddToWorkList(N);
@@ -2257,7 +2273,7 @@
           SDValue NewLoad = 
             DAG.getExtLoad(ISD::ZEXTLOAD, LoadResultTy, LN0->getDebugLoc(),
                            LN0->getChain(), LN0->getBasePtr(),
-                           LN0->getSrcValue(), LN0->getSrcValueOffset(),
+                           LN0->getPointerInfo(),
                            ExtVT, LN0->isVolatile(), LN0->isNonTemporal(),
                            LN0->getAlignment());
           AddToWorkList(N);
@@ -2293,7 +2309,7 @@
           SDValue Load =
             DAG.getExtLoad(ISD::ZEXTLOAD, LoadResultTy, LN0->getDebugLoc(),
                            LN0->getChain(), NewPtr,
-                           LN0->getSrcValue(), LN0->getSrcValueOffset(),
+                           LN0->getPointerInfo(),
                            ExtVT, LN0->isVolatile(), LN0->isNonTemporal(),
                            Alignment);
           AddToWorkList(N);
@@ -3505,8 +3521,7 @@
       LoadSDNode *LN0 = cast<LoadSDNode>(N0);
       SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, N->getDebugLoc(),
                                        LN0->getChain(),
-                                       LN0->getBasePtr(), LN0->getSrcValue(),
-                                       LN0->getSrcValueOffset(),
+                                       LN0->getBasePtr(), LN0->getPointerInfo(),
                                        N0.getValueType(),
                                        LN0->isVolatile(), LN0->isNonTemporal(),
                                        LN0->getAlignment());
@@ -3549,8 +3564,8 @@
         TLI.isLoadExtLegal(ISD::SEXTLOAD, MemVT)) {
       SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, N->getDebugLoc(),
                                        LN0->getChain(),
-                                       LN0->getBasePtr(), LN0->getSrcValue(),
-                                       LN0->getSrcValueOffset(), MemVT,
+                                       LN0->getBasePtr(), LN0->getPointerInfo(),
+                                       MemVT,
                                        LN0->isVolatile(), LN0->isNonTemporal(),
                                        LN0->getAlignment());
       CombineTo(N, ExtLoad);
@@ -3694,8 +3709,7 @@
       LoadSDNode *LN0 = cast<LoadSDNode>(N0);
       SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, N->getDebugLoc(),
                                        LN0->getChain(),
-                                       LN0->getBasePtr(), LN0->getSrcValue(),
-                                       LN0->getSrcValueOffset(),
+                                       LN0->getBasePtr(), LN0->getPointerInfo(),
                                        N0.getValueType(),
                                        LN0->isVolatile(), LN0->isNonTemporal(),
                                        LN0->getAlignment());
@@ -3738,8 +3752,8 @@
         TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT)) {
       SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, N->getDebugLoc(),
                                        LN0->getChain(),
-                                       LN0->getBasePtr(), LN0->getSrcValue(),
-                                       LN0->getSrcValueOffset(), MemVT,
+                                       LN0->getBasePtr(), LN0->getPointerInfo(),
+                                       MemVT,
                                        LN0->isVolatile(), LN0->isNonTemporal(),
                                        LN0->getAlignment());
       CombineTo(N, ExtLoad);
@@ -3896,8 +3910,7 @@
       LoadSDNode *LN0 = cast<LoadSDNode>(N0);
       SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, VT, N->getDebugLoc(),
                                        LN0->getChain(),
-                                       LN0->getBasePtr(), LN0->getSrcValue(),
-                                       LN0->getSrcValueOffset(),
+                                       LN0->getBasePtr(), LN0->getPointerInfo(),
                                        N0.getValueType(),
                                        LN0->isVolatile(), LN0->isNonTemporal(),
                                        LN0->getAlignment());
@@ -3941,8 +3954,7 @@
     SDValue ExtLoad = DAG.getExtLoad(LN0->getExtensionType(), VT,
                                      N->getDebugLoc(),
                                      LN0->getChain(), LN0->getBasePtr(),
-                                     LN0->getSrcValue(),
-                                     LN0->getSrcValueOffset(), MemVT,
+                                     LN0->getPointerInfo(), MemVT,
                                      LN0->isVolatile(), LN0->isNonTemporal(),
                                      LN0->getAlignment());
     CombineTo(N, ExtLoad);
@@ -4078,6 +4090,15 @@
         if ((N0.getValueType().getSizeInBits() & (EVTBits-1)) != 0)
           return SDValue();
       }
+      
+      // If the shift amount is larger than the input type then we're not
+      // accessing any of the loaded bytes.  If the load was a zextload/extload
+      // then the result of the shift+trunc is zero/undef (handled elsewhere).
+      // If the load was a sextload then the result is a splat of the sign bit
+      // of the extended byte.  This is not worth optimizing for.
+      if (ShAmt >= VT.getSizeInBits())
+        return SDValue();
+      
     }
   }
 
@@ -4107,10 +4128,10 @@
 
     SDValue Load = (ExtType == ISD::NON_EXTLOAD)
       ? DAG.getLoad(VT, N0.getDebugLoc(), LN0->getChain(), NewPtr,
-                    LN0->getSrcValue(), LN0->getSrcValueOffset() + PtrOff,
+                    LN0->getPointerInfo().getWithOffset(PtrOff),
                     LN0->isVolatile(), LN0->isNonTemporal(), NewAlign)
       : DAG.getExtLoad(ExtType, VT, N0.getDebugLoc(), LN0->getChain(), NewPtr,
-                       LN0->getSrcValue(), LN0->getSrcValueOffset() + PtrOff,
+                       LN0->getPointerInfo().getWithOffset(PtrOff),
                        ExtVT, LN0->isVolatile(), LN0->isNonTemporal(),
                        NewAlign);
 
@@ -4198,8 +4219,8 @@
     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
     SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, N->getDebugLoc(),
                                      LN0->getChain(),
-                                     LN0->getBasePtr(), LN0->getSrcValue(),
-                                     LN0->getSrcValueOffset(), EVT,
+                                     LN0->getBasePtr(), LN0->getPointerInfo(),
+                                     EVT,
                                      LN0->isVolatile(), LN0->isNonTemporal(),
                                      LN0->getAlignment());
     CombineTo(N, ExtLoad);
@@ -4215,8 +4236,8 @@
     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
     SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, N->getDebugLoc(),
                                      LN0->getChain(),
-                                     LN0->getBasePtr(), LN0->getSrcValue(),
-                                     LN0->getSrcValueOffset(), EVT,
+                                     LN0->getBasePtr(), LN0->getPointerInfo(),
+                                     EVT,
                                      LN0->isVolatile(), LN0->isNonTemporal(),
                                      LN0->getAlignment());
     CombineTo(N, ExtLoad);
@@ -4295,7 +4316,9 @@
 
   LoadSDNode *LD1 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 0));
   LoadSDNode *LD2 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 1));
-  if (!LD1 || !LD2 || !ISD::isNON_EXTLoad(LD1) || !LD1->hasOneUse())
+  if (!LD1 || !LD2 || !ISD::isNON_EXTLoad(LD1) || !LD1->hasOneUse() ||
+      LD1->getPointerInfo().getAddrSpace() !=
+         LD2->getPointerInfo().getAddrSpace())
     return SDValue();
   EVT LD1VT = LD1->getValueType(0);
 
@@ -4313,8 +4336,8 @@
     if (NewAlign <= Align &&
         (!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT)))
       return DAG.getLoad(VT, N->getDebugLoc(), LD1->getChain(),
-                         LD1->getBasePtr(), LD1->getSrcValue(),
-                         LD1->getSrcValueOffset(), false, false, Align);
+                         LD1->getBasePtr(), LD1->getPointerInfo(),
+                         false, false, Align);
   }
 
   return SDValue();
@@ -4382,8 +4405,7 @@
 
     if (Align <= OrigAlign) {
       SDValue Load = DAG.getLoad(VT, N->getDebugLoc(), LN0->getChain(),
-                                 LN0->getBasePtr(),
-                                 LN0->getSrcValue(), LN0->getSrcValueOffset(),
+                                 LN0->getBasePtr(), LN0->getPointerInfo(),
                                  LN0->isVolatile(), LN0->isNonTemporal(),
                                  OrigAlign);
       AddToWorkList(N);
@@ -4986,8 +5008,7 @@
     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
     SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, VT, N->getDebugLoc(),
                                      LN0->getChain(),
-                                     LN0->getBasePtr(), LN0->getSrcValue(),
-                                     LN0->getSrcValueOffset(),
+                                     LN0->getBasePtr(), LN0->getPointerInfo(),
                                      N0.getValueType(),
                                      LN0->isVolatile(), LN0->isNonTemporal(),
                                      LN0->getAlignment());
@@ -5084,14 +5105,17 @@
                        N1.getOperand(0), N1.getOperand(1), N2);
   }
 
-  SDNode *Trunc = 0;
-  if (N1.getOpcode() == ISD::TRUNCATE && N1.hasOneUse()) {
-    // Look past truncate.
-    Trunc = N1.getNode();
-    N1 = N1.getOperand(0);
-  }
+  if ((N1.hasOneUse() && N1.getOpcode() == ISD::SRL) ||
+      ((N1.getOpcode() == ISD::TRUNCATE && N1.hasOneUse()) &&
+       (N1.getOperand(0).hasOneUse() &&
+        N1.getOperand(0).getOpcode() == ISD::SRL))) {
+    SDNode *Trunc = 0;
+    if (N1.getOpcode() == ISD::TRUNCATE) {
+      // Look past the truncate.
+      Trunc = N1.getNode();
+      N1 = N1.getOperand(0);
+    }
 
-  if (N1.hasOneUse() && N1.getOpcode() == ISD::SRL) {
     // Match this pattern so that we can generate simpler code:
     //
     //   %a = ...
@@ -5146,6 +5170,10 @@
         }
       }
     }
+
+    if (Trunc)
+      // Restore N1 if the above transformation doesn't match.
+      N1 = N->getOperand(1);
   }
   
   // Transform br(xor(x, y)) -> br(x != y)
@@ -5181,9 +5209,7 @@
           Equal = true;
         }
 
-      SDValue NodeToReplace = Trunc ? SDValue(Trunc, 0) : N1;
-      
-      EVT SetCCVT = NodeToReplace.getValueType();
+      EVT SetCCVT = N1.getValueType();
       if (LegalTypes)
         SetCCVT = TLI.getSetCCResultType(SetCCVT);
       SDValue SetCC = DAG.getSetCC(TheXor->getDebugLoc(),
@@ -5192,9 +5218,9 @@
                                    Equal ? ISD::SETEQ : ISD::SETNE);
       // Replace the uses of XOR with SETCC
       WorkListRemover DeadNodes(*this);
-      DAG.ReplaceAllUsesOfValueWith(NodeToReplace, SetCC, &DeadNodes);
-      removeFromWorkList(NodeToReplace.getNode());
-      DAG.DeleteNode(NodeToReplace.getNode());
+      DAG.ReplaceAllUsesOfValueWith(N1, SetCC, &DeadNodes);
+      removeFromWorkList(N1.getNode());
+      DAG.DeleteNode(N1.getNode());
       return DAG.getNode(ISD::BRCOND, N->getDebugLoc(),
                          MVT::Other, Chain, SetCC, N2);
     }
@@ -5570,8 +5596,8 @@
       if (Align > LD->getAlignment())
         return DAG.getExtLoad(LD->getExtensionType(), LD->getValueType(0),
                               N->getDebugLoc(),
-                              Chain, Ptr, LD->getSrcValue(),
-                              LD->getSrcValueOffset(), LD->getMemoryVT(),
+                              Chain, Ptr, LD->getPointerInfo(),
+                              LD->getMemoryVT(),
                               LD->isVolatile(), LD->isNonTemporal(), Align);
     }
   }
@@ -5587,15 +5613,13 @@
       // Replace the chain to avoid dependency.
       if (LD->getExtensionType() == ISD::NON_EXTLOAD) {
         ReplLoad = DAG.getLoad(N->getValueType(0), LD->getDebugLoc(),
-                               BetterChain, Ptr,
-                               LD->getSrcValue(), LD->getSrcValueOffset(),
+                               BetterChain, Ptr, LD->getPointerInfo(),
                                LD->isVolatile(), LD->isNonTemporal(),
                                LD->getAlignment());
       } else {
         ReplLoad = DAG.getExtLoad(LD->getExtensionType(), LD->getValueType(0),
                                   LD->getDebugLoc(),
-                                  BetterChain, Ptr, LD->getSrcValue(),
-                                  LD->getSrcValueOffset(),
+                                  BetterChain, Ptr, LD->getPointerInfo(),
                                   LD->getMemoryVT(),
                                   LD->isVolatile(),
                                   LD->isNonTemporal(),
@@ -5748,7 +5772,7 @@
   
   ++OpsNarrowed;
   return DAG.getStore(St->getChain(), St->getDebugLoc(), IVal, Ptr, 
-                      St->getSrcValue(), St->getSrcValueOffset()+StOffset,
+                      St->getPointerInfo().getWithOffset(StOffset),
                       false, false, NewAlign).getNode();
 }
 
@@ -5798,9 +5822,12 @@
     return SDValue();
 
   SDValue N0 = Value.getOperand(0);
-  if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse()) {
+  if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
+      Chain == SDValue(N0.getNode(), 1)) {
     LoadSDNode *LD = cast<LoadSDNode>(N0);
-    if (LD->getBasePtr() != Ptr)
+    if (LD->getBasePtr() != Ptr ||
+        LD->getPointerInfo().getAddrSpace() !=
+        ST->getPointerInfo().getAddrSpace())
       return SDValue();
 
     // Find the type to narrow it the load / op / store to.
@@ -5849,14 +5876,14 @@
                                    DAG.getConstant(PtrOff, Ptr.getValueType()));
       SDValue NewLD = DAG.getLoad(NewVT, N0.getDebugLoc(),
                                   LD->getChain(), NewPtr,
-                                  LD->getSrcValue(), LD->getSrcValueOffset(),
+                                  LD->getPointerInfo().getWithOffset(PtrOff),
                                   LD->isVolatile(), LD->isNonTemporal(),
                                   NewAlign);
       SDValue NewVal = DAG.getNode(Opc, Value.getDebugLoc(), NewVT, NewLD,
                                    DAG.getConstant(NewImm, NewVT));
       SDValue NewST = DAG.getStore(Chain, N->getDebugLoc(),
                                    NewVal, NewPtr,
-                                   ST->getSrcValue(), ST->getSrcValueOffset(),
+                                   ST->getPointerInfo().getWithOffset(PtrOff),
                                    false, false, NewAlign);
 
       AddToWorkList(NewPtr.getNode());
@@ -5891,8 +5918,7 @@
         ((!LegalOperations && !ST->isVolatile()) ||
          TLI.isOperationLegalOrCustom(ISD::STORE, SVT)))
       return DAG.getStore(Chain, N->getDebugLoc(), Value.getOperand(0),
-                          Ptr, ST->getSrcValue(),
-                          ST->getSrcValueOffset(), ST->isVolatile(),
+                          Ptr, ST->getPointerInfo(), ST->isVolatile(),
                           ST->isNonTemporal(), OrigAlign);
   }
 
@@ -5916,8 +5942,7 @@
           Tmp = DAG.getConstant((uint32_t)CFP->getValueAPF().
                               bitcastToAPInt().getZExtValue(), MVT::i32);
           return DAG.getStore(Chain, N->getDebugLoc(), Tmp,
-                              Ptr, ST->getSrcValue(),
-                              ST->getSrcValueOffset(), ST->isVolatile(),
+                              Ptr, ST->getPointerInfo(), ST->isVolatile(),
                               ST->isNonTemporal(), ST->getAlignment());
         }
         break;
@@ -5928,8 +5953,7 @@
           Tmp = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt().
                                 getZExtValue(), MVT::i64);
           return DAG.getStore(Chain, N->getDebugLoc(), Tmp,
-                              Ptr, ST->getSrcValue(),
-                              ST->getSrcValueOffset(), ST->isVolatile(),
+                              Ptr, ST->getPointerInfo(), ST->isVolatile(),
                               ST->isNonTemporal(), ST->getAlignment());
         } else if (!ST->isVolatile() &&
                    TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i32)) {
@@ -5941,23 +5965,20 @@
           SDValue Hi = DAG.getConstant(Val >> 32, MVT::i32);
           if (TLI.isBigEndian()) std::swap(Lo, Hi);
 
-          int SVOffset = ST->getSrcValueOffset();
           unsigned Alignment = ST->getAlignment();
           bool isVolatile = ST->isVolatile();
           bool isNonTemporal = ST->isNonTemporal();
 
           SDValue St0 = DAG.getStore(Chain, ST->getDebugLoc(), Lo,
-                                     Ptr, ST->getSrcValue(),
-                                     ST->getSrcValueOffset(),
+                                     Ptr, ST->getPointerInfo(),
                                      isVolatile, isNonTemporal,
                                      ST->getAlignment());
           Ptr = DAG.getNode(ISD::ADD, N->getDebugLoc(), Ptr.getValueType(), Ptr,
                             DAG.getConstant(4, Ptr.getValueType()));
-          SVOffset += 4;
           Alignment = MinAlign(Alignment, 4U);
           SDValue St1 = DAG.getStore(Chain, ST->getDebugLoc(), Hi,
-                                     Ptr, ST->getSrcValue(),
-                                     SVOffset, isVolatile, isNonTemporal,
+                                     Ptr, ST->getPointerInfo().getWithOffset(4),
+                                     isVolatile, isNonTemporal,
                                      Alignment);
           return DAG.getNode(ISD::TokenFactor, N->getDebugLoc(), MVT::Other,
                              St0, St1);
@@ -5973,8 +5994,7 @@
     if (unsigned Align = DAG.InferPtrAlignment(Ptr)) {
       if (Align > ST->getAlignment())
         return DAG.getTruncStore(Chain, N->getDebugLoc(), Value,
-                                 Ptr, ST->getSrcValue(),
-                                 ST->getSrcValueOffset(), ST->getMemoryVT(),
+                                 Ptr, ST->getPointerInfo(), ST->getMemoryVT(),
                                  ST->isVolatile(), ST->isNonTemporal(), Align);
     }
   }
@@ -5990,12 +6010,12 @@
       // Replace the chain to avoid dependency.
       if (ST->isTruncatingStore()) {
         ReplStore = DAG.getTruncStore(BetterChain, N->getDebugLoc(), Value, Ptr,
-                                      ST->getSrcValue(),ST->getSrcValueOffset(),
+                                      ST->getPointerInfo(),
                                       ST->getMemoryVT(), ST->isVolatile(),
                                       ST->isNonTemporal(), ST->getAlignment());
       } else {
         ReplStore = DAG.getStore(BetterChain, N->getDebugLoc(), Value, Ptr,
-                                 ST->getSrcValue(), ST->getSrcValueOffset(),
+                                 ST->getPointerInfo(),
                                  ST->isVolatile(), ST->isNonTemporal(),
                                  ST->getAlignment());
       }
@@ -6029,8 +6049,7 @@
     AddToWorkList(Value.getNode());
     if (Shorter.getNode())
       return DAG.getTruncStore(Chain, N->getDebugLoc(), Shorter,
-                               Ptr, ST->getSrcValue(),
-                               ST->getSrcValueOffset(), ST->getMemoryVT(),
+                               Ptr, ST->getPointerInfo(), ST->getMemoryVT(),
                                ST->isVolatile(), ST->isNonTemporal(),
                                ST->getAlignment());
 
@@ -6063,8 +6082,7 @@
       TLI.isTruncStoreLegal(Value.getOperand(0).getValueType(),
                             ST->getMemoryVT())) {
     return DAG.getTruncStore(Chain, N->getDebugLoc(), Value.getOperand(0),
-                             Ptr, ST->getSrcValue(),
-                             ST->getSrcValueOffset(), ST->getMemoryVT(),
+                             Ptr, ST->getPointerInfo(), ST->getMemoryVT(),
                              ST->isVolatile(), ST->isNonTemporal(),
                              ST->getAlignment());
   }
@@ -6203,8 +6221,9 @@
     }
 
     SDValue NewPtr = LN0->getBasePtr();
+    unsigned PtrOff = 0;
     if (Elt) {
-      unsigned PtrOff = LVT.getSizeInBits() * Elt / 8;
+      PtrOff = LVT.getSizeInBits() * Elt / 8;
       EVT PtrType = NewPtr.getValueType();
       if (TLI.isBigEndian())
         PtrOff = VT.getSizeInBits() / 8 - PtrOff;
@@ -6213,7 +6232,7 @@
     }
 
     return DAG.getLoad(LVT, N->getDebugLoc(), LN0->getChain(), NewPtr,
-                       LN0->getSrcValue(), LN0->getSrcValueOffset(),
+                       LN0->getPointerInfo().getWithOffset(PtrOff),
                        LN0->isVolatile(), LN0->isNonTemporal(), Align);
   }
 
@@ -6581,101 +6600,90 @@
 
   // If this is a select from two identical things, try to pull the operation
   // through the select.
-  if (LHS.getOpcode() == RHS.getOpcode() && LHS.hasOneUse() && RHS.hasOneUse()){
-    // If this is a load and the token chain is identical, replace the select
-    // of two loads with a load through a select of the address to load from.
-    // This triggers in things like "select bool X, 10.0, 123.0" after the FP
-    // constants have been dropped into the constant pool.
-    if (LHS.getOpcode() == ISD::LOAD &&
+  if (LHS.getOpcode() != RHS.getOpcode() ||
+      !LHS.hasOneUse() || !RHS.hasOneUse())
+    return false;
+  
+  // If this is a load and the token chain is identical, replace the select
+  // of two loads with a load through a select of the address to load from.
+  // This triggers in things like "select bool X, 10.0, 123.0" after the FP
+  // constants have been dropped into the constant pool.
+  if (LHS.getOpcode() == ISD::LOAD) {
+    LoadSDNode *LLD = cast<LoadSDNode>(LHS);
+    LoadSDNode *RLD = cast<LoadSDNode>(RHS);
+    
+    // Token chains must be identical.
+    if (LHS.getOperand(0) != RHS.getOperand(0) ||
         // Do not let this transformation reduce the number of volatile loads.
-        !cast<LoadSDNode>(LHS)->isVolatile() &&
-        !cast<LoadSDNode>(RHS)->isVolatile() &&
-        // Token chains must be identical.
-        LHS.getOperand(0) == RHS.getOperand(0)) {
-      LoadSDNode *LLD = cast<LoadSDNode>(LHS);
-      LoadSDNode *RLD = cast<LoadSDNode>(RHS);
-
-      // If this is an EXTLOAD, the VT's must match.
-      if (LLD->getMemoryVT() == RLD->getMemoryVT()) {
+        LLD->isVolatile() || RLD->isVolatile() ||
+        // If this is an EXTLOAD, the VT's must match.
+        LLD->getMemoryVT() != RLD->getMemoryVT() ||
         // FIXME: this discards src value information.  This is
         // over-conservative. It would be beneficial to be able to remember
         // both potential memory locations.  Since we are discarding
         // src value info, don't do the transformation if the memory
         // locations are not in the default address space.
-        unsigned LLDAddrSpace = 0, RLDAddrSpace = 0;
-        if (const Value *LLDVal = LLD->getMemOperand()->getValue()) {
-          if (const PointerType *PT = dyn_cast<PointerType>(LLDVal->getType()))
-            LLDAddrSpace = PT->getAddressSpace();
-        }
-        if (const Value *RLDVal = RLD->getMemOperand()->getValue()) {
-          if (const PointerType *PT = dyn_cast<PointerType>(RLDVal->getType()))
-            RLDAddrSpace = PT->getAddressSpace();
-        }
-        SDValue Addr;
-        if (LLDAddrSpace == 0 && RLDAddrSpace == 0) {
-          if (TheSelect->getOpcode() == ISD::SELECT) {
-            // Check that the condition doesn't reach either load.  If so, folding
-            // this will induce a cycle into the DAG.
-            if ((!LLD->hasAnyUseOfValue(1) ||
-                 !LLD->isPredecessorOf(TheSelect->getOperand(0).getNode())) &&
-                (!RLD->hasAnyUseOfValue(1) ||
-                 !RLD->isPredecessorOf(TheSelect->getOperand(0).getNode()))) {
-              Addr = DAG.getNode(ISD::SELECT, TheSelect->getDebugLoc(),
-                                 LLD->getBasePtr().getValueType(),
-                                 TheSelect->getOperand(0), LLD->getBasePtr(),
-                                 RLD->getBasePtr());
-            }
-          } else {
-            // Check that the condition doesn't reach either load.  If so, folding
-            // this will induce a cycle into the DAG.
-            if ((!LLD->hasAnyUseOfValue(1) ||
-                 (!LLD->isPredecessorOf(TheSelect->getOperand(0).getNode()) &&
-                  !LLD->isPredecessorOf(TheSelect->getOperand(1).getNode()))) &&
-                (!RLD->hasAnyUseOfValue(1) ||
-                 (!RLD->isPredecessorOf(TheSelect->getOperand(0).getNode()) &&
-                  !RLD->isPredecessorOf(TheSelect->getOperand(1).getNode())))) {
-              Addr = DAG.getNode(ISD::SELECT_CC, TheSelect->getDebugLoc(),
-                                 LLD->getBasePtr().getValueType(),
-                                 TheSelect->getOperand(0),
-                                 TheSelect->getOperand(1),
-                                 LLD->getBasePtr(), RLD->getBasePtr(),
-                                 TheSelect->getOperand(4));
-            }
-          }
-        }
-
-        if (Addr.getNode()) {
-          SDValue Load;
-          if (LLD->getExtensionType() == ISD::NON_EXTLOAD) {
-            Load = DAG.getLoad(TheSelect->getValueType(0),
-                               TheSelect->getDebugLoc(),
-                               LLD->getChain(),
-                               Addr, 0, 0,
-                               LLD->isVolatile(),
-                               LLD->isNonTemporal(),
-                               LLD->getAlignment());
-          } else {
-            Load = DAG.getExtLoad(LLD->getExtensionType(),
-                                  TheSelect->getValueType(0),
-                                  TheSelect->getDebugLoc(),
-                                  LLD->getChain(), Addr, 0, 0,
-                                  LLD->getMemoryVT(),
-                                  LLD->isVolatile(),
-                                  LLD->isNonTemporal(),
-                                  LLD->getAlignment());
-          }
-
-          // Users of the select now use the result of the load.
-          CombineTo(TheSelect, Load);
-
-          // Users of the old loads now use the new load's chain.  We know the
-          // old-load value is dead now.
-          CombineTo(LHS.getNode(), Load.getValue(0), Load.getValue(1));
-          CombineTo(RHS.getNode(), Load.getValue(0), Load.getValue(1));
-          return true;
-        }
-      }
-    }
+        LLD->getPointerInfo().getAddrSpace() != 0 ||
+        RLD->getPointerInfo().getAddrSpace() != 0)
+      return false;
+        
+    // Check that the select condition doesn't reach either load.  If so,
+    // folding this will induce a cycle into the DAG.  If not, this is safe to
+    // xform, so create a select of the addresses.
+    SDValue Addr;
+    if (TheSelect->getOpcode() == ISD::SELECT) {
+      SDNode *CondNode = TheSelect->getOperand(0).getNode();
+      if ((LLD->hasAnyUseOfValue(1) && LLD->isPredecessorOf(CondNode)) ||
+          (RLD->hasAnyUseOfValue(1) && RLD->isPredecessorOf(CondNode)))
+        return false;
+      Addr = DAG.getNode(ISD::SELECT, TheSelect->getDebugLoc(),
+                         LLD->getBasePtr().getValueType(),
+                         TheSelect->getOperand(0), LLD->getBasePtr(),
+                         RLD->getBasePtr());
+    } else {  // Otherwise SELECT_CC
+      SDNode *CondLHS = TheSelect->getOperand(0).getNode();
+      SDNode *CondRHS = TheSelect->getOperand(1).getNode();
+
+      if ((LLD->hasAnyUseOfValue(1) &&
+           (LLD->isPredecessorOf(CondLHS) || LLD->isPredecessorOf(CondRHS))) ||
+          (RLD->hasAnyUseOfValue(1) &&
+           (RLD->isPredecessorOf(CondLHS) || RLD->isPredecessorOf(CondRHS))))
+        return false;
+      
+      Addr = DAG.getNode(ISD::SELECT_CC, TheSelect->getDebugLoc(),
+                         LLD->getBasePtr().getValueType(),
+                         TheSelect->getOperand(0),
+                         TheSelect->getOperand(1),
+                         LLD->getBasePtr(), RLD->getBasePtr(),
+                         TheSelect->getOperand(4));
+    }
+
+    SDValue Load;
+    if (LLD->getExtensionType() == ISD::NON_EXTLOAD) {
+      Load = DAG.getLoad(TheSelect->getValueType(0),
+                         TheSelect->getDebugLoc(),
+                         // FIXME: Discards pointer info.
+                         LLD->getChain(), Addr, MachinePointerInfo(),
+                         LLD->isVolatile(), LLD->isNonTemporal(),
+                         LLD->getAlignment());
+    } else {
+      Load = DAG.getExtLoad(LLD->getExtensionType(),
+                            TheSelect->getValueType(0),
+                            TheSelect->getDebugLoc(),
+                            // FIXME: Discards pointer info.
+                            LLD->getChain(), Addr, MachinePointerInfo(),
+                            LLD->getMemoryVT(), LLD->isVolatile(),
+                            LLD->isNonTemporal(), LLD->getAlignment());
+    }
+
+    // Users of the select now use the result of the load.
+    CombineTo(TheSelect, Load);
+
+    // Users of the old loads now use the new load's chain.  We know the
+    // old-load value is dead now.
+    CombineTo(LHS.getNode(), Load.getValue(0), Load.getValue(1));
+    CombineTo(RHS.getNode(), Load.getValue(0), Load.getValue(1));
+    return true;
   }
 
   return false;
@@ -6768,7 +6776,7 @@
         CPIdx = DAG.getNode(ISD::ADD, DL, TLI.getPointerTy(), CPIdx,
                             CstOffset);
         return DAG.getLoad(TV->getValueType(0), DL, DAG.getEntryNode(), CPIdx,
-                           PseudoSourceValue::getConstantPool(), 0, false,
+                           MachinePointerInfo::getConstantPool(), false,
                            false, Alignment);
 
       }
@@ -6817,6 +6825,35 @@
     }
   }
 
+  // fold (select_cc seteq (and x, y), 0, 0, A) -> (and (shr (shl x)) A)
+  // where y has a single bit set.
+  // In plain terms: we can turn the SELECT_CC into an AND
+  // when the condition can be materialized as an all-ones register.  Any
+  // single bit-test can be materialized as an all-ones register with
+  // shift-left and shift-right-arith.
+  if (CC == ISD::SETEQ && N0->getOpcode() == ISD::AND &&
+      N0->getValueType(0) == VT &&
+      N1C && N1C->isNullValue() && 
+      N2C && N2C->isNullValue()) {
+    SDValue AndLHS = N0->getOperand(0);
+    ConstantSDNode *ConstAndRHS = dyn_cast<ConstantSDNode>(N0->getOperand(1));
+    if (ConstAndRHS && ConstAndRHS->getAPIntValue().countPopulation() == 1) {
+      // Shift the tested bit up into the sign bit.
+      APInt AndMask = ConstAndRHS->getAPIntValue();
+      SDValue ShlAmt =
+        DAG.getConstant(AndMask.countLeadingZeros(), getShiftAmountTy());
+      SDValue Shl = DAG.getNode(ISD::SHL, N0.getDebugLoc(), VT, AndLHS, ShlAmt);
+      
+      // Now arithmetic right shift it all the way over, so the result is either
+      // all-ones, or zero.
+      SDValue ShrAmt =
+        DAG.getConstant(AndMask.getBitWidth()-1, getShiftAmountTy());
+      SDValue Shr = DAG.getNode(ISD::SRA, N0.getDebugLoc(), VT, Shl, ShrAmt);
+      
+      return DAG.getNode(ISD::AND, DL, VT, Shr, N3);
+    }
+  }
+
   // fold select C, 16, 0 -> shl C, 4
   if (N2C && N3C && N3C->isNullValue() && N2C->getAPIntValue().isPowerOf2() &&
       TLI.getBooleanContents() == TargetLowering::ZeroOrOneBooleanContent) {
@@ -7011,9 +7048,11 @@
 bool DAGCombiner::isAlias(SDValue Ptr1, int64_t Size1,
                           const Value *SrcValue1, int SrcValueOffset1,
                           unsigned SrcValueAlign1,
+                          const MDNode *TBAAInfo1,
                           SDValue Ptr2, int64_t Size2,
                           const Value *SrcValue2, int SrcValueOffset2,
-                          unsigned SrcValueAlign2) const {
+                          unsigned SrcValueAlign2,
+                          const MDNode *TBAAInfo2) const {
   // If they are the same then they must be aliases.
   if (Ptr1 == Ptr2) return true;
 
@@ -7029,8 +7068,19 @@
   if (Base1 == Base2 || (GV1 && (GV1 == GV2)) || (CV1 && (CV1 == CV2)))
     return !((Offset1 + Size1) <= Offset2 || (Offset2 + Size2) <= Offset1);
 
-  // If we know what the bases are, and they aren't identical, then we know they
-  // cannot alias.
+  // It is possible for different frame indices to alias each other, mostly
+  // when tail call optimization reuses return address slots for arguments.
+  // To catch this case, look up the actual index of frame indices to compute
+  // the real alias relationship.
+  if (isFrameIndex1 && isFrameIndex2) {
+    MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
+    Offset1 += MFI->getObjectOffset(cast<FrameIndexSDNode>(Base1)->getIndex());
+    Offset2 += MFI->getObjectOffset(cast<FrameIndexSDNode>(Base2)->getIndex());
+    return !((Offset1 + Size1) <= Offset2 || (Offset2 + Size2) <= Offset1);
+  }
+
+  // Otherwise, if we know what the bases are, and they aren't identical, then 
+  // we know they cannot alias.
   if ((isFrameIndex1 || CV1 || GV1) && (isFrameIndex2 || CV2 || GV2))
     return false;
 
@@ -7056,7 +7106,8 @@
     int64_t Overlap1 = Size1 + SrcValueOffset1 - MinOffset;
     int64_t Overlap2 = Size2 + SrcValueOffset2 - MinOffset;
     AliasAnalysis::AliasResult AAResult =
-                             AA.alias(SrcValue1, Overlap1, SrcValue2, Overlap2);
+      AA.alias(AliasAnalysis::Location(SrcValue1, Overlap1, TBAAInfo1),
+               AliasAnalysis::Location(SrcValue2, Overlap2, TBAAInfo2));
     if (AAResult == AliasAnalysis::NoAlias)
       return false;
   }
@@ -7071,13 +7122,15 @@
                         SDValue &Ptr, int64_t &Size,
                         const Value *&SrcValue, 
                         int &SrcValueOffset,
-                        unsigned &SrcValueAlign) const {
+                        unsigned &SrcValueAlign,
+                        const MDNode *&TBAAInfo) const {
   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
     Ptr = LD->getBasePtr();
     Size = LD->getMemoryVT().getSizeInBits() >> 3;
     SrcValue = LD->getSrcValue();
     SrcValueOffset = LD->getSrcValueOffset();
     SrcValueAlign = LD->getOriginalAlignment();
+    TBAAInfo = LD->getTBAAInfo();
     return true;
   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
     Ptr = ST->getBasePtr();
@@ -7085,6 +7138,7 @@
     SrcValue = ST->getSrcValue();
     SrcValueOffset = ST->getSrcValueOffset();
     SrcValueAlign = ST->getOriginalAlignment();
+    TBAAInfo = ST->getTBAAInfo();
   } else {
     llvm_unreachable("FindAliasInfo expected a memory operand");
   }
@@ -7105,8 +7159,9 @@
   const Value *SrcValue;
   int SrcValueOffset;
   unsigned SrcValueAlign;
+  const MDNode *SrcTBAAInfo;
   bool IsLoad = FindAliasInfo(N, Ptr, Size, SrcValue, SrcValueOffset, 
-                              SrcValueAlign);
+                              SrcValueAlign, SrcTBAAInfo);
 
   // Starting off.
   Chains.push_back(OriginalChain);
@@ -7150,15 +7205,18 @@
       const Value *OpSrcValue;
       int OpSrcValueOffset;
       unsigned OpSrcValueAlign;
+      const MDNode *OpSrcTBAAInfo;
       bool IsOpLoad = FindAliasInfo(Chain.getNode(), OpPtr, OpSize,
                                     OpSrcValue, OpSrcValueOffset,
-                                    OpSrcValueAlign);
+                                    OpSrcValueAlign,
+                                    OpSrcTBAAInfo);
 
       // If chain is alias then stop here.
       if (!(IsLoad && IsOpLoad) &&
           isAlias(Ptr, Size, SrcValue, SrcValueOffset, SrcValueAlign,
+                  SrcTBAAInfo,
                   OpPtr, OpSize, OpSrcValue, OpSrcValueOffset,
-                  OpSrcValueAlign)) {
+                  OpSrcValueAlign, OpSrcTBAAInfo)) {
         Aliases.push_back(Chain);
       } else {
         // Look further up the chain.

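The new SELECT_CC-to-AND fold in this file rests on a small bit trick: shifting the tested bit into the sign position and arithmetic-shifting it back yields an all-ones or all-zero mask. Below is a standalone check of that identity; it assumes 32-bit int and the usual two's-complement arithmetic right shift (true on all mainstream targets).

#include <cassert>

// For a 32-bit x and a mask y with exactly one bit set:
//   ((x & y) == 0 ? 0 : A)  ==  (((int)(x << lz(y)) >> 31) & A)
// where lz(y) counts y's leading zeros.
int foldSelectCC(unsigned x, unsigned y /* single bit set */, int a) {
  unsigned lz = 0;
  for (unsigned m = 0x80000000u; m != 0 && !(y & m); m >>= 1)
    ++lz;
  int shl = (int)(x << lz); // tested bit lands in the sign position
  int shr = shl >> 31;      // arithmetic shift: all-ones or zero
  return shr & a;
}

int main() {
  const unsigned y = 1u << 3;
  for (unsigned x = 0; x != 64; ++x) {
    int ref = ((x & y) == 0) ? 0 : 77;
    assert(foldSelectCC(x, y, 77) == ref);
  }
  return 0;
}
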
Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/FastISel.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/FastISel.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/FastISel.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/FastISel.cpp Tue Oct 26 19:48:03 2010
@@ -146,7 +146,7 @@
   return Reg;
 }
 
-/// materializeRegForValue - Helper for getRegForVale. This function is
+/// materializeRegForValue - Helper for getRegForValue. This function is
 /// called when the value isn't already available in a register and must
 /// be materialized with new instructions.
 unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
@@ -467,16 +467,28 @@
       return true;
 
     const Value *Address = DI->getAddress();
-    if (!Address)
+    if (!Address || isa<UndefValue>(Address) || isa<AllocaInst>(Address))
       return true;
-    if (isa<UndefValue>(Address))
-      return true;
-    const AllocaInst *AI = dyn_cast<AllocaInst>(Address);
-    // Don't handle byval struct arguments or VLAs, for example.
-    if (!AI)
-      // Building the map above is target independent.  Generating DBG_VALUE
-      // inline is target dependent; do this now.
-      (void)TargetSelectInstruction(cast<Instruction>(I));
+
+    unsigned Reg = 0;
+    unsigned Offset = 0;
+    if (const Argument *Arg = dyn_cast<Argument>(Address)) {
+      if (Arg->hasByValAttr()) {
+        // Byval arguments' frame index is recorded during argument lowering.
+        // Use this info directly.
+        Offset = FuncInfo.getByValArgumentFrameIndex(Arg);
+        if (Offset)
+          Reg = TRI.getFrameRegister(*FuncInfo.MF);
+      } 
+    }
+    if (!Reg)
+      Reg = getRegForValue(Address);
+    
+    if (Reg)
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, 
+              TII.get(TargetOpcode::DBG_VALUE))
+        .addReg(Reg, RegState::Debug).addImm(Offset)
+        .addMetadata(DI->getVariable());
     return true;
   }
   case Intrinsic::dbg_value: {

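The dbg_declare change above prefers a frame index recorded at argument-lowering time for byval arguments, and only falls back to a value-register lookup otherwise. A toy sketch of that decision; the struct, the register numbering, and the frame-register number are all illustrative stand-ins.

#include <iostream>
#include <map>

// "ValueRec" stands in for the dbg_declare address operand.
struct ValueRec {
  bool IsByValArg;
  int Id;
};

std::map<int, int> ByValArgFrameIndex; // arg id -> recorded frame index

int getRegForValueStandIn(const ValueRec &v) { return 100 + v.Id; }

void lowerDbgDeclare(const ValueRec &addr) {
  int Reg = 0, Offset = 0;
  if (addr.IsByValArg) {
    // Prefer the frame index recorded during argument lowering.
    Offset = ByValArgFrameIndex[addr.Id];
    if (Offset)
      Reg = 6; // pretend 6 is the frame register
  }
  if (!Reg)
    Reg = getRegForValueStandIn(addr); // fall back to a value register
  std::cout << "DBG_VALUE reg=" << Reg << " off=" << Offset << "\n";
}

int main() {
  ByValArgFrameIndex[1] = -4; // recorded when the argument was lowered
  ValueRec byval = { true, 1 };
  ValueRec other = { false, 2 };
  lowerDbgDeclare(byval); // DBG_VALUE reg=6 off=-4
  lowerDbgDeclare(other); // DBG_VALUE reg=102 off=0
  return 0;
}
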
Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp Tue Oct 26 19:48:03 2010
@@ -232,6 +232,7 @@
   ArgDbgValues.clear();
   LiveOutRegInfo.clear();
   MBBMap.clear();
+  ByValArgFrameIndexMap.clear();
   RegFixups.clear();
   StaticAllocaMap.clear();
   ValueMap.clear();
@@ -267,7 +268,30 @@
   return FirstReg;
 }
 
-#if 0///EH-FIXME:
+/// setByValArgumentFrameIndex - Record frame index for the byval
+/// argument. This overrides previous frame index entry for this argument,
+/// if any.
+void FunctionLoweringInfo::setByValArgumentFrameIndex(const Argument *A, 
+                                                      int FI) {
+  assert (A->hasByValAttr() && "Argument does not have byval attribute!");
+  ByValArgFrameIndexMap[A] = FI;
+}
+  
+/// getByValArgumentFrameIndex - Get frame index for the byval argument.
+/// If the argument does not have any assigned frame index then 0 is
+/// returned.
+int FunctionLoweringInfo::getByValArgumentFrameIndex(const Argument *A) {
+  assert (A->hasByValAttr() && "Argument does not have byval attribute!");
+  DenseMap<const Argument *, int>::iterator I = 
+    ByValArgFrameIndexMap.find(A);
+  if (I != ByValArgFrameIndexMap.end())
+    return I->second;
+  DEBUG(dbgs() << "Argument does not have assigned frame index!");
+  return 0;
+}
+
+#if 0 ///EH-FIXME:
+
 /// AddCatchInfo - Extract the personality and type infos from an eh.selector
 /// call, and add them to the specified machine basic block.
 void llvm::AddCatchInfo(const CallInst &I, MachineModuleInfo *MMI,

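The setByValArgumentFrameIndex/getByValArgumentFrameIndex pair added above is a plain map lookup with 0 doubling as "no entry", which assumes a real frame index of 0 is never recorded for a byval argument. A minimal sketch of the pattern, with an illustrative key type in place of Argument*:

#include <cassert>
#include <map>

typedef const void *ArgKey;

static std::map<ArgKey, int> ByValArgFrameIndexMap;

void setByValArgumentFrameIndex(ArgKey a, int fi) {
  ByValArgFrameIndexMap[a] = fi; // overrides any previous entry
}

int getByValArgumentFrameIndex(ArgKey a) {
  std::map<ArgKey, int>::const_iterator i = ByValArgFrameIndexMap.find(a);
  return i != ByValArgFrameIndexMap.end() ? i->second : 0; // 0 == "none"
}

int main() {
  int argTag = 0; // stands in for an Argument
  setByValArgumentFrameIndex(&argTag, -8);
  assert(getByValArgumentFrameIndex(&argTag) == -8);
  return 0;
}
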



