[llvm-branch-commits] [llvm-branch] r117425 [8/9] - in /llvm/branches/wendling/eh: ./ autoconf/ autoconf/m4/ bindings/ada/ bindings/ocaml/llvm/ bindings/ocaml/transforms/scalar/ cmake/ cmake/modules/ docs/ docs/CommandGuide/ docs/tutorial/ examples/ examples/BrainF/ examples/ExceptionDemo/ examples/Fibonacci/ examples/Kaleidoscope/Chapter7/ examples/ModuleMaker/ include/llvm-c/ include/llvm-c/Transforms/ include/llvm/ include/llvm/ADT/ include/llvm/Analysis/ include/llvm/Assembly/ include/llvm/Bitcode/ include/llvm/CodeGen/ i...

Bill Wendling <isanbard at gmail.com>
Tue Oct 26 17:48:11 PDT 2010


Modified: llvm/branches/wendling/eh/lib/VMCore/Attributes.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/VMCore/Attributes.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/VMCore/Attributes.cpp (original)
+++ llvm/branches/wendling/eh/lib/VMCore/Attributes.cpp Tue Oct 26 19:48:03 2010
@@ -70,6 +70,8 @@
     Result += "noimplicitfloat ";
   if (Attrs & Attribute::Naked)
     Result += "naked ";
+  if (Attrs & Attribute::Hotpatch)
+    Result += "hotpatch ";
   if (Attrs & Attribute::StackAlignment) {
     Result += "alignstack(";
     Result += utostr(Attribute::getStackAlignmentFromAttrs(Attrs));
@@ -195,6 +197,7 @@
 }
 
 const AttrListPtr &AttrListPtr::operator=(const AttrListPtr &RHS) {
+  sys::SmartScopedLock<true> Lock(*ALMutex);
   if (AttrList == RHS.AttrList) return *this;
   if (AttrList) AttrList->DropRef();
   AttrList = RHS.AttrList;

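The second Attributes.cpp hunk adds a scoped lock to AttrListPtr::operator= so that the DropRef/AddRef bookkeeping on the shared attribute list cannot race with other threads using the attribute uniquing table. A minimal, self-contained sketch of that pattern (not LLVM code: std::mutex stands in for ALMutex and SmartScopedLock, and the AddRef step that falls outside the hunk above is assumed):

#include <mutex>

struct RefCountedNode {          // hypothetical stand-in for the shared attribute list
  unsigned RefCount;
  RefCountedNode() : RefCount(1) {}
  void AddRef()  { ++RefCount; }
  void DropRef() { if (--RefCount == 0) delete this; }
};

class ListPtr {                  // hypothetical stand-in for AttrListPtr
  RefCountedNode *List;
  static std::mutex Mutex;       // plays the role of ALMutex

public:
  ListPtr() : List(0) {}

  ListPtr &operator=(const ListPtr &RHS) {
    std::lock_guard<std::mutex> Lock(Mutex);  // same role as sys::SmartScopedLock<true>
    if (List == RHS.List) return *this;
    if (List) List->DropRef();
    List = RHS.List;
    if (List) List->AddRef();                 // assumed; not shown in the hunk above
    return *this;
  }
};

std::mutex ListPtr::Mutex;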
Modified: llvm/branches/wendling/eh/lib/VMCore/AutoUpgrade.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/VMCore/AutoUpgrade.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/VMCore/AutoUpgrade.cpp (original)
+++ llvm/branches/wendling/eh/lib/VMCore/AutoUpgrade.cpp Tue Oct 26 19:48:03 2010
@@ -78,6 +78,63 @@
         NewFn = F;
         return true;
       }
+    } else if (Name.compare(5, 9, "arm.neon.", 9) == 0) {
+      if (((Name.compare(14, 5, "vmovl", 5) == 0 ||
+            Name.compare(14, 5, "vaddl", 5) == 0 ||
+            Name.compare(14, 5, "vsubl", 5) == 0 ||
+            Name.compare(14, 5, "vaddw", 5) == 0 ||
+            Name.compare(14, 5, "vsubw", 5) == 0 ||
+            Name.compare(14, 5, "vmull", 5) == 0 ||
+            Name.compare(14, 5, "vmlal", 5) == 0 ||
+            Name.compare(14, 5, "vmlsl", 5) == 0 ||
+            Name.compare(14, 5, "vabdl", 5) == 0 ||
+            Name.compare(14, 5, "vabal", 5) == 0) &&
+           (Name.compare(19, 2, "s.", 2) == 0 ||
+            Name.compare(19, 2, "u.", 2) == 0)) ||
+
+          (Name.compare(14, 4, "vaba", 4) == 0 &&
+           (Name.compare(18, 2, "s.", 2) == 0 ||
+            Name.compare(18, 2, "u.", 2) == 0)) ||
+
+          (Name.compare(14, 6, "vmovn.", 6) == 0)) {
+
+        // Calls to these are transformed into IR without intrinsics.
+        NewFn = 0;
+        return true;
+      }
+      // Old versions of NEON ld/st intrinsics are missing alignment arguments.
+      bool isVLd = (Name.compare(14, 3, "vld", 3) == 0);
+      bool isVSt = (Name.compare(14, 3, "vst", 3) == 0);
+      if (isVLd || isVSt) {
+        unsigned NumVecs = Name.at(17) - '0';
+        if (NumVecs == 0 || NumVecs > 4)
+          return false;
+        bool isLaneOp = (Name.compare(18, 5, "lane.", 5) == 0);
+        if (!isLaneOp && Name.at(18) != '.')
+          return false;
+        unsigned ExpectedArgs = 2; // for the address and alignment
+        if (isVSt || isLaneOp)
+          ExpectedArgs += NumVecs;
+        if (isLaneOp)
+          ExpectedArgs += 1; // for the lane number
+        unsigned NumP = FTy->getNumParams();
+        if (NumP != ExpectedArgs - 1)
+          return false;
+
+        // Change the name of the old (bad) intrinsic, because 
+        // its type is incorrect, but we cannot overload that name.
+        F->setName("");
+
+        // One argument is missing: add the alignment argument.
+        std::vector<const Type*> NewParams;
+        for (unsigned p = 0; p < NumP; ++p)
+          NewParams.push_back(FTy->getParamType(p));
+        NewParams.push_back(Type::getInt32Ty(F->getContext()));
+        FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(),
+                                                 NewParams, false);
+        NewFn = cast<Function>(M->getOrInsertFunction(Name, NewFTy));
+        return true;
+      }
     }
     break;
   case 'b':
@@ -182,7 +239,6 @@
         NewFnName = "llvm.memset.p0i8.i64";
     }
     if (NewFnName) {
-      const FunctionType *FTy = F->getFunctionType();
       NewFn = cast<Function>(M->getOrInsertFunction(NewFnName, 
                                             FTy->getReturnType(),
                                             FTy->getParamType(0),
@@ -232,37 +288,224 @@
     break;
   case 'x': 
     // This fixes all MMX shift intrinsic instructions to take a
-    // v1i64 instead of a v2i32 as the second parameter.
-    if (Name.compare(5,10,"x86.mmx.ps",10) == 0 &&
-        (Name.compare(13,4,"psll", 4) == 0 ||
-         Name.compare(13,4,"psra", 4) == 0 ||
-         Name.compare(13,4,"psrl", 4) == 0) && Name[17] != 'i') {
-      
-      const llvm::Type *VT =
-                    VectorType::get(IntegerType::get(FTy->getContext(), 64), 1);
-      
-      // We don't have to do anything if the parameter already has
-      // the correct type.
-      if (FTy->getParamType(1) == VT)
+    // x86_mmx instead of a v1i64, v2i32, v4i16, or v8i8.
+    if (Name.compare(5, 8, "x86.mmx.", 8) == 0) {
+      const Type *X86_MMXTy = VectorType::getX86_MMXTy(FTy->getContext());
+
+      if (Name.compare(13, 4, "padd", 4) == 0   ||
+          Name.compare(13, 4, "psub", 4) == 0   ||
+          Name.compare(13, 4, "pmul", 4) == 0   ||
+          Name.compare(13, 5, "pmadd", 5) == 0  ||
+          Name.compare(13, 4, "pand", 4) == 0   ||
+          Name.compare(13, 3, "por", 3) == 0    ||
+          Name.compare(13, 4, "pxor", 4) == 0   ||
+          Name.compare(13, 4, "pavg", 4) == 0   ||
+          Name.compare(13, 4, "pmax", 4) == 0   ||
+          Name.compare(13, 4, "pmin", 4) == 0   ||
+          Name.compare(13, 4, "psad", 4) == 0   ||
+          Name.compare(13, 4, "psll", 4) == 0   ||
+          Name.compare(13, 4, "psrl", 4) == 0   ||
+          Name.compare(13, 4, "psra", 4) == 0   ||
+          Name.compare(13, 4, "pack", 4) == 0   ||
+          Name.compare(13, 6, "punpck", 6) == 0 ||
+          Name.compare(13, 4, "pcmp", 4) == 0) {
+        assert(FTy->getNumParams() == 2 && "MMX intrinsic takes 2 args!");
+        const Type *SecondParamTy = X86_MMXTy;
+
+        if (Name.compare(13, 5, "pslli", 5) == 0 ||
+            Name.compare(13, 5, "psrli", 5) == 0 ||
+            Name.compare(13, 5, "psrai", 5) == 0)
+          SecondParamTy = FTy->getParamType(1);
+
+        // Don't do anything if it has the correct types.
+        if (FTy->getReturnType() == X86_MMXTy &&
+            FTy->getParamType(0) == X86_MMXTy &&
+            FTy->getParamType(1) == SecondParamTy)
+          break;
+
+        // We first need to change the name of the old (bad) intrinsic, because
+        // its type is incorrect, but we cannot overload that name. We
+        // arbitrarily unique it here allowing us to construct a correctly named
+        // and typed function below.
+        F->setName("");
+
+        // Now construct the new intrinsic with the correct name and type. We
+        // leave the old function around in order to query its type, whatever it
+        // may be, and correctly convert up to the new type.
+        NewFn = cast<Function>(M->getOrInsertFunction(Name, 
+                                                      X86_MMXTy, X86_MMXTy,
+                                                      SecondParamTy, (Type*)0));
+        return true;
+      }
+
+      if (Name.compare(13, 8, "maskmovq", 8) == 0) {
+        // Don't do anything if it has the correct types.
+        if (FTy->getParamType(0) == X86_MMXTy &&
+            FTy->getParamType(1) == X86_MMXTy)
+          break;
+
+        F->setName("");
+        NewFn = cast<Function>(M->getOrInsertFunction(Name, 
+                                                      FTy->getReturnType(),
+                                                      X86_MMXTy,
+                                                      X86_MMXTy,
+                                                      FTy->getParamType(2),
+                                                      (Type*)0));
+        return true;
+      }
+
+      if (Name.compare(13, 8, "pmovmskb", 8) == 0) {
+        if (FTy->getParamType(0) == X86_MMXTy)
+          break;
+
+        F->setName("");
+        NewFn = cast<Function>(M->getOrInsertFunction(Name, 
+                                                      FTy->getReturnType(),
+                                                      X86_MMXTy,
+                                                      (Type*)0));
+        return true;
+      }
+
+      if (Name.compare(13, 5, "movnt", 5) == 0) {
+        if (FTy->getParamType(1) == X86_MMXTy)
+          break;
+
+        F->setName("");
+        NewFn = cast<Function>(M->getOrInsertFunction(Name, 
+                                                      FTy->getReturnType(),
+                                                      FTy->getParamType(0),
+                                                      X86_MMXTy,
+                                                      (Type*)0));
+        return true;
+      }
+
+      if (Name.compare(13, 7, "palignr", 7) == 0) {
+        if (FTy->getReturnType() == X86_MMXTy &&
+            FTy->getParamType(0) == X86_MMXTy &&
+            FTy->getParamType(1) == X86_MMXTy)
+          break;
+
+        F->setName("");
+        NewFn = cast<Function>(M->getOrInsertFunction(Name, 
+                                                      X86_MMXTy,
+                                                      X86_MMXTy,
+                                                      X86_MMXTy,
+                                                      FTy->getParamType(2),
+                                                      (Type*)0));
+        return true;
+      }
+
+      if (Name.compare(13, 5, "pextr", 5) == 0) {
+        if (FTy->getParamType(0) == X86_MMXTy)
+          break;
+
+        F->setName("");
+        NewFn = cast<Function>(M->getOrInsertFunction(Name, 
+                                                      FTy->getReturnType(),
+                                                      X86_MMXTy,
+                                                      FTy->getParamType(1),
+                                                      (Type*)0));
+        return true;
+      }
+
+      if (Name.compare(13, 5, "pinsr", 5) == 0) {
+        if (FTy->getReturnType() == X86_MMXTy &&
+            FTy->getParamType(0) == X86_MMXTy)
+          break;
+
+        F->setName("");
+        NewFn = cast<Function>(M->getOrInsertFunction(Name, 
+                                                      X86_MMXTy,
+                                                      X86_MMXTy,
+                                                      FTy->getParamType(1),
+                                                      FTy->getParamType(2),
+                                                      (Type*)0));
+        return true;
+      }
+
+      if (Name.compare(13, 12, "cvtsi32.si64", 12) == 0) {
+        if (FTy->getReturnType() == X86_MMXTy)
+          break;
+
+        F->setName("");
+        NewFn = cast<Function>(M->getOrInsertFunction(Name, 
+                                                      X86_MMXTy,
+                                                      FTy->getParamType(0),
+                                                      (Type*)0));
+        return true;
+      }
+
+      if (Name.compare(13, 12, "cvtsi64.si32", 12) == 0) {
+        if (FTy->getParamType(0) == X86_MMXTy)
+          break;
+
+        F->setName("");
+        NewFn = cast<Function>(M->getOrInsertFunction(Name, 
+                                                      FTy->getReturnType(),
+                                                      X86_MMXTy,
+                                                      (Type*)0));
+        return true;
+      }
+
+      if (Name.compare(13, 8, "vec.init", 8) == 0) {
+        if (FTy->getReturnType() == X86_MMXTy)
+          break;
+
+        F->setName("");
+
+        if (Name.compare(21, 2, ".b", 2) == 0)
+          NewFn = cast<Function>(M->getOrInsertFunction(Name, 
+                                                        X86_MMXTy,
+                                                        FTy->getParamType(0),
+                                                        FTy->getParamType(1),
+                                                        FTy->getParamType(2),
+                                                        FTy->getParamType(3),
+                                                        FTy->getParamType(4),
+                                                        FTy->getParamType(5),
+                                                        FTy->getParamType(6),
+                                                        FTy->getParamType(7),
+                                                        (Type*)0));
+        else if (Name.compare(21, 2, ".w", 2) == 0)
+          NewFn = cast<Function>(M->getOrInsertFunction(Name, 
+                                                        X86_MMXTy,
+                                                        FTy->getParamType(0),
+                                                        FTy->getParamType(1),
+                                                        FTy->getParamType(2),
+                                                        FTy->getParamType(3),
+                                                        (Type*)0));
+        else if (Name.compare(21, 2, ".d", 2) == 0)
+          NewFn = cast<Function>(M->getOrInsertFunction(Name, 
+                                                        X86_MMXTy,
+                                                        FTy->getParamType(0),
+                                                        FTy->getParamType(1),
+                                                        (Type*)0));
+        return true;
+      }
+
+
+      if (Name.compare(13, 9, "vec.ext.d", 9) == 0) {
+        if (FTy->getReturnType() == X86_MMXTy &&
+            FTy->getParamType(0) == X86_MMXTy)
+          break;
+
+        F->setName("");
+        NewFn = cast<Function>(M->getOrInsertFunction(Name, 
+                                                      X86_MMXTy,
+                                                      X86_MMXTy,
+                                                      FTy->getParamType(1),
+                                                      (Type*)0));
+        return true;
+      }
+
+      if (Name.compare(13, 9, "emms", 4) == 0 ||
+          Name.compare(13, 9, "femms", 5) == 0) {
+        NewFn = 0;
         break;
-      
-      //  We first need to change the name of the old (bad) intrinsic, because 
-      //  its type is incorrect, but we cannot overload that name. We 
-      //  arbitrarily unique it here allowing us to construct a correctly named 
-      //  and typed function below.
-      F->setName("");
+      }
 
-      assert(FTy->getNumParams() == 2 && "MMX shift intrinsics take 2 args!");
-      
-      //  Now construct the new intrinsic with the correct name and type. We 
-      //  leave the old function around in order to query its type, whatever it 
-      //  may be, and correctly convert up to the new type.
-      NewFn = cast<Function>(M->getOrInsertFunction(Name, 
-                                                    FTy->getReturnType(),
-                                                    FTy->getParamType(0),
-                                                    VT,
-                                                    (Type *)0));
-      return true;
+      // We really shouldn't get here ever.
+      assert(0 && "Invalid MMX intrinsic!");
+      break;
     } else if (Name.compare(5,17,"x86.sse2.loadh.pd",17) == 0 ||
                Name.compare(5,17,"x86.sse2.loadl.pd",17) == 0 ||
                Name.compare(5,16,"x86.sse2.movl.dq",16) == 0 ||
@@ -285,6 +528,16 @@
       // or 0.
       NewFn = 0;
       return true;           
+    } else if (Name.compare(5, 17, "x86.ssse3.pshuf.w", 17) == 0) {
+      // This is an SSE/MMX instruction.
+      const Type *X86_MMXTy = VectorType::getX86_MMXTy(FTy->getContext());
+      NewFn =
+        cast<Function>(M->getOrInsertFunction("llvm.x86.sse.pshuf.w",
+                                              X86_MMXTy,
+                                              X86_MMXTy,
+                                              Type::getInt8Ty(F->getContext()),
+                                              (Type*)0));
+      return true;
     }
 
     break;
@@ -309,6 +562,106 @@
   return Upgraded;
 }
 
+bool llvm::UpgradeGlobalVariable(GlobalVariable *GV) {
+  StringRef Name(GV->getName());
+
+  // We are only upgrading one symbol here.
+  if (Name == ".llvm.eh.catch.all.value") {
+    GV->setName("llvm.eh.catch.all.value");
+    return true;
+  }
+
+  return false;
+}
+
+/// ExtendNEONArgs - For NEON "long" and "wide" operations, where the results
+/// have vector elements twice as big as one or both source operands, do the
+/// sign- or zero-extension that used to be handled by intrinsics.  The
+/// extended values are returned via V0 and V1.
+static void ExtendNEONArgs(CallInst *CI, Value *Arg0, Value *Arg1,
+                           Value *&V0, Value *&V1) {
+  Function *F = CI->getCalledFunction();
+  const std::string& Name = F->getName();
+  bool isLong = (Name.at(18) == 'l');
+  bool isSigned = (Name.at(19) == 's');
+
+  if (isSigned) {
+    if (isLong)
+      V0 = new SExtInst(Arg0, CI->getType(), "", CI);
+    else
+      V0 = Arg0;
+    V1 = new SExtInst(Arg1, CI->getType(), "", CI);
+  } else {
+    if (isLong)
+      V0 = new ZExtInst(Arg0, CI->getType(), "", CI);
+    else
+      V0 = Arg0;
+    V1 = new ZExtInst(Arg1, CI->getType(), "", CI);
+  }
+}
+
+/// CallVABD - As part of expanding a call to one of the old NEON vabdl, vaba,
+/// or vabal intrinsics, construct a call to a vabd intrinsic.  Examine the
+/// name of the old intrinsic to determine whether to use a signed or unsigned
+/// vabd intrinsic.  Get the type from the old call instruction, adjusted for
+/// half-size vector elements if the old intrinsic was vabdl or vabal.
+static Instruction *CallVABD(CallInst *CI, Value *Arg0, Value *Arg1) {
+  Function *F = CI->getCalledFunction();
+  const std::string& Name = F->getName();
+  bool isLong = (Name.at(18) == 'l');
+  bool isSigned = (Name.at(isLong ? 19 : 18) == 's');
+
+  Intrinsic::ID intID;
+  if (isSigned)
+    intID = Intrinsic::arm_neon_vabds;
+  else
+    intID = Intrinsic::arm_neon_vabdu;
+
+  const Type *Ty = CI->getType();
+  if (isLong)
+    Ty = VectorType::getTruncatedElementVectorType(cast<const VectorType>(Ty));
+
+  Function *VABD = Intrinsic::getDeclaration(F->getParent(), intID, &Ty, 1);
+  Value *Operands[2];
+  Operands[0] = Arg0;
+  Operands[1] = Arg1;
+  return CallInst::Create(VABD, Operands, Operands+2, 
+                          "upgraded."+CI->getName(), CI);
+}
+
+/// ConstructNewCallInst - Construct a new CallInst with the signature of NewFn.
+static void ConstructNewCallInst(Function *NewFn, CallInst *OldCI,
+                                 Value **Operands, unsigned NumOps,
+                                 bool AssignName = true) {
+  // Construct a new CallInst.
+  CallInst *NewCI =
+    CallInst::Create(NewFn, Operands, Operands + NumOps,
+                     AssignName ? "upgraded." + OldCI->getName() : "", OldCI);
+
+  NewCI->setTailCall(OldCI->isTailCall());
+  NewCI->setCallingConv(OldCI->getCallingConv());
+
+  // Handle any uses of the old CallInst. If the type has changed, add a cast.
+  if (!OldCI->use_empty()) {
+    if (OldCI->getType() != NewCI->getType()) {
+      Function *OldFn = OldCI->getCalledFunction();
+      CastInst *RetCast =
+        CastInst::Create(CastInst::getCastOpcode(NewCI, true,
+                                                 OldFn->getReturnType(), true),
+                         NewCI, OldFn->getReturnType(), NewCI->getName(),OldCI);
+
+      // Replace all uses of the old call with the new cast which has the
+      // correct type.
+      OldCI->replaceAllUsesWith(RetCast);
+    } else {
+      OldCI->replaceAllUsesWith(NewCI);
+    }
+  }
+
+  // Clean up the old call now that it has been completely upgraded.
+  OldCI->eraseFromParent();
+}
+
 // UpgradeIntrinsicCall - Upgrade a call to an old intrinsic to be a call the 
 // upgraded intrinsic. All argument and return casting must be provided in 
 // order to seamlessly integrate with existing context.
@@ -320,6 +673,60 @@
   assert(F && "CallInst has no function associated with it.");
 
   if (!NewFn) {
+    // Get the Function's name.
+    const std::string& Name = F->getName();
+
+    // Upgrade ARM NEON intrinsics.
+    if (Name.compare(5, 9, "arm.neon.", 9) == 0) {
+      Instruction *NewI;
+      Value *V0, *V1;
+      if (Name.compare(14, 7, "vmovls.", 7) == 0) {
+        NewI = new SExtInst(CI->getArgOperand(0), CI->getType(),
+                            "upgraded." + CI->getName(), CI);
+      } else if (Name.compare(14, 7, "vmovlu.", 7) == 0) {
+        NewI = new ZExtInst(CI->getArgOperand(0), CI->getType(),
+                            "upgraded." + CI->getName(), CI);
+      } else if (Name.compare(14, 4, "vadd", 4) == 0) {
+        ExtendNEONArgs(CI, CI->getArgOperand(0), CI->getArgOperand(1), V0, V1);
+        NewI = BinaryOperator::CreateAdd(V0, V1, "upgraded."+CI->getName(), CI);
+      } else if (Name.compare(14, 4, "vsub", 4) == 0) {
+        ExtendNEONArgs(CI, CI->getArgOperand(0), CI->getArgOperand(1), V0, V1);
+        NewI = BinaryOperator::CreateSub(V0, V1,"upgraded."+CI->getName(),CI);
+      } else if (Name.compare(14, 4, "vmul", 4) == 0) {
+        ExtendNEONArgs(CI, CI->getArgOperand(0), CI->getArgOperand(1), V0, V1);
+        NewI = BinaryOperator::CreateMul(V0, V1,"upgraded."+CI->getName(),CI);
+      } else if (Name.compare(14, 4, "vmla", 4) == 0) {
+        ExtendNEONArgs(CI, CI->getArgOperand(1), CI->getArgOperand(2), V0, V1);
+        Instruction *MulI = BinaryOperator::CreateMul(V0, V1, "", CI);
+        NewI = BinaryOperator::CreateAdd(CI->getArgOperand(0), MulI,
+                                         "upgraded."+CI->getName(), CI);
+      } else if (Name.compare(14, 4, "vmls", 4) == 0) {
+        ExtendNEONArgs(CI, CI->getArgOperand(1), CI->getArgOperand(2), V0, V1);
+        Instruction *MulI = BinaryOperator::CreateMul(V0, V1, "", CI);
+        NewI = BinaryOperator::CreateSub(CI->getArgOperand(0), MulI,
+                                         "upgraded."+CI->getName(), CI);
+      } else if (Name.compare(14, 4, "vabd", 4) == 0) {
+        NewI = CallVABD(CI, CI->getArgOperand(0), CI->getArgOperand(1));
+        NewI = new ZExtInst(NewI, CI->getType(), "upgraded."+CI->getName(), CI);
+      } else if (Name.compare(14, 4, "vaba", 4) == 0) {
+        NewI = CallVABD(CI, CI->getArgOperand(1), CI->getArgOperand(2));
+        if (Name.at(18) == 'l')
+          NewI = new ZExtInst(NewI, CI->getType(), "", CI);
+        NewI = BinaryOperator::CreateAdd(CI->getArgOperand(0), NewI,
+                                         "upgraded."+CI->getName(), CI);
+      } else if (Name.compare(14, 6, "vmovn.", 6) == 0) {
+        NewI = new TruncInst(CI->getArgOperand(0), CI->getType(),
+                             "upgraded." + CI->getName(), CI);
+      } else {
+        llvm_unreachable("Unknown arm.neon function for CallInst upgrade.");
+      }
+      // Replace any uses of the old CallInst.
+      if (!CI->use_empty())
+        CI->replaceAllUsesWith(NewI);
+      CI->eraseFromParent();
+      return;
+    }
+
     bool isLoadH = false, isLoadL = false, isMovL = false;
     bool isMovSD = false, isShufPD = false;
     bool isUnpckhPD = false, isUnpcklPD = false;
@@ -548,31 +955,29 @@
   }
 
   switch (NewFn->getIntrinsicID()) {
-  default:  llvm_unreachable("Unknown function for CallInst upgrade.");
-  case Intrinsic::x86_mmx_psll_d:
-  case Intrinsic::x86_mmx_psll_q:
-  case Intrinsic::x86_mmx_psll_w:
-  case Intrinsic::x86_mmx_psra_d:
-  case Intrinsic::x86_mmx_psra_w:
-  case Intrinsic::x86_mmx_psrl_d:
-  case Intrinsic::x86_mmx_psrl_q:
-  case Intrinsic::x86_mmx_psrl_w: {
-    Value *Operands[2];
-    
-    Operands[0] = CI->getArgOperand(0);
-    
-    // Cast the second parameter to the correct type.
-    BitCastInst *BC = new BitCastInst(CI->getArgOperand(1), 
-                                      NewFn->getFunctionType()->getParamType(1),
-                                      "upgraded.", CI);
-    Operands[1] = BC;
-    
-    //  Construct a new CallInst
-    CallInst *NewCI = CallInst::Create(NewFn, Operands, Operands+2, 
-                                       "upgraded."+CI->getName(), CI);
+  default: llvm_unreachable("Unknown function for CallInst upgrade.");
+  case Intrinsic::arm_neon_vld1:
+  case Intrinsic::arm_neon_vld2:
+  case Intrinsic::arm_neon_vld3:
+  case Intrinsic::arm_neon_vld4:
+  case Intrinsic::arm_neon_vst1:
+  case Intrinsic::arm_neon_vst2:
+  case Intrinsic::arm_neon_vst3:
+  case Intrinsic::arm_neon_vst4:
+  case Intrinsic::arm_neon_vld2lane:
+  case Intrinsic::arm_neon_vld3lane:
+  case Intrinsic::arm_neon_vld4lane:
+  case Intrinsic::arm_neon_vst2lane:
+  case Intrinsic::arm_neon_vst3lane:
+  case Intrinsic::arm_neon_vst4lane: {
+    // Add a default alignment argument of 1.
+    SmallVector<Value*, 8> Operands(CS.arg_begin(), CS.arg_end());
+    Operands.push_back(ConstantInt::get(Type::getInt32Ty(C), 1));
+    CallInst *NewCI = CallInst::Create(NewFn, Operands.begin(), Operands.end(),
+                                       CI->getName(), CI);
     NewCI->setTailCall(CI->isTailCall());
     NewCI->setCallingConv(CI->getCallingConv());
-    
+
     //  Handle any uses of the old CallInst.
     if (!CI->use_empty())
       //  Replace all uses of the old call with the new cast which has the 
@@ -583,6 +988,266 @@
     CI->eraseFromParent();
     break;
   }        
+
+  case Intrinsic::x86_mmx_padd_b:
+  case Intrinsic::x86_mmx_padd_w:
+  case Intrinsic::x86_mmx_padd_d:
+  case Intrinsic::x86_mmx_padd_q:
+  case Intrinsic::x86_mmx_padds_b:
+  case Intrinsic::x86_mmx_padds_w:
+  case Intrinsic::x86_mmx_paddus_b:
+  case Intrinsic::x86_mmx_paddus_w:
+  case Intrinsic::x86_mmx_psub_b:
+  case Intrinsic::x86_mmx_psub_w:
+  case Intrinsic::x86_mmx_psub_d:
+  case Intrinsic::x86_mmx_psub_q:
+  case Intrinsic::x86_mmx_psubs_b:
+  case Intrinsic::x86_mmx_psubs_w:
+  case Intrinsic::x86_mmx_psubus_b:
+  case Intrinsic::x86_mmx_psubus_w:
+  case Intrinsic::x86_mmx_pmulh_w:
+  case Intrinsic::x86_mmx_pmull_w:
+  case Intrinsic::x86_mmx_pmulhu_w:
+  case Intrinsic::x86_mmx_pmulu_dq:
+  case Intrinsic::x86_mmx_pmadd_wd:
+  case Intrinsic::x86_mmx_pand:
+  case Intrinsic::x86_mmx_pandn:
+  case Intrinsic::x86_mmx_por:
+  case Intrinsic::x86_mmx_pxor:
+  case Intrinsic::x86_mmx_pavg_b:
+  case Intrinsic::x86_mmx_pavg_w:
+  case Intrinsic::x86_mmx_pmaxu_b:
+  case Intrinsic::x86_mmx_pmaxs_w:
+  case Intrinsic::x86_mmx_pminu_b:
+  case Intrinsic::x86_mmx_pmins_w:
+  case Intrinsic::x86_mmx_psad_bw:
+  case Intrinsic::x86_mmx_psll_w:
+  case Intrinsic::x86_mmx_psll_d:
+  case Intrinsic::x86_mmx_psll_q:
+  case Intrinsic::x86_mmx_pslli_w:
+  case Intrinsic::x86_mmx_pslli_d:
+  case Intrinsic::x86_mmx_pslli_q:
+  case Intrinsic::x86_mmx_psrl_w:
+  case Intrinsic::x86_mmx_psrl_d:
+  case Intrinsic::x86_mmx_psrl_q:
+  case Intrinsic::x86_mmx_psrli_w:
+  case Intrinsic::x86_mmx_psrli_d:
+  case Intrinsic::x86_mmx_psrli_q:
+  case Intrinsic::x86_mmx_psra_w:
+  case Intrinsic::x86_mmx_psra_d:
+  case Intrinsic::x86_mmx_psrai_w:
+  case Intrinsic::x86_mmx_psrai_d:
+  case Intrinsic::x86_mmx_packsswb:
+  case Intrinsic::x86_mmx_packssdw:
+  case Intrinsic::x86_mmx_packuswb:
+  case Intrinsic::x86_mmx_punpckhbw:
+  case Intrinsic::x86_mmx_punpckhwd:
+  case Intrinsic::x86_mmx_punpckhdq:
+  case Intrinsic::x86_mmx_punpcklbw:
+  case Intrinsic::x86_mmx_punpcklwd:
+  case Intrinsic::x86_mmx_punpckldq:
+  case Intrinsic::x86_mmx_pcmpeq_b:
+  case Intrinsic::x86_mmx_pcmpeq_w:
+  case Intrinsic::x86_mmx_pcmpeq_d:
+  case Intrinsic::x86_mmx_pcmpgt_b:
+  case Intrinsic::x86_mmx_pcmpgt_w:
+  case Intrinsic::x86_mmx_pcmpgt_d: {
+    Value *Operands[2];
+    
+    // Cast the operand to the X86 MMX type.
+    Operands[0] = new BitCastInst(CI->getArgOperand(0), 
+                                  NewFn->getFunctionType()->getParamType(0),
+                                  "upgraded.", CI);
+
+    switch (NewFn->getIntrinsicID()) {
+    default:
+      // Cast to the X86 MMX type.
+      Operands[1] = new BitCastInst(CI->getArgOperand(1), 
+                                    NewFn->getFunctionType()->getParamType(1),
+                                    "upgraded.", CI);
+      break;
+    case Intrinsic::x86_mmx_pslli_w:
+    case Intrinsic::x86_mmx_pslli_d:
+    case Intrinsic::x86_mmx_pslli_q:
+    case Intrinsic::x86_mmx_psrli_w:
+    case Intrinsic::x86_mmx_psrli_d:
+    case Intrinsic::x86_mmx_psrli_q:
+    case Intrinsic::x86_mmx_psrai_w:
+    case Intrinsic::x86_mmx_psrai_d:
+      // These take an i32 as their second parameter.
+      Operands[1] = CI->getArgOperand(1);
+      break;
+    }
+
+    ConstructNewCallInst(NewFn, CI, Operands, 2);
+    break;
+  }
+  case Intrinsic::x86_mmx_maskmovq: {
+    Value *Operands[3];
+
+    // Cast the operands to the X86 MMX type.
+    Operands[0] = new BitCastInst(CI->getArgOperand(0), 
+                                  NewFn->getFunctionType()->getParamType(0),
+                                  "upgraded.", CI);
+    Operands[1] = new BitCastInst(CI->getArgOperand(1), 
+                                  NewFn->getFunctionType()->getParamType(1),
+                                  "upgraded.", CI);
+    Operands[2] = CI->getArgOperand(2);
+
+    ConstructNewCallInst(NewFn, CI, Operands, 3, false);
+    break;
+  }
+  case Intrinsic::x86_mmx_pmovmskb: {
+    Value *Operands[1];
+
+    // Cast the operand to the X86 MMX type.
+    Operands[0] = new BitCastInst(CI->getArgOperand(0), 
+                                  NewFn->getFunctionType()->getParamType(0),
+                                  "upgraded.", CI);
+
+    ConstructNewCallInst(NewFn, CI, Operands, 1);
+    break;
+  }
+  case Intrinsic::x86_mmx_movnt_dq: {
+    Value *Operands[2];
+
+    Operands[0] = CI->getArgOperand(0);
+
+    // Cast the operand to the X86 MMX type.
+    Operands[1] = new BitCastInst(CI->getArgOperand(1),
+                                  NewFn->getFunctionType()->getParamType(1),
+                                  "upgraded.", CI);
+
+    ConstructNewCallInst(NewFn, CI, Operands, 2, false);
+    break;
+  }
+  case Intrinsic::x86_mmx_palignr_b: {
+    Value *Operands[3];
+
+    // Cast the operands to the X86 MMX type.
+    Operands[0] = new BitCastInst(CI->getArgOperand(0),
+                                  NewFn->getFunctionType()->getParamType(0),
+                                  "upgraded.", CI);
+    Operands[1] = new BitCastInst(CI->getArgOperand(1),
+                                  NewFn->getFunctionType()->getParamType(1),
+                                  "upgraded.", CI);
+    Operands[2] = CI->getArgOperand(2);
+
+    ConstructNewCallInst(NewFn, CI, Operands, 3);
+    break;
+  }
+  case Intrinsic::x86_mmx_pextr_w: {
+    Value *Operands[2];
+
+    // Cast the operands to the X86 MMX type.
+    Operands[0] = new BitCastInst(CI->getArgOperand(0),
+                                  NewFn->getFunctionType()->getParamType(0),
+                                  "upgraded.", CI);
+    Operands[1] = CI->getArgOperand(1);
+
+    ConstructNewCallInst(NewFn, CI, Operands, 2);
+    break;
+  }
+  case Intrinsic::x86_mmx_pinsr_w: {
+    Value *Operands[3];
+
+    // Cast the operands to the X86 MMX type.
+    Operands[0] = new BitCastInst(CI->getArgOperand(0),
+                                  NewFn->getFunctionType()->getParamType(0),
+                                  "upgraded.", CI);
+    Operands[1] = CI->getArgOperand(1);
+    Operands[2] = CI->getArgOperand(2);
+
+    ConstructNewCallInst(NewFn, CI, Operands, 3);
+    break;
+  }
+  case Intrinsic::x86_sse_pshuf_w: {
+    IRBuilder<> Builder(C);
+    Builder.SetInsertPoint(CI->getParent(), CI);
+
+    // Cast the operand to the X86 MMX type.
+    Value *Operands[2];
+    Operands[0] =
+      Builder.CreateBitCast(CI->getArgOperand(0), 
+                            NewFn->getFunctionType()->getParamType(0),
+                            "upgraded.");
+    Operands[1] =
+      Builder.CreateTrunc(CI->getArgOperand(1),
+                          Type::getInt8Ty(C),
+                          "upgraded.");
+
+    ConstructNewCallInst(NewFn, CI, Operands, 2);
+    break;
+  }
+
+#if 0
+  case Intrinsic::x86_mmx_cvtsi32_si64: {
+    // The return type needs to be changed.
+    Value *Operands[1];
+    Operands[0] = CI->getArgOperand(0);
+    ConstructNewCallInst(NewFn, CI, Operands, 1);
+    break;
+  }
+  case Intrinsic::x86_mmx_cvtsi64_si32: {
+    Value *Operands[1];
+
+    // Cast the operand to the X86 MMX type.
+    Operands[0] = new BitCastInst(CI->getArgOperand(0),
+                                  NewFn->getFunctionType()->getParamType(0),
+                                  "upgraded.", CI);
+
+    ConstructNewCallInst(NewFn, CI, Operands, 1);
+    break;
+  }
+  case Intrinsic::x86_mmx_vec_init_b:
+  case Intrinsic::x86_mmx_vec_init_w:
+  case Intrinsic::x86_mmx_vec_init_d: {
+    // The return type needs to be changed.
+    Value *Operands[8];
+    unsigned NumOps = 0;
+
+    switch (NewFn->getIntrinsicID()) {
+    default: break;
+    case Intrinsic::x86_mmx_vec_init_b: NumOps = 8; break;
+    case Intrinsic::x86_mmx_vec_init_w: NumOps = 4; break;
+    case Intrinsic::x86_mmx_vec_init_d: NumOps = 2; break;
+    }
+
+    switch (NewFn->getIntrinsicID()) {
+    default: break;
+    case Intrinsic::x86_mmx_vec_init_b:
+      Operands[7] = CI->getArgOperand(7);
+      Operands[6] = CI->getArgOperand(6);
+      Operands[5] = CI->getArgOperand(5);
+      Operands[4] = CI->getArgOperand(4);
+      // FALLTHRU
+    case Intrinsic::x86_mmx_vec_init_w:
+      Operands[3] = CI->getArgOperand(3);
+      Operands[2] = CI->getArgOperand(2);
+      // FALLTHRU
+    case Intrinsic::x86_mmx_vec_init_d:
+      Operands[1] = CI->getArgOperand(1);
+      Operands[0] = CI->getArgOperand(0);
+      break;
+    }
+
+    ConstructNewCallInst(NewFn, CI, Operands, NumOps);
+    break;
+  }
+  case Intrinsic::x86_mmx_vec_ext_d: {
+    Value *Operands[2];
+
+    // Cast the operand to the X86 MMX type.
+    Operands[0] = new BitCastInst(CI->getArgOperand(0),
+                                  NewFn->getFunctionType()->getParamType(0),
+                                  "upgraded.", CI);
+    Operands[1] = CI->getArgOperand(1);
+
+    ConstructNewCallInst(NewFn, CI, Operands, 2);
+    break;
+  }
+#endif
+
   case Intrinsic::ctlz:
   case Intrinsic::ctpop:
   case Intrinsic::cttz: {

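All of the new MMX, SSE, and NEON cases above follow the same two-step scheme: the declaration side renames the mistyped intrinsic out of the way (F->setName("")) and re-inserts one with the correct signature via getOrInsertFunction, and UpgradeIntrinsicCall (above) then rebuilds each call site against the new declaration, bitcasting operands whose types changed. A condensed sketch of that flow using 2.8-era APIs; the helper names are illustrative, not part of the patch, and it assumes an unchanged return type (ConstructNewCallInst above also inserts a cast when the return type differs):

#include "llvm/Module.h"
#include "llvm/Instructions.h"
#include "llvm/ADT/SmallVector.h"

using namespace llvm;

// Step 1: retire the mistyped declaration and insert one with the right type.
// (Intrinsic names cannot be overloaded, hence the rename.)
static Function *upgradeDeclaration(Function *OldF, const FunctionType *NewTy) {
  Module *M = OldF->getParent();
  std::string Name = OldF->getName().str(); // copy before the name is dropped
  OldF->setName("");
  return cast<Function>(M->getOrInsertFunction(Name, NewTy));
}

// Step 2: rebuild one call site against the new declaration, casting any
// operand whose type no longer matches the new parameter list.
static void upgradeCallSite(CallInst *OldCI, Function *NewFn) {
  const FunctionType *NewFTy = NewFn->getFunctionType();
  SmallVector<Value*, 8> Ops;
  for (unsigned i = 0, e = OldCI->getNumArgOperands(); i != e; ++i) {
    Value *Arg = OldCI->getArgOperand(i);
    const Type *WantTy = NewFTy->getParamType(i);
    if (Arg->getType() != WantTy)
      Arg = new BitCastInst(Arg, WantTy, "upgraded.", OldCI);
    Ops.push_back(Arg);
  }
  CallInst *NewCI = CallInst::Create(NewFn, Ops.begin(), Ops.end(),
                                     "upgraded." + OldCI->getName(), OldCI);
  NewCI->setTailCall(OldCI->isTailCall());
  NewCI->setCallingConv(OldCI->getCallingConv());
  if (!OldCI->use_empty())
    OldCI->replaceAllUsesWith(NewCI);   // assumes an unchanged return type
  OldCI->eraseFromParent();
}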
Modified: llvm/branches/wendling/eh/lib/VMCore/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/VMCore/CMakeLists.txt?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/VMCore/CMakeLists.txt (original)
+++ llvm/branches/wendling/eh/lib/VMCore/CMakeLists.txt Tue Oct 26 19:48:03 2010
@@ -1,3 +1,5 @@
+set(LLVM_REQUIRES_RTTI 1)
+
 add_llvm_library(LLVMCore
   AsmWriter.cpp
   Attributes.cpp

Modified: llvm/branches/wendling/eh/lib/VMCore/ConstantFold.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/VMCore/ConstantFold.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/VMCore/ConstantFold.cpp (original)
+++ llvm/branches/wendling/eh/lib/VMCore/ConstantFold.cpp Tue Oct 26 19:48:03 2010
@@ -357,22 +357,6 @@
       }
     }
 
-  if (const UnionType *UTy = dyn_cast<UnionType>(Ty)) {
-    unsigned NumElems = UTy->getNumElements();
-    // Check for a union with all members having the same size.
-    Constant *MemberSize =
-      getFoldedSizeOf(UTy->getElementType(0), DestTy, true);
-    bool AllSame = true;
-    for (unsigned i = 1; i != NumElems; ++i)
-      if (MemberSize !=
-          getFoldedSizeOf(UTy->getElementType(i), DestTy, true)) {
-        AllSame = false;
-        break;
-      }
-    if (AllSame)
-      return MemberSize;
-  }
-
   // Pointer size doesn't depend on the pointee type, so canonicalize them
   // to an arbitrary pointee.
   if (const PointerType *PTy = dyn_cast<PointerType>(Ty))
@@ -438,24 +422,6 @@
       return MemberAlign;
   }
 
-  if (const UnionType *UTy = dyn_cast<UnionType>(Ty)) {
-    // Union alignment is the maximum alignment of any member.
-    // Without target data, we can't compare much, but we can check to see
-    // if all the members have the same alignment.
-    unsigned NumElems = UTy->getNumElements();
-    // Check for a union with all members having the same alignment.
-    Constant *MemberAlign =
-      getFoldedAlignOf(UTy->getElementType(0), DestTy, true);
-    bool AllSame = true;
-    for (unsigned i = 1; i != NumElems; ++i)
-      if (MemberAlign != getFoldedAlignOf(UTy->getElementType(i), DestTy, true)) {
-        AllSame = false;
-        break;
-      }
-    if (AllSame)
-      return MemberAlign;
-  }
-
   // Pointer alignment doesn't depend on the pointee type, so canonicalize them
   // to an arbitrary pointee.
   if (const PointerType *PTy = dyn_cast<PointerType>(Ty))
@@ -909,8 +875,6 @@
     unsigned numOps;
     if (const ArrayType *AR = dyn_cast<ArrayType>(AggTy))
       numOps = AR->getNumElements();
-    else if (AggTy->isUnionTy())
-      numOps = 1;
     else
       numOps = cast<StructType>(AggTy)->getNumElements();
     
@@ -927,10 +891,6 @@
     
     if (const StructType* ST = dyn_cast<StructType>(AggTy))
       return ConstantStruct::get(ST->getContext(), Ops, ST->isPacked());
-    if (const UnionType* UT = dyn_cast<UnionType>(AggTy)) {
-      assert(Ops.size() == 1 && "Union can only contain a single value!");
-      return ConstantUnion::get(UT, Ops[0]);
-    }
     return ConstantArray::get(cast<ArrayType>(AggTy), Ops);
   }
   

Modified: llvm/branches/wendling/eh/lib/VMCore/Constants.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/VMCore/Constants.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/VMCore/Constants.cpp (original)
+++ llvm/branches/wendling/eh/lib/VMCore/Constants.cpp Tue Oct 26 19:48:03 2010
@@ -59,7 +59,6 @@
   case Type::PointerTyID:
     return ConstantPointerNull::get(cast<PointerType>(Ty));
   case Type::StructTyID:
-  case Type::UnionTyID:
   case Type::ArrayTyID:
   case Type::VectorTyID:
     return ConstantAggregateZero::get(Ty);
@@ -526,6 +525,7 @@
 Constant* ConstantArray::get(LLVMContext &Context, StringRef Str,
                              bool AddNull) {
   std::vector<Constant*> ElementVals;
+  ElementVals.reserve(Str.size() + size_t(AddNull));
   for (unsigned i = 0; i < Str.size(); ++i)
     ElementVals.push_back(ConstantInt::get(Type::getInt8Ty(Context), Str[i]));
 
@@ -586,27 +586,6 @@
   return get(Context, std::vector<Constant*>(Vals, Vals+NumVals), Packed);
 }
 
-ConstantUnion::ConstantUnion(const UnionType *T, Constant* V)
-  : Constant(T, ConstantUnionVal,
-             OperandTraits<ConstantUnion>::op_end(this) - 1, 1) {
-  Use *OL = OperandList;
-  assert(T->getElementTypeIndex(V->getType()) >= 0 &&
-      "Initializer for union element isn't a member of union type!");
-  *OL = V;
-}
-
-// ConstantUnion accessors.
-Constant* ConstantUnion::get(const UnionType* T, Constant* V) {
-  LLVMContextImpl* pImpl = T->getContext().pImpl;
-  
-  // Create a ConstantAggregateZero value if all elements are zeros...
-  if (!V->isNullValue())
-    return pImpl->UnionConstants.getOrCreate(T, V);
-
-  return ConstantAggregateZero::get(T);
-}
-
-
 ConstantVector::ConstantVector(const VectorType *T,
                                const std::vector<Constant*> &V)
   : Constant(T, ConstantVectorVal,
@@ -723,7 +702,7 @@
   if (getOpcode() != Instruction::GetElementPtr) return false;
 
   gep_type_iterator GEPI = gep_type_begin(this), E = gep_type_end(this);
-  User::const_op_iterator OI = next(this->op_begin());
+  User::const_op_iterator OI = llvm::next(this->op_begin());
 
   // Skip the first index, as it has no static limit.
   ++GEPI;
@@ -945,8 +924,7 @@
 //                      Factory Function Implementation
 
 ConstantAggregateZero* ConstantAggregateZero::get(const Type* Ty) {
-  assert((Ty->isStructTy() || Ty->isUnionTy()
-         || Ty->isArrayTy() || Ty->isVectorTy()) &&
+  assert((Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy()) &&
          "Cannot create an aggregate zero of non-aggregate type!");
   
   LLVMContextImpl *pImpl = Ty->getContext().pImpl;
@@ -1033,13 +1011,6 @@
 
 // destroyConstant - Remove the constant from the constant table...
 //
-void ConstantUnion::destroyConstant() {
-  getRawType()->getContext().pImpl->UnionConstants.remove(this);
-  destroyConstantImpl();
-}
-
-// destroyConstant - Remove the constant from the constant table...
-//
 void ConstantVector::destroyConstant() {
   getRawType()->getContext().pImpl->VectorConstants.remove(this);
   destroyConstantImpl();
@@ -2116,55 +2087,6 @@
   destroyConstant();
 }
 
-void ConstantUnion::replaceUsesOfWithOnConstant(Value *From, Value *To,
-                                                 Use *U) {
-  assert(isa<Constant>(To) && "Cannot make Constant refer to non-constant!");
-  Constant *ToC = cast<Constant>(To);
-
-  assert(U == OperandList && "Union constants can only have one use!");
-  assert(getNumOperands() == 1 && "Union constants can only have one use!");
-  assert(getOperand(0) == From && "ReplaceAllUsesWith broken!");
-
-  std::pair<LLVMContextImpl::UnionConstantsTy::MapKey, ConstantUnion*> Lookup;
-  Lookup.first.first = cast<UnionType>(getRawType());
-  Lookup.second = this;
-  Lookup.first.second = ToC;
-
-  LLVMContextImpl *pImpl = getRawType()->getContext().pImpl;
-
-  Constant *Replacement = 0;
-  if (ToC->isNullValue()) {
-    Replacement = ConstantAggregateZero::get(getRawType());
-  } else {
-    // Check to see if we have this union type already.
-    bool Exists;
-    LLVMContextImpl::UnionConstantsTy::MapTy::iterator I =
-      pImpl->UnionConstants.InsertOrGetItem(Lookup, Exists);
-    
-    if (Exists) {
-      Replacement = I->second;
-    } else {
-      // Okay, the new shape doesn't exist in the system yet.  Instead of
-      // creating a new constant union, inserting it, replaceallusesof'ing the
-      // old with the new, then deleting the old... just update the current one
-      // in place!
-      pImpl->UnionConstants.MoveConstantToNewSlot(this, I);
-      
-      // Update to the new value.
-      setOperand(0, ToC);
-      return;
-    }
-  }
-  
-  assert(Replacement != this && "I didn't contain From!");
-  
-  // Everyone using this now uses the replacement.
-  uncheckedReplaceAllUsesWith(Replacement);
-  
-  // Delete the old constant!
-  destroyConstant();
-}
-
 void ConstantVector::replaceUsesOfWithOnConstant(Value *From, Value *To,
                                                  Use *U) {
   assert(isa<Constant>(To) && "Cannot make Constant refer to non-constant!");

Modified: llvm/branches/wendling/eh/lib/VMCore/ConstantsContext.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/VMCore/ConstantsContext.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/VMCore/ConstantsContext.h (original)
+++ llvm/branches/wendling/eh/lib/VMCore/ConstantsContext.h Tue Oct 26 19:48:03 2010
@@ -511,14 +511,6 @@
   }
 };
 
-template<>
-struct ConstantKeyData<ConstantUnion> {
-  typedef Constant* ValType;
-  static ValType getValType(ConstantUnion *CU) {
-    return cast<Constant>(CU->getOperand(0));
-  }
-};
-
 // ConstantPointerNull does not take extra "value" argument...
 template<class ValType>
 struct ConstantCreator<ConstantPointerNull, PointerType, ValType> {

Modified: llvm/branches/wendling/eh/lib/VMCore/Core.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/VMCore/Core.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/VMCore/Core.cpp (original)
+++ llvm/branches/wendling/eh/lib/VMCore/Core.cpp Tue Oct 26 19:48:03 2010
@@ -7,8 +7,8 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// This file implements the C bindings for libLLVMCore.a, which implements
-// the LLVM intermediate representation.
+// This file implements the common infrastructure (including the C bindings)
+// for libLLVMCore.a, which implements the LLVM intermediate representation.
 //
 //===----------------------------------------------------------------------===//
 
@@ -22,6 +22,7 @@
 #include "llvm/TypeSymbolTable.h"
 #include "llvm/InlineAsm.h"
 #include "llvm/IntrinsicInst.h"
+#include "llvm/PassManager.h"
 #include "llvm/Support/CallSite.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
@@ -33,6 +34,18 @@
 
 using namespace llvm;
 
+void llvm::initializeCore(PassRegistry &Registry) {
+  initializeDominatorTreePass(Registry);
+  initializeDominanceFrontierPass(Registry);
+  initializePrintModulePassPass(Registry);
+  initializePrintFunctionPassPass(Registry);
+  initializeVerifierPass(Registry);
+  initializePreVerifierPass(Registry);
+}
+
+void LLVMInitializeCore(LLVMPassRegistryRef R) {
+  initializeCore(*unwrap(R));
+}
 
 /*===-- Error handling ----------------------------------------------------===*/
 
@@ -155,8 +168,6 @@
     return LLVMFunctionTypeKind;
   case Type::StructTyID:
     return LLVMStructTypeKind;
-  case Type::UnionTyID:
-    return LLVMUnionTypeKind;
   case Type::ArrayTyID:
     return LLVMArrayTypeKind;
   case Type::PointerTyID:
@@ -165,6 +176,8 @@
     return LLVMOpaqueTypeKind;
   case Type::VectorTyID:
     return LLVMVectorTypeKind;
+  case Type::X86_MMXTyID:
+    return LLVMX86_MMXTypeKind;
   }
 }
 
@@ -233,6 +246,9 @@
 LLVMTypeRef LLVMPPCFP128TypeInContext(LLVMContextRef C) {
   return (LLVMTypeRef) Type::getPPC_FP128Ty(*unwrap(C));
 }
+LLVMTypeRef LLVMX86MMXTypeInContext(LLVMContextRef C) {
+  return (LLVMTypeRef) Type::getX86_MMXTy(*unwrap(C));
+}
 
 LLVMTypeRef LLVMFloatType(void) {
   return LLVMFloatTypeInContext(LLVMGetGlobalContext());
@@ -249,6 +265,9 @@
 LLVMTypeRef LLVMPPCFP128Type(void) {
   return LLVMPPCFP128TypeInContext(LLVMGetGlobalContext());
 }
+LLVMTypeRef LLVMX86MMXType(void) {
+  return LLVMX86MMXTypeInContext(LLVMGetGlobalContext());
+}
 
 /*--.. Operations on function types ........................................--*/
 
@@ -315,34 +334,6 @@
   return unwrap<StructType>(StructTy)->isPacked();
 }
 
-/*--.. Operations on union types ..........................................--*/
-
-LLVMTypeRef LLVMUnionTypeInContext(LLVMContextRef C, LLVMTypeRef *ElementTypes,
-                                   unsigned ElementCount) {
-  SmallVector<const Type*, 8> Tys;
-  for (LLVMTypeRef *I = ElementTypes,
-                   *E = ElementTypes + ElementCount; I != E; ++I)
-    Tys.push_back(unwrap(*I));
-  
-  return wrap(UnionType::get(&Tys[0], Tys.size()));
-}
-
-LLVMTypeRef LLVMUnionType(LLVMTypeRef *ElementTypes, unsigned ElementCount) {
-  return LLVMUnionTypeInContext(LLVMGetGlobalContext(), ElementTypes,
-                                ElementCount);
-}
-
-unsigned LLVMCountUnionElementTypes(LLVMTypeRef UnionTy) {
-  return unwrap<UnionType>(UnionTy)->getNumElements();
-}
-
-void LLVMGetUnionElementTypes(LLVMTypeRef UnionTy, LLVMTypeRef *Dest) {
-  UnionType *Ty = unwrap<UnionType>(UnionTy);
-  for (FunctionType::param_iterator I = Ty->element_begin(),
-                                    E = Ty->element_end(); I != E; ++I)
-    *Dest++ = wrap(*I);
-}
-
 /*--.. Operations on array, pointer, and vector types (sequence types) .....--*/
 
 LLVMTypeRef LLVMArrayType(LLVMTypeRef ElementType, unsigned ElementCount) {
@@ -488,6 +479,14 @@
   return wrap(unwrap<User>(Val)->getOperand(Index));
 }
 
+void LLVMSetOperand(LLVMValueRef Val, unsigned Index, LLVMValueRef Op) {
+  unwrap<User>(Val)->setOperand(Index, unwrap(Op));
+}
+
+int LLVMGetNumOperands(LLVMValueRef Val) {
+  return unwrap<User>(Val)->getNumOperands();
+}
+
 /*--.. Operations on constants of any type .................................--*/
 
 LLVMValueRef LLVMConstNull(LLVMTypeRef Ty) {
@@ -588,7 +587,7 @@
                                       LLVMBool DontNullTerminate) {
   /* Inverted the sense of AddNull because ', 0)' is a
      better mnemonic for null termination than ', 1)'. */
-  return wrap(ConstantArray::get(*unwrap(C), std::string(Str, Length),
+  return wrap(ConstantArray::get(*unwrap(C), StringRef(Str, Length),
                                  DontNullTerminate == 0));
 }
 LLVMValueRef LLVMConstStructInContext(LLVMContextRef C, 
@@ -619,10 +618,6 @@
   return wrap(ConstantVector::get(
                             unwrap<Constant>(ScalarConstantVals, Size), Size));
 }
-LLVMValueRef LLVMConstUnion(LLVMTypeRef Ty, LLVMValueRef Val) {
-  return wrap(ConstantUnion::get(unwrap<UnionType>(Ty), unwrap<Constant>(Val)));
-}
-
 /*--.. Constant expressions ................................................--*/
 
 LLVMOpcode LLVMGetConstOpcode(LLVMValueRef ConstantVal) {
@@ -1060,6 +1055,8 @@
     return LLVMLinkerPrivateLinkage;
   case GlobalValue::LinkerPrivateWeakLinkage:
     return LLVMLinkerPrivateWeakLinkage;
+  case GlobalValue::LinkerPrivateWeakDefAutoLinkage:
+    return LLVMLinkerPrivateWeakDefAutoLinkage;
   case GlobalValue::DLLImportLinkage:
     return LLVMDLLImportLinkage;
   case GlobalValue::DLLExportLinkage:
@@ -1113,6 +1110,9 @@
   case LLVMLinkerPrivateWeakLinkage:
     GV->setLinkage(GlobalValue::LinkerPrivateWeakLinkage);
     break;
+  case LLVMLinkerPrivateWeakDefAutoLinkage:
+    GV->setLinkage(GlobalValue::LinkerPrivateWeakDefAutoLinkage);
+    break;
   case LLVMDLLImportLinkage:
     GV->setLinkage(GlobalValue::DLLImportLinkage);
     break;
@@ -2235,3 +2235,44 @@
 void LLVMDisposeMemoryBuffer(LLVMMemoryBufferRef MemBuf) {
   delete unwrap(MemBuf);
 }
+
+/*===-- Pass Registry -----------------------------------------------------===*/
+
+LLVMPassRegistryRef LLVMGetGlobalPassRegistry(void) {
+  return wrap(PassRegistry::getPassRegistry());
+}
+
+/*===-- Pass Manager ------------------------------------------------------===*/
+
+LLVMPassManagerRef LLVMCreatePassManager() {
+  return wrap(new PassManager());
+}
+
+LLVMPassManagerRef LLVMCreateFunctionPassManagerForModule(LLVMModuleRef M) {
+  return wrap(new FunctionPassManager(unwrap(M)));
+}
+
+LLVMPassManagerRef LLVMCreateFunctionPassManager(LLVMModuleProviderRef P) {
+  return LLVMCreateFunctionPassManagerForModule(
+                                            reinterpret_cast<LLVMModuleRef>(P));
+}
+
+LLVMBool LLVMRunPassManager(LLVMPassManagerRef PM, LLVMModuleRef M) {
+  return unwrap<PassManager>(PM)->run(*unwrap(M));
+}
+
+LLVMBool LLVMInitializeFunctionPassManager(LLVMPassManagerRef FPM) {
+  return unwrap<FunctionPassManager>(FPM)->doInitialization();
+}
+
+LLVMBool LLVMRunFunctionPassManager(LLVMPassManagerRef FPM, LLVMValueRef F) {
+  return unwrap<FunctionPassManager>(FPM)->run(*unwrap<Function>(F));
+}
+
+LLVMBool LLVMFinalizeFunctionPassManager(LLVMPassManagerRef FPM) {
+  return unwrap<FunctionPassManager>(FPM)->doFinalization();
+}
+
+void LLVMDisposePassManager(LLVMPassManagerRef PM) {
+  delete unwrap(PM);
+}

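Besides dropping the union-type bindings, Core.cpp now exposes the pass registry and both pass managers through the C API. A hedged usage sketch from the bindings side; it assumes the matching declarations in the llvm-c headers touched by this commit (llvm-c/Core.h, and llvm-c/Initialization.h for LLVMInitializeCore), while LLVMModuleCreateWithName and LLVMDisposeModule are pre-existing Core.h functions:

#include "llvm-c/Core.h"
#include "llvm-c/Initialization.h"   /* assumed home of the LLVMInitializeCore declaration */

int main() {
  LLVMInitializeCore(LLVMGetGlobalPassRegistry());

  LLVMModuleRef M = LLVMModuleCreateWithName("demo");

  /* Whole-module pass manager. */
  LLVMPassManagerRef PM = LLVMCreatePassManager();
  LLVMRunPassManager(PM, M);               /* true if any pass modified M */
  LLVMDisposePassManager(PM);

  /* Per-function pass manager, tied directly to the module; the older
     ModuleProvider-based LLVMCreateFunctionPassManager now just forwards here. */
  LLVMPassManagerRef FPM = LLVMCreateFunctionPassManagerForModule(M);
  LLVMInitializeFunctionPassManager(FPM);
  /* ... LLVMRunFunctionPassManager(FPM, Fn) for each function of interest ... */
  LLVMFinalizeFunctionPassManager(FPM);
  LLVMDisposePassManager(FPM);

  LLVMDisposeModule(M);
  return 0;
}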
Modified: llvm/branches/wendling/eh/lib/VMCore/Dominators.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/VMCore/Dominators.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/VMCore/Dominators.cpp (original)
+++ llvm/branches/wendling/eh/lib/VMCore/Dominators.cpp Tue Oct 26 19:48:03 2010
@@ -53,7 +53,7 @@
 
 char DominatorTree::ID = 0;
 INITIALIZE_PASS(DominatorTree, "domtree",
-                "Dominator Tree Construction", true, true);
+                "Dominator Tree Construction", true, true)
 
 bool DominatorTree::runOnFunction(Function &F) {
   DT->recalculate(F);
@@ -106,8 +106,11 @@
 //===----------------------------------------------------------------------===//
 
 char DominanceFrontier::ID = 0;
-INITIALIZE_PASS(DominanceFrontier, "domfrontier",
-                "Dominance Frontier Construction", true, true);
+INITIALIZE_PASS_BEGIN(DominanceFrontier, "domfrontier",
+                "Dominance Frontier Construction", true, true)
+INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_END(DominanceFrontier, "domfrontier",
+                "Dominance Frontier Construction", true, true)
 
 void DominanceFrontier::verifyAnalysis() const {
   if (!VerifyDomInfo) return;

Modified: llvm/branches/wendling/eh/lib/VMCore/InlineAsm.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/VMCore/InlineAsm.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/VMCore/InlineAsm.cpp (original)
+++ llvm/branches/wendling/eh/lib/VMCore/InlineAsm.cpp Tue Oct 26 19:48:03 2010
@@ -53,6 +53,24 @@
 const FunctionType *InlineAsm::getFunctionType() const {
   return cast<FunctionType>(getType()->getElementType());
 }
+    
+///Default constructor.
+InlineAsm::ConstraintInfo::ConstraintInfo() :
+  Type(isInput), isEarlyClobber(false),
+  MatchingInput(-1), isCommutative(false),
+  isIndirect(false), isMultipleAlternative(false),
+  currentAlternativeIndex(0) {
+}
+
+/// Copy constructor.
+InlineAsm::ConstraintInfo::ConstraintInfo(const ConstraintInfo &other) :
+  Type(other.Type), isEarlyClobber(other.isEarlyClobber),
+  MatchingInput(other.MatchingInput), isCommutative(other.isCommutative),
+  isIndirect(other.isIndirect), Codes(other.Codes),
+  isMultipleAlternative(other.isMultipleAlternative),
+  multipleAlternatives(other.multipleAlternatives),
+  currentAlternativeIndex(other.currentAlternativeIndex) {
+}
 
 /// Parse - Analyze the specified string (e.g. "==&{eax}") and fill in the
 /// fields in this structure.  If the constraint string is not understood,
@@ -60,13 +78,22 @@
 bool InlineAsm::ConstraintInfo::Parse(StringRef Str,
                      std::vector<InlineAsm::ConstraintInfo> &ConstraintsSoFar) {
   StringRef::iterator I = Str.begin(), E = Str.end();
+  unsigned multipleAlternativeCount = Str.count('|') + 1;
+  unsigned multipleAlternativeIndex = 0;
+  std::vector<std::string> *pCodes = &Codes;
   
   // Initialize
+  isMultipleAlternative = (multipleAlternativeCount > 1 ? true : false);
+  if (isMultipleAlternative) {
+    multipleAlternatives.resize(multipleAlternativeCount);
+    pCodes = &multipleAlternatives[0].Codes;
+  }
   Type = isInput;
   isEarlyClobber = false;
   MatchingInput = -1;
   isCommutative = false;
   isIndirect = false;
+  currentAlternativeIndex = 0;
   
   // Parse prefixes.
   if (*I == '~') {
@@ -120,15 +147,15 @@
       // Find the end of the register name.
       StringRef::iterator ConstraintEnd = std::find(I+1, E, '}');
       if (ConstraintEnd == E) return true;  // "{foo"
-      Codes.push_back(std::string(I, ConstraintEnd+1));
+      pCodes->push_back(std::string(I, ConstraintEnd+1));
       I = ConstraintEnd+1;
     } else if (isdigit(*I)) {     // Matching Constraint
       // Maximal munch numbers.
       StringRef::iterator NumStart = I;
       while (I != E && isdigit(*I))
         ++I;
-      Codes.push_back(std::string(NumStart, I));
-      unsigned N = atoi(Codes.back().c_str());
+      pCodes->push_back(std::string(NumStart, I));
+      unsigned N = atoi(pCodes->back().c_str());
       // Check that this is a valid matching constraint!
       if (N >= ConstraintsSoFar.size() || ConstraintsSoFar[N].Type != isOutput||
           Type != isInput)
@@ -136,14 +163,26 @@
       
       // If Operand N already has a matching input, reject this.  An output
       // can't be constrained to the same value as multiple inputs.
-      if (ConstraintsSoFar[N].hasMatchingInput())
-        return true;
-      
-      // Note that operand #n has a matching input.
-      ConstraintsSoFar[N].MatchingInput = ConstraintsSoFar.size();
+      if (isMultipleAlternative) {
+        InlineAsm::SubConstraintInfo &scInfo =
+          ConstraintsSoFar[N].multipleAlternatives[multipleAlternativeIndex];
+        if (scInfo.MatchingInput != -1)
+          return true;
+        // Note that operand #n has a matching input.
+        scInfo.MatchingInput = ConstraintsSoFar.size();
+      } else {
+        if (ConstraintsSoFar[N].hasMatchingInput())
+          return true;
+        // Note that operand #n has a matching input.
+        ConstraintsSoFar[N].MatchingInput = ConstraintsSoFar.size();
+      }
+    } else if (*I == '|') {
+      multipleAlternativeIndex++;
+      pCodes = &multipleAlternatives[multipleAlternativeIndex].Codes;
+      ++I;
     } else {
       // Single letter constraint.
-      Codes.push_back(std::string(I, I+1));
+      pCodes->push_back(std::string(I, I+1));
       ++I;
     }
   }
@@ -151,6 +190,18 @@
   return false;
 }
 
+/// selectAlternative - Point this constraint to the alternative constraint
+/// indicated by the index.
+void InlineAsm::ConstraintInfo::selectAlternative(unsigned index) {
+  if (index < multipleAlternatives.size()) {
+    currentAlternativeIndex = index;
+    InlineAsm::SubConstraintInfo &scInfo =
+      multipleAlternatives[currentAlternativeIndex];
+    MatchingInput = scInfo.MatchingInput;
+    Codes = scInfo.Codes;
+  }
+}
+
 std::vector<InlineAsm::ConstraintInfo>
 InlineAsm::ParseConstraints(StringRef Constraints) {
   std::vector<ConstraintInfo> Result;
@@ -183,7 +234,6 @@
   return Result;
 }
 
-
 /// Verify - Verify that the specified constraint string is reasonable for the
 /// specified function type, and otherwise validate the constraint string.
 bool InlineAsm::Verify(const FunctionType *Ty, StringRef ConstStr) {
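
As a rough illustration of the multiple-alternative support added above (the
constraint string below is made up, not taken from a real target), a client can
parse a string containing '|' and then commit to one alternative per operand
with selectAlternative():

  #include "llvm/InlineAsm.h"
  #include <vector>
  using namespace llvm;

  void pickSecondAlternative() {
    // "=r|m,r|m": each operand may be either a register or a memory slot.
    std::vector<InlineAsm::ConstraintInfo> CV =
      InlineAsm::ParseConstraints("=r|m,r|m");
    for (unsigned i = 0, e = CV.size(); i != e; ++i)
      if (CV[i].isMultipleAlternative)
        CV[i].selectAlternative(1);   // Codes/MatchingInput now reflect the 'm' form
  }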

Modified: llvm/branches/wendling/eh/lib/VMCore/Instructions.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/VMCore/Instructions.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/VMCore/Instructions.cpp (original)
+++ llvm/branches/wendling/eh/lib/VMCore/Instructions.cpp Tue Oct 26 19:48:03 2010
@@ -33,7 +33,7 @@
 User::op_iterator CallSite::getCallee() const {
   Instruction *II(getInstruction());
   return isCall()
-    ? cast</*FIXME: CallInst*/User>(II)->op_end() - 1 // Skip Callee
+    ? cast<CallInst>(II)->op_end() - 1 // Skip Callee
     : cast<InvokeInst>(II)->op_end() - 4; // Skip PersFn, BB, BB, Callee
 }
 
@@ -928,7 +928,7 @@
 
 bool AllocaInst::isArrayAllocation() const {
   if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
-    return CI->getZExtValue() != 1;
+    return !CI->isOne();
   return true;
 }
 
@@ -1459,9 +1459,24 @@
     return false;
   
   const VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
-  if (!isa<Constant>(Mask) || MaskTy == 0 ||
-      !MaskTy->getElementType()->isIntegerTy(32))
+  if (MaskTy == 0 || !MaskTy->getElementType()->isIntegerTy(32))
     return false;
+
+  // Check to see if Mask is valid.
+  if (const ConstantVector *MV = dyn_cast<ConstantVector>(Mask)) {
+    const VectorType *VTy = cast<VectorType>(V1->getType());
+    for (unsigned i = 0, e = MV->getNumOperands(); i != e; ++i) {
+      if (ConstantInt* CI = dyn_cast<ConstantInt>(MV->getOperand(i))) {
+        if (CI->uge(VTy->getNumElements()*2))
+          return false;
+      } else if (!isa<UndefValue>(MV->getOperand(i))) {
+        return false;
+      }
+    }
+  }
+  else if (!isa<UndefValue>(Mask) && !isa<ConstantAggregateZero>(Mask))
+    return false;
+  
   return true;
 }
 
@@ -2374,6 +2389,8 @@
     } else {                                    // Casting from something else
       return false;
     }
+  } else if (DestTy->isX86_MMXTy()) {     
+    return SrcBits == 64;
   } else {                                      // Casting to something else
     return false;
   }
@@ -2455,6 +2472,10 @@
       return BitCast;                             // vector -> vector
     } else if (DestPTy->getBitWidth() == SrcBits) {
       return BitCast;                               // float/int -> vector
+    } else if (SrcTy->isX86_MMXTy()) {
+      assert(DestPTy->getBitWidth()==64 &&
+             "Casting X86_MMX to vector of wrong width");
+      return BitCast;                             // MMX to 64-bit vector
     } else {
       assert(!"Illegal cast to vector (wrong type or size)");
     }
@@ -2466,6 +2487,14 @@
     } else {
       assert(!"Casting pointer to other than pointer or int");
     }
+  } else if (DestTy->isX86_MMXTy()) {
+    if (isa<VectorType>(SrcTy)) {
+      assert(cast<VectorType>(SrcTy)->getBitWidth() == 64 &&
+             "Casting vector of wrong width to X86_MMX");
+      return BitCast;                               // 64-bit vector to MMX
+    } else {
+      assert(!"Illegal cast to X86_MMX");
+    }
   } else {
     assert(!"Casting to type that is not first-class");
   }
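
A small sketch of what the new X86_MMX cases permit (not part of the patch; it
assumes this branch exposes Type::getX86_MMXTy and the usual
CastInst::castIsValid signature): a 64-bit vector value may now be bitcast to
x86_mmx, while other widths are still rejected.

  #include "llvm/Instructions.h"
  #include "llvm/Type.h"
  #include "llvm/LLVMContext.h"
  using namespace llvm;

  // V2i32 is assumed to be a value of type <2 x i32> (64 bits total).
  bool canBitcastToMMX(LLVMContext &C, Value *V2i32) {
    const Type *MMXTy = Type::getX86_MMXTy(C);
    return CastInst::castIsValid(Instruction::BitCast, V2i32, MMXTy);
  }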

Modified: llvm/branches/wendling/eh/lib/VMCore/LLVMContext.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/VMCore/LLVMContext.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/VMCore/LLVMContext.cpp (original)
+++ llvm/branches/wendling/eh/lib/VMCore/LLVMContext.cpp Tue Oct 26 19:48:03 2010
@@ -28,12 +28,27 @@
 }
 
 LLVMContext::LLVMContext() : pImpl(new LLVMContextImpl(*this)) {
-  // Create the first metadata kind, which is always 'dbg'.
+  // Create the fixed metadata kinds. This is done in the same order as the
+  // MD_* enum values so that they correspond.
+
+  // Create the 'dbg' metadata kind. 
   unsigned DbgID = getMDKindID("dbg");
   assert(DbgID == MD_dbg && "dbg kind id drifted"); (void)DbgID;
+
+  // Create the 'tbaa' metadata kind.
+  unsigned TBAAID = getMDKindID("tbaa");
+  assert(TBAAID == MD_tbaa && "tbaa kind id drifted"); (void)TBAAID;
 }
 LLVMContext::~LLVMContext() { delete pImpl; }
 
+void LLVMContext::addModule(Module *M) {
+  pImpl->OwnedModules.insert(M);
+}
+
+void LLVMContext::removeModule(Module *M) {
+  pImpl->OwnedModules.erase(M);
+}
+
 //===----------------------------------------------------------------------===//
 // Recoverable Backend Errors
 //===----------------------------------------------------------------------===//
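
The practical effect of the new addModule/removeModule hooks (together with the
Module constructor/destructor changes further down) is that a context now cleans
up any modules still alive when it is destroyed. A minimal sketch, illustrative
only:

  #include "llvm/LLVMContext.h"
  #include "llvm/Module.h"
  using namespace llvm;

  void contextOwnsModules() {
    LLVMContext *Ctx = new LLVMContext();
    new Module("leaked", *Ctx);        // registered with Ctx, never deleted here
    Module *M = new Module("m", *Ctx);
    delete M;                          // removes itself from Ctx first
    delete Ctx;                        // ~LLVMContextImpl deletes "leaked"
  }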

Modified: llvm/branches/wendling/eh/lib/VMCore/LLVMContextImpl.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/VMCore/LLVMContextImpl.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/VMCore/LLVMContextImpl.cpp (original)
+++ llvm/branches/wendling/eh/lib/VMCore/LLVMContextImpl.cpp Tue Oct 26 19:48:03 2010
@@ -12,6 +12,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "LLVMContextImpl.h"
+#include "llvm/Module.h"
 #include <algorithm>
 using namespace llvm;
 
@@ -25,6 +26,7 @@
     X86_FP80Ty(C, Type::X86_FP80TyID),
     FP128Ty(C, Type::FP128TyID),
     PPC_FP128Ty(C, Type::PPC_FP128TyID),
+    X86_MMXTy(C, Type::X86_MMXTyID),
     Int1Ty(C, 1),
     Int8Ty(C, 8),
     Int16Ty(C, 16),
@@ -51,20 +53,26 @@
 }
 
 LLVMContextImpl::~LLVMContextImpl() {
+  // NOTE: We need to delete the contents of OwnedModules, but we have to
+  // duplicate it into a temporary vector, because the destructor of Module
+  // will try to remove itself from the OwnedModules set.  This would cause
+  // iterator invalidation if we iterated on the set directly.
+  std::vector<Module*> Modules(OwnedModules.begin(), OwnedModules.end());
+  for (std::vector<Module*>::iterator I = Modules.begin(), E = Modules.end();
+       I != E; ++I)
+    delete *I;
+  
   std::for_each(ExprConstants.map_begin(), ExprConstants.map_end(),
                 DropReferences());
   std::for_each(ArrayConstants.map_begin(), ArrayConstants.map_end(),
                 DropReferences());
   std::for_each(StructConstants.map_begin(), StructConstants.map_end(),
                 DropReferences());
-  std::for_each(UnionConstants.map_begin(), UnionConstants.map_end(),
-                DropReferences());
   std::for_each(VectorConstants.map_begin(), VectorConstants.map_end(),
                 DropReferences());
   ExprConstants.freeConstants();
   ArrayConstants.freeConstants();
   StructConstants.freeConstants();
-  UnionConstants.freeConstants();
   VectorConstants.freeConstants();
   AggZeroConstants.freeConstants();
   NullPtrConstants.freeConstants();
@@ -93,7 +101,7 @@
     MDNodes.push_back(&*I);
   }
   MDNodes.append(NonUniquedMDNodes.begin(), NonUniquedMDNodes.end());
-  for (SmallVector<MDNode*, 8>::iterator I = MDNodes.begin(),
+  for (SmallVectorImpl<MDNode *>::iterator I = MDNodes.begin(),
          E = MDNodes.end(); I != E; ++I) {
     (*I)->destroy();
   }

Modified: llvm/branches/wendling/eh/lib/VMCore/LLVMContextImpl.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/VMCore/LLVMContextImpl.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/VMCore/LLVMContextImpl.h (original)
+++ llvm/branches/wendling/eh/lib/VMCore/LLVMContextImpl.h Tue Oct 26 19:48:03 2010
@@ -115,6 +115,10 @@
   
 class LLVMContextImpl {
 public:
+  /// OwnedModules - The set of modules instantiated in this context, and which
+  /// will be automatically deleted if this context is deleted.
+  SmallPtrSet<Module*, 4> OwnedModules;
+  
   void *InlineAsmDiagHandler, *InlineAsmDiagContext;
   
   typedef DenseMap<DenseMapAPIntKeyInfo::KeyTy, ConstantInt*, 
@@ -144,10 +148,6 @@
     ConstantStruct, true /*largekey*/> StructConstantsTy;
   StructConstantsTy StructConstants;
   
-  typedef ConstantUniqueMap<Constant*, UnionType, ConstantUnion>
-      UnionConstantsTy;
-  UnionConstantsTy UnionConstants;
-  
   typedef ConstantUniqueMap<std::vector<Constant*>, VectorType,
                             ConstantVector> VectorConstantsTy;
   VectorConstantsTy VectorConstants;
@@ -174,6 +174,7 @@
   const Type X86_FP80Ty;
   const Type FP128Ty;
   const Type PPC_FP128Ty;
+  const Type X86_MMXTy;
   const IntegerType Int1Ty;
   const IntegerType Int8Ty;
   const IntegerType Int16Ty;
@@ -192,7 +193,6 @@
   TypeMap<PointerValType, PointerType> PointerTypes;
   TypeMap<FunctionValType, FunctionType> FunctionTypes;
   TypeMap<StructValType, StructType> StructTypes;
-  TypeMap<UnionValType, UnionType> UnionTypes;
   TypeMap<IntegerValType, IntegerType> IntegerTypes;
 
   // Opaque types are not structurally uniqued, so don't use TypeMap.

Modified: llvm/branches/wendling/eh/lib/VMCore/Metadata.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/VMCore/Metadata.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/VMCore/Metadata.cpp (original)
+++ llvm/branches/wendling/eh/lib/VMCore/Metadata.cpp Tue Oct 26 19:48:03 2010
@@ -20,6 +20,7 @@
 #include "llvm/ADT/StringMap.h"
 #include "llvm/ADT/SmallString.h"
 #include "SymbolTableListTraitsImpl.h"
+#include "llvm/Support/LeakDetector.h"
 #include "llvm/Support/ValueHandle.h"
 using namespace llvm;
 
@@ -186,6 +187,21 @@
                           unsigned NumVals, FunctionLocalness FL,
                           bool Insert) {
   LLVMContextImpl *pImpl = Context.pImpl;
+
+  // Add all the operand pointers. Note that we don't have to add the
+  // isFunctionLocal bit because that's implied by the operands.
+  // Note that if the operands are later nulled out, the node will be
+  // removed from the uniquing map.
+  FoldingSetNodeID ID;
+  for (unsigned i = 0; i != NumVals; ++i)
+    ID.AddPointer(Vals[i]);
+
+  void *InsertPoint;
+  MDNode *N = NULL;
+  
+  if ((N = pImpl->MDNodeSet.FindNodeOrInsertPos(ID, InsertPoint)))
+    return N;
+    
   bool isFunctionLocal = false;
   switch (FL) {
   case FL_Unknown:
@@ -206,20 +222,6 @@
     break;
   }
 
-  FoldingSetNodeID ID;
-  for (unsigned i = 0; i != NumVals; ++i)
-    ID.AddPointer(Vals[i]);
-  ID.AddBoolean(isFunctionLocal);
-
-  void *InsertPoint;
-  MDNode *N = NULL;
-  
-  if ((N = pImpl->MDNodeSet.FindNodeOrInsertPos(ID, InsertPoint)))
-    return N;
-    
-  if (!Insert)
-    return NULL;
-    
   // Coallocate space for the node and Operands together, then placement new.
   void *Ptr = malloc(sizeof(MDNode)+NumVals*sizeof(MDNodeOperand));
   N = new (Ptr) MDNode(Context, Vals, NumVals, isFunctionLocal);
@@ -244,15 +246,42 @@
   return getMDNode(Context, Vals, NumVals, FL_Unknown, false);
 }
 
+MDNode *MDNode::getTemporary(LLVMContext &Context, Value *const *Vals,
+                             unsigned NumVals) {
+  MDNode *N = (MDNode *)malloc(sizeof(MDNode)+NumVals*sizeof(MDNodeOperand));
+  N = new (N) MDNode(Context, Vals, NumVals, FL_No);
+  N->setValueSubclassData(N->getSubclassDataFromValue() |
+                          NotUniquedBit);
+  LeakDetector::addGarbageObject(N);
+  return N;
+}
+
+void MDNode::deleteTemporary(MDNode *N) {
+  assert(N->use_empty() && "Temporary MDNode has uses!");
+  assert(!N->getContext().pImpl->MDNodeSet.RemoveNode(N) &&
+         "Deleting a non-temporary uniqued node!");
+  assert(!N->getContext().pImpl->NonUniquedMDNodes.erase(N) &&
+         "Deleting a non-temporary non-uniqued node!");
+  assert((N->getSubclassDataFromValue() & NotUniquedBit) &&
+         "Temporary MDNode does not have NotUniquedBit set!");
+  assert((N->getSubclassDataFromValue() & DestroyFlag) == 0 &&
+         "Temporary MDNode has DestroyFlag set!");
+  LeakDetector::removeGarbageObject(N);
+  N->destroy();
+}
+
 /// getOperand - Return specified operand.
 Value *MDNode::getOperand(unsigned i) const {
   return *getOperandPtr(const_cast<MDNode*>(this), i);
 }
 
 void MDNode::Profile(FoldingSetNodeID &ID) const {
+  // Add all the operand pointers. Note that we don't have to add the
+  // isFunctionLocal bit because that's implied by the operands.
+  // Note that if the operands are later nulled out, the node will be
+  // removed from the uniquing map.
   for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
     ID.AddPointer(getOperand(i));
-  ID.AddBoolean(isFunctionLocal());
 }
 
 void MDNode::setIsNotUniqued() {
@@ -301,7 +330,8 @@
 
   // If we are dropping an argument to null, we choose to not unique the MDNode
   // anymore.  This commonly occurs during destruction, and uniquing these
-  // brings little reuse.
+  // brings little reuse.  Also, this means we don't need to include
+  // isFunctionLocal bits in FoldingSetNodeIDs for MDNodes.
   if (To == 0) {
     setIsNotUniqued();
     return;
@@ -309,21 +339,34 @@
 
   // Now that the node is out of the folding set, get ready to reinsert it.
   // First, check to see if another node with the same operands already exists
-  // in the set.  If it doesn't exist, this returns the position to insert it.
+  // in the set.  If so, then this node is redundant.
   FoldingSetNodeID ID;
   Profile(ID);
   void *InsertPoint;
-  MDNode *N = pImpl->MDNodeSet.FindNodeOrInsertPos(ID, InsertPoint);
-
-  if (N) {
-    N->replaceAllUsesWith(this);
-    N->destroy();
-    N = pImpl->MDNodeSet.FindNodeOrInsertPos(ID, InsertPoint);
-    assert(N == 0 && "shouldn't be in the map now!"); (void)N;
+  if (MDNode *N = pImpl->MDNodeSet.FindNodeOrInsertPos(ID, InsertPoint)) {
+    replaceAllUsesWith(N);
+    destroy();
+    return;
   }
 
   // InsertPoint will have been set by the FindNodeOrInsertPos call.
   pImpl->MDNodeSet.InsertNode(this, InsertPoint);
+
+  // If this MDValue was previously function-local but no longer is, clear
+  // its function-local flag.
+  if (isFunctionLocal() && !isFunctionLocalValue(To)) {
+    bool isStillFunctionLocal = false;
+    for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
+      Value *V = getOperand(i);
+      if (!V) continue;
+      if (isFunctionLocalValue(V)) {
+        isStillFunctionLocal = true;
+        break;
+      }
+    }
+    if (!isStillFunctionLocal)
+      setValueSubclassData(getSubclassDataFromValue() & ~FunctionLocalBit);
+  }
 }
 
 //===----------------------------------------------------------------------===//
@@ -357,6 +400,8 @@
 
 /// addOperand - Add metadata Operand.
 void NamedMDNode::addOperand(MDNode *M) {
+  assert(!M->isFunctionLocal() &&
+         "NamedMDNode operands must not be function-local!");
   getNMDOps(Operands).push_back(TrackingVH<MDNode>(M));
 }
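
A short usage sketch for the new temporary-node API (illustrative; the value fed
to the node is arbitrary): a temporary MDNode stands in for a forward reference
and is released with deleteTemporary() rather than being uniqued or destroyed
directly.

  #include "llvm/LLVMContext.h"
  #include "llvm/Metadata.h"
  #include "llvm/Constants.h"
  #include "llvm/Type.h"
  using namespace llvm;

  void temporaryNodeSketch(LLVMContext &C) {
    Value *Elt = ConstantInt::get(Type::getInt32Ty(C), 0);
    MDNode *Temp = MDNode::getTemporary(C, &Elt, 1);
    // ... hand Temp out as a placeholder, replace its uses with the real node ...
    MDNode::deleteTemporary(Temp);
  }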
 

Modified: llvm/branches/wendling/eh/lib/VMCore/Module.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/VMCore/Module.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/VMCore/Module.cpp (original)
+++ llvm/branches/wendling/eh/lib/VMCore/Module.cpp Tue Oct 26 19:48:03 2010
@@ -58,13 +58,15 @@
 //
 
 Module::Module(StringRef MID, LLVMContext& C)
-  : Context(C), Materializer(NULL), ModuleID(MID), DataLayout("")  {
+  : Context(C), Materializer(NULL), ModuleID(MID) {
   ValSymTab = new ValueSymbolTable();
   TypeSymTab = new TypeSymbolTable();
   NamedMDSymTab = new StringMap<NamedMDNode *>();
+  Context.addModule(this);
 }
 
 Module::~Module() {
+  Context.removeModule(this);
   dropAllReferences();
   GlobalList.clear();
   FunctionList.clear();

Modified: llvm/branches/wendling/eh/lib/VMCore/Pass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/VMCore/Pass.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/VMCore/Pass.cpp (original)
+++ llvm/branches/wendling/eh/lib/VMCore/Pass.cpp Tue Oct 26 19:48:03 2010
@@ -14,36 +14,18 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/Pass.h"
-#include "llvm/PassManager.h"
 #include "llvm/PassRegistry.h"
-#include "llvm/Module.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/StringMap.h"
 #include "llvm/Assembly/PrintModulePass.h"
 #include "llvm/Support/Debug.h"
-#include "llvm/Support/ManagedStatic.h"
 #include "llvm/Support/PassNameParser.h"
 #include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Atomic.h"
-#include "llvm/System/Mutex.h"
-#include "llvm/System/Threading.h"
-#include <algorithm>
-#include <map>
-#include <set>
 using namespace llvm;
 
 //===----------------------------------------------------------------------===//
 // Pass Implementation
 //
 
-Pass::Pass(PassKind K, intptr_t pid) : Resolver(0), PassID(pid), Kind(K) {
-  assert(pid && "pid cannot be 0");
-}
-
-Pass::Pass(PassKind K, const void *pid)
-  : Resolver(0), PassID((intptr_t)pid), Kind(K) {
-  assert(pid && "pid cannot be 0");
-}
+Pass::Pass(PassKind K, char &pid) : Resolver(0), PassID(&pid), Kind(K) { }
 
 // Force out-of-line virtual method.
 Pass::~Pass() { 
@@ -62,8 +44,8 @@
   return PMT_ModulePassManager;
 }
 
-bool Pass::mustPreserveAnalysisID(const PassInfo *AnalysisID) const {
-  return Resolver->getAnalysisIfAvailable(AnalysisID, true) != 0;
+bool Pass::mustPreserveAnalysisID(char &AID) const {
+  return Resolver->getAnalysisIfAvailable(&AID, true) != 0;
 }
 
 // dumpPassStructure - Implement the -debug-passes=Structure option
@@ -76,7 +58,9 @@
 /// Registration templates, but can be overloaded directly.
 ///
 const char *Pass::getPassName() const {
-  if (const PassInfo *PI = getPassInfo())
+  AnalysisID AID =  getPassID();
+  const PassInfo *PI = PassRegistry::getPassRegistry()->getPassInfo(AID);
+  if (PI)
     return PI->getPassName();
   return "Unnamed pass: implement Pass::getPassName()";
 }
@@ -102,7 +86,7 @@
   // By default, don't do anything.
 }
 
-void *Pass::getAdjustedAnalysisPointer(const PassInfo *) {
+void *Pass::getAdjustedAnalysisPointer(AnalysisID AID) {
   return this;
 }
 
@@ -151,30 +135,6 @@
   return createPrintFunctionPass(Banner, &O);
 }
 
-// run - On a module, we run this pass by initializing, runOnFunction'ing once
-// for every function in the module, then by finalizing.
-//
-bool FunctionPass::runOnModule(Module &M) {
-  bool Changed = doInitialization(M);
-
-  for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
-    if (!I->isDeclaration())      // Passes are not run on external functions!
-    Changed |= runOnFunction(*I);
-
-  return Changed | doFinalization(M);
-}
-
-// run - On a function, we simply initialize, run the function, then finalize.
-//
-bool FunctionPass::run(Function &F) {
-  // Passes are not run on external functions!
-  if (F.isDeclaration()) return false;
-
-  bool Changed = doInitialization(*F.getParent());
-  Changed |= runOnFunction(F);
-  return Changed | doFinalization(*F.getParent());
-}
-
 bool FunctionPass::doInitialization(Module &) {
   // By default, don't do anything.
   return false;
@@ -200,16 +160,6 @@
   return 0;
 }
 
-// To run this pass on a function, we simply call runOnBasicBlock once for each
-// function.
-//
-bool BasicBlockPass::runOnFunction(Function &F) {
-  bool Changed = doInitialization(F);
-  for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I)
-    Changed |= runOnBasicBlock(*I);
-  return Changed | doFinalization(F);
-}
-
 bool BasicBlockPass::doInitialization(Module &) {
   // By default, don't do anything.
   return false;
@@ -234,13 +184,7 @@
   return PMT_BasicBlockPassManager; 
 }
 
-// getPassInfo - Return the PassInfo data structure that corresponds to this
-// pass...
-const PassInfo *Pass::getPassInfo() const {
-  return lookupPassInfo(PassID);
-}
-
-const PassInfo *Pass::lookupPassInfo(intptr_t TI) {
+const PassInfo *Pass::lookupPassInfo(const void *TI) {
   return PassRegistry::getPassRegistry()->getPassInfo(TI);
 }
 
@@ -262,14 +206,13 @@
 
 // RegisterAGBase implementation
 //
-RegisterAGBase::RegisterAGBase(const char *Name, intptr_t InterfaceID,
-                               intptr_t PassID, bool isDefault)
+RegisterAGBase::RegisterAGBase(const char *Name, const void *InterfaceID,
+                               const void *PassID, bool isDefault)
     : PassInfo(Name, InterfaceID) {
   PassRegistry::getPassRegistry()->registerAnalysisGroup(InterfaceID, PassID,
                                                          *this, isDefault);
 }
 
-
 //===----------------------------------------------------------------------===//
 // PassRegistrationListener implementation
 //
@@ -306,7 +249,7 @@
     
     void passEnumerate(const PassInfo *P) {
       if (P->isCFGOnlyPass())
-        CFGOnlyList.push_back(P);
+        CFGOnlyList.push_back(P->getTypeInfo());
     }
   };
 }
@@ -326,15 +269,25 @@
   GetCFGOnlyPasses(Preserved).enumeratePasses();
 }
 
-AnalysisUsage &AnalysisUsage::addRequiredID(AnalysisID ID) {
-  assert(ID && "Pass class not registered!");
-  Required.push_back(ID);
+AnalysisUsage &AnalysisUsage::addPreserved(StringRef Arg) {
+  const PassInfo *PI = Pass::lookupPassInfo(Arg);
+  // If the pass exists, preserve it. Otherwise silently do nothing.
+  if (PI) Preserved.push_back(PI->getTypeInfo());
   return *this;
 }
 
-AnalysisUsage &AnalysisUsage::addRequiredTransitiveID(AnalysisID ID) {
-  assert(ID && "Pass class not registered!");
+AnalysisUsage &AnalysisUsage::addRequiredID(const void *ID) {
   Required.push_back(ID);
-  RequiredTransitive.push_back(ID);
+  return *this;
+}
+
+AnalysisUsage &AnalysisUsage::addRequiredID(char &ID) {
+  Required.push_back(&ID);
+  return *this;
+}
+
+AnalysisUsage &AnalysisUsage::addRequiredTransitiveID(char &ID) {
+  Required.push_back(&ID);
+  RequiredTransitive.push_back(&ID);
   return *this;
 }
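
To illustrate the user-visible side of the PassID rework (the pass below is
hypothetical; "domfrontier" is just one registered argument string): passes now
hand their static char ID to the base class by reference, and the new
string-based addPreserved() lets a pass name a preserved analysis without
including its header. Unknown names are silently ignored, as noted above.

  #include "llvm/Pass.h"
  using namespace llvm;

  namespace {
    struct NoOpPass : public FunctionPass {
      static char ID;
      NoOpPass() : FunctionPass(ID) {}      // was FunctionPass(&ID) before this patch
      virtual void getAnalysisUsage(AnalysisUsage &AU) const {
        AU.addPreserved("domfrontier");     // looked up via Pass::lookupPassInfo(Arg)
        AU.setPreservesCFG();
      }
      virtual bool runOnFunction(Function &) { return false; }
    };
  }
  char NoOpPass::ID = 0;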

Modified: llvm/branches/wendling/eh/lib/VMCore/PassManager.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/VMCore/PassManager.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/VMCore/PassManager.cpp (original)
+++ llvm/branches/wendling/eh/lib/VMCore/PassManager.cpp Tue Oct 26 19:48:03 2010
@@ -7,12 +7,13 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// This file implements the LLVM Pass Manager infrastructure. 
+// This file implements the LLVM Pass Manager infrastructure.
 //
 //===----------------------------------------------------------------------===//
 
 
 #include "llvm/PassManagers.h"
+#include "llvm/PassManager.h"
 #include "llvm/Assembly/PrintModulePass.h"
 #include "llvm/Assembly/Writer.h"
 #include "llvm/Support/CommandLine.h"
@@ -24,8 +25,6 @@
 #include "llvm/Support/PassNameParser.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/System/Mutex.h"
-#include "llvm/System/Threading.h"
-#include "llvm-c/Core.h"
 #include <algorithm>
 #include <cstdio>
 #include <map>
@@ -82,30 +81,32 @@
 /// This is a helper to determine whether to print IR before or
 /// after a pass.
 
-static bool ShouldPrintBeforeOrAfterPass(Pass *P,
+static bool ShouldPrintBeforeOrAfterPass(const void *PassID,
                                          PassOptionList &PassesToPrint) {
-  for (unsigned i = 0, ie = PassesToPrint.size(); i < ie; ++i) {
-    const llvm::PassInfo *PassInf = PassesToPrint[i];
-    if (PassInf && P->getPassInfo())
-      if (PassInf->getPassArgument() ==
-          P->getPassInfo()->getPassArgument()) {
-        return true;
-      }
+  if (const llvm::PassInfo *PI =
+      PassRegistry::getPassRegistry()->getPassInfo(PassID)) {
+    for (unsigned i = 0, ie = PassesToPrint.size(); i < ie; ++i) {
+      const llvm::PassInfo *PassInf = PassesToPrint[i];
+      if (PassInf)
+        if (PassInf->getPassArgument() == PI->getPassArgument()) {
+          return true;
+        }
+    }
   }
   return false;
 }
-  
+
 
 /// This is a utility to check whether a pass should have IR dumped
 /// before it.
-static bool ShouldPrintBeforePass(Pass *P) {
-  return PrintBeforeAll || ShouldPrintBeforeOrAfterPass(P, PrintBefore);
+static bool ShouldPrintBeforePass(const void *PassID) {
+  return PrintBeforeAll || ShouldPrintBeforeOrAfterPass(PassID, PrintBefore);
 }
 
 /// This is a utility to check whether a pass should have IR dumped
 /// after it.
-static bool ShouldPrintAfterPass(Pass *P) {
-  return PrintAfterAll || ShouldPrintBeforeOrAfterPass(P, PrintAfter);
+static bool ShouldPrintAfterPass(const void *PassID) {
+  return PrintAfterAll || ShouldPrintBeforeOrAfterPass(PassID, PrintAfter);
 }
 
 } // End of llvm namespace
@@ -124,9 +125,9 @@
     OS << "Releasing pass '";
   else
     OS << "Running pass '";
-  
+
   OS << P->getPassName() << "'";
-  
+
   if (M) {
     OS << " on module '" << M->getModuleIdentifier() << "'.\n";
     return;
@@ -162,8 +163,8 @@
 
 public:
   static char ID;
-  explicit BBPassManager(int Depth) 
-    : PMDataManager(Depth), FunctionPass(&ID) {}
+  explicit BBPassManager(int Depth)
+    : PMDataManager(Depth), FunctionPass(ID) {}
 
   /// Execute all of the passes scheduled for execution.  Keep track of
   /// whether any of the passes modifies the function, and if so, return true.
@@ -202,8 +203,8 @@
     return BP;
   }
 
-  virtual PassManagerType getPassManagerType() const { 
-    return PMT_BasicBlockPassManager; 
+  virtual PassManagerType getPassManagerType() const {
+    return PMT_BasicBlockPassManager;
   }
 };
 
@@ -223,9 +224,9 @@
   bool wasRun;
 public:
   static char ID;
-  explicit FunctionPassManagerImpl(int Depth) : 
-    Pass(PT_PassManager, &ID), PMDataManager(Depth), 
-    PMTopLevelManager(TLM_Function), wasRun(false) { }
+  explicit FunctionPassManagerImpl(int Depth) :
+    Pass(PT_PassManager, ID), PMDataManager(Depth),
+    PMTopLevelManager(new FPPassManager(1)), wasRun(false) {}
 
   /// add - Add a pass to the queue of passes to run.  This passes ownership of
   /// the Pass to the PassManager.  When the PassManager is destroyed, the pass
@@ -234,8 +235,8 @@
   void add(Pass *P) {
     schedulePass(P);
   }
- 
-  /// createPrinterPass - Get a function printer pass. 
+
+  /// createPrinterPass - Get a function printer pass.
   Pass *createPrinterPass(raw_ostream &O, const std::string &Banner) const {
     return createPrintFunctionPass(Banner, &O);
   }
@@ -251,12 +252,12 @@
   /// doInitialization - Run all of the initializers for the function passes.
   ///
   bool doInitialization(Module &M);
-  
+
   /// doFinalization - Run all of the finalizers for the function passes.
   ///
   bool doFinalization(Module &M);
 
-                                  
+
   virtual PMDataManager *getAsPMDataManager() { return this; }
   virtual Pass *getAsPass() { return this; }
 
@@ -265,7 +266,7 @@
     Info.setPreservesAll();
   }
 
-  inline void addTopLevelPass(Pass *P) {
+  void addTopLevelPass(Pass *P) {
     if (ImmutablePass *IP = P->getAsImmutablePass()) {
       // P is an immutable pass and it will be managed by this
       // top level manager. Set up analysis resolver to connect them.
@@ -288,6 +289,7 @@
 };
 
 char FunctionPassManagerImpl::ID = 0;
+
 //===----------------------------------------------------------------------===//
 // MPPassManager
 //
@@ -298,11 +300,11 @@
 public:
   static char ID;
   explicit MPPassManager(int Depth) :
-    Pass(PT_PassManager, &ID), PMDataManager(Depth) { }
+    Pass(PT_PassManager, ID), PMDataManager(Depth) { }
 
   // Delete on the fly managers.
   virtual ~MPPassManager() {
-    for (std::map<Pass *, FunctionPassManagerImpl *>::iterator 
+    for (std::map<Pass *, FunctionPassManagerImpl *>::iterator
            I = OnTheFlyManagers.begin(), E = OnTheFlyManagers.end();
          I != E; ++I) {
       FunctionPassManagerImpl *FPP = I->second;
@@ -310,7 +312,7 @@
     }
   }
 
-  /// createPrinterPass - Get a module printer pass. 
+  /// createPrinterPass - Get a module printer pass.
   Pass *createPrinterPass(raw_ostream &O, const std::string &Banner) const {
     return createPrintModulePass(&O, false, Banner);
   }
@@ -329,10 +331,10 @@
   /// through getAnalysis interface.
   virtual void addLowerLevelRequiredPass(Pass *P, Pass *RequiredPass);
 
-  /// Return function pass corresponding to PassInfo PI, that is 
+  /// Return function pass corresponding to PassInfo PI, that is
   /// required by module pass MP. Instantiate analysis pass, by using
   /// its runOnFunction() for function F.
-  virtual Pass* getOnTheFlyPass(Pass *MP, const PassInfo *PI, Function &F);
+  virtual Pass* getOnTheFlyPass(Pass *MP, AnalysisID PI, Function &F);
 
   virtual const char *getPassName() const {
     return "Module Pass Manager";
@@ -360,8 +362,8 @@
     return static_cast<ModulePass *>(PassVector[N]);
   }
 
-  virtual PassManagerType getPassManagerType() const { 
-    return PMT_ModulePassManager; 
+  virtual PassManagerType getPassManagerType() const {
+    return PMT_ModulePassManager;
   }
 
  private:
@@ -383,8 +385,8 @@
 public:
   static char ID;
   explicit PassManagerImpl(int Depth) :
-    Pass(PT_PassManager, &ID), PMDataManager(Depth),
-                               PMTopLevelManager(TLM_Pass) { }
+    Pass(PT_PassManager, ID), PMDataManager(Depth),
+                              PMTopLevelManager(new MPPassManager(1)) {}
 
   /// add - Add a pass to the queue of passes to run.  This passes ownership of
   /// the Pass to the PassManager.  When the PassManager is destroyed, the pass
@@ -393,8 +395,8 @@
   void add(Pass *P) {
     schedulePass(P);
   }
- 
-  /// createPrinterPass - Get a module printer pass. 
+
+  /// createPrinterPass - Get a module printer pass.
   Pass *createPrinterPass(raw_ostream &O, const std::string &Banner) const {
     return createPrintModulePass(&O, false, Banner);
   }
@@ -408,7 +410,7 @@
     Info.setPreservesAll();
   }
 
-  inline void addTopLevelPass(Pass *P) {
+  void addTopLevelPass(Pass *P) {
     if (ImmutablePass *IP = P->getAsImmutablePass()) {
       // P is an immutable pass and it will be managed by this
       // top level manager. Set up analysis resolver to connect them.
@@ -451,7 +453,7 @@
 public:
   // Use 'create' member to get this.
   TimingInfo() : TG("... Pass execution timing report ...") {}
-  
+
   // TimingDtor - Print out information about timing information
   ~TimingInfo() {
     // Delete all of the timers, which accumulate their info into the
@@ -469,7 +471,7 @@
 
   /// getPassTimer - Return the timer for the specified pass if it exists.
   Timer *getPassTimer(Pass *P) {
-    if (P->getAsPMDataManager()) 
+    if (P->getAsPMDataManager())
       return 0;
 
     sys::SmartScopedLock<true> Lock(*TimingInfoMutex);
@@ -488,28 +490,21 @@
 // PMTopLevelManager implementation
 
 /// Initialize top level manager. Create first pass manager.
-PMTopLevelManager::PMTopLevelManager(enum TopLevelManagerType t) {
-  if (t == TLM_Pass) {
-    MPPassManager *MPP = new MPPassManager(1);
-    MPP->setTopLevelManager(this);
-    addPassManager(MPP);
-    activeStack.push(MPP);
-  } else if (t == TLM_Function) {
-    FPPassManager *FPP = new FPPassManager(1);
-    FPP->setTopLevelManager(this);
-    addPassManager(FPP);
-    activeStack.push(FPP);
-  } 
+PMTopLevelManager::PMTopLevelManager(PMDataManager *PMDM) {
+  PMDM->setTopLevelManager(this);
+  addPassManager(PMDM);
+  activeStack.push(PMDM);
 }
 
 /// Set pass P as the last user of the given analysis passes.
-void PMTopLevelManager::setLastUser(SmallVector<Pass *, 12> &AnalysisPasses, 
-                                    Pass *P) {
-  for (SmallVector<Pass *, 12>::iterator I = AnalysisPasses.begin(),
+void
+PMTopLevelManager::setLastUser(const SmallVectorImpl<Pass *> &AnalysisPasses,
+                               Pass *P) {
+  for (SmallVectorImpl<Pass *>::const_iterator I = AnalysisPasses.begin(),
          E = AnalysisPasses.end(); I != E; ++I) {
     Pass *AP = *I;
     LastUser[AP] = P;
-    
+
     if (P == AP)
       continue;
 
@@ -526,9 +521,9 @@
 }
 
 /// Collect passes whose last user is P
-void PMTopLevelManager::collectLastUses(SmallVector<Pass *, 12> &LastUses,
+void PMTopLevelManager::collectLastUses(SmallVectorImpl<Pass *> &LastUses,
                                         Pass *P) {
-  DenseMap<Pass *, SmallPtrSet<Pass *, 8> >::iterator DMI = 
+  DenseMap<Pass *, SmallPtrSet<Pass *, 8> >::iterator DMI =
     InversedLastUser.find(P);
   if (DMI == InversedLastUser.end())
     return;
@@ -544,7 +539,7 @@
 AnalysisUsage *PMTopLevelManager::findAnalysisUsage(Pass *P) {
   AnalysisUsage *AnUsage = NULL;
   DenseMap<Pass *, AnalysisUsage *>::iterator DMI = AnUsageMap.find(P);
-  if (DMI != AnUsageMap.end()) 
+  if (DMI != AnUsageMap.end())
     AnUsage = DMI->second;
   else {
     AnUsage = new AnalysisUsage();
@@ -568,8 +563,9 @@
   // If P is an analysis pass and it is available then do not
   // generate the analysis again. Stale analysis info should not be
   // available at this point.
-  if (P->getPassInfo() &&
-      P->getPassInfo()->isAnalysis() && findAnalysisPass(P->getPassInfo())) {
+  const PassInfo *PI =
+    PassRegistry::getPassRegistry()->getPassInfo(P->getPassID());
+  if (PI && PI->isAnalysis() && findAnalysisPass(P->getPassID())) {
     delete P;
     return;
   }
@@ -579,14 +575,15 @@
   bool checkAnalysis = true;
   while (checkAnalysis) {
     checkAnalysis = false;
-  
+
     const AnalysisUsage::VectorType &RequiredSet = AnUsage->getRequiredSet();
     for (AnalysisUsage::VectorType::const_iterator I = RequiredSet.begin(),
            E = RequiredSet.end(); I != E; ++I) {
-      
+
       Pass *AnalysisPass = findAnalysisPass(*I);
       if (!AnalysisPass) {
-        AnalysisPass = (*I)->createPass();
+        const PassInfo *PI = PassRegistry::getPassRegistry()->getPassInfo(*I);
+        AnalysisPass = PI->createPass();
         if (P->getPotentialPassManagerType () ==
             AnalysisPass->getPotentialPassManagerType())
           // Schedule analysis pass that is managed by the same pass manager.
@@ -595,12 +592,12 @@
                  AnalysisPass->getPotentialPassManagerType()) {
           // Schedule analysis pass that is managed by a new manager.
           schedulePass(AnalysisPass);
-          // Recheck analysis passes to ensure that required analysises that
+          // Recheck analysis passes to ensure that required analyses that
           // are already checked are still available.
           checkAnalysis = true;
         }
         else
-          // Do not schedule this analysis. Lower level analsyis 
+          // Do not schedule this analysis. Lower level analysis
           // passes are run on the fly.
           delete AnalysisPass;
       }
@@ -616,36 +613,40 @@
 /// then return NULL.
 Pass *PMTopLevelManager::findAnalysisPass(AnalysisID AID) {
 
-  Pass *P = NULL;
   // Check pass managers
-  for (SmallVector<PMDataManager *, 8>::iterator I = PassManagers.begin(),
-         E = PassManagers.end(); P == NULL && I != E; ++I) {
-    PMDataManager *PMD = *I;
-    P = PMD->findAnalysisPass(AID, false);
-  }
+  for (SmallVectorImpl<PMDataManager *>::iterator I = PassManagers.begin(),
+         E = PassManagers.end(); I != E; ++I)
+    if (Pass *P = (*I)->findAnalysisPass(AID, false))
+      return P;
 
   // Check other pass managers
-  for (SmallVector<PMDataManager *, 8>::iterator
+  for (SmallVectorImpl<PMDataManager *>::iterator
          I = IndirectPassManagers.begin(),
-         E = IndirectPassManagers.end(); P == NULL && I != E; ++I)
-    P = (*I)->findAnalysisPass(AID, false);
+         E = IndirectPassManagers.end(); I != E; ++I)
+    if (Pass *P = (*I)->findAnalysisPass(AID, false))
+      return P;
 
-  for (SmallVector<ImmutablePass *, 8>::iterator I = ImmutablePasses.begin(),
-         E = ImmutablePasses.end(); P == NULL && I != E; ++I) {
-    const PassInfo *PI = (*I)->getPassInfo();
+  // Check the immutable passes. Iterate in reverse order so that we find
+  // the most recently registered passes first.
+  for (SmallVector<ImmutablePass *, 8>::reverse_iterator I =
+       ImmutablePasses.rbegin(), E = ImmutablePasses.rend(); I != E; ++I) {
+    AnalysisID PI = (*I)->getPassID();
     if (PI == AID)
-      P = *I;
+      return *I;
 
     // If Pass not found then check the interfaces implemented by Immutable Pass
-    if (!P) {
-      const std::vector<const PassInfo*> &ImmPI =
-        PI->getInterfacesImplemented();
-      if (std::find(ImmPI.begin(), ImmPI.end(), AID) != ImmPI.end())
-        P = *I;
+    const PassInfo *PassInf =
+      PassRegistry::getPassRegistry()->getPassInfo(PI);
+    const std::vector<const PassInfo*> &ImmPI =
+      PassInf->getInterfacesImplemented();
+    for (std::vector<const PassInfo*>::const_iterator II = ImmPI.begin(),
+         EE = ImmPI.end(); II != EE; ++II) {
+      if ((*II)->getTypeInfo() == AID)
+        return *I;
     }
   }
 
-  return P;
+  return 0;
 }
 
 // Print passes managed by this top level manager.
@@ -658,7 +659,7 @@
   for (unsigned i = 0, e = ImmutablePasses.size(); i != e; ++i) {
     ImmutablePasses[i]->dumpPassStructure(0);
   }
-  
+
   // Every class that derives from PMDataManager also derives from Pass
   // (sometimes indirectly), but there's no inheritance relationship
   // between PMDataManager and Pass, so we have to getAsPass to get
@@ -681,18 +682,19 @@
 }
 
 void PMTopLevelManager::initializeAllAnalysisInfo() {
-  for (SmallVector<PMDataManager *, 8>::iterator I = PassManagers.begin(),
+  for (SmallVectorImpl<PMDataManager *>::iterator I = PassManagers.begin(),
          E = PassManagers.end(); I != E; ++I)
     (*I)->initializeAnalysisInfo();
-  
+
   // Initialize other pass managers
-  for (SmallVector<PMDataManager *, 8>::iterator I = IndirectPassManagers.begin(),
-         E = IndirectPassManagers.end(); I != E; ++I)
+  for (SmallVectorImpl<PMDataManager *>::iterator
+       I = IndirectPassManagers.begin(), E = IndirectPassManagers.end();
+       I != E; ++I)
     (*I)->initializeAnalysisInfo();
 
   for (DenseMap<Pass *, Pass *>::iterator DMI = LastUser.begin(),
         DME = LastUser.end(); DMI != DME; ++DMI) {
-    DenseMap<Pass *, SmallPtrSet<Pass *, 8> >::iterator InvDMI = 
+    DenseMap<Pass *, SmallPtrSet<Pass *, 8> >::iterator InvDMI =
       InversedLastUser.find(DMI->second);
     if (InvDMI != InversedLastUser.end()) {
       SmallPtrSet<Pass *, 8> &L = InvDMI->second;
@@ -706,11 +708,11 @@
 
 /// Destructor
 PMTopLevelManager::~PMTopLevelManager() {
-  for (SmallVector<PMDataManager *, 8>::iterator I = PassManagers.begin(),
+  for (SmallVectorImpl<PMDataManager *>::iterator I = PassManagers.begin(),
          E = PassManagers.end(); I != E; ++I)
     delete *I;
-  
-  for (SmallVector<ImmutablePass *, 8>::iterator
+
+  for (SmallVectorImpl<ImmutablePass *>::iterator
          I = ImmutablePasses.begin(), E = ImmutablePasses.end(); I != E; ++I)
     delete *I;
 
@@ -724,16 +726,19 @@
 
 /// Augment AvailableAnalysis by adding analysis made available by pass P.
 void PMDataManager::recordAvailableAnalysis(Pass *P) {
-  const PassInfo *PI = P->getPassInfo();
-  if (PI == 0) return;
-  
+  AnalysisID PI = P->getPassID();
+
   AvailableAnalysis[PI] = P;
 
-  //This pass is the current implementation of all of the interfaces it
-  //implements as well.
-  const std::vector<const PassInfo*> &II = PI->getInterfacesImplemented();
+  assert(!AvailableAnalysis.empty());
+
+  // This pass is the current implementation of all of the interfaces it
+  // implements as well.
+  const PassInfo *PInf = PassRegistry::getPassRegistry()->getPassInfo(PI);
+  if (PInf == 0) return;
+  const std::vector<const PassInfo*> &II = PInf->getInterfacesImplemented();
   for (unsigned i = 0, e = II.size(); i != e; ++i)
-    AvailableAnalysis[II[i]] = P;
+    AvailableAnalysis[II[i]->getTypeInfo()] = P;
 }
 
 // Return true if P preserves high level analysis used by other
@@ -742,18 +747,18 @@
   AnalysisUsage *AnUsage = TPM->findAnalysisUsage(P);
   if (AnUsage->getPreservesAll())
     return true;
-  
+
   const AnalysisUsage::VectorType &PreservedSet = AnUsage->getPreservedSet();
-  for (SmallVector<Pass *, 8>::iterator I = HigherLevelAnalysis.begin(),
+  for (SmallVectorImpl<Pass *>::iterator I = HigherLevelAnalysis.begin(),
          E = HigherLevelAnalysis.end(); I  != E; ++I) {
     Pass *P1 = *I;
     if (P1->getAsImmutablePass() == 0 &&
         std::find(PreservedSet.begin(), PreservedSet.end(),
-                  P1->getPassInfo()) == 
+                  P1->getPassID()) ==
            PreservedSet.end())
       return false;
   }
-  
+
   return true;
 }
 
@@ -788,7 +793,7 @@
          E = AvailableAnalysis.end(); I != E; ) {
     std::map<AnalysisID, Pass*>::iterator Info = I++;
     if (Info->second->getAsImmutablePass() == 0 &&
-        std::find(PreservedSet.begin(), PreservedSet.end(), Info->first) == 
+        std::find(PreservedSet.begin(), PreservedSet.end(), Info->first) ==
         PreservedSet.end()) {
       // Remove this analysis
       if (PassDebugging >= Details) {
@@ -807,12 +812,12 @@
     if (!InheritedAnalysis[Index])
       continue;
 
-    for (std::map<AnalysisID, Pass*>::iterator 
+    for (std::map<AnalysisID, Pass*>::iterator
            I = InheritedAnalysis[Index]->begin(),
            E = InheritedAnalysis[Index]->end(); I != E; ) {
       std::map<AnalysisID, Pass *>::iterator Info = I++;
       if (Info->second->getAsImmutablePass() == 0 &&
-          std::find(PreservedSet.begin(), PreservedSet.end(), Info->first) == 
+          std::find(PreservedSet.begin(), PreservedSet.end(), Info->first) ==
              PreservedSet.end()) {
         // Remove this analysis
         if (PassDebugging >= Details) {
@@ -844,7 +849,7 @@
     dbgs() << " Free these instances\n";
   }
 
-  for (SmallVector<Pass *, 12>::iterator I = DeadPasses.begin(),
+  for (SmallVectorImpl<Pass *>::iterator I = DeadPasses.begin(),
          E = DeadPasses.end(); I != E; ++I)
     freePass(*I, Msg, DBG_STR);
 }
@@ -861,23 +866,24 @@
     P->releaseMemory();
   }
 
-  if (const PassInfo *PI = P->getPassInfo()) {
+  AnalysisID PI = P->getPassID();
+  if (const PassInfo *PInf = PassRegistry::getPassRegistry()->getPassInfo(PI)) {
     // Remove the pass itself (if it is not already removed).
     AvailableAnalysis.erase(PI);
 
     // Remove all interfaces this pass implements, for which it is also
     // listed as the available implementation.
-    const std::vector<const PassInfo*> &II = PI->getInterfacesImplemented();
+    const std::vector<const PassInfo*> &II = PInf->getInterfacesImplemented();
     for (unsigned i = 0, e = II.size(); i != e; ++i) {
       std::map<AnalysisID, Pass*>::iterator Pos =
-        AvailableAnalysis.find(II[i]);
+        AvailableAnalysis.find(II[i]->getTypeInfo());
       if (Pos != AvailableAnalysis.end() && Pos->second == P)
         AvailableAnalysis.erase(Pos);
     }
   }
 }
 
-/// Add pass P into the PassVector. Update 
+/// Add pass P into the PassVector. Update
 /// AvailableAnalysis appropriately if ProcessAnalysis is true.
 void PMDataManager::add(Pass *P, bool ProcessAnalysis) {
   // This manager is going to manage pass P. Set up analysis resolver
@@ -902,9 +908,9 @@
 
   unsigned PDepth = this->getDepth();
 
-  collectRequiredAnalysis(RequiredPasses, 
+  collectRequiredAnalysis(RequiredPasses,
                           ReqAnalysisNotAvailable, P);
-  for (SmallVector<Pass *, 8>::iterator I = RequiredPasses.begin(),
+  for (SmallVectorImpl<Pass *>::iterator I = RequiredPasses.begin(),
          E = RequiredPasses.end(); I != E; ++I) {
     Pass *PRequired = *I;
     unsigned RDepth = 0;
@@ -920,7 +926,7 @@
       TransferLastUses.push_back(PRequired);
       // Keep track of higher level analysis used by this manager.
       HigherLevelAnalysis.push_back(PRequired);
-    } else 
+    } else
       llvm_unreachable("Unable to accomodate Required Pass");
   }
 
@@ -937,11 +943,12 @@
     TransferLastUses.clear();
   }
 
-  // Now, take care of required analysises that are not available.
-  for (SmallVector<AnalysisID, 8>::iterator 
-         I = ReqAnalysisNotAvailable.begin(), 
+  // Now, take care of required analyses that are not available.
+  for (SmallVectorImpl<AnalysisID>::iterator
+         I = ReqAnalysisNotAvailable.begin(),
          E = ReqAnalysisNotAvailable.end() ;I != E; ++I) {
-    Pass *AnalysisPass = (*I)->createPass();
+    const PassInfo *PI = PassRegistry::getPassRegistry()->getPassInfo(*I);
+    Pass *AnalysisPass = PI->createPass();
     this->addLowerLevelRequiredPass(P, AnalysisPass);
   }
 
@@ -958,15 +965,15 @@
 /// Populate RP with analysis passes that are required by
 /// pass P and are available. Populate RP_NotAvail with analysis
 /// passes that are required by pass P but are not available.
-void PMDataManager::collectRequiredAnalysis(SmallVector<Pass *, 8>&RP,
-                                       SmallVector<AnalysisID, 8> &RP_NotAvail,
+void PMDataManager::collectRequiredAnalysis(SmallVectorImpl<Pass *> &RP,
+                                       SmallVectorImpl<AnalysisID> &RP_NotAvail,
                                             Pass *P) {
   AnalysisUsage *AnUsage = TPM->findAnalysisUsage(P);
   const AnalysisUsage::VectorType &RequiredSet = AnUsage->getRequiredSet();
-  for (AnalysisUsage::VectorType::const_iterator 
+  for (AnalysisUsage::VectorType::const_iterator
          I = RequiredSet.begin(), E = RequiredSet.end(); I != E; ++I) {
     if (Pass *AnalysisPass = findAnalysisPass(*I, true))
-      RP.push_back(AnalysisPass);   
+      RP.push_back(AnalysisPass);
     else
       RP_NotAvail.push_back(*I);
   }
@@ -975,7 +982,7 @@
   for (AnalysisUsage::VectorType::const_iterator I = IDs.begin(),
          E = IDs.end(); I != E; ++I) {
     if (Pass *AnalysisPass = findAnalysisPass(*I, true))
-      RP.push_back(AnalysisPass);   
+      RP.push_back(AnalysisPass);
     else
       RP_NotAvail.push_back(*I);
   }
@@ -1016,7 +1023,7 @@
   // Search Parents through TopLevelManager
   if (SearchParent)
     return TPM->findAnalysisPass(AID);
-  
+
   return NULL;
 }
 
@@ -1030,8 +1037,8 @@
     return;
 
   TPM->collectLastUses(LUses, P);
-  
-  for (SmallVector<Pass *, 12>::iterator I = LUses.begin(),
+
+  for (SmallVectorImpl<Pass *>::iterator I = LUses.begin(),
          E = LUses.end(); I != E; ++I) {
     llvm::dbgs() << "--" << std::string(Offset*2, ' ');
     (*I)->dumpPassStructure(0);
@@ -1039,12 +1046,13 @@
 }
 
 void PMDataManager::dumpPassArguments() const {
-  for (SmallVector<Pass *, 8>::const_iterator I = PassVector.begin(),
+  for (SmallVectorImpl<Pass *>::const_iterator I = PassVector.begin(),
         E = PassVector.end(); I != E; ++I) {
     if (PMDataManager *PMD = (*I)->getAsPMDataManager())
       PMD->dumpPassArguments();
     else
-      if (const PassInfo *PI = (*I)->getPassInfo())
+      if (const PassInfo *PI =
+            PassRegistry::getPassRegistry()->getPassInfo((*I)->getPassID()))
         if (!PI->isAnalysisGroup())
           dbgs() << " -" << PI->getPassArgument();
   }
@@ -1079,6 +1087,9 @@
   case ON_MODULE_MSG:
     dbgs() << "' on Module '"  << Msg << "'...\n";
     break;
+  case ON_REGION_MSG:
+    dbgs() << "' on Region '"  << Msg << "'...\n";
+    break;
   case ON_LOOP_MSG:
     dbgs() << "' on Loop '" << Msg << "'...\n";
     break;
@@ -1093,7 +1104,7 @@
 void PMDataManager::dumpRequiredSet(const Pass *P) const {
   if (PassDebugging < Details)
     return;
-    
+
   AnalysisUsage analysisUsage;
   P->getAnalysisUsage(analysisUsage);
   dumpAnalysisUsage("Required", P, analysisUsage.getRequiredSet());
@@ -1102,7 +1113,7 @@
 void PMDataManager::dumpPreservedSet(const Pass *P) const {
   if (PassDebugging < Details)
     return;
-    
+
   AnalysisUsage analysisUsage;
   P->getAnalysisUsage(analysisUsage);
   dumpAnalysisUsage("Preserved", P, analysisUsage.getPreservedSet());
@@ -1116,7 +1127,8 @@
   dbgs() << (void*)P << std::string(getDepth()*2+3, ' ') << Msg << " Analyses:";
   for (unsigned i = 0; i != Set.size(); ++i) {
     if (i) dbgs() << ',';
-    dbgs() << ' ' << Set[i]->getPassName();
+    const PassInfo *PInf = PassRegistry::getPassRegistry()->getPassInfo(Set[i]);
+    dbgs() << ' ' << PInf->getPassName();
   }
   dbgs() << '\n';
 }
@@ -1131,14 +1143,14 @@
     TPM->dumpPasses();
   }
 
-  // Module Level pass may required Function Level analysis info 
-  // (e.g. dominator info). Pass manager uses on the fly function pass manager 
-  // to provide this on demand. In that case, in Pass manager terminology, 
+  // Module Level pass may require Function Level analysis info
+  // (e.g. dominator info). Pass manager uses on the fly function pass manager
+  // to provide this on demand. In that case, in Pass manager terminology,
   // module level pass is requiring lower level analysis info managed by
   // lower level pass manager.
 
   // When Pass manager is not able to order required analysis info, Pass manager
-  // checks whether any lower level manager will be able to provide this 
+  // checks whether any lower level manager will be able to provide this
   // analysis info on demand or not.
 #ifndef NDEBUG
   dbgs() << "Unable to schedule '" << RequiredPass->getPassName();
@@ -1147,14 +1159,14 @@
   llvm_unreachable("Unable to schedule pass");
 }
 
-Pass *PMDataManager::getOnTheFlyPass(Pass *P, const PassInfo *PI, Function &F) {
+Pass *PMDataManager::getOnTheFlyPass(Pass *P, AnalysisID PI, Function &F) {
   assert(0 && "Unable to find on the fly pass");
   return NULL;
 }
 
 // Destructor
 PMDataManager::~PMDataManager() {
-  for (SmallVector<Pass *, 8>::iterator I = PassVector.begin(),
+  for (SmallVectorImpl<Pass *>::iterator I = PassVector.begin(),
          E = PassVector.end(); I != E; ++I)
     delete *I;
 }
@@ -1166,7 +1178,7 @@
   return PM.findAnalysisPass(ID, dir);
 }
 
-Pass *AnalysisResolver::findImplPass(Pass *P, const PassInfo *AnalysisPI, 
+Pass *AnalysisResolver::findImplPass(Pass *P, AnalysisID AnalysisPI,
                                      Function &F) {
   return PM.getOnTheFlyPass(P, AnalysisPI, F);
 }
@@ -1174,8 +1186,8 @@
 //===----------------------------------------------------------------------===//
 // BBPassManager implementation
 
-/// Execute all of the passes scheduled for execution by invoking 
-/// runOnBasicBlock method.  Keep track of whether any of the passes modifies 
+/// Execute all of the passes scheduled for execution by invoking
+/// runOnBasicBlock method.  Keep track of whether any of the passes modifies
 /// the function, and if so, return true.
 bool BBPassManager::runOnFunction(Function &F) {
   if (F.isDeclaration())
@@ -1202,7 +1214,7 @@
       }
 
       Changed |= LocalChanged;
-      if (LocalChanged) 
+      if (LocalChanged)
         dumpPassInfo(BP, MODIFICATION_MSG, ON_BASICBLOCK_MSG,
                      I->getName());
       dumpPreservedSet(BP);
@@ -1286,17 +1298,18 @@
 /// PassManager_X is destroyed, the pass will be destroyed as well, so
 /// there is no need to delete the pass. (TODO delete passes.)
 /// This implies that all passes MUST be allocated with 'new'.
-void FunctionPassManager::add(Pass *P) { 
+void FunctionPassManager::add(Pass *P) {
   // If this is a not a function pass, don't add a printer for it.
+  const void *PassID = P->getPassID();
   if (P->getPassKind() == PT_Function)
-    if (ShouldPrintBeforePass(P))
+    if (ShouldPrintBeforePass(PassID))
       addImpl(P->createPrinterPass(dbgs(), std::string("*** IR Dump Before ")
                                    + P->getPassName() + " ***"));
 
   addImpl(P);
 
   if (P->getPassKind() == PT_Function)
-    if (ShouldPrintAfterPass(P))
+    if (ShouldPrintAfterPass(PassID))
       addImpl(P->createPrinterPass(dbgs(), std::string("*** IR Dump After ")
                                    + P->getPassName() + " ***"));
 }
@@ -1405,8 +1418,8 @@
 }
 
 
-/// Execute all of the passes scheduled for execution by invoking 
-/// runOnFunction method.  Keep track of whether any of the passes modifies 
+/// Execute all of the passes scheduled for execution by invoking
+/// runOnFunction method.  Keep track of whether any of the passes modifies
 /// the function, and if so, return true.
 bool FPPassManager::runOnFunction(Function &F) {
   if (F.isDeclaration())
@@ -1476,8 +1489,8 @@
 //===----------------------------------------------------------------------===//
 // MPPassManager implementation
 
-/// Execute all of the passes scheduled for execution by invoking 
-/// runOnModule method.  Keep track of whether any of the passes modifies 
+/// Execute all of the passes scheduled for execution by invoking
+/// runOnModule method.  Keep track of whether any of the passes modifies
 /// the module, and if so, return true.
 bool
 MPPassManager::runOnModule(Module &M) {
@@ -1512,7 +1525,7 @@
       dumpPassInfo(MP, MODIFICATION_MSG, ON_MODULE_MSG,
                    M.getModuleIdentifier());
     dumpPreservedSet(MP);
-    
+
     verifyPreservedAnalysis(MP);
     removeNotPreservedAnalysis(MP);
     recordAvailableAnalysis(MP);
@@ -1538,7 +1551,7 @@
 void MPPassManager::addLowerLevelRequiredPass(Pass *P, Pass *RequiredPass) {
   assert(P->getPotentialPassManagerType() == PMT_ModulePassManager &&
          "Unable to handle Pass that requires lower level Analysis pass");
-  assert((P->getPotentialPassManagerType() < 
+  assert((P->getPotentialPassManagerType() <
           RequiredPass->getPotentialPassManagerType()) &&
          "Unable to handle Pass that requires lower level Analysis pass");
 
@@ -1553,18 +1566,18 @@
   FPP->add(RequiredPass);
 
   // Register P as the last user of RequiredPass.
-  SmallVector<Pass *, 12> LU;
+  SmallVector<Pass *, 1> LU;
   LU.push_back(RequiredPass);
   FPP->setLastUser(LU,  P);
 }
 
-/// Return function pass corresponding to PassInfo PI, that is 
+/// Return function pass corresponding to PassInfo PI, that is
 /// required by module pass MP. Instantiate analysis pass, by using
 /// its runOnFunction() for function F.
-Pass* MPPassManager::getOnTheFlyPass(Pass *MP, const PassInfo *PI, Function &F){
+Pass* MPPassManager::getOnTheFlyPass(Pass *MP, AnalysisID PI, Function &F) {
   FunctionPassManagerImpl *FPP = OnTheFlyManagers[MP];
   assert(FPP && "Unable to find on the fly pass");
-  
+
   FPP->releaseMemoryOnTheFly();
   FPP->run(F);
   return ((PMTopLevelManager*)FPP)->findAnalysisPass(PI);
@@ -1614,13 +1627,14 @@
 /// will be destroyed as well, so there is no need to delete the pass.  This
 /// implies that all passes MUST be allocated with 'new'.
 void PassManager::add(Pass *P) {
-  if (ShouldPrintBeforePass(P))
+  const void* PassID = P->getPassID();
+  if (ShouldPrintBeforePass(PassID))
     addImpl(P->createPrinterPass(dbgs(), std::string("*** IR Dump Before ")
                                  + P->getPassName() + " ***"));
 
   addImpl(P);
 
-  if (ShouldPrintAfterPass(P))
+  if (ShouldPrintAfterPass(PassID))
     addImpl(P->createPrinterPass(dbgs(), std::string("*** IR Dump After ")
                                  + P->getPassName() + " ***"));
 }
@@ -1656,7 +1670,7 @@
 
 /// If TimingInfo is enabled then start pass timer.
 Timer *llvm::getPassTimer(Pass *P) {
-  if (TheTimeInfo) 
+  if (TheTimeInfo)
     return TheTimeInfo->getPassTimer(P);
   return 0;
 }
@@ -1690,8 +1704,8 @@
 }
 
 // Dump content of the pass manager stack.
-void PMStack::dump() {
-  for (std::deque<PMDataManager *>::iterator I = S.begin(),
+void PMStack::dump() const {
+  for (std::vector<PMDataManager *>::const_iterator I = S.begin(),
          E = S.end(); I != E; ++I)
     printf("%s ", (*I)->getAsPass()->getPassName());
 
@@ -1700,11 +1714,11 @@
 }
 
 /// Find appropriate Module Pass Manager in the PM Stack and
-/// add self into that manager. 
-void ModulePass::assignPassManager(PMStack &PMS, 
+/// add self into that manager.
+void ModulePass::assignPassManager(PMStack &PMS,
                                    PassManagerType PreferredType) {
   // Find Module Pass Manager
-  while(!PMS.empty()) {
+  while (!PMS.empty()) {
     PassManagerType TopPMType = PMS.top()->getPassManagerType();
     if (TopPMType == PreferredType)
       break; // We found desired pass manager
@@ -1718,7 +1732,7 @@
 }
 
 /// Find appropriate Function Pass Manager or Call Graph Pass Manager
-/// in the PM Stack and add self into that manager. 
+/// in the PM Stack and add self into that manager.
 void FunctionPass::assignPassManager(PMStack &PMS,
                                      PassManagerType PreferredType) {
 
@@ -1727,7 +1741,7 @@
     if (PMS.top()->getPassManagerType() > PMT_FunctionPassManager)
       PMS.pop();
     else
-      break; 
+      break;
   }
 
   // Create new Function Pass Manager if needed.
@@ -1759,14 +1773,14 @@
 }
 
 /// Find appropriate Basic Pass Manager or Call Graph Pass Manager
-/// in the PM Stack and add self into that manager. 
+/// in the PM Stack and add self into that manager.
 void BasicBlockPass::assignPassManager(PMStack &PMS,
                                        PassManagerType PreferredType) {
   BBPassManager *BBP;
 
   // Basic Pass Manager is a leaf pass manager. It does not handle
   // any other pass manager.
-  if (!PMS.empty() && 
+  if (!PMS.empty() &&
       PMS.top()->getPassManagerType() == PMT_BasicBlockPassManager) {
     BBP = (BBPassManager *)PMS.top();
   } else {
@@ -1796,38 +1810,3 @@
 }
 
 PassManagerBase::~PassManagerBase() {}
-  
-/*===-- C Bindings --------------------------------------------------------===*/
-
-LLVMPassManagerRef LLVMCreatePassManager() {
-  return wrap(new PassManager());
-}
-
-LLVMPassManagerRef LLVMCreateFunctionPassManagerForModule(LLVMModuleRef M) {
-  return wrap(new FunctionPassManager(unwrap(M)));
-}
-
-LLVMPassManagerRef LLVMCreateFunctionPassManager(LLVMModuleProviderRef P) {
-  return LLVMCreateFunctionPassManagerForModule(
-                                            reinterpret_cast<LLVMModuleRef>(P));
-}
-
-LLVMBool LLVMRunPassManager(LLVMPassManagerRef PM, LLVMModuleRef M) {
-  return unwrap<PassManager>(PM)->run(*unwrap(M));
-}
-
-LLVMBool LLVMInitializeFunctionPassManager(LLVMPassManagerRef FPM) {
-  return unwrap<FunctionPassManager>(FPM)->doInitialization();
-}
-
-LLVMBool LLVMRunFunctionPassManager(LLVMPassManagerRef FPM, LLVMValueRef F) {
-  return unwrap<FunctionPassManager>(FPM)->run(*unwrap<Function>(F));
-}
-
-LLVMBool LLVMFinalizeFunctionPassManager(LLVMPassManagerRef FPM) {
-  return unwrap<FunctionPassManager>(FPM)->doFinalization();
-}
-
-void LLVMDisposePassManager(LLVMPassManagerRef PM) {
-  delete unwrap(PM);
-}
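
The C bindings above are dropped from this file by the merge (presumably
relocated rather than removed outright; this hunk alone does not say where).
For reference, a minimal client of that C API looks roughly like the
following, using only the entry points whose implementations appear in the
removed lines; the wrapper function is invented for illustration:

  #include "llvm-c/Core.h"

  /* Run the function passes held in a fresh manager over Fn (sketch only). */
  void runFunctionPasses(LLVMModuleRef M, LLVMValueRef Fn) {
    LLVMPassManagerRef FPM = LLVMCreateFunctionPassManagerForModule(M);
    LLVMInitializeFunctionPassManager(FPM);
    LLVMRunFunctionPassManager(FPM, Fn);   /* nonzero if Fn was modified */
    LLVMFinalizeFunctionPassManager(FPM);
    LLVMDisposePassManager(FPM);
  }
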

Modified: llvm/branches/wendling/eh/lib/VMCore/PassRegistry.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/VMCore/PassRegistry.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/VMCore/PassRegistry.cpp (original)
+++ llvm/branches/wendling/eh/lib/VMCore/PassRegistry.cpp Tue Oct 26 19:48:03 2010
@@ -16,102 +16,134 @@
 #include "llvm/PassSupport.h"
 #include "llvm/Support/Compiler.h"
 #include "llvm/Support/ManagedStatic.h"
+#include "llvm/System/Mutex.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/StringMap.h"
+#include <vector>
 
 using namespace llvm;
 
-static PassRegistry *PassRegistryObj = 0;
-PassRegistry *PassRegistry::getPassRegistry() {
-  // Use double-checked locking to safely initialize the registrar when
-  // we're running in multithreaded mode.
-  PassRegistry* tmp = PassRegistryObj;
-  if (llvm_is_multithreaded()) {
-    sys::MemoryFence();
-    if (!tmp) {
-      llvm_acquire_global_lock();
-      tmp = PassRegistryObj;
-      if (!tmp) {
-        tmp = new PassRegistry();
-        sys::MemoryFence();
-        PassRegistryObj = tmp;
-      }
-      llvm_release_global_lock();
-    }
-  } else if (!tmp) {
-    PassRegistryObj = new PassRegistry();
-  }
-  
-  return PassRegistryObj;
-}
-
-namespace {
-
-// FIXME: We use ManagedCleanup to erase the pass registrar on shutdown.
+// FIXME: We use ManagedStatic to erase the pass registrar on shutdown.
 // Unfortunately, passes are registered with static ctors, and having
 // llvm_shutdown clear this map prevents successful resurrection after
 // llvm_shutdown is run.  Ideally we should find a solution so that we don't
 // leak the map, AND can still resurrect after shutdown.
-void cleanupPassRegistry(void*) {
-  if (PassRegistryObj) {
-    delete PassRegistryObj;
-    PassRegistryObj = 0;
-  }
+static ManagedStatic<PassRegistry> PassRegistryObj;
+PassRegistry *PassRegistry::getPassRegistry() {
+  return &*PassRegistryObj;
 }
-ManagedCleanup<&cleanupPassRegistry> registryCleanup ATTRIBUTE_USED;
 
+static ManagedStatic<sys::SmartMutex<true> > Lock;
+
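
ManagedStatic constructs its object lazily on first use (thread-safely when
LLVM is in multithreaded mode) and registers it for destruction by
llvm_shutdown(), which is why the hand-rolled double-checked locking above can
go away. A self-contained sketch of the idiom, with invented names:

  #include "llvm/Support/ManagedStatic.h"

  namespace {
  struct Counter { int Value; Counter() : Value(0) {} };
  }
  static llvm::ManagedStatic<Counter> TheCounter;

  int bumpCounter() {
    return ++TheCounter->Value;  // first dereference constructs the Counter
  }
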
+//===----------------------------------------------------------------------===//
+// PassRegistryImpl
+//
+
+namespace {
+struct PassRegistryImpl {
+  /// PassInfoMap - Keep track of the PassInfo object for each registered pass.
+  typedef DenseMap<const void*, const PassInfo*> MapType;
+  MapType PassInfoMap;
+  
+  typedef StringMap<const PassInfo*> StringMapType;
+  StringMapType PassInfoStringMap;
+  
+  /// AnalysisGroupInfo - Keep track of information for each analysis group.
+  struct AnalysisGroupInfo {
+    SmallPtrSet<const PassInfo *, 8> Implementations;
+  };
+  DenseMap<const PassInfo*, AnalysisGroupInfo> AnalysisGroupInfoMap;
+  
+  std::vector<const PassInfo*> ToFree;
+  std::vector<PassRegistrationListener*> Listeners;
+};
+} // end anonymous namespace
+
+void *PassRegistry::getImpl() const {
+  if (!pImpl)
+    pImpl = new PassRegistryImpl();
+  return pImpl;
+}
+
+//===----------------------------------------------------------------------===//
+// Accessors
+//
+
+PassRegistry::~PassRegistry() {
+  sys::SmartScopedLock<true> Guard(*Lock);
+  PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(pImpl);
+  
+  for (std::vector<const PassInfo*>::iterator I = Impl->ToFree.begin(),
+       E = Impl->ToFree.end(); I != E; ++I)
+    delete *I;
+  
+  delete Impl;
+  pImpl = 0;
 }
 
-const PassInfo *PassRegistry::getPassInfo(intptr_t TI) const {
-  sys::SmartScopedLock<true> Guard(Lock);
-  MapType::const_iterator I = PassInfoMap.find(TI);
-  return I != PassInfoMap.end() ? I->second : 0;
+const PassInfo *PassRegistry::getPassInfo(const void *TI) const {
+  sys::SmartScopedLock<true> Guard(*Lock);
+  PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(getImpl());
+  PassRegistryImpl::MapType::const_iterator I = Impl->PassInfoMap.find(TI);
+  return I != Impl->PassInfoMap.end() ? I->second : 0;
 }
 
 const PassInfo *PassRegistry::getPassInfo(StringRef Arg) const {
-  sys::SmartScopedLock<true> Guard(Lock);
-  StringMapType::const_iterator I = PassInfoStringMap.find(Arg);
-  return I != PassInfoStringMap.end() ? I->second : 0;
+  sys::SmartScopedLock<true> Guard(*Lock);
+  PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(getImpl());
+  PassRegistryImpl::StringMapType::const_iterator
+    I = Impl->PassInfoStringMap.find(Arg);
+  return I != Impl->PassInfoStringMap.end() ? I->second : 0;
 }
 
 //===----------------------------------------------------------------------===//
 // Pass Registration mechanism
 //
 
-void PassRegistry::registerPass(const PassInfo &PI) {
-  sys::SmartScopedLock<true> Guard(Lock);
+void PassRegistry::registerPass(const PassInfo &PI, bool ShouldFree) {
+  sys::SmartScopedLock<true> Guard(*Lock);
+  PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(getImpl());
   bool Inserted =
-    PassInfoMap.insert(std::make_pair(PI.getTypeInfo(),&PI)).second;
+    Impl->PassInfoMap.insert(std::make_pair(PI.getTypeInfo(),&PI)).second;
   assert(Inserted && "Pass registered multiple times!"); Inserted=Inserted;
-  PassInfoStringMap[PI.getPassArgument()] = &PI;
+  Impl->PassInfoStringMap[PI.getPassArgument()] = &PI;
   
   // Notify any listeners.
   for (std::vector<PassRegistrationListener*>::iterator
-       I = Listeners.begin(), E = Listeners.end(); I != E; ++I)
+       I = Impl->Listeners.begin(), E = Impl->Listeners.end(); I != E; ++I)
     (*I)->passRegistered(&PI);
+  
+  if (ShouldFree) Impl->ToFree.push_back(&PI);
 }
 
 void PassRegistry::unregisterPass(const PassInfo &PI) {
-  sys::SmartScopedLock<true> Guard(Lock);
-  MapType::iterator I = PassInfoMap.find(PI.getTypeInfo());
-  assert(I != PassInfoMap.end() && "Pass registered but not in map!");
+  sys::SmartScopedLock<true> Guard(*Lock);
+  PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(getImpl());
+  PassRegistryImpl::MapType::iterator I = 
+    Impl->PassInfoMap.find(PI.getTypeInfo());
+  assert(I != Impl->PassInfoMap.end() && "Pass registered but not in map!");
   
   // Remove pass from the map.
-  PassInfoMap.erase(I);
-  PassInfoStringMap.erase(PI.getPassArgument());
+  Impl->PassInfoMap.erase(I);
+  Impl->PassInfoStringMap.erase(PI.getPassArgument());
 }
 
 void PassRegistry::enumerateWith(PassRegistrationListener *L) {
-  sys::SmartScopedLock<true> Guard(Lock);
-  for (MapType::const_iterator I = PassInfoMap.begin(),
-       E = PassInfoMap.end(); I != E; ++I)
+  sys::SmartScopedLock<true> Guard(*Lock);
+  PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(getImpl());
+  for (PassRegistryImpl::MapType::const_iterator I = Impl->PassInfoMap.begin(),
+       E = Impl->PassInfoMap.end(); I != E; ++I)
     L->passEnumerate(I->second);
 }
 
 
 /// Analysis Group Mechanisms.
-void PassRegistry::registerAnalysisGroup(intptr_t InterfaceID, 
-                                         intptr_t PassID,
+void PassRegistry::registerAnalysisGroup(const void *InterfaceID, 
+                                         const void *PassID,
                                          PassInfo& Registeree,
-                                         bool isDefault) {
+                                         bool isDefault,
+                                         bool ShouldFree) {
   PassInfo *InterfaceInfo =  const_cast<PassInfo*>(getPassInfo(InterfaceID));
   if (InterfaceInfo == 0) {
     // First reference to Interface, register it now.
@@ -126,12 +158,15 @@
     assert(ImplementationInfo &&
            "Must register pass before adding to AnalysisGroup!");
 
+    sys::SmartScopedLock<true> Guard(*Lock);
+    
     // Make sure we keep track of the fact that the implementation implements
     // the interface.
     ImplementationInfo->addInterfaceImplemented(InterfaceInfo);
 
-    sys::SmartScopedLock<true> Guard(Lock);
-    AnalysisGroupInfo &AGI = AnalysisGroupInfoMap[InterfaceInfo];
+    PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(getImpl());
+    PassRegistryImpl::AnalysisGroupInfo &AGI =
+      Impl->AnalysisGroupInfoMap[InterfaceInfo];
     assert(AGI.Implementations.count(ImplementationInfo) == 0 &&
            "Cannot add a pass to the same analysis group more than once!");
     AGI.Implementations.insert(ImplementationInfo);
@@ -143,17 +178,30 @@
       InterfaceInfo->setNormalCtor(ImplementationInfo->getNormalCtor());
     }
   }
+  
+  PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(getImpl());
+  if (ShouldFree) Impl->ToFree.push_back(&Registeree);
 }
 
 void PassRegistry::addRegistrationListener(PassRegistrationListener *L) {
-  sys::SmartScopedLock<true> Guard(Lock);
-  Listeners.push_back(L);
+  sys::SmartScopedLock<true> Guard(*Lock);
+  PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(getImpl());
+  Impl->Listeners.push_back(L);
 }
 
 void PassRegistry::removeRegistrationListener(PassRegistrationListener *L) {
-  sys::SmartScopedLock<true> Guard(Lock);
+  sys::SmartScopedLock<true> Guard(*Lock);
+  
+  // NOTE: This is necessary, because removeRegistrationListener() can be called
+  // as part of the llvm_shutdown sequence.  Since we have no control over the
+  // order of that sequence, we need to gracefully handle the case where the
+  // PassRegistry is destructed before the object that triggers this call.
+  if (!pImpl) return;
+  
+  PassRegistryImpl *Impl = static_cast<PassRegistryImpl*>(getImpl());
   std::vector<PassRegistrationListener*>::iterator I =
-    std::find(Listeners.begin(), Listeners.end(), L);
-  assert(I != Listeners.end() && "PassRegistrationListener not registered!");
-  Listeners.erase(I);
+    std::find(Impl->Listeners.begin(), Impl->Listeners.end(), L);
+  assert(I != Impl->Listeners.end() &&
+         "PassRegistrationListener not registered!");
+  Impl->Listeners.erase(I);
 }
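
A sketch of the listener interface exercised by enumerateWith() above: a
subclass overrides passEnumerate() and is handed every registered PassInfo.
The class name and the dbgs() output are illustrative only:

  #include "llvm/PassRegistry.h"
  #include "llvm/PassSupport.h"
  #include "llvm/Support/Debug.h"

  namespace {
  // Prints "<arg>: <name>" for every pass currently in the registry (sketch).
  struct PassLister : public llvm::PassRegistrationListener {
    virtual void passEnumerate(const llvm::PassInfo *PI) {
      llvm::dbgs() << PI->getPassArgument() << ": " << PI->getPassName() << "\n";
    }
  };
  }

  void listRegisteredPasses() {
    PassLister L;
    llvm::PassRegistry::getPassRegistry()->enumerateWith(&L);
  }
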

Modified: llvm/branches/wendling/eh/lib/VMCore/PrintModulePass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/VMCore/PrintModulePass.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/VMCore/PrintModulePass.cpp (original)
+++ llvm/branches/wendling/eh/lib/VMCore/PrintModulePass.cpp Tue Oct 26 19:48:03 2010
@@ -28,10 +28,10 @@
     bool DeleteStream;      // Delete the ostream in our dtor?
   public:
     static char ID;
-    PrintModulePass() : ModulePass(&ID), Out(&dbgs()), 
+    PrintModulePass() : ModulePass(ID), Out(&dbgs()), 
       DeleteStream(false) {}
     PrintModulePass(const std::string &B, raw_ostream *o, bool DS)
-        : ModulePass(&ID), Banner(B), Out(o), DeleteStream(DS) {}
+        : ModulePass(ID), Banner(B), Out(o), DeleteStream(DS) {}
     
     ~PrintModulePass() {
       if (DeleteStream) delete Out;
@@ -53,12 +53,12 @@
     bool DeleteStream;      // Delete the ostream in our dtor?
   public:
     static char ID;
-    PrintFunctionPass() : FunctionPass(&ID), Banner(""), Out(&dbgs()), 
+    PrintFunctionPass() : FunctionPass(ID), Banner(""), Out(&dbgs()), 
                           DeleteStream(false) {}
     PrintFunctionPass(const std::string &B, raw_ostream *o, bool DS)
-      : FunctionPass(&ID), Banner(B), Out(o), DeleteStream(DS) {}
+      : FunctionPass(ID), Banner(B), Out(o), DeleteStream(DS) {}
     
-    inline ~PrintFunctionPass() {
+    ~PrintFunctionPass() {
       if (DeleteStream) delete Out;
     }
     
@@ -78,10 +78,10 @@
 
 char PrintModulePass::ID = 0;
 INITIALIZE_PASS(PrintModulePass, "print-module",
-                "Print module to stderr", false, false);
+                "Print module to stderr", false, false)
 char PrintFunctionPass::ID = 0;
 INITIALIZE_PASS(PrintFunctionPass, "print-function",
-                "Print function to stderr", false, false);
+                "Print function to stderr", false, false)
 
 /// createPrintModulePass - Create and return a pass that writes the
 /// module to the specified raw_ostream.
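
The constructor change above (ModulePass(ID) / FunctionPass(ID) instead of
taking the address of ID) is the pattern passes now follow. A minimal
out-of-tree sketch in the same style, using the long-standing RegisterPass
helper rather than the in-tree INITIALIZE_PASS machinery; the pass name and
output are invented for illustration:

  #include "llvm/Pass.h"
  #include "llvm/Function.h"
  #include "llvm/Support/raw_ostream.h"

  namespace {
  struct HelloNames : public llvm::FunctionPass {
    static char ID;
    HelloNames() : llvm::FunctionPass(ID) {}   // was FunctionPass(&ID)
    virtual bool runOnFunction(llvm::Function &F) {
      llvm::errs() << "visiting: " << F.getName() << "\n";
      return false;                            // IR is not modified
    }
  };
  }
  char HelloNames::ID = 0;
  static llvm::RegisterPass<HelloNames> X("hello-names", "Print function names");
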

Modified: llvm/branches/wendling/eh/lib/VMCore/Type.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/VMCore/Type.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/VMCore/Type.cpp (original)
+++ llvm/branches/wendling/eh/lib/VMCore/Type.cpp Tue Oct 26 19:48:03 2010
@@ -50,7 +50,7 @@
 
 /// Because of the way Type subclasses are allocated, this function is necessary
 /// to use the correct kind of "delete" operator to deallocate the Type object.
-/// Some type objects (FunctionTy, StructTy, UnionTy) allocate additional space
+/// Some type objects (FunctionTy, StructTy) allocate additional space
 /// after the space for their derived type to hold the contained types array of
 /// PATypeHandles. Using this allocation scheme means all the PATypeHandles are
 /// allocated with the type object, decreasing allocations and eliminating the
@@ -66,8 +66,7 @@
   // Structures and Functions allocate their contained types past the end of
   // the type object itself. These need to be destroyed differently than the
   // other types.
-  if (this->isFunctionTy() || this->isStructTy() ||
-      this->isUnionTy()) {
+  if (this->isFunctionTy() || this->isStructTy()) {
     // First, make sure we destruct any PATypeHandles allocated by these
     // subclasses.  They must be manually destructed. 
     for (unsigned i = 0; i < NumContainedTys; ++i)
@@ -77,10 +76,10 @@
     // to delete this as an array of char.
     if (this->isFunctionTy())
       static_cast<const FunctionType*>(this)->FunctionType::~FunctionType();
-    else if (this->isStructTy())
+    else {
+      assert(isStructTy());
       static_cast<const StructType*>(this)->StructType::~StructType();
-    else
-      static_cast<const UnionType*>(this)->UnionType::~UnionType();
+    }
 
     // Finally, remove the memory as an array deallocation of the chars it was
     // constructed from.
@@ -110,6 +109,7 @@
   case PPC_FP128TyID : return getPPC_FP128Ty(C);
   case LabelTyID     : return getLabelTy(C);
   case MetadataTyID  : return getMetadataTy(C);
+  case X86_MMXTyID   : return getX86_MMXTy(C);
   default:
     return 0;
   }
@@ -173,10 +173,20 @@
     return false;
 
   // Vector -> Vector conversions are always lossless if the two vector types
-  // have the same size, otherwise not.
-  if (const VectorType *thisPTy = dyn_cast<VectorType>(this))
+  // have the same size, otherwise not.  Also, 64-bit vector types can be
+  // converted to x86mmx.
+  if (const VectorType *thisPTy = dyn_cast<VectorType>(this)) {
     if (const VectorType *thatPTy = dyn_cast<VectorType>(Ty))
       return thisPTy->getBitWidth() == thatPTy->getBitWidth();
+    if (Ty->getTypeID() == Type::X86_MMXTyID &&
+        thisPTy->getBitWidth() == 64)
+      return true;
+  }
+
+  if (this->getTypeID() == Type::X86_MMXTyID)
+    if (const VectorType *thatPTy = dyn_cast<VectorType>(Ty))
+      if (thatPTy->getBitWidth() == 64)
+        return true;
 
   // At this point we have only various mismatches of the first class types
   // remaining and ptr->ptr. Just select the lossless conversions. Everything
@@ -193,6 +203,7 @@
   case Type::X86_FP80TyID: return 80;
   case Type::FP128TyID: return 128;
   case Type::PPC_FP128TyID: return 128;
+  case Type::X86_MMXTyID: return 64;
   case Type::IntegerTyID: return cast<IntegerType>(this)->getBitWidth();
   case Type::VectorTyID:  return cast<VectorType>(this)->getBitWidth();
   default: return 0;
@@ -234,7 +245,7 @@
   if (const VectorType *PTy = dyn_cast<VectorType>(this))
     return PTy->getElementType()->isSized();
 
-  if (!this->isStructTy() && !this->isUnionTy()) 
+  if (!this->isStructTy()) 
     return false;
 
   // Okay, our struct is sized if all of the elements are...
@@ -319,31 +330,6 @@
 }
 
 
-bool UnionType::indexValid(const Value *V) const {
-  // Union indexes require 32-bit integer constants.
-  if (V->getType()->isIntegerTy(32))
-    if (const ConstantInt *CU = dyn_cast<ConstantInt>(V))
-      return indexValid(CU->getZExtValue());
-  return false;
-}
-
-bool UnionType::indexValid(unsigned V) const {
-  return V < NumContainedTys;
-}
-
-// getTypeAtIndex - Given an index value into the type, return the type of the
-// element.  For a structure type, this must be a constant value...
-//
-const Type *UnionType::getTypeAtIndex(const Value *V) const {
-  unsigned Idx = (unsigned)cast<ConstantInt>(V)->getZExtValue();
-  return getTypeAtIndex(Idx);
-}
-
-const Type *UnionType::getTypeAtIndex(unsigned Idx) const {
-  assert(indexValid(Idx) && "Invalid structure index!");
-  return ContainedTys[Idx];
-}
-
 //===----------------------------------------------------------------------===//
 //                          Primitive 'Type' data
 //===----------------------------------------------------------------------===//
@@ -380,6 +366,10 @@
   return &C.pImpl->PPC_FP128Ty;
 }
 
+const Type *Type::getX86_MMXTy(LLVMContext &C) {
+  return &C.pImpl->X86_MMXTy;
+}
+
 const IntegerType *Type::getIntNTy(LLVMContext &C, unsigned N) {
   return IntegerType::get(C, N);
 }
@@ -424,6 +414,10 @@
   return getPPC_FP128Ty(C)->getPointerTo(AS);
 }
 
+const PointerType *Type::getX86_MMXPtrTy(LLVMContext &C, unsigned AS) {
+  return getX86_MMXTy(C)->getPointerTo(AS);
+}
+
 const PointerType *Type::getIntNPtrTy(LLVMContext &C, unsigned N, unsigned AS) {
   return getIntNTy(C, N)->getPointerTo(AS);
 }
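
With the new X86_MMX type wired up here, a 64-bit vector type and x86mmx can be
bitcast to one another, per the rule added to canLosslesslyBitCastTo() earlier
in this file. A small sketch of that relationship; the helper function is
illustrative only:

  #include "llvm/LLVMContext.h"
  #include "llvm/DerivedTypes.h"
  #include <cassert>

  void checkMMXCasts(llvm::LLVMContext &C) {
    const llvm::Type *MMX = llvm::Type::getX86_MMXTy(C);
    const llvm::VectorType *V2i32 =
        llvm::VectorType::get(llvm::Type::getInt32Ty(C), 2);  // 64 bits total
    assert(V2i32->canLosslesslyBitCastTo(MMX));
    assert(MMX->canLosslesslyBitCastTo(V2i32));
  }
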
@@ -507,23 +501,6 @@
   setAbstract(isAbstract);
 }
 
-UnionType::UnionType(LLVMContext &C,const Type* const* Types, unsigned NumTypes)
-  : CompositeType(C, UnionTyID) {
-  ContainedTys = reinterpret_cast<PATypeHandle*>(this + 1);
-  NumContainedTys = NumTypes;
-  bool isAbstract = false;
-  for (unsigned i = 0; i < NumTypes; ++i) {
-    assert(Types[i] && "<null> type for union field!");
-    assert(isValidElementType(Types[i]) &&
-           "Invalid type for union element!");
-    new (&ContainedTys[i]) PATypeHandle(Types[i], this);
-    isAbstract |= Types[i]->isAbstract();
-  }
-
-  // Calculate whether or not this type is abstract
-  setAbstract(isAbstract);
-}
-
 ArrayType::ArrayType(const Type *ElType, uint64_t NumEl)
   : SequentialType(ArrayTyID, ElType) {
   NumElements = NumEl;
@@ -711,15 +688,6 @@
     return true;
   }
   
-  if (const UnionType *UTy = dyn_cast<UnionType>(Ty)) {
-    const UnionType *UTy2 = cast<UnionType>(Ty2);
-    if (UTy->getNumElements() != UTy2->getNumElements()) return false;
-    for (unsigned i = 0, e = UTy2->getNumElements(); i != e; ++i)
-      if (!TypesEqual(UTy->getElementType(i), UTy2->getElementType(i), EqTypes))
-        return false;
-    return true;
-  }
-  
   if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
     const ArrayType *ATy2 = cast<ArrayType>(Ty2);
     return ATy->getNumElements() == ATy2->getNumElements() &&
@@ -987,60 +955,6 @@
 
 
 //===----------------------------------------------------------------------===//
-// Union Type Factory...
-//
-
-UnionType *UnionType::get(const Type* const* Types, unsigned NumTypes) {
-  assert(NumTypes > 0 && "union must have at least one member type!");
-  UnionValType UTV(Types, NumTypes);
-  UnionType *UT = 0;
-  
-  LLVMContextImpl *pImpl = Types[0]->getContext().pImpl;
-  
-  UT = pImpl->UnionTypes.get(UTV);
-    
-  if (!UT) {
-    // Value not found.  Derive a new type!
-    UT = (UnionType*) operator new(sizeof(UnionType) +
-                                   sizeof(PATypeHandle) * NumTypes);
-    new (UT) UnionType(Types[0]->getContext(), Types, NumTypes);
-    pImpl->UnionTypes.add(UTV, UT);
-  }
-#ifdef DEBUG_MERGE_TYPES
-  DEBUG(dbgs() << "Derived new type: " << *UT << "\n");
-#endif
-  return UT;
-}
-
-UnionType *UnionType::get(const Type *type, ...) {
-  va_list ap;
-  SmallVector<const llvm::Type*, 8> UnionFields;
-  va_start(ap, type);
-  while (type) {
-    UnionFields.push_back(type);
-    type = va_arg(ap, llvm::Type*);
-  }
-  unsigned NumTypes = UnionFields.size();
-  assert(NumTypes > 0 && "union must have at least one member type!");
-  return llvm::UnionType::get(&UnionFields[0], NumTypes);
-}
-
-bool UnionType::isValidElementType(const Type *ElemTy) {
-  return !ElemTy->isVoidTy() && !ElemTy->isLabelTy() &&
-         !ElemTy->isMetadataTy() && !ElemTy->isFunctionTy();
-}
-
-int UnionType::getElementTypeIndex(const Type *ElemTy) const {
-  int index = 0;
-  for (UnionType::element_iterator I = element_begin(), E = element_end();
-       I != E; ++I, ++index) {
-     if (ElemTy == *I) return index;
-  }
-  
-  return -1;
-}
-
-//===----------------------------------------------------------------------===//
 // Pointer Type Factory...
 //
 
@@ -1291,21 +1205,6 @@
 // concrete - this could potentially change us from an abstract type to a
 // concrete type.
 //
-void UnionType::refineAbstractType(const DerivedType *OldType,
-                                    const Type *NewType) {
-  LLVMContextImpl *pImpl = OldType->getContext().pImpl;
-  pImpl->UnionTypes.RefineAbstractType(this, OldType, NewType);
-}
-
-void UnionType::typeBecameConcrete(const DerivedType *AbsTy) {
-  LLVMContextImpl *pImpl = AbsTy->getContext().pImpl;
-  pImpl->UnionTypes.TypeBecameConcrete(this, AbsTy);
-}
-
-// refineAbstractType - Called when a contained type is found to be more
-// concrete - this could potentially change us from an abstract type to a
-// concrete type.
-//
 void PointerType::refineAbstractType(const DerivedType *OldType,
                                      const Type *NewType) {
   LLVMContextImpl *pImpl = OldType->getContext().pImpl;

Modified: llvm/branches/wendling/eh/lib/VMCore/TypesContext.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/VMCore/TypesContext.h?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/VMCore/TypesContext.h (original)
+++ llvm/branches/wendling/eh/lib/VMCore/TypesContext.h Tue Oct 26 19:48:03 2010
@@ -180,32 +180,6 @@
   }
 };
 
-// UnionValType - Define a class to hold the key that goes into the TypeMap
-//
-class UnionValType {
-  std::vector<const Type*> ElTypes;
-public:
-  UnionValType(const Type* const* Types, unsigned NumTypes)
-    : ElTypes(&Types[0], &Types[NumTypes]) {}
-
-  static UnionValType get(const UnionType *UT) {
-    std::vector<const Type *> ElTypes;
-    ElTypes.reserve(UT->getNumElements());
-    for (unsigned i = 0, e = UT->getNumElements(); i != e; ++i)
-      ElTypes.push_back(UT->getElementType(i));
-
-    return UnionValType(&ElTypes[0], ElTypes.size());
-  }
-
-  static unsigned hashTypeStructure(const UnionType *UT) {
-    return UT->getNumElements();
-  }
-
-  inline bool operator<(const UnionValType &UTV) const {
-    return (ElTypes < UTV.ElTypes);
-  }
-};
-
 // FunctionValType - Define a class to hold the key that goes into the TypeMap
 //
 class FunctionValType {

Modified: llvm/branches/wendling/eh/lib/VMCore/ValueTypes.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/VMCore/ValueTypes.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/VMCore/ValueTypes.cpp (original)
+++ llvm/branches/wendling/eh/lib/VMCore/ValueTypes.cpp Tue Oct 26 19:48:03 2010
@@ -110,6 +110,7 @@
   case MVT::isVoid:  return "isVoid";
   case MVT::Other:   return "ch";
   case MVT::Flag:    return "flag";
+  case MVT::x86mmx:  return "x86mmx";
   case MVT::v2i8:    return "v2i8";
   case MVT::v4i8:    return "v4i8";
   case MVT::v8i8:    return "v8i8";
@@ -155,6 +156,7 @@
   case MVT::f80:     return Type::getX86_FP80Ty(Context);
   case MVT::f128:    return Type::getFP128Ty(Context);
   case MVT::ppcf128: return Type::getPPC_FP128Ty(Context);
+  case MVT::x86mmx:  return Type::getX86_MMXTy(Context);
   case MVT::v2i8:    return VectorType::get(Type::getInt8Ty(Context), 2);
   case MVT::v4i8:    return VectorType::get(Type::getInt8Ty(Context), 4);
   case MVT::v8i8:    return VectorType::get(Type::getInt8Ty(Context), 8);
@@ -196,6 +198,7 @@
   case Type::FloatTyID:     return MVT(MVT::f32);
   case Type::DoubleTyID:    return MVT(MVT::f64);
   case Type::X86_FP80TyID:  return MVT(MVT::f80);
+  case Type::X86_MMXTyID:   return MVT(MVT::x86mmx);
   case Type::FP128TyID:     return MVT(MVT::f128);
   case Type::PPC_FP128TyID: return MVT(MVT::ppcf128);
   case Type::PointerTyID:   return MVT(MVT::iPTR);
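
Both directions of the MVT to IR type mapping gain an x86mmx case above. A
sketch of the round trip, using only the two conversion functions touched in
this file; the wrapper is invented for illustration:

  #include "llvm/CodeGen/ValueTypes.h"
  #include "llvm/LLVMContext.h"
  #include <cassert>

  void checkMMXValueType(llvm::LLVMContext &C) {
    llvm::EVT VT = llvm::MVT::x86mmx;
    const llvm::Type *Ty = VT.getTypeForEVT(C);           // Type::getX86_MMXTy(C)
    assert(llvm::EVT::getEVT(Ty) == llvm::MVT::x86mmx);   // MVT::x86mmx again
  }
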

Modified: llvm/branches/wendling/eh/lib/VMCore/Verifier.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/VMCore/Verifier.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/VMCore/Verifier.cpp (original)
+++ llvm/branches/wendling/eh/lib/VMCore/Verifier.cpp Tue Oct 26 19:48:03 2010
@@ -72,7 +72,9 @@
   struct PreVerifier : public FunctionPass {
     static char ID; // Pass ID, replacement for typeid
 
-    PreVerifier() : FunctionPass(&ID) { }
+    PreVerifier() : FunctionPass(ID) {
+      initializePreVerifierPass(*PassRegistry::getPassRegistry());
+    }
 
     virtual void getAnalysisUsage(AnalysisUsage &AU) const {
       AU.setPreservesAll();
@@ -102,9 +104,9 @@
 }
 
 char PreVerifier::ID = 0;
-static RegisterPass<PreVerifier>
-PreVer("preverify", "Preliminary module verification");
-static const PassInfo *const PreVerifyID = &PreVer;
+INITIALIZE_PASS(PreVerifier, "preverify", "Preliminary module verification", 
+                false, false)
+static char &PreVerifyID = PreVerifier::ID;
 
 namespace {
   class TypeSet : public AbstractTypeUser {
@@ -182,23 +184,17 @@
     SmallPtrSet<MDNode *, 32> MDNodes;
 
     Verifier()
-      : FunctionPass(&ID), 
+      : FunctionPass(ID), 
       Broken(false), RealPass(true), action(AbortProcessAction),
-      Mod(0), Context(0), DT(0), MessagesStr(Messages) {}
+      Mod(0), Context(0), DT(0), MessagesStr(Messages) {
+        initializeVerifierPass(*PassRegistry::getPassRegistry());
+      }
     explicit Verifier(VerifierFailureAction ctn)
-      : FunctionPass(&ID), 
+      : FunctionPass(ID), 
       Broken(false), RealPass(true), action(ctn), Mod(0), Context(0), DT(0),
-      MessagesStr(Messages) {}
-    explicit Verifier(bool AB)
-      : FunctionPass(&ID), 
-      Broken(false), RealPass(true),
-      action( AB ? AbortProcessAction : PrintMessageAction), Mod(0),
-      Context(0), DT(0), MessagesStr(Messages) {}
-    explicit Verifier(DominatorTree &dt)
-      : FunctionPass(&ID), 
-      Broken(false), RealPass(false), action(PrintMessageAction), Mod(0),
-      Context(0), DT(&dt), MessagesStr(Messages) {}
-
+      MessagesStr(Messages) {
+        initializeVerifierPass(*PassRegistry::getPassRegistry());
+      }
 
     bool doInitialization(Module &M) {
       Mod = &M;
@@ -331,6 +327,7 @@
     void visitBranchInst(BranchInst &BI);
     void visitReturnInst(ReturnInst &RI);
     void visitSwitchInst(SwitchInst &SI);
+    void visitIndirectBrInst(IndirectBrInst &BI);
     void visitSelectInst(SelectInst &SI);
     void visitUserOp1(Instruction &I);
     void visitUserOp2(Instruction &I) { visitUserOp1(I); }
@@ -402,7 +399,10 @@
 } // End anonymous namespace
 
 char Verifier::ID = 0;
-static RegisterPass<Verifier> X("verify", "Module Verifier");
+INITIALIZE_PASS_BEGIN(Verifier, "verify", "Module Verifier", false, false)
+INITIALIZE_PASS_DEPENDENCY(PreVerifier)
+INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_END(Verifier, "verify", "Module Verifier", false, false)
 
 // Assert - We know that cond should be true, if not print an error message.
 #define Assert(C, M) \
@@ -445,6 +445,10 @@
     Assert1(GVar && GVar->getType()->getElementType()->isArrayTy(),
             "Only global arrays can have appending linkage!", GVar);
   }
+
+  Assert1(!GV.hasLinkerPrivateWeakDefAutoLinkage() || GV.hasDefaultVisibility(),
+          "linker_private_weak_def_auto can only have default visibility!",
+          &GV);
 }
 
 void Verifier::visitGlobalVariable(GlobalVariable &GV) {
@@ -690,6 +694,8 @@
   case CallingConv::Cold:
   case CallingConv::X86_FastCall:
   case CallingConv::X86_ThisCall:
+  case CallingConv::PTX_Kernel:
+  case CallingConv::PTX_Device:
     Assert1(!F.isVarArg(),
             "Varargs functions must have C calling conventions!", &F);
     break;
@@ -864,6 +870,16 @@
   visitTerminatorInst(SI);
 }
 
+void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
+  Assert1(BI.getAddress()->getType()->isPointerTy(),
+          "Indirectbr operand must have pointer type!", &BI);
+  for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
+    Assert1(BI.getDestination(i)->getType()->isLabelTy(),
+            "Indirectbr destinations must all have pointer type!", &BI);
+
+  visitTerminatorInst(BI);
+}
+
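
The new visitIndirectBrInst() checks that the address operand is a pointer and
that every destination is a basic block. A sketch of building an indirectbr
that satisfies those checks; the helper below is invented for illustration:

  #include "llvm/Instructions.h"
  #include "llvm/Constants.h"
  #include "llvm/BasicBlock.h"

  // Append "indirectbr i8* blockaddress(...), [label %Dest]" to From (sketch).
  llvm::IndirectBrInst *buildIndirectBr(llvm::BasicBlock *From,
                                        llvm::BasicBlock *Dest) {
    llvm::Value *Addr = llvm::BlockAddress::get(Dest);        // i8* address
    llvm::IndirectBrInst *IBI =
        llvm::IndirectBrInst::Create(Addr, /*NumDests=*/1, From);
    IBI->addDestination(Dest);                                // label operand
    return IBI;
  }
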
 void Verifier::visitSelectInst(SelectInst &SI) {
   Assert1(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
                                           SI.getOperand(2)),
@@ -1206,6 +1222,7 @@
             "Catches are not correct in invoke instruction!", &II);
 
   VerifyCallSite(&II);
+  visitTerminatorInst(II);
 }
 
 /// visitBinaryOperator - Check that both arguments to the binary operator are
@@ -1270,28 +1287,37 @@
   visitInstruction(B);
 }
 
-void Verifier::visitICmpInst(ICmpInst& IC) {
+void Verifier::visitICmpInst(ICmpInst &IC) {
   // Check that the operands are the same type
-  const Type* Op0Ty = IC.getOperand(0)->getType();
-  const Type* Op1Ty = IC.getOperand(1)->getType();
+  const Type *Op0Ty = IC.getOperand(0)->getType();
+  const Type *Op1Ty = IC.getOperand(1)->getType();
   Assert1(Op0Ty == Op1Ty,
           "Both operands to ICmp instruction are not of the same type!", &IC);
   // Check that the operands are the right type
   Assert1(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPointerTy(),
           "Invalid operand types for ICmp instruction", &IC);
+  // Check that the predicate is valid.
+  Assert1(IC.getPredicate() >= CmpInst::FIRST_ICMP_PREDICATE &&
+          IC.getPredicate() <= CmpInst::LAST_ICMP_PREDICATE,
+          "Invalid predicate in ICmp instruction!", &IC);
 
   visitInstruction(IC);
 }
 
-void Verifier::visitFCmpInst(FCmpInst& FC) {
+void Verifier::visitFCmpInst(FCmpInst &FC) {
   // Check that the operands are the same type
-  const Type* Op0Ty = FC.getOperand(0)->getType();
-  const Type* Op1Ty = FC.getOperand(1)->getType();
+  const Type *Op0Ty = FC.getOperand(0)->getType();
+  const Type *Op1Ty = FC.getOperand(1)->getType();
   Assert1(Op0Ty == Op1Ty,
           "Both operands to FCmp instruction are not of the same type!", &FC);
   // Check that the operands are the right type
   Assert1(Op0Ty->isFPOrFPVectorTy(),
           "Invalid operand types for FCmp instruction", &FC);
+  // Check that the predicate is valid.
+  Assert1(FC.getPredicate() >= CmpInst::FIRST_FCMP_PREDICATE &&
+          FC.getPredicate() <= CmpInst::LAST_FCMP_PREDICATE,
+          "Invalid predicate in FCmp instruction!", &FC);
+
   visitInstruction(FC);
 }
 
@@ -1314,27 +1340,6 @@
   Assert1(ShuffleVectorInst::isValidOperands(SV.getOperand(0), SV.getOperand(1),
                                              SV.getOperand(2)),
           "Invalid shufflevector operands!", &SV);
-
-  const VectorType *VTy = dyn_cast<VectorType>(SV.getOperand(0)->getType());
-  Assert1(VTy, "Operands are not a vector type", &SV);
-
-  // Check to see if Mask is valid.
-  if (const ConstantVector *MV = dyn_cast<ConstantVector>(SV.getOperand(2))) {
-    for (unsigned i = 0, e = MV->getNumOperands(); i != e; ++i) {
-      if (ConstantInt* CI = dyn_cast<ConstantInt>(MV->getOperand(i))) {
-        Assert1(!CI->uge(VTy->getNumElements()*2),
-                "Invalid shufflevector shuffle mask!", &SV);
-      } else {
-        Assert1(isa<UndefValue>(MV->getOperand(i)),
-                "Invalid shufflevector shuffle mask!", &SV);
-      }
-    }
-  } else {
-    Assert1(isa<UndefValue>(SV.getOperand(2)) || 
-            isa<ConstantAggregateZero>(SV.getOperand(2)),
-            "Invalid shufflevector shuffle mask!", &SV);
-  }
-
   visitInstruction(SV);
 }
 
@@ -1412,10 +1417,6 @@
               "Only PHI nodes may reference their own value!", &I);
   }
 
-  // Verify that if this is a terminator that it is at the end of the block.
-  if (isa<TerminatorInst>(I))
-    Assert1(BB->getTerminator() == &I, "Terminator not at end of block!", &I);
-
   // Check that void typed values don't have names
   Assert1(!I.getType()->isVoidTy() || !I.hasName(),
           "Instruction has a name, but provides a void value!", &I);
@@ -1574,7 +1575,8 @@
               "Function type with invalid parameter type", ElTy, FTy);
       VerifyType(ElTy);
     }
-  } break;
+    break;
+  }
   case Type::StructTyID: {
     const StructType *STy = cast<StructType>(Ty);
     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
@@ -1583,34 +1585,29 @@
               "Structure type with invalid element type", ElTy, STy);
       VerifyType(ElTy);
     }
-  } break;
-  case Type::UnionTyID: {
-    const UnionType *UTy = cast<UnionType>(Ty);
-    for (unsigned i = 0, e = UTy->getNumElements(); i != e; ++i) {
-      const Type *ElTy = UTy->getElementType(i);
-      Assert2(UnionType::isValidElementType(ElTy),
-              "Union type with invalid element type", ElTy, UTy);
-      VerifyType(ElTy);
-    }
-  } break;
+    break;
+  }
   case Type::ArrayTyID: {
     const ArrayType *ATy = cast<ArrayType>(Ty);
     Assert1(ArrayType::isValidElementType(ATy->getElementType()),
             "Array type with invalid element type", ATy);
     VerifyType(ATy->getElementType());
-  } break;
+    break;
+  }
   case Type::PointerTyID: {
     const PointerType *PTy = cast<PointerType>(Ty);
     Assert1(PointerType::isValidElementType(PTy->getElementType()),
             "Pointer type with invalid element type", PTy);
     VerifyType(PTy->getElementType());
-  } break;
+    break;
+  }
   case Type::VectorTyID: {
     const VectorType *VTy = cast<VectorType>(Ty);
     Assert1(VectorType::isValidElementType(VTy->getElementType()),
             "Vector type with invalid element type", VTy);
     VerifyType(VTy->getElementType());
-  } break;
+    break;
+  }
   default:
     break;
   }
@@ -1661,10 +1658,14 @@
     if (ID == Intrinsic::gcroot) {
       AllocaInst *AI =
         dyn_cast<AllocaInst>(CI.getArgOperand(0)->stripPointerCasts());
-      Assert1(AI && AI->getType()->getElementType()->isPointerTy(),
-              "llvm.gcroot parameter #1 must be a pointer alloca.", &CI);
+      Assert1(AI, "llvm.gcroot parameter #1 must be an alloca.", &CI);
       Assert1(isa<Constant>(CI.getArgOperand(1)),
               "llvm.gcroot parameter #2 must be a constant.", &CI);
+      if (!AI->getType()->getElementType()->isPointerTy()) {
+        Assert1(!isa<ConstantPointerNull>(CI.getArgOperand(1)),
+                "llvm.gcroot parameter #1 must either be a pointer alloca, "
+                "or argument #2 must be a non-null constant.", &CI);
+      }
     }
 
     Assert1(CI.getParent()->getParent()->hasGC(),
@@ -1836,8 +1837,13 @@
     // and iPTR. In the verifier, we can not distinguish which case we have so
     // allow either case to be legal.
     if (const PointerType* PTyp = dyn_cast<PointerType>(Ty)) {
-      Suffix += ".p" + utostr(PTyp->getAddressSpace()) + 
-        EVT::getEVT(PTyp->getElementType()).getEVTString();
+      EVT PointeeVT = EVT::getEVT(PTyp->getElementType(), true);
+      if (PointeeVT == MVT::Other) {
+        CheckFailed("Intrinsic has pointer to complex type.");
+        return false;
+      }
+      Suffix += ".p" + utostr(PTyp->getAddressSpace()) +
+        PointeeVT.getEVTString();
     } else {
       CheckFailed(IntrinsicParam(ArgNo, NumRetVals) + " is not a "
                   "pointer and a pointer is required.", F);

Modified: llvm/branches/wendling/eh/projects/Makefile
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/projects/Makefile?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/projects/Makefile (original)
+++ llvm/branches/wendling/eh/projects/Makefile Tue Oct 26 19:48:03 2010
@@ -14,9 +14,12 @@
 # Before 2008.06.24 it lived in llvm-test, so exclude that as well for now.
 DIRS:= $(filter-out llvm-test test-suite,$(patsubst $(PROJ_SRC_DIR)/%/Makefile,%,$(wildcard $(PROJ_SRC_DIR)/*/Makefile)))
 
-# Don't build compiler-rt either, it isn't designed to be built directly.
+# Don't build compiler-rt, it isn't designed to be built directly.
 DIRS := $(filter-out compiler-rt,$(DIRS))
 
+# Don't build libcxx, it isn't designed to be built directly.
+DIRS := $(filter-out libcxx,$(DIRS))
+
 # Sparc cannot link shared libraries (libtool problem?)
 ifeq ($(ARCH), Sparc)
 DIRS := $(filter-out sample, $(DIRS))

Modified: llvm/branches/wendling/eh/runtime/libprofile/Makefile
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/runtime/libprofile/Makefile?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/runtime/libprofile/Makefile (original)
+++ llvm/branches/wendling/eh/runtime/libprofile/Makefile Tue Oct 26 19:48:03 2010
@@ -1,10 +1,10 @@
 ##===- runtime/libprofile/Makefile -------------------------*- Makefile -*-===##
-# 
+#
 #                     The LLVM Compiler Infrastructure
 #
 # This file is distributed under the University of Illinois Open Source
 # License. See LICENSE.TXT for details.
-# 
+#
 ##===----------------------------------------------------------------------===##
 
 LEVEL = ../..
@@ -16,7 +16,7 @@
 SHARED_LIBRARY = 1
 LOADABLE_MODULE = 1
 LIBRARYNAME = profile_rt
-EXTRA_DIST = exported_symbols.lst
-EXPORTED_SYMBOL_FILE = $(PROJ_SRC_DIR)/exported_symbols.lst
+EXTRA_DIST = libprofile.exports
+EXPORTED_SYMBOL_FILE = $(PROJ_SRC_DIR)/libprofile.exports
 
 include $(LEVEL)/Makefile.common

Removed: llvm/branches/wendling/eh/runtime/libprofile/exported_symbols.lst
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/runtime/libprofile/exported_symbols.lst?rev=117424&view=auto
==============================================================================
--- llvm/branches/wendling/eh/runtime/libprofile/exported_symbols.lst (original)
+++ llvm/branches/wendling/eh/runtime/libprofile/exported_symbols.lst (removed)
@@ -1,4 +0,0 @@
-llvm_start_edge_profiling
-llvm_start_opt_edge_profiling
-llvm_start_basic_block_tracing
-llvm_trace_basic_block

Propchange: llvm/branches/wendling/eh/test/
------------------------------------------------------------------------------
--- svn:ignore (original)
+++ svn:ignore Tue Oct 26 19:48:03 2010
@@ -2,3 +2,4 @@
 *.sum
 site.exp
 site.bak
+lit.site.cfg

Modified: llvm/branches/wendling/eh/test/Analysis/BasicAA/2003-02-26-AccessSizeTest.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Analysis/BasicAA/2003-02-26-AccessSizeTest.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Analysis/BasicAA/2003-02-26-AccessSizeTest.ll (original)
+++ llvm/branches/wendling/eh/test/Analysis/BasicAA/2003-02-26-AccessSizeTest.ll Tue Oct 26 19:48:03 2010
@@ -2,7 +2,7 @@
 ; is performed.  It is not legal to delete the second load instruction because
 ; the value computed by the first load instruction is changed by the store.
 
-; RUN: opt < %s -gvn -instcombine -S | grep DONOTREMOVE
+; RUN: opt < %s -basicaa -gvn -instcombine -S | grep DONOTREMOVE
 
 define i32 @test() {
 	%A = alloca i32

Modified: llvm/branches/wendling/eh/test/Analysis/BasicAA/2003-04-22-GEPProblem.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Analysis/BasicAA/2003-04-22-GEPProblem.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Analysis/BasicAA/2003-04-22-GEPProblem.ll (original)
+++ llvm/branches/wendling/eh/test/Analysis/BasicAA/2003-04-22-GEPProblem.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: opt < %s -gvn -instcombine -S | grep sub
+; RUN: opt < %s -basicaa -gvn -instcombine -S | grep sub
 
 ; BasicAA was incorrectly concluding that P1 and P2 didn't conflict!
 

Modified: llvm/branches/wendling/eh/test/Analysis/BasicAA/2003-05-21-GEP-Problem.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Analysis/BasicAA/2003-05-21-GEP-Problem.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Analysis/BasicAA/2003-05-21-GEP-Problem.ll (original)
+++ llvm/branches/wendling/eh/test/Analysis/BasicAA/2003-05-21-GEP-Problem.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: opt < %s -licm -disable-output
+; RUN: opt < %s -basicaa -licm -disable-output
 	%struct..apr_array_header_t = type { i32*, i32, i32, i32, i8* }
 	%struct..apr_table_t = type { %struct..apr_array_header_t, i32, [32 x i32], [32 x i32] }
 

Modified: llvm/branches/wendling/eh/test/Analysis/BasicAA/2003-09-19-LocalArgument.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Analysis/BasicAA/2003-09-19-LocalArgument.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Analysis/BasicAA/2003-09-19-LocalArgument.ll (original)
+++ llvm/branches/wendling/eh/test/Analysis/BasicAA/2003-09-19-LocalArgument.ll Tue Oct 26 19:48:03 2010
@@ -1,6 +1,6 @@
 ; In this test, a local alloca cannot alias an incoming argument.
 
-; RUN: opt < %s -gvn -instcombine -S | not grep sub
+; RUN: opt < %s -basicaa -gvn -instcombine -S | not grep sub
 
 define i32 @test(i32* %P) {
 	%X = alloca i32

Modified: llvm/branches/wendling/eh/test/Analysis/BasicAA/2003-11-04-SimpleCases.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Analysis/BasicAA/2003-11-04-SimpleCases.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Analysis/BasicAA/2003-11-04-SimpleCases.ll (original)
+++ llvm/branches/wendling/eh/test/Analysis/BasicAA/2003-11-04-SimpleCases.ll Tue Oct 26 19:48:03 2010
@@ -1,7 +1,7 @@
 ; This testcase consists of alias relations which should be completely
 ; resolvable by basicaa.
 
-; RUN: opt < %s -aa-eval -print-may-aliases -disable-output \
+; RUN: opt < %s -basicaa -aa-eval -print-may-aliases -disable-output \
 ; RUN: |& not grep May:
 
 %T = type { i32, [10 x i8] }

Modified: llvm/branches/wendling/eh/test/Analysis/BasicAA/2003-12-11-ConstExprGEP.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Analysis/BasicAA/2003-12-11-ConstExprGEP.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Analysis/BasicAA/2003-12-11-ConstExprGEP.ll (original)
+++ llvm/branches/wendling/eh/test/Analysis/BasicAA/2003-12-11-ConstExprGEP.ll Tue Oct 26 19:48:03 2010
@@ -1,7 +1,7 @@
 ; This testcase consists of alias relations which should be completely
 ; resolvable by basicaa, but require analysis of getelementptr constant exprs.
 
-; RUN: opt < %s -aa-eval -print-may-aliases -disable-output \
+; RUN: opt < %s -basicaa -aa-eval -print-may-aliases -disable-output \
 ; RUN: |& not grep May:
 
 %T = type { i32, [10 x i8] }

Modified: llvm/branches/wendling/eh/test/Analysis/BasicAA/2004-07-28-MustAliasbug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Analysis/BasicAA/2004-07-28-MustAliasbug.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Analysis/BasicAA/2004-07-28-MustAliasbug.ll (original)
+++ llvm/branches/wendling/eh/test/Analysis/BasicAA/2004-07-28-MustAliasbug.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: opt < %s -dse -S | grep {store i32 0}
+; RUN: opt < %s -basicaa -dse -S | grep {store i32 0}
 
 define void @test({i32,i32 }* %P) {
 	%Q = getelementptr {i32,i32}* %P, i32 1

Modified: llvm/branches/wendling/eh/test/Analysis/BasicAA/2004-12-08-BasicAACrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Analysis/BasicAA/2004-12-08-BasicAACrash.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Analysis/BasicAA/2004-12-08-BasicAACrash.ll (original)
+++ llvm/branches/wendling/eh/test/Analysis/BasicAA/2004-12-08-BasicAACrash.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: opt < %s -licm
+; RUN: opt < %s -basicaa -licm
 
 %"java/lang/Object" = type { %struct.llvm_java_object_base }
 %"java/lang/StringBuffer" = type { "java/lang/Object", i32, { "java/lang/Object", i32, [0 x i8] }*, i1 }

Modified: llvm/branches/wendling/eh/test/Analysis/BasicAA/2004-12-08-BasicAACrash2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Analysis/BasicAA/2004-12-08-BasicAACrash2.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Analysis/BasicAA/2004-12-08-BasicAACrash2.ll (original)
+++ llvm/branches/wendling/eh/test/Analysis/BasicAA/2004-12-08-BasicAACrash2.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: opt < %s -dse
+; RUN: opt < %s -basicaa -dse
 
 %"java/lang/Object" = type { %struct.llvm_java_object_base }
 %"java/lang/StringBuffer" = type { "java/lang/Object", i32, { "java/lang/Object", i32, [0 x i8] }*, i1 }

Modified: llvm/branches/wendling/eh/test/Analysis/BasicAA/2006-03-03-BadArraySubscript.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Analysis/BasicAA/2006-03-03-BadArraySubscript.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Analysis/BasicAA/2006-03-03-BadArraySubscript.ll (original)
+++ llvm/branches/wendling/eh/test/Analysis/BasicAA/2006-03-03-BadArraySubscript.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: opt < %s -aa-eval -disable-output |& grep {2 no alias respon}
+; RUN: opt < %s -basicaa -aa-eval -disable-output |& grep {2 no alias respon}
 ; TEST that A[1][0] may alias A[0][i].
 target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
 

Modified: llvm/branches/wendling/eh/test/Analysis/BasicAA/2006-11-03-BasicAAVectorCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Analysis/BasicAA/2006-11-03-BasicAAVectorCrash.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Analysis/BasicAA/2006-11-03-BasicAAVectorCrash.ll (original)
+++ llvm/branches/wendling/eh/test/Analysis/BasicAA/2006-11-03-BasicAAVectorCrash.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: opt < %s -licm -disable-output
+; RUN: opt < %s -basicaa -licm -disable-output
 target datalayout = "E-p:32:32"
 target triple = "powerpc-apple-darwin8.7.0"
 

Modified: llvm/branches/wendling/eh/test/Analysis/BasicAA/2007-11-05-SizeCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Analysis/BasicAA/2007-11-05-SizeCrash.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Analysis/BasicAA/2007-11-05-SizeCrash.ll (original)
+++ llvm/branches/wendling/eh/test/Analysis/BasicAA/2007-11-05-SizeCrash.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: opt < %s -gvn -disable-output
+; RUN: opt < %s -basicaa -gvn -disable-output
 ; PR1774
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"

Modified: llvm/branches/wendling/eh/test/Analysis/BasicAA/2007-12-08-OutOfBoundsCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Analysis/BasicAA/2007-12-08-OutOfBoundsCrash.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Analysis/BasicAA/2007-12-08-OutOfBoundsCrash.ll (original)
+++ llvm/branches/wendling/eh/test/Analysis/BasicAA/2007-12-08-OutOfBoundsCrash.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: opt < %s -gvn -disable-output
+; RUN: opt < %s -basicaa -gvn -disable-output
 ; PR1782
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"

Modified: llvm/branches/wendling/eh/test/Analysis/BasicAA/2008-06-02-GEPTailCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Analysis/BasicAA/2008-06-02-GEPTailCrash.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Analysis/BasicAA/2008-06-02-GEPTailCrash.ll (original)
+++ llvm/branches/wendling/eh/test/Analysis/BasicAA/2008-06-02-GEPTailCrash.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: opt < %s -gvn -disable-output
+; RUN: opt < %s -basicaa -gvn -disable-output
 ; PR2395
 
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"

Modified: llvm/branches/wendling/eh/test/Analysis/BasicAA/2008-11-23-NoaliasRet.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Analysis/BasicAA/2008-11-23-NoaliasRet.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Analysis/BasicAA/2008-11-23-NoaliasRet.ll (original)
+++ llvm/branches/wendling/eh/test/Analysis/BasicAA/2008-11-23-NoaliasRet.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: opt < %s -aa-eval |& grep {1 no alias response}
+; RUN: opt < %s -basicaa -aa-eval |& grep {1 no alias response}
 
 declare noalias i32* @_Znwj(i32 %x) nounwind
 

Modified: llvm/branches/wendling/eh/test/Analysis/BasicAA/2009-10-13-AtomicModRef.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Analysis/BasicAA/2009-10-13-AtomicModRef.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Analysis/BasicAA/2009-10-13-AtomicModRef.ll (original)
+++ llvm/branches/wendling/eh/test/Analysis/BasicAA/2009-10-13-AtomicModRef.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: opt -gvn -instcombine -S < %s | FileCheck %s
+; RUN: opt -basicaa -gvn -instcombine -S < %s | FileCheck %s
 target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
 
 declare i8 @llvm.atomic.load.add.i8.p0i8(i8*, i8)

Modified: llvm/branches/wendling/eh/test/Analysis/BasicAA/2009-10-13-GEP-BaseNoAlias.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Analysis/BasicAA/2009-10-13-GEP-BaseNoAlias.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Analysis/BasicAA/2009-10-13-GEP-BaseNoAlias.ll (original)
+++ llvm/branches/wendling/eh/test/Analysis/BasicAA/2009-10-13-GEP-BaseNoAlias.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: opt < %s -aa-eval -print-all-alias-modref-info -disable-output |& grep {NoAlias:.*%P,.*@Z}
+; RUN: opt < %s -basicaa -aa-eval -print-all-alias-modref-info -disable-output |& grep {NoAlias:.*%P,.*@Z}
 ; If GEP base doesn't alias Z, then GEP doesn't alias Z.
 ; rdar://7282591
 

Modified: llvm/branches/wendling/eh/test/Analysis/BasicAA/args-rets-allocas-loads.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Analysis/BasicAA/args-rets-allocas-loads.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Analysis/BasicAA/args-rets-allocas-loads.ll (original)
+++ llvm/branches/wendling/eh/test/Analysis/BasicAA/args-rets-allocas-loads.ll Tue Oct 26 19:48:03 2010
@@ -1,5 +1,4 @@
-; RUN: opt -interprocedural-basic-aa -interprocedural-aa-eval -print-all-alias-modref-info -disable-output < %s |& FileCheck --check-prefix=IPAA %s
-; RUN: opt -basicaa -aa-eval -print-all-alias-modref-info -disable-output < %s |& FileCheck --check-prefix=FUNCAA %s
+; RUN: opt -basicaa -aa-eval -print-all-alias-modref-info -disable-output < %s |& FileCheck %s
 
 declare void @callee(double* %callee_arg)
 declare void @nocap_callee(double* nocapture %nocap_callee_arg)
@@ -48,1675 +47,264 @@
   ret void
 }
 
-; caller_b is the same as caller_a but with different names, to test
-; interprocedural queries.
-define void @caller_b(double* %arg_b0,
-                      double* %arg_b1,
-                      double* noalias %noalias_arg_b0,
-                      double* noalias %noalias_arg_b1,
-                      double** %indirect_b0,
-                      double** %indirect_b1) {
-  %loaded_b0 = load double** %indirect_b0
-  %loaded_b1 = load double** %indirect_b1
-
-  %escape_alloca_b0 = alloca double
-  %escape_alloca_b1 = alloca double
-  %noescape_alloca_b0 = alloca double
-  %noescape_alloca_b1 = alloca double
-
-  %normal_ret_b0 = call double* @normal_returner()
-  %normal_ret_b1 = call double* @normal_returner()
-  %noalias_ret_b0 = call double* @noalias_returner()
-  %noalias_ret_b1 = call double* @noalias_returner()
-
-  call void @callee(double* %escape_alloca_b0)
-  call void @callee(double* %escape_alloca_b1)
-  call void @nocap_callee(double* %noescape_alloca_b0)
-  call void @nocap_callee(double* %noescape_alloca_b1)
-
-  store double 0.0, double* %loaded_b0
-  store double 0.0, double* %loaded_b1
-  store double 0.0, double* %arg_b0
-  store double 0.0, double* %arg_b1
-  store double 0.0, double* %noalias_arg_b0
-  store double 0.0, double* %noalias_arg_b1
-  store double 0.0, double* %escape_alloca_b0
-  store double 0.0, double* %escape_alloca_b1
-  store double 0.0, double* %noescape_alloca_b0
-  store double 0.0, double* %noescape_alloca_b1
-  store double 0.0, double* %normal_ret_b0
-  store double 0.0, double* %normal_ret_b1
-  store double 0.0, double* %noalias_ret_b0
-  store double 0.0, double* %noalias_ret_b1
-  ret void
-}
-
-; FUNCAA: Function: caller_a: 16 pointers, 8 call sites
-; FUNCAA:   MayAlias:	double* %arg_a0, double* %arg_a1
-; FUNCAA:   NoAlias:	double* %arg_a0, double* %noalias_arg_a0
-; FUNCAA:   NoAlias:	double* %arg_a1, double* %noalias_arg_a0
-; FUNCAA:   NoAlias:	double* %arg_a0, double* %noalias_arg_a1
-; FUNCAA:   NoAlias:	double* %arg_a1, double* %noalias_arg_a1
-; FUNCAA:   NoAlias:	double* %noalias_arg_a0, double* %noalias_arg_a1
-; FUNCAA:   MayAlias:	double* %arg_a0, double** %indirect_a0
-; FUNCAA:   MayAlias:	double* %arg_a1, double** %indirect_a0
-; FUNCAA:   NoAlias:	double* %noalias_arg_a0, double** %indirect_a0
-; FUNCAA:   NoAlias:	double* %noalias_arg_a1, double** %indirect_a0
-; FUNCAA:   MayAlias:	double* %arg_a0, double** %indirect_a1
-; FUNCAA:   MayAlias:	double* %arg_a1, double** %indirect_a1
-; FUNCAA:   NoAlias:	double* %noalias_arg_a0, double** %indirect_a1
-; FUNCAA:   NoAlias:	double* %noalias_arg_a1, double** %indirect_a1
-; FUNCAA:   MayAlias:	double** %indirect_a0, double** %indirect_a1
-; FUNCAA:   MayAlias:	double* %arg_a0, double* %loaded_a0
-; FUNCAA:   MayAlias:	double* %arg_a1, double* %loaded_a0
-; FUNCAA:   NoAlias:	double* %loaded_a0, double* %noalias_arg_a0
-; FUNCAA:   NoAlias:	double* %loaded_a0, double* %noalias_arg_a1
-; FUNCAA:   MayAlias:	double* %loaded_a0, double** %indirect_a0
-; FUNCAA:   MayAlias:	double* %loaded_a0, double** %indirect_a1
-; FUNCAA:   MayAlias:	double* %arg_a0, double* %loaded_a1
-; FUNCAA:   MayAlias:	double* %arg_a1, double* %loaded_a1
-; FUNCAA:   NoAlias:	double* %loaded_a1, double* %noalias_arg_a0
-; FUNCAA:   NoAlias:	double* %loaded_a1, double* %noalias_arg_a1
-; FUNCAA:   MayAlias:	double* %loaded_a1, double** %indirect_a0
-; FUNCAA:   MayAlias:	double* %loaded_a1, double** %indirect_a1
-; FUNCAA:   MayAlias:	double* %loaded_a0, double* %loaded_a1
-; FUNCAA:   NoAlias:	double* %arg_a0, double* %escape_alloca_a0
-; FUNCAA:   NoAlias:	double* %arg_a1, double* %escape_alloca_a0
-; FUNCAA:   NoAlias:	double* %escape_alloca_a0, double* %noalias_arg_a0
-; FUNCAA:   NoAlias:	double* %escape_alloca_a0, double* %noalias_arg_a1
-; FUNCAA:   NoAlias:	double* %escape_alloca_a0, double** %indirect_a0
-; FUNCAA:   NoAlias:	double* %escape_alloca_a0, double** %indirect_a1
-; FUNCAA:   MayAlias:	double* %escape_alloca_a0, double* %loaded_a0
-; FUNCAA:   MayAlias:	double* %escape_alloca_a0, double* %loaded_a1
-; FUNCAA:   NoAlias:	double* %arg_a0, double* %escape_alloca_a1
-; FUNCAA:   NoAlias:	double* %arg_a1, double* %escape_alloca_a1
-; FUNCAA:   NoAlias:	double* %escape_alloca_a1, double* %noalias_arg_a0
-; FUNCAA:   NoAlias:	double* %escape_alloca_a1, double* %noalias_arg_a1
-; FUNCAA:   NoAlias:	double* %escape_alloca_a1, double** %indirect_a0
-; FUNCAA:   NoAlias:	double* %escape_alloca_a1, double** %indirect_a1
-; FUNCAA:   MayAlias:	double* %escape_alloca_a1, double* %loaded_a0
-; FUNCAA:   MayAlias:	double* %escape_alloca_a1, double* %loaded_a1
-; FUNCAA:   NoAlias:	double* %escape_alloca_a0, double* %escape_alloca_a1
-; FUNCAA:   NoAlias:	double* %arg_a0, double* %noescape_alloca_a0
-; FUNCAA:   NoAlias:	double* %arg_a1, double* %noescape_alloca_a0
-; FUNCAA:   NoAlias:	double* %noalias_arg_a0, double* %noescape_alloca_a0
-; FUNCAA:   NoAlias:	double* %noalias_arg_a1, double* %noescape_alloca_a0
-; FUNCAA:   NoAlias:	double* %noescape_alloca_a0, double** %indirect_a0
-; FUNCAA:   NoAlias:	double* %noescape_alloca_a0, double** %indirect_a1
-; FUNCAA:   NoAlias:	double* %loaded_a0, double* %noescape_alloca_a0
-; FUNCAA:   NoAlias:	double* %loaded_a1, double* %noescape_alloca_a0
-; FUNCAA:   NoAlias:	double* %escape_alloca_a0, double* %noescape_alloca_a0
-; FUNCAA:   NoAlias:	double* %escape_alloca_a1, double* %noescape_alloca_a0
-; FUNCAA:   NoAlias:	double* %arg_a0, double* %noescape_alloca_a1
-; FUNCAA:   NoAlias:	double* %arg_a1, double* %noescape_alloca_a1
-; FUNCAA:   NoAlias:	double* %noalias_arg_a0, double* %noescape_alloca_a1
-; FUNCAA:   NoAlias:	double* %noalias_arg_a1, double* %noescape_alloca_a1
-; FUNCAA:   NoAlias:	double* %noescape_alloca_a1, double** %indirect_a0
-; FUNCAA:   NoAlias:	double* %noescape_alloca_a1, double** %indirect_a1
-; FUNCAA:   NoAlias:	double* %loaded_a0, double* %noescape_alloca_a1
-; FUNCAA:   NoAlias:	double* %loaded_a1, double* %noescape_alloca_a1
-; FUNCAA:   NoAlias:	double* %escape_alloca_a0, double* %noescape_alloca_a1
-; FUNCAA:   NoAlias:	double* %escape_alloca_a1, double* %noescape_alloca_a1
-; FUNCAA:   NoAlias:	double* %noescape_alloca_a0, double* %noescape_alloca_a1
-; FUNCAA:   MayAlias:	double* %arg_a0, double* %normal_ret_a0
-; FUNCAA:   MayAlias:	double* %arg_a1, double* %normal_ret_a0
-; FUNCAA:   NoAlias:	double* %noalias_arg_a0, double* %normal_ret_a0
-; FUNCAA:   NoAlias:	double* %noalias_arg_a1, double* %normal_ret_a0
-; FUNCAA:   MayAlias:	double* %normal_ret_a0, double** %indirect_a0
-; FUNCAA:   MayAlias:	double* %normal_ret_a0, double** %indirect_a1
-; FUNCAA:   MayAlias:	double* %loaded_a0, double* %normal_ret_a0
-; FUNCAA:   MayAlias:	double* %loaded_a1, double* %normal_ret_a0
-; FUNCAA:   MayAlias:	double* %escape_alloca_a0, double* %normal_ret_a0
-; FUNCAA:   MayAlias:	double* %escape_alloca_a1, double* %normal_ret_a0
-; FUNCAA:   NoAlias:	double* %noescape_alloca_a0, double* %normal_ret_a0
-; FUNCAA:   NoAlias:	double* %noescape_alloca_a1, double* %normal_ret_a0
-; FUNCAA:   MayAlias:	double* %arg_a0, double* %normal_ret_a1
-; FUNCAA:   MayAlias:	double* %arg_a1, double* %normal_ret_a1
-; FUNCAA:   NoAlias:	double* %noalias_arg_a0, double* %normal_ret_a1
-; FUNCAA:   NoAlias:	double* %noalias_arg_a1, double* %normal_ret_a1
-; FUNCAA:   MayAlias:	double* %normal_ret_a1, double** %indirect_a0
-; FUNCAA:   MayAlias:	double* %normal_ret_a1, double** %indirect_a1
-; FUNCAA:   MayAlias:	double* %loaded_a0, double* %normal_ret_a1
-; FUNCAA:   MayAlias:	double* %loaded_a1, double* %normal_ret_a1
-; FUNCAA:   MayAlias:	double* %escape_alloca_a0, double* %normal_ret_a1
-; FUNCAA:   MayAlias:	double* %escape_alloca_a1, double* %normal_ret_a1
-; FUNCAA:   NoAlias:	double* %noescape_alloca_a0, double* %normal_ret_a1
-; FUNCAA:   NoAlias:	double* %noescape_alloca_a1, double* %normal_ret_a1
-; FUNCAA:   MayAlias:	double* %normal_ret_a0, double* %normal_ret_a1
-; FUNCAA:   NoAlias:	double* %arg_a0, double* %noalias_ret_a0
-; FUNCAA:   NoAlias:	double* %arg_a1, double* %noalias_ret_a0
-; FUNCAA:   NoAlias:	double* %noalias_arg_a0, double* %noalias_ret_a0
-; FUNCAA:   NoAlias:	double* %noalias_arg_a1, double* %noalias_ret_a0
-; FUNCAA:   NoAlias:	double* %noalias_ret_a0, double** %indirect_a0
-; FUNCAA:   NoAlias:	double* %noalias_ret_a0, double** %indirect_a1
-; FUNCAA:   NoAlias:	double* %loaded_a0, double* %noalias_ret_a0
-; FUNCAA:   NoAlias:	double* %loaded_a1, double* %noalias_ret_a0
-; FUNCAA:   NoAlias:	double* %escape_alloca_a0, double* %noalias_ret_a0
-; FUNCAA:   NoAlias:	double* %escape_alloca_a1, double* %noalias_ret_a0
-; FUNCAA:   NoAlias:	double* %noalias_ret_a0, double* %noescape_alloca_a0
-; FUNCAA:   NoAlias:	double* %noalias_ret_a0, double* %noescape_alloca_a1
-; FUNCAA:   NoAlias:	double* %noalias_ret_a0, double* %normal_ret_a0
-; FUNCAA:   NoAlias:	double* %noalias_ret_a0, double* %normal_ret_a1
-; FUNCAA:   NoAlias:	double* %arg_a0, double* %noalias_ret_a1
-; FUNCAA:   NoAlias:	double* %arg_a1, double* %noalias_ret_a1
-; FUNCAA:   NoAlias:	double* %noalias_arg_a0, double* %noalias_ret_a1
-; FUNCAA:   NoAlias:	double* %noalias_arg_a1, double* %noalias_ret_a1
-; FUNCAA:   NoAlias:	double* %noalias_ret_a1, double** %indirect_a0
-; FUNCAA:   NoAlias:	double* %noalias_ret_a1, double** %indirect_a1
-; FUNCAA:   NoAlias:	double* %loaded_a0, double* %noalias_ret_a1
-; FUNCAA:   NoAlias:	double* %loaded_a1, double* %noalias_ret_a1
-; FUNCAA:   NoAlias:	double* %escape_alloca_a0, double* %noalias_ret_a1
-; FUNCAA:   NoAlias:	double* %escape_alloca_a1, double* %noalias_ret_a1
-; FUNCAA:   NoAlias:	double* %noalias_ret_a1, double* %noescape_alloca_a0
-; FUNCAA:   NoAlias:	double* %noalias_ret_a1, double* %noescape_alloca_a1
-; FUNCAA:   NoAlias:	double* %noalias_ret_a1, double* %normal_ret_a0
-; FUNCAA:   NoAlias:	double* %noalias_ret_a1, double* %normal_ret_a1
-; FUNCAA:   NoAlias:	double* %noalias_ret_a0, double* %noalias_ret_a1
-; FUNCAA:     ModRef:  Ptr: double* %arg_a0	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %arg_a1	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_arg_a0	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_arg_a1	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double** %indirect_a0	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double** %indirect_a1	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %loaded_a0	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %loaded_a1	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %escape_alloca_a0	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %escape_alloca_a1	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noescape_alloca_a0	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noescape_alloca_a1	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %normal_ret_a0	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %normal_ret_a1	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_ret_a0	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_ret_a1	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %arg_a0	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %arg_a1	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_arg_a0	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_arg_a1	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double** %indirect_a0	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double** %indirect_a1	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %loaded_a0	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %loaded_a1	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %escape_alloca_a0	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %escape_alloca_a1	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noescape_alloca_a0	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noescape_alloca_a1	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %normal_ret_a0	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %normal_ret_a1	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_ret_a0	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_ret_a1	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %arg_a0	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %arg_a1	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_arg_a0	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_arg_a1	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double** %indirect_a0	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double** %indirect_a1	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %loaded_a0	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %loaded_a1	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %escape_alloca_a0	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %escape_alloca_a1	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noescape_alloca_a0	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noescape_alloca_a1	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %normal_ret_a0	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %normal_ret_a1	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %noalias_ret_a0	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_ret_a1	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %arg_a0	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %arg_a1	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_arg_a0	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_arg_a1	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double** %indirect_a0	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double** %indirect_a1	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %loaded_a0	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %loaded_a1	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %escape_alloca_a0	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %escape_alloca_a1	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noescape_alloca_a0	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noescape_alloca_a1	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %normal_ret_a0	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %normal_ret_a1	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_ret_a0	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %noalias_ret_a1	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %arg_a0	<->  call void @callee(double* %escape_alloca_a0)
-; FUNCAA:     ModRef:  Ptr: double* %arg_a1	<->  call void @callee(double* %escape_alloca_a0)
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_arg_a0	<->  call void @callee(double* %escape_alloca_a0)
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_arg_a1	<->  call void @callee(double* %escape_alloca_a0)
-; FUNCAA:     ModRef:  Ptr: double** %indirect_a0	<->  call void @callee(double* %escape_alloca_a0)
-; FUNCAA:     ModRef:  Ptr: double** %indirect_a1	<->  call void @callee(double* %escape_alloca_a0)
-; FUNCAA:     ModRef:  Ptr: double* %loaded_a0	<->  call void @callee(double* %escape_alloca_a0)
-; FUNCAA:     ModRef:  Ptr: double* %loaded_a1	<->  call void @callee(double* %escape_alloca_a0)
-; FUNCAA:     ModRef:  Ptr: double* %escape_alloca_a0	<->  call void @callee(double* %escape_alloca_a0)
-; FUNCAA:     ModRef:  Ptr: double* %escape_alloca_a1	<->  call void @callee(double* %escape_alloca_a0)
-; FUNCAA:   NoModRef:  Ptr: double* %noescape_alloca_a0	<->  call void @callee(double* %escape_alloca_a0)
-; FUNCAA:   NoModRef:  Ptr: double* %noescape_alloca_a1	<->  call void @callee(double* %escape_alloca_a0)
-; FUNCAA:     ModRef:  Ptr: double* %normal_ret_a0	<->  call void @callee(double* %escape_alloca_a0)
-; FUNCAA:     ModRef:  Ptr: double* %normal_ret_a1	<->  call void @callee(double* %escape_alloca_a0)
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_ret_a0	<->  call void @callee(double* %escape_alloca_a0)
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_ret_a1	<->  call void @callee(double* %escape_alloca_a0)
-; FUNCAA:     ModRef:  Ptr: double* %arg_a0	<->  call void @callee(double* %escape_alloca_a1)
-; FUNCAA:     ModRef:  Ptr: double* %arg_a1	<->  call void @callee(double* %escape_alloca_a1)
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_arg_a0	<->  call void @callee(double* %escape_alloca_a1)
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_arg_a1	<->  call void @callee(double* %escape_alloca_a1)
-; FUNCAA:     ModRef:  Ptr: double** %indirect_a0	<->  call void @callee(double* %escape_alloca_a1)
-; FUNCAA:     ModRef:  Ptr: double** %indirect_a1	<->  call void @callee(double* %escape_alloca_a1)
-; FUNCAA:     ModRef:  Ptr: double* %loaded_a0	<->  call void @callee(double* %escape_alloca_a1)
-; FUNCAA:     ModRef:  Ptr: double* %loaded_a1	<->  call void @callee(double* %escape_alloca_a1)
-; FUNCAA:     ModRef:  Ptr: double* %escape_alloca_a0	<->  call void @callee(double* %escape_alloca_a1)
-; FUNCAA:     ModRef:  Ptr: double* %escape_alloca_a1	<->  call void @callee(double* %escape_alloca_a1)
-; FUNCAA:   NoModRef:  Ptr: double* %noescape_alloca_a0	<->  call void @callee(double* %escape_alloca_a1)
-; FUNCAA:   NoModRef:  Ptr: double* %noescape_alloca_a1	<->  call void @callee(double* %escape_alloca_a1)
-; FUNCAA:     ModRef:  Ptr: double* %normal_ret_a0	<->  call void @callee(double* %escape_alloca_a1)
-; FUNCAA:     ModRef:  Ptr: double* %normal_ret_a1	<->  call void @callee(double* %escape_alloca_a1)
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_ret_a0	<->  call void @callee(double* %escape_alloca_a1)
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_ret_a1	<->  call void @callee(double* %escape_alloca_a1)
-; FUNCAA:     ModRef:  Ptr: double* %arg_a0	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; FUNCAA:     ModRef:  Ptr: double* %arg_a1	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_arg_a0	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_arg_a1	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; FUNCAA:     ModRef:  Ptr: double** %indirect_a0	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; FUNCAA:     ModRef:  Ptr: double** %indirect_a1	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; FUNCAA:     ModRef:  Ptr: double* %loaded_a0	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; FUNCAA:     ModRef:  Ptr: double* %loaded_a1	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; FUNCAA:     ModRef:  Ptr: double* %escape_alloca_a0	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; FUNCAA:     ModRef:  Ptr: double* %escape_alloca_a1	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; FUNCAA:     ModRef:  Ptr: double* %noescape_alloca_a0	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; FUNCAA:   NoModRef:  Ptr: double* %noescape_alloca_a1	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; FUNCAA:     ModRef:  Ptr: double* %normal_ret_a0	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; FUNCAA:     ModRef:  Ptr: double* %normal_ret_a1	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_ret_a0	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_ret_a1	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; FUNCAA:     ModRef:  Ptr: double* %arg_a0	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; FUNCAA:     ModRef:  Ptr: double* %arg_a1	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_arg_a0	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_arg_a1	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; FUNCAA:     ModRef:  Ptr: double** %indirect_a0	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; FUNCAA:     ModRef:  Ptr: double** %indirect_a1	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; FUNCAA:     ModRef:  Ptr: double* %loaded_a0	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; FUNCAA:     ModRef:  Ptr: double* %loaded_a1	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; FUNCAA:     ModRef:  Ptr: double* %escape_alloca_a0	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; FUNCAA:     ModRef:  Ptr: double* %escape_alloca_a1	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; FUNCAA:   NoModRef:  Ptr: double* %noescape_alloca_a0	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; FUNCAA:     ModRef:  Ptr: double* %noescape_alloca_a1	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; FUNCAA:     ModRef:  Ptr: double* %normal_ret_a0	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; FUNCAA:     ModRef:  Ptr: double* %normal_ret_a1	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_ret_a0	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_ret_a1	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; FUNCAA: Function: caller_b: 16 pointers, 8 call sites
-; FUNCAA:   MayAlias:	double* %arg_b0, double* %arg_b1
-; FUNCAA:   NoAlias:	double* %arg_b0, double* %noalias_arg_b0
-; FUNCAA:   NoAlias:	double* %arg_b1, double* %noalias_arg_b0
-; FUNCAA:   NoAlias:	double* %arg_b0, double* %noalias_arg_b1
-; FUNCAA:   NoAlias:	double* %arg_b1, double* %noalias_arg_b1
-; FUNCAA:   NoAlias:	double* %noalias_arg_b0, double* %noalias_arg_b1
-; FUNCAA:   MayAlias:	double* %arg_b0, double** %indirect_b0
-; FUNCAA:   MayAlias:	double* %arg_b1, double** %indirect_b0
-; FUNCAA:   NoAlias:	double* %noalias_arg_b0, double** %indirect_b0
-; FUNCAA:   NoAlias:	double* %noalias_arg_b1, double** %indirect_b0
-; FUNCAA:   MayAlias:	double* %arg_b0, double** %indirect_b1
-; FUNCAA:   MayAlias:	double* %arg_b1, double** %indirect_b1
-; FUNCAA:   NoAlias:	double* %noalias_arg_b0, double** %indirect_b1
-; FUNCAA:   NoAlias:	double* %noalias_arg_b1, double** %indirect_b1
-; FUNCAA:   MayAlias:	double** %indirect_b0, double** %indirect_b1
-; FUNCAA:   MayAlias:	double* %arg_b0, double* %loaded_b0
-; FUNCAA:   MayAlias:	double* %arg_b1, double* %loaded_b0
-; FUNCAA:   NoAlias:	double* %loaded_b0, double* %noalias_arg_b0
-; FUNCAA:   NoAlias:	double* %loaded_b0, double* %noalias_arg_b1
-; FUNCAA:   MayAlias:	double* %loaded_b0, double** %indirect_b0
-; FUNCAA:   MayAlias:	double* %loaded_b0, double** %indirect_b1
-; FUNCAA:   MayAlias:	double* %arg_b0, double* %loaded_b1
-; FUNCAA:   MayAlias:	double* %arg_b1, double* %loaded_b1
-; FUNCAA:   NoAlias:	double* %loaded_b1, double* %noalias_arg_b0
-; FUNCAA:   NoAlias:	double* %loaded_b1, double* %noalias_arg_b1
-; FUNCAA:   MayAlias:	double* %loaded_b1, double** %indirect_b0
-; FUNCAA:   MayAlias:	double* %loaded_b1, double** %indirect_b1
-; FUNCAA:   MayAlias:	double* %loaded_b0, double* %loaded_b1
-; FUNCAA:   NoAlias:	double* %arg_b0, double* %escape_alloca_b0
-; FUNCAA:   NoAlias:	double* %arg_b1, double* %escape_alloca_b0
-; FUNCAA:   NoAlias:	double* %escape_alloca_b0, double* %noalias_arg_b0
-; FUNCAA:   NoAlias:	double* %escape_alloca_b0, double* %noalias_arg_b1
-; FUNCAA:   NoAlias:	double* %escape_alloca_b0, double** %indirect_b0
-; FUNCAA:   NoAlias:	double* %escape_alloca_b0, double** %indirect_b1
-; FUNCAA:   MayAlias:	double* %escape_alloca_b0, double* %loaded_b0
-; FUNCAA:   MayAlias:	double* %escape_alloca_b0, double* %loaded_b1
-; FUNCAA:   NoAlias:	double* %arg_b0, double* %escape_alloca_b1
-; FUNCAA:   NoAlias:	double* %arg_b1, double* %escape_alloca_b1
-; FUNCAA:   NoAlias:	double* %escape_alloca_b1, double* %noalias_arg_b0
-; FUNCAA:   NoAlias:	double* %escape_alloca_b1, double* %noalias_arg_b1
-; FUNCAA:   NoAlias:	double* %escape_alloca_b1, double** %indirect_b0
-; FUNCAA:   NoAlias:	double* %escape_alloca_b1, double** %indirect_b1
-; FUNCAA:   MayAlias:	double* %escape_alloca_b1, double* %loaded_b0
-; FUNCAA:   MayAlias:	double* %escape_alloca_b1, double* %loaded_b1
-; FUNCAA:   NoAlias:	double* %escape_alloca_b0, double* %escape_alloca_b1
-; FUNCAA:   NoAlias:	double* %arg_b0, double* %noescape_alloca_b0
-; FUNCAA:   NoAlias:	double* %arg_b1, double* %noescape_alloca_b0
-; FUNCAA:   NoAlias:	double* %noalias_arg_b0, double* %noescape_alloca_b0
-; FUNCAA:   NoAlias:	double* %noalias_arg_b1, double* %noescape_alloca_b0
-; FUNCAA:   NoAlias:	double* %noescape_alloca_b0, double** %indirect_b0
-; FUNCAA:   NoAlias:	double* %noescape_alloca_b0, double** %indirect_b1
-; FUNCAA:   NoAlias:	double* %loaded_b0, double* %noescape_alloca_b0
-; FUNCAA:   NoAlias:	double* %loaded_b1, double* %noescape_alloca_b0
-; FUNCAA:   NoAlias:	double* %escape_alloca_b0, double* %noescape_alloca_b0
-; FUNCAA:   NoAlias:	double* %escape_alloca_b1, double* %noescape_alloca_b0
-; FUNCAA:   NoAlias:	double* %arg_b0, double* %noescape_alloca_b1
-; FUNCAA:   NoAlias:	double* %arg_b1, double* %noescape_alloca_b1
-; FUNCAA:   NoAlias:	double* %noalias_arg_b0, double* %noescape_alloca_b1
-; FUNCAA:   NoAlias:	double* %noalias_arg_b1, double* %noescape_alloca_b1
-; FUNCAA:   NoAlias:	double* %noescape_alloca_b1, double** %indirect_b0
-; FUNCAA:   NoAlias:	double* %noescape_alloca_b1, double** %indirect_b1
-; FUNCAA:   NoAlias:	double* %loaded_b0, double* %noescape_alloca_b1
-; FUNCAA:   NoAlias:	double* %loaded_b1, double* %noescape_alloca_b1
-; FUNCAA:   NoAlias:	double* %escape_alloca_b0, double* %noescape_alloca_b1
-; FUNCAA:   NoAlias:	double* %escape_alloca_b1, double* %noescape_alloca_b1
-; FUNCAA:   NoAlias:	double* %noescape_alloca_b0, double* %noescape_alloca_b1
-; FUNCAA:   MayAlias:	double* %arg_b0, double* %normal_ret_b0
-; FUNCAA:   MayAlias:	double* %arg_b1, double* %normal_ret_b0
-; FUNCAA:   NoAlias:	double* %noalias_arg_b0, double* %normal_ret_b0
-; FUNCAA:   NoAlias:	double* %noalias_arg_b1, double* %normal_ret_b0
-; FUNCAA:   MayAlias:	double* %normal_ret_b0, double** %indirect_b0
-; FUNCAA:   MayAlias:	double* %normal_ret_b0, double** %indirect_b1
-; FUNCAA:   MayAlias:	double* %loaded_b0, double* %normal_ret_b0
-; FUNCAA:   MayAlias:	double* %loaded_b1, double* %normal_ret_b0
-; FUNCAA:   MayAlias:	double* %escape_alloca_b0, double* %normal_ret_b0
-; FUNCAA:   MayAlias:	double* %escape_alloca_b1, double* %normal_ret_b0
-; FUNCAA:   NoAlias:	double* %noescape_alloca_b0, double* %normal_ret_b0
-; FUNCAA:   NoAlias:	double* %noescape_alloca_b1, double* %normal_ret_b0
-; FUNCAA:   MayAlias:	double* %arg_b0, double* %normal_ret_b1
-; FUNCAA:   MayAlias:	double* %arg_b1, double* %normal_ret_b1
-; FUNCAA:   NoAlias:	double* %noalias_arg_b0, double* %normal_ret_b1
-; FUNCAA:   NoAlias:	double* %noalias_arg_b1, double* %normal_ret_b1
-; FUNCAA:   MayAlias:	double* %normal_ret_b1, double** %indirect_b0
-; FUNCAA:   MayAlias:	double* %normal_ret_b1, double** %indirect_b1
-; FUNCAA:   MayAlias:	double* %loaded_b0, double* %normal_ret_b1
-; FUNCAA:   MayAlias:	double* %loaded_b1, double* %normal_ret_b1
-; FUNCAA:   MayAlias:	double* %escape_alloca_b0, double* %normal_ret_b1
-; FUNCAA:   MayAlias:	double* %escape_alloca_b1, double* %normal_ret_b1
-; FUNCAA:   NoAlias:	double* %noescape_alloca_b0, double* %normal_ret_b1
-; FUNCAA:   NoAlias:	double* %noescape_alloca_b1, double* %normal_ret_b1
-; FUNCAA:   MayAlias:	double* %normal_ret_b0, double* %normal_ret_b1
-; FUNCAA:   NoAlias:	double* %arg_b0, double* %noalias_ret_b0
-; FUNCAA:   NoAlias:	double* %arg_b1, double* %noalias_ret_b0
-; FUNCAA:   NoAlias:	double* %noalias_arg_b0, double* %noalias_ret_b0
-; FUNCAA:   NoAlias:	double* %noalias_arg_b1, double* %noalias_ret_b0
-; FUNCAA:   NoAlias:	double* %noalias_ret_b0, double** %indirect_b0
-; FUNCAA:   NoAlias:	double* %noalias_ret_b0, double** %indirect_b1
-; FUNCAA:   NoAlias:	double* %loaded_b0, double* %noalias_ret_b0
-; FUNCAA:   NoAlias:	double* %loaded_b1, double* %noalias_ret_b0
-; FUNCAA:   NoAlias:	double* %escape_alloca_b0, double* %noalias_ret_b0
-; FUNCAA:   NoAlias:	double* %escape_alloca_b1, double* %noalias_ret_b0
-; FUNCAA:   NoAlias:	double* %noalias_ret_b0, double* %noescape_alloca_b0
-; FUNCAA:   NoAlias:	double* %noalias_ret_b0, double* %noescape_alloca_b1
-; FUNCAA:   NoAlias:	double* %noalias_ret_b0, double* %normal_ret_b0
-; FUNCAA:   NoAlias:	double* %noalias_ret_b0, double* %normal_ret_b1
-; FUNCAA:   NoAlias:	double* %arg_b0, double* %noalias_ret_b1
-; FUNCAA:   NoAlias:	double* %arg_b1, double* %noalias_ret_b1
-; FUNCAA:   NoAlias:	double* %noalias_arg_b0, double* %noalias_ret_b1
-; FUNCAA:   NoAlias:	double* %noalias_arg_b1, double* %noalias_ret_b1
-; FUNCAA:   NoAlias:	double* %noalias_ret_b1, double** %indirect_b0
-; FUNCAA:   NoAlias:	double* %noalias_ret_b1, double** %indirect_b1
-; FUNCAA:   NoAlias:	double* %loaded_b0, double* %noalias_ret_b1
-; FUNCAA:   NoAlias:	double* %loaded_b1, double* %noalias_ret_b1
-; FUNCAA:   NoAlias:	double* %escape_alloca_b0, double* %noalias_ret_b1
-; FUNCAA:   NoAlias:	double* %escape_alloca_b1, double* %noalias_ret_b1
-; FUNCAA:   NoAlias:	double* %noalias_ret_b1, double* %noescape_alloca_b0
-; FUNCAA:   NoAlias:	double* %noalias_ret_b1, double* %noescape_alloca_b1
-; FUNCAA:   NoAlias:	double* %noalias_ret_b1, double* %normal_ret_b0
-; FUNCAA:   NoAlias:	double* %noalias_ret_b1, double* %normal_ret_b1
-; FUNCAA:   NoAlias:	double* %noalias_ret_b0, double* %noalias_ret_b1
-; FUNCAA:     ModRef:  Ptr: double* %arg_b0	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %arg_b1	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_arg_b0	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_arg_b1	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double** %indirect_b0	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double** %indirect_b1	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %loaded_b0	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %loaded_b1	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %escape_alloca_b0	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %escape_alloca_b1	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noescape_alloca_b0	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noescape_alloca_b1	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %normal_ret_b0	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %normal_ret_b1	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_ret_b0	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_ret_b1	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %arg_b0	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %arg_b1	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_arg_b0	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_arg_b1	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double** %indirect_b0	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double** %indirect_b1	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %loaded_b0	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %loaded_b1	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %escape_alloca_b0	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %escape_alloca_b1	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noescape_alloca_b0	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noescape_alloca_b1	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %normal_ret_b0	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %normal_ret_b1	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_ret_b0	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_ret_b1	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %arg_b0	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %arg_b1	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_arg_b0	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_arg_b1	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double** %indirect_b0	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double** %indirect_b1	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %loaded_b0	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %loaded_b1	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %escape_alloca_b0	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %escape_alloca_b1	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noescape_alloca_b0	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noescape_alloca_b1	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %normal_ret_b0	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %normal_ret_b1	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %noalias_ret_b0	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_ret_b1	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %arg_b0	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %arg_b1	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_arg_b0	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_arg_b1	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double** %indirect_b0	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double** %indirect_b1	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %loaded_b0	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %loaded_b1	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %escape_alloca_b0	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %escape_alloca_b1	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noescape_alloca_b0	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noescape_alloca_b1	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %normal_ret_b0	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %normal_ret_b1	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_ret_b0	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %noalias_ret_b1	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; FUNCAA:     ModRef:  Ptr: double* %arg_b0	<->  call void @callee(double* %escape_alloca_b0)
-; FUNCAA:     ModRef:  Ptr: double* %arg_b1	<->  call void @callee(double* %escape_alloca_b0)
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_arg_b0	<->  call void @callee(double* %escape_alloca_b0)
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_arg_b1	<->  call void @callee(double* %escape_alloca_b0)
-; FUNCAA:     ModRef:  Ptr: double** %indirect_b0	<->  call void @callee(double* %escape_alloca_b0)
-; FUNCAA:     ModRef:  Ptr: double** %indirect_b1	<->  call void @callee(double* %escape_alloca_b0)
-; FUNCAA:     ModRef:  Ptr: double* %loaded_b0	<->  call void @callee(double* %escape_alloca_b0)
-; FUNCAA:     ModRef:  Ptr: double* %loaded_b1	<->  call void @callee(double* %escape_alloca_b0)
-; FUNCAA:     ModRef:  Ptr: double* %escape_alloca_b0	<->  call void @callee(double* %escape_alloca_b0)
-; FUNCAA:     ModRef:  Ptr: double* %escape_alloca_b1	<->  call void @callee(double* %escape_alloca_b0)
-; FUNCAA:   NoModRef:  Ptr: double* %noescape_alloca_b0	<->  call void @callee(double* %escape_alloca_b0)
-; FUNCAA:   NoModRef:  Ptr: double* %noescape_alloca_b1	<->  call void @callee(double* %escape_alloca_b0)
-; FUNCAA:     ModRef:  Ptr: double* %normal_ret_b0	<->  call void @callee(double* %escape_alloca_b0)
-; FUNCAA:     ModRef:  Ptr: double* %normal_ret_b1	<->  call void @callee(double* %escape_alloca_b0)
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_ret_b0	<->  call void @callee(double* %escape_alloca_b0)
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_ret_b1	<->  call void @callee(double* %escape_alloca_b0)
-; FUNCAA:     ModRef:  Ptr: double* %arg_b0	<->  call void @callee(double* %escape_alloca_b1)
-; FUNCAA:     ModRef:  Ptr: double* %arg_b1	<->  call void @callee(double* %escape_alloca_b1)
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_arg_b0	<->  call void @callee(double* %escape_alloca_b1)
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_arg_b1	<->  call void @callee(double* %escape_alloca_b1)
-; FUNCAA:     ModRef:  Ptr: double** %indirect_b0	<->  call void @callee(double* %escape_alloca_b1)
-; FUNCAA:     ModRef:  Ptr: double** %indirect_b1	<->  call void @callee(double* %escape_alloca_b1)
-; FUNCAA:     ModRef:  Ptr: double* %loaded_b0	<->  call void @callee(double* %escape_alloca_b1)
-; FUNCAA:     ModRef:  Ptr: double* %loaded_b1	<->  call void @callee(double* %escape_alloca_b1)
-; FUNCAA:     ModRef:  Ptr: double* %escape_alloca_b0	<->  call void @callee(double* %escape_alloca_b1)
-; FUNCAA:     ModRef:  Ptr: double* %escape_alloca_b1	<->  call void @callee(double* %escape_alloca_b1)
-; FUNCAA:   NoModRef:  Ptr: double* %noescape_alloca_b0	<->  call void @callee(double* %escape_alloca_b1)
-; FUNCAA:   NoModRef:  Ptr: double* %noescape_alloca_b1	<->  call void @callee(double* %escape_alloca_b1)
-; FUNCAA:     ModRef:  Ptr: double* %normal_ret_b0	<->  call void @callee(double* %escape_alloca_b1)
-; FUNCAA:     ModRef:  Ptr: double* %normal_ret_b1	<->  call void @callee(double* %escape_alloca_b1)
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_ret_b0	<->  call void @callee(double* %escape_alloca_b1)
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_ret_b1	<->  call void @callee(double* %escape_alloca_b1)
-; FUNCAA:     ModRef:  Ptr: double* %arg_b0	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; FUNCAA:     ModRef:  Ptr: double* %arg_b1	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_arg_b0	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_arg_b1	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; FUNCAA:     ModRef:  Ptr: double** %indirect_b0	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; FUNCAA:     ModRef:  Ptr: double** %indirect_b1	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; FUNCAA:     ModRef:  Ptr: double* %loaded_b0	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; FUNCAA:     ModRef:  Ptr: double* %loaded_b1	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; FUNCAA:     ModRef:  Ptr: double* %escape_alloca_b0	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; FUNCAA:     ModRef:  Ptr: double* %escape_alloca_b1	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; FUNCAA:     ModRef:  Ptr: double* %noescape_alloca_b0	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; FUNCAA:   NoModRef:  Ptr: double* %noescape_alloca_b1	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; FUNCAA:     ModRef:  Ptr: double* %normal_ret_b0	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; FUNCAA:     ModRef:  Ptr: double* %normal_ret_b1	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_ret_b0	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_ret_b1	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; FUNCAA:     ModRef:  Ptr: double* %arg_b0	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; FUNCAA:     ModRef:  Ptr: double* %arg_b1	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_arg_b0	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_arg_b1	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; FUNCAA:     ModRef:  Ptr: double** %indirect_b0	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; FUNCAA:     ModRef:  Ptr: double** %indirect_b1	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; FUNCAA:     ModRef:  Ptr: double* %loaded_b0	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; FUNCAA:     ModRef:  Ptr: double* %loaded_b1	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; FUNCAA:     ModRef:  Ptr: double* %escape_alloca_b0	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; FUNCAA:     ModRef:  Ptr: double* %escape_alloca_b1	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; FUNCAA:   NoModRef:  Ptr: double* %noescape_alloca_b0	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; FUNCAA:     ModRef:  Ptr: double* %noescape_alloca_b1	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; FUNCAA:     ModRef:  Ptr: double* %normal_ret_b0	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; FUNCAA:     ModRef:  Ptr: double* %normal_ret_b1	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_ret_b0	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; FUNCAA:   NoModRef:  Ptr: double* %noalias_ret_b1	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; FUNCAA: ===== Alias Analysis Evaluator Report =====
-; FUNCAA:   240 Total Alias Queries Performed
-; FUNCAA:   168 no alias responses (70.0%)
-; FUNCAA:   72 may alias responses (30.0%)
-; FUNCAA:   0 must alias responses (0.0%)
-; FUNCAA:   Alias Analysis Evaluator Pointer Alias Summary: 70%/30%/0%
-; FUNCAA:   256 Total ModRef Queries Performed
-; FUNCAA:   88 no mod/ref responses (34.3%)
-; FUNCAA:   0 mod responses (0.0%)
-; FUNCAA:   0 ref responses (0.0%)
-; FUNCAA:   168 mod & ref responses (65.6%)
-; FUNCAA:   Alias Analysis Evaluator Mod/Ref Summary: 34%/0%/0%/65%
-
-; IPAA: Module: 34 pointers, 16 call sites
-; IPAA:   MayAlias:	double* %callee_arg, double* %nocap_callee_arg
-; IPAA:   MayAlias:	double* %arg_a0, double* %callee_arg
-; IPAA:   MayAlias:	double* %arg_a0, double* %nocap_callee_arg
-; IPAA:   MayAlias:	double* %arg_a1, double* %callee_arg
-; IPAA:   MayAlias:	double* %arg_a1, double* %nocap_callee_arg
-; IPAA:   MayAlias:	double* %arg_a0, double* %arg_a1
-; IPAA:   MayAlias:	double* %callee_arg, double* %noalias_arg_a0
-; IPAA:   MayAlias:	double* %noalias_arg_a0, double* %nocap_callee_arg
-; IPAA:   NoAlias:	double* %arg_a0, double* %noalias_arg_a0
-; IPAA:   NoAlias:	double* %arg_a1, double* %noalias_arg_a0
-; IPAA:   MayAlias:	double* %callee_arg, double* %noalias_arg_a1
-; IPAA:   MayAlias:	double* %noalias_arg_a1, double* %nocap_callee_arg
-; IPAA:   NoAlias:	double* %arg_a0, double* %noalias_arg_a1
-; IPAA:   NoAlias:	double* %arg_a1, double* %noalias_arg_a1
-; IPAA:   NoAlias:	double* %noalias_arg_a0, double* %noalias_arg_a1
-; IPAA:   MayAlias:	double* %callee_arg, double** %indirect_a0
-; IPAA:   MayAlias:	double* %nocap_callee_arg, double** %indirect_a0
-; IPAA:   MayAlias:	double* %arg_a0, double** %indirect_a0
-; IPAA:   MayAlias:	double* %arg_a1, double** %indirect_a0
-; IPAA:   NoAlias:	double* %noalias_arg_a0, double** %indirect_a0
-; IPAA:   NoAlias:	double* %noalias_arg_a1, double** %indirect_a0
-; IPAA:   MayAlias:	double* %callee_arg, double** %indirect_a1
-; IPAA:   MayAlias:	double* %nocap_callee_arg, double** %indirect_a1
-; IPAA:   MayAlias:	double* %arg_a0, double** %indirect_a1
-; IPAA:   MayAlias:	double* %arg_a1, double** %indirect_a1
-; IPAA:   NoAlias:	double* %noalias_arg_a0, double** %indirect_a1
-; IPAA:   NoAlias:	double* %noalias_arg_a1, double** %indirect_a1
-; IPAA:   MayAlias:	double** %indirect_a0, double** %indirect_a1
-; IPAA:   MayAlias:	double* %callee_arg, double* %loaded_a0
-; IPAA:   MayAlias:	double* %loaded_a0, double* %nocap_callee_arg
-; IPAA:   MayAlias:	double* %arg_a0, double* %loaded_a0
-; IPAA:   MayAlias:	double* %arg_a1, double* %loaded_a0
-; IPAA:   NoAlias:	double* %loaded_a0, double* %noalias_arg_a0
-; IPAA:   NoAlias:	double* %loaded_a0, double* %noalias_arg_a1
-; IPAA:   MayAlias:	double* %loaded_a0, double** %indirect_a0
-; IPAA:   MayAlias:	double* %loaded_a0, double** %indirect_a1
-; IPAA:   MayAlias:	double* %callee_arg, double* %loaded_a1
-; IPAA:   MayAlias:	double* %loaded_a1, double* %nocap_callee_arg
-; IPAA:   MayAlias:	double* %arg_a0, double* %loaded_a1
-; IPAA:   MayAlias:	double* %arg_a1, double* %loaded_a1
-; IPAA:   NoAlias:	double* %loaded_a1, double* %noalias_arg_a0
-; IPAA:   NoAlias:	double* %loaded_a1, double* %noalias_arg_a1
-; IPAA:   MayAlias:	double* %loaded_a1, double** %indirect_a0
-; IPAA:   MayAlias:	double* %loaded_a1, double** %indirect_a1
-; IPAA:   MayAlias:	double* %loaded_a0, double* %loaded_a1
-; IPAA:   MayAlias:	double* %callee_arg, double* %escape_alloca_a0
-; IPAA:   MayAlias:	double* %escape_alloca_a0, double* %nocap_callee_arg
-; IPAA:   NoAlias:	double* %arg_a0, double* %escape_alloca_a0
-; IPAA:   NoAlias:	double* %arg_a1, double* %escape_alloca_a0
-; IPAA:   NoAlias:	double* %escape_alloca_a0, double* %noalias_arg_a0
-; IPAA:   NoAlias:	double* %escape_alloca_a0, double* %noalias_arg_a1
-; IPAA:   NoAlias:	double* %escape_alloca_a0, double** %indirect_a0
-; IPAA:   NoAlias:	double* %escape_alloca_a0, double** %indirect_a1
-; IPAA:   MayAlias:	double* %escape_alloca_a0, double* %loaded_a0
-; IPAA:   MayAlias:	double* %escape_alloca_a0, double* %loaded_a1
-; IPAA:   MayAlias:	double* %callee_arg, double* %escape_alloca_a1
-; IPAA:   MayAlias:	double* %escape_alloca_a1, double* %nocap_callee_arg
-; IPAA:   NoAlias:	double* %arg_a0, double* %escape_alloca_a1
-; IPAA:   NoAlias:	double* %arg_a1, double* %escape_alloca_a1
-; IPAA:   NoAlias:	double* %escape_alloca_a1, double* %noalias_arg_a0
-; IPAA:   NoAlias:	double* %escape_alloca_a1, double* %noalias_arg_a1
-; IPAA:   NoAlias:	double* %escape_alloca_a1, double** %indirect_a0
-; IPAA:   NoAlias:	double* %escape_alloca_a1, double** %indirect_a1
-; IPAA:   MayAlias:	double* %escape_alloca_a1, double* %loaded_a0
-; IPAA:   MayAlias:	double* %escape_alloca_a1, double* %loaded_a1
-; IPAA:   NoAlias:	double* %escape_alloca_a0, double* %escape_alloca_a1
-; IPAA:   MayAlias:	double* %callee_arg, double* %noescape_alloca_a0
-; IPAA:   MayAlias:	double* %nocap_callee_arg, double* %noescape_alloca_a0
-; IPAA:   NoAlias:	double* %arg_a0, double* %noescape_alloca_a0
-; IPAA:   NoAlias:	double* %arg_a1, double* %noescape_alloca_a0
-; IPAA:   NoAlias:	double* %noalias_arg_a0, double* %noescape_alloca_a0
-; IPAA:   NoAlias:	double* %noalias_arg_a1, double* %noescape_alloca_a0
-; IPAA:   NoAlias:	double* %noescape_alloca_a0, double** %indirect_a0
-; IPAA:   NoAlias:	double* %noescape_alloca_a0, double** %indirect_a1
-; IPAA:   NoAlias:	double* %loaded_a0, double* %noescape_alloca_a0
-; IPAA:   NoAlias:	double* %loaded_a1, double* %noescape_alloca_a0
-; IPAA:   NoAlias:	double* %escape_alloca_a0, double* %noescape_alloca_a0
-; IPAA:   NoAlias:	double* %escape_alloca_a1, double* %noescape_alloca_a0
-; IPAA:   MayAlias:	double* %callee_arg, double* %noescape_alloca_a1
-; IPAA:   MayAlias:	double* %nocap_callee_arg, double* %noescape_alloca_a1
-; IPAA:   NoAlias:	double* %arg_a0, double* %noescape_alloca_a1
-; IPAA:   NoAlias:	double* %arg_a1, double* %noescape_alloca_a1
-; IPAA:   NoAlias:	double* %noalias_arg_a0, double* %noescape_alloca_a1
-; IPAA:   NoAlias:	double* %noalias_arg_a1, double* %noescape_alloca_a1
-; IPAA:   NoAlias:	double* %noescape_alloca_a1, double** %indirect_a0
-; IPAA:   NoAlias:	double* %noescape_alloca_a1, double** %indirect_a1
-; IPAA:   NoAlias:	double* %loaded_a0, double* %noescape_alloca_a1
-; IPAA:   NoAlias:	double* %loaded_a1, double* %noescape_alloca_a1
-; IPAA:   NoAlias:	double* %escape_alloca_a0, double* %noescape_alloca_a1
-; IPAA:   NoAlias:	double* %escape_alloca_a1, double* %noescape_alloca_a1
-; IPAA:   NoAlias:	double* %noescape_alloca_a0, double* %noescape_alloca_a1
-; IPAA:   MayAlias:	double* %callee_arg, double* %normal_ret_a0
-; IPAA:   MayAlias:	double* %nocap_callee_arg, double* %normal_ret_a0
-; IPAA:   MayAlias:	double* %arg_a0, double* %normal_ret_a0
-; IPAA:   MayAlias:	double* %arg_a1, double* %normal_ret_a0
-; IPAA:   NoAlias:	double* %noalias_arg_a0, double* %normal_ret_a0
-; IPAA:   NoAlias:	double* %noalias_arg_a1, double* %normal_ret_a0
-; IPAA:   MayAlias:	double* %normal_ret_a0, double** %indirect_a0
-; IPAA:   MayAlias:	double* %normal_ret_a0, double** %indirect_a1
-; IPAA:   MayAlias:	double* %loaded_a0, double* %normal_ret_a0
-; IPAA:   MayAlias:	double* %loaded_a1, double* %normal_ret_a0
-; IPAA:   MayAlias:	double* %escape_alloca_a0, double* %normal_ret_a0
-; IPAA:   MayAlias:	double* %escape_alloca_a1, double* %normal_ret_a0
-; IPAA:   NoAlias:	double* %noescape_alloca_a0, double* %normal_ret_a0
-; IPAA:   NoAlias:	double* %noescape_alloca_a1, double* %normal_ret_a0
-; IPAA:   MayAlias:	double* %callee_arg, double* %normal_ret_a1
-; IPAA:   MayAlias:	double* %nocap_callee_arg, double* %normal_ret_a1
-; IPAA:   MayAlias:	double* %arg_a0, double* %normal_ret_a1
-; IPAA:   MayAlias:	double* %arg_a1, double* %normal_ret_a1
-; IPAA:   NoAlias:	double* %noalias_arg_a0, double* %normal_ret_a1
-; IPAA:   NoAlias:	double* %noalias_arg_a1, double* %normal_ret_a1
-; IPAA:   MayAlias:	double* %normal_ret_a1, double** %indirect_a0
-; IPAA:   MayAlias:	double* %normal_ret_a1, double** %indirect_a1
-; IPAA:   MayAlias:	double* %loaded_a0, double* %normal_ret_a1
-; IPAA:   MayAlias:	double* %loaded_a1, double* %normal_ret_a1
-; IPAA:   MayAlias:	double* %escape_alloca_a0, double* %normal_ret_a1
-; IPAA:   MayAlias:	double* %escape_alloca_a1, double* %normal_ret_a1
-; IPAA:   NoAlias:	double* %noescape_alloca_a0, double* %normal_ret_a1
-; IPAA:   NoAlias:	double* %noescape_alloca_a1, double* %normal_ret_a1
-; IPAA:   MayAlias:	double* %normal_ret_a0, double* %normal_ret_a1
-; IPAA:   MayAlias:	double* %callee_arg, double* %noalias_ret_a0
-; IPAA:   MayAlias:	double* %noalias_ret_a0, double* %nocap_callee_arg
-; IPAA:   NoAlias:	double* %arg_a0, double* %noalias_ret_a0
-; IPAA:   NoAlias:	double* %arg_a1, double* %noalias_ret_a0
-; IPAA:   NoAlias:	double* %noalias_arg_a0, double* %noalias_ret_a0
-; IPAA:   NoAlias:	double* %noalias_arg_a1, double* %noalias_ret_a0
-; IPAA:   NoAlias:	double* %noalias_ret_a0, double** %indirect_a0
-; IPAA:   NoAlias:	double* %noalias_ret_a0, double** %indirect_a1
-; IPAA:   NoAlias:	double* %loaded_a0, double* %noalias_ret_a0
-; IPAA:   NoAlias:	double* %loaded_a1, double* %noalias_ret_a0
-; IPAA:   NoAlias:	double* %escape_alloca_a0, double* %noalias_ret_a0
-; IPAA:   NoAlias:	double* %escape_alloca_a1, double* %noalias_ret_a0
-; IPAA:   NoAlias:	double* %noalias_ret_a0, double* %noescape_alloca_a0
-; IPAA:   NoAlias:	double* %noalias_ret_a0, double* %noescape_alloca_a1
-; IPAA:   NoAlias:	double* %noalias_ret_a0, double* %normal_ret_a0
-; IPAA:   NoAlias:	double* %noalias_ret_a0, double* %normal_ret_a1
-; IPAA:   MayAlias:	double* %callee_arg, double* %noalias_ret_a1
-; IPAA:   MayAlias:	double* %noalias_ret_a1, double* %nocap_callee_arg
-; IPAA:   NoAlias:	double* %arg_a0, double* %noalias_ret_a1
-; IPAA:   NoAlias:	double* %arg_a1, double* %noalias_ret_a1
-; IPAA:   NoAlias:	double* %noalias_arg_a0, double* %noalias_ret_a1
-; IPAA:   NoAlias:	double* %noalias_arg_a1, double* %noalias_ret_a1
-; IPAA:   NoAlias:	double* %noalias_ret_a1, double** %indirect_a0
-; IPAA:   NoAlias:	double* %noalias_ret_a1, double** %indirect_a1
-; IPAA:   NoAlias:	double* %loaded_a0, double* %noalias_ret_a1
-; IPAA:   NoAlias:	double* %loaded_a1, double* %noalias_ret_a1
-; IPAA:   NoAlias:	double* %escape_alloca_a0, double* %noalias_ret_a1
-; IPAA:   NoAlias:	double* %escape_alloca_a1, double* %noalias_ret_a1
-; IPAA:   NoAlias:	double* %noalias_ret_a1, double* %noescape_alloca_a0
-; IPAA:   NoAlias:	double* %noalias_ret_a1, double* %noescape_alloca_a1
-; IPAA:   NoAlias:	double* %noalias_ret_a1, double* %normal_ret_a0
-; IPAA:   NoAlias:	double* %noalias_ret_a1, double* %normal_ret_a1
-; IPAA:   NoAlias:	double* %noalias_ret_a0, double* %noalias_ret_a1
-; IPAA:   MayAlias:	double* %arg_b0, double* %callee_arg
-; IPAA:   MayAlias:	double* %arg_b0, double* %nocap_callee_arg
-; IPAA:   MayAlias:	double* %arg_a0, double* %arg_b0
-; IPAA:   MayAlias:	double* %arg_a1, double* %arg_b0
-; IPAA:   MayAlias:	double* %arg_b0, double* %noalias_arg_a0
-; IPAA:   MayAlias:	double* %arg_b0, double* %noalias_arg_a1
-; IPAA:   MayAlias:	double* %arg_b0, double** %indirect_a0
-; IPAA:   MayAlias:	double* %arg_b0, double** %indirect_a1
-; IPAA:   MayAlias:	double* %arg_b0, double* %loaded_a0
-; IPAA:   MayAlias:	double* %arg_b0, double* %loaded_a1
-; IPAA:   MayAlias:	double* %arg_b0, double* %escape_alloca_a0
-; IPAA:   MayAlias:	double* %arg_b0, double* %escape_alloca_a1
-; IPAA:   MayAlias:	double* %arg_b0, double* %noescape_alloca_a0
-; IPAA:   MayAlias:	double* %arg_b0, double* %noescape_alloca_a1
-; IPAA:   MayAlias:	double* %arg_b0, double* %normal_ret_a0
-; IPAA:   MayAlias:	double* %arg_b0, double* %normal_ret_a1
-; IPAA:   MayAlias:	double* %arg_b0, double* %noalias_ret_a0
-; IPAA:   MayAlias:	double* %arg_b0, double* %noalias_ret_a1
-; IPAA:   MayAlias:	double* %arg_b1, double* %callee_arg
-; IPAA:   MayAlias:	double* %arg_b1, double* %nocap_callee_arg
-; IPAA:   MayAlias:	double* %arg_a0, double* %arg_b1
-; IPAA:   MayAlias:	double* %arg_a1, double* %arg_b1
-; IPAA:   MayAlias:	double* %arg_b1, double* %noalias_arg_a0
-; IPAA:   MayAlias:	double* %arg_b1, double* %noalias_arg_a1
-; IPAA:   MayAlias:	double* %arg_b1, double** %indirect_a0
-; IPAA:   MayAlias:	double* %arg_b1, double** %indirect_a1
-; IPAA:   MayAlias:	double* %arg_b1, double* %loaded_a0
-; IPAA:   MayAlias:	double* %arg_b1, double* %loaded_a1
-; IPAA:   MayAlias:	double* %arg_b1, double* %escape_alloca_a0
-; IPAA:   MayAlias:	double* %arg_b1, double* %escape_alloca_a1
-; IPAA:   MayAlias:	double* %arg_b1, double* %noescape_alloca_a0
-; IPAA:   MayAlias:	double* %arg_b1, double* %noescape_alloca_a1
-; IPAA:   MayAlias:	double* %arg_b1, double* %normal_ret_a0
-; IPAA:   MayAlias:	double* %arg_b1, double* %normal_ret_a1
-; IPAA:   MayAlias:	double* %arg_b1, double* %noalias_ret_a0
-; IPAA:   MayAlias:	double* %arg_b1, double* %noalias_ret_a1
-; IPAA:   MayAlias:	double* %arg_b0, double* %arg_b1
-; IPAA:   MayAlias:	double* %callee_arg, double* %noalias_arg_b0
-; IPAA:   MayAlias:	double* %noalias_arg_b0, double* %nocap_callee_arg
-; IPAA:   MayAlias:	double* %arg_a0, double* %noalias_arg_b0
-; IPAA:   MayAlias:	double* %arg_a1, double* %noalias_arg_b0
-; IPAA:   MayAlias:	double* %noalias_arg_a0, double* %noalias_arg_b0
-; IPAA:   MayAlias:	double* %noalias_arg_a1, double* %noalias_arg_b0
-; IPAA:   MayAlias:	double* %noalias_arg_b0, double** %indirect_a0
-; IPAA:   MayAlias:	double* %noalias_arg_b0, double** %indirect_a1
-; IPAA:   MayAlias:	double* %loaded_a0, double* %noalias_arg_b0
-; IPAA:   MayAlias:	double* %loaded_a1, double* %noalias_arg_b0
-; IPAA:   MayAlias:	double* %escape_alloca_a0, double* %noalias_arg_b0
-; IPAA:   MayAlias:	double* %escape_alloca_a1, double* %noalias_arg_b0
-; IPAA:   MayAlias:	double* %noalias_arg_b0, double* %noescape_alloca_a0
-; IPAA:   MayAlias:	double* %noalias_arg_b0, double* %noescape_alloca_a1
-; IPAA:   MayAlias:	double* %noalias_arg_b0, double* %normal_ret_a0
-; IPAA:   MayAlias:	double* %noalias_arg_b0, double* %normal_ret_a1
-; IPAA:   MayAlias:	double* %noalias_arg_b0, double* %noalias_ret_a0
-; IPAA:   MayAlias:	double* %noalias_arg_b0, double* %noalias_ret_a1
-; IPAA:   NoAlias:	double* %arg_b0, double* %noalias_arg_b0
-; IPAA:   NoAlias:	double* %arg_b1, double* %noalias_arg_b0
-; IPAA:   MayAlias:	double* %callee_arg, double* %noalias_arg_b1
-; IPAA:   MayAlias:	double* %noalias_arg_b1, double* %nocap_callee_arg
-; IPAA:   MayAlias:	double* %arg_a0, double* %noalias_arg_b1
-; IPAA:   MayAlias:	double* %arg_a1, double* %noalias_arg_b1
-; IPAA:   MayAlias:	double* %noalias_arg_a0, double* %noalias_arg_b1
-; IPAA:   MayAlias:	double* %noalias_arg_a1, double* %noalias_arg_b1
-; IPAA:   MayAlias:	double* %noalias_arg_b1, double** %indirect_a0
-; IPAA:   MayAlias:	double* %noalias_arg_b1, double** %indirect_a1
-; IPAA:   MayAlias:	double* %loaded_a0, double* %noalias_arg_b1
-; IPAA:   MayAlias:	double* %loaded_a1, double* %noalias_arg_b1
-; IPAA:   MayAlias:	double* %escape_alloca_a0, double* %noalias_arg_b1
-; IPAA:   MayAlias:	double* %escape_alloca_a1, double* %noalias_arg_b1
-; IPAA:   MayAlias:	double* %noalias_arg_b1, double* %noescape_alloca_a0
-; IPAA:   MayAlias:	double* %noalias_arg_b1, double* %noescape_alloca_a1
-; IPAA:   MayAlias:	double* %noalias_arg_b1, double* %normal_ret_a0
-; IPAA:   MayAlias:	double* %noalias_arg_b1, double* %normal_ret_a1
-; IPAA:   MayAlias:	double* %noalias_arg_b1, double* %noalias_ret_a0
-; IPAA:   MayAlias:	double* %noalias_arg_b1, double* %noalias_ret_a1
-; IPAA:   NoAlias:	double* %arg_b0, double* %noalias_arg_b1
-; IPAA:   NoAlias:	double* %arg_b1, double* %noalias_arg_b1
-; IPAA:   NoAlias:	double* %noalias_arg_b0, double* %noalias_arg_b1
-; IPAA:   MayAlias:	double* %callee_arg, double** %indirect_b0
-; IPAA:   MayAlias:	double* %nocap_callee_arg, double** %indirect_b0
-; IPAA:   MayAlias:	double* %arg_a0, double** %indirect_b0
-; IPAA:   MayAlias:	double* %arg_a1, double** %indirect_b0
-; IPAA:   MayAlias:	double* %noalias_arg_a0, double** %indirect_b0
-; IPAA:   MayAlias:	double* %noalias_arg_a1, double** %indirect_b0
-; IPAA:   MayAlias:	double** %indirect_a0, double** %indirect_b0
-; IPAA:   MayAlias:	double** %indirect_a1, double** %indirect_b0
-; IPAA:   MayAlias:	double* %loaded_a0, double** %indirect_b0
-; IPAA:   MayAlias:	double* %loaded_a1, double** %indirect_b0
-; IPAA:   MayAlias:	double* %escape_alloca_a0, double** %indirect_b0
-; IPAA:   MayAlias:	double* %escape_alloca_a1, double** %indirect_b0
-; IPAA:   MayAlias:	double* %noescape_alloca_a0, double** %indirect_b0
-; IPAA:   MayAlias:	double* %noescape_alloca_a1, double** %indirect_b0
-; IPAA:   MayAlias:	double* %normal_ret_a0, double** %indirect_b0
-; IPAA:   MayAlias:	double* %normal_ret_a1, double** %indirect_b0
-; IPAA:   MayAlias:	double* %noalias_ret_a0, double** %indirect_b0
-; IPAA:   MayAlias:	double* %noalias_ret_a1, double** %indirect_b0
-; IPAA:   MayAlias:	double* %arg_b0, double** %indirect_b0
-; IPAA:   MayAlias:	double* %arg_b1, double** %indirect_b0
-; IPAA:   NoAlias:	double* %noalias_arg_b0, double** %indirect_b0
-; IPAA:   NoAlias:	double* %noalias_arg_b1, double** %indirect_b0
-; IPAA:   MayAlias:	double* %callee_arg, double** %indirect_b1
-; IPAA:   MayAlias:	double* %nocap_callee_arg, double** %indirect_b1
-; IPAA:   MayAlias:	double* %arg_a0, double** %indirect_b1
-; IPAA:   MayAlias:	double* %arg_a1, double** %indirect_b1
-; IPAA:   MayAlias:	double* %noalias_arg_a0, double** %indirect_b1
-; IPAA:   MayAlias:	double* %noalias_arg_a1, double** %indirect_b1
-; IPAA:   MayAlias:	double** %indirect_a0, double** %indirect_b1
-; IPAA:   MayAlias:	double** %indirect_a1, double** %indirect_b1
-; IPAA:   MayAlias:	double* %loaded_a0, double** %indirect_b1
-; IPAA:   MayAlias:	double* %loaded_a1, double** %indirect_b1
-; IPAA:   MayAlias:	double* %escape_alloca_a0, double** %indirect_b1
-; IPAA:   MayAlias:	double* %escape_alloca_a1, double** %indirect_b1
-; IPAA:   MayAlias:	double* %noescape_alloca_a0, double** %indirect_b1
-; IPAA:   MayAlias:	double* %noescape_alloca_a1, double** %indirect_b1
-; IPAA:   MayAlias:	double* %normal_ret_a0, double** %indirect_b1
-; IPAA:   MayAlias:	double* %normal_ret_a1, double** %indirect_b1
-; IPAA:   MayAlias:	double* %noalias_ret_a0, double** %indirect_b1
-; IPAA:   MayAlias:	double* %noalias_ret_a1, double** %indirect_b1
-; IPAA:   MayAlias:	double* %arg_b0, double** %indirect_b1
-; IPAA:   MayAlias:	double* %arg_b1, double** %indirect_b1
-; IPAA:   NoAlias:	double* %noalias_arg_b0, double** %indirect_b1
-; IPAA:   NoAlias:	double* %noalias_arg_b1, double** %indirect_b1
-; IPAA:   MayAlias:	double** %indirect_b0, double** %indirect_b1
-; IPAA:   MayAlias:	double* %callee_arg, double* %loaded_b0
-; IPAA:   MayAlias:	double* %loaded_b0, double* %nocap_callee_arg
-; IPAA:   MayAlias:	double* %arg_a0, double* %loaded_b0
-; IPAA:   MayAlias:	double* %arg_a1, double* %loaded_b0
-; IPAA:   MayAlias:	double* %loaded_b0, double* %noalias_arg_a0
-; IPAA:   MayAlias:	double* %loaded_b0, double* %noalias_arg_a1
-; IPAA:   MayAlias:	double* %loaded_b0, double** %indirect_a0
-; IPAA:   MayAlias:	double* %loaded_b0, double** %indirect_a1
-; IPAA:   MayAlias:	double* %loaded_a0, double* %loaded_b0
-; IPAA:   MayAlias:	double* %loaded_a1, double* %loaded_b0
-; IPAA:   MayAlias:	double* %escape_alloca_a0, double* %loaded_b0
-; IPAA:   MayAlias:	double* %escape_alloca_a1, double* %loaded_b0
-; IPAA:   MayAlias:	double* %loaded_b0, double* %noescape_alloca_a0
-; IPAA:   MayAlias:	double* %loaded_b0, double* %noescape_alloca_a1
-; IPAA:   MayAlias:	double* %loaded_b0, double* %normal_ret_a0
-; IPAA:   MayAlias:	double* %loaded_b0, double* %normal_ret_a1
-; IPAA:   MayAlias:	double* %loaded_b0, double* %noalias_ret_a0
-; IPAA:   MayAlias:	double* %loaded_b0, double* %noalias_ret_a1
-; IPAA:   MayAlias:	double* %arg_b0, double* %loaded_b0
-; IPAA:   MayAlias:	double* %arg_b1, double* %loaded_b0
-; IPAA:   NoAlias:	double* %loaded_b0, double* %noalias_arg_b0
-; IPAA:   NoAlias:	double* %loaded_b0, double* %noalias_arg_b1
-; IPAA:   MayAlias:	double* %loaded_b0, double** %indirect_b0
-; IPAA:   MayAlias:	double* %loaded_b0, double** %indirect_b1
-; IPAA:   MayAlias:	double* %callee_arg, double* %loaded_b1
-; IPAA:   MayAlias:	double* %loaded_b1, double* %nocap_callee_arg
-; IPAA:   MayAlias:	double* %arg_a0, double* %loaded_b1
-; IPAA:   MayAlias:	double* %arg_a1, double* %loaded_b1
-; IPAA:   MayAlias:	double* %loaded_b1, double* %noalias_arg_a0
-; IPAA:   MayAlias:	double* %loaded_b1, double* %noalias_arg_a1
-; IPAA:   MayAlias:	double* %loaded_b1, double** %indirect_a0
-; IPAA:   MayAlias:	double* %loaded_b1, double** %indirect_a1
-; IPAA:   MayAlias:	double* %loaded_a0, double* %loaded_b1
-; IPAA:   MayAlias:	double* %loaded_a1, double* %loaded_b1
-; IPAA:   MayAlias:	double* %escape_alloca_a0, double* %loaded_b1
-; IPAA:   MayAlias:	double* %escape_alloca_a1, double* %loaded_b1
-; IPAA:   MayAlias:	double* %loaded_b1, double* %noescape_alloca_a0
-; IPAA:   MayAlias:	double* %loaded_b1, double* %noescape_alloca_a1
-; IPAA:   MayAlias:	double* %loaded_b1, double* %normal_ret_a0
-; IPAA:   MayAlias:	double* %loaded_b1, double* %normal_ret_a1
-; IPAA:   MayAlias:	double* %loaded_b1, double* %noalias_ret_a0
-; IPAA:   MayAlias:	double* %loaded_b1, double* %noalias_ret_a1
-; IPAA:   MayAlias:	double* %arg_b0, double* %loaded_b1
-; IPAA:   MayAlias:	double* %arg_b1, double* %loaded_b1
-; IPAA:   NoAlias:	double* %loaded_b1, double* %noalias_arg_b0
-; IPAA:   NoAlias:	double* %loaded_b1, double* %noalias_arg_b1
-; IPAA:   MayAlias:	double* %loaded_b1, double** %indirect_b0
-; IPAA:   MayAlias:	double* %loaded_b1, double** %indirect_b1
-; IPAA:   MayAlias:	double* %loaded_b0, double* %loaded_b1
-; IPAA:   MayAlias:	double* %callee_arg, double* %escape_alloca_b0
-; IPAA:   MayAlias:	double* %escape_alloca_b0, double* %nocap_callee_arg
-; IPAA:   MayAlias:	double* %arg_a0, double* %escape_alloca_b0
-; IPAA:   MayAlias:	double* %arg_a1, double* %escape_alloca_b0
-; IPAA:   MayAlias:	double* %escape_alloca_b0, double* %noalias_arg_a0
-; IPAA:   MayAlias:	double* %escape_alloca_b0, double* %noalias_arg_a1
-; IPAA:   MayAlias:	double* %escape_alloca_b0, double** %indirect_a0
-; IPAA:   MayAlias:	double* %escape_alloca_b0, double** %indirect_a1
-; IPAA:   MayAlias:	double* %escape_alloca_b0, double* %loaded_a0
-; IPAA:   MayAlias:	double* %escape_alloca_b0, double* %loaded_a1
-; IPAA:   NoAlias:	double* %escape_alloca_a0, double* %escape_alloca_b0
-; IPAA:   NoAlias:	double* %escape_alloca_a1, double* %escape_alloca_b0
-; IPAA:   NoAlias:	double* %escape_alloca_b0, double* %noescape_alloca_a0
-; IPAA:   NoAlias:	double* %escape_alloca_b0, double* %noescape_alloca_a1
-; IPAA:   MayAlias:	double* %escape_alloca_b0, double* %normal_ret_a0
-; IPAA:   MayAlias:	double* %escape_alloca_b0, double* %normal_ret_a1
-; IPAA:   MayAlias:	double* %escape_alloca_b0, double* %noalias_ret_a0
-; IPAA:   MayAlias:	double* %escape_alloca_b0, double* %noalias_ret_a1
-; IPAA:   NoAlias:	double* %arg_b0, double* %escape_alloca_b0
-; IPAA:   NoAlias:	double* %arg_b1, double* %escape_alloca_b0
-; IPAA:   NoAlias:	double* %escape_alloca_b0, double* %noalias_arg_b0
-; IPAA:   NoAlias:	double* %escape_alloca_b0, double* %noalias_arg_b1
-; IPAA:   NoAlias:	double* %escape_alloca_b0, double** %indirect_b0
-; IPAA:   NoAlias:	double* %escape_alloca_b0, double** %indirect_b1
-; IPAA:   MayAlias:	double* %escape_alloca_b0, double* %loaded_b0
-; IPAA:   MayAlias:	double* %escape_alloca_b0, double* %loaded_b1
-; IPAA:   MayAlias:	double* %callee_arg, double* %escape_alloca_b1
-; IPAA:   MayAlias:	double* %escape_alloca_b1, double* %nocap_callee_arg
-; IPAA:   MayAlias:	double* %arg_a0, double* %escape_alloca_b1
-; IPAA:   MayAlias:	double* %arg_a1, double* %escape_alloca_b1
-; IPAA:   MayAlias:	double* %escape_alloca_b1, double* %noalias_arg_a0
-; IPAA:   MayAlias:	double* %escape_alloca_b1, double* %noalias_arg_a1
-; IPAA:   MayAlias:	double* %escape_alloca_b1, double** %indirect_a0
-; IPAA:   MayAlias:	double* %escape_alloca_b1, double** %indirect_a1
-; IPAA:   MayAlias:	double* %escape_alloca_b1, double* %loaded_a0
-; IPAA:   MayAlias:	double* %escape_alloca_b1, double* %loaded_a1
-; IPAA:   NoAlias:	double* %escape_alloca_a0, double* %escape_alloca_b1
-; IPAA:   NoAlias:	double* %escape_alloca_a1, double* %escape_alloca_b1
-; IPAA:   NoAlias:	double* %escape_alloca_b1, double* %noescape_alloca_a0
-; IPAA:   NoAlias:	double* %escape_alloca_b1, double* %noescape_alloca_a1
-; IPAA:   MayAlias:	double* %escape_alloca_b1, double* %normal_ret_a0
-; IPAA:   MayAlias:	double* %escape_alloca_b1, double* %normal_ret_a1
-; IPAA:   MayAlias:	double* %escape_alloca_b1, double* %noalias_ret_a0
-; IPAA:   MayAlias:	double* %escape_alloca_b1, double* %noalias_ret_a1
-; IPAA:   NoAlias:	double* %arg_b0, double* %escape_alloca_b1
-; IPAA:   NoAlias:	double* %arg_b1, double* %escape_alloca_b1
-; IPAA:   NoAlias:	double* %escape_alloca_b1, double* %noalias_arg_b0
-; IPAA:   NoAlias:	double* %escape_alloca_b1, double* %noalias_arg_b1
-; IPAA:   NoAlias:	double* %escape_alloca_b1, double** %indirect_b0
-; IPAA:   NoAlias:	double* %escape_alloca_b1, double** %indirect_b1
-; IPAA:   MayAlias:	double* %escape_alloca_b1, double* %loaded_b0
-; IPAA:   MayAlias:	double* %escape_alloca_b1, double* %loaded_b1
-; IPAA:   NoAlias:	double* %escape_alloca_b0, double* %escape_alloca_b1
-; IPAA:   MayAlias:	double* %callee_arg, double* %noescape_alloca_b0
-; IPAA:   MayAlias:	double* %nocap_callee_arg, double* %noescape_alloca_b0
-; IPAA:   MayAlias:	double* %arg_a0, double* %noescape_alloca_b0
-; IPAA:   MayAlias:	double* %arg_a1, double* %noescape_alloca_b0
-; IPAA:   MayAlias:	double* %noalias_arg_a0, double* %noescape_alloca_b0
-; IPAA:   MayAlias:	double* %noalias_arg_a1, double* %noescape_alloca_b0
-; IPAA:   MayAlias:	double* %noescape_alloca_b0, double** %indirect_a0
-; IPAA:   MayAlias:	double* %noescape_alloca_b0, double** %indirect_a1
-; IPAA:   MayAlias:	double* %loaded_a0, double* %noescape_alloca_b0
-; IPAA:   MayAlias:	double* %loaded_a1, double* %noescape_alloca_b0
-; IPAA:   NoAlias:	double* %escape_alloca_a0, double* %noescape_alloca_b0
-; IPAA:   NoAlias:	double* %escape_alloca_a1, double* %noescape_alloca_b0
-; IPAA:   NoAlias:	double* %noescape_alloca_a0, double* %noescape_alloca_b0
-; IPAA:   NoAlias:	double* %noescape_alloca_a1, double* %noescape_alloca_b0
-; IPAA:   MayAlias:	double* %noescape_alloca_b0, double* %normal_ret_a0
-; IPAA:   MayAlias:	double* %noescape_alloca_b0, double* %normal_ret_a1
-; IPAA:   MayAlias:	double* %noalias_ret_a0, double* %noescape_alloca_b0
-; IPAA:   MayAlias:	double* %noalias_ret_a1, double* %noescape_alloca_b0
-; IPAA:   NoAlias:	double* %arg_b0, double* %noescape_alloca_b0
-; IPAA:   NoAlias:	double* %arg_b1, double* %noescape_alloca_b0
-; IPAA:   NoAlias:	double* %noalias_arg_b0, double* %noescape_alloca_b0
-; IPAA:   NoAlias:	double* %noalias_arg_b1, double* %noescape_alloca_b0
-; IPAA:   NoAlias:	double* %noescape_alloca_b0, double** %indirect_b0
-; IPAA:   NoAlias:	double* %noescape_alloca_b0, double** %indirect_b1
-; IPAA:   NoAlias:	double* %loaded_b0, double* %noescape_alloca_b0
-; IPAA:   NoAlias:	double* %loaded_b1, double* %noescape_alloca_b0
-; IPAA:   NoAlias:	double* %escape_alloca_b0, double* %noescape_alloca_b0
-; IPAA:   NoAlias:	double* %escape_alloca_b1, double* %noescape_alloca_b0
-; IPAA:   MayAlias:	double* %callee_arg, double* %noescape_alloca_b1
-; IPAA:   MayAlias:	double* %nocap_callee_arg, double* %noescape_alloca_b1
-; IPAA:   MayAlias:	double* %arg_a0, double* %noescape_alloca_b1
-; IPAA:   MayAlias:	double* %arg_a1, double* %noescape_alloca_b1
-; IPAA:   MayAlias:	double* %noalias_arg_a0, double* %noescape_alloca_b1
-; IPAA:   MayAlias:	double* %noalias_arg_a1, double* %noescape_alloca_b1
-; IPAA:   MayAlias:	double* %noescape_alloca_b1, double** %indirect_a0
-; IPAA:   MayAlias:	double* %noescape_alloca_b1, double** %indirect_a1
-; IPAA:   MayAlias:	double* %loaded_a0, double* %noescape_alloca_b1
-; IPAA:   MayAlias:	double* %loaded_a1, double* %noescape_alloca_b1
-; IPAA:   NoAlias:	double* %escape_alloca_a0, double* %noescape_alloca_b1
-; IPAA:   NoAlias:	double* %escape_alloca_a1, double* %noescape_alloca_b1
-; IPAA:   NoAlias:	double* %noescape_alloca_a0, double* %noescape_alloca_b1
-; IPAA:   NoAlias:	double* %noescape_alloca_a1, double* %noescape_alloca_b1
-; IPAA:   MayAlias:	double* %noescape_alloca_b1, double* %normal_ret_a0
-; IPAA:   MayAlias:	double* %noescape_alloca_b1, double* %normal_ret_a1
-; IPAA:   MayAlias:	double* %noalias_ret_a0, double* %noescape_alloca_b1
-; IPAA:   MayAlias:	double* %noalias_ret_a1, double* %noescape_alloca_b1
-; IPAA:   NoAlias:	double* %arg_b0, double* %noescape_alloca_b1
-; IPAA:   NoAlias:	double* %arg_b1, double* %noescape_alloca_b1
-; IPAA:   NoAlias:	double* %noalias_arg_b0, double* %noescape_alloca_b1
-; IPAA:   NoAlias:	double* %noalias_arg_b1, double* %noescape_alloca_b1
-; IPAA:   NoAlias:	double* %noescape_alloca_b1, double** %indirect_b0
-; IPAA:   NoAlias:	double* %noescape_alloca_b1, double** %indirect_b1
-; IPAA:   NoAlias:	double* %loaded_b0, double* %noescape_alloca_b1
-; IPAA:   NoAlias:	double* %loaded_b1, double* %noescape_alloca_b1
-; IPAA:   NoAlias:	double* %escape_alloca_b0, double* %noescape_alloca_b1
-; IPAA:   NoAlias:	double* %escape_alloca_b1, double* %noescape_alloca_b1
-; IPAA:   NoAlias:	double* %noescape_alloca_b0, double* %noescape_alloca_b1
-; IPAA:   MayAlias:	double* %callee_arg, double* %normal_ret_b0
-; IPAA:   MayAlias:	double* %nocap_callee_arg, double* %normal_ret_b0
-; IPAA:   MayAlias:	double* %arg_a0, double* %normal_ret_b0
-; IPAA:   MayAlias:	double* %arg_a1, double* %normal_ret_b0
-; IPAA:   MayAlias:	double* %noalias_arg_a0, double* %normal_ret_b0
-; IPAA:   MayAlias:	double* %noalias_arg_a1, double* %normal_ret_b0
-; IPAA:   MayAlias:	double* %normal_ret_b0, double** %indirect_a0
-; IPAA:   MayAlias:	double* %normal_ret_b0, double** %indirect_a1
-; IPAA:   MayAlias:	double* %loaded_a0, double* %normal_ret_b0
-; IPAA:   MayAlias:	double* %loaded_a1, double* %normal_ret_b0
-; IPAA:   MayAlias:	double* %escape_alloca_a0, double* %normal_ret_b0
-; IPAA:   MayAlias:	double* %escape_alloca_a1, double* %normal_ret_b0
-; IPAA:   MayAlias:	double* %noescape_alloca_a0, double* %normal_ret_b0
-; IPAA:   MayAlias:	double* %noescape_alloca_a1, double* %normal_ret_b0
-; IPAA:   MayAlias:	double* %normal_ret_a0, double* %normal_ret_b0
-; IPAA:   MayAlias:	double* %normal_ret_a1, double* %normal_ret_b0
-; IPAA:   MayAlias:	double* %noalias_ret_a0, double* %normal_ret_b0
-; IPAA:   MayAlias:	double* %noalias_ret_a1, double* %normal_ret_b0
-; IPAA:   MayAlias:	double* %arg_b0, double* %normal_ret_b0
-; IPAA:   MayAlias:	double* %arg_b1, double* %normal_ret_b0
-; IPAA:   NoAlias:	double* %noalias_arg_b0, double* %normal_ret_b0
-; IPAA:   NoAlias:	double* %noalias_arg_b1, double* %normal_ret_b0
-; IPAA:   MayAlias:	double* %normal_ret_b0, double** %indirect_b0
-; IPAA:   MayAlias:	double* %normal_ret_b0, double** %indirect_b1
-; IPAA:   MayAlias:	double* %loaded_b0, double* %normal_ret_b0
-; IPAA:   MayAlias:	double* %loaded_b1, double* %normal_ret_b0
-; IPAA:   MayAlias:	double* %escape_alloca_b0, double* %normal_ret_b0
-; IPAA:   MayAlias:	double* %escape_alloca_b1, double* %normal_ret_b0
-; IPAA:   NoAlias:	double* %noescape_alloca_b0, double* %normal_ret_b0
-; IPAA:   NoAlias:	double* %noescape_alloca_b1, double* %normal_ret_b0
-; IPAA:   MayAlias:	double* %callee_arg, double* %normal_ret_b1
-; IPAA:   MayAlias:	double* %nocap_callee_arg, double* %normal_ret_b1
-; IPAA:   MayAlias:	double* %arg_a0, double* %normal_ret_b1
-; IPAA:   MayAlias:	double* %arg_a1, double* %normal_ret_b1
-; IPAA:   MayAlias:	double* %noalias_arg_a0, double* %normal_ret_b1
-; IPAA:   MayAlias:	double* %noalias_arg_a1, double* %normal_ret_b1
-; IPAA:   MayAlias:	double* %normal_ret_b1, double** %indirect_a0
-; IPAA:   MayAlias:	double* %normal_ret_b1, double** %indirect_a1
-; IPAA:   MayAlias:	double* %loaded_a0, double* %normal_ret_b1
-; IPAA:   MayAlias:	double* %loaded_a1, double* %normal_ret_b1
-; IPAA:   MayAlias:	double* %escape_alloca_a0, double* %normal_ret_b1
-; IPAA:   MayAlias:	double* %escape_alloca_a1, double* %normal_ret_b1
-; IPAA:   MayAlias:	double* %noescape_alloca_a0, double* %normal_ret_b1
-; IPAA:   MayAlias:	double* %noescape_alloca_a1, double* %normal_ret_b1
-; IPAA:   MayAlias:	double* %normal_ret_a0, double* %normal_ret_b1
-; IPAA:   MayAlias:	double* %normal_ret_a1, double* %normal_ret_b1
-; IPAA:   MayAlias:	double* %noalias_ret_a0, double* %normal_ret_b1
-; IPAA:   MayAlias:	double* %noalias_ret_a1, double* %normal_ret_b1
-; IPAA:   MayAlias:	double* %arg_b0, double* %normal_ret_b1
-; IPAA:   MayAlias:	double* %arg_b1, double* %normal_ret_b1
-; IPAA:   NoAlias:	double* %noalias_arg_b0, double* %normal_ret_b1
-; IPAA:   NoAlias:	double* %noalias_arg_b1, double* %normal_ret_b1
-; IPAA:   MayAlias:	double* %normal_ret_b1, double** %indirect_b0
-; IPAA:   MayAlias:	double* %normal_ret_b1, double** %indirect_b1
-; IPAA:   MayAlias:	double* %loaded_b0, double* %normal_ret_b1
-; IPAA:   MayAlias:	double* %loaded_b1, double* %normal_ret_b1
-; IPAA:   MayAlias:	double* %escape_alloca_b0, double* %normal_ret_b1
-; IPAA:   MayAlias:	double* %escape_alloca_b1, double* %normal_ret_b1
-; IPAA:   NoAlias:	double* %noescape_alloca_b0, double* %normal_ret_b1
-; IPAA:   NoAlias:	double* %noescape_alloca_b1, double* %normal_ret_b1
-; IPAA:   MayAlias:	double* %normal_ret_b0, double* %normal_ret_b1
-; IPAA:   MayAlias:	double* %callee_arg, double* %noalias_ret_b0
-; IPAA:   MayAlias:	double* %noalias_ret_b0, double* %nocap_callee_arg
-; IPAA:   MayAlias:	double* %arg_a0, double* %noalias_ret_b0
-; IPAA:   MayAlias:	double* %arg_a1, double* %noalias_ret_b0
-; IPAA:   MayAlias:	double* %noalias_arg_a0, double* %noalias_ret_b0
-; IPAA:   MayAlias:	double* %noalias_arg_a1, double* %noalias_ret_b0
-; IPAA:   MayAlias:	double* %noalias_ret_b0, double** %indirect_a0
-; IPAA:   MayAlias:	double* %noalias_ret_b0, double** %indirect_a1
-; IPAA:   MayAlias:	double* %loaded_a0, double* %noalias_ret_b0
-; IPAA:   MayAlias:	double* %loaded_a1, double* %noalias_ret_b0
-; IPAA:   MayAlias:	double* %escape_alloca_a0, double* %noalias_ret_b0
-; IPAA:   MayAlias:	double* %escape_alloca_a1, double* %noalias_ret_b0
-; IPAA:   MayAlias:	double* %noalias_ret_b0, double* %noescape_alloca_a0
-; IPAA:   MayAlias:	double* %noalias_ret_b0, double* %noescape_alloca_a1
-; IPAA:   MayAlias:	double* %noalias_ret_b0, double* %normal_ret_a0
-; IPAA:   MayAlias:	double* %noalias_ret_b0, double* %normal_ret_a1
-; IPAA:   MayAlias:	double* %noalias_ret_a0, double* %noalias_ret_b0
-; IPAA:   MayAlias:	double* %noalias_ret_a1, double* %noalias_ret_b0
-; IPAA:   NoAlias:	double* %arg_b0, double* %noalias_ret_b0
-; IPAA:   NoAlias:	double* %arg_b1, double* %noalias_ret_b0
-; IPAA:   NoAlias:	double* %noalias_arg_b0, double* %noalias_ret_b0
-; IPAA:   NoAlias:	double* %noalias_arg_b1, double* %noalias_ret_b0
-; IPAA:   NoAlias:	double* %noalias_ret_b0, double** %indirect_b0
-; IPAA:   NoAlias:	double* %noalias_ret_b0, double** %indirect_b1
-; IPAA:   NoAlias:	double* %loaded_b0, double* %noalias_ret_b0
-; IPAA:   NoAlias:	double* %loaded_b1, double* %noalias_ret_b0
-; IPAA:   NoAlias:	double* %escape_alloca_b0, double* %noalias_ret_b0
-; IPAA:   NoAlias:	double* %escape_alloca_b1, double* %noalias_ret_b0
-; IPAA:   NoAlias:	double* %noalias_ret_b0, double* %noescape_alloca_b0
-; IPAA:   NoAlias:	double* %noalias_ret_b0, double* %noescape_alloca_b1
-; IPAA:   NoAlias:	double* %noalias_ret_b0, double* %normal_ret_b0
-; IPAA:   NoAlias:	double* %noalias_ret_b0, double* %normal_ret_b1
-; IPAA:   MayAlias:	double* %callee_arg, double* %noalias_ret_b1
-; IPAA:   MayAlias:	double* %noalias_ret_b1, double* %nocap_callee_arg
-; IPAA:   MayAlias:	double* %arg_a0, double* %noalias_ret_b1
-; IPAA:   MayAlias:	double* %arg_a1, double* %noalias_ret_b1
-; IPAA:   MayAlias:	double* %noalias_arg_a0, double* %noalias_ret_b1
-; IPAA:   MayAlias:	double* %noalias_arg_a1, double* %noalias_ret_b1
-; IPAA:   MayAlias:	double* %noalias_ret_b1, double** %indirect_a0
-; IPAA:   MayAlias:	double* %noalias_ret_b1, double** %indirect_a1
-; IPAA:   MayAlias:	double* %loaded_a0, double* %noalias_ret_b1
-; IPAA:   MayAlias:	double* %loaded_a1, double* %noalias_ret_b1
-; IPAA:   MayAlias:	double* %escape_alloca_a0, double* %noalias_ret_b1
-; IPAA:   MayAlias:	double* %escape_alloca_a1, double* %noalias_ret_b1
-; IPAA:   MayAlias:	double* %noalias_ret_b1, double* %noescape_alloca_a0
-; IPAA:   MayAlias:	double* %noalias_ret_b1, double* %noescape_alloca_a1
-; IPAA:   MayAlias:	double* %noalias_ret_b1, double* %normal_ret_a0
-; IPAA:   MayAlias:	double* %noalias_ret_b1, double* %normal_ret_a1
-; IPAA:   MayAlias:	double* %noalias_ret_a0, double* %noalias_ret_b1
-; IPAA:   MayAlias:	double* %noalias_ret_a1, double* %noalias_ret_b1
-; IPAA:   NoAlias:	double* %arg_b0, double* %noalias_ret_b1
-; IPAA:   NoAlias:	double* %arg_b1, double* %noalias_ret_b1
-; IPAA:   NoAlias:	double* %noalias_arg_b0, double* %noalias_ret_b1
-; IPAA:   NoAlias:	double* %noalias_arg_b1, double* %noalias_ret_b1
-; IPAA:   NoAlias:	double* %noalias_ret_b1, double** %indirect_b0
-; IPAA:   NoAlias:	double* %noalias_ret_b1, double** %indirect_b1
-; IPAA:   NoAlias:	double* %loaded_b0, double* %noalias_ret_b1
-; IPAA:   NoAlias:	double* %loaded_b1, double* %noalias_ret_b1
-; IPAA:   NoAlias:	double* %escape_alloca_b0, double* %noalias_ret_b1
-; IPAA:   NoAlias:	double* %escape_alloca_b1, double* %noalias_ret_b1
-; IPAA:   NoAlias:	double* %noalias_ret_b1, double* %noescape_alloca_b0
-; IPAA:   NoAlias:	double* %noalias_ret_b1, double* %noescape_alloca_b1
-; IPAA:   NoAlias:	double* %noalias_ret_b1, double* %normal_ret_b0
-; IPAA:   NoAlias:	double* %noalias_ret_b1, double* %normal_ret_b1
-; IPAA:   NoAlias:	double* %noalias_ret_b0, double* %noalias_ret_b1
-; IPAA:     ModRef:  Ptr: double* %callee_arg	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %nocap_callee_arg	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %arg_a0	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %arg_a1	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noalias_arg_a0	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noalias_arg_a1	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double** %indirect_a0	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double** %indirect_a1	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %loaded_a0	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %loaded_a1	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_a0	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_a1	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noescape_alloca_a0	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noescape_alloca_a1	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %normal_ret_a0	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %normal_ret_a1	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noalias_ret_a0	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noalias_ret_a1	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %arg_b0	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %arg_b1	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noalias_arg_b0	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noalias_arg_b1	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double** %indirect_b0	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double** %indirect_b1	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %loaded_b0	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %loaded_b1	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_b0	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_b1	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noescape_alloca_b0	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noescape_alloca_b1	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %normal_ret_b0	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %normal_ret_b1	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noalias_ret_b0	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noalias_ret_b1	<->  %normal_ret_a0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %callee_arg	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %nocap_callee_arg	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %arg_a0	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %arg_a1	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noalias_arg_a0	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noalias_arg_a1	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double** %indirect_a0	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double** %indirect_a1	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %loaded_a0	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %loaded_a1	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_a0	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_a1	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noescape_alloca_a0	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noescape_alloca_a1	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %normal_ret_a0	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %normal_ret_a1	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noalias_ret_a0	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noalias_ret_a1	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %arg_b0	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %arg_b1	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noalias_arg_b0	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noalias_arg_b1	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double** %indirect_b0	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double** %indirect_b1	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %loaded_b0	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %loaded_b1	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_b0	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_b1	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noescape_alloca_b0	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noescape_alloca_b1	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %normal_ret_b0	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %normal_ret_b1	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noalias_ret_b0	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noalias_ret_b1	<->  %normal_ret_a1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %callee_arg	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %nocap_callee_arg	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %arg_a0	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %arg_a1	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noalias_arg_a0	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noalias_arg_a1	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double** %indirect_a0	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double** %indirect_a1	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %loaded_a0	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %loaded_a1	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_a0	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_a1	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noescape_alloca_a0	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noescape_alloca_a1	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %normal_ret_a0	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %normal_ret_a1	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noalias_ret_a0	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noalias_ret_a1	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %arg_b0	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %arg_b1	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noalias_arg_b0	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noalias_arg_b1	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double** %indirect_b0	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double** %indirect_b1	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %loaded_b0	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %loaded_b1	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_b0	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_b1	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noescape_alloca_b0	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noescape_alloca_b1	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %normal_ret_b0	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %normal_ret_b1	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noalias_ret_b0	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noalias_ret_b1	<->  %noalias_ret_a0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %callee_arg	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %nocap_callee_arg	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %arg_a0	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %arg_a1	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noalias_arg_a0	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noalias_arg_a1	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double** %indirect_a0	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double** %indirect_a1	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %loaded_a0	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %loaded_a1	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_a0	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_a1	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noescape_alloca_a0	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noescape_alloca_a1	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %normal_ret_a0	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %normal_ret_a1	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noalias_ret_a0	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noalias_ret_a1	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %arg_b0	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %arg_b1	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noalias_arg_b0	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noalias_arg_b1	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double** %indirect_b0	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double** %indirect_b1	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %loaded_b0	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %loaded_b1	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_b0	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_b1	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noescape_alloca_b0	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noescape_alloca_b1	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %normal_ret_b0	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %normal_ret_b1	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noalias_ret_b0	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noalias_ret_b1	<->  %noalias_ret_a1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %callee_arg	<->  call void @callee(double* %escape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %nocap_callee_arg	<->  call void @callee(double* %escape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %arg_a0	<->  call void @callee(double* %escape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %arg_a1	<->  call void @callee(double* %escape_alloca_a0)
-; IPAA:   NoModRef:  Ptr: double* %noalias_arg_a0	<->  call void @callee(double* %escape_alloca_a0)
-; IPAA:   NoModRef:  Ptr: double* %noalias_arg_a1	<->  call void @callee(double* %escape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double** %indirect_a0	<->  call void @callee(double* %escape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double** %indirect_a1	<->  call void @callee(double* %escape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %loaded_a0	<->  call void @callee(double* %escape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %loaded_a1	<->  call void @callee(double* %escape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_a0	<->  call void @callee(double* %escape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_a1	<->  call void @callee(double* %escape_alloca_a0)
-; IPAA:   NoModRef:  Ptr: double* %noescape_alloca_a0	<->  call void @callee(double* %escape_alloca_a0)
-; IPAA:   NoModRef:  Ptr: double* %noescape_alloca_a1	<->  call void @callee(double* %escape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %normal_ret_a0	<->  call void @callee(double* %escape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %normal_ret_a1	<->  call void @callee(double* %escape_alloca_a0)
-; IPAA:   NoModRef:  Ptr: double* %noalias_ret_a0	<->  call void @callee(double* %escape_alloca_a0)
-; IPAA:   NoModRef:  Ptr: double* %noalias_ret_a1	<->  call void @callee(double* %escape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %arg_b0	<->  call void @callee(double* %escape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %arg_b1	<->  call void @callee(double* %escape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %noalias_arg_b0	<->  call void @callee(double* %escape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %noalias_arg_b1	<->  call void @callee(double* %escape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double** %indirect_b0	<->  call void @callee(double* %escape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double** %indirect_b1	<->  call void @callee(double* %escape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %loaded_b0	<->  call void @callee(double* %escape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %loaded_b1	<->  call void @callee(double* %escape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_b0	<->  call void @callee(double* %escape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_b1	<->  call void @callee(double* %escape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %noescape_alloca_b0	<->  call void @callee(double* %escape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %noescape_alloca_b1	<->  call void @callee(double* %escape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %normal_ret_b0	<->  call void @callee(double* %escape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %normal_ret_b1	<->  call void @callee(double* %escape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %noalias_ret_b0	<->  call void @callee(double* %escape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %noalias_ret_b1	<->  call void @callee(double* %escape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %callee_arg	<->  call void @callee(double* %escape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %nocap_callee_arg	<->  call void @callee(double* %escape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %arg_a0	<->  call void @callee(double* %escape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %arg_a1	<->  call void @callee(double* %escape_alloca_a1)
-; IPAA:   NoModRef:  Ptr: double* %noalias_arg_a0	<->  call void @callee(double* %escape_alloca_a1)
-; IPAA:   NoModRef:  Ptr: double* %noalias_arg_a1	<->  call void @callee(double* %escape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double** %indirect_a0	<->  call void @callee(double* %escape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double** %indirect_a1	<->  call void @callee(double* %escape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %loaded_a0	<->  call void @callee(double* %escape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %loaded_a1	<->  call void @callee(double* %escape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_a0	<->  call void @callee(double* %escape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_a1	<->  call void @callee(double* %escape_alloca_a1)
-; IPAA:   NoModRef:  Ptr: double* %noescape_alloca_a0	<->  call void @callee(double* %escape_alloca_a1)
-; IPAA:   NoModRef:  Ptr: double* %noescape_alloca_a1	<->  call void @callee(double* %escape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %normal_ret_a0	<->  call void @callee(double* %escape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %normal_ret_a1	<->  call void @callee(double* %escape_alloca_a1)
-; IPAA:   NoModRef:  Ptr: double* %noalias_ret_a0	<->  call void @callee(double* %escape_alloca_a1)
-; IPAA:   NoModRef:  Ptr: double* %noalias_ret_a1	<->  call void @callee(double* %escape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %arg_b0	<->  call void @callee(double* %escape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %arg_b1	<->  call void @callee(double* %escape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %noalias_arg_b0	<->  call void @callee(double* %escape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %noalias_arg_b1	<->  call void @callee(double* %escape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double** %indirect_b0	<->  call void @callee(double* %escape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double** %indirect_b1	<->  call void @callee(double* %escape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %loaded_b0	<->  call void @callee(double* %escape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %loaded_b1	<->  call void @callee(double* %escape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_b0	<->  call void @callee(double* %escape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_b1	<->  call void @callee(double* %escape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %noescape_alloca_b0	<->  call void @callee(double* %escape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %noescape_alloca_b1	<->  call void @callee(double* %escape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %normal_ret_b0	<->  call void @callee(double* %escape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %normal_ret_b1	<->  call void @callee(double* %escape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %noalias_ret_b0	<->  call void @callee(double* %escape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %noalias_ret_b1	<->  call void @callee(double* %escape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %callee_arg	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %nocap_callee_arg	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %arg_a0	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %arg_a1	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; IPAA:   NoModRef:  Ptr: double* %noalias_arg_a0	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; IPAA:   NoModRef:  Ptr: double* %noalias_arg_a1	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double** %indirect_a0	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double** %indirect_a1	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %loaded_a0	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %loaded_a1	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_a0	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_a1	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %noescape_alloca_a0	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; IPAA:   NoModRef:  Ptr: double* %noescape_alloca_a1	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %normal_ret_a0	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %normal_ret_a1	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; IPAA:   NoModRef:  Ptr: double* %noalias_ret_a0	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; IPAA:   NoModRef:  Ptr: double* %noalias_ret_a1	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %arg_b0	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %arg_b1	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %noalias_arg_b0	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %noalias_arg_b1	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double** %indirect_b0	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double** %indirect_b1	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %loaded_b0	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %loaded_b1	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_b0	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_b1	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %noescape_alloca_b0	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %noescape_alloca_b1	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %normal_ret_b0	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %normal_ret_b1	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %noalias_ret_b0	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %noalias_ret_b1	<->  call void @nocap_callee(double* %noescape_alloca_a0)
-; IPAA:     ModRef:  Ptr: double* %callee_arg	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %nocap_callee_arg	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %arg_a0	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %arg_a1	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; IPAA:   NoModRef:  Ptr: double* %noalias_arg_a0	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; IPAA:   NoModRef:  Ptr: double* %noalias_arg_a1	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double** %indirect_a0	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double** %indirect_a1	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %loaded_a0	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %loaded_a1	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_a0	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_a1	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; IPAA:   NoModRef:  Ptr: double* %noescape_alloca_a0	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %noescape_alloca_a1	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %normal_ret_a0	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %normal_ret_a1	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; IPAA:   NoModRef:  Ptr: double* %noalias_ret_a0	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; IPAA:   NoModRef:  Ptr: double* %noalias_ret_a1	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %arg_b0	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %arg_b1	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %noalias_arg_b0	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %noalias_arg_b1	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double** %indirect_b0	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double** %indirect_b1	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %loaded_b0	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %loaded_b1	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_b0	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_b1	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %noescape_alloca_b0	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %noescape_alloca_b1	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %normal_ret_b0	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %normal_ret_b1	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %noalias_ret_b0	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %noalias_ret_b1	<->  call void @nocap_callee(double* %noescape_alloca_a1)
-; IPAA:     ModRef:  Ptr: double* %callee_arg	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %nocap_callee_arg	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %arg_a0	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %arg_a1	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noalias_arg_a0	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noalias_arg_a1	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double** %indirect_a0	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double** %indirect_a1	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %loaded_a0	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %loaded_a1	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_a0	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_a1	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noescape_alloca_a0	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noescape_alloca_a1	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %normal_ret_a0	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %normal_ret_a1	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noalias_ret_a0	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noalias_ret_a1	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %arg_b0	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %arg_b1	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noalias_arg_b0	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noalias_arg_b1	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double** %indirect_b0	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double** %indirect_b1	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %loaded_b0	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %loaded_b1	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_b0	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_b1	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noescape_alloca_b0	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noescape_alloca_b1	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %normal_ret_b0	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %normal_ret_b1	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noalias_ret_b0	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noalias_ret_b1	<->  %normal_ret_b0 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %callee_arg	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %nocap_callee_arg	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %arg_a0	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %arg_a1	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noalias_arg_a0	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noalias_arg_a1	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double** %indirect_a0	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double** %indirect_a1	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %loaded_a0	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %loaded_a1	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_a0	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_a1	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noescape_alloca_a0	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noescape_alloca_a1	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %normal_ret_a0	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %normal_ret_a1	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noalias_ret_a0	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noalias_ret_a1	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %arg_b0	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %arg_b1	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noalias_arg_b0	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noalias_arg_b1	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double** %indirect_b0	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double** %indirect_b1	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %loaded_b0	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %loaded_b1	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_b0	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_b1	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noescape_alloca_b0	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noescape_alloca_b1	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %normal_ret_b0	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %normal_ret_b1	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noalias_ret_b0	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noalias_ret_b1	<->  %normal_ret_b1 = call double* @normal_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %callee_arg	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %nocap_callee_arg	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %arg_a0	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %arg_a1	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noalias_arg_a0	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noalias_arg_a1	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double** %indirect_a0	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double** %indirect_a1	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %loaded_a0	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %loaded_a1	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_a0	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_a1	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noescape_alloca_a0	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noescape_alloca_a1	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %normal_ret_a0	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %normal_ret_a1	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noalias_ret_a0	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noalias_ret_a1	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %arg_b0	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %arg_b1	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noalias_arg_b0	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noalias_arg_b1	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double** %indirect_b0	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double** %indirect_b1	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %loaded_b0	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %loaded_b1	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_b0	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_b1	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noescape_alloca_b0	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noescape_alloca_b1	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %normal_ret_b0	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %normal_ret_b1	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noalias_ret_b0	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noalias_ret_b1	<->  %noalias_ret_b0 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %callee_arg	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %nocap_callee_arg	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %arg_a0	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %arg_a1	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noalias_arg_a0	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noalias_arg_a1	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double** %indirect_a0	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double** %indirect_a1	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %loaded_a0	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %loaded_a1	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_a0	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_a1	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noescape_alloca_a0	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noescape_alloca_a1	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %normal_ret_a0	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %normal_ret_a1	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noalias_ret_a0	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noalias_ret_a1	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %arg_b0	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %arg_b1	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noalias_arg_b0	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noalias_arg_b1	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double** %indirect_b0	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double** %indirect_b1	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %loaded_b0	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %loaded_b1	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_b0	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_b1	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noescape_alloca_b0	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noescape_alloca_b1	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %normal_ret_b0	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %normal_ret_b1	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:   NoModRef:  Ptr: double* %noalias_ret_b0	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %noalias_ret_b1	<->  %noalias_ret_b1 = call double* @noalias_returner() ; <double*> [#uses=1]
-; IPAA:     ModRef:  Ptr: double* %callee_arg	<->  call void @callee(double* %escape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %nocap_callee_arg	<->  call void @callee(double* %escape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %arg_a0	<->  call void @callee(double* %escape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %arg_a1	<->  call void @callee(double* %escape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %noalias_arg_a0	<->  call void @callee(double* %escape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %noalias_arg_a1	<->  call void @callee(double* %escape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double** %indirect_a0	<->  call void @callee(double* %escape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double** %indirect_a1	<->  call void @callee(double* %escape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %loaded_a0	<->  call void @callee(double* %escape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %loaded_a1	<->  call void @callee(double* %escape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_a0	<->  call void @callee(double* %escape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_a1	<->  call void @callee(double* %escape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %noescape_alloca_a0	<->  call void @callee(double* %escape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %noescape_alloca_a1	<->  call void @callee(double* %escape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %normal_ret_a0	<->  call void @callee(double* %escape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %normal_ret_a1	<->  call void @callee(double* %escape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %noalias_ret_a0	<->  call void @callee(double* %escape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %noalias_ret_a1	<->  call void @callee(double* %escape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %arg_b0	<->  call void @callee(double* %escape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %arg_b1	<->  call void @callee(double* %escape_alloca_b0)
-; IPAA:   NoModRef:  Ptr: double* %noalias_arg_b0	<->  call void @callee(double* %escape_alloca_b0)
-; IPAA:   NoModRef:  Ptr: double* %noalias_arg_b1	<->  call void @callee(double* %escape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double** %indirect_b0	<->  call void @callee(double* %escape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double** %indirect_b1	<->  call void @callee(double* %escape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %loaded_b0	<->  call void @callee(double* %escape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %loaded_b1	<->  call void @callee(double* %escape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_b0	<->  call void @callee(double* %escape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_b1	<->  call void @callee(double* %escape_alloca_b0)
-; IPAA:   NoModRef:  Ptr: double* %noescape_alloca_b0	<->  call void @callee(double* %escape_alloca_b0)
-; IPAA:   NoModRef:  Ptr: double* %noescape_alloca_b1	<->  call void @callee(double* %escape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %normal_ret_b0	<->  call void @callee(double* %escape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %normal_ret_b1	<->  call void @callee(double* %escape_alloca_b0)
-; IPAA:   NoModRef:  Ptr: double* %noalias_ret_b0	<->  call void @callee(double* %escape_alloca_b0)
-; IPAA:   NoModRef:  Ptr: double* %noalias_ret_b1	<->  call void @callee(double* %escape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %callee_arg	<->  call void @callee(double* %escape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %nocap_callee_arg	<->  call void @callee(double* %escape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %arg_a0	<->  call void @callee(double* %escape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %arg_a1	<->  call void @callee(double* %escape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %noalias_arg_a0	<->  call void @callee(double* %escape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %noalias_arg_a1	<->  call void @callee(double* %escape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double** %indirect_a0	<->  call void @callee(double* %escape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double** %indirect_a1	<->  call void @callee(double* %escape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %loaded_a0	<->  call void @callee(double* %escape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %loaded_a1	<->  call void @callee(double* %escape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_a0	<->  call void @callee(double* %escape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_a1	<->  call void @callee(double* %escape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %noescape_alloca_a0	<->  call void @callee(double* %escape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %noescape_alloca_a1	<->  call void @callee(double* %escape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %normal_ret_a0	<->  call void @callee(double* %escape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %normal_ret_a1	<->  call void @callee(double* %escape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %noalias_ret_a0	<->  call void @callee(double* %escape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %noalias_ret_a1	<->  call void @callee(double* %escape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %arg_b0	<->  call void @callee(double* %escape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %arg_b1	<->  call void @callee(double* %escape_alloca_b1)
-; IPAA:   NoModRef:  Ptr: double* %noalias_arg_b0	<->  call void @callee(double* %escape_alloca_b1)
-; IPAA:   NoModRef:  Ptr: double* %noalias_arg_b1	<->  call void @callee(double* %escape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double** %indirect_b0	<->  call void @callee(double* %escape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double** %indirect_b1	<->  call void @callee(double* %escape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %loaded_b0	<->  call void @callee(double* %escape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %loaded_b1	<->  call void @callee(double* %escape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_b0	<->  call void @callee(double* %escape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_b1	<->  call void @callee(double* %escape_alloca_b1)
-; IPAA:   NoModRef:  Ptr: double* %noescape_alloca_b0	<->  call void @callee(double* %escape_alloca_b1)
-; IPAA:   NoModRef:  Ptr: double* %noescape_alloca_b1	<->  call void @callee(double* %escape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %normal_ret_b0	<->  call void @callee(double* %escape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %normal_ret_b1	<->  call void @callee(double* %escape_alloca_b1)
-; IPAA:   NoModRef:  Ptr: double* %noalias_ret_b0	<->  call void @callee(double* %escape_alloca_b1)
-; IPAA:   NoModRef:  Ptr: double* %noalias_ret_b1	<->  call void @callee(double* %escape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %callee_arg	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %nocap_callee_arg	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %arg_a0	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %arg_a1	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %noalias_arg_a0	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %noalias_arg_a1	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double** %indirect_a0	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double** %indirect_a1	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %loaded_a0	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %loaded_a1	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_a0	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_a1	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %noescape_alloca_a0	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %noescape_alloca_a1	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %normal_ret_a0	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %normal_ret_a1	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %noalias_ret_a0	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %noalias_ret_a1	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %arg_b0	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %arg_b1	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; IPAA:   NoModRef:  Ptr: double* %noalias_arg_b0	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; IPAA:   NoModRef:  Ptr: double* %noalias_arg_b1	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double** %indirect_b0	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double** %indirect_b1	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %loaded_b0	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %loaded_b1	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_b0	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_b1	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %noescape_alloca_b0	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; IPAA:   NoModRef:  Ptr: double* %noescape_alloca_b1	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %normal_ret_b0	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %normal_ret_b1	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; IPAA:   NoModRef:  Ptr: double* %noalias_ret_b0	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; IPAA:   NoModRef:  Ptr: double* %noalias_ret_b1	<->  call void @nocap_callee(double* %noescape_alloca_b0)
-; IPAA:     ModRef:  Ptr: double* %callee_arg	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %nocap_callee_arg	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %arg_a0	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %arg_a1	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %noalias_arg_a0	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %noalias_arg_a1	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double** %indirect_a0	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double** %indirect_a1	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %loaded_a0	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %loaded_a1	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_a0	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_a1	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %noescape_alloca_a0	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %noescape_alloca_a1	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %normal_ret_a0	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %normal_ret_a1	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %noalias_ret_a0	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %noalias_ret_a1	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %arg_b0	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %arg_b1	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; IPAA:   NoModRef:  Ptr: double* %noalias_arg_b0	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; IPAA:   NoModRef:  Ptr: double* %noalias_arg_b1	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double** %indirect_b0	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double** %indirect_b1	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %loaded_b0	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %loaded_b1	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_b0	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %escape_alloca_b1	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; IPAA:   NoModRef:  Ptr: double* %noescape_alloca_b0	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %noescape_alloca_b1	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %normal_ret_b0	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; IPAA:     ModRef:  Ptr: double* %normal_ret_b1	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; IPAA:   NoModRef:  Ptr: double* %noalias_ret_b0	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; IPAA:   NoModRef:  Ptr: double* %noalias_ret_b1	<->  call void @nocap_callee(double* %noescape_alloca_b1)
-; IPAA: ===== Alias Analysis Evaluator Report =====
-; IPAA:   561 Total Alias Queries Performed
-; IPAA:   184 no alias responses (32.7%)
-; IPAA:   377 may alias responses (67.2%)
-; IPAA:   0 must alias responses (0.0%)
-; IPAA:   Alias Analysis Evaluator Pointer Alias Summary: 32%/67%/0%
-; IPAA:   544 Total ModRef Queries Performed
-; IPAA:   88 no mod/ref responses (16.1%)
-; IPAA:   0 mod responses (0.0%)
-; IPAA:   0 ref responses (0.0%)
-; IPAA:   456 mod & ref responses (83.8%)
-; IPAA:   Alias Analysis Evaluator Mod/Ref Summary: 16%/0%/0%/83%
+; CHECK: Function: caller_a: 16 pointers, 8 call sites
+; CHECK:   MayAlias:	double* %arg_a0, double* %arg_a1
+; CHECK:   NoAlias:	double* %arg_a0, double* %noalias_arg_a0
+; CHECK:   NoAlias:	double* %arg_a1, double* %noalias_arg_a0
+; CHECK:   NoAlias:	double* %arg_a0, double* %noalias_arg_a1
+; CHECK:   NoAlias:	double* %arg_a1, double* %noalias_arg_a1
+; CHECK:   NoAlias:	double* %noalias_arg_a0, double* %noalias_arg_a1
+; CHECK:   MayAlias:	double* %arg_a0, double** %indirect_a0
+; CHECK:   MayAlias:	double* %arg_a1, double** %indirect_a0
+; CHECK:   NoAlias:	double* %noalias_arg_a0, double** %indirect_a0
+; CHECK:   NoAlias:	double* %noalias_arg_a1, double** %indirect_a0
+; CHECK:   MayAlias:	double* %arg_a0, double** %indirect_a1
+; CHECK:   MayAlias:	double* %arg_a1, double** %indirect_a1
+; CHECK:   NoAlias:	double* %noalias_arg_a0, double** %indirect_a1
+; CHECK:   NoAlias:	double* %noalias_arg_a1, double** %indirect_a1
+; CHECK:   MayAlias:	double** %indirect_a0, double** %indirect_a1
+; CHECK:   MayAlias:	double* %arg_a0, double* %loaded_a0
+; CHECK:   MayAlias:	double* %arg_a1, double* %loaded_a0
+; CHECK:   NoAlias:	double* %loaded_a0, double* %noalias_arg_a0
+; CHECK:   NoAlias:	double* %loaded_a0, double* %noalias_arg_a1
+; CHECK:   MayAlias:	double* %loaded_a0, double** %indirect_a0
+; CHECK:   MayAlias:	double* %loaded_a0, double** %indirect_a1
+; CHECK:   MayAlias:	double* %arg_a0, double* %loaded_a1
+; CHECK:   MayAlias:	double* %arg_a1, double* %loaded_a1
+; CHECK:   NoAlias:	double* %loaded_a1, double* %noalias_arg_a0
+; CHECK:   NoAlias:	double* %loaded_a1, double* %noalias_arg_a1
+; CHECK:   MayAlias:	double* %loaded_a1, double** %indirect_a0
+; CHECK:   MayAlias:	double* %loaded_a1, double** %indirect_a1
+; CHECK:   MayAlias:	double* %loaded_a0, double* %loaded_a1
+; CHECK:   NoAlias:	double* %arg_a0, double* %escape_alloca_a0
+; CHECK:   NoAlias:	double* %arg_a1, double* %escape_alloca_a0
+; CHECK:   NoAlias:	double* %escape_alloca_a0, double* %noalias_arg_a0
+; CHECK:   NoAlias:	double* %escape_alloca_a0, double* %noalias_arg_a1
+; CHECK:   NoAlias:	double* %escape_alloca_a0, double** %indirect_a0
+; CHECK:   NoAlias:	double* %escape_alloca_a0, double** %indirect_a1
+; CHECK:   MayAlias:	double* %escape_alloca_a0, double* %loaded_a0
+; CHECK:   MayAlias:	double* %escape_alloca_a0, double* %loaded_a1
+; CHECK:   NoAlias:	double* %arg_a0, double* %escape_alloca_a1
+; CHECK:   NoAlias:	double* %arg_a1, double* %escape_alloca_a1
+; CHECK:   NoAlias:	double* %escape_alloca_a1, double* %noalias_arg_a0
+; CHECK:   NoAlias:	double* %escape_alloca_a1, double* %noalias_arg_a1
+; CHECK:   NoAlias:	double* %escape_alloca_a1, double** %indirect_a0
+; CHECK:   NoAlias:	double* %escape_alloca_a1, double** %indirect_a1
+; CHECK:   MayAlias:	double* %escape_alloca_a1, double* %loaded_a0
+; CHECK:   MayAlias:	double* %escape_alloca_a1, double* %loaded_a1
+; CHECK:   NoAlias:	double* %escape_alloca_a0, double* %escape_alloca_a1
+; CHECK:   NoAlias:	double* %arg_a0, double* %noescape_alloca_a0
+; CHECK:   NoAlias:	double* %arg_a1, double* %noescape_alloca_a0
+; CHECK:   NoAlias:	double* %noalias_arg_a0, double* %noescape_alloca_a0
+; CHECK:   NoAlias:	double* %noalias_arg_a1, double* %noescape_alloca_a0
+; CHECK:   NoAlias:	double* %noescape_alloca_a0, double** %indirect_a0
+; CHECK:   NoAlias:	double* %noescape_alloca_a0, double** %indirect_a1
+; CHECK:   NoAlias:	double* %loaded_a0, double* %noescape_alloca_a0
+; CHECK:   NoAlias:	double* %loaded_a1, double* %noescape_alloca_a0
+; CHECK:   NoAlias:	double* %escape_alloca_a0, double* %noescape_alloca_a0
+; CHECK:   NoAlias:	double* %escape_alloca_a1, double* %noescape_alloca_a0
+; CHECK:   NoAlias:	double* %arg_a0, double* %noescape_alloca_a1
+; CHECK:   NoAlias:	double* %arg_a1, double* %noescape_alloca_a1
+; CHECK:   NoAlias:	double* %noalias_arg_a0, double* %noescape_alloca_a1
+; CHECK:   NoAlias:	double* %noalias_arg_a1, double* %noescape_alloca_a1
+; CHECK:   NoAlias:	double* %noescape_alloca_a1, double** %indirect_a0
+; CHECK:   NoAlias:	double* %noescape_alloca_a1, double** %indirect_a1
+; CHECK:   NoAlias:	double* %loaded_a0, double* %noescape_alloca_a1
+; CHECK:   NoAlias:	double* %loaded_a1, double* %noescape_alloca_a1
+; CHECK:   NoAlias:	double* %escape_alloca_a0, double* %noescape_alloca_a1
+; CHECK:   NoAlias:	double* %escape_alloca_a1, double* %noescape_alloca_a1
+; CHECK:   NoAlias:	double* %noescape_alloca_a0, double* %noescape_alloca_a1
+; CHECK:   MayAlias:	double* %arg_a0, double* %normal_ret_a0
+; CHECK:   MayAlias:	double* %arg_a1, double* %normal_ret_a0
+; CHECK:   NoAlias:	double* %noalias_arg_a0, double* %normal_ret_a0
+; CHECK:   NoAlias:	double* %noalias_arg_a1, double* %normal_ret_a0
+; CHECK:   MayAlias:	double* %normal_ret_a0, double** %indirect_a0
+; CHECK:   MayAlias:	double* %normal_ret_a0, double** %indirect_a1
+; CHECK:   MayAlias:	double* %loaded_a0, double* %normal_ret_a0
+; CHECK:   MayAlias:	double* %loaded_a1, double* %normal_ret_a0
+; CHECK:   MayAlias:	double* %escape_alloca_a0, double* %normal_ret_a0
+; CHECK:   MayAlias:	double* %escape_alloca_a1, double* %normal_ret_a0
+; CHECK:   NoAlias:	double* %noescape_alloca_a0, double* %normal_ret_a0
+; CHECK:   NoAlias:	double* %noescape_alloca_a1, double* %normal_ret_a0
+; CHECK:   MayAlias:	double* %arg_a0, double* %normal_ret_a1
+; CHECK:   MayAlias:	double* %arg_a1, double* %normal_ret_a1
+; CHECK:   NoAlias:	double* %noalias_arg_a0, double* %normal_ret_a1
+; CHECK:   NoAlias:	double* %noalias_arg_a1, double* %normal_ret_a1
+; CHECK:   MayAlias:	double* %normal_ret_a1, double** %indirect_a0
+; CHECK:   MayAlias:	double* %normal_ret_a1, double** %indirect_a1
+; CHECK:   MayAlias:	double* %loaded_a0, double* %normal_ret_a1
+; CHECK:   MayAlias:	double* %loaded_a1, double* %normal_ret_a1
+; CHECK:   MayAlias:	double* %escape_alloca_a0, double* %normal_ret_a1
+; CHECK:   MayAlias:	double* %escape_alloca_a1, double* %normal_ret_a1
+; CHECK:   NoAlias:	double* %noescape_alloca_a0, double* %normal_ret_a1
+; CHECK:   NoAlias:	double* %noescape_alloca_a1, double* %normal_ret_a1
+; CHECK:   MayAlias:	double* %normal_ret_a0, double* %normal_ret_a1
+; CHECK:   NoAlias:	double* %arg_a0, double* %noalias_ret_a0
+; CHECK:   NoAlias:	double* %arg_a1, double* %noalias_ret_a0
+; CHECK:   NoAlias:	double* %noalias_arg_a0, double* %noalias_ret_a0
+; CHECK:   NoAlias:	double* %noalias_arg_a1, double* %noalias_ret_a0
+; CHECK:   NoAlias:	double* %noalias_ret_a0, double** %indirect_a0
+; CHECK:   NoAlias:	double* %noalias_ret_a0, double** %indirect_a1
+; CHECK:   NoAlias:	double* %loaded_a0, double* %noalias_ret_a0
+; CHECK:   NoAlias:	double* %loaded_a1, double* %noalias_ret_a0
+; CHECK:   NoAlias:	double* %escape_alloca_a0, double* %noalias_ret_a0
+; CHECK:   NoAlias:	double* %escape_alloca_a1, double* %noalias_ret_a0
+; CHECK:   NoAlias:	double* %noalias_ret_a0, double* %noescape_alloca_a0
+; CHECK:   NoAlias:	double* %noalias_ret_a0, double* %noescape_alloca_a1
+; CHECK:   NoAlias:	double* %noalias_ret_a0, double* %normal_ret_a0
+; CHECK:   NoAlias:	double* %noalias_ret_a0, double* %normal_ret_a1
+; CHECK:   NoAlias:	double* %arg_a0, double* %noalias_ret_a1
+; CHECK:   NoAlias:	double* %arg_a1, double* %noalias_ret_a1
+; CHECK:   NoAlias:	double* %noalias_arg_a0, double* %noalias_ret_a1
+; CHECK:   NoAlias:	double* %noalias_arg_a1, double* %noalias_ret_a1
+; CHECK:   NoAlias:	double* %noalias_ret_a1, double** %indirect_a0
+; CHECK:   NoAlias:	double* %noalias_ret_a1, double** %indirect_a1
+; CHECK:   NoAlias:	double* %loaded_a0, double* %noalias_ret_a1
+; CHECK:   NoAlias:	double* %loaded_a1, double* %noalias_ret_a1
+; CHECK:   NoAlias:	double* %escape_alloca_a0, double* %noalias_ret_a1
+; CHECK:   NoAlias:	double* %escape_alloca_a1, double* %noalias_ret_a1
+; CHECK:   NoAlias:	double* %noalias_ret_a1, double* %noescape_alloca_a0
+; CHECK:   NoAlias:	double* %noalias_ret_a1, double* %noescape_alloca_a1
+; CHECK:   NoAlias:	double* %noalias_ret_a1, double* %normal_ret_a0
+; CHECK:   NoAlias:	double* %noalias_ret_a1, double* %normal_ret_a1
+; CHECK:   NoAlias:	double* %noalias_ret_a0, double* %noalias_ret_a1
+; CHECK: Both ModRef:  Ptr: double* %arg_a0	<->  %normal_ret_a0 = call double* @normal_returner()
+; CHECK: Both ModRef:  Ptr: double* %arg_a1	<->  %normal_ret_a0 = call double* @normal_returner()
+; CHECK:   NoModRef:  Ptr: double* %noalias_arg_a0	<->  %normal_ret_a0 = call double* @normal_returner()
+; CHECK:   NoModRef:  Ptr: double* %noalias_arg_a1	<->  %normal_ret_a0 = call double* @normal_returner()
+; CHECK: Both ModRef:  Ptr: double** %indirect_a0	<->  %normal_ret_a0 = call double* @normal_returner()
+; CHECK: Both ModRef:  Ptr: double** %indirect_a1	<->  %normal_ret_a0 = call double* @normal_returner()
+; CHECK: Both ModRef:  Ptr: double* %loaded_a0	<->  %normal_ret_a0 = call double* @normal_returner()
+; CHECK: Both ModRef:  Ptr: double* %loaded_a1	<->  %normal_ret_a0 = call double* @normal_returner()
+; CHECK: Both ModRef:  Ptr: double* %escape_alloca_a0	<->  %normal_ret_a0 = call double* @normal_returner()
+; CHECK: Both ModRef:  Ptr: double* %escape_alloca_a1	<->  %normal_ret_a0 = call double* @normal_returner()
+; CHECK:   NoModRef:  Ptr: double* %noescape_alloca_a0	<->  %normal_ret_a0 = call double* @normal_returner()
+; CHECK:   NoModRef:  Ptr: double* %noescape_alloca_a1	<->  %normal_ret_a0 = call double* @normal_returner()
+; CHECK: Both ModRef:  Ptr: double* %normal_ret_a0	<->  %normal_ret_a0 = call double* @normal_returner()
+; CHECK: Both ModRef:  Ptr: double* %normal_ret_a1	<->  %normal_ret_a0 = call double* @normal_returner()
+; CHECK:   NoModRef:  Ptr: double* %noalias_ret_a0	<->  %normal_ret_a0 = call double* @normal_returner()
+; CHECK:   NoModRef:  Ptr: double* %noalias_ret_a1	<->  %normal_ret_a0 = call double* @normal_returner()
+; CHECK: Both ModRef:  Ptr: double* %arg_a0	<->  %normal_ret_a1 = call double* @normal_returner()
+; CHECK: Both ModRef:  Ptr: double* %arg_a1	<->  %normal_ret_a1 = call double* @normal_returner()
+; CHECK:   NoModRef:  Ptr: double* %noalias_arg_a0	<->  %normal_ret_a1 = call double* @normal_returner()
+; CHECK:   NoModRef:  Ptr: double* %noalias_arg_a1	<->  %normal_ret_a1 = call double* @normal_returner()
+; CHECK: Both ModRef:  Ptr: double** %indirect_a0	<->  %normal_ret_a1 = call double* @normal_returner()
+; CHECK: Both ModRef:  Ptr: double** %indirect_a1	<->  %normal_ret_a1 = call double* @normal_returner()
+; CHECK: Both ModRef:  Ptr: double* %loaded_a0	<->  %normal_ret_a1 = call double* @normal_returner()
+; CHECK: Both ModRef:  Ptr: double* %loaded_a1	<->  %normal_ret_a1 = call double* @normal_returner()
+; CHECK: Both ModRef:  Ptr: double* %escape_alloca_a0	<->  %normal_ret_a1 = call double* @normal_returner()
+; CHECK: Both ModRef:  Ptr: double* %escape_alloca_a1	<->  %normal_ret_a1 = call double* @normal_returner()
+; CHECK:   NoModRef:  Ptr: double* %noescape_alloca_a0	<->  %normal_ret_a1 = call double* @normal_returner()
+; CHECK:   NoModRef:  Ptr: double* %noescape_alloca_a1	<->  %normal_ret_a1 = call double* @normal_returner()
+; CHECK: Both ModRef:  Ptr: double* %normal_ret_a0	<->  %normal_ret_a1 = call double* @normal_returner()
+; CHECK: Both ModRef:  Ptr: double* %normal_ret_a1	<->  %normal_ret_a1 = call double* @normal_returner() 
+; CHECK:   NoModRef:  Ptr: double* %noalias_ret_a0	<->  %normal_ret_a1 = call double* @normal_returner() 
+; CHECK:   NoModRef:  Ptr: double* %noalias_ret_a1	<->  %normal_ret_a1 = call double* @normal_returner() 
+; CHECK: Both ModRef:  Ptr: double* %arg_a0	<->  %noalias_ret_a0 = call double* @noalias_returner() 
+; CHECK: Both ModRef:  Ptr: double* %arg_a1	<->  %noalias_ret_a0 = call double* @noalias_returner() 
+; CHECK:   NoModRef:  Ptr: double* %noalias_arg_a0	<->  %noalias_ret_a0 = call double* @noalias_returner() 
+; CHECK:   NoModRef:  Ptr: double* %noalias_arg_a1	<->  %noalias_ret_a0 = call double* @noalias_returner() 
+; CHECK: Both ModRef:  Ptr: double** %indirect_a0	<->  %noalias_ret_a0 = call double* @noalias_returner() 
+; CHECK: Both ModRef:  Ptr: double** %indirect_a1	<->  %noalias_ret_a0 = call double* @noalias_returner() 
+; CHECK: Both ModRef:  Ptr: double* %loaded_a0	<->  %noalias_ret_a0 = call double* @noalias_returner() 
+; CHECK: Both ModRef:  Ptr: double* %loaded_a1	<->  %noalias_ret_a0 = call double* @noalias_returner() 
+; CHECK: Both ModRef:  Ptr: double* %escape_alloca_a0	<->  %noalias_ret_a0 = call double* @noalias_returner() 
+; CHECK: Both ModRef:  Ptr: double* %escape_alloca_a1	<->  %noalias_ret_a0 = call double* @noalias_returner() 
+; CHECK:   NoModRef:  Ptr: double* %noescape_alloca_a0	<->  %noalias_ret_a0 = call double* @noalias_returner() 
+; CHECK:   NoModRef:  Ptr: double* %noescape_alloca_a1	<->  %noalias_ret_a0 = call double* @noalias_returner() 
+; CHECK: Both ModRef:  Ptr: double* %normal_ret_a0	<->  %noalias_ret_a0 = call double* @noalias_returner() 
+; CHECK: Both ModRef:  Ptr: double* %normal_ret_a1	<->  %noalias_ret_a0 = call double* @noalias_returner() 
+; CHECK: Both ModRef:  Ptr: double* %noalias_ret_a0	<->  %noalias_ret_a0 = call double* @noalias_returner() 
+; CHECK:   NoModRef:  Ptr: double* %noalias_ret_a1	<->  %noalias_ret_a0 = call double* @noalias_returner() 
+; CHECK: Both ModRef:  Ptr: double* %arg_a0	<->  %noalias_ret_a1 = call double* @noalias_returner() 
+; CHECK: Both ModRef:  Ptr: double* %arg_a1	<->  %noalias_ret_a1 = call double* @noalias_returner() 
+; CHECK:   NoModRef:  Ptr: double* %noalias_arg_a0	<->  %noalias_ret_a1 = call double* @noalias_returner() 
+; CHECK:   NoModRef:  Ptr: double* %noalias_arg_a1	<->  %noalias_ret_a1 = call double* @noalias_returner() 
+; CHECK: Both ModRef:  Ptr: double** %indirect_a0	<->  %noalias_ret_a1 = call double* @noalias_returner() 
+; CHECK: Both ModRef:  Ptr: double** %indirect_a1	<->  %noalias_ret_a1 = call double* @noalias_returner() 
+; CHECK: Both ModRef:  Ptr: double* %loaded_a0	<->  %noalias_ret_a1 = call double* @noalias_returner() 
+; CHECK: Both ModRef:  Ptr: double* %loaded_a1	<->  %noalias_ret_a1 = call double* @noalias_returner() 
+; CHECK: Both ModRef:  Ptr: double* %escape_alloca_a0	<->  %noalias_ret_a1 = call double* @noalias_returner() 
+; CHECK: Both ModRef:  Ptr: double* %escape_alloca_a1	<->  %noalias_ret_a1 = call double* @noalias_returner() 
+; CHECK:   NoModRef:  Ptr: double* %noescape_alloca_a0	<->  %noalias_ret_a1 = call double* @noalias_returner() 
+; CHECK:   NoModRef:  Ptr: double* %noescape_alloca_a1	<->  %noalias_ret_a1 = call double* @noalias_returner() 
+; CHECK: Both ModRef:  Ptr: double* %normal_ret_a0	<->  %noalias_ret_a1 = call double* @noalias_returner() 
+; CHECK: Both ModRef:  Ptr: double* %normal_ret_a1	<->  %noalias_ret_a1 = call double* @noalias_returner() 
+; CHECK:   NoModRef:  Ptr: double* %noalias_ret_a0	<->  %noalias_ret_a1 = call double* @noalias_returner() 
+; CHECK: Both ModRef:  Ptr: double* %noalias_ret_a1	<->  %noalias_ret_a1 = call double* @noalias_returner() 
+; CHECK: Both ModRef:  Ptr: double* %arg_a0	<->  call void @callee(double* %escape_alloca_a0)
+; CHECK: Both ModRef:  Ptr: double* %arg_a1	<->  call void @callee(double* %escape_alloca_a0)
+; CHECK:   NoModRef:  Ptr: double* %noalias_arg_a0	<->  call void @callee(double* %escape_alloca_a0)
+; CHECK:   NoModRef:  Ptr: double* %noalias_arg_a1	<->  call void @callee(double* %escape_alloca_a0)
+; CHECK: Both ModRef:  Ptr: double** %indirect_a0	<->  call void @callee(double* %escape_alloca_a0)
+; CHECK: Both ModRef:  Ptr: double** %indirect_a1	<->  call void @callee(double* %escape_alloca_a0)
+; CHECK: Both ModRef:  Ptr: double* %loaded_a0	<->  call void @callee(double* %escape_alloca_a0)
+; CHECK: Both ModRef:  Ptr: double* %loaded_a1	<->  call void @callee(double* %escape_alloca_a0)
+; CHECK: Both ModRef:  Ptr: double* %escape_alloca_a0	<->  call void @callee(double* %escape_alloca_a0)
+; CHECK: Both ModRef:  Ptr: double* %escape_alloca_a1	<->  call void @callee(double* %escape_alloca_a0)
+; CHECK:   NoModRef:  Ptr: double* %noescape_alloca_a0	<->  call void @callee(double* %escape_alloca_a0)
+; CHECK:   NoModRef:  Ptr: double* %noescape_alloca_a1	<->  call void @callee(double* %escape_alloca_a0)
+; CHECK: Both ModRef:  Ptr: double* %normal_ret_a0	<->  call void @callee(double* %escape_alloca_a0)
+; CHECK: Both ModRef:  Ptr: double* %normal_ret_a1	<->  call void @callee(double* %escape_alloca_a0)
+; CHECK:   NoModRef:  Ptr: double* %noalias_ret_a0	<->  call void @callee(double* %escape_alloca_a0)
+; CHECK:   NoModRef:  Ptr: double* %noalias_ret_a1	<->  call void @callee(double* %escape_alloca_a0)
+; CHECK: Both ModRef:  Ptr: double* %arg_a0	<->  call void @callee(double* %escape_alloca_a1)
+; CHECK: Both ModRef:  Ptr: double* %arg_a1	<->  call void @callee(double* %escape_alloca_a1)
+; CHECK:   NoModRef:  Ptr: double* %noalias_arg_a0	<->  call void @callee(double* %escape_alloca_a1)
+; CHECK:   NoModRef:  Ptr: double* %noalias_arg_a1	<->  call void @callee(double* %escape_alloca_a1)
+; CHECK: Both ModRef:  Ptr: double** %indirect_a0	<->  call void @callee(double* %escape_alloca_a1)
+; CHECK: Both ModRef:  Ptr: double** %indirect_a1	<->  call void @callee(double* %escape_alloca_a1)
+; CHECK: Both ModRef:  Ptr: double* %loaded_a0	<->  call void @callee(double* %escape_alloca_a1)
+; CHECK: Both ModRef:  Ptr: double* %loaded_a1	<->  call void @callee(double* %escape_alloca_a1)
+; CHECK: Both ModRef:  Ptr: double* %escape_alloca_a0	<->  call void @callee(double* %escape_alloca_a1)
+; CHECK: Both ModRef:  Ptr: double* %escape_alloca_a1	<->  call void @callee(double* %escape_alloca_a1)
+; CHECK:   NoModRef:  Ptr: double* %noescape_alloca_a0	<->  call void @callee(double* %escape_alloca_a1)
+; CHECK:   NoModRef:  Ptr: double* %noescape_alloca_a1	<->  call void @callee(double* %escape_alloca_a1)
+; CHECK: Both ModRef:  Ptr: double* %normal_ret_a0	<->  call void @callee(double* %escape_alloca_a1)
+; CHECK: Both ModRef:  Ptr: double* %normal_ret_a1	<->  call void @callee(double* %escape_alloca_a1)
+; CHECK:   NoModRef:  Ptr: double* %noalias_ret_a0	<->  call void @callee(double* %escape_alloca_a1)
+; CHECK:   NoModRef:  Ptr: double* %noalias_ret_a1	<->  call void @callee(double* %escape_alloca_a1)
+; CHECK: Both ModRef:  Ptr: double* %arg_a0	<->  call void @nocap_callee(double* %noescape_alloca_a0)
+; CHECK: Both ModRef:  Ptr: double* %arg_a1	<->  call void @nocap_callee(double* %noescape_alloca_a0)
+; CHECK:   NoModRef:  Ptr: double* %noalias_arg_a0	<->  call void @nocap_callee(double* %noescape_alloca_a0)
+; CHECK:   NoModRef:  Ptr: double* %noalias_arg_a1	<->  call void @nocap_callee(double* %noescape_alloca_a0)
+; CHECK: Both ModRef:  Ptr: double** %indirect_a0	<->  call void @nocap_callee(double* %noescape_alloca_a0)
+; CHECK: Both ModRef:  Ptr: double** %indirect_a1	<->  call void @nocap_callee(double* %noescape_alloca_a0)
+; CHECK: Both ModRef:  Ptr: double* %loaded_a0	<->  call void @nocap_callee(double* %noescape_alloca_a0)
+; CHECK: Both ModRef:  Ptr: double* %loaded_a1	<->  call void @nocap_callee(double* %noescape_alloca_a0)
+; CHECK: Both ModRef:  Ptr: double* %escape_alloca_a0	<->  call void @nocap_callee(double* %noescape_alloca_a0)
+; CHECK: Both ModRef:  Ptr: double* %escape_alloca_a1	<->  call void @nocap_callee(double* %noescape_alloca_a0)
+; CHECK: Both ModRef:  Ptr: double* %noescape_alloca_a0	<->  call void @nocap_callee(double* %noescape_alloca_a0)
+; CHECK:   NoModRef:  Ptr: double* %noescape_alloca_a1	<->  call void @nocap_callee(double* %noescape_alloca_a0)
+; CHECK: Both ModRef:  Ptr: double* %normal_ret_a0	<->  call void @nocap_callee(double* %noescape_alloca_a0)
+; CHECK: Both ModRef:  Ptr: double* %normal_ret_a1	<->  call void @nocap_callee(double* %noescape_alloca_a0)
+; CHECK:   NoModRef:  Ptr: double* %noalias_ret_a0	<->  call void @nocap_callee(double* %noescape_alloca_a0)
+; CHECK:   NoModRef:  Ptr: double* %noalias_ret_a1	<->  call void @nocap_callee(double* %noescape_alloca_a0)
+; CHECK: Both ModRef:  Ptr: double* %arg_a0	<->  call void @nocap_callee(double* %noescape_alloca_a1)
+; CHECK: Both ModRef:  Ptr: double* %arg_a1	<->  call void @nocap_callee(double* %noescape_alloca_a1)
+; CHECK:   NoModRef:  Ptr: double* %noalias_arg_a0	<->  call void @nocap_callee(double* %noescape_alloca_a1)
+; CHECK:   NoModRef:  Ptr: double* %noalias_arg_a1	<->  call void @nocap_callee(double* %noescape_alloca_a1)
+; CHECK: Both ModRef:  Ptr: double** %indirect_a0	<->  call void @nocap_callee(double* %noescape_alloca_a1)
+; CHECK: Both ModRef:  Ptr: double** %indirect_a1	<->  call void @nocap_callee(double* %noescape_alloca_a1)
+; CHECK: Both ModRef:  Ptr: double* %loaded_a0	<->  call void @nocap_callee(double* %noescape_alloca_a1)
+; CHECK: Both ModRef:  Ptr: double* %loaded_a1	<->  call void @nocap_callee(double* %noescape_alloca_a1)
+; CHECK: Both ModRef:  Ptr: double* %escape_alloca_a0	<->  call void @nocap_callee(double* %noescape_alloca_a1)
+; CHECK: Both ModRef:  Ptr: double* %escape_alloca_a1	<->  call void @nocap_callee(double* %noescape_alloca_a1)
+; CHECK:   NoModRef:  Ptr: double* %noescape_alloca_a0	<->  call void @nocap_callee(double* %noescape_alloca_a1)
+; CHECK: Both ModRef:  Ptr: double* %noescape_alloca_a1	<->  call void @nocap_callee(double* %noescape_alloca_a1)
+; CHECK: Both ModRef:  Ptr: double* %normal_ret_a0	<->  call void @nocap_callee(double* %noescape_alloca_a1)
+; CHECK: Both ModRef:  Ptr: double* %normal_ret_a1	<->  call void @nocap_callee(double* %noescape_alloca_a1)
+; CHECK:   NoModRef:  Ptr: double* %noalias_ret_a0	<->  call void @nocap_callee(double* %noescape_alloca_a1)
+; CHECK:   NoModRef:  Ptr: double* %noalias_ret_a1	<->  call void @nocap_callee(double* %noescape_alloca_a1)
+; CHECK: ===== Alias Analysis Evaluator Report =====
+; CHECK:   120 Total Alias Queries Performed
+; CHECK:   84 no alias responses (70.0%)
+; CHECK:   36 may alias responses (30.0%)
+; CHECK:   0 must alias responses (0.0%)
+; CHECK:   Alias Analysis Evaluator Pointer Alias Summary: 70%/30%/0%
+; CHECK:   184 Total ModRef Queries Performed
+; CHECK:   44 no mod/ref responses (23.9%)
+; CHECK:   0 mod responses (0.0%)
+; CHECK:   0 ref responses (0.0%)
+; CHECK:   140 mod & ref responses (76.0%)
+; CHECK:   Alias Analysis Evaluator Mod/Ref Summary: 23%/0%/0%/76%

Modified: llvm/branches/wendling/eh/test/Analysis/BasicAA/byval.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Analysis/BasicAA/byval.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Analysis/BasicAA/byval.ll (original)
+++ llvm/branches/wendling/eh/test/Analysis/BasicAA/byval.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: opt < %s -gvn -S | grep {ret i32 1}
+; RUN: opt < %s -basicaa -gvn -S | grep {ret i32 1}
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
 target triple = "i686-apple-darwin8"
 	%struct.x = type { i32, i32, i32, i32 }

Modified: llvm/branches/wendling/eh/test/Analysis/BasicAA/constant-over-index.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Analysis/BasicAA/constant-over-index.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Analysis/BasicAA/constant-over-index.ll (original)
+++ llvm/branches/wendling/eh/test/Analysis/BasicAA/constant-over-index.ll Tue Oct 26 19:48:03 2010
@@ -1,7 +1,8 @@
-; RUN: opt < %s -aa-eval -print-all-alias-modref-info \
-; RUN:   |& grep {MayAlias:	double\\* \[%\]p.0.i.0, double\\* \[%\]p3\$}
+; RUN: opt < %s -basicaa -aa-eval -print-all-alias-modref-info |& FileCheck %s
 ; PR4267
 
+; CHECK: MayAlias: double* %p.0.i.0, double* %p3
+
 ; %p3 is equal to %p.0.i.0 on the second iteration of the loop,
 ; so MayAlias is needed.
 

Modified: llvm/branches/wendling/eh/test/Analysis/BasicAA/empty.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Analysis/BasicAA/empty.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Analysis/BasicAA/empty.ll (original)
+++ llvm/branches/wendling/eh/test/Analysis/BasicAA/empty.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: opt < %s -aa-eval -print-all-alias-modref-info -disable-output \
+; RUN: opt < %s -basicaa -aa-eval -print-all-alias-modref-info -disable-output \
 ; RUN:   |& grep {NoAlias:	\{\}\\* \[%\]p, \{\}\\* \[%\]q}
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"

Modified: llvm/branches/wendling/eh/test/Analysis/BasicAA/featuretest.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Analysis/BasicAA/featuretest.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Analysis/BasicAA/featuretest.ll (original)
+++ llvm/branches/wendling/eh/test/Analysis/BasicAA/featuretest.ll Tue Oct 26 19:48:03 2010
@@ -1,17 +1,22 @@
 ; This testcase tests for various features the basicaa test should be able to 
 ; determine, as noted in the comments.
 
-; RUN: opt < %s -basicaa -gvn -instcombine -dce -S | not grep REMOVE
+; RUN: opt < %s -basicaa -gvn -instcombine -dce -S | FileCheck %s
 target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
 
 @Global = external global { i32 }
 
+declare void @external(i32*)
+
 ; Array test:  Test that operations on one local array do not invalidate 
 ; operations on another array.  Important for scientific codes.
 ;
 define i32 @different_array_test(i64 %A, i64 %B) {
 	%Array1 = alloca i32, i32 100
 	%Array2 = alloca i32, i32 200
+        
+        call void @external(i32* %Array1)
+        call void @external(i32* %Array2)
 
 	%pointer = getelementptr i32* %Array1, i64 %A
 	%val = load i32* %pointer
@@ -22,6 +27,8 @@
 	%REMOVE = load i32* %pointer ; redundant with above load
 	%retval = sub i32 %REMOVE, %val
 	ret i32 %retval
+; CHECK: @different_array_test
+; CHECK: ret i32 0
 }
 
 ; Constant index test: Constant indexes into the same array should not 
@@ -29,6 +36,8 @@
 ;
 define i32 @constant_array_index_test() {
 	%Array = alloca i32, i32 100
+        call void @external(i32* %Array)
+
 	%P1 = getelementptr i32* %Array, i64 7
 	%P2 = getelementptr i32* %Array, i64 6
 	
@@ -37,6 +46,8 @@
 	%BREMOVE = load i32* %P1
 	%Val = sub i32 %A, %BREMOVE
 	ret i32 %Val
+; CHECK: @constant_array_index_test
+; CHECK: ret i32 0
 }
 
 ; Test that if two pointers are spaced out by a constant getelementptr, that 
@@ -48,6 +59,8 @@
         %REMOVEv = load i32* %A
         %r = sub i32 %REMOVEu, %REMOVEv
         ret i32 %r
+; CHECK: @gep_distance_test
+; CHECK: ret i32 0
 }
 
 ; Test that if two pointers are spaced out by a constant offset, that they
@@ -60,6 +73,8 @@
 	%REMOVEv = load i32* %A1
         %r = sub i32 %REMOVEu, %REMOVEv
         ret i32 %r
+; CHECK: @gep_distance_test2
+; CHECK: ret i32 0
 }
 
 ; Test that we can do funny pointer things and that distance calc will still 
@@ -68,16 +83,45 @@
 	%X = load i32* %A
 	%B = bitcast i32* %A to i8*
 	%C = getelementptr i8* %B, i64 4
-	%Y = load i8* %C
-	ret i32 8
+        store i8 42, i8* %C
+	%Y = load i32* %A
+        %R = sub i32 %X, %Y
+	ret i32 %R
+; CHECK: @gep_distance_test3
+; CHECK: ret i32 0
 }
 
 ; Test that we can disambiguate globals reached through constantexpr geps
 define i32 @constexpr_test() {
    %X = alloca i32
+   call void @external(i32* %X)
+
    %Y = load i32* %X
    store i32 5, i32* getelementptr ({ i32 }* @Global, i64 0, i32 0)
    %REMOVE = load i32* %X
    %retval = sub i32 %Y, %REMOVE
    ret i32 %retval
+; CHECK: @constexpr_test
+; CHECK: ret i32 0
+}
+
+
+
+; PR7589
+; These two index expressions are different, this cannot be CSE'd.
+define i16 @zext_sext_confusion(i16* %row2col, i5 %j) nounwind{
+entry:
+  %sum5.cast = zext i5 %j to i64             ; <i64> [#uses=1]
+  %P1 = getelementptr i16* %row2col, i64 %sum5.cast
+  %row2col.load.1.2 = load i16* %P1, align 1 ; <i16> [#uses=1]
+  
+  %sum13.cast31 = sext i5 %j to i6          ; <i6> [#uses=1]
+  %sum13.cast = zext i6 %sum13.cast31 to i64      ; <i64> [#uses=1]
+  %P2 = getelementptr i16* %row2col, i64 %sum13.cast
+  %row2col.load.1.6 = load i16* %P2, align 1 ; <i16> [#uses=1]
+  
+  %.ret = sub i16 %row2col.load.1.6, %row2col.load.1.2 ; <i16> [#uses=1]
+  ret i16 %.ret
+; CHECK: @zext_sext_confusion
+; CHECK: ret i16 %.ret
 }

Modified: llvm/branches/wendling/eh/test/Analysis/BasicAA/gep-alias.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Analysis/BasicAA/gep-alias.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Analysis/BasicAA/gep-alias.ll (original)
+++ llvm/branches/wendling/eh/test/Analysis/BasicAA/gep-alias.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: opt < %s -gvn -instcombine -S |& FileCheck %s
+; RUN: opt < %s -basicaa -gvn -instcombine -S |& FileCheck %s
 
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
 
@@ -117,12 +117,12 @@
 
 ; P[zext(i)] != p[zext(i+1)]
 ; PR1143
-define i32 @test8(i32* %p, i32 %i) {
-  %i1 = zext i32 %i to i64
-  %pi = getelementptr i32* %p, i64 %i1
-  %i.next = add i32 %i, 1
-  %i.next2 = zext i32 %i.next to i64
-  %pi.next = getelementptr i32* %p, i64 %i.next2
+define i32 @test8(i32* %p, i16 %i) {
+  %i1 = zext i16 %i to i32
+  %pi = getelementptr i32* %p, i32 %i1
+  %i.next = add i16 %i, 1
+  %i.next2 = zext i16 %i.next to i32
+  %pi.next = getelementptr i32* %p, i32 %i.next2
   %x = load i32* %pi
   store i32 42, i32* %pi.next
   %y = load i32* %pi

Modified: llvm/branches/wendling/eh/test/Analysis/BasicAA/modref.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Analysis/BasicAA/modref.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Analysis/BasicAA/modref.ll (original)
+++ llvm/branches/wendling/eh/test/Analysis/BasicAA/modref.ll Tue Oct 26 19:48:03 2010
@@ -123,3 +123,14 @@
 ; CHECK: sub i32 %tmp, %tmp
 }
 
+define i8 @test6(i8* %p, i8* noalias %a) {
+  %x = load i8* %a
+  %t = va_arg i8* %p, float
+  %y = load i8* %a
+  %z = add i8 %x, %y
+  ret i8 %z
+; CHECK: @test6
+; CHECK: load i8* %a
+; CHECK-NOT: load
+; CHECK: ret
+}

Modified: llvm/branches/wendling/eh/test/Analysis/BasicAA/phi-and-select.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Analysis/BasicAA/phi-and-select.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Analysis/BasicAA/phi-and-select.ll (original)
+++ llvm/branches/wendling/eh/test/Analysis/BasicAA/phi-and-select.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: opt < %s -aa-eval -print-all-alias-modref-info -disable-output \
+; RUN: opt < %s -basicaa -aa-eval -print-all-alias-modref-info -disable-output \
 ; RUN:   |& grep {NoAlias:	double\\* \[%\]a, double\\* \[%\]b\$} | count 4
 
 ; BasicAA should detect NoAliases in PHIs and Selects.

Modified: llvm/branches/wendling/eh/test/Analysis/BasicAA/unreachable-block.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Analysis/BasicAA/unreachable-block.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Analysis/BasicAA/unreachable-block.ll (original)
+++ llvm/branches/wendling/eh/test/Analysis/BasicAA/unreachable-block.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: opt -aa-eval -disable-output < %s >& /dev/null
+; RUN: opt -basicaa -aa-eval -disable-output < %s >& /dev/null
 
 ; BasicAA shouldn't infinitely recurse on the use-def cycles in
 ; unreachable code.

Modified: llvm/branches/wendling/eh/test/Analysis/GlobalsModRef/aliastest.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Analysis/GlobalsModRef/aliastest.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Analysis/GlobalsModRef/aliastest.ll (original)
+++ llvm/branches/wendling/eh/test/Analysis/GlobalsModRef/aliastest.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: opt < %s -globalsmodref-aa -gvn -S | not grep load
+; RUN: opt < %s -basicaa -globalsmodref-aa -gvn -S | not grep load
 @X = internal global i32 4		; <i32*> [#uses=1]
 
 define i32 @test(i32* %P) {

Modified: llvm/branches/wendling/eh/test/Analysis/GlobalsModRef/chaining-analysis.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Analysis/GlobalsModRef/chaining-analysis.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Analysis/GlobalsModRef/chaining-analysis.ll (original)
+++ llvm/branches/wendling/eh/test/Analysis/GlobalsModRef/chaining-analysis.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: opt < %s -globalsmodref-aa -gvn -S | not grep load
+; RUN: opt < %s -basicaa -globalsmodref-aa -gvn -S | not grep load
 
 ; This test requires the use of previous analyses to determine that
 ; doesnotmodX does not modify X (because 'sin' doesn't).

Modified: llvm/branches/wendling/eh/test/Analysis/GlobalsModRef/indirect-global.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Analysis/GlobalsModRef/indirect-global.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Analysis/GlobalsModRef/indirect-global.ll (original)
+++ llvm/branches/wendling/eh/test/Analysis/GlobalsModRef/indirect-global.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: opt < %s -globalsmodref-aa -gvn -instcombine -S | \
+; RUN: opt < %s -basicaa -globalsmodref-aa -gvn -instcombine -S | \
 ; RUN:   grep {ret i32 0}
 
 @G = internal global i32* null		; <i32**> [#uses=3]

Modified: llvm/branches/wendling/eh/test/Analysis/GlobalsModRef/modreftest.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Analysis/GlobalsModRef/modreftest.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Analysis/GlobalsModRef/modreftest.ll (original)
+++ llvm/branches/wendling/eh/test/Analysis/GlobalsModRef/modreftest.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: opt < %s -globalsmodref-aa -gvn -S | not grep load
+; RUN: opt < %s -basicaa -globalsmodref-aa -gvn -S | not grep load
 @X = internal global i32 4		; <i32*> [#uses=2]
 
 define i32 @test(i32* %P) {

Modified: llvm/branches/wendling/eh/test/Analysis/LoopDependenceAnalysis/alias.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Analysis/LoopDependenceAnalysis/alias.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Analysis/LoopDependenceAnalysis/alias.ll (original)
+++ llvm/branches/wendling/eh/test/Analysis/LoopDependenceAnalysis/alias.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: opt < %s -analyze -lda | FileCheck %s
+; RUN: opt < %s -analyze -basicaa -lda | FileCheck %s
 
 ;; x[5] = x[6] // with x being a pointer passed as argument
 

Modified: llvm/branches/wendling/eh/test/Analysis/LoopDependenceAnalysis/siv-strong.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Analysis/LoopDependenceAnalysis/siv-strong.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Analysis/LoopDependenceAnalysis/siv-strong.ll (original)
+++ llvm/branches/wendling/eh/test/Analysis/LoopDependenceAnalysis/siv-strong.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: opt < %s -analyze -lda | FileCheck %s
+; RUN: opt < %s -analyze -basicaa -lda | FileCheck %s
 
 @x = common global [256 x i32] zeroinitializer, align 4
 @y = common global [256 x i32] zeroinitializer, align 4

Modified: llvm/branches/wendling/eh/test/Analysis/LoopDependenceAnalysis/siv-weak-crossing.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Analysis/LoopDependenceAnalysis/siv-weak-crossing.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Analysis/LoopDependenceAnalysis/siv-weak-crossing.ll (original)
+++ llvm/branches/wendling/eh/test/Analysis/LoopDependenceAnalysis/siv-weak-crossing.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: opt < %s -analyze -lda | FileCheck %s
+; RUN: opt < %s -analyze -basicaa -lda | FileCheck %s
 
 @x = common global [256 x i32] zeroinitializer, align 4
 @y = common global [256 x i32] zeroinitializer, align 4

Modified: llvm/branches/wendling/eh/test/Analysis/LoopDependenceAnalysis/siv-weak-zero.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Analysis/LoopDependenceAnalysis/siv-weak-zero.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Analysis/LoopDependenceAnalysis/siv-weak-zero.ll (original)
+++ llvm/branches/wendling/eh/test/Analysis/LoopDependenceAnalysis/siv-weak-zero.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: opt < %s -analyze -lda | FileCheck %s
+; RUN: opt < %s -analyze -basicaa -lda | FileCheck %s
 
 @x = common global [256 x i32] zeroinitializer, align 4
 @y = common global [256 x i32] zeroinitializer, align 4

Modified: llvm/branches/wendling/eh/test/Analysis/LoopDependenceAnalysis/ziv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Analysis/LoopDependenceAnalysis/ziv.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Analysis/LoopDependenceAnalysis/ziv.ll (original)
+++ llvm/branches/wendling/eh/test/Analysis/LoopDependenceAnalysis/ziv.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: opt < %s -analyze -lda | FileCheck %s
+; RUN: opt < %s -analyze -basicaa -lda | FileCheck %s
 
 @x = common global [256 x i32] zeroinitializer, align 4
 

Modified: llvm/branches/wendling/eh/test/Analysis/ScalarEvolution/avoid-smax-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Analysis/ScalarEvolution/avoid-smax-1.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Analysis/ScalarEvolution/avoid-smax-1.ll (original)
+++ llvm/branches/wendling/eh/test/Analysis/ScalarEvolution/avoid-smax-1.ll Tue Oct 26 19:48:03 2010
@@ -1,6 +1,6 @@
 ; RUN: opt < %s -indvars -S > %t
 ; RUN: grep select %t | count 2
-; RUN: grep {icmp ne i32.\* %w } %t
+; RUN: grep {icmp ne i32.\* } %t
 
 ; Indvars should be able to insert a canonical induction variable
 ; for the bb6 loop without using a maximum calculation (icmp, select)

Modified: llvm/branches/wendling/eh/test/Analysis/ScalarEvolution/max-trip-count.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Analysis/ScalarEvolution/max-trip-count.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Analysis/ScalarEvolution/max-trip-count.ll (original)
+++ llvm/branches/wendling/eh/test/Analysis/ScalarEvolution/max-trip-count.ll Tue Oct 26 19:48:03 2010
@@ -1,8 +1,9 @@
-; RUN: opt < %s -analyze -scalar-evolution \
-; RUN:   | grep {\{%d,+,\[^\{\}\]\*\}<%bb>}
+; RUN: opt < %s -analyze -scalar-evolution | FileCheck %s
 
 ; ScalarEvolution should be able to understand the loop and eliminate the casts.
 
+; CHECK: {%d,+,sizeof(i32)}
+
 define void @foo(i32* nocapture %d, i32 %n) nounwind {
 entry:
 	%0 = icmp sgt i32 %n, 0		; <i1> [#uses=1]
@@ -32,3 +33,40 @@
 return:		; preds = %bb1.return_crit_edge, %entry
 	ret void
 }
+
+; ScalarEvolution should be able to find the maximum tripcount
+; of this multiple-exit loop, and if it doesn't know the exact
+; count, it should say so.
+
+; PR7845
+; CHECK: Loop %for.cond: <multiple exits> Unpredictable backedge-taken count. 
+; CHECK: Loop %for.cond: max backedge-taken count is 5
+
+@.str = private constant [4 x i8] c"%d\0A\00"     ; <[4 x i8]*> [#uses=2]
+
+define i32 @main() nounwind {
+entry:
+  br label %for.cond
+
+for.cond:                                         ; preds = %for.inc, %entry
+  %g_4.0 = phi i32 [ 0, %entry ], [ %add, %for.inc ] ; <i32> [#uses=5]
+  %cmp = icmp slt i32 %g_4.0, 5                   ; <i1> [#uses=1]
+  br i1 %cmp, label %for.body, label %for.end
+
+for.body:                                         ; preds = %for.cond
+  %conv = trunc i32 %g_4.0 to i16                 ; <i16> [#uses=1]
+  %tobool.not = icmp eq i16 %conv, 0              ; <i1> [#uses=1]
+  %tobool3 = icmp ne i32 %g_4.0, 0                ; <i1> [#uses=1]
+  %or.cond = and i1 %tobool.not, %tobool3         ; <i1> [#uses=1]
+  br i1 %or.cond, label %for.end, label %for.inc
+
+for.inc:                                          ; preds = %for.body
+  %add = add nsw i32 %g_4.0, 1                    ; <i32> [#uses=1]
+  br label %for.cond
+
+for.end:                                          ; preds = %for.body, %for.cond
+  %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i32 %g_4.0) nounwind ; <i32> [#uses=0]
+  ret i32 0
+}
+
+declare i32 @printf(i8*, ...)

Modified: llvm/branches/wendling/eh/test/Archive/README.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Archive/README.txt?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Archive/README.txt (original)
+++ llvm/branches/wendling/eh/test/Archive/README.txt Tue Oct 26 19:48:03 2010
@@ -5,7 +5,7 @@
 compatibility reading other ar(1) formats. It also provides a basic
 functionality test for these tools.
 
-There are four archives stored in CVS with these tests: 
+There are four archives accompanying these tests: 
 
 GNU.a    - constructed on Linux with GNU ar
 MacOSX.a - constructed on Mac OS X with its native BSD4.4 ar

Removed: llvm/branches/wendling/eh/test/Assembler/2010-01-06-UnionType.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Assembler/2010-01-06-UnionType.ll?rev=117424&view=auto
==============================================================================
--- llvm/branches/wendling/eh/test/Assembler/2010-01-06-UnionType.ll (original)
+++ llvm/branches/wendling/eh/test/Assembler/2010-01-06-UnionType.ll (removed)
@@ -1,3 +0,0 @@
-; RUN: llvm-as %s -o /dev/null
-
-%X = type union { i32, i32* }

Modified: llvm/branches/wendling/eh/test/Assembler/AutoUpgradeIntrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Assembler/AutoUpgradeIntrinsics.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Assembler/AutoUpgradeIntrinsics.ll (original)
+++ llvm/branches/wendling/eh/test/Assembler/AutoUpgradeIntrinsics.ll Tue Oct 26 19:48:03 2010
@@ -7,7 +7,7 @@
 ; RUN: llvm-as < %s | llvm-dis | \
 ; RUN:   not grep {llvm\\.bswap\\.i\[0-9\]*\\.i\[0-9\]*}
 ; RUN: llvm-as < %s | llvm-dis | \
-; RUN:   grep {llvm\\.x86\\.mmx\\.ps} | grep {\\\<2 x i32\\\>} | count 6
+; RUN:   grep {llvm\\.x86\\.mmx\\.ps} | grep {x86_mmx} | count 16
 
 declare i32 @llvm.ctpop.i28(i28 %val)
 declare i32 @llvm.cttz.i29(i29 %val)

Modified: llvm/branches/wendling/eh/test/Assembler/getelementptr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Assembler/getelementptr.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Assembler/getelementptr.ll (original)
+++ llvm/branches/wendling/eh/test/Assembler/getelementptr.ll Tue Oct 26 19:48:03 2010
@@ -3,9 +3,9 @@
 ; Verify that over-indexed getelementptrs are folded.
 @A = external global [2 x [3 x [5 x [7 x i32]]]]
 @B = global i32* getelementptr ([2 x [3 x [5 x [7 x i32]]]]* @A, i64 0, i64 0, i64 2, i64 1, i64 7523)
-; CHECK: @B = global i32* getelementptr ([2 x [3 x [5 x [7 x i32]]]]* @A, i64 36, i64 0, i64 1, i64 0, i64 5) ; <i32**> [#uses=0]
+; CHECK: @B = global i32* getelementptr ([2 x [3 x [5 x [7 x i32]]]]* @A, i64 36, i64 0, i64 1, i64 0, i64 5)
 @C = global i32* getelementptr ([2 x [3 x [5 x [7 x i32]]]]* @A, i64 3, i64 2, i64 0, i64 0, i64 7523)
-; CHECK: @C = global i32* getelementptr ([2 x [3 x [5 x [7 x i32]]]]* @A, i64 39, i64 1, i64 1, i64 4, i64 5) ; <i32**> [#uses=0]
+; CHECK: @C = global i32* getelementptr ([2 x [3 x [5 x [7 x i32]]]]* @A, i64 39, i64 1, i64 1, i64 4, i64 5)
 
 ;; Verify that i16 indices work.
 @x = external global {i32, i32}

Modified: llvm/branches/wendling/eh/test/Bindings/Ocaml/analysis.ml
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Bindings/Ocaml/analysis.ml?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Bindings/Ocaml/analysis.ml (original)
+++ llvm/branches/wendling/eh/test/Bindings/Ocaml/analysis.ml Tue Oct 26 19:48:03 2010
@@ -1,5 +1,6 @@
 (* RUN: %ocamlopt -warn-error A llvm.cmxa llvm_analysis.cmxa %s -o %t
- * RUN: ./%t %t.bc
+ * RUN: %t
+ * XFAIL: vg_leak
  *)
 
 open Llvm

Modified: llvm/branches/wendling/eh/test/Bindings/Ocaml/bitreader.ml
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Bindings/Ocaml/bitreader.ml?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Bindings/Ocaml/bitreader.ml (original)
+++ llvm/branches/wendling/eh/test/Bindings/Ocaml/bitreader.ml Tue Oct 26 19:48:03 2010
@@ -1,6 +1,7 @@
 (* RUN: %ocamlopt -warn-error A llvm.cmxa llvm_bitreader.cmxa llvm_bitwriter.cmxa %s -o %t
- * RUN: ./%t %t.bc
+ * RUN: %t %t.bc
  * RUN: llvm-dis < %t.bc | grep caml_int_ty
+ * XFAIL: vg_leak
  *)
 
 (* Note that this takes a moment to link, so it's best to keep the number of

Modified: llvm/branches/wendling/eh/test/Bindings/Ocaml/bitwriter.ml
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Bindings/Ocaml/bitwriter.ml?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Bindings/Ocaml/bitwriter.ml (original)
+++ llvm/branches/wendling/eh/test/Bindings/Ocaml/bitwriter.ml Tue Oct 26 19:48:03 2010
@@ -1,6 +1,7 @@
 (* RUN: %ocamlopt -warn-error A unix.cmxa llvm.cmxa llvm_bitwriter.cmxa %s -o %t
- * RUN: ./%t %t.bc
+ * RUN: %t %t.bc
  * RUN: llvm-dis < %t.bc | grep caml_int_ty
+ * XFAIL: vg_leak
  *)
 
 (* Note that this takes a moment to link, so it's best to keep the number of

Modified: llvm/branches/wendling/eh/test/Bindings/Ocaml/executionengine.ml
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Bindings/Ocaml/executionengine.ml?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Bindings/Ocaml/executionengine.ml (original)
+++ llvm/branches/wendling/eh/test/Bindings/Ocaml/executionengine.ml Tue Oct 26 19:48:03 2010
@@ -1,5 +1,6 @@
 (* RUN: %ocamlopt -warn-error A llvm.cmxa llvm_target.cmxa llvm_executionengine.cmxa %s -o %t
- * RUN: ./%t %t.bc
+ * RUN: %t
+ * XFAIL: vg_leak
  *)
 
 open Llvm

Modified: llvm/branches/wendling/eh/test/Bindings/Ocaml/scalar_opts.ml
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Bindings/Ocaml/scalar_opts.ml?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Bindings/Ocaml/scalar_opts.ml (original)
+++ llvm/branches/wendling/eh/test/Bindings/Ocaml/scalar_opts.ml Tue Oct 26 19:48:03 2010
@@ -1,4 +1,6 @@
 (* RUN: %ocamlopt -warn-error A llvm.cmxa llvm_scalar_opts.cmxa llvm_target.cmxa %s -o %t
+ * RUN: %t %t.bc
+ * XFAIL: vg_leak
  *)
 
 (* Note: It takes several seconds for ocamlopt to link an executable with
@@ -13,8 +15,11 @@
 let void_type = Llvm.void_type context
 
 (* Tiny unit test framework - really just to help find which line is busted *)
+let print_checkpoints = false
+
 let suite name f =
-  prerr_endline (name ^ ":");
+  if print_checkpoints then
+    prerr_endline (name ^ ":");
   f ()
 
 
@@ -38,7 +43,7 @@
   ignore (PassManager.create_function m
            ++ TargetData.add td
            ++ add_constant_propagation
-					 ++ add_sccp
+           ++ add_sccp
            ++ add_dead_store_elimination
            ++ add_aggressive_dce
            ++ add_scalar_repl_aggregation
@@ -48,7 +53,6 @@
            ++ add_loop_unswitch
            ++ add_loop_unroll
            ++ add_loop_rotation
-           ++ add_loop_index_split
            ++ add_memory_to_register_promotion
            ++ add_memory_to_register_demotion
            ++ add_reassociation

Modified: llvm/branches/wendling/eh/test/Bindings/Ocaml/target.ml
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Bindings/Ocaml/target.ml?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Bindings/Ocaml/target.ml (original)
+++ llvm/branches/wendling/eh/test/Bindings/Ocaml/target.ml Tue Oct 26 19:48:03 2010
@@ -1,4 +1,6 @@
 (* RUN: %ocamlopt -warn-error A llvm.cmxa llvm_target.cmxa %s -o %t
+ * RUN: %t %t.bc
+ * XFAIL: vg_leak
  *)
 
 (* Note: It takes several seconds for ocamlopt to link an executable with
@@ -8,13 +10,17 @@
 open Llvm
 open Llvm_target
 
+
 let context = global_context ()
 let i32_type = Llvm.i32_type context
 let i64_type = Llvm.i64_type context
 
 (* Tiny unit test framework - really just to help find which line is busted *)
+let print_checkpoints = false
+
 let suite name f =
-  prerr_endline (name ^ ":");
+  if print_checkpoints then
+    prerr_endline (name ^ ":");
   f ()
 
 

Modified: llvm/branches/wendling/eh/test/Bindings/Ocaml/vmcore.ml
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Bindings/Ocaml/vmcore.ml?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Bindings/Ocaml/vmcore.ml (original)
+++ llvm/branches/wendling/eh/test/Bindings/Ocaml/vmcore.ml Tue Oct 26 19:48:03 2010
@@ -1,6 +1,7 @@
 (* RUN: %ocamlopt -warn-error A llvm.cmxa llvm_analysis.cmxa llvm_bitwriter.cmxa %s -o %t
- * RUN: ./%t %t.bc
+ * RUN: %t %t.bc
  * RUN: llvm-dis < %t.bc > %t.ll
+ * XFAIL: vg_leak
  *)
 
 (* Note: It takes several seconds for ocamlopt to link an executable with
@@ -296,12 +297,6 @@
   insist ((struct_type context [| i16_type; i16_type; i32_type; i32_type |])
         = (type_of c));
 
-  group "union";
-  let t = union_type context [| i1_type; i16_type; i64_type; double_type |] in
-  let c = const_union t one in
-  ignore (define_global "const_union" c m);
-  insist (t = (type_of c));
-  
   (* RUN: grep {const_null.*zeroinit} < %t.ll
    *)
   group "null";
@@ -436,7 +431,7 @@
    * RUN: grep {const_select.*select} < %t.ll
    * RUN: grep {const_extractelement.*extractelement} < %t.ll
    * RUN: grep {const_insertelement.*insertelement} < %t.ll
-   * RUN: grep {const_shufflevector.*shufflevector} < %t.ll
+   * RUN: grep {const_shufflevector = global <4 x i32> <i32 0, i32 1, i32 1, i32 0>} < %t.ll
    *)
   ignore (define_global "const_size_of" (size_of (pointer_type i8_type)) m);
   ignore (define_global "const_gep" (const_gep foldbomb_gv [| five |]) m);
@@ -455,7 +450,8 @@
   ignore (define_global "const_shufflevector" (const_shufflevector
     (const_vector [| zero; one |])
     (const_vector [| one; zero |])
-    (const_bitcast foldbomb (vector_type i32_type 2))) m);
+    (const_vector [| const_int i32_type 0; const_int i32_type 1;
+                     const_int i32_type 2; const_int i32_type 3 |])) m);
 
   group "asm"; begin
     let ft = function_type void_type [| i32_type; i32_type; i32_type |] in
@@ -642,11 +638,18 @@
 
   let p1 = param fn 0 in
   let p2 = param fn 1 in
+  let a3 = build_alloca i32_type "user_alloca" b in
+  let p3 = build_load a3 "user_load" b in
   let i = build_add p1 p2 "sum" b in
 
+  insist ((num_operands i) = 2);
   insist ((operand i 0) = p1);
   insist ((operand i 1) = p2);
 
+  set_operand i 1 p3;
+  insist ((operand i 1) != p2);
+  insist ((operand i 1) = p3);
+
   ignore (build_unreachable b)
 
 
@@ -1154,13 +1157,13 @@
   group "comparisons"; begin
     (* RUN: grep {%build_icmp_ne = icmp ne i32 %P1, %P2} < %t.ll
      * RUN: grep {%build_icmp_sle = icmp sle i32 %P2, %P1} < %t.ll
-     * RUN: grep {%build_icmp_false = fcmp false float %F1, %F2} < %t.ll
-     * RUN: grep {%build_icmp_true = fcmp true float %F2, %F1} < %t.ll
+     * RUN: grep {%build_fcmp_false = fcmp false float %F1, %F2} < %t.ll
+     * RUN: grep {%build_fcmp_true = fcmp true float %F2, %F1} < %t.ll
      *)
     ignore (build_icmp Icmp.Ne    p1 p2 "build_icmp_ne" atentry);
     ignore (build_icmp Icmp.Sle   p2 p1 "build_icmp_sle" atentry);
-    ignore (build_fcmp Fcmp.False f1 f2 "build_icmp_false" atentry);
-    ignore (build_fcmp Fcmp.True  f2 f1 "build_icmp_true" atentry)
+    ignore (build_fcmp Fcmp.False f1 f2 "build_fcmp_false" atentry);
+    ignore (build_fcmp Fcmp.True  f2 f1 "build_fcmp_true" atentry)
   end;
   
   group "miscellaneous"; begin
@@ -1229,13 +1232,19 @@
 
   group "dbg"; begin
     (* RUN: grep {%dbg = add i32 %P1, %P2, !dbg !1} < %t.ll
-     * RUN: grep {!1 = metadata !\{i32 2, metadata !"dbg test"\}} < %t.ll
+     * RUN: grep {!1 = metadata !\{i32 2, i32 3, metadata !2, metadata !2\}} < %t.ll
      *)
-    let m1 = const_int i32_type 2 in
-    let m2 = mdstring context "dbg test" in
-    let md = mdnode context [| m1; m2 |] in
+    insist ((current_debug_location atentry) = None);
+
+    let m_line = const_int i32_type 2 in
+    let m_col = const_int i32_type 3 in
+    let m_scope = mdnode context [| |] in
+    let m_inlined = mdnode context [| |] in
+    let md = mdnode context [| m_line; m_col; m_scope; m_inlined |] in
     set_current_debug_location atentry md;
 
+    insist ((current_debug_location atentry) = Some md);
+
     let i = build_add p1 p2 "dbg" atentry in
     insist ((has_metadata i) = true);
 

Modified: llvm/branches/wendling/eh/test/Bitcode/ssse3_palignr.ll.bc
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Bitcode/ssse3_palignr.ll.bc?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
Binary files - no diff available.

Modified: llvm/branches/wendling/eh/test/BugPoint/crash-narrowfunctiontest.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/BugPoint/crash-narrowfunctiontest.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/BugPoint/crash-narrowfunctiontest.ll (original)
+++ llvm/branches/wendling/eh/test/BugPoint/crash-narrowfunctiontest.ll Tue Oct 26 19:48:03 2010
@@ -1,6 +1,8 @@
 ; Test that bugpoint can narrow down the testcase to the important function
+; FIXME: This likely fails on windows
 ;
-; RUN: bugpoint %s -output-prefix %t -bugpoint-crashcalls -silence-passes > /dev/null
+; RUN: bugpoint -load %llvmlibsdir/BugpointPasses%shlibext %s -output-prefix %t -bugpoint-crashcalls -silence-passes > /dev/null
+; XFAIL: mingw
 
 define i32 @foo() { ret i32 1 }
 

Modified: llvm/branches/wendling/eh/test/BugPoint/remove_arguments_test.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/BugPoint/remove_arguments_test.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/BugPoint/remove_arguments_test.ll (original)
+++ llvm/branches/wendling/eh/test/BugPoint/remove_arguments_test.ll Tue Oct 26 19:48:03 2010
@@ -1,5 +1,7 @@
-; RUN: bugpoint %s -output-prefix %t -bugpoint-crashcalls -silence-passes
+; FIXME: This likely fails on windows
+; RUN: bugpoint -load %llvmlibsdir/BugpointPasses%shlibext %s -output-prefix %t -bugpoint-crashcalls -silence-passes
 ; RUN: llvm-dis %t-reduced-simplified.bc -o - | FileCheck %s
+; XFAIL: mingw
 
 ; Test to make sure that arguments are removed from the function if they are 
 ; unnecessary. And clean up any types that that frees up too.

Modified: llvm/branches/wendling/eh/test/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CMakeLists.txt?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CMakeLists.txt (original)
+++ llvm/branches/wendling/eh/test/CMakeLists.txt Tue Oct 26 19:48:03 2010
@@ -18,35 +18,62 @@
 else() # Default for all other unix like systems.
   # CMake hardcodes the library locaction using rpath.
   # Therefore LD_LIBRARY_PATH is not required to run binaries in the
-  # build dir. We pass it anyways. 
+  # build dir. We pass it anyways.
   set(SHLIBPATH_VAR "LD_LIBRARY_PATH")
 endif()
 
 include(FindPythonInterp)
 if(PYTHONINTERP_FOUND)
+  get_directory_property(DEFINITIONS COMPILE_DEFINITIONS)
+  foreach(DEF ${DEFINITIONS})
+    set(DEFS "${DEFS} -D${DEF}")
+  endforeach()
+  get_directory_property(INC_DIRS INCLUDE_DIRECTORIES)
+  foreach(INC_DIR ${INC_DIRS})
+    set(IDIRS "${IDIRS} -I${INC_DIR}")
+  endforeach()
+  string(REPLACE "<CMAKE_CXX_COMPILER>" "${CMAKE_CXX_COMPILER}" TEST_COMPILE_CXX_CMD ${CMAKE_CXX_COMPILE_OBJECT})
+  string(REPLACE "<DEFINES>"            "${DEFS}"               TEST_COMPILE_CXX_CMD ${TEST_COMPILE_CXX_CMD})
+  string(REPLACE "<FLAGS>"              "${CMAKE_CXX_FLAGS}"    TEST_COMPILE_CXX_CMD ${TEST_COMPILE_CXX_CMD})
+  string(REPLACE "-o"                   ""                      TEST_COMPILE_CXX_CMD ${TEST_COMPILE_CXX_CMD})
+  string(REGEX REPLACE "<[^>]+>"        ""                      TEST_COMPILE_CXX_CMD ${TEST_COMPILE_CXX_CMD})
+  set(TEST_COMPILE_CXX_CMD "${TEST_COMPILE_CXX_CMD} ${IDIRS}")
+  if(NOT MSVC)
+    set(TEST_COMPILE_CXX_CMD "${TEST_COMPILE_CXX_CMD} -x c++")
+  endif()
   configure_file(
     ${CMAKE_CURRENT_SOURCE_DIR}/site.exp.in
     ${CMAKE_CURRENT_BINARY_DIR}/site.exp)
 
   MAKE_DIRECTORY(${CMAKE_CURRENT_BINARY_DIR}/Unit)
 
+  set(LLVM_SOURCE_DIR ${LLVM_MAIN_SRC_DIR})
+  set(LLVM_BINARY_DIR ${LLVM_BINARY_DIR})
+  set(LLVM_TOOLS_DIR "${LLVM_TOOLS_BINARY_DIR}/${CMAKE_CFG_INTDIR}")
+  set(LLVMGCCDIR "")
+  set(PYTHON_EXECUTABLE ${PYTHON_EXECUTABLE})
+  if (CMAKE_CONFIGURATION_TYPES)
+    # FIXME: We have no idea. It could be any of them... So just output all of
+    # them.
+    set(LLVM_BUILD_MODE "${CMAKE_CONFIGURATION_TYPES}")
+  elseif (CMAKE_BUILD_TYPE)
+    set(LLVM_BUILD_MODE "${CMAKE_BUILD_TYPE}")
+  else()
+    set(LLVM_BUILD_MODE "None")
+  endif()
+  set(ENABLE_SHARED ${LLVM_SHARED_LIBS_ENABLED})
+  set(SHLIBPATH_VAR ${SHLIBPATH_VAR})
+
+  configure_file(
+    ${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.in
+    ${CMAKE_CURRENT_BINARY_DIR}/lit.site.cfg
+    @ONLY)
+  configure_file(
+    ${CMAKE_CURRENT_SOURCE_DIR}/Unit/lit.site.cfg.in
+    ${CMAKE_CURRENT_BINARY_DIR}/Unit/lit.site.cfg
+    @ONLY)
+
   add_custom_target(check
-    COMMAND sed -e "s#\@LLVM_SOURCE_DIR\@#${LLVM_MAIN_SRC_DIR}#"
-                -e "s#\@LLVM_BINARY_DIR\@#${LLVM_BINARY_DIR}#"
-                -e "s#\@LLVM_TOOLS_DIR\@#${LLVM_TOOLS_BINARY_DIR}/${CMAKE_CFG_INTDIR}#"
-                -e "s#\@LLVMGCCDIR\@##"
-                -e "s#\@PYTHON_EXECUTABLE\@#${PYTHON_EXECUTABLE}#"
-                ${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.in >
-                ${CMAKE_CURRENT_BINARY_DIR}/lit.site.cfg
-    COMMAND sed -e "s#\@LLVM_SOURCE_DIR\@#${LLVM_MAIN_SRC_DIR}#"
-                -e "s#\@LLVM_BINARY_DIR\@#${LLVM_BINARY_DIR}#"
-                -e "s#\@LLVM_TOOLS_DIR\@#${LLVM_TOOLS_BINARY_DIR}/${CMAKE_CFG_INTDIR}#"
-                -e "s#\@LLVMGCCDIR\@##"
-                -e "s#\@LLVM_BUILD_MODE\@#${CMAKE_CFG_INTDIR}#"
-                -e "s#\@ENABLE_SHARED\@#${LLVM_SHARED_LIBS_ENABLED}#"
-                -e "s#\@SHLIBPATH_VAR\@#${SHLIBPATH_VAR}#"
-                ${CMAKE_CURRENT_SOURCE_DIR}/Unit/lit.site.cfg.in >
-                ${CMAKE_CURRENT_BINARY_DIR}/Unit/lit.site.cfg
     COMMAND ${PYTHON_EXECUTABLE}
                 ${LLVM_SOURCE_DIR}/utils/lit/lit.py
                 --param llvm_site_config=${CMAKE_CURRENT_BINARY_DIR}/lit.site.cfg

Propchange: llvm/branches/wendling/eh/test/CodeGen/ARM/
------------------------------------------------------------------------------
--- svn:ignore (original)
+++ svn:ignore Tue Oct 26 19:48:03 2010
@@ -1 +1,2 @@
 Output
+Output/*.script

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll Tue Oct 26 19:48:03 2010
@@ -1,11 +1,15 @@
-; RUN: llc < %s -march=arm -mattr=+v6,+vfp2
+; RUN: llc < %s -march=arm -mattr=+v6,+vfp2 | FileCheck %s
 
 @quant_coef = external global [6 x [4 x [4 x i32]]]		; <[6 x [4 x [4 x i32]]]*> [#uses=1]
 @dequant_coef = external global [6 x [4 x [4 x i32]]]		; <[6 x [4 x [4 x i32]]]*> [#uses=1]
 @A = external global [4 x [4 x i32]]		; <[4 x [4 x i32]]*> [#uses=1]
 
+; CHECK: dct_luma_sp:
 define fastcc i32 @dct_luma_sp(i32 %block_x, i32 %block_y, i32* %coeff_cost) {
 entry:
+; Make sure to use base-updating stores for saving callee-saved registers.
+; CHECK-NOT: sub sp
+; CHECK: vpush 
 	%predicted_block = alloca [4 x [4 x i32]], align 4		; <[4 x [4 x i32]]*> [#uses=1]
 	br label %cond_next489
 

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2007-03-26-RegScavengerAssert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2007-03-26-RegScavengerAssert.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2007-03-26-RegScavengerAssert.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2007-03-26-RegScavengerAssert.ll Tue Oct 26 19:48:03 2010
@@ -2,7 +2,7 @@
 ; PR1266
 
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "arm-linux-gnueabi"
+target triple = "arm-unknown-linux-gnueabi"
 	%struct.CUMULATIVE_ARGS = type { i32, i32, i32, i32, i32, i32 }
 	%struct.FILE = type { i32, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, %struct._IO_marker*, %struct.FILE*, i32, i32, i32, i16, i8, [1 x i8], i8*, i64, i8*, i8*, i32, [52 x i8] }
 	%struct.VEC_edge = type { i32, i32, [1 x %struct.edge_def*] }

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2007-05-31-RegScavengerInfiniteLoop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2007-05-31-RegScavengerInfiniteLoop.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2007-05-31-RegScavengerInfiniteLoop.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2007-05-31-RegScavengerInfiniteLoop.ll Tue Oct 26 19:48:03 2010
@@ -2,7 +2,7 @@
 ; PR1424
 
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "arm-linux-gnueabi"
+target triple = "arm-unknown-linux-gnueabi"
 	%struct.AVClass = type { i8*, i8* (i8*)*, %struct.AVOption* }
 	%struct.AVCodec = type { i8*, i32, i32, i32, i32 (%struct.AVCodecContext*)*, i32 (%struct.AVCodecContext*, i8*, i32, i8*)*, i32 (%struct.AVCodecContext*)*, i32 (%struct.AVCodecContext*, i8*, i32*, i8*, i32)*, i32, %struct.AVCodec*, void (%struct.AVCodecContext*)*, %struct.AVRational*, i32* }
 	%struct.AVCodecContext = type { %struct.AVClass*, i32, i32, i32, i32, i32, i8*, i32, %struct.AVRational, i32, i32, i32, i32, i32, void (%struct.AVCodecContext*, %struct.AVFrame*, i32*, i32, i32, i32)*, i32, i32, i32, i32, i32, i32, i32, float, float, i32, i32, i32, i32, float, i32, i32, i32, %struct.AVCodec*, i8*, i32, i32, void (%struct.AVCodecContext*, i8*, i32, i32)*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8*, [32 x i8], i32, i32, i32, i32, i32, i32, i32, float, i32, i32 (%struct.AVCodecContext*, %struct.AVFrame*)*, void (%struct.AVCodecContext*, %struct.AVFrame*)*, i32, i32, i32, i32, i8*, i8*, float, float, i32, %struct.RcOverride*, i32, i8*, i32, i32, i32, float, float, float, float, i32, float, float, float, float, float, i32, i32, i32, i32*, i32, i32, i32, i32, %struct.AVRational, %struct.AVFrame*, i32, i32, [4 x i64], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 (%struct.AVCodecContext*, i32*)*, i32, i32, i32, i32, i32, i32, i8*, i32, i32,
  i32, i32, i32, i32, i16*, i16*, i32, i32, i32, i32, %struct.AVPaletteControl*, i32, i32 (%struct.AVCodecContext*, %struct.AVFrame*)*, i32, i32, i32, i32, i32, i32, i32, i32 (%struct.AVCodecContext*, i32 (%struct.AVCodecContext*, i8*)*, i8**, i32*, i32)*, i8*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, float, i32, i32, i32, i32, i32, i32, i32, i32, float, i32, i32, i32, i32, i32, i32, float, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i64 }

Removed: llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-21-PostRAKill4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-21-PostRAKill4.ll?rev=117424&view=auto
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-21-PostRAKill4.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-21-PostRAKill4.ll (removed)
@@ -1,26 +0,0 @@
-; RUN: llc < %s -asm-verbose=false -O3 -relocation-model=pic -disable-fp-elim -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -post-RA-scheduler
-
-; ModuleID = '<stdin>'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:64"
-target triple = "armv7-apple-darwin9"
-
-@.str = external constant [36 x i8], align 1      ; <[36 x i8]*> [#uses=0]
-@.str1 = external constant [31 x i8], align 1     ; <[31 x i8]*> [#uses=1]
-@.str2 = external constant [4 x i8], align 1      ; <[4 x i8]*> [#uses=1]
-
-declare i32 @getUnknown(i32, ...) nounwind
-
-declare void @llvm.va_start(i8*) nounwind
-
-declare void @llvm.va_end(i8*) nounwind
-
-declare i32 @printf(i8* nocapture, ...) nounwind
-
-define i32 @main() nounwind {
-entry:
-  %0 = tail call  i32 (i8*, ...)* @printf(i8* getelementptr ([31 x i8]* @.str1, i32 0, i32 0), i32 1, i32 1, i32 1, i32 1, i32 1, i32 1) nounwind ; <i32> [#uses=0]
-  %1 = tail call  i32 (i8*, ...)* @printf(i8* getelementptr ([31 x i8]* @.str1, i32 0, i32 0), i32 -128, i32 116, i32 116, i32 -3852, i32 -31232, i32 -1708916736) nounwind ; <i32> [#uses=0]
-  %2 = tail call  i32 (i32, ...)* @getUnknown(i32 undef, i32 116, i32 116, i32 -3852, i32 -31232, i32 30556, i32 -1708916736) nounwind ; <i32> [#uses=1]
-  %3 = tail call  i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @.str2, i32 0, i32 0), i32 %2) nounwind ; <i32> [#uses=0]
-  ret i32 0
-}

Removed: llvm/branches/wendling/eh/test/CodeGen/ARM/2009-09-01-PostRAProlog.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2009-09-01-PostRAProlog.ll?rev=117424&view=auto
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2009-09-01-PostRAProlog.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2009-09-01-PostRAProlog.ll (removed)
@@ -1,106 +0,0 @@
-; RUN: llc -asm-verbose=false -O3 -relocation-model=pic -disable-fp-elim -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 < %s | FileCheck %s
-
-target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32"
-target triple = "thumbv7-apple-darwin9"
-
-@history = internal global [2 x [56 x i32]] [[56 x i32] [i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 0, i32 1, i32 2, i32 4, i32 2, i32 1, i32 0, i32 -1, i32 1, i32 3, i32 5, i32 7, i32 5, i32 3, i32 1, i32 -1, i32 2, i32 5, i32 8, i32 10, i32 8, i32 5, i32 2, i32 -1, i32 2, i32 5, i32 8, i32 10, i32 8, i32 5, i32 2, i32 -1, i32 1, i32 3, i32 5, i32 7, i32 5, i32 3, i32 1, i32 -1, i32 0, i32 1, i32 2, i32 4, i32 2, i32 1, i32 0], [56 x i32] [i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 0, i32 1, i32 2, i32 4, i32 2, i32 1, i32 0, i32 -1, i32 1, i32 3, i32 5, i32 7, i32 5, i32 3, i32 1, i32 -1, i32 2, i32 5, i32 8, i32 10, i32 8, i32 5, i32 2, i32 -1, i32 2, i32 5, i32 8, i32 10, i32 8, i32 5, i32 2, i32 -1, i32 1, i32 3, i32 5, i32 7, i32 5, i32 3, i32 1, i32 -1, i32 0, i32 1, i32 2, i32 4, i32 2, i32 1, i32 0]] ; <[2 x [56 x i32]]*> [#uses=3]
-@nodes = internal global i64 0                    ; <i64*> [#uses=4]
-@.str = private constant [9 x i8] c"##-<=>+#\00", align 1 ; <[9 x i8]*> [#uses=2]
-@.str1 = private constant [6 x i8] c"%c%d\0A\00", align 1 ; <[6 x i8]*> [#uses=1]
-@.str2 = private constant [16 x i8] c"Fhourstones 2.0\00", align 1 ; <[16 x i8]*> [#uses=1]
-@.str3 = private constant [54 x i8] c"Using %d transposition table entries with %d probes.\0A\00", align 1 ; <[54 x i8]*> [#uses=1]
-@.str4 = private constant [31 x i8] c"Solving %d-ply position after \00", align 1 ; <[31 x i8]*> [#uses=1]
-@.str5 = private constant [7 x i8] c" . . .\00", align 1 ; <[7 x i8]*> [#uses=1]
-@.str6 = private constant [28 x i8] c"score = %d (%c)  work = %d\0A\00", align 1 ; <[28 x i8]*> [#uses=1]
-@.str7 = private constant [36 x i8] c"%lu pos / %lu msec = %.1f Kpos/sec\0A\00", align 1 ; <[36 x i8]*> [#uses=1]
-@plycnt = internal global i32 0                   ; <i32*> [#uses=21]
-@dias = internal global [19 x i32] zeroinitializer ; <[19 x i32]*> [#uses=43]
-@columns = internal global [128 x i32] zeroinitializer ; <[128 x i32]*> [#uses=18]
-@height = internal global [128 x i32] zeroinitializer ; <[128 x i32]*> [#uses=21]
-@rows = internal global [8 x i32] zeroinitializer ; <[8 x i32]*> [#uses=20]
-@colthr = internal global [128 x i32] zeroinitializer ; <[128 x i32]*> [#uses=5]
-@moves = internal global [44 x i32] zeroinitializer ; <[44 x i32]*> [#uses=9]
-@.str8 = private constant [3 x i8] c"%d\00", align 1 ; <[3 x i8]*> [#uses=1]
-@he = internal global i8* null                    ; <i8**> [#uses=9]
-@hits = internal global i64 0                     ; <i64*> [#uses=8]
-@posed = internal global i64 0                    ; <i64*> [#uses=7]
-@ht = internal global i32* null                   ; <i32**> [#uses=5]
-@.str16 = private constant [19 x i8] c"store rate = %.3f\0A\00", align 1 ; <[19 x i8]*> [#uses=1]
-@.str117 = private constant [45 x i8] c"- %5.3f  < %5.3f  = %5.3f  > %5.3f  + %5.3f\0A\00", align 1 ; <[45 x i8]*> [#uses=1]
-@.str218 = private constant [6 x i8] c"%7d%c\00", align 1 ; <[6 x i8]*> [#uses=1]
-@.str319 = private constant [30 x i8] c"Failed to allocate %u bytes.\0A\00", align 1 ; <[30 x i8]*> [#uses=1]
-
-declare i32 @puts(i8* nocapture) nounwind
-
-declare i32 @getchar() nounwind
-
-define internal i32 @transpose() nounwind readonly {
-; CHECK: push
-entry:
-  %0 = load i32* getelementptr inbounds ([128 x i32]* @columns, i32 0, i32 1), align 4 ; <i32> [#uses=1]
-  %1 = shl i32 %0, 7                              ; <i32> [#uses=1]
-  %2 = load i32* getelementptr inbounds ([128 x i32]* @columns, i32 0, i32 2), align 4 ; <i32> [#uses=1]
-  %3 = or i32 %1, %2                              ; <i32> [#uses=1]
-  %4 = shl i32 %3, 7                              ; <i32> [#uses=1]
-  %5 = load i32* getelementptr inbounds ([128 x i32]* @columns, i32 0, i32 3), align 4 ; <i32> [#uses=1]
-  %6 = or i32 %4, %5                              ; <i32> [#uses=3]
-  %7 = load i32* getelementptr inbounds ([128 x i32]* @columns, i32 0, i32 7), align 4 ; <i32> [#uses=1]
-  %8 = shl i32 %7, 7                              ; <i32> [#uses=1]
-  %9 = load i32* getelementptr inbounds ([128 x i32]* @columns, i32 0, i32 6), align 4 ; <i32> [#uses=1]
-  %10 = or i32 %8, %9                             ; <i32> [#uses=1]
-  %11 = shl i32 %10, 7                            ; <i32> [#uses=1]
-  %12 = load i32* getelementptr inbounds ([128 x i32]* @columns, i32 0, i32 5), align 4 ; <i32> [#uses=1]
-  %13 = or i32 %11, %12                           ; <i32> [#uses=3]
-  %14 = icmp ugt i32 %6, %13                      ; <i1> [#uses=2]
-  %.pn2.in.i = select i1 %14, i32 %6, i32 %13     ; <i32> [#uses=1]
-  %.pn1.in.i = select i1 %14, i32 %13, i32 %6     ; <i32> [#uses=1]
-  %.pn2.i = shl i32 %.pn2.in.i, 7                 ; <i32> [#uses=1]
-  %.pn3.i = load i32* getelementptr inbounds ([128 x i32]* @columns, i32 0, i32 4) ; <i32> [#uses=1]
-  %.pn.in.in.i = or i32 %.pn2.i, %.pn3.i          ; <i32> [#uses=1]
-  %.pn.in.i = zext i32 %.pn.in.in.i to i64        ; <i64> [#uses=1]
-  %.pn.i = shl i64 %.pn.in.i, 21                  ; <i64> [#uses=1]
-  %.pn1.i = zext i32 %.pn1.in.i to i64            ; <i64> [#uses=1]
-  %iftmp.22.0.i = or i64 %.pn.i, %.pn1.i          ; <i64> [#uses=2]
-  %15 = lshr i64 %iftmp.22.0.i, 17                ; <i64> [#uses=1]
-  %16 = trunc i64 %15 to i32                      ; <i32> [#uses=2]
-  %17 = urem i64 %iftmp.22.0.i, 1050011           ; <i64> [#uses=1]
-  %18 = trunc i64 %17 to i32                      ; <i32> [#uses=1]
-  %19 = urem i32 %16, 179                         ; <i32> [#uses=1]
-  %20 = or i32 %19, 131072                        ; <i32> [#uses=1]
-  %21 = load i32** @ht, align 4                   ; <i32*> [#uses=1]
-  br label %bb5
-
-bb:                                               ; preds = %bb5
-  %22 = getelementptr inbounds i32* %21, i32 %x.0 ; <i32*> [#uses=1]
-  %23 = load i32* %22, align 4                    ; <i32> [#uses=1]
-  %24 = icmp eq i32 %23, %16                      ; <i1> [#uses=1]
-  br i1 %24, label %bb1, label %bb2
-
-bb1:                                              ; preds = %bb
-  %25 = load i8** @he, align 4                    ; <i8*> [#uses=1]
-  %26 = getelementptr inbounds i8* %25, i32 %x.0  ; <i8*> [#uses=1]
-  %27 = load i8* %26, align 1                     ; <i8> [#uses=1]
-  %28 = sext i8 %27 to i32                        ; <i32> [#uses=1]
-  ret i32 %28
-
-bb2:                                              ; preds = %bb
-  %29 = add nsw i32 %20, %x.0                     ; <i32> [#uses=3]
-  %30 = add i32 %29, -1050011                     ; <i32> [#uses=1]
-  %31 = icmp sgt i32 %29, 1050010                 ; <i1> [#uses=1]
-  %. = select i1 %31, i32 %30, i32 %29            ; <i32> [#uses=1]
-  %32 = add i32 %33, 1                            ; <i32> [#uses=1]
-  br label %bb5
-
-bb5:                                              ; preds = %bb2, %entry
-  %33 = phi i32 [ 0, %entry ], [ %32, %bb2 ]      ; <i32> [#uses=2]
-  %x.0 = phi i32 [ %18, %entry ], [ %., %bb2 ]    ; <i32> [#uses=3]
-  %34 = icmp sgt i32 %33, 7                       ; <i1> [#uses=1]
-  br i1 %34, label %bb7, label %bb
-
-bb7:                                              ; preds = %bb5
-  ret i32 -128
-}
-
-declare noalias i8* @calloc(i32, i32) nounwind
-
-declare void @llvm.memset.i64(i8* nocapture, i8, i64, i32) nounwind

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2009-09-28-LdStOptiBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2009-09-28-LdStOptiBug.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2009-09-28-LdStOptiBug.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2009-09-28-LdStOptiBug.ll Tue Oct 26 19:48:03 2010
@@ -5,7 +5,7 @@
 
 define void @foo(%0* noalias nocapture sret %agg.result, double %x.0, double %y.0) nounwind {
 ; CHECK: foo:
-; CHECK: bl __adddf3
+; CHECK: bl __aeabi_dadd
 ; CHECK-NOT: strd
 ; CHECK: mov
   %x76 = fmul double %y.0, 0.000000e+00           ; <double> [#uses=1]

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2009-12-02-vtrn-undef.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2009-12-02-vtrn-undef.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2009-12-02-vtrn-undef.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2009-12-02-vtrn-undef.ll Tue Oct 26 19:48:03 2010
@@ -17,3 +17,17 @@
   store <8 x i16> %1, <8 x i16>* %agg.result12.1.0, align 16
   ret void
 }
+
+; Radar 8290937: Ignore undef shuffle indices.
+; CHECK: t2
+; CHECK: vtrn.16
+define void @t2(%struct.int16x8x2_t* nocapture %ptr, <4 x i16> %a.0, <4 x i16> %b.0) nounwind {
+entry:
+  %0 = shufflevector <4 x i16> %a.0, <4 x i16> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 undef, i32 undef, i32 undef, i32 undef>
+  %1 = shufflevector <4 x i16> %a.0, <4 x i16> undef, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+  %ptr26.0 = getelementptr inbounds %struct.int16x8x2_t* %ptr, i32 0, i32 0, i32 0, i32 0
+  store <8 x i16> %0, <8 x i16>* %ptr26.0, align 16
+  %ptr20.1.0 = getelementptr inbounds %struct.int16x8x2_t* %ptr, i32 0, i32 0, i32 1, i32 0
+  store <8 x i16> %1, <8 x i16>* %ptr20.1.0, align 16
+  ret void
+}

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2010-05-20-NEONSpillCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2010-05-20-NEONSpillCrash.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2010-05-20-NEONSpillCrash.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2010-05-20-NEONSpillCrash.ll Tue Oct 26 19:48:03 2010
@@ -5,32 +5,32 @@
 
 %struct.__neon_int8x8x3_t = type { <8 x i8>, <8 x i8>, <8 x i8> }
 
-declare %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8*) nounwind readonly
+declare %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8*, i32) nounwind readonly
 
-declare void @llvm.arm.neon.vst3.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>) nounwind
+declare void @llvm.arm.neon.vst3.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32) nounwind
 
 define <8 x i8> @t3(i8* %A1, i8* %A2, i8* %A3, i8* %A4, i8* %A5, i8* %A6, i8* %A7, i8* %A8, i8* %B) nounwind {
-  %tmp1b = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A2) ; <%struct.__neon_int8x8x3_t> [#uses=2]
+  %tmp1b = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A2, i32 1) ; <%struct.__neon_int8x8x3_t> [#uses=2]
   %tmp2b = extractvalue %struct.__neon_int8x8x3_t %tmp1b, 0 ; <<8 x i8>> [#uses=1]
   %tmp4b = extractvalue %struct.__neon_int8x8x3_t %tmp1b, 1 ; <<8 x i8>> [#uses=1]
-  %tmp1d = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A4) ; <%struct.__neon_int8x8x3_t> [#uses=2]
+  %tmp1d = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A4, i32 1) ; <%struct.__neon_int8x8x3_t> [#uses=2]
   %tmp2d = extractvalue %struct.__neon_int8x8x3_t %tmp1d, 0 ; <<8 x i8>> [#uses=1]
   %tmp4d = extractvalue %struct.__neon_int8x8x3_t %tmp1d, 1 ; <<8 x i8>> [#uses=1]
-  %tmp1e = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A5) ; <%struct.__neon_int8x8x3_t> [#uses=1]
+  %tmp1e = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A5, i32 1) ; <%struct.__neon_int8x8x3_t> [#uses=1]
   %tmp2e = extractvalue %struct.__neon_int8x8x3_t %tmp1e, 0 ; <<8 x i8>> [#uses=1]
-  %tmp1f = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A6) ; <%struct.__neon_int8x8x3_t> [#uses=1]
+  %tmp1f = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A6, i32 1) ; <%struct.__neon_int8x8x3_t> [#uses=1]
   %tmp2f = extractvalue %struct.__neon_int8x8x3_t %tmp1f, 0 ; <<8 x i8>> [#uses=1]
-  %tmp1g = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A7) ; <%struct.__neon_int8x8x3_t> [#uses=2]
+  %tmp1g = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A7, i32 1) ; <%struct.__neon_int8x8x3_t> [#uses=2]
   %tmp2g = extractvalue %struct.__neon_int8x8x3_t %tmp1g, 0 ; <<8 x i8>> [#uses=1]
   %tmp4g = extractvalue %struct.__neon_int8x8x3_t %tmp1g, 1 ; <<8 x i8>> [#uses=1]
-  %tmp1h = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A8) ; <%struct.__neon_int8x8x3_t> [#uses=2]
+  %tmp1h = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A8, i32 1) ; <%struct.__neon_int8x8x3_t> [#uses=2]
   %tmp2h = extractvalue %struct.__neon_int8x8x3_t %tmp1h, 0 ; <<8 x i8>> [#uses=1]
   %tmp3h = extractvalue %struct.__neon_int8x8x3_t %tmp1h, 2 ; <<8 x i8>> [#uses=1]
   %tmp2bd = add <8 x i8> %tmp2b, %tmp2d           ; <<8 x i8>> [#uses=1]
   %tmp4bd = add <8 x i8> %tmp4b, %tmp4d           ; <<8 x i8>> [#uses=1]
   %tmp2abcd = mul <8 x i8> undef, %tmp2bd         ; <<8 x i8>> [#uses=1]
   %tmp4abcd = mul <8 x i8> undef, %tmp4bd         ; <<8 x i8>> [#uses=2]
-  call void @llvm.arm.neon.vst3.v8i8(i8* %A1, <8 x i8> %tmp4abcd, <8 x i8> zeroinitializer, <8 x i8> %tmp2abcd)
+  call void @llvm.arm.neon.vst3.v8i8(i8* %A1, <8 x i8> %tmp4abcd, <8 x i8> zeroinitializer, <8 x i8> %tmp2abcd, i32 1)
   %tmp2ef = sub <8 x i8> %tmp2e, %tmp2f           ; <<8 x i8>> [#uses=1]
   %tmp2gh = sub <8 x i8> %tmp2g, %tmp2h           ; <<8 x i8>> [#uses=1]
   %tmp3gh = sub <8 x i8> zeroinitializer, %tmp3h  ; <<8 x i8>> [#uses=1]
@@ -38,8 +38,8 @@
   %tmp2efgh = mul <8 x i8> %tmp2ef, %tmp2gh       ; <<8 x i8>> [#uses=1]
   %tmp3efgh = mul <8 x i8> undef, %tmp3gh         ; <<8 x i8>> [#uses=1]
   %tmp4efgh = mul <8 x i8> %tmp4ef, undef         ; <<8 x i8>> [#uses=2]
-  call void @llvm.arm.neon.vst3.v8i8(i8* %A2, <8 x i8> %tmp4efgh, <8 x i8> %tmp3efgh, <8 x i8> %tmp2efgh)
+  call void @llvm.arm.neon.vst3.v8i8(i8* %A2, <8 x i8> %tmp4efgh, <8 x i8> %tmp3efgh, <8 x i8> %tmp2efgh, i32 1)
   %tmp4 = sub <8 x i8> %tmp4efgh, %tmp4abcd       ; <<8 x i8>> [#uses=1]
-  tail call void @llvm.arm.neon.vst3.v8i8(i8* %B, <8 x i8> zeroinitializer, <8 x i8> undef, <8 x i8> undef)
+  tail call void @llvm.arm.neon.vst3.v8i8(i8* %B, <8 x i8> zeroinitializer, <8 x i8> undef, <8 x i8> undef, i32 1)
   ret <8 x i8> %tmp4
 }

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2010-05-21-BuildVector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2010-05-21-BuildVector.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2010-05-21-BuildVector.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2010-05-21-BuildVector.ll Tue Oct 26 19:48:03 2010
@@ -36,8 +36,8 @@
   %tmp5 = insertelement <4 x float> %tmp7, float %18, i32 3
   %19 = fmul <4 x float> %tmp5, %2
   %20 = bitcast float* %fltp to i8*
-  tail call void @llvm.arm.neon.vst1.v4f32(i8* %20, <4 x float> %19)
+  tail call void @llvm.arm.neon.vst1.v4f32(i8* %20, <4 x float> %19, i32 1)
   ret void
 }
 
-declare void @llvm.arm.neon.vst1.v4f32(i8*, <4 x float>) nounwind
+declare void @llvm.arm.neon.vst1.v4f32(i8*, <4 x float>, i32) nounwind

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2010-06-11-vmovdrr-bitcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2010-06-11-vmovdrr-bitcast.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2010-06-11-vmovdrr-bitcast.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2010-06-11-vmovdrr-bitcast.ll Tue Oct 26 19:48:03 2010
@@ -12,8 +12,8 @@
  %tmp9 = trunc i128 %tmp8 to i64                 ; <i64> [#uses=1]
  %tmp16.i = bitcast i64 %tmp6 to <8 x i8>        ; <<8 x i8>> [#uses=1]
  %tmp20.i = bitcast i64 %tmp9 to <8 x i8>        ; <<8 x i8>> [#uses=1]
- tail call void @llvm.arm.neon.vst2.v8i8(i8* %b, <8 x i8> %tmp16.i, <8 x i8> %tmp20.i) nounwind
+ tail call void @llvm.arm.neon.vst2.v8i8(i8* %b, <8 x i8> %tmp16.i, <8 x i8> %tmp20.i, i32 1) nounwind
  ret void
 }
 
-declare void @llvm.arm.neon.vst2.v8i8(i8*, <8 x i8>, <8 x i8>) nounwind
+declare void @llvm.arm.neon.vst2.v8i8(i8*, <8 x i8>, <8 x i8>, i32) nounwind

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2010-06-21-nondarwin-tc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2010-06-21-nondarwin-tc.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2010-06-21-nondarwin-tc.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2010-06-21-nondarwin-tc.ll Tue Oct 26 19:48:03 2010
@@ -1,5 +1,6 @@
 ; RUN: llc < %s -march=arm -mtriple=armv4t-unknown-linux-gnueabi  | FileCheck %s
 ; PR 7433
+; XFAIL: *
 
 %0 = type { i8*, i8* }
 %1 = type { i8*, i8*, i8* }

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2010-06-29-PartialRedefFastAlloc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2010-06-29-PartialRedefFastAlloc.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2010-06-29-PartialRedefFastAlloc.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2010-06-29-PartialRedefFastAlloc.ll Tue Oct 26 19:48:03 2010
@@ -10,16 +10,16 @@
 ; %reg1028 gets allocated %Q0, and if %reg1030 is reloaded for the partial
 ; redef, it cannot also get %Q0.
 
-; CHECK: vld1.64 {d0, d1}, [r{{.}}]
-; CHECK-NOT: vld1.64 {d0, d1}
-; CHECK: vmov.f64 d3, d0
+; CHECK: vld1.64 {d16, d17}, [r{{.}}]
+; CHECK-NOT: vld1.64 {d16, d17}
+; CHECK: vmov.f64 d19, d16
 
 define i32 @test(i8* %arg) nounwind {
 entry:
- %0 = call <2 x i64> @llvm.arm.neon.vld1.v2i64(i8* %arg)
+ %0 = call <2 x i64> @llvm.arm.neon.vld1.v2i64(i8* %arg, i32 1)
  %1 = shufflevector <2 x i64> undef, <2 x i64> %0, <2 x i32> <i32 1, i32 2>
  store <2 x i64> %1, <2 x i64>* undef, align 16
  ret i32 undef
 }
 
-declare <2 x i64> @llvm.arm.neon.vld1.v2i64(i8*) nounwind readonly
+declare <2 x i64> @llvm.arm.neon.vld1.v2i64(i8*, i32) nounwind readonly

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/align.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/align.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/align.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/align.ll Tue Oct 26 19:48:03 2010
@@ -22,7 +22,7 @@
 @e = global i64 4
 ;ELF: .align 3
 ;ELF: e
-;DARWIN: .align 2
+;DARWIN: .align 3
 ;DARWIN: _e:
 
 @f = global float 5.0
@@ -34,7 +34,7 @@
 @g = global double 6.0
 ;ELF: .align 3
 ;ELF: g:
-;DARWIN: .align 2
+;DARWIN: .align 3
 ;DARWIN: _g:
 
 @bar = common global [75 x i8] zeroinitializer, align 128

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/arguments.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/arguments.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/arguments.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/arguments.ll Tue Oct 26 19:48:03 2010
@@ -13,8 +13,8 @@
 ; test that allocating the double to r2/r3 makes r1 unavailable on gnueabi.
 define i32 @f2() nounwind optsize {
 ; ELF: f2:
-; ELF: mov  r0, #128
-; ELF: str  r0, [sp]
+; ELF: mov  [[REGISTER:(r[0-9]+)]], #128
+; ELF: str  [[REGISTER]], [sp]
 ; DARWIN: f2:
 ; DARWIN: mov	r3, #128
 entry:
@@ -24,6 +24,20 @@
   ret i32 %.0
 }
 
+; test that on gnueabi a 64-bit value at this position causes r3 to go
+; unused and the value to be stored in [sp]
+; ELF: f3:
+; ELF: ldr r0, [sp]
+; ELF-NEXT: mov pc, lr
+; DARWIN: f3:
+; DARWIN: mov r0, r3
+; DARWIN-NEXT: mov pc, lr
+define i32 @f3(i32 %i, i32 %j, i32 %k, i64 %l, ...) {
+entry:
+  %0 = trunc i64 %l to i32
+  ret i32 %0
+}
+
 declare i32 @g1(i64)
 
 declare i32 @g2(i32 %i, ...)

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/bfi.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/bfi.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/bfi.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/bfi.ll Tue Oct 26 19:48:03 2010
@@ -19,7 +19,7 @@
 define i32 @f2(i32 %A, i32 %B) nounwind readnone optsize {
 entry:
 ; CHECK: f2
-; CHECK: mov r1, r1, lsr #7
+; CHECK: lsr{{.*}}#7
 ; CHECK: bfi r0, r1, #7, #16
   %and = and i32 %A, -8388481                     ; <i32> [#uses=1]
   %and2 = and i32 %B, 8388480                     ; <i32> [#uses=1]
@@ -30,7 +30,7 @@
 define i32 @f3(i32 %A, i32 %B) nounwind readnone optsize {
 entry:
 ; CHECK: f3
-; CHECK: mov r2, r0, lsr #7
+; CHECK: lsr{{.*}} #7
 ; CHECK: mov r0, r1
 ; CHECK: bfi r0, r2, #7, #16
   %and = and i32 %A, 8388480                      ; <i32> [#uses=1]

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/bits.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/bits.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/bits.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/bits.ll Tue Oct 26 19:48:03 2010
@@ -1,36 +1,41 @@
-; RUN: llc < %s -march=arm > %t
-; RUN: grep and      %t | count 1
-; RUN: grep orr      %t | count 1
-; RUN: grep eor      %t | count 1
-; RUN: grep mov.*lsl %t | count 1
-; RUN: grep mov.*asr %t | count 1
+; RUN: llc < %s -march=arm | FileCheck %s
 
 define i32 @f1(i32 %a, i32 %b) {
 entry:
+; CHECK: f1
+; CHECK: and r0, r1, r0
 	%tmp2 = and i32 %b, %a		; <i32> [#uses=1]
 	ret i32 %tmp2
 }
 
 define i32 @f2(i32 %a, i32 %b) {
 entry:
+; CHECK: f2
+; CHECK: orr r0, r1, r0
 	%tmp2 = or i32 %b, %a		; <i32> [#uses=1]
 	ret i32 %tmp2
 }
 
 define i32 @f3(i32 %a, i32 %b) {
 entry:
+; CHECK: f3
+; CHECK: eor r0, r1, r0
 	%tmp2 = xor i32 %b, %a		; <i32> [#uses=1]
 	ret i32 %tmp2
 }
 
 define i32 @f4(i32 %a, i32 %b) {
 entry:
+; CHECK: f4
+; CHECK: lsl
 	%tmp3 = shl i32 %a, %b		; <i32> [#uses=1]
 	ret i32 %tmp3
 }
 
 define i32 @f5(i32 %a, i32 %b) {
 entry:
+; CHECK: f5
+; CHECK: asr
 	%tmp3 = ashr i32 %a, %b		; <i32> [#uses=1]
 	ret i32 %tmp3
 }

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/call-tc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/call-tc.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/call-tc.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/call-tc.ll Tue Oct 26 19:48:03 2010
@@ -2,6 +2,7 @@
 ; RUN: llc < %s -march=arm -mattr=+v5t | FileCheck %s -check-prefix=CHECKV5
 ; RUN: llc < %s -march=arm -mtriple=arm-linux-gnueabi\
 ; RUN:   -relocation-model=pic | FileCheck %s -check-prefix=CHECKELF
+; XFAIL: *
 
 @t = weak global i32 ()* null           ; <i32 ()**> [#uses=1]
 

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/clz.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/clz.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/clz.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/clz.ll Tue Oct 26 19:48:03 2010
@@ -1,8 +1,10 @@
-; RUN: llc < %s -march=arm -mattr=+v5t | grep clz
+; RUN: llc < %s -march=arm -mattr=+v5t | FileCheck %s
 
 declare i32 @llvm.ctlz.i32(i32)
 
 define i32 @test(i32 %x) {
-        %tmp.1 = call i32 @llvm.ctlz.i32( i32 %x )              ; <i32> [#uses=1]
+; CHECK: test
+; CHECK: clz r0, r0
+        %tmp.1 = call i32 @llvm.ctlz.i32( i32 %x )
         ret i32 %tmp.1
 }

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/constants.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/constants.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/constants.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/constants.ll Tue Oct 26 19:48:03 2010
@@ -14,34 +14,33 @@
 
 define i32 @f3() {
 ; CHECK: f3
-; CHECK: mov r0{{.*}}256
+; CHECK: mov r0, #1, 24
         ret i32 256
 }
 
 define i32 @f4() {
 ; CHECK: f4
-; CHECK: orr{{.*}}256
+; CHECK: orr{{.*}}#1, 24
         ret i32 257
 }
 
 define i32 @f5() {
 ; CHECK: f5
-; CHECK: mov r0, {{.*}}-1073741761
+; CHECK: mov r0, #255, 2
         ret i32 -1073741761
 }
 
 define i32 @f6() {
 ; CHECK: f6
-; CHECK: mov r0, {{.*}}1008
+; CHECK: mov r0, #63, 28
         ret i32 1008
 }
 
 define void @f7(i32 %a) {
 ; CHECK: f7
 ; CHECK: cmp r0, #1, 16
-        %b = icmp ugt i32 %a, 65536             ; <i1> [#uses=1]
+        %b = icmp ugt i32 %a, 65536
         br i1 %b, label %r, label %r
-
-r:              ; preds = %0, %0
+r:
         ret void
 }

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/div.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/div.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/div.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/div.ll Tue Oct 26 19:48:03 2010
@@ -1,13 +1,9 @@
 ; RUN: llc < %s -march=arm | FileCheck %s -check-prefix=CHECK-ARM
-; RUN: llc < %s -march=arm -mcpu=cortex-m3 \
-; RUN:    | FileCheck %s -check-prefix=CHECK-ARMV7M
 
 define i32 @f1(i32 %a, i32 %b) {
 entry:
 ; CHECK-ARM: f1
 ; CHECK-ARM: __divsi3
-; CHECK-ARMV7M: f1
-; CHECK-ARMV7M: sdiv
         %tmp1 = sdiv i32 %a, %b         ; <i32> [#uses=1]
         ret i32 %tmp1
 }
@@ -16,8 +12,6 @@
 entry:
 ; CHECK-ARM: f2
 ; CHECK-ARM: __udivsi3
-; CHECK-ARMV7M: f2
-; CHECK-ARMV7M: udiv
         %tmp1 = udiv i32 %a, %b         ; <i32> [#uses=1]
         ret i32 %tmp1
 }
@@ -26,8 +20,6 @@
 entry:
 ; CHECK-ARM: f3
 ; CHECK-ARM: __modsi3
-; CHECK-ARMV7M: f3
-; CHECK-ARMV7M: sdiv
         %tmp1 = srem i32 %a, %b         ; <i32> [#uses=1]
         ret i32 %tmp1
 }
@@ -36,8 +28,6 @@
 entry:
 ; CHECK-ARM: f4
 ; CHECK-ARM: __umodsi3
-; CHECK-ARMV7M: f4
-; CHECK-ARMV7M: udiv
         %tmp1 = urem i32 %a, %b         ; <i32> [#uses=1]
         ret i32 %tmp1
 }

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/fast-isel.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/fast-isel.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/fast-isel.ll Tue Oct 26 19:48:03 2010
@@ -1,8 +1,9 @@
-; RUN: llc < %s -fast-isel -fast-isel-abort -march=arm
+; RUN: llc < %s -O0 -fast-isel-abort -mtriple=armv7-apple-darwin
+; RUN: llc < %s -O0 -fast-isel-abort -mtriple=thumbv7-apple-darwin
 
 ; Very basic fast-isel functionality.
 
-define i32 @add(i32 %a, i32 %b) nounwind ssp {
+define i32 @add(i32 %a, i32 %b) nounwind {
 entry:
   %a.addr = alloca i32, align 4
   %b.addr = alloca i32, align 4
@@ -13,3 +14,26 @@
   %add = add nsw i32 %tmp, %tmp1
   ret i32 %add
 }
+
+define i32* @foo(i32* %p, i32* %q, i32** %z) nounwind {
+entry:
+  %r = load i32* %p
+  %s = load i32* %q
+  %y = load i32** %z
+  br label %fast
+
+fast:
+  %t0 = add i32 %r, %s
+  %t1 = mul i32 %t0, %s
+  %t2 = sub i32 %t1, %s
+  %t3 = and i32 %t2, %s
+  %t4 = xor i32 %t3, 3
+  %t5 = xor i32 %t4, %s
+  %t6 = add i32 %t5, 2
+  %t7 = getelementptr i32* %y, i32 1
+  %t8 = getelementptr i32* %t7, i32 %t6
+  br label %exit
+
+exit:
+  ret i32* %t8
+}

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/fmscs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/fmscs.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/fmscs.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/fmscs.ll Tue Oct 26 19:48:03 2010
@@ -19,6 +19,6 @@
 ; NFP0: 	vnmls.f32	s2, s1, s0
 
 ; CORTEXA8: test:
-; CORTEXA8: 	vnmls.f32	s2, s1, s0
+; CORTEXA8: 	vnmls.f32	s1, s2, s0
 ; CORTEXA9: test:
 ; CORTEXA9: 	vnmls.f32	s0, s1, s2

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/fnmuls.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/fnmuls.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/fnmuls.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/fnmuls.ll Tue Oct 26 19:48:03 2010
@@ -1,20 +1,18 @@
-; XFAIL: *
 ; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s
-; RUN: llc < %s -march=arm -mattr=+neon -arm-use-neon-fp=1 | FileCheck %s
-; RUN: llc < %s -march=arm -mattr=+neon -arm-use-neon-fp=0 | FileCheck %s
+; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
 ; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s
 ; RUN: llc < %s -march=arm -mcpu=cortex-a9 | FileCheck %s
 
-define float @test1(float %a, float %b) nounwind {
-; CHECK: fnmscs s2, s1, s0 
+define arm_aapcs_vfpcc float @test1(float %a, float %b) nounwind {
+; CHECK: vnmul.f32 s0, s0, s1 
 entry:
 	%0 = fmul float %a, %b
         %1 = fsub float -0.0, %0
 	ret float %1
 }
 
-define float @test2(float %a, float %b) nounwind {
-; CHECK: fnmscs s2, s1, s0 
+define arm_aapcs_vfpcc float @test2(float %a, float %b) nounwind {
+; CHECK: vnmul.f32 s0, s0, s1 
 entry:
 	%0 = fmul float %a, %b
         %1 = fmul float -1.0, %0

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/fp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/fp.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/fp.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/fp.ll Tue Oct 26 19:48:03 2010
@@ -51,7 +51,7 @@
 
 define float @h2() {
 ;CHECK: h2:
-;CHECK: 1065353216
+;CHECK: mov r0, #254, 10
 entry:
         ret float 1.000000e+00
 }

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/fpconsts.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/fpconsts.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/fpconsts.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/fpconsts.ll Tue Oct 26 19:48:03 2010
@@ -11,7 +11,7 @@
 define double @t2(double %x) nounwind readnone optsize {
 entry:
 ; CHECK: t2:
-; CHECK: vmov.f64 d1, #3.000000e+00
+; CHECK: vmov.f64 d{{.*}}, #3.000000e+00
   %0 = fadd double %x, 3.000000e+00
   ret double %0
 }
@@ -19,7 +19,7 @@
 define double @t3(double %x) nounwind readnone optsize {
 entry:
 ; CHECK: t3:
-; CHECK: vmov.f64 d1, #-1.300000e+01
+; CHECK: vmov.f64 d{{.*}}, #-1.300000e+01
   %0 = fmul double %x, -1.300000e+01
   ret double %0
 }

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/fpowi.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/fpowi.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/fpowi.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/fpowi.ll Tue Oct 26 19:48:03 2010
@@ -3,7 +3,7 @@
 
 ; ModuleID = '<stdin>'
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "arm-linux-gnueabi"
+target triple = "arm-unknown-linux-gnueabi"
 
 define double @_ZSt3powdi(double %__x, i32 %__i) {
 entry:

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/inlineasm3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/inlineasm3.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/inlineasm3.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/inlineasm3.ll Tue Oct 26 19:48:03 2010
@@ -7,7 +7,7 @@
 entry:
 ; CHECK: vmov.I64 q15, #0
 ; CHECK: vmov.32 d30[0], r0
-; CHECK: vmov q0, q15
+; CHECK: vmov q8, q15
   %tmp = alloca %struct.int32x4_t, align 16
   call void asm sideeffect "vmov.I64 q15, #0\0Avmov.32 d30[0], $1\0Avmov ${0:q}, q15\0A", "=*w,r,~{d31},~{d30}"(%struct.int32x4_t* %tmp, i32 8192) nounwind
   ret void
@@ -18,7 +18,7 @@
 
 define void @t2() nounwind {
 entry:
-; CHECK: vmov d30, d0
+; CHECK: vmov d30, d16
 ; CHECK: vmov.32 r0, d30[0]
   %asmtmp2 = tail call i32 asm sideeffect "vmov d30, $1\0Avmov.32 $0, d30[0]\0A", "=r,w,~{d30}"(<2 x i32> undef) nounwind
   ret void

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/ispositive.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/ispositive.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/ispositive.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/ispositive.ll Tue Oct 26 19:48:03 2010
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -march=arm | FileCheck %s
 
 define i32 @test1(i32 %X) {
-; CHECK: mov r0, r0, lsr #31
+; CHECK: lsr{{.*}}#31
 entry:
         icmp slt i32 %X, 0              ; <i1>:0 [#uses=1]
         zext i1 %0 to i32               ; <i32>:1 [#uses=1]

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/long.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/long.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/long.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/long.ll Tue Oct 26 19:48:03 2010
@@ -14,22 +14,22 @@
 
 define i64 @f3() {
 ; CHECK: f3:
-; CHECK: mvn{{.*}}-2147483648
+; CHECK: mvn r0, #2, 2
 entry:
         ret i64 2147483647
 }
 
 define i64 @f4() {
 ; CHECK: f4:
-; CHECK: -2147483648
+; CHECK: mov r0, #2, 2
 entry:
         ret i64 2147483648
 }
 
 define i64 @f5() {
 ; CHECK: f5:
-; CHECK: mvn
-; CHECK: mvn{{.*}}-2147483648
+; CHECK: mvn r0, #0
+; CHECK: mvn r1, #2, 2
 entry:
         ret i64 9223372036854775807
 }

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/long_shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/long_shift.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/long_shift.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/long_shift.ll Tue Oct 26 19:48:03 2010
@@ -2,8 +2,8 @@
 
 define i64 @f0(i64 %A, i64 %B) {
 ; CHECK: f0
-; CHECK:      movs    r3, r3, lsr #1
-; CHECK-NEXT: mov     r2, r2, rrx
+; CHECK:      lsrs    r3, r3, #1
+; CHECK-NEXT: rrx     r2, r2
 ; CHECK-NEXT: subs    r0, r0, r2
 ; CHECK-NEXT: sbc     r1, r1, r3
 	%tmp = bitcast i64 %A to i64
@@ -14,7 +14,7 @@
 
 define i32 @f1(i64 %x, i64 %y) {
 ; CHECK: f1
-; CHECK: mov r0, r0, lsl r2
+; CHECK: lsl{{.*}}r2
 	%a = shl i64 %x, %y
 	%b = trunc i64 %a to i32
 	ret i32 %b
@@ -22,10 +22,9 @@
 
 define i32 @f2(i64 %x, i64 %y) {
 ; CHECK: f2
-; CHECK:      mov     r0, r0, lsr r2
+; CHECK:      lsr{{.*}}r2
 ; CHECK-NEXT: rsb     r3, r2, #32
-; CHECK-NEXT: sub     r2, r2, #32
-; CHECK-NEXT: cmp     r2, #0
+; CHECK-NEXT: subs    r2, r2, #32
 ; CHECK-NEXT: orr     r0, r0, r1, lsl r3
 ; CHECK-NEXT: movge   r0, r1, asr r2
 	%a = ashr i64 %x, %y
@@ -35,10 +34,9 @@
 
 define i32 @f3(i64 %x, i64 %y) {
 ; CHECK: f3
-; CHECK:      mov     r0, r0, lsr r2
+; CHECK:      lsr{{.*}}r2
 ; CHECK-NEXT: rsb     r3, r2, #32
-; CHECK-NEXT: sub     r2, r2, #32
-; CHECK-NEXT: cmp     r2, #0
+; CHECK-NEXT: subs    r2, r2, #32
 ; CHECK-NEXT: orr     r0, r0, r1, lsl r3
 ; CHECK-NEXT: movge   r0, r1, lsr r2
 	%a = lshr i64 %x, %y

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/lsr-code-insertion.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/lsr-code-insertion.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/lsr-code-insertion.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/lsr-code-insertion.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: llc < %s -stats |& grep {38.*Number of machine instrs printed}
+; RUN: llc < %s -stats |& grep {36.*Number of machine instrs printed}
 ; RUN: llc < %s -stats |& not grep {.*Number of re-materialization}
 ; This test really wants to check that the resultant "cond_true" block only 
 ; has a single store in it, and that cond_true55 only has code to materialize 

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/lsr-on-unrolled-loops.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/lsr-on-unrolled-loops.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/lsr-on-unrolled-loops.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/lsr-on-unrolled-loops.ll Tue Oct 26 19:48:03 2010
@@ -624,13 +624,11 @@
 bb24:                                             ; preds = %bb23
 
 ; LSR should use count-down iteration to avoid requiring the trip count
-; in a register, and it shouldn't require any reloads here.
+; in a register.
 
 ;      CHECK: @ %bb24
-; CHECK-NEXT: @   in Loop: Header=BB1_1 Depth=1
-; CHECK-NEXT: sub{{.*}} [[REGISTER:r[0-9]+]], #1
-; CHECK-NEXT: cmp{{.*}} [[REGISTER]], #0
-; CHECK-NEXT: bne.w
+; CHECK: subs{{.*}} [[REGISTER:(r[0-9]+)|(lr)]], #1
+; CHECK: bne.w
 
   %92 = icmp eq i32 %tmp81, %indvar78             ; <i1> [#uses=1]
   %indvar.next79 = add i32 %indvar78, 1           ; <i32> [#uses=1]

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/mul_const.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/mul_const.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/mul_const.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/mul_const.ll Tue Oct 26 19:48:03 2010
@@ -36,7 +36,7 @@
 entry:
 ; CHECK: t12288:
 ; CHECK: add r0, r0, r0, lsl #1
-; CHECK: mov     r0, r0, lsl #12
+; CHECK: lsl{{.*}}#12
         %0 = mul i32 %v, 12288
         ret i32 %0
 }

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/pack.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/pack.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/pack.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/pack.ll Tue Oct 26 19:48:03 2010
@@ -1,73 +1,88 @@
-; RUN: llc < %s -march=arm -mattr=+v6 | \
-; RUN:   grep pkhbt | count 5
-; RUN: llc < %s -march=arm -mattr=+v6 | \
-; RUN:   grep pkhtb | count 4
+; RUN: llc < %s -march=arm -mattr=+v6 | FileCheck %s
 
+; CHECK: test1
+; CHECK: pkhbt   r0, r0, r1, lsl #16
 define i32 @test1(i32 %X, i32 %Y) {
-	%tmp1 = and i32 %X, 65535		; <i32> [#uses=1]
-	%tmp4 = shl i32 %Y, 16		; <i32> [#uses=1]
-	%tmp5 = or i32 %tmp4, %tmp1		; <i32> [#uses=1]
-	ret i32 %tmp5
-}
-
-define i32 @test1a(i32 %X, i32 %Y) {
-	%tmp19 = and i32 %X, 65535		; <i32> [#uses=1]
-	%tmp37 = shl i32 %Y, 16		; <i32> [#uses=1]
-	%tmp5 = or i32 %tmp37, %tmp19		; <i32> [#uses=1]
+	%tmp1 = and i32 %X, 65535
+	%tmp4 = shl i32 %Y, 16
+	%tmp5 = or i32 %tmp4, %tmp1
 	ret i32 %tmp5
 }
 
+; CHECK: test2
+; CHECK: pkhbt   r0, r0, r1, lsl #12
 define i32 @test2(i32 %X, i32 %Y) {
-	%tmp1 = and i32 %X, 65535		; <i32> [#uses=1]
-	%tmp3 = shl i32 %Y, 12		; <i32> [#uses=1]
-	%tmp4 = and i32 %tmp3, -65536		; <i32> [#uses=1]
-	%tmp57 = or i32 %tmp4, %tmp1		; <i32> [#uses=1]
+	%tmp1 = and i32 %X, 65535
+	%tmp3 = shl i32 %Y, 12
+	%tmp4 = and i32 %tmp3, -65536
+	%tmp57 = or i32 %tmp4, %tmp1
 	ret i32 %tmp57
 }
 
+; CHECK: test3
+; CHECK: pkhbt   r0, r0, r1, lsl #18
 define i32 @test3(i32 %X, i32 %Y) {
-	%tmp19 = and i32 %X, 65535		; <i32> [#uses=1]
-	%tmp37 = shl i32 %Y, 18		; <i32> [#uses=1]
-	%tmp5 = or i32 %tmp37, %tmp19		; <i32> [#uses=1]
+	%tmp19 = and i32 %X, 65535
+	%tmp37 = shl i32 %Y, 18
+	%tmp5 = or i32 %tmp37, %tmp19
 	ret i32 %tmp5
 }
 
+; CHECK: test4
+; CHECK: pkhbt   r0, r0, r1
 define i32 @test4(i32 %X, i32 %Y) {
-	%tmp1 = and i32 %X, 65535		; <i32> [#uses=1]
-	%tmp3 = and i32 %Y, -65536		; <i32> [#uses=1]
-	%tmp46 = or i32 %tmp3, %tmp1		; <i32> [#uses=1]
+	%tmp1 = and i32 %X, 65535
+	%tmp3 = and i32 %Y, -65536
+	%tmp46 = or i32 %tmp3, %tmp1
 	ret i32 %tmp46
 }
 
+; CHECK: test5
+; CHECK: pkhtb   r0, r0, r1, asr #16
 define i32 @test5(i32 %X, i32 %Y) {
-	%tmp17 = and i32 %X, -65536		; <i32> [#uses=1]
-	%tmp2 = bitcast i32 %Y to i32		; <i32> [#uses=1]
-	%tmp4 = lshr i32 %tmp2, 16		; <i32> [#uses=2]
-	%tmp5 = or i32 %tmp4, %tmp17		; <i32> [#uses=1]
+	%tmp17 = and i32 %X, -65536
+	%tmp2 = bitcast i32 %Y to i32
+	%tmp4 = lshr i32 %tmp2, 16
+	%tmp5 = or i32 %tmp4, %tmp17
 	ret i32 %tmp5
 }
 
+; CHECK: test5a
+; CHECK: pkhtb   r0, r0, r1, asr #16
 define i32 @test5a(i32 %X, i32 %Y) {
-	%tmp110 = and i32 %X, -65536		; <i32> [#uses=1]
-	%tmp37 = lshr i32 %Y, 16		; <i32> [#uses=1]
-	%tmp39 = bitcast i32 %tmp37 to i32		; <i32> [#uses=1]
-	%tmp5 = or i32 %tmp39, %tmp110		; <i32> [#uses=1]
+	%tmp110 = and i32 %X, -65536
+	%tmp37 = lshr i32 %Y, 16
+	%tmp39 = bitcast i32 %tmp37 to i32
+	%tmp5 = or i32 %tmp39, %tmp110
 	ret i32 %tmp5
 }
 
+; CHECK: test6
+; CHECK: pkhtb   r0, r0, r1, asr #12
 define i32 @test6(i32 %X, i32 %Y) {
-	%tmp1 = and i32 %X, -65536		; <i32> [#uses=1]
-	%tmp37 = lshr i32 %Y, 12		; <i32> [#uses=1]
-	%tmp38 = bitcast i32 %tmp37 to i32		; <i32> [#uses=1]
-	%tmp4 = and i32 %tmp38, 65535		; <i32> [#uses=1]
-	%tmp59 = or i32 %tmp4, %tmp1		; <i32> [#uses=1]
+	%tmp1 = and i32 %X, -65536
+	%tmp37 = lshr i32 %Y, 12
+	%tmp38 = bitcast i32 %tmp37 to i32
+	%tmp4 = and i32 %tmp38, 65535
+	%tmp59 = or i32 %tmp4, %tmp1
 	ret i32 %tmp59
 }
 
+; CHECK: test7
+; CHECK: pkhtb   r0, r0, r1, asr #18
 define i32 @test7(i32 %X, i32 %Y) {
-	%tmp1 = and i32 %X, -65536		; <i32> [#uses=1]
-	%tmp3 = ashr i32 %Y, 18		; <i32> [#uses=1]
-	%tmp4 = and i32 %tmp3, 65535		; <i32> [#uses=1]
-	%tmp57 = or i32 %tmp4, %tmp1		; <i32> [#uses=1]
+	%tmp1 = and i32 %X, -65536
+	%tmp3 = ashr i32 %Y, 18
+	%tmp4 = and i32 %tmp3, 65535
+	%tmp57 = or i32 %tmp4, %tmp1
+	ret i32 %tmp57
+}
+
+; CHECK: test8
+; CHECK: pkhtb   r0, r0, r1, asr #22
+define i32 @test8(i32 %X, i32 %Y) {
+	%tmp1 = and i32 %X, -65536
+	%tmp3 = lshr i32 %Y, 22
+	%tmp57 = or i32 %tmp3, %tmp1
 	ret i32 %tmp57
 }

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/reg_sequence.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/reg_sequence.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/reg_sequence.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/reg_sequence.ll Tue Oct 26 19:48:03 2010
@@ -23,21 +23,21 @@
   %2 = getelementptr inbounds %struct.int32x4_t* %vT1ptr, i32 0, i32 0 ; <<4 x i32>*> [#uses=1]
   %3 = load <4 x i32>* %2, align 16               ; <<4 x i32>> [#uses=1]
   %4 = bitcast i16* %i_ptr to i8*                 ; <i8*> [#uses=1]
-  %5 = tail call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %4) ; <<8 x i16>> [#uses=1]
+  %5 = tail call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %4, i32 1) ; <<8 x i16>> [#uses=1]
   %6 = bitcast <8 x i16> %5 to <2 x double>       ; <<2 x double>> [#uses=2]
   %7 = extractelement <2 x double> %6, i32 0      ; <double> [#uses=1]
   %8 = bitcast double %7 to <4 x i16>             ; <<4 x i16>> [#uses=1]
-  %9 = tail call <4 x i32> @llvm.arm.neon.vmovls.v4i32(<4 x i16> %8) ; <<4 x i32>> [#uses=1]
+  %9 = sext <4 x i16> %8 to <4 x i32>             ; <<4 x i32>> [#uses=1]
   %10 = extractelement <2 x double> %6, i32 1     ; <double> [#uses=1]
   %11 = bitcast double %10 to <4 x i16>           ; <<4 x i16>> [#uses=1]
-  %12 = tail call <4 x i32> @llvm.arm.neon.vmovls.v4i32(<4 x i16> %11) ; <<4 x i32>> [#uses=1]
+  %12 = sext <4 x i16> %11 to <4 x i32>           ; <<4 x i32>> [#uses=1]
   %13 = mul <4 x i32> %1, %9                      ; <<4 x i32>> [#uses=1]
   %14 = mul <4 x i32> %3, %12                     ; <<4 x i32>> [#uses=1]
   %15 = tail call <4 x i16> @llvm.arm.neon.vshiftn.v4i16(<4 x i32> %13, <4 x i32> <i32 -12, i32 -12, i32 -12, i32 -12>) ; <<4 x i16>> [#uses=1]
   %16 = tail call <4 x i16> @llvm.arm.neon.vshiftn.v4i16(<4 x i32> %14, <4 x i32> <i32 -12, i32 -12, i32 -12, i32 -12>) ; <<4 x i16>> [#uses=1]
   %17 = shufflevector <4 x i16> %15, <4 x i16> %16, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> ; <<8 x i16>> [#uses=1]
   %18 = bitcast i16* %o_ptr to i8*                ; <i8*> [#uses=1]
-  tail call void @llvm.arm.neon.vst1.v8i16(i8* %18, <8 x i16> %17)
+  tail call void @llvm.arm.neon.vst1.v8i16(i8* %18, <8 x i16> %17, i32 1)
   ret void
 }
 
@@ -45,8 +45,8 @@
 entry:
 ; CHECK:        t2:
 ; CHECK:        vld1.16
-; CHECK:        vmul.i16
 ; CHECK-NOT:    vmov
+; CHECK:        vmul.i16
 ; CHECK:        vld1.16
 ; CHECK:        vmul.i16
 ; CHECK-NOT:    vmov
@@ -57,17 +57,17 @@
   %2 = getelementptr inbounds %struct.int16x8_t* %vT1ptr, i32 0, i32 0 ; <<8 x i16>*> [#uses=1]
   %3 = load <8 x i16>* %2, align 16               ; <<8 x i16>> [#uses=1]
   %4 = bitcast i16* %i_ptr to i8*                 ; <i8*> [#uses=1]
-  %5 = tail call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %4) ; <<8 x i16>> [#uses=1]
+  %5 = tail call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %4, i32 1) ; <<8 x i16>> [#uses=1]
   %6 = getelementptr inbounds i16* %i_ptr, i32 8  ; <i16*> [#uses=1]
   %7 = bitcast i16* %6 to i8*                     ; <i8*> [#uses=1]
-  %8 = tail call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %7) ; <<8 x i16>> [#uses=1]
+  %8 = tail call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %7, i32 1) ; <<8 x i16>> [#uses=1]
   %9 = mul <8 x i16> %1, %5                       ; <<8 x i16>> [#uses=1]
   %10 = mul <8 x i16> %3, %8                      ; <<8 x i16>> [#uses=1]
   %11 = bitcast i16* %o_ptr to i8*                ; <i8*> [#uses=1]
-  tail call void @llvm.arm.neon.vst1.v8i16(i8* %11, <8 x i16> %9)
+  tail call void @llvm.arm.neon.vst1.v8i16(i8* %11, <8 x i16> %9, i32 1)
   %12 = getelementptr inbounds i16* %o_ptr, i32 8 ; <i16*> [#uses=1]
   %13 = bitcast i16* %12 to i8*                   ; <i8*> [#uses=1]
-  tail call void @llvm.arm.neon.vst1.v8i16(i8* %13, <8 x i16> %10)
+  tail call void @llvm.arm.neon.vst1.v8i16(i8* %13, <8 x i16> %10, i32 1)
   ret void
 }
 
@@ -75,16 +75,17 @@
 ; CHECK:        t3:
 ; CHECK:        vld3.8
 ; CHECK:        vmul.i8
-; CHECK-NOT:    vmov
+; CHECK:        vmov r
+; CHECK-NOT:    vmov d
 ; CHECK:        vst3.8
-  %tmp1 = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A) ; <%struct.__neon_int8x8x3_t> [#uses=2]
+  %tmp1 = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A, i32 1) ; <%struct.__neon_int8x8x3_t> [#uses=2]
   %tmp2 = extractvalue %struct.__neon_int8x8x3_t %tmp1, 0 ; <<8 x i8>> [#uses=1]
   %tmp3 = extractvalue %struct.__neon_int8x8x3_t %tmp1, 2 ; <<8 x i8>> [#uses=1]
   %tmp4 = extractvalue %struct.__neon_int8x8x3_t %tmp1, 1 ; <<8 x i8>> [#uses=1]
   %tmp5 = sub <8 x i8> %tmp3, %tmp4
   %tmp6 = add <8 x i8> %tmp2, %tmp3               ; <<8 x i8>> [#uses=1]
   %tmp7 = mul <8 x i8> %tmp4, %tmp2
-  tail call void @llvm.arm.neon.vst3.v8i8(i8* %B, <8 x i8> %tmp5, <8 x i8> %tmp6, <8 x i8> %tmp7)
+  tail call void @llvm.arm.neon.vst3.v8i8(i8* %B, <8 x i8> %tmp5, <8 x i8> %tmp6, <8 x i8> %tmp7, i32 1)
   ret <8 x i8> %tmp4
 }
 
@@ -97,10 +98,10 @@
 ; CHECK-NOT:    vmov
 ; CHECK:        bne
   %tmp1 = bitcast i32* %in to i8*                 ; <i8*> [#uses=1]
-  %tmp2 = tail call %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32(i8* %tmp1) ; <%struct.__neon_int32x4x2_t> [#uses=2]
+  %tmp2 = tail call %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32(i8* %tmp1, i32 1) ; <%struct.__neon_int32x4x2_t> [#uses=2]
   %tmp3 = getelementptr inbounds i32* %in, i32 8  ; <i32*> [#uses=1]
   %tmp4 = bitcast i32* %tmp3 to i8*               ; <i8*> [#uses=1]
-  %tmp5 = tail call %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32(i8* %tmp4) ; <%struct.__neon_int32x4x2_t> [#uses=2]
+  %tmp5 = tail call %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32(i8* %tmp4, i32 1) ; <%struct.__neon_int32x4x2_t> [#uses=2]
   %tmp8 = bitcast i32* %out to i8*                ; <i8*> [#uses=1]
   br i1 undef, label %return1, label %return2
 
@@ -116,19 +117,19 @@
   %tmp39 = extractvalue %struct.__neon_int32x4x2_t %tmp5, 1 ; <<4 x i32>> [#uses=1]
   %tmp6 = add <4 x i32> %tmp52, %tmp              ; <<4 x i32>> [#uses=1]
   %tmp7 = add <4 x i32> %tmp57, %tmp39            ; <<4 x i32>> [#uses=1]
-  tail call void @llvm.arm.neon.vst2.v4i32(i8* %tmp8, <4 x i32> %tmp6, <4 x i32> %tmp7)
+  tail call void @llvm.arm.neon.vst2.v4i32(i8* %tmp8, <4 x i32> %tmp6, <4 x i32> %tmp7, i32 1)
   ret void
 
 return2:
 ; CHECK:        %return2
 ; CHECK:        vadd.i32
-; CHECK:        vmov q1, q3
+; CHECK:        vmov q9, q11
 ; CHECK-NOT:    vmov
-; CHECK:        vst2.32 {d0, d1, d2, d3}
+; CHECK:        vst2.32 {d16, d17, d18, d19}
   %tmp100 = extractvalue %struct.__neon_int32x4x2_t %tmp2, 0 ; <<4 x i32>> [#uses=1]
   %tmp101 = extractvalue %struct.__neon_int32x4x2_t %tmp5, 1 ; <<4 x i32>> [#uses=1]
   %tmp102 = add <4 x i32> %tmp100, %tmp101              ; <<4 x i32>> [#uses=1]
-  tail call void @llvm.arm.neon.vst2.v4i32(i8* %tmp8, <4 x i32> %tmp102, <4 x i32> %tmp101)
+  tail call void @llvm.arm.neon.vst2.v4i32(i8* %tmp8, <4 x i32> %tmp102, <4 x i32> %tmp101, i32 1)
   call void @llvm.trap()
   unreachable
 }
@@ -136,14 +137,14 @@
 define <8 x i16> @t5(i16* %A, <8 x i16>* %B) nounwind {
 ; CHECK:        t5:
 ; CHECK:        vldmia
-; CHECK:        vmov q1, q0
+; CHECK:        vmov q9, q8
 ; CHECK-NOT:    vmov
-; CHECK:        vld2.16 {d0[1], d2[1]}, [r0]
+; CHECK:        vld2.16 {d16[1], d18[1]}, [r0]
 ; CHECK-NOT:    vmov
 ; CHECK:        vadd.i16
   %tmp0 = bitcast i16* %A to i8*                  ; <i8*> [#uses=1]
   %tmp1 = load <8 x i16>* %B                      ; <<8 x i16>> [#uses=2]
-  %tmp2 = call %struct.__neon_int16x8x2_t @llvm.arm.neon.vld2lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1) ; <%struct.__neon_int16x8x2_t> [#uses=2]
+  %tmp2 = call %struct.__neon_int16x8x2_t @llvm.arm.neon.vld2lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1, i32 1) ; <%struct.__neon_int16x8x2_t> [#uses=2]
   %tmp3 = extractvalue %struct.__neon_int16x8x2_t %tmp2, 0 ; <<8 x i16>> [#uses=1]
   %tmp4 = extractvalue %struct.__neon_int16x8x2_t %tmp2, 1 ; <<8 x i16>> [#uses=1]
   %tmp5 = add <8 x i16> %tmp3, %tmp4              ; <<8 x i16>> [#uses=1]
@@ -153,10 +154,10 @@
 define <8 x i8> @t6(i8* %A, <8 x i8>* %B) nounwind {
 ; CHECK:        t6:
 ; CHECK:        vldr.64
-; CHECK:        vmov d1, d0
-; CHECK-NEXT:   vld2.8 {d0[1], d1[1]}
+; CHECK:        vmov d17, d16
+; CHECK-NEXT:   vld2.8 {d16[1], d17[1]}
   %tmp1 = load <8 x i8>* %B                       ; <<8 x i8>> [#uses=2]
-  %tmp2 = call %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1) ; <%struct.__neon_int8x8x2_t> [#uses=2]
+  %tmp2 = call %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1, i32 1) ; <%struct.__neon_int8x8x2_t> [#uses=2]
   %tmp3 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 0 ; <<8 x i8>> [#uses=1]
   %tmp4 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 1 ; <<8 x i8>> [#uses=1]
   %tmp5 = add <8 x i8> %tmp3, %tmp4               ; <<8 x i8>> [#uses=1]
@@ -168,27 +169,27 @@
 ; CHECK:        t7:
 ; CHECK:        vld2.32
 ; CHECK:        vst2.32
-; CHECK:        vld1.32 {d0, d1},
-; CHECK:        vmov q1, q0
+; CHECK:        vld1.32 {d16, d17},
+; CHECK:        vmov q9, q8
 ; CHECK-NOT:    vmov
-; CHECK:        vuzp.32 q0, q1
+; CHECK:        vuzp.32 q8, q9
 ; CHECK:        vst1.32
   %0 = bitcast i32* %iptr to i8*                  ; <i8*> [#uses=2]
-  %1 = tail call %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32(i8* %0) ; <%struct.__neon_int32x4x2_t> [#uses=2]
+  %1 = tail call %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32(i8* %0, i32 1) ; <%struct.__neon_int32x4x2_t> [#uses=2]
   %tmp57 = extractvalue %struct.__neon_int32x4x2_t %1, 0 ; <<4 x i32>> [#uses=1]
   %tmp60 = extractvalue %struct.__neon_int32x4x2_t %1, 1 ; <<4 x i32>> [#uses=1]
   %2 = bitcast i32* %optr to i8*                  ; <i8*> [#uses=2]
-  tail call void @llvm.arm.neon.vst2.v4i32(i8* %2, <4 x i32> %tmp57, <4 x i32> %tmp60)
-  %3 = tail call <4 x i32> @llvm.arm.neon.vld1.v4i32(i8* %0) ; <<4 x i32>> [#uses=1]
+  tail call void @llvm.arm.neon.vst2.v4i32(i8* %2, <4 x i32> %tmp57, <4 x i32> %tmp60, i32 1)
+  %3 = tail call <4 x i32> @llvm.arm.neon.vld1.v4i32(i8* %0, i32 1) ; <<4 x i32>> [#uses=1]
   %4 = shufflevector <4 x i32> %3, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 0, i32 2> ; <<4 x i32>> [#uses=1]
-  tail call void @llvm.arm.neon.vst1.v4i32(i8* %2, <4 x i32> %4)
+  tail call void @llvm.arm.neon.vst1.v4i32(i8* %2, <4 x i32> %4, i32 1)
   ret void
 }
 
 ; PR7156
 define arm_aapcs_vfpcc i32 @t8() nounwind {
 ; CHECK: t8:
-; CHECK: vrsqrte.f32 q0, q0
+; CHECK: vrsqrte.f32 q8, q8
 bb.nph55.bb.nph55.split_crit_edge:
   br label %bb3
 
@@ -238,10 +239,10 @@
 define arm_aapcs_vfpcc float @t9(%0* nocapture, %3* nocapture) nounwind {
 ; CHECK:        t9:
 ; CHECK:        vldr.64
-; CHECK-NOT:    vmov d{{.*}}, d0
-; CHECK:        vmov.i32 d1
-; CHECK-NEXT:   vstmia r0, {d0, d1}
-; CHECK-NEXT:   vstmia r0, {d0, d1}
+; CHECK-NOT:    vmov d{{.*}}, d16
+; CHECK:        vmov.i32 d17
+; CHECK-NEXT:   vstmia r0, {d16, d17}
+; CHECK-NEXT:   vstmia r0, {d16, d17}
   %3 = bitcast double 0.000000e+00 to <2 x float> ; <<2 x float>> [#uses=2]
   %4 = shufflevector <2 x float> %3, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> ; <<4 x float>> [#uses=1]
   store <4 x float> %4, <4 x float>* undef, align 16
@@ -269,9 +270,9 @@
 define arm_aapcs_vfpcc i32 @t10() nounwind {
 entry:
 ; CHECK: t10:
-; CHECK: vmov.i32 q1, #0x3F000000
-; CHECK: vmov d0, d1
-; CHECK: vmla.f32 q0, q0, d0[0]
+; CHECK: vmov.i32 q9, #0x3F000000
+; CHECK: vmov d0, d17
+; CHECK: vmla.f32 q8, q8, d0[0]
   %0 = shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>> [#uses=1]
   %1 = insertelement <4 x float> %0, float undef, i32 1 ; <<4 x float>> [#uses=1]
   %2 = insertelement <4 x float> %1, float undef, i32 2 ; <<4 x float>> [#uses=1]
@@ -304,44 +305,43 @@
 
 ; This test crashes the coalescer because live variables were not updated properly.
 define <8 x i8> @t11(i8* %A1, i8* %A2, i8* %A3, i8* %A4, i8* %A5, i8* %A6, i8* %A7, i8* %A8, i8* %B) nounwind {
-  %tmp1d = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A4) ; <%struct.__neon_int8x8x3_t> [#uses=1]
+  %tmp1d = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A4, i32 1) ; <%struct.__neon_int8x8x3_t> [#uses=1]
   %tmp2d = extractvalue %struct.__neon_int8x8x3_t %tmp1d, 0 ; <<8 x i8>> [#uses=1]
-  %tmp1f = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A6) ; <%struct.__neon_int8x8x3_t> [#uses=1]
+  %tmp1f = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A6, i32 1) ; <%struct.__neon_int8x8x3_t> [#uses=1]
   %tmp2f = extractvalue %struct.__neon_int8x8x3_t %tmp1f, 0 ; <<8 x i8>> [#uses=1]
   %tmp2bd = add <8 x i8> zeroinitializer, %tmp2d  ; <<8 x i8>> [#uses=1]
   %tmp2abcd = mul <8 x i8> zeroinitializer, %tmp2bd ; <<8 x i8>> [#uses=1]
   %tmp2ef = sub <8 x i8> zeroinitializer, %tmp2f  ; <<8 x i8>> [#uses=1]
   %tmp2efgh = mul <8 x i8> %tmp2ef, undef         ; <<8 x i8>> [#uses=2]
-  call void @llvm.arm.neon.vst3.v8i8(i8* %A2, <8 x i8> undef, <8 x i8> undef, <8 x i8> %tmp2efgh)
+  call void @llvm.arm.neon.vst3.v8i8(i8* %A2, <8 x i8> undef, <8 x i8> undef, <8 x i8> %tmp2efgh, i32 1)
   %tmp2 = sub <8 x i8> %tmp2efgh, %tmp2abcd       ; <<8 x i8>> [#uses=1]
   %tmp7 = mul <8 x i8> undef, %tmp2               ; <<8 x i8>> [#uses=1]
-  tail call void @llvm.arm.neon.vst3.v8i8(i8* %B, <8 x i8> undef, <8 x i8> undef, <8 x i8> %tmp7)
+  tail call void @llvm.arm.neon.vst3.v8i8(i8* %B, <8 x i8> undef, <8 x i8> undef, <8 x i8> %tmp7, i32 1)
   ret <8 x i8> undef
 }
 
-declare <4 x i32> @llvm.arm.neon.vld1.v4i32(i8*) nounwind readonly
-
-declare <8 x i16> @llvm.arm.neon.vld1.v8i16(i8*) nounwind readonly
+declare <4 x i32> @llvm.arm.neon.vld1.v4i32(i8*, i32) nounwind readonly
 
-declare <4 x i32> @llvm.arm.neon.vmovls.v4i32(<4 x i16>) nounwind readnone
+declare <8 x i16> @llvm.arm.neon.vld1.v8i16(i8*, i32) nounwind readonly
 
 declare <4 x i16> @llvm.arm.neon.vshiftn.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
 
-declare void @llvm.arm.neon.vst1.v4i32(i8*, <4 x i32>) nounwind
+declare void @llvm.arm.neon.vst1.v4i32(i8*, <4 x i32>, i32) nounwind
 
-declare void @llvm.arm.neon.vst1.v8i16(i8*, <8 x i16>) nounwind
+declare void @llvm.arm.neon.vst1.v8i16(i8*, <8 x i16>, i32) nounwind
 
-declare void @llvm.arm.neon.vst3.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>) nounwind
+declare void @llvm.arm.neon.vst3.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32) nounwind
 
-declare %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8*) nounwind readonly
+declare %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8*, i32) nounwind readonly
 
-declare %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32(i8*) nounwind readonly
+declare %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32(i8*, i32) nounwind readonly
 
-declare %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8(i8*, <8 x i8>, <8 x i8>, i32) nounwind readonly
+declare %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8(i8*, <8 x i8>, <8 x i8>, i32, i32) nounwind readonly
 
-declare %struct.__neon_int16x8x2_t @llvm.arm.neon.vld2lane.v8i16(i8*, <8 x i16>, <8 x i16>, i32) nounwind readonly
+declare %struct.__neon_int16x8x2_t @llvm.arm.neon.vld2lane.v8i16(i8*, <8 x i16>, <8 x i16>, i32, i32) nounwind readonly
 
-declare void @llvm.arm.neon.vst2.v4i32(i8*, <4 x i32>, <4 x i32>) nounwind
+declare void @llvm.arm.neon.vst2.v4i32(i8*, <4 x i32>, <4 x i32>, i32) nounwind
 
 declare <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float>) nounwind readnone
 

Removed: llvm/branches/wendling/eh/test/CodeGen/ARM/remat.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/remat.ll?rev=117424&view=auto
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/remat.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/remat.ll (removed)
@@ -1,65 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+v6,+vfp2 -stats -info-output-file - | grep "Number of re-materialization"
-
-define i32 @main(i32 %argc, i8** nocapture %argv, double %d1, double %d2) nounwind {
-entry:
-  br i1 undef, label %smvp.exit, label %bb.i3
-
-bb.i3:                                            ; preds = %bb.i3, %bb134
-  br i1 undef, label %smvp.exit, label %bb.i3
-
-smvp.exit:                                        ; preds = %bb.i3
-  %0 = fmul double %d1, 2.400000e-03            ; <double> [#uses=2]
-  br i1 undef, label %bb138.preheader, label %bb159
-
-bb138.preheader:                                  ; preds = %smvp.exit
-  br label %bb138
-
-bb138:                                            ; preds = %bb138, %bb138.preheader
-  br i1 undef, label %bb138, label %bb145.loopexit
-
-bb142:                                            ; preds = %bb.nph218.bb.nph218.split_crit_edge, %phi0.exit
-  %1 = fmul double %d1, -1.200000e-03           ; <double> [#uses=1]
-  %2 = fadd double %d2, %1                      ; <double> [#uses=1]
-  %3 = fmul double %2, %d2                      ; <double> [#uses=1]
-  %4 = fsub double 0.000000e+00, %3               ; <double> [#uses=1]
-  br i1 %14, label %phi1.exit, label %bb.i35
-
-bb.i35:                                           ; preds = %bb142
-  %5 = call  double @sin(double %15) nounwind readonly ; <double> [#uses=1]
-  %6 = fmul double %5, 0x4031740AFA84AD8A         ; <double> [#uses=1]
-  %7 = fsub double 1.000000e+00, undef            ; <double> [#uses=1]
-  %8 = fdiv double %7, 6.000000e-01               ; <double> [#uses=1]
-  br label %phi1.exit
-
-phi1.exit:                                        ; preds = %bb.i35, %bb142
-  %.pn = phi double [ %6, %bb.i35 ], [ 0.000000e+00, %bb142 ] ; <double> [#uses=1]
-  %9 = phi double [ %8, %bb.i35 ], [ 0.000000e+00, %bb142 ] ; <double> [#uses=1]
-  %10 = fmul double %.pn, %9                      ; <double> [#uses=1]
-  br i1 %14, label %phi0.exit, label %bb.i
-
-bb.i:                                             ; preds = %phi1.exit
-  unreachable
-
-phi0.exit:                                        ; preds = %phi1.exit
-  %11 = fsub double %4, %10                       ; <double> [#uses=1]
-  %12 = fadd double 0.000000e+00, %11             ; <double> [#uses=1]
-  store double %12, double* undef, align 4
-  br label %bb142
-
-bb145.loopexit:                                   ; preds = %bb138
-  br i1 undef, label %bb.nph218.bb.nph218.split_crit_edge, label %bb159
-
-bb.nph218.bb.nph218.split_crit_edge:              ; preds = %bb145.loopexit
-  %13 = fmul double %0, 0x401921FB54442D18        ; <double> [#uses=1]
-  %14 = fcmp ugt double %0, 6.000000e-01          ; <i1> [#uses=2]
-  %15 = fdiv double %13, 6.000000e-01             ; <double> [#uses=1]
-  br label %bb142
-
-bb159:                                            ; preds = %bb145.loopexit, %smvp.exit, %bb134
-  unreachable
-
-bb166:                                            ; preds = %bb127
-  unreachable
-}
-
-declare double @sin(double) nounwind readonly

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/rev.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/rev.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/rev.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/rev.ll Tue Oct 26 19:48:03 2010
@@ -1,27 +1,30 @@
-; RUN: llc < %s -march=arm -mattr=+v6 | grep rev16
-; RUN: llc < %s -march=arm -mattr=+v6 | grep revsh
+; RUN: llc < %s -march=arm -mattr=+v6 | FileCheck %s
 
 define i32 @test1(i32 %X) {
-        %tmp1 = lshr i32 %X, 8          ; <i32> [#uses=3]
-        %X15 = bitcast i32 %X to i32            ; <i32> [#uses=1]
-        %tmp4 = shl i32 %X15, 8         ; <i32> [#uses=2]
-        %tmp2 = and i32 %tmp1, 16711680         ; <i32> [#uses=1]
-        %tmp5 = and i32 %tmp4, -16777216                ; <i32> [#uses=1]
-        %tmp9 = and i32 %tmp1, 255              ; <i32> [#uses=1]
-        %tmp13 = and i32 %tmp4, 65280           ; <i32> [#uses=1]
-        %tmp6 = or i32 %tmp5, %tmp2             ; <i32> [#uses=1]
-        %tmp10 = or i32 %tmp6, %tmp13           ; <i32> [#uses=1]
-        %tmp14 = or i32 %tmp10, %tmp9           ; <i32> [#uses=1]
+; CHECK: test1
+; CHECK: rev16 r0, r0
+        %tmp1 = lshr i32 %X, 8
+        %X15 = bitcast i32 %X to i32
+        %tmp4 = shl i32 %X15, 8
+        %tmp2 = and i32 %tmp1, 16711680
+        %tmp5 = and i32 %tmp4, -16777216
+        %tmp9 = and i32 %tmp1, 255
+        %tmp13 = and i32 %tmp4, 65280
+        %tmp6 = or i32 %tmp5, %tmp2
+        %tmp10 = or i32 %tmp6, %tmp13
+        %tmp14 = or i32 %tmp10, %tmp9
         ret i32 %tmp14
 }
 
 define i32 @test2(i32 %X) {
-        %tmp1 = lshr i32 %X, 8          ; <i32> [#uses=1]
-        %tmp1.upgrd.1 = trunc i32 %tmp1 to i16          ; <i16> [#uses=1]
-        %tmp3 = trunc i32 %X to i16             ; <i16> [#uses=1]
-        %tmp2 = and i16 %tmp1.upgrd.1, 255              ; <i16> [#uses=1]
-        %tmp4 = shl i16 %tmp3, 8                ; <i16> [#uses=1]
-        %tmp5 = or i16 %tmp2, %tmp4             ; <i16> [#uses=1]
-        %tmp5.upgrd.2 = sext i16 %tmp5 to i32           ; <i32> [#uses=1]
+; CHECK: test2
+; CHECK: revsh r0, r0
+        %tmp1 = lshr i32 %X, 8
+        %tmp1.upgrd.1 = trunc i32 %tmp1 to i16
+        %tmp3 = trunc i32 %X to i16
+        %tmp2 = and i16 %tmp1.upgrd.1, 255
+        %tmp4 = shl i16 %tmp3, 8
+        %tmp5 = or i16 %tmp2, %tmp4
+        %tmp5.upgrd.2 = sext i16 %tmp5 to i32
         ret i32 %tmp5.upgrd.2
 }

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/select-imm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/select-imm.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/select-imm.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/select-imm.ll Tue Oct 26 19:48:03 2010
@@ -25,8 +25,8 @@
 ; ARM: movle r0, #123
 
 ; T2: t2:
-; T2: movw r0, #357
-; T2: movle r0, #123
+; T2: mov r0, #123
+; T2: movwgt r0, #357
 
   %0 = icmp sgt i32 %c, 1
   %1 = select i1 %0, i32 357, i32 123

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/select.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/select.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/select.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/select.ll Tue Oct 26 19:48:03 2010
@@ -1,5 +1,6 @@
 ; RUN: llc < %s -march=arm | FileCheck %s
 ; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s --check-prefix=CHECK-VFP
+; RUN: llc < %s -mattr=+neon,+thumb2 -mtriple=thumbv7-apple-darwin | FileCheck %s --check-prefix=CHECK-NEON
 
 define i32 @f1(i32 %a.s) {
 ;CHECK: f1:
@@ -65,3 +66,27 @@
     %tmp1 = select i1 %tmp, double -1.000e+00, double %b
     ret double %tmp1
 }
+
+; <rdar://problem/7260094>
+;
+; We used to generate really horrible code for this function. The main cause was
+; the lack of a custom lowering routine for ISD::SELECT. This would result in
+; two "it" blocks in the code: one for the "icmp" and another to move the index
+; into the constant pool based on the value of the "icmp". If only one "it"
+; block is generated, odds are good that we have close to the ideal code for this:
+;
+; CHECK-NEON:      _f8:
+; CHECK-NEON:      movw   [[REGISTER_1:r[0-9]+]], #1123
+; CHECK-NEON-NEXT: movs   [[REGISTER_2:r[0-9]+]], #0
+; CHECK-NEON-NEXT: cmp    r0, [[REGISTER_1]]
+; CHECK-NEON-NEXT: it     eq
+; CHECK-NEON-NEXT: moveq  [[REGISTER_2]], #4
+; CHECK-NEON-NEXT: adr    [[REGISTER_3:r[0-9]+]], #LCPI
+; CHECK-NEON-NEXT: ldr
+; CHECK-NEON:      bx
+
+define arm_apcscc float @f8(i32 %a) nounwind {
+  %tmp = icmp eq i32 %a, 1123
+  %tmp1 = select i1 %tmp, float 0x3FF3BE76C0000000, float 0x40030E9A20000000
+  ret float %tmp1
+}

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/spill-q.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/spill-q.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/spill-q.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/spill-q.ll Tue Oct 26 19:48:03 2010
@@ -7,7 +7,7 @@
 %quux = type { i32 (...)**, %baz*, i32 }
 %quuz = type { %quux, i32, %bar, [128 x i8], [16 x %foo], %foo, %foo, %foo }
 
-declare <4 x float> @llvm.arm.neon.vld1.v4f32(i8*) nounwind readonly
+declare <4 x float> @llvm.arm.neon.vld1.v4f32(i8*, i32) nounwind readonly
 
 define void @aaa(%quuz* %this, i8* %block) {
 ; CHECK: aaa:
@@ -15,11 +15,31 @@
 ; CHECK: vst1.64 {{.*}}sp, :128
 ; CHECK: vld1.64 {{.*}}sp, :128
 entry:
-  %0 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef) nounwind ; <<4 x float>> [#uses=1]
+  %0 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind ; <<4 x float>> [#uses=1]
   store float 6.300000e+01, float* undef, align 4
-  %1 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef) nounwind ; <<4 x float>> [#uses=1]
+  %1 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind ; <<4 x float>> [#uses=1]
+  store float 0.000000e+00, float* undef, align 4
+  %2 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind ; <<4 x float>> [#uses=1]
+  %ld3 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
+  store float 0.000000e+00, float* undef, align 4
+  %ld4 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
+  store float 0.000000e+00, float* undef, align 4
+  %ld5 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
+  store float 0.000000e+00, float* undef, align 4
+  %ld6 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
+  store float 0.000000e+00, float* undef, align 4
+  %ld7 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
+  store float 0.000000e+00, float* undef, align 4
+  %ld8 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
+  store float 0.000000e+00, float* undef, align 4
+  %ld9 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
+  store float 0.000000e+00, float* undef, align 4
+  %ld10 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
+  store float 0.000000e+00, float* undef, align 4
+  %ld11 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
+  store float 0.000000e+00, float* undef, align 4
+  %ld12 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
   store float 0.000000e+00, float* undef, align 4
-  %2 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef) nounwind ; <<4 x float>> [#uses=1]
   %val173 = load <4 x float>* undef               ; <<4 x float>> [#uses=1]
   br label %bb4
 
@@ -44,7 +64,16 @@
   %18 = fmul <4 x float> %17, %val173             ; <<4 x float>> [#uses=1]
   %19 = shufflevector <4 x float> %18, <4 x float> undef, <2 x i32> <i32 2, i32 3> ; <<2 x float>> [#uses=1]
   %20 = shufflevector <2 x float> %19, <2 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>> [#uses=1]
-  %21 = fadd <4 x float> zeroinitializer, %20     ; <<4 x float>> [#uses=2]
+  %tmp1 = fadd <4 x float> %20, %ld3
+  %tmp2 = fadd <4 x float> %tmp1, %ld4
+  %tmp3 = fadd <4 x float> %tmp2, %ld5
+  %tmp4 = fadd <4 x float> %tmp3, %ld6
+  %tmp5 = fadd <4 x float> %tmp4, %ld7
+  %tmp6 = fadd <4 x float> %tmp5, %ld8
+  %tmp7 = fadd <4 x float> %tmp6, %ld9
+  %tmp8 = fadd <4 x float> %tmp7, %ld10
+  %tmp9 = fadd <4 x float> %tmp8, %ld11
+  %21 = fadd <4 x float> %tmp9, %ld12
   %22 = fcmp ogt <4 x float> %besterror.0.2264, %21 ; <<4 x i1>> [#uses=0]
   %tmp = extractelement <4 x i1> %22, i32 0
   br i1 %tmp, label %bb193, label %bb186

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/stm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/stm.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/stm.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/stm.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=arm-apple-darwin -mattr=+v6,+vfp2 | grep stm | count 2
+; RUN: llc < %s -mtriple=arm-apple-darwin -mattr=+v6,+vfp2 | FileCheck %s
 
 @"\01LC" = internal constant [32 x i8] c"Boolean Not: %d %d %d %d %d %d\0A\00", section "__TEXT,__cstring,cstring_literals"		; <[32 x i8]*> [#uses=1]
 @"\01LC1" = internal constant [26 x i8] c"Bitwise Not: %d %d %d %d\0A\00", section "__TEXT,__cstring,cstring_literals"		; <[26 x i8]*> [#uses=1]
@@ -7,6 +7,9 @@
 
 define i32 @main() nounwind {
 entry:
+; CHECK: main
+; CHECK: push
+; CHECK: stmib
 	%0 = tail call i32 (i8*, ...)* @printf(i8* getelementptr ([26 x i8]* @"\01LC1", i32 0, i32 0), i32 -2, i32 -3, i32 2, i32 -6) nounwind		; <i32> [#uses=0]
 	%1 = tail call i32 (i8*, ...)* @printf(i8* getelementptr ([32 x i8]* @"\01LC", i32 0, i32 0), i32 0, i32 1, i32 0, i32 1, i32 0, i32 1) nounwind		; <i32> [#uses=0]
 	ret i32 0

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/str_pre-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/str_pre-2.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/str_pre-2.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/str_pre-2.ll Tue Oct 26 19:48:03 2010
@@ -1,10 +1,11 @@
-; RUN: llc < %s -mtriple=arm-linux-gnu | grep {str.*\\!}
-; RUN: llc < %s -mtriple=arm-linux-gnu | grep {ldr.*\\\[.*\], #4}
+; RUN: llc < %s -mtriple=arm-linux-gnu | FileCheck %s
 
 @b = external global i64*
 
 define i64 @t(i64 %a) nounwind readonly {
 entry:
+; CHECK: str lr, [sp, #-4]!
+; CHECK: ldr lr, [sp], #4
 	%0 = load i64** @b, align 4
 	%1 = load i64* %0, align 4
 	%2 = mul i64 %1, %a

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/t2-imm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/t2-imm.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/t2-imm.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/t2-imm.ll Tue Oct 26 19:48:03 2010
@@ -1,9 +1,9 @@
-; RUN: llc < %s -march=arm -mattr=+thumb2 | FileCheck %s
+; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
 
 define i32 @f6(i32 %a) {
 ; CHECK:f6
-; CHECK: movw r0, #:lower16:65537123
-; CHECK: movt r0, #:upper16:65537123
+; CHECK: movw r0, #1123
+; CHECK: movt r0, #1000
     %tmp = add i32 0, 65537123
     ret i32 %tmp
 }

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/unaligned_load_store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/unaligned_load_store.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/unaligned_load_store.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/unaligned_load_store.ll Tue Oct 26 19:48:03 2010
@@ -1,5 +1,6 @@
 ; RUN: llc < %s -march=arm | FileCheck %s -check-prefix=GENERIC
 ; RUN: llc < %s -mtriple=armv6-apple-darwin | FileCheck %s -check-prefix=DARWIN_V6
+; RUN: llc < %s -mtriple=armv6-apple-darwin -arm-strict-align | FileCheck %s -check-prefix=GENERIC
 ; RUN: llc < %s -mtriple=armv6-linux | FileCheck %s -check-prefix=GENERIC
 
 ; rdar://7113725

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/vaba.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/vaba.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/vaba.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/vaba.ll Tue Oct 26 19:48:03 2010
@@ -6,8 +6,9 @@
 	%tmp1 = load <8 x i8>* %A
 	%tmp2 = load <8 x i8>* %B
 	%tmp3 = load <8 x i8>* %C
-	%tmp4 = call <8 x i8> @llvm.arm.neon.vabas.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i8> %tmp3)
-	ret <8 x i8> %tmp4
+	%tmp4 = call <8 x i8> @llvm.arm.neon.vabds.v8i8(<8 x i8> %tmp2, <8 x i8> %tmp3)
+	%tmp5 = add <8 x i8> %tmp1, %tmp4
+	ret <8 x i8> %tmp5
 }
 
 define <4 x i16> @vabas16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
@@ -16,8 +17,9 @@
 	%tmp1 = load <4 x i16>* %A
 	%tmp2 = load <4 x i16>* %B
 	%tmp3 = load <4 x i16>* %C
-	%tmp4 = call <4 x i16> @llvm.arm.neon.vabas.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i16> %tmp3)
-	ret <4 x i16> %tmp4
+	%tmp4 = call <4 x i16> @llvm.arm.neon.vabds.v4i16(<4 x i16> %tmp2, <4 x i16> %tmp3)
+	%tmp5 = add <4 x i16> %tmp1, %tmp4
+	ret <4 x i16> %tmp5
 }
 
 define <2 x i32> @vabas32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
@@ -26,8 +28,9 @@
 	%tmp1 = load <2 x i32>* %A
 	%tmp2 = load <2 x i32>* %B
 	%tmp3 = load <2 x i32>* %C
-	%tmp4 = call <2 x i32> @llvm.arm.neon.vabas.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> %tmp3)
-	ret <2 x i32> %tmp4
+	%tmp4 = call <2 x i32> @llvm.arm.neon.vabds.v2i32(<2 x i32> %tmp2, <2 x i32> %tmp3)
+	%tmp5 = add <2 x i32> %tmp1, %tmp4
+	ret <2 x i32> %tmp5
 }
 
 define <8 x i8> @vabau8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
@@ -36,8 +39,9 @@
 	%tmp1 = load <8 x i8>* %A
 	%tmp2 = load <8 x i8>* %B
 	%tmp3 = load <8 x i8>* %C
-	%tmp4 = call <8 x i8> @llvm.arm.neon.vabau.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i8> %tmp3)
-	ret <8 x i8> %tmp4
+	%tmp4 = call <8 x i8> @llvm.arm.neon.vabdu.v8i8(<8 x i8> %tmp2, <8 x i8> %tmp3)
+	%tmp5 = add <8 x i8> %tmp1, %tmp4
+	ret <8 x i8> %tmp5
 }
 
 define <4 x i16> @vabau16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
@@ -46,8 +50,9 @@
 	%tmp1 = load <4 x i16>* %A
 	%tmp2 = load <4 x i16>* %B
 	%tmp3 = load <4 x i16>* %C
-	%tmp4 = call <4 x i16> @llvm.arm.neon.vabau.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i16> %tmp3)
-	ret <4 x i16> %tmp4
+	%tmp4 = call <4 x i16> @llvm.arm.neon.vabdu.v4i16(<4 x i16> %tmp2, <4 x i16> %tmp3)
+	%tmp5 = add <4 x i16> %tmp1, %tmp4
+	ret <4 x i16> %tmp5
 }
 
 define <2 x i32> @vabau32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
@@ -56,8 +61,9 @@
 	%tmp1 = load <2 x i32>* %A
 	%tmp2 = load <2 x i32>* %B
 	%tmp3 = load <2 x i32>* %C
-	%tmp4 = call <2 x i32> @llvm.arm.neon.vabau.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> %tmp3)
-	ret <2 x i32> %tmp4
+	%tmp4 = call <2 x i32> @llvm.arm.neon.vabdu.v2i32(<2 x i32> %tmp2, <2 x i32> %tmp3)
+	%tmp5 = add <2 x i32> %tmp1, %tmp4
+	ret <2 x i32> %tmp5
 }
 
 define <16 x i8> @vabaQs8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind {
@@ -66,8 +72,9 @@
 	%tmp1 = load <16 x i8>* %A
 	%tmp2 = load <16 x i8>* %B
 	%tmp3 = load <16 x i8>* %C
-	%tmp4 = call <16 x i8> @llvm.arm.neon.vabas.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i8> %tmp3)
-	ret <16 x i8> %tmp4
+	%tmp4 = call <16 x i8> @llvm.arm.neon.vabds.v16i8(<16 x i8> %tmp2, <16 x i8> %tmp3)
+	%tmp5 = add <16 x i8> %tmp1, %tmp4
+	ret <16 x i8> %tmp5
 }
 
 define <8 x i16> @vabaQs16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind {
@@ -76,8 +83,9 @@
 	%tmp1 = load <8 x i16>* %A
 	%tmp2 = load <8 x i16>* %B
 	%tmp3 = load <8 x i16>* %C
-	%tmp4 = call <8 x i16> @llvm.arm.neon.vabas.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i16> %tmp3)
-	ret <8 x i16> %tmp4
+	%tmp4 = call <8 x i16> @llvm.arm.neon.vabds.v8i16(<8 x i16> %tmp2, <8 x i16> %tmp3)
+	%tmp5 = add <8 x i16> %tmp1, %tmp4
+	ret <8 x i16> %tmp5
 }
 
 define <4 x i32> @vabaQs32(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind {
@@ -86,8 +94,9 @@
 	%tmp1 = load <4 x i32>* %A
 	%tmp2 = load <4 x i32>* %B
 	%tmp3 = load <4 x i32>* %C
-	%tmp4 = call <4 x i32> @llvm.arm.neon.vabas.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> %tmp3)
-	ret <4 x i32> %tmp4
+	%tmp4 = call <4 x i32> @llvm.arm.neon.vabds.v4i32(<4 x i32> %tmp2, <4 x i32> %tmp3)
+	%tmp5 = add <4 x i32> %tmp1, %tmp4
+	ret <4 x i32> %tmp5
 }
 
 define <16 x i8> @vabaQu8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind {
@@ -96,8 +105,9 @@
 	%tmp1 = load <16 x i8>* %A
 	%tmp2 = load <16 x i8>* %B
 	%tmp3 = load <16 x i8>* %C
-	%tmp4 = call <16 x i8> @llvm.arm.neon.vabau.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i8> %tmp3)
-	ret <16 x i8> %tmp4
+	%tmp4 = call <16 x i8> @llvm.arm.neon.vabdu.v16i8(<16 x i8> %tmp2, <16 x i8> %tmp3)
+	%tmp5 = add <16 x i8> %tmp1, %tmp4
+	ret <16 x i8> %tmp5
 }
 
 define <8 x i16> @vabaQu16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind {
@@ -106,8 +116,9 @@
 	%tmp1 = load <8 x i16>* %A
 	%tmp2 = load <8 x i16>* %B
 	%tmp3 = load <8 x i16>* %C
-	%tmp4 = call <8 x i16> @llvm.arm.neon.vabau.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i16> %tmp3)
-	ret <8 x i16> %tmp4
+	%tmp4 = call <8 x i16> @llvm.arm.neon.vabdu.v8i16(<8 x i16> %tmp2, <8 x i16> %tmp3)
+	%tmp5 = add <8 x i16> %tmp1, %tmp4
+	ret <8 x i16> %tmp5
 }
 
 define <4 x i32> @vabaQu32(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind {
@@ -116,25 +127,26 @@
 	%tmp1 = load <4 x i32>* %A
 	%tmp2 = load <4 x i32>* %B
 	%tmp3 = load <4 x i32>* %C
-	%tmp4 = call <4 x i32> @llvm.arm.neon.vabau.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> %tmp3)
-	ret <4 x i32> %tmp4
-}
-
-declare <8 x i8>  @llvm.arm.neon.vabas.v8i8(<8 x i8>, <8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vabas.v4i16(<4 x i16>, <4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vabas.v2i32(<2 x i32>, <2 x i32>, <2 x i32>) nounwind readnone
-
-declare <8 x i8>  @llvm.arm.neon.vabau.v8i8(<8 x i8>, <8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vabau.v4i16(<4 x i16>, <4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vabau.v2i32(<2 x i32>, <2 x i32>, <2 x i32>) nounwind readnone
-
-declare <16 x i8> @llvm.arm.neon.vabas.v16i8(<16 x i8>, <16 x i8>, <16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.arm.neon.vabas.v8i16(<8 x i16>, <8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vabas.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) nounwind readnone
-
-declare <16 x i8> @llvm.arm.neon.vabau.v16i8(<16 x i8>, <16 x i8>, <16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.arm.neon.vabau.v8i16(<8 x i16>, <8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vabau.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) nounwind readnone
+	%tmp4 = call <4 x i32> @llvm.arm.neon.vabdu.v4i32(<4 x i32> %tmp2, <4 x i32> %tmp3)
+	%tmp5 = add <4 x i32> %tmp1, %tmp4
+	ret <4 x i32> %tmp5
+}
+
+declare <8 x i8>  @llvm.arm.neon.vabds.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
+declare <4 x i16> @llvm.arm.neon.vabds.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+declare <2 x i32> @llvm.arm.neon.vabds.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+
+declare <8 x i8>  @llvm.arm.neon.vabdu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
+declare <4 x i16> @llvm.arm.neon.vabdu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
+declare <2 x i32> @llvm.arm.neon.vabdu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
+
+declare <16 x i8> @llvm.arm.neon.vabds.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
+declare <8 x i16> @llvm.arm.neon.vabds.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
+declare <4 x i32> @llvm.arm.neon.vabds.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+
+declare <16 x i8> @llvm.arm.neon.vabdu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
+declare <8 x i16> @llvm.arm.neon.vabdu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
+declare <4 x i32> @llvm.arm.neon.vabdu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
 
 define <8 x i16> @vabals8(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
 ;CHECK: vabals8:
@@ -142,8 +154,10 @@
 	%tmp1 = load <8 x i16>* %A
 	%tmp2 = load <8 x i8>* %B
 	%tmp3 = load <8 x i8>* %C
-	%tmp4 = call <8 x i16> @llvm.arm.neon.vabals.v8i16(<8 x i16> %tmp1, <8 x i8> %tmp2, <8 x i8> %tmp3)
-	ret <8 x i16> %tmp4
+	%tmp4 = call <8 x i8> @llvm.arm.neon.vabds.v8i8(<8 x i8> %tmp2, <8 x i8> %tmp3)
+	%tmp5 = zext <8 x i8> %tmp4 to <8 x i16>
+	%tmp6 = add <8 x i16> %tmp1, %tmp5
+	ret <8 x i16> %tmp6
 }
 
 define <4 x i32> @vabals16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
@@ -152,8 +166,10 @@
 	%tmp1 = load <4 x i32>* %A
 	%tmp2 = load <4 x i16>* %B
 	%tmp3 = load <4 x i16>* %C
-	%tmp4 = call <4 x i32> @llvm.arm.neon.vabals.v4i32(<4 x i32> %tmp1, <4 x i16> %tmp2, <4 x i16> %tmp3)
-	ret <4 x i32> %tmp4
+	%tmp4 = call <4 x i16> @llvm.arm.neon.vabds.v4i16(<4 x i16> %tmp2, <4 x i16> %tmp3)
+	%tmp5 = zext <4 x i16> %tmp4 to <4 x i32>
+	%tmp6 = add <4 x i32> %tmp1, %tmp5
+	ret <4 x i32> %tmp6
 }
 
 define <2 x i64> @vabals32(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
@@ -162,8 +178,10 @@
 	%tmp1 = load <2 x i64>* %A
 	%tmp2 = load <2 x i32>* %B
 	%tmp3 = load <2 x i32>* %C
-	%tmp4 = call <2 x i64> @llvm.arm.neon.vabals.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2, <2 x i32> %tmp3)
-	ret <2 x i64> %tmp4
+	%tmp4 = call <2 x i32> @llvm.arm.neon.vabds.v2i32(<2 x i32> %tmp2, <2 x i32> %tmp3)
+	%tmp5 = zext <2 x i32> %tmp4 to <2 x i64>
+	%tmp6 = add <2 x i64> %tmp1, %tmp5
+	ret <2 x i64> %tmp6
 }
 
 define <8 x i16> @vabalu8(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
@@ -172,8 +190,10 @@
 	%tmp1 = load <8 x i16>* %A
 	%tmp2 = load <8 x i8>* %B
 	%tmp3 = load <8 x i8>* %C
-	%tmp4 = call <8 x i16> @llvm.arm.neon.vabalu.v8i16(<8 x i16> %tmp1, <8 x i8> %tmp2, <8 x i8> %tmp3)
-	ret <8 x i16> %tmp4
+	%tmp4 = call <8 x i8> @llvm.arm.neon.vabdu.v8i8(<8 x i8> %tmp2, <8 x i8> %tmp3)
+	%tmp5 = zext <8 x i8> %tmp4 to <8 x i16>
+	%tmp6 = add <8 x i16> %tmp1, %tmp5
+	ret <8 x i16> %tmp6
 }
 
 define <4 x i32> @vabalu16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
@@ -182,8 +202,10 @@
 	%tmp1 = load <4 x i32>* %A
 	%tmp2 = load <4 x i16>* %B
 	%tmp3 = load <4 x i16>* %C
-	%tmp4 = call <4 x i32> @llvm.arm.neon.vabalu.v4i32(<4 x i32> %tmp1, <4 x i16> %tmp2, <4 x i16> %tmp3)
-	ret <4 x i32> %tmp4
+	%tmp4 = call <4 x i16> @llvm.arm.neon.vabdu.v4i16(<4 x i16> %tmp2, <4 x i16> %tmp3)
+	%tmp5 = zext <4 x i16> %tmp4 to <4 x i32>
+	%tmp6 = add <4 x i32> %tmp1, %tmp5
+	ret <4 x i32> %tmp6
 }
 
 define <2 x i64> @vabalu32(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
@@ -192,14 +214,8 @@
 	%tmp1 = load <2 x i64>* %A
 	%tmp2 = load <2 x i32>* %B
 	%tmp3 = load <2 x i32>* %C
-	%tmp4 = call <2 x i64> @llvm.arm.neon.vabalu.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2, <2 x i32> %tmp3)
-	ret <2 x i64> %tmp4
+	%tmp4 = call <2 x i32> @llvm.arm.neon.vabdu.v2i32(<2 x i32> %tmp2, <2 x i32> %tmp3)
+	%tmp5 = zext <2 x i32> %tmp4 to <2 x i64>
+	%tmp6 = add <2 x i64> %tmp1, %tmp5
+	ret <2 x i64> %tmp6
 }
-
-declare <8 x i16> @llvm.arm.neon.vabals.v8i16(<8 x i16>, <8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vabals.v4i32(<4 x i32>, <4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vabals.v2i64(<2 x i64>, <2 x i32>, <2 x i32>) nounwind readnone
-
-declare <8 x i16> @llvm.arm.neon.vabalu.v8i16(<8 x i16>, <8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vabalu.v4i32(<4 x i32>, <4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vabalu.v2i64(<2 x i64>, <2 x i32>, <2 x i32>) nounwind readnone

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/vabd.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/vabd.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/vabd.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/vabd.ll Tue Oct 26 19:48:03 2010
@@ -151,8 +151,9 @@
 ;CHECK: vabdl.s8
 	%tmp1 = load <8 x i8>* %A
 	%tmp2 = load <8 x i8>* %B
-	%tmp3 = call <8 x i16> @llvm.arm.neon.vabdls.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
-	ret <8 x i16> %tmp3
+	%tmp3 = call <8 x i8> @llvm.arm.neon.vabds.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+	%tmp4 = zext <8 x i8> %tmp3 to <8 x i16>
+	ret <8 x i16> %tmp4
 }
 
 define <4 x i32> @vabdls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
@@ -160,8 +161,9 @@
 ;CHECK: vabdl.s16
 	%tmp1 = load <4 x i16>* %A
 	%tmp2 = load <4 x i16>* %B
-	%tmp3 = call <4 x i32> @llvm.arm.neon.vabdls.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
-	ret <4 x i32> %tmp3
+	%tmp3 = call <4 x i16> @llvm.arm.neon.vabds.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+	%tmp4 = zext <4 x i16> %tmp3 to <4 x i32>
+	ret <4 x i32> %tmp4
 }
 
 define <2 x i64> @vabdls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
@@ -169,8 +171,9 @@
 ;CHECK: vabdl.s32
 	%tmp1 = load <2 x i32>* %A
 	%tmp2 = load <2 x i32>* %B
-	%tmp3 = call <2 x i64> @llvm.arm.neon.vabdls.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
-	ret <2 x i64> %tmp3
+	%tmp3 = call <2 x i32> @llvm.arm.neon.vabds.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+	%tmp4 = zext <2 x i32> %tmp3 to <2 x i64>
+	ret <2 x i64> %tmp4
 }
 
 define <8 x i16> @vabdlu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
@@ -178,8 +181,9 @@
 ;CHECK: vabdl.u8
 	%tmp1 = load <8 x i8>* %A
 	%tmp2 = load <8 x i8>* %B
-	%tmp3 = call <8 x i16> @llvm.arm.neon.vabdlu.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
-	ret <8 x i16> %tmp3
+	%tmp3 = call <8 x i8> @llvm.arm.neon.vabdu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
+	%tmp4 = zext <8 x i8> %tmp3 to <8 x i16>
+	ret <8 x i16> %tmp4
 }
 
 define <4 x i32> @vabdlu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
@@ -187,8 +191,9 @@
 ;CHECK: vabdl.u16
 	%tmp1 = load <4 x i16>* %A
 	%tmp2 = load <4 x i16>* %B
-	%tmp3 = call <4 x i32> @llvm.arm.neon.vabdlu.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
-	ret <4 x i32> %tmp3
+	%tmp3 = call <4 x i16> @llvm.arm.neon.vabdu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
+	%tmp4 = zext <4 x i16> %tmp3 to <4 x i32>
+	ret <4 x i32> %tmp4
 }
 
 define <2 x i64> @vabdlu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
@@ -196,14 +201,7 @@
 ;CHECK: vabdl.u32
 	%tmp1 = load <2 x i32>* %A
 	%tmp2 = load <2 x i32>* %B
-	%tmp3 = call <2 x i64> @llvm.arm.neon.vabdlu.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
-	ret <2 x i64> %tmp3
+	%tmp3 = call <2 x i32> @llvm.arm.neon.vabdu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
+	%tmp4 = zext <2 x i32> %tmp3 to <2 x i64>
+	ret <2 x i64> %tmp4
 }
-
-declare <8 x i16> @llvm.arm.neon.vabdls.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vabdls.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vabdls.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
-
-declare <8 x i16> @llvm.arm.neon.vabdlu.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vabdlu.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vabdlu.v2i64(<2 x i32>, <2 x i32>) nounwind readnone

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/vadd.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/vadd.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/vadd.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/vadd.ll Tue Oct 26 19:48:03 2010
@@ -157,8 +157,10 @@
 ;CHECK: vaddl.s8
 	%tmp1 = load <8 x i8>* %A
 	%tmp2 = load <8 x i8>* %B
-	%tmp3 = call <8 x i16> @llvm.arm.neon.vaddls.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
-	ret <8 x i16> %tmp3
+	%tmp3 = sext <8 x i8> %tmp1 to <8 x i16>
+	%tmp4 = sext <8 x i8> %tmp2 to <8 x i16>
+	%tmp5 = add <8 x i16> %tmp3, %tmp4
+	ret <8 x i16> %tmp5
 }
 
 define <4 x i32> @vaddls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
@@ -166,8 +168,10 @@
 ;CHECK: vaddl.s16
 	%tmp1 = load <4 x i16>* %A
 	%tmp2 = load <4 x i16>* %B
-	%tmp3 = call <4 x i32> @llvm.arm.neon.vaddls.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
-	ret <4 x i32> %tmp3
+	%tmp3 = sext <4 x i16> %tmp1 to <4 x i32>
+	%tmp4 = sext <4 x i16> %tmp2 to <4 x i32>
+	%tmp5 = add <4 x i32> %tmp3, %tmp4
+	ret <4 x i32> %tmp5
 }
 
 define <2 x i64> @vaddls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
@@ -175,8 +179,10 @@
 ;CHECK: vaddl.s32
 	%tmp1 = load <2 x i32>* %A
 	%tmp2 = load <2 x i32>* %B
-	%tmp3 = call <2 x i64> @llvm.arm.neon.vaddls.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
-	ret <2 x i64> %tmp3
+	%tmp3 = sext <2 x i32> %tmp1 to <2 x i64>
+	%tmp4 = sext <2 x i32> %tmp2 to <2 x i64>
+	%tmp5 = add <2 x i64> %tmp3, %tmp4
+	ret <2 x i64> %tmp5
 }
 
 define <8 x i16> @vaddlu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
@@ -184,8 +190,10 @@
 ;CHECK: vaddl.u8
 	%tmp1 = load <8 x i8>* %A
 	%tmp2 = load <8 x i8>* %B
-	%tmp3 = call <8 x i16> @llvm.arm.neon.vaddlu.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
-	ret <8 x i16> %tmp3
+	%tmp3 = zext <8 x i8> %tmp1 to <8 x i16>
+	%tmp4 = zext <8 x i8> %tmp2 to <8 x i16>
+	%tmp5 = add <8 x i16> %tmp3, %tmp4
+	ret <8 x i16> %tmp5
 }
 
 define <4 x i32> @vaddlu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
@@ -193,8 +201,10 @@
 ;CHECK: vaddl.u16
 	%tmp1 = load <4 x i16>* %A
 	%tmp2 = load <4 x i16>* %B
-	%tmp3 = call <4 x i32> @llvm.arm.neon.vaddlu.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
-	ret <4 x i32> %tmp3
+	%tmp3 = zext <4 x i16> %tmp1 to <4 x i32>
+	%tmp4 = zext <4 x i16> %tmp2 to <4 x i32>
+	%tmp5 = add <4 x i32> %tmp3, %tmp4
+	ret <4 x i32> %tmp5
 }
 
 define <2 x i64> @vaddlu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
@@ -202,25 +212,20 @@
 ;CHECK: vaddl.u32
 	%tmp1 = load <2 x i32>* %A
 	%tmp2 = load <2 x i32>* %B
-	%tmp3 = call <2 x i64> @llvm.arm.neon.vaddlu.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
-	ret <2 x i64> %tmp3
+	%tmp3 = zext <2 x i32> %tmp1 to <2 x i64>
+	%tmp4 = zext <2 x i32> %tmp2 to <2 x i64>
+	%tmp5 = add <2 x i64> %tmp3, %tmp4
+	ret <2 x i64> %tmp5
 }
 
-declare <8 x i16> @llvm.arm.neon.vaddls.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vaddls.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vaddls.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
-
-declare <8 x i16> @llvm.arm.neon.vaddlu.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vaddlu.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vaddlu.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
-
 define <8 x i16> @vaddws8(<8 x i16>* %A, <8 x i8>* %B) nounwind {
 ;CHECK: vaddws8:
 ;CHECK: vaddw.s8
 	%tmp1 = load <8 x i16>* %A
 	%tmp2 = load <8 x i8>* %B
-	%tmp3 = call <8 x i16> @llvm.arm.neon.vaddws.v8i16(<8 x i16> %tmp1, <8 x i8> %tmp2)
-	ret <8 x i16> %tmp3
+	%tmp3 = sext <8 x i8> %tmp2 to <8 x i16>
+	%tmp4 = add <8 x i16> %tmp1, %tmp3
+	ret <8 x i16> %tmp4
 }
 
 define <4 x i32> @vaddws16(<4 x i32>* %A, <4 x i16>* %B) nounwind {
@@ -228,8 +233,9 @@
 ;CHECK: vaddw.s16
 	%tmp1 = load <4 x i32>* %A
 	%tmp2 = load <4 x i16>* %B
-	%tmp3 = call <4 x i32> @llvm.arm.neon.vaddws.v4i32(<4 x i32> %tmp1, <4 x i16> %tmp2)
-	ret <4 x i32> %tmp3
+	%tmp3 = sext <4 x i16> %tmp2 to <4 x i32>
+	%tmp4 = add <4 x i32> %tmp1, %tmp3
+	ret <4 x i32> %tmp4
 }
 
 define <2 x i64> @vaddws32(<2 x i64>* %A, <2 x i32>* %B) nounwind {
@@ -237,8 +243,9 @@
 ;CHECK: vaddw.s32
 	%tmp1 = load <2 x i64>* %A
 	%tmp2 = load <2 x i32>* %B
-	%tmp3 = call <2 x i64> @llvm.arm.neon.vaddws.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2)
-	ret <2 x i64> %tmp3
+	%tmp3 = sext <2 x i32> %tmp2 to <2 x i64>
+	%tmp4 = add <2 x i64> %tmp1, %tmp3
+	ret <2 x i64> %tmp4
 }
 
 define <8 x i16> @vaddwu8(<8 x i16>* %A, <8 x i8>* %B) nounwind {
@@ -246,8 +253,9 @@
 ;CHECK: vaddw.u8
 	%tmp1 = load <8 x i16>* %A
 	%tmp2 = load <8 x i8>* %B
-	%tmp3 = call <8 x i16> @llvm.arm.neon.vaddwu.v8i16(<8 x i16> %tmp1, <8 x i8> %tmp2)
-	ret <8 x i16> %tmp3
+	%tmp3 = zext <8 x i8> %tmp2 to <8 x i16>
+	%tmp4 = add <8 x i16> %tmp1, %tmp3
+	ret <8 x i16> %tmp4
 }
 
 define <4 x i32> @vaddwu16(<4 x i32>* %A, <4 x i16>* %B) nounwind {
@@ -255,8 +263,9 @@
 ;CHECK: vaddw.u16
 	%tmp1 = load <4 x i32>* %A
 	%tmp2 = load <4 x i16>* %B
-	%tmp3 = call <4 x i32> @llvm.arm.neon.vaddwu.v4i32(<4 x i32> %tmp1, <4 x i16> %tmp2)
-	ret <4 x i32> %tmp3
+	%tmp3 = zext <4 x i16> %tmp2 to <4 x i32>
+	%tmp4 = add <4 x i32> %tmp1, %tmp3
+	ret <4 x i32> %tmp4
 }
 
 define <2 x i64> @vaddwu32(<2 x i64>* %A, <2 x i32>* %B) nounwind {
@@ -264,14 +273,7 @@
 ;CHECK: vaddw.u32
 	%tmp1 = load <2 x i64>* %A
 	%tmp2 = load <2 x i32>* %B
-	%tmp3 = call <2 x i64> @llvm.arm.neon.vaddwu.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2)
-	ret <2 x i64> %tmp3
+	%tmp3 = zext <2 x i32> %tmp2 to <2 x i64>
+	%tmp4 = add <2 x i64> %tmp1, %tmp3
+	ret <2 x i64> %tmp4
 }
-
-declare <8 x i16> @llvm.arm.neon.vaddws.v8i16(<8 x i16>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vaddws.v4i32(<4 x i32>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vaddws.v2i64(<2 x i64>, <2 x i32>) nounwind readnone
-
-declare <8 x i16> @llvm.arm.neon.vaddwu.v8i16(<8 x i16>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vaddwu.v4i32(<4 x i32>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vaddwu.v2i64(<2 x i64>, <2 x i32>) nounwind readnone

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/vcgt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/vcgt.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/vcgt.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/vcgt.ll Tue Oct 26 19:48:03 2010
@@ -161,9 +161,9 @@
 ; rdar://7923010
 define <4 x i32> @vcgt_zext(<4 x float>* %A, <4 x float>* %B) nounwind {
 ;CHECK: vcgt_zext:
-;CHECK: vcgt.f32 q0
-;CHECK: vmov.i32 q1, #0x1
-;CHECK: vand q0, q0, q1
+;CHECK: vcgt.f32 q8
+;CHECK: vmov.i32 q9, #0x1
+;CHECK: vand q8, q8, q9
 	%tmp1 = load <4 x float>* %A
 	%tmp2 = load <4 x float>* %B
 	%tmp3 = fcmp ogt <4 x float> %tmp1, %tmp2

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/vext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/vext.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/vext.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/vext.ll Tue Oct 26 19:48:03 2010
@@ -54,3 +54,23 @@
 	ret <4 x i32> %tmp3
 }
 
+; Undef shuffle indices should not prevent matching to VEXT:
+
+define <8 x i8> @test_vextd_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK: test_vextd_undef:
+;CHECK: vext
+	%tmp1 = load <8 x i8>* %A
+	%tmp2 = load <8 x i8>* %B
+	%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 3, i32 undef, i32 undef, i32 6, i32 7, i32 8, i32 9, i32 10>
+	ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @test_vextRq_undef(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK: test_vextRq_undef:
+;CHECK: vext
+	%tmp1 = load <16 x i8>* %A
+	%tmp2 = load <16 x i8>* %B
+	%tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 23, i32 24, i32 25, i32 26, i32 undef, i32 undef, i32 29, i32 30, i32 31, i32 0, i32 1, i32 2, i32 3, i32 4, i32 undef, i32 6>
+	ret <16 x i8> %tmp3
+}
+

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/vget_lane.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/vget_lane.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/vget_lane.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/vget_lane.ll Tue Oct 26 19:48:03 2010
@@ -96,7 +96,7 @@
 
 define arm_aapcs_vfpcc void @test_vget_laneu16() nounwind {
 entry:
-; CHECK: vmov.u16 r0, d0[1]
+; CHECK: vmov.u16 r0, d{{.*}}[1]
   %arg0_uint16x4_t = alloca <4 x i16>             ; <<4 x i16>*> [#uses=1]
   %out_uint16_t = alloca i16                      ; <i16*> [#uses=1]
   %"alloca point" = bitcast i32 0 to i32          ; <i32> [#uses=0]
@@ -111,7 +111,7 @@
 
 define arm_aapcs_vfpcc void @test_vget_laneu8() nounwind {
 entry:
-; CHECK: vmov.u8 r0, d0[1]
+; CHECK: vmov.u8 r0, d{{.*}}[1]
   %arg0_uint8x8_t = alloca <8 x i8>               ; <<8 x i8>*> [#uses=1]
   %out_uint8_t = alloca i8                        ; <i8*> [#uses=1]
   %"alloca point" = bitcast i32 0 to i32          ; <i32> [#uses=0]
@@ -126,7 +126,7 @@
 
 define arm_aapcs_vfpcc void @test_vgetQ_laneu16() nounwind {
 entry:
-; CHECK: vmov.u16 r0, d0[1]
+; CHECK: vmov.u16 r0, d{{.*}}[1]
   %arg0_uint16x8_t = alloca <8 x i16>             ; <<8 x i16>*> [#uses=1]
   %out_uint16_t = alloca i16                      ; <i16*> [#uses=1]
   %"alloca point" = bitcast i32 0 to i32          ; <i32> [#uses=0]
@@ -141,7 +141,7 @@
 
 define arm_aapcs_vfpcc void @test_vgetQ_laneu8() nounwind {
 entry:
-; CHECK: vmov.u8 r0, d0[1]
+; CHECK: vmov.u8 r0, d{{.*}}[1]
   %arg0_uint8x16_t = alloca <16 x i8>             ; <<16 x i8>*> [#uses=1]
   %out_uint8_t = alloca i8                        ; <i8*> [#uses=1]
   %"alloca point" = bitcast i32 0 to i32          ; <i32> [#uses=0]

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/vld1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/vld1.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/vld1.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/vld1.ll Tue Oct 26 19:48:03 2010
@@ -2,8 +2,9 @@
 
 define <8 x i8> @vld1i8(i8* %A) nounwind {
 ;CHECK: vld1i8:
-;CHECK: vld1.8
-	%tmp1 = call <8 x i8> @llvm.arm.neon.vld1.v8i8(i8* %A)
+;Check the alignment value.  Max for this instruction is 64 bits:
+;CHECK: vld1.8 {d16}, [r0, :64]
+	%tmp1 = call <8 x i8> @llvm.arm.neon.vld1.v8i8(i8* %A, i32 16)
 	ret <8 x i8> %tmp1
 }
 
@@ -11,7 +12,7 @@
 ;CHECK: vld1i16:
 ;CHECK: vld1.16
 	%tmp0 = bitcast i16* %A to i8*
-	%tmp1 = call <4 x i16> @llvm.arm.neon.vld1.v4i16(i8* %tmp0)
+	%tmp1 = call <4 x i16> @llvm.arm.neon.vld1.v4i16(i8* %tmp0, i32 1)
 	ret <4 x i16> %tmp1
 }
 
@@ -19,7 +20,7 @@
 ;CHECK: vld1i32:
 ;CHECK: vld1.32
 	%tmp0 = bitcast i32* %A to i8*
-	%tmp1 = call <2 x i32> @llvm.arm.neon.vld1.v2i32(i8* %tmp0)
+	%tmp1 = call <2 x i32> @llvm.arm.neon.vld1.v2i32(i8* %tmp0, i32 1)
 	ret <2 x i32> %tmp1
 }
 
@@ -27,7 +28,7 @@
 ;CHECK: vld1f:
 ;CHECK: vld1.32
 	%tmp0 = bitcast float* %A to i8*
-	%tmp1 = call <2 x float> @llvm.arm.neon.vld1.v2f32(i8* %tmp0)
+	%tmp1 = call <2 x float> @llvm.arm.neon.vld1.v2f32(i8* %tmp0, i32 1)
 	ret <2 x float> %tmp1
 }
 
@@ -35,22 +36,24 @@
 ;CHECK: vld1i64:
 ;CHECK: vld1.64
 	%tmp0 = bitcast i64* %A to i8*
-	%tmp1 = call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %tmp0)
+	%tmp1 = call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %tmp0, i32 1)
 	ret <1 x i64> %tmp1
 }
 
 define <16 x i8> @vld1Qi8(i8* %A) nounwind {
 ;CHECK: vld1Qi8:
-;CHECK: vld1.8
-	%tmp1 = call <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* %A)
+;Check the alignment value.  Max for this instruction is 128 bits:
+;CHECK: vld1.8 {d16, d17}, [r0, :64]
+	%tmp1 = call <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* %A, i32 8)
 	ret <16 x i8> %tmp1
 }
 
 define <8 x i16> @vld1Qi16(i16* %A) nounwind {
 ;CHECK: vld1Qi16:
-;CHECK: vld1.16
+;Check the alignment value.  Max for this instruction is 128 bits:
+;CHECK: vld1.16 {d16, d17}, [r0, :128]
 	%tmp0 = bitcast i16* %A to i8*
-	%tmp1 = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %tmp0)
+	%tmp1 = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %tmp0, i32 32)
 	ret <8 x i16> %tmp1
 }
 
@@ -58,7 +61,7 @@
 ;CHECK: vld1Qi32:
 ;CHECK: vld1.32
 	%tmp0 = bitcast i32* %A to i8*
-	%tmp1 = call <4 x i32> @llvm.arm.neon.vld1.v4i32(i8* %tmp0)
+	%tmp1 = call <4 x i32> @llvm.arm.neon.vld1.v4i32(i8* %tmp0, i32 1)
 	ret <4 x i32> %tmp1
 }
 
@@ -66,7 +69,7 @@
 ;CHECK: vld1Qf:
 ;CHECK: vld1.32
 	%tmp0 = bitcast float* %A to i8*
-	%tmp1 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* %tmp0)
+	%tmp1 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* %tmp0, i32 1)
 	ret <4 x float> %tmp1
 }
 
@@ -74,18 +77,31 @@
 ;CHECK: vld1Qi64:
 ;CHECK: vld1.64
 	%tmp0 = bitcast i64* %A to i8*
-	%tmp1 = call <2 x i64> @llvm.arm.neon.vld1.v2i64(i8* %tmp0)
+	%tmp1 = call <2 x i64> @llvm.arm.neon.vld1.v2i64(i8* %tmp0, i32 1)
 	ret <2 x i64> %tmp1
 }
 
-declare <8 x i8>  @llvm.arm.neon.vld1.v8i8(i8*) nounwind readonly
-declare <4 x i16> @llvm.arm.neon.vld1.v4i16(i8*) nounwind readonly
-declare <2 x i32> @llvm.arm.neon.vld1.v2i32(i8*) nounwind readonly
-declare <2 x float> @llvm.arm.neon.vld1.v2f32(i8*) nounwind readonly
-declare <1 x i64> @llvm.arm.neon.vld1.v1i64(i8*) nounwind readonly
-
-declare <16 x i8> @llvm.arm.neon.vld1.v16i8(i8*) nounwind readonly
-declare <8 x i16> @llvm.arm.neon.vld1.v8i16(i8*) nounwind readonly
-declare <4 x i32> @llvm.arm.neon.vld1.v4i32(i8*) nounwind readonly
-declare <4 x float> @llvm.arm.neon.vld1.v4f32(i8*) nounwind readonly
-declare <2 x i64> @llvm.arm.neon.vld1.v2i64(i8*) nounwind readonly
+declare <8 x i8>  @llvm.arm.neon.vld1.v8i8(i8*, i32) nounwind readonly
+declare <4 x i16> @llvm.arm.neon.vld1.v4i16(i8*, i32) nounwind readonly
+declare <2 x i32> @llvm.arm.neon.vld1.v2i32(i8*, i32) nounwind readonly
+declare <2 x float> @llvm.arm.neon.vld1.v2f32(i8*, i32) nounwind readonly
+declare <1 x i64> @llvm.arm.neon.vld1.v1i64(i8*, i32) nounwind readonly
+
+declare <16 x i8> @llvm.arm.neon.vld1.v16i8(i8*, i32) nounwind readonly
+declare <8 x i16> @llvm.arm.neon.vld1.v8i16(i8*, i32) nounwind readonly
+declare <4 x i32> @llvm.arm.neon.vld1.v4i32(i8*, i32) nounwind readonly
+declare <4 x float> @llvm.arm.neon.vld1.v4f32(i8*, i32) nounwind readonly
+declare <2 x i64> @llvm.arm.neon.vld1.v2i64(i8*, i32) nounwind readonly
+
+; Radar 8355607
+; Do not crash if the vld1 result is not used.
+define void @unused_vld1_result() {
+entry:
+;CHECK: unused_vld1_result
+;CHECK: vld1.32
+  %0 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1)
+  call void @llvm.trap()
+  unreachable
+}
+
+declare void @llvm.trap() nounwind

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/vld2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/vld2.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/vld2.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/vld2.ll Tue Oct 26 19:48:03 2010
@@ -13,8 +13,9 @@
 
 define <8 x i8> @vld2i8(i8* %A) nounwind {
 ;CHECK: vld2i8:
-;CHECK: vld2.8
-	%tmp1 = call %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2.v8i8(i8* %A)
+;Check the alignment value.  Max for this instruction is 128 bits:
+;CHECK: vld2.8 {d16, d17}, [r0, :64]
+	%tmp1 = call %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2.v8i8(i8* %A, i32 8)
         %tmp2 = extractvalue %struct.__neon_int8x8x2_t %tmp1, 0
         %tmp3 = extractvalue %struct.__neon_int8x8x2_t %tmp1, 1
         %tmp4 = add <8 x i8> %tmp2, %tmp3
@@ -23,9 +24,10 @@
 
 define <4 x i16> @vld2i16(i16* %A) nounwind {
 ;CHECK: vld2i16:
-;CHECK: vld2.16
+;Check the alignment value.  Max for this instruction is 128 bits:
+;CHECK: vld2.16 {d16, d17}, [r0, :128]
 	%tmp0 = bitcast i16* %A to i8*
-	%tmp1 = call %struct.__neon_int16x4x2_t @llvm.arm.neon.vld2.v4i16(i8* %tmp0)
+	%tmp1 = call %struct.__neon_int16x4x2_t @llvm.arm.neon.vld2.v4i16(i8* %tmp0, i32 32)
         %tmp2 = extractvalue %struct.__neon_int16x4x2_t %tmp1, 0
         %tmp3 = extractvalue %struct.__neon_int16x4x2_t %tmp1, 1
         %tmp4 = add <4 x i16> %tmp2, %tmp3
@@ -36,7 +38,7 @@
 ;CHECK: vld2i32:
 ;CHECK: vld2.32
 	%tmp0 = bitcast i32* %A to i8*
-	%tmp1 = call %struct.__neon_int32x2x2_t @llvm.arm.neon.vld2.v2i32(i8* %tmp0)
+	%tmp1 = call %struct.__neon_int32x2x2_t @llvm.arm.neon.vld2.v2i32(i8* %tmp0, i32 1)
         %tmp2 = extractvalue %struct.__neon_int32x2x2_t %tmp1, 0
         %tmp3 = extractvalue %struct.__neon_int32x2x2_t %tmp1, 1
         %tmp4 = add <2 x i32> %tmp2, %tmp3
@@ -47,7 +49,7 @@
 ;CHECK: vld2f:
 ;CHECK: vld2.32
 	%tmp0 = bitcast float* %A to i8*
-	%tmp1 = call %struct.__neon_float32x2x2_t @llvm.arm.neon.vld2.v2f32(i8* %tmp0)
+	%tmp1 = call %struct.__neon_float32x2x2_t @llvm.arm.neon.vld2.v2f32(i8* %tmp0, i32 1)
         %tmp2 = extractvalue %struct.__neon_float32x2x2_t %tmp1, 0
         %tmp3 = extractvalue %struct.__neon_float32x2x2_t %tmp1, 1
         %tmp4 = fadd <2 x float> %tmp2, %tmp3
@@ -56,9 +58,10 @@
 
 define <1 x i64> @vld2i64(i64* %A) nounwind {
 ;CHECK: vld2i64:
-;CHECK: vld1.64
+;Check the alignment value.  Max for this instruction is 128 bits:
+;CHECK: vld1.64 {d16, d17}, [r0, :128]
 	%tmp0 = bitcast i64* %A to i8*
-	%tmp1 = call %struct.__neon_int64x1x2_t @llvm.arm.neon.vld2.v1i64(i8* %tmp0)
+	%tmp1 = call %struct.__neon_int64x1x2_t @llvm.arm.neon.vld2.v1i64(i8* %tmp0, i32 32)
         %tmp2 = extractvalue %struct.__neon_int64x1x2_t %tmp1, 0
         %tmp3 = extractvalue %struct.__neon_int64x1x2_t %tmp1, 1
         %tmp4 = add <1 x i64> %tmp2, %tmp3
@@ -67,8 +70,9 @@
 
 define <16 x i8> @vld2Qi8(i8* %A) nounwind {
 ;CHECK: vld2Qi8:
-;CHECK: vld2.8
-	%tmp1 = call %struct.__neon_int8x16x2_t @llvm.arm.neon.vld2.v16i8(i8* %A)
+;Check the alignment value.  Max for this instruction is 256 bits:
+;CHECK: vld2.8 {d16, d17, d18, d19}, [r0, :64]
+	%tmp1 = call %struct.__neon_int8x16x2_t @llvm.arm.neon.vld2.v16i8(i8* %A, i32 8)
         %tmp2 = extractvalue %struct.__neon_int8x16x2_t %tmp1, 0
         %tmp3 = extractvalue %struct.__neon_int8x16x2_t %tmp1, 1
         %tmp4 = add <16 x i8> %tmp2, %tmp3
@@ -77,9 +81,10 @@
 
 define <8 x i16> @vld2Qi16(i16* %A) nounwind {
 ;CHECK: vld2Qi16:
-;CHECK: vld2.16
+;Check the alignment value.  Max for this instruction is 256 bits:
+;CHECK: vld2.16 {d16, d17, d18, d19}, [r0, :128]
 	%tmp0 = bitcast i16* %A to i8*
-	%tmp1 = call %struct.__neon_int16x8x2_t @llvm.arm.neon.vld2.v8i16(i8* %tmp0)
+	%tmp1 = call %struct.__neon_int16x8x2_t @llvm.arm.neon.vld2.v8i16(i8* %tmp0, i32 16)
         %tmp2 = extractvalue %struct.__neon_int16x8x2_t %tmp1, 0
         %tmp3 = extractvalue %struct.__neon_int16x8x2_t %tmp1, 1
         %tmp4 = add <8 x i16> %tmp2, %tmp3
@@ -88,9 +93,10 @@
 
 define <4 x i32> @vld2Qi32(i32* %A) nounwind {
 ;CHECK: vld2Qi32:
-;CHECK: vld2.32
+;Check the alignment value.  Max for this instruction is 256 bits:
+;CHECK: vld2.32 {d16, d17, d18, d19}, [r0, :256]
 	%tmp0 = bitcast i32* %A to i8*
-	%tmp1 = call %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32(i8* %tmp0)
+	%tmp1 = call %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32(i8* %tmp0, i32 64)
         %tmp2 = extractvalue %struct.__neon_int32x4x2_t %tmp1, 0
         %tmp3 = extractvalue %struct.__neon_int32x4x2_t %tmp1, 1
         %tmp4 = add <4 x i32> %tmp2, %tmp3
@@ -101,20 +107,20 @@
 ;CHECK: vld2Qf:
 ;CHECK: vld2.32
 	%tmp0 = bitcast float* %A to i8*
-	%tmp1 = call %struct.__neon_float32x4x2_t @llvm.arm.neon.vld2.v4f32(i8* %tmp0)
+	%tmp1 = call %struct.__neon_float32x4x2_t @llvm.arm.neon.vld2.v4f32(i8* %tmp0, i32 1)
         %tmp2 = extractvalue %struct.__neon_float32x4x2_t %tmp1, 0
         %tmp3 = extractvalue %struct.__neon_float32x4x2_t %tmp1, 1
         %tmp4 = fadd <4 x float> %tmp2, %tmp3
 	ret <4 x float> %tmp4
 }
 
-declare %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2.v8i8(i8*) nounwind readonly
-declare %struct.__neon_int16x4x2_t @llvm.arm.neon.vld2.v4i16(i8*) nounwind readonly
-declare %struct.__neon_int32x2x2_t @llvm.arm.neon.vld2.v2i32(i8*) nounwind readonly
-declare %struct.__neon_float32x2x2_t @llvm.arm.neon.vld2.v2f32(i8*) nounwind readonly
-declare %struct.__neon_int64x1x2_t @llvm.arm.neon.vld2.v1i64(i8*) nounwind readonly
-
-declare %struct.__neon_int8x16x2_t @llvm.arm.neon.vld2.v16i8(i8*) nounwind readonly
-declare %struct.__neon_int16x8x2_t @llvm.arm.neon.vld2.v8i16(i8*) nounwind readonly
-declare %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32(i8*) nounwind readonly
-declare %struct.__neon_float32x4x2_t @llvm.arm.neon.vld2.v4f32(i8*) nounwind readonly
+declare %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2.v8i8(i8*, i32) nounwind readonly
+declare %struct.__neon_int16x4x2_t @llvm.arm.neon.vld2.v4i16(i8*, i32) nounwind readonly
+declare %struct.__neon_int32x2x2_t @llvm.arm.neon.vld2.v2i32(i8*, i32) nounwind readonly
+declare %struct.__neon_float32x2x2_t @llvm.arm.neon.vld2.v2f32(i8*, i32) nounwind readonly
+declare %struct.__neon_int64x1x2_t @llvm.arm.neon.vld2.v1i64(i8*, i32) nounwind readonly
+
+declare %struct.__neon_int8x16x2_t @llvm.arm.neon.vld2.v16i8(i8*, i32) nounwind readonly
+declare %struct.__neon_int16x8x2_t @llvm.arm.neon.vld2.v8i16(i8*, i32) nounwind readonly
+declare %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32(i8*, i32) nounwind readonly
+declare %struct.__neon_float32x4x2_t @llvm.arm.neon.vld2.v4f32(i8*, i32) nounwind readonly

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/vld3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/vld3.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/vld3.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/vld3.ll Tue Oct 26 19:48:03 2010
@@ -13,8 +13,9 @@
 
 define <8 x i8> @vld3i8(i8* %A) nounwind {
 ;CHECK: vld3i8:
-;CHECK: vld3.8
-	%tmp1 = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A)
+;Check the alignment value.  Max for this instruction is 64 bits:
+;CHECK: vld3.8 {d16, d17, d18}, [r0, :64]
+	%tmp1 = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A, i32 32)
         %tmp2 = extractvalue %struct.__neon_int8x8x3_t %tmp1, 0
         %tmp3 = extractvalue %struct.__neon_int8x8x3_t %tmp1, 2
         %tmp4 = add <8 x i8> %tmp2, %tmp3
@@ -25,7 +26,7 @@
 ;CHECK: vld3i16:
 ;CHECK: vld3.16
 	%tmp0 = bitcast i16* %A to i8*
-	%tmp1 = call %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3.v4i16(i8* %tmp0)
+	%tmp1 = call %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3.v4i16(i8* %tmp0, i32 1)
         %tmp2 = extractvalue %struct.__neon_int16x4x3_t %tmp1, 0
         %tmp3 = extractvalue %struct.__neon_int16x4x3_t %tmp1, 2
         %tmp4 = add <4 x i16> %tmp2, %tmp3
@@ -36,7 +37,7 @@
 ;CHECK: vld3i32:
 ;CHECK: vld3.32
 	%tmp0 = bitcast i32* %A to i8*
-	%tmp1 = call %struct.__neon_int32x2x3_t @llvm.arm.neon.vld3.v2i32(i8* %tmp0)
+	%tmp1 = call %struct.__neon_int32x2x3_t @llvm.arm.neon.vld3.v2i32(i8* %tmp0, i32 1)
         %tmp2 = extractvalue %struct.__neon_int32x2x3_t %tmp1, 0
         %tmp3 = extractvalue %struct.__neon_int32x2x3_t %tmp1, 2
         %tmp4 = add <2 x i32> %tmp2, %tmp3
@@ -47,7 +48,7 @@
 ;CHECK: vld3f:
 ;CHECK: vld3.32
 	%tmp0 = bitcast float* %A to i8*
-	%tmp1 = call %struct.__neon_float32x2x3_t @llvm.arm.neon.vld3.v2f32(i8* %tmp0)
+	%tmp1 = call %struct.__neon_float32x2x3_t @llvm.arm.neon.vld3.v2f32(i8* %tmp0, i32 1)
         %tmp2 = extractvalue %struct.__neon_float32x2x3_t %tmp1, 0
         %tmp3 = extractvalue %struct.__neon_float32x2x3_t %tmp1, 2
         %tmp4 = fadd <2 x float> %tmp2, %tmp3
@@ -56,9 +57,10 @@
 
 define <1 x i64> @vld3i64(i64* %A) nounwind {
 ;CHECK: vld3i64:
-;CHECK: vld1.64
+;Check the alignment value.  Max for this instruction is 64 bits:
+;CHECK: vld1.64 {d16, d17, d18}, [r0, :64]
 	%tmp0 = bitcast i64* %A to i8*
-	%tmp1 = call %struct.__neon_int64x1x3_t @llvm.arm.neon.vld3.v1i64(i8* %tmp0)
+	%tmp1 = call %struct.__neon_int64x1x3_t @llvm.arm.neon.vld3.v1i64(i8* %tmp0, i32 16)
         %tmp2 = extractvalue %struct.__neon_int64x1x3_t %tmp1, 0
         %tmp3 = extractvalue %struct.__neon_int64x1x3_t %tmp1, 2
         %tmp4 = add <1 x i64> %tmp2, %tmp3
@@ -67,9 +69,10 @@
 
 define <16 x i8> @vld3Qi8(i8* %A) nounwind {
 ;CHECK: vld3Qi8:
-;CHECK: vld3.8
-;CHECK: vld3.8
-	%tmp1 = call %struct.__neon_int8x16x3_t @llvm.arm.neon.vld3.v16i8(i8* %A)
+;Check the alignment value.  Max for this instruction is 64 bits:
+;CHECK: vld3.8 {d16, d18, d20}, [r0, :64]!
+;CHECK: vld3.8 {d17, d19, d21}, [r0, :64]
+	%tmp1 = call %struct.__neon_int8x16x3_t @llvm.arm.neon.vld3.v16i8(i8* %A, i32 32)
         %tmp2 = extractvalue %struct.__neon_int8x16x3_t %tmp1, 0
         %tmp3 = extractvalue %struct.__neon_int8x16x3_t %tmp1, 2
         %tmp4 = add <16 x i8> %tmp2, %tmp3
@@ -81,7 +84,7 @@
 ;CHECK: vld3.16
 ;CHECK: vld3.16
 	%tmp0 = bitcast i16* %A to i8*
-	%tmp1 = call %struct.__neon_int16x8x3_t @llvm.arm.neon.vld3.v8i16(i8* %tmp0)
+	%tmp1 = call %struct.__neon_int16x8x3_t @llvm.arm.neon.vld3.v8i16(i8* %tmp0, i32 1)
         %tmp2 = extractvalue %struct.__neon_int16x8x3_t %tmp1, 0
         %tmp3 = extractvalue %struct.__neon_int16x8x3_t %tmp1, 2
         %tmp4 = add <8 x i16> %tmp2, %tmp3
@@ -93,7 +96,7 @@
 ;CHECK: vld3.32
 ;CHECK: vld3.32
 	%tmp0 = bitcast i32* %A to i8*
-	%tmp1 = call %struct.__neon_int32x4x3_t @llvm.arm.neon.vld3.v4i32(i8* %tmp0)
+	%tmp1 = call %struct.__neon_int32x4x3_t @llvm.arm.neon.vld3.v4i32(i8* %tmp0, i32 1)
         %tmp2 = extractvalue %struct.__neon_int32x4x3_t %tmp1, 0
         %tmp3 = extractvalue %struct.__neon_int32x4x3_t %tmp1, 2
         %tmp4 = add <4 x i32> %tmp2, %tmp3
@@ -105,20 +108,20 @@
 ;CHECK: vld3.32
 ;CHECK: vld3.32
 	%tmp0 = bitcast float* %A to i8*
-	%tmp1 = call %struct.__neon_float32x4x3_t @llvm.arm.neon.vld3.v4f32(i8* %tmp0)
+	%tmp1 = call %struct.__neon_float32x4x3_t @llvm.arm.neon.vld3.v4f32(i8* %tmp0, i32 1)
         %tmp2 = extractvalue %struct.__neon_float32x4x3_t %tmp1, 0
         %tmp3 = extractvalue %struct.__neon_float32x4x3_t %tmp1, 2
         %tmp4 = fadd <4 x float> %tmp2, %tmp3
 	ret <4 x float> %tmp4
 }
 
-declare %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8*) nounwind readonly
-declare %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3.v4i16(i8*) nounwind readonly
-declare %struct.__neon_int32x2x3_t @llvm.arm.neon.vld3.v2i32(i8*) nounwind readonly
-declare %struct.__neon_float32x2x3_t @llvm.arm.neon.vld3.v2f32(i8*) nounwind readonly
-declare %struct.__neon_int64x1x3_t @llvm.arm.neon.vld3.v1i64(i8*) nounwind readonly
-
-declare %struct.__neon_int8x16x3_t @llvm.arm.neon.vld3.v16i8(i8*) nounwind readonly
-declare %struct.__neon_int16x8x3_t @llvm.arm.neon.vld3.v8i16(i8*) nounwind readonly
-declare %struct.__neon_int32x4x3_t @llvm.arm.neon.vld3.v4i32(i8*) nounwind readonly
-declare %struct.__neon_float32x4x3_t @llvm.arm.neon.vld3.v4f32(i8*) nounwind readonly
+declare %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8*, i32) nounwind readonly
+declare %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3.v4i16(i8*, i32) nounwind readonly
+declare %struct.__neon_int32x2x3_t @llvm.arm.neon.vld3.v2i32(i8*, i32) nounwind readonly
+declare %struct.__neon_float32x2x3_t @llvm.arm.neon.vld3.v2f32(i8*, i32) nounwind readonly
+declare %struct.__neon_int64x1x3_t @llvm.arm.neon.vld3.v1i64(i8*, i32) nounwind readonly
+
+declare %struct.__neon_int8x16x3_t @llvm.arm.neon.vld3.v16i8(i8*, i32) nounwind readonly
+declare %struct.__neon_int16x8x3_t @llvm.arm.neon.vld3.v8i16(i8*, i32) nounwind readonly
+declare %struct.__neon_int32x4x3_t @llvm.arm.neon.vld3.v4i32(i8*, i32) nounwind readonly
+declare %struct.__neon_float32x4x3_t @llvm.arm.neon.vld3.v4f32(i8*, i32) nounwind readonly

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/vld4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/vld4.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/vld4.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/vld4.ll Tue Oct 26 19:48:03 2010
@@ -13,8 +13,9 @@
 
 define <8 x i8> @vld4i8(i8* %A) nounwind {
 ;CHECK: vld4i8:
-;CHECK: vld4.8
-	%tmp1 = call %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4.v8i8(i8* %A)
+;Check the alignment value.  Max for this instruction is 256 bits:
+;CHECK: vld4.8 {d16, d17, d18, d19}, [r0, :64]
+	%tmp1 = call %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4.v8i8(i8* %A, i32 8)
         %tmp2 = extractvalue %struct.__neon_int8x8x4_t %tmp1, 0
         %tmp3 = extractvalue %struct.__neon_int8x8x4_t %tmp1, 2
         %tmp4 = add <8 x i8> %tmp2, %tmp3
@@ -23,9 +24,10 @@
 
 define <4 x i16> @vld4i16(i16* %A) nounwind {
 ;CHECK: vld4i16:
-;CHECK: vld4.16
+;Check the alignment value.  Max for this instruction is 256 bits:
+;CHECK: vld4.16 {d16, d17, d18, d19}, [r0, :128]
 	%tmp0 = bitcast i16* %A to i8*
-	%tmp1 = call %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4.v4i16(i8* %tmp0)
+	%tmp1 = call %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4.v4i16(i8* %tmp0, i32 16)
         %tmp2 = extractvalue %struct.__neon_int16x4x4_t %tmp1, 0
         %tmp3 = extractvalue %struct.__neon_int16x4x4_t %tmp1, 2
         %tmp4 = add <4 x i16> %tmp2, %tmp3
@@ -34,9 +36,10 @@
 
 define <2 x i32> @vld4i32(i32* %A) nounwind {
 ;CHECK: vld4i32:
-;CHECK: vld4.32
+;Check the alignment value.  Max for this instruction is 256 bits:
+;CHECK: vld4.32 {d16, d17, d18, d19}, [r0, :256]
 	%tmp0 = bitcast i32* %A to i8*
-	%tmp1 = call %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4.v2i32(i8* %tmp0)
+	%tmp1 = call %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4.v2i32(i8* %tmp0, i32 32)
         %tmp2 = extractvalue %struct.__neon_int32x2x4_t %tmp1, 0
         %tmp3 = extractvalue %struct.__neon_int32x2x4_t %tmp1, 2
         %tmp4 = add <2 x i32> %tmp2, %tmp3
@@ -47,7 +50,7 @@
 ;CHECK: vld4f:
 ;CHECK: vld4.32
 	%tmp0 = bitcast float* %A to i8*
-	%tmp1 = call %struct.__neon_float32x2x4_t @llvm.arm.neon.vld4.v2f32(i8* %tmp0)
+	%tmp1 = call %struct.__neon_float32x2x4_t @llvm.arm.neon.vld4.v2f32(i8* %tmp0, i32 1)
         %tmp2 = extractvalue %struct.__neon_float32x2x4_t %tmp1, 0
         %tmp3 = extractvalue %struct.__neon_float32x2x4_t %tmp1, 2
         %tmp4 = fadd <2 x float> %tmp2, %tmp3
@@ -56,9 +59,10 @@
 
 define <1 x i64> @vld4i64(i64* %A) nounwind {
 ;CHECK: vld4i64:
-;CHECK: vld1.64
+;Check the alignment value.  Max for this instruction is 256 bits:
+;CHECK: vld1.64 {d16, d17, d18, d19}, [r0, :256]
 	%tmp0 = bitcast i64* %A to i8*
-	%tmp1 = call %struct.__neon_int64x1x4_t @llvm.arm.neon.vld4.v1i64(i8* %tmp0)
+	%tmp1 = call %struct.__neon_int64x1x4_t @llvm.arm.neon.vld4.v1i64(i8* %tmp0, i32 64)
         %tmp2 = extractvalue %struct.__neon_int64x1x4_t %tmp1, 0
         %tmp3 = extractvalue %struct.__neon_int64x1x4_t %tmp1, 2
         %tmp4 = add <1 x i64> %tmp2, %tmp3
@@ -67,9 +71,10 @@
 
 define <16 x i8> @vld4Qi8(i8* %A) nounwind {
 ;CHECK: vld4Qi8:
-;CHECK: vld4.8
-;CHECK: vld4.8
-	%tmp1 = call %struct.__neon_int8x16x4_t @llvm.arm.neon.vld4.v16i8(i8* %A)
+;Check the alignment value.  Max for this instruction is 256 bits:
+;CHECK: vld4.8 {d16, d18, d20, d22}, [r0, :256]!
+;CHECK: vld4.8 {d17, d19, d21, d23}, [r0, :256]
+	%tmp1 = call %struct.__neon_int8x16x4_t @llvm.arm.neon.vld4.v16i8(i8* %A, i32 64)
         %tmp2 = extractvalue %struct.__neon_int8x16x4_t %tmp1, 0
         %tmp3 = extractvalue %struct.__neon_int8x16x4_t %tmp1, 2
         %tmp4 = add <16 x i8> %tmp2, %tmp3
@@ -78,10 +83,11 @@
 
 define <8 x i16> @vld4Qi16(i16* %A) nounwind {
 ;CHECK: vld4Qi16:
-;CHECK: vld4.16
-;CHECK: vld4.16
+;Check for no alignment specifier.
+;CHECK: vld4.16 {d16, d18, d20, d22}, [r0]!
+;CHECK: vld4.16 {d17, d19, d21, d23}, [r0]
 	%tmp0 = bitcast i16* %A to i8*
-	%tmp1 = call %struct.__neon_int16x8x4_t @llvm.arm.neon.vld4.v8i16(i8* %tmp0)
+	%tmp1 = call %struct.__neon_int16x8x4_t @llvm.arm.neon.vld4.v8i16(i8* %tmp0, i32 1)
         %tmp2 = extractvalue %struct.__neon_int16x8x4_t %tmp1, 0
         %tmp3 = extractvalue %struct.__neon_int16x8x4_t %tmp1, 2
         %tmp4 = add <8 x i16> %tmp2, %tmp3
@@ -93,7 +99,7 @@
 ;CHECK: vld4.32
 ;CHECK: vld4.32
 	%tmp0 = bitcast i32* %A to i8*
-	%tmp1 = call %struct.__neon_int32x4x4_t @llvm.arm.neon.vld4.v4i32(i8* %tmp0)
+	%tmp1 = call %struct.__neon_int32x4x4_t @llvm.arm.neon.vld4.v4i32(i8* %tmp0, i32 1)
         %tmp2 = extractvalue %struct.__neon_int32x4x4_t %tmp1, 0
         %tmp3 = extractvalue %struct.__neon_int32x4x4_t %tmp1, 2
         %tmp4 = add <4 x i32> %tmp2, %tmp3
@@ -105,20 +111,20 @@
 ;CHECK: vld4.32
 ;CHECK: vld4.32
 	%tmp0 = bitcast float* %A to i8*
-	%tmp1 = call %struct.__neon_float32x4x4_t @llvm.arm.neon.vld4.v4f32(i8* %tmp0)
+	%tmp1 = call %struct.__neon_float32x4x4_t @llvm.arm.neon.vld4.v4f32(i8* %tmp0, i32 1)
         %tmp2 = extractvalue %struct.__neon_float32x4x4_t %tmp1, 0
         %tmp3 = extractvalue %struct.__neon_float32x4x4_t %tmp1, 2
         %tmp4 = fadd <4 x float> %tmp2, %tmp3
 	ret <4 x float> %tmp4
 }
 
-declare %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4.v8i8(i8*) nounwind readonly
-declare %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4.v4i16(i8*) nounwind readonly
-declare %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4.v2i32(i8*) nounwind readonly
-declare %struct.__neon_float32x2x4_t @llvm.arm.neon.vld4.v2f32(i8*) nounwind readonly
-declare %struct.__neon_int64x1x4_t @llvm.arm.neon.vld4.v1i64(i8*) nounwind readonly
-
-declare %struct.__neon_int8x16x4_t @llvm.arm.neon.vld4.v16i8(i8*) nounwind readonly
-declare %struct.__neon_int16x8x4_t @llvm.arm.neon.vld4.v8i16(i8*) nounwind readonly
-declare %struct.__neon_int32x4x4_t @llvm.arm.neon.vld4.v4i32(i8*) nounwind readonly
-declare %struct.__neon_float32x4x4_t @llvm.arm.neon.vld4.v4f32(i8*) nounwind readonly
+declare %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4.v8i8(i8*, i32) nounwind readonly
+declare %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4.v4i16(i8*, i32) nounwind readonly
+declare %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4.v2i32(i8*, i32) nounwind readonly
+declare %struct.__neon_float32x2x4_t @llvm.arm.neon.vld4.v2f32(i8*, i32) nounwind readonly
+declare %struct.__neon_int64x1x4_t @llvm.arm.neon.vld4.v1i64(i8*, i32) nounwind readonly
+
+declare %struct.__neon_int8x16x4_t @llvm.arm.neon.vld4.v16i8(i8*, i32) nounwind readonly
+declare %struct.__neon_int16x8x4_t @llvm.arm.neon.vld4.v8i16(i8*, i32) nounwind readonly
+declare %struct.__neon_int32x4x4_t @llvm.arm.neon.vld4.v4i32(i8*, i32) nounwind readonly
+declare %struct.__neon_float32x4x4_t @llvm.arm.neon.vld4.v4f32(i8*, i32) nounwind readonly
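
The only signature change for these loads is the new trailing i32 operand, which carries the alignment of the address in bytes (as the updated calls above suggest). A minimal standalone sketch, with an illustrative function name and alignment value:

%struct.__neon_int8x8x4_t = type { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> }

declare %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4.v8i8(i8*, i32) nounwind readonly

define <8 x i8> @vld4_sketch(i8* %p) nounwind {
  ; The final i32 operand (32 bytes here) is the alignment hint the code
  ; generator can turn into an address-alignment specifier such as the
  ; [r0, :256] checked in the tests above.
  %vld = call %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4.v8i8(i8* %p, i32 32)
  %d0 = extractvalue %struct.__neon_int8x8x4_t %vld, 0
  ret <8 x i8> %d0
}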

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/vldlane.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/vldlane.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/vldlane.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/vldlane.ll Tue Oct 26 19:48:03 2010
@@ -11,9 +11,10 @@
 
 define <8 x i8> @vld2lanei8(i8* %A, <8 x i8>* %B) nounwind {
 ;CHECK: vld2lanei8:
-;CHECK: vld2.8
+;Check the alignment value.  Max for this instruction is 16 bits:
+;CHECK: vld2.8 {d16[1], d17[1]}, [r0, :16]
 	%tmp1 = load <8 x i8>* %B
-	%tmp2 = call %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1)
+	%tmp2 = call %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1, i32 4)
         %tmp3 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 1
         %tmp5 = add <8 x i8> %tmp3, %tmp4
@@ -22,10 +23,11 @@
 
 define <4 x i16> @vld2lanei16(i16* %A, <4 x i16>* %B) nounwind {
 ;CHECK: vld2lanei16:
-;CHECK: vld2.16
+;Check the alignment value.  Max for this instruction is 32 bits:
+;CHECK: vld2.16 {d16[1], d17[1]}, [r0, :32]
 	%tmp0 = bitcast i16* %A to i8*
 	%tmp1 = load <4 x i16>* %B
-	%tmp2 = call %struct.__neon_int16x4x2_t @llvm.arm.neon.vld2lane.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1)
+	%tmp2 = call %struct.__neon_int16x4x2_t @llvm.arm.neon.vld2lane.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1, i32 8)
         %tmp3 = extractvalue %struct.__neon_int16x4x2_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_int16x4x2_t %tmp2, 1
         %tmp5 = add <4 x i16> %tmp3, %tmp4
@@ -37,7 +39,7 @@
 ;CHECK: vld2.32
 	%tmp0 = bitcast i32* %A to i8*
 	%tmp1 = load <2 x i32>* %B
-	%tmp2 = call %struct.__neon_int32x2x2_t @llvm.arm.neon.vld2lane.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1)
+	%tmp2 = call %struct.__neon_int32x2x2_t @llvm.arm.neon.vld2lane.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1, i32 1)
         %tmp3 = extractvalue %struct.__neon_int32x2x2_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_int32x2x2_t %tmp2, 1
         %tmp5 = add <2 x i32> %tmp3, %tmp4
@@ -49,7 +51,7 @@
 ;CHECK: vld2.32
 	%tmp0 = bitcast float* %A to i8*
 	%tmp1 = load <2 x float>* %B
-	%tmp2 = call %struct.__neon_float32x2x2_t @llvm.arm.neon.vld2lane.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, i32 1)
+	%tmp2 = call %struct.__neon_float32x2x2_t @llvm.arm.neon.vld2lane.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, i32 1, i32 1)
         %tmp3 = extractvalue %struct.__neon_float32x2x2_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_float32x2x2_t %tmp2, 1
         %tmp5 = fadd <2 x float> %tmp3, %tmp4
@@ -58,10 +60,11 @@
 
 define <8 x i16> @vld2laneQi16(i16* %A, <8 x i16>* %B) nounwind {
 ;CHECK: vld2laneQi16:
-;CHECK: vld2.16
+;Check the (default) alignment.
+;CHECK: vld2.16 {d17[1], d19[1]}, [r0]
 	%tmp0 = bitcast i16* %A to i8*
 	%tmp1 = load <8 x i16>* %B
-	%tmp2 = call %struct.__neon_int16x8x2_t @llvm.arm.neon.vld2lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1)
+	%tmp2 = call %struct.__neon_int16x8x2_t @llvm.arm.neon.vld2lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 5, i32 1)
         %tmp3 = extractvalue %struct.__neon_int16x8x2_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_int16x8x2_t %tmp2, 1
         %tmp5 = add <8 x i16> %tmp3, %tmp4
@@ -70,10 +73,11 @@
 
 define <4 x i32> @vld2laneQi32(i32* %A, <4 x i32>* %B) nounwind {
 ;CHECK: vld2laneQi32:
-;CHECK: vld2.32
+;Check the alignment value.  Max for this instruction is 64 bits:
+;CHECK: vld2.32 {d17[0], d19[0]}, [r0, :64]
 	%tmp0 = bitcast i32* %A to i8*
 	%tmp1 = load <4 x i32>* %B
-	%tmp2 = call %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2lane.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 2)
+	%tmp2 = call %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2lane.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 2, i32 16)
         %tmp3 = extractvalue %struct.__neon_int32x4x2_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_int32x4x2_t %tmp2, 1
         %tmp5 = add <4 x i32> %tmp3, %tmp4
@@ -85,21 +89,21 @@
 ;CHECK: vld2.32
 	%tmp0 = bitcast float* %A to i8*
 	%tmp1 = load <4 x float>* %B
-	%tmp2 = call %struct.__neon_float32x4x2_t @llvm.arm.neon.vld2lane.v4f32(i8* %tmp0, <4 x float> %tmp1, <4 x float> %tmp1, i32 1)
+	%tmp2 = call %struct.__neon_float32x4x2_t @llvm.arm.neon.vld2lane.v4f32(i8* %tmp0, <4 x float> %tmp1, <4 x float> %tmp1, i32 1, i32 1)
         %tmp3 = extractvalue %struct.__neon_float32x4x2_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_float32x4x2_t %tmp2, 1
         %tmp5 = fadd <4 x float> %tmp3, %tmp4
 	ret <4 x float> %tmp5
 }
 
-declare %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8(i8*, <8 x i8>, <8 x i8>, i32) nounwind readonly
-declare %struct.__neon_int16x4x2_t @llvm.arm.neon.vld2lane.v4i16(i8*, <4 x i16>, <4 x i16>, i32) nounwind readonly
-declare %struct.__neon_int32x2x2_t @llvm.arm.neon.vld2lane.v2i32(i8*, <2 x i32>, <2 x i32>, i32) nounwind readonly
-declare %struct.__neon_float32x2x2_t @llvm.arm.neon.vld2lane.v2f32(i8*, <2 x float>, <2 x float>, i32) nounwind readonly
-
-declare %struct.__neon_int16x8x2_t @llvm.arm.neon.vld2lane.v8i16(i8*, <8 x i16>, <8 x i16>, i32) nounwind readonly
-declare %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2lane.v4i32(i8*, <4 x i32>, <4 x i32>, i32) nounwind readonly
-declare %struct.__neon_float32x4x2_t @llvm.arm.neon.vld2lane.v4f32(i8*, <4 x float>, <4 x float>, i32) nounwind readonly
+declare %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8(i8*, <8 x i8>, <8 x i8>, i32, i32) nounwind readonly
+declare %struct.__neon_int16x4x2_t @llvm.arm.neon.vld2lane.v4i16(i8*, <4 x i16>, <4 x i16>, i32, i32) nounwind readonly
+declare %struct.__neon_int32x2x2_t @llvm.arm.neon.vld2lane.v2i32(i8*, <2 x i32>, <2 x i32>, i32, i32) nounwind readonly
+declare %struct.__neon_float32x2x2_t @llvm.arm.neon.vld2lane.v2f32(i8*, <2 x float>, <2 x float>, i32, i32) nounwind readonly
+
+declare %struct.__neon_int16x8x2_t @llvm.arm.neon.vld2lane.v8i16(i8*, <8 x i16>, <8 x i16>, i32, i32) nounwind readonly
+declare %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2lane.v4i32(i8*, <4 x i32>, <4 x i32>, i32, i32) nounwind readonly
+declare %struct.__neon_float32x4x2_t @llvm.arm.neon.vld2lane.v4f32(i8*, <4 x float>, <4 x float>, i32, i32) nounwind readonly
 
 %struct.__neon_int8x8x3_t = type { <8 x i8>,  <8 x i8>,  <8 x i8> }
 %struct.__neon_int16x4x3_t = type { <4 x i16>, <4 x i16>, <4 x i16> }
@@ -114,7 +118,7 @@
 ;CHECK: vld3lanei8:
 ;CHECK: vld3.8
 	%tmp1 = load <8 x i8>* %B
-	%tmp2 = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1)
+	%tmp2 = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1, i32 1)
         %tmp3 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 1
         %tmp5 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 2
@@ -125,10 +129,11 @@
 
 define <4 x i16> @vld3lanei16(i16* %A, <4 x i16>* %B) nounwind {
 ;CHECK: vld3lanei16:
-;CHECK: vld3.16
+;Check the (default) alignment value.  VLD3 does not support alignment.
+;CHECK: vld3.16 {d16[1], d17[1], d18[1]}, [r0]
 	%tmp0 = bitcast i16* %A to i8*
 	%tmp1 = load <4 x i16>* %B
-	%tmp2 = call %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3lane.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1)
+	%tmp2 = call %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3lane.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1, i32 8)
         %tmp3 = extractvalue %struct.__neon_int16x4x3_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_int16x4x3_t %tmp2, 1
         %tmp5 = extractvalue %struct.__neon_int16x4x3_t %tmp2, 2
@@ -142,7 +147,7 @@
 ;CHECK: vld3.32
 	%tmp0 = bitcast i32* %A to i8*
 	%tmp1 = load <2 x i32>* %B
-	%tmp2 = call %struct.__neon_int32x2x3_t @llvm.arm.neon.vld3lane.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1)
+	%tmp2 = call %struct.__neon_int32x2x3_t @llvm.arm.neon.vld3lane.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1, i32 1)
         %tmp3 = extractvalue %struct.__neon_int32x2x3_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_int32x2x3_t %tmp2, 1
         %tmp5 = extractvalue %struct.__neon_int32x2x3_t %tmp2, 2
@@ -156,7 +161,7 @@
 ;CHECK: vld3.32
 	%tmp0 = bitcast float* %A to i8*
 	%tmp1 = load <2 x float>* %B
-	%tmp2 = call %struct.__neon_float32x2x3_t @llvm.arm.neon.vld3lane.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, i32 1)
+	%tmp2 = call %struct.__neon_float32x2x3_t @llvm.arm.neon.vld3lane.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, i32 1, i32 1)
         %tmp3 = extractvalue %struct.__neon_float32x2x3_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_float32x2x3_t %tmp2, 1
         %tmp5 = extractvalue %struct.__neon_float32x2x3_t %tmp2, 2
@@ -167,10 +172,11 @@
 
 define <8 x i16> @vld3laneQi16(i16* %A, <8 x i16>* %B) nounwind {
 ;CHECK: vld3laneQi16:
-;CHECK: vld3.16
+;Check the (default) alignment value.  VLD3 does not support alignment.
+;CHECK: vld3.16 {d16[1], d18[1], d20[1]}, [r0]
 	%tmp0 = bitcast i16* %A to i8*
 	%tmp1 = load <8 x i16>* %B
-	%tmp2 = call %struct.__neon_int16x8x3_t @llvm.arm.neon.vld3lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1)
+	%tmp2 = call %struct.__neon_int16x8x3_t @llvm.arm.neon.vld3lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1, i32 8)
         %tmp3 = extractvalue %struct.__neon_int16x8x3_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_int16x8x3_t %tmp2, 1
         %tmp5 = extractvalue %struct.__neon_int16x8x3_t %tmp2, 2
@@ -184,7 +190,7 @@
 ;CHECK: vld3.32
 	%tmp0 = bitcast i32* %A to i8*
 	%tmp1 = load <4 x i32>* %B
-	%tmp2 = call %struct.__neon_int32x4x3_t @llvm.arm.neon.vld3lane.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 3)
+	%tmp2 = call %struct.__neon_int32x4x3_t @llvm.arm.neon.vld3lane.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 3, i32 1)
         %tmp3 = extractvalue %struct.__neon_int32x4x3_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_int32x4x3_t %tmp2, 1
         %tmp5 = extractvalue %struct.__neon_int32x4x3_t %tmp2, 2
@@ -198,7 +204,7 @@
 ;CHECK: vld3.32
 	%tmp0 = bitcast float* %A to i8*
 	%tmp1 = load <4 x float>* %B
-	%tmp2 = call %struct.__neon_float32x4x3_t @llvm.arm.neon.vld3lane.v4f32(i8* %tmp0, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, i32 1)
+	%tmp2 = call %struct.__neon_float32x4x3_t @llvm.arm.neon.vld3lane.v4f32(i8* %tmp0, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, i32 1, i32 1)
         %tmp3 = extractvalue %struct.__neon_float32x4x3_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_float32x4x3_t %tmp2, 1
         %tmp5 = extractvalue %struct.__neon_float32x4x3_t %tmp2, 2
@@ -207,14 +213,14 @@
 	ret <4 x float> %tmp7
 }
 
-declare %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32) nounwind readonly
-declare %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, i32) nounwind readonly
-declare %struct.__neon_int32x2x3_t @llvm.arm.neon.vld3lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, i32) nounwind readonly
-declare %struct.__neon_float32x2x3_t @llvm.arm.neon.vld3lane.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, i32) nounwind readonly
-
-declare %struct.__neon_int16x8x3_t @llvm.arm.neon.vld3lane.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, i32) nounwind readonly
-declare %struct.__neon_int32x4x3_t @llvm.arm.neon.vld3lane.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, i32) nounwind readonly
-declare %struct.__neon_float32x4x3_t @llvm.arm.neon.vld3lane.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, i32) nounwind readonly
+declare %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32, i32) nounwind readonly
+declare %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, i32, i32) nounwind readonly
+declare %struct.__neon_int32x2x3_t @llvm.arm.neon.vld3lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, i32, i32) nounwind readonly
+declare %struct.__neon_float32x2x3_t @llvm.arm.neon.vld3lane.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, i32, i32) nounwind readonly
+
+declare %struct.__neon_int16x8x3_t @llvm.arm.neon.vld3lane.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, i32, i32) nounwind readonly
+declare %struct.__neon_int32x4x3_t @llvm.arm.neon.vld3lane.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, i32, i32) nounwind readonly
+declare %struct.__neon_float32x4x3_t @llvm.arm.neon.vld3lane.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, i32, i32) nounwind readonly
 
 %struct.__neon_int8x8x4_t = type { <8 x i8>,  <8 x i8>,  <8 x i8>,  <8 x i8> }
 %struct.__neon_int16x4x4_t = type { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> }
@@ -227,9 +233,10 @@
 
 define <8 x i8> @vld4lanei8(i8* %A, <8 x i8>* %B) nounwind {
 ;CHECK: vld4lanei8:
-;CHECK: vld4.8
+;Check the alignment value.  Max for this instruction is 32 bits:
+;CHECK: vld4.8 {d16[1], d17[1], d18[1], d19[1]}, [r0, :32]
 	%tmp1 = load <8 x i8>* %B
-	%tmp2 = call %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1)
+	%tmp2 = call %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1, i32 8)
         %tmp3 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 1
         %tmp5 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 2
@@ -245,7 +252,7 @@
 ;CHECK: vld4.16
 	%tmp0 = bitcast i16* %A to i8*
 	%tmp1 = load <4 x i16>* %B
-	%tmp2 = call %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4lane.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1)
+	%tmp2 = call %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4lane.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1, i32 1)
         %tmp3 = extractvalue %struct.__neon_int16x4x4_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_int16x4x4_t %tmp2, 1
         %tmp5 = extractvalue %struct.__neon_int16x4x4_t %tmp2, 2
@@ -258,10 +265,11 @@
 
 define <2 x i32> @vld4lanei32(i32* %A, <2 x i32>* %B) nounwind {
 ;CHECK: vld4lanei32:
-;CHECK: vld4.32
+;Check the alignment value.  Max for this instruction is 128 bits:
+;CHECK: vld4.32 {d16[1], d17[1], d18[1], d19[1]}, [r0, :128]
 	%tmp0 = bitcast i32* %A to i8*
 	%tmp1 = load <2 x i32>* %B
-	%tmp2 = call %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4lane.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1)
+	%tmp2 = call %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4lane.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1, i32 16)
         %tmp3 = extractvalue %struct.__neon_int32x2x4_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_int32x2x4_t %tmp2, 1
         %tmp5 = extractvalue %struct.__neon_int32x2x4_t %tmp2, 2
@@ -277,7 +285,7 @@
 ;CHECK: vld4.32
 	%tmp0 = bitcast float* %A to i8*
 	%tmp1 = load <2 x float>* %B
-	%tmp2 = call %struct.__neon_float32x2x4_t @llvm.arm.neon.vld4lane.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, i32 1)
+	%tmp2 = call %struct.__neon_float32x2x4_t @llvm.arm.neon.vld4lane.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, i32 1, i32 1)
         %tmp3 = extractvalue %struct.__neon_float32x2x4_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_float32x2x4_t %tmp2, 1
         %tmp5 = extractvalue %struct.__neon_float32x2x4_t %tmp2, 2
@@ -290,10 +298,11 @@
 
 define <8 x i16> @vld4laneQi16(i16* %A, <8 x i16>* %B) nounwind {
 ;CHECK: vld4laneQi16:
-;CHECK: vld4.16
+;Check the alignment value.  Max for this instruction is 64 bits:
+;CHECK: vld4.16 {d16[1], d18[1], d20[1], d22[1]}, [r0, :64]
 	%tmp0 = bitcast i16* %A to i8*
 	%tmp1 = load <8 x i16>* %B
-	%tmp2 = call %struct.__neon_int16x8x4_t @llvm.arm.neon.vld4lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1)
+	%tmp2 = call %struct.__neon_int16x8x4_t @llvm.arm.neon.vld4lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1, i32 16)
         %tmp3 = extractvalue %struct.__neon_int16x8x4_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_int16x8x4_t %tmp2, 1
         %tmp5 = extractvalue %struct.__neon_int16x8x4_t %tmp2, 2
@@ -306,10 +315,11 @@
 
 define <4 x i32> @vld4laneQi32(i32* %A, <4 x i32>* %B) nounwind {
 ;CHECK: vld4laneQi32:
-;CHECK: vld4.32
+;Check the (default) alignment.
+;CHECK: vld4.32 {d17[0], d19[0], d21[0], d23[0]}, [r0]
 	%tmp0 = bitcast i32* %A to i8*
 	%tmp1 = load <4 x i32>* %B
-	%tmp2 = call %struct.__neon_int32x4x4_t @llvm.arm.neon.vld4lane.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 1)
+	%tmp2 = call %struct.__neon_int32x4x4_t @llvm.arm.neon.vld4lane.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 2, i32 1)
         %tmp3 = extractvalue %struct.__neon_int32x4x4_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_int32x4x4_t %tmp2, 1
         %tmp5 = extractvalue %struct.__neon_int32x4x4_t %tmp2, 2
@@ -325,7 +335,7 @@
 ;CHECK: vld4.32
 	%tmp0 = bitcast float* %A to i8*
 	%tmp1 = load <4 x float>* %B
-	%tmp2 = call %struct.__neon_float32x4x4_t @llvm.arm.neon.vld4lane.v4f32(i8* %tmp0, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, i32 1)
+	%tmp2 = call %struct.__neon_float32x4x4_t @llvm.arm.neon.vld4lane.v4f32(i8* %tmp0, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, i32 1, i32 1)
         %tmp3 = extractvalue %struct.__neon_float32x4x4_t %tmp2, 0
         %tmp4 = extractvalue %struct.__neon_float32x4x4_t %tmp2, 1
         %tmp5 = extractvalue %struct.__neon_float32x4x4_t %tmp2, 2
@@ -336,11 +346,11 @@
 	ret <4 x float> %tmp9
 }
 
-declare %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i32) nounwind readonly
-declare %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i32) nounwind readonly
-declare %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32) nounwind readonly
-declare %struct.__neon_float32x2x4_t @llvm.arm.neon.vld4lane.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, <2 x float>, i32) nounwind readonly
-
-declare %struct.__neon_int16x8x4_t @llvm.arm.neon.vld4lane.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i32) nounwind readonly
-declare %struct.__neon_int32x4x4_t @llvm.arm.neon.vld4lane.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32) nounwind readonly
-declare %struct.__neon_float32x4x4_t @llvm.arm.neon.vld4lane.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32) nounwind readonly
+declare %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i32, i32) nounwind readonly
+declare %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i32, i32) nounwind readonly
+declare %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32, i32) nounwind readonly
+declare %struct.__neon_float32x2x4_t @llvm.arm.neon.vld4lane.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, <2 x float>, i32, i32) nounwind readonly
+
+declare %struct.__neon_int16x8x4_t @llvm.arm.neon.vld4lane.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i32, i32) nounwind readonly
+declare %struct.__neon_int32x4x4_t @llvm.arm.neon.vld4lane.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32, i32) nounwind readonly
+declare %struct.__neon_float32x4x4_t @llvm.arm.neon.vld4lane.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32, i32) nounwind readonly
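
The lane variants keep the lane index where it was and append the new alignment operand after it, so the two trailing i32 operands are the lane index followed by the alignment in bytes. A minimal standalone sketch mirroring the vld2lanei16 test above (names and values are illustrative):

%struct.__neon_int16x4x2_t = type { <4 x i16>, <4 x i16> }

declare %struct.__neon_int16x4x2_t @llvm.arm.neon.vld2lane.v4i16(i8*, <4 x i16>, <4 x i16>, i32, i32) nounwind readonly

define <4 x i16> @vld2lane_sketch(i8* %p, <4 x i16> %v) nounwind {
  ; i32 1 is the lane index; i32 8 is the alignment hint in bytes.
  %ld = call %struct.__neon_int16x4x2_t @llvm.arm.neon.vld2lane.v4i16(i8* %p, <4 x i16> %v, <4 x i16> %v, i32 1, i32 8)
  %r = extractvalue %struct.__neon_int16x4x2_t %ld, 0
  ret <4 x i16> %r
}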

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/vmla.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/vmla.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/vmla.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/vmla.ll Tue Oct 26 19:48:03 2010
@@ -94,8 +94,11 @@
 	%tmp1 = load <8 x i16>* %A
 	%tmp2 = load <8 x i8>* %B
 	%tmp3 = load <8 x i8>* %C
-	%tmp4 = call <8 x i16> @llvm.arm.neon.vmlals.v8i16(<8 x i16> %tmp1, <8 x i8> %tmp2, <8 x i8> %tmp3)
-	ret <8 x i16> %tmp4
+	%tmp4 = sext <8 x i8> %tmp2 to <8 x i16>
+	%tmp5 = sext <8 x i8> %tmp3 to <8 x i16>
+	%tmp6 = mul <8 x i16> %tmp4, %tmp5
+	%tmp7 = add <8 x i16> %tmp1, %tmp6
+	ret <8 x i16> %tmp7
 }
 
 define <4 x i32> @vmlals16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
@@ -104,8 +107,11 @@
 	%tmp1 = load <4 x i32>* %A
 	%tmp2 = load <4 x i16>* %B
 	%tmp3 = load <4 x i16>* %C
-	%tmp4 = call <4 x i32> @llvm.arm.neon.vmlals.v4i32(<4 x i32> %tmp1, <4 x i16> %tmp2, <4 x i16> %tmp3)
-	ret <4 x i32> %tmp4
+	%tmp4 = sext <4 x i16> %tmp2 to <4 x i32>
+	%tmp5 = sext <4 x i16> %tmp3 to <4 x i32>
+	%tmp6 = mul <4 x i32> %tmp4, %tmp5
+	%tmp7 = add <4 x i32> %tmp1, %tmp6
+	ret <4 x i32> %tmp7
 }
 
 define <2 x i64> @vmlals32(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
@@ -114,8 +120,11 @@
 	%tmp1 = load <2 x i64>* %A
 	%tmp2 = load <2 x i32>* %B
 	%tmp3 = load <2 x i32>* %C
-	%tmp4 = call <2 x i64> @llvm.arm.neon.vmlals.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2, <2 x i32> %tmp3)
-	ret <2 x i64> %tmp4
+	%tmp4 = sext <2 x i32> %tmp2 to <2 x i64>
+	%tmp5 = sext <2 x i32> %tmp3 to <2 x i64>
+	%tmp6 = mul <2 x i64> %tmp4, %tmp5
+	%tmp7 = add <2 x i64> %tmp1, %tmp6
+	ret <2 x i64> %tmp7
 }
 
 define <8 x i16> @vmlalu8(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
@@ -124,8 +133,11 @@
 	%tmp1 = load <8 x i16>* %A
 	%tmp2 = load <8 x i8>* %B
 	%tmp3 = load <8 x i8>* %C
-	%tmp4 = call <8 x i16> @llvm.arm.neon.vmlalu.v8i16(<8 x i16> %tmp1, <8 x i8> %tmp2, <8 x i8> %tmp3)
-	ret <8 x i16> %tmp4
+	%tmp4 = zext <8 x i8> %tmp2 to <8 x i16>
+	%tmp5 = zext <8 x i8> %tmp3 to <8 x i16>
+	%tmp6 = mul <8 x i16> %tmp4, %tmp5
+	%tmp7 = add <8 x i16> %tmp1, %tmp6
+	ret <8 x i16> %tmp7
 }
 
 define <4 x i32> @vmlalu16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
@@ -134,8 +146,11 @@
 	%tmp1 = load <4 x i32>* %A
 	%tmp2 = load <4 x i16>* %B
 	%tmp3 = load <4 x i16>* %C
-	%tmp4 = call <4 x i32> @llvm.arm.neon.vmlalu.v4i32(<4 x i32> %tmp1, <4 x i16> %tmp2, <4 x i16> %tmp3)
-	ret <4 x i32> %tmp4
+	%tmp4 = zext <4 x i16> %tmp2 to <4 x i32>
+	%tmp5 = zext <4 x i16> %tmp3 to <4 x i32>
+	%tmp6 = mul <4 x i32> %tmp4, %tmp5
+	%tmp7 = add <4 x i32> %tmp1, %tmp6
+	ret <4 x i32> %tmp7
 }
 
 define <2 x i64> @vmlalu32(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
@@ -144,8 +159,11 @@
 	%tmp1 = load <2 x i64>* %A
 	%tmp2 = load <2 x i32>* %B
 	%tmp3 = load <2 x i32>* %C
-	%tmp4 = call <2 x i64> @llvm.arm.neon.vmlalu.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2, <2 x i32> %tmp3)
-	ret <2 x i64> %tmp4
+	%tmp4 = zext <2 x i32> %tmp2 to <2 x i64>
+	%tmp5 = zext <2 x i32> %tmp3 to <2 x i64>
+	%tmp6 = mul <2 x i64> %tmp4, %tmp5
+	%tmp7 = add <2 x i64> %tmp1, %tmp6
+	ret <2 x i64> %tmp7
 }
 
 define arm_aapcs_vfpcc <4 x i32> @test_vmlal_lanes16(<4 x i32> %arg0_int32x4_t, <4 x i16> %arg1_int16x4_t, <4 x i16> %arg2_int16x4_t) nounwind readnone {
@@ -153,8 +171,11 @@
 ; CHECK: test_vmlal_lanes16
 ; CHECK: vmlal.s16 q0, d2, d3[1]
   %0 = shufflevector <4 x i16> %arg2_int16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
-  %1 = tail call <4 x i32> @llvm.arm.neon.vmlals.v4i32(<4 x i32> %arg0_int32x4_t, <4 x i16> %arg1_int16x4_t, <4 x i16> %0) ; <<4 x i32>> [#uses=1]
-  ret <4 x i32> %1
+  %1 = sext <4 x i16> %arg1_int16x4_t to <4 x i32>
+  %2 = sext <4 x i16> %0 to <4 x i32>
+  %3 = mul <4 x i32> %1, %2
+  %4 = add <4 x i32> %arg0_int32x4_t, %3
+  ret <4 x i32> %4
 }
 
 define arm_aapcs_vfpcc <2 x i64> @test_vmlal_lanes32(<2 x i64> %arg0_int64x2_t, <2 x i32> %arg1_int32x2_t, <2 x i32> %arg2_int32x2_t) nounwind readnone {
@@ -162,8 +183,11 @@
 ; CHECK: test_vmlal_lanes32
 ; CHECK: vmlal.s32 q0, d2, d3[1]
   %0 = shufflevector <2 x i32> %arg2_int32x2_t, <2 x i32> undef, <2 x i32> <i32 1, i32 1> ; <<2 x i32>> [#uses=1]
-  %1 = tail call <2 x i64> @llvm.arm.neon.vmlals.v2i64(<2 x i64> %arg0_int64x2_t, <2 x i32> %arg1_int32x2_t, <2 x i32> %0) ; <<2 x i64>> [#uses=1]
-  ret <2 x i64> %1
+  %1 = sext <2 x i32> %arg1_int32x2_t to <2 x i64>
+  %2 = sext <2 x i32> %0 to <2 x i64>
+  %3 = mul <2 x i64> %1, %2
+  %4 = add <2 x i64> %arg0_int64x2_t, %3
+  ret <2 x i64> %4
 }
 
 define arm_aapcs_vfpcc <4 x i32> @test_vmlal_laneu16(<4 x i32> %arg0_uint32x4_t, <4 x i16> %arg1_uint16x4_t, <4 x i16> %arg2_uint16x4_t) nounwind readnone {
@@ -171,8 +195,11 @@
 ; CHECK: test_vmlal_laneu16
 ; CHECK: vmlal.u16 q0, d2, d3[1]
   %0 = shufflevector <4 x i16> %arg2_uint16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
-  %1 = tail call <4 x i32> @llvm.arm.neon.vmlalu.v4i32(<4 x i32> %arg0_uint32x4_t, <4 x i16> %arg1_uint16x4_t, <4 x i16> %0) ; <<4 x i32>> [#uses=1]
-  ret <4 x i32> %1
+  %1 = zext <4 x i16> %arg1_uint16x4_t to <4 x i32>
+  %2 = zext <4 x i16> %0 to <4 x i32>
+  %3 = mul <4 x i32> %1, %2
+  %4 = add <4 x i32> %arg0_uint32x4_t, %3
+  ret <4 x i32> %4
 }
 
 define arm_aapcs_vfpcc <2 x i64> @test_vmlal_laneu32(<2 x i64> %arg0_uint64x2_t, <2 x i32> %arg1_uint32x2_t, <2 x i32> %arg2_uint32x2_t) nounwind readnone {
@@ -180,14 +207,9 @@
 ; CHECK: test_vmlal_laneu32
 ; CHECK: vmlal.u32 q0, d2, d3[1]
   %0 = shufflevector <2 x i32> %arg2_uint32x2_t, <2 x i32> undef, <2 x i32> <i32 1, i32 1> ; <<2 x i32>> [#uses=1]
-  %1 = tail call <2 x i64> @llvm.arm.neon.vmlalu.v2i64(<2 x i64> %arg0_uint64x2_t, <2 x i32> %arg1_uint32x2_t, <2 x i32> %0) ; <<2 x i64>> [#uses=1]
-  ret <2 x i64> %1
+  %1 = zext <2 x i32> %arg1_uint32x2_t to <2 x i64>
+  %2 = zext <2 x i32> %0 to <2 x i64>
+  %3 = mul <2 x i64> %1, %2
+  %4 = add <2 x i64> %arg0_uint64x2_t, %3
+  ret <2 x i64> %4
 }
-
-declare <8 x i16> @llvm.arm.neon.vmlals.v8i16(<8 x i16>, <8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vmlals.v4i32(<4 x i32>, <4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vmlals.v2i64(<2 x i64>, <2 x i32>, <2 x i32>) nounwind readnone
-
-declare <8 x i16> @llvm.arm.neon.vmlalu.v8i16(<8 x i16>, <8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vmlalu.v4i32(<4 x i32>, <4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vmlalu.v2i64(<2 x i64>, <2 x i32>, <2 x i32>) nounwind readnone

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/vmls.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/vmls.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/vmls.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/vmls.ll Tue Oct 26 19:48:03 2010
@@ -94,8 +94,11 @@
 	%tmp1 = load <8 x i16>* %A
 	%tmp2 = load <8 x i8>* %B
 	%tmp3 = load <8 x i8>* %C
-	%tmp4 = call <8 x i16> @llvm.arm.neon.vmlsls.v8i16(<8 x i16> %tmp1, <8 x i8> %tmp2, <8 x i8> %tmp3)
-	ret <8 x i16> %tmp4
+	%tmp4 = sext <8 x i8> %tmp2 to <8 x i16>
+	%tmp5 = sext <8 x i8> %tmp3 to <8 x i16>
+	%tmp6 = mul <8 x i16> %tmp4, %tmp5
+	%tmp7 = sub <8 x i16> %tmp1, %tmp6
+	ret <8 x i16> %tmp7
 }
 
 define <4 x i32> @vmlsls16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
@@ -104,8 +107,11 @@
 	%tmp1 = load <4 x i32>* %A
 	%tmp2 = load <4 x i16>* %B
 	%tmp3 = load <4 x i16>* %C
-	%tmp4 = call <4 x i32> @llvm.arm.neon.vmlsls.v4i32(<4 x i32> %tmp1, <4 x i16> %tmp2, <4 x i16> %tmp3)
-	ret <4 x i32> %tmp4
+	%tmp4 = sext <4 x i16> %tmp2 to <4 x i32>
+	%tmp5 = sext <4 x i16> %tmp3 to <4 x i32>
+	%tmp6 = mul <4 x i32> %tmp4, %tmp5
+	%tmp7 = sub <4 x i32> %tmp1, %tmp6
+	ret <4 x i32> %tmp7
 }
 
 define <2 x i64> @vmlsls32(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
@@ -114,8 +120,11 @@
 	%tmp1 = load <2 x i64>* %A
 	%tmp2 = load <2 x i32>* %B
 	%tmp3 = load <2 x i32>* %C
-	%tmp4 = call <2 x i64> @llvm.arm.neon.vmlsls.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2, <2 x i32> %tmp3)
-	ret <2 x i64> %tmp4
+	%tmp4 = sext <2 x i32> %tmp2 to <2 x i64>
+	%tmp5 = sext <2 x i32> %tmp3 to <2 x i64>
+	%tmp6 = mul <2 x i64> %tmp4, %tmp5
+	%tmp7 = sub <2 x i64> %tmp1, %tmp6
+	ret <2 x i64> %tmp7
 }
 
 define <8 x i16> @vmlslu8(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
@@ -124,8 +133,11 @@
 	%tmp1 = load <8 x i16>* %A
 	%tmp2 = load <8 x i8>* %B
 	%tmp3 = load <8 x i8>* %C
-	%tmp4 = call <8 x i16> @llvm.arm.neon.vmlslu.v8i16(<8 x i16> %tmp1, <8 x i8> %tmp2, <8 x i8> %tmp3)
-	ret <8 x i16> %tmp4
+	%tmp4 = zext <8 x i8> %tmp2 to <8 x i16>
+	%tmp5 = zext <8 x i8> %tmp3 to <8 x i16>
+	%tmp6 = mul <8 x i16> %tmp4, %tmp5
+	%tmp7 = sub <8 x i16> %tmp1, %tmp6
+	ret <8 x i16> %tmp7
 }
 
 define <4 x i32> @vmlslu16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
@@ -134,8 +146,11 @@
 	%tmp1 = load <4 x i32>* %A
 	%tmp2 = load <4 x i16>* %B
 	%tmp3 = load <4 x i16>* %C
-	%tmp4 = call <4 x i32> @llvm.arm.neon.vmlslu.v4i32(<4 x i32> %tmp1, <4 x i16> %tmp2, <4 x i16> %tmp3)
-	ret <4 x i32> %tmp4
+	%tmp4 = zext <4 x i16> %tmp2 to <4 x i32>
+	%tmp5 = zext <4 x i16> %tmp3 to <4 x i32>
+	%tmp6 = mul <4 x i32> %tmp4, %tmp5
+	%tmp7 = sub <4 x i32> %tmp1, %tmp6
+	ret <4 x i32> %tmp7
 }
 
 define <2 x i64> @vmlslu32(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
@@ -144,8 +159,11 @@
 	%tmp1 = load <2 x i64>* %A
 	%tmp2 = load <2 x i32>* %B
 	%tmp3 = load <2 x i32>* %C
-	%tmp4 = call <2 x i64> @llvm.arm.neon.vmlslu.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2, <2 x i32> %tmp3)
-	ret <2 x i64> %tmp4
+	%tmp4 = zext <2 x i32> %tmp2 to <2 x i64>
+	%tmp5 = zext <2 x i32> %tmp3 to <2 x i64>
+	%tmp6 = mul <2 x i64> %tmp4, %tmp5
+	%tmp7 = sub <2 x i64> %tmp1, %tmp6
+	ret <2 x i64> %tmp7
 }
 
 define arm_aapcs_vfpcc <4 x i32> @test_vmlsl_lanes16(<4 x i32> %arg0_int32x4_t, <4 x i16> %arg1_int16x4_t, <4 x i16> %arg2_int16x4_t) nounwind readnone {
@@ -153,8 +171,11 @@
 ; CHECK: test_vmlsl_lanes16
 ; CHECK: vmlsl.s16 q0, d2, d3[1]
   %0 = shufflevector <4 x i16> %arg2_int16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
-  %1 = tail call <4 x i32> @llvm.arm.neon.vmlsls.v4i32(<4 x i32> %arg0_int32x4_t, <4 x i16> %arg1_int16x4_t, <4 x i16> %0) ; <<4 x i32>> [#uses=1]
-  ret <4 x i32> %1
+  %1 = sext <4 x i16> %arg1_int16x4_t to <4 x i32>
+  %2 = sext <4 x i16> %0 to <4 x i32>
+  %3 = mul <4 x i32> %1, %2
+  %4 = sub <4 x i32> %arg0_int32x4_t, %3
+  ret <4 x i32> %4
 }
 
 define arm_aapcs_vfpcc <2 x i64> @test_vmlsl_lanes32(<2 x i64> %arg0_int64x2_t, <2 x i32> %arg1_int32x2_t, <2 x i32> %arg2_int32x2_t) nounwind readnone {
@@ -162,8 +183,11 @@
 ; CHECK: test_vmlsl_lanes32
 ; CHECK: vmlsl.s32 q0, d2, d3[1]
   %0 = shufflevector <2 x i32> %arg2_int32x2_t, <2 x i32> undef, <2 x i32> <i32 1, i32 1> ; <<2 x i32>> [#uses=1]
-  %1 = tail call <2 x i64> @llvm.arm.neon.vmlsls.v2i64(<2 x i64> %arg0_int64x2_t, <2 x i32> %arg1_int32x2_t, <2 x i32> %0) ; <<2 x i64>> [#uses=1]
-  ret <2 x i64> %1
+  %1 = sext <2 x i32> %arg1_int32x2_t to <2 x i64>
+  %2 = sext <2 x i32> %0 to <2 x i64>
+  %3 = mul <2 x i64> %1, %2
+  %4 = sub <2 x i64> %arg0_int64x2_t, %3
+  ret <2 x i64> %4
 }
 
 define arm_aapcs_vfpcc <4 x i32> @test_vmlsl_laneu16(<4 x i32> %arg0_uint32x4_t, <4 x i16> %arg1_uint16x4_t, <4 x i16> %arg2_uint16x4_t) nounwind readnone {
@@ -171,8 +195,11 @@
 ; CHECK: test_vmlsl_laneu16
 ; CHECK: vmlsl.u16 q0, d2, d3[1]
   %0 = shufflevector <4 x i16> %arg2_uint16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
-  %1 = tail call <4 x i32> @llvm.arm.neon.vmlslu.v4i32(<4 x i32> %arg0_uint32x4_t, <4 x i16> %arg1_uint16x4_t, <4 x i16> %0) ; <<4 x i32>> [#uses=1]
-  ret <4 x i32> %1
+  %1 = zext <4 x i16> %arg1_uint16x4_t to <4 x i32>
+  %2 = zext <4 x i16> %0 to <4 x i32>
+  %3 = mul <4 x i32> %1, %2
+  %4 = sub <4 x i32> %arg0_uint32x4_t, %3
+  ret <4 x i32> %4
 }
 
 define arm_aapcs_vfpcc <2 x i64> @test_vmlsl_laneu32(<2 x i64> %arg0_uint64x2_t, <2 x i32> %arg1_uint32x2_t, <2 x i32> %arg2_uint32x2_t) nounwind readnone {
@@ -180,14 +207,9 @@
 ; CHECK: test_vmlsl_laneu32
 ; CHECK: vmlsl.u32 q0, d2, d3[1]
   %0 = shufflevector <2 x i32> %arg2_uint32x2_t, <2 x i32> undef, <2 x i32> <i32 1, i32 1> ; <<2 x i32>> [#uses=1]
-  %1 = tail call <2 x i64> @llvm.arm.neon.vmlslu.v2i64(<2 x i64> %arg0_uint64x2_t, <2 x i32> %arg1_uint32x2_t, <2 x i32> %0) ; <<2 x i64>> [#uses=1]
-  ret <2 x i64> %1
+  %1 = zext <2 x i32> %arg1_uint32x2_t to <2 x i64>
+  %2 = zext <2 x i32> %0 to <2 x i64>
+  %3 = mul <2 x i64> %1, %2
+  %4 = sub <2 x i64> %arg0_uint64x2_t, %3
+  ret <2 x i64> %4
 }
-
-declare <8 x i16> @llvm.arm.neon.vmlsls.v8i16(<8 x i16>, <8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vmlsls.v4i32(<4 x i32>, <4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vmlsls.v2i64(<2 x i64>, <2 x i32>, <2 x i32>) nounwind readnone
-
-declare <8 x i16> @llvm.arm.neon.vmlslu.v8i16(<8 x i16>, <8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vmlslu.v4i32(<4 x i32>, <4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vmlslu.v2i64(<2 x i64>, <2 x i32>, <2 x i32>) nounwind readnone
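
With the long multiply-accumulate and multiply-subtract intrinsics removed, the equivalent IR is an explicit widen, multiply, and add or sub, exactly as in the rewritten tests above. A minimal standalone sketch of the signed multiply-subtract form (names are illustrative):

define <4 x i32> @vmlsl_s16_sketch(<4 x i32> %acc, <4 x i16> %a, <4 x i16> %b) nounwind {
  %a.wide = sext <4 x i16> %a to <4 x i32>   ; use zext here for the unsigned (.u16) form
  %b.wide = sext <4 x i16> %b to <4 x i32>
  %prod = mul <4 x i32> %a.wide, %b.wide
  %res = sub <4 x i32> %acc, %prod           ; 'add' instead of 'sub' gives the vmlal pattern
  ret <4 x i32> %res
}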

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/vmov.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/vmov.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/vmov.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/vmov.ll Tue Oct 26 19:48:03 2010
@@ -2,169 +2,169 @@
 
 define <8 x i8> @v_movi8() nounwind {
 ;CHECK: v_movi8:
-;CHECK: vmov.i8 d0, #0x8
+;CHECK: vmov.i8 d{{.*}}, #0x8
 	ret <8 x i8> < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
 }
 
 define <4 x i16> @v_movi16a() nounwind {
 ;CHECK: v_movi16a:
-;CHECK: vmov.i16 d0, #0x10
+;CHECK: vmov.i16 d{{.*}}, #0x10
 	ret <4 x i16> < i16 16, i16 16, i16 16, i16 16 >
 }
 
 define <4 x i16> @v_movi16b() nounwind {
 ;CHECK: v_movi16b:
-;CHECK: vmov.i16 d0, #0x1000
+;CHECK: vmov.i16 d{{.*}}, #0x1000
 	ret <4 x i16> < i16 4096, i16 4096, i16 4096, i16 4096 >
 }
 
 define <4 x i16> @v_mvni16a() nounwind {
 ;CHECK: v_mvni16a:
-;CHECK: vmvn.i16 d0, #0x10
+;CHECK: vmvn.i16 d{{.*}}, #0x10
 	ret <4 x i16> < i16 65519, i16 65519, i16 65519, i16 65519 >
 }
 
 define <4 x i16> @v_mvni16b() nounwind {
 ;CHECK: v_mvni16b:
-;CHECK: vmvn.i16 d0, #0x1000
+;CHECK: vmvn.i16 d{{.*}}, #0x1000
 	ret <4 x i16> < i16 61439, i16 61439, i16 61439, i16 61439 >
 }
 
 define <2 x i32> @v_movi32a() nounwind {
 ;CHECK: v_movi32a:
-;CHECK: vmov.i32 d0, #0x20
+;CHECK: vmov.i32 d{{.*}}, #0x20
 	ret <2 x i32> < i32 32, i32 32 >
 }
 
 define <2 x i32> @v_movi32b() nounwind {
 ;CHECK: v_movi32b:
-;CHECK: vmov.i32 d0, #0x2000
+;CHECK: vmov.i32 d{{.*}}, #0x2000
 	ret <2 x i32> < i32 8192, i32 8192 >
 }
 
 define <2 x i32> @v_movi32c() nounwind {
 ;CHECK: v_movi32c:
-;CHECK: vmov.i32 d0, #0x200000
+;CHECK: vmov.i32 d{{.*}}, #0x200000
 	ret <2 x i32> < i32 2097152, i32 2097152 >
 }
 
 define <2 x i32> @v_movi32d() nounwind {
 ;CHECK: v_movi32d:
-;CHECK: vmov.i32 d0, #0x20000000
+;CHECK: vmov.i32 d{{.*}}, #0x20000000
 	ret <2 x i32> < i32 536870912, i32 536870912 >
 }
 
 define <2 x i32> @v_movi32e() nounwind {
 ;CHECK: v_movi32e:
-;CHECK: vmov.i32 d0, #0x20FF
+;CHECK: vmov.i32 d{{.*}}, #0x20FF
 	ret <2 x i32> < i32 8447, i32 8447 >
 }
 
 define <2 x i32> @v_movi32f() nounwind {
 ;CHECK: v_movi32f:
-;CHECK: vmov.i32 d0, #0x20FFFF
+;CHECK: vmov.i32 d{{.*}}, #0x20FFFF
 	ret <2 x i32> < i32 2162687, i32 2162687 >
 }
 
 define <2 x i32> @v_mvni32a() nounwind {
 ;CHECK: v_mvni32a:
-;CHECK: vmvn.i32 d0, #0x20
+;CHECK: vmvn.i32 d{{.*}}, #0x20
 	ret <2 x i32> < i32 4294967263, i32 4294967263 >
 }
 
 define <2 x i32> @v_mvni32b() nounwind {
 ;CHECK: v_mvni32b:
-;CHECK: vmvn.i32 d0, #0x2000
+;CHECK: vmvn.i32 d{{.*}}, #0x2000
 	ret <2 x i32> < i32 4294959103, i32 4294959103 >
 }
 
 define <2 x i32> @v_mvni32c() nounwind {
 ;CHECK: v_mvni32c:
-;CHECK: vmvn.i32 d0, #0x200000
+;CHECK: vmvn.i32 d{{.*}}, #0x200000
 	ret <2 x i32> < i32 4292870143, i32 4292870143 >
 }
 
 define <2 x i32> @v_mvni32d() nounwind {
 ;CHECK: v_mvni32d:
-;CHECK: vmvn.i32 d0, #0x20000000
+;CHECK: vmvn.i32 d{{.*}}, #0x20000000
 	ret <2 x i32> < i32 3758096383, i32 3758096383 >
 }
 
 define <2 x i32> @v_mvni32e() nounwind {
 ;CHECK: v_mvni32e:
-;CHECK: vmvn.i32 d0, #0x20FF
+;CHECK: vmvn.i32 d{{.*}}, #0x20FF
 	ret <2 x i32> < i32 4294958848, i32 4294958848 >
 }
 
 define <2 x i32> @v_mvni32f() nounwind {
 ;CHECK: v_mvni32f:
-;CHECK: vmvn.i32 d0, #0x20FFFF
+;CHECK: vmvn.i32 d{{.*}}, #0x20FFFF
 	ret <2 x i32> < i32 4292804608, i32 4292804608 >
 }
 
 define <1 x i64> @v_movi64() nounwind {
 ;CHECK: v_movi64:
-;CHECK: vmov.i64 d0, #0xFF0000FF0000FFFF
+;CHECK: vmov.i64 d{{.*}}, #0xFF0000FF0000FFFF
 	ret <1 x i64> < i64 18374687574888349695 >
 }
 
 define <16 x i8> @v_movQi8() nounwind {
 ;CHECK: v_movQi8:
-;CHECK: vmov.i8 q0, #0x8
+;CHECK: vmov.i8 q{{.*}}, #0x8
 	ret <16 x i8> < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
 }
 
 define <8 x i16> @v_movQi16a() nounwind {
 ;CHECK: v_movQi16a:
-;CHECK: vmov.i16 q0, #0x10
+;CHECK: vmov.i16 q{{.*}}, #0x10
 	ret <8 x i16> < i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16 >
 }
 
 define <8 x i16> @v_movQi16b() nounwind {
 ;CHECK: v_movQi16b:
-;CHECK: vmov.i16 q0, #0x1000
+;CHECK: vmov.i16 q{{.*}}, #0x1000
 	ret <8 x i16> < i16 4096, i16 4096, i16 4096, i16 4096, i16 4096, i16 4096, i16 4096, i16 4096 >
 }
 
 define <4 x i32> @v_movQi32a() nounwind {
 ;CHECK: v_movQi32a:
-;CHECK: vmov.i32 q0, #0x20
+;CHECK: vmov.i32 q{{.*}}, #0x20
 	ret <4 x i32> < i32 32, i32 32, i32 32, i32 32 >
 }
 
 define <4 x i32> @v_movQi32b() nounwind {
 ;CHECK: v_movQi32b:
-;CHECK: vmov.i32 q0, #0x2000
+;CHECK: vmov.i32 q{{.*}}, #0x2000
 	ret <4 x i32> < i32 8192, i32 8192, i32 8192, i32 8192 >
 }
 
 define <4 x i32> @v_movQi32c() nounwind {
 ;CHECK: v_movQi32c:
-;CHECK: vmov.i32 q0, #0x200000
+;CHECK: vmov.i32 q{{.*}}, #0x200000
 	ret <4 x i32> < i32 2097152, i32 2097152, i32 2097152, i32 2097152 >
 }
 
 define <4 x i32> @v_movQi32d() nounwind {
 ;CHECK: v_movQi32d:
-;CHECK: vmov.i32 q0, #0x20000000
+;CHECK: vmov.i32 q{{.*}}, #0x20000000
 	ret <4 x i32> < i32 536870912, i32 536870912, i32 536870912, i32 536870912 >
 }
 
 define <4 x i32> @v_movQi32e() nounwind {
 ;CHECK: v_movQi32e:
-;CHECK: vmov.i32 q0, #0x20FF
+;CHECK: vmov.i32 q{{.*}}, #0x20FF
 	ret <4 x i32> < i32 8447, i32 8447, i32 8447, i32 8447 >
 }
 
 define <4 x i32> @v_movQi32f() nounwind {
 ;CHECK: v_movQi32f:
-;CHECK: vmov.i32 q0, #0x20FFFF
+;CHECK: vmov.i32 q{{.*}}, #0x20FFFF
 	ret <4 x i32> < i32 2162687, i32 2162687, i32 2162687, i32 2162687 >
 }
 
 define <2 x i64> @v_movQi64() nounwind {
 ;CHECK: v_movQi64:
-;CHECK: vmov.i64 q0, #0xFF0000FF0000FFFF
+;CHECK: vmov.i64 q{{.*}}, #0xFF0000FF0000FFFF
 	ret <2 x i64> < i64 18374687574888349695, i64 18374687574888349695 >
 }
 
@@ -173,7 +173,7 @@
 define void @vdupn128(%struct.int8x8_t* noalias nocapture sret %agg.result) nounwind {
 entry:
 ;CHECK: vdupn128:
-;CHECK: vmov.i8 d0, #0x80
+;CHECK: vmov.i8 d{{.*}}, #0x80
   %0 = getelementptr inbounds %struct.int8x8_t* %agg.result, i32 0, i32 0 ; <<8 x i8>*> [#uses=1]
   store <8 x i8> <i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128>, <8 x i8>* %0, align 8
   ret void
@@ -182,7 +182,7 @@
 define void @vdupnneg75(%struct.int8x8_t* noalias nocapture sret %agg.result) nounwind {
 entry:
 ;CHECK: vdupnneg75:
-;CHECK: vmov.i8 d0, #0xB5
+;CHECK: vmov.i8 d{{.*}}, #0xB5
   %0 = getelementptr inbounds %struct.int8x8_t* %agg.result, i32 0, i32 0 ; <<8 x i8>*> [#uses=1]
   store <8 x i8> <i8 -75, i8 -75, i8 -75, i8 -75, i8 -75, i8 -75, i8 -75, i8 -75>, <8 x i8>* %0, align 8
   ret void
@@ -192,7 +192,7 @@
 ;CHECK: vmovls8:
 ;CHECK: vmovl.s8
 	%tmp1 = load <8 x i8>* %A
-	%tmp2 = call <8 x i16> @llvm.arm.neon.vmovls.v8i16(<8 x i8> %tmp1)
+	%tmp2 = sext <8 x i8> %tmp1 to <8 x i16>
 	ret <8 x i16> %tmp2
 }
 
@@ -200,7 +200,7 @@
 ;CHECK: vmovls16:
 ;CHECK: vmovl.s16
 	%tmp1 = load <4 x i16>* %A
-	%tmp2 = call <4 x i32> @llvm.arm.neon.vmovls.v4i32(<4 x i16> %tmp1)
+	%tmp2 = sext <4 x i16> %tmp1 to <4 x i32>
 	ret <4 x i32> %tmp2
 }
 
@@ -208,7 +208,7 @@
 ;CHECK: vmovls32:
 ;CHECK: vmovl.s32
 	%tmp1 = load <2 x i32>* %A
-	%tmp2 = call <2 x i64> @llvm.arm.neon.vmovls.v2i64(<2 x i32> %tmp1)
+	%tmp2 = sext <2 x i32> %tmp1 to <2 x i64>
 	ret <2 x i64> %tmp2
 }
 
@@ -216,7 +216,7 @@
 ;CHECK: vmovlu8:
 ;CHECK: vmovl.u8
 	%tmp1 = load <8 x i8>* %A
-	%tmp2 = call <8 x i16> @llvm.arm.neon.vmovlu.v8i16(<8 x i8> %tmp1)
+	%tmp2 = zext <8 x i8> %tmp1 to <8 x i16>
 	ret <8 x i16> %tmp2
 }
 
@@ -224,7 +224,7 @@
 ;CHECK: vmovlu16:
 ;CHECK: vmovl.u16
 	%tmp1 = load <4 x i16>* %A
-	%tmp2 = call <4 x i32> @llvm.arm.neon.vmovlu.v4i32(<4 x i16> %tmp1)
+	%tmp2 = zext <4 x i16> %tmp1 to <4 x i32>
 	ret <4 x i32> %tmp2
 }
 
@@ -232,23 +232,15 @@
 ;CHECK: vmovlu32:
 ;CHECK: vmovl.u32
 	%tmp1 = load <2 x i32>* %A
-	%tmp2 = call <2 x i64> @llvm.arm.neon.vmovlu.v2i64(<2 x i32> %tmp1)
+	%tmp2 = zext <2 x i32> %tmp1 to <2 x i64>
 	ret <2 x i64> %tmp2
 }
 
-declare <8 x i16> @llvm.arm.neon.vmovls.v8i16(<8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vmovls.v4i32(<4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vmovls.v2i64(<2 x i32>) nounwind readnone
-
-declare <8 x i16> @llvm.arm.neon.vmovlu.v8i16(<8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vmovlu.v4i32(<4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vmovlu.v2i64(<2 x i32>) nounwind readnone
-
 define <8 x i8> @vmovni16(<8 x i16>* %A) nounwind {
 ;CHECK: vmovni16:
 ;CHECK: vmovn.i16
 	%tmp1 = load <8 x i16>* %A
-	%tmp2 = call <8 x i8> @llvm.arm.neon.vmovn.v8i8(<8 x i16> %tmp1)
+	%tmp2 = trunc <8 x i16> %tmp1 to <8 x i8>
 	ret <8 x i8> %tmp2
 }
 
@@ -256,7 +248,7 @@
 ;CHECK: vmovni32:
 ;CHECK: vmovn.i32
 	%tmp1 = load <4 x i32>* %A
-	%tmp2 = call <4 x i16> @llvm.arm.neon.vmovn.v4i16(<4 x i32> %tmp1)
+	%tmp2 = trunc <4 x i32> %tmp1 to <4 x i16>
 	ret <4 x i16> %tmp2
 }
 
@@ -264,14 +256,10 @@
 ;CHECK: vmovni64:
 ;CHECK: vmovn.i64
 	%tmp1 = load <2 x i64>* %A
-	%tmp2 = call <2 x i32> @llvm.arm.neon.vmovn.v2i32(<2 x i64> %tmp1)
+	%tmp2 = trunc <2 x i64> %tmp1 to <2 x i32>
 	ret <2 x i32> %tmp2
 }
 
-declare <8 x i8>  @llvm.arm.neon.vmovn.v8i8(<8 x i16>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vmovn.v4i16(<4 x i32>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vmovn.v2i32(<2 x i64>) nounwind readnone
-
 define <8 x i8> @vqmovns16(<8 x i16>* %A) nounwind {
 ;CHECK: vqmovns16:
 ;CHECK: vqmovn.s16

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/vmul.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/vmul.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/vmul.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/vmul.ll Tue Oct 26 19:48:03 2010
@@ -152,8 +152,10 @@
 ;CHECK: vmull.s8
 	%tmp1 = load <8 x i8>* %A
 	%tmp2 = load <8 x i8>* %B
-	%tmp3 = call <8 x i16> @llvm.arm.neon.vmulls.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
-	ret <8 x i16> %tmp3
+	%tmp3 = sext <8 x i8> %tmp1 to <8 x i16>
+	%tmp4 = sext <8 x i8> %tmp2 to <8 x i16>
+	%tmp5 = mul <8 x i16> %tmp3, %tmp4
+	ret <8 x i16> %tmp5
 }
 
 define <4 x i32> @vmulls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
@@ -161,8 +163,10 @@
 ;CHECK: vmull.s16
 	%tmp1 = load <4 x i16>* %A
 	%tmp2 = load <4 x i16>* %B
-	%tmp3 = call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
-	ret <4 x i32> %tmp3
+	%tmp3 = sext <4 x i16> %tmp1 to <4 x i32>
+	%tmp4 = sext <4 x i16> %tmp2 to <4 x i32>
+	%tmp5 = mul <4 x i32> %tmp3, %tmp4
+	ret <4 x i32> %tmp5
 }
 
 define <2 x i64> @vmulls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
@@ -170,8 +174,10 @@
 ;CHECK: vmull.s32
 	%tmp1 = load <2 x i32>* %A
 	%tmp2 = load <2 x i32>* %B
-	%tmp3 = call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
-	ret <2 x i64> %tmp3
+	%tmp3 = sext <2 x i32> %tmp1 to <2 x i64>
+	%tmp4 = sext <2 x i32> %tmp2 to <2 x i64>
+	%tmp5 = mul <2 x i64> %tmp3, %tmp4
+	ret <2 x i64> %tmp5
 }
 
 define <8 x i16> @vmullu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
@@ -179,8 +185,10 @@
 ;CHECK: vmull.u8
 	%tmp1 = load <8 x i8>* %A
 	%tmp2 = load <8 x i8>* %B
-	%tmp3 = call <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
-	ret <8 x i16> %tmp3
+	%tmp3 = zext <8 x i8> %tmp1 to <8 x i16>
+	%tmp4 = zext <8 x i8> %tmp2 to <8 x i16>
+	%tmp5 = mul <8 x i16> %tmp3, %tmp4
+	ret <8 x i16> %tmp5
 }
 
 define <4 x i32> @vmullu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
@@ -188,8 +196,10 @@
 ;CHECK: vmull.u16
 	%tmp1 = load <4 x i16>* %A
 	%tmp2 = load <4 x i16>* %B
-	%tmp3 = call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
-	ret <4 x i32> %tmp3
+	%tmp3 = zext <4 x i16> %tmp1 to <4 x i32>
+	%tmp4 = zext <4 x i16> %tmp2 to <4 x i32>
+	%tmp5 = mul <4 x i32> %tmp3, %tmp4
+	ret <4 x i32> %tmp5
 }
 
 define <2 x i64> @vmullu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
@@ -197,8 +207,10 @@
 ;CHECK: vmull.u32
 	%tmp1 = load <2 x i32>* %A
 	%tmp2 = load <2 x i32>* %B
-	%tmp3 = call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
-	ret <2 x i64> %tmp3
+	%tmp3 = zext <2 x i32> %tmp1 to <2 x i64>
+	%tmp4 = zext <2 x i32> %tmp2 to <2 x i64>
+	%tmp5 = mul <2 x i64> %tmp3, %tmp4
+	ret <2 x i64> %tmp5
 }
 
 define <8 x i16> @vmullp8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
@@ -215,8 +227,10 @@
 ; CHECK: test_vmull_lanes16
 ; CHECK: vmull.s16 q0, d0, d1[1]
   %0 = shufflevector <4 x i16> %arg1_int16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
-  %1 = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %arg0_int16x4_t, <4 x i16> %0) ; <<4 x i32>> [#uses=1]
-  ret <4 x i32> %1
+  %1 = sext <4 x i16> %arg0_int16x4_t to <4 x i32>
+  %2 = sext <4 x i16> %0 to <4 x i32>
+  %3 = mul <4 x i32> %1, %2
+  ret <4 x i32> %3
 }
 
 define arm_aapcs_vfpcc <2 x i64> @test_vmull_lanes32(<2 x i32> %arg0_int32x2_t, <2 x i32> %arg1_int32x2_t) nounwind readnone {
@@ -224,8 +238,10 @@
 ; CHECK: test_vmull_lanes32
 ; CHECK: vmull.s32 q0, d0, d1[1]
   %0 = shufflevector <2 x i32> %arg1_int32x2_t, <2 x i32> undef, <2 x i32> <i32 1, i32 1> ; <<2 x i32>> [#uses=1]
-  %1 = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %arg0_int32x2_t, <2 x i32> %0) ; <<2 x i64>> [#uses=1]
-  ret <2 x i64> %1
+  %1 = sext <2 x i32> %arg0_int32x2_t to <2 x i64>
+  %2 = sext <2 x i32> %0 to <2 x i64>
+  %3 = mul <2 x i64> %1, %2
+  ret <2 x i64> %3
 }
 
 define arm_aapcs_vfpcc <4 x i32> @test_vmull_laneu16(<4 x i16> %arg0_uint16x4_t, <4 x i16> %arg1_uint16x4_t) nounwind readnone {
@@ -233,8 +249,10 @@
 ; CHECK: test_vmull_laneu16
 ; CHECK: vmull.u16 q0, d0, d1[1]
   %0 = shufflevector <4 x i16> %arg1_uint16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
-  %1 = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %arg0_uint16x4_t, <4 x i16> %0) ; <<4 x i32>> [#uses=1]
-  ret <4 x i32> %1
+  %1 = zext <4 x i16> %arg0_uint16x4_t to <4 x i32>
+  %2 = zext <4 x i16> %0 to <4 x i32>
+  %3 = mul <4 x i32> %1, %2
+  ret <4 x i32> %3
 }
 
 define arm_aapcs_vfpcc <2 x i64> @test_vmull_laneu32(<2 x i32> %arg0_uint32x2_t, <2 x i32> %arg1_uint32x2_t) nounwind readnone {
@@ -242,16 +260,10 @@
 ; CHECK: test_vmull_laneu32
 ; CHECK: vmull.u32 q0, d0, d1[1]
   %0 = shufflevector <2 x i32> %arg1_uint32x2_t, <2 x i32> undef, <2 x i32> <i32 1, i32 1> ; <<2 x i32>> [#uses=1]
-  %1 = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %arg0_uint32x2_t, <2 x i32> %0) ; <<2 x i64>> [#uses=1]
-  ret <2 x i64> %1
+  %1 = zext <2 x i32> %arg0_uint32x2_t to <2 x i64>
+  %2 = zext <2 x i32> %0 to <2 x i64>
+  %3 = mul <2 x i64> %1, %2
+  ret <2 x i64> %3
 }
 
-declare <8 x i16> @llvm.arm.neon.vmulls.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
-
-declare <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
-
 declare <8 x i16>  @llvm.arm.neon.vmullp.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
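
The same removal applies to the pure widening, narrowing, and full-multiply intrinsics: vmovl maps to sext or zext, vmovn to trunc, and vmull to a widen followed by a plain mul (only the polynomial vmullp keeps an intrinsic, as declared above). A minimal standalone sketch (names are illustrative):

define <8 x i16> @vmull_u8_sketch(<8 x i8> %a, <8 x i8> %b) nounwind {
  %a.wide = zext <8 x i8> %a to <8 x i16>   ; zext/sext alone is the vmovl pattern
  %b.wide = zext <8 x i8> %b to <8 x i16>
  %prod = mul <8 x i16> %a.wide, %b.wide    ; widen-then-mul is the vmull pattern
  ret <8 x i16> %prod
}

define <8 x i8> @vmovn_i16_sketch(<8 x i16> %v) nounwind {
  %n = trunc <8 x i16> %v to <8 x i8>       ; trunc is the vmovn pattern
  ret <8 x i8> %n
}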

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/vrev.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/vrev.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/vrev.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/vrev.ll Tue Oct 26 19:48:03 2010
@@ -111,3 +111,21 @@
 	%tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
 	ret <16 x i8> %tmp2
 }
+
+; Undef shuffle indices should not prevent matching to VREV:
+
+define <8 x i8> @test_vrev64D8_undef(<8 x i8>* %A) nounwind {
+;CHECK: test_vrev64D8_undef:
+;CHECK: vrev64.8
+	%tmp1 = load <8 x i8>* %A
+	%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 7, i32 undef, i32 undef, i32 4, i32 3, i32 2, i32 1, i32 0>
+	ret <8 x i8> %tmp2
+}
+
+define <8 x i16> @test_vrev32Q16_undef(<8 x i16>* %A) nounwind {
+;CHECK: test_vrev32Q16_undef:
+;CHECK: vrev32.16
+	%tmp1 = load <8 x i16>* %A
+	%tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 undef, i32 0, i32 undef, i32 2, i32 5, i32 4, i32 7, i32 undef>
+	ret <8 x i16> %tmp2
+}

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/vst1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/vst1.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/vst1.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/vst1.ll Tue Oct 26 19:48:03 2010
@@ -2,9 +2,10 @@
 
 define void @vst1i8(i8* %A, <8 x i8>* %B) nounwind {
 ;CHECK: vst1i8:
-;CHECK: vst1.8
+;Check the alignment value.  Max for this instruction is 64 bits:
+;CHECK: vst1.8 {d16}, [r0, :64]
 	%tmp1 = load <8 x i8>* %B
-	call void @llvm.arm.neon.vst1.v8i8(i8* %A, <8 x i8> %tmp1)
+	call void @llvm.arm.neon.vst1.v8i8(i8* %A, <8 x i8> %tmp1, i32 16)
 	ret void
 }
 
@@ -13,7 +14,7 @@
 ;CHECK: vst1.16
 	%tmp0 = bitcast i16* %A to i8*
 	%tmp1 = load <4 x i16>* %B
-	call void @llvm.arm.neon.vst1.v4i16(i8* %tmp0, <4 x i16> %tmp1)
+	call void @llvm.arm.neon.vst1.v4i16(i8* %tmp0, <4 x i16> %tmp1, i32 1)
 	ret void
 }
 
@@ -22,7 +23,7 @@
 ;CHECK: vst1.32
 	%tmp0 = bitcast i32* %A to i8*
 	%tmp1 = load <2 x i32>* %B
-	call void @llvm.arm.neon.vst1.v2i32(i8* %tmp0, <2 x i32> %tmp1)
+	call void @llvm.arm.neon.vst1.v2i32(i8* %tmp0, <2 x i32> %tmp1, i32 1)
 	ret void
 }
 
@@ -31,7 +32,7 @@
 ;CHECK: vst1.32
 	%tmp0 = bitcast float* %A to i8*
 	%tmp1 = load <2 x float>* %B
-	call void @llvm.arm.neon.vst1.v2f32(i8* %tmp0, <2 x float> %tmp1)
+	call void @llvm.arm.neon.vst1.v2f32(i8* %tmp0, <2 x float> %tmp1, i32 1)
 	ret void
 }
 
@@ -40,24 +41,26 @@
 ;CHECK: vst1.64
 	%tmp0 = bitcast i64* %A to i8*
 	%tmp1 = load <1 x i64>* %B
-	call void @llvm.arm.neon.vst1.v1i64(i8* %tmp0, <1 x i64> %tmp1)
+	call void @llvm.arm.neon.vst1.v1i64(i8* %tmp0, <1 x i64> %tmp1, i32 1)
 	ret void
 }
 
 define void @vst1Qi8(i8* %A, <16 x i8>* %B) nounwind {
 ;CHECK: vst1Qi8:
-;CHECK: vst1.8
+;Check the alignment value.  Max for this instruction is 128 bits:
+;CHECK: vst1.8 {d16, d17}, [r0, :64]
 	%tmp1 = load <16 x i8>* %B
-	call void @llvm.arm.neon.vst1.v16i8(i8* %A, <16 x i8> %tmp1)
+	call void @llvm.arm.neon.vst1.v16i8(i8* %A, <16 x i8> %tmp1, i32 8)
 	ret void
 }
 
 define void @vst1Qi16(i16* %A, <8 x i16>* %B) nounwind {
 ;CHECK: vst1Qi16:
-;CHECK: vst1.16
+;Check the alignment value.  Max for this instruction is 128 bits:
+;CHECK: vst1.16 {d16, d17}, [r0, :128]
 	%tmp0 = bitcast i16* %A to i8*
 	%tmp1 = load <8 x i16>* %B
-	call void @llvm.arm.neon.vst1.v8i16(i8* %tmp0, <8 x i16> %tmp1)
+	call void @llvm.arm.neon.vst1.v8i16(i8* %tmp0, <8 x i16> %tmp1, i32 32)
 	ret void
 }
 
@@ -66,7 +69,7 @@
 ;CHECK: vst1.32
 	%tmp0 = bitcast i32* %A to i8*
 	%tmp1 = load <4 x i32>* %B
-	call void @llvm.arm.neon.vst1.v4i32(i8* %tmp0, <4 x i32> %tmp1)
+	call void @llvm.arm.neon.vst1.v4i32(i8* %tmp0, <4 x i32> %tmp1, i32 1)
 	ret void
 }
 
@@ -75,7 +78,7 @@
 ;CHECK: vst1.32
 	%tmp0 = bitcast float* %A to i8*
 	%tmp1 = load <4 x float>* %B
-	call void @llvm.arm.neon.vst1.v4f32(i8* %tmp0, <4 x float> %tmp1)
+	call void @llvm.arm.neon.vst1.v4f32(i8* %tmp0, <4 x float> %tmp1, i32 1)
 	ret void
 }
 
@@ -84,18 +87,18 @@
 ;CHECK: vst1.64
 	%tmp0 = bitcast i64* %A to i8*
 	%tmp1 = load <2 x i64>* %B
-	call void @llvm.arm.neon.vst1.v2i64(i8* %tmp0, <2 x i64> %tmp1)
+	call void @llvm.arm.neon.vst1.v2i64(i8* %tmp0, <2 x i64> %tmp1, i32 1)
 	ret void
 }
 
-declare void @llvm.arm.neon.vst1.v8i8(i8*, <8 x i8>) nounwind
-declare void @llvm.arm.neon.vst1.v4i16(i8*, <4 x i16>) nounwind
-declare void @llvm.arm.neon.vst1.v2i32(i8*, <2 x i32>) nounwind
-declare void @llvm.arm.neon.vst1.v2f32(i8*, <2 x float>) nounwind
-declare void @llvm.arm.neon.vst1.v1i64(i8*, <1 x i64>) nounwind
+declare void @llvm.arm.neon.vst1.v8i8(i8*, <8 x i8>, i32) nounwind
+declare void @llvm.arm.neon.vst1.v4i16(i8*, <4 x i16>, i32) nounwind
+declare void @llvm.arm.neon.vst1.v2i32(i8*, <2 x i32>, i32) nounwind
+declare void @llvm.arm.neon.vst1.v2f32(i8*, <2 x float>, i32) nounwind
+declare void @llvm.arm.neon.vst1.v1i64(i8*, <1 x i64>, i32) nounwind
 
-declare void @llvm.arm.neon.vst1.v16i8(i8*, <16 x i8>) nounwind
-declare void @llvm.arm.neon.vst1.v8i16(i8*, <8 x i16>) nounwind
-declare void @llvm.arm.neon.vst1.v4i32(i8*, <4 x i32>) nounwind
-declare void @llvm.arm.neon.vst1.v4f32(i8*, <4 x float>) nounwind
-declare void @llvm.arm.neon.vst1.v2i64(i8*, <2 x i64>) nounwind
+declare void @llvm.arm.neon.vst1.v16i8(i8*, <16 x i8>, i32) nounwind
+declare void @llvm.arm.neon.vst1.v8i16(i8*, <8 x i16>, i32) nounwind
+declare void @llvm.arm.neon.vst1.v4i32(i8*, <4 x i32>, i32) nounwind
+declare void @llvm.arm.neon.vst1.v4f32(i8*, <4 x float>, i32) nounwind
+declare void @llvm.arm.neon.vst1.v2i64(i8*, <2 x i64>, i32) nounwind
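
Note: each vst1 intrinsic now carries a trailing i32 operand giving the store alignment in bytes, and the CHECK lines verify the resulting address-alignment specifier (e.g. [r0, :64] or [r0, :128]), capped at the maximum the particular encoding supports as noted in the per-test comments. A minimal sketch matching the updated declarations (pointer and value names are illustrative):

  ; 8-byte (64-bit) aligned store of a <8 x i8> value
  call void @llvm.arm.neon.vst1.v8i8(i8* %p, <8 x i8> %v, i32 8)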

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/vst2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/vst2.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/vst2.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/vst2.ll Tue Oct 26 19:48:03 2010
@@ -2,18 +2,20 @@
 
 define void @vst2i8(i8* %A, <8 x i8>* %B) nounwind {
 ;CHECK: vst2i8:
-;CHECK: vst2.8
+;Check the alignment value.  Max for this instruction is 128 bits:
+;CHECK: vst2.8 {d16, d17}, [r0, :64]
 	%tmp1 = load <8 x i8>* %B
-	call void @llvm.arm.neon.vst2.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1)
+	call void @llvm.arm.neon.vst2.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 8)
 	ret void
 }
 
 define void @vst2i16(i16* %A, <4 x i16>* %B) nounwind {
 ;CHECK: vst2i16:
-;CHECK: vst2.16
+;Check the alignment value.  Max for this instruction is 128 bits:
+;CHECK: vst2.16 {d16, d17}, [r0, :128]
 	%tmp0 = bitcast i16* %A to i8*
 	%tmp1 = load <4 x i16>* %B
-	call void @llvm.arm.neon.vst2.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1)
+	call void @llvm.arm.neon.vst2.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 32)
 	ret void
 }
 
@@ -22,7 +24,7 @@
 ;CHECK: vst2.32
 	%tmp0 = bitcast i32* %A to i8*
 	%tmp1 = load <2 x i32>* %B
-	call void @llvm.arm.neon.vst2.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1)
+	call void @llvm.arm.neon.vst2.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1)
 	ret void
 }
 
@@ -31,42 +33,46 @@
 ;CHECK: vst2.32
 	%tmp0 = bitcast float* %A to i8*
 	%tmp1 = load <2 x float>* %B
-	call void @llvm.arm.neon.vst2.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1)
+	call void @llvm.arm.neon.vst2.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, i32 1)
 	ret void
 }
 
 define void @vst2i64(i64* %A, <1 x i64>* %B) nounwind {
 ;CHECK: vst2i64:
-;CHECK: vst1.64
+;Check the alignment value.  Max for this instruction is 128 bits:
+;CHECK: vst1.64 {d16, d17}, [r0, :128]
 	%tmp0 = bitcast i64* %A to i8*
 	%tmp1 = load <1 x i64>* %B
-	call void @llvm.arm.neon.vst2.v1i64(i8* %tmp0, <1 x i64> %tmp1, <1 x i64> %tmp1)
+	call void @llvm.arm.neon.vst2.v1i64(i8* %tmp0, <1 x i64> %tmp1, <1 x i64> %tmp1, i32 32)
 	ret void
 }
 
 define void @vst2Qi8(i8* %A, <16 x i8>* %B) nounwind {
 ;CHECK: vst2Qi8:
-;CHECK: vst2.8
+;Check the alignment value.  Max for this instruction is 256 bits:
+;CHECK: vst2.8 {d16, d17, d18, d19}, [r0, :64]
 	%tmp1 = load <16 x i8>* %B
-	call void @llvm.arm.neon.vst2.v16i8(i8* %A, <16 x i8> %tmp1, <16 x i8> %tmp1)
+	call void @llvm.arm.neon.vst2.v16i8(i8* %A, <16 x i8> %tmp1, <16 x i8> %tmp1, i32 8)
 	ret void
 }
 
 define void @vst2Qi16(i16* %A, <8 x i16>* %B) nounwind {
 ;CHECK: vst2Qi16:
-;CHECK: vst2.16
+;Check the alignment value.  Max for this instruction is 256 bits:
+;CHECK: vst2.16 {d16, d17, d18, d19}, [r0, :128]
 	%tmp0 = bitcast i16* %A to i8*
 	%tmp1 = load <8 x i16>* %B
-	call void @llvm.arm.neon.vst2.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1)
+	call void @llvm.arm.neon.vst2.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 16)
 	ret void
 }
 
 define void @vst2Qi32(i32* %A, <4 x i32>* %B) nounwind {
 ;CHECK: vst2Qi32:
-;CHECK: vst2.32
+;Check the alignment value.  Max for this instruction is 256 bits:
+;CHECK: vst2.32 {d16, d17, d18, d19}, [r0, :256]
 	%tmp0 = bitcast i32* %A to i8*
 	%tmp1 = load <4 x i32>* %B
-	call void @llvm.arm.neon.vst2.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1)
+	call void @llvm.arm.neon.vst2.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 64)
 	ret void
 }
 
@@ -75,17 +81,17 @@
 ;CHECK: vst2.32
 	%tmp0 = bitcast float* %A to i8*
 	%tmp1 = load <4 x float>* %B
-	call void @llvm.arm.neon.vst2.v4f32(i8* %tmp0, <4 x float> %tmp1, <4 x float> %tmp1)
+	call void @llvm.arm.neon.vst2.v4f32(i8* %tmp0, <4 x float> %tmp1, <4 x float> %tmp1, i32 1)
 	ret void
 }
 
-declare void @llvm.arm.neon.vst2.v8i8(i8*, <8 x i8>, <8 x i8>) nounwind
-declare void @llvm.arm.neon.vst2.v4i16(i8*, <4 x i16>, <4 x i16>) nounwind
-declare void @llvm.arm.neon.vst2.v2i32(i8*, <2 x i32>, <2 x i32>) nounwind
-declare void @llvm.arm.neon.vst2.v2f32(i8*, <2 x float>, <2 x float>) nounwind
-declare void @llvm.arm.neon.vst2.v1i64(i8*, <1 x i64>, <1 x i64>) nounwind
+declare void @llvm.arm.neon.vst2.v8i8(i8*, <8 x i8>, <8 x i8>, i32) nounwind
+declare void @llvm.arm.neon.vst2.v4i16(i8*, <4 x i16>, <4 x i16>, i32) nounwind
+declare void @llvm.arm.neon.vst2.v2i32(i8*, <2 x i32>, <2 x i32>, i32) nounwind
+declare void @llvm.arm.neon.vst2.v2f32(i8*, <2 x float>, <2 x float>, i32) nounwind
+declare void @llvm.arm.neon.vst2.v1i64(i8*, <1 x i64>, <1 x i64>, i32) nounwind
 
-declare void @llvm.arm.neon.vst2.v16i8(i8*, <16 x i8>, <16 x i8>) nounwind
-declare void @llvm.arm.neon.vst2.v8i16(i8*, <8 x i16>, <8 x i16>) nounwind
-declare void @llvm.arm.neon.vst2.v4i32(i8*, <4 x i32>, <4 x i32>) nounwind
-declare void @llvm.arm.neon.vst2.v4f32(i8*, <4 x float>, <4 x float>) nounwind
+declare void @llvm.arm.neon.vst2.v16i8(i8*, <16 x i8>, <16 x i8>, i32) nounwind
+declare void @llvm.arm.neon.vst2.v8i16(i8*, <8 x i16>, <8 x i16>, i32) nounwind
+declare void @llvm.arm.neon.vst2.v4i32(i8*, <4 x i32>, <4 x i32>, i32) nounwind
+declare void @llvm.arm.neon.vst2.v4f32(i8*, <4 x float>, <4 x float>, i32) nounwind

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/vst3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/vst3.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/vst3.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/vst3.ll Tue Oct 26 19:48:03 2010
@@ -1,10 +1,12 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
+; RUN: llc < %s -march=arm -mattr=+neon -O0 | FileCheck %s
 
 define void @vst3i8(i8* %A, <8 x i8>* %B) nounwind {
 ;CHECK: vst3i8:
-;CHECK: vst3.8
+;Check the alignment value.  Max for this instruction is 64 bits:
+;This test runs at -O0 so do not check for specific register numbers.
+;CHECK: vst3.8 {d{{.*}}, d{{.*}}, d{{.*}}}, [r{{.*}}, :64]
 	%tmp1 = load <8 x i8>* %B
-	call void @llvm.arm.neon.vst3.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1)
+	call void @llvm.arm.neon.vst3.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 32)
 	ret void
 }
 
@@ -13,7 +15,7 @@
 ;CHECK: vst3.16
 	%tmp0 = bitcast i16* %A to i8*
 	%tmp1 = load <4 x i16>* %B
-	call void @llvm.arm.neon.vst3.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1)
+	call void @llvm.arm.neon.vst3.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1)
 	ret void
 }
 
@@ -22,7 +24,7 @@
 ;CHECK: vst3.32
 	%tmp0 = bitcast i32* %A to i8*
 	%tmp1 = load <2 x i32>* %B
-	call void @llvm.arm.neon.vst3.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1)
+	call void @llvm.arm.neon.vst3.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1)
 	ret void
 }
 
@@ -31,25 +33,29 @@
 ;CHECK: vst3.32
 	%tmp0 = bitcast float* %A to i8*
 	%tmp1 = load <2 x float>* %B
-	call void @llvm.arm.neon.vst3.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1)
+	call void @llvm.arm.neon.vst3.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, i32 1)
 	ret void
 }
 
 define void @vst3i64(i64* %A, <1 x i64>* %B) nounwind {
 ;CHECK: vst3i64:
-;CHECK: vst1.64
+;Check the alignment value.  Max for this instruction is 64 bits:
+;This test runs at -O0 so do not check for specific register numbers.
+;CHECK: vst1.64 {d{{.*}}, d{{.*}}, d{{.*}}}, [r{{.*}}, :64]
 	%tmp0 = bitcast i64* %A to i8*
 	%tmp1 = load <1 x i64>* %B
-	call void @llvm.arm.neon.vst3.v1i64(i8* %tmp0, <1 x i64> %tmp1, <1 x i64> %tmp1, <1 x i64> %tmp1)
+	call void @llvm.arm.neon.vst3.v1i64(i8* %tmp0, <1 x i64> %tmp1, <1 x i64> %tmp1, <1 x i64> %tmp1, i32 16)
 	ret void
 }
 
 define void @vst3Qi8(i8* %A, <16 x i8>* %B) nounwind {
 ;CHECK: vst3Qi8:
-;CHECK: vst3.8
-;CHECK: vst3.8
+;Check the alignment value.  Max for this instruction is 64 bits:
+;This test runs at -O0 so do not check for specific register numbers.
+;CHECK: vst3.8 {d{{.*}}, d{{.*}}, d{{.*}}}, [r{{.*}}, :64]!
+;CHECK: vst3.8 {d{{.*}}, d{{.*}}, d{{.*}}}, [r{{.*}}, :64]
 	%tmp1 = load <16 x i8>* %B
-	call void @llvm.arm.neon.vst3.v16i8(i8* %A, <16 x i8> %tmp1, <16 x i8> %tmp1, <16 x i8> %tmp1)
+	call void @llvm.arm.neon.vst3.v16i8(i8* %A, <16 x i8> %tmp1, <16 x i8> %tmp1, <16 x i8> %tmp1, i32 32)
 	ret void
 }
 
@@ -59,7 +65,7 @@
 ;CHECK: vst3.16
 	%tmp0 = bitcast i16* %A to i8*
 	%tmp1 = load <8 x i16>* %B
-	call void @llvm.arm.neon.vst3.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1)
+	call void @llvm.arm.neon.vst3.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1)
 	ret void
 }
 
@@ -69,7 +75,7 @@
 ;CHECK: vst3.32
 	%tmp0 = bitcast i32* %A to i8*
 	%tmp1 = load <4 x i32>* %B
-	call void @llvm.arm.neon.vst3.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1)
+	call void @llvm.arm.neon.vst3.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 1)
 	ret void
 }
 
@@ -79,17 +85,17 @@
 ;CHECK: vst3.32
 	%tmp0 = bitcast float* %A to i8*
 	%tmp1 = load <4 x float>* %B
-	call void @llvm.arm.neon.vst3.v4f32(i8* %tmp0, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1)
+	call void @llvm.arm.neon.vst3.v4f32(i8* %tmp0, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, i32 1)
 	ret void
 }
 
-declare void @llvm.arm.neon.vst3.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>) nounwind
-declare void @llvm.arm.neon.vst3.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>) nounwind
-declare void @llvm.arm.neon.vst3.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>) nounwind
-declare void @llvm.arm.neon.vst3.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>) nounwind
-declare void @llvm.arm.neon.vst3.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>) nounwind
+declare void @llvm.arm.neon.vst3.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32) nounwind
+declare void @llvm.arm.neon.vst3.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, i32) nounwind
+declare void @llvm.arm.neon.vst3.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, i32) nounwind
+declare void @llvm.arm.neon.vst3.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, i32) nounwind
+declare void @llvm.arm.neon.vst3.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, i32) nounwind
 
-declare void @llvm.arm.neon.vst3.v16i8(i8*, <16 x i8>, <16 x i8>, <16 x i8>) nounwind
-declare void @llvm.arm.neon.vst3.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>) nounwind
-declare void @llvm.arm.neon.vst3.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>) nounwind
-declare void @llvm.arm.neon.vst3.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>) nounwind
+declare void @llvm.arm.neon.vst3.v16i8(i8*, <16 x i8>, <16 x i8>, <16 x i8>, i32) nounwind
+declare void @llvm.arm.neon.vst3.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, i32) nounwind
+declare void @llvm.arm.neon.vst3.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, i32) nounwind
+declare void @llvm.arm.neon.vst3.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, i32) nounwind

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/vst4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/vst4.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/vst4.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/vst4.ll Tue Oct 26 19:48:03 2010
@@ -2,27 +2,30 @@
 
 define void @vst4i8(i8* %A, <8 x i8>* %B) nounwind {
 ;CHECK: vst4i8:
-;CHECK: vst4.8
+;Check the alignment value.  Max for this instruction is 256 bits:
+;CHECK: vst4.8 {d16, d17, d18, d19}, [r0, :64]
 	%tmp1 = load <8 x i8>* %B
-	call void @llvm.arm.neon.vst4.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1)
+	call void @llvm.arm.neon.vst4.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 8)
 	ret void
 }
 
 define void @vst4i16(i16* %A, <4 x i16>* %B) nounwind {
 ;CHECK: vst4i16:
-;CHECK: vst4.16
+;Check the alignment value.  Max for this instruction is 256 bits:
+;CHECK: vst4.16 {d16, d17, d18, d19}, [r0, :128]
 	%tmp0 = bitcast i16* %A to i8*
 	%tmp1 = load <4 x i16>* %B
-	call void @llvm.arm.neon.vst4.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1)
+	call void @llvm.arm.neon.vst4.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 16)
 	ret void
 }
 
 define void @vst4i32(i32* %A, <2 x i32>* %B) nounwind {
 ;CHECK: vst4i32:
-;CHECK: vst4.32
+;Check the alignment value.  Max for this instruction is 256 bits:
+;CHECK: vst4.32 {d16, d17, d18, d19}, [r0, :256]
 	%tmp0 = bitcast i32* %A to i8*
 	%tmp1 = load <2 x i32>* %B
-	call void @llvm.arm.neon.vst4.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1)
+	call void @llvm.arm.neon.vst4.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 32)
 	ret void
 }
 
@@ -31,35 +34,38 @@
 ;CHECK: vst4.32
 	%tmp0 = bitcast float* %A to i8*
 	%tmp1 = load <2 x float>* %B
-	call void @llvm.arm.neon.vst4.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1)
+	call void @llvm.arm.neon.vst4.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, i32 1)
 	ret void
 }
 
 define void @vst4i64(i64* %A, <1 x i64>* %B) nounwind {
 ;CHECK: vst4i64:
-;CHECK: vst1.64
+;Check the alignment value.  Max for this instruction is 256 bits:
+;CHECK: vst1.64 {d16, d17, d18, d19}, [r0, :256]
 	%tmp0 = bitcast i64* %A to i8*
 	%tmp1 = load <1 x i64>* %B
-	call void @llvm.arm.neon.vst4.v1i64(i8* %tmp0, <1 x i64> %tmp1, <1 x i64> %tmp1, <1 x i64> %tmp1, <1 x i64> %tmp1)
+	call void @llvm.arm.neon.vst4.v1i64(i8* %tmp0, <1 x i64> %tmp1, <1 x i64> %tmp1, <1 x i64> %tmp1, <1 x i64> %tmp1, i32 64)
 	ret void
 }
 
 define void @vst4Qi8(i8* %A, <16 x i8>* %B) nounwind {
 ;CHECK: vst4Qi8:
-;CHECK: vst4.8
-;CHECK: vst4.8
+;Check the alignment value.  Max for this instruction is 256 bits:
+;CHECK: vst4.8 {d16, d18, d20, d22}, [r0, :256]!
+;CHECK: vst4.8 {d17, d19, d21, d23}, [r0, :256]
 	%tmp1 = load <16 x i8>* %B
-	call void @llvm.arm.neon.vst4.v16i8(i8* %A, <16 x i8> %tmp1, <16 x i8> %tmp1, <16 x i8> %tmp1, <16 x i8> %tmp1)
+	call void @llvm.arm.neon.vst4.v16i8(i8* %A, <16 x i8> %tmp1, <16 x i8> %tmp1, <16 x i8> %tmp1, <16 x i8> %tmp1, i32 64)
 	ret void
 }
 
 define void @vst4Qi16(i16* %A, <8 x i16>* %B) nounwind {
 ;CHECK: vst4Qi16:
-;CHECK: vst4.16
-;CHECK: vst4.16
+;Check for no alignment specifier.
+;CHECK: vst4.16 {d16, d18, d20, d22}, [r0]!
+;CHECK: vst4.16 {d17, d19, d21, d23}, [r0]
 	%tmp0 = bitcast i16* %A to i8*
 	%tmp1 = load <8 x i16>* %B
-	call void @llvm.arm.neon.vst4.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1)
+	call void @llvm.arm.neon.vst4.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1)
 	ret void
 }
 
@@ -69,7 +75,7 @@
 ;CHECK: vst4.32
 	%tmp0 = bitcast i32* %A to i8*
 	%tmp1 = load <4 x i32>* %B
-	call void @llvm.arm.neon.vst4.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1)
+	call void @llvm.arm.neon.vst4.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 1)
 	ret void
 }
 
@@ -79,17 +85,17 @@
 ;CHECK: vst4.32
 	%tmp0 = bitcast float* %A to i8*
 	%tmp1 = load <4 x float>* %B
-	call void @llvm.arm.neon.vst4.v4f32(i8* %tmp0, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1)
+	call void @llvm.arm.neon.vst4.v4f32(i8* %tmp0, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, i32 1)
 	ret void
 }
 
-declare void @llvm.arm.neon.vst4.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>) nounwind
-declare void @llvm.arm.neon.vst4.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>) nounwind
-declare void @llvm.arm.neon.vst4.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>) nounwind
-declare void @llvm.arm.neon.vst4.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, <2 x float>) nounwind
-declare void @llvm.arm.neon.vst4.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>) nounwind
+declare void @llvm.arm.neon.vst4.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i32) nounwind
+declare void @llvm.arm.neon.vst4.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i32) nounwind
+declare void @llvm.arm.neon.vst4.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32) nounwind
+declare void @llvm.arm.neon.vst4.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, <2 x float>, i32) nounwind
+declare void @llvm.arm.neon.vst4.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, i32) nounwind
 
-declare void @llvm.arm.neon.vst4.v16i8(i8*, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>) nounwind
-declare void @llvm.arm.neon.vst4.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>) nounwind
-declare void @llvm.arm.neon.vst4.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>) nounwind
-declare void @llvm.arm.neon.vst4.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>) nounwind
+declare void @llvm.arm.neon.vst4.v16i8(i8*, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i32) nounwind
+declare void @llvm.arm.neon.vst4.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i32) nounwind
+declare void @llvm.arm.neon.vst4.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32) nounwind
+declare void @llvm.arm.neon.vst4.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32) nounwind

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/vstlane.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/vstlane.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/vstlane.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/vstlane.ll Tue Oct 26 19:48:03 2010
@@ -2,18 +2,20 @@
 
 define void @vst2lanei8(i8* %A, <8 x i8>* %B) nounwind {
 ;CHECK: vst2lanei8:
-;CHECK: vst2.8
+;Check the alignment value.  Max for this instruction is 16 bits:
+;CHECK: vst2.8 {d16[1], d17[1]}, [r0, :16]
 	%tmp1 = load <8 x i8>* %B
-	call void @llvm.arm.neon.vst2lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1)
+	call void @llvm.arm.neon.vst2lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1, i32 4)
 	ret void
 }
 
 define void @vst2lanei16(i16* %A, <4 x i16>* %B) nounwind {
 ;CHECK: vst2lanei16:
-;CHECK: vst2.16
+;Check the alignment value.  Max for this instruction is 32 bits:
+;CHECK: vst2.16 {d16[1], d17[1]}, [r0, :32]
 	%tmp0 = bitcast i16* %A to i8*
 	%tmp1 = load <4 x i16>* %B
-	call void @llvm.arm.neon.vst2lane.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1)
+	call void @llvm.arm.neon.vst2lane.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1, i32 8)
 	ret void
 }
 
@@ -22,7 +24,7 @@
 ;CHECK: vst2.32
 	%tmp0 = bitcast i32* %A to i8*
 	%tmp1 = load <2 x i32>* %B
-	call void @llvm.arm.neon.vst2lane.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1)
+	call void @llvm.arm.neon.vst2lane.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1, i32 1)
 	ret void
 }
 
@@ -31,25 +33,27 @@
 ;CHECK: vst2.32
 	%tmp0 = bitcast float* %A to i8*
 	%tmp1 = load <2 x float>* %B
-	call void @llvm.arm.neon.vst2lane.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, i32 1)
+	call void @llvm.arm.neon.vst2lane.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, i32 1, i32 1)
 	ret void
 }
 
 define void @vst2laneQi16(i16* %A, <8 x i16>* %B) nounwind {
 ;CHECK: vst2laneQi16:
-;CHECK: vst2.16
+;Check the (default) alignment.
+;CHECK: vst2.16 {d17[1], d19[1]}, [r0]
 	%tmp0 = bitcast i16* %A to i8*
 	%tmp1 = load <8 x i16>* %B
-	call void @llvm.arm.neon.vst2lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1)
+	call void @llvm.arm.neon.vst2lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 5, i32 1)
 	ret void
 }
 
 define void @vst2laneQi32(i32* %A, <4 x i32>* %B) nounwind {
 ;CHECK: vst2laneQi32:
-;CHECK: vst2.32
+;Check the alignment value.  Max for this instruction is 64 bits:
+;CHECK: vst2.32 {d17[0], d19[0]}, [r0, :64]
 	%tmp0 = bitcast i32* %A to i8*
 	%tmp1 = load <4 x i32>* %B
-	call void @llvm.arm.neon.vst2lane.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 2)
+	call void @llvm.arm.neon.vst2lane.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 2, i32 16)
 	ret void
 }
 
@@ -58,33 +62,34 @@
 ;CHECK: vst2.32
 	%tmp0 = bitcast float* %A to i8*
 	%tmp1 = load <4 x float>* %B
-	call void @llvm.arm.neon.vst2lane.v4f32(i8* %tmp0, <4 x float> %tmp1, <4 x float> %tmp1, i32 3)
+	call void @llvm.arm.neon.vst2lane.v4f32(i8* %tmp0, <4 x float> %tmp1, <4 x float> %tmp1, i32 3, i32 1)
 	ret void
 }
 
-declare void @llvm.arm.neon.vst2lane.v8i8(i8*, <8 x i8>, <8 x i8>, i32) nounwind
-declare void @llvm.arm.neon.vst2lane.v4i16(i8*, <4 x i16>, <4 x i16>, i32) nounwind
-declare void @llvm.arm.neon.vst2lane.v2i32(i8*, <2 x i32>, <2 x i32>, i32) nounwind
-declare void @llvm.arm.neon.vst2lane.v2f32(i8*, <2 x float>, <2 x float>, i32) nounwind
+declare void @llvm.arm.neon.vst2lane.v8i8(i8*, <8 x i8>, <8 x i8>, i32, i32) nounwind
+declare void @llvm.arm.neon.vst2lane.v4i16(i8*, <4 x i16>, <4 x i16>, i32, i32) nounwind
+declare void @llvm.arm.neon.vst2lane.v2i32(i8*, <2 x i32>, <2 x i32>, i32, i32) nounwind
+declare void @llvm.arm.neon.vst2lane.v2f32(i8*, <2 x float>, <2 x float>, i32, i32) nounwind
 
-declare void @llvm.arm.neon.vst2lane.v8i16(i8*, <8 x i16>, <8 x i16>, i32) nounwind
-declare void @llvm.arm.neon.vst2lane.v4i32(i8*, <4 x i32>, <4 x i32>, i32) nounwind
-declare void @llvm.arm.neon.vst2lane.v4f32(i8*, <4 x float>, <4 x float>, i32) nounwind
+declare void @llvm.arm.neon.vst2lane.v8i16(i8*, <8 x i16>, <8 x i16>, i32, i32) nounwind
+declare void @llvm.arm.neon.vst2lane.v4i32(i8*, <4 x i32>, <4 x i32>, i32, i32) nounwind
+declare void @llvm.arm.neon.vst2lane.v4f32(i8*, <4 x float>, <4 x float>, i32, i32) nounwind
 
 define void @vst3lanei8(i8* %A, <8 x i8>* %B) nounwind {
 ;CHECK: vst3lanei8:
 ;CHECK: vst3.8
 	%tmp1 = load <8 x i8>* %B
-	call void @llvm.arm.neon.vst3lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1)
+	call void @llvm.arm.neon.vst3lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1, i32 1)
 	ret void
 }
 
 define void @vst3lanei16(i16* %A, <4 x i16>* %B) nounwind {
 ;CHECK: vst3lanei16:
-;CHECK: vst3.16
+;Check the (default) alignment value.  VST3 does not support alignment.
+;CHECK: vst3.16 {d16[1], d17[1], d18[1]}, [r0]
 	%tmp0 = bitcast i16* %A to i8*
 	%tmp1 = load <4 x i16>* %B
-	call void @llvm.arm.neon.vst3lane.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1)
+	call void @llvm.arm.neon.vst3lane.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1, i32 8)
 	ret void
 }
 
@@ -93,7 +98,7 @@
 ;CHECK: vst3.32
 	%tmp0 = bitcast i32* %A to i8*
 	%tmp1 = load <2 x i32>* %B
-	call void @llvm.arm.neon.vst3lane.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1)
+	call void @llvm.arm.neon.vst3lane.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1, i32 1)
 	ret void
 }
 
@@ -102,16 +107,17 @@
 ;CHECK: vst3.32
 	%tmp0 = bitcast float* %A to i8*
 	%tmp1 = load <2 x float>* %B
-	call void @llvm.arm.neon.vst3lane.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, i32 1)
+	call void @llvm.arm.neon.vst3lane.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, i32 1, i32 1)
 	ret void
 }
 
 define void @vst3laneQi16(i16* %A, <8 x i16>* %B) nounwind {
 ;CHECK: vst3laneQi16:
-;CHECK: vst3.16
+;Check the (default) alignment value.  VST3 does not support alignment.
+;CHECK: vst3.16 {d17[2], d19[2], d21[2]}, [r0]
 	%tmp0 = bitcast i16* %A to i8*
 	%tmp1 = load <8 x i16>* %B
-	call void @llvm.arm.neon.vst3lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 6)
+	call void @llvm.arm.neon.vst3lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 6, i32 8)
 	ret void
 }
 
@@ -120,7 +126,7 @@
 ;CHECK: vst3.32
 	%tmp0 = bitcast i32* %A to i8*
 	%tmp1 = load <4 x i32>* %B
-	call void @llvm.arm.neon.vst3lane.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 0)
+	call void @llvm.arm.neon.vst3lane.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 0, i32 1)
 	ret void
 }
 
@@ -129,25 +135,26 @@
 ;CHECK: vst3.32
 	%tmp0 = bitcast float* %A to i8*
 	%tmp1 = load <4 x float>* %B
-	call void @llvm.arm.neon.vst3lane.v4f32(i8* %tmp0, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, i32 1)
+	call void @llvm.arm.neon.vst3lane.v4f32(i8* %tmp0, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, i32 1, i32 1)
 	ret void
 }
 
-declare void @llvm.arm.neon.vst3lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32) nounwind
-declare void @llvm.arm.neon.vst3lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, i32) nounwind
-declare void @llvm.arm.neon.vst3lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, i32) nounwind
-declare void @llvm.arm.neon.vst3lane.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, i32) nounwind
+declare void @llvm.arm.neon.vst3lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32, i32) nounwind
+declare void @llvm.arm.neon.vst3lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, i32, i32) nounwind
+declare void @llvm.arm.neon.vst3lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, i32, i32) nounwind
+declare void @llvm.arm.neon.vst3lane.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, i32, i32) nounwind
 
-declare void @llvm.arm.neon.vst3lane.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, i32) nounwind
-declare void @llvm.arm.neon.vst3lane.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, i32) nounwind
-declare void @llvm.arm.neon.vst3lane.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, i32) nounwind
+declare void @llvm.arm.neon.vst3lane.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, i32, i32) nounwind
+declare void @llvm.arm.neon.vst3lane.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, i32, i32) nounwind
+declare void @llvm.arm.neon.vst3lane.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, i32, i32) nounwind
 
 
 define void @vst4lanei8(i8* %A, <8 x i8>* %B) nounwind {
 ;CHECK: vst4lanei8:
-;CHECK: vst4.8
+;Check the alignment value.  Max for this instruction is 32 bits:
+;CHECK: vst4.8 {d16[1], d17[1], d18[1], d19[1]}, [r0, :32]
 	%tmp1 = load <8 x i8>* %B
-	call void @llvm.arm.neon.vst4lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1)
+	call void @llvm.arm.neon.vst4lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1, i32 8)
 	ret void
 }
 
@@ -156,16 +163,17 @@
 ;CHECK: vst4.16
 	%tmp0 = bitcast i16* %A to i8*
 	%tmp1 = load <4 x i16>* %B
-	call void @llvm.arm.neon.vst4lane.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1)
+	call void @llvm.arm.neon.vst4lane.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1, i32 1)
 	ret void
 }
 
 define void @vst4lanei32(i32* %A, <2 x i32>* %B) nounwind {
 ;CHECK: vst4lanei32:
-;CHECK: vst4.32
+;Check the alignment value.  Max for this instruction is 128 bits:
+;CHECK: vst4.32 {d16[1], d17[1], d18[1], d19[1]}, [r0, :128]
 	%tmp0 = bitcast i32* %A to i8*
 	%tmp1 = load <2 x i32>* %B
-	call void @llvm.arm.neon.vst4lane.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1)
+	call void @llvm.arm.neon.vst4lane.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1, i32 16)
 	ret void
 }
 
@@ -174,25 +182,27 @@
 ;CHECK: vst4.32
 	%tmp0 = bitcast float* %A to i8*
 	%tmp1 = load <2 x float>* %B
-	call void @llvm.arm.neon.vst4lane.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, i32 1)
+	call void @llvm.arm.neon.vst4lane.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, i32 1, i32 1)
 	ret void
 }
 
 define void @vst4laneQi16(i16* %A, <8 x i16>* %B) nounwind {
 ;CHECK: vst4laneQi16:
-;CHECK: vst4.16
+;Check the alignment value.  Max for this instruction is 64 bits:
+;CHECK: vst4.16 {d17[3], d19[3], d21[3], d23[3]}, [r0, :64]
 	%tmp0 = bitcast i16* %A to i8*
 	%tmp1 = load <8 x i16>* %B
-	call void @llvm.arm.neon.vst4lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 7)
+	call void @llvm.arm.neon.vst4lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 7, i32 16)
 	ret void
 }
 
 define void @vst4laneQi32(i32* %A, <4 x i32>* %B) nounwind {
 ;CHECK: vst4laneQi32:
-;CHECK: vst4.32
+;Check the (default) alignment.
+;CHECK: vst4.32 {d17[0], d19[0], d21[0], d23[0]}, [r0]
 	%tmp0 = bitcast i32* %A to i8*
 	%tmp1 = load <4 x i32>* %B
-	call void @llvm.arm.neon.vst4lane.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 2)
+	call void @llvm.arm.neon.vst4lane.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 2, i32 1)
 	ret void
 }
 
@@ -201,15 +211,15 @@
 ;CHECK: vst4.32
 	%tmp0 = bitcast float* %A to i8*
 	%tmp1 = load <4 x float>* %B
-	call void @llvm.arm.neon.vst4lane.v4f32(i8* %tmp0, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, i32 1)
+	call void @llvm.arm.neon.vst4lane.v4f32(i8* %tmp0, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, i32 1, i32 1)
 	ret void
 }
 
-declare void @llvm.arm.neon.vst4lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i32) nounwind
-declare void @llvm.arm.neon.vst4lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i32) nounwind
-declare void @llvm.arm.neon.vst4lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32) nounwind
-declare void @llvm.arm.neon.vst4lane.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, <2 x float>, i32) nounwind
+declare void @llvm.arm.neon.vst4lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i32, i32) nounwind
+declare void @llvm.arm.neon.vst4lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i32, i32) nounwind
+declare void @llvm.arm.neon.vst4lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32, i32) nounwind
+declare void @llvm.arm.neon.vst4lane.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, <2 x float>, i32, i32) nounwind
 
-declare void @llvm.arm.neon.vst4lane.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i32) nounwind
-declare void @llvm.arm.neon.vst4lane.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32) nounwind
-declare void @llvm.arm.neon.vst4lane.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32) nounwind
+declare void @llvm.arm.neon.vst4lane.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i32, i32) nounwind
+declare void @llvm.arm.neon.vst4lane.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32, i32) nounwind
+declare void @llvm.arm.neon.vst4lane.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32, i32) nounwind
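
Note: the lane store intrinsics follow the same pattern, taking two trailing i32 operands: the lane index (unchanged) followed by the new alignment in bytes. A minimal sketch matching the updated declarations (names are illustrative):

  ; store lane 1 of two <8 x i8> registers with 4-byte alignment
  call void @llvm.arm.neon.vst2lane.v8i8(i8* %p, <8 x i8> %v, <8 x i8> %v, i32 1, i32 4)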

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/vsub.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/vsub.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/vsub.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/vsub.ll Tue Oct 26 19:48:03 2010
@@ -157,8 +157,10 @@
 ;CHECK: vsubl.s8
 	%tmp1 = load <8 x i8>* %A
 	%tmp2 = load <8 x i8>* %B
-	%tmp3 = call <8 x i16> @llvm.arm.neon.vsubls.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
-	ret <8 x i16> %tmp3
+	%tmp3 = sext <8 x i8> %tmp1 to <8 x i16>
+	%tmp4 = sext <8 x i8> %tmp2 to <8 x i16>
+	%tmp5 = sub <8 x i16> %tmp3, %tmp4
+	ret <8 x i16> %tmp5
 }
 
 define <4 x i32> @vsubls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
@@ -166,8 +168,10 @@
 ;CHECK: vsubl.s16
 	%tmp1 = load <4 x i16>* %A
 	%tmp2 = load <4 x i16>* %B
-	%tmp3 = call <4 x i32> @llvm.arm.neon.vsubls.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
-	ret <4 x i32> %tmp3
+	%tmp3 = sext <4 x i16> %tmp1 to <4 x i32>
+	%tmp4 = sext <4 x i16> %tmp2 to <4 x i32>
+	%tmp5 = sub <4 x i32> %tmp3, %tmp4
+	ret <4 x i32> %tmp5
 }
 
 define <2 x i64> @vsubls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
@@ -175,8 +179,10 @@
 ;CHECK: vsubl.s32
 	%tmp1 = load <2 x i32>* %A
 	%tmp2 = load <2 x i32>* %B
-	%tmp3 = call <2 x i64> @llvm.arm.neon.vsubls.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
-	ret <2 x i64> %tmp3
+	%tmp3 = sext <2 x i32> %tmp1 to <2 x i64>
+	%tmp4 = sext <2 x i32> %tmp2 to <2 x i64>
+	%tmp5 = sub <2 x i64> %tmp3, %tmp4
+	ret <2 x i64> %tmp5
 }
 
 define <8 x i16> @vsublu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
@@ -184,8 +190,10 @@
 ;CHECK: vsubl.u8
 	%tmp1 = load <8 x i8>* %A
 	%tmp2 = load <8 x i8>* %B
-	%tmp3 = call <8 x i16> @llvm.arm.neon.vsublu.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
-	ret <8 x i16> %tmp3
+	%tmp3 = zext <8 x i8> %tmp1 to <8 x i16>
+	%tmp4 = zext <8 x i8> %tmp2 to <8 x i16>
+	%tmp5 = sub <8 x i16> %tmp3, %tmp4
+	ret <8 x i16> %tmp5
 }
 
 define <4 x i32> @vsublu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
@@ -193,8 +201,10 @@
 ;CHECK: vsubl.u16
 	%tmp1 = load <4 x i16>* %A
 	%tmp2 = load <4 x i16>* %B
-	%tmp3 = call <4 x i32> @llvm.arm.neon.vsublu.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
-	ret <4 x i32> %tmp3
+	%tmp3 = zext <4 x i16> %tmp1 to <4 x i32>
+	%tmp4 = zext <4 x i16> %tmp2 to <4 x i32>
+	%tmp5 = sub <4 x i32> %tmp3, %tmp4
+	ret <4 x i32> %tmp5
 }
 
 define <2 x i64> @vsublu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
@@ -202,25 +212,20 @@
 ;CHECK: vsubl.u32
 	%tmp1 = load <2 x i32>* %A
 	%tmp2 = load <2 x i32>* %B
-	%tmp3 = call <2 x i64> @llvm.arm.neon.vsublu.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
-	ret <2 x i64> %tmp3
+	%tmp3 = zext <2 x i32> %tmp1 to <2 x i64>
+	%tmp4 = zext <2 x i32> %tmp2 to <2 x i64>
+	%tmp5 = sub <2 x i64> %tmp3, %tmp4
+	ret <2 x i64> %tmp5
 }
 
-declare <8 x i16> @llvm.arm.neon.vsubls.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vsubls.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vsubls.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
-
-declare <8 x i16> @llvm.arm.neon.vsublu.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vsublu.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vsublu.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
-
 define <8 x i16> @vsubws8(<8 x i16>* %A, <8 x i8>* %B) nounwind {
 ;CHECK: vsubws8:
 ;CHECK: vsubw.s8
 	%tmp1 = load <8 x i16>* %A
 	%tmp2 = load <8 x i8>* %B
-	%tmp3 = call <8 x i16> @llvm.arm.neon.vsubws.v8i16(<8 x i16> %tmp1, <8 x i8> %tmp2)
-	ret <8 x i16> %tmp3
+	%tmp3 = sext <8 x i8> %tmp2 to <8 x i16>
+	%tmp4 = sub <8 x i16> %tmp1, %tmp3
+	ret <8 x i16> %tmp4
 }
 
 define <4 x i32> @vsubws16(<4 x i32>* %A, <4 x i16>* %B) nounwind {
@@ -228,8 +233,9 @@
 ;CHECK: vsubw.s16
 	%tmp1 = load <4 x i32>* %A
 	%tmp2 = load <4 x i16>* %B
-	%tmp3 = call <4 x i32> @llvm.arm.neon.vsubws.v4i32(<4 x i32> %tmp1, <4 x i16> %tmp2)
-	ret <4 x i32> %tmp3
+	%tmp3 = sext <4 x i16> %tmp2 to <4 x i32>
+	%tmp4 = sub <4 x i32> %tmp1, %tmp3
+	ret <4 x i32> %tmp4
 }
 
 define <2 x i64> @vsubws32(<2 x i64>* %A, <2 x i32>* %B) nounwind {
@@ -237,8 +243,9 @@
 ;CHECK: vsubw.s32
 	%tmp1 = load <2 x i64>* %A
 	%tmp2 = load <2 x i32>* %B
-	%tmp3 = call <2 x i64> @llvm.arm.neon.vsubws.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2)
-	ret <2 x i64> %tmp3
+	%tmp3 = sext <2 x i32> %tmp2 to <2 x i64>
+	%tmp4 = sub <2 x i64> %tmp1, %tmp3
+	ret <2 x i64> %tmp4
 }
 
 define <8 x i16> @vsubwu8(<8 x i16>* %A, <8 x i8>* %B) nounwind {
@@ -246,8 +253,9 @@
 ;CHECK: vsubw.u8
 	%tmp1 = load <8 x i16>* %A
 	%tmp2 = load <8 x i8>* %B
-	%tmp3 = call <8 x i16> @llvm.arm.neon.vsubwu.v8i16(<8 x i16> %tmp1, <8 x i8> %tmp2)
-	ret <8 x i16> %tmp3
+	%tmp3 = zext <8 x i8> %tmp2 to <8 x i16>
+	%tmp4 = sub <8 x i16> %tmp1, %tmp3
+	ret <8 x i16> %tmp4
 }
 
 define <4 x i32> @vsubwu16(<4 x i32>* %A, <4 x i16>* %B) nounwind {
@@ -255,8 +263,9 @@
 ;CHECK: vsubw.u16
 	%tmp1 = load <4 x i32>* %A
 	%tmp2 = load <4 x i16>* %B
-	%tmp3 = call <4 x i32> @llvm.arm.neon.vsubwu.v4i32(<4 x i32> %tmp1, <4 x i16> %tmp2)
-	ret <4 x i32> %tmp3
+	%tmp3 = zext <4 x i16> %tmp2 to <4 x i32>
+	%tmp4 = sub <4 x i32> %tmp1, %tmp3
+	ret <4 x i32> %tmp4
 }
 
 define <2 x i64> @vsubwu32(<2 x i64>* %A, <2 x i32>* %B) nounwind {
@@ -264,14 +273,7 @@
 ;CHECK: vsubw.u32
 	%tmp1 = load <2 x i64>* %A
 	%tmp2 = load <2 x i32>* %B
-	%tmp3 = call <2 x i64> @llvm.arm.neon.vsubwu.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2)
-	ret <2 x i64> %tmp3
+	%tmp3 = zext <2 x i32> %tmp2 to <2 x i64>
+	%tmp4 = sub <2 x i64> %tmp1, %tmp3
+	ret <2 x i64> %tmp4
 }
-
-declare <8 x i16> @llvm.arm.neon.vsubws.v8i16(<8 x i16>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vsubws.v4i32(<4 x i32>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vsubws.v2i64(<2 x i64>, <2 x i32>) nounwind readnone
-
-declare <8 x i16> @llvm.arm.neon.vsubwu.v8i16(<8 x i16>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vsubwu.v4i32(<4 x i32>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vsubwu.v2i64(<2 x i64>, <2 x i32>) nounwind readnone
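
Note: the vsubl/vsubw tests above mirror the vmull change, widening the narrow operand(s) with sext or zext and then using a plain sub; the CHECK lines confirm that vsubl/vsubw are still selected. A minimal sketch of the vsubw form, which widens only the second operand (value names are illustrative):

  %wide = sext <8 x i8> %narrow to <8 x i16>
  %res  = sub <8 x i16> %acc, %wide       ; expected to select as vsubw.s8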

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/vtrn.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/vtrn.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/vtrn.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/vtrn.ll Tue Oct 26 19:48:03 2010
@@ -95,3 +95,30 @@
         %tmp5 = fadd <4 x float> %tmp3, %tmp4
 	ret <4 x float> %tmp5
 }
+
+; Undef shuffle indices should not prevent matching to VTRN:
+
+define <8 x i8> @vtrni8_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK: vtrni8_undef:
+;CHECK: vtrn.8
+;CHECK-NEXT: vadd.i8
+	%tmp1 = load <8 x i8>* %A
+	%tmp2 = load <8 x i8>* %B
+	%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 undef, i32 2, i32 10, i32 undef, i32 12, i32 6, i32 14>
+	%tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 undef, i32 undef, i32 15>
+        %tmp5 = add <8 x i8> %tmp3, %tmp4
+	ret <8 x i8> %tmp5
+}
+
+define <8 x i16> @vtrnQi16_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK: vtrnQi16_undef:
+;CHECK: vtrn.16
+;CHECK-NEXT: vadd.i16
+	%tmp1 = load <8 x i16>* %A
+	%tmp2 = load <8 x i16>* %B
+	%tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 8, i32 undef, i32 undef, i32 4, i32 12, i32 6, i32 14>
+	%tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 undef, i32 3, i32 11, i32 5, i32 13, i32 undef, i32 undef>
+        %tmp5 = add <8 x i16> %tmp3, %tmp4
+	ret <8 x i16> %tmp5
+}
+

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/vuzp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/vuzp.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/vuzp.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/vuzp.ll Tue Oct 26 19:48:03 2010
@@ -73,3 +73,30 @@
         %tmp5 = fadd <4 x float> %tmp3, %tmp4
 	ret <4 x float> %tmp5
 }
+
+; Undef shuffle indices should not prevent matching to VUZP:
+
+define <8 x i8> @vuzpi8_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK: vuzpi8_undef:
+;CHECK: vuzp.8
+;CHECK-NEXT: vadd.i8
+	%tmp1 = load <8 x i8>* %A
+	%tmp2 = load <8 x i8>* %B
+	%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 2, i32 undef, i32 undef, i32 8, i32 10, i32 12, i32 14>
+	%tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 undef, i32 undef, i32 13, i32 15>
+        %tmp5 = add <8 x i8> %tmp3, %tmp4
+	ret <8 x i8> %tmp5
+}
+
+define <8 x i16> @vuzpQi16_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK: vuzpQi16_undef:
+;CHECK: vuzp.16
+;CHECK-NEXT: vadd.i16
+	%tmp1 = load <8 x i16>* %A
+	%tmp2 = load <8 x i16>* %B
+	%tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 undef, i32 4, i32 undef, i32 8, i32 10, i32 12, i32 14>
+	%tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 undef, i32 undef, i32 11, i32 13, i32 15>
+        %tmp5 = add <8 x i16> %tmp3, %tmp4
+	ret <8 x i16> %tmp5
+}
+

Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/vzip.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/vzip.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/vzip.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/vzip.ll Tue Oct 26 19:48:03 2010
@@ -73,3 +73,30 @@
         %tmp5 = fadd <4 x float> %tmp3, %tmp4
 	ret <4 x float> %tmp5
 }
+
+; Undef shuffle indices should not prevent matching to VZIP:
+
+define <8 x i8> @vzipi8_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK: vzipi8_undef:
+;CHECK: vzip.8
+;CHECK-NEXT: vadd.i8
+	%tmp1 = load <8 x i8>* %A
+	%tmp2 = load <8 x i8>* %B
+	%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 undef, i32 1, i32 9, i32 undef, i32 10, i32 3, i32 11>
+	%tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 undef, i32 undef, i32 15>
+        %tmp5 = add <8 x i8> %tmp3, %tmp4
+	ret <8 x i8> %tmp5
+}
+
+define <16 x i8> @vzipQi8_undef(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK: vzipQi8_undef:
+;CHECK: vzip.8
+;CHECK-NEXT: vadd.i8
+	%tmp1 = load <16 x i8>* %A
+	%tmp2 = load <16 x i8>* %B
+	%tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 16, i32 1, i32 undef, i32 undef, i32 undef, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
+	%tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 24, i32 9, i32 undef, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 undef, i32 14, i32 30, i32 undef, i32 31>
+        %tmp5 = add <16 x i8> %tmp3, %tmp4
+	ret <16 x i8> %tmp5
+}
+

Modified: llvm/branches/wendling/eh/test/CodeGen/CellSPU/bigstack.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/CellSPU/bigstack.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/CellSPU/bigstack.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/CellSPU/bigstack.ll Tue Oct 26 19:48:03 2010
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -march=cellspu -o %t1.s
-; RUN: grep lqx   %t1.s | count 4
-; RUN: grep il    %t1.s | grep -v file | count 7
-; RUN: grep stqx  %t1.s | count 2
+; RUN: grep lqx   %t1.s | count 3
+; RUN: grep il    %t1.s | grep -v file | count 5
+; RUN: grep stqx  %t1.s | count 1
 
 define i32 @bigstack() nounwind {
 entry:

Modified: llvm/branches/wendling/eh/test/CodeGen/CellSPU/call.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/CellSPU/call.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/CellSPU/call.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/CellSPU/call.ll Tue Oct 26 19:48:03 2010
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -march=cellspu -regalloc=linearscan > %t1.s
 ; RUN: grep brsl    %t1.s | count 1
-; RUN: grep brasl   %t1.s | count 1
-; RUN: grep stqd    %t1.s | count 80
+; RUN: grep brasl   %t1.s | count 2
+; RUN: grep stqd    %t1.s | count 82
 ; RUN: llc < %s -march=cellspu | FileCheck %s
 
 target datalayout = "E-p:32:32:128-f64:64:128-f32:32:128-i64:32:128-i32:32:128-i16:16:128-i8:8:128-i1:8:128-a0:0:128-v128:128:128-s0:128:128"
@@ -29,3 +29,25 @@
 entry:
   ret i32 0
 }
+
+; check that struct is passed in r3->
+; assert this by changing the second field in the struct
+%0 = type { i32, i32, i32 }
+declare %0 @callee()
+define %0 @test_structret()
+{
+;CHECK:	stqd	$lr, 16($sp)
+;CHECK:	stqd	$sp, -48($sp)
+;CHECK:	ai	$sp, $sp, -48
+;CHECK:	brasl	$lr, callee
+  %rv = call %0 @callee()
+;CHECK: ai	$4, $4, 1
+;CHECK: lqd	$lr, 64($sp)
+;CHECK:	ai	$sp, $sp, 48
+;CHECK:	bi	$lr
+  %oldval = extractvalue %0 %rv, 1
+  %newval = add i32 %oldval,1
+  %newrv = insertvalue %0 %rv, i32 %newval, 1
+  ret %0 %newrv
+}
+

Modified: llvm/branches/wendling/eh/test/CodeGen/CellSPU/sext128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/CellSPU/sext128.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/CellSPU/sext128.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/CellSPU/sext128.ll Tue Oct 26 19:48:03 2010
@@ -12,6 +12,7 @@
 ; CHECK: 	long	269488144
 ; CHECK:	long	66051
 ; CHECK: 	long	67438087
+; CHECK-NOT: rotqmbyi
 ; CHECK: 	rotmai
 ; CHECK:	lqa
 ; CHECK:	shufb
@@ -25,6 +26,7 @@
 ; CHECK: 	long	269488144
 ; CHECK: 	long	269488144
 ; CHECK:	long	66051
+; CHECK-NOT: rotqmbyi
 ; CHECK: 	rotmai
 ; CHECK:	lqa
 ; CHECK:	shufb
@@ -39,6 +41,7 @@
 ; CHECK: 	long	269488144
 ; CHECK: 	long	269488144
 ; CHECK:	long	66051
+; CHECK-NOT: rotqmbyi
 ; CHECK: 	rotmai
 ; CHECK:	lqa
 ; CHECK:	shufb

Modified: llvm/branches/wendling/eh/test/CodeGen/CellSPU/shuffles.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/CellSPU/shuffles.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/CellSPU/shuffles.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/CellSPU/shuffles.ll Tue Oct 26 19:48:03 2010
@@ -16,3 +16,52 @@
   ret <4 x float> %val  
 }
 
+define void @test_insert( <2 x float>* %ptr, float %val1, float %val2 ) {
+  %sl2_17_tmp1 = insertelement <2 x float> zeroinitializer, float %val1, i32 0
+;CHECK:	lqa	$6,
+;CHECK:	shufb	$4, $4, $5, $6
+  %sl2_17 = insertelement <2 x float> %sl2_17_tmp1, float %val2, i32 1
+
+;CHECK: cdd	$5, 0($3)
+;CHECK: lqd	$6, 0($3)
+;CHECK: shufb	$4, $4, $6, $5
+;CHECK: stqd	$4, 0($3)
+;CHECK:	bi	$lr
+  store <2 x float> %sl2_17, <2 x float>* %ptr
+  ret void 
+}
+
+define <4 x float>  @test_insert_1(<4 x float> %vparam, float %eltparam) {
+;CHECK: cwd     $5, 4($sp)
+;CHECK: shufb   $3, $4, $3, $5
+;CHECK: bi      $lr
+  %rv = insertelement <4 x float> %vparam, float %eltparam, i32 1
+  ret <4 x float> %rv
+}
+
+define <2 x i32> @test_v2i32(<4 x i32>%vec)
+{
+;CHECK: rotqbyi $3, $3, 4
+;CHECK: bi $lr
+  %rv = shufflevector <4 x i32> %vec, <4 x i32> undef, <2 x i32><i32 1,i32 2>
+  ret <2 x i32> %rv
+}
+
+define <4 x i32> @test_v4i32_rot8(<4 x i32>%vec)
+{
+;CHECK: rotqbyi $3, $3, 8
+;CHECK: bi $lr
+  %rv = shufflevector <4 x i32> %vec, <4 x i32> undef, 
+        <4 x i32> <i32 2,i32 3,i32 0, i32 1>
+  ret <4 x i32> %rv
+}
+
+define <4 x i32> @test_v4i32_rot4(<4 x i32>%vec)
+{
+;CHECK: rotqbyi $3, $3, 4
+;CHECK: bi $lr
+  %rv = shufflevector <4 x i32> %vec, <4 x i32> undef, 
+        <4 x i32> <i32 1,i32 2,i32 3, i32 0>
+  ret <4 x i32> %rv
+}
+

Modified: llvm/branches/wendling/eh/test/CodeGen/MBlaze/brind.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/MBlaze/brind.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/MBlaze/brind.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/MBlaze/brind.ll Tue Oct 26 19:48:03 2010
@@ -28,32 +28,31 @@
                              label %L3,
                              label %L4,
                              label %L5 ]
-    ; CHECK:        br {{r[0-9]*}}
+    ; CHECK:        brd {{r[0-9]*}}
 
 L1:
     %tmp.1 = add i32 %a, %b
     br label %finish
-    ; CHECK:        br
+    ; CHECK:        brid
 
 L2:
     %tmp.2 = sub i32 %a, %b
     br label %finish
-    ; CHECK:        br
+    ; CHECK:        brid
 
 L3:
     %tmp.3 = mul i32 %a, %b
     br label %finish
-    ; CHECK:        br
+    ; CHECK:        brid
 
 L4:
     %tmp.4 = sdiv i32 %a, %b
     br label %finish
-    ; CHECK:        br
+    ; CHECK:        brid
 
 L5:
     %tmp.5 = srem i32 %a, %b
     br label %finish
-    ; CHECK:        br
 
 finish:
     %tmp.6 = phi i32 [ %tmp.1, %L1 ],
@@ -69,5 +68,5 @@
     %tmp.8 = urem i32 %tmp.7, 5
 
     br label %loop
-    ; CHECK:        br
+    ; CHECK:        brid
 }

Modified: llvm/branches/wendling/eh/test/CodeGen/MBlaze/cc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/MBlaze/cc.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/MBlaze/cc.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/MBlaze/cc.ll Tue Oct 26 19:48:03 2010
@@ -12,7 +12,7 @@
 define void @params0_noret() {
     ; CHECK:        params0_noret:
     ret void
-    ; CHECK-NOT:    {{.* r3, r0, 1}}
+    ; CHECK-NOT:    {{.* r3, .*, .*}}
     ; CHECK-NOT:    {{.* r4, .*, .*}}
     ; CHECK:        rtsd
 }
@@ -20,81 +20,88 @@
 define i8 @params0_8bitret() {
     ; CHECK:        params0_8bitret:
     ret i8 1
-    ; CHECK:        {{.* r3, r0, 1}}
+    ; CHECK-NOT:    {{.* r3, .*, .*}}
     ; CHECK-NOT:    {{.* r4, .*, .*}}
     ; CHECK:        rtsd
+    ; CHECK:        {{.* r3, r0, 1}}
 }
 
 define i16 @params0_16bitret() {
     ; CHECK:        params0_16bitret:
     ret i16 1
+    ; CHECK:        rtsd
     ; CHECK:        {{.* r3, r0, 1}}
     ; CHECK-NOT:    {{.* r4, .*, .*}}
-    ; CHECK:        rtsd
 }
 
 define i32 @params0_32bitret() {
     ; CHECK:        params0_32bitret:
     ret i32 1
-    ; CHECK:        {{.* r3, r0, 1}}
     ; CHECK-NOT:    {{.* r4, .*, .*}}
     ; CHECK:        rtsd
+    ; CHECK:        {{.* r3, r0, 1}}
 }
 
 define i64 @params0_64bitret() {
     ; CHECK:        params0_64bitret:
     ret i64 1
     ; CHECK:        {{.* r3, r0, .*}}
-    ; CHECK:        {{.* r4, r0, 1}}
     ; CHECK:        rtsd
+    ; CHECK:        {{.* r4, r0, 1}}
 }
 
 define i32 @params1_32bitret(i32 %a) {
     ; CHECK:        params1_32bitret:
     ret i32 %a
-    ; CHECK:        {{.* r3, r5, r0}}
+    ; CHECK-NOT:    {{.* r3, .*, .*}}
     ; CHECK-NOT:    {{.* r4, .*, .*}}
     ; CHECK:        rtsd
+    ; CHECK:        {{.* r3, r5, r0}}
 }
 
 define i32 @params2_32bitret(i32 %a, i32 %b) {
     ; CHECK:        params2_32bitret:
     ret i32 %b
-    ; CHECK:        {{.* r3, r6, r0}}
+    ; CHECK-NOT:    {{.* r3, .*, .*}}
     ; CHECK-NOT:    {{.* r4, .*, .*}}
     ; CHECK:        rtsd
+    ; CHECK:        {{.* r3, r6, r0}}
 }
 
 define i32 @params3_32bitret(i32 %a, i32 %b, i32 %c) {
     ; CHECK:        params3_32bitret:
     ret i32 %c
-    ; CHECK:        {{.* r3, r7, r0}}
+    ; CHECK-NOT:    {{.* r3, .*, .*}}
     ; CHECK-NOT:    {{.* r4, .*, .*}}
     ; CHECK:        rtsd
+    ; CHECK:        {{.* r3, r7, r0}}
 }
 
 define i32 @params4_32bitret(i32 %a, i32 %b, i32 %c, i32 %d) {
     ; CHECK:        params4_32bitret:
     ret i32 %d
-    ; CHECK:        {{.* r3, r8, r0}}
+    ; CHECK-NOT:    {{.* r3, .*, .*}}
     ; CHECK-NOT:    {{.* r4, .*, .*}}
     ; CHECK:        rtsd
+    ; CHECK:        {{.* r3, r8, r0}}
 }
 
 define i32 @params5_32bitret(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e) {
     ; CHECK:        params5_32bitret:
     ret i32 %e
-    ; CHECK:        {{.* r3, r9, r0}}
+    ; CHECK-NOT:    {{.* r3, .*, .*}}
     ; CHECK-NOT:    {{.* r4, .*, .*}}
     ; CHECK:        rtsd
+    ; CHECK:        {{.* r3, r9, r0}}
 }
 
 define i32 @params6_32bitret(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f) {
     ; CHECK:        params6_32bitret:
     ret i32 %f
-    ; CHECK:        {{.* r3, r10, r0}}
+    ; CHECK-NOT:    {{.* r3, .*, .*}}
     ; CHECK-NOT:    {{.* r4, .*, .*}}
     ; CHECK:        rtsd
+    ; CHECK:        {{.* r3, r10, r0}}
 }
 
 define i32 @params7_32bitret(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f,
@@ -142,53 +149,29 @@
     %tmp.1 = call i8 @params0_8bitret()
     ; CHECK:        brlid
     call i32 (i8*,...)* @printf(i8* %MSG.1, i8 %tmp.1)
-    ; CHECK:        {{.* r5, .*, .*}}
-    ; CHECK:        {{.* r6, r3, r0}}
-    ; CHECK-NOT:    {{.* r7, .*, .*}}
-    ; CHECK:        brlid
 
     %tmp.2 = call i16 @params0_16bitret()
     ; CHECK:        brlid
     call i32 (i8*,...)* @printf(i8* %MSG.1, i16 %tmp.2)
-    ; CHECK:        {{.* r5, .*, .*}}
-    ; CHECK:        {{.* r6, r3, r0}}
-    ; CHECK-NOT:    {{.* r7, .*, .*}}
-    ; CHECK:        brlid
 
     %tmp.3 = call i32 @params0_32bitret()
     ; CHECK:        brlid
     call i32 (i8*,...)* @printf(i8* %MSG.1, i32 %tmp.3)
-    ; CHECK:        {{.* r5, .*, .*}}
-    ; CHECK:        {{.* r6, r3, r0}}
-    ; CHECK-NOT:    {{.* r7, .*, .*}}
-    ; CHECK:        brlid
 
     %tmp.4 = call i64 @params0_64bitret()
     ; CHECK:        brlid
     call i32 (i8*,...)* @printf(i8* %MSG.1, i64 %tmp.4)
-    ; CHECK:        {{.* r5, .*, .*}}
-    ; CHECK:        {{.* r6, r3, r0}}
-    ; CHECK:        {{.* r7, r4, r0}}
-    ; CHECK:        brlid
 
     %tmp.5 = call i32 @params1_32bitret(i32 1)
     ; CHECK:        {{.* r5, .*, .*}}
     ; CHECK:        brlid
     call i32 (i8*,...)* @printf(i8* %MSG.1, i32 %tmp.5)
-    ; CHECK:        {{.* r5, .*, .*}}
-    ; CHECK:        {{.* r6, r3, r0}}
-    ; CHECK-NOT:    {{.* r7, .*, .*}}
-    ; CHECK:        brlid
 
     %tmp.6 = call i32 @params2_32bitret(i32 1, i32 2)
     ; CHECK:        {{.* r5, .*, .*}}
     ; CHECK:        {{.* r6, .*, .*}}
     ; CHECK:        brlid
     call i32 (i8*,...)* @printf(i8* %MSG.1, i32 %tmp.6)
-    ; CHECK:        {{.* r5, .*, .*}}
-    ; CHECK:        {{.* r6, r3, r0}}
-    ; CHECK-NOT:    {{.* r7, .*, .*}}
-    ; CHECK:        brlid
 
     %tmp.7 = call i32 @params3_32bitret(i32 1, i32 2, i32 3)
     ; CHECK:        {{.* r5, .*, .*}}
@@ -196,10 +179,6 @@
     ; CHECK:        {{.* r7, .*, .*}}
     ; CHECK:        brlid
     call i32 (i8*,...)* @printf(i8* %MSG.1, i32 %tmp.7)
-    ; CHECK:        {{.* r5, .*, .*}}
-    ; CHECK:        {{.* r6, r3, r0}}
-    ; CHECK-NOT:    {{.* r7, .*, .*}}
-    ; CHECK:        brlid
 
     %tmp.8 = call i32 @params4_32bitret(i32 1, i32 2, i32 3, i32 4)
     ; CHECK:        {{.* r5, .*, .*}}
@@ -208,10 +187,6 @@
     ; CHECK:        {{.* r8, .*, .*}}
     ; CHECK:        brlid
     call i32 (i8*,...)* @printf(i8* %MSG.1, i32 %tmp.8)
-    ; CHECK:        {{.* r5, .*, .*}}
-    ; CHECK:        {{.* r6, r3, r0}}
-    ; CHECK-NOT:    {{.* r7, .*, .*}}
-    ; CHECK:        brlid
 
     %tmp.9 = call i32 @params5_32bitret(i32 1, i32 2, i32 3, i32 4, i32 5)
     ; CHECK:        {{.* r5, .*, .*}}
@@ -221,10 +196,6 @@
     ; CHECK:        {{.* r9, .*, .*}}
     ; CHECK:        brlid
     call i32 (i8*,...)* @printf(i8* %MSG.1, i32 %tmp.9)
-    ; CHECK:        {{.* r5, .*, .*}}
-    ; CHECK:        {{.* r6, r3, r0}}
-    ; CHECK-NOT:    {{.* r7, .*, .*}}
-    ; CHECK:        brlid
 
     %tmp.10 = call i32 @params6_32bitret(i32 1, i32 2, i32 3, i32 4, i32 5,
                                          i32 6)
@@ -236,10 +207,6 @@
     ; CHECK:        {{.* r10, .*, .*}}
     ; CHECK:        brlid
     call i32 (i8*,...)* @printf(i8* %MSG.1, i32 %tmp.10)
-    ; CHECK:        {{.* r5, .*, .*}}
-    ; CHECK:        {{.* r6, r3, r0}}
-    ; CHECK-NOT:    {{.* r7, .*, .*}}
-    ; CHECK:        brlid
 
     %tmp.11 = call i32 @params7_32bitret(i32 1, i32 2, i32 3, i32 4, i32 5,
                                          i32 6, i32 7)
@@ -252,10 +219,6 @@
     ; CHECK:        {{.* r10, .*, .*}}
     ; CHECK:        brlid
     call i32 (i8*,...)* @printf(i8* %MSG.1, i32 %tmp.11)
-    ; CHECK:        {{.* r5, .*, .*}}
-    ; CHECK:        {{.* r6, r3, r0}}
-    ; CHECK-NOT:    {{.* r7, .*, .*}}
-    ; CHECK:        brlid
 
     %tmp.12 = call i32 @params8_32bitret(i32 1, i32 2, i32 3, i32 4, i32 5,
                                          i32 6, i32 7, i32 8)
@@ -269,10 +232,6 @@
     ; CHECK:        {{.* r10, .*, .*}}
     ; CHECK:        brlid
     call i32 (i8*,...)* @printf(i8* %MSG.1, i32 %tmp.12)
-    ; CHECK:        {{.* r5, .*, .*}}
-    ; CHECK:        {{.* r6, r3, r0}}
-    ; CHECK-NOT:    {{.* r7, .*, .*}}
-    ; CHECK:        brlid
 
     %tmp.13 = call i32 @params9_32bitret(i32 1, i32 2, i32 3, i32 4, i32 5,
                                          i32 6, i32 7, i32 8, i32 9)
@@ -287,10 +246,6 @@
     ; CHECK:        {{.* r10, .*, .*}}
     ; CHECK:        brlid
     call i32 (i8*,...)* @printf(i8* %MSG.1, i32 %tmp.13)
-    ; CHECK:        {{.* r5, .*, .*}}
-    ; CHECK:        {{.* r6, r3, r0}}
-    ; CHECK-NOT:    {{.* r7, .*, .*}}
-    ; CHECK:        brlid
 
     %tmp.14 = call i32 @params10_32bitret(i32 1, i32 2, i32 3, i32 4, i32 5,
                                           i32 6, i32 7, i32 8, i32 9, i32 10)
@@ -306,10 +261,6 @@
     ; CHECK:        {{.* r10, .*, .*}}
     ; CHECK:        brlid
     call i32 (i8*,...)* @printf(i8* %MSG.1, i32 %tmp.14)
-    ; CHECK:        {{.* r5, .*, .*}}
-    ; CHECK:        {{.* r6, r3, r0}}
-    ; CHECK-NOT:    {{.* r7, .*, .*}}
-    ; CHECK:        brlid
 
     ret void
 }

Modified: llvm/branches/wendling/eh/test/CodeGen/MBlaze/fpu.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/MBlaze/fpu.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/MBlaze/fpu.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/MBlaze/fpu.ll Tue Oct 26 19:48:03 2010
@@ -10,14 +10,14 @@
     ; FPU:        test_add:
 
     %tmp.1 = fadd float %a, %b
-    ; FUN-NOT:    fadd
     ; FUN:        brlid
     ; FPU-NOT:    brlid
-    ; FPU:        fadd
 
     ret float %tmp.1
     ; FUN:        rtsd
     ; FPU:        rtsd
+    ; FUN-NOT:    fadd
+    ; FPU-NEXT:   fadd
 }
 
 define float @test_sub(float %a, float %b) {
@@ -25,14 +25,14 @@
     ; FPU:        test_sub:
 
     %tmp.1 = fsub float %a, %b
-    ; FUN-NOT:    frsub
     ; FUN:        brlid
     ; FPU-NOT:    brlid
-    ; FPU:        frsub
 
     ret float %tmp.1
     ; FUN:        rtsd
     ; FPU:        rtsd
+    ; FUN-NOT:    frsub
+    ; FPU-NEXT:   frsub
 }
 
 define float @test_mul(float %a, float %b) {
@@ -40,14 +40,14 @@
     ; FPU:        test_mul:
 
     %tmp.1 = fmul float %a, %b
-    ; FUN-NOT:    fmul
     ; FUN:        brlid
     ; FPU-NOT:    brlid
-    ; FPU:        fmul
 
     ret float %tmp.1
     ; FUN:        rtsd
     ; FPU:        rtsd
+    ; FUN-NOT:    fmul
+    ; FPU-NEXT:   fmul
 }
 
 define float @test_div(float %a, float %b) {
@@ -55,12 +55,12 @@
     ; FPU:        test_div:
 
     %tmp.1 = fdiv float %a, %b
-    ; FUN-NOT:    fdiv
     ; FUN:        brlid
     ; FPU-NOT:    brlid
-    ; FPU:        fdiv
 
     ret float %tmp.1
     ; FUN:        rtsd
     ; FPU:        rtsd
+    ; FUN-NOT:    fdiv
+    ; FPU-NEXT:   fdiv
 }

Modified: llvm/branches/wendling/eh/test/CodeGen/MBlaze/imm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/MBlaze/imm.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/MBlaze/imm.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/MBlaze/imm.ll Tue Oct 26 19:48:03 2010
@@ -7,21 +7,21 @@
 
 define i8 @retimm_i8() {
     ; CHECK:        retimm_i8:
-    ; CHECK:        add
-    ; CHECK-NEXT:   rtsd
+    ; CHECK:        rtsd
+    ; CHECK-NEXT:   add
     ; FPU:          retimm_i8:
-    ; FPU:          add
-    ; FPU-NEXT:     rtsd
+    ; FPU:          rtsd
+    ; FPU-NEXT:     add
     ret i8 123
 }
 
 define i16 @retimm_i16() {
     ; CHECK:        retimm_i16:
-    ; CHECK:        add
-    ; CHECK-NEXT:   rtsd
+    ; CHECK:        rtsd
+    ; CHECK-NEXT:   add
     ; FPU:          retimm_i16:
-    ; FPU:          add
-    ; FPU-NEXT:     rtsd
+    ; FPU:          rtsd
+    ; FPU-NEXT:     add
     ret i16 38212
 }
 
@@ -38,12 +38,12 @@
 define i64 @retimm_i64() {
     ; CHECK:        retimm_i64:
     ; CHECK:        add
-    ; CHECK-NEXT:   add
     ; CHECK-NEXT:   rtsd
+    ; CHECK-NEXT:   add
     ; FPU:          retimm_i64:
     ; FPU:          add
-    ; FPU-NEXT:     add
     ; FPU-NEXT:     rtsd
+    ; FPU-NEXT:     add
     ret i64 94581823
 }
 
@@ -53,7 +53,7 @@
     ; CHECK-NEXT:   rtsd
     ; FPU:          retimm_float:
     ; FPU:          or
-    ; FPU:          rtsd
+    ; FPU-NEXT:     rtsd
     ret float 12.0
 }
 

Modified: llvm/branches/wendling/eh/test/CodeGen/MBlaze/jumptable.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/MBlaze/jumptable.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/MBlaze/jumptable.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/MBlaze/jumptable.ll Tue Oct 26 19:48:03 2010
@@ -18,8 +18,8 @@
                                       i32 8, label %L8
                                       i32 9, label %L9 ]
 
-    ; CHECK:        lw [[REG:r[0-9]*]]
-    ; CHECK:        br [[REG]]
+    ; CHECK:        lw  [[REG:r[0-9]*]]
+    ; CHECK:        brd [[REG]]
 L0:
     %var0 = add i32 %arg, 0
     br label %DONE

Modified: llvm/branches/wendling/eh/test/CodeGen/MBlaze/mul.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/MBlaze/mul.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/MBlaze/mul.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/MBlaze/mul.ll Tue Oct 26 19:48:03 2010
@@ -13,11 +13,11 @@
     ; FUN-NOT:    mul
     ; FUN:        brlid
     ; MUL-NOT:    brlid
-    ; MUL:        mul
 
     ret i8 %tmp.1
     ; FUN:        rtsd
     ; MUL:        rtsd
+    ; MUL:        mul
 }
 
 define i16 @test_i16(i16 %a, i16 %b) {
@@ -28,11 +28,11 @@
     ; FUN-NOT:    mul
     ; FUN:        brlid
     ; MUL-NOT:    brlid
-    ; MUL:        mul
 
     ret i16 %tmp.1
     ; FUN:        rtsd
     ; MUL:        rtsd
+    ; MUL:        mul
 }
 
 define i32 @test_i32(i32 %a, i32 %b) {
@@ -43,9 +43,9 @@
     ; FUN-NOT:    mul
     ; FUN:        brlid
     ; MUL-NOT:    brlid
-    ; MUL:        mul
 
     ret i32 %tmp.1
     ; FUN:        rtsd
     ; MUL:        rtsd
+    ; MUL:        mul
 }

Modified: llvm/branches/wendling/eh/test/CodeGen/MBlaze/shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/MBlaze/shift.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/MBlaze/shift.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/MBlaze/shift.ll Tue Oct 26 19:48:03 2010
@@ -10,17 +10,17 @@
     ; SHT:        test_i8:
 
     %tmp.1 = shl i8 %a, %b
-    ; FUN-NOT:    bsll
     ; FUN:        andi
     ; FUN:        add
     ; FUN:        bnei
     ; SHT-NOT:    andi
     ; SHT-NOT:    bnei
-    ; SHT:        bsll
 
     ret i8 %tmp.1
     ; FUN:        rtsd
     ; SHT:        rtsd
+    ; FUN-NOT:    bsll
+    ; SHT-NEXT:   bsll
 }
 
 define i8 @testc_i8(i8 %a, i8 %b) {
@@ -28,18 +28,18 @@
     ; SHT:        testc_i8:
 
     %tmp.1 = shl i8 %a, 5
-    ; FUN-NOT:    bsll
     ; FUN:        andi
     ; FUN:        add
     ; FUN:        bnei
     ; SHT-NOT:    andi
     ; SHT-NOT:    add
     ; SHT-NOT:    bnei
-    ; SHT:        bslli
 
     ret i8 %tmp.1
     ; FUN:        rtsd
     ; SHT:        rtsd
+    ; FUN-NOT:    bsll
+    ; SHT-NEXT:   bslli
 }
 
 define i16 @test_i16(i16 %a, i16 %b) {
@@ -47,17 +47,17 @@
     ; SHT:        test_i16:
 
     %tmp.1 = shl i16 %a, %b
-    ; FUN-NOT:    bsll
     ; FUN:        andi
     ; FUN:        add
     ; FUN:        bnei
     ; SHT-NOT:    andi
     ; SHT-NOT:    bnei
-    ; SHT:        bsll
 
     ret i16 %tmp.1
     ; FUN:        rtsd
     ; SHT:        rtsd
+    ; FUN-NOT:    bsll
+    ; SHT-NEXT:   bsll
 }
 
 define i16 @testc_i16(i16 %a, i16 %b) {
@@ -65,18 +65,18 @@
     ; SHT:        testc_i16:
 
     %tmp.1 = shl i16 %a, 5
-    ; FUN-NOT:    bsll
     ; FUN:        andi
     ; FUN:        add
     ; FUN:        bnei
     ; SHT-NOT:    andi
     ; SHT-NOT:    add
     ; SHT-NOT:    bnei
-    ; SHT:        bslli
 
     ret i16 %tmp.1
     ; FUN:        rtsd
     ; SHT:        rtsd
+    ; FUN-NOT:    bsll
+    ; SHT-NEXT:   bslli
 }
 
 define i32 @test_i32(i32 %a, i32 %b) {
@@ -84,17 +84,17 @@
     ; SHT:        test_i32:
 
     %tmp.1 = shl i32 %a, %b
-    ; FUN-NOT:    bsll
     ; FUN:        andi
     ; FUN:        add
     ; FUN:        bnei
     ; SHT-NOT:    andi
     ; SHT-NOT:    bnei
-    ; SHT:        bsll
 
     ret i32 %tmp.1
     ; FUN:        rtsd
     ; SHT:        rtsd
+    ; FUN-NOT:    bsll
+    ; SHT-NEXT:   bsll
 }
 
 define i32 @testc_i32(i32 %a, i32 %b) {
@@ -102,16 +102,16 @@
     ; SHT:        testc_i32:
 
     %tmp.1 = shl i32 %a, 5
-    ; FUN-NOT:    bsll
     ; FUN:        andi
     ; FUN:        add
     ; FUN:        bnei
     ; SHT-NOT:    andi
     ; SHT-NOT:    add
     ; SHT-NOT:    bnei
-    ; SHT:        bslli
 
     ret i32 %tmp.1
     ; FUN:        rtsd
     ; SHT:        rtsd
+    ; FUN-NOT:    bsll
+    ; SHT-NEXT:   bslli
 }

Modified: llvm/branches/wendling/eh/test/CodeGen/MSP430/Inst16mm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/MSP430/Inst16mm.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/MSP430/Inst16mm.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/MSP430/Inst16mm.ll Tue Oct 26 19:48:03 2010
@@ -64,6 +64,6 @@
  %0 = load i16* %retval                          ; <i16> [#uses=1]
  ret i16 %0
 ; CHECK: mov2:
-; CHECK:	mov.w	0(r1), 4(r1)
 ; CHECK:	mov.w	2(r1), 6(r1)
+; CHECK:	mov.w	0(r1), 4(r1)
 }

Modified: llvm/branches/wendling/eh/test/CodeGen/Mips/2008-06-05-Carry.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Mips/2008-06-05-Carry.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Mips/2008-06-05-Carry.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Mips/2008-06-05-Carry.ll Tue Oct 26 19:48:03 2010
@@ -4,7 +4,7 @@
 
 target datalayout =
 "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "mipsallegrexel-psp-elf"
+target triple = "mipsallegrexel-unknown-psp-elf"
 
 define i64 @add64(i64 %u, i64 %v) nounwind  {
 entry:

Modified: llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-03-SRet.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-03-SRet.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-03-SRet.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-03-SRet.ll Tue Oct 26 19:48:03 2010
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -march=mips | grep {sw.*(\$4)} | count 3
 
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "mipsallegrexel-psp-elf"
+target triple = "mipsallegrexel-unknown-psp-elf"
 	%struct.sret0 = type { i32, i32, i32 }
 
 define void @test0(%struct.sret0* noalias sret %agg.result, i32 %dummy) nounwind {

Modified: llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-05-ByVal.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-05-ByVal.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-05-ByVal.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-05-ByVal.ll Tue Oct 26 19:48:03 2010
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -march=mips | grep {lw.*(\$4)} | count 2
 
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "mipsallegrexel-psp-elf"
+target triple = "mipsallegrexel-unknown-psp-elf"
 	%struct.byval0 = type { i32, i32 }
 
 define i64 @test0(%struct.byval0* byval  %b, i64 %sum) nounwind  {

Modified: llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-06-fadd64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-06-fadd64.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-06-fadd64.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-06-fadd64.ll Tue Oct 26 19:48:03 2010
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -march=mips | grep __adddf3
 
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "mipsallegrexel-psp-elf"
+target triple = "mipsallegrexel-unknown-psp-elf"
 
 define double @dofloat(double %a, double %b) nounwind {
 entry:

Modified: llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-07-FPExtend.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-07-FPExtend.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-07-FPExtend.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-07-FPExtend.ll Tue Oct 26 19:48:03 2010
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -march=mips | grep __extendsfdf2
 
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "mipsallegrexel-psp-elf"
+target triple = "mipsallegrexel-unknown-psp-elf"
 
 define double @dofloat(float %a) nounwind {
 entry:

Modified: llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-07-Float2Int.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-07-Float2Int.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-07-Float2Int.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-07-Float2Int.ll Tue Oct 26 19:48:03 2010
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -march=mips | grep trunc.w.s | count 3
 
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "mipsallegrexel-psp-elf"
+target triple = "mipsallegrexel-unknown-psp-elf"
 
 define i32 @fptoint(float %a) nounwind {
 entry:

Modified: llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-07-IntDoubleConvertions.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-07-IntDoubleConvertions.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-07-IntDoubleConvertions.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-07-IntDoubleConvertions.ll Tue Oct 26 19:48:03 2010
@@ -5,7 +5,7 @@
 ; RUN: grep __fixunsdfsi %t  | count 1
 
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "mipsallegrexel-psp-elf"
+target triple = "mipsallegrexel-unknown-psp-elf"
 
 define double @int2fp(i32 %a) nounwind {
 entry:

Modified: llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-15-InternalConstant.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-15-InternalConstant.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-15-InternalConstant.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-15-InternalConstant.ll Tue Oct 26 19:48:03 2010
@@ -6,7 +6,7 @@
 ; RUN: not grep {gp_rel} %t
 
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "mipsallegrexel-psp-elf"
+target triple = "mipsallegrexel-unknown-psp-elf"
 @.str = internal constant [10 x i8] c"AAAAAAAAA\00"
 @i0 = internal constant [5 x i32] [ i32 0, i32 1, i32 2, i32 3, i32 4 ] 
 

Modified: llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-15-SmallSection.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-15-SmallSection.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-15-SmallSection.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-15-SmallSection.ll Tue Oct 26 19:48:03 2010
@@ -10,7 +10,7 @@
 ; RUN: grep {\%lo} %t1 | count 2
 
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "mipsallegrexel-psp-elf"
+target triple = "mipsallegrexel-unknown-psp-elf"
 
   %struct.anon = type { i32, i32 }
 @s0 = global [8 x i8] c"AAAAAAA\00", align 4

Modified: llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-16-SignExtInReg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-16-SignExtInReg.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-16-SignExtInReg.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-16-SignExtInReg.ll Tue Oct 26 19:48:03 2010
@@ -3,7 +3,7 @@
 ; RUN: grep seb %t | count 1
 
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "mipsallegrexel-psp-elf"
+target triple = "mipsallegrexel-unknown-psp-elf"
 
 define i8 @A(i8 %e.0, i8 signext %sum) signext nounwind {
 entry:

Modified: llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-22-Cstpool.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-22-Cstpool.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-22-Cstpool.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-22-Cstpool.ll Tue Oct 26 19:48:03 2010
@@ -2,7 +2,7 @@
 ; RUN: grep {CPI\[01\]_\[01\]:} %t | count 2
 ; RUN: grep {rodata.cst4,"aM",@progbits} %t | count 1
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "mipsallegrexel-psp-elf"
+target triple = "mipsallegrexel-unknown-psp-elf"
 
 define float @F(float %a) nounwind {
 entry:

Modified: llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-23-fpcmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-23-fpcmp.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-23-fpcmp.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-23-fpcmp.ll Tue Oct 26 19:48:03 2010
@@ -3,7 +3,7 @@
 ; RUN: grep {bc1\[tf\]} %t | count 3
 
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "mipsallegrexel-psp-elf"
+target triple = "mipsallegrexel-unknown-psp-elf"
 
 define float @A(float %a, float %b) nounwind {
 entry:

Modified: llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-29-icmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-29-icmp.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-29-icmp.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-29-icmp.ll Tue Oct 26 19:48:03 2010
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -march=mips | grep {b\[ne\]\[eq\]} | count 1
 
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "mipsallegrexel-psp-elf"
+target triple = "mipsallegrexel-unknown-psp-elf"
 
 define float @A(float %a, float %b, i32 %j) nounwind {
 entry:

Modified: llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-31-fcopysign.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-31-fcopysign.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-31-fcopysign.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Mips/2008-07-31-fcopysign.ll Tue Oct 26 19:48:03 2010
@@ -3,7 +3,7 @@
 ; RUN: grep neg.s %t | count 1
 
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "mipsallegrexel-psp-elf"
+target triple = "mipsallegrexel-unknown-psp-elf"
 
 define float @A(float %i, float %j) nounwind  {
 entry:

Modified: llvm/branches/wendling/eh/test/CodeGen/Mips/2008-08-01-AsmInline.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Mips/2008-08-01-AsmInline.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Mips/2008-08-01-AsmInline.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Mips/2008-08-01-AsmInline.ll Tue Oct 26 19:48:03 2010
@@ -4,7 +4,7 @@
 ; RUN: grep multu %t | count 1
 
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "mipsallegrexel-psp-elf"
+target triple = "mipsallegrexel-unknown-psp-elf"
 	%struct.DWstruct = type { i32, i32 }
 
 define i32 @A0(i32 %u, i32 %v) nounwind  {

Modified: llvm/branches/wendling/eh/test/CodeGen/Mips/2008-08-03-fabs64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Mips/2008-08-03-fabs64.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Mips/2008-08-03-fabs64.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Mips/2008-08-03-fabs64.ll Tue Oct 26 19:48:03 2010
@@ -3,7 +3,7 @@
 ; RUN: grep {ori.*65535} %t | count 1
 
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "mipsallegrexel-psp-elf"
+target triple = "mipsallegrexel-unknown-psp-elf"
 
 define double @A(double %c, double %d) nounwind readnone  {
 entry:

Modified: llvm/branches/wendling/eh/test/CodeGen/Mips/2008-08-04-Bitconvert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Mips/2008-08-04-Bitconvert.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Mips/2008-08-04-Bitconvert.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Mips/2008-08-04-Bitconvert.ll Tue Oct 26 19:48:03 2010
@@ -3,7 +3,7 @@
 ; RUN: grep mfc1 %t | count 1
 
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "mipsallegrexel-psp-elf"
+target triple = "mipsallegrexel-unknown-psp-elf"
 
 define float @A(i32 %u) nounwind  {
 entry:

Modified: llvm/branches/wendling/eh/test/CodeGen/Mips/2008-08-06-Alloca.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Mips/2008-08-06-Alloca.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Mips/2008-08-06-Alloca.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Mips/2008-08-06-Alloca.ll Tue Oct 26 19:48:03 2010
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -march=mips | grep {subu.*sp} | count 2
 
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "mipsallegrexel-psp-elf"
+target triple = "mipsallegrexel-unknown-psp-elf"
 
 define i32 @twoalloca(i32 %size) nounwind {
 entry:

Modified: llvm/branches/wendling/eh/test/CodeGen/Mips/2008-08-07-CC.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Mips/2008-08-07-CC.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Mips/2008-08-07-CC.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Mips/2008-08-07-CC.ll Tue Oct 26 19:48:03 2010
@@ -3,7 +3,7 @@
 
 target datalayout =
 "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "mipsallegrexel-psp-elf"
+target triple = "mipsallegrexel-unknown-psp-elf"
 
 define internal fastcc i32 @A(i32 %u) nounwind  {
 entry:

Modified: llvm/branches/wendling/eh/test/CodeGen/Mips/2008-08-07-FPRound.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Mips/2008-08-07-FPRound.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Mips/2008-08-07-FPRound.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Mips/2008-08-07-FPRound.ll Tue Oct 26 19:48:03 2010
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -march=mips | grep __truncdfsf2 | count 1
 
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "mipsallegrexel-psp-elf"
+target triple = "mipsallegrexel-unknown-psp-elf"
 
 define float @round2float(double %a) nounwind {
 entry:

Modified: llvm/branches/wendling/eh/test/CodeGen/Mips/2008-08-08-ctlz.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Mips/2008-08-08-ctlz.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Mips/2008-08-08-ctlz.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Mips/2008-08-08-ctlz.ll Tue Oct 26 19:48:03 2010
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -march=mips | grep clz | count 1
 
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "mipsallegrexel-psp-elf"
+target triple = "mipsallegrexel-unknown-psp-elf"
 
 define i32 @A0(i32 %u) nounwind  {
 entry:

Modified: llvm/branches/wendling/eh/test/CodeGen/Mips/2010-07-20-Select.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Mips/2010-07-20-Select.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Mips/2010-07-20-Select.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Mips/2010-07-20-Select.ll Tue Oct 26 19:48:03 2010
@@ -9,12 +9,12 @@
   volatile store i32 0, i32* %c, align 4
   %0 = volatile load i32* %a, align 4             ; <i32> [#uses=1]
   %1 = icmp eq i32 %0, 0                          ; <i1> [#uses=1]
-; CHECK: addiu $4, $zero, 3
+; CHECK: addiu $3, $zero, 0
   %iftmp.0.0 = select i1 %1, i32 3, i32 0         ; <i32> [#uses=1]
   %2 = volatile load i32* %c, align 4             ; <i32> [#uses=1]
   %3 = icmp eq i32 %2, 0                          ; <i1> [#uses=1]
-; CHECK: addu $4, $zero, $3
-; CHECK: addu $2, $5, $4
+; CHECK: addiu $3, $zero, 3
+; CHECK: addu $2, $5, $3
   %iftmp.2.0 = select i1 %3, i32 0, i32 5         ; <i32> [#uses=1]
   %4 = add nsw i32 %iftmp.2.0, %iftmp.0.0         ; <i32> [#uses=1]
   ret i32 %4

Propchange: llvm/branches/wendling/eh/test/CodeGen/PTX/
------------------------------------------------------------------------------
--- svn:ignore (added)
+++ svn:ignore Tue Oct 26 19:48:03 2010
@@ -0,0 +1,2 @@
+Output
+Output/*.script

Modified: llvm/branches/wendling/eh/test/CodeGen/PowerPC/align.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/PowerPC/align.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/PowerPC/align.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/PowerPC/align.ll Tue Oct 26 19:48:03 2010
@@ -1,5 +1,6 @@
 ; RUN: llc < %s -mtriple=powerpc-linux-gnu | FileCheck %s -check-prefix=ELF
 ; RUN: llc < %s -mtriple=powerpc-apple-darwin9 | FileCheck %s -check-prefix=DARWIN
+; RUN: llc < %s -mtriple=powerpc-apple-darwin8 | FileCheck %s -check-prefix=DARWIN8
 
 @a = global i1 true
 ; no alignment
@@ -40,3 +41,6 @@
 @bar = common global [75 x i8] zeroinitializer, align 128
 ;ELF: .comm bar,75,128
 ;DARWIN: .comm _bar,75,7
+
+;; Darwin8 doesn't support aligned comm.  Just miscompile this.
+; DARWIN8: .comm _bar,75 ;

Modified: llvm/branches/wendling/eh/test/CodeGen/PowerPC/vec_constants.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/PowerPC/vec_constants.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/PowerPC/vec_constants.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/PowerPC/vec_constants.ll Tue Oct 26 19:48:03 2010
@@ -1,6 +1,6 @@
 ; RUN: llc < %s -march=ppc32 -mcpu=g5 | not grep CPI
 
-define void @test1(<4 x i32>* %P1, <4 x i32>* %P2, <4 x float>* %P3) {
+define void @test1(<4 x i32>* %P1, <4 x i32>* %P2, <4 x float>* %P3) nounwind {
 	%tmp = load <4 x i32>* %P1		; <<4 x i32>> [#uses=1]
 	%tmp4 = and <4 x i32> %tmp, < i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648 >		; <<4 x i32>> [#uses=1]
 	store <4 x i32> %tmp4, <4 x i32>* %P1
@@ -15,26 +15,30 @@
 	ret void
 }
 
-define <4 x i32> @test_30() {
+define <4 x i32> @test_30() nounwind {
 	ret <4 x i32> < i32 30, i32 30, i32 30, i32 30 >
 }
 
-define <4 x i32> @test_29() {
+define <4 x i32> @test_29() nounwind {
 	ret <4 x i32> < i32 29, i32 29, i32 29, i32 29 >
 }
 
-define <8 x i16> @test_n30() {
+define <8 x i16> @test_n30() nounwind {
 	ret <8 x i16> < i16 -30, i16 -30, i16 -30, i16 -30, i16 -30, i16 -30, i16 -30, i16 -30 >
 }
 
-define <16 x i8> @test_n104() {
+define <16 x i8> @test_n104() nounwind {
 	ret <16 x i8> < i8 -104, i8 -104, i8 -104, i8 -104, i8 -104, i8 -104, i8 -104, i8 -104, i8 -104, i8 -104, i8 -104, i8 -104, i8 -104, i8 -104, i8 -104, i8 -104 >
 }
 
-define <4 x i32> @test_vsldoi() {
+define <4 x i32> @test_vsldoi() nounwind {
 	ret <4 x i32> < i32 512, i32 512, i32 512, i32 512 >
 }
 
-define <4 x i32> @test_rol() {
+define <8 x i16> @test_vsldoi_65023() nounwind {
+	ret <8 x i16> < i16 65023, i16 65023,i16 65023,i16 65023,i16 65023,i16 65023,i16 65023,i16 65023 >
+}
+
+define <4 x i32> @test_rol() nounwind {
 	ret <4 x i32> < i32 -11534337, i32 -11534337, i32 -11534337, i32 -11534337 >
 }

Modified: llvm/branches/wendling/eh/test/CodeGen/SystemZ/05-MemLoadsStores.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/SystemZ/05-MemLoadsStores.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/SystemZ/05-MemLoadsStores.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/SystemZ/05-MemLoadsStores.ll Tue Oct 26 19:48:03 2010
@@ -4,7 +4,7 @@
 ; RUN: llc < %s | grep {st	%} | count 2
 
 target datalayout = "E-p:64:64:64-i8:8:16-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-a0:16:16"
-target triple = "s390x-linux"
+target triple = "s390x-ibm-linux"
 
 define void @foo1(i32* nocapture %foo, i32* nocapture %bar) nounwind {
 entry:

Modified: llvm/branches/wendling/eh/test/CodeGen/SystemZ/05-MemLoadsStores16.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/SystemZ/05-MemLoadsStores16.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/SystemZ/05-MemLoadsStores16.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/SystemZ/05-MemLoadsStores16.ll Tue Oct 26 19:48:03 2010
@@ -4,7 +4,7 @@
 ; RUN: llc < %s | grep {sth.%}  | count 2
 
 target datalayout = "E-p:64:64:64-i8:8:16-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-a0:16:16"
-target triple = "s390x-linux"
+target triple = "s390x-ibm-linux"
 
 define void @foo1(i16* nocapture %foo, i16* nocapture %bar) nounwind {
 entry:

Modified: llvm/branches/wendling/eh/test/CodeGen/SystemZ/07-BrUnCond.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/SystemZ/07-BrUnCond.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/SystemZ/07-BrUnCond.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/SystemZ/07-BrUnCond.ll Tue Oct 26 19:48:03 2010
@@ -1,7 +1,7 @@
 ; RUN: llc < %s
 
 target datalayout = "E-p:64:64:64-i1:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128"
-target triple = "s390x-linux"
+target triple = "s390x-ibm-linux"
 
 define void @foo() noreturn nounwind {
 entry:

Modified: llvm/branches/wendling/eh/test/CodeGen/SystemZ/09-DynamicAlloca.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/SystemZ/09-DynamicAlloca.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/SystemZ/09-DynamicAlloca.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/SystemZ/09-DynamicAlloca.ll Tue Oct 26 19:48:03 2010
@@ -1,7 +1,7 @@
 ; RUN: llc < %s
 
 target datalayout = "E-p:64:64:64-i8:8:16-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-a0:16:16"
-target triple = "s390x-linux"
+target triple = "s390x-ibm-linux"
 
 define void @foo(i64 %N) nounwind {
 entry:

Modified: llvm/branches/wendling/eh/test/CodeGen/SystemZ/09-Globals.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/SystemZ/09-Globals.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/SystemZ/09-Globals.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/SystemZ/09-Globals.ll Tue Oct 26 19:48:03 2010
@@ -1,7 +1,7 @@
 ; RUN: llc < %s | grep larl | count 3
 
 target datalayout = "E-p:64:64:64-i1:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128"
-target triple = "s390x-linux"
+target triple = "s390x-ibm-linux"
 @bar = common global i64 0, align 8		; <i64*> [#uses=3]
 
 define i64 @foo() nounwind readonly {

Modified: llvm/branches/wendling/eh/test/CodeGen/SystemZ/10-FuncsPic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/SystemZ/10-FuncsPic.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/SystemZ/10-FuncsPic.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/SystemZ/10-FuncsPic.ll Tue Oct 26 19:48:03 2010
@@ -2,7 +2,7 @@
 ; RUN: llc < %s -relocation-model=pic | grep PLT | count 1
 
 target datalayout = "E-p:64:64:64-i8:8:16-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-a0:16:16"
-target triple = "s390x-linux"
+target triple = "s390x-ibm-linux"
 @ptr = external global void (...)*		; <void (...)**> [#uses=2]
 
 define void @foo1() nounwind {

Modified: llvm/branches/wendling/eh/test/CodeGen/SystemZ/10-GlobalsPic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/SystemZ/10-GlobalsPic.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/SystemZ/10-GlobalsPic.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/SystemZ/10-GlobalsPic.ll Tue Oct 26 19:48:03 2010
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -relocation-model=pic | grep GOTENT | count 6
 
 target datalayout = "E-p:64:64:64-i8:8:16-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-a0:16:16"
-target triple = "s390x-linux"
+target triple = "s390x-ibm-linux"
 @src = external global i32		; <i32*> [#uses=2]
 @dst = external global i32		; <i32*> [#uses=2]
 @ptr = external global i32*		; <i32**> [#uses=2]

Modified: llvm/branches/wendling/eh/test/CodeGen/SystemZ/11-BSwap.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/SystemZ/11-BSwap.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/SystemZ/11-BSwap.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/SystemZ/11-BSwap.ll Tue Oct 26 19:48:03 2010
@@ -2,7 +2,7 @@
 
 
 target datalayout = "E-p:64:64:64-i8:8:16-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-a0:16:16"
-target triple = "s390x-linux"
+target triple = "s390x-ibm-linux"
 
 
 define i16 @foo(i16 zeroext %a) zeroext {

Modified: llvm/branches/wendling/eh/test/CodeGen/SystemZ/2009-06-02-Rotate.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/SystemZ/2009-06-02-Rotate.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/SystemZ/2009-06-02-Rotate.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/SystemZ/2009-06-02-Rotate.ll Tue Oct 26 19:48:03 2010
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -march=systemz | grep rll
 
 target datalayout = "E-p:64:64:64-i8:8:16-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-a0:16:16"
-target triple = "s390x-linux"
+target triple = "s390x-ibm-linux"
 
 define i32 @rotl(i32 %x, i32 %y, i32 %z) nounwind readnone {
 entry:

Modified: llvm/branches/wendling/eh/test/CodeGen/SystemZ/2009-07-04-Shl32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/SystemZ/2009-07-04-Shl32.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/SystemZ/2009-07-04-Shl32.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/SystemZ/2009-07-04-Shl32.ll Tue Oct 26 19:48:03 2010
@@ -1,7 +1,7 @@
 ; RUN: llc < %s
 
 target datalayout = "E-p:64:64:64-i8:8:16-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-a0:16:16"
-target triple = "s390x-linux"
+target triple = "s390x-ibm-linux"
 
 define void @compdecomp(i8* nocapture %data, i64 %data_len) nounwind {
 entry:

Modified: llvm/branches/wendling/eh/test/CodeGen/SystemZ/2009-07-05-Shifts.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/SystemZ/2009-07-05-Shifts.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/SystemZ/2009-07-05-Shifts.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/SystemZ/2009-07-05-Shifts.ll Tue Oct 26 19:48:03 2010
@@ -1,7 +1,7 @@
 ; RUN: llc < %s
 
 target datalayout = "E-p:64:64:64-i8:8:16-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-a0:16:16"
-target triple = "s390x-linux"
+target triple = "s390x-ibm-linux"
 
 define signext i32 @bit_place_piece(i32 signext %col, i32 signext %player, i64* nocapture %b1, i64* nocapture %b2) nounwind {
 entry:

Modified: llvm/branches/wendling/eh/test/CodeGen/SystemZ/2009-07-10-BadIncomingArgOffset.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/SystemZ/2009-07-10-BadIncomingArgOffset.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/SystemZ/2009-07-10-BadIncomingArgOffset.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/SystemZ/2009-07-10-BadIncomingArgOffset.ll Tue Oct 26 19:48:03 2010
@@ -1,7 +1,7 @@
 ; RUN: llc < %s | FileCheck %s
 
 target datalayout = "E-p:64:64:64-i8:8:16-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-a0:16:16"
-target triple = "s390x-linux"
+target triple = "s390x-ibm-linux"
 
 declare void @rdft(i32 signext, i32 signext, double*, i32* nocapture, double*) nounwind
 

Modified: llvm/branches/wendling/eh/test/CodeGen/SystemZ/2009-07-11-FloatBitConvert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/SystemZ/2009-07-11-FloatBitConvert.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/SystemZ/2009-07-11-FloatBitConvert.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/SystemZ/2009-07-11-FloatBitConvert.ll Tue Oct 26 19:48:03 2010
@@ -1,7 +1,7 @@
 ; RUN: llc < %s
 
 target datalayout = "E-p:64:64:64-i8:8:16-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-a0:16:16"
-target triple = "s390x-linux"
+target triple = "s390x-ibm-linux"
 
 define float @foo(i32 signext %a) {
 entry:

Modified: llvm/branches/wendling/eh/test/CodeGen/SystemZ/2009-07-11-InvalidRIISel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/SystemZ/2009-07-11-InvalidRIISel.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/SystemZ/2009-07-11-InvalidRIISel.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/SystemZ/2009-07-11-InvalidRIISel.ll Tue Oct 26 19:48:03 2010
@@ -1,7 +1,7 @@
 ; RUN: llc < %s
 
 target datalayout = "E-p:64:64:64-i8:8:16-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-a0:16:16"
-target triple = "s390x-linux"
+target triple = "s390x-ibm-linux"
 
 define signext i32 @dfg_parse() nounwind {
 entry:

Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb/2007-05-05-InvalidPushPop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb/2007-05-05-InvalidPushPop.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb/2007-05-05-InvalidPushPop.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb/2007-05-05-InvalidPushPop.ll Tue Oct 26 19:48:03 2010
@@ -1,6 +1,6 @@
 ; RUN: llc < %s | not grep r11
 
-target triple = "thumb-linux-gnueabi"
+target triple = "thumb-unknown-linux-gnueabi"
 	%struct.__sched_param = type { i32 }
 	%struct.pthread_attr_t = type { i32, i32, %struct.__sched_param, i32, i32, i32, i32, i8*, i32 }
 @i.1882 = internal global i32 1		; <i32*> [#uses=2]

Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb/2010-07-15-debugOrdering.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb/2010-07-15-debugOrdering.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb/2010-07-15-debugOrdering.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb/2010-07-15-debugOrdering.ll Tue Oct 26 19:48:03 2010
@@ -10,7 +10,7 @@
 define void @_Z19getClosestDiagonal3ii(%0* noalias sret, i32, i32) nounwind {
 ; CHECK: blx ___muldf3
 ; CHECK: blx ___muldf3
-; CHECK: beq LBB0_8
+; CHECK: beq LBB0_7
 ; CHECK: blx ___muldf3
 ; <label>:3
   switch i32 %1, label %4 [

Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb/dyn-stackalloc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb/dyn-stackalloc.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb/dyn-stackalloc.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb/dyn-stackalloc.ll Tue Oct 26 19:48:03 2010
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -march=thumb | not grep {ldr sp}
 ; RUN: llc < %s -mtriple=thumb-apple-darwin | \
 ; RUN:   not grep {sub.*r7}
-; RUN: llc < %s -march=thumb | grep 4294967280
+; RUN: llc < %s -march=thumb | grep {mov.*r6, sp}
 
 	%struct.state = type { i32, %struct.info*, float**, i32, i32, i32, i32, i32, i32, i32, i32, i32, i64, i64, i64, i64, i64, i64, i8* }
 	%struct.info = type { i32, i32, i32, i32, i32, i32, i32, i8* }

Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb/large-stack.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb/large-stack.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb/large-stack.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb/large-stack.ll Tue Oct 26 19:48:03 2010
@@ -1,20 +1,35 @@
-; RUN: llc < %s -march=thumb | grep {ldr.*LCP} | count 5
+; RUN: llc < %s -mtriple=thumb-apple-darwin | FileCheck %s
 
 define void @test1() {
+; CHECK: test1:
+; CHECK: sub sp, #256
+; CHECK: add sp, #256
     %tmp = alloca [ 64 x i32 ] , align 4
     ret void
 }
 
 define void @test2() {
+; CHECK: test2:
+; CHECK: ldr r0, LCPI
+; CHECK: add sp, r0
+; CHECK: mov sp, r7
+; CHECK: sub sp, #4
     %tmp = alloca [ 4168 x i8 ] , align 4
     ret void
 }
 
 define i32 @test3() {
-	%retval = alloca i32, align 4
-	%tmp = alloca i32, align 4
-	%a = alloca [805306369 x i8], align 16
-	store i32 0, i32* %tmp
-	%tmp1 = load i32* %tmp
-        ret i32 %tmp1
+; CHECK: test3:
+; CHECK: ldr r2, LCPI
+; CHECK: add sp, r2
+; CHECK: ldr r1, LCPI
+; CHECK: add r1, sp
+; CHECK: mov sp, r7
+; CHECK: sub sp, #4
+    %retval = alloca i32, align 4
+    %tmp = alloca i32, align 4
+    %a = alloca [805306369 x i8], align 16
+    store i32 0, i32* %tmp
+    %tmp1 = load i32* %tmp
+    ret i32 %tmp1
 }

Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb/vargs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb/vargs.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb/vargs.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb/vargs.ll Tue Oct 26 19:48:03 2010
@@ -1,5 +1,5 @@
 ; RUN: llc < %s -march=thumb
-; RUN: llc < %s -mtriple=thumb-linux | grep pop | count 1
+; RUN: llc < %s -mtriple=thumb-linux | grep pop | count 2
 ; RUN: llc < %s -mtriple=thumb-darwin | grep pop | count 2
 
 @str = internal constant [4 x i8] c"%d\0A\00"           ; <[4 x i8]*> [#uses=1]

Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-10-15-ITBlockBranch.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-10-15-ITBlockBranch.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-10-15-ITBlockBranch.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-10-15-ITBlockBranch.ll Tue Oct 26 19:48:03 2010
@@ -11,8 +11,8 @@
 define weak arm_aapcs_vfpcc i32 @_ZNKSs7compareERKSs(%"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %this, %"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %__str) {
 ; CHECK: _ZNKSs7compareERKSs:
 ; CHECK:      it  eq
-; CHECK-NEXT: subeq.w r0, r6, r8
-; CHECK-NEXT: ldmia.w sp!, {r4, r5, r6, r8, r9, pc}
+; CHECK-NEXT: subeq r0, r6, r7
+; CHECK-NEXT: ldmia.w sp!, {r4, r5, r6, r7, r8, pc}
 entry:
   %0 = tail call arm_aapcs_vfpcc  i32 @_ZNKSs4sizeEv(%"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %this) ; <i32> [#uses=3]
   %1 = tail call arm_aapcs_vfpcc  i32 @_ZNKSs4sizeEv(%"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %__str) ; <i32> [#uses=3]

Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-04-15-DynAllocBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-04-15-DynAllocBug.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-04-15-DynAllocBug.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-04-15-DynAllocBug.ll Tue Oct 26 19:48:03 2010
@@ -7,19 +7,12 @@
 define void @t() nounwind ssp {
 entry:
 ; CHECK: t:
-; CHECK:  push  {r4, r7}
-; CHECK:  mov r0, sp
-; CHECK:  add r7, sp, #4
-; CHECK:  bic r0, r0, #7
+  %size = mul i32 8, 2
 ; CHECK:  subs  r0, #16
 ; CHECK:  mov sp, r0
-; CHECK:  mov r0, sp
-; CHECK:  bic r0, r0, #7
+  %vla_a = alloca i8, i32 %size, align 8
 ; CHECK:  subs  r0, #16
 ; CHECK:  mov sp, r0
-
-  %size = mul i32 8, 2
-  %vla_a = alloca i8, i32 %size, align 8
   %vla_b = alloca i8, i32 %size, align 8
   unreachable
 }

Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-06-14-NEONCoalescer.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-06-14-NEONCoalescer.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-06-14-NEONCoalescer.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-06-14-NEONCoalescer.ll Tue Oct 26 19:48:03 2010
@@ -23,8 +23,8 @@
   %4 = insertelement <2 x double> %2, double %V.0.ph, i32 1 ; <<2 x double>> [#uses=2]
 ; Constant pool load followed by add.
 ; Then clobber the loaded register, not the sum.
-; CHECK: vldr.64 [[LDR:d.]]
-; CHECK: vadd.f64 [[ADD:d.]], [[LDR]], [[LDR]]
+; CHECK: vldr.64 [[LDR:d.*]],
+; CHECK: vadd.f64 [[ADD:d.*]], [[LDR]], [[LDR]]
 ; CHECK: vmov.f64 [[LDR]]
   %5 = fadd <2 x double> %3, %3                   ; <<2 x double>> [#uses=2]
   %6 = fadd <2 x double> %4, %4                   ; <<2 x double>> [#uses=2]

Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-06-21-TailMergeBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-06-21-TailMergeBug.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-06-21-TailMergeBug.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-06-21-TailMergeBug.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -O3 -relocation-model=pic -mcpu=cortex-a8 | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin -O3 -relocation-model=pic | FileCheck %s
 ; rdar://8115404
 ; Tail merging must not split an IT block.
 
@@ -32,15 +32,14 @@
 
 define fastcc i32 @parse_percent_token() nounwind {
 entry:
-; CHECK: ittt eq
-; CHECK: ittt eq
-; CHECK: ittt eq
-; CHECK: ittt eq
-; CHECK: ittt eq
+; CHECK: itt eq
+; CHECK: itt eq
+; CHECK: itt eq
+; CHECK: itt eq
+; CHECK: itt eq
 ; CHECK: moveq r0
 ; CHECK-NOT: LBB0_
-; CHECK: ldreq
-; CHECK: popeq
+; CHECK: ldmiaeq
   switch i32 undef, label %bb7 [
     i32 37, label %bb43
     i32 48, label %bb5

Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/crash.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/crash.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/crash.ll Tue Oct 26 19:48:03 2010
@@ -14,11 +14,11 @@
   %6 = bitcast i32* %sp3 to <4 x i32>*            ; <<4 x i32>*> [#uses=1]
   %7 = load <4 x i32>* %6, align 16               ; <<4 x i32>> [#uses=1]
   %8 = bitcast i32* %dp to i8*                    ; <i8*> [#uses=1]
-  tail call void @llvm.arm.neon.vst4.v4i32(i8* %8, <4 x i32> %1, <4 x i32> %3, <4 x i32> %5, <4 x i32> %7)
+  tail call void @llvm.arm.neon.vst4.v4i32(i8* %8, <4 x i32> %1, <4 x i32> %3, <4 x i32> %5, <4 x i32> %7, i32 1)
   ret void
 }
 
-declare void @llvm.arm.neon.vst4.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>) nounwind
+declare void @llvm.arm.neon.vst4.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32) nounwind
 
 @sbuf = common global [16 x i32] zeroinitializer, align 16 ; <[16 x i32]*> [#uses=5]
 @dbuf = common global [16 x i32] zeroinitializer  ; <[16 x i32]*> [#uses=2]
@@ -44,6 +44,6 @@
   %3 = load <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32]* @sbuf, i32 0, i32 4) to <4 x i32>*), align 16 ; <<4 x i32>> [#uses=1]
   %4 = load <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32]* @sbuf, i32 0, i32 8) to <4 x i32>*), align 16 ; <<4 x i32>> [#uses=1]
   %5 = load <4 x i32>* bitcast (i32* getelementptr inbounds ([16 x i32]* @sbuf, i32 0, i32 12) to <4 x i32>*), align 16 ; <<4 x i32>> [#uses=1]
-  tail call void @llvm.arm.neon.vst4.v4i32(i8* bitcast ([16 x i32]* @dbuf to i8*), <4 x i32> %2, <4 x i32> %3, <4 x i32> %4, <4 x i32> %5) nounwind
+  tail call void @llvm.arm.neon.vst4.v4i32(i8* bitcast ([16 x i32]* @dbuf to i8*), <4 x i32> %2, <4 x i32> %3, <4 x i32> %4, <4 x i32> %5, i32 1) nounwind
   ret i32 0
 }
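
The NEON load/store intrinsics now carry an explicit alignment operand as their last i32 argument; the i32 1 used here asserts only byte alignment, matching what the old argument-less form implied. The updated shape, with hypothetical operands %p and %a..%d:

declare void @llvm.arm.neon.vst4.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32) nounwind

  ; the trailing i32 is the alignment in bytes
  tail call void @llvm.arm.neon.vst4.v4i32(i8* %p, <4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d, i32 1)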

Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/cross-rc-coalescing-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/cross-rc-coalescing-2.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/cross-rc-coalescing-2.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/cross-rc-coalescing-2.ll Tue Oct 26 19:48:03 2010
@@ -1,15 +1,23 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin9 -mcpu=cortex-a8 | grep vmov.f32 | count 1
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin9 -mcpu=cortex-a8 | FileCheck %s
 
 define void @fht(float* nocapture %fz, i16 signext %n) nounwind {
+; CHECK: fht:
 entry:
   br label %bb5
 
 bb5:                                              ; preds = %bb5, %entry
+; CHECK: %bb5
+; CHECK: bne
   br i1 undef, label %bb5, label %bb.nph
 
 bb.nph:                                           ; preds = %bb5
   br label %bb7
 
+; Loop preheader
+; CHECK: vmov.f32
+; CHECK: vmul.f32
+; CHECK: vsub.f32
+; CHECK: vadd.f32
 bb7:                                              ; preds = %bb9, %bb.nph
   %s1.02 = phi float [ undef, %bb.nph ], [ %35, %bb9 ] ; <float> [#uses=3]
   %tmp79 = add i32 undef, undef                   ; <i32> [#uses=1]
@@ -19,6 +27,9 @@
   br label %bb8
 
 bb8:                                              ; preds = %bb8, %bb7
+; CHECK: %bb8
+; CHECK-NOT: vmov.f32
+; CHECK: blt
   %tmp54 = add i32 0, %tmp53                      ; <i32> [#uses=0]
   %fi.1 = getelementptr float* %fz, i32 undef     ; <float*> [#uses=2]
   %tmp80 = add i32 0, %tmp79                      ; <i32> [#uses=1]
@@ -62,6 +73,8 @@
   br i1 %34, label %bb8, label %bb9
 
 bb9:                                              ; preds = %bb8
+; CHECK: %bb9
+; CHECK: vmov.f32
   %35 = fadd float 0.000000e+00, undef            ; <float> [#uses=1]
   br label %bb7
 }

Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/div.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/div.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/div.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/div.ll Tue Oct 26 19:48:03 2010
@@ -1,6 +1,6 @@
 ; RUN: llc < %s -march=thumb -mattr=+thumb2 \
 ; RUN:    | FileCheck %s -check-prefix=CHECK-THUMB
-; RUN: llc < %s -march=arm -mcpu=cortex-m3 -mattr=+thumb2 \
+; RUN: llc < %s -march=thumb -mcpu=cortex-m3 -mattr=+thumb2 \
 ; RUN:    | FileCheck %s -check-prefix=CHECK-THUMBV7M
 
 define i32 @f1(i32 %a, i32 %b) {

Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/large-stack.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/large-stack.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/large-stack.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/large-stack.ll Tue Oct 26 19:48:03 2010
@@ -27,7 +27,7 @@
 ; DARWIN: sub.w sp, sp, #805306368
 ; DARWIN: sub sp, #20
 ; LINUX: test3:
-; LINUX: stmdb   sp!, {r4, r7, r11, lr}
+; LINUX: push {r4, r7, r11, lr}
 ; LINUX: sub.w sp, sp, #805306368
 ; LINUX: sub sp, #16
     %retval = alloca i32, align 4

Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/ldr-str-imm12.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/ldr-str-imm12.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/ldr-str-imm12.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/ldr-str-imm12.ll Tue Oct 26 19:48:03 2010
@@ -22,7 +22,7 @@
 
 define %union.rec* @Manifest(%union.rec* %x, %union.rec* %env, %struct.STYLE* %style, %union.rec** %bthr, %union.rec** %fthr, %union.rec** %target, %union.rec** %crs, i32 %ok, i32 %need_expand, %union.rec** %enclose, i32 %fcr) nounwind {
 entry:
-; CHECK:       ldr.w	r9, [r7, #28]
+; CHECK:       ldr.w	{{(r[0-9])|(lr)}}, [r7, #28]
   %xgaps.i = alloca [32 x %union.rec*], align 4   ; <[32 x %union.rec*]*> [#uses=0]
   %ycomp.i = alloca [32 x %union.rec*], align 4   ; <[32 x %union.rec*]*> [#uses=0]
   br label %bb20
@@ -46,9 +46,9 @@
 
 bb420:                                            ; preds = %bb20, %bb20
 ; CHECK: bb420
-; CHECK: str r{{[0-7]}}, [sp]
-; CHECK: str r{{[0-7]}}, [sp, #4]
-; CHECK: str r{{[0-7]}}, [sp, #8]
+; CHECK: str{{(.w)?}} r{{[0-9]+}}, [sp]
+; CHECK: str{{(.w)?}} r{{[0-9]+}}, [sp, #4]
+; CHECK: str{{(.w)?}} r{{[0-9]+}}, [sp, #8]
 ; CHECK: str{{(.w)?}} r{{[0-9]+}}, [sp, #24]
   store %union.rec* null, %union.rec** @zz_hold, align 4
   store %union.rec* null, %union.rec** @zz_res, align 4

Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/lsr-deficiency.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/lsr-deficiency.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/lsr-deficiency.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/lsr-deficiency.ll Tue Oct 26 19:48:03 2010
@@ -21,8 +21,8 @@
 bb:                                               ; preds = %bb, %entry
 ; CHECK: LBB0_1:
 ; CHECK: cmp r2, #0
-; CHECK: sub.w r9, r2, #1
-; CHECK: mov r2, r9
+; CHECK: sub{{(.w)?}} [[REGISTER:(r[0-9]+)|(lr)]], r2, #1
+; CHECK: mov r2, [[REGISTER]]
 
   %0 = phi i32 [ %.pre, %entry ], [ %3, %bb ]     ; <i32> [#uses=1]
   %indvar = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=2]

Removed: llvm/branches/wendling/eh/test/CodeGen/Thumb2/machine-licm-vdup.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/machine-licm-vdup.ll?rev=117424&view=auto
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/machine-licm-vdup.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/machine-licm-vdup.ll (removed)
@@ -1,38 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -disable-fp-elim                -arm-vdup-splat | FileCheck %s
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -relocation-model=pic -disable-fp-elim -arm-vdup-splat | FileCheck %s 
-; Modified version of machine-licm.ll with -arm-vdup-splat turned on, 8003375.
-; Eventually this should become the default and be moved into machine-licm.ll.
-; FIXME: the vdup should be hoisted out of the loop, 8248029.
-
-define void @t2(i8* %ptr1, i8* %ptr2) nounwind {
-entry:
-; CHECK: t2:
-; CHECK: mov.w r3, #1065353216
-  br i1 undef, label %bb1, label %bb2
-
-bb1:
-; CHECK-NEXT: %bb1
-; CHECK: vdup.32 q1, r3
-  %indvar = phi i32 [ %indvar.next, %bb1 ], [ 0, %entry ]
-  %tmp1 = shl i32 %indvar, 2
-  %gep1 = getelementptr i8* %ptr1, i32 %tmp1
-  %tmp2 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* %gep1)
-  %tmp3 = call <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, <4 x float> %tmp2)
-  %gep2 = getelementptr i8* %ptr2, i32 %tmp1
-  call void @llvm.arm.neon.vst1.v4f32(i8* %gep2, <4 x float> %tmp3)
-  %indvar.next = add i32 %indvar, 1
-  %cond = icmp eq i32 %indvar.next, 10
-  br i1 %cond, label %bb2, label %bb1
-
-bb2:
-  ret void
-}
-
-; CHECK-NOT: LCPI1_0:
-; CHECK: .subsections_via_symbols
-
-declare <4 x float> @llvm.arm.neon.vld1.v4f32(i8*) nounwind readonly
-
-declare void @llvm.arm.neon.vst1.v4f32(i8*, <4 x float>) nounwind
-
-declare <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float>, <4 x float>) nounwind readnone

Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/machine-licm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/machine-licm.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/machine-licm.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/machine-licm.ll Tue Oct 26 19:48:03 2010
@@ -55,8 +55,8 @@
 define void @t2(i8* %ptr1, i8* %ptr2) nounwind {
 entry:
 ; CHECK: t2:
-; CHECK: adr r{{.}}, #LCPI1_0
-; CHECK: vldmia r3, {d0, d1}
+; CHECK: mov.w r3, #1065353216
+; CHECK: vdup.32 q{{.*}}, r3
   br i1 undef, label %bb1, label %bb2
 
 bb1:
@@ -64,10 +64,10 @@
   %indvar = phi i32 [ %indvar.next, %bb1 ], [ 0, %entry ]
   %tmp1 = shl i32 %indvar, 2
   %gep1 = getelementptr i8* %ptr1, i32 %tmp1
-  %tmp2 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* %gep1)
+  %tmp2 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* %gep1, i32 1)
   %tmp3 = call <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, <4 x float> %tmp2)
   %gep2 = getelementptr i8* %ptr2, i32 %tmp1
-  call void @llvm.arm.neon.vst1.v4f32(i8* %gep2, <4 x float> %tmp3)
+  call void @llvm.arm.neon.vst1.v4f32(i8* %gep2, <4 x float> %tmp3, i32 1)
   %indvar.next = add i32 %indvar, 1
   %cond = icmp eq i32 %indvar.next, 10
   br i1 %cond, label %bb2, label %bb1
@@ -76,11 +76,11 @@
   ret void
 }
 
-; CHECK: LCPI1_0:
-; CHECK: .section
+; CHECK-NOT: LCPI1_0:
+; CHECK: .subsections_via_symbols
 
-declare <4 x float> @llvm.arm.neon.vld1.v4f32(i8*) nounwind readonly
+declare <4 x float> @llvm.arm.neon.vld1.v4f32(i8*, i32) nounwind readonly
 
-declare void @llvm.arm.neon.vst1.v4f32(i8*, <4 x float>) nounwind
+declare void @llvm.arm.neon.vst1.v4f32(i8*, <4 x float>, i32) nounwind
 
 declare <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float>, <4 x float>) nounwind readnone
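
The new checks expect the <4 x float> splat of 1.0 to be built from an integer immediate plus vdup rather than loaded from the constant pool (hence the CHECK-NOT on LCPI1_0): 1065353216 is 0x3F800000, the IEEE-754 single-precision bit pattern of 1.0 (sign 0, biased exponent 127, zero fraction), so the pair below materializes <1.0, 1.0, 1.0, 1.0> without a literal-pool entry.

;   mov.w   r3, #1065353216    ; 0x3F800000 == bitcast float 1.0 to i32
;   vdup.32 q{{.*}}, r3        ; replicate that lane across the q register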

Removed: llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-badreg-operands.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-badreg-operands.ll?rev=117424&view=auto
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-badreg-operands.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-badreg-operands.ll (removed)
@@ -1,15 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin10 | FileCheck %s
-
-define void @b(i32 %x) nounwind optsize {
-entry:
-; CHECK: b
-; CHECK: mov r2, sp
-; CHECK: mls r0, r0, r1, r2
-; CHECK: mov sp, r0
-  %0 = mul i32 %x, 24                             ; <i32> [#uses=1]
-  %vla = alloca i8, i32 %0, align 1               ; <i8*> [#uses=1]
-  call arm_aapcscc  void @a(i8* %vla) nounwind optsize
-  ret void
-}
-
-declare void @a(i8*) optsize

Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-call-tc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-call-tc.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-call-tc.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-call-tc.ll Tue Oct 26 19:48:03 2010
@@ -1,5 +1,6 @@
 ; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mattr=+thumb2 | FileCheck %s -check-prefix=DARWIN
 ; RUN: llc < %s -mtriple=thumbv7-linux -mattr=+thumb2 | FileCheck %s -check-prefix=LINUX
+; XFAIL: *
 
 @t = weak global i32 ()* null           ; <i32 ()**> [#uses=1]
 

Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-cmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-cmp.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-cmp.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-cmp.ll Tue Oct 26 19:48:03 2010
@@ -39,3 +39,17 @@
     %tmp = icmp eq i32 %a, 1114112
     ret i1 %tmp
 }
+
+; Check that we don't do an invalid (a > b) --> !(a < b + 1) transform.
+;
+; CHECK: f6:
+; CHECK-NOT: cmp.w r0, #-2147483648
+; CHECK: bx lr
+define i32 @f6(i32 %a) {
+    %tmp = icmp sgt i32 %a, 2147483647
+    br i1 %tmp, label %true, label %false
+true:
+    ret i32 2
+false:
+    ret i32 0
+}
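
The new f6 function pins down why the rewrite is invalid at the signed boundary: with b = 2147483647 (INT32_MAX), b + 1 wraps to -2147483648, so icmp sgt i32 %a, 2147483647 (always false) would become the negation of icmp slt i32 %a, -2147483648 (also always false), i.e. always true; hence the CHECK-NOT on cmp.w r0, #-2147483648. In IR terms:

;   icmp sgt i32 %a, 2147483647    ; always false: no i32 exceeds INT32_MAX
;   icmp slt i32 %a, -2147483648   ; "b + 1" has wrapped to INT32_MIN; also
;                                  ; always false, so its negation is always true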

Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-ifcvt1-tc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-ifcvt1-tc.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-ifcvt1-tc.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-ifcvt1-tc.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,5 @@
 ; RUN: llc < %s -mtriple=thumbv7-apple-darwin | FileCheck %s
+; XFAIL: *
 
 define i32 @t1(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
 ; CHECK: t1:

Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-pack.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-pack.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-pack.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-pack.ll Tue Oct 26 19:48:03 2010
@@ -1,8 +1,7 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2,+t2xtpk | \
-; RUN:   grep pkhbt | count 5
-; RUN: llc < %s -march=thumb -mattr=+thumb2,+t2xtpk | \
-; RUN:   grep pkhtb | count 4
+; RUN: llc < %s -march=thumb -mattr=+thumb2,+t2xtpk | FileCheck %s
 
+; CHECK: test1
+; CHECK: pkhbt   r0, r0, r1, lsl #16
 define i32 @test1(i32 %X, i32 %Y) {
 	%tmp1 = and i32 %X, 65535		; <i32> [#uses=1]
 	%tmp4 = shl i32 %Y, 16		; <i32> [#uses=1]
@@ -10,6 +9,8 @@
 	ret i32 %tmp5
 }
 
+; CHECK: test1a
+; CHECK: pkhbt   r0, r0, r1, lsl #16
 define i32 @test1a(i32 %X, i32 %Y) {
 	%tmp19 = and i32 %X, 65535		; <i32> [#uses=1]
 	%tmp37 = shl i32 %Y, 16		; <i32> [#uses=1]
@@ -17,6 +18,8 @@
 	ret i32 %tmp5
 }
 
+; CHECK: test2
+; CHECK: pkhbt   r0, r0, r1, lsl #12
 define i32 @test2(i32 %X, i32 %Y) {
 	%tmp1 = and i32 %X, 65535		; <i32> [#uses=1]
 	%tmp3 = shl i32 %Y, 12		; <i32> [#uses=1]
@@ -25,6 +28,8 @@
 	ret i32 %tmp57
 }
 
+; CHECK: test3
+; CHECK: pkhbt   r0, r0, r1, lsl #18
 define i32 @test3(i32 %X, i32 %Y) {
 	%tmp19 = and i32 %X, 65535		; <i32> [#uses=1]
 	%tmp37 = shl i32 %Y, 18		; <i32> [#uses=1]
@@ -32,6 +37,8 @@
 	ret i32 %tmp5
 }
 
+; CHECK: test4
+; CHECK: pkhbt   r0, r0, r1
 define i32 @test4(i32 %X, i32 %Y) {
 	%tmp1 = and i32 %X, 65535		; <i32> [#uses=1]
 	%tmp3 = and i32 %Y, -65536		; <i32> [#uses=1]
@@ -39,6 +46,8 @@
 	ret i32 %tmp46
 }
 
+; CHECK: test5
+; CHECK: pkhtb   r0, r0, r1, asr #16
 define i32 @test5(i32 %X, i32 %Y) {
 	%tmp17 = and i32 %X, -65536		; <i32> [#uses=1]
 	%tmp2 = bitcast i32 %Y to i32		; <i32> [#uses=1]
@@ -47,6 +56,8 @@
 	ret i32 %tmp5
 }
 
+; CHECK: test5a
+; CHECK: pkhtb   r0, r0, r1, asr #16
 define i32 @test5a(i32 %X, i32 %Y) {
 	%tmp110 = and i32 %X, -65536		; <i32> [#uses=1]
 	%tmp37 = lshr i32 %Y, 16		; <i32> [#uses=1]
@@ -55,6 +66,8 @@
 	ret i32 %tmp5
 }
 
+; CHECK: test6
+; CHECK: pkhtb   r0, r0, r1, asr #12
 define i32 @test6(i32 %X, i32 %Y) {
 	%tmp1 = and i32 %X, -65536		; <i32> [#uses=1]
 	%tmp37 = lshr i32 %Y, 12		; <i32> [#uses=1]
@@ -64,6 +77,8 @@
 	ret i32 %tmp59
 }
 
+; CHECK: test7
+; CHECK: pkhtb   r0, r0, r1, asr #18
 define i32 @test7(i32 %X, i32 %Y) {
 	%tmp1 = and i32 %X, -65536		; <i32> [#uses=1]
 	%tmp3 = ashr i32 %Y, 18		; <i32> [#uses=1]
@@ -71,3 +86,12 @@
 	%tmp57 = or i32 %tmp4, %tmp1		; <i32> [#uses=1]
 	ret i32 %tmp57
 }
+
+; CHECK: test8
+; CHECK: pkhtb   r0, r0, r1, asr #22
+define i32 @test8(i32 %X, i32 %Y) {
+	%tmp1 = and i32 %X, -65536
+	%tmp3 = lshr i32 %Y, 22
+	%tmp57 = or i32 %tmp3, %tmp1
+	ret i32 %tmp57
+}
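
The two grep | count RUN lines are folded into one FileCheck invocation with a label and an expected pack instruction ahead of each function, so a failure now points at a specific function instead of at a global instruction count. The per-function pattern (function name hypothetical):

; CHECK: some_test
; CHECK: pkhbt   r0, r0, r1, lsl #16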

Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-spill-q.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-spill-q.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-spill-q.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-spill-q.ll Tue Oct 26 19:48:03 2010
@@ -7,7 +7,7 @@
 %quux = type { i32 (...)**, %baz*, i32 }
 %quuz = type { %quux, i32, %bar, [128 x i8], [16 x %foo], %foo, %foo, %foo }
 
-declare <4 x float> @llvm.arm.neon.vld1.v4f32(i8*) nounwind readonly
+declare <4 x float> @llvm.arm.neon.vld1.v4f32(i8*, i32) nounwind readonly
 
 define void @aaa(%quuz* %this, i8* %block) {
 ; CHECK: aaa:
@@ -15,11 +15,31 @@
 ; CHECK: vst1.64 {{.*}}[{{.*}}, :128]
 ; CHECK: vld1.64 {{.*}}[{{.*}}, :128]
 entry:
-  %0 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef) nounwind ; <<4 x float>> [#uses=1]
+  %0 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind ; <<4 x float>> [#uses=1]
   store float 6.300000e+01, float* undef, align 4
-  %1 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef) nounwind ; <<4 x float>> [#uses=1]
+  %1 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind ; <<4 x float>> [#uses=1]
+  store float 0.000000e+00, float* undef, align 4
+  %2 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind ; <<4 x float>> [#uses=1]
+  %ld3 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
+  store float 0.000000e+00, float* undef, align 4
+  %ld4 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
+  store float 0.000000e+00, float* undef, align 4
+  %ld5 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
+  store float 0.000000e+00, float* undef, align 4
+  %ld6 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
+  store float 0.000000e+00, float* undef, align 4
+  %ld7 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
+  store float 0.000000e+00, float* undef, align 4
+  %ld8 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
+  store float 0.000000e+00, float* undef, align 4
+  %ld9 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
+  store float 0.000000e+00, float* undef, align 4
+  %ld10 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
+  store float 0.000000e+00, float* undef, align 4
+  %ld11 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
+  store float 0.000000e+00, float* undef, align 4
+  %ld12 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef, i32 1) nounwind
   store float 0.000000e+00, float* undef, align 4
-  %2 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef) nounwind ; <<4 x float>> [#uses=1]
   %val173 = load <4 x float>* undef               ; <<4 x float>> [#uses=1]
   br label %bb4
 
@@ -44,7 +64,16 @@
   %18 = fmul <4 x float> %17, %val173             ; <<4 x float>> [#uses=1]
   %19 = shufflevector <4 x float> %18, <4 x float> undef, <2 x i32> <i32 2, i32 3> ; <<2 x float>> [#uses=1]
   %20 = shufflevector <2 x float> %19, <2 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>> [#uses=1]
-  %21 = fadd <4 x float> zeroinitializer, %20     ; <<4 x float>> [#uses=2]
+  %tmp1 = fadd <4 x float> %20, %ld3
+  %tmp2 = fadd <4 x float> %tmp1, %ld4
+  %tmp3 = fadd <4 x float> %tmp2, %ld5
+  %tmp4 = fadd <4 x float> %tmp3, %ld6
+  %tmp5 = fadd <4 x float> %tmp4, %ld7
+  %tmp6 = fadd <4 x float> %tmp5, %ld8
+  %tmp7 = fadd <4 x float> %tmp6, %ld9
+  %tmp8 = fadd <4 x float> %tmp7, %ld10
+  %tmp9 = fadd <4 x float> %tmp8, %ld11
+  %21 = fadd <4 x float> %tmp9, %ld12
   %22 = fcmp ogt <4 x float> %besterror.0.2264, %21 ; <<4 x i1>> [#uses=0]
   %tmp = extractelement <4 x i1> %22, i32 0
   br i1 %tmp, label %bb193, label %bb186

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/2007-04-25-MMX-PADDQ.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/2007-04-25-MMX-PADDQ.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/2007-04-25-MMX-PADDQ.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/2007-04-25-MMX-PADDQ.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,5 @@
 ; RUN: llc < %s -o - -march=x86 -mattr=+mmx | FileCheck %s
+; There are no MMX instructions here.  We use add+adcl for the adds.
 
 define <1 x i64> @unsigned_add3(<1 x i64>* %a, <1 x i64>* %b, i32 %count) nounwind {
 entry:
@@ -7,9 +8,8 @@
 
 bb26:		; preds = %bb26, %entry
 
-; CHECK:  movq	({{.*}},8), %mm
-; CHECK:  paddq	({{.*}},8), %mm
-; CHECK:  paddq	%mm{{[0-7]}}, %mm
+; CHECK:  addl  %e
+; CHECK:  adcl  %e
 
 	%i.037.0 = phi i32 [ 0, %entry ], [ %tmp25, %bb26 ]		; <i32> [#uses=3]
 	%sum.035.0 = phi <1 x i64> [ zeroinitializer, %entry ], [ %tmp22, %bb26 ]		; <<1 x i64>> [#uses=1]
@@ -27,3 +27,38 @@
 	%sum.035.1 = phi <1 x i64> [ zeroinitializer, %entry ], [ %tmp22, %bb26 ]		; <<1 x i64>> [#uses=1]
 	ret <1 x i64> %sum.035.1
 }
+
+
+; This is the original test converted to use MMX intrinsics.
+
+define <1 x i64> @unsigned_add3a(x86_mmx* %a, x86_mmx* %b, i32 %count) nounwind {
+entry:
+        %tmp2943 = bitcast <1 x i64><i64 0> to x86_mmx
+	%tmp2942 = icmp eq i32 %count, 0		; <i1> [#uses=1]
+	br i1 %tmp2942, label %bb31, label %bb26
+
+bb26:		; preds = %bb26, %entry
+
+; CHECK:  movq	({{.*}},8), %mm
+; CHECK:  paddq	({{.*}},8), %mm
+; CHECK:  paddq	%mm{{[0-7]}}, %mm
+
+	%i.037.0 = phi i32 [ 0, %entry ], [ %tmp25, %bb26 ]		; <i32> [#uses=3]
+	%sum.035.0 = phi x86_mmx [ %tmp2943, %entry ], [ %tmp22, %bb26 ]		; <x86_mmx> [#uses=1]
+	%tmp13 = getelementptr x86_mmx* %b, i32 %i.037.0		; <x86_mmx*> [#uses=1]
+	%tmp14 = load x86_mmx* %tmp13		; <x86_mmx> [#uses=1]
+	%tmp18 = getelementptr x86_mmx* %a, i32 %i.037.0		; <x86_mmx*> [#uses=1]
+	%tmp19 = load x86_mmx* %tmp18		; <x86_mmx> [#uses=1]
+	%tmp21 = call x86_mmx @llvm.x86.mmx.padd.q (x86_mmx %tmp19, x86_mmx %tmp14)		; <x86_mmx> [#uses=1]
+	%tmp22 = call x86_mmx @llvm.x86.mmx.padd.q (x86_mmx %tmp21, x86_mmx %sum.035.0)		; <x86_mmx> [#uses=2]
+	%tmp25 = add i32 %i.037.0, 1		; <i32> [#uses=2]
+	%tmp29 = icmp ult i32 %tmp25, %count		; <i1> [#uses=1]
+	br i1 %tmp29, label %bb26, label %bb31
+
+bb31:		; preds = %bb26, %entry
+	%sum.035.1 = phi x86_mmx [ %tmp2943, %entry ], [ %tmp22, %bb26 ]		; <x86_mmx> [#uses=1]
+        %t = bitcast x86_mmx %sum.035.1 to <1 x i64>
+	ret <1 x i64> %t
+}
+
+declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx)
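
These MMX updates reflect the intrinsics now being declared over the opaque x86_mmx type: generic vectors such as <1 x i64> reach an intrinsic only through explicit bitcasts, while plain vector arithmetic on <1 x i64> (as in the unchanged unsigned_add3 above) is lowered with ordinary integer addl/adcl rather than MMX. A minimal sketch of the intrinsic form, with a hypothetical value %v:

  %m = bitcast <1 x i64> %v to x86_mmx
  %s = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %m, x86_mmx %m)
  %r = bitcast x86_mmx %s to <1 x i64>

declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx)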

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/2007-05-15-maskmovq.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/2007-05-15-maskmovq.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/2007-05-15-maskmovq.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/2007-05-15-maskmovq.ll Tue Oct 26 19:48:03 2010
@@ -5,10 +5,10 @@
 
 define void @test(<1 x i64> %c64, <1 x i64> %mask1, i8* %P) {
 entry:
-	%tmp4 = bitcast <1 x i64> %mask1 to <8 x i8>		; <<8 x i8>> [#uses=1]
-	%tmp6 = bitcast <1 x i64> %c64 to <8 x i8>		; <<8 x i8>> [#uses=1]
-	tail call void @llvm.x86.mmx.maskmovq( <8 x i8> %tmp6, <8 x i8> %tmp4, i8* %P )
+	%tmp4 = bitcast <1 x i64> %mask1 to x86_mmx		; <x86_mmx> [#uses=1]
+	%tmp6 = bitcast <1 x i64> %c64 to x86_mmx		; <x86_mmx> [#uses=1]
+	tail call void @llvm.x86.mmx.maskmovq( x86_mmx %tmp4, x86_mmx %tmp6, i8* %P )
 	ret void
 }
 
-declare void @llvm.x86.mmx.maskmovq(<8 x i8>, <8 x i8>, i8*)
+declare void @llvm.x86.mmx.maskmovq(x86_mmx, x86_mmx, i8*)

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/2007-06-15-IntToMMX.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/2007-06-15-IntToMMX.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/2007-06-15-IntToMMX.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/2007-06-15-IntToMMX.ll Tue Oct 26 19:48:03 2010
@@ -1,17 +1,16 @@
 ; RUN: llc < %s -march=x86-64 -mattr=+mmx | grep paddusw
- at R = external global <1 x i64>          ; <<1 x i64>*> [#uses=1]
+ at R = external global x86_mmx          ; <x86_mmx*> [#uses=1]
 
 define void @foo(<1 x i64> %A, <1 x i64> %B) {
 entry:
-        %tmp4 = bitcast <1 x i64> %B to <4 x i16>               ; <<4 x i16>> [#uses=1]
-        %tmp6 = bitcast <1 x i64> %A to <4 x i16>               ; <<4 x i16>> [#uses=1]
-        %tmp7 = tail call <4 x i16> @llvm.x86.mmx.paddus.w( <4 x i16> %tmp6, <4 x i16> %tmp4 )   ; <<4 x i16>> [#uses=1]
-        %tmp8 = bitcast <4 x i16> %tmp7 to <1 x i64>            ; <<1 x i64>> [#uses=1]
-        store <1 x i64> %tmp8, <1 x i64>* @R
+        %tmp2 = bitcast <1 x i64> %A to x86_mmx
+        %tmp3 = bitcast <1 x i64> %B to x86_mmx
+        %tmp7 = tail call x86_mmx @llvm.x86.mmx.paddus.w( x86_mmx %tmp2, x86_mmx %tmp3 )   ; <x86_mmx> [#uses=1]
+        store x86_mmx %tmp7, x86_mmx* @R
         tail call void @llvm.x86.mmx.emms( )
         ret void
 }
 
-declare <4 x i16> @llvm.x86.mmx.paddus.w(<4 x i16>, <4 x i16>)
+declare x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx, x86_mmx)
 
 declare void @llvm.x86.mmx.emms()

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/2007-07-03-GR64ToVR64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/2007-07-03-GR64ToVR64.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/2007-07-03-GR64ToVR64.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/2007-07-03-GR64ToVR64.ll Tue Oct 26 19:48:03 2010
@@ -2,19 +2,17 @@
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx | grep {movd	%rdi, %mm1}
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx | grep {paddusw	%mm0, %mm1}
 
- at R = external global <1 x i64>		; <<1 x i64>*> [#uses=1]
+ at R = external global x86_mmx		; <x86_mmx*> [#uses=1]
 
 define void @foo(<1 x i64> %A, <1 x i64> %B) nounwind {
 entry:
-	%tmp4 = bitcast <1 x i64> %B to <4 x i16>		; <<4 x i16>> [#uses=1]
-	%tmp6 = bitcast <1 x i64> %A to <4 x i16>		; <<4 x i16>> [#uses=1]
-	%tmp7 = tail call <4 x i16> @llvm.x86.mmx.paddus.w( <4 x i16> %tmp6, <4 x i16> %tmp4 )		; <<4 x i16>> [#uses=1]
-	%tmp8 = bitcast <4 x i16> %tmp7 to <1 x i64>		; <<1 x i64>> [#uses=1]
-	store <1 x i64> %tmp8, <1 x i64>* @R
+	%tmp4 = bitcast <1 x i64> %B to x86_mmx		; <<4 x i16>> [#uses=1]
+	%tmp6 = bitcast <1 x i64> %A to x86_mmx		; <<4 x i16>> [#uses=1]
+	%tmp7 = tail call x86_mmx @llvm.x86.mmx.paddus.w( x86_mmx %tmp6, x86_mmx %tmp4 )		; <x86_mmx> [#uses=1]
+	store x86_mmx %tmp7, x86_mmx* @R
 	tail call void @llvm.x86.mmx.emms( )
 	ret void
 }
 
-declare <4 x i16> @llvm.x86.mmx.paddus.w(<4 x i16>, <4 x i16>)
-
+declare x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx, x86_mmx)
 declare void @llvm.x86.mmx.emms()

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/2008-02-18-TailMergingBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/2008-02-18-TailMergingBug.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/2008-02-18-TailMergingBug.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/2008-02-18-TailMergingBug.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 -mcpu=yonah -stats |& grep {Number of block tails merged} | grep 9
+; RUN: llc < %s -march=x86 -mcpu=yonah -stats |& grep {Number of block tails merged} | grep 16
 ; PR1909
 
 @.str = internal constant [48 x i8] c"transformed bounds: (%.2f, %.2f), (%.2f, %.2f)\0A\00"		; <[48 x i8]*> [#uses=1]

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/2008-04-08-CoalescerCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/2008-04-08-CoalescerCrash.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/2008-04-08-CoalescerCrash.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/2008-04-08-CoalescerCrash.ll Tue Oct 26 19:48:03 2010
@@ -5,15 +5,15 @@
 	tail call void asm sideeffect "# top of block", "~{dirflag},~{fpsr},~{flags},~{di},~{si},~{dx},~{cx},~{ax}"( ) nounwind 
 	tail call void asm sideeffect ".file \224443946.c\22", "~{dirflag},~{fpsr},~{flags}"( ) nounwind 
 	tail call void asm sideeffect ".line 8", "~{dirflag},~{fpsr},~{flags}"( ) nounwind 
-	%tmp1 = tail call <2 x i32> asm sideeffect "movd $1, $0", "=={mm4},{bp},~{dirflag},~{fpsr},~{flags},~{memory}"( i32 undef ) nounwind 		; <<2 x i32>> [#uses=1]
+	%tmp1 = tail call x86_mmx asm sideeffect "movd $1, $0", "=={mm4},{bp},~{dirflag},~{fpsr},~{flags},~{memory}"( i32 undef ) nounwind 		; <x86_mmx> [#uses=1]
 	tail call void asm sideeffect ".file \224443946.c\22", "~{dirflag},~{fpsr},~{flags}"( ) nounwind 
 	tail call void asm sideeffect ".line 9", "~{dirflag},~{fpsr},~{flags}"( ) nounwind 
-	%tmp3 = tail call i32 asm sideeffect "movd $1, $0", "=={bp},{mm3},~{dirflag},~{fpsr},~{flags},~{memory}"( <2 x i32> undef ) nounwind 		; <i32> [#uses=1]
+	%tmp3 = tail call i32 asm sideeffect "movd $1, $0", "=={bp},{mm3},~{dirflag},~{fpsr},~{flags},~{memory}"( x86_mmx undef ) nounwind 		; <i32> [#uses=1]
 	tail call void asm sideeffect ".file \224443946.c\22", "~{dirflag},~{fpsr},~{flags}"( ) nounwind 
 	tail call void asm sideeffect ".line 10", "~{dirflag},~{fpsr},~{flags}"( ) nounwind 
-	tail call void asm sideeffect "movntq $0, 0($1,$2)", "{mm0},{di},{bp},~{dirflag},~{fpsr},~{flags},~{memory}"( <2 x i32> undef, i32 undef, i32 %tmp3 ) nounwind 
+	tail call void asm sideeffect "movntq $0, 0($1,$2)", "{mm0},{di},{bp},~{dirflag},~{fpsr},~{flags},~{memory}"( x86_mmx undef, i32 undef, i32 %tmp3 ) nounwind 
 	tail call void asm sideeffect ".file \224443946.c\22", "~{dirflag},~{fpsr},~{flags}"( ) nounwind 
 	tail call void asm sideeffect ".line 11", "~{dirflag},~{fpsr},~{flags}"( ) nounwind 
-	%tmp8 = tail call i32 asm sideeffect "movd $1, $0", "=={bp},{mm4},~{dirflag},~{fpsr},~{flags},~{memory}"( <2 x i32> %tmp1 ) nounwind 		; <i32> [#uses=0]
+	%tmp8 = tail call i32 asm sideeffect "movd $1, $0", "=={bp},{mm4},~{dirflag},~{fpsr},~{flags},~{memory}"( x86_mmx %tmp1 ) nounwind 		; <i32> [#uses=0]
 	ret i32 undef
 }

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/2008-06-13-VolatileLoadStore.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/2008-06-13-VolatileLoadStore.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/2008-06-13-VolatileLoadStore.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/2008-06-13-VolatileLoadStore.ll Tue Oct 26 19:48:03 2010
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep movsd | count 5
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep movl | count 2
+; RUN: llc < %s -mtriple=i686-linux -mattr=+sse2 | grep movsd | count 5
+; RUN: llc < %s -mtriple=i686-linux -mattr=+sse2 | grep movl | count 2
 
 @atomic = global double 0.000000e+00		; <double*> [#uses=1]
 @atomic2 = global double 0.000000e+00		; <double*> [#uses=1]
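
This test (and several of the X86 tests below) switches from a bare -march=x86 to an explicit -mtriple, presumably so the grepped output does not vary with the host's default target OS. The pinned form:

; RUN: llc < %s -mtriple=i686-linux -mattr=+sse2 | grep movsd | count 5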

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/2008-08-23-64Bit-maskmovq.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/2008-08-23-64Bit-maskmovq.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/2008-08-23-64Bit-maskmovq.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/2008-08-23-64Bit-maskmovq.ll Tue Oct 26 19:48:03 2010
@@ -17,11 +17,13 @@
 	br i1 false, label %bb.nph144.split, label %bb133
 
 bb.nph144.split:		; preds = %entry
-	tail call void @llvm.x86.mmx.maskmovq( <8 x i8> zeroinitializer, <8 x i8> zeroinitializer, i8* null ) nounwind
+        %tmp = bitcast <8 x i8> zeroinitializer to x86_mmx
+        %tmp2 = bitcast <8 x i8> zeroinitializer to x86_mmx
+	tail call void @llvm.x86.mmx.maskmovq( x86_mmx %tmp, x86_mmx %tmp2, i8* null ) nounwind
 	unreachable
 
 bb133:		; preds = %entry
 	ret void
 }
 
-declare void @llvm.x86.mmx.maskmovq(<8 x i8>, <8 x i8>, i8*) nounwind
+declare void @llvm.x86.mmx.maskmovq(x86_mmx, x86_mmx, i8*) nounwind

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/2008-09-05-sinttofp-2xi32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/2008-09-05-sinttofp-2xi32.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/2008-09-05-sinttofp-2xi32.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/2008-09-05-sinttofp-2xi32.ll Tue Oct 26 19:48:03 2010
@@ -1,6 +1,9 @@
+; RUN: llc < %s -march=x86 -mattr=+sse2 -mattr=+mmx | grep unpcklpd
+; RUN: llc < %s -march=x86 -mattr=+sse2 -mattr=+mmx | grep unpckhpd
 ; RUN: llc < %s -march=x86 -mattr=+sse2 | grep cvttpd2pi | count 1
 ; RUN: llc < %s -march=x86 -mattr=+sse2 | grep cvtpi2pd | count 1
-; PR2687
+; originally from PR2687, but things don't work that way any more.
+; there are no MMX instructions here; we use XMM.
 
 define <2 x double> @a(<2 x i32> %x) nounwind {
 entry:
@@ -13,3 +16,20 @@
   %y = fptosi <2 x double> %x to <2 x i32>
   ret <2 x i32> %y
 }
+
+; This is how to get MMX instructions.
+
+define <2 x double> @a2(x86_mmx %x) nounwind {
+entry:
+  %y = tail call <2 x double> @llvm.x86.sse.cvtpi2pd(x86_mmx %x)
+  ret <2 x double> %y
+}
+
+define x86_mmx @b2(<2 x double> %x) nounwind {
+entry:
+  %y = tail call x86_mmx @llvm.x86.sse.cvttpd2pi (<2 x double> %x)
+  ret x86_mmx %y
+}
+
+declare <2 x double> @llvm.x86.sse.cvtpi2pd(x86_mmx)
+declare x86_mmx @llvm.x86.sse.cvttpd2pi(<2 x double>)

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/2008-10-27-CoalescerBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/2008-10-27-CoalescerBug.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/2008-10-27-CoalescerBug.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/2008-10-27-CoalescerBug.ll Tue Oct 26 19:48:03 2010
@@ -1,6 +1,9 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -stats |& not grep {Number of register spills}
+; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+sse2 -stats |& FileCheck %s
+; Now this test spills one register. But a reload in the loop is cheaper than
+; the divsd so it's a win.
 
 define fastcc void @fourn(double* %data, i32 %isign) nounwind {
+; CHECK: fourn
 entry:
 	br label %bb
 
@@ -11,6 +14,11 @@
 	%1 = icmp sgt i32 %0, 2		; <i1> [#uses=1]
 	br i1 %1, label %bb30.loopexit, label %bb
 
+; CHECK: %bb30.loopexit
+; CHECK: divsd %xmm0
+; CHECK: movsd %xmm0, 16(%esp)
+; CHECK: .align
+; CHECK-NEXT: %bb3
 bb3:		; preds = %bb30.loopexit, %bb25, %bb3
 	%2 = load i32* null, align 4		; <i32> [#uses=1]
 	%3 = mul i32 %2, 0		; <i32> [#uses=1]

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/2009-02-26-MachineLICMBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/2009-02-26-MachineLICMBug.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/2009-02-26-MachineLICMBug.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/2009-02-26-MachineLICMBug.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86-64 -mattr=+sse3,+sse41 -stats |& grep {7 machine-licm}
+; RUN: llc < %s -march=x86-64 -mattr=+sse3,+sse41 -stats |& grep {8 machine-licm}
 ; RUN: llc < %s -march=x86-64 -mattr=+sse3,+sse41 | FileCheck %s
 ; rdar://6627786
 ; rdar://7792037

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/2009-03-13-PHIElimBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/2009-03-13-PHIElimBug.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/2009-03-13-PHIElimBug.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/2009-03-13-PHIElimBug.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 | FileCheck %s
+; RUN: llc < %s -mtriple=i686-linux | FileCheck %s
 ; Check the register copy comes after the call to f and before the call to g
 ; PR3784
 

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/2009-03-16-PHIElimInLPad.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/2009-03-16-PHIElimInLPad.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/2009-03-16-PHIElimInLPad.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/2009-03-16-PHIElimInLPad.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 -asm-verbose | FileCheck %s
+; RUN: llc < %s -mtriple=i686-linux -asm-verbose | FileCheck %s
 ; Check that register copies in the landing pad come after the EH_LABEL
 
 declare i32 @f()

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/2009-03-23-MultiUseSched.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/2009-03-23-MultiUseSched.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/2009-03-23-MultiUseSched.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/2009-03-23-MultiUseSched.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=x86_64-linux -relocation-model=static -stats -info-output-file - > %t
+; RUN: llc < %s -mtriple=x86_64-linux -relocation-model=static -o /dev/null -stats -info-output-file - > %t
 ; RUN: not grep spill %t
 ; RUN: not grep {%rsp} %t
 ; RUN: not grep {%rbp} %t

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/2009-06-05-ScalarToVectorByteMMX.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/2009-06-05-ScalarToVectorByteMMX.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/2009-06-05-ScalarToVectorByteMMX.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/2009-06-05-ScalarToVectorByteMMX.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 -mattr=+mmx | not grep movl
+; RUN: llc < %s -march=x86 -mattr=+mmx,+sse2 | not grep movl
 
 define <8 x i8> @a(i8 zeroext %x) nounwind {
   %r = insertelement <8 x i8> undef, i8 %x, i32 0

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/2009-08-02-mmx-scalar-to-vector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/2009-08-02-mmx-scalar-to-vector.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/2009-08-02-mmx-scalar-to-vector.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/2009-08-02-mmx-scalar-to-vector.ll Tue Oct 26 19:48:03 2010
@@ -1,10 +1,12 @@
 ; RUN: llc < %s -march=x86-64
 ; PR4669
-declare <1 x i64> @llvm.x86.mmx.pslli.q(<1 x i64>, i32)
+declare x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx, i32)
 
 define <1 x i64> @test(i64 %t) {
 entry:
 	%t1 = insertelement <1 x i64> undef, i64 %t, i32 0
-	%t2 = tail call <1 x i64> @llvm.x86.mmx.pslli.q(<1 x i64> %t1, i32 48)
-	ret <1 x i64> %t2
+        %t0 = bitcast <1 x i64> %t1 to x86_mmx
+	%t2 = tail call x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx %t0, i32 48)
+        %t3 = bitcast x86_mmx %t2 to <1 x i64>
+	ret <1 x i64> %t3
 }

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/2010-04-23-mmx-movdq2q.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/2010-04-23-mmx-movdq2q.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/2010-04-23-mmx-movdq2q.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/2010-04-23-mmx-movdq2q.ll Tue Oct 26 19:48:03 2010
@@ -1,12 +1,12 @@
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | FileCheck %s
+; There are no MMX operations here, so we use XMM or i64.
 
 define void @ti8(double %a, double %b) nounwind {
 entry:
         %tmp1 = bitcast double %a to <8 x i8>
-; CHECK: movdq2q
         %tmp2 = bitcast double %b to <8 x i8>
-; CHECK: movdq2q
         %tmp3 = add <8 x i8> %tmp1, %tmp2
+; CHECK:  paddb %xmm1, %xmm0
         store <8 x i8> %tmp3, <8 x i8>* null
         ret void
 }
@@ -14,10 +14,9 @@
 define void @ti16(double %a, double %b) nounwind {
 entry:
         %tmp1 = bitcast double %a to <4 x i16>
-; CHECK: movdq2q
         %tmp2 = bitcast double %b to <4 x i16>
-; CHECK: movdq2q
         %tmp3 = add <4 x i16> %tmp1, %tmp2
+; CHECK:  paddw %xmm1, %xmm0
         store <4 x i16> %tmp3, <4 x i16>* null
         ret void
 }
@@ -25,10 +24,9 @@
 define void @ti32(double %a, double %b) nounwind {
 entry:
         %tmp1 = bitcast double %a to <2 x i32>
-; CHECK: movdq2q
         %tmp2 = bitcast double %b to <2 x i32>
-; CHECK: movdq2q
         %tmp3 = add <2 x i32> %tmp1, %tmp2
+; CHECK:  paddd %xmm1, %xmm0
         store <2 x i32> %tmp3, <2 x i32>* null
         ret void
 }
@@ -36,10 +34,60 @@
 define void @ti64(double %a, double %b) nounwind {
 entry:
         %tmp1 = bitcast double %a to <1 x i64>
-; CHECK: movdq2q
         %tmp2 = bitcast double %b to <1 x i64>
-; CHECK: movdq2q
         %tmp3 = add <1 x i64> %tmp1, %tmp2
+; CHECK:  addq  %rax, %rcx
         store <1 x i64> %tmp3, <1 x i64>* null
         ret void
 }
+
+; MMX intrinsics calls get us MMX instructions.
+
+define void @ti8a(double %a, double %b) nounwind {
+entry:
+        %tmp1 = bitcast double %a to x86_mmx
+; CHECK: movdq2q
+        %tmp2 = bitcast double %b to x86_mmx
+; CHECK: movdq2q
+        %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %tmp1, x86_mmx %tmp2)
+        store x86_mmx %tmp3, x86_mmx* null
+        ret void
+}
+
+define void @ti16a(double %a, double %b) nounwind {
+entry:
+        %tmp1 = bitcast double %a to x86_mmx
+; CHECK: movdq2q
+        %tmp2 = bitcast double %b to x86_mmx
+; CHECK: movdq2q
+        %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %tmp1, x86_mmx %tmp2)
+        store x86_mmx %tmp3, x86_mmx* null
+        ret void
+}
+
+define void @ti32a(double %a, double %b) nounwind {
+entry:
+        %tmp1 = bitcast double %a to x86_mmx
+; CHECK: movdq2q
+        %tmp2 = bitcast double %b to x86_mmx
+; CHECK: movdq2q
+        %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %tmp1, x86_mmx %tmp2)
+        store x86_mmx %tmp3, x86_mmx* null
+        ret void
+}
+
+define void @ti64a(double %a, double %b) nounwind {
+entry:
+        %tmp1 = bitcast double %a to x86_mmx
+; CHECK: movdq2q
+        %tmp2 = bitcast double %b to x86_mmx
+; CHECK: movdq2q
+        %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %tmp1, x86_mmx %tmp2)
+        store x86_mmx %tmp3, x86_mmx* null
+        ret void
+}
+ 
+declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx)

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll Tue Oct 26 19:48:03 2010
@@ -11,7 +11,7 @@
 ; Verify that %esi gets spilled before the call.
 ; CHECK: Z4test1SiS
 ; CHECK: movl %esi,{{.*}}(%ebp) 
-; CHECK: call __Z6throwsv
+; CHECK: calll __Z6throwsv
 
 define i8* @_Z4test1SiS_(%struct.S* byval %s1, i32 %n, %struct.S* byval %s2) ssp {
 entry:
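
The 32-bit call mnemonic is now printed with its size suffix, calll, which is why this check and the long run of Darwin PIC checks in abi-isel.ll below change call to calll; the i386 PIC-base idiom they match is otherwise unchanged. The sequence being matched (the label number is per-function):

;   calll   L0$pb        ; the call pushes the return address...
; L0$pb:
;   popl    %eax         ; ...which is popped to recover the PIC base in %eax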

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/3addr-or.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/3addr-or.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/3addr-or.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/3addr-or.ll Tue Oct 26 19:48:03 2010
@@ -1,9 +1,9 @@
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck %s
 ; rdar://7527734
 
-define i32 @test(i32 %x) nounwind readnone ssp {
+define i32 @test1(i32 %x) nounwind readnone ssp {
 entry:
-; CHECK: test:
+; CHECK: test1:
 ; CHECK: leal 3(%rdi), %eax
   %0 = shl i32 %x, 5                              ; <i32> [#uses=1]
   %1 = or i32 %0, 3                               ; <i32> [#uses=1]
@@ -25,3 +25,37 @@
   %H = or i64 %G, %E                              ; <i64> [#uses=1]
   ret i64 %H
 }
+
+;; Test that OR is only emitted as LEA, not as ADD.
+
+define void @test3(i32 %x, i32* %P) nounwind readnone ssp {
+entry:
+; No reason to emit an add here, should be an or.
+; CHECK: test3:
+; CHECK: orl $3, %edi
+  %0 = shl i32 %x, 5
+  %1 = or i32 %0, 3
+  store i32 %1, i32* %P
+  ret void
+}
+
+define i32 @test4(i32 %a, i32 %b) nounwind readnone ssp {
+entry:
+  %and = and i32 %a, 6
+  %and2 = and i32 %b, 16
+  %or = or i32 %and2, %and
+  ret i32 %or
+; CHECK: test4:
+; CHECK: leal	(%rsi,%rdi), %eax
+}
+
+define void @test5(i32 %a, i32 %b, i32* nocapture %P) nounwind ssp {
+entry:
+  %and = and i32 %a, 6
+  %and2 = and i32 %b, 16
+  %or = or i32 %and2, %and
+  store i32 %or, i32* %P, align 4
+  ret void
+; CHECK: test5:
+; CHECK: orl
+}
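
test4 relies on the operands having disjoint bit masks: %and keeps only bits 1-2 (mask 6) and %and2 only bit 4 (mask 16), so (a & 6) | (b & 16) equals (a & 6) + (b & 16) and the OR can be emitted as a three-address leal, while test3 and test5 expect a plain orl where the result does not need to land in a new register. Worked out on the masks:

;   %and  = a & 0b00110   ; only bits 1-2 can be set
;   %and2 = b & 0b10000   ; only bit 4 can be set
;   disjoint masks  =>  %and | %and2 == %and + %and2  =>  leal (%rsi,%rdi), %eax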

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/GC/dg.exp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/GC/dg.exp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/GC/dg.exp (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/GC/dg.exp Tue Oct 26 19:48:03 2010
@@ -1,3 +1,5 @@
 load_lib llvm.exp
 
-RunLLVMTests [lsort [glob -nocomplain $srcdir/$subdir/*.{ll,c,cpp}]]
+if { [llvm_supports_target X86] } {
+  RunLLVMTests [lsort [glob -nocomplain $srcdir/$subdir/*.{ll,c,cpp}]]
+}

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/abi-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/abi-isel.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/abi-isel.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/abi-isel.ll Tue Oct 26 19:48:03 2010
@@ -72,7 +72,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _foo00:
-; DARWIN-32-PIC: 	call	L0$pb
+; DARWIN-32-PIC: 	calll	L0$pb
 ; DARWIN-32-PIC-NEXT: L0$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	L_src$non_lazy_ptr-L0$pb(%eax), %ecx
@@ -144,7 +144,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _fxo00:
-; DARWIN-32-PIC: 	call	L1$pb
+; DARWIN-32-PIC: 	calll	L1$pb
 ; DARWIN-32-PIC-NEXT: L1$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	L_xsrc$non_lazy_ptr-L1$pb(%eax), %ecx
@@ -208,7 +208,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _foo01:
-; DARWIN-32-PIC: 	call	L2$pb
+; DARWIN-32-PIC: 	calll	L2$pb
 ; DARWIN-32-PIC-NEXT: L2$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	L_dst$non_lazy_ptr-L2$pb(%eax), %ecx
@@ -268,7 +268,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _fxo01:
-; DARWIN-32-PIC: 	call	L3$pb
+; DARWIN-32-PIC: 	calll	L3$pb
 ; DARWIN-32-PIC-NEXT: L3$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	L_xdst$non_lazy_ptr-L3$pb(%eax), %ecx
@@ -342,7 +342,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _foo02:
-; DARWIN-32-PIC: 	call	L4$pb
+; DARWIN-32-PIC: 	calll	L4$pb
 ; DARWIN-32-PIC-NEXT: L4$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	L_src$non_lazy_ptr-L4$pb(%eax), %ecx
@@ -424,7 +424,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _fxo02:
-; DARWIN-32-PIC: 	call	L5$pb
+; DARWIN-32-PIC: 	calll	L5$pb
 ; DARWIN-32-PIC-NEXT: L5$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	L_xsrc$non_lazy_ptr-L5$pb(%eax), %ecx
@@ -497,7 +497,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _foo03:
-; DARWIN-32-PIC: 	call	L6$pb
+; DARWIN-32-PIC: 	calll	L6$pb
 ; DARWIN-32-PIC-NEXT: L6$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	_dsrc-L6$pb(%eax), %ecx
@@ -551,7 +551,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _foo04:
-; DARWIN-32-PIC: 	call	L7$pb
+; DARWIN-32-PIC: 	calll	L7$pb
 ; DARWIN-32-PIC-NEXT: L7$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	leal	_ddst-L7$pb(%eax), %ecx
@@ -619,7 +619,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _foo05:
-; DARWIN-32-PIC: 	call	L8$pb
+; DARWIN-32-PIC: 	calll	L8$pb
 ; DARWIN-32-PIC-NEXT: L8$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	_dsrc-L8$pb(%eax), %ecx
@@ -682,7 +682,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _foo06:
-; DARWIN-32-PIC: 	call	L9$pb
+; DARWIN-32-PIC: 	calll	L9$pb
 ; DARWIN-32-PIC-NEXT: L9$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	_lsrc-L9$pb(%eax), %ecx
@@ -735,7 +735,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _foo07:
-; DARWIN-32-PIC: 	call	L10$pb
+; DARWIN-32-PIC: 	calll	L10$pb
 ; DARWIN-32-PIC-NEXT: L10$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	leal	_ldst-L10$pb(%eax), %ecx
@@ -801,7 +801,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _foo08:
-; DARWIN-32-PIC: 	call	L11$pb
+; DARWIN-32-PIC: 	calll	L11$pb
 ; DARWIN-32-PIC-NEXT: L11$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	_lsrc-L11$pb(%eax), %ecx
@@ -868,7 +868,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _qux00:
-; DARWIN-32-PIC: 	call	L12$pb
+; DARWIN-32-PIC: 	calll	L12$pb
 ; DARWIN-32-PIC-NEXT: L12$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	L_src$non_lazy_ptr-L12$pb(%eax), %ecx
@@ -939,7 +939,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _qxx00:
-; DARWIN-32-PIC: 	call	L13$pb
+; DARWIN-32-PIC: 	calll	L13$pb
 ; DARWIN-32-PIC-NEXT: L13$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	L_xsrc$non_lazy_ptr-L13$pb(%eax), %ecx
@@ -1005,7 +1005,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _qux01:
-; DARWIN-32-PIC: 	call	L14$pb
+; DARWIN-32-PIC: 	calll	L14$pb
 ; DARWIN-32-PIC-NEXT: L14$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	L_dst$non_lazy_ptr-L14$pb(%eax), %ecx
@@ -1071,7 +1071,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _qxx01:
-; DARWIN-32-PIC: 	call	L15$pb
+; DARWIN-32-PIC: 	calll	L15$pb
 ; DARWIN-32-PIC-NEXT: L15$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	L_xdst$non_lazy_ptr-L15$pb(%eax), %ecx
@@ -1150,7 +1150,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _qux02:
-; DARWIN-32-PIC: 	call	L16$pb
+; DARWIN-32-PIC: 	calll	L16$pb
 ; DARWIN-32-PIC-NEXT: L16$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	L_src$non_lazy_ptr-L16$pb(%eax), %ecx
@@ -1233,7 +1233,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _qxx02:
-; DARWIN-32-PIC: 	call	L17$pb
+; DARWIN-32-PIC: 	calll	L17$pb
 ; DARWIN-32-PIC-NEXT: L17$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	L_xsrc$non_lazy_ptr-L17$pb(%eax), %ecx
@@ -1306,7 +1306,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _qux03:
-; DARWIN-32-PIC: 	call	L18$pb
+; DARWIN-32-PIC: 	calll	L18$pb
 ; DARWIN-32-PIC-NEXT: L18$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	(_dsrc-L18$pb)+64(%eax), %ecx
@@ -1361,7 +1361,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _qux04:
-; DARWIN-32-PIC: 	call	L19$pb
+; DARWIN-32-PIC: 	calll	L19$pb
 ; DARWIN-32-PIC-NEXT: L19$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	leal	(_ddst-L19$pb)+64(%eax), %ecx
@@ -1430,7 +1430,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _qux05:
-; DARWIN-32-PIC: 	call	L20$pb
+; DARWIN-32-PIC: 	calll	L20$pb
 ; DARWIN-32-PIC-NEXT: L20$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	(_dsrc-L20$pb)+64(%eax), %ecx
@@ -1493,7 +1493,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _qux06:
-; DARWIN-32-PIC: 	call	L21$pb
+; DARWIN-32-PIC: 	calll	L21$pb
 ; DARWIN-32-PIC-NEXT: L21$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	(_lsrc-L21$pb)+64(%eax), %ecx
@@ -1546,7 +1546,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _qux07:
-; DARWIN-32-PIC: 	call	L22$pb
+; DARWIN-32-PIC: 	calll	L22$pb
 ; DARWIN-32-PIC-NEXT: L22$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	leal	(_ldst-L22$pb)+64(%eax), %ecx
@@ -1613,7 +1613,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _qux08:
-; DARWIN-32-PIC: 	call	L23$pb
+; DARWIN-32-PIC: 	calll	L23$pb
 ; DARWIN-32-PIC-NEXT: L23$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	(_lsrc-L23$pb)+64(%eax), %ecx
@@ -1686,7 +1686,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _ind00:
-; DARWIN-32-PIC: 	call	L24$pb
+; DARWIN-32-PIC: 	calll	L24$pb
 ; DARWIN-32-PIC-NEXT: L24$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -1764,7 +1764,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _ixd00:
-; DARWIN-32-PIC: 	call	L25$pb
+; DARWIN-32-PIC: 	calll	L25$pb
 ; DARWIN-32-PIC-NEXT: L25$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -1840,7 +1840,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _ind01:
-; DARWIN-32-PIC: 	call	L26$pb
+; DARWIN-32-PIC: 	calll	L26$pb
 ; DARWIN-32-PIC-NEXT: L26$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -1916,7 +1916,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _ixd01:
-; DARWIN-32-PIC: 	call	L27$pb
+; DARWIN-32-PIC: 	calll	L27$pb
 ; DARWIN-32-PIC-NEXT: L27$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -2001,7 +2001,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _ind02:
-; DARWIN-32-PIC: 	call	L28$pb
+; DARWIN-32-PIC: 	calll	L28$pb
 ; DARWIN-32-PIC-NEXT: L28$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -2090,7 +2090,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _ixd02:
-; DARWIN-32-PIC: 	call	L29$pb
+; DARWIN-32-PIC: 	calll	L29$pb
 ; DARWIN-32-PIC-NEXT: L29$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -2170,7 +2170,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _ind03:
-; DARWIN-32-PIC: 	call	L30$pb
+; DARWIN-32-PIC: 	calll	L30$pb
 ; DARWIN-32-PIC-NEXT: L30$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -2242,7 +2242,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _ind04:
-; DARWIN-32-PIC: 	call	L31$pb
+; DARWIN-32-PIC: 	calll	L31$pb
 ; DARWIN-32-PIC-NEXT: L31$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -2320,7 +2320,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _ind05:
-; DARWIN-32-PIC: 	call	L32$pb
+; DARWIN-32-PIC: 	calll	L32$pb
 ; DARWIN-32-PIC-NEXT: L32$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -2395,7 +2395,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _ind06:
-; DARWIN-32-PIC: 	call	L33$pb
+; DARWIN-32-PIC: 	calll	L33$pb
 ; DARWIN-32-PIC-NEXT: L33$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -2466,7 +2466,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _ind07:
-; DARWIN-32-PIC: 	call	L34$pb
+; DARWIN-32-PIC: 	calll	L34$pb
 ; DARWIN-32-PIC-NEXT: L34$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -2543,7 +2543,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _ind08:
-; DARWIN-32-PIC: 	call	L35$pb
+; DARWIN-32-PIC: 	calll	L35$pb
 ; DARWIN-32-PIC-NEXT: L35$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -2621,7 +2621,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _off00:
-; DARWIN-32-PIC: 	call	L36$pb
+; DARWIN-32-PIC: 	calll	L36$pb
 ; DARWIN-32-PIC-NEXT: L36$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -2700,7 +2700,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _oxf00:
-; DARWIN-32-PIC: 	call	L37$pb
+; DARWIN-32-PIC: 	calll	L37$pb
 ; DARWIN-32-PIC-NEXT: L37$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -2777,7 +2777,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _off01:
-; DARWIN-32-PIC: 	call	L38$pb
+; DARWIN-32-PIC: 	calll	L38$pb
 ; DARWIN-32-PIC-NEXT: L38$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -2854,7 +2854,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _oxf01:
-; DARWIN-32-PIC: 	call	L39$pb
+; DARWIN-32-PIC: 	calll	L39$pb
 ; DARWIN-32-PIC-NEXT: L39$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -2940,7 +2940,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _off02:
-; DARWIN-32-PIC: 	call	L40$pb
+; DARWIN-32-PIC: 	calll	L40$pb
 ; DARWIN-32-PIC-NEXT: L40$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -3030,7 +3030,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _oxf02:
-; DARWIN-32-PIC: 	call	L41$pb
+; DARWIN-32-PIC: 	calll	L41$pb
 ; DARWIN-32-PIC-NEXT: L41$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -3111,7 +3111,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _off03:
-; DARWIN-32-PIC: 	call	L42$pb
+; DARWIN-32-PIC: 	calll	L42$pb
 ; DARWIN-32-PIC-NEXT: L42$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -3184,7 +3184,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _off04:
-; DARWIN-32-PIC: 	call	L43$pb
+; DARWIN-32-PIC: 	calll	L43$pb
 ; DARWIN-32-PIC-NEXT: L43$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -3263,7 +3263,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _off05:
-; DARWIN-32-PIC: 	call	L44$pb
+; DARWIN-32-PIC: 	calll	L44$pb
 ; DARWIN-32-PIC-NEXT: L44$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -3339,7 +3339,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _off06:
-; DARWIN-32-PIC: 	call	L45$pb
+; DARWIN-32-PIC: 	calll	L45$pb
 ; DARWIN-32-PIC-NEXT: L45$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -3411,7 +3411,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _off07:
-; DARWIN-32-PIC: 	call	L46$pb
+; DARWIN-32-PIC: 	calll	L46$pb
 ; DARWIN-32-PIC-NEXT: L46$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -3489,7 +3489,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _off08:
-; DARWIN-32-PIC: 	call	L47$pb
+; DARWIN-32-PIC: 	calll	L47$pb
 ; DARWIN-32-PIC-NEXT: L47$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -3560,7 +3560,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _moo00:
-; DARWIN-32-PIC: 	call	L48$pb
+; DARWIN-32-PIC: 	calll	L48$pb
 ; DARWIN-32-PIC-NEXT: L48$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	L_src$non_lazy_ptr-L48$pb(%eax), %ecx
@@ -3626,7 +3626,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _moo01:
-; DARWIN-32-PIC: 	call	L49$pb
+; DARWIN-32-PIC: 	calll	L49$pb
 ; DARWIN-32-PIC-NEXT: L49$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	$262144, %ecx
@@ -3705,7 +3705,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _moo02:
-; DARWIN-32-PIC: 	call	L50$pb
+; DARWIN-32-PIC: 	calll	L50$pb
 ; DARWIN-32-PIC-NEXT: L50$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	L_src$non_lazy_ptr-L50$pb(%eax), %ecx
@@ -3778,7 +3778,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _moo03:
-; DARWIN-32-PIC: 	call	L51$pb
+; DARWIN-32-PIC: 	calll	L51$pb
 ; DARWIN-32-PIC-NEXT: L51$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	(_dsrc-L51$pb)+262144(%eax), %ecx
@@ -3833,7 +3833,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _moo04:
-; DARWIN-32-PIC: 	call	L52$pb
+; DARWIN-32-PIC: 	calll	L52$pb
 ; DARWIN-32-PIC-NEXT: L52$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	leal	(_ddst-L52$pb)+262144(%eax), %ecx
@@ -3902,7 +3902,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _moo05:
-; DARWIN-32-PIC: 	call	L53$pb
+; DARWIN-32-PIC: 	calll	L53$pb
 ; DARWIN-32-PIC-NEXT: L53$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	(_dsrc-L53$pb)+262144(%eax), %ecx
@@ -3965,7 +3965,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _moo06:
-; DARWIN-32-PIC: 	call	L54$pb
+; DARWIN-32-PIC: 	calll	L54$pb
 ; DARWIN-32-PIC-NEXT: L54$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	(_lsrc-L54$pb)+262144(%eax), %ecx
@@ -4018,7 +4018,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _moo07:
-; DARWIN-32-PIC: 	call	L55$pb
+; DARWIN-32-PIC: 	calll	L55$pb
 ; DARWIN-32-PIC-NEXT: L55$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	leal	(_ldst-L55$pb)+262144(%eax), %ecx
@@ -4085,7 +4085,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _moo08:
-; DARWIN-32-PIC: 	call	L56$pb
+; DARWIN-32-PIC: 	calll	L56$pb
 ; DARWIN-32-PIC-NEXT: L56$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	(_lsrc-L56$pb)+262144(%eax), %ecx
@@ -4159,7 +4159,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _big00:
-; DARWIN-32-PIC: 	call	L57$pb
+; DARWIN-32-PIC: 	calll	L57$pb
 ; DARWIN-32-PIC-NEXT: L57$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -4236,7 +4236,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _big01:
-; DARWIN-32-PIC: 	call	L58$pb
+; DARWIN-32-PIC: 	calll	L58$pb
 ; DARWIN-32-PIC-NEXT: L58$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -4322,7 +4322,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _big02:
-; DARWIN-32-PIC: 	call	L59$pb
+; DARWIN-32-PIC: 	calll	L59$pb
 ; DARWIN-32-PIC-NEXT: L59$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -4403,7 +4403,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _big03:
-; DARWIN-32-PIC: 	call	L60$pb
+; DARWIN-32-PIC: 	calll	L60$pb
 ; DARWIN-32-PIC-NEXT: L60$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -4476,7 +4476,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _big04:
-; DARWIN-32-PIC: 	call	L61$pb
+; DARWIN-32-PIC: 	calll	L61$pb
 ; DARWIN-32-PIC-NEXT: L61$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -4555,7 +4555,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _big05:
-; DARWIN-32-PIC: 	call	L62$pb
+; DARWIN-32-PIC: 	calll	L62$pb
 ; DARWIN-32-PIC-NEXT: L62$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -4631,7 +4631,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _big06:
-; DARWIN-32-PIC: 	call	L63$pb
+; DARWIN-32-PIC: 	calll	L63$pb
 ; DARWIN-32-PIC-NEXT: L63$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -4703,7 +4703,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _big07:
-; DARWIN-32-PIC: 	call	L64$pb
+; DARWIN-32-PIC: 	calll	L64$pb
 ; DARWIN-32-PIC-NEXT: L64$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -4781,7 +4781,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _big08:
-; DARWIN-32-PIC: 	call	L65$pb
+; DARWIN-32-PIC: 	calll	L65$pb
 ; DARWIN-32-PIC-NEXT: L65$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -4840,7 +4840,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _bar00:
-; DARWIN-32-PIC: 	call	L66$pb
+; DARWIN-32-PIC: 	calll	L66$pb
 ; DARWIN-32-PIC-NEXT: L66$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	L_src$non_lazy_ptr-L66$pb(%eax), %eax
@@ -4887,7 +4887,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _bxr00:
-; DARWIN-32-PIC: 	call	L67$pb
+; DARWIN-32-PIC: 	calll	L67$pb
 ; DARWIN-32-PIC-NEXT: L67$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	L_xsrc$non_lazy_ptr-L67$pb(%eax), %eax
@@ -4934,7 +4934,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _bar01:
-; DARWIN-32-PIC: 	call	L68$pb
+; DARWIN-32-PIC: 	calll	L68$pb
 ; DARWIN-32-PIC-NEXT: L68$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	L_dst$non_lazy_ptr-L68$pb(%eax), %eax
@@ -4981,7 +4981,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _bxr01:
-; DARWIN-32-PIC: 	call	L69$pb
+; DARWIN-32-PIC: 	calll	L69$pb
 ; DARWIN-32-PIC-NEXT: L69$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	L_xdst$non_lazy_ptr-L69$pb(%eax), %eax
@@ -5028,7 +5028,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _bar02:
-; DARWIN-32-PIC: 	call	L70$pb
+; DARWIN-32-PIC: 	calll	L70$pb
 ; DARWIN-32-PIC-NEXT: L70$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	L_ptr$non_lazy_ptr-L70$pb(%eax), %eax
@@ -5075,7 +5075,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _bar03:
-; DARWIN-32-PIC: 	call	L71$pb
+; DARWIN-32-PIC: 	calll	L71$pb
 ; DARWIN-32-PIC-NEXT: L71$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	leal	_dsrc-L71$pb(%eax), %eax
@@ -5122,7 +5122,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _bar04:
-; DARWIN-32-PIC: 	call	L72$pb
+; DARWIN-32-PIC: 	calll	L72$pb
 ; DARWIN-32-PIC-NEXT: L72$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	leal	_ddst-L72$pb(%eax), %eax
@@ -5169,7 +5169,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _bar05:
-; DARWIN-32-PIC: 	call	L73$pb
+; DARWIN-32-PIC: 	calll	L73$pb
 ; DARWIN-32-PIC-NEXT: L73$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	leal	_dptr-L73$pb(%eax), %eax
@@ -5216,7 +5216,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _bar06:
-; DARWIN-32-PIC: 	call	L74$pb
+; DARWIN-32-PIC: 	calll	L74$pb
 ; DARWIN-32-PIC-NEXT: L74$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	leal	_lsrc-L74$pb(%eax), %eax
@@ -5263,7 +5263,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _bar07:
-; DARWIN-32-PIC: 	call	L75$pb
+; DARWIN-32-PIC: 	calll	L75$pb
 ; DARWIN-32-PIC-NEXT: L75$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	leal	_ldst-L75$pb(%eax), %eax
@@ -5310,7 +5310,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _bar08:
-; DARWIN-32-PIC: 	call	L76$pb
+; DARWIN-32-PIC: 	calll	L76$pb
 ; DARWIN-32-PIC-NEXT: L76$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	leal	_lptr-L76$pb(%eax), %eax
@@ -5357,7 +5357,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _har00:
-; DARWIN-32-PIC: 	call	L77$pb
+; DARWIN-32-PIC: 	calll	L77$pb
 ; DARWIN-32-PIC-NEXT: L77$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	L_src$non_lazy_ptr-L77$pb(%eax), %eax
@@ -5404,7 +5404,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _hxr00:
-; DARWIN-32-PIC: 	call	L78$pb
+; DARWIN-32-PIC: 	calll	L78$pb
 ; DARWIN-32-PIC-NEXT: L78$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	L_xsrc$non_lazy_ptr-L78$pb(%eax), %eax
@@ -5451,7 +5451,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _har01:
-; DARWIN-32-PIC: 	call	L79$pb
+; DARWIN-32-PIC: 	calll	L79$pb
 ; DARWIN-32-PIC-NEXT: L79$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	L_dst$non_lazy_ptr-L79$pb(%eax), %eax
@@ -5498,7 +5498,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _hxr01:
-; DARWIN-32-PIC: 	call	L80$pb
+; DARWIN-32-PIC: 	calll	L80$pb
 ; DARWIN-32-PIC-NEXT: L80$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	L_xdst$non_lazy_ptr-L80$pb(%eax), %eax
@@ -5549,7 +5549,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _har02:
-; DARWIN-32-PIC: 	call	L81$pb
+; DARWIN-32-PIC: 	calll	L81$pb
 ; DARWIN-32-PIC-NEXT: L81$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	L_ptr$non_lazy_ptr-L81$pb(%eax), %eax
@@ -5600,7 +5600,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _har03:
-; DARWIN-32-PIC: 	call	L82$pb
+; DARWIN-32-PIC: 	calll	L82$pb
 ; DARWIN-32-PIC-NEXT: L82$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	leal	_dsrc-L82$pb(%eax), %eax
@@ -5647,7 +5647,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _har04:
-; DARWIN-32-PIC: 	call	L83$pb
+; DARWIN-32-PIC: 	calll	L83$pb
 ; DARWIN-32-PIC-NEXT: L83$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	leal	_ddst-L83$pb(%eax), %eax
@@ -5697,7 +5697,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _har05:
-; DARWIN-32-PIC: 	call	L84$pb
+; DARWIN-32-PIC: 	calll	L84$pb
 ; DARWIN-32-PIC-NEXT: L84$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	_dptr-L84$pb(%eax), %eax
@@ -5744,7 +5744,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _har06:
-; DARWIN-32-PIC: 	call	L85$pb
+; DARWIN-32-PIC: 	calll	L85$pb
 ; DARWIN-32-PIC-NEXT: L85$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	leal	_lsrc-L85$pb(%eax), %eax
@@ -5791,7 +5791,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _har07:
-; DARWIN-32-PIC: 	call	L86$pb
+; DARWIN-32-PIC: 	calll	L86$pb
 ; DARWIN-32-PIC-NEXT: L86$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	leal	_ldst-L86$pb(%eax), %eax
@@ -5840,7 +5840,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _har08:
-; DARWIN-32-PIC: 	call	L87$pb
+; DARWIN-32-PIC: 	calll	L87$pb
 ; DARWIN-32-PIC-NEXT: L87$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	_lptr-L87$pb(%eax), %eax
@@ -5889,7 +5889,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _bat00:
-; DARWIN-32-PIC: 	call	L88$pb
+; DARWIN-32-PIC: 	calll	L88$pb
 ; DARWIN-32-PIC-NEXT: L88$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	L_src$non_lazy_ptr-L88$pb(%eax), %eax
@@ -5942,7 +5942,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _bxt00:
-; DARWIN-32-PIC: 	call	L89$pb
+; DARWIN-32-PIC: 	calll	L89$pb
 ; DARWIN-32-PIC-NEXT: L89$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	L_xsrc$non_lazy_ptr-L89$pb(%eax), %eax
@@ -5995,7 +5995,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _bat01:
-; DARWIN-32-PIC: 	call	L90$pb
+; DARWIN-32-PIC: 	calll	L90$pb
 ; DARWIN-32-PIC-NEXT: L90$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	L_dst$non_lazy_ptr-L90$pb(%eax), %eax
@@ -6048,7 +6048,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _bxt01:
-; DARWIN-32-PIC: 	call	L91$pb
+; DARWIN-32-PIC: 	calll	L91$pb
 ; DARWIN-32-PIC-NEXT: L91$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	L_xdst$non_lazy_ptr-L91$pb(%eax), %eax
@@ -6110,7 +6110,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _bat02:
-; DARWIN-32-PIC: 	call	L92$pb
+; DARWIN-32-PIC: 	calll	L92$pb
 ; DARWIN-32-PIC-NEXT: L92$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	L_ptr$non_lazy_ptr-L92$pb(%eax), %eax
@@ -6166,7 +6166,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _bat03:
-; DARWIN-32-PIC: 	call	L93$pb
+; DARWIN-32-PIC: 	calll	L93$pb
 ; DARWIN-32-PIC-NEXT: L93$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	leal	(_dsrc-L93$pb)+64(%eax), %eax
@@ -6214,7 +6214,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _bat04:
-; DARWIN-32-PIC: 	call	L94$pb
+; DARWIN-32-PIC: 	calll	L94$pb
 ; DARWIN-32-PIC-NEXT: L94$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	leal	(_ddst-L94$pb)+64(%eax), %eax
@@ -6271,7 +6271,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _bat05:
-; DARWIN-32-PIC: 	call	L95$pb
+; DARWIN-32-PIC: 	calll	L95$pb
 ; DARWIN-32-PIC-NEXT: L95$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	_dptr-L95$pb(%eax), %eax
@@ -6322,7 +6322,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _bat06:
-; DARWIN-32-PIC: 	call	L96$pb
+; DARWIN-32-PIC: 	calll	L96$pb
 ; DARWIN-32-PIC-NEXT: L96$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	leal	(_lsrc-L96$pb)+64(%eax), %eax
@@ -6369,7 +6369,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _bat07:
-; DARWIN-32-PIC: 	call	L97$pb
+; DARWIN-32-PIC: 	calll	L97$pb
 ; DARWIN-32-PIC-NEXT: L97$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	leal	(_ldst-L97$pb)+64(%eax), %eax
@@ -6425,7 +6425,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _bat08:
-; DARWIN-32-PIC: 	call	L98$pb
+; DARWIN-32-PIC: 	calll	L98$pb
 ; DARWIN-32-PIC-NEXT: L98$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	_lptr-L98$pb(%eax), %eax
@@ -6478,7 +6478,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _bam00:
-; DARWIN-32-PIC: 	call	L99$pb
+; DARWIN-32-PIC: 	calll	L99$pb
 ; DARWIN-32-PIC-NEXT: L99$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%ecx
 ; DARWIN-32-PIC-NEXT: 	movl	$262144, %eax
@@ -6531,7 +6531,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _bam01:
-; DARWIN-32-PIC: 	call	L100$pb
+; DARWIN-32-PIC: 	calll	L100$pb
 ; DARWIN-32-PIC-NEXT: L100$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%ecx
 ; DARWIN-32-PIC-NEXT: 	movl	$262144, %eax
@@ -6584,7 +6584,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _bxm01:
-; DARWIN-32-PIC: 	call	L101$pb
+; DARWIN-32-PIC: 	calll	L101$pb
 ; DARWIN-32-PIC-NEXT: L101$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%ecx
 ; DARWIN-32-PIC-NEXT: 	movl	$262144, %eax
@@ -6646,7 +6646,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _bam02:
-; DARWIN-32-PIC: 	call	L102$pb
+; DARWIN-32-PIC: 	calll	L102$pb
 ; DARWIN-32-PIC-NEXT: L102$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	L_ptr$non_lazy_ptr-L102$pb(%eax), %ecx
@@ -6702,7 +6702,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _bam03:
-; DARWIN-32-PIC: 	call	L103$pb
+; DARWIN-32-PIC: 	calll	L103$pb
 ; DARWIN-32-PIC-NEXT: L103$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	leal	(_dsrc-L103$pb)+262144(%eax), %eax
@@ -6750,7 +6750,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _bam04:
-; DARWIN-32-PIC: 	call	L104$pb
+; DARWIN-32-PIC: 	calll	L104$pb
 ; DARWIN-32-PIC-NEXT: L104$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	leal	(_ddst-L104$pb)+262144(%eax), %eax
@@ -6807,7 +6807,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _bam05:
-; DARWIN-32-PIC: 	call	L105$pb
+; DARWIN-32-PIC: 	calll	L105$pb
 ; DARWIN-32-PIC-NEXT: L105$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%ecx
 ; DARWIN-32-PIC-NEXT: 	movl	$262144, %eax
@@ -6858,7 +6858,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _bam06:
-; DARWIN-32-PIC: 	call	L106$pb
+; DARWIN-32-PIC: 	calll	L106$pb
 ; DARWIN-32-PIC-NEXT: L106$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	leal	(_lsrc-L106$pb)+262144(%eax), %eax
@@ -6905,7 +6905,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _bam07:
-; DARWIN-32-PIC: 	call	L107$pb
+; DARWIN-32-PIC: 	calll	L107$pb
 ; DARWIN-32-PIC-NEXT: L107$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	leal	(_ldst-L107$pb)+262144(%eax), %eax
@@ -6961,7 +6961,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _bam08:
-; DARWIN-32-PIC: 	call	L108$pb
+; DARWIN-32-PIC: 	calll	L108$pb
 ; DARWIN-32-PIC-NEXT: L108$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%ecx
 ; DARWIN-32-PIC-NEXT: 	movl	$262144, %eax
@@ -7021,7 +7021,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _cat00:
-; DARWIN-32-PIC: 	call	L109$pb
+; DARWIN-32-PIC: 	calll	L109$pb
 ; DARWIN-32-PIC-NEXT: L109$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -7082,7 +7082,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _cxt00:
-; DARWIN-32-PIC: 	call	L110$pb
+; DARWIN-32-PIC: 	calll	L110$pb
 ; DARWIN-32-PIC-NEXT: L110$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -7143,7 +7143,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _cat01:
-; DARWIN-32-PIC: 	call	L111$pb
+; DARWIN-32-PIC: 	calll	L111$pb
 ; DARWIN-32-PIC-NEXT: L111$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -7204,7 +7204,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _cxt01:
-; DARWIN-32-PIC: 	call	L112$pb
+; DARWIN-32-PIC: 	calll	L112$pb
 ; DARWIN-32-PIC-NEXT: L112$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -7272,7 +7272,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _cat02:
-; DARWIN-32-PIC: 	call	L113$pb
+; DARWIN-32-PIC: 	calll	L113$pb
 ; DARWIN-32-PIC-NEXT: L113$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	L_ptr$non_lazy_ptr-L113$pb(%eax), %eax
@@ -7336,7 +7336,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _cat03:
-; DARWIN-32-PIC: 	call	L114$pb
+; DARWIN-32-PIC: 	calll	L114$pb
 ; DARWIN-32-PIC-NEXT: L114$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -7395,7 +7395,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _cat04:
-; DARWIN-32-PIC: 	call	L115$pb
+; DARWIN-32-PIC: 	calll	L115$pb
 ; DARWIN-32-PIC-NEXT: L115$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -7461,7 +7461,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _cat05:
-; DARWIN-32-PIC: 	call	L116$pb
+; DARWIN-32-PIC: 	calll	L116$pb
 ; DARWIN-32-PIC-NEXT: L116$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -7521,7 +7521,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _cat06:
-; DARWIN-32-PIC: 	call	L117$pb
+; DARWIN-32-PIC: 	calll	L117$pb
 ; DARWIN-32-PIC-NEXT: L117$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -7580,7 +7580,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _cat07:
-; DARWIN-32-PIC: 	call	L118$pb
+; DARWIN-32-PIC: 	calll	L118$pb
 ; DARWIN-32-PIC-NEXT: L118$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -7645,7 +7645,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _cat08:
-; DARWIN-32-PIC: 	call	L119$pb
+; DARWIN-32-PIC: 	calll	L119$pb
 ; DARWIN-32-PIC-NEXT: L119$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -7706,7 +7706,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _cam00:
-; DARWIN-32-PIC: 	call	L120$pb
+; DARWIN-32-PIC: 	calll	L120$pb
 ; DARWIN-32-PIC-NEXT: L120$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -7767,7 +7767,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _cxm00:
-; DARWIN-32-PIC: 	call	L121$pb
+; DARWIN-32-PIC: 	calll	L121$pb
 ; DARWIN-32-PIC-NEXT: L121$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -7828,7 +7828,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _cam01:
-; DARWIN-32-PIC: 	call	L122$pb
+; DARWIN-32-PIC: 	calll	L122$pb
 ; DARWIN-32-PIC-NEXT: L122$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -7889,7 +7889,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _cxm01:
-; DARWIN-32-PIC: 	call	L123$pb
+; DARWIN-32-PIC: 	calll	L123$pb
 ; DARWIN-32-PIC-NEXT: L123$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -7957,7 +7957,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _cam02:
-; DARWIN-32-PIC: 	call	L124$pb
+; DARWIN-32-PIC: 	calll	L124$pb
 ; DARWIN-32-PIC-NEXT: L124$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	L_ptr$non_lazy_ptr-L124$pb(%eax), %eax
@@ -8021,7 +8021,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _cam03:
-; DARWIN-32-PIC: 	call	L125$pb
+; DARWIN-32-PIC: 	calll	L125$pb
 ; DARWIN-32-PIC-NEXT: L125$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -8080,7 +8080,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _cam04:
-; DARWIN-32-PIC: 	call	L126$pb
+; DARWIN-32-PIC: 	calll	L126$pb
 ; DARWIN-32-PIC-NEXT: L126$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -8146,7 +8146,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _cam05:
-; DARWIN-32-PIC: 	call	L127$pb
+; DARWIN-32-PIC: 	calll	L127$pb
 ; DARWIN-32-PIC-NEXT: L127$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -8206,7 +8206,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _cam06:
-; DARWIN-32-PIC: 	call	L128$pb
+; DARWIN-32-PIC: 	calll	L128$pb
 ; DARWIN-32-PIC-NEXT: L128$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -8265,7 +8265,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _cam07:
-; DARWIN-32-PIC: 	call	L129$pb
+; DARWIN-32-PIC: 	calll	L129$pb
 ; DARWIN-32-PIC-NEXT: L129$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -8330,7 +8330,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _cam08:
-; DARWIN-32-PIC: 	call	L130$pb
+; DARWIN-32-PIC: 	calll	L130$pb
 ; DARWIN-32-PIC-NEXT: L130$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	4(%esp), %ecx
@@ -8376,25 +8376,25 @@
 
 ; LINUX-32-STATIC: lcallee:
 ; LINUX-32-STATIC: 	subl	$4, %esp
-; LINUX-32-STATIC-NEXT: 	call	x
-; LINUX-32-STATIC-NEXT: 	call	x
-; LINUX-32-STATIC-NEXT: 	call	x
-; LINUX-32-STATIC-NEXT: 	call	x
-; LINUX-32-STATIC-NEXT: 	call	x
-; LINUX-32-STATIC-NEXT: 	call	x
-; LINUX-32-STATIC-NEXT: 	call	x
+; LINUX-32-STATIC-NEXT: 	calll	x
+; LINUX-32-STATIC-NEXT: 	calll	x
+; LINUX-32-STATIC-NEXT: 	calll	x
+; LINUX-32-STATIC-NEXT: 	calll	x
+; LINUX-32-STATIC-NEXT: 	calll	x
+; LINUX-32-STATIC-NEXT: 	calll	x
+; LINUX-32-STATIC-NEXT: 	calll	x
 ; LINUX-32-STATIC-NEXT: 	addl	$4, %esp
 ; LINUX-32-STATIC-NEXT: 	ret
 
 ; LINUX-32-PIC: lcallee:
 ; LINUX-32-PIC: 	subl	$4, %esp
-; LINUX-32-PIC-NEXT: 	call	x
-; LINUX-32-PIC-NEXT: 	call	x
-; LINUX-32-PIC-NEXT: 	call	x
-; LINUX-32-PIC-NEXT: 	call	x
-; LINUX-32-PIC-NEXT: 	call	x
-; LINUX-32-PIC-NEXT: 	call	x
-; LINUX-32-PIC-NEXT: 	call	x
+; LINUX-32-PIC-NEXT: 	calll	x
+; LINUX-32-PIC-NEXT: 	calll	x
+; LINUX-32-PIC-NEXT: 	calll	x
+; LINUX-32-PIC-NEXT: 	calll	x
+; LINUX-32-PIC-NEXT: 	calll	x
+; LINUX-32-PIC-NEXT: 	calll	x
+; LINUX-32-PIC-NEXT: 	calll	x
 ; LINUX-32-PIC-NEXT: 	addl	$4, %esp
 ; LINUX-32-PIC-NEXT: 	ret
 
@@ -8412,37 +8412,37 @@
 
 ; DARWIN-32-STATIC: _lcallee:
 ; DARWIN-32-STATIC: 	subl	$12, %esp
-; DARWIN-32-STATIC-NEXT: 	call	_x
-; DARWIN-32-STATIC-NEXT: 	call	_x
-; DARWIN-32-STATIC-NEXT: 	call	_x
-; DARWIN-32-STATIC-NEXT: 	call	_x
-; DARWIN-32-STATIC-NEXT: 	call	_x
-; DARWIN-32-STATIC-NEXT: 	call	_x
-; DARWIN-32-STATIC-NEXT: 	call	_x
+; DARWIN-32-STATIC-NEXT: 	calll	_x
+; DARWIN-32-STATIC-NEXT: 	calll	_x
+; DARWIN-32-STATIC-NEXT: 	calll	_x
+; DARWIN-32-STATIC-NEXT: 	calll	_x
+; DARWIN-32-STATIC-NEXT: 	calll	_x
+; DARWIN-32-STATIC-NEXT: 	calll	_x
+; DARWIN-32-STATIC-NEXT: 	calll	_x
 ; DARWIN-32-STATIC-NEXT: 	addl	$12, %esp
 ; DARWIN-32-STATIC-NEXT: 	ret
 
 ; DARWIN-32-DYNAMIC: _lcallee:
 ; DARWIN-32-DYNAMIC: 	subl	$12, %esp
-; DARWIN-32-DYNAMIC-NEXT: 	call	L_x$stub
-; DARWIN-32-DYNAMIC-NEXT: 	call	L_x$stub
-; DARWIN-32-DYNAMIC-NEXT: 	call	L_x$stub
-; DARWIN-32-DYNAMIC-NEXT: 	call	L_x$stub
-; DARWIN-32-DYNAMIC-NEXT: 	call	L_x$stub
-; DARWIN-32-DYNAMIC-NEXT: 	call	L_x$stub
-; DARWIN-32-DYNAMIC-NEXT: 	call	L_x$stub
+; DARWIN-32-DYNAMIC-NEXT: 	calll	L_x$stub
+; DARWIN-32-DYNAMIC-NEXT: 	calll	L_x$stub
+; DARWIN-32-DYNAMIC-NEXT: 	calll	L_x$stub
+; DARWIN-32-DYNAMIC-NEXT: 	calll	L_x$stub
+; DARWIN-32-DYNAMIC-NEXT: 	calll	L_x$stub
+; DARWIN-32-DYNAMIC-NEXT: 	calll	L_x$stub
+; DARWIN-32-DYNAMIC-NEXT: 	calll	L_x$stub
 ; DARWIN-32-DYNAMIC-NEXT: 	addl	$12, %esp
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _lcallee:
 ; DARWIN-32-PIC: 	subl	$12, %esp
-; DARWIN-32-PIC-NEXT: 	call	L_x$stub
-; DARWIN-32-PIC-NEXT: 	call	L_x$stub
-; DARWIN-32-PIC-NEXT: 	call	L_x$stub
-; DARWIN-32-PIC-NEXT: 	call	L_x$stub
-; DARWIN-32-PIC-NEXT: 	call	L_x$stub
-; DARWIN-32-PIC-NEXT: 	call	L_x$stub
-; DARWIN-32-PIC-NEXT: 	call	L_x$stub
+; DARWIN-32-PIC-NEXT: 	calll	L_x$stub
+; DARWIN-32-PIC-NEXT: 	calll	L_x$stub
+; DARWIN-32-PIC-NEXT: 	calll	L_x$stub
+; DARWIN-32-PIC-NEXT: 	calll	L_x$stub
+; DARWIN-32-PIC-NEXT: 	calll	L_x$stub
+; DARWIN-32-PIC-NEXT: 	calll	L_x$stub
+; DARWIN-32-PIC-NEXT: 	calll	L_x$stub
 ; DARWIN-32-PIC-NEXT: 	addl	$12, %esp
 ; DARWIN-32-PIC-NEXT: 	ret
 
@@ -8507,25 +8507,25 @@
 
 ; LINUX-32-STATIC: dcallee:
 ; LINUX-32-STATIC: 	subl	$4, %esp
-; LINUX-32-STATIC-NEXT: 	call	y
-; LINUX-32-STATIC-NEXT: 	call	y
-; LINUX-32-STATIC-NEXT: 	call	y
-; LINUX-32-STATIC-NEXT: 	call	y
-; LINUX-32-STATIC-NEXT: 	call	y
-; LINUX-32-STATIC-NEXT: 	call	y
-; LINUX-32-STATIC-NEXT: 	call	y
+; LINUX-32-STATIC-NEXT: 	calll	y
+; LINUX-32-STATIC-NEXT: 	calll	y
+; LINUX-32-STATIC-NEXT: 	calll	y
+; LINUX-32-STATIC-NEXT: 	calll	y
+; LINUX-32-STATIC-NEXT: 	calll	y
+; LINUX-32-STATIC-NEXT: 	calll	y
+; LINUX-32-STATIC-NEXT: 	calll	y
 ; LINUX-32-STATIC-NEXT: 	addl	$4, %esp
 ; LINUX-32-STATIC-NEXT: 	ret
 
 ; LINUX-32-PIC: dcallee:
 ; LINUX-32-PIC: 	subl	$4, %esp
-; LINUX-32-PIC-NEXT: 	call	y
-; LINUX-32-PIC-NEXT: 	call	y
-; LINUX-32-PIC-NEXT: 	call	y
-; LINUX-32-PIC-NEXT: 	call	y
-; LINUX-32-PIC-NEXT: 	call	y
-; LINUX-32-PIC-NEXT: 	call	y
-; LINUX-32-PIC-NEXT: 	call	y
+; LINUX-32-PIC-NEXT: 	calll	y
+; LINUX-32-PIC-NEXT: 	calll	y
+; LINUX-32-PIC-NEXT: 	calll	y
+; LINUX-32-PIC-NEXT: 	calll	y
+; LINUX-32-PIC-NEXT: 	calll	y
+; LINUX-32-PIC-NEXT: 	calll	y
+; LINUX-32-PIC-NEXT: 	calll	y
 ; LINUX-32-PIC-NEXT: 	addl	$4, %esp
 ; LINUX-32-PIC-NEXT: 	ret
 
@@ -8543,37 +8543,37 @@
 
 ; DARWIN-32-STATIC: _dcallee:
 ; DARWIN-32-STATIC: 	subl	$12, %esp
-; DARWIN-32-STATIC-NEXT: 	call	_y
-; DARWIN-32-STATIC-NEXT: 	call	_y
-; DARWIN-32-STATIC-NEXT: 	call	_y
-; DARWIN-32-STATIC-NEXT: 	call	_y
-; DARWIN-32-STATIC-NEXT: 	call	_y
-; DARWIN-32-STATIC-NEXT: 	call	_y
-; DARWIN-32-STATIC-NEXT: 	call	_y
+; DARWIN-32-STATIC-NEXT: 	calll	_y
+; DARWIN-32-STATIC-NEXT: 	calll	_y
+; DARWIN-32-STATIC-NEXT: 	calll	_y
+; DARWIN-32-STATIC-NEXT: 	calll	_y
+; DARWIN-32-STATIC-NEXT: 	calll	_y
+; DARWIN-32-STATIC-NEXT: 	calll	_y
+; DARWIN-32-STATIC-NEXT: 	calll	_y
 ; DARWIN-32-STATIC-NEXT: 	addl	$12, %esp
 ; DARWIN-32-STATIC-NEXT: 	ret
 
 ; DARWIN-32-DYNAMIC: _dcallee:
 ; DARWIN-32-DYNAMIC: 	subl	$12, %esp
-; DARWIN-32-DYNAMIC-NEXT: 	call	L_y$stub
-; DARWIN-32-DYNAMIC-NEXT: 	call	L_y$stub
-; DARWIN-32-DYNAMIC-NEXT: 	call	L_y$stub
-; DARWIN-32-DYNAMIC-NEXT: 	call	L_y$stub
-; DARWIN-32-DYNAMIC-NEXT: 	call	L_y$stub
-; DARWIN-32-DYNAMIC-NEXT: 	call	L_y$stub
-; DARWIN-32-DYNAMIC-NEXT: 	call	L_y$stub
+; DARWIN-32-DYNAMIC-NEXT: 	calll	L_y$stub
+; DARWIN-32-DYNAMIC-NEXT: 	calll	L_y$stub
+; DARWIN-32-DYNAMIC-NEXT: 	calll	L_y$stub
+; DARWIN-32-DYNAMIC-NEXT: 	calll	L_y$stub
+; DARWIN-32-DYNAMIC-NEXT: 	calll	L_y$stub
+; DARWIN-32-DYNAMIC-NEXT: 	calll	L_y$stub
+; DARWIN-32-DYNAMIC-NEXT: 	calll	L_y$stub
 ; DARWIN-32-DYNAMIC-NEXT: 	addl	$12, %esp
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _dcallee:
 ; DARWIN-32-PIC: 	subl	$12, %esp
-; DARWIN-32-PIC-NEXT: 	call	L_y$stub
-; DARWIN-32-PIC-NEXT: 	call	L_y$stub
-; DARWIN-32-PIC-NEXT: 	call	L_y$stub
-; DARWIN-32-PIC-NEXT: 	call	L_y$stub
-; DARWIN-32-PIC-NEXT: 	call	L_y$stub
-; DARWIN-32-PIC-NEXT: 	call	L_y$stub
-; DARWIN-32-PIC-NEXT: 	call	L_y$stub
+; DARWIN-32-PIC-NEXT: 	calll	L_y$stub
+; DARWIN-32-PIC-NEXT: 	calll	L_y$stub
+; DARWIN-32-PIC-NEXT: 	calll	L_y$stub
+; DARWIN-32-PIC-NEXT: 	calll	L_y$stub
+; DARWIN-32-PIC-NEXT: 	calll	L_y$stub
+; DARWIN-32-PIC-NEXT: 	calll	L_y$stub
+; DARWIN-32-PIC-NEXT: 	calll	L_y$stub
 ; DARWIN-32-PIC-NEXT: 	addl	$12, %esp
 ; DARWIN-32-PIC-NEXT: 	ret
 
@@ -8644,7 +8644,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _address:
-; DARWIN-32-PIC: 	call	L133$pb
+; DARWIN-32-PIC: 	calll	L133$pb
 ; DARWIN-32-PIC-NEXT: L133$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	L_callee$non_lazy_ptr-L133$pb(%eax), %eax
@@ -8693,7 +8693,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _laddress:
-; DARWIN-32-PIC: 	call	L134$pb
+; DARWIN-32-PIC: 	calll	L134$pb
 ; DARWIN-32-PIC-NEXT: L134$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	leal	_lcallee-L134$pb(%eax), %eax
@@ -8740,7 +8740,7 @@
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _daddress:
-; DARWIN-32-PIC: 	call	L135$pb
+; DARWIN-32-PIC: 	calll	L135$pb
 ; DARWIN-32-PIC-NEXT: L135$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	leal	_dcallee-L135$pb(%eax), %eax
@@ -8771,15 +8771,15 @@
 
 ; LINUX-32-STATIC: caller:
 ; LINUX-32-STATIC: 	subl	$4, %esp
-; LINUX-32-STATIC-NEXT: 	call	callee
-; LINUX-32-STATIC-NEXT: 	call	callee
+; LINUX-32-STATIC-NEXT: 	calll	callee
+; LINUX-32-STATIC-NEXT: 	calll	callee
 ; LINUX-32-STATIC-NEXT: 	addl	$4, %esp
 ; LINUX-32-STATIC-NEXT: 	ret
 
 ; LINUX-32-PIC: caller:
 ; LINUX-32-PIC: 	subl	$4, %esp
-; LINUX-32-PIC-NEXT: 	call	callee
-; LINUX-32-PIC-NEXT: 	call	callee
+; LINUX-32-PIC-NEXT: 	calll	callee
+; LINUX-32-PIC-NEXT: 	calll	callee
 ; LINUX-32-PIC-NEXT: 	addl	$4, %esp
 ; LINUX-32-PIC-NEXT: 	ret
 
@@ -8792,22 +8792,22 @@
 
 ; DARWIN-32-STATIC: _caller:
 ; DARWIN-32-STATIC: 	subl	$12, %esp
-; DARWIN-32-STATIC-NEXT: 	call	_callee
-; DARWIN-32-STATIC-NEXT: 	call	_callee
+; DARWIN-32-STATIC-NEXT: 	calll	_callee
+; DARWIN-32-STATIC-NEXT: 	calll	_callee
 ; DARWIN-32-STATIC-NEXT: 	addl	$12, %esp
 ; DARWIN-32-STATIC-NEXT: 	ret
 
 ; DARWIN-32-DYNAMIC: _caller:
 ; DARWIN-32-DYNAMIC: 	subl	$12, %esp
-; DARWIN-32-DYNAMIC-NEXT: 	call	L_callee$stub
-; DARWIN-32-DYNAMIC-NEXT: 	call	L_callee$stub
+; DARWIN-32-DYNAMIC-NEXT: 	calll	L_callee$stub
+; DARWIN-32-DYNAMIC-NEXT: 	calll	L_callee$stub
 ; DARWIN-32-DYNAMIC-NEXT: 	addl	$12, %esp
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _caller:
 ; DARWIN-32-PIC: 	subl	$12, %esp
-; DARWIN-32-PIC-NEXT: 	call	L_callee$stub
-; DARWIN-32-PIC-NEXT: 	call	L_callee$stub
+; DARWIN-32-PIC-NEXT: 	calll	L_callee$stub
+; DARWIN-32-PIC-NEXT: 	calll	L_callee$stub
 ; DARWIN-32-PIC-NEXT: 	addl	$12, %esp
 ; DARWIN-32-PIC-NEXT: 	ret
 
@@ -8845,15 +8845,15 @@
 
 ; LINUX-32-STATIC: dcaller:
 ; LINUX-32-STATIC: 	subl	$4, %esp
-; LINUX-32-STATIC-NEXT: 	call	dcallee
-; LINUX-32-STATIC-NEXT: 	call	dcallee
+; LINUX-32-STATIC-NEXT: 	calll	dcallee
+; LINUX-32-STATIC-NEXT: 	calll	dcallee
 ; LINUX-32-STATIC-NEXT: 	addl	$4, %esp
 ; LINUX-32-STATIC-NEXT: 	ret
 
 ; LINUX-32-PIC: dcaller:
 ; LINUX-32-PIC: 	subl	$4, %esp
-; LINUX-32-PIC-NEXT: 	call	dcallee
-; LINUX-32-PIC-NEXT: 	call	dcallee
+; LINUX-32-PIC-NEXT: 	calll	dcallee
+; LINUX-32-PIC-NEXT: 	calll	dcallee
 ; LINUX-32-PIC-NEXT: 	addl	$4, %esp
 ; LINUX-32-PIC-NEXT: 	ret
 
@@ -8866,22 +8866,22 @@
 
 ; DARWIN-32-STATIC: _dcaller:
 ; DARWIN-32-STATIC: 	subl	$12, %esp
-; DARWIN-32-STATIC-NEXT: 	call	_dcallee
-; DARWIN-32-STATIC-NEXT: 	call	_dcallee
+; DARWIN-32-STATIC-NEXT: 	calll	_dcallee
+; DARWIN-32-STATIC-NEXT: 	calll	_dcallee
 ; DARWIN-32-STATIC-NEXT: 	addl	$12, %esp
 ; DARWIN-32-STATIC-NEXT: 	ret
 
 ; DARWIN-32-DYNAMIC: _dcaller:
 ; DARWIN-32-DYNAMIC: 	subl	$12, %esp
-; DARWIN-32-DYNAMIC-NEXT: 	call	_dcallee
-; DARWIN-32-DYNAMIC-NEXT: 	call	_dcallee
+; DARWIN-32-DYNAMIC-NEXT: 	calll	_dcallee
+; DARWIN-32-DYNAMIC-NEXT: 	calll	_dcallee
 ; DARWIN-32-DYNAMIC-NEXT: 	addl	$12, %esp
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _dcaller:
 ; DARWIN-32-PIC: 	subl	$12, %esp
-; DARWIN-32-PIC-NEXT: 	call	_dcallee
-; DARWIN-32-PIC-NEXT: 	call	_dcallee
+; DARWIN-32-PIC-NEXT: 	calll	_dcallee
+; DARWIN-32-PIC-NEXT: 	calll	_dcallee
 ; DARWIN-32-PIC-NEXT: 	addl	$12, %esp
 ; DARWIN-32-PIC-NEXT: 	ret
 
@@ -8919,15 +8919,15 @@
 
 ; LINUX-32-STATIC: lcaller:
 ; LINUX-32-STATIC: 	subl	$4, %esp
-; LINUX-32-STATIC-NEXT: 	call	lcallee
-; LINUX-32-STATIC-NEXT: 	call	lcallee
+; LINUX-32-STATIC-NEXT: 	calll	lcallee
+; LINUX-32-STATIC-NEXT: 	calll	lcallee
 ; LINUX-32-STATIC-NEXT: 	addl	$4, %esp
 ; LINUX-32-STATIC-NEXT: 	ret
 
 ; LINUX-32-PIC: lcaller:
 ; LINUX-32-PIC: 	subl	$4, %esp
-; LINUX-32-PIC-NEXT: 	call	lcallee
-; LINUX-32-PIC-NEXT: 	call	lcallee
+; LINUX-32-PIC-NEXT: 	calll	lcallee
+; LINUX-32-PIC-NEXT: 	calll	lcallee
 ; LINUX-32-PIC-NEXT: 	addl	$4, %esp
 ; LINUX-32-PIC-NEXT: 	ret
 
@@ -8940,22 +8940,22 @@
 
 ; DARWIN-32-STATIC: _lcaller:
 ; DARWIN-32-STATIC: 	subl	$12, %esp
-; DARWIN-32-STATIC-NEXT: 	call	_lcallee
-; DARWIN-32-STATIC-NEXT: 	call	_lcallee
+; DARWIN-32-STATIC-NEXT: 	calll	_lcallee
+; DARWIN-32-STATIC-NEXT: 	calll	_lcallee
 ; DARWIN-32-STATIC-NEXT: 	addl	$12, %esp
 ; DARWIN-32-STATIC-NEXT: 	ret
 
 ; DARWIN-32-DYNAMIC: _lcaller:
 ; DARWIN-32-DYNAMIC: 	subl	$12, %esp
-; DARWIN-32-DYNAMIC-NEXT: 	call	_lcallee
-; DARWIN-32-DYNAMIC-NEXT: 	call	_lcallee
+; DARWIN-32-DYNAMIC-NEXT: 	calll	_lcallee
+; DARWIN-32-DYNAMIC-NEXT: 	calll	_lcallee
 ; DARWIN-32-DYNAMIC-NEXT: 	addl	$12, %esp
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _lcaller:
 ; DARWIN-32-PIC: 	subl	$12, %esp
-; DARWIN-32-PIC-NEXT: 	call	_lcallee
-; DARWIN-32-PIC-NEXT: 	call	_lcallee
+; DARWIN-32-PIC-NEXT: 	calll	_lcallee
+; DARWIN-32-PIC-NEXT: 	calll	_lcallee
 ; DARWIN-32-PIC-NEXT: 	addl	$12, %esp
 ; DARWIN-32-PIC-NEXT: 	ret
 
@@ -8991,13 +8991,13 @@
 
 ; LINUX-32-STATIC: tailcaller:
 ; LINUX-32-STATIC: 	subl	$4, %esp
-; LINUX-32-STATIC-NEXT: 	call	callee
+; LINUX-32-STATIC-NEXT: 	calll	callee
 ; LINUX-32-STATIC-NEXT: 	addl	$4, %esp
 ; LINUX-32-STATIC-NEXT: 	ret
 
 ; LINUX-32-PIC: tailcaller:
 ; LINUX-32-PIC: 	subl	$4, %esp
-; LINUX-32-PIC-NEXT: 	call	callee
+; LINUX-32-PIC-NEXT: 	calll	callee
 ; LINUX-32-PIC-NEXT: 	addl	$4, %esp
 ; LINUX-32-PIC-NEXT: 	ret
 
@@ -9009,19 +9009,19 @@
 
 ; DARWIN-32-STATIC: _tailcaller:
 ; DARWIN-32-STATIC: 	subl	$12, %esp
-; DARWIN-32-STATIC-NEXT: 	call	_callee
+; DARWIN-32-STATIC-NEXT: 	calll	_callee
 ; DARWIN-32-STATIC-NEXT: 	addl	$12, %esp
 ; DARWIN-32-STATIC-NEXT: 	ret
 
 ; DARWIN-32-DYNAMIC: _tailcaller:
 ; DARWIN-32-DYNAMIC: 	subl	$12, %esp
-; DARWIN-32-DYNAMIC-NEXT: 	call	L_callee$stub
+; DARWIN-32-DYNAMIC-NEXT: 	calll	L_callee$stub
 ; DARWIN-32-DYNAMIC-NEXT: 	addl	$12, %esp
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _tailcaller:
 ; DARWIN-32-PIC: 	subl	$12, %esp
-; DARWIN-32-PIC-NEXT: 	call	L_callee$stub
+; DARWIN-32-PIC-NEXT: 	calll	L_callee$stub
 ; DARWIN-32-PIC-NEXT: 	addl	$12, %esp
 ; DARWIN-32-PIC-NEXT: 	ret
 
@@ -9054,13 +9054,13 @@
 
 ; LINUX-32-STATIC: dtailcaller:
 ; LINUX-32-STATIC: 	subl	$4, %esp
-; LINUX-32-STATIC-NEXT: 	call	dcallee
+; LINUX-32-STATIC-NEXT: 	calll	dcallee
 ; LINUX-32-STATIC-NEXT: 	addl	$4, %esp
 ; LINUX-32-STATIC-NEXT: 	ret
 
 ; LINUX-32-PIC: dtailcaller:
 ; LINUX-32-PIC: 	subl	$4, %esp
-; LINUX-32-PIC-NEXT: 	call	dcallee
+; LINUX-32-PIC-NEXT: 	calll	dcallee
 ; LINUX-32-PIC-NEXT: 	addl	$4, %esp
 ; LINUX-32-PIC-NEXT: 	ret
 
@@ -9072,19 +9072,19 @@
 
 ; DARWIN-32-STATIC: _dtailcaller:
 ; DARWIN-32-STATIC: 	subl	$12, %esp
-; DARWIN-32-STATIC-NEXT: 	call	_dcallee
+; DARWIN-32-STATIC-NEXT: 	calll	_dcallee
 ; DARWIN-32-STATIC-NEXT: 	addl	$12, %esp
 ; DARWIN-32-STATIC-NEXT: 	ret
 
 ; DARWIN-32-DYNAMIC: _dtailcaller:
 ; DARWIN-32-DYNAMIC: 	subl	$12, %esp
-; DARWIN-32-DYNAMIC-NEXT: 	call	_dcallee
+; DARWIN-32-DYNAMIC-NEXT: 	calll	_dcallee
 ; DARWIN-32-DYNAMIC-NEXT: 	addl	$12, %esp
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _dtailcaller:
 ; DARWIN-32-PIC: 	subl	$12, %esp
-; DARWIN-32-PIC-NEXT: 	call	_dcallee
+; DARWIN-32-PIC-NEXT: 	calll	_dcallee
 ; DARWIN-32-PIC-NEXT: 	addl	$12, %esp
 ; DARWIN-32-PIC-NEXT: 	ret
 
@@ -9117,13 +9117,13 @@
 
 ; LINUX-32-STATIC: ltailcaller:
 ; LINUX-32-STATIC: 	subl	$4, %esp
-; LINUX-32-STATIC-NEXT: 	call	lcallee
+; LINUX-32-STATIC-NEXT: 	calll	lcallee
 ; LINUX-32-STATIC-NEXT: 	addl	$4, %esp
 ; LINUX-32-STATIC-NEXT: 	ret
 
 ; LINUX-32-PIC: ltailcaller:
 ; LINUX-32-PIC: 	subl	$4, %esp
-; LINUX-32-PIC-NEXT: 	call	lcallee
+; LINUX-32-PIC-NEXT: 	calll	lcallee
 ; LINUX-32-PIC-NEXT: 	addl	$4, %esp
 ; LINUX-32-PIC-NEXT: 	ret
 
@@ -9135,19 +9135,19 @@
 
 ; DARWIN-32-STATIC: _ltailcaller:
 ; DARWIN-32-STATIC: 	subl	$12, %esp
-; DARWIN-32-STATIC-NEXT: 	call	_lcallee
+; DARWIN-32-STATIC-NEXT: 	calll	_lcallee
 ; DARWIN-32-STATIC-NEXT: 	addl	$12, %esp
 ; DARWIN-32-STATIC-NEXT: 	ret
 
 ; DARWIN-32-DYNAMIC: _ltailcaller:
 ; DARWIN-32-DYNAMIC: 	subl	$12, %esp
-; DARWIN-32-DYNAMIC-NEXT: 	call	_lcallee
+; DARWIN-32-DYNAMIC-NEXT: 	calll	_lcallee
 ; DARWIN-32-DYNAMIC-NEXT: 	addl	$12, %esp
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _ltailcaller:
 ; DARWIN-32-PIC: 	subl	$12, %esp
-; DARWIN-32-PIC-NEXT: 	call	_lcallee
+; DARWIN-32-PIC-NEXT: 	calll	_lcallee
 ; DARWIN-32-PIC-NEXT: 	addl	$12, %esp
 ; DARWIN-32-PIC-NEXT: 	ret
 
@@ -9184,15 +9184,15 @@
 
 ; LINUX-32-STATIC: icaller:
 ; LINUX-32-STATIC: 	subl	$4, %esp
-; LINUX-32-STATIC-NEXT: 	call	*ifunc
-; LINUX-32-STATIC-NEXT: 	call	*ifunc
+; LINUX-32-STATIC-NEXT: 	calll	*ifunc
+; LINUX-32-STATIC-NEXT: 	calll	*ifunc
 ; LINUX-32-STATIC-NEXT: 	addl	$4, %esp
 ; LINUX-32-STATIC-NEXT: 	ret
 
 ; LINUX-32-PIC: icaller:
 ; LINUX-32-PIC: 	subl	$4, %esp
-; LINUX-32-PIC-NEXT: 	call	*ifunc
-; LINUX-32-PIC-NEXT: 	call	*ifunc
+; LINUX-32-PIC-NEXT: 	calll	*ifunc
+; LINUX-32-PIC-NEXT: 	calll	*ifunc
 ; LINUX-32-PIC-NEXT: 	addl	$4, %esp
 ; LINUX-32-PIC-NEXT: 	ret
 
@@ -9206,8 +9206,8 @@
 
 ; DARWIN-32-STATIC: _icaller:
 ; DARWIN-32-STATIC: 	subl	$12, %esp
-; DARWIN-32-STATIC-NEXT: 	call	*_ifunc
-; DARWIN-32-STATIC-NEXT: 	call	*_ifunc
+; DARWIN-32-STATIC-NEXT: 	calll	*_ifunc
+; DARWIN-32-STATIC-NEXT: 	calll	*_ifunc
 ; DARWIN-32-STATIC-NEXT: 	addl	$12, %esp
 ; DARWIN-32-STATIC-NEXT: 	ret
 
@@ -9215,8 +9215,8 @@
 ; DARWIN-32-DYNAMIC: 	pushl	%esi
 ; DARWIN-32-DYNAMIC-NEXT: 	subl	$8, %esp
 ; DARWIN-32-DYNAMIC-NEXT: 	movl	L_ifunc$non_lazy_ptr, %esi
-; DARWIN-32-DYNAMIC-NEXT: 	call	*(%esi)
-; DARWIN-32-DYNAMIC-NEXT: 	call	*(%esi)
+; DARWIN-32-DYNAMIC-NEXT: 	calll	*(%esi)
+; DARWIN-32-DYNAMIC-NEXT: 	calll	*(%esi)
 ; DARWIN-32-DYNAMIC-NEXT: 	addl	$8, %esp
 ; DARWIN-32-DYNAMIC-NEXT: 	popl	%esi
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
@@ -9224,12 +9224,12 @@
 ; DARWIN-32-PIC: _icaller:
 ; DARWIN-32-PIC: 	pushl	%esi
 ; DARWIN-32-PIC-NEXT: 	subl	$8, %esp
-; DARWIN-32-PIC-NEXT: 	call	L142$pb
+; DARWIN-32-PIC-NEXT: 	calll	L142$pb
 ; DARWIN-32-PIC-NEXT: L142$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	L_ifunc$non_lazy_ptr-L142$pb(%eax), %esi
-; DARWIN-32-PIC-NEXT: 	call	*(%esi)
-; DARWIN-32-PIC-NEXT: 	call	*(%esi)
+; DARWIN-32-PIC-NEXT: 	calll	*(%esi)
+; DARWIN-32-PIC-NEXT: 	calll	*(%esi)
 ; DARWIN-32-PIC-NEXT: 	addl	$8, %esp
 ; DARWIN-32-PIC-NEXT: 	popl	%esi
 ; DARWIN-32-PIC-NEXT: 	ret
@@ -9273,15 +9273,15 @@
 
 ; LINUX-32-STATIC: dicaller:
 ; LINUX-32-STATIC: 	subl	$4, %esp
-; LINUX-32-STATIC-NEXT: 	call	*difunc
-; LINUX-32-STATIC-NEXT: 	call	*difunc
+; LINUX-32-STATIC-NEXT: 	calll	*difunc
+; LINUX-32-STATIC-NEXT: 	calll	*difunc
 ; LINUX-32-STATIC-NEXT: 	addl	$4, %esp
 ; LINUX-32-STATIC-NEXT: 	ret
 
 ; LINUX-32-PIC: dicaller:
 ; LINUX-32-PIC: 	subl	$4, %esp
-; LINUX-32-PIC-NEXT: 	call	*difunc
-; LINUX-32-PIC-NEXT: 	call	*difunc
+; LINUX-32-PIC-NEXT: 	calll	*difunc
+; LINUX-32-PIC-NEXT: 	calll	*difunc
 ; LINUX-32-PIC-NEXT: 	addl	$4, %esp
 ; LINUX-32-PIC-NEXT: 	ret
 
@@ -9295,26 +9295,26 @@
 
 ; DARWIN-32-STATIC: _dicaller:
 ; DARWIN-32-STATIC: 	subl	$12, %esp
-; DARWIN-32-STATIC-NEXT: 	call	*_difunc
-; DARWIN-32-STATIC-NEXT: 	call	*_difunc
+; DARWIN-32-STATIC-NEXT: 	calll	*_difunc
+; DARWIN-32-STATIC-NEXT: 	calll	*_difunc
 ; DARWIN-32-STATIC-NEXT: 	addl	$12, %esp
 ; DARWIN-32-STATIC-NEXT: 	ret
 
 ; DARWIN-32-DYNAMIC: _dicaller:
 ; DARWIN-32-DYNAMIC: 	subl	$12, %esp
-; DARWIN-32-DYNAMIC-NEXT: 	call	*_difunc
-; DARWIN-32-DYNAMIC-NEXT: 	call	*_difunc
+; DARWIN-32-DYNAMIC-NEXT: 	calll	*_difunc
+; DARWIN-32-DYNAMIC-NEXT: 	calll	*_difunc
 ; DARWIN-32-DYNAMIC-NEXT: 	addl	$12, %esp
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _dicaller:
 ; DARWIN-32-PIC: 	pushl	%esi
 ; DARWIN-32-PIC-NEXT: 	subl	$8, %esp
-; DARWIN-32-PIC-NEXT: 	call	L143$pb
+; DARWIN-32-PIC-NEXT: 	calll	L143$pb
 ; DARWIN-32-PIC-NEXT: L143$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%esi
-; DARWIN-32-PIC-NEXT: 	call	*_difunc-L143$pb(%esi)
-; DARWIN-32-PIC-NEXT: 	call	*_difunc-L143$pb(%esi)
+; DARWIN-32-PIC-NEXT: 	calll	*_difunc-L143$pb(%esi)
+; DARWIN-32-PIC-NEXT: 	calll	*_difunc-L143$pb(%esi)
 ; DARWIN-32-PIC-NEXT: 	addl	$8, %esp
 ; DARWIN-32-PIC-NEXT: 	popl	%esi
 ; DARWIN-32-PIC-NEXT: 	ret
@@ -9355,15 +9355,15 @@
 
 ; LINUX-32-STATIC: licaller:
 ; LINUX-32-STATIC: 	subl	$4, %esp
-; LINUX-32-STATIC-NEXT: 	call	*lifunc
-; LINUX-32-STATIC-NEXT: 	call	*lifunc
+; LINUX-32-STATIC-NEXT: 	calll	*lifunc
+; LINUX-32-STATIC-NEXT: 	calll	*lifunc
 ; LINUX-32-STATIC-NEXT: 	addl	$4, %esp
 ; LINUX-32-STATIC-NEXT: 	ret
 
 ; LINUX-32-PIC: licaller:
 ; LINUX-32-PIC: 	subl	$4, %esp
-; LINUX-32-PIC-NEXT: 	call	*lifunc
-; LINUX-32-PIC-NEXT: 	call	*lifunc
+; LINUX-32-PIC-NEXT: 	calll	*lifunc
+; LINUX-32-PIC-NEXT: 	calll	*lifunc
 ; LINUX-32-PIC-NEXT: 	addl	$4, %esp
 ; LINUX-32-PIC-NEXT: 	ret
 
@@ -9376,26 +9376,26 @@
 
 ; DARWIN-32-STATIC: _licaller:
 ; DARWIN-32-STATIC: 	subl	$12, %esp
-; DARWIN-32-STATIC-NEXT: 	call	*_lifunc
-; DARWIN-32-STATIC-NEXT: 	call	*_lifunc
+; DARWIN-32-STATIC-NEXT: 	calll	*_lifunc
+; DARWIN-32-STATIC-NEXT: 	calll	*_lifunc
 ; DARWIN-32-STATIC-NEXT: 	addl	$12, %esp
 ; DARWIN-32-STATIC-NEXT: 	ret
 
 ; DARWIN-32-DYNAMIC: _licaller:
 ; DARWIN-32-DYNAMIC: 	subl	$12, %esp
-; DARWIN-32-DYNAMIC-NEXT: 	call	*_lifunc
-; DARWIN-32-DYNAMIC-NEXT: 	call	*_lifunc
+; DARWIN-32-DYNAMIC-NEXT: 	calll	*_lifunc
+; DARWIN-32-DYNAMIC-NEXT: 	calll	*_lifunc
 ; DARWIN-32-DYNAMIC-NEXT: 	addl	$12, %esp
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _licaller:
 ; DARWIN-32-PIC: 	pushl	%esi
 ; DARWIN-32-PIC-NEXT: 	subl	$8, %esp
-; DARWIN-32-PIC-NEXT: 	call	L144$pb
+; DARWIN-32-PIC-NEXT: 	calll	L144$pb
 ; DARWIN-32-PIC-NEXT: L144$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%esi
-; DARWIN-32-PIC-NEXT: 	call	*_lifunc-L144$pb(%esi)
-; DARWIN-32-PIC-NEXT: 	call	*_lifunc-L144$pb(%esi)
+; DARWIN-32-PIC-NEXT: 	calll	*_lifunc-L144$pb(%esi)
+; DARWIN-32-PIC-NEXT: 	calll	*_lifunc-L144$pb(%esi)
 ; DARWIN-32-PIC-NEXT: 	addl	$8, %esp
 ; DARWIN-32-PIC-NEXT: 	popl	%esi
 ; DARWIN-32-PIC-NEXT: 	ret
@@ -9436,15 +9436,15 @@
 
 ; LINUX-32-STATIC: itailcaller:
 ; LINUX-32-STATIC: 	subl	$4, %esp
-; LINUX-32-STATIC-NEXT: 	call	*ifunc
-; LINUX-32-STATIC-NEXT: 	call	*ifunc
+; LINUX-32-STATIC-NEXT: 	calll	*ifunc
+; LINUX-32-STATIC-NEXT: 	calll	*ifunc
 ; LINUX-32-STATIC-NEXT: 	addl	$4, %esp
 ; LINUX-32-STATIC-NEXT: 	ret
 
 ; LINUX-32-PIC: itailcaller:
 ; LINUX-32-PIC: 	subl	$4, %esp
-; LINUX-32-PIC-NEXT: 	call	*ifunc
-; LINUX-32-PIC-NEXT: 	call	*ifunc
+; LINUX-32-PIC-NEXT: 	calll	*ifunc
+; LINUX-32-PIC-NEXT: 	calll	*ifunc
 ; LINUX-32-PIC-NEXT: 	addl	$4, %esp
 ; LINUX-32-PIC-NEXT: 	ret
 
@@ -9458,8 +9458,8 @@
 
 ; DARWIN-32-STATIC: _itailcaller:
 ; DARWIN-32-STATIC: 	subl	$12, %esp
-; DARWIN-32-STATIC-NEXT: 	call	*_ifunc
-; DARWIN-32-STATIC-NEXT: 	call	*_ifunc
+; DARWIN-32-STATIC-NEXT: 	calll	*_ifunc
+; DARWIN-32-STATIC-NEXT: 	calll	*_ifunc
 ; DARWIN-32-STATIC-NEXT: 	addl	$12, %esp
 ; DARWIN-32-STATIC-NEXT: 	ret
 
@@ -9467,8 +9467,8 @@
 ; DARWIN-32-DYNAMIC: 	pushl	%esi
 ; DARWIN-32-DYNAMIC-NEXT: 	subl	$8, %esp
 ; DARWIN-32-DYNAMIC-NEXT: 	movl	L_ifunc$non_lazy_ptr, %esi
-; DARWIN-32-DYNAMIC-NEXT: 	call	*(%esi)
-; DARWIN-32-DYNAMIC-NEXT: 	call	*(%esi)
+; DARWIN-32-DYNAMIC-NEXT: 	calll	*(%esi)
+; DARWIN-32-DYNAMIC-NEXT: 	calll	*(%esi)
 ; DARWIN-32-DYNAMIC-NEXT: 	addl	$8, %esp
 ; DARWIN-32-DYNAMIC-NEXT: 	popl	%esi
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
@@ -9476,12 +9476,12 @@
 ; DARWIN-32-PIC: _itailcaller:
 ; DARWIN-32-PIC: 	pushl	%esi
 ; DARWIN-32-PIC-NEXT: 	subl	$8, %esp
-; DARWIN-32-PIC-NEXT: 	call	L145$pb
+; DARWIN-32-PIC-NEXT: 	calll	L145$pb
 ; DARWIN-32-PIC-NEXT: L145$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
 ; DARWIN-32-PIC-NEXT: 	movl	L_ifunc$non_lazy_ptr-L145$pb(%eax), %esi
-; DARWIN-32-PIC-NEXT: 	call	*(%esi)
-; DARWIN-32-PIC-NEXT: 	call	*(%esi)
+; DARWIN-32-PIC-NEXT: 	calll	*(%esi)
+; DARWIN-32-PIC-NEXT: 	calll	*(%esi)
 ; DARWIN-32-PIC-NEXT: 	addl	$8, %esp
 ; DARWIN-32-PIC-NEXT: 	popl	%esi
 ; DARWIN-32-PIC-NEXT: 	ret
@@ -9522,13 +9522,13 @@
 
 ; LINUX-32-STATIC: ditailcaller:
 ; LINUX-32-STATIC: 	subl	$4, %esp
-; LINUX-32-STATIC-NEXT: 	call	*difunc
+; LINUX-32-STATIC-NEXT: 	calll	*difunc
 ; LINUX-32-STATIC-NEXT: 	addl	$4, %esp
 ; LINUX-32-STATIC-NEXT: 	ret
 
 ; LINUX-32-PIC: ditailcaller:
 ; LINUX-32-PIC: 	subl	$4, %esp
-; LINUX-32-PIC-NEXT: 	call	*difunc
+; LINUX-32-PIC-NEXT: 	calll	*difunc
 ; LINUX-32-PIC-NEXT: 	addl	$4, %esp
 ; LINUX-32-PIC-NEXT: 	ret
 
@@ -9541,22 +9541,22 @@
 
 ; DARWIN-32-STATIC: _ditailcaller:
 ; DARWIN-32-STATIC: 	subl	$12, %esp
-; DARWIN-32-STATIC-NEXT: 	call	*_difunc
+; DARWIN-32-STATIC-NEXT: 	calll	*_difunc
 ; DARWIN-32-STATIC-NEXT: 	addl	$12, %esp
 ; DARWIN-32-STATIC-NEXT: 	ret
 
 ; DARWIN-32-DYNAMIC: _ditailcaller:
 ; DARWIN-32-DYNAMIC: 	subl	$12, %esp
-; DARWIN-32-DYNAMIC-NEXT: 	call	*_difunc
+; DARWIN-32-DYNAMIC-NEXT: 	calll	*_difunc
 ; DARWIN-32-DYNAMIC-NEXT: 	addl	$12, %esp
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _ditailcaller:
 ; DARWIN-32-PIC: 	subl	$12, %esp
-; DARWIN-32-PIC-NEXT: 	call	L146$pb
+; DARWIN-32-PIC-NEXT: 	calll	L146$pb
 ; DARWIN-32-PIC-NEXT: L146$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
-; DARWIN-32-PIC-NEXT: 	call	*_difunc-L146$pb(%eax)
+; DARWIN-32-PIC-NEXT: 	calll	*_difunc-L146$pb(%eax)
 ; DARWIN-32-PIC-NEXT: 	addl	$12, %esp
 ; DARWIN-32-PIC-NEXT: 	ret
 
@@ -9589,13 +9589,13 @@
 
 ; LINUX-32-STATIC: litailcaller:
 ; LINUX-32-STATIC: 	subl	$4, %esp
-; LINUX-32-STATIC-NEXT: 	call	*lifunc
+; LINUX-32-STATIC-NEXT: 	calll	*lifunc
 ; LINUX-32-STATIC-NEXT: 	addl	$4, %esp
 ; LINUX-32-STATIC-NEXT: 	ret
 
 ; LINUX-32-PIC: litailcaller:
 ; LINUX-32-PIC: 	subl	$4, %esp
-; LINUX-32-PIC-NEXT: 	call	*lifunc
+; LINUX-32-PIC-NEXT: 	calll	*lifunc
 ; LINUX-32-PIC-NEXT: 	addl	$4, %esp
 ; LINUX-32-PIC-NEXT: 	ret
 
@@ -9607,22 +9607,22 @@
 
 ; DARWIN-32-STATIC: _litailcaller:
 ; DARWIN-32-STATIC: 	subl	$12, %esp
-; DARWIN-32-STATIC-NEXT: 	call	*_lifunc
+; DARWIN-32-STATIC-NEXT: 	calll	*_lifunc
 ; DARWIN-32-STATIC-NEXT: 	addl	$12, %esp
 ; DARWIN-32-STATIC-NEXT: 	ret
 
 ; DARWIN-32-DYNAMIC: _litailcaller:
 ; DARWIN-32-DYNAMIC: 	subl	$12, %esp
-; DARWIN-32-DYNAMIC-NEXT: 	call	*_lifunc
+; DARWIN-32-DYNAMIC-NEXT: 	calll	*_lifunc
 ; DARWIN-32-DYNAMIC-NEXT: 	addl	$12, %esp
 ; DARWIN-32-DYNAMIC-NEXT: 	ret
 
 ; DARWIN-32-PIC: _litailcaller:
 ; DARWIN-32-PIC: 	subl	$12, %esp
-; DARWIN-32-PIC-NEXT: 	call	L147$pb
+; DARWIN-32-PIC-NEXT: 	calll	L147$pb
 ; DARWIN-32-PIC-NEXT: L147$pb:
 ; DARWIN-32-PIC-NEXT: 	popl	%eax
-; DARWIN-32-PIC-NEXT: 	call	*_lifunc-L147$pb(%eax)
+; DARWIN-32-PIC-NEXT: 	calll	*_lifunc-L147$pb(%eax)
 ; DARWIN-32-PIC-NEXT: 	addl	$12, %esp
 ; DARWIN-32-PIC-NEXT: 	ret
 

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/atomic_op.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/atomic_op.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/atomic_op.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/atomic_op.ll Tue Oct 26 19:48:03 2010
@@ -1,16 +1,8 @@
-; RUN: llc < %s -march=x86 -o %t1
-; RUN: grep "lock" %t1 | count 17
-; RUN: grep "xaddl" %t1 | count 4 
-; RUN: grep "cmpxchgl"  %t1 | count 13 
-; RUN: grep "xchgl" %t1 | count 14
-; RUN: grep "cmova" %t1 | count 2
-; RUN: grep "cmovb" %t1 | count 2
-; RUN: grep "cmovg" %t1 | count 2
-; RUN: grep "cmovl" %t1 | count 2
+; RUN: llc < %s -march=x86 | FileCheck %s
 
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
 
-define void @main(i32 %argc, i8** %argv) {
+define void @func(i32 %argc, i8** %argv) nounwind {
 entry:
 	%argc.addr = alloca i32		; <i32*> [#uses=1]
 	%argv.addr = alloca i8**		; <i8***> [#uses=1]
@@ -29,48 +21,105 @@
 	store i32 3855, i32* %ort
 	store i32 3855, i32* %xort
 	store i32 4, i32* %temp
-	%tmp = load i32* %temp		; <i32> [#uses=1]
+	%tmp = load i32* %temp
+        ; CHECK: lock
+        ; CHECK: xaddl
 	call i32 @llvm.atomic.load.add.i32.p0i32( i32* %val1, i32 %tmp )		; <i32>:0 [#uses=1]
 	store i32 %0, i32* %old
+        ; CHECK: lock
+        ; CHECK: xaddl
 	call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %val2, i32 30 )		; <i32>:1 [#uses=1]
 	store i32 %1, i32* %old
+        ; CHECK: lock
+        ; CHECK: xaddl
 	call i32 @llvm.atomic.load.add.i32.p0i32( i32* %val2, i32 1 )		; <i32>:2 [#uses=1]
 	store i32 %2, i32* %old
+        ; CHECK: lock
+        ; CHECK: xaddl
 	call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %val2, i32 1 )		; <i32>:3 [#uses=1]
 	store i32 %3, i32* %old
+        ; CHECK: andl
+        ; CHECK: lock
+        ; CHECK: cmpxchgl
 	call i32 @llvm.atomic.load.and.i32.p0i32( i32* %andt, i32 4080 )		; <i32>:4 [#uses=1]
 	store i32 %4, i32* %old
+        ; CHECK: orl
+        ; CHECK: lock
+        ; CHECK: cmpxchgl
 	call i32 @llvm.atomic.load.or.i32.p0i32( i32* %ort, i32 4080 )		; <i32>:5 [#uses=1]
 	store i32 %5, i32* %old
+        ; CHECK: xorl
+        ; CHECK: lock
+        ; CHECK: cmpxchgl
 	call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %xort, i32 4080 )		; <i32>:6 [#uses=1]
 	store i32 %6, i32* %old
+        ; CHECK: cmov
+        ; CHECK: lock
+        ; CHECK: cmpxchgl
 	call i32 @llvm.atomic.load.min.i32.p0i32( i32* %val2, i32 16 )		; <i32>:7 [#uses=1]
 	store i32 %7, i32* %old
 	%neg = sub i32 0, 1		; <i32> [#uses=1]
+        ; CHECK: cmov
+        ; CHECK: lock
+        ; CHECK: cmpxchgl
 	call i32 @llvm.atomic.load.min.i32.p0i32( i32* %val2, i32 %neg )		; <i32>:8 [#uses=1]
 	store i32 %8, i32* %old
+        ; CHECK: cmov
+        ; CHECK: lock
+        ; CHECK: cmpxchgl
 	call i32 @llvm.atomic.load.max.i32.p0i32( i32* %val2, i32 1 )		; <i32>:9 [#uses=1]
 	store i32 %9, i32* %old
+        ; CHECK: cmov
+        ; CHECK: lock
+        ; CHECK: cmpxchgl
 	call i32 @llvm.atomic.load.max.i32.p0i32( i32* %val2, i32 0 )		; <i32>:10 [#uses=1]
 	store i32 %10, i32* %old
+        ; CHECK: cmov
+        ; CHECK: lock
+        ; CHECK: cmpxchgl
 	call i32 @llvm.atomic.load.umax.i32.p0i32( i32* %val2, i32 65535 )		; <i32>:11 [#uses=1]
 	store i32 %11, i32* %old
+        ; CHECK: cmov
+        ; CHECK: lock
+        ; CHECK: cmpxchgl
 	call i32 @llvm.atomic.load.umax.i32.p0i32( i32* %val2, i32 10 )		; <i32>:12 [#uses=1]
 	store i32 %12, i32* %old
+        ; CHECK: cmov
+        ; CHECK: lock
+        ; CHECK: cmpxchgl
 	call i32 @llvm.atomic.load.umin.i32.p0i32( i32* %val2, i32 1 )		; <i32>:13 [#uses=1]
 	store i32 %13, i32* %old
+        ; CHECK: cmov
+        ; CHECK: lock
+        ; CHECK: cmpxchgl
 	call i32 @llvm.atomic.load.umin.i32.p0i32( i32* %val2, i32 10 )		; <i32>:14 [#uses=1]
 	store i32 %14, i32* %old
+        ; CHECK: xchgl   %{{.*}}, {{.*}}(%esp)
 	call i32 @llvm.atomic.swap.i32.p0i32( i32* %val2, i32 1976 )		; <i32>:15 [#uses=1]
 	store i32 %15, i32* %old
 	%neg1 = sub i32 0, 10		; <i32> [#uses=1]
+        ; CHECK: lock
+        ; CHECK: cmpxchgl
 	call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %val2, i32 %neg1, i32 1 )		; <i32>:16 [#uses=1]
 	store i32 %16, i32* %old
+        ; CHECK: lock
+        ; CHECK: cmpxchgl
 	call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %val2, i32 1976, i32 1 )		; <i32>:17 [#uses=1]
 	store i32 %17, i32* %old
 	ret void
 }
 
+define void @test2(i32 addrspace(256)* nocapture %P) nounwind {
+entry:
+; CHECK: lock
+; CHECK:	cmpxchgl	%{{.*}}, %gs:(%{{.*}})
+
+  %0 = tail call i32 @llvm.atomic.cmp.swap.i32.p256i32(i32 addrspace(256)* %P, i32 0, i32 1)
+  ret void
+}
+
+declare i32 @llvm.atomic.cmp.swap.i32.p256i32(i32 addrspace(256)* nocapture, i32, i32) nounwind
+
 declare i32 @llvm.atomic.load.add.i32.p0i32(i32*, i32) nounwind 
 
 declare i32 @llvm.atomic.load.sub.i32.p0i32(i32*, i32) nounwind 

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/byval.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/byval.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/byval.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/byval.ll Tue Oct 26 19:48:03 2010
@@ -1,7 +1,10 @@
-; RUN: llc < %s -march=x86-64 | grep {movq	8(%rsp), %rax}
-; RUN: llc < %s -march=x86 > %t
-; RUN: grep {movl	8(%esp), %edx} %t
-; RUN: grep {movl	4(%esp), %eax} %t
+; RUN: llc < %s -march=x86-64 | FileCheck -check-prefix=X86-64 %s
+; RUN: llc < %s -march=x86 | FileCheck -check-prefix=X86 %s
+
+; X86: movl	4(%esp), %eax
+; X86: movl	8(%esp), %edx
+
+; X86-64: movq	8(%rsp), %rax
 
 %struct.s = type { i64, i64, i64 }
 

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/call-imm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/call-imm.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/call-imm.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/call-imm.ll Tue Oct 26 19:48:03 2010
@@ -5,7 +5,7 @@
 ; Call to immediate is not safe on x86-64 unless we *know* that the
 ; call will be within 32-bits pcrel from the dest immediate.
 
-; RUN: llc < %s -march=x86-64 | grep {call.*\*%rax}
+; RUN: llc < %s -march=x86-64 | grep {call.*\\*%rax}
 
 ; PR3666
 ; PR3773

Removed: llvm/branches/wendling/eh/test/CodeGen/X86/cmp-test.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/cmp-test.ll?rev=117424&view=auto
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/cmp-test.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/cmp-test.ll (removed)
@@ -1,27 +0,0 @@
-; RUN: llc < %s -march=x86 | grep cmp | count 1
-; RUN: llc < %s -march=x86 | grep test | count 1
-
-define i32 @f1(i32 %X, i32* %y) {
-	%tmp = load i32* %y		; <i32> [#uses=1]
-	%tmp.upgrd.1 = icmp eq i32 %tmp, 0		; <i1> [#uses=1]
-	br i1 %tmp.upgrd.1, label %ReturnBlock, label %cond_true
-
-cond_true:		; preds = %0
-	ret i32 1
-
-ReturnBlock:		; preds = %0
-	ret i32 0
-}
-
-define i32 @f2(i32 %X, i32* %y) {
-	%tmp = load i32* %y		; <i32> [#uses=1]
-	%tmp1 = shl i32 %tmp, 3		; <i32> [#uses=1]
-	%tmp1.upgrd.2 = icmp eq i32 %tmp1, 0		; <i1> [#uses=1]
-	br i1 %tmp1.upgrd.2, label %ReturnBlock, label %cond_true
-
-cond_true:		; preds = %0
-	ret i32 1
-
-ReturnBlock:		; preds = %0
-	ret i32 0
-}

Removed: llvm/branches/wendling/eh/test/CodeGen/X86/cmp0.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/cmp0.ll?rev=117424&view=auto
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/cmp0.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/cmp0.ll (removed)
@@ -1,24 +0,0 @@
-; RUN: llc < %s -march=x86-64 | FileCheck %s
-
-define i64 @test0(i64 %x) nounwind {
-  %t = icmp eq i64 %x, 0
-  %r = zext i1 %t to i64
-  ret i64 %r
-; CHECK: test0:
-; CHECK: 	testq	%rdi, %rdi
-; CHECK: 	sete	%al
-; CHECK: 	movzbl	%al, %eax
-; CHECK: 	ret
-}
-
-define i64 @test1(i64 %x) nounwind {
-  %t = icmp slt i64 %x, 1
-  %r = zext i1 %t to i64
-  ret i64 %r
-; CHECK: test1:
-; CHECK: 	testq	%rdi, %rdi
-; CHECK: 	setle	%al
-; CHECK: 	movzbl	%al, %eax
-; CHECK: 	ret
-}
-

Removed: llvm/branches/wendling/eh/test/CodeGen/X86/cmp2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/cmp2.ll?rev=117424&view=auto
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/cmp2.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/cmp2.ll (removed)
@@ -1,18 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep ucomisd | grep CPI | count 2
-
-define i32 @test(double %A) nounwind  {
- entry:
- %tmp2 = fcmp ogt double %A, 1.500000e+02; <i1> [#uses=1]
- %tmp5 = fcmp ult double %A, 7.500000e+01; <i1> [#uses=1]
- %bothcond = or i1 %tmp2, %tmp5; <i1> [#uses=1]
- br i1 %bothcond, label %bb8, label %bb12
-
- bb8:; preds = %entry
- %tmp9 = tail call i32 (...)* @foo( ) nounwind ; <i32> [#uses=1]
- ret i32 %tmp9
-
- bb12:; preds = %entry
- ret i32 32
-}
-
-declare i32 @foo(...)

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/compare-inf.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/compare-inf.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/compare-inf.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/compare-inf.ll Tue Oct 26 19:48:03 2010
@@ -5,7 +5,7 @@
 
 ; CHECK: oeq_inff:
 ; CHECK: ucomiss
-; CHECK: jae
+; CHECK: jb
 define float @oeq_inff(float %x, float %y) nounwind readonly {
   %t0 = fcmp oeq float %x, 0x7FF0000000000000
   %t1 = select i1 %t0, float 1.0, float %y
@@ -14,7 +14,7 @@
 
 ; CHECK: oeq_inf:
 ; CHECK: ucomisd
-; CHECK: jae
+; CHECK: jb
 define double @oeq_inf(double %x, double %y) nounwind readonly {
   %t0 = fcmp oeq double %x, 0x7FF0000000000000
   %t1 = select i1 %t0, double 1.0, double %y
@@ -23,7 +23,7 @@
 
 ; CHECK: une_inff:
 ; CHECK: ucomiss
-; CHECK: jb
+; CHECK: jae
 define float @une_inff(float %x, float %y) nounwind readonly {
   %t0 = fcmp une float %x, 0x7FF0000000000000
   %t1 = select i1 %t0, float 1.0, float %y
@@ -32,7 +32,7 @@
 
 ; CHECK: une_inf:
 ; CHECK: ucomisd
-; CHECK: jb
+; CHECK: jae
 define double @une_inf(double %x, double %y) nounwind readonly {
   %t0 = fcmp une double %x, 0x7FF0000000000000
   %t1 = select i1 %t0, double 1.0, double %y
@@ -41,7 +41,7 @@
 
 ; CHECK: oeq_neg_inff:
 ; CHECK: ucomiss
-; CHECK: jae
+; CHECK: jb
 define float @oeq_neg_inff(float %x, float %y) nounwind readonly {
   %t0 = fcmp oeq float %x, 0xFFF0000000000000
   %t1 = select i1 %t0, float 1.0, float %y
@@ -50,7 +50,7 @@
 
 ; CHECK: oeq_neg_inf:
 ; CHECK: ucomisd
-; CHECK: jae
+; CHECK: jb
 define double @oeq_neg_inf(double %x, double %y) nounwind readonly {
   %t0 = fcmp oeq double %x, 0xFFF0000000000000
   %t1 = select i1 %t0, double 1.0, double %y
@@ -59,7 +59,7 @@
 
 ; CHECK: une_neg_inff:
 ; CHECK: ucomiss
-; CHECK: jb
+; CHECK: jae
 define float @une_neg_inff(float %x, float %y) nounwind readonly {
   %t0 = fcmp une float %x, 0xFFF0000000000000
   %t1 = select i1 %t0, float 1.0, float %y
@@ -68,7 +68,7 @@
 
 ; CHECK: une_neg_inf:
 ; CHECK: ucomisd
-; CHECK: jb
+; CHECK: jae
 define double @une_neg_inf(double %x, double %y) nounwind readonly {
   %t0 = fcmp une double %x, 0xFFF0000000000000
   %t1 = select i1 %t0, double 1.0, double %y

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/constant-pool-remat-0.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/constant-pool-remat-0.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/constant-pool-remat-0.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/constant-pool-remat-0.ll Tue Oct 26 19:48:03 2010
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -march=x86-64 | grep LCPI | count 3
-; RUN: llc < %s -march=x86-64 -stats  -info-output-file - | grep asm-printer | grep 6
+; RUN: llc < %s -march=x86-64 -o /dev/null -stats  -info-output-file - | grep asm-printer | grep 6
 ; RUN: llc < %s -march=x86 -mattr=+sse2 | grep LCPI | count 3
-; RUN: llc < %s -march=x86 -mattr=+sse2 -stats  -info-output-file - | grep asm-printer | grep 12
+; RUN: llc < %s -march=x86 -mattr=+sse2 -o /dev/null -stats  -info-output-file - | grep asm-printer | grep 12
 
 declare float @qux(float %y)
 

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/critical-edge-split.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/critical-edge-split.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/critical-edge-split.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/critical-edge-split.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -stats -info-output-file - | grep asm-printer | grep 29
+; RUN: llc < %s -mtriple=i386-apple-darwin -o /dev/null -stats -info-output-file - | grep asm-printer | grep 29
 
 	%CC = type { %Register }
 	%II = type { %"struct.XX::II::$_74" }

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/dll-linkage.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/dll-linkage.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/dll-linkage.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/dll-linkage.ll Tue Oct 26 19:48:03 2010
@@ -3,7 +3,7 @@
 declare dllimport void @foo()
 
 define void @bar() nounwind {
-; CHECK: call	*__imp__foo
+; CHECK: calll	*__imp__foo
   call void @foo()
   ret void
 }

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/dllexport.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/dllexport.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/dllexport.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/dllexport.ll Tue Oct 26 19:48:03 2010
@@ -1,7 +1,7 @@
 ; RUN: llc < %s | FileCheck %s
 ; PR2936
 
-target triple = "i386-mingw32"
+target triple = "i386-pc-mingw32"
 
 define dllexport x86_fastcallcc i32 @foo() nounwind  {
 entry:

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/dollar-name.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/dollar-name.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/dollar-name.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/dollar-name.ll Tue Oct 26 19:48:03 2010
@@ -7,7 +7,7 @@
 define i32 @"$foo"() nounwind {
 ; CHECK: movl	($bar),
 ; CHECK: addl	($qux),
-; CHECK: call	($hen)
+; CHECK: calll	($hen)
   %m = load i32* @"$bar"
   %n = load i32* @"$qux"
   %t = add i32 %m, %n

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/dyn-stackalloc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/dyn-stackalloc.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/dyn-stackalloc.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/dyn-stackalloc.ll Tue Oct 26 19:48:03 2010
@@ -1,6 +1,6 @@
-; RUN: llc < %s -march=x86 | not egrep {\\\$4294967289|-7}
-; RUN: llc < %s -march=x86 | egrep {\\\$4294967280|-16}
-; RUN: llc < %s -march=x86-64 | grep {\\-16}
+; RUN: llc < %s -mtriple=i686-linux | not egrep {\\\$4294967289|-7}
+; RUN: llc < %s -mtriple=i686-linux | egrep {\\\$4294967280|-16}
+; RUN: llc < %s -mtriple=x86_64-linux | grep {\\-16}
 
 define void @t() nounwind {
 A:

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/fast-isel-bc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/fast-isel-bc.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/fast-isel-bc.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/fast-isel-bc.ll Tue Oct 26 19:48:03 2010
@@ -1,19 +1,23 @@
-; RUN: llc < %s -O0 -regalloc=linearscan -march=x86-64 -mattr=+mmx | FileCheck %s
+; RUN: llc < %s -O0 -regalloc=linearscan -march=x86-64 -mattr=+mmx,+sse2 | FileCheck %s
 ; PR4684
 
 target datalayout =
 "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
 target triple = "x86_64-apple-darwin9.8"
 
-declare void @func2(<1 x i64>)
+declare void @func2(x86_mmx)
 
 define void @func1() nounwind {
 
 ; This isn't spectacular, but it's MMX code at -O0...
-; CHECK: movl $2, %eax
-; CHECK: movd %rax, %mm0
-; CHECK: movd %mm0, %rdi
+; CHECK:  movq2dq %mm0, %xmm0
+; For now, handling of x86_mmx parameters in fast Isel is unimplemented,
+; so we get pretty poor code.  The below is preferable.
+; CHEK: movl $2, %eax
+; CHEK: movd %rax, %mm0
+; CHEK: movd %mm0, %rdi
 
-        call void @func2(<1 x i64> <i64 2>)
+        %tmp0 = bitcast <2 x i32><i32 0, i32 2> to x86_mmx
+        call void @func2(x86_mmx %tmp0)
         ret void
 }

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/fast-isel-mem.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/fast-isel-mem.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/fast-isel-mem.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/fast-isel-mem.ll Tue Oct 26 19:48:03 2010
@@ -1,10 +1,8 @@
-; RUN: llc < %s -fast-isel -mtriple=i386-apple-darwin | \
-; RUN:   grep lazy_ptr, | count 2
-; RUN: llc < %s -fast-isel -march=x86 -relocation-model=static | \
-; RUN:   grep lea
+; RUN: llc < %s -fast-isel -mtriple=i386-apple-darwin | FileCheck %s
 
 @src = external global i32
 
+; rdar://6653118
 define i32 @loadgv() nounwind {
 entry:
 	%0 = load i32* @src, align 4
@@ -12,6 +10,14 @@
         %2 = add i32 %0, %1
         store i32 %2, i32* @src
 	ret i32 %2
+; This should fold one of the loads into the add.
+; CHECK: loadgv:
+; CHECK: 	movl	L_src$non_lazy_ptr, %ecx
+; CHECK: 	movl	(%ecx), %eax
+; CHECK: 	addl	(%ecx), %eax
+; CHECK: 	movl	%eax, (%ecx)
+; CHECK: 	ret
+
 }
 
 %stuff = type { i32 (...)** }
@@ -21,4 +27,8 @@
 entry:
 	store i32 (...)** getelementptr ([4 x i32 (...)*]* @LotsStuff, i32 0, i32 2), i32 (...)*** null, align 4
 	ret void
+; CHECK: _t:
+; CHECK:	movl	$0, %eax
+; CHECK:	movl	L_LotsStuff$non_lazy_ptr, %ecx
+
 }

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/ghc-cc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/ghc-cc.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/ghc-cc.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/ghc-cc.ll Tue Oct 26 19:48:03 2010
@@ -11,9 +11,9 @@
 entry:
   ; CHECK: movl {{[0-9]*}}(%esp), %ebx
   ; CHECK-NEXT: movl {{[0-9]*}}(%esp), %ebp
-  ; CHECK-NEXT: call addtwo
+  ; CHECK-NEXT: calll addtwo
   %0 = call cc 10 i32 @addtwo(i32 %a, i32 %b)
-  ; CHECK: call foo
+  ; CHECK: calll foo
   call void @foo() nounwind
   ret void
 }

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/global-sections.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/global-sections.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/global-sections.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/global-sections.ll Tue Oct 26 19:48:03 2010
@@ -20,7 +20,7 @@
 
 ; TODO: linux drops this into .rodata, we drop it into ".gnu.linkonce.r.G2"
 
-; DARWIN: .section __TEXT,__const_coal,coalesced,pure_instructions
+; DARWIN: .section __TEXT,__const_coal,coalesced
 ; DARWIN: _G2:
 ; DARWIN:    .long 42
 
@@ -85,7 +85,7 @@
 ; LINUX:   .byte	1
 ; LINUX:   .size	G6, 1
 
-; DARWIN:  .section __TEXT,__const_coal,coalesced,pure_instructions
+; DARWIN:  .section __TEXT,__const_coal,coalesced
 ; DARWIN:  .globl _G6
 ; DARWIN:  .weak_definition _G6
 ; DARWIN:_G6:

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/legalizedag_vec.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/legalizedag_vec.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/legalizedag_vec.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/legalizedag_vec.ll Tue Oct 26 19:48:03 2010
@@ -1,5 +1,4 @@
-; RUN: llc < %s -march=x86 -mattr=sse2 -disable-mmx -o %t
-; RUN: grep {call.*divdi3}  %t | count 2
+; RUN: llc < %s -march=x86 -mattr=sse2 -disable-mmx | FileCheck %s
 
 
 ; Test case for r63760 where we generate a legalization assert that an illegal
@@ -12,4 +11,7 @@
 define <2 x i64> @test_long_div(<2 x i64> %num, <2 x i64> %div) {
   %div.r = sdiv <2 x i64> %num, %div
   ret <2 x i64>  %div.r
-}                                     
+}
+
+; CHECK: call{{.*(divdi3|alldiv)}}
+; CHECK: call{{.*(divdi3|alldiv)}}

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/licm-nested.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/licm-nested.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/licm-nested.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/licm-nested.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=x86_64-apple-darwin -march=x86-64 < %s -stats -info-output-file - | grep machine-licm | grep 2
+; RUN: llc -mtriple=x86_64-apple-darwin -march=x86-64 < %s -o /dev/null -stats -info-output-file - | grep machine-licm | grep 3
 
 ; MachineLICM should be able to hoist the symbolic addresses out of
 ; the inner loops.

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/loop-strength-reduce4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/loop-strength-reduce4.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/loop-strength-reduce4.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/loop-strength-reduce4.ll Tue Oct 26 19:48:03 2010
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=x86 -relocation-model=static -mtriple=i686-apple-darwin | FileCheck %s -check-prefix=STATIC
-; RUN: llc < %s -march=x86 -relocation-model=pic | FileCheck %s -check-prefix=PIC
+; RUN: llc < %s -mtriple=i686-apple-darwin -relocation-model=static | FileCheck %s -check-prefix=STATIC
+; RUN: llc < %s -mtriple=i686-apple-darwin -relocation-model=pic | FileCheck %s -check-prefix=PIC
 
 ; By starting the IV at -64 instead of 0, a cmp is eliminated,
 ; as the flags from the add can be used directly.

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/lsr-reuse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/lsr-reuse.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/lsr-reuse.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/lsr-reuse.ll Tue Oct 26 19:48:03 2010
@@ -353,11 +353,11 @@
 
 ; CHECK: count_me_3:
 ; CHECK: call
-; CHECK: movsd   (%r15,%r13,8), %xmm0
-; CHECK: mulsd   (%r14,%r13,8), %xmm0
-; CHECK: movsd   %xmm0, (%r12,%r13,8)
-; CHECK: incq    %r13
-; CHECK: cmpq    %r13, %rbx
+; CHECK: movsd   (%r{{[^,]*}},%r{{[^,]*}},8), %xmm0
+; CHECK: mulsd   (%r{{[^,]*}},%r{{[^,]*}},8), %xmm0
+; CHECK: movsd   %xmm0, (%r{{[^,]*}},%r{{[^,]*}},8)
+; CHECK: incq    %r{{.*}}
+; CHECK: cmpq    %r{{.*}}, %r{{.*}}
 ; CHECK: jne
 
 declare void @use(i64)
@@ -389,7 +389,7 @@
 ; rdar://7657764
 
 ; CHECK: asd:
-; CHECK: BB9_5:
+; CHECK: BB9_4:
 ; CHECK-NEXT: addl  (%r{{[^,]*}},%rdi,4), %e
 ; CHECK-NEXT: incq  %rdi
 ; CHECK-NEXT: cmpq  %rdi, %r{{[^,]*}}
@@ -464,7 +464,7 @@
 
 ; And the one at %bb68, where we want to be sure to use superhero mode:
 
-; CHECK:      BB10_10:
+; CHECK:      BB10_7:
 ; CHECK-NEXT:   movaps  48(%r{{[^,]*}}), %xmm{{.*}}
 ; CHECK-NEXT:   mulps   %xmm{{.*}}, %xmm{{.*}}
 ; CHECK-NEXT:   movaps  32(%r{{[^,]*}}), %xmm{{.*}}
@@ -484,7 +484,6 @@
 ; CHECK-NEXT:   addq    $64, %r{{.*}}
 ; CHECK-NEXT:   addq    $64, %r{{.*}}
 ; CHECK-NEXT:   addq    $-16, %r{{.*}}
-; CHECK-NEXT: BB10_11:
 ; CHECK-NEXT:   cmpq    $15, %r{{.*}}
 ; CHECK-NEXT:   jg
 

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/lsr-wrap.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/lsr-wrap.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/lsr-wrap.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/lsr-wrap.ll Tue Oct 26 19:48:03 2010
@@ -3,7 +3,7 @@
 ; LSR would like to use a single IV for both of these, however it's
 ; not safe due to wraparound.
 
-; CHECK: addb  $-4, %r
+; CHECK: addb  $-4, %
 ; CHECK: decw  %
 
 @g_19 = common global i32 0                       ; <i32*> [#uses=2]

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/memcmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/memcmp.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/memcmp.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/memcmp.ll Tue Oct 26 19:48:03 2010
@@ -20,8 +20,8 @@
 return:                                           ; preds = %entry
   ret void
 ; CHECK: memcmp2:
-; CHECK: movw    (%rsi), %ax
-; CHECK: cmpw    %ax, (%rdi)
+; CHECK: movw    (%rdi), %ax
+; CHECK: cmpw    (%rsi), %ax
 }
 
 define void @memcmp2a(i8* %X, i32* nocapture %P) nounwind {
@@ -54,8 +54,8 @@
 return:                                           ; preds = %entry
   ret void
 ; CHECK: memcmp4:
-; CHECK: movl    (%rsi), %eax
-; CHECK: cmpl    %eax, (%rdi)
+; CHECK: movl    (%rdi), %eax
+; CHECK: cmpl    (%rsi), %eax
 }
 
 define void @memcmp4a(i8* %X, i32* nocapture %P) nounwind {
@@ -87,8 +87,8 @@
 return:                                           ; preds = %entry
   ret void
 ; CHECK: memcmp8:
-; CHECK: movq    (%rsi), %rax
-; CHECK: cmpq    %rax, (%rdi)
+; CHECK: movq    (%rdi), %rax
+; CHECK: cmpq    (%rsi), %rax
 }
 
 define void @memcmp8a(i8* %X, i32* nocapture %P) nounwind {

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/memmove-0.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/memmove-0.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/memmove-0.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/memmove-0.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 -mtriple=i686-pc-linux-gnu | grep {call	memcpy}
+; RUN: llc < %s -march=x86 -mtriple=i686-pc-linux-gnu | grep {calll	memcpy}
 
 declare void @llvm.memmove.i64(i8* %d, i8* %s, i64 %l, i32 %a)
 

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/memmove-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/memmove-1.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/memmove-1.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/memmove-1.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 -mtriple=i686-pc-linux-gnu | grep {call	memmove}
+; RUN: llc < %s -march=x86 -mtriple=i686-pc-linux-gnu | grep {calll	memmove}
 
 declare void @llvm.memmove.i64(i8* %d, i8* %s, i64 %l, i32 %a)
 

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/memmove-3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/memmove-3.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/memmove-3.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/memmove-3.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 -mtriple=i686-pc-linux-gnu | grep {call	memmove}
+; RUN: llc < %s -march=x86 -mtriple=i686-pc-linux-gnu | grep {calll	memmove}
 
 declare void @llvm.memmove.i64(i8* %d, i8* %s, i64 %l, i32 %a)
 

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/memset-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/memset-2.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/memset-2.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/memset-2.ll Tue Oct 26 19:48:03 2010
@@ -5,7 +5,7 @@
 define fastcc void @t1() nounwind {
 entry:
 ; CHECK: t1:
-; CHECK: call _memset
+; CHECK: calll _memset
   call void @llvm.memset.i32( i8* null, i8 0, i32 188, i32 1 ) nounwind
   unreachable
 }
@@ -13,7 +13,7 @@
 define fastcc void @t2(i8 signext %c) nounwind {
 entry:
 ; CHECK: t2:
-; CHECK: call _memset
+; CHECK: calll _memset
   call void @llvm.memset.i32( i8* undef, i8 %c, i32 76, i32 1 ) nounwind
   unreachable
 }

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/mingw-alloca.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/mingw-alloca.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/mingw-alloca.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/mingw-alloca.ll Tue Oct 26 19:48:03 2010
@@ -6,7 +6,7 @@
 define void @foo1(i32 %N) nounwind {
 entry:
 ; CHECK: _foo1:
-; CHECK: call __alloca
+; CHECK: calll __alloca
 	%tmp14 = alloca i32, i32 %N		; <i32*> [#uses=1]
 	call void @bar1( i32* %tmp14 )
 	ret void
@@ -19,7 +19,7 @@
 ; CHECK: _foo2:
 ; CHECK: andl $-16, %esp
 ; CHECK: pushl %eax
-; CHECK: call __alloca
+; CHECK: calll __alloca
 ; CHECK: movl	8028(%esp), %eax
 	%A2 = alloca [2000 x i32], align 16		; <[2000 x i32]*> [#uses=1]
 	%A2.sub = getelementptr [2000 x i32]* %A2, i32 0, i32 0		; <i32*> [#uses=1]

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/mmx-arg-passing.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/mmx-arg-passing.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/mmx-arg-passing.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/mmx-arg-passing.ll Tue Oct 26 19:48:03 2010
@@ -1,24 +1,27 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+mmx | grep mm0 | count 3
-; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+mmx | grep esp | count 1
+; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+mmx | grep mm0 | count 1
+; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+mmx | grep esp | count 2
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | grep xmm0
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | grep rdi
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | not grep movups
 ;
 ; On Darwin x86-32, v8i8, v4i16, v2i32 values are passed in MM[0-2].
-; On Darwin x86-32, v1i64 values are passed in memory.
+; On Darwin x86-32, v1i64 values are passed in memory.  In this example, they
+;                   are never moved into an MM register at all.
 ; On Darwin x86-64, v8i8, v4i16, v2i32 values are passed in XMM[0-7].
 ; On Darwin x86-64, v1i64 values are passed in 64-bit GPRs.
 
- at u1 = external global <8 x i8>
+ at u1 = external global x86_mmx
 
-define void @t1(<8 x i8> %v1) nounwind  {
-	store <8 x i8> %v1, <8 x i8>* @u1, align 8
+define void @t1(x86_mmx %v1) nounwind  {
+	store x86_mmx %v1, x86_mmx* @u1, align 8
 	ret void
 }
 
- at u2 = external global <1 x i64>
+ at u2 = external global x86_mmx
 
 define void @t2(<1 x i64> %v1) nounwind  {
-	store <1 x i64> %v1, <1 x i64>* @u2, align 8
+        %tmp = bitcast <1 x i64> %v1 to x86_mmx
+	store x86_mmx %tmp, x86_mmx* @u2, align 8
 	ret void
 }
+

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/mmx-arg-passing2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/mmx-arg-passing2.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/mmx-arg-passing2.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/mmx-arg-passing2.ll Tue Oct 26 19:48:03 2010
@@ -1,17 +1,21 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | grep movq2dq | count 1
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | grep movdq2q | count 2
+; Since the add is not an MMX add, we don't have a movq2dq any more.
 
 @g_v8qi = external global <8 x i8>
 
 define void @t1() nounwind  {
 	%tmp3 = load <8 x i8>* @g_v8qi, align 8
-	%tmp4 = tail call i32 (...)* @pass_v8qi( <8 x i8> %tmp3 ) nounwind
+        %tmp3a = bitcast <8 x i8> %tmp3 to x86_mmx
+	%tmp4 = tail call i32 (...)* @pass_v8qi( x86_mmx %tmp3a ) nounwind
 	ret void
 }
 
-define void @t2(<8 x i8> %v1, <8 x i8> %v2) nounwind  {
-       %tmp3 = add <8 x i8> %v1, %v2
-       %tmp4 = tail call i32 (...)* @pass_v8qi( <8 x i8> %tmp3 ) nounwind
+define void @t2(x86_mmx %v1, x86_mmx %v2) nounwind  {
+       %v1a = bitcast x86_mmx %v1 to <8 x i8>
+       %v2b = bitcast x86_mmx %v2 to <8 x i8>
+       %tmp3 = add <8 x i8> %v1a, %v2b
+       %tmp3a = bitcast <8 x i8> %tmp3 to x86_mmx
+       %tmp4 = tail call i32 (...)* @pass_v8qi( x86_mmx %tmp3a ) nounwind
        ret void
 }
 

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/mmx-arith.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/mmx-arith.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/mmx-arith.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/mmx-arith.ll Tue Oct 26 19:48:03 2010
@@ -1,131 +1,309 @@
 ; RUN: llc < %s -march=x86 -mattr=+mmx
 
 ;; A basic sanity check to make sure that MMX arithmetic actually compiles.
+;; First is a straight translation of the original with bitcasts as needed.
 
-define void @foo(<8 x i8>* %A, <8 x i8>* %B) {
+define void @foo(x86_mmx* %A, x86_mmx* %B) {
 entry:
-	%tmp1 = load <8 x i8>* %A		; <<8 x i8>> [#uses=1]
-	%tmp3 = load <8 x i8>* %B		; <<8 x i8>> [#uses=1]
-	%tmp4 = add <8 x i8> %tmp1, %tmp3		; <<8 x i8>> [#uses=2]
-	store <8 x i8> %tmp4, <8 x i8>* %A
-	%tmp7 = load <8 x i8>* %B		; <<8 x i8>> [#uses=1]
-	%tmp12 = tail call <8 x i8> @llvm.x86.mmx.padds.b( <8 x i8> %tmp4, <8 x i8> %tmp7 )		; <<8 x i8>> [#uses=2]
-	store <8 x i8> %tmp12, <8 x i8>* %A
-	%tmp16 = load <8 x i8>* %B		; <<8 x i8>> [#uses=1]
-	%tmp21 = tail call <8 x i8> @llvm.x86.mmx.paddus.b( <8 x i8> %tmp12, <8 x i8> %tmp16 )		; <<8 x i8>> [#uses=2]
-	store <8 x i8> %tmp21, <8 x i8>* %A
-	%tmp27 = load <8 x i8>* %B		; <<8 x i8>> [#uses=1]
-	%tmp28 = sub <8 x i8> %tmp21, %tmp27		; <<8 x i8>> [#uses=2]
-	store <8 x i8> %tmp28, <8 x i8>* %A
-	%tmp31 = load <8 x i8>* %B		; <<8 x i8>> [#uses=1]
-	%tmp36 = tail call <8 x i8> @llvm.x86.mmx.psubs.b( <8 x i8> %tmp28, <8 x i8> %tmp31 )		; <<8 x i8>> [#uses=2]
-	store <8 x i8> %tmp36, <8 x i8>* %A
-	%tmp40 = load <8 x i8>* %B		; <<8 x i8>> [#uses=1]
-	%tmp45 = tail call <8 x i8> @llvm.x86.mmx.psubus.b( <8 x i8> %tmp36, <8 x i8> %tmp40 )		; <<8 x i8>> [#uses=2]
-	store <8 x i8> %tmp45, <8 x i8>* %A
-	%tmp51 = load <8 x i8>* %B		; <<8 x i8>> [#uses=1]
-	%tmp52 = mul <8 x i8> %tmp45, %tmp51		; <<8 x i8>> [#uses=2]
-	store <8 x i8> %tmp52, <8 x i8>* %A
-	%tmp57 = load <8 x i8>* %B		; <<8 x i8>> [#uses=1]
-	%tmp58 = and <8 x i8> %tmp52, %tmp57		; <<8 x i8>> [#uses=2]
-	store <8 x i8> %tmp58, <8 x i8>* %A
-	%tmp63 = load <8 x i8>* %B		; <<8 x i8>> [#uses=1]
-	%tmp64 = or <8 x i8> %tmp58, %tmp63		; <<8 x i8>> [#uses=2]
-	store <8 x i8> %tmp64, <8 x i8>* %A
-	%tmp69 = load <8 x i8>* %B		; <<8 x i8>> [#uses=1]
-	%tmp70 = xor <8 x i8> %tmp64, %tmp69		; <<8 x i8>> [#uses=1]
-	store <8 x i8> %tmp70, <8 x i8>* %A
-	tail call void @llvm.x86.mmx.emms( )
-	ret void
-}
-
-define void @baz(<2 x i32>* %A, <2 x i32>* %B) {
-entry:
-	%tmp1 = load <2 x i32>* %A		; <<2 x i32>> [#uses=1]
-	%tmp3 = load <2 x i32>* %B		; <<2 x i32>> [#uses=1]
-	%tmp4 = add <2 x i32> %tmp1, %tmp3		; <<2 x i32>> [#uses=2]
-	store <2 x i32> %tmp4, <2 x i32>* %A
-	%tmp9 = load <2 x i32>* %B		; <<2 x i32>> [#uses=1]
-	%tmp10 = sub <2 x i32> %tmp4, %tmp9		; <<2 x i32>> [#uses=2]
-	store <2 x i32> %tmp10, <2 x i32>* %A
-	%tmp15 = load <2 x i32>* %B		; <<2 x i32>> [#uses=1]
-	%tmp16 = mul <2 x i32> %tmp10, %tmp15		; <<2 x i32>> [#uses=2]
-	store <2 x i32> %tmp16, <2 x i32>* %A
-	%tmp21 = load <2 x i32>* %B		; <<2 x i32>> [#uses=1]
-	%tmp22 = and <2 x i32> %tmp16, %tmp21		; <<2 x i32>> [#uses=2]
-	store <2 x i32> %tmp22, <2 x i32>* %A
-	%tmp27 = load <2 x i32>* %B		; <<2 x i32>> [#uses=1]
-	%tmp28 = or <2 x i32> %tmp22, %tmp27		; <<2 x i32>> [#uses=2]
-	store <2 x i32> %tmp28, <2 x i32>* %A
-	%tmp33 = load <2 x i32>* %B		; <<2 x i32>> [#uses=1]
-	%tmp34 = xor <2 x i32> %tmp28, %tmp33		; <<2 x i32>> [#uses=1]
-	store <2 x i32> %tmp34, <2 x i32>* %A
-	tail call void @llvm.x86.mmx.emms( )
-	ret void
-}
-
-define void @bar(<4 x i16>* %A, <4 x i16>* %B) {
-entry:
-	%tmp1 = load <4 x i16>* %A		; <<4 x i16>> [#uses=1]
-	%tmp3 = load <4 x i16>* %B		; <<4 x i16>> [#uses=1]
-	%tmp4 = add <4 x i16> %tmp1, %tmp3		; <<4 x i16>> [#uses=2]
-	store <4 x i16> %tmp4, <4 x i16>* %A
-	%tmp7 = load <4 x i16>* %B		; <<4 x i16>> [#uses=1]
-	%tmp12 = tail call <4 x i16> @llvm.x86.mmx.padds.w( <4 x i16> %tmp4, <4 x i16> %tmp7 )		; <<4 x i16>> [#uses=2]
-	store <4 x i16> %tmp12, <4 x i16>* %A
-	%tmp16 = load <4 x i16>* %B		; <<4 x i16>> [#uses=1]
-	%tmp21 = tail call <4 x i16> @llvm.x86.mmx.paddus.w( <4 x i16> %tmp12, <4 x i16> %tmp16 )		; <<4 x i16>> [#uses=2]
-	store <4 x i16> %tmp21, <4 x i16>* %A
-	%tmp27 = load <4 x i16>* %B		; <<4 x i16>> [#uses=1]
-	%tmp28 = sub <4 x i16> %tmp21, %tmp27		; <<4 x i16>> [#uses=2]
-	store <4 x i16> %tmp28, <4 x i16>* %A
-	%tmp31 = load <4 x i16>* %B		; <<4 x i16>> [#uses=1]
-	%tmp36 = tail call <4 x i16> @llvm.x86.mmx.psubs.w( <4 x i16> %tmp28, <4 x i16> %tmp31 )		; <<4 x i16>> [#uses=2]
-	store <4 x i16> %tmp36, <4 x i16>* %A
-	%tmp40 = load <4 x i16>* %B		; <<4 x i16>> [#uses=1]
-	%tmp45 = tail call <4 x i16> @llvm.x86.mmx.psubus.w( <4 x i16> %tmp36, <4 x i16> %tmp40 )		; <<4 x i16>> [#uses=2]
-	store <4 x i16> %tmp45, <4 x i16>* %A
-	%tmp51 = load <4 x i16>* %B		; <<4 x i16>> [#uses=1]
-	%tmp52 = mul <4 x i16> %tmp45, %tmp51		; <<4 x i16>> [#uses=2]
-	store <4 x i16> %tmp52, <4 x i16>* %A
-	%tmp55 = load <4 x i16>* %B		; <<4 x i16>> [#uses=1]
-	%tmp60 = tail call <4 x i16> @llvm.x86.mmx.pmulh.w( <4 x i16> %tmp52, <4 x i16> %tmp55 )		; <<4 x i16>> [#uses=2]
-	store <4 x i16> %tmp60, <4 x i16>* %A
-	%tmp64 = load <4 x i16>* %B		; <<4 x i16>> [#uses=1]
-	%tmp69 = tail call <2 x i32> @llvm.x86.mmx.pmadd.wd( <4 x i16> %tmp60, <4 x i16> %tmp64 )		; <<2 x i32>> [#uses=1]
-	%tmp70 = bitcast <2 x i32> %tmp69 to <4 x i16>		; <<4 x i16>> [#uses=2]
-	store <4 x i16> %tmp70, <4 x i16>* %A
-	%tmp75 = load <4 x i16>* %B		; <<4 x i16>> [#uses=1]
-	%tmp76 = and <4 x i16> %tmp70, %tmp75		; <<4 x i16>> [#uses=2]
-	store <4 x i16> %tmp76, <4 x i16>* %A
-	%tmp81 = load <4 x i16>* %B		; <<4 x i16>> [#uses=1]
-	%tmp82 = or <4 x i16> %tmp76, %tmp81		; <<4 x i16>> [#uses=2]
-	store <4 x i16> %tmp82, <4 x i16>* %A
-	%tmp87 = load <4 x i16>* %B		; <<4 x i16>> [#uses=1]
-	%tmp88 = xor <4 x i16> %tmp82, %tmp87		; <<4 x i16>> [#uses=1]
-	store <4 x i16> %tmp88, <4 x i16>* %A
-	tail call void @llvm.x86.mmx.emms( )
-	ret void
-}
-
-declare <8 x i8> @llvm.x86.mmx.padds.b(<8 x i8>, <8 x i8>)
-
-declare <8 x i8> @llvm.x86.mmx.paddus.b(<8 x i8>, <8 x i8>)
-
-declare <8 x i8> @llvm.x86.mmx.psubs.b(<8 x i8>, <8 x i8>)
-
-declare <8 x i8> @llvm.x86.mmx.psubus.b(<8 x i8>, <8 x i8>)
-
-declare <4 x i16> @llvm.x86.mmx.padds.w(<4 x i16>, <4 x i16>)
-
-declare <4 x i16> @llvm.x86.mmx.paddus.w(<4 x i16>, <4 x i16>)
+	%tmp1 = load x86_mmx* %A		; <x86_mmx> [#uses=1]
+	%tmp3 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+        %tmp1a = bitcast x86_mmx %tmp1 to <8 x i8>
+        %tmp3a = bitcast x86_mmx %tmp3 to <8 x i8>
+	%tmp4 = add <8 x i8> %tmp1a, %tmp3a		; <<8 x i8>> [#uses=2]
+        %tmp4a = bitcast <8 x i8> %tmp4 to x86_mmx
+	store x86_mmx %tmp4a, x86_mmx* %A
+	%tmp7 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+	%tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.b( x86_mmx %tmp4a, x86_mmx %tmp7 )		; <x86_mmx> [#uses=2]
+	store x86_mmx %tmp12, x86_mmx* %A
+	%tmp16 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+	%tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.b( x86_mmx %tmp12, x86_mmx %tmp16 )		; <x86_mmx> [#uses=2]
+	store x86_mmx %tmp21, x86_mmx* %A
+	%tmp27 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+        %tmp21a = bitcast x86_mmx %tmp21 to <8 x i8>
+        %tmp27a = bitcast x86_mmx %tmp27 to <8 x i8>
+	%tmp28 = sub <8 x i8> %tmp21a, %tmp27a		; <<8 x i8>> [#uses=2]
+        %tmp28a = bitcast <8 x i8> %tmp28 to x86_mmx
+	store x86_mmx %tmp28a, x86_mmx* %A
+	%tmp31 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+	%tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.b( x86_mmx %tmp28a, x86_mmx %tmp31 )		; <x86_mmx> [#uses=2]
+	store x86_mmx %tmp36, x86_mmx* %A
+	%tmp40 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+	%tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.b( x86_mmx %tmp36, x86_mmx %tmp40 )		; <x86_mmx> [#uses=2]
+	store x86_mmx %tmp45, x86_mmx* %A
+	%tmp51 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+        %tmp45a = bitcast x86_mmx %tmp45 to <8 x i8>
+        %tmp51a = bitcast x86_mmx %tmp51 to <8 x i8>
+	%tmp52 = mul <8 x i8> %tmp45a, %tmp51a		; <<8 x i8>> [#uses=2]
+        %tmp52a = bitcast <8 x i8> %tmp52 to x86_mmx
+	store x86_mmx %tmp52a, x86_mmx* %A
+	%tmp57 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+        %tmp57a = bitcast x86_mmx %tmp57 to <8 x i8>
+	%tmp58 = and <8 x i8> %tmp52, %tmp57a		; <<8 x i8>> [#uses=2]
+        %tmp58a = bitcast <8 x i8> %tmp58 to x86_mmx
+	store x86_mmx %tmp58a, x86_mmx* %A
+	%tmp63 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+        %tmp63a = bitcast x86_mmx %tmp63 to <8 x i8>
+	%tmp64 = or <8 x i8> %tmp58, %tmp63a		; <<8 x i8>> [#uses=2]
+        %tmp64a = bitcast <8 x i8> %tmp64 to x86_mmx
+	store x86_mmx %tmp64a, x86_mmx* %A
+	%tmp69 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+        %tmp69a = bitcast x86_mmx %tmp69 to <8 x i8>
+        %tmp64b = bitcast x86_mmx %tmp64a to <8 x i8>
+	%tmp70 = xor <8 x i8> %tmp64b, %tmp69a		; <<8 x i8>> [#uses=1]
+        %tmp70a = bitcast <8 x i8> %tmp70 to x86_mmx
+	store x86_mmx %tmp70a, x86_mmx* %A
+	tail call void @llvm.x86.mmx.emms( )
+	ret void
+}
 
-declare <4 x i16> @llvm.x86.mmx.psubs.w(<4 x i16>, <4 x i16>)
+define void @baz(x86_mmx* %A, x86_mmx* %B) {
+entry:
+	%tmp1 = load x86_mmx* %A		; <x86_mmx> [#uses=1]
+	%tmp3 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+        %tmp1a = bitcast x86_mmx %tmp1 to <2 x i32>
+        %tmp3a = bitcast x86_mmx %tmp3 to <2 x i32>
+	%tmp4 = add <2 x i32> %tmp1a, %tmp3a		; <<2 x i32>> [#uses=2]
+        %tmp4a = bitcast <2 x i32> %tmp4 to x86_mmx
+	store x86_mmx %tmp4a, x86_mmx* %A
+	%tmp9 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+        %tmp9a = bitcast x86_mmx %tmp9 to <2 x i32>
+	%tmp10 = sub <2 x i32> %tmp4, %tmp9a		; <<2 x i32>> [#uses=2]
+        %tmp10a = bitcast <2 x i32> %tmp10 to x86_mmx
+	store x86_mmx %tmp10a, x86_mmx* %A
+	%tmp15 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+        %tmp10b = bitcast x86_mmx %tmp10a to <2 x i32>
+        %tmp15a = bitcast x86_mmx %tmp15 to <2 x i32>
+	%tmp16 = mul <2 x i32> %tmp10b, %tmp15a		; <<2 x i32>> [#uses=2]
+        %tmp16a = bitcast <2 x i32> %tmp16 to x86_mmx
+	store x86_mmx %tmp16a, x86_mmx* %A
+	%tmp21 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+        %tmp16b = bitcast x86_mmx %tmp16a to <2 x i32>
+        %tmp21a = bitcast x86_mmx %tmp21 to <2 x i32>
+	%tmp22 = and <2 x i32> %tmp16b, %tmp21a		; <<2 x i32>> [#uses=2]
+        %tmp22a = bitcast <2 x i32> %tmp22 to x86_mmx
+	store x86_mmx %tmp22a, x86_mmx* %A
+	%tmp27 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+        %tmp22b = bitcast x86_mmx %tmp22a to <2 x i32>
+        %tmp27a = bitcast x86_mmx %tmp27 to <2 x i32>
+	%tmp28 = or <2 x i32> %tmp22b, %tmp27a		; <<2 x i32>> [#uses=2]
+        %tmp28a = bitcast <2 x i32> %tmp28 to x86_mmx
+	store x86_mmx %tmp28a, x86_mmx* %A
+	%tmp33 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+        %tmp28b = bitcast x86_mmx %tmp28a to <2 x i32>
+        %tmp33a = bitcast x86_mmx %tmp33 to <2 x i32>
+	%tmp34 = xor <2 x i32> %tmp28b, %tmp33a		; <<2 x i32>> [#uses=1]
+        %tmp34a = bitcast <2 x i32> %tmp34 to x86_mmx
+	store x86_mmx %tmp34a, x86_mmx* %A
+	tail call void @llvm.x86.mmx.emms( )
+	ret void
+}
 
-declare <4 x i16> @llvm.x86.mmx.psubus.w(<4 x i16>, <4 x i16>)
+define void @bar(x86_mmx* %A, x86_mmx* %B) {
+entry:
+	%tmp1 = load x86_mmx* %A		; <x86_mmx> [#uses=1]
+	%tmp3 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+        %tmp1a = bitcast x86_mmx %tmp1 to <4 x i16>
+        %tmp3a = bitcast x86_mmx %tmp3 to <4 x i16>
+	%tmp4 = add <4 x i16> %tmp1a, %tmp3a		; <<4 x i16>> [#uses=2]
+        %tmp4a = bitcast <4 x i16> %tmp4 to x86_mmx
+	store x86_mmx %tmp4a, x86_mmx* %A
+	%tmp7 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+	%tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.w( x86_mmx %tmp4a, x86_mmx %tmp7 )		; <x86_mmx> [#uses=2]
+	store x86_mmx %tmp12, x86_mmx* %A
+	%tmp16 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+	%tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.w( x86_mmx %tmp12, x86_mmx %tmp16 )		; <x86_mmx> [#uses=2]
+	store x86_mmx %tmp21, x86_mmx* %A
+	%tmp27 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+        %tmp21a = bitcast x86_mmx %tmp21 to <4 x i16>
+        %tmp27a = bitcast x86_mmx %tmp27 to <4 x i16>
+	%tmp28 = sub <4 x i16> %tmp21a, %tmp27a		; <<4 x i16>> [#uses=2]
+        %tmp28a = bitcast <4 x i16> %tmp28 to x86_mmx
+	store x86_mmx %tmp28a, x86_mmx* %A
+	%tmp31 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+	%tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.w( x86_mmx %tmp28a, x86_mmx %tmp31 )		; <x86_mmx> [#uses=2]
+	store x86_mmx %tmp36, x86_mmx* %A
+	%tmp40 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+	%tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.w( x86_mmx %tmp36, x86_mmx %tmp40 )		; <x86_mmx> [#uses=2]
+	store x86_mmx %tmp45, x86_mmx* %A
+	%tmp51 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+        %tmp45a = bitcast x86_mmx %tmp45 to <4 x i16>
+        %tmp51a = bitcast x86_mmx %tmp51 to <4 x i16>
+	%tmp52 = mul <4 x i16> %tmp45a, %tmp51a		; <<4 x i16>> [#uses=2]
+        %tmp52a = bitcast <4 x i16> %tmp52 to x86_mmx
+	store x86_mmx %tmp52a, x86_mmx* %A
+	%tmp55 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+	%tmp60 = tail call x86_mmx @llvm.x86.mmx.pmulh.w( x86_mmx %tmp52a, x86_mmx %tmp55 )		; <x86_mmx> [#uses=2]
+	store x86_mmx %tmp60, x86_mmx* %A
+	%tmp64 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+	%tmp69 = tail call x86_mmx @llvm.x86.mmx.pmadd.wd( x86_mmx %tmp60, x86_mmx %tmp64 )		; <x86_mmx> [#uses=1]
+	%tmp70 = bitcast x86_mmx %tmp69 to x86_mmx		; <x86_mmx> [#uses=2]
+	store x86_mmx %tmp70, x86_mmx* %A
+	%tmp75 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+        %tmp70a = bitcast x86_mmx %tmp70 to <4 x i16>
+        %tmp75a = bitcast x86_mmx %tmp75 to <4 x i16>
+	%tmp76 = and <4 x i16> %tmp70a, %tmp75a		; <<4 x i16>> [#uses=2]
+        %tmp76a = bitcast <4 x i16> %tmp76 to x86_mmx
+	store x86_mmx %tmp76a, x86_mmx* %A
+	%tmp81 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+        %tmp76b = bitcast x86_mmx %tmp76a to <4 x i16>
+        %tmp81a = bitcast x86_mmx %tmp81 to <4 x i16>
+	%tmp82 = or <4 x i16> %tmp76b, %tmp81a		; <<4 x i16>> [#uses=2]
+        %tmp82a = bitcast <4 x i16> %tmp82 to x86_mmx
+	store x86_mmx %tmp82a, x86_mmx* %A
+	%tmp87 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+        %tmp82b = bitcast x86_mmx %tmp82a to <4 x i16>
+        %tmp87a = bitcast x86_mmx %tmp87 to <4 x i16>
+	%tmp88 = xor <4 x i16> %tmp82b, %tmp87a		; <<4 x i16>> [#uses=1]
+        %tmp88a = bitcast <4 x i16> %tmp88 to x86_mmx
+	store x86_mmx %tmp88a, x86_mmx* %A
+	tail call void @llvm.x86.mmx.emms( )
+	ret void
+}
 
-declare <4 x i16> @llvm.x86.mmx.pmulh.w(<4 x i16>, <4 x i16>)
+;; The following is modified to use MMX intrinsics everywhere they work.
 
-declare <2 x i32> @llvm.x86.mmx.pmadd.wd(<4 x i16>, <4 x i16>)
+define void @fooa(x86_mmx* %A, x86_mmx* %B) {
+entry:
+	%tmp1 = load x86_mmx* %A		; <x86_mmx> [#uses=1]
+	%tmp3 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+	%tmp4 = tail call x86_mmx @llvm.x86.mmx.padd.b( x86_mmx %tmp1, x86_mmx %tmp3 )		; <x86_mmx> [#uses=2]
+	store x86_mmx %tmp4, x86_mmx* %A
+	%tmp7 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+	%tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.b( x86_mmx %tmp4, x86_mmx %tmp7 )		; <x86_mmx> [#uses=2]
+	store x86_mmx %tmp12, x86_mmx* %A
+	%tmp16 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+	%tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.b( x86_mmx %tmp12, x86_mmx %tmp16 )		; <x86_mmx> [#uses=2]
+	store x86_mmx %tmp21, x86_mmx* %A
+	%tmp27 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+	%tmp28 = tail call x86_mmx @llvm.x86.mmx.psub.b( x86_mmx %tmp21, x86_mmx %tmp27 )		; <x86_mmx> [#uses=2]
+	store x86_mmx %tmp28, x86_mmx* %A
+	%tmp31 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+	%tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.b( x86_mmx %tmp28, x86_mmx %tmp31 )		; <x86_mmx> [#uses=2]
+	store x86_mmx %tmp36, x86_mmx* %A
+	%tmp40 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+	%tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.b( x86_mmx %tmp36, x86_mmx %tmp40 )		; <x86_mmx> [#uses=2]
+	store x86_mmx %tmp45, x86_mmx* %A
+	%tmp51 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+        %tmp51a = bitcast x86_mmx %tmp51 to i64
+        %tmp51aa = bitcast i64 %tmp51a to <8 x i8>
+        %tmp51b = bitcast x86_mmx %tmp45 to <8 x i8>
+	%tmp52 = mul <8 x i8> %tmp51b, %tmp51aa		; <<8 x i8>> [#uses=1]
+        %tmp52a = bitcast <8 x i8> %tmp52 to i64
+        %tmp52aa = bitcast i64 %tmp52a to x86_mmx
+	store x86_mmx %tmp52aa, x86_mmx* %A
+	%tmp57 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+	%tmp58 = tail call x86_mmx @llvm.x86.mmx.pand( x86_mmx %tmp51, x86_mmx %tmp57 )		; <x86_mmx> [#uses=2]
+	store x86_mmx %tmp58, x86_mmx* %A
+	%tmp63 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+	%tmp64 = tail call x86_mmx @llvm.x86.mmx.por( x86_mmx %tmp58, x86_mmx %tmp63 )		; <x86_mmx> [#uses=2]	
+	store x86_mmx %tmp64, x86_mmx* %A
+	%tmp69 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+	%tmp70 = tail call x86_mmx @llvm.x86.mmx.pxor( x86_mmx %tmp64, x86_mmx %tmp69 )		; <x86_mmx> [#uses=2]
+	store x86_mmx %tmp70, x86_mmx* %A
+	tail call void @llvm.x86.mmx.emms( )
+	ret void
+}
+
+define void @baza(x86_mmx* %A, x86_mmx* %B) {
+entry:
+	%tmp1 = load x86_mmx* %A		; <x86_mmx> [#uses=1]
+	%tmp3 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+	%tmp4 = tail call x86_mmx @llvm.x86.mmx.padd.d( x86_mmx %tmp1, x86_mmx %tmp3 )		; <x86_mmx> [#uses=2]
+	store x86_mmx %tmp4, x86_mmx* %A
+	%tmp9 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+	%tmp10 = tail call x86_mmx @llvm.x86.mmx.psub.d( x86_mmx %tmp4, x86_mmx %tmp9 )		; <x86_mmx> [#uses=2]
+	store x86_mmx %tmp10, x86_mmx* %A
+	%tmp15 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+        %tmp10a = bitcast x86_mmx %tmp10 to <2 x i32>
+        %tmp15a = bitcast x86_mmx %tmp15 to <2 x i32>
+	%tmp16 = mul <2 x i32> %tmp10a, %tmp15a		; <x86_mmx> [#uses=2]
+        %tmp16a = bitcast <2 x i32> %tmp16 to x86_mmx
+	store x86_mmx %tmp16a, x86_mmx* %A
+	%tmp21 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+	%tmp22 = tail call x86_mmx @llvm.x86.mmx.pand( x86_mmx %tmp16a, x86_mmx %tmp21 )		; <x86_mmx> [#uses=2]
+	store x86_mmx %tmp22, x86_mmx* %A
+	%tmp27 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+	%tmp28 = tail call x86_mmx @llvm.x86.mmx.por( x86_mmx %tmp22, x86_mmx %tmp27 )		; <x86_mmx> [#uses=2]
+	store x86_mmx %tmp28, x86_mmx* %A
+	%tmp33 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+	%tmp34 = tail call x86_mmx @llvm.x86.mmx.pxor( x86_mmx %tmp28, x86_mmx %tmp33 )		; <x86_mmx> [#uses=2]
+	store x86_mmx %tmp34, x86_mmx* %A
+	tail call void @llvm.x86.mmx.emms( )
+	ret void
+}
+
+define void @bara(x86_mmx* %A, x86_mmx* %B) {
+entry:
+	%tmp1 = load x86_mmx* %A		; <x86_mmx> [#uses=1]
+	%tmp3 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+	%tmp4 = tail call x86_mmx @llvm.x86.mmx.padd.w( x86_mmx %tmp1, x86_mmx %tmp3 )		; <x86_mmx> [#uses=2]
+	store x86_mmx %tmp4, x86_mmx* %A
+	%tmp7 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+	%tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.w( x86_mmx %tmp4, x86_mmx %tmp7 )		; <x86_mmx> [#uses=2]
+	store x86_mmx %tmp12, x86_mmx* %A
+	%tmp16 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+	%tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.w( x86_mmx %tmp12, x86_mmx %tmp16 )		; <x86_mmx> [#uses=2]
+	store x86_mmx %tmp21, x86_mmx* %A
+	%tmp27 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+	%tmp28 = tail call x86_mmx @llvm.x86.mmx.psub.w( x86_mmx %tmp21, x86_mmx %tmp27 )		; <x86_mmx> [#uses=2]
+	store x86_mmx %tmp28, x86_mmx* %A
+	%tmp31 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+	%tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.w( x86_mmx %tmp28, x86_mmx %tmp31 )		; <x86_mmx> [#uses=2]
+	store x86_mmx %tmp36, x86_mmx* %A
+	%tmp40 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+	%tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.w( x86_mmx %tmp36, x86_mmx %tmp40 )		; <x86_mmx> [#uses=2]
+	store x86_mmx %tmp45, x86_mmx* %A
+	%tmp51 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+	%tmp52 = tail call x86_mmx @llvm.x86.mmx.pmull.w( x86_mmx %tmp45, x86_mmx %tmp51 )		; <x86_mmx> [#uses=2]
+	store x86_mmx %tmp52, x86_mmx* %A
+	%tmp55 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+	%tmp60 = tail call x86_mmx @llvm.x86.mmx.pmulh.w( x86_mmx %tmp52, x86_mmx %tmp55 )		; <x86_mmx> [#uses=2]
+	store x86_mmx %tmp60, x86_mmx* %A
+	%tmp64 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+	%tmp69 = tail call x86_mmx @llvm.x86.mmx.pmadd.wd( x86_mmx %tmp60, x86_mmx %tmp64 )		; <x86_mmx> [#uses=1]
+	%tmp70 = bitcast x86_mmx %tmp69 to x86_mmx		; <x86_mmx> [#uses=2]
+	store x86_mmx %tmp70, x86_mmx* %A
+	%tmp75 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+	%tmp76 = tail call x86_mmx @llvm.x86.mmx.pand( x86_mmx %tmp70, x86_mmx %tmp75 )		; <x86_mmx> [#uses=2]
+	store x86_mmx %tmp76, x86_mmx* %A
+	%tmp81 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+	%tmp82 = tail call x86_mmx @llvm.x86.mmx.por( x86_mmx %tmp76, x86_mmx %tmp81 )		; <x86_mmx> [#uses=2]
+	store x86_mmx %tmp82, x86_mmx* %A
+	%tmp87 = load x86_mmx* %B		; <x86_mmx> [#uses=1]
+	%tmp88 = tail call x86_mmx @llvm.x86.mmx.pxor( x86_mmx %tmp82, x86_mmx %tmp87 )		; <x86_mmx> [#uses=2]
+	store x86_mmx %tmp88, x86_mmx* %A
+	tail call void @llvm.x86.mmx.emms( )
+	ret void
+}
+
+declare x86_mmx @llvm.x86.mmx.paddus.b(x86_mmx, x86_mmx)
+
+declare x86_mmx @llvm.x86.mmx.psubus.b(x86_mmx, x86_mmx)
+
+declare x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx, x86_mmx)
+
+declare x86_mmx @llvm.x86.mmx.psubus.w(x86_mmx, x86_mmx)
+
+declare x86_mmx @llvm.x86.mmx.pmulh.w(x86_mmx, x86_mmx)
+
+declare x86_mmx @llvm.x86.mmx.pmadd.wd(x86_mmx, x86_mmx)
 
 declare void @llvm.x86.mmx.emms()
+
+declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.padds.b(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.padds.w(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.padds.d(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.psubs.b(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.psubs.w(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.psubs.d(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.psub.b(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.psub.w(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.psub.d(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.pmull.w(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.pand(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.por(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.pxor(x86_mmx, x86_mmx)
+

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/mmx-bitcast-to-i64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/mmx-bitcast-to-i64.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/mmx-bitcast-to-i64.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/mmx-bitcast-to-i64.ll Tue Oct 26 19:48:03 2010
@@ -1,26 +1,31 @@
 ; RUN: llc < %s -march=x86-64 | grep movd | count 4
 
-define i64 @foo(<1 x i64>* %p) {
-  %t = load <1 x i64>* %p
-  %u = add <1 x i64> %t, %t
-  %s = bitcast <1 x i64> %u to i64
+define i64 @foo(x86_mmx* %p) {
+  %t = load x86_mmx* %p
+  %u = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %t, x86_mmx %t)
+  %s = bitcast x86_mmx %u to i64
   ret i64 %s
 }
-define i64 @goo(<2 x i32>* %p) {
-  %t = load <2 x i32>* %p
-  %u = add <2 x i32> %t, %t
-  %s = bitcast <2 x i32> %u to i64
+define i64 @goo(x86_mmx* %p) {
+  %t = load x86_mmx* %p
+  %u = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %t, x86_mmx %t)
+  %s = bitcast x86_mmx %u to i64
   ret i64 %s
 }
-define i64 @hoo(<4 x i16>* %p) {
-  %t = load <4 x i16>* %p
-  %u = add <4 x i16> %t, %t
-  %s = bitcast <4 x i16> %u to i64
+define i64 @hoo(x86_mmx* %p) {
+  %t = load x86_mmx* %p
+  %u = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %t, x86_mmx %t)
+  %s = bitcast x86_mmx %u to i64
   ret i64 %s
 }
-define i64 @ioo(<8 x i8>* %p) {
-  %t = load <8 x i8>* %p
-  %u = add <8 x i8> %t, %t
-  %s = bitcast <8 x i8> %u to i64
+define i64 @ioo(x86_mmx* %p) {
+  %t = load x86_mmx* %p
+  %u = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %t, x86_mmx %t)
+  %s = bitcast x86_mmx %u to i64
   ret i64 %s
 }
+
+declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx)

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/mmx-insert-element.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/mmx-insert-element.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/mmx-insert-element.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/mmx-insert-element.ll Tue Oct 26 19:48:03 2010
@@ -1,7 +1,9 @@
-; RUN: llc < %s -march=x86 -mattr=+mmx | not grep movq
-; RUN: llc < %s -march=x86 -mattr=+mmx | grep psllq
+; RUN: llc < %s -march=x86 -mattr=+mmx,+sse2 | grep movq
+; RUN: llc < %s -march=x86 -mattr=+mmx,+sse2 | grep pshufd
+; This is not an MMX operation; promoted to XMM.
 
-define <2 x i32> @qux(i32 %A) nounwind {
+define x86_mmx @qux(i32 %A) nounwind {
 	%tmp3 = insertelement <2 x i32> < i32 0, i32 undef >, i32 %A, i32 1		; <<2 x i32>> [#uses=1]
-	ret <2 x i32> %tmp3
+        %tmp4 = bitcast <2 x i32> %tmp3 to x86_mmx
+	ret x86_mmx %tmp4
 }

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/mmx-pinsrw.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/mmx-pinsrw.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/mmx-pinsrw.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/mmx-pinsrw.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 -mattr=+mmx | grep pinsrw | count 1
+; RUN: llc < %s -march=x86 -mattr=+mmx,+sse2 | grep pinsrw | count 1
 ; PR2562
 
 external global i16		; <i16*>:0 [#uses=1]

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/mmx-punpckhdq.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/mmx-punpckhdq.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/mmx-punpckhdq.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/mmx-punpckhdq.ll Tue Oct 26 19:48:03 2010
@@ -1,6 +1,9 @@
-; RUN: llc < %s -march=x86 -mattr=+mmx | grep punpckhdq | count 1
+; RUN: llc < %s -march=x86 -mattr=+mmx,+sse42 -mtriple=x86_64-apple-darwin10 | FileCheck %s
+; There are no MMX operations in bork; promoted to XMM.
 
 define void @bork(<1 x i64>* %x) {
+; CHECK: bork
+; CHECK: pextrd
 entry:
 	%tmp2 = load <1 x i64>* %x		; <<1 x i64>> [#uses=1]
 	%tmp6 = bitcast <1 x i64> %tmp2 to <2 x i32>		; <<2 x i32>> [#uses=1]
@@ -11,4 +14,18 @@
 	ret void
 }
 
+; pork uses MMX.
+
+define void @pork(x86_mmx* %x) {
+; CHECK: pork
+; CHECK: punpckhdq
+entry:
+	%tmp2 = load x86_mmx* %x		; <x86_mmx> [#uses=1]
+        %tmp9 = tail call x86_mmx @llvm.x86.mmx.punpckhdq (x86_mmx %tmp2, x86_mmx %tmp2)
+	store x86_mmx %tmp9, x86_mmx* %x
+	tail call void @llvm.x86.mmx.emms( )
+	ret void
+}
+
+declare x86_mmx @llvm.x86.mmx.punpckhdq(x86_mmx, x86_mmx)
 declare void @llvm.x86.mmx.emms()

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/mmx-shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/mmx-shift.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/mmx-shift.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/mmx-shift.ll Tue Oct 26 19:48:03 2010
@@ -5,28 +5,28 @@
 
 define i64 @t1(<1 x i64> %mm1) nounwind  {
 entry:
-	%tmp6 = tail call <1 x i64> @llvm.x86.mmx.pslli.q( <1 x i64> %mm1, i32 32 )		; <<1 x i64>> [#uses=1]
-	%retval1112 = bitcast <1 x i64> %tmp6 to i64		; <i64> [#uses=1]
+        %tmp = bitcast <1 x i64> %mm1 to x86_mmx
+	%tmp6 = tail call x86_mmx @llvm.x86.mmx.pslli.q( x86_mmx %tmp, i32 32 )		; <x86_mmx> [#uses=1]
+        %retval1112 = bitcast x86_mmx %tmp6 to i64
 	ret i64 %retval1112
 }
 
-declare <1 x i64> @llvm.x86.mmx.pslli.q(<1 x i64>, i32) nounwind readnone 
+declare x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx, i32) nounwind readnone 
 
-define i64 @t2(<2 x i32> %mm1, <2 x i32> %mm2) nounwind  {
+define i64 @t2(x86_mmx %mm1, x86_mmx %mm2) nounwind  {
 entry:
-	%tmp7 = tail call <2 x i32> @llvm.x86.mmx.psra.d( <2 x i32> %mm1, <2 x i32> %mm2 ) nounwind readnone 		; <<2 x i32>> [#uses=1]
-	%retval1112 = bitcast <2 x i32> %tmp7 to i64		; <i64> [#uses=1]
+	%tmp7 = tail call x86_mmx @llvm.x86.mmx.psra.d( x86_mmx %mm1, x86_mmx %mm2 ) nounwind readnone 		; <x86_mmx> [#uses=1]
+        %retval1112 = bitcast x86_mmx %tmp7 to i64
 	ret i64 %retval1112
 }
 
-declare <2 x i32> @llvm.x86.mmx.psra.d(<2 x i32>, <2 x i32>) nounwind readnone 
+declare x86_mmx @llvm.x86.mmx.psra.d(x86_mmx, x86_mmx) nounwind readnone 
 
-define i64 @t3(<1 x i64> %mm1, i32 %bits) nounwind  {
+define i64 @t3(x86_mmx %mm1, i32 %bits) nounwind  {
 entry:
-	%tmp6 = bitcast <1 x i64> %mm1 to <4 x i16>		; <<4 x i16>> [#uses=1]
-	%tmp8 = tail call <4 x i16> @llvm.x86.mmx.psrli.w( <4 x i16> %tmp6, i32 %bits ) nounwind readnone 		; <<4 x i16>> [#uses=1]
-	%retval1314 = bitcast <4 x i16> %tmp8 to i64		; <i64> [#uses=1]
+	%tmp8 = tail call x86_mmx @llvm.x86.mmx.psrli.w( x86_mmx %mm1, i32 %bits ) nounwind readnone 		; <x86_mmx> [#uses=1]
+        %retval1314 = bitcast x86_mmx %tmp8 to i64
 	ret i64 %retval1314
 }
 
-declare <4 x i16> @llvm.x86.mmx.psrli.w(<4 x i16>, i32) nounwind readnone 
+declare x86_mmx @llvm.x86.mmx.psrli.w(x86_mmx, i32) nounwind readnone 

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/mmx-shuffle.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/mmx-shuffle.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/mmx-shuffle.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/mmx-shuffle.ll Tue Oct 26 19:48:03 2010
@@ -22,8 +22,10 @@
 	%tmp542 = bitcast <2 x i32> %tmp529 to <4 x i16>		; <<4 x i16>> [#uses=1]
 	%tmp543 = add <4 x i16> %tmp542, < i16 0, i16 16448, i16 24672, i16 28784 >		; <<4 x i16>> [#uses=1]
 	%tmp555 = bitcast <4 x i16> %tmp543 to <8 x i8>		; <<8 x i8>> [#uses=1]
-	tail call void @llvm.x86.mmx.maskmovq( <8 x i8> zeroinitializer, <8 x i8> %tmp555, i8* null )
+        %tmp556 = bitcast <8 x i8> %tmp555 to x86_mmx
+        %tmp557 = bitcast <8 x i8> zeroinitializer to x86_mmx
+	tail call void @llvm.x86.mmx.maskmovq( x86_mmx %tmp557, x86_mmx %tmp556, i8* null )
 	ret void
 }
 
-declare void @llvm.x86.mmx.maskmovq(<8 x i8>, <8 x i8>, i8*)
+declare void @llvm.x86.mmx.maskmovq(x86_mmx, x86_mmx, i8*)

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/mmx-vzmovl-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/mmx-vzmovl-2.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/mmx-vzmovl-2.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/mmx-vzmovl-2.ll Tue Oct 26 19:48:03 2010
@@ -1,10 +1,10 @@
-; RUN: llc < %s -march=x86-64 -mattr=+mmx | grep pxor
-; RUN: llc < %s -march=x86-64 -mattr=+mmx | grep punpckldq
+; RUN: llc < %s -march=x86-64 -mattr=+mmx,+sse2 | grep pxor
+; RUN: llc < %s -march=x86-64 -mattr=+mmx,+sse2 | grep punpckldq
 
 	%struct.vS1024 = type { [8 x <4 x i32>] }
 	%struct.vS512 = type { [4 x <4 x i32>] }
 
-declare <1 x i64> @llvm.x86.mmx.psrli.q(<1 x i64>, i32) nounwind readnone
+declare x86_mmx @llvm.x86.mmx.psrli.q(x86_mmx, i32) nounwind readnone
 
 define void @t() nounwind {
 entry:
@@ -12,14 +12,18 @@
 
 bb554:		; preds = %bb554, %entry
 	%sum.0.reg2mem.0 = phi <1 x i64> [ %tmp562, %bb554 ], [ zeroinitializer, %entry ]		; <<1 x i64>> [#uses=1]
-	%0 = load <1 x i64>* null, align 8		; <<1 x i64>> [#uses=2]
-	%1 = bitcast <1 x i64> %0 to <2 x i32>		; <<2 x i32>> [#uses=1]
+	%0 = load x86_mmx* null, align 8		; <<1 x i64>> [#uses=2]
+	%1 = bitcast x86_mmx %0 to <2 x i32>		; <<2 x i32>> [#uses=1]
 	%tmp555 = and <2 x i32> %1, < i32 -1, i32 0 >		; <<2 x i32>> [#uses=1]
-	%2 = bitcast <2 x i32> %tmp555 to <1 x i64>		; <<1 x i64>> [#uses=1]
-	%3 = call <1 x i64> @llvm.x86.mmx.psrli.q(<1 x i64> %0, i32 32) nounwind readnone		; <<1 x i64>> [#uses=1]
+	%2 = bitcast <2 x i32> %tmp555 to x86_mmx		; <<1 x i64>> [#uses=1]
+	%3 = call x86_mmx @llvm.x86.mmx.psrli.q(x86_mmx %0, i32 32) nounwind readnone		; <<1 x i64>> [#uses=1]
         store <1 x i64> %sum.0.reg2mem.0, <1 x i64>* null
-	%tmp558 = add <1 x i64> %sum.0.reg2mem.0, %2		; <<1 x i64>> [#uses=1]
-	%4 = call <1 x i64> @llvm.x86.mmx.psrli.q(<1 x i64> %tmp558, i32 32) nounwind readnone		; <<1 x i64>> [#uses=1]
-	%tmp562 = add <1 x i64> %4, %3		; <<1 x i64>> [#uses=1]
+        %tmp3 = bitcast x86_mmx %2 to <1 x i64>
+	%tmp558 = add <1 x i64> %sum.0.reg2mem.0, %tmp3		; <<1 x i64>> [#uses=1]
+        %tmp5 = bitcast <1 x i64> %tmp558 to x86_mmx
+	%4 = call x86_mmx @llvm.x86.mmx.psrli.q(x86_mmx %tmp5, i32 32) nounwind readnone		; <<1 x i64>> [#uses=1]
+        %tmp6 = bitcast x86_mmx %4 to <1 x i64>
+        %tmp7 = bitcast x86_mmx %3 to <1 x i64>
+	%tmp562 = add <1 x i64> %tmp6, %tmp7		; <<1 x i64>> [#uses=1]
 	br label %bb554
 }

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/mmx-vzmovl.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/mmx-vzmovl.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/mmx-vzmovl.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/mmx-vzmovl.ll Tue Oct 26 19:48:03 2010
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=x86-64 -mattr=+mmx | grep movd
-; RUN: llc < %s -march=x86-64 -mattr=+mmx | grep movq
+; RUN: llc < %s -march=x86-64 -mattr=+mmx,+sse2 | grep movq | count 2
+; There are no MMX operations here; this is promoted to XMM.
 
 define void @foo(<1 x i64>* %a, <1 x i64>* %b) nounwind {
 entry:

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/movgs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/movgs.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/movgs.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/movgs.ll Tue Oct 26 19:48:03 2010
@@ -1,8 +1,57 @@
-; RUN: llc < %s -march=x86 | grep gs
+; RUN: llc < %s -march=x86 -mattr=sse41 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -march=x86-64 -mattr=sse41 | FileCheck %s --check-prefix=X64
 
-define i32 @foo() nounwind readonly {
+define i32 @test1() nounwind readonly {
 entry:
 	%tmp = load i32* addrspace(256)* getelementptr (i32* addrspace(256)* inttoptr (i32 72 to i32* addrspace(256)*), i32 31)		; <i32*> [#uses=1]
 	%tmp1 = load i32* %tmp		; <i32> [#uses=1]
 	ret i32 %tmp1
 }
+; X32: test1:
+; X32: 	movl	%gs:196, %eax
+; X32: 	movl	(%eax), %eax
+; X32: 	ret
+
+; X64: test1:
+; X64: 	movq	%gs:320, %rax
+; X64: 	movl	(%rax), %eax
+; X64: 	ret
+
+define i64 @test2(void (i8*)* addrspace(256)* %tmp8) nounwind {
+entry:
+  %tmp9 = load void (i8*)* addrspace(256)* %tmp8, align 8
+  tail call void %tmp9(i8* undef) nounwind optsize
+  ret i64 0
+}
+
+; rdar://8453210
+; X32: test2:
+; X32: movl	{{.*}}(%esp), %eax
+; X32: calll	*%gs:(%eax)
+
+; X64: test2:
+; X64: callq	*%gs:(%rdi)
+
+
+
+
+define <2 x i64> @pmovsxwd_1(i64 addrspace(256)* %p) nounwind readonly {
+entry:
+  %0 = load i64 addrspace(256)* %p
+  %tmp2 = insertelement <2 x i64> zeroinitializer, i64 %0, i32 0
+  %1 = bitcast <2 x i64> %tmp2 to <8 x i16>
+  %2 = tail call <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16> %1) nounwind readnone
+  %3 = bitcast <4 x i32> %2 to <2 x i64>
+  ret <2 x i64> %3
+  
+; X32: pmovsxwd_1:
+; X32: 	movl	4(%esp), %eax
+; X32: 	pmovsxwd	%gs:(%eax), %xmm0
+; X32: 	ret
+
+; X64: pmovsxwd_1:
+; X64:	pmovsxwd	%gs:(%rdi), %xmm0
+; X64:	ret
+}
+
+declare <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16>) nounwind readnone

Removed: llvm/branches/wendling/eh/test/CodeGen/X86/narrow_op-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/narrow_op-2.ll?rev=117424&view=auto
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/narrow_op-2.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/narrow_op-2.ll (removed)
@@ -1,25 +0,0 @@
-; RUN: llc < %s -march=x86-64 | FileCheck %s
-
-	%struct.bf = type { i64, i16, i16, i32 }
-@bfi = external global %struct.bf*
-
-define void @t1() nounwind ssp {
-entry:
-
-; CHECK: andb	$-2, 10(
-; CHECK: andb	$-3, 10(
-
-	%0 = load %struct.bf** @bfi, align 8
-	%1 = getelementptr %struct.bf* %0, i64 0, i32 1
-	%2 = bitcast i16* %1 to i32*
-	%3 = load i32* %2, align 1
-	%4 = and i32 %3, -65537
-	store i32 %4, i32* %2, align 1
-	%5 = load %struct.bf** @bfi, align 8
-	%6 = getelementptr %struct.bf* %5, i64 0, i32 1
-	%7 = bitcast i16* %6 to i32*
-	%8 = load i32* %7, align 1
-	%9 = and i32 %8, -131073
-	store i32 %9, i32* %7, align 1
-	ret void
-}

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/phi-immediate-factoring.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/phi-immediate-factoring.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/phi-immediate-factoring.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/phi-immediate-factoring.ll Tue Oct 26 19:48:03 2010
@@ -1,10 +1,10 @@
+; RUN: llc < %s -march=x86 -stats |& grep {Number of blocks eliminated} | grep 6
 ; PR1296
-; RUN: llc < %s -march=x86 | grep {movl	\$1} | count 1
 
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
 target triple = "i686-apple-darwin8"
 
-define i32 @foo(i32 %A, i32 %B, i32 %C) {
+define i32 @foo(i32 %A, i32 %B, i32 %C) nounwind {
 entry:
 	switch i32 %A, label %out [
 		 i32 1, label %bb

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/phys_subreg_coalesce-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/phys_subreg_coalesce-2.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/phys_subreg_coalesce-2.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/phys_subreg_coalesce-2.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 | grep mov | count 5
+; RUN: llc < %s -march=x86 | grep mov | count 4
 ; PR2659
 
 define i32 @binomial(i32 %n, i32 %k) nounwind {

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/pic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/pic.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/pic.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/pic.ll Tue Oct 26 19:48:03 2010
@@ -12,7 +12,7 @@
     ret void
     
 ; LINUX:    test0:
-; LINUX:	call	.L0$pb
+; LINUX:	calll	.L0$pb
 ; LINUX-NEXT: .L0$pb:
 ; LINUX-NEXT:	popl
 ; LINUX:	addl	$_GLOBAL_OFFSET_TABLE_+(.L{{.*}}-.L0$pb),
@@ -34,7 +34,7 @@
     ret void
     
 ; LINUX: test1:
-; LINUX:	call	.L1$pb
+; LINUX:	calll	.L1$pb
 ; LINUX-NEXT: .L1$pb:
 ; LINUX-NEXT:	popl
 ; LINUX:	addl	$_GLOBAL_OFFSET_TABLE_+(.L{{.*}}-.L1$pb), %eax
@@ -54,12 +54,12 @@
 ; LINUX: test2:
 ; LINUX: 	pushl	%ebx
 ; LINUX-NEXT: 	subl	$8, %esp
-; LINUX-NEXT: 	call	.L2$pb
+; LINUX-NEXT: 	calll	.L2$pb
 ; LINUX-NEXT: .L2$pb:
 ; LINUX-NEXT: 	popl	%ebx
 ; LINUX: 	addl	$_GLOBAL_OFFSET_TABLE_+(.L{{.*}}-.L2$pb), %ebx
 ; LINUX: 	movl	$40, (%esp)
-; LINUX: 	call	malloc@PLT
+; LINUX: 	calll	malloc@PLT
 ; LINUX: 	addl	$8, %esp
 ; LINUX: 	popl	%ebx
 ; LINUX: 	ret
@@ -75,13 +75,13 @@
     call void(...)* %tmp1()
     ret void
 ; LINUX: test3:
-; LINUX: 	call	.L3$pb
+; LINUX: 	calll	.L3$pb
 ; LINUX-NEXT: .L3$pb:
 ; LINUX: 	popl
 ; LINUX: 	addl	$_GLOBAL_OFFSET_TABLE_+(.L{{.*}}-.L3$pb),
 ; LINUX: 	movl	pfoo@GOT(%esi),
-; LINUX: 	call	afoo@PLT
-; LINUX: 	call	*
+; LINUX: 	calll	afoo@PLT
+; LINUX: 	calll	*
 }
 
 declare void(...)* @afoo(...)
@@ -91,10 +91,10 @@
     call void(...)* @foo()
     ret void
 ; LINUX: test4:
-; LINUX: call	.L4$pb
+; LINUX: calll	.L4$pb
 ; LINUX: popl	%ebx
 ; LINUX: addl	$_GLOBAL_OFFSET_TABLE_+(.L{{.*}}-.L4$pb), %ebx
-; LINUX: call	foo@PLT
+; LINUX: calll	foo@PLT
 }
 
 declare void @foo(...)
@@ -112,7 +112,7 @@
     ret void
     
 ; LINUX: test5:
-; LINUX: 	call	.L5$pb
+; LINUX: 	calll	.L5$pb
 ; LINUX-NEXT: .L5$pb:
 ; LINUX-NEXT: 	popl	%eax
 ; LINUX: 	addl	$_GLOBAL_OFFSET_TABLE_+(.L{{.*}}-.L5$pb), %eax
@@ -134,7 +134,7 @@
 ; LINUX: .LCPI6_0:
 
 ; LINUX: test6:
-; LINUX:    call .L6$pb
+; LINUX:    calll .L6$pb
 ; LINUX: .L6$pb:
 ; LINUX:    addl	$_GLOBAL_OFFSET_TABLE_+(.L{{.*}}-.L6$pb), 
 ; LINUX:    fldl	.LCPI6_0@GOTOFF(
@@ -186,7 +186,7 @@
     ret void
     
 ; LINUX: test7:
-; LINUX:   call	.L7$pb
+; LINUX:   calll	.L7$pb
 ; LINUX: .L7$pb:
 ; LINUX:   addl	$_GLOBAL_OFFSET_TABLE_+(.L{{.*}}-.L7$pb),
 ; LINUX:   .LJTI7_0@GOTOFF(

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/postra-licm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/postra-licm.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/postra-licm.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/postra-licm.ll Tue Oct 26 19:48:03 2010
@@ -68,7 +68,7 @@
 
 bb23:                                             ; preds = %imix_test.exit
   unreachable
-; X86-32: %bb26.preheader.bb28_crit_edge
+; X86-32: %bb26.preheader
 ; X86-32: movl -16(%ebp),
 ; X86-32-NEXT: .align 4
 ; X86-32-NEXT: %bb28

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/pr2659.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/pr2659.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/pr2659.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/pr2659.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 -mtriple=i686-apple-darwin9.4.0 | grep movl | count 5
+; RUN: llc < %s -march=x86 -mtriple=i686-apple-darwin9.4.0 | grep movl | count 4
 ; RUN: llc < %s -march=x86 -mtriple=i686-apple-darwin9.4.0 | FileCheck %s
 ; PR2659
 
@@ -14,10 +14,11 @@
   %cmp44 = icmp eq i32 %k, 0            ; <i1> [#uses=1]
   br i1 %cmp44, label %afterfor, label %forbody
 
-; CHECK: %forcond.preheader.forbody_crit_edge
+; CHECK: %forcond.preheader
 ; CHECK: movl $1
 ; CHECK-NOT: xorl
-; CHECK-NEXT: movl
+; CHECK-NOT: movl
+; CHECK-NEXT: je
 
 ifthen:         ; preds = %entry
   ret i32 0

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/pr3522.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/pr3522.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/pr3522.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/pr3522.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 -stats |& not grep machine-sink
+; RUN: llc < %s -march=x86 -stats |& not grep {instructions sunk}
 ; PR3522
 
 target triple = "i386-pc-linux-gnu"

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/shift-folding.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/shift-folding.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/shift-folding.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/shift-folding.ll Tue Oct 26 19:48:03 2010
@@ -1,21 +1,21 @@
 ; RUN: llc < %s -march=x86 | \
 ; RUN:   grep {s\[ah\]\[rl\]l} | count 1
 
-define i32* @test1(i32* %P, i32 %X) {
+define i32* @test1(i32* %P, i32 %X) nounwind {
         %Y = lshr i32 %X, 2             ; <i32> [#uses=1]
         %gep.upgrd.1 = zext i32 %Y to i64               ; <i64> [#uses=1]
         %P2 = getelementptr i32* %P, i64 %gep.upgrd.1           ; <i32*> [#uses=1]
         ret i32* %P2
 }
 
-define i32* @test2(i32* %P, i32 %X) {
+define i32* @test2(i32* %P, i32 %X) nounwind {
         %Y = shl i32 %X, 2              ; <i32> [#uses=1]
         %gep.upgrd.2 = zext i32 %Y to i64               ; <i64> [#uses=1]
         %P2 = getelementptr i32* %P, i64 %gep.upgrd.2           ; <i32*> [#uses=1]
         ret i32* %P2
 }
 
-define i32* @test3(i32* %P, i32 %X) {
+define i32* @test3(i32* %P, i32 %X) nounwind {
         %Y = ashr i32 %X, 2             ; <i32> [#uses=1]
         %P2 = getelementptr i32* %P, i32 %Y             ; <i32*> [#uses=1]
         ret i32* %P2

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/sibcall-3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/sibcall-3.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/sibcall-3.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/sibcall-3.ll Tue Oct 26 19:48:03 2010
@@ -3,7 +3,7 @@
 
 define void @t1(i8* inreg %dst, i8* inreg %src, i8* inreg %len) nounwind {
 ; CHECK: t1:
-; CHECK: call 0
+; CHECK: calll 0
   tail call void null(i8* inreg %dst, i8* inreg %src, i8* inreg %len) nounwind
   ret void
 }

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/sibcall.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/sibcall.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/sibcall.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/sibcall.ll Tue Oct 26 19:48:03 2010
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=x86    -mattr=+sse2 -asm-verbose=false | FileCheck %s -check-prefix=32
-; RUN: llc < %s -march=x86-64 -mattr=+sse2 -asm-verbose=false | FileCheck %s -check-prefix=64
+; RUN: llc < %s -mtriple=i686-linux   -mattr=+sse2 -asm-verbose=false | FileCheck %s -check-prefix=32
+; RUN: llc < %s -mtriple=x86_64-linux -mattr=+sse2 -asm-verbose=false | FileCheck %s -check-prefix=64
 
 define void @t1(i32 %x) nounwind ssp {
 entry:
@@ -43,7 +43,7 @@
 define void @t4(void (i32)* nocapture %x) nounwind ssp {
 entry:
 ; 32: t4:
-; 32: call *
+; 32: calll *
 ; FIXME: gcc can generate a tailcall for this. But it's tricky.
 
 ; 64: t4:
@@ -69,7 +69,7 @@
 define i32 @t6(i32 %x) nounwind ssp {
 entry:
 ; 32: t6:
-; 32: call {{_?}}t6
+; 32: calll {{_?}}t6
 ; 32: jmp {{_?}}bar
 
 ; 64: t6:
@@ -106,7 +106,7 @@
 define signext i16 @t8() nounwind ssp {
 entry:
 ; 32: t8:
-; 32: call {{_?}}bar3
+; 32: calll {{_?}}bar3
 
 ; 64: t8:
 ; 64: callq {{_?}}bar3
@@ -119,7 +119,7 @@
 define signext i16 @t9(i32 (i32)* nocapture %x) nounwind ssp {
 entry:
 ; 32: t9:
-; 32: call *
+; 32: calll *
 
 ; 64: t9:
 ; 64: callq *
@@ -131,7 +131,7 @@
 define void @t10() nounwind ssp {
 entry:
 ; 32: t10:
-; 32: call
+; 32: calll
 
 ; 64: t10:
 ; 64: callq
@@ -203,12 +203,12 @@
 define %struct.ns* @t13(%struct.cp* %yy) nounwind ssp {
 ; 32: t13:
 ; 32-NOT: jmp
-; 32: call
+; 32: calll
 ; 32: ret
 
 ; 64: t13:
 ; 64-NOT: jmp
-; 64: call
+; 64: callq
 ; 64: ret
 entry:
   %0 = tail call fastcc %struct.ns* @foo7(%struct.cp* byval align 4 %yy, i8 signext 0) nounwind
@@ -246,7 +246,7 @@
 
 define void @t15(%struct.foo* noalias sret %agg.result) nounwind  {
 ; 32: t15:
-; 32: call {{_?}}f
+; 32: calll {{_?}}f
 ; 32: ret $4
 
 ; 64: t15:
@@ -261,7 +261,7 @@
 define void @t16() nounwind ssp {
 entry:
 ; 32: t16:
-; 32: call {{_?}}bar4
+; 32: calll {{_?}}bar4
 ; 32: fstp
 
 ; 64: t16:
@@ -291,7 +291,7 @@
 define void @t18() nounwind ssp {
 entry:
 ; 32: t18:
-; 32: call {{_?}}bar6
+; 32: calll {{_?}}bar6
 ; 32: fstp %st(0)
 
 ; 64: t18:
@@ -307,7 +307,7 @@
 entry:
 ; CHECK: t19:
 ; CHECK: andl $-32
-; CHECK: call {{_?}}foo
+; CHECK: calll {{_?}}foo
   tail call void @foo() nounwind
   ret void
 }
@@ -321,7 +321,7 @@
 define double @t20(double %x) nounwind {
 entry:
 ; 32: t20:
-; 32: call {{_?}}foo20
+; 32: calll {{_?}}foo20
 ; 32: fldl (%esp)
 
 ; 64: t20:

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/sink-hoist.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/sink-hoist.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/sink-hoist.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/sink-hoist.ll Tue Oct 26 19:48:03 2010
@@ -6,10 +6,11 @@
 ; that it's conditionally evaluated.
 
 ; CHECK: foo:
-; CHECK:      divsd
 ; CHECK-NEXT: testb $1, %dil
-; CHECK-NEXT: jne
+; CHECK-NEXT: je
 ; CHECK-NEXT: divsd
+; CHECK-NEXT: ret
+; CHECK:      divsd
 
 define double @foo(double %x, double %y, i1 %c) nounwind {
   %a = fdiv double %x, 3.2
@@ -18,6 +19,24 @@
   ret double %z
 }
 
+; Make sure the critical edge is broken so the divsd is sunken below
+; the conditional branch.
+; rdar://8454886
+
+; CHECK: split:
+; CHECK-NEXT: testb $1, %dil
+; CHECK-NEXT: je
+; CHECK-NEXT: divsd
+; CHECK-NEXT: ret
+; CHECK:      movaps
+; CHECK-NEXT: ret
+define double @split(double %x, double %y, i1 %c) nounwind {
+  %a = fdiv double %x, 3.2
+  %z = select i1 %c, double %a, double %y
+  ret double %z
+}
+
+
 ; Hoist floating-point constant-pool loads out of loops.
 
 ; CHECK: bar:
@@ -68,9 +87,9 @@
 ; Codegen should hoist and CSE these constants.
 
 ; CHECK: vv:
-; CHECK: LCPI2_0(%rip), %xmm0
-; CHECK: LCPI2_1(%rip), %xmm1
-; CHECK: LCPI2_2(%rip), %xmm2
+; CHECK: LCPI3_0(%rip), %xmm0
+; CHECK: LCPI3_1(%rip), %xmm1
+; CHECK: LCPI3_2(%rip), %xmm2
 ; CHECK: align
 ; CHECK-NOT: LCPI
 ; CHECK: ret

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/sse2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/sse2.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/sse2.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/sse2.ll Tue Oct 26 19:48:03 2010
@@ -1,14 +1,14 @@
 ; Tests for SSE2 and below, without SSE3+.
 ; RUN: llc < %s -mtriple=i386-apple-darwin10 -mcpu=pentium4 -O3 | FileCheck %s
 
-define void @t1(<2 x double>* %r, <2 x double>* %A, double %B) nounwind  {
+define void @test1(<2 x double>* %r, <2 x double>* %A, double %B) nounwind  {
 	%tmp3 = load <2 x double>* %A, align 16
 	%tmp7 = insertelement <2 x double> undef, double %B, i32 0
 	%tmp9 = shufflevector <2 x double> %tmp3, <2 x double> %tmp7, <2 x i32> < i32 2, i32 1 >
 	store <2 x double> %tmp9, <2 x double>* %r, align 16
 	ret void
         
-; CHECK: t1:
+; CHECK: test1:
 ; CHECK: 	movl	8(%esp), %eax
 ; CHECK-NEXT: 	movapd	(%eax), %xmm0
 ; CHECK-NEXT: 	movlpd	12(%esp), %xmm0
@@ -17,14 +17,14 @@
 ; CHECK-NEXT: 	ret
 }
 
-define void @t2(<2 x double>* %r, <2 x double>* %A, double %B) nounwind  {
+define void @test2(<2 x double>* %r, <2 x double>* %A, double %B) nounwind  {
 	%tmp3 = load <2 x double>* %A, align 16
 	%tmp7 = insertelement <2 x double> undef, double %B, i32 0
 	%tmp9 = shufflevector <2 x double> %tmp3, <2 x double> %tmp7, <2 x i32> < i32 0, i32 2 >
 	store <2 x double> %tmp9, <2 x double>* %r, align 16
 	ret void
         
-; CHECK: t2:
+; CHECK: test2:
 ; CHECK: 	movl	8(%esp), %eax
 ; CHECK-NEXT: 	movapd	(%eax), %xmm0
 ; CHECK-NEXT: 	movhpd	12(%esp), %xmm0
@@ -32,3 +32,163 @@
 ; CHECK-NEXT: 	movapd	%xmm0, (%eax)
 ; CHECK-NEXT: 	ret
 }
+
+
+define void @test3(<4 x float>* %res, <4 x float>* %A, <4 x float>* %B) nounwind {
+	%tmp = load <4 x float>* %B		; <<4 x float>> [#uses=2]
+	%tmp3 = load <4 x float>* %A		; <<4 x float>> [#uses=2]
+	%tmp.upgrd.1 = extractelement <4 x float> %tmp3, i32 0		; <float> [#uses=1]
+	%tmp7 = extractelement <4 x float> %tmp, i32 0		; <float> [#uses=1]
+	%tmp8 = extractelement <4 x float> %tmp3, i32 1		; <float> [#uses=1]
+	%tmp9 = extractelement <4 x float> %tmp, i32 1		; <float> [#uses=1]
+	%tmp10 = insertelement <4 x float> undef, float %tmp.upgrd.1, i32 0		; <<4 x float>> [#uses=1]
+	%tmp11 = insertelement <4 x float> %tmp10, float %tmp7, i32 1		; <<4 x float>> [#uses=1]
+	%tmp12 = insertelement <4 x float> %tmp11, float %tmp8, i32 2		; <<4 x float>> [#uses=1]
+	%tmp13 = insertelement <4 x float> %tmp12, float %tmp9, i32 3		; <<4 x float>> [#uses=1]
+	store <4 x float> %tmp13, <4 x float>* %res
+	ret void
+; CHECK: @test3
+; CHECK: 	unpcklps	
+}
+
+define void @test4(<4 x float> %X, <4 x float>* %res) nounwind {
+	%tmp5 = shufflevector <4 x float> %X, <4 x float> undef, <4 x i32> < i32 2, i32 6, i32 3, i32 7 >		; <<4 x float>> [#uses=1]
+	store <4 x float> %tmp5, <4 x float>* %res
+	ret void
+; CHECK: @test4
+; CHECK: 	pshufd	$50, %xmm0, %xmm0
+}
+
+define <4 x i32> @test5(i8** %ptr) nounwind {
+; CHECK: test5:
+; CHECK: pxor
+; CHECK: punpcklbw
+; CHECK: punpcklwd
+
+	%tmp = load i8** %ptr		; <i8*> [#uses=1]
+	%tmp.upgrd.1 = bitcast i8* %tmp to float*		; <float*> [#uses=1]
+	%tmp.upgrd.2 = load float* %tmp.upgrd.1		; <float> [#uses=1]
+	%tmp.upgrd.3 = insertelement <4 x float> undef, float %tmp.upgrd.2, i32 0		; <<4 x float>> [#uses=1]
+	%tmp9 = insertelement <4 x float> %tmp.upgrd.3, float 0.000000e+00, i32 1		; <<4 x float>> [#uses=1]
+	%tmp10 = insertelement <4 x float> %tmp9, float 0.000000e+00, i32 2		; <<4 x float>> [#uses=1]
+	%tmp11 = insertelement <4 x float> %tmp10, float 0.000000e+00, i32 3		; <<4 x float>> [#uses=1]
+	%tmp21 = bitcast <4 x float> %tmp11 to <16 x i8>		; <<16 x i8>> [#uses=1]
+	%tmp22 = shufflevector <16 x i8> %tmp21, <16 x i8> zeroinitializer, <16 x i32> < i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23 >		; <<16 x i8>> [#uses=1]
+	%tmp31 = bitcast <16 x i8> %tmp22 to <8 x i16>		; <<8 x i16>> [#uses=1]
+	%tmp.upgrd.4 = shufflevector <8 x i16> zeroinitializer, <8 x i16> %tmp31, <8 x i32> < i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11 >		; <<8 x i16>> [#uses=1]
+	%tmp36 = bitcast <8 x i16> %tmp.upgrd.4 to <4 x i32>		; <<4 x i32>> [#uses=1]
+	ret <4 x i32> %tmp36
+}
+
+define void @test6(<4 x float>* %res, <4 x float>* %A) nounwind {
+        %tmp1 = load <4 x float>* %A            ; <<4 x float>> [#uses=1]
+        %tmp2 = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> < i32 0, i32 5, i32 6, i32 7 >          ; <<4 x float>> [#uses=1]
+        store <4 x float> %tmp2, <4 x float>* %res
+        ret void
+        
+; CHECK: test6:
+; CHECK: 	movaps	(%eax), %xmm0
+; CHECK:	movaps	%xmm0, (%eax)
+}
+
+define void @test7() nounwind {
+        bitcast <4 x i32> zeroinitializer to <4 x float>                ; <<4 x float>>:1 [#uses=1]
+        shufflevector <4 x float> %1, <4 x float> zeroinitializer, <4 x i32> zeroinitializer         ; <<4 x float>>:2 [#uses=1]
+        store <4 x float> %2, <4 x float>* null
+        ret void
+        
+; CHECK: test7:
+; CHECK:	pxor	%xmm0, %xmm0
+; CHECK:	movaps	%xmm0, 0
+}
+
+@x = external global [4 x i32]
+
+define <2 x i64> @test8() nounwind {
+	%tmp = load i32* getelementptr ([4 x i32]* @x, i32 0, i32 0)		; <i32> [#uses=1]
+	%tmp3 = load i32* getelementptr ([4 x i32]* @x, i32 0, i32 1)		; <i32> [#uses=1]
+	%tmp5 = load i32* getelementptr ([4 x i32]* @x, i32 0, i32 2)		; <i32> [#uses=1]
+	%tmp7 = load i32* getelementptr ([4 x i32]* @x, i32 0, i32 3)		; <i32> [#uses=1]
+	%tmp.upgrd.1 = insertelement <4 x i32> undef, i32 %tmp, i32 0		; <<4 x i32>> [#uses=1]
+	%tmp13 = insertelement <4 x i32> %tmp.upgrd.1, i32 %tmp3, i32 1		; <<4 x i32>> [#uses=1]
+	%tmp14 = insertelement <4 x i32> %tmp13, i32 %tmp5, i32 2		; <<4 x i32>> [#uses=1]
+	%tmp15 = insertelement <4 x i32> %tmp14, i32 %tmp7, i32 3		; <<4 x i32>> [#uses=1]
+	%tmp16 = bitcast <4 x i32> %tmp15 to <2 x i64>		; <<2 x i64>> [#uses=1]
+	ret <2 x i64> %tmp16
+; CHECK: test8:
+; CHECK: movups	(%eax), %xmm0
+}
+
+define <4 x float> @test9(i32 %dummy, float %a, float %b, float %c, float %d) nounwind {
+	%tmp = insertelement <4 x float> undef, float %a, i32 0		; <<4 x float>> [#uses=1]
+	%tmp11 = insertelement <4 x float> %tmp, float %b, i32 1		; <<4 x float>> [#uses=1]
+	%tmp12 = insertelement <4 x float> %tmp11, float %c, i32 2		; <<4 x float>> [#uses=1]
+	%tmp13 = insertelement <4 x float> %tmp12, float %d, i32 3		; <<4 x float>> [#uses=1]
+	ret <4 x float> %tmp13
+; CHECK: test9:
+; CHECK: movups	8(%esp), %xmm0
+}
+
+define <4 x float> @test10(float %a, float %b, float %c, float %d) nounwind {
+	%tmp = insertelement <4 x float> undef, float %a, i32 0		; <<4 x float>> [#uses=1]
+	%tmp11 = insertelement <4 x float> %tmp, float %b, i32 1		; <<4 x float>> [#uses=1]
+	%tmp12 = insertelement <4 x float> %tmp11, float %c, i32 2		; <<4 x float>> [#uses=1]
+	%tmp13 = insertelement <4 x float> %tmp12, float %d, i32 3		; <<4 x float>> [#uses=1]
+	ret <4 x float> %tmp13
+; CHECK: test10:
+; CHECK: movaps	4(%esp), %xmm0
+}
+
+define <2 x double> @test11(double %a, double %b) nounwind {
+	%tmp = insertelement <2 x double> undef, double %a, i32 0		; <<2 x double>> [#uses=1]
+	%tmp7 = insertelement <2 x double> %tmp, double %b, i32 1		; <<2 x double>> [#uses=1]
+	ret <2 x double> %tmp7
+; CHECK: test11:
+; CHECK: movapd	4(%esp), %xmm0
+}
+
+define void @test12() nounwind {
+        %tmp1 = load <4 x float>* null          ; <<4 x float>> [#uses=2]
+        %tmp2 = shufflevector <4 x float> %tmp1, <4 x float> < float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00 >, <4 x i32> < i32 0, i32 1, i32 6, i32 7 >             ; <<4 x float>> [#uses=1]
+        %tmp3 = shufflevector <4 x float> %tmp1, <4 x float> zeroinitializer, <4 x i32> < i32 2, i32 3, i32 6, i32 7 >                ; <<4 x float>> [#uses=1]
+        %tmp4 = fadd <4 x float> %tmp2, %tmp3            ; <<4 x float>> [#uses=1]
+        store <4 x float> %tmp4, <4 x float>* null
+        ret void
+; CHECK: test12:
+; CHECK: movhlps
+; CHECK: shufps
+}
+
+define void @test13(<4 x float>* %res, <4 x float>* %A, <4 x float>* %B, <4 x float>* %C) nounwind {
+        %tmp3 = load <4 x float>* %B            ; <<4 x float>> [#uses=1]
+        %tmp5 = load <4 x float>* %C            ; <<4 x float>> [#uses=1]
+        %tmp11 = shufflevector <4 x float> %tmp3, <4 x float> %tmp5, <4 x i32> < i32 1, i32 4, i32 1, i32 5 >         ; <<4 x float>> [#uses=1]
+        store <4 x float> %tmp11, <4 x float>* %res
+        ret void
+; CHECK: test13
+; CHECK: shufps	$69, (%eax), %xmm0
+; CHECK: pshufd	$-40, %xmm0, %xmm0
+}
+
+define <4 x float> @test14(<4 x float>* %x, <4 x float>* %y) nounwind {
+        %tmp = load <4 x float>* %y             ; <<4 x float>> [#uses=2]
+        %tmp5 = load <4 x float>* %x            ; <<4 x float>> [#uses=2]
+        %tmp9 = fadd <4 x float> %tmp5, %tmp             ; <<4 x float>> [#uses=1]
+        %tmp21 = fsub <4 x float> %tmp5, %tmp            ; <<4 x float>> [#uses=1]
+        %tmp27 = shufflevector <4 x float> %tmp9, <4 x float> %tmp21, <4 x i32> < i32 0, i32 1, i32 4, i32 5 >                ; <<4 x float>> [#uses=1]
+        ret <4 x float> %tmp27
+; CHECK: test14:
+; CHECK: 	addps	%xmm1, %xmm0
+; CHECK: 	subps	%xmm1, %xmm2
+; CHECK: 	movlhps	%xmm2, %xmm0
+}
+
+define <4 x float> @test15(<4 x float>* %x, <4 x float>* %y) nounwind {
+entry:
+        %tmp = load <4 x float>* %y             ; <<4 x float>> [#uses=1]
+        %tmp3 = load <4 x float>* %x            ; <<4 x float>> [#uses=1]
+        %tmp4 = shufflevector <4 x float> %tmp3, <4 x float> %tmp, <4 x i32> < i32 2, i32 3, i32 6, i32 7 >           ; <<4 x float>> [#uses=1]
+        ret <4 x float> %tmp4
+; CHECK: test15:
+; CHECK: 	movhlps	%xmm1, %xmm0
+}

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/sse3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/sse3.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/sse3.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/sse3.ll Tue Oct 26 19:48:03 2010
@@ -169,7 +169,7 @@
         ret void
 ; X64: 	t10:
 ; X64: 		pextrw	$4, %xmm0, %eax
-; X64: 		movlhps	%xmm1, %xmm1
+; X64: 		unpcklpd %xmm1, %xmm1
 ; X64: 		pshuflw	$8, %xmm1, %xmm1
 ; X64: 		pinsrw	$2, %eax, %xmm1
 ; X64: 		pextrw	$6, %xmm0, %eax
@@ -260,3 +260,18 @@
 ; X64: 		pinsrw	$1, %eax, %xmm0
 ; X64: 		ret
 }
+
+; rdar://8520311
+define <4 x i32> @t17() nounwind {
+entry:
+; X64: t17:
+; X64:          movddup (%rax), %xmm0
+  %tmp1 = load <4 x float>* undef, align 16
+  %tmp2 = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
+  %tmp3 = load <4 x float>* undef, align 16
+  %tmp4 = shufflevector <4 x float> %tmp2, <4 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 0, i32 1>
+  %tmp5 = bitcast <4 x float> %tmp3 to <4 x i32>
+  %tmp6 = shufflevector <4 x i32> %tmp5, <4 x i32> undef, <4 x i32> <i32 undef, i32 undef, i32 0, i32 1>
+  %tmp7 = and <4 x i32> %tmp6, <i32 undef, i32 undef, i32 -1, i32 0>
+  ret <4 x i32> %tmp7
+}

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/sse41.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/sse41.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/sse41.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/sse41.ll Tue Oct 26 19:48:03 2010
@@ -224,3 +224,28 @@
 declare i32 @llvm.x86.sse41.ptestc(<4 x float>, <4 x float>) nounwind readnone
 declare i32 @llvm.x86.sse41.ptestnzc(<4 x float>, <4 x float>) nounwind readnone
 
+; This used to compile to insertps $0  + insertps $16.  insertps $0 is always
+; pointless.
+define <2 x float> @buildvector(<2 x float> %A, <2 x float> %B) nounwind  {
+entry:
+  %tmp7 = extractelement <2 x float> %A, i32 0
+  %tmp5 = extractelement <2 x float> %A, i32 1
+  %tmp3 = extractelement <2 x float> %B, i32 0
+  %tmp1 = extractelement <2 x float> %B, i32 1
+  %add.r = fadd float %tmp7, %tmp3
+  %add.i = fadd float %tmp5, %tmp1
+  %tmp11 = insertelement <2 x float> undef, float %add.r, i32 0
+  %tmp9 = insertelement <2 x float> %tmp11, float %add.i, i32 1
+  ret <2 x float> %tmp9
+; X32: buildvector:
+; X32-NOT: insertps $0
+; X32: insertps $16
+; X32-NOT: insertps $0
+; X32: ret
+; X64: buildvector:
+; X64-NOT: insertps $0
+; X64: insertps $16
+; X64-NOT: insertps $0
+; X64: ret
+}
+

Removed: llvm/branches/wendling/eh/test/CodeGen/X86/stack-color-with-reg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/stack-color-with-reg.ll?rev=117424&view=auto
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/stack-color-with-reg.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/stack-color-with-reg.ll (removed)
@@ -1,361 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -relocation-model=pic -disable-fp-elim -color-ss-with-regs -stats -info-output-file - > %t
-; RUN:   grep asm-printer %t | grep 166
-; RUN:   grep stackcoloring %t | grep "stack slot refs replaced with reg refs"  | grep 5
-
-	type { [62 x %struct.Bitvec*] }		; type %0
-	type { i8* }		; type %1
-	type { double }		; type %2
-	%struct..5sPragmaType = type { i8*, i32 }
-	%struct.AggInfo = type { i8, i8, i32, %struct.ExprList*, i32, %struct.AggInfo_col*, i32, i32, i32, %struct.AggInfo_func*, i32, i32 }
-	%struct.AggInfo_col = type { %struct.Table*, i32, i32, i32, i32, %struct.Expr* }
-	%struct.AggInfo_func = type { %struct.Expr*, %struct.FuncDef*, i32, i32 }
-	%struct.AuxData = type { i8*, void (i8*)* }
-	%struct.Bitvec = type { i32, i32, i32, %0 }
-	%struct.BtCursor = type { %struct.Btree*, %struct.BtShared*, %struct.BtCursor*, %struct.BtCursor*, i32 (i8*, i32, i8*, i32, i8*)*, i8*, i32, %struct.MemPage*, i32, %struct.CellInfo, i8, i8, i8*, i64, i32, i8, i32* }
-	%struct.BtLock = type { %struct.Btree*, i32, i8, %struct.BtLock* }
-	%struct.BtShared = type { %struct.Pager*, %struct.sqlite3*, %struct.BtCursor*, %struct.MemPage*, i8, i8, i8, i8, i8, i8, i8, i8, i32, i16, i16, i32, i32, i32, i32, i8, i32, i8*, void (i8*)*, %struct.sqlite3_mutex*, %struct.BusyHandler, i32, %struct.BtShared*, %struct.BtLock*, %struct.Btree* }
-	%struct.Btree = type { %struct.sqlite3*, %struct.BtShared*, i8, i8, i8, i32, %struct.Btree*, %struct.Btree* }
-	%struct.BtreeMutexArray = type { i32, [11 x %struct.Btree*] }
-	%struct.BusyHandler = type { i32 (i8*, i32)*, i8*, i32 }
-	%struct.CellInfo = type { i8*, i64, i32, i32, i16, i16, i16, i16 }
-	%struct.CollSeq = type { i8*, i8, i8, i8*, i32 (i8*, i32, i8*, i32, i8*)*, void (i8*)* }
-	%struct.Column = type { i8*, %struct.Expr*, i8*, i8*, i8, i8, i8, i8 }
-	%struct.Context = type { i64, i32, %struct.Fifo }
-	%struct.CountCtx = type { i64 }
-	%struct.Cursor = type { %struct.BtCursor*, i32, i64, i64, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i64, %struct.Btree*, i32, i8*, i64, i8*, %struct.KeyInfo*, i32, i64, %struct.sqlite3_vtab_cursor*, %struct.sqlite3_module*, i32, i32, i32*, i32*, i8* }
-	%struct.Db = type { i8*, %struct.Btree*, i8, i8, i8*, void (i8*)*, %struct.Schema* }
-	%struct.DbPage = type { %struct.Pager*, i32, %struct.DbPage*, %struct.DbPage*, %struct.PagerLruLink, %struct.DbPage*, i8, i8, i8, i8, i8, i16, %struct.DbPage*, %struct.DbPage*, i8* }
-	%struct.Expr = type { i8, i8, i16, %struct.CollSeq*, %struct.Expr*, %struct.Expr*, %struct.ExprList*, %struct..5sPragmaType, %struct..5sPragmaType, i32, i32, %struct.AggInfo*, i32, i32, %struct.Select*, %struct.Table*, i32 }
-	%struct.ExprList = type { i32, i32, i32, %struct.ExprList_item* }
-	%struct.ExprList_item = type { %struct.Expr*, i8*, i8, i8, i8 }
-	%struct.FKey = type { %struct.Table*, %struct.FKey*, i8*, %struct.FKey*, i32, %struct.sColMap*, i8, i8, i8, i8 }
-	%struct.Fifo = type { i32, %struct.FifoPage*, %struct.FifoPage* }
-	%struct.FifoPage = type { i32, i32, i32, %struct.FifoPage*, [1 x i64] }
-	%struct.FuncDef = type { i16, i8, i8, i8, i8*, %struct.FuncDef*, void (%struct.sqlite3_context*, i32, %struct.Mem**)*, void (%struct.sqlite3_context*, i32, %struct.Mem**)*, void (%struct.sqlite3_context*)*, [1 x i8] }
-	%struct.Hash = type { i8, i8, i32, i32, %struct.HashElem*, %struct._ht* }
-	%struct.HashElem = type { %struct.HashElem*, %struct.HashElem*, i8*, i8*, i32 }
-	%struct.IdList = type { %struct..5sPragmaType*, i32, i32 }
-	%struct.Index = type { i8*, i32, i32*, i32*, %struct.Table*, i32, i8, i8, i8*, %struct.Index*, %struct.Schema*, i8*, i8** }
-	%struct.KeyInfo = type { %struct.sqlite3*, i8, i8, i8, i32, i8*, [1 x %struct.CollSeq*] }
-	%struct.Mem = type { %struct.CountCtx, double, %struct.sqlite3*, i8*, i32, i16, i8, i8, void (i8*)* }
-	%struct.MemPage = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i16, i16, i16, i16, i16, i16, [5 x %struct._OvflCell], %struct.BtShared*, i8*, %struct.DbPage*, i32, %struct.MemPage* }
-	%struct.Module = type { %struct.sqlite3_module*, i8*, i8*, void (i8*)* }
-	%struct.Op = type { i8, i8, i8, i8, i32, i32, i32, %1 }
-	%struct.Pager = type { %struct.sqlite3_vfs*, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, %struct.Bitvec*, %struct.Bitvec*, i8*, i8*, i8*, i8*, %struct.sqlite3_file*, %struct.sqlite3_file*, %struct.sqlite3_file*, %struct.BusyHandler*, %struct.PagerLruList, %struct.DbPage*, %struct.DbPage*, %struct.DbPage*, i64, i64, i64, i64, i64, i32, void (%struct.DbPage*, i32)*, void (%struct.DbPage*, i32)*, i32, %struct.DbPage**, i8*, [16 x i8] }
-	%struct.PagerLruLink = type { %struct.DbPage*, %struct.DbPage* }
-	%struct.PagerLruList = type { %struct.DbPage*, %struct.DbPage*, %struct.DbPage* }
-	%struct.Schema = type { i32, %struct.Hash, %struct.Hash, %struct.Hash, %struct.Hash, %struct.Table*, i8, i8, i16, i32, %struct.sqlite3* }
-	%struct.Select = type { %struct.ExprList*, i8, i8, i8, i8, i8, i8, i8, %struct.SrcList*, %struct.Expr*, %struct.ExprList*, %struct.Expr*, %struct.ExprList*, %struct.Select*, %struct.Select*, %struct.Select*, %struct.Expr*, %struct.Expr*, i32, i32, [3 x i32] }
-	%struct.SrcList = type { i16, i16, [1 x %struct.SrcList_item] }
-	%struct.SrcList_item = type { i8*, i8*, i8*, %struct.Table*, %struct.Select*, i8, i8, i32, %struct.Expr*, %struct.IdList*, i64 }
-	%struct.Table = type { i8*, i32, %struct.Column*, i32, %struct.Index*, i32, %struct.Select*, i32, %struct.Trigger*, %struct.FKey*, i8*, %struct.Expr*, i32, i8, i8, i8, i8, i8, i8, i8, %struct.Module*, %struct.sqlite3_vtab*, i32, i8**, %struct.Schema* }
-	%struct.Trigger = type { i8*, i8*, i8, i8, %struct.Expr*, %struct.IdList*, %struct..5sPragmaType, %struct.Schema*, %struct.Schema*, %struct.TriggerStep*, %struct.Trigger* }
-	%struct.TriggerStep = type { i32, i32, %struct.Trigger*, %struct.Select*, %struct..5sPragmaType, %struct.Expr*, %struct.ExprList*, %struct.IdList*, %struct.TriggerStep*, %struct.TriggerStep* }
-	%struct.Vdbe = type { %struct.sqlite3*, %struct.Vdbe*, %struct.Vdbe*, i32, i32, %struct.Op*, i32, i32, i32*, %struct.Mem**, %struct.Mem*, i32, %struct.Cursor**, i32, %struct.Mem*, i8**, i32, i32, i32, %struct.Mem*, i32, i32, %struct.Fifo, i32, i32, %struct.Context*, i32, i32, i32, i32, i32, [25 x i32], i32, i32, i8**, i8*, %struct.Mem*, i8, i8, i8, i8, i8, i8, i32, i64, i32, %struct.BtreeMutexArray, i32, i8*, i32 }
-	%struct.VdbeFunc = type { %struct.FuncDef*, i32, [1 x %struct.AuxData] }
-	%struct._OvflCell = type { i8*, i16 }
-	%struct._ht = type { i32, %struct.HashElem* }
-	%struct.sColMap = type { i32, i8* }
-	%struct.sqlite3 = type { %struct.sqlite3_vfs*, i32, %struct.Db*, i32, i32, i32, i32, i8, i8, i8, i8, i32, %struct.CollSeq*, i64, i64, i32, i32, i32, %struct.sqlite3_mutex*, %struct.sqlite3InitInfo, i32, i8**, %struct.Vdbe*, i32, void (i8*, i8*)*, i8*, void (i8*, i8*, i64)*, i8*, i8*, i32 (i8*)*, i8*, void (i8*)*, i8*, void (i8*, i32, i8*, i8*, i64)*, void (i8*, %struct.sqlite3*, i32, i8*)*, void (i8*, %struct.sqlite3*, i32, i8*)*, i8*, %struct.Mem*, i8*, i8*, %2, i32 (i8*, i32, i8*, i8*, i8*, i8*)*, i8*, i32 (i8*)*, i8*, i32, %struct.Hash, %struct.Table*, %struct.sqlite3_vtab**, i32, %struct.Hash, %struct.Hash, %struct.BusyHandler, i32, [2 x %struct.Db], i8 }
-	%struct.sqlite3InitInfo = type { i32, i32, i8 }
-	%struct.sqlite3_context = type { %struct.FuncDef*, %struct.VdbeFunc*, %struct.Mem, %struct.Mem*, i32, %struct.CollSeq* }
-	%struct.sqlite3_file = type { %struct.sqlite3_io_methods* }
-	%struct.sqlite3_index_constraint = type { i32, i8, i8, i32 }
-	%struct.sqlite3_index_constraint_usage = type { i32, i8 }
-	%struct.sqlite3_index_info = type { i32, %struct.sqlite3_index_constraint*, i32, %struct.sqlite3_index_constraint_usage*, %struct.sqlite3_index_constraint_usage*, i32, i8*, i32, i32, double }
-	%struct.sqlite3_io_methods = type { i32, i32 (%struct.sqlite3_file*)*, i32 (%struct.sqlite3_file*, i8*, i32, i64)*, i32 (%struct.sqlite3_file*, i8*, i32, i64)*, i32 (%struct.sqlite3_file*, i64)*, i32 (%struct.sqlite3_file*, i32)*, i32 (%struct.sqlite3_file*, i64*)*, i32 (%struct.sqlite3_file*, i32)*, i32 (%struct.sqlite3_file*, i32)*, i32 (%struct.sqlite3_file*)*, i32 (%struct.sqlite3_file*, i32, i8*)*, i32 (%struct.sqlite3_file*)*, i32 (%struct.sqlite3_file*)* }
-	%struct.sqlite3_module = type { i32, i32 (%struct.sqlite3*, i8*, i32, i8**, %struct.sqlite3_vtab**, i8**)*, i32 (%struct.sqlite3*, i8*, i32, i8**, %struct.sqlite3_vtab**, i8**)*, i32 (%struct.sqlite3_vtab*, %struct.sqlite3_index_info*)*, i32 (%struct.sqlite3_vtab*)*, i32 (%struct.sqlite3_vtab*)*, i32 (%struct.sqlite3_vtab*, %struct.sqlite3_vtab_cursor**)*, i32 (%struct.sqlite3_vtab_cursor*)*, i32 (%struct.sqlite3_vtab_cursor*, i32, i8*, i32, %struct.Mem**)*, i32 (%struct.sqlite3_vtab_cursor*)*, i32 (%struct.sqlite3_vtab_cursor*)*, i32 (%struct.sqlite3_vtab_cursor*, %struct.sqlite3_context*, i32)*, i32 (%struct.sqlite3_vtab_cursor*, i64*)*, i32 (%struct.sqlite3_vtab*, i32, %struct.Mem**, i64*)*, i32 (%struct.sqlite3_vtab*)*, i32 (%struct.sqlite3_vtab*)*, i32 (%struct.sqlite3_vtab*)*, i32 (%struct.sqlite3_vtab*)*, i32 (%struct.sqlite3_vtab*, i32, i8*, void (%struct.sqlite3_context*, i32, %struct.Mem**)**, i8**)*, i32 (%struct.sqlite3_vtab*, i8*)* }
-	%struct.sqlite3_mutex = type opaque
-	%struct.sqlite3_vfs = type { i32, i32, i32, %struct.sqlite3_vfs*, i8*, i8*, i32 (%struct.sqlite3_vfs*, i8*, %struct.sqlite3_file*, i32, i32*)*, i32 (%struct.sqlite3_vfs*, i8*, i32)*, i32 (%struct.sqlite3_vfs*, i8*, i32)*, i32 (%struct.sqlite3_vfs*, i32, i8*)*, i32 (%struct.sqlite3_vfs*, i8*, i32, i8*)*, i8* (%struct.sqlite3_vfs*, i8*)*, void (%struct.sqlite3_vfs*, i32, i8*)*, i8* (%struct.sqlite3_vfs*, i8*, i8*)*, void (%struct.sqlite3_vfs*, i8*)*, i32 (%struct.sqlite3_vfs*, i32, i8*)*, i32 (%struct.sqlite3_vfs*, i32)*, i32 (%struct.sqlite3_vfs*, double*)* }
-	%struct.sqlite3_vtab = type { %struct.sqlite3_module*, i32, i8* }
-	%struct.sqlite3_vtab_cursor = type { %struct.sqlite3_vtab* }
-@llvm.used = appending global [1 x i8*] [i8* bitcast (void (%struct.MemPage*, i32, i32)* @dropCell to i8*)], section "llvm.metadata"		; <[1 x i8*]*> [#uses=0]
-
-define fastcc void @dropCell(%struct.MemPage* nocapture %pPage, i32 %idx, i32 %sz) nounwind ssp {
-entry:
-	%0 = getelementptr %struct.MemPage* %pPage, i64 0, i32 18		; <i8**> [#uses=1]
-	%1 = load i8** %0, align 8		; <i8*> [#uses=34]
-	%2 = getelementptr %struct.MemPage* %pPage, i64 0, i32 12		; <i16*> [#uses=1]
-	%3 = load i16* %2, align 2		; <i16> [#uses=1]
-	%4 = zext i16 %3 to i32		; <i32> [#uses=2]
-	%5 = shl i32 %idx, 1		; <i32> [#uses=2]
-	%6 = add i32 %4, %5		; <i32> [#uses=1]
-	%7 = sext i32 %6 to i64		; <i64> [#uses=2]
-	%8 = getelementptr i8* %1, i64 %7		; <i8*> [#uses=1]
-	%9 = load i8* %8, align 1		; <i8> [#uses=2]
-	%10 = zext i8 %9 to i32		; <i32> [#uses=1]
-	%11 = shl i32 %10, 8		; <i32> [#uses=1]
-	%.sum3 = add i64 %7, 1		; <i64> [#uses=1]
-	%12 = getelementptr i8* %1, i64 %.sum3		; <i8*> [#uses=1]
-	%13 = load i8* %12, align 1		; <i8> [#uses=2]
-	%14 = zext i8 %13 to i32		; <i32> [#uses=1]
-	%15 = or i32 %11, %14		; <i32> [#uses=3]
-	%16 = icmp slt i32 %sz, 4		; <i1> [#uses=1]
-	%size_addr.0.i = select i1 %16, i32 4, i32 %sz		; <i32> [#uses=3]
-	%17 = getelementptr %struct.MemPage* %pPage, i64 0, i32 8		; <i8*> [#uses=5]
-	%18 = load i8* %17, align 8		; <i8> [#uses=1]
-	%19 = zext i8 %18 to i32		; <i32> [#uses=4]
-	%20 = add i32 %19, 1		; <i32> [#uses=2]
-	br label %bb3.i
-
-bb3.i:		; preds = %bb3.i, %entry
-	%addr.0.i = phi i32 [ %20, %entry ], [ %29, %bb3.i ]		; <i32> [#uses=1]
-	%21 = sext i32 %addr.0.i to i64		; <i64> [#uses=2]
-	%22 = getelementptr i8* %1, i64 %21		; <i8*> [#uses=2]
-	%23 = load i8* %22, align 1		; <i8> [#uses=2]
-	%24 = zext i8 %23 to i32		; <i32> [#uses=1]
-	%25 = shl i32 %24, 8		; <i32> [#uses=1]
-	%.sum34.i = add i64 %21, 1		; <i64> [#uses=1]
-	%26 = getelementptr i8* %1, i64 %.sum34.i		; <i8*> [#uses=2]
-	%27 = load i8* %26, align 1		; <i8> [#uses=2]
-	%28 = zext i8 %27 to i32		; <i32> [#uses=1]
-	%29 = or i32 %25, %28		; <i32> [#uses=3]
-	%.not.i = icmp uge i32 %29, %15		; <i1> [#uses=1]
-	%30 = icmp eq i32 %29, 0		; <i1> [#uses=1]
-	%or.cond.i = or i1 %30, %.not.i		; <i1> [#uses=1]
-	br i1 %or.cond.i, label %bb5.i, label %bb3.i
-
-bb5.i:		; preds = %bb3.i
-	store i8 %9, i8* %22, align 1
-	store i8 %13, i8* %26, align 1
-	%31 = zext i32 %15 to i64		; <i64> [#uses=2]
-	%32 = getelementptr i8* %1, i64 %31		; <i8*> [#uses=1]
-	store i8 %23, i8* %32, align 1
-	%.sum32.i = add i64 %31, 1		; <i64> [#uses=1]
-	%33 = getelementptr i8* %1, i64 %.sum32.i		; <i8*> [#uses=1]
-	store i8 %27, i8* %33, align 1
-	%34 = add i32 %15, 2		; <i32> [#uses=1]
-	%35 = zext i32 %34 to i64		; <i64> [#uses=2]
-	%36 = getelementptr i8* %1, i64 %35		; <i8*> [#uses=1]
-	%37 = lshr i32 %size_addr.0.i, 8		; <i32> [#uses=1]
-	%38 = trunc i32 %37 to i8		; <i8> [#uses=1]
-	store i8 %38, i8* %36, align 1
-	%39 = trunc i32 %size_addr.0.i to i8		; <i8> [#uses=1]
-	%.sum31.i = add i64 %35, 1		; <i64> [#uses=1]
-	%40 = getelementptr i8* %1, i64 %.sum31.i		; <i8*> [#uses=1]
-	store i8 %39, i8* %40, align 1
-	%41 = getelementptr %struct.MemPage* %pPage, i64 0, i32 14		; <i16*> [#uses=4]
-	%42 = load i16* %41, align 2		; <i16> [#uses=1]
-	%43 = trunc i32 %size_addr.0.i to i16		; <i16> [#uses=1]
-	%44 = add i16 %42, %43		; <i16> [#uses=1]
-	store i16 %44, i16* %41, align 2
-	%45 = load i8* %17, align 8		; <i8> [#uses=1]
-	%46 = zext i8 %45 to i32		; <i32> [#uses=1]
-	%47 = add i32 %46, 1		; <i32> [#uses=1]
-	br label %bb11.outer.i
-
-bb11.outer.i:		; preds = %bb6.i, %bb5.i
-	%addr.1.ph.i = phi i32 [ %47, %bb5.i ], [ %111, %bb6.i ]		; <i32> [#uses=1]
-	%48 = sext i32 %addr.1.ph.i to i64		; <i64> [#uses=2]
-	%49 = getelementptr i8* %1, i64 %48		; <i8*> [#uses=1]
-	%.sum30.i = add i64 %48, 1		; <i64> [#uses=1]
-	%50 = getelementptr i8* %1, i64 %.sum30.i		; <i8*> [#uses=1]
-	br label %bb11.i
-
-bb6.i:		; preds = %bb11.i
-	%51 = zext i32 %111 to i64		; <i64> [#uses=2]
-	%52 = getelementptr i8* %1, i64 %51		; <i8*> [#uses=2]
-	%53 = load i8* %52, align 1		; <i8> [#uses=1]
-	%54 = zext i8 %53 to i32		; <i32> [#uses=1]
-	%55 = shl i32 %54, 8		; <i32> [#uses=1]
-	%.sum24.i = add i64 %51, 1		; <i64> [#uses=1]
-	%56 = getelementptr i8* %1, i64 %.sum24.i		; <i8*> [#uses=2]
-	%57 = load i8* %56, align 1		; <i8> [#uses=3]
-	%58 = zext i8 %57 to i32		; <i32> [#uses=1]
-	%59 = or i32 %55, %58		; <i32> [#uses=5]
-	%60 = add i32 %111, 2		; <i32> [#uses=1]
-	%61 = zext i32 %60 to i64		; <i64> [#uses=2]
-	%62 = getelementptr i8* %1, i64 %61		; <i8*> [#uses=2]
-	%63 = load i8* %62, align 1		; <i8> [#uses=1]
-	%64 = zext i8 %63 to i32		; <i32> [#uses=1]
-	%65 = shl i32 %64, 8		; <i32> [#uses=1]
-	%.sum23.i = add i64 %61, 1		; <i64> [#uses=1]
-	%66 = getelementptr i8* %1, i64 %.sum23.i		; <i8*> [#uses=2]
-	%67 = load i8* %66, align 1		; <i8> [#uses=2]
-	%68 = zext i8 %67 to i32		; <i32> [#uses=1]
-	%69 = or i32 %65, %68		; <i32> [#uses=1]
-	%70 = add i32 %111, 3		; <i32> [#uses=1]
-	%71 = add i32 %70, %69		; <i32> [#uses=1]
-	%72 = icmp sge i32 %71, %59		; <i1> [#uses=1]
-	%73 = icmp ne i32 %59, 0		; <i1> [#uses=1]
-	%74 = and i1 %72, %73		; <i1> [#uses=1]
-	br i1 %74, label %bb9.i, label %bb11.outer.i
-
-bb9.i:		; preds = %bb6.i
-	%75 = load i8* %17, align 8		; <i8> [#uses=1]
-	%76 = zext i8 %75 to i32		; <i32> [#uses=1]
-	%77 = add i32 %76, 7		; <i32> [#uses=1]
-	%78 = zext i32 %77 to i64		; <i64> [#uses=1]
-	%79 = getelementptr i8* %1, i64 %78		; <i8*> [#uses=2]
-	%80 = load i8* %79, align 1		; <i8> [#uses=1]
-	%81 = sub i8 %109, %57		; <i8> [#uses=1]
-	%82 = add i8 %81, %67		; <i8> [#uses=1]
-	%83 = add i8 %82, %80		; <i8> [#uses=1]
-	store i8 %83, i8* %79, align 1
-	%84 = zext i32 %59 to i64		; <i64> [#uses=2]
-	%85 = getelementptr i8* %1, i64 %84		; <i8*> [#uses=1]
-	%86 = load i8* %85, align 1		; <i8> [#uses=1]
-	store i8 %86, i8* %52, align 1
-	%.sum22.i = add i64 %84, 1		; <i64> [#uses=1]
-	%87 = getelementptr i8* %1, i64 %.sum22.i		; <i8*> [#uses=1]
-	%88 = load i8* %87, align 1		; <i8> [#uses=1]
-	store i8 %88, i8* %56, align 1
-	%89 = add i32 %59, 2		; <i32> [#uses=1]
-	%90 = zext i32 %89 to i64		; <i64> [#uses=2]
-	%91 = getelementptr i8* %1, i64 %90		; <i8*> [#uses=1]
-	%92 = load i8* %91, align 1		; <i8> [#uses=1]
-	%93 = zext i8 %92 to i32		; <i32> [#uses=1]
-	%94 = shl i32 %93, 8		; <i32> [#uses=1]
-	%.sum20.i = add i64 %90, 1		; <i64> [#uses=1]
-	%95 = getelementptr i8* %1, i64 %.sum20.i		; <i8*> [#uses=2]
-	%96 = load i8* %95, align 1		; <i8> [#uses=1]
-	%97 = zext i8 %96 to i32		; <i32> [#uses=1]
-	%98 = or i32 %94, %97		; <i32> [#uses=1]
-	%99 = sub i32 %59, %111		; <i32> [#uses=1]
-	%100 = add i32 %99, %98		; <i32> [#uses=1]
-	%101 = lshr i32 %100, 8		; <i32> [#uses=1]
-	%102 = trunc i32 %101 to i8		; <i8> [#uses=1]
-	store i8 %102, i8* %62, align 1
-	%103 = load i8* %95, align 1		; <i8> [#uses=1]
-	%104 = sub i8 %57, %109		; <i8> [#uses=1]
-	%105 = add i8 %104, %103		; <i8> [#uses=1]
-	store i8 %105, i8* %66, align 1
-	br label %bb11.i
-
-bb11.i:		; preds = %bb9.i, %bb11.outer.i
-	%106 = load i8* %49, align 1		; <i8> [#uses=1]
-	%107 = zext i8 %106 to i32		; <i32> [#uses=1]
-	%108 = shl i32 %107, 8		; <i32> [#uses=1]
-	%109 = load i8* %50, align 1		; <i8> [#uses=3]
-	%110 = zext i8 %109 to i32		; <i32> [#uses=1]
-	%111 = or i32 %108, %110		; <i32> [#uses=6]
-	%112 = icmp eq i32 %111, 0		; <i1> [#uses=1]
-	br i1 %112, label %bb12.i, label %bb6.i
-
-bb12.i:		; preds = %bb11.i
-	%113 = zext i32 %20 to i64		; <i64> [#uses=2]
-	%114 = getelementptr i8* %1, i64 %113		; <i8*> [#uses=2]
-	%115 = load i8* %114, align 1		; <i8> [#uses=2]
-	%116 = add i32 %19, 5		; <i32> [#uses=1]
-	%117 = zext i32 %116 to i64		; <i64> [#uses=2]
-	%118 = getelementptr i8* %1, i64 %117		; <i8*> [#uses=3]
-	%119 = load i8* %118, align 1		; <i8> [#uses=1]
-	%120 = icmp eq i8 %115, %119		; <i1> [#uses=1]
-	br i1 %120, label %bb13.i, label %bb1.preheader
-
-bb13.i:		; preds = %bb12.i
-	%121 = add i32 %19, 2		; <i32> [#uses=1]
-	%122 = zext i32 %121 to i64		; <i64> [#uses=1]
-	%123 = getelementptr i8* %1, i64 %122		; <i8*> [#uses=1]
-	%124 = load i8* %123, align 1		; <i8> [#uses=1]
-	%125 = add i32 %19, 6		; <i32> [#uses=1]
-	%126 = zext i32 %125 to i64		; <i64> [#uses=1]
-	%127 = getelementptr i8* %1, i64 %126		; <i8*> [#uses=1]
-	%128 = load i8* %127, align 1		; <i8> [#uses=1]
-	%129 = icmp eq i8 %124, %128		; <i1> [#uses=1]
-	br i1 %129, label %bb14.i, label %bb1.preheader
-
-bb14.i:		; preds = %bb13.i
-	%130 = zext i8 %115 to i32		; <i32> [#uses=1]
-	%131 = shl i32 %130, 8		; <i32> [#uses=1]
-	%.sum29.i = add i64 %113, 1		; <i64> [#uses=1]
-	%132 = getelementptr i8* %1, i64 %.sum29.i		; <i8*> [#uses=1]
-	%133 = load i8* %132, align 1		; <i8> [#uses=1]
-	%134 = zext i8 %133 to i32		; <i32> [#uses=1]
-	%135 = or i32 %134, %131		; <i32> [#uses=2]
-	%136 = zext i32 %135 to i64		; <i64> [#uses=1]
-	%137 = getelementptr i8* %1, i64 %136		; <i8*> [#uses=1]
-	%138 = bitcast i8* %137 to i16*		; <i16*> [#uses=1]
-	%139 = bitcast i8* %114 to i16*		; <i16*> [#uses=1]
-	%tmp.i = load i16* %138, align 1		; <i16> [#uses=1]
-	store i16 %tmp.i, i16* %139, align 1
-	%140 = load i8* %118, align 1		; <i8> [#uses=1]
-	%141 = zext i8 %140 to i32		; <i32> [#uses=1]
-	%142 = shl i32 %141, 8		; <i32> [#uses=1]
-	%.sum28.i = add i64 %117, 1		; <i64> [#uses=1]
-	%143 = getelementptr i8* %1, i64 %.sum28.i		; <i8*> [#uses=2]
-	%144 = load i8* %143, align 1		; <i8> [#uses=2]
-	%145 = zext i8 %144 to i32		; <i32> [#uses=1]
-	%146 = or i32 %142, %145		; <i32> [#uses=1]
-	%147 = add i32 %135, 2		; <i32> [#uses=1]
-	%148 = zext i32 %147 to i64		; <i64> [#uses=2]
-	%149 = getelementptr i8* %1, i64 %148		; <i8*> [#uses=1]
-	%150 = load i8* %149, align 1		; <i8> [#uses=1]
-	%151 = zext i8 %150 to i32		; <i32> [#uses=1]
-	%152 = shl i32 %151, 8		; <i32> [#uses=1]
-	%.sum27.i = add i64 %148, 1		; <i64> [#uses=1]
-	%153 = getelementptr i8* %1, i64 %.sum27.i		; <i8*> [#uses=2]
-	%154 = load i8* %153, align 1		; <i8> [#uses=1]
-	%155 = zext i8 %154 to i32		; <i32> [#uses=1]
-	%156 = or i32 %152, %155		; <i32> [#uses=1]
-	%157 = add i32 %156, %146		; <i32> [#uses=1]
-	%158 = lshr i32 %157, 8		; <i32> [#uses=1]
-	%159 = trunc i32 %158 to i8		; <i8> [#uses=1]
-	store i8 %159, i8* %118, align 1
-	%160 = load i8* %153, align 1		; <i8> [#uses=1]
-	%161 = add i8 %160, %144		; <i8> [#uses=1]
-	store i8 %161, i8* %143, align 1
-	br label %bb1.preheader
-
-bb1.preheader:		; preds = %bb14.i, %bb13.i, %bb12.i
-	%i.08 = add i32 %idx, 1		; <i32> [#uses=2]
-	%162 = getelementptr %struct.MemPage* %pPage, i64 0, i32 15		; <i16*> [#uses=4]
-	%163 = load i16* %162, align 4		; <i16> [#uses=2]
-	%164 = zext i16 %163 to i32		; <i32> [#uses=1]
-	%165 = icmp sgt i32 %164, %i.08		; <i1> [#uses=1]
-	br i1 %165, label %bb, label %bb2
-
-bb:		; preds = %bb, %bb1.preheader
-	%indvar = phi i64 [ 0, %bb1.preheader ], [ %indvar.next, %bb ]		; <i64> [#uses=3]
-	%tmp16 = add i32 %5, %4		; <i32> [#uses=1]
-	%tmp.17 = sext i32 %tmp16 to i64		; <i64> [#uses=1]
-	%tmp19 = shl i64 %indvar, 1		; <i64> [#uses=1]
-	%ctg2.sum = add i64 %tmp.17, %tmp19		; <i64> [#uses=4]
-	%ctg229 = getelementptr i8* %1, i64 %ctg2.sum		; <i8*> [#uses=1]
-	%ctg229.sum31 = add i64 %ctg2.sum, 2		; <i64> [#uses=1]
-	%166 = getelementptr i8* %1, i64 %ctg229.sum31		; <i8*> [#uses=1]
-	%167 = load i8* %166, align 1		; <i8> [#uses=1]
-	store i8 %167, i8* %ctg229
-	%ctg229.sum30 = add i64 %ctg2.sum, 3		; <i64> [#uses=1]
-	%168 = getelementptr i8* %1, i64 %ctg229.sum30		; <i8*> [#uses=1]
-	%169 = load i8* %168, align 1		; <i8> [#uses=1]
-	%ctg229.sum = add i64 %ctg2.sum, 1		; <i64> [#uses=1]
-	%170 = getelementptr i8* %1, i64 %ctg229.sum		; <i8*> [#uses=1]
-	store i8 %169, i8* %170, align 1
-	%indvar15 = trunc i64 %indvar to i32		; <i32> [#uses=1]
-	%i.09 = add i32 %indvar15, %i.08		; <i32> [#uses=1]
-	%i.0 = add i32 %i.09, 1		; <i32> [#uses=1]
-	%171 = load i16* %162, align 4		; <i16> [#uses=2]
-	%172 = zext i16 %171 to i32		; <i32> [#uses=1]
-	%173 = icmp sgt i32 %172, %i.0		; <i1> [#uses=1]
-	%indvar.next = add i64 %indvar, 1		; <i64> [#uses=1]
-	br i1 %173, label %bb, label %bb2
-
-bb2:		; preds = %bb, %bb1.preheader
-	%174 = phi i16 [ %163, %bb1.preheader ], [ %171, %bb ]		; <i16> [#uses=1]
-	%175 = add i16 %174, -1		; <i16> [#uses=2]
-	store i16 %175, i16* %162, align 4
-	%176 = load i8* %17, align 8		; <i8> [#uses=1]
-	%177 = zext i8 %176 to i32		; <i32> [#uses=1]
-	%178 = add i32 %177, 3		; <i32> [#uses=1]
-	%179 = zext i32 %178 to i64		; <i64> [#uses=1]
-	%180 = getelementptr i8* %1, i64 %179		; <i8*> [#uses=1]
-	%181 = lshr i16 %175, 8		; <i16> [#uses=1]
-	%182 = trunc i16 %181 to i8		; <i8> [#uses=1]
-	store i8 %182, i8* %180, align 1
-	%183 = load i8* %17, align 8		; <i8> [#uses=1]
-	%184 = zext i8 %183 to i32		; <i32> [#uses=1]
-	%185 = add i32 %184, 3		; <i32> [#uses=1]
-	%186 = zext i32 %185 to i64		; <i64> [#uses=1]
-	%187 = load i16* %162, align 4		; <i16> [#uses=1]
-	%188 = trunc i16 %187 to i8		; <i8> [#uses=1]
-	%.sum = add i64 %186, 1		; <i64> [#uses=1]
-	%189 = getelementptr i8* %1, i64 %.sum		; <i8*> [#uses=1]
-	store i8 %188, i8* %189, align 1
-	%190 = load i16* %41, align 2		; <i16> [#uses=1]
-	%191 = add i16 %190, 2		; <i16> [#uses=1]
-	store i16 %191, i16* %41, align 2
-	%192 = getelementptr %struct.MemPage* %pPage, i64 0, i32 1		; <i8*> [#uses=1]
-	store i8 1, i8* %192, align 1
-	ret void
-}

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/stdcall.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/stdcall.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/stdcall.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/stdcall.ll Tue Oct 26 19:48:03 2010
@@ -2,7 +2,7 @@
 ; PR5851
 
 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f80:128:128-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32"
-target triple = "i386-mingw32"
+target triple = "i386-pc-mingw32"
 
 %0 = type { void (...)* }
 

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/store-narrow.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/store-narrow.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/store-narrow.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/store-narrow.ll Tue Oct 26 19:48:03 2010
@@ -1,6 +1,6 @@
 ; rdar://7860110
-; RUN: llc < %s | FileCheck %s -check-prefix=X64
-; RUN: llc -march=x86 < %s | FileCheck %s -check-prefix=X32
+; RUN: llc -asm-verbose=false < %s | FileCheck %s -check-prefix=X64
+; RUN: llc -march=x86 -asm-verbose=false < %s | FileCheck %s -check-prefix=X32
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
 target triple = "x86_64-apple-darwin10.2"
 
@@ -125,3 +125,44 @@
 ; X32: movb	%cl, 5(%{{.*}})
 }
 
+; PR7833
+
+ at g_16 = internal global i32 -1
+
+; X64: test8:
+; X64-NEXT: movl _g_16(%rip), %eax
+; X64-NEXT: movl $0, _g_16(%rip)
+; X64-NEXT: orl  $1, %eax
+; X64-NEXT: movl %eax, _g_16(%rip)
+; X64-NEXT: ret
+define void @test8() nounwind {
+  %tmp = load i32* @g_16
+  store i32 0, i32* @g_16
+  %or = or i32 %tmp, 1
+  store i32 %or, i32* @g_16
+  ret void
+}
+
+; X64: test9:
+; X64-NEXT: orb $1, _g_16(%rip)
+; X64-NEXT: ret
+define void @test9() nounwind {
+  %tmp = load i32* @g_16
+  %or = or i32 %tmp, 1
+  store i32 %or, i32* @g_16
+  ret void
+}
+
+; rdar://8494845 + PR8244
+; X64: test10:
+; X64-NEXT: movsbl	(%rdi), %eax
+; X64-NEXT: shrl	$8, %eax
+; X64-NEXT: ret
+define i8 @test10(i8* %P) nounwind ssp {
+entry:
+  %tmp = load i8* %P, align 1
+  %conv = sext i8 %tmp to i32
+  %shr3 = lshr i32 %conv, 8
+  %conv2 = trunc i32 %shr3 to i8
+  ret i8 %conv2
+}

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/store_op_load_fold2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/store_op_load_fold2.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/store_op_load_fold2.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/store_op_load_fold2.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 -x86-asm-syntax=intel | FileCheck %s
+; RUN: llc < %s -mtriple=i686-linux -x86-asm-syntax=intel | FileCheck %s
 
 target datalayout = "e-p:32:32"
         %struct.Macroblock = type { i32, i32, i32, i32, i32, [8 x i32], %struct.Macroblock*, %struct.Macroblock*, i32, [2 x [4 x [4 x [2 x i32]]]], [16 x i8], [16 x i8], i32, i64, [4 x i32], [4 x i32], i64, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i16, double, i32, i32, i32, i32, i32, i32, i32, i32, i32 }

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/tail-opts.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/tail-opts.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/tail-opts.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/tail-opts.ll Tue Oct 26 19:48:03 2010
@@ -62,11 +62,11 @@
 
 ; CHECK: tail_duplicate_me:
 ; CHECK:      movl $0, GHJK(%rip)
-; CHECK-NEXT: jmpq *%rbx
+; CHECK-NEXT: jmpq *%r
 ; CHECK:      movl $0, GHJK(%rip)
-; CHECK-NEXT: jmpq *%rbx
+; CHECK-NEXT: jmpq *%r
 ; CHECK:      movl $0, GHJK(%rip)
-; CHECK-NEXT: jmpq *%rbx
+; CHECK-NEXT: jmpq *%r
 
 define void @tail_duplicate_me() nounwind {
 entry:
@@ -153,19 +153,16 @@
 ; an unconditional jump to complete a two-way conditional branch.
 
 ; CHECK: c_expand_expr_stmt:
-; CHECK:        jmp .LBB3_7
-; CHECK-NEXT: .LBB3_12:
+; CHECK:        jmp .LBB3_11
+; CHECK-NEXT: .LBB3_9:
 ; CHECK-NEXT:   movq 8(%rax), %rax
+; CHECK-NEXT:   xorb %dl, %dl
 ; CHECK-NEXT:   movb 16(%rax), %al
 ; CHECK-NEXT:   cmpb $16, %al
-; CHECK-NEXT:   je .LBB3_6
+; CHECK-NEXT:   je .LBB3_11
 ; CHECK-NEXT:   cmpb $23, %al
-; CHECK-NEXT:   je .LBB3_6
-; CHECK-NEXT:   jmp .LBB3_15
-; CHECK-NEXT: .LBB3_14:
-; CHECK-NEXT:   cmpb $23, %bl
-; CHECK-NEXT:   jne .LBB3_15
-; CHECK-NEXT: .LBB3_15:
+; CHECK-NEXT:   jne .LBB3_14
+; CHECK-NEXT: .LBB3_11:
 
 %0 = type { %struct.rtx_def* }
 %struct.lang_decl = type opaque

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/tailcall-fastisel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/tailcall-fastisel.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/tailcall-fastisel.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/tailcall-fastisel.ll Tue Oct 26 19:48:03 2010
@@ -1,8 +1,6 @@
-; RUN: llc < %s -march=x86-64 -tailcallopt -fast-isel | grep TAILCALL
+; RUN: llc < %s -march=x86-64 -tailcallopt -fast-isel | not grep TAILCALL
 
-; Fast-isel shouldn't attempt to handle this tail call, and it should
-; cleanly terminate instruction selection in the block after it's
-; done to avoid emitting invalid MachineInstrs.
+; Fast-isel shouldn't attempt to cope with tail calls.
 
 %0 = type { i64, i32, i8* }
 
@@ -11,3 +9,11 @@
   %tmp20 = tail call fastcc i8* @"visit_array_aux<`Reference>"(%0 %arg, i32 undef) ; <i8*> [#uses=1]
   ret i8* %tmp20
 }
+
+define i32 @foo() nounwind {
+entry:
+ %0 = tail call i32 (...)* @bar() nounwind       ; <i32> [#uses=1]
+ ret i32 %0
+}
+
+declare i32 @bar(...) nounwind

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/tailcall-stackalign.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/tailcall-stackalign.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/tailcall-stackalign.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/tailcall-stackalign.ll Tue Oct 26 19:48:03 2010
@@ -19,5 +19,5 @@
  ret i32 0
 }
 
-; CHECK: call tailcaller
+; CHECK: calll tailcaller
 ; CHECK-NEXT: subl $12

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/tailcallfp2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/tailcallfp2.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/tailcallfp2.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/tailcallfp2.ll Tue Oct 26 19:48:03 2010
@@ -1,8 +1,9 @@
-; RUN: llc < %s -march=x86 -tailcallopt | grep {jmp} | grep {\\*%edx}
+; RUN: llc < %s -march=x86 -tailcallopt | FileCheck %s
 
 declare i32 @putchar(i32)
 
 define fastcc i32 @checktail(i32 %x, i32* %f, i32 %g) nounwind {
+; CHECK: checktail:
         %tmp1 = icmp sgt i32 %x, 0
         br i1 %tmp1, label %if-then, label %if-else
 
@@ -10,6 +11,7 @@
         %fun_ptr = bitcast i32* %f to i32(i32, i32*, i32)* 
         %arg1    = add i32 %x, -1
         call i32 @putchar(i32 90)       
+; CHECK: jmpl *%e{{.*}}
         %res = tail call fastcc i32 %fun_ptr( i32 %arg1, i32 * %f, i32 %g)
         ret i32 %res
 

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/tls9.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/tls9.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/tls9.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/tls9.ll Tue Oct 26 19:48:03 2010
@@ -5,7 +5,7 @@
 
 @i = external hidden thread_local global i32
 
-define i32 @f() {
+define i32 @f() nounwind {
 entry:
 	%tmp1 = load i32* @i
 	ret i32 %tmp1

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/twoaddr-coalesce.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/twoaddr-coalesce.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/twoaddr-coalesce.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/twoaddr-coalesce.ll Tue Oct 26 19:48:03 2010
@@ -3,7 +3,7 @@
 
 @"\01LC" = internal constant [4 x i8] c"%d\0A\00"		; <[4 x i8]*> [#uses=1]
 
-define i32 @main() nounwind {
+define i32 @foo() nounwind {
 bb1.thread:
 	br label %bb1
 

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/unaligned-load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/unaligned-load.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/unaligned-load.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/unaligned-load.ll Tue Oct 26 19:48:03 2010
@@ -13,7 +13,7 @@
 bb:
   %String2Loc9 = getelementptr inbounds [31 x i8]* %String2Loc, i64 0, i64 0
   call void @llvm.memcpy.i64(i8* %String2Loc9, i8* getelementptr inbounds ([31 x i8]* @.str3, i64 0, i64 0), i64 31, i32 1)
-; I386: call {{_?}}memcpy
+; I386: calll {{_?}}memcpy
 
 ; CORE2: movabsq
 ; CORE2: movabsq

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/v2f32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/v2f32.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/v2f32.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/v2f32.ll Tue Oct 26 19:48:03 2010
@@ -10,15 +10,16 @@
   store float %c, float* %P2
   ret void
 ; X64: test1:
-; X64-NEXT: addss	%xmm1, %xmm0
-; X64-NEXT: movss	%xmm0, (%rdi)
+; X64-NEXT: pshufd	$1, %xmm0, %xmm1
+; X64-NEXT: addss	%xmm0, %xmm1
+; X64-NEXT: movss	%xmm1, (%rdi)
 ; X64-NEXT: ret
 
 ; X32: test1:
-; X32-NEXT: movss	4(%esp), %xmm0
-; X32-NEXT: addss	8(%esp), %xmm0
-; X32-NEXT: movl	12(%esp), %eax
-; X32-NEXT: movss	%xmm0, (%eax)
+; X32-NEXT: pshufd	$1, %xmm0, %xmm1
+; X32-NEXT: addss	%xmm0, %xmm1
+; X32-NEXT: movl	4(%esp), %eax
+; X32-NEXT: movss	%xmm1, (%eax)
 ; X32-NEXT: ret
 }
 
@@ -28,12 +29,42 @@
   ret <2 x float> %Z
   
 ; X64: test2:
-; X64-NEXT: insertps $0
-; X64-NEXT: insertps $16
-; X64-NEXT: insertps $0
-; X64-NEXT: insertps $16
-; X64-NEXT: addps
-; X64-NEXT: movaps
-; X64-NEXT: pshufd
+; X64-NEXT: addps	%xmm1, %xmm0
 ; X64-NEXT: ret
 }
+
+
+define <2 x float> @test3(<4 x float> %A) nounwind {
+	%B = shufflevector <4 x float> %A, <4 x float> undef, <2 x i32> <i32 0, i32 1>
+	%C = fadd <2 x float> %B, %B
+	ret <2 x float> %C
+; CHECK: test3:
+; CHECK-NEXT: 	addps	%xmm0, %xmm0
+; CHECK-NEXT: 	ret
+}
+
+define <2 x float> @test4(<2 x float> %A) nounwind {
+	%C = fadd <2 x float> %A, %A
+	ret <2 x float> %C
+; CHECK: test4:
+; CHECK-NEXT: 	addps	%xmm0, %xmm0
+; CHECK-NEXT: 	ret
+}
+
+define <4 x float> @test5(<4 x float> %A) nounwind {
+	%B = shufflevector <4 x float> %A, <4 x float> undef, <2 x i32> <i32 0, i32 1>
+	%C = fadd <2 x float> %B, %B
+        br label %BB
+        
+BB:
+        %D = fadd <2 x float> %C, %C
+	%E = shufflevector <2 x float> %D, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
+	ret <4 x float> %E
+        
+; CHECK: _test5:
+; CHECK-NEXT: 	addps	%xmm0, %xmm0
+; CHECK-NEXT: 	addps	%xmm0, %xmm0
+; CHECK-NEXT: 	ret
+}
+
+

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/vec_cast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/vec_cast.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/vec_cast.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/vec_cast.ll Tue Oct 26 19:48:03 2010
@@ -1,15 +1,16 @@
-; RUN: llc < %s -march=x86-64 
-; RUN: llc < %s -march=x86-64 -disable-mmx
+; RUN: llc < %s -march=x86-64 -mcpu=core2
+; RUN: llc < %s -march=x86-64 -mcpu=core2 -disable-mmx
+
 
 define <8 x i32> @a(<8 x i16> %a) nounwind {
   %c = sext <8 x i16> %a to <8 x i32>
   ret <8 x i32> %c
 }
 
-define <3 x i32> @b(<3 x i16> %a) nounwind {
-  %c = sext <3 x i16> %a to <3 x i32>
-  ret <3 x i32> %c
-}
+;define <3 x i32> @b(<3 x i16> %a) nounwind {
+;  %c = sext <3 x i16> %a to <3 x i32>
+;  ret <3 x i32> %c
+;}
 
 define <1 x i32> @c(<1 x i16> %a) nounwind {
   %c = sext <1 x i16> %a to <1 x i32>
@@ -21,10 +22,10 @@
   ret <8 x i32> %c
 }
 
-define <3 x i32> @e(<3 x i16> %a) nounwind {
-  %c = zext <3 x i16> %a to <3 x i32>
-  ret <3 x i32> %c
-}
+;define <3 x i32> @e(<3 x i16> %a) nounwind {
+;  %c = zext <3 x i16> %a to <3 x i32>
+;  ret <3 x i32> %c
+;}
 
 define <1 x i32> @f(<1 x i16> %a) nounwind {
   %c = zext <1 x i16> %a to <1 x i32>

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/vec_insert-5.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/vec_insert-5.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/vec_insert-5.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/vec_insert-5.ll Tue Oct 26 19:48:03 2010
@@ -1,15 +1,16 @@
 ; RUN: llc < %s -march=x86 -mattr=+sse2 > %t
-; RUN: grep psllq %t | grep 32
+; RUN: grep shll %t | grep 12
 ; RUN: grep pslldq %t | grep 12
 ; RUN: grep psrldq %t | grep 8
 ; RUN: grep psrldq %t | grep 12
+; There are no MMX operations in @t1
 
-define void  @t1(i32 %a, <1 x i64>* %P) nounwind {
+define void  @t1(i32 %a, x86_mmx* %P) nounwind {
        %tmp12 = shl i32 %a, 12
        %tmp21 = insertelement <2 x i32> undef, i32 %tmp12, i32 1
        %tmp22 = insertelement <2 x i32> %tmp21, i32 0, i32 0
-       %tmp23 = bitcast <2 x i32> %tmp22 to <1 x i64>
-       store <1 x i64> %tmp23, <1 x i64>* %P
+       %tmp23 = bitcast <2 x i32> %tmp22 to x86_mmx
+       store x86_mmx %tmp23, x86_mmx* %P
        ret void
 }
 

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/vec_insert-6.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/vec_insert-6.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/vec_insert-6.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/vec_insert-6.ll Tue Oct 26 19:48:03 2010
@@ -1,5 +1,5 @@
 ; RUN: llc < %s -march=x86 -mattr=+sse2 | grep pslldq
-; RUN: llc < %s -march=x86 -mattr=+sse2 -mtriple=i686-apple-darwin9 -stats -info-output-file - | grep asm-printer | grep 6
+; RUN: llc < %s -march=x86 -mattr=+sse2 -mtriple=i686-apple-darwin9 -o /dev/null -stats -info-output-file - | grep asm-printer | grep 6
 
 define <4 x float> @t3(<4 x float>* %P) nounwind  {
 	%tmp1 = load <4 x float>* %P

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/vec_insert-7.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/vec_insert-7.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/vec_insert-7.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/vec_insert-7.ll Tue Oct 26 19:48:03 2010
@@ -1,8 +1,15 @@
-; RUN: llc < %s -march=x86 -mattr=+mmx -mtriple=i686-apple-darwin9 -o - | grep punpckldq
+; RUN: llc < %s -march=x86 -mattr=+mmx,+sse42 -mtriple=i686-apple-darwin9 | FileCheck %s
+; MMX insertelement is not available; these are promoted to XMM.
+; (Without SSE they are split to two ints, and the code is much better.)
 
-define <2 x i32> @mmx_movzl(<2 x i32> %x) nounwind  {
+define x86_mmx @mmx_movzl(x86_mmx %x) nounwind  {
 entry:
-	%tmp3 = insertelement <2 x i32> %x, i32 32, i32 0		; <<2 x i32>> [#uses=1]
+; CHECK: mmx_movzl
+; CHECK: pinsrd
+; CHECK: pinsrd
+        %tmp = bitcast x86_mmx %x to <2 x i32> 
+	%tmp3 = insertelement <2 x i32> %tmp, i32 32, i32 0		; <<2 x i32>> [#uses=1]
 	%tmp8 = insertelement <2 x i32> %tmp3, i32 0, i32 1		; <<2 x i32>> [#uses=1]
-	ret <2 x i32> %tmp8
+        %tmp9 = bitcast <2 x i32> %tmp8 to x86_mmx
+	ret x86_mmx %tmp9
 }

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/vec_insert-9.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/vec_insert-9.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/vec_insert-9.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/vec_insert-9.ll Tue Oct 26 19:48:03 2010
@@ -1,5 +1,5 @@
 ; RUN: llc < %s -march=x86 -mattr=+sse41 > %t
-; RUN: grep pinsrd %t | count 2
+; RUN: grep pinsrd %t | count 1
 
 define <4 x i32> @var_insert2(<4 x i32> %x, i32 %val, i32 %idx) nounwind  {
 entry:

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/vec_set-F.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/vec_set-F.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/vec_set-F.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/vec_set-F.ll Tue Oct 26 19:48:03 2010
@@ -1,6 +1,6 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep movq
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep movsd
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep mov | count 3
+; RUN: llc < %s -mtriple=i686-linux -mattr=+sse2 | grep movq
+; RUN: llc < %s -mtriple=i686-linux -mattr=+sse2 | grep movsd
+; RUN: llc < %s -mtriple=i686-linux -mattr=+sse2 | grep mov | count 3
 
 define <2 x i64> @t1(<2 x i64>* %ptr) nounwind  {
 	%tmp45 = bitcast <2 x i64>* %ptr to <2 x i32>*

Removed: llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-10.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-10.ll?rev=117424&view=auto
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-10.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-10.ll (removed)
@@ -1,25 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -o %t
-; RUN: grep unpcklps %t | count 1
-; RUN: grep pshufd   %t | count 1
-; RUN: not grep {sub.*esp} %t
-
-define void @test(<4 x float>* %res, <4 x float>* %A, <4 x float>* %B) {
-	%tmp = load <4 x float>* %B		; <<4 x float>> [#uses=2]
-	%tmp3 = load <4 x float>* %A		; <<4 x float>> [#uses=2]
-	%tmp.upgrd.1 = extractelement <4 x float> %tmp3, i32 0		; <float> [#uses=1]
-	%tmp7 = extractelement <4 x float> %tmp, i32 0		; <float> [#uses=1]
-	%tmp8 = extractelement <4 x float> %tmp3, i32 1		; <float> [#uses=1]
-	%tmp9 = extractelement <4 x float> %tmp, i32 1		; <float> [#uses=1]
-	%tmp10 = insertelement <4 x float> undef, float %tmp.upgrd.1, i32 0		; <<4 x float>> [#uses=1]
-	%tmp11 = insertelement <4 x float> %tmp10, float %tmp7, i32 1		; <<4 x float>> [#uses=1]
-	%tmp12 = insertelement <4 x float> %tmp11, float %tmp8, i32 2		; <<4 x float>> [#uses=1]
-	%tmp13 = insertelement <4 x float> %tmp12, float %tmp9, i32 3		; <<4 x float>> [#uses=1]
-	store <4 x float> %tmp13, <4 x float>* %res
-	ret void
-}
-
-define void @test2(<4 x float> %X, <4 x float>* %res) {
-	%tmp5 = shufflevector <4 x float> %X, <4 x float> undef, <4 x i32> < i32 2, i32 6, i32 3, i32 7 >		; <<4 x float>> [#uses=1]
-	store <4 x float> %tmp5, <4 x float>* %res
-	ret void
-}

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-19.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-19.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-19.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-19.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -mtriple=i686-apple-darwin9 -stats -info-output-file - | grep asm-printer | grep 4
+; RUN: llc < %s -o /dev/null -march=x86 -mattr=+sse2 -mtriple=i686-apple-darwin9 -stats -info-output-file - | grep asm-printer | grep 4
 ; PR2485
 
 define <4 x i32> @t(<4 x i32> %a, <4 x i32> %b) nounwind  {

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-20.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-20.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-20.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-20.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -mtriple=i686-apple-darwin9 -stats -info-output-file - | grep asm-printer | grep 3
+; RUN: llc < %s -o /dev/null -march=x86 -mattr=+sse2 -mtriple=i686-apple-darwin9 -stats -info-output-file - | grep asm-printer | grep 3
 
 define <4 x float> @func(<4 x float> %fp0, <4 x float> %fp1) nounwind  {
 entry:

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-24.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-24.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-24.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-24.ll Tue Oct 26 19:48:03 2010
@@ -1,7 +1,8 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2  |     grep punpck
+; RUN: llc < %s -march=x86 -mattr=+sse2 | FileCheck %s
 
 define i32 @t() nounwind optsize {
 entry:
+; CHECK: punpckldq
 	%a = alloca <4 x i32>		; <<4 x i32>*> [#uses=2]
 	%b = alloca <4 x i32>		; <<4 x i32>*> [#uses=5]
 	volatile store <4 x i32> < i32 0, i32 1, i32 2, i32 3 >, <4 x i32>* %a

Removed: llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-3.ll?rev=117424&view=auto
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-3.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-3.ll (removed)
@@ -1,20 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -o %t
-; RUN: grep movlhps %t | count 1
-; RUN: grep movhlps %t | count 1
-
-define <4 x float> @test1(<4 x float>* %x, <4 x float>* %y) {
-        %tmp = load <4 x float>* %y             ; <<4 x float>> [#uses=2]
-        %tmp5 = load <4 x float>* %x            ; <<4 x float>> [#uses=2]
-        %tmp9 = fadd <4 x float> %tmp5, %tmp             ; <<4 x float>> [#uses=1]
-        %tmp21 = fsub <4 x float> %tmp5, %tmp            ; <<4 x float>> [#uses=1]
-        %tmp27 = shufflevector <4 x float> %tmp9, <4 x float> %tmp21, <4 x i32> < i32 0, i32 1, i32 4, i32 5 >                ; <<4 x float>> [#uses=1]
-        ret <4 x float> %tmp27
-}
-
-define <4 x float> @movhl(<4 x float>* %x, <4 x float>* %y) {
-entry:
-        %tmp = load <4 x float>* %y             ; <<4 x float>> [#uses=1]
-        %tmp3 = load <4 x float>* %x            ; <<4 x float>> [#uses=1]
-        %tmp4 = shufflevector <4 x float> %tmp3, <4 x float> %tmp, <4 x i32> < i32 2, i32 3, i32 6, i32 7 >           ; <<4 x float>> [#uses=1]
-        ret <4 x float> %tmp4
-}

Removed: llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-4.ll?rev=117424&view=auto
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-4.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-4.ll (removed)
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 > %t
-; RUN: grep shuf %t | count 2
-; RUN: not grep unpck %t
-
-define void @test(<4 x float>* %res, <4 x float>* %A, <4 x float>* %B, <4 x float>* %C) {
-        %tmp3 = load <4 x float>* %B            ; <<4 x float>> [#uses=1]
-        %tmp5 = load <4 x float>* %C            ; <<4 x float>> [#uses=1]
-        %tmp11 = shufflevector <4 x float> %tmp3, <4 x float> %tmp5, <4 x i32> < i32 1, i32 4, i32 1, i32 5 >         ; <<4 x float>> [#uses=1]
-        store <4 x float> %tmp11, <4 x float>* %res
-        ret void
-}
-

Removed: llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-5.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-5.ll?rev=117424&view=auto
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-5.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-5.ll (removed)
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -o %t
-; RUN: grep movhlps %t | count 1
-; RUN: grep shufps  %t | count 1
-
-define void @test() nounwind {
-        %tmp1 = load <4 x float>* null          ; <<4 x float>> [#uses=2]
-        %tmp2 = shufflevector <4 x float> %tmp1, <4 x float> < float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00 >, <4 x i32> < i32 0, i32 1, i32 6, i32 7 >             ; <<4 x float>> [#uses=1]
-        %tmp3 = shufflevector <4 x float> %tmp1, <4 x float> zeroinitializer, <4 x i32> < i32 2, i32 3, i32 6, i32 7 >                ; <<4 x float>> [#uses=1]
-        %tmp4 = fadd <4 x float> %tmp2, %tmp3            ; <<4 x float>> [#uses=1]
-        store <4 x float> %tmp4, <4 x float>* null
-        ret void
-}
-

Removed: llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-6.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-6.ll?rev=117424&view=auto
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-6.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-6.ll (removed)
@@ -1,42 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -o %t
-; RUN: grep movapd %t | count 1
-; RUN: grep movaps %t | count 1
-; RUN: grep movups %t | count 2
-
-target triple = "i686-apple-darwin"
- at x = external global [4 x i32]
-
-define <2 x i64> @test1() {
-	%tmp = load i32* getelementptr ([4 x i32]* @x, i32 0, i32 0)		; <i32> [#uses=1]
-	%tmp3 = load i32* getelementptr ([4 x i32]* @x, i32 0, i32 1)		; <i32> [#uses=1]
-	%tmp5 = load i32* getelementptr ([4 x i32]* @x, i32 0, i32 2)		; <i32> [#uses=1]
-	%tmp7 = load i32* getelementptr ([4 x i32]* @x, i32 0, i32 3)		; <i32> [#uses=1]
-	%tmp.upgrd.1 = insertelement <4 x i32> undef, i32 %tmp, i32 0		; <<4 x i32>> [#uses=1]
-	%tmp13 = insertelement <4 x i32> %tmp.upgrd.1, i32 %tmp3, i32 1		; <<4 x i32>> [#uses=1]
-	%tmp14 = insertelement <4 x i32> %tmp13, i32 %tmp5, i32 2		; <<4 x i32>> [#uses=1]
-	%tmp15 = insertelement <4 x i32> %tmp14, i32 %tmp7, i32 3		; <<4 x i32>> [#uses=1]
-	%tmp16 = bitcast <4 x i32> %tmp15 to <2 x i64>		; <<2 x i64>> [#uses=1]
-	ret <2 x i64> %tmp16
-}
-
-define <4 x float> @test2(i32 %dummy, float %a, float %b, float %c, float %d) {
-	%tmp = insertelement <4 x float> undef, float %a, i32 0		; <<4 x float>> [#uses=1]
-	%tmp11 = insertelement <4 x float> %tmp, float %b, i32 1		; <<4 x float>> [#uses=1]
-	%tmp12 = insertelement <4 x float> %tmp11, float %c, i32 2		; <<4 x float>> [#uses=1]
-	%tmp13 = insertelement <4 x float> %tmp12, float %d, i32 3		; <<4 x float>> [#uses=1]
-	ret <4 x float> %tmp13
-}
-
-define <4 x float> @test3(float %a, float %b, float %c, float %d) {
-	%tmp = insertelement <4 x float> undef, float %a, i32 0		; <<4 x float>> [#uses=1]
-	%tmp11 = insertelement <4 x float> %tmp, float %b, i32 1		; <<4 x float>> [#uses=1]
-	%tmp12 = insertelement <4 x float> %tmp11, float %c, i32 2		; <<4 x float>> [#uses=1]
-	%tmp13 = insertelement <4 x float> %tmp12, float %d, i32 3		; <<4 x float>> [#uses=1]
-	ret <4 x float> %tmp13
-}
-
-define <2 x double> @test4(double %a, double %b) {
-	%tmp = insertelement <2 x double> undef, double %a, i32 0		; <<2 x double>> [#uses=1]
-	%tmp7 = insertelement <2 x double> %tmp, double %b, i32 1		; <<2 x double>> [#uses=1]
-	ret <2 x double> %tmp7
-}

Removed: llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-7.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-7.ll?rev=117424&view=auto
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-7.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-7.ll (removed)
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -o %t
-; RUN: grep pxor %t | count 1
-; RUN: not grep shufps %t
-
-define void @test() {
-        bitcast <4 x i32> zeroinitializer to <4 x float>                ; <<4 x float>>:1 [#uses=1]
-        shufflevector <4 x float> %1, <4 x float> zeroinitializer, <4 x i32> zeroinitializer         ; <<4 x float>>:2 [#uses=1]
-        store <4 x float> %2, <4 x float>* null
-        unreachable
-}
-

Removed: llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-8.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-8.ll?rev=117424&view=auto
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-8.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-8.ll (removed)
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | \
-; RUN:   not grep shufps
-
-define void @test(<4 x float>* %res, <4 x float>* %A) {
-        %tmp1 = load <4 x float>* %A            ; <<4 x float>> [#uses=1]
-        %tmp2 = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> < i32 0, i32 5, i32 6, i32 7 >          ; <<4 x float>> [#uses=1]
-        store <4 x float> %tmp2, <4 x float>* %res
-        ret void
-}
-

Removed: llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-9.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-9.ll?rev=117424&view=auto
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-9.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-9.ll (removed)
@@ -1,21 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | FileCheck %s
-
-define <4 x i32> @test(i8** %ptr) {
-; CHECK: pxor
-; CHECK: punpcklbw
-; CHECK: punpcklwd
-
-	%tmp = load i8** %ptr		; <i8*> [#uses=1]
-	%tmp.upgrd.1 = bitcast i8* %tmp to float*		; <float*> [#uses=1]
-	%tmp.upgrd.2 = load float* %tmp.upgrd.1		; <float> [#uses=1]
-	%tmp.upgrd.3 = insertelement <4 x float> undef, float %tmp.upgrd.2, i32 0		; <<4 x float>> [#uses=1]
-	%tmp9 = insertelement <4 x float> %tmp.upgrd.3, float 0.000000e+00, i32 1		; <<4 x float>> [#uses=1]
-	%tmp10 = insertelement <4 x float> %tmp9, float 0.000000e+00, i32 2		; <<4 x float>> [#uses=1]
-	%tmp11 = insertelement <4 x float> %tmp10, float 0.000000e+00, i32 3		; <<4 x float>> [#uses=1]
-	%tmp21 = bitcast <4 x float> %tmp11 to <16 x i8>		; <<16 x i8>> [#uses=1]
-	%tmp22 = shufflevector <16 x i8> %tmp21, <16 x i8> zeroinitializer, <16 x i32> < i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23 >		; <<16 x i8>> [#uses=1]
-	%tmp31 = bitcast <16 x i8> %tmp22 to <8 x i16>		; <<8 x i16>> [#uses=1]
-	%tmp.upgrd.4 = shufflevector <8 x i16> zeroinitializer, <8 x i16> %tmp31, <8 x i32> < i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11 >		; <<8 x i16>> [#uses=1]
-	%tmp36 = bitcast <8 x i16> %tmp.upgrd.4 to <4 x i32>		; <<4 x i32>> [#uses=1]
-	ret <4 x i32> %tmp36
-}

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/vec_zero_cse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/vec_zero_cse.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/vec_zero_cse.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/vec_zero_cse.ll Tue Oct 26 19:48:03 2010
@@ -1,5 +1,6 @@
-; RUN: llc < %s -relocation-model=static -march=x86 -mcpu=yonah | grep pxor | count 2
-; RUN: llc < %s -relocation-model=static -march=x86 -mcpu=yonah | grep pcmpeqd | count 2
+; RUN: llc < %s -relocation-model=static -march=x86 -mcpu=yonah | grep pxor | count 1
+; RUN: llc < %s -relocation-model=static -march=x86 -mcpu=yonah | grep pcmpeqd | count 1
+; 64-bit stores here do not use MMX.
 
 @M1 = external global <1 x i64>
 @M2 = external global <2 x i32>

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/widen_select-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/widen_select-1.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/widen_select-1.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/widen_select-1.ll Tue Oct 26 19:48:03 2010
@@ -1,5 +1,5 @@
 ; RUN: llc < %s -march=x86 -mattr=+sse42 -disable-mmx | FileCheck %s
-; CHECK: jne
+; CHECK: je
 
 ; widening select v6i32 and then a sub
 

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/widen_shuffle-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/widen_shuffle-1.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/widen_shuffle-1.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/widen_shuffle-1.ll Tue Oct 26 19:48:03 2010
@@ -3,7 +3,8 @@
 ; widening shuffle v3float and then a add
 define void @shuf(<3 x float>* %dst.addr, <3 x float> %src1,<3 x float> %src2) nounwind {
 entry:
-; CHECK: insertps
+; CHECK: shuf:
+; CHECK: extractps
 ; CHECK: extractps
 	%x = shufflevector <3 x float> %src1, <3 x float> %src2, <3 x i32> < i32 0, i32 1, i32 2>
 	%val = fadd <3 x float> %x, %src2
@@ -15,7 +16,8 @@
 ; widening shuffle v3float with a different mask and then a add
 define void @shuf2(<3 x float>* %dst.addr, <3 x float> %src1,<3 x float> %src2) nounwind {
 entry:
-; CHECK: insertps
+; CHECK: shuf2:
+; CHECK: extractps
 ; CHECK: extractps
 	%x = shufflevector <3 x float> %src1, <3 x float> %src2, <3 x i32> < i32 0, i32 4, i32 2>
 	%val = fadd <3 x float> %x, %src2
@@ -26,7 +28,7 @@
 ; Example of when widening a v3float operation causes the DAG to replace a node
 ; with the operation that we are currently widening, i.e. when replacing
 ; opA with opB, the DAG will produce new operations with opA.
-define void @shuf3(<4 x float> %tmp10, <4 x float> %vecinit15, <4 x float>* %dst) {
+define void @shuf3(<4 x float> %tmp10, <4 x float> %vecinit15, <4 x float>* %dst) nounwind {
 entry:
 ; CHECK: pshufd
   %shuffle.i.i.i12 = shufflevector <4 x float> %tmp10, <4 x float> %vecinit15, <4 x i32> <i32 0, i32 1, i32 4, i32 5>

Modified: llvm/branches/wendling/eh/test/CodeGen/X86/zero-remat.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/zero-remat.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/zero-remat.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/zero-remat.ll Tue Oct 26 19:48:03 2010
@@ -1,5 +1,5 @@
 ; RUN: llc < %s -march=x86-64 | FileCheck %s --check-prefix=CHECK-64
-; RUN: llc < %s -march=x86-64 -stats  -info-output-file - | grep asm-printer  | grep 12
+; RUN: llc < %s -march=x86-64 -o /dev/null -stats  -info-output-file - | grep asm-printer  | grep 12
 ; RUN: llc < %s -march=x86 | FileCheck %s --check-prefix=CHECK-32
 
 declare void @bar(double %x)

Modified: llvm/branches/wendling/eh/test/CodeGen/XCore/ashr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/XCore/ashr.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/XCore/ashr.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/XCore/ashr.ll Tue Oct 26 19:48:03 2010
@@ -50,9 +50,9 @@
 	ret i32 %2
 }
 ; CHECK: f3:
-; CHECK-NEXT: ashr r1, r0, 32
+; CHECK-NEXT: ashr r0, r0, 32
+; CHECK-NEXT: bf r0
 ; CHECK-NEXT: ldc r0, 10
-; CHECK-NEXT: bt r1
 ; CHECK: ldc r0, 17
 
 define i32 @f4(i32 %a) {
@@ -61,9 +61,9 @@
 	ret i32 %2
 }
 ; CHECK: f4:
-; CHECK-NEXT: ashr r1, r0, 32
+; CHECK-NEXT: ashr r0, r0, 32
+; CHECK-NEXT: bf r0
 ; CHECK-NEXT: ldc r0, 17
-; CHECK-NEXT: bt r1
 ; CHECK: ldc r0, 10
 
 define i32 @f5(i32 %a) {

Modified: llvm/branches/wendling/eh/test/DebugInfo/2009-10-16-Scope.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/DebugInfo/2009-10-16-Scope.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/DebugInfo/2009-10-16-Scope.ll (original)
+++ llvm/branches/wendling/eh/test/DebugInfo/2009-10-16-Scope.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,5 @@
-; RUN: llc %s -O0 -o /dev/null
+; RUN: llc %s -O0 -o /dev/null -mtriple=x86_64-apple-darwin
+; RUN: llc %s -O0 -o /dev/null -mtriple=arm-apple-darwin
 ; PR 5197
 ; There is not any llvm instruction associated with !5. The code generator
 ; should be able to handle this.

Removed: llvm/branches/wendling/eh/test/DebugInfo/2010-01-18-DbgValue.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/DebugInfo/2010-01-18-DbgValue.ll?rev=117424&view=auto
==============================================================================
--- llvm/branches/wendling/eh/test/DebugInfo/2010-01-18-DbgValue.ll (original)
+++ llvm/branches/wendling/eh/test/DebugInfo/2010-01-18-DbgValue.ll (removed)
@@ -1,51 +0,0 @@
-; RUN: llc -O0 < %s | FileCheck %s
-; ModuleID = 'try.c'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32"
-target triple = "i386-apple-darwin9.8"
-; Currently, dbg.declare generates a DEBUG_VALUE comment.  Eventually it will
-; generate DWARF and this test will need to be modified or removed.
-
-
-%struct.Pt = type { double, double }
-%struct.Rect = type { %struct.Pt, %struct.Pt }
-
-define double @foo(%struct.Rect* byval %my_r0) nounwind ssp {
-entry:
-;CHECK: DEBUG_VALUE
-  %retval = alloca double                         ; <double*> [#uses=2]
-  %0 = alloca double                              ; <double*> [#uses=2]
-  %"alloca point" = bitcast i32 0 to i32          ; <i32> [#uses=0]
-  call void @llvm.dbg.declare(metadata !{%struct.Rect* %my_r0}, metadata !0), !dbg !15
-  %1 = getelementptr inbounds %struct.Rect* %my_r0, i32 0, i32 0, !dbg !16 ; <%struct.Pt*> [#uses=1]
-  %2 = getelementptr inbounds %struct.Pt* %1, i32 0, i32 0, !dbg !16 ; <double*> [#uses=1]
-  %3 = load double* %2, align 8, !dbg !16         ; <double> [#uses=1]
-  store double %3, double* %0, align 8, !dbg !16
-  %4 = load double* %0, align 8, !dbg !16         ; <double> [#uses=1]
-  store double %4, double* %retval, align 8, !dbg !16
-  br label %return, !dbg !16
-
-return:                                           ; preds = %entry
-  %retval1 = load double* %retval, !dbg !16       ; <double> [#uses=1]
-  ret double %retval1, !dbg !16
-}
-
-declare void @llvm.dbg.declare(metadata, metadata) nounwind readnone
-
-!0 = metadata !{i32 524545, metadata !1, metadata !"my_r0", metadata !2, i32 11, metadata !7} ; [ DW_TAG_arg_variable ]
-!1 = metadata !{i32 524334, i32 0, metadata !2, metadata !"foo", metadata !"foo", metadata !"foo", metadata !2, i32 11, metadata !4, i1 false, i1 true, i32 0, i32 0, null, i1 false} ; [ DW_TAG_subprogram ]
-!2 = metadata !{i32 524329, metadata !"b2.c", metadata !"/tmp/", metadata !3} ; [ DW_TAG_file_type ]
-!3 = metadata !{i32 524305, i32 0, i32 1, metadata !"b2.c", metadata !"/tmp/", metadata !"4.2.1 (Based on Apple Inc. build 5658) (LLVM build)", i1 true, i1 false, metadata !"", i32 0} ; [ DW_TAG_compile_unit ]
-!4 = metadata !{i32 524309, metadata !2, metadata !"", metadata !2, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !5, i32 0, null} ; [ DW_TAG_subroutine_type ]
-!5 = metadata !{metadata !6, metadata !7}
-!6 = metadata !{i32 524324, metadata !2, metadata !"double", metadata !2, i32 0, i64 64, i64 64, i64 0, i32 0, i32 4} ; [ DW_TAG_base_type ]
-!7 = metadata !{i32 524307, metadata !2, metadata !"Rect", metadata !2, i32 6, i64 256, i64 64, i64 0, i32 0, null, metadata !8, i32 0, null} ; [ DW_TAG_structure_type ]
-!8 = metadata !{metadata !9, metadata !14}
-!9 = metadata !{i32 524301, metadata !7, metadata !"P1", metadata !2, i32 7, i64 128, i64 64, i64 0, i32 0, metadata !10} ; [ DW_TAG_member ]
-!10 = metadata !{i32 524307, metadata !2, metadata !"Pt", metadata !2, i32 1, i64 128, i64 64, i64 0, i32 0, null, metadata !11, i32 0, null} ; [ DW_TAG_structure_type ]
-!11 = metadata !{metadata !12, metadata !13}
-!12 = metadata !{i32 524301, metadata !10, metadata !"x", metadata !2, i32 2, i64 64, i64 64, i64 0, i32 0, metadata !6} ; [ DW_TAG_member ]
-!13 = metadata !{i32 524301, metadata !10, metadata !"y", metadata !2, i32 3, i64 64, i64 64, i64 64, i32 0, metadata !6} ; [ DW_TAG_member ]
-!14 = metadata !{i32 524301, metadata !7, metadata !"P2", metadata !2, i32 8, i64 128, i64 64, i64 128, i32 0, metadata !10} ; [ DW_TAG_member ]
-!15 = metadata !{i32 11, i32 0, metadata !1, null}
-!16 = metadata !{i32 12, i32 0, metadata !17, null}
-!17 = metadata !{i32 524299, metadata !1, i32 11, i32 0} ; [ DW_TAG_lexical_block ]

Removed: llvm/branches/wendling/eh/test/DebugInfo/2010-02-01-DbgValueCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/DebugInfo/2010-02-01-DbgValueCrash.ll?rev=117424&view=auto
==============================================================================
--- llvm/branches/wendling/eh/test/DebugInfo/2010-02-01-DbgValueCrash.ll (original)
+++ llvm/branches/wendling/eh/test/DebugInfo/2010-02-01-DbgValueCrash.ll (removed)
@@ -1,34 +0,0 @@
-; RUN: llc -O1 < %s
-; ModuleID = 'pr6157.bc'
-target triple = "x86_64-unknown-linux-gnu"
-; formerly crashed in SelectionDAGBuilder
-
-%tart.reflect.ComplexType = type { double, double }
-
- at .type.SwitchStmtTest = constant %tart.reflect.ComplexType { double 3.0, double 2.0 }
-
-define i32 @"main(tart.core.String[])->int32"(i32 %args) {
-entry:
-  tail call void @llvm.dbg.value(metadata !14, i64 0, metadata !8)
-  tail call void @"tart.reflect.ComplexType.create->tart.core.Object"(%tart.reflect.ComplexType* @.type.SwitchStmtTest) ; <%tart.core.Object*> [#uses=2]
-  ret i32 3
-}
-
-declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
-declare void @"tart.reflect.ComplexType.create->tart.core.Object"(%tart.reflect.ComplexType*) nounwind readnone
-
-!0 = metadata !{i32 458769, i32 0, i32 1, metadata !"sm.c", metadata !"/Volumes/MacOS9/tests/", metadata !"4.2.1 (Based on Apple Inc. build 5658) (LLVM build)", i1 true, i1 false, metadata !"", i32 0} ; [ DW_TAG_compile_unit ]
-!1 = metadata !{i32 458790, metadata !0, metadata !"", metadata !0, i32 0, i64 192, i64 64, i64 0, i32 0, metadata !2} ; [ DW_TAG_const_type ]
-!2 = metadata !{i32 458771, metadata !0, metadata !"C", metadata !0, i32 1, i64 192, i64 64, i64 0, i32 0, null, metadata !3, i32 0, null} ; [ DW_TAG_structure_type ]
-!3 = metadata !{metadata !4, metadata !6, metadata !7}
-!4 = metadata !{i32 458765, metadata !2, metadata !"x", metadata !0, i32 1, i64 64, i64 64, i64 0, i32 0, metadata !5} ; [ DW_TAG_member ]
-!5 = metadata !{i32 458788, metadata !0, metadata !"double", metadata !0, i32 0, i64 64, i64 64, i64 0, i32 0, i32 4} ; [ DW_TAG_base_type ]
-!6 = metadata !{i32 458765, metadata !2, metadata !"y", metadata !0, i32 1, i64 64, i64 64, i64 64, i32 0, metadata !5} ; [ DW_TAG_member ]
-!7 = metadata !{i32 458765, metadata !2, metadata !"z", metadata !0, i32 1, i64 64, i64 64, i64 128, i32 0, metadata !5} ; [ DW_TAG_member ]
-!8 = metadata !{i32 459008, metadata !9, metadata !"t", metadata !0, i32 5, metadata !2} ; [ DW_TAG_auto_variable ]
-!9 = metadata !{i32 458763, metadata !10}        ; [ DW_TAG_lexical_block ]
-!10 = metadata !{i32 458798, i32 0, metadata !0, metadata !"foo", metadata !"foo", metadata !"foo", metadata !0, i32 4, metadata !11, i1 false, i1 true, i32 0, i32 0, null} ; [ DW_TAG_subprogram ]
-!11 = metadata !{i32 458773, metadata !0, metadata !"", metadata !0, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !12, i32 0, null} ; [ DW_TAG_subroutine_type ]
-!12 = metadata !{metadata !13}
-!13 = metadata !{i32 458788, metadata !0, metadata !"int", metadata !0, i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
-!14 = metadata !{%tart.reflect.ComplexType* @.type.SwitchStmtTest}

Modified: llvm/branches/wendling/eh/test/DebugInfo/2010-05-10-MultipleCU.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/DebugInfo/2010-05-10-MultipleCU.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/DebugInfo/2010-05-10-MultipleCU.ll (original)
+++ llvm/branches/wendling/eh/test/DebugInfo/2010-05-10-MultipleCU.ll Tue Oct 26 19:48:03 2010
@@ -1,7 +1,21 @@
-; RUN: llc -O0 -asm-verbose  %s -o %t
-; RUN: grep DW_TAG_compile_unit %t | count 3
+; RUN: llc -O0 -asm-verbose < %s | FileCheck %s
 ; One for a.c, second one for b.c and third one for abbrev.
 
+; CHECK: info_begin
+; CHECK: DW_TAG_compile_unit
+; CHECK-NOT: DW_TAG_compile_unit
+; CHECK: info_end
+
+; CHECK: info_begin
+; CHECK: DW_TAG_compile_unit
+; CHECK-NOT: DW_TAG_compile_unit
+; CHECK: info_end
+
+; CHECK: abbrev_begin
+; CHECK: DW_TAG_compile_unit
+; CHECK-NOT: DW_TAG_compile_unit
+; CHECK: abbrev_end
+
 define i32 @foo() nounwind readnone ssp {
 return:
   ret i32 42, !dbg !0

Removed: llvm/branches/wendling/eh/test/DebugInfo/2010-05-25-DotDebugLoc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/DebugInfo/2010-05-25-DotDebugLoc.ll?rev=117424&view=auto
==============================================================================
--- llvm/branches/wendling/eh/test/DebugInfo/2010-05-25-DotDebugLoc.ll (original)
+++ llvm/branches/wendling/eh/test/DebugInfo/2010-05-25-DotDebugLoc.ll (removed)
@@ -1,239 +0,0 @@
-; RUN: llc -O2 < %s -mtriple=x86_64-apple-darwin | grep debug_loc12
-; Test to check .debug_loc support. This test case emits 13 debug_loc entries.
-
-%0 = type { double }
-
-define hidden %0 @__divsc3(float %a, float %b, float %c, float %d) nounwind readnone {
-entry:
-  tail call void @llvm.dbg.value(metadata !{float %a}, i64 0, metadata !0)
-  tail call void @llvm.dbg.value(metadata !{float %b}, i64 0, metadata !11)
-  tail call void @llvm.dbg.value(metadata !{float %c}, i64 0, metadata !12)
-  tail call void @llvm.dbg.value(metadata !{float %d}, i64 0, metadata !13)
-  %0 = tail call float @fabsf(float %c) nounwind readnone, !dbg !19 ; <float> [#uses=1]
-  %1 = tail call float @fabsf(float %d) nounwind readnone, !dbg !19 ; <float> [#uses=1]
-  %2 = fcmp olt float %0, %1, !dbg !19            ; <i1> [#uses=1]
-  br i1 %2, label %bb, label %bb1, !dbg !19
-
-bb:                                               ; preds = %entry
-  %3 = fdiv float %c, %d, !dbg !20                ; <float> [#uses=3]
-  tail call void @llvm.dbg.value(metadata !{float %3}, i64 0, metadata !16), !dbg !20
-  %4 = fmul float %3, %c, !dbg !21                ; <float> [#uses=1]
-  %5 = fadd float %4, %d, !dbg !21                ; <float> [#uses=2]
-  tail call void @llvm.dbg.value(metadata !{float %5}, i64 0, metadata !14), !dbg !21
-  %6 = fmul float %3, %a, !dbg !22                ; <float> [#uses=1]
-  %7 = fadd float %6, %b, !dbg !22                ; <float> [#uses=1]
-  %8 = fdiv float %7, %5, !dbg !22                ; <float> [#uses=1]
-  tail call void @llvm.dbg.value(metadata !{float %8}, i64 0, metadata !17), !dbg !22
-  %9 = fmul float %3, %b, !dbg !23                ; <float> [#uses=1]
-  %10 = fsub float %9, %a, !dbg !23               ; <float> [#uses=1]
-  %11 = fdiv float %10, %5, !dbg !23              ; <float> [#uses=1]
-  tail call void @llvm.dbg.value(metadata !{float %11}, i64 0, metadata !18), !dbg !23
-  br label %bb2, !dbg !23
-
-bb1:                                              ; preds = %entry
-  %12 = fdiv float %d, %c, !dbg !24               ; <float> [#uses=3]
-  tail call void @llvm.dbg.value(metadata !{float %12}, i64 0, metadata !16), !dbg !24
-  %13 = fmul float %12, %d, !dbg !25              ; <float> [#uses=1]
-  %14 = fadd float %13, %c, !dbg !25              ; <float> [#uses=2]
-  tail call void @llvm.dbg.value(metadata !{float %14}, i64 0, metadata !14), !dbg !25
-  %15 = fmul float %12, %b, !dbg !26              ; <float> [#uses=1]
-  %16 = fadd float %15, %a, !dbg !26              ; <float> [#uses=1]
-  %17 = fdiv float %16, %14, !dbg !26             ; <float> [#uses=1]
-  tail call void @llvm.dbg.value(metadata !{float %17}, i64 0, metadata !17), !dbg !26
-  %18 = fmul float %12, %a, !dbg !27              ; <float> [#uses=1]
-  %19 = fsub float %b, %18, !dbg !27              ; <float> [#uses=1]
-  %20 = fdiv float %19, %14, !dbg !27             ; <float> [#uses=1]
-  tail call void @llvm.dbg.value(metadata !{float %20}, i64 0, metadata !18), !dbg !27
-  br label %bb2, !dbg !27
-
-bb2:                                              ; preds = %bb1, %bb
-  %y.0 = phi float [ %11, %bb ], [ %20, %bb1 ]    ; <float> [#uses=5]
-  %x.0 = phi float [ %8, %bb ], [ %17, %bb1 ]     ; <float> [#uses=5]
-  %21 = fcmp uno float %x.0, 0.000000e+00, !dbg !28 ; <i1> [#uses=1]
-  %22 = fcmp uno float %y.0, 0.000000e+00, !dbg !28 ; <i1> [#uses=1]
-  %or.cond = and i1 %21, %22                      ; <i1> [#uses=1]
-  br i1 %or.cond, label %bb4, label %bb46, !dbg !28
-
-bb4:                                              ; preds = %bb2
-  %23 = fcmp une float %c, 0.000000e+00, !dbg !29 ; <i1> [#uses=1]
-  %24 = fcmp une float %d, 0.000000e+00, !dbg !29 ; <i1> [#uses=1]
-  %or.cond93 = or i1 %23, %24                     ; <i1> [#uses=1]
-  br i1 %or.cond93, label %bb9, label %bb6, !dbg !29
-
-bb6:                                              ; preds = %bb4
-  %25 = fcmp uno float %a, 0.000000e+00, !dbg !29 ; <i1> [#uses=1]
-  %26 = fcmp uno float %b, 0.000000e+00, !dbg !29 ; <i1> [#uses=1]
-  %or.cond94 = and i1 %25, %26                    ; <i1> [#uses=1]
-  br i1 %or.cond94, label %bb9, label %bb8, !dbg !29
-
-bb8:                                              ; preds = %bb6
-  %27 = tail call float @copysignf(float 0x7FF0000000000000, float %c) nounwind readnone, !dbg !30 ; <float> [#uses=2]
-  %28 = fmul float %27, %a, !dbg !30              ; <float> [#uses=1]
-  tail call void @llvm.dbg.value(metadata !{float %28}, i64 0, metadata !17), !dbg !30
-  %29 = fmul float %27, %b, !dbg !31              ; <float> [#uses=1]
-  tail call void @llvm.dbg.value(metadata !{float %29}, i64 0, metadata !18), !dbg !31
-  br label %bb46, !dbg !31
-
-bb9:                                              ; preds = %bb6, %bb4
-  %30 = fcmp ord float %a, 0.000000e+00           ; <i1> [#uses=1]
-  %31 = fsub float %a, %a, !dbg !32               ; <float> [#uses=3]
-  %32 = fcmp uno float %31, 0.000000e+00          ; <i1> [#uses=1]
-  %33 = and i1 %30, %32, !dbg !32                 ; <i1> [#uses=2]
-  br i1 %33, label %bb14, label %bb11, !dbg !32
-
-bb11:                                             ; preds = %bb9
-  %34 = fcmp ord float %b, 0.000000e+00           ; <i1> [#uses=1]
-  %35 = fsub float %b, %b, !dbg !32               ; <float> [#uses=1]
-  %36 = fcmp uno float %35, 0.000000e+00          ; <i1> [#uses=1]
-  %37 = and i1 %34, %36, !dbg !32                 ; <i1> [#uses=1]
-  br i1 %37, label %bb14, label %bb27, !dbg !32
-
-bb14:                                             ; preds = %bb11, %bb9
-  %38 = fsub float %c, %c, !dbg !32               ; <float> [#uses=1]
-  %39 = fcmp ord float %38, 0.000000e+00          ; <i1> [#uses=1]
-  br i1 %39, label %bb15, label %bb27, !dbg !32
-
-bb15:                                             ; preds = %bb14
-  %40 = fsub float %d, %d, !dbg !32               ; <float> [#uses=1]
-  %41 = fcmp ord float %40, 0.000000e+00          ; <i1> [#uses=1]
-  br i1 %41, label %bb16, label %bb27, !dbg !32
-
-bb16:                                             ; preds = %bb15
-  %iftmp.0.0 = select i1 %33, float 1.000000e+00, float 0.000000e+00 ; <float> [#uses=1]
-  %42 = tail call float @copysignf(float %iftmp.0.0, float %a) nounwind readnone, !dbg !33 ; <float> [#uses=2]
-  tail call void @llvm.dbg.value(metadata !{float %42}, i64 0, metadata !0), !dbg !33
-  %43 = fcmp ord float %b, 0.000000e+00           ; <i1> [#uses=1]
-  %44 = fsub float %b, %b, !dbg !34               ; <float> [#uses=1]
-  %45 = fcmp uno float %44, 0.000000e+00          ; <i1> [#uses=1]
-  %46 = and i1 %43, %45, !dbg !34                 ; <i1> [#uses=1]
-  %iftmp.1.0 = select i1 %46, float 1.000000e+00, float 0.000000e+00 ; <float> [#uses=1]
-  %47 = tail call float @copysignf(float %iftmp.1.0, float %b) nounwind readnone, !dbg !34 ; <float> [#uses=2]
-  tail call void @llvm.dbg.value(metadata !{float %47}, i64 0, metadata !11), !dbg !34
-  %48 = fmul float %42, %c, !dbg !35              ; <float> [#uses=1]
-  %49 = fmul float %47, %d, !dbg !35              ; <float> [#uses=1]
-  %50 = fadd float %48, %49, !dbg !35             ; <float> [#uses=1]
-  %51 = fmul float %50, 0x7FF0000000000000, !dbg !35 ; <float> [#uses=1]
-  tail call void @llvm.dbg.value(metadata !{float %51}, i64 0, metadata !17), !dbg !35
-  %52 = fmul float %47, %c, !dbg !36              ; <float> [#uses=1]
-  %53 = fmul float %42, %d, !dbg !36              ; <float> [#uses=1]
-  %54 = fsub float %52, %53, !dbg !36             ; <float> [#uses=1]
-  %55 = fmul float %54, 0x7FF0000000000000, !dbg !36 ; <float> [#uses=1]
-  tail call void @llvm.dbg.value(metadata !{float %55}, i64 0, metadata !18), !dbg !36
-  br label %bb46, !dbg !36
-
-bb27:                                             ; preds = %bb15, %bb14, %bb11
-  %56 = fcmp ord float %c, 0.000000e+00           ; <i1> [#uses=1]
-  %57 = fsub float %c, %c, !dbg !37               ; <float> [#uses=1]
-  %58 = fcmp uno float %57, 0.000000e+00          ; <i1> [#uses=1]
-  %59 = and i1 %56, %58, !dbg !37                 ; <i1> [#uses=2]
-  br i1 %59, label %bb33, label %bb30, !dbg !37
-
-bb30:                                             ; preds = %bb27
-  %60 = fcmp ord float %d, 0.000000e+00           ; <i1> [#uses=1]
-  %61 = fsub float %d, %d, !dbg !37               ; <float> [#uses=1]
-  %62 = fcmp uno float %61, 0.000000e+00          ; <i1> [#uses=1]
-  %63 = and i1 %60, %62, !dbg !37                 ; <i1> [#uses=1]
-  %64 = fcmp ord float %31, 0.000000e+00          ; <i1> [#uses=1]
-  %or.cond95 = and i1 %63, %64                    ; <i1> [#uses=1]
-  br i1 %or.cond95, label %bb34, label %bb46, !dbg !37
-
-bb33:                                             ; preds = %bb27
-  %.old = fcmp ord float %31, 0.000000e+00        ; <i1> [#uses=1]
-  br i1 %.old, label %bb34, label %bb46, !dbg !37
-
-bb34:                                             ; preds = %bb33, %bb30
-  %65 = fsub float %b, %b, !dbg !37               ; <float> [#uses=1]
-  %66 = fcmp ord float %65, 0.000000e+00          ; <i1> [#uses=1]
-  br i1 %66, label %bb35, label %bb46, !dbg !37
-
-bb35:                                             ; preds = %bb34
-  %iftmp.2.0 = select i1 %59, float 1.000000e+00, float 0.000000e+00 ; <float> [#uses=1]
-  %67 = tail call float @copysignf(float %iftmp.2.0, float %c) nounwind readnone, !dbg !38 ; <float> [#uses=2]
-  tail call void @llvm.dbg.value(metadata !{float %67}, i64 0, metadata !12), !dbg !38
-  %68 = fcmp ord float %d, 0.000000e+00           ; <i1> [#uses=1]
-  %69 = fsub float %d, %d, !dbg !39               ; <float> [#uses=1]
-  %70 = fcmp uno float %69, 0.000000e+00          ; <i1> [#uses=1]
-  %71 = and i1 %68, %70, !dbg !39                 ; <i1> [#uses=1]
-  %iftmp.3.0 = select i1 %71, float 1.000000e+00, float 0.000000e+00 ; <float> [#uses=1]
-  %72 = tail call float @copysignf(float %iftmp.3.0, float %d) nounwind readnone, !dbg !39 ; <float> [#uses=2]
-  tail call void @llvm.dbg.value(metadata !{float %72}, i64 0, metadata !13), !dbg !39
-  %73 = fmul float %67, %a, !dbg !40              ; <float> [#uses=1]
-  %74 = fmul float %72, %b, !dbg !40              ; <float> [#uses=1]
-  %75 = fadd float %73, %74, !dbg !40             ; <float> [#uses=1]
-  %76 = fmul float %75, 0.000000e+00, !dbg !40    ; <float> [#uses=1]
-  tail call void @llvm.dbg.value(metadata !{float %76}, i64 0, metadata !17), !dbg !40
-  %77 = fmul float %67, %b, !dbg !41              ; <float> [#uses=1]
-  %78 = fmul float %72, %a, !dbg !41              ; <float> [#uses=1]
-  %79 = fsub float %77, %78, !dbg !41             ; <float> [#uses=1]
-  %80 = fmul float %79, 0.000000e+00, !dbg !41    ; <float> [#uses=1]
-  tail call void @llvm.dbg.value(metadata !{float %80}, i64 0, metadata !18), !dbg !41
-  br label %bb46, !dbg !41
-
-bb46:                                             ; preds = %bb35, %bb34, %bb33, %bb30, %bb16, %bb8, %bb2
-  %y.1 = phi float [ %80, %bb35 ], [ %y.0, %bb34 ], [ %y.0, %bb33 ], [ %y.0, %bb30 ], [ %55, %bb16 ], [ %29, %bb8 ], [ %y.0, %bb2 ] ; <float> [#uses=2]
-  %x.1 = phi float [ %76, %bb35 ], [ %x.0, %bb34 ], [ %x.0, %bb33 ], [ %x.0, %bb30 ], [ %51, %bb16 ], [ %28, %bb8 ], [ %x.0, %bb2 ] ; <float> [#uses=1]
-  %81 = fmul float %y.1, 0.000000e+00, !dbg !42   ; <float> [#uses=1]
-  %82 = fadd float %y.1, 0.000000e+00, !dbg !42   ; <float> [#uses=1]
-  %tmpr = fadd float %x.1, %81, !dbg !42          ; <float> [#uses=1]
-  %tmp89 = bitcast float %tmpr to i32             ; <i32> [#uses=1]
-  %tmp90 = zext i32 %tmp89 to i64                 ; <i64> [#uses=1]
-  %tmp85 = bitcast float %82 to i32               ; <i32> [#uses=1]
-  %tmp86 = zext i32 %tmp85 to i64                 ; <i64> [#uses=1]
-  %tmp87 = shl i64 %tmp86, 32                     ; <i64> [#uses=1]
-  %ins = or i64 %tmp90, %tmp87                    ; <i64> [#uses=1]
-  %tmp84 = bitcast i64 %ins to double             ; <double> [#uses=1]
-  %mrv75 = insertvalue %0 undef, double %tmp84, 0, !dbg !42 ; <%0> [#uses=1]
-  ret %0 %mrv75, !dbg !42
-}
-
-declare float @fabsf(float)
-
-declare float @copysignf(float, float) nounwind readnone
-
-declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
-
-!llvm.dbg.lv = !{!0, !11, !12, !13, !14, !16, !17, !18}
-
-!0 = metadata !{i32 524545, metadata !1, metadata !"a", metadata !2, i32 1921, metadata !9} ; [ DW_TAG_arg_variable ]
-!1 = metadata !{i32 524334, i32 0, metadata !2, metadata !"__divsc3", metadata !"__divsc3", metadata !"__divsc3", metadata !2, i32 1922, metadata !4, i1 false, i1 true, i32 0, i32 0, null, i1 false, i1 true} ; [ DW_TAG_subprogram ]
-!2 = metadata !{i32 524329, metadata !"libgcc2.c", metadata !"/Users/yash/clean/LG.D/gcc/../../llvmgcc/gcc", metadata !3} ; [ DW_TAG_file_type ]
-!3 = metadata !{i32 524305, i32 0, i32 1, metadata !"libgcc2.c", metadata !"/Users/yash/clean/LG.D/gcc/../../llvmgcc/gcc", metadata !"4.2.1 (Based on Apple Inc. build 5658) (LLVM build)", i1 true, i1 true, metadata !"", i32 0} ; [ DW_TAG_compile_unit ]
-!4 = metadata !{i32 524309, metadata !2, metadata !"", metadata !2, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !5, i32 0, null} ; [ DW_TAG_subroutine_type ]
-!5 = metadata !{metadata !6, metadata !9, metadata !9, metadata !9, metadata !9}
-!6 = metadata !{i32 524310, metadata !7, metadata !"SCtype", metadata !7, i32 170, i64 0, i64 0, i64 0, i32 0, metadata !8} ; [ DW_TAG_typedef ]
-!7 = metadata !{i32 524329, metadata !"libgcc2.h", metadata !"/Users/yash/clean/LG.D/gcc/../../llvmgcc/gcc", metadata !3} ; [ DW_TAG_file_type ]
-!8 = metadata !{i32 524324, metadata !2, metadata !"complex float", metadata !2, i32 0, i64 64, i64 32, i64 0, i32 0, i32 3} ; [ DW_TAG_base_type ]
-!9 = metadata !{i32 524310, metadata !7, metadata !"SFtype", metadata !7, i32 167, i64 0, i64 0, i64 0, i32 0, metadata !10} ; [ DW_TAG_typedef ]
-!10 = metadata !{i32 524324, metadata !2, metadata !"float", metadata !2, i32 0, i64 32, i64 32, i64 0, i32 0, i32 4} ; [ DW_TAG_base_type ]
-!11 = metadata !{i32 524545, metadata !1, metadata !"b", metadata !2, i32 1921, metadata !9} ; [ DW_TAG_arg_variable ]
-!12 = metadata !{i32 524545, metadata !1, metadata !"c", metadata !2, i32 1921, metadata !9} ; [ DW_TAG_arg_variable ]
-!13 = metadata !{i32 524545, metadata !1, metadata !"d", metadata !2, i32 1921, metadata !9} ; [ DW_TAG_arg_variable ]
-!14 = metadata !{i32 524544, metadata !15, metadata !"denom", metadata !2, i32 1923, metadata !9} ; [ DW_TAG_auto_variable ]
-!15 = metadata !{i32 524299, metadata !1, i32 1922, i32 0} ; [ DW_TAG_lexical_block ]
-!16 = metadata !{i32 524544, metadata !15, metadata !"ratio", metadata !2, i32 1923, metadata !9} ; [ DW_TAG_auto_variable ]
-!17 = metadata !{i32 524544, metadata !15, metadata !"x", metadata !2, i32 1923, metadata !9} ; [ DW_TAG_auto_variable ]
-!18 = metadata !{i32 524544, metadata !15, metadata !"y", metadata !2, i32 1923, metadata !9} ; [ DW_TAG_auto_variable ]
-!19 = metadata !{i32 1929, i32 0, metadata !15, null}
-!20 = metadata !{i32 1931, i32 0, metadata !15, null}
-!21 = metadata !{i32 1932, i32 0, metadata !15, null}
-!22 = metadata !{i32 1933, i32 0, metadata !15, null}
-!23 = metadata !{i32 1934, i32 0, metadata !15, null}
-!24 = metadata !{i32 1938, i32 0, metadata !15, null}
-!25 = metadata !{i32 1939, i32 0, metadata !15, null}
-!26 = metadata !{i32 1940, i32 0, metadata !15, null}
-!27 = metadata !{i32 1941, i32 0, metadata !15, null}
-!28 = metadata !{i32 1946, i32 0, metadata !15, null}
-!29 = metadata !{i32 1948, i32 0, metadata !15, null}
-!30 = metadata !{i32 1950, i32 0, metadata !15, null}
-!31 = metadata !{i32 1951, i32 0, metadata !15, null}
-!32 = metadata !{i32 1953, i32 0, metadata !15, null}
-!33 = metadata !{i32 1955, i32 0, metadata !15, null}
-!34 = metadata !{i32 1956, i32 0, metadata !15, null}
-!35 = metadata !{i32 1957, i32 0, metadata !15, null}
-!36 = metadata !{i32 1958, i32 0, metadata !15, null}
-!37 = metadata !{i32 1960, i32 0, metadata !15, null}
-!38 = metadata !{i32 1962, i32 0, metadata !15, null}
-!39 = metadata !{i32 1963, i32 0, metadata !15, null}
-!40 = metadata !{i32 1964, i32 0, metadata !15, null}
-!41 = metadata !{i32 1965, i32 0, metadata !15, null}
-!42 = metadata !{i32 1969, i32 0, metadata !15, null}

Removed: llvm/branches/wendling/eh/test/DebugInfo/2010-05-28-Crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/DebugInfo/2010-05-28-Crash.ll?rev=117424&view=auto
==============================================================================
--- llvm/branches/wendling/eh/test/DebugInfo/2010-05-28-Crash.ll (original)
+++ llvm/branches/wendling/eh/test/DebugInfo/2010-05-28-Crash.ll (removed)
@@ -1,44 +0,0 @@
-; RUN: llc  -mtriple=x86_64-apple-darwin < %s | FileCheck %s
-; Test to check separate label for inlined function argument.
-
-define i32 @foo(i32 %y) nounwind optsize ssp {
-entry:
-  tail call void @llvm.dbg.value(metadata !{i32 %y}, i64 0, metadata !0)
-  %0 = tail call i32 (...)* @zoo(i32 %y) nounwind, !dbg !9 ; <i32> [#uses=1]
-  ret i32 %0, !dbg !9
-}
-
-declare i32 @zoo(...)
-
-declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
-
-define i32 @bar(i32 %x) nounwind optsize ssp {
-entry:
-  tail call void @llvm.dbg.value(metadata !{i32 %x}, i64 0, metadata !7)
-  tail call void @llvm.dbg.value(metadata !11, i64 0, metadata !0) nounwind
-  %0 = tail call i32 (...)* @zoo(i32 1) nounwind, !dbg !12 ; <i32> [#uses=1]
-  %1 = add nsw i32 %0, %x, !dbg !13               ; <i32> [#uses=1]
-  ret i32 %1, !dbg !13
-}
-
-!llvm.dbg.lv = !{!0, !7}
-
-!0 = metadata !{i32 524545, metadata !1, metadata !"y", metadata !2, i32 2, metadata !6} ; [ DW_TAG_arg_variable ]
-!1 = metadata !{i32 524334, i32 0, metadata !2, metadata !"foo", metadata !"foo", metadata !"foo", metadata !2, i32 2, metadata !4, i1 false, i1 true, i32 0, i32 0, null, i1 false, i1 true} ; [ DW_TAG_subprogram ]
-!2 = metadata !{i32 524329, metadata !"f.c", metadata !"/tmp", metadata !3} ; [ DW_TAG_file_type ]
-!3 = metadata !{i32 524305, i32 0, i32 1, metadata !"f.c", metadata !"/tmp", metadata !"4.2.1 (Based on Apple Inc. build 5658) (LLVM build)", i1 true, i1 true, metadata !"", i32 0} ; [ DW_TAG_compile_unit ]
-!4 = metadata !{i32 524309, metadata !2, metadata !"", metadata !2, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !5, i32 0, null} ; [ DW_TAG_subroutine_type ]
-!5 = metadata !{metadata !6, metadata !6}
-!6 = metadata !{i32 524324, metadata !2, metadata !"int", metadata !2, i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
-!7 = metadata !{i32 524545, metadata !8, metadata !"x", metadata !2, i32 6, metadata !6} ; [ DW_TAG_arg_variable ]
-!8 = metadata !{i32 524334, i32 0, metadata !2, metadata !"bar", metadata !"bar", metadata !"bar", metadata !2, i32 6, metadata !4, i1 false, i1 true, i32 0, i32 0, null, i1 false, i1 true} ; [ DW_TAG_subprogram ]
-!9 = metadata !{i32 3, i32 0, metadata !10, null}
-!10 = metadata !{i32 524299, metadata !1, i32 2, i32 0} ; [ DW_TAG_lexical_block ]
-!11 = metadata !{i32 1}
-!12 = metadata !{i32 3, i32 0, metadata !10, metadata !13}
-!13 = metadata !{i32 7, i32 0, metadata !14, null}
-!14 = metadata !{i32 524299, metadata !8, i32 6, i32 0} ; [ DW_TAG_lexical_block ]
-
-;CHECK:	        DEBUG_VALUE: bar:x <- EBX+0
-;CHECK-NEXT:Ltmp
-;CHECK-NEXT:	DEBUG_VALUE: foo:y <- 1+0

Removed: llvm/branches/wendling/eh/test/DebugInfo/2010-06-01-DeadArg-DbgInfo.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/DebugInfo/2010-06-01-DeadArg-DbgInfo.ll?rev=117424&view=auto
==============================================================================
--- llvm/branches/wendling/eh/test/DebugInfo/2010-06-01-DeadArg-DbgInfo.ll (original)
+++ llvm/branches/wendling/eh/test/DebugInfo/2010-06-01-DeadArg-DbgInfo.ll (removed)
@@ -1,53 +0,0 @@
-; RUN: llc -O2 < %s | FileCheck %s 
-; Test to check that unused argument 'this' is not undefined in debug info.
-
-target triple = "x86_64-apple-darwin10.2"
-%struct.foo = type { i32 }
-
-@llvm.used = appending global [1 x i8*] [i8* bitcast (i32 (%struct.foo*, i32)* @_ZN3foo3bazEi to i8*)], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-
-define i32 @_ZN3foo3bazEi(%struct.foo* nocapture %this, i32 %x) nounwind readnone optsize noinline ssp align 2 {
-;CHECK: DEBUG_VALUE: baz:this <- RDI+0
-entry:
-  tail call void @llvm.dbg.value(metadata !{%struct.foo* %this}, i64 0, metadata !15)
-  tail call void @llvm.dbg.value(metadata !{i32 %x}, i64 0, metadata !16)
-  %0 = mul nsw i32 %x, 7, !dbg !29                ; <i32> [#uses=1]
-  %1 = add nsw i32 %0, 1, !dbg !29                ; <i32> [#uses=1]
-  ret i32 %1, !dbg !29
-}
-
-declare void @llvm.dbg.value(metadata, i64, metadata) nounwind readnone
-
-!llvm.dbg.lv = !{!0, !14, !15, !16, !17, !24, !25, !28}
-
-!0 = metadata !{i32 524545, metadata !1, metadata !"this", metadata !3, i32 11, metadata !12} ; [ DW_TAG_arg_variable ]
-!1 = metadata !{i32 524334, i32 0, metadata !2, metadata !"bar", metadata !"bar", metadata !"_ZN3foo3barEi", metadata !3, i32 11, metadata !9, i1 false, i1 true, i32 0, i32 0, null, i1 false, i1 true} ; [ DW_TAG_subprogram ]
-!2 = metadata !{i32 524307, metadata !3, metadata !"foo", metadata !3, i32 3, i64 32, i64 32, i64 0, i32 0, null, metadata !5, i32 0, null} ; [ DW_TAG_structure_type ]
-!3 = metadata !{i32 524329, metadata !"foo.cp", metadata !"/tmp/", metadata !4} ; [ DW_TAG_file_type ]
-!4 = metadata !{i32 524305, i32 0, i32 4, metadata !"foo.cp", metadata !"/tmp/", metadata !"4.2.1 LLVM build", i1 true, i1 true, metadata !"", i32 0} ; [ DW_TAG_compile_unit ]
-!5 = metadata !{metadata !6, metadata !1, metadata !8}
-!6 = metadata !{i32 524301, metadata !2, metadata !"y", metadata !3, i32 8, i64 32, i64 32, i64 0, i32 0, metadata !7} ; [ DW_TAG_member ]
-!7 = metadata !{i32 524324, metadata !3, metadata !"int", metadata !3, i32 0, i64 32, i64 32, i64 0, i32 0, i32 5} ; [ DW_TAG_base_type ]
-!8 = metadata !{i32 524334, i32 0, metadata !2, metadata !"baz", metadata !"baz", metadata !"_ZN3foo3bazEi", metadata !3, i32 15, metadata !9, i1 false, i1 true, i32 0, i32 0, null, i1 false, i1 true} ; [ DW_TAG_subprogram ]
-!9 = metadata !{i32 524309, metadata !3, metadata !"", metadata !3, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !10, i32 0, null} ; [ DW_TAG_subroutine_type ]
-!10 = metadata !{metadata !7, metadata !11, metadata !7}
-!11 = metadata !{i32 524303, metadata !3, metadata !"", metadata !3, i32 0, i64 64, i64 64, i64 0, i32 64, metadata !2} ; [ DW_TAG_pointer_type ]
-!12 = metadata !{i32 524326, metadata !3, metadata !"", metadata !3, i32 0, i64 64, i64 64, i64 0, i32 64, metadata !13} ; [ DW_TAG_const_type ]
-!13 = metadata !{i32 524303, metadata !3, metadata !"", metadata !3, i32 0, i64 64, i64 64, i64 0, i32 0, metadata !2} ; [ DW_TAG_pointer_type ]
-!14 = metadata !{i32 524545, metadata !1, metadata !"x", metadata !3, i32 11, metadata !7} ; [ DW_TAG_arg_variable ]
-!15 = metadata !{i32 524545, metadata !8, metadata !"this", metadata !3, i32 15, metadata !12} ; [ DW_TAG_arg_variable ]
-!16 = metadata !{i32 524545, metadata !8, metadata !"x", metadata !3, i32 15, metadata !7} ; [ DW_TAG_arg_variable ]
-!17 = metadata !{i32 524545, metadata !18, metadata !"argc", metadata !3, i32 19, metadata !7} ; [ DW_TAG_arg_variable ]
-!18 = metadata !{i32 524334, i32 0, metadata !3, metadata !"main", metadata !"main", metadata !"main", metadata !3, i32 19, metadata !19, i1 false, i1 true, i32 0, i32 0, null, i1 false, i1 true} ; [ DW_TAG_subprogram ]
-!19 = metadata !{i32 524309, metadata !3, metadata !"", metadata !3, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !20, i32 0, null} ; [ DW_TAG_subroutine_type ]
-!20 = metadata !{metadata !7, metadata !7, metadata !21}
-!21 = metadata !{i32 524303, metadata !3, metadata !"", metadata !3, i32 0, i64 64, i64 64, i64 0, i32 0, metadata !22} ; [ DW_TAG_pointer_type ]
-!22 = metadata !{i32 524303, metadata !3, metadata !"", metadata !3, i32 0, i64 64, i64 64, i64 0, i32 0, metadata !23} ; [ DW_TAG_pointer_type ]
-!23 = metadata !{i32 524324, metadata !3, metadata !"char", metadata !3, i32 0, i64 8, i64 8, i64 0, i32 0, i32 6} ; [ DW_TAG_base_type ]
-!24 = metadata !{i32 524545, metadata !18, metadata !"argv", metadata !3, i32 19, metadata !21} ; [ DW_TAG_arg_variable ]
-!25 = metadata !{i32 524544, metadata !26, metadata !"a", metadata !3, i32 20, metadata !2} ; [ DW_TAG_auto_variable ]
-!26 = metadata !{i32 524299, metadata !27, i32 19, i32 0} ; [ DW_TAG_lexical_block ]
-!27 = metadata !{i32 524299, metadata !18, i32 19, i32 0} ; [ DW_TAG_lexical_block ]
-!28 = metadata !{i32 524544, metadata !26, metadata !"b", metadata !3, i32 21, metadata !7} ; [ DW_TAG_auto_variable ]
-!29 = metadata !{i32 16, i32 0, metadata !30, null}
-!30 = metadata !{i32 524299, metadata !8, i32 15, i32 0} ; [ DW_TAG_lexical_block ]

Modified: llvm/branches/wendling/eh/test/DebugInfo/printdbginfo2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/DebugInfo/printdbginfo2.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/DebugInfo/printdbginfo2.ll (original)
+++ llvm/branches/wendling/eh/test/DebugInfo/printdbginfo2.ll Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-; RUN: opt < %s -print-dbginfo -disable-output | FileCheck %s
+; RUN: opt < %s -print-dbginfo -disable-output |& FileCheck %s
 ;  grep {%b is variable b of type x declared at x.c:7} %t1
 ;  grep {%2 is variable b of type x declared at x.c:7} %t1
 ;  grep {@c.1442 is variable c of type int declared at x.c:4} %t1

Modified: llvm/branches/wendling/eh/test/Feature/linker_private_linkages.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Feature/linker_private_linkages.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Feature/linker_private_linkages.ll (original)
+++ llvm/branches/wendling/eh/test/Feature/linker_private_linkages.ll Tue Oct 26 19:48:03 2010
@@ -4,3 +4,4 @@
 
 @foo = linker_private hidden global i32 0
 @bar = linker_private_weak hidden global i32 0
+@qux = linker_private_weak_def_auto global i32 0

Modified: llvm/branches/wendling/eh/test/Feature/metadata.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Feature/metadata.ll?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Feature/metadata.ll (original)
+++ llvm/branches/wendling/eh/test/Feature/metadata.ll Tue Oct 26 19:48:03 2010
@@ -1,9 +1,11 @@
 ; RUN: llvm-as < %s | llvm-dis | llvm-as | llvm-dis
 ; PR7105
 
-define void @foo() {
+define void @foo(i32 %x) {
   call void @llvm.zonk(metadata !1, i64 0, metadata !1)
-  ret void
+  store i32 0, i32* null, !whatever !0, !whatever_else !{}, !more !{metadata !"hello"}
+  store i32 0, i32* null, !whatever !{i32 %x, metadata !"hello", metadata !1, metadata !{}, metadata !2}
+  ret void, !whatever !{i32 %x}
 }
 
 declare void @llvm.zonk(metadata, i64, metadata) nounwind readnone

Removed: llvm/branches/wendling/eh/test/Feature/unions.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Feature/unions.ll?rev=117424&view=auto
==============================================================================
--- llvm/branches/wendling/eh/test/Feature/unions.ll (original)
+++ llvm/branches/wendling/eh/test/Feature/unions.ll (removed)
@@ -1,14 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-%union.anon = type union { i8, i32, float }
-
-@union1 = constant union { i32, i8 } { i32 4 }
-@union2 = constant union { i32, i8 } insertvalue(union { i32, i8 } undef, i32 4, 0)
-@union3 = common global %union.anon zeroinitializer, align 8 
-
-define void @"Unions" () {
-  ret void
-}
-

Modified: llvm/branches/wendling/eh/test/FrontendC++/2010-04-30-OptimizedMethod-Dbg.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/FrontendC%2B%2B/2010-04-30-OptimizedMethod-Dbg.cpp?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/FrontendC++/2010-04-30-OptimizedMethod-Dbg.cpp (original)
+++ llvm/branches/wendling/eh/test/FrontendC++/2010-04-30-OptimizedMethod-Dbg.cpp Tue Oct 26 19:48:03 2010
@@ -7,12 +7,12 @@
 };
 
 int foo::bar(int x) {
-  // CHECK: {{i1 false, i1 true(, i[0-9]+ [^\}]+[}]|[}]) ; \[ DW_TAG_subprogram \]}}
+  // CHECK: {{i32 [0-9]+, i1 true(, i[0-9]+ [^\}]+[}]|[}]) ; \[ DW_TAG_subprogram \]}}
     return x*4 + 1;
 }
 
 int foo::baz(int x) {
-  // CHECK: {{i1 false, i1 true(, i[0-9]+ [^\},]+[}]|[}]) ; \[ DW_TAG_subprogram \]}}
+  // CHECK: {{i32 [0-9]+, i1 true(, i[0-9]+ [^\},]+[}]|[}]) ; \[ DW_TAG_subprogram \]}}
     return x*4 + 1;
 }
 

Modified: llvm/branches/wendling/eh/test/FrontendC/2007-10-01-BuildArrayRef.c
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/FrontendC/2007-10-01-BuildArrayRef.c?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/FrontendC/2007-10-01-BuildArrayRef.c (original)
+++ llvm/branches/wendling/eh/test/FrontendC/2007-10-01-BuildArrayRef.c Tue Oct 26 19:48:03 2010
@@ -1,8 +1,20 @@
-// RUN: not %llvmgcc -S %s -o /dev/null |& grep "error: assignment of read-only location"
+// RUN: not %llvmgcc_only -c %s -o /dev/null |& FileCheck %s
 // PR 1603
-int func()
+void func()
 {
    const int *arr;
-   arr[0] = 1;
+   arr[0] = 1;  // CHECK: error: assignment of read-only location
 }
 
+struct foo {
+  int bar;
+};
+struct foo sfoo = { 0 };
+
+int func2()
+{
+  const struct foo *fp;
+  fp = &sfoo;
+  fp[0].bar = 1;  // CHECK: error: assignment of read-only member 'bar'
+  return sfoo.bar;
+}

Modified: llvm/branches/wendling/eh/test/FrontendC/2008-03-24-BitField-And-Alloca.c
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/FrontendC/2008-03-24-BitField-And-Alloca.c?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/FrontendC/2008-03-24-BitField-And-Alloca.c (original)
+++ llvm/branches/wendling/eh/test/FrontendC/2008-03-24-BitField-And-Alloca.c Tue Oct 26 19:48:03 2010
@@ -1,5 +1,5 @@
 // RUN: %llvmgcc -O2 -S %s -o - | not grep alloca
-// RUN: %llvmgcc -m32 -O2 -S %s -o - | not grep store 
+// RUN: %llvmgcc -m32 -O2 -S %s -o - | not grep {store }
 
 enum {
  PP_C,

Modified: llvm/branches/wendling/eh/test/FrontendC/2010-05-18-asmsched.c
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/FrontendC/2010-05-18-asmsched.c?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/FrontendC/2010-05-18-asmsched.c (original)
+++ llvm/branches/wendling/eh/test/FrontendC/2010-05-18-asmsched.c Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-// RUN: %llvmgcc %s -c -O3 -m64 -emit-llvm -o - | llc -march=x86-64 -mtriple=x86_64-apple-darwin | FileCheck %s
+// RUN: %llvmgcc %s -c -O3 -emit-llvm -o - | llc -march=x86-64 -mtriple=x86_64-apple-darwin | FileCheck %s
 // r9 used to be clobbered before its value was moved to r10.  7993104.
 
 void foo(int x, int y) {
@@ -14,4 +14,4 @@
   lr9 = x;
   lr10 = foo;
   asm volatile("bar" : "=r"(lr9) : "r"(lr9), "r"(lr10));
-}
\ No newline at end of file
+}

Modified: llvm/branches/wendling/eh/test/FrontendC/2010-07-14-overconservative-align.c
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/FrontendC/2010-07-14-overconservative-align.c?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/FrontendC/2010-07-14-overconservative-align.c (original)
+++ llvm/branches/wendling/eh/test/FrontendC/2010-07-14-overconservative-align.c Tue Oct 26 19:48:03 2010
@@ -1,4 +1,4 @@
-// RUN: %llvmgcc %s -emit-llvm -m64 -S -o - | FileCheck %s
+// RUN: %llvmgcc %s -emit-llvm -S -o - | FileCheck %s
 // PR 5995
 struct s {
     int word;
@@ -9,6 +9,6 @@
 
 void func (struct s *s)
 {
-// CHECK: load %struct.s** %s_addr, align 8
+// CHECK: load %struct.s** %s_addr, align {{[48]}}
     s->word = 0;
 }

Modified: llvm/branches/wendling/eh/test/FrontendC/2010-07-14-ref-off-end.c
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/FrontendC/2010-07-14-ref-off-end.c?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/FrontendC/2010-07-14-ref-off-end.c (original)
+++ llvm/branches/wendling/eh/test/FrontendC/2010-07-14-ref-off-end.c Tue Oct 26 19:48:03 2010
@@ -17,8 +17,8 @@
 }
 main()
 {
-// CHECK:  getelementptr inbounds %struct.T* %t, i32 0, i32 0 ; <i32*> [#uses=2]
-// CHECK:  getelementptr inbounds %struct.T* %t, i32 0, i32 0 ; <i32*> [#uses=2]
+// CHECK:  getelementptr inbounds %struct.T* %t, i32 0, i32 0 
+// CHECK:  getelementptr inbounds %struct.T* %t, i32 0, i32 0
 struct T t;
 t.i=0xff;
 t.c=0xffff11;

Modified: llvm/branches/wendling/eh/test/FrontendC/cstring-align.c
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/FrontendC/cstring-align.c?rev=117425&r1=117424&r2=117425&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/FrontendC/cstring-align.c (original)
+++ llvm/branches/wendling/eh/test/FrontendC/cstring-align.c Tue Oct 26 19:48:03 2010
@@ -1,6 +1,4 @@
-// RUN: %llvmgcc %s -c -Os -m32 -emit-llvm -o - | llc -march=x86 -mtriple=i386-apple-darwin10 | FileCheck %s -check-prefix=DARWIN32
-// RUN: %llvmgcc %s -c -Os -m64 -emit-llvm -o - | llc -march=x86-64 -mtriple=x86_64-apple-darwin10 | FileCheck %s -check-prefix=DARWIN64
-// XTARGET: darwin
+// RUN: %llvmgcc %s -c -Os -emit-llvm -o - | llc -march=x86 -mtriple=i386-apple-darwin10 | FileCheck %s
 
 extern void func(const char *, const char *);
 
@@ -8,10 +6,6 @@
   func("%s: the function name", __func__);
 }
 
-// DARWIN64: .align 4
-// DARWIN64: ___func__.
-// DARWIN64: .asciz "long_function_name"
-
-// DARWIN32: .align 4
-// DARWIN32: ___func__.
-// DARWIN32: .asciz "long_function_name"
+// CHECK: .align 4
+// CHECK: ___func__.
+// CHECK: .asciz "long_function_name"

Removed: llvm/branches/wendling/eh/test/Integer/a15.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Integer/a15.ll?rev=117424&view=auto
==============================================================================
--- llvm/branches/wendling/eh/test/Integer/a15.ll (original)
+++ llvm/branches/wendling/eh/test/Integer/a15.ll (removed)
@@ -1,27 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t.ll
-; RUN: diff %t.ll %s.out
-
-; test 15 bits
-;
-@b = constant i15 add(i15 32767, i15 1)
-@c = constant i15 add(i15 32767, i15 32767)
-@d = constant i15 add(i15 32760, i15 8)
-@e = constant i15 sub(i15 0 , i15 1)
-@f = constant i15 sub(i15 0 , i15 32767)
-@g = constant i15 sub(i15 2 , i15 32767)
-
-@h = constant i15 shl(i15 1 , i15 15)
-@i = constant i15 shl(i15 1 , i15 14)
-@j = constant i15 lshr(i15 32767 , i15 14)
-@l = constant i15 ashr(i15 32767 , i15 14)
-
-@n = constant i15 mul(i15 32767, i15 2)
-@q = constant i15 mul(i15 -16383,i15 -3)
-@r = constant i15 sdiv(i15 -1,   i15 16383)
-@s = constant i15 udiv(i15 -1,   i15 16383)
-@t = constant i15 srem(i15 1,    i15 32766)
-@u = constant i15 urem(i15 32767,i15 -1)
-@o = constant i15 trunc( i16 32768  to i15 )
-@p = constant i15 trunc( i16 32767  to i15 )
-@v = constant i15 srem(i15 -1,    i15 768)
- 

Removed: llvm/branches/wendling/eh/test/Integer/a15.ll.out
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Integer/a15.ll.out?rev=117424&view=auto
==============================================================================
--- llvm/branches/wendling/eh/test/Integer/a15.ll.out (original)
+++ llvm/branches/wendling/eh/test/Integer/a15.ll.out (removed)
@@ -1,21 +0,0 @@
-; ModuleID = '<stdin>'
-
-@b = constant i15 0                               ; <i15*> [#uses=0]
-@c = constant i15 -2                              ; <i15*> [#uses=0]
-@d = constant i15 0                               ; <i15*> [#uses=0]
-@e = constant i15 -1                              ; <i15*> [#uses=0]
-@f = constant i15 1                               ; <i15*> [#uses=0]
-@g = constant i15 3                               ; <i15*> [#uses=0]
-@h = constant i15 undef                           ; <i15*> [#uses=0]
-@i = constant i15 -16384                          ; <i15*> [#uses=0]
-@j = constant i15 1                               ; <i15*> [#uses=0]
-@l = constant i15 -1                              ; <i15*> [#uses=0]
-@n = constant i15 -2                              ; <i15*> [#uses=0]
-@q = constant i15 16381                           ; <i15*> [#uses=0]
-@r = constant i15 0                               ; <i15*> [#uses=0]
-@s = constant i15 2                               ; <i15*> [#uses=0]
-@t = constant i15 1                               ; <i15*> [#uses=0]
-@u = constant i15 0                               ; <i15*> [#uses=0]
-@o = constant i15 0                               ; <i15*> [#uses=0]
-@p = constant i15 -1                              ; <i15*> [#uses=0]
-@v = constant i15 -1                              ; <i15*> [#uses=0]
