[llvm] cf7cdaf - [X86][VARARG] Avoid spilling xmm registers for va_start.

Alexey Lapshin via llvm-commits llvm-commits at lists.llvm.org
Sat Mar 6 04:30:23 PST 2021


Author: Alexey Lapshin
Date: 2021-03-06T15:25:47+03:00
New Revision: cf7cdaff64fb2a97f9af533ad0a5ed9c94b72c8e

URL: https://github.com/llvm/llvm-project/commit/cf7cdaff64fb2a97f9af533ad0a5ed9c94b72c8e
DIFF: https://github.com/llvm/llvm-project/commit/cf7cdaff64fb2a97f9af533ad0a5ed9c94b72c8e.diff

LOG: [X86][VARARG] Avoid spilling xmm registers for va_start.

This patch is extracted from D69372.
It fixes https://bugs.llvm.org/show_bug.cgi?id=42219.

In noimplicitfloat mode the compiler must not generate floating-point code
unless it is explicitly asked to do so. Currently this rule is broken for
variadic functions: although the compiler correctly guards the block of code
that copies the xmm vararg parameters with a check of %al, it does not protect
the spills of the xmm registers. Such spills therefore end up in unguarded
areas and can break code that does not expect floating-point data. The problem
shows up at -O0, where the fast register allocator spills virtual registers at
basic block boundaries and does not guard those spills with additional
control flow. To resolve this, incoming physical xmm registers are no longer
copied into virtual registers; instead, they are stored to memory directly.
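
As a rough illustration of the failure mode (a hypothetical sketch; the
function name and the use of clang's -mno-implicit-float flag are assumptions,
not taken from the patch or the bug report), a reproducer is just an ordinary
variadic function that never uses floating point itself:

  // Build line (assumed): clang++ -O0 -mno-implicit-float repro.cpp
  #include <cstdarg>
  #include <cstdio>

  // Hypothetical variadic logger: it performs no floating-point work, so
  // noimplicitfloat code should contain no xmm instructions outside the
  // %al-guarded register-save block.
  void log_ints(const char *fmt, ...) {
    va_list ap;
    va_start(ap, fmt); // at -O0 this used to emit xmm spills outside the
                       // %al guard
    std::vfprintf(stderr, fmt, ap);
    va_end(ap);
  }

With this change the xmm argument registers are stored straight into the
register-save area inside the guarded block, so no unguarded xmm code is
emitted at -O0 (see the new xmm-vararg-noopt.ll test below).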

Differential Revision: https://reviews.llvm.org/D80163

Added: 
    llvm/test/CodeGen/X86/xmm-vararg-noopt.ll

Modified: 
    llvm/lib/Target/X86/X86ExpandPseudo.cpp
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/lib/Target/X86/X86ISelLowering.h
    llvm/lib/Target/X86/X86InstrCompiler.td
    llvm/test/CodeGen/X86/musttail-varargs.ll
    llvm/test/CodeGen/X86/vastart-defs-eflags.ll
    llvm/test/CodeGen/X86/x32-va_start.ll
    llvm/test/CodeGen/X86/x86-64-varargs.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/X86/X86ExpandPseudo.cpp b/llvm/lib/Target/X86/X86ExpandPseudo.cpp
index ab0062e027a7..e0875fb6432d 100644
--- a/llvm/lib/Target/X86/X86ExpandPseudo.cpp
+++ b/llvm/lib/Target/X86/X86ExpandPseudo.cpp
@@ -64,6 +64,18 @@ class X86ExpandPseudo : public MachineFunctionPass {
 
   bool ExpandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI);
   bool ExpandMBB(MachineBasicBlock &MBB);
+
+  /// This function expands pseudos that affect control flow. It is done in a
+  /// separate pass to simplify block navigation in the main pass (which calls
+  /// ExpandMBB).
+  bool ExpandPseudosWhichAffectControlFlow(MachineFunction &MF);
+
+  /// Expand X86::VASTART_SAVE_XMM_REGS into a set of xmm-storing instructions,
+  /// placed in a separate block guarded by a check of the %al register (for the
+  /// System V ABI).
+  void ExpandVastartSaveXmmRegs(
+      MachineBasicBlock *MBB,
+      MachineBasicBlock::iterator VAStartPseudoInstr) const;
 };
 char X86ExpandPseudo::ID = 0;
 
@@ -504,6 +516,115 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
   llvm_unreachable("Previous switch has a fallthrough?");
 }
 
+// This function creates an additional block for storing the guarded varargs
+// registers. It adds a check of %al to the entry block so that GuardedRegsBlk
+// is skipped if the xmm registers should not be stored.
+//
+//     EntryBlk[VAStartPseudoInstr]     EntryBlk
+//        |                              |     .
+//        |                              |        .
+//        |                              |   GuardedRegsBlk
+//        |                      =>      |        .
+//        |                              |     .
+//        |                             TailBlk
+//        |                              |
+//        |                              |
+//
+void X86ExpandPseudo::ExpandVastartSaveXmmRegs(
+    MachineBasicBlock *EntryBlk,
+    MachineBasicBlock::iterator VAStartPseudoInstr) const {
+  assert(VAStartPseudoInstr->getOpcode() == X86::VASTART_SAVE_XMM_REGS);
+
+  MachineFunction *Func = EntryBlk->getParent();
+  const TargetInstrInfo *TII = STI->getInstrInfo();
+  DebugLoc DL = VAStartPseudoInstr->getDebugLoc();
+  Register CountReg = VAStartPseudoInstr->getOperand(0).getReg();
+
+  // Calculate liveins for newly created blocks.
+  LivePhysRegs LiveRegs(*STI->getRegisterInfo());
+  SmallVector<std::pair<MCPhysReg, const MachineOperand *>, 8> Clobbers;
+
+  LiveRegs.addLiveIns(*EntryBlk);
+  for (MachineInstr &MI : EntryBlk->instrs()) {
+    if (MI.getOpcode() == VAStartPseudoInstr->getOpcode())
+      break;
+
+    LiveRegs.stepForward(MI, Clobbers);
+  }
+
+  // Create the new basic blocks. One block contains all the XMM stores,
+  // and another block is the final destination regardless of whether any
+  // stores were performed.
+  const BasicBlock *LLVMBlk = EntryBlk->getBasicBlock();
+  MachineFunction::iterator EntryBlkIter = ++EntryBlk->getIterator();
+  MachineBasicBlock *GuardedRegsBlk = Func->CreateMachineBasicBlock(LLVMBlk);
+  MachineBasicBlock *TailBlk = Func->CreateMachineBasicBlock(LLVMBlk);
+  Func->insert(EntryBlkIter, GuardedRegsBlk);
+  Func->insert(EntryBlkIter, TailBlk);
+
+  // Transfer the remainder of EntryBlk and its successor edges to TailBlk.
+  TailBlk->splice(TailBlk->begin(), EntryBlk,
+                  std::next(MachineBasicBlock::iterator(VAStartPseudoInstr)),
+                  EntryBlk->end());
+  TailBlk->transferSuccessorsAndUpdatePHIs(EntryBlk);
+
+  int64_t FrameIndex = VAStartPseudoInstr->getOperand(1).getImm();
+  Register BaseReg;
+  uint64_t FrameOffset =
+      X86FL->getFrameIndexReference(*Func, FrameIndex, BaseReg).getFixed();
+  uint64_t VarArgsRegsOffset = VAStartPseudoInstr->getOperand(2).getImm();
+
+  // TODO: add support for YMM and ZMM here.
+  unsigned MOVOpc = STI->hasAVX() ? X86::VMOVAPSmr : X86::MOVAPSmr;
+
+  // In the XMM save block, save all the XMM argument registers.
+  for (int64_t OpndIdx = 3, RegIdx = 0;
+       OpndIdx < VAStartPseudoInstr->getNumOperands() - 1;
+       OpndIdx++, RegIdx++) {
+
+    int64_t Offset = FrameOffset + VarArgsRegsOffset + RegIdx * 16;
+
+    MachineMemOperand *MMO = Func->getMachineMemOperand(
+        MachinePointerInfo::getFixedStack(*Func, FrameIndex, Offset),
+        MachineMemOperand::MOStore,
+        /*Size=*/16, Align(16));
+
+    BuildMI(GuardedRegsBlk, DL, TII->get(MOVOpc))
+        .addReg(BaseReg)
+        .addImm(/*Scale=*/1)
+        .addReg(/*IndexReg=*/0)
+        .addImm(/*Disp=*/Offset)
+        .addReg(/*Segment=*/0)
+        .addReg(VAStartPseudoInstr->getOperand(OpndIdx).getReg())
+        .addMemOperand(MMO);
+    assert(Register::isPhysicalRegister(
+        VAStartPseudoInstr->getOperand(OpndIdx).getReg()));
+  }
+
+  // The original block will now fall through to the GuardedRegsBlk.
+  EntryBlk->addSuccessor(GuardedRegsBlk);
+  // The GuardedRegsBlk will fall through to the TailBlk.
+  GuardedRegsBlk->addSuccessor(TailBlk);
+
+  if (!STI->isCallingConvWin64(Func->getFunction().getCallingConv())) {
+    // If %al is 0, branch around the XMM save block.
+    BuildMI(EntryBlk, DL, TII->get(X86::TEST8rr))
+        .addReg(CountReg)
+        .addReg(CountReg);
+    BuildMI(EntryBlk, DL, TII->get(X86::JCC_1))
+        .addMBB(TailBlk)
+        .addImm(X86::COND_E);
+    EntryBlk->addSuccessor(TailBlk);
+  }
+
+  // Add liveins to the created block.
+  addLiveIns(*GuardedRegsBlk, LiveRegs);
+  addLiveIns(*TailBlk, LiveRegs);
+
+  // Delete the pseudo.
+  VAStartPseudoInstr->eraseFromParent();
+}
+
 /// Expand all pseudo instructions contained in \p MBB.
 /// \returns true if any expansion occurred for \p MBB.
 bool X86ExpandPseudo::ExpandMBB(MachineBasicBlock &MBB) {
@@ -520,6 +641,20 @@ bool X86ExpandPseudo::ExpandMBB(MachineBasicBlock &MBB) {
   return Modified;
 }
 
+bool X86ExpandPseudo::ExpandPseudosWhichAffectControlFlow(MachineFunction &MF) {
+  // Currently the only pseudo which affects control flow is
+  // X86::VASTART_SAVE_XMM_REGS, and it is located in the entry block,
+  // so we do not need to scan other blocks.
+  for (MachineInstr &Instr : MF.front().instrs()) {
+    if (Instr.getOpcode() == X86::VASTART_SAVE_XMM_REGS) {
+      ExpandVastartSaveXmmRegs(&(MF.front()), Instr);
+      return true;
+    }
+  }
+
+  return false;
+}
+
 bool X86ExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
   STI = &static_cast<const X86Subtarget &>(MF.getSubtarget());
   TII = STI->getInstrInfo();
@@ -527,7 +662,8 @@ bool X86ExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
   X86FI = MF.getInfo<X86MachineFunctionInfo>();
   X86FL = STI->getFrameLowering();
 
-  bool Modified = false;
+  bool Modified = ExpandPseudosWhichAffectControlFlow(MF);
+
   for (MachineBasicBlock &MBB : MF)
     Modified |= ExpandMBB(MBB);
   return Modified;

diff  --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 1a9259718336..039ddc3e76f1 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -3483,9 +3483,12 @@ void VarArgsLoweringHelper::createVarArgAreaAndStoreRegisters(
       Register AL = TheMachineFunction.addLiveIn(X86::AL, &X86::GR8RegClass);
       ALVal = DAG.getCopyFromReg(Chain, DL, AL, MVT::i8);
       for (MCPhysReg Reg : AvailableXmms) {
-        Register XMMReg = TheMachineFunction.addLiveIn(Reg, &X86::VR128RegClass);
-        LiveXMMRegs.push_back(
-            DAG.getCopyFromReg(Chain, DL, XMMReg, MVT::v4f32));
+        // The fast register allocator spills virtual registers at basic
+        // block boundaries, which leads to uses of xmm registers outside
+        // the check for %al. Pass physical registers to
+        // VASTART_SAVE_XMM_REGS to avoid unnecessary spilling.
+        TheMachineFunction.getRegInfo().addLiveIn(Reg);
+        LiveXMMRegs.push_back(DAG.getRegister(Reg, MVT::v4f32));
       }
     }
 
@@ -32035,81 +32038,6 @@ X86TargetLowering::EmitVAARGWithCustomInserter(MachineInstr &MI,
   return endMBB;
 }
 
-MachineBasicBlock *X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
-    MachineInstr &MI, MachineBasicBlock *MBB) const {
-  // Emit code to save XMM registers to the stack. The ABI says that the
-  // number of registers to save is given in %al, so it's theoretically
-  // possible to do an indirect jump trick to avoid saving all of them,
-  // however this code takes a simpler approach and just executes all
-  // of the stores if %al is non-zero. It's less code, and it's probably
-  // easier on the hardware branch predictor, and stores aren't all that
-  // expensive anyway.
-
-  // Create the new basic blocks. One block contains all the XMM stores,
-  // and one block is the final destination regardless of whether any
-  // stores were performed.
-  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
-  MachineFunction *F = MBB->getParent();
-  MachineFunction::iterator MBBIter = ++MBB->getIterator();
-  MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB);
-  MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB);
-  F->insert(MBBIter, XMMSaveMBB);
-  F->insert(MBBIter, EndMBB);
-
-  // Transfer the remainder of MBB and its successor edges to EndMBB.
-  EndMBB->splice(EndMBB->begin(), MBB,
-                 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
-  EndMBB->transferSuccessorsAndUpdatePHIs(MBB);
-
-  // The original block will now fall through to the XMM save block.
-  MBB->addSuccessor(XMMSaveMBB);
-  // The XMMSaveMBB will fall through to the end block.
-  XMMSaveMBB->addSuccessor(EndMBB);
-
-  // Now add the instructions.
-  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
-  const DebugLoc &DL = MI.getDebugLoc();
-
-  Register CountReg = MI.getOperand(0).getReg();
-  int RegSaveFrameIndex = MI.getOperand(1).getImm();
-  int64_t VarArgsFPOffset = MI.getOperand(2).getImm();
-
-  if (!Subtarget.isCallingConvWin64(F->getFunction().getCallingConv())) {
-    // If %al is 0, branch around the XMM save block.
-    BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
-    BuildMI(MBB, DL, TII->get(X86::JCC_1)).addMBB(EndMBB).addImm(X86::COND_E);
-    MBB->addSuccessor(EndMBB);
-  }
-
-  // Make sure the last operand is EFLAGS, which gets clobbered by the branch
-  // that was just emitted, but clearly shouldn't be "saved".
-  assert((MI.getNumOperands() <= 3 ||
-          !MI.getOperand(MI.getNumOperands() - 1).isReg() ||
-          MI.getOperand(MI.getNumOperands() - 1).getReg() == X86::EFLAGS) &&
-         "Expected last argument to be EFLAGS");
-  unsigned MOVOpc = Subtarget.hasAVX() ? X86::VMOVAPSmr : X86::MOVAPSmr;
-  // In the XMM save block, save all the XMM argument registers.
-  for (int i = 3, e = MI.getNumOperands() - 1; i != e; ++i) {
-    int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
-    MachineMemOperand *MMO = F->getMachineMemOperand(
-        MachinePointerInfo::getFixedStack(*F, RegSaveFrameIndex, Offset),
-        MachineMemOperand::MOStore,
-        /*Size=*/16, Align(16));
-    BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc))
-        .addFrameIndex(RegSaveFrameIndex)
-        .addImm(/*Scale=*/1)
-        .addReg(/*IndexReg=*/0)
-        .addImm(/*Disp=*/Offset)
-        .addReg(/*Segment=*/0)
-        .addReg(MI.getOperand(i).getReg())
-        .addMemOperand(MMO);
-  }
-
-  MI.eraseFromParent(); // The pseudo instruction is gone now.
-
-  return EndMBB;
-}
-
 // The EFLAGS operand of SelectItr might be missing a kill marker
 // because there were multiple uses of EFLAGS, and ISel didn't know
 // which to mark. Figure out whether SelectItr should have had a
@@ -33943,9 +33871,6 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
   case X86::XBEGIN:
     return emitXBegin(MI, BB, Subtarget.getInstrInfo());
 
-  case X86::VASTART_SAVE_XMM_REGS:
-    return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
-
   case X86::VAARG_64:
   case X86::VAARG_X32:
     return EmitVAARGWithCustomInserter(MI, BB);

diff  --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index 86b57f746c57..f62d49e060ce 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -1586,10 +1586,6 @@ namespace llvm {
     EmitVAARGWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const;
 
     /// Utility function to emit the xmm reg save portion of va_start.
-    MachineBasicBlock *
-    EmitVAStartSaveXMMRegsWithCustomInserter(MachineInstr &BInstr,
-                                             MachineBasicBlock *BB) const;
-
     MachineBasicBlock *EmitLoweredCascadedSelect(MachineInstr &MI1,
                                                  MachineInstr &MI2,
                                                  MachineBasicBlock *BB) const;

diff  --git a/llvm/lib/Target/X86/X86InstrCompiler.td b/llvm/lib/Target/X86/X86InstrCompiler.td
index 7a2facf226d8..6ad29c6cc550 100644
--- a/llvm/lib/Target/X86/X86InstrCompiler.td
+++ b/llvm/lib/Target/X86/X86InstrCompiler.td
@@ -69,7 +69,7 @@ def : Pat<(X86callseq_start timm:$amt1, timm:$amt2),
 let SchedRW = [WriteSystem] in {
 
 // x86-64 va_start lowering magic.
-let usesCustomInserter = 1, Defs = [EFLAGS] in {
+let hasSideEffects = 1, Defs = [EFLAGS] in {
 def VASTART_SAVE_XMM_REGS : I<0, Pseudo,
                               (outs),
                               (ins GR8:$al,
@@ -80,7 +80,9 @@ def VASTART_SAVE_XMM_REGS : I<0, Pseudo,
                                                          timm:$regsavefi,
                                                          timm:$offset),
                                (implicit EFLAGS)]>;
+}
 
+let usesCustomInserter = 1, Defs = [EFLAGS] in {
 // The VAARG_64 and VAARG_X32 pseudo-instructions take the address of the
 // va_list, and place the address of the next argument into a register.
 let Defs = [EFLAGS] in {

diff  --git a/llvm/test/CodeGen/X86/musttail-varargs.ll b/llvm/test/CodeGen/X86/musttail-varargs.ll
index 34c98dda6010..e7842031ba1d 100644
--- a/llvm/test/CodeGen/X86/musttail-varargs.ll
+++ b/llvm/test/CodeGen/X86/musttail-varargs.ll
@@ -37,13 +37,26 @@ define void @f_thunk(i8* %this, ...) {
 ; LINUX-NEXT:    .cfi_offset %r14, -32
 ; LINUX-NEXT:    .cfi_offset %r15, -24
 ; LINUX-NEXT:    .cfi_offset %rbp, -16
+; LINUX-NEXT:    movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; LINUX-NEXT:    movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; LINUX-NEXT:    movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; LINUX-NEXT:    movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; LINUX-NEXT:    movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; LINUX-NEXT:    movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; LINUX-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; LINUX-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; LINUX-NEXT:    movb %al, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
 ; LINUX-NEXT:    movq %r9, %r15
 ; LINUX-NEXT:    movq %r8, %r12
 ; LINUX-NEXT:    movq %rcx, %r13
 ; LINUX-NEXT:    movq %rdx, %rbp
 ; LINUX-NEXT:    movq %rsi, %rbx
 ; LINUX-NEXT:    movq %rdi, %r14
-; LINUX-NEXT:    movb %al, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
+; LINUX-NEXT:    movq %rsi, {{[0-9]+}}(%rsp)
+; LINUX-NEXT:    movq %rdx, {{[0-9]+}}(%rsp)
+; LINUX-NEXT:    movq %rcx, {{[0-9]+}}(%rsp)
+; LINUX-NEXT:    movq %r8, {{[0-9]+}}(%rsp)
+; LINUX-NEXT:    movq %r9, {{[0-9]+}}(%rsp)
 ; LINUX-NEXT:    testb %al, %al
 ; LINUX-NEXT:    je .LBB0_2
 ; LINUX-NEXT:  # %bb.1:
@@ -56,27 +69,13 @@ define void @f_thunk(i8* %this, ...) {
 ; LINUX-NEXT:    movaps %xmm6, {{[0-9]+}}(%rsp)
 ; LINUX-NEXT:    movaps %xmm7, {{[0-9]+}}(%rsp)
 ; LINUX-NEXT:  .LBB0_2:
-; LINUX-NEXT:    movq %rbx, {{[0-9]+}}(%rsp)
-; LINUX-NEXT:    movq %rbp, {{[0-9]+}}(%rsp)
-; LINUX-NEXT:    movq %r13, {{[0-9]+}}(%rsp)
-; LINUX-NEXT:    movq %r12, {{[0-9]+}}(%rsp)
-; LINUX-NEXT:    movq %r15, {{[0-9]+}}(%rsp)
 ; LINUX-NEXT:    leaq {{[0-9]+}}(%rsp), %rax
 ; LINUX-NEXT:    movq %rax, {{[0-9]+}}(%rsp)
 ; LINUX-NEXT:    leaq {{[0-9]+}}(%rsp), %rax
 ; LINUX-NEXT:    movq %rax, {{[0-9]+}}(%rsp)
 ; LINUX-NEXT:    movabsq $206158430216, %rax # imm = 0x3000000008
 ; LINUX-NEXT:    movq %rax, {{[0-9]+}}(%rsp)
-; LINUX-NEXT:    movq %r14, %rdi
-; LINUX-NEXT:    movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; LINUX-NEXT:    movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; LINUX-NEXT:    movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; LINUX-NEXT:    movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; LINUX-NEXT:    movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; LINUX-NEXT:    movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; LINUX-NEXT:    movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; LINUX-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; LINUX-NEXT:    callq get_f
+; LINUX-NEXT:    callq get_f at PLT
 ; LINUX-NEXT:    movq %rax, %r11
 ; LINUX-NEXT:    movq %r14, %rdi
 ; LINUX-NEXT:    movq %rbx, %rsi
@@ -131,13 +130,26 @@ define void @f_thunk(i8* %this, ...) {
 ; LINUX-X32-NEXT:    .cfi_offset %r14, -32
 ; LINUX-X32-NEXT:    .cfi_offset %r15, -24
 ; LINUX-X32-NEXT:    .cfi_offset %rbp, -16
+; LINUX-X32-NEXT:    movaps %xmm7, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; LINUX-X32-NEXT:    movaps %xmm6, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; LINUX-X32-NEXT:    movaps %xmm5, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; LINUX-X32-NEXT:    movaps %xmm4, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; LINUX-X32-NEXT:    movaps %xmm3, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; LINUX-X32-NEXT:    movaps %xmm2, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; LINUX-X32-NEXT:    movaps %xmm1, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; LINUX-X32-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; LINUX-X32-NEXT:    movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
 ; LINUX-X32-NEXT:    movq %r9, %r15
 ; LINUX-X32-NEXT:    movq %r8, %r12
 ; LINUX-X32-NEXT:    movq %rcx, %r13
 ; LINUX-X32-NEXT:    movq %rdx, %rbp
 ; LINUX-X32-NEXT:    movq %rsi, %rbx
 ; LINUX-X32-NEXT:    movq %rdi, %r14
-; LINUX-X32-NEXT:    movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; LINUX-X32-NEXT:    movq %rsi, {{[0-9]+}}(%esp)
+; LINUX-X32-NEXT:    movq %rdx, {{[0-9]+}}(%esp)
+; LINUX-X32-NEXT:    movq %rcx, {{[0-9]+}}(%esp)
+; LINUX-X32-NEXT:    movq %r8, {{[0-9]+}}(%esp)
+; LINUX-X32-NEXT:    movq %r9, {{[0-9]+}}(%esp)
 ; LINUX-X32-NEXT:    testb %al, %al
 ; LINUX-X32-NEXT:    je .LBB0_2
 ; LINUX-X32-NEXT:  # %bb.1:
@@ -150,27 +162,13 @@ define void @f_thunk(i8* %this, ...) {
 ; LINUX-X32-NEXT:    movaps %xmm6, {{[0-9]+}}(%esp)
 ; LINUX-X32-NEXT:    movaps %xmm7, {{[0-9]+}}(%esp)
 ; LINUX-X32-NEXT:  .LBB0_2:
-; LINUX-X32-NEXT:    movq %rbx, {{[0-9]+}}(%esp)
-; LINUX-X32-NEXT:    movq %rbp, {{[0-9]+}}(%esp)
-; LINUX-X32-NEXT:    movq %r13, {{[0-9]+}}(%esp)
-; LINUX-X32-NEXT:    movq %r12, {{[0-9]+}}(%esp)
-; LINUX-X32-NEXT:    movq %r15, {{[0-9]+}}(%esp)
 ; LINUX-X32-NEXT:    leal {{[0-9]+}}(%rsp), %eax
 ; LINUX-X32-NEXT:    movl %eax, {{[0-9]+}}(%esp)
 ; LINUX-X32-NEXT:    leal {{[0-9]+}}(%rsp), %eax
 ; LINUX-X32-NEXT:    movl %eax, {{[0-9]+}}(%esp)
 ; LINUX-X32-NEXT:    movabsq $206158430216, %rax # imm = 0x3000000008
 ; LINUX-X32-NEXT:    movq %rax, {{[0-9]+}}(%esp)
-; LINUX-X32-NEXT:    movq %r14, %rdi
-; LINUX-X32-NEXT:    movaps %xmm7, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; LINUX-X32-NEXT:    movaps %xmm6, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; LINUX-X32-NEXT:    movaps %xmm5, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; LINUX-X32-NEXT:    movaps %xmm4, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; LINUX-X32-NEXT:    movaps %xmm3, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; LINUX-X32-NEXT:    movaps %xmm2, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; LINUX-X32-NEXT:    movaps %xmm1, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; LINUX-X32-NEXT:    movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; LINUX-X32-NEXT:    callq get_f
+; LINUX-X32-NEXT:    callq get_f at PLT
 ; LINUX-X32-NEXT:    movl %eax, %r11d
 ; LINUX-X32-NEXT:    movq %r14, %rdi
 ; LINUX-X32-NEXT:    movq %rbx, %rsi

diff  --git a/llvm/test/CodeGen/X86/vastart-defs-eflags.ll b/llvm/test/CodeGen/X86/vastart-defs-eflags.ll
index 00e605ae5169..f7671fea8c00 100644
--- a/llvm/test/CodeGen/X86/vastart-defs-eflags.ll
+++ b/llvm/test/CodeGen/X86/vastart-defs-eflags.ll
@@ -9,9 +9,14 @@ define i32 @check_flag(i32 %flags, ...) nounwind {
 ; CHECK-LABEL: check_flag:
 ; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    subq $56, %rsp
+; CHECK-NEXT:    movq %rsi, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movq %rdx, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movq %rcx, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movq %r8, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movq %r9, -{{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    testb %al, %al
-; CHECK-NEXT:    je LBB0_2
-; CHECK-NEXT:  ## %bb.1: ## %entry
+; CHECK-NEXT:    je LBB0_4
+; CHECK-NEXT:  ## %bb.3: ## %entry
 ; CHECK-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    movaps %xmm1, -{{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    movaps %xmm2, -{{[0-9]+}}(%rsp)
@@ -20,16 +25,11 @@ define i32 @check_flag(i32 %flags, ...) nounwind {
 ; CHECK-NEXT:    movaps %xmm5, (%rsp)
 ; CHECK-NEXT:    movaps %xmm6, {{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    movaps %xmm7, {{[0-9]+}}(%rsp)
-; CHECK-NEXT:  LBB0_2: ## %entry
-; CHECK-NEXT:    movq %rsi, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT:    movq %rdx, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT:    movq %rcx, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT:    movq %r8, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT:    movq %r9, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT:  LBB0_4: ## %entry
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    testl $512, %edi ## imm = 0x200
-; CHECK-NEXT:    je LBB0_4
-; CHECK-NEXT:  ## %bb.3: ## %if.then
+; CHECK-NEXT:    je LBB0_2
+; CHECK-NEXT:  ## %bb.1: ## %if.then
 ; CHECK-NEXT:    leaq -{{[0-9]+}}(%rsp), %rax
 ; CHECK-NEXT:    movq %rax, 16
 ; CHECK-NEXT:    leaq {{[0-9]+}}(%rsp), %rax
@@ -37,7 +37,7 @@ define i32 @check_flag(i32 %flags, ...) nounwind {
 ; CHECK-NEXT:    movl $48, 4
 ; CHECK-NEXT:    movl $8, 0
 ; CHECK-NEXT:    movl $1, %eax
-; CHECK-NEXT:  LBB0_4: ## %if.end
+; CHECK-NEXT:  LBB0_2: ## %if.end
 ; CHECK-NEXT:    addq $56, %rsp
 ; CHECK-NEXT:    retq
 entry:

diff  --git a/llvm/test/CodeGen/X86/x32-va_start.ll b/llvm/test/CodeGen/X86/x32-va_start.ll
index 7202a3fb4cdc..1697b7db2b16 100644
--- a/llvm/test/CodeGen/X86/x32-va_start.ll
+++ b/llvm/test/CodeGen/X86/x32-va_start.ll
@@ -1,5 +1,8 @@
-; RUN: llc < %s -mtriple=x86_64-linux-gnux32 | FileCheck %s -check-prefix=CHECK -check-prefix=SSE
-; RUN: llc < %s -mtriple=x86_64-linux-gnux32 -mattr=-sse | FileCheck %s -check-prefix=CHECK -check-prefix=NOSSE
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-linux-gnux32 | FileCheck %s -check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-linux-gnux32 -mattr=-sse | FileCheck %s -check-prefix=NOSSE
+; RUN: llc < %s -mtriple=i386-linux-gnux32 | FileCheck %s -check-prefix=32BITABI
+; RUN: llc < %s -mtriple=i686-linux-gnux32 | FileCheck %s -check-prefix=32BITABI
 ;
 ; Verifies that x32 va_start lowering is sane. To regenerate this test, use
 ; cat <<EOF |
@@ -21,34 +24,109 @@ target triple = "x86_64-unknown-linux-gnux32"
 %struct.__va_list_tag = type { i32, i32, i8*, i8* }
 
 define i32 @foo(float %a, i8* nocapture readnone %fmt, ...) nounwind {
+; SSE-LABEL: foo:
+; SSE:       # %bb.0: # %entry
+; SSE-NEXT:    subl $72, %esp
+; SSE-NEXT:    movq %rsi, -{{[0-9]+}}(%esp)
+; SSE-NEXT:    movq %rdx, -{{[0-9]+}}(%esp)
+; SSE-NEXT:    movq %rcx, -{{[0-9]+}}(%esp)
+; SSE-NEXT:    movq %r8, -{{[0-9]+}}(%esp)
+; SSE-NEXT:    movq %r9, -{{[0-9]+}}(%esp)
+; SSE-NEXT:    testb %al, %al
+; SSE-NEXT:    je .LBB0_5
+; SSE-NEXT:  # %bb.4: # %entry
+; SSE-NEXT:    movaps %xmm1, -{{[0-9]+}}(%esp)
+; SSE-NEXT:    movaps %xmm2, -{{[0-9]+}}(%esp)
+; SSE-NEXT:    movaps %xmm3, -{{[0-9]+}}(%esp)
+; SSE-NEXT:    movaps %xmm4, (%esp)
+; SSE-NEXT:    movaps %xmm5, {{[0-9]+}}(%esp)
+; SSE-NEXT:    movaps %xmm6, {{[0-9]+}}(%esp)
+; SSE-NEXT:    movaps %xmm7, {{[0-9]+}}(%esp)
+; SSE-NEXT:  .LBB0_5: # %entry
+; SSE-NEXT:    leal -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT:    movl %eax, -{{[0-9]+}}(%esp)
+; SSE-NEXT:    leal {{[0-9]+}}(%rsp), %eax
+; SSE-NEXT:    movl %eax, -{{[0-9]+}}(%esp)
+; SSE-NEXT:    movabsq $274877906952, %rax # imm = 0x4000000008
+; SSE-NEXT:    movq %rax, -{{[0-9]+}}(%esp)
+; SSE-NEXT:    movl $8, %ecx
+; SSE-NEXT:    cmpl $40, %ecx
+; SSE-NEXT:    ja .LBB0_2
+; SSE-NEXT:  # %bb.1: # %vaarg.in_reg
+; SSE-NEXT:    movl -{{[0-9]+}}(%esp), %eax
+; SSE-NEXT:    addl %ecx, %eax
+; SSE-NEXT:    addl $8, %ecx
+; SSE-NEXT:    movl %ecx, -{{[0-9]+}}(%esp)
+; SSE-NEXT:    jmp .LBB0_3
+; SSE-NEXT:  .LBB0_2: # %vaarg.in_mem
+; SSE-NEXT:    movl -{{[0-9]+}}(%esp), %eax
+; SSE-NEXT:    leal 8(%rax), %ecx
+; SSE-NEXT:    movl %ecx, -{{[0-9]+}}(%esp)
+; SSE-NEXT:  .LBB0_3: # %vaarg.end
+; SSE-NEXT:    movl (%eax), %eax
+; SSE-NEXT:    addl $72, %esp
+; SSE-NEXT:    retq
+;
+; NOSSE-LABEL: foo:
+; NOSSE:       # %bb.0: # %entry
+; NOSSE-NEXT:    movq %rsi, -{{[0-9]+}}(%esp)
+; NOSSE-NEXT:    movq %rdx, -{{[0-9]+}}(%esp)
+; NOSSE-NEXT:    movq %rcx, -{{[0-9]+}}(%esp)
+; NOSSE-NEXT:    movq %r8, -{{[0-9]+}}(%esp)
+; NOSSE-NEXT:    movq %r9, -{{[0-9]+}}(%esp)
+; NOSSE-NEXT:    leal -{{[0-9]+}}(%rsp), %eax
+; NOSSE-NEXT:    movl %eax, -{{[0-9]+}}(%esp)
+; NOSSE-NEXT:    leal {{[0-9]+}}(%rsp), %eax
+; NOSSE-NEXT:    movl %eax, -{{[0-9]+}}(%esp)
+; NOSSE-NEXT:    movabsq $206158430216, %rax # imm = 0x3000000008
+; NOSSE-NEXT:    movq %rax, -{{[0-9]+}}(%esp)
+; NOSSE-NEXT:    movl $8, %ecx
+; NOSSE-NEXT:    cmpl $40, %ecx
+; NOSSE-NEXT:    ja .LBB0_2
+; NOSSE-NEXT:  # %bb.1: # %vaarg.in_reg
+; NOSSE-NEXT:    movl -{{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT:    addl %ecx, %eax
+; NOSSE-NEXT:    addl $8, %ecx
+; NOSSE-NEXT:    movl %ecx, -{{[0-9]+}}(%esp)
+; NOSSE-NEXT:    movl (%eax), %eax
+; NOSSE-NEXT:    retq
+; NOSSE-NEXT:  .LBB0_2: # %vaarg.in_mem
+; NOSSE-NEXT:    movl -{{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT:    leal 8(%rax), %ecx
+; NOSSE-NEXT:    movl %ecx, -{{[0-9]+}}(%esp)
+; NOSSE-NEXT:    movl (%eax), %eax
+; NOSSE-NEXT:    retq
+;
+; 32BITABI-LABEL: foo:
+; 32BITABI:       # %bb.0: # %entry
+; 32BITABI-NEXT:    subl $28, %esp
+; 32BITABI-NEXT:    leal {{[0-9]+}}(%esp), %ecx
+; 32BITABI-NEXT:    movl %ecx, (%esp)
+; 32BITABI-NEXT:    cmpl $40, %ecx
+; 32BITABI-NEXT:    ja .LBB0_2
+; 32BITABI-NEXT:  # %bb.1: # %vaarg.in_reg
+; 32BITABI-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; 32BITABI-NEXT:    addl %ecx, %eax
+; 32BITABI-NEXT:    addl $8, %ecx
+; 32BITABI-NEXT:    movl %ecx, (%esp)
+; 32BITABI-NEXT:    jmp .LBB0_3
+; 32BITABI-NEXT:  .LBB0_2: # %vaarg.in_mem
+; 32BITABI-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; 32BITABI-NEXT:    leal 8(%eax), %ecx
+; 32BITABI-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
+; 32BITABI-NEXT:  .LBB0_3: # %vaarg.end
+; 32BITABI-NEXT:    movl (%eax), %eax
+; 32BITABI-NEXT:    addl $28, %esp
+; 32BITABI-NEXT:    retl
 entry:
   %ap = alloca [1 x %struct.__va_list_tag], align 16
   %0 = bitcast [1 x %struct.__va_list_tag]* %ap to i8*
   call void @llvm.lifetime.start.p0i8(i64 16, i8* %0) #2
   call void @llvm.va_start(i8* %0)
-; SSE: subl $72, %esp
-; SSE: testb %al, %al
-; SSE: je .[[NOFP:.*]]
-; SSE-DAG: movaps %xmm1
-; SSE-DAG: movaps %xmm2
-; SSE-DAG: movaps %xmm3
-; SSE-DAG: movaps %xmm4
-; SSE-DAG: movaps %xmm5
-; SSE-DAG: movaps %xmm6
-; SSE-DAG: movaps %xmm7
-; NOSSE-NOT: xmm
-; SSE: .[[NOFP]]:
-; CHECK-DAG: movq %r9
-; CHECK-DAG: movq %r8
-; CHECK-DAG: movq %rcx
-; CHECK-DAG: movq %rdx
-; CHECK-DAG: movq %rsi
   %gp_offset_p = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i32 0, i32 0, i32 0
   %gp_offset = load i32, i32* %gp_offset_p, align 16
   %fits_in_gp = icmp ult i32 %gp_offset, 41
   br i1 %fits_in_gp, label %vaarg.in_reg, label %vaarg.in_mem
-; CHECK: cmpl $40, [[COUNT:.*]]
-; CHECK: ja .[[IN_MEM:.*]]
 
 vaarg.in_reg:                                     ; preds = %entry
   %1 = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i32 0, i32 0, i32 3
@@ -57,22 +135,12 @@ vaarg.in_reg:                                     ; preds = %entry
   %3 = add i32 %gp_offset, 8
   store i32 %3, i32* %gp_offset_p, align 16
   br label %vaarg.end
-; CHECK: movl {{[^,]*}}, [[ADDR:.*]]
-; CHECK: addl [[COUNT]], [[ADDR]]
-; SSE: jmp .[[END:.*]]
-; NOSSE: movl ([[ADDR]]), %eax
-; NOSSE: retq
-; CHECK: .[[IN_MEM]]:
 vaarg.in_mem:                                     ; preds = %entry
   %overflow_arg_area_p = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i32 0, i32 0, i32 2
   %overflow_arg_area = load i8*, i8** %overflow_arg_area_p, align 8
   %overflow_arg_area.next = getelementptr i8, i8* %overflow_arg_area, i32 8
   store i8* %overflow_arg_area.next, i8** %overflow_arg_area_p, align 8
   br label %vaarg.end
-; CHECK: movl {{[^,]*}}, [[ADDR]]
-; NOSSE: movl ([[ADDR]]), %eax
-; NOSSE: retq
-; SSE: .[[END]]:
 
 vaarg.end:                                        ; preds = %vaarg.in_mem, %vaarg.in_reg
   %vaarg.addr.in = phi i8* [ %2, %vaarg.in_reg ], [ %overflow_arg_area, %vaarg.in_mem ]
@@ -81,8 +149,6 @@ vaarg.end:                                        ; preds = %vaarg.in_mem, %vaar
   call void @llvm.va_end(i8* %0)
   call void @llvm.lifetime.end.p0i8(i64 16, i8* %0) #2
   ret i32 %4
-; SSE: movl ([[ADDR]]), %eax
-; SSE: retq
 }
 
 ; Function Attrs: nounwind argmemonly

diff  --git a/llvm/test/CodeGen/X86/x86-64-varargs.ll b/llvm/test/CodeGen/X86/x86-64-varargs.ll
index 146248857fae..82c24ba39b09 100644
--- a/llvm/test/CodeGen/X86/x86-64-varargs.ll
+++ b/llvm/test/CodeGen/X86/x86-64-varargs.ll
@@ -18,8 +18,8 @@ define void @func(...) nounwind {
 ; CHECK-X64-NEXT:    pushq %rbx
 ; CHECK-X64-NEXT:    subq $224, %rsp
 ; CHECK-X64-NEXT:    testb %al, %al
-; CHECK-X64-NEXT:    je LBB0_2
-; CHECK-X64-NEXT:  ## %bb.1: ## %entry
+; CHECK-X64-NEXT:    je LBB0_47
+; CHECK-X64-NEXT:  ## %bb.46: ## %entry
 ; CHECK-X64-NEXT:    movaps %xmm0, 96(%rsp)
 ; CHECK-X64-NEXT:    movaps %xmm1, 112(%rsp)
 ; CHECK-X64-NEXT:    movaps %xmm2, 128(%rsp)
@@ -28,7 +28,7 @@ define void @func(...) nounwind {
 ; CHECK-X64-NEXT:    movaps %xmm5, 176(%rsp)
 ; CHECK-X64-NEXT:    movaps %xmm6, 192(%rsp)
 ; CHECK-X64-NEXT:    movaps %xmm7, 208(%rsp)
-; CHECK-X64-NEXT:  LBB0_2: ## %entry
+; CHECK-X64-NEXT:  LBB0_47: ## %entry
 ; CHECK-X64-NEXT:    movq %rdi, 48(%rsp)
 ; CHECK-X64-NEXT:    movq %rsi, 56(%rsp)
 ; CHECK-X64-NEXT:    movq %rdx, 64(%rsp)
@@ -43,51 +43,51 @@ define void @func(...) nounwind {
 ; CHECK-X64-NEXT:    movq %rax, 16(%rsp)
 ; CHECK-X64-NEXT:    movl (%rsp), %ecx
 ; CHECK-X64-NEXT:    cmpl $48, %ecx
-; CHECK-X64-NEXT:    jae LBB0_4
-; CHECK-X64-NEXT:  ## %bb.3: ## %entry
+; CHECK-X64-NEXT:    jae LBB0_2
+; CHECK-X64-NEXT:  ## %bb.1: ## %entry
 ; CHECK-X64-NEXT:    movq 16(%rsp), %rax
 ; CHECK-X64-NEXT:    addq %rcx, %rax
 ; CHECK-X64-NEXT:    addl $8, %ecx
 ; CHECK-X64-NEXT:    movl %ecx, (%rsp)
-; CHECK-X64-NEXT:    jmp LBB0_5
-; CHECK-X64-NEXT:  LBB0_4: ## %entry
+; CHECK-X64-NEXT:    jmp LBB0_3
+; CHECK-X64-NEXT:  LBB0_2: ## %entry
 ; CHECK-X64-NEXT:    movq 8(%rsp), %rax
 ; CHECK-X64-NEXT:    movq %rax, %rcx
 ; CHECK-X64-NEXT:    addq $8, %rcx
 ; CHECK-X64-NEXT:    movq %rcx, 8(%rsp)
-; CHECK-X64-NEXT:  LBB0_5: ## %entry
+; CHECK-X64-NEXT:  LBB0_3: ## %entry
 ; CHECK-X64-NEXT:    movl (%rax), %r10d
 ; CHECK-X64-NEXT:    movl (%rsp), %ecx
 ; CHECK-X64-NEXT:    cmpl $48, %ecx
-; CHECK-X64-NEXT:    jae LBB0_7
-; CHECK-X64-NEXT:  ## %bb.6: ## %entry
+; CHECK-X64-NEXT:    jae LBB0_5
+; CHECK-X64-NEXT:  ## %bb.4: ## %entry
 ; CHECK-X64-NEXT:    movq 16(%rsp), %rax
 ; CHECK-X64-NEXT:    addq %rcx, %rax
 ; CHECK-X64-NEXT:    addl $8, %ecx
 ; CHECK-X64-NEXT:    movl %ecx, (%rsp)
-; CHECK-X64-NEXT:    jmp LBB0_8
-; CHECK-X64-NEXT:  LBB0_7: ## %entry
+; CHECK-X64-NEXT:    jmp LBB0_6
+; CHECK-X64-NEXT:  LBB0_5: ## %entry
 ; CHECK-X64-NEXT:    movq 8(%rsp), %rax
 ; CHECK-X64-NEXT:    movq %rax, %rcx
 ; CHECK-X64-NEXT:    addq $8, %rcx
 ; CHECK-X64-NEXT:    movq %rcx, 8(%rsp)
-; CHECK-X64-NEXT:  LBB0_8: ## %entry
+; CHECK-X64-NEXT:  LBB0_6: ## %entry
 ; CHECK-X64-NEXT:    movl (%rax), %r11d
 ; CHECK-X64-NEXT:    movl (%rsp), %ecx
 ; CHECK-X64-NEXT:    cmpl $48, %ecx
-; CHECK-X64-NEXT:    jae LBB0_10
-; CHECK-X64-NEXT:  ## %bb.9: ## %entry
+; CHECK-X64-NEXT:    jae LBB0_8
+; CHECK-X64-NEXT:  ## %bb.7: ## %entry
 ; CHECK-X64-NEXT:    movq 16(%rsp), %rax
 ; CHECK-X64-NEXT:    addq %rcx, %rax
 ; CHECK-X64-NEXT:    addl $8, %ecx
 ; CHECK-X64-NEXT:    movl %ecx, (%rsp)
-; CHECK-X64-NEXT:    jmp LBB0_11
-; CHECK-X64-NEXT:  LBB0_10: ## %entry
+; CHECK-X64-NEXT:    jmp LBB0_9
+; CHECK-X64-NEXT:  LBB0_8: ## %entry
 ; CHECK-X64-NEXT:    movq 8(%rsp), %rax
 ; CHECK-X64-NEXT:    movq %rax, %rcx
 ; CHECK-X64-NEXT:    addq $8, %rcx
 ; CHECK-X64-NEXT:    movq %rcx, 8(%rsp)
-; CHECK-X64-NEXT:  LBB0_11: ## %entry
+; CHECK-X64-NEXT:  LBB0_9: ## %entry
 ; CHECK-X64-NEXT:    movl (%rax), %r9d
 ; CHECK-X64-NEXT:    movq 16(%rsp), %rax
 ; CHECK-X64-NEXT:    movq %rax, 40(%rsp)
@@ -97,172 +97,172 @@ define void @func(...) nounwind {
 ; CHECK-X64-NEXT:    movq %rax, 24(%rsp)
 ; CHECK-X64-NEXT:    movl 4(%rsp), %eax
 ; CHECK-X64-NEXT:    cmpl $176, %eax
-; CHECK-X64-NEXT:    jae LBB0_13
-; CHECK-X64-NEXT:  ## %bb.12: ## %entry
+; CHECK-X64-NEXT:    jae LBB0_11
+; CHECK-X64-NEXT:  ## %bb.10: ## %entry
 ; CHECK-X64-NEXT:    addl $16, %eax
 ; CHECK-X64-NEXT:    movl %eax, 4(%rsp)
-; CHECK-X64-NEXT:    jmp LBB0_14
-; CHECK-X64-NEXT:  LBB0_13: ## %entry
+; CHECK-X64-NEXT:    jmp LBB0_12
+; CHECK-X64-NEXT:  LBB0_11: ## %entry
 ; CHECK-X64-NEXT:    movq 8(%rsp), %rax
 ; CHECK-X64-NEXT:    addq $8, %rax
 ; CHECK-X64-NEXT:    movq %rax, 8(%rsp)
-; CHECK-X64-NEXT:  LBB0_14: ## %entry
+; CHECK-X64-NEXT:  LBB0_12: ## %entry
 ; CHECK-X64-NEXT:    movl 28(%rsp), %ecx
 ; CHECK-X64-NEXT:    cmpl $176, %ecx
-; CHECK-X64-NEXT:    jae LBB0_16
-; CHECK-X64-NEXT:  ## %bb.15: ## %entry
+; CHECK-X64-NEXT:    jae LBB0_14
+; CHECK-X64-NEXT:  ## %bb.13: ## %entry
 ; CHECK-X64-NEXT:    movq 40(%rsp), %rax
 ; CHECK-X64-NEXT:    addq %rcx, %rax
 ; CHECK-X64-NEXT:    addl $16, %ecx
 ; CHECK-X64-NEXT:    movl %ecx, 28(%rsp)
-; CHECK-X64-NEXT:    jmp LBB0_17
-; CHECK-X64-NEXT:  LBB0_16: ## %entry
+; CHECK-X64-NEXT:    jmp LBB0_15
+; CHECK-X64-NEXT:  LBB0_14: ## %entry
 ; CHECK-X64-NEXT:    movq 32(%rsp), %rax
 ; CHECK-X64-NEXT:    movq %rax, %rcx
 ; CHECK-X64-NEXT:    addq $8, %rcx
 ; CHECK-X64-NEXT:    movq %rcx, 32(%rsp)
-; CHECK-X64-NEXT:  LBB0_17: ## %entry
+; CHECK-X64-NEXT:  LBB0_15: ## %entry
 ; CHECK-X64-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
 ; CHECK-X64-NEXT:    movl (%rsp), %ecx
 ; CHECK-X64-NEXT:    cmpl $48, %ecx
-; CHECK-X64-NEXT:    jae LBB0_19
-; CHECK-X64-NEXT:  ## %bb.18: ## %entry
+; CHECK-X64-NEXT:    jae LBB0_17
+; CHECK-X64-NEXT:  ## %bb.16: ## %entry
 ; CHECK-X64-NEXT:    movq 16(%rsp), %rax
 ; CHECK-X64-NEXT:    addq %rcx, %rax
 ; CHECK-X64-NEXT:    addl $8, %ecx
 ; CHECK-X64-NEXT:    movl %ecx, (%rsp)
-; CHECK-X64-NEXT:    jmp LBB0_20
-; CHECK-X64-NEXT:  LBB0_19: ## %entry
+; CHECK-X64-NEXT:    jmp LBB0_18
+; CHECK-X64-NEXT:  LBB0_17: ## %entry
 ; CHECK-X64-NEXT:    movq 8(%rsp), %rax
 ; CHECK-X64-NEXT:    movq %rax, %rcx
 ; CHECK-X64-NEXT:    addq $8, %rcx
 ; CHECK-X64-NEXT:    movq %rcx, 8(%rsp)
-; CHECK-X64-NEXT:  LBB0_20: ## %entry
+; CHECK-X64-NEXT:  LBB0_18: ## %entry
 ; CHECK-X64-NEXT:    movl (%rax), %r8d
 ; CHECK-X64-NEXT:    movl 24(%rsp), %eax
 ; CHECK-X64-NEXT:    cmpl $48, %eax
-; CHECK-X64-NEXT:    jae LBB0_22
-; CHECK-X64-NEXT:  ## %bb.21: ## %entry
+; CHECK-X64-NEXT:    jae LBB0_20
+; CHECK-X64-NEXT:  ## %bb.19: ## %entry
 ; CHECK-X64-NEXT:    addl $8, %eax
 ; CHECK-X64-NEXT:    movl %eax, 24(%rsp)
-; CHECK-X64-NEXT:    jmp LBB0_23
-; CHECK-X64-NEXT:  LBB0_22: ## %entry
+; CHECK-X64-NEXT:    jmp LBB0_21
+; CHECK-X64-NEXT:  LBB0_20: ## %entry
 ; CHECK-X64-NEXT:    movq 32(%rsp), %rax
 ; CHECK-X64-NEXT:    addq $8, %rax
 ; CHECK-X64-NEXT:    movq %rax, 32(%rsp)
-; CHECK-X64-NEXT:  LBB0_23: ## %entry
+; CHECK-X64-NEXT:  LBB0_21: ## %entry
 ; CHECK-X64-NEXT:    movl (%rsp), %eax
 ; CHECK-X64-NEXT:    cmpl $48, %eax
-; CHECK-X64-NEXT:    jae LBB0_25
-; CHECK-X64-NEXT:  ## %bb.24: ## %entry
+; CHECK-X64-NEXT:    jae LBB0_23
+; CHECK-X64-NEXT:  ## %bb.22: ## %entry
 ; CHECK-X64-NEXT:    addl $8, %eax
 ; CHECK-X64-NEXT:    movl %eax, (%rsp)
-; CHECK-X64-NEXT:    jmp LBB0_26
-; CHECK-X64-NEXT:  LBB0_25: ## %entry
+; CHECK-X64-NEXT:    jmp LBB0_24
+; CHECK-X64-NEXT:  LBB0_23: ## %entry
 ; CHECK-X64-NEXT:    movq 8(%rsp), %rax
 ; CHECK-X64-NEXT:    addq $8, %rax
 ; CHECK-X64-NEXT:    movq %rax, 8(%rsp)
-; CHECK-X64-NEXT:  LBB0_26: ## %entry
+; CHECK-X64-NEXT:  LBB0_24: ## %entry
 ; CHECK-X64-NEXT:    movl 24(%rsp), %ecx
 ; CHECK-X64-NEXT:    cmpl $48, %ecx
-; CHECK-X64-NEXT:    jae LBB0_28
-; CHECK-X64-NEXT:  ## %bb.27: ## %entry
+; CHECK-X64-NEXT:    jae LBB0_26
+; CHECK-X64-NEXT:  ## %bb.25: ## %entry
 ; CHECK-X64-NEXT:    movq 40(%rsp), %rax
 ; CHECK-X64-NEXT:    addq %rcx, %rax
 ; CHECK-X64-NEXT:    addl $8, %ecx
 ; CHECK-X64-NEXT:    movl %ecx, 24(%rsp)
-; CHECK-X64-NEXT:    jmp LBB0_29
-; CHECK-X64-NEXT:  LBB0_28: ## %entry
+; CHECK-X64-NEXT:    jmp LBB0_27
+; CHECK-X64-NEXT:  LBB0_26: ## %entry
 ; CHECK-X64-NEXT:    movq 32(%rsp), %rax
 ; CHECK-X64-NEXT:    movq %rax, %rcx
 ; CHECK-X64-NEXT:    addq $8, %rcx
 ; CHECK-X64-NEXT:    movq %rcx, 32(%rsp)
-; CHECK-X64-NEXT:  LBB0_29: ## %entry
+; CHECK-X64-NEXT:  LBB0_27: ## %entry
 ; CHECK-X64-NEXT:    movq (%rax), %rcx
 ; CHECK-X64-NEXT:    movl (%rsp), %edx
 ; CHECK-X64-NEXT:    cmpl $48, %edx
-; CHECK-X64-NEXT:    jae LBB0_31
-; CHECK-X64-NEXT:  ## %bb.30: ## %entry
+; CHECK-X64-NEXT:    jae LBB0_29
+; CHECK-X64-NEXT:  ## %bb.28: ## %entry
 ; CHECK-X64-NEXT:    movq 16(%rsp), %rax
 ; CHECK-X64-NEXT:    addq %rdx, %rax
 ; CHECK-X64-NEXT:    addl $8, %edx
 ; CHECK-X64-NEXT:    movl %edx, (%rsp)
-; CHECK-X64-NEXT:    jmp LBB0_32
-; CHECK-X64-NEXT:  LBB0_31: ## %entry
+; CHECK-X64-NEXT:    jmp LBB0_30
+; CHECK-X64-NEXT:  LBB0_29: ## %entry
 ; CHECK-X64-NEXT:    movq 8(%rsp), %rax
 ; CHECK-X64-NEXT:    movq %rax, %rdx
 ; CHECK-X64-NEXT:    addq $8, %rdx
 ; CHECK-X64-NEXT:    movq %rdx, 8(%rsp)
-; CHECK-X64-NEXT:  LBB0_32: ## %entry
+; CHECK-X64-NEXT:  LBB0_30: ## %entry
 ; CHECK-X64-NEXT:    movl (%rax), %edx
 ; CHECK-X64-NEXT:    movl 24(%rsp), %eax
 ; CHECK-X64-NEXT:    cmpl $48, %eax
-; CHECK-X64-NEXT:    jae LBB0_34
-; CHECK-X64-NEXT:  ## %bb.33: ## %entry
+; CHECK-X64-NEXT:    jae LBB0_32
+; CHECK-X64-NEXT:  ## %bb.31: ## %entry
 ; CHECK-X64-NEXT:    addl $8, %eax
 ; CHECK-X64-NEXT:    movl %eax, 24(%rsp)
-; CHECK-X64-NEXT:    jmp LBB0_35
-; CHECK-X64-NEXT:  LBB0_34: ## %entry
+; CHECK-X64-NEXT:    jmp LBB0_33
+; CHECK-X64-NEXT:  LBB0_32: ## %entry
 ; CHECK-X64-NEXT:    movq 32(%rsp), %rax
 ; CHECK-X64-NEXT:    addq $8, %rax
 ; CHECK-X64-NEXT:    movq %rax, 32(%rsp)
-; CHECK-X64-NEXT:  LBB0_35: ## %entry
+; CHECK-X64-NEXT:  LBB0_33: ## %entry
 ; CHECK-X64-NEXT:    movl 4(%rsp), %eax
 ; CHECK-X64-NEXT:    cmpl $176, %eax
-; CHECK-X64-NEXT:    jae LBB0_37
-; CHECK-X64-NEXT:  ## %bb.36: ## %entry
+; CHECK-X64-NEXT:    jae LBB0_35
+; CHECK-X64-NEXT:  ## %bb.34: ## %entry
 ; CHECK-X64-NEXT:    addl $16, %eax
 ; CHECK-X64-NEXT:    movl %eax, 4(%rsp)
-; CHECK-X64-NEXT:    jmp LBB0_38
-; CHECK-X64-NEXT:  LBB0_37: ## %entry
+; CHECK-X64-NEXT:    jmp LBB0_36
+; CHECK-X64-NEXT:  LBB0_35: ## %entry
 ; CHECK-X64-NEXT:    movq 8(%rsp), %rax
 ; CHECK-X64-NEXT:    addq $8, %rax
 ; CHECK-X64-NEXT:    movq %rax, 8(%rsp)
-; CHECK-X64-NEXT:  LBB0_38: ## %entry
+; CHECK-X64-NEXT:  LBB0_36: ## %entry
 ; CHECK-X64-NEXT:    movl 28(%rsp), %esi
 ; CHECK-X64-NEXT:    cmpl $176, %esi
-; CHECK-X64-NEXT:    jae LBB0_40
-; CHECK-X64-NEXT:  ## %bb.39: ## %entry
+; CHECK-X64-NEXT:    jae LBB0_38
+; CHECK-X64-NEXT:  ## %bb.37: ## %entry
 ; CHECK-X64-NEXT:    movq 40(%rsp), %rax
 ; CHECK-X64-NEXT:    addq %rsi, %rax
 ; CHECK-X64-NEXT:    addl $16, %esi
 ; CHECK-X64-NEXT:    movl %esi, 28(%rsp)
-; CHECK-X64-NEXT:    jmp LBB0_41
-; CHECK-X64-NEXT:  LBB0_40: ## %entry
+; CHECK-X64-NEXT:    jmp LBB0_39
+; CHECK-X64-NEXT:  LBB0_38: ## %entry
 ; CHECK-X64-NEXT:    movq 32(%rsp), %rax
 ; CHECK-X64-NEXT:    movq %rax, %rsi
 ; CHECK-X64-NEXT:    addq $8, %rsi
 ; CHECK-X64-NEXT:    movq %rsi, 32(%rsp)
-; CHECK-X64-NEXT:  LBB0_41: ## %entry
+; CHECK-X64-NEXT:  LBB0_39: ## %entry
 ; CHECK-X64-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; CHECK-X64-NEXT:    movl (%rsp), %esi
 ; CHECK-X64-NEXT:    cmpl $48, %esi
-; CHECK-X64-NEXT:    jae LBB0_43
-; CHECK-X64-NEXT:  ## %bb.42: ## %entry
+; CHECK-X64-NEXT:    jae LBB0_41
+; CHECK-X64-NEXT:  ## %bb.40: ## %entry
 ; CHECK-X64-NEXT:    movq 16(%rsp), %rax
 ; CHECK-X64-NEXT:    addq %rsi, %rax
 ; CHECK-X64-NEXT:    addl $8, %esi
 ; CHECK-X64-NEXT:    movl %esi, (%rsp)
-; CHECK-X64-NEXT:    jmp LBB0_44
-; CHECK-X64-NEXT:  LBB0_43: ## %entry
+; CHECK-X64-NEXT:    jmp LBB0_42
+; CHECK-X64-NEXT:  LBB0_41: ## %entry
 ; CHECK-X64-NEXT:    movq 8(%rsp), %rax
 ; CHECK-X64-NEXT:    movq %rax, %rsi
 ; CHECK-X64-NEXT:    addq $8, %rsi
 ; CHECK-X64-NEXT:    movq %rsi, 8(%rsp)
-; CHECK-X64-NEXT:  LBB0_44: ## %entry
+; CHECK-X64-NEXT:  LBB0_42: ## %entry
 ; CHECK-X64-NEXT:    movl (%rax), %esi
 ; CHECK-X64-NEXT:    movl 24(%rsp), %eax
 ; CHECK-X64-NEXT:    cmpl $48, %eax
-; CHECK-X64-NEXT:    jae LBB0_46
-; CHECK-X64-NEXT:  ## %bb.45: ## %entry
+; CHECK-X64-NEXT:    jae LBB0_44
+; CHECK-X64-NEXT:  ## %bb.43: ## %entry
 ; CHECK-X64-NEXT:    addl $8, %eax
 ; CHECK-X64-NEXT:    movl %eax, 24(%rsp)
-; CHECK-X64-NEXT:    jmp LBB0_47
-; CHECK-X64-NEXT:  LBB0_46: ## %entry
+; CHECK-X64-NEXT:    jmp LBB0_45
+; CHECK-X64-NEXT:  LBB0_44: ## %entry
 ; CHECK-X64-NEXT:    movq 32(%rsp), %rax
 ; CHECK-X64-NEXT:    addq $8, %rax
 ; CHECK-X64-NEXT:    movq %rax, 32(%rsp)
-; CHECK-X64-NEXT:  LBB0_47: ## %entry
+; CHECK-X64-NEXT:  LBB0_45: ## %entry
 ; CHECK-X64-NEXT:    movabsq $_.str, %rdi
 ; CHECK-X64-NEXT:    movabsq $_printf, %rbx
 ; CHECK-X64-NEXT:    movb $2, %al
@@ -277,8 +277,8 @@ define void @func(...) nounwind {
 ; CHECK-X32:       # %bb.0: # %entry
 ; CHECK-X32-NEXT:    subl $216, %esp
 ; CHECK-X32-NEXT:    testb %al, %al
-; CHECK-X32-NEXT:    je .LBB0_2
-; CHECK-X32-NEXT:  # %bb.1: # %entry
+; CHECK-X32-NEXT:    je .LBB0_47
+; CHECK-X32-NEXT:  # %bb.46: # %entry
 ; CHECK-X32-NEXT:    movaps %xmm0, 80(%esp)
 ; CHECK-X32-NEXT:    movaps %xmm1, 96(%esp)
 ; CHECK-X32-NEXT:    movaps %xmm2, 112(%esp)
@@ -287,7 +287,7 @@ define void @func(...) nounwind {
 ; CHECK-X32-NEXT:    movaps %xmm5, 160(%esp)
 ; CHECK-X32-NEXT:    movaps %xmm6, 176(%esp)
 ; CHECK-X32-NEXT:    movaps %xmm7, 192(%esp)
-; CHECK-X32-NEXT:  .LBB0_2: # %entry
+; CHECK-X32-NEXT:  .LBB0_47: # %entry
 ; CHECK-X32-NEXT:    movq %rdi, 32(%esp)
 ; CHECK-X32-NEXT:    movq %rsi, 40(%esp)
 ; CHECK-X32-NEXT:    movq %rdx, 48(%esp)
@@ -302,51 +302,51 @@ define void @func(...) nounwind {
 ; CHECK-X32-NEXT:    movl %eax, 12(%esp)
 ; CHECK-X32-NEXT:    movl (%esp), %ecx
 ; CHECK-X32-NEXT:    cmpl $48, %ecx
-; CHECK-X32-NEXT:    jae .LBB0_4
-; CHECK-X32-NEXT:  # %bb.3: # %entry
+; CHECK-X32-NEXT:    jae .LBB0_2
+; CHECK-X32-NEXT:  # %bb.1: # %entry
 ; CHECK-X32-NEXT:    movl 12(%esp), %eax
 ; CHECK-X32-NEXT:    addl %ecx, %eax
 ; CHECK-X32-NEXT:    addl $8, %ecx
 ; CHECK-X32-NEXT:    movl %ecx, (%esp)
-; CHECK-X32-NEXT:    jmp .LBB0_5
-; CHECK-X32-NEXT:  .LBB0_4: # %entry
+; CHECK-X32-NEXT:    jmp .LBB0_3
+; CHECK-X32-NEXT:  .LBB0_2: # %entry
 ; CHECK-X32-NEXT:    movl 8(%esp), %eax
 ; CHECK-X32-NEXT:    movl %eax, %ecx
 ; CHECK-X32-NEXT:    addl $8, %ecx
 ; CHECK-X32-NEXT:    movl %ecx, 8(%esp)
-; CHECK-X32-NEXT:  .LBB0_5: # %entry
+; CHECK-X32-NEXT:  .LBB0_3: # %entry
 ; CHECK-X32-NEXT:    movl (%eax), %r10d
 ; CHECK-X32-NEXT:    movl (%esp), %ecx
 ; CHECK-X32-NEXT:    cmpl $48, %ecx
-; CHECK-X32-NEXT:    jae .LBB0_7
-; CHECK-X32-NEXT:  # %bb.6: # %entry
+; CHECK-X32-NEXT:    jae .LBB0_5
+; CHECK-X32-NEXT:  # %bb.4: # %entry
 ; CHECK-X32-NEXT:    movl 12(%esp), %eax
 ; CHECK-X32-NEXT:    addl %ecx, %eax
 ; CHECK-X32-NEXT:    addl $8, %ecx
 ; CHECK-X32-NEXT:    movl %ecx, (%esp)
-; CHECK-X32-NEXT:    jmp .LBB0_8
-; CHECK-X32-NEXT:  .LBB0_7: # %entry
+; CHECK-X32-NEXT:    jmp .LBB0_6
+; CHECK-X32-NEXT:  .LBB0_5: # %entry
 ; CHECK-X32-NEXT:    movl 8(%esp), %eax
 ; CHECK-X32-NEXT:    movl %eax, %ecx
 ; CHECK-X32-NEXT:    addl $8, %ecx
 ; CHECK-X32-NEXT:    movl %ecx, 8(%esp)
-; CHECK-X32-NEXT:  .LBB0_8: # %entry
+; CHECK-X32-NEXT:  .LBB0_6: # %entry
 ; CHECK-X32-NEXT:    movl (%eax), %r11d
 ; CHECK-X32-NEXT:    movl (%esp), %ecx
 ; CHECK-X32-NEXT:    cmpl $48, %ecx
-; CHECK-X32-NEXT:    jae .LBB0_10
-; CHECK-X32-NEXT:  # %bb.9: # %entry
+; CHECK-X32-NEXT:    jae .LBB0_8
+; CHECK-X32-NEXT:  # %bb.7: # %entry
 ; CHECK-X32-NEXT:    movl 12(%esp), %eax
 ; CHECK-X32-NEXT:    addl %ecx, %eax
 ; CHECK-X32-NEXT:    addl $8, %ecx
 ; CHECK-X32-NEXT:    movl %ecx, (%esp)
-; CHECK-X32-NEXT:    jmp .LBB0_11
-; CHECK-X32-NEXT:  .LBB0_10: # %entry
+; CHECK-X32-NEXT:    jmp .LBB0_9
+; CHECK-X32-NEXT:  .LBB0_8: # %entry
 ; CHECK-X32-NEXT:    movl 8(%esp), %eax
 ; CHECK-X32-NEXT:    movl %eax, %ecx
 ; CHECK-X32-NEXT:    addl $8, %ecx
 ; CHECK-X32-NEXT:    movl %ecx, 8(%esp)
-; CHECK-X32-NEXT:  .LBB0_11: # %entry
+; CHECK-X32-NEXT:  .LBB0_9: # %entry
 ; CHECK-X32-NEXT:    movl (%eax), %r9d
 ; CHECK-X32-NEXT:    movq (%esp), %rax
 ; CHECK-X32-NEXT:    movq 8(%esp), %rcx
@@ -354,172 +354,172 @@ define void @func(...) nounwind {
 ; CHECK-X32-NEXT:    movq %rax, 16(%esp)
 ; CHECK-X32-NEXT:    movl 4(%esp), %eax
 ; CHECK-X32-NEXT:    cmpl $176, %eax
-; CHECK-X32-NEXT:    jae .LBB0_13
-; CHECK-X32-NEXT:  # %bb.12: # %entry
+; CHECK-X32-NEXT:    jae .LBB0_11
+; CHECK-X32-NEXT:  # %bb.10: # %entry
 ; CHECK-X32-NEXT:    addl $16, %eax
 ; CHECK-X32-NEXT:    movl %eax, 4(%esp)
-; CHECK-X32-NEXT:    jmp .LBB0_14
-; CHECK-X32-NEXT:  .LBB0_13: # %entry
+; CHECK-X32-NEXT:    jmp .LBB0_12
+; CHECK-X32-NEXT:  .LBB0_11: # %entry
 ; CHECK-X32-NEXT:    movl 8(%esp), %eax
 ; CHECK-X32-NEXT:    addl $8, %eax
 ; CHECK-X32-NEXT:    movl %eax, 8(%esp)
-; CHECK-X32-NEXT:  .LBB0_14: # %entry
+; CHECK-X32-NEXT:  .LBB0_12: # %entry
 ; CHECK-X32-NEXT:    movl 20(%esp), %ecx
 ; CHECK-X32-NEXT:    cmpl $176, %ecx
-; CHECK-X32-NEXT:    jae .LBB0_16
-; CHECK-X32-NEXT:  # %bb.15: # %entry
+; CHECK-X32-NEXT:    jae .LBB0_14
+; CHECK-X32-NEXT:  # %bb.13: # %entry
 ; CHECK-X32-NEXT:    movl 28(%esp), %eax
 ; CHECK-X32-NEXT:    addl %ecx, %eax
 ; CHECK-X32-NEXT:    addl $16, %ecx
 ; CHECK-X32-NEXT:    movl %ecx, 20(%esp)
-; CHECK-X32-NEXT:    jmp .LBB0_17
-; CHECK-X32-NEXT:  .LBB0_16: # %entry
+; CHECK-X32-NEXT:    jmp .LBB0_15
+; CHECK-X32-NEXT:  .LBB0_14: # %entry
 ; CHECK-X32-NEXT:    movl 24(%esp), %eax
 ; CHECK-X32-NEXT:    movl %eax, %ecx
 ; CHECK-X32-NEXT:    addl $8, %ecx
 ; CHECK-X32-NEXT:    movl %ecx, 24(%esp)
-; CHECK-X32-NEXT:  .LBB0_17: # %entry
+; CHECK-X32-NEXT:  .LBB0_15: # %entry
 ; CHECK-X32-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
 ; CHECK-X32-NEXT:    movl (%esp), %ecx
 ; CHECK-X32-NEXT:    cmpl $48, %ecx
-; CHECK-X32-NEXT:    jae .LBB0_19
-; CHECK-X32-NEXT:  # %bb.18: # %entry
+; CHECK-X32-NEXT:    jae .LBB0_17
+; CHECK-X32-NEXT:  # %bb.16: # %entry
 ; CHECK-X32-NEXT:    movl 12(%esp), %eax
 ; CHECK-X32-NEXT:    addl %ecx, %eax
 ; CHECK-X32-NEXT:    addl $8, %ecx
 ; CHECK-X32-NEXT:    movl %ecx, (%esp)
-; CHECK-X32-NEXT:    jmp .LBB0_20
-; CHECK-X32-NEXT:  .LBB0_19: # %entry
+; CHECK-X32-NEXT:    jmp .LBB0_18
+; CHECK-X32-NEXT:  .LBB0_17: # %entry
 ; CHECK-X32-NEXT:    movl 8(%esp), %eax
 ; CHECK-X32-NEXT:    movl %eax, %ecx
 ; CHECK-X32-NEXT:    addl $8, %ecx
 ; CHECK-X32-NEXT:    movl %ecx, 8(%esp)
-; CHECK-X32-NEXT:  .LBB0_20: # %entry
+; CHECK-X32-NEXT:  .LBB0_18: # %entry
 ; CHECK-X32-NEXT:    movl (%eax), %r8d
 ; CHECK-X32-NEXT:    movl 16(%esp), %eax
 ; CHECK-X32-NEXT:    cmpl $48, %eax
-; CHECK-X32-NEXT:    jae .LBB0_22
-; CHECK-X32-NEXT:  # %bb.21: # %entry
+; CHECK-X32-NEXT:    jae .LBB0_20
+; CHECK-X32-NEXT:  # %bb.19: # %entry
 ; CHECK-X32-NEXT:    addl $8, %eax
 ; CHECK-X32-NEXT:    movl %eax, 16(%esp)
-; CHECK-X32-NEXT:    jmp .LBB0_23
-; CHECK-X32-NEXT:  .LBB0_22: # %entry
+; CHECK-X32-NEXT:    jmp .LBB0_21
+; CHECK-X32-NEXT:  .LBB0_20: # %entry
 ; CHECK-X32-NEXT:    movl 24(%esp), %eax
 ; CHECK-X32-NEXT:    addl $8, %eax
 ; CHECK-X32-NEXT:    movl %eax, 24(%esp)
-; CHECK-X32-NEXT:  .LBB0_23: # %entry
+; CHECK-X32-NEXT:  .LBB0_21: # %entry
 ; CHECK-X32-NEXT:    movl (%esp), %eax
 ; CHECK-X32-NEXT:    cmpl $48, %eax
-; CHECK-X32-NEXT:    jae .LBB0_25
-; CHECK-X32-NEXT:  # %bb.24: # %entry
+; CHECK-X32-NEXT:    jae .LBB0_23
+; CHECK-X32-NEXT:  # %bb.22: # %entry
 ; CHECK-X32-NEXT:    addl $8, %eax
 ; CHECK-X32-NEXT:    movl %eax, (%esp)
-; CHECK-X32-NEXT:    jmp .LBB0_26
-; CHECK-X32-NEXT:  .LBB0_25: # %entry
+; CHECK-X32-NEXT:    jmp .LBB0_24
+; CHECK-X32-NEXT:  .LBB0_23: # %entry
 ; CHECK-X32-NEXT:    movl 8(%esp), %eax
 ; CHECK-X32-NEXT:    addl $8, %eax
 ; CHECK-X32-NEXT:    movl %eax, 8(%esp)
-; CHECK-X32-NEXT:  .LBB0_26: # %entry
+; CHECK-X32-NEXT:  .LBB0_24: # %entry
 ; CHECK-X32-NEXT:    movl 16(%esp), %ecx
 ; CHECK-X32-NEXT:    cmpl $48, %ecx
-; CHECK-X32-NEXT:    jae .LBB0_28
-; CHECK-X32-NEXT:  # %bb.27: # %entry
+; CHECK-X32-NEXT:    jae .LBB0_26
+; CHECK-X32-NEXT:  # %bb.25: # %entry
 ; CHECK-X32-NEXT:    movl 28(%esp), %eax
 ; CHECK-X32-NEXT:    addl %ecx, %eax
 ; CHECK-X32-NEXT:    addl $8, %ecx
 ; CHECK-X32-NEXT:    movl %ecx, 16(%esp)
-; CHECK-X32-NEXT:    jmp .LBB0_29
-; CHECK-X32-NEXT:  .LBB0_28: # %entry
+; CHECK-X32-NEXT:    jmp .LBB0_27
+; CHECK-X32-NEXT:  .LBB0_26: # %entry
 ; CHECK-X32-NEXT:    movl 24(%esp), %eax
 ; CHECK-X32-NEXT:    movl %eax, %ecx
 ; CHECK-X32-NEXT:    addl $8, %ecx
 ; CHECK-X32-NEXT:    movl %ecx, 24(%esp)
-; CHECK-X32-NEXT:  .LBB0_29: # %entry
+; CHECK-X32-NEXT:  .LBB0_27: # %entry
 ; CHECK-X32-NEXT:    movq (%eax), %rcx
 ; CHECK-X32-NEXT:    movl (%esp), %edx
 ; CHECK-X32-NEXT:    cmpl $48, %edx
-; CHECK-X32-NEXT:    jae .LBB0_31
-; CHECK-X32-NEXT:  # %bb.30: # %entry
+; CHECK-X32-NEXT:    jae .LBB0_29
+; CHECK-X32-NEXT:  # %bb.28: # %entry
 ; CHECK-X32-NEXT:    movl 12(%esp), %eax
 ; CHECK-X32-NEXT:    addl %edx, %eax
 ; CHECK-X32-NEXT:    addl $8, %edx
 ; CHECK-X32-NEXT:    movl %edx, (%esp)
-; CHECK-X32-NEXT:    jmp .LBB0_32
-; CHECK-X32-NEXT:  .LBB0_31: # %entry
+; CHECK-X32-NEXT:    jmp .LBB0_30
+; CHECK-X32-NEXT:  .LBB0_29: # %entry
 ; CHECK-X32-NEXT:    movl 8(%esp), %eax
 ; CHECK-X32-NEXT:    movl %eax, %edx
 ; CHECK-X32-NEXT:    addl $8, %edx
 ; CHECK-X32-NEXT:    movl %edx, 8(%esp)
-; CHECK-X32-NEXT:  .LBB0_32: # %entry
+; CHECK-X32-NEXT:  .LBB0_30: # %entry
 ; CHECK-X32-NEXT:    movl (%eax), %edx
 ; CHECK-X32-NEXT:    movl 16(%esp), %eax
 ; CHECK-X32-NEXT:    cmpl $48, %eax
-; CHECK-X32-NEXT:    jae .LBB0_34
-; CHECK-X32-NEXT:  # %bb.33: # %entry
+; CHECK-X32-NEXT:    jae .LBB0_32
+; CHECK-X32-NEXT:  # %bb.31: # %entry
 ; CHECK-X32-NEXT:    addl $8, %eax
 ; CHECK-X32-NEXT:    movl %eax, 16(%esp)
-; CHECK-X32-NEXT:    jmp .LBB0_35
-; CHECK-X32-NEXT:  .LBB0_34: # %entry
+; CHECK-X32-NEXT:    jmp .LBB0_33
+; CHECK-X32-NEXT:  .LBB0_32: # %entry
 ; CHECK-X32-NEXT:    movl 24(%esp), %eax
 ; CHECK-X32-NEXT:    addl $8, %eax
 ; CHECK-X32-NEXT:    movl %eax, 24(%esp)
-; CHECK-X32-NEXT:  .LBB0_35: # %entry
+; CHECK-X32-NEXT:  .LBB0_33: # %entry
 ; CHECK-X32-NEXT:    movl 4(%esp), %eax
 ; CHECK-X32-NEXT:    cmpl $176, %eax
-; CHECK-X32-NEXT:    jae .LBB0_37
-; CHECK-X32-NEXT:  # %bb.36: # %entry
+; CHECK-X32-NEXT:    jae .LBB0_35
+; CHECK-X32-NEXT:  # %bb.34: # %entry
 ; CHECK-X32-NEXT:    addl $16, %eax
 ; CHECK-X32-NEXT:    movl %eax, 4(%esp)
-; CHECK-X32-NEXT:    jmp .LBB0_38
-; CHECK-X32-NEXT:  .LBB0_37: # %entry
+; CHECK-X32-NEXT:    jmp .LBB0_36
+; CHECK-X32-NEXT:  .LBB0_35: # %entry
 ; CHECK-X32-NEXT:    movl 8(%esp), %eax
 ; CHECK-X32-NEXT:    addl $8, %eax
 ; CHECK-X32-NEXT:    movl %eax, 8(%esp)
-; CHECK-X32-NEXT:  .LBB0_38: # %entry
+; CHECK-X32-NEXT:  .LBB0_36: # %entry
 ; CHECK-X32-NEXT:    movl 20(%esp), %esi
 ; CHECK-X32-NEXT:    cmpl $176, %esi
-; CHECK-X32-NEXT:    jae .LBB0_40
-; CHECK-X32-NEXT:  # %bb.39: # %entry
+; CHECK-X32-NEXT:    jae .LBB0_38
+; CHECK-X32-NEXT:  # %bb.37: # %entry
 ; CHECK-X32-NEXT:    movl 28(%esp), %eax
 ; CHECK-X32-NEXT:    addl %esi, %eax
 ; CHECK-X32-NEXT:    addl $16, %esi
 ; CHECK-X32-NEXT:    movl %esi, 20(%esp)
-; CHECK-X32-NEXT:    jmp .LBB0_41
-; CHECK-X32-NEXT:  .LBB0_40: # %entry
+; CHECK-X32-NEXT:    jmp .LBB0_39
+; CHECK-X32-NEXT:  .LBB0_38: # %entry
 ; CHECK-X32-NEXT:    movl 24(%esp), %eax
 ; CHECK-X32-NEXT:    movl %eax, %esi
 ; CHECK-X32-NEXT:    addl $8, %esi
 ; CHECK-X32-NEXT:    movl %esi, 24(%esp)
-; CHECK-X32-NEXT:  .LBB0_41: # %entry
+; CHECK-X32-NEXT:  .LBB0_39: # %entry
 ; CHECK-X32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; CHECK-X32-NEXT:    movl (%esp), %esi
 ; CHECK-X32-NEXT:    cmpl $48, %esi
-; CHECK-X32-NEXT:    jae .LBB0_43
-; CHECK-X32-NEXT:  # %bb.42: # %entry
+; CHECK-X32-NEXT:    jae .LBB0_41
+; CHECK-X32-NEXT:  # %bb.40: # %entry
 ; CHECK-X32-NEXT:    movl 12(%esp), %eax
 ; CHECK-X32-NEXT:    addl %esi, %eax
 ; CHECK-X32-NEXT:    addl $8, %esi
 ; CHECK-X32-NEXT:    movl %esi, (%esp)
-; CHECK-X32-NEXT:    jmp .LBB0_44
-; CHECK-X32-NEXT:  .LBB0_43: # %entry
+; CHECK-X32-NEXT:    jmp .LBB0_42
+; CHECK-X32-NEXT:  .LBB0_41: # %entry
 ; CHECK-X32-NEXT:    movl 8(%esp), %eax
 ; CHECK-X32-NEXT:    movl %eax, %esi
 ; CHECK-X32-NEXT:    addl $8, %esi
 ; CHECK-X32-NEXT:    movl %esi, 8(%esp)
-; CHECK-X32-NEXT:  .LBB0_44: # %entry
+; CHECK-X32-NEXT:  .LBB0_42: # %entry
 ; CHECK-X32-NEXT:    movl (%eax), %esi
 ; CHECK-X32-NEXT:    movl 16(%esp), %eax
 ; CHECK-X32-NEXT:    cmpl $48, %eax
-; CHECK-X32-NEXT:    jae .LBB0_46
-; CHECK-X32-NEXT:  # %bb.45: # %entry
+; CHECK-X32-NEXT:    jae .LBB0_44
+; CHECK-X32-NEXT:  # %bb.43: # %entry
 ; CHECK-X32-NEXT:    addl $8, %eax
 ; CHECK-X32-NEXT:    movl %eax, 16(%esp)
-; CHECK-X32-NEXT:    jmp .LBB0_47
-; CHECK-X32-NEXT:  .LBB0_46: # %entry
+; CHECK-X32-NEXT:    jmp .LBB0_45
+; CHECK-X32-NEXT:  .LBB0_44: # %entry
 ; CHECK-X32-NEXT:    movl 24(%esp), %eax
 ; CHECK-X32-NEXT:    addl $8, %eax
 ; CHECK-X32-NEXT:    movl %eax, 24(%esp)
-; CHECK-X32-NEXT:  .LBB0_47: # %entry
+; CHECK-X32-NEXT:  .LBB0_45: # %entry
 ; CHECK-X32-NEXT:    movl $.str, %edi
 ; CHECK-X32-NEXT:    movb $2, %al
 ; CHECK-X32-NEXT:    pushq %r10
@@ -590,7 +590,7 @@ define i32 @main() nounwind {
 ; CHECK-X32-NEXT:    movl $-10, %ecx
 ; CHECK-X32-NEXT:    movl $120, %r9d
 ; CHECK-X32-NEXT:    movb $2, %al
-; CHECK-X32-NEXT:    callq func
+; CHECK-X32-NEXT:    callq func at PLT
 ; CHECK-X32-NEXT:    xorl %eax, %eax
 ; CHECK-X32-NEXT:    popq %rcx
 ; CHECK-X32-NEXT:    retq

diff  --git a/llvm/test/CodeGen/X86/xmm-vararg-noopt.ll b/llvm/test/CodeGen/X86/xmm-vararg-noopt.ll
new file mode 100644
index 000000000000..6fe048967f84
--- /dev/null
+++ b/llvm/test/CodeGen/X86/xmm-vararg-noopt.ll
@@ -0,0 +1,70 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -O0 -mtriple=x86_64-unknown-unknown < %s | FileCheck %s
+
+; ModuleID = 'variadic.c'
+source_filename = "variadic.c"
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux"
+
+%struct.__va_list_tag = type { i32, i32, i8*, i8* }
+
+ at .str = private unnamed_addr constant [9 x i8] c"\0A hello \00", align 1
+
+; Function Attrs: noinline nounwind optnone uwtable
+define dso_local void @testvarargs(i8* %fmt, ...) {
+; CHECK-LABEL: testvarargs:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    subq $216, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 224
+; CHECK-NEXT:    testb %al, %al
+; CHECK-NEXT:    je .LBB0_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movaps %xmm1, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movaps %xmm2, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movaps %xmm3, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movaps %xmm4, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movaps %xmm5, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movaps %xmm6, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movaps %xmm7, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:  .LBB0_2: # %entry
+; CHECK-NEXT:    movq %r9, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movq %r8, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movq %rcx, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movq %rdx, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movq %rsi, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movq %rdi, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movq %rsp, %rax
+; CHECK-NEXT:    movq %rax, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    leaq {{[0-9]+}}(%rsp), %rax
+; CHECK-NEXT:    movq %rax, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movl $48, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movl $8, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movl $.L.str, %edi
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    # kill: def $al killed $al killed $eax
+; CHECK-NEXT:    callq printf
+; CHECK-NEXT:    addq $216, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 8
+; CHECK-NEXT:    retq
+entry:
+  %fmt.addr = alloca i8*, align 8
+  %va = alloca [1 x %struct.__va_list_tag], align 16
+  store i8* %fmt, i8** %fmt.addr, align 8
+  %arraydecay = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %va, i64 0, i64 0
+  %arraydecay1 = bitcast %struct.__va_list_tag* %arraydecay to i8*
+  call void @llvm.va_start(i8* %arraydecay1)
+  %arraydecay2 = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %va, i64 0, i64 0
+  %arraydecay23 = bitcast %struct.__va_list_tag* %arraydecay2 to i8*
+  call void @llvm.va_end(i8* %arraydecay23)
+  %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str, i64 0, i64 0))
+  ret void
+}
+
+; Function Attrs: nounwind
+declare void @llvm.va_start(i8*)
+
+; Function Attrs: nounwind
+declare void @llvm.va_end(i8*)
+
+declare dso_local i32 @printf(i8*, ...)


        

