[llvm-commits] [llvm] r124270 - in /llvm/trunk: lib/Target/X86/X86ISelLowering.cpp lib/Target/X86/X86InstrCompiler.td lib/Target/X86/X86InstrControl.td lib/Target/X86/X86InstrInfo.cpp lib/Target/X86/X86InstrInfo.td lib/Target/X86/X86RegisterInfo.td test/CodeGen/X86/tailcallstack64.ll utils/TableGen/CodeGenInstruction.cpp

NAKAMURA Takumi geek4civic at gmail.com
Tue Jan 25 18:03:37 PST 2011


Author: chapuni
Date: Tue Jan 25 20:03:37 2011
New Revision: 124270

URL: http://llvm.org/viewvc/llvm-project?rev=124270&view=rev
Log:
Fix whitespace.

Modified:
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
    llvm/trunk/lib/Target/X86/X86InstrCompiler.td
    llvm/trunk/lib/Target/X86/X86InstrControl.td
    llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
    llvm/trunk/lib/Target/X86/X86InstrInfo.td
    llvm/trunk/lib/Target/X86/X86RegisterInfo.td
    llvm/trunk/test/CodeGen/X86/tailcallstack64.ll
    llvm/trunk/utils/TableGen/CodeGenInstruction.cpp

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=124270&r1=124269&r2=124270&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Tue Jan 25 20:03:37 2011
@@ -69,7 +69,7 @@
       return new X8664_MachoTargetObjectFile();
     return new TargetLoweringObjectFileMachO();
   }
-  
+
   if (TM.getSubtarget<X86Subtarget>().isTargetELF() ){
     if (is64Bit)
       return new X8664_ELFTargetObjectFile(TM);
@@ -256,7 +256,7 @@
     setOperationAction(ISD::UDIV, VT, Expand);
     setOperationAction(ISD::SREM, VT, Expand);
     setOperationAction(ISD::UREM, VT, Expand);
-    
+
     // Add/Sub overflow ops with MVT::Glues are lowered to EFLAGS dependences.
     setOperationAction(ISD::ADDC, VT, Custom);
     setOperationAction(ISD::ADDE, VT, Custom);
@@ -369,7 +369,7 @@
     setOperationAction(ISD::ATOMIC_CMP_SWAP, VT, Custom);
     setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
   }
-    
+
   if (!Subtarget->is64Bit()) {
     setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Custom);
     setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom);
@@ -931,7 +931,7 @@
   // We want to custom lower some of our intrinsics.
   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
 
-    
+
   // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
   // handle type legalization for these operations here.
   //
@@ -948,7 +948,7 @@
     setOperationAction(ISD::SMULO, VT, Custom);
     setOperationAction(ISD::UMULO, VT, Custom);
   }
-    
+
   // There are no 8-bit 3-address imul/mul instructions
   setOperationAction(ISD::SMULO, MVT::i8, Expand);
   setOperationAction(ISD::UMULO, MVT::i8, Expand);
@@ -6198,7 +6198,7 @@
     // TLSCALL will be codegen'ed as call. Inform MFI that function has calls.
     MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
     MFI->setAdjustsStack(true);
-    
+
     // And our return value (tls address) is in the standard call return value
     // location.
     unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
@@ -7047,7 +7047,7 @@
       (cast<ConstantSDNode>(Op1)->getZExtValue() == 1 ||
        cast<ConstantSDNode>(Op1)->isNullValue()) &&
       (CC == ISD::SETEQ || CC == ISD::SETNE)) {
- 
+
     // If the input is a setcc, then reuse the input setcc or use a new one with
     // the inverted condition.
     if (Op0.getOpcode() == X86ISD::SETCC) {
@@ -7055,7 +7055,7 @@
       bool Invert = (CC == ISD::SETNE) ^
         cast<ConstantSDNode>(Op1)->isNullValue();
       if (!Invert) return Op0;
-      
+
       CCode = X86::GetOppositeBranchCondition(CCode);
       return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                          DAG.getConstant(CCode, MVT::i8), Op0.getOperand(1));
@@ -7206,7 +7206,7 @@
 
   if (Op.getResNo() == 2 && Opc == X86ISD::UMUL)
     return true;
-    
+
   return false;
 }
 
@@ -7242,24 +7242,24 @@
       Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
       isZero(Cond.getOperand(1).getOperand(1))) {
     SDValue Cmp = Cond.getOperand(1);
-    
+
     unsigned CondCode =cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue();
-    
-    if ((isAllOnes(Op1) || isAllOnes(Op2)) && 
+
+    if ((isAllOnes(Op1) || isAllOnes(Op2)) &&
         (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
       SDValue Y = isAllOnes(Op2) ? Op1 : Op2;
 
       SDValue CmpOp0 = Cmp.getOperand(0);
       Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
                         CmpOp0, DAG.getConstant(1, CmpOp0.getValueType()));
-      
+
       SDValue Res =   // Res = 0 or -1.
         DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
                     DAG.getConstant(X86::COND_B, MVT::i8), Cmp);
-      
+
       if (isAllOnes(Op1) != (CondCode == X86::COND_E))
         Res = DAG.getNOT(DL, Res, Res.getValueType());
-      
+
       ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(Op2);
       if (N2C == 0 || !N2C->isNullValue())
         Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
@@ -8443,7 +8443,7 @@
     Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
 
     // return pblendv(r, r+r, a);
-    R = DAG.getNode(X86ISD::PBLENDVB, dl, VT, 
+    R = DAG.getNode(X86ISD::PBLENDVB, dl, VT,
                     R, DAG.getNode(ISD::ADD, dl, VT, R, R), Op);
     return R;
   }
@@ -8503,12 +8503,12 @@
     SDVTList VTs = DAG.getVTList(N->getValueType(0), N->getValueType(0),
                                  MVT::i32);
     SDValue Sum = DAG.getNode(X86ISD::UMUL, DL, VTs, LHS, RHS);
-    
+
     SDValue SetCC =
       DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
                   DAG.getConstant(X86::COND_O, MVT::i32),
                   SDValue(Sum.getNode(), 2));
-    
+
     DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), SetCC);
     return Sum;
   }
@@ -8663,9 +8663,9 @@
   // Let legalize expand this if it isn't a legal type yet.
   if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
     return SDValue();
-  
+
   SDVTList VTs = DAG.getVTList(VT, MVT::i32);
-  
+
   unsigned Opc;
   bool ExtraOp = false;
   switch (Op.getOpcode()) {
@@ -8675,7 +8675,7 @@
   case ISD::SUBC: Opc = X86ISD::SUB; break;
   case ISD::SUBE: Opc = X86ISD::SBB; ExtraOp = true; break;
   }
-  
+
   if (!ExtraOp)
     return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0),
                        Op.getOperand(1));
@@ -9555,14 +9555,14 @@
 X86TargetLowering::EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB) const {
   DebugLoc dl = MI->getDebugLoc();
   const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
-  
+
   // Address into RAX/EAX, other two args into ECX, EDX.
   unsigned MemOpc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
   unsigned MemReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
   MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg);
   for (int i = 0; i < X86::AddrNumOperands; ++i)
     MIB.addOperand(MI->getOperand(i));
-  
+
   unsigned ValOps = X86::AddrNumOperands;
   BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX)
     .addReg(MI->getOperand(ValOps).getReg());
@@ -9571,7 +9571,7 @@
 
   // The instruction doesn't actually take any operands though.
   BuildMI(*BB, MI, dl, TII->get(X86::MONITORrrr));
-  
+
   MI->eraseFromParent(); // The pseudo is gone now.
   return BB;
 }
@@ -9580,16 +9580,16 @@
 X86TargetLowering::EmitMwait(MachineInstr *MI, MachineBasicBlock *BB) const {
   DebugLoc dl = MI->getDebugLoc();
   const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
-  
+
   // First arg in ECX, the second in EAX.
   BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX)
     .addReg(MI->getOperand(0).getReg());
   BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EAX)
     .addReg(MI->getOperand(1).getReg());
-    
+
   // The instruction doesn't actually take any operands though.
   BuildMI(*BB, MI, dl, TII->get(X86::MWAITrr));
-  
+
   MI->eraseFromParent(); // The pseudo is gone now.
   return BB;
 }
@@ -10195,7 +10195,7 @@
 
     // Thread synchronization.
   case X86::MONITOR:
-    return EmitMonitor(MI, BB);  
+    return EmitMonitor(MI, BB);
   case X86::MWAIT:
     return EmitMwait(MI, BB);
 
@@ -11116,19 +11116,19 @@
                                  const X86Subtarget *Subtarget) {
   if (DCI.isBeforeLegalizeOps())
     return SDValue();
-  
+
   // Want to form PANDN nodes, in the hopes of then easily combining them with
   // OR and AND nodes to form PBLEND/PSIGN.
   EVT VT = N->getValueType(0);
   if (VT != MVT::v2i64)
     return SDValue();
-  
+
   SDValue N0 = N->getOperand(0);
   SDValue N1 = N->getOperand(1);
   DebugLoc DL = N->getDebugLoc();
-  
+
   // Check LHS for vnot
-  if (N0.getOpcode() == ISD::XOR && 
+  if (N0.getOpcode() == ISD::XOR &&
       ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode()))
     return DAG.getNode(X86ISD::PANDN, DL, VT, N0.getOperand(0), N1);
 
@@ -11136,7 +11136,7 @@
   if (N1.getOpcode() == ISD::XOR &&
       ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode()))
     return DAG.getNode(X86ISD::PANDN, DL, VT, N1.getOperand(0), N0);
-  
+
   return SDValue();
 }
 
@@ -11152,7 +11152,7 @@
 
   SDValue N0 = N->getOperand(0);
   SDValue N1 = N->getOperand(1);
-  
+
   // look for psign/blend
   if (Subtarget->hasSSSE3()) {
     if (VT == MVT::v2i64) {
@@ -11168,17 +11168,17 @@
           Y = N0.getOperand(1);
         if (N0.getOperand(1) == Mask)
           Y = N0.getOperand(0);
-        
+
         // Check to see if the mask appeared in both the AND and PANDN and
         if (!Y.getNode())
           return SDValue();
-        
+
         // Validate that X, Y, and Mask are BIT_CONVERTS, and see through them.
         if (Mask.getOpcode() != ISD::BITCAST ||
             X.getOpcode() != ISD::BITCAST ||
             Y.getOpcode() != ISD::BITCAST)
           return SDValue();
-        
+
         // Look through mask bitcast.
         Mask = Mask.getOperand(0);
         EVT MaskVT = Mask.getValueType();
@@ -11187,7 +11187,7 @@
         // will be an intrinsic.
         if (Mask.getOpcode() != ISD::INTRINSIC_WO_CHAIN)
           return SDValue();
-        
+
         // FIXME: what to do for bytes, since there is a psignb/pblendvb, but
         // there is no psrai.b
         switch (cast<ConstantSDNode>(Mask.getOperand(0))->getZExtValue()) {
@@ -11196,14 +11196,14 @@
           break;
         default: return SDValue();
         }
-        
+
         // Check that the SRA is all signbits.
         SDValue SraC = Mask.getOperand(2);
         unsigned SraAmt  = cast<ConstantSDNode>(SraC)->getZExtValue();
         unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits();
         if ((SraAmt + 1) != EltBits)
           return SDValue();
-        
+
         DebugLoc DL = N->getDebugLoc();
 
         // Now we know we at least have a plendvb with the mask val.  See if
@@ -11229,7 +11229,7 @@
         // PBLENDVB only available on SSE 4.1
         if (!Subtarget->hasSSE41())
           return SDValue();
-        
+
         X = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, X);
         Y = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Y);
         Mask = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Mask);
@@ -11238,7 +11238,7 @@
       }
     }
   }
-  
+
   // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
   if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
     std::swap(N0, N1);
@@ -11290,7 +11290,7 @@
                          DAG.getNode(ISD::TRUNCATE, DL,
                                        MVT::i8, ShAmt0));
   }
-  
+
   return SDValue();
 }
 
@@ -11500,7 +11500,7 @@
   unsigned X86CC = N->getConstantOperandVal(0);
   SDValue EFLAG = N->getOperand(1);
   DebugLoc DL = N->getDebugLoc();
-  
+
   // Materialize "setb reg" as "sbb reg,reg", since it can be extended without
   // a zext and produces an all-ones bit which is more useful than 0/1 in some
   // cases.
@@ -11509,10 +11509,10 @@
                        DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
                                    DAG.getConstant(X86CC, MVT::i8), EFLAG),
                        DAG.getConstant(1, MVT::i8));
-  
+
   return SDValue();
 }
-          
+
 // Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
 static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG,
                                  X86TargetLowering::DAGCombinerInfo &DCI) {
@@ -11544,7 +11544,7 @@
 //      (sub (setne X, 0), Y) -> adc -1, Y
 static SDValue OptimizeConditonalInDecrement(SDNode *N, SelectionDAG &DAG) {
   DebugLoc DL = N->getDebugLoc();
-  
+
   // Look through ZExts.
   SDValue Ext = N->getOperand(N->getOpcode() == ISD::SUB ? 1 : 0);
   if (Ext.getOpcode() != ISD::ZERO_EXTEND || !Ext.hasOneUse())

Modified: llvm/trunk/lib/Target/X86/X86InstrCompiler.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrCompiler.td?rev=124270&r1=124269&r2=124270&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrCompiler.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrCompiler.td Tue Jan 25 20:03:37 2011
@@ -849,38 +849,38 @@
 // tailcall stuff
 def : Pat<(X86tcret GR32_TC:$dst, imm:$off),
           (TCRETURNri GR32_TC:$dst, imm:$off)>,
-	  Requires<[In32BitMode]>;
+          Requires<[In32BitMode]>;
 
 // FIXME: This is disabled for 32-bit PIC mode because the global base
 // register which is part of the address mode may be assigned a
 // callee-saved register.
 def : Pat<(X86tcret (load addr:$dst), imm:$off),
           (TCRETURNmi addr:$dst, imm:$off)>,
-	  Requires<[In32BitMode, IsNotPIC]>;
+          Requires<[In32BitMode, IsNotPIC]>;
 
 def : Pat<(X86tcret (i32 tglobaladdr:$dst), imm:$off),
           (TCRETURNdi texternalsym:$dst, imm:$off)>,
-	  Requires<[In32BitMode]>;
+          Requires<[In32BitMode]>;
 
 def : Pat<(X86tcret (i32 texternalsym:$dst), imm:$off),
           (TCRETURNdi texternalsym:$dst, imm:$off)>,
-	  Requires<[In32BitMode]>;
+          Requires<[In32BitMode]>;
 
 def : Pat<(X86tcret GR64_TC:$dst, imm:$off),
           (TCRETURNri64 GR64_TC:$dst, imm:$off)>,
-	  Requires<[In64BitMode]>;
+          Requires<[In64BitMode]>;
 
 def : Pat<(X86tcret (load addr:$dst), imm:$off),
           (TCRETURNmi64 addr:$dst, imm:$off)>,
-	  Requires<[In64BitMode]>;
+          Requires<[In64BitMode]>;
 
 def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
           (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>,
-	  Requires<[In64BitMode]>;
+          Requires<[In64BitMode]>;
 
 def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
           (TCRETURNdi64 texternalsym:$dst, imm:$off)>,
-	  Requires<[In64BitMode]>;
+          Requires<[In64BitMode]>;
 
 // Normal calls, with various flavors of addresses.
 def : Pat<(X86call (i32 tglobaladdr:$dst)),
@@ -1661,4 +1661,3 @@
           (AND64ri8 GR64:$src1, i64immSExt8:$src2)>;
 def : Pat<(and GR64:$src1, i64immSExt32:$src2),
           (AND64ri32 GR64:$src1, i64immSExt32:$src2)>;
-

Modified: llvm/trunk/lib/Target/X86/X86InstrControl.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrControl.td?rev=124270&r1=124269&r2=124270&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrControl.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrControl.td Tue Jan 25 20:03:37 2011
@@ -1,10 +1,10 @@
 //===- X86InstrControl.td - Control Flow Instructions ------*- tablegen -*-===//
-// 
+//
 //                     The LLVM Compiler Infrastructure
 //
 // This file is distributed under the University of Illinois Open Source
 // License. See LICENSE.TXT for details.
-// 
+//
 //===----------------------------------------------------------------------===//
 //
 // This file describes the X86 jump, return, call, and related instructions.
@@ -43,7 +43,7 @@
                         "jmp\t$dst", [(br bb:$dst)]>;
   def JMP_1 : Ii8PCRel<0xEB, RawFrm, (outs), (ins brtarget8:$dst),
                        "jmp\t$dst", []>;
-  def JMP64pcrel32 : I<0xE9, RawFrm, (outs), (ins brtarget:$dst), 
+  def JMP64pcrel32 : I<0xE9, RawFrm, (outs), (ins brtarget:$dst),
                        "jmp{q}\t$dst", []>;
 }
 
@@ -108,16 +108,16 @@
   def JMP64m     : I<0xFF, MRM4m, (outs), (ins i64mem:$dst), "jmp{q}\t{*}$dst",
                      [(brind (loadi64 addr:$dst))]>, Requires<[In64BitMode]>;
 
-  def FARJMP16i  : Iseg16<0xEA, RawFrmImm16, (outs), 
+  def FARJMP16i  : Iseg16<0xEA, RawFrmImm16, (outs),
                           (ins i16imm:$off, i16imm:$seg),
                           "ljmp{w}\t{$seg, $off|$off, $seg}", []>, OpSize;
   def FARJMP32i  : Iseg32<0xEA, RawFrmImm16, (outs),
                           (ins i32imm:$off, i16imm:$seg),
-                          "ljmp{l}\t{$seg, $off|$off, $seg}", []>;                     
+                          "ljmp{l}\t{$seg, $off|$off, $seg}", []>;
   def FARJMP64   : RI<0xFF, MRM5m, (outs), (ins opaque80mem:$dst),
                       "ljmp{q}\t{*}$dst", []>;
 
-  def FARJMP16m  : I<0xFF, MRM5m, (outs), (ins opaque32mem:$dst), 
+  def FARJMP16m  : I<0xFF, MRM5m, (outs), (ins opaque32mem:$dst),
                      "ljmp{w}\t{*}$dst", []>, OpSize;
   def FARJMP32m  : I<0xFF, MRM5m, (outs), (ins opaque48mem:$dst),
                      "ljmp{l}\t{*}$dst", []>;
@@ -152,14 +152,14 @@
     def CALL32m     : I<0xFF, MRM2m, (outs), (ins i32mem:$dst, variable_ops),
                         "call{l}\t{*}$dst", [(X86call (loadi32 addr:$dst))]>,
                         Requires<[In32BitMode]>;
-  
-    def FARCALL16i  : Iseg16<0x9A, RawFrmImm16, (outs), 
+
+    def FARCALL16i  : Iseg16<0x9A, RawFrmImm16, (outs),
                              (ins i16imm:$off, i16imm:$seg),
                              "lcall{w}\t{$seg, $off|$off, $seg}", []>, OpSize;
     def FARCALL32i  : Iseg32<0x9A, RawFrmImm16, (outs),
                              (ins i32imm:$off, i16imm:$seg),
                              "lcall{l}\t{$seg, $off|$off, $seg}", []>;
-                             
+
     def FARCALL16m  : I<0xFF, MRM3m, (outs), (ins opaque32mem:$dst),
                         "lcall{w}\t{*}$dst", []>, OpSize;
     def FARCALL32m  : I<0xFF, MRM3m, (outs), (ins opaque48mem:$dst),
@@ -182,12 +182,12 @@
               XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
               XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
       Uses = [ESP] in {
-  def TCRETURNdi : PseudoI<(outs), 
+  def TCRETURNdi : PseudoI<(outs),
                      (ins i32imm_pcrel:$dst, i32imm:$offset, variable_ops), []>;
-  def TCRETURNri : PseudoI<(outs), 
+  def TCRETURNri : PseudoI<(outs),
                      (ins GR32_TC:$dst, i32imm:$offset, variable_ops), []>;
   let mayLoad = 1 in
-  def TCRETURNmi : PseudoI<(outs), 
+  def TCRETURNmi : PseudoI<(outs),
                      (ins i32mem_TC:$dst, i32imm:$offset, variable_ops), []>;
 
   // FIXME: The should be pseudo instructions that are lowered when going to
@@ -196,7 +196,7 @@
                            (ins i32imm_pcrel:$dst, variable_ops),
                  "jmp\t$dst  # TAILCALL",
                  []>;
-  def TAILJMPr : I<0xFF, MRM4r, (outs), (ins GR32_TC:$dst, variable_ops), 
+  def TAILJMPr : I<0xFF, MRM4r, (outs), (ins GR32_TC:$dst, variable_ops),
                    "", []>;  // FIXME: Remove encoding when JIT is dead.
   let mayLoad = 1 in
   def TAILJMPm : I<0xFF, MRM4m, (outs), (ins i32mem_TC:$dst, variable_ops),
@@ -218,7 +218,7 @@
               XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
               XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
       Uses = [RSP] in {
-      
+
     // NOTE: this pattern doesn't match "X86call imm", because we do not know
     // that the offset between an arbitrary immediate and the call will fit in
     // the 32-bit pcrel field that we have.
@@ -232,12 +232,12 @@
     def CALL64m       : I<0xFF, MRM2m, (outs), (ins i64mem:$dst, variable_ops),
                           "call{q}\t{*}$dst", [(X86call (loadi64 addr:$dst))]>,
                         Requires<[In64BitMode, NotWin64]>;
-                        
+
     def FARCALL64   : RI<0xFF, MRM3m, (outs), (ins opaque80mem:$dst),
                          "lcall{q}\t{*}$dst", []>;
   }
 
-  // FIXME: We need to teach codegen about single list of call-clobbered 
+  // FIXME: We need to teach codegen about single list of call-clobbered
   // registers.
 let isCall = 1, isCodeGenOnly = 1 in
   // All calls clobber the non-callee saved registers. RSP is marked as
@@ -256,10 +256,10 @@
     def WINCALL64r       : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops),
                              "call{q}\t{*}$dst",
                              [(X86call GR64:$dst)]>, Requires<[IsWin64]>;
-    def WINCALL64m       : I<0xFF, MRM2m, (outs), 
+    def WINCALL64m       : I<0xFF, MRM2m, (outs),
                               (ins i64mem:$dst,variable_ops),
                              "call{q}\t{*}$dst",
-                             [(X86call (loadi64 addr:$dst))]>, 
+                             [(X86call (loadi64 addr:$dst))]>,
                            Requires<[IsWin64]>;
   }
 
@@ -278,7 +278,7 @@
   def TCRETURNri64 : PseudoI<(outs),
                       (ins GR64_TC:$dst, i32imm:$offset, variable_ops), []>;
   let mayLoad = 1 in
-  def TCRETURNmi64 : PseudoI<(outs), 
+  def TCRETURNmi64 : PseudoI<(outs),
                        (ins i64mem_TC:$dst, i32imm:$offset, variable_ops), []>;
 
   def TAILJMPd64 : Ii32PCRel<0xE9, RawFrm, (outs),
@@ -291,4 +291,3 @@
   def TAILJMPm64 : I<0xFF, MRM4m, (outs), (ins i64mem_TC:$dst, variable_ops),
                      "jmp{q}\t{*}$dst  # TAILCALL", []>;
 }
-

Modified: llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrInfo.cpp?rev=124270&r1=124269&r2=124270&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86InstrInfo.cpp Tue Jan 25 20:03:37 2011
@@ -58,7 +58,7 @@
     TB_NOT_REVERSABLE = 1U << 31,
     TB_FLAGS = TB_NOT_REVERSABLE
   };
-      
+
   static const unsigned OpTbl2Addr[][2] = {
     { X86::ADC32ri,     X86::ADC32mi },
     { X86::ADC32ri8,    X86::ADC32mi8 },
@@ -231,16 +231,16 @@
     unsigned MemOp = OpTbl2Addr[i][1] & ~TB_FLAGS;
     assert(!RegOp2MemOpTable2Addr.count(RegOp) && "Duplicated entries?");
     RegOp2MemOpTable2Addr[RegOp] = std::make_pair(MemOp, 0U);
-    
+
     // If this is not a reversable operation (because there is a many->one)
     // mapping, don't insert the reverse of the operation into MemOp2RegOpTable.
     if (OpTbl2Addr[i][1] & TB_NOT_REVERSABLE)
       continue;
-                          
+
     // Index 0, folded load and store, no alignment requirement.
     unsigned AuxInfo = 0 | (1 << 4) | (1 << 5);
-    
-    assert(!MemOp2RegOpTable.count(MemOp) && 
+
+    assert(!MemOp2RegOpTable.count(MemOp) &&
             "Duplicated entries in unfolding maps?");
     MemOp2RegOpTable[MemOp] = std::make_pair(RegOp, AuxInfo);
   }
@@ -334,12 +334,12 @@
     unsigned Align      = OpTbl0[i][3];
     assert(!RegOp2MemOpTable0.count(RegOp) && "Duplicated entries?");
     RegOp2MemOpTable0[RegOp] = std::make_pair(MemOp, Align);
-    
+
     // If this is not a reversable operation (because there is a many->one)
     // mapping, don't insert the reverse of the operation into MemOp2RegOpTable.
     if (OpTbl0[i][1] & TB_NOT_REVERSABLE)
       continue;
-    
+
     // Index 0, folded load or store.
     unsigned AuxInfo = 0 | (FoldedLoad << 4) | ((FoldedLoad^1) << 5);
     assert(!MemOp2RegOpTable.count(MemOp) && "Duplicated entries?");
@@ -461,12 +461,12 @@
     unsigned Align = OpTbl1[i][2];
     assert(!RegOp2MemOpTable1.count(RegOp) && "Duplicate entries");
     RegOp2MemOpTable1[RegOp] = std::make_pair(MemOp, Align);
-    
+
     // If this is not a reversable operation (because there is a many->one)
     // mapping, don't insert the reverse of the operation into MemOp2RegOpTable.
     if (OpTbl1[i][1] & TB_NOT_REVERSABLE)
       continue;
-    
+
     // Index 1, folded load
     unsigned AuxInfo = 1 | (1 << 4);
     assert(!MemOp2RegOpTable.count(MemOp) && "Duplicate entries");
@@ -678,15 +678,15 @@
     unsigned RegOp = OpTbl2[i][0];
     unsigned MemOp = OpTbl2[i][1] & ~TB_FLAGS;
     unsigned Align = OpTbl2[i][2];
-    
+
     assert(!RegOp2MemOpTable2.count(RegOp) && "Duplicate entry!");
     RegOp2MemOpTable2[RegOp] = std::make_pair(MemOp, Align);
-    
+
     // If this is not a reversable operation (because there is a many->one)
     // mapping, don't insert the reverse of the operation into MemOp2RegOpTable.
     if (OpTbl2[i][1] & TB_NOT_REVERSABLE)
       continue;
-    
+
     // Index 2, folded load
     unsigned AuxInfo = 2 | (1 << 4);
     assert(!MemOp2RegOpTable.count(MemOp) &&
@@ -808,7 +808,7 @@
   return false;
 }
 
-unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr *MI, 
+unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                            int &FrameIndex) const {
   if (isFrameLoadOpcode(MI->getOpcode()))
     if (MI->getOperand(0).getSubReg() == 0 && isFrameOperand(MI, 1, FrameIndex))
@@ -816,7 +816,7 @@
   return 0;
 }
 
-unsigned X86InstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI, 
+unsigned X86InstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                                  int &FrameIndex) const {
   if (isFrameLoadOpcode(MI->getOpcode())) {
     unsigned Reg;
@@ -946,10 +946,10 @@
           isPICBase = true;
         }
         return isPICBase;
-      } 
+      }
       return false;
     }
- 
+
      case X86::LEA32r:
      case X86::LEA64r: {
        if (MI->getOperand(2).isImm() &&
@@ -1124,9 +1124,9 @@
   MachineRegisterInfo &RegInfo = MFI->getParent()->getRegInfo();
   unsigned leaInReg = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
   unsigned leaOutReg = RegInfo.createVirtualRegister(&X86::GR32RegClass);
-            
+
   // Build and insert into an implicit UNDEF value. This is OK because
-  // well be shifting and then extracting the lower 16-bits. 
+  // well be shifting and then extracting the lower 16-bits.
   // This has the potential to cause partial register stall. e.g.
   //   movw    (%rbp,%rcx,2), %dx
   //   leal    -65(%rdx), %esi
@@ -1162,7 +1162,7 @@
   case X86::ADD16ri8:
   case X86::ADD16ri_DB:
   case X86::ADD16ri8_DB:
-    addRegOffset(MIB, leaInReg, true, MI->getOperand(2).getImm());    
+    addRegOffset(MIB, leaInReg, true, MI->getOperand(2).getImm());
     break;
   case X86::ADD16rr:
   case X86::ADD16rr_DB: {
@@ -1177,7 +1177,7 @@
     } else {
       leaInReg2 = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
       // Build and insert into an implicit UNDEF value. This is OK because
-      // well be shifting and then extracting the lower 16-bits. 
+      // well be shifting and then extracting the lower 16-bits.
       BuildMI(*MFI, MIB, MI->getDebugLoc(), get(X86::IMPLICIT_DEF), leaInReg2);
       InsMI2 =
         BuildMI(*MFI, MIB, MI->getDebugLoc(), get(TargetOpcode::COPY))
@@ -1244,7 +1244,7 @@
   case X86::SHUFPSrri: {
     assert(MI->getNumOperands() == 4 && "Unknown shufps instruction!");
     if (!TM.getSubtarget<X86Subtarget>().hasSSE2()) return 0;
-    
+
     unsigned B = MI->getOperand(1).getReg();
     unsigned C = MI->getOperand(2).getReg();
     if (B != C) return 0;
@@ -1392,7 +1392,7 @@
         RC = X86::GR32_NOSPRegisterClass;
       }
 
-      
+
       unsigned Src2 = MI->getOperand(2).getReg();
       bool isKill2 = MI->getOperand(2).isKill();
 
@@ -1471,7 +1471,7 @@
       LV->replaceKillInstruction(Dest, MI, NewMI);
   }
 
-  MFI->insert(MBBI, NewMI);          // Insert the new inst    
+  MFI->insert(MBBI, NewMI);          // Insert the new inst
   return NewMI;
 }
 
@@ -1692,7 +1692,7 @@
 bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
   const TargetInstrDesc &TID = MI->getDesc();
   if (!TID.isTerminator()) return false;
-  
+
   // Conditional branch is a special case.
   if (TID.isBranch() && !TID.isBarrier())
     return true;
@@ -1701,7 +1701,7 @@
   return !isPredicated(MI);
 }
 
-bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, 
+bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                  MachineBasicBlock *&TBB,
                                  MachineBasicBlock *&FBB,
                                  SmallVectorImpl<MachineOperand> &Cond,
@@ -1862,7 +1862,7 @@
     I = MBB.end();
     ++Count;
   }
-  
+
   return Count;
 }
 
@@ -2177,7 +2177,7 @@
     MIB.addOperand(MOs[i]);
   if (NumAddrOps < 4)  // FrameIndex only
     addOffset(MIB, 0);
-  
+
   // Loop over the rest of the ri operands, converting them over.
   unsigned NumOps = MI->getDesc().getNumOperands()-2;
   for (unsigned i = 0; i != NumOps; ++i) {
@@ -2198,7 +2198,7 @@
   MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode),
                                               MI->getDebugLoc(), true);
   MachineInstrBuilder MIB(NewMI);
-  
+
   for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
     MachineOperand &MO = MI->getOperand(i);
     if (i == OpNo) {
@@ -2247,7 +2247,7 @@
   if (isTwoAddr && NumOps >= 2 && i < 2 &&
       MI->getOperand(0).isReg() &&
       MI->getOperand(1).isReg() &&
-      MI->getOperand(0).getReg() == MI->getOperand(1).getReg()) { 
+      MI->getOperand(0).getReg() == MI->getOperand(1).getReg()) {
     OpcodeTablePtr = &RegOp2MemOpTable2Addr;
     isTwoAddrFold = true;
   } else if (i == 0) { // If operand 0
@@ -2261,14 +2261,14 @@
       NewMI = MakeM0Inst(*this, X86::MOV8mi, MOs, MI);
     if (NewMI)
       return NewMI;
-    
+
     OpcodeTablePtr = &RegOp2MemOpTable0;
   } else if (i == 1) {
     OpcodeTablePtr = &RegOp2MemOpTable1;
   } else if (i == 2) {
     OpcodeTablePtr = &RegOp2MemOpTable2;
   }
-  
+
   // If table selected...
   if (OpcodeTablePtr) {
     // Find the Opcode to fuse
@@ -2316,8 +2316,8 @@
       return NewMI;
     }
   }
-  
-  // No fusion 
+
+  // No fusion
   if (PrintFailedFusing && !MI->isCopy())
     dbgs() << "We failed to fuse operand " << i << " in " << *MI;
   return NULL;
@@ -2328,7 +2328,7 @@
                                                   MachineInstr *MI,
                                            const SmallVectorImpl<unsigned> &Ops,
                                                   int FrameIndex) const {
-  // Check switch flag 
+  // Check switch flag
   if (NoFusing) return NULL;
 
   if (!MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize))
@@ -2380,7 +2380,7 @@
                                                   MachineInstr *MI,
                                            const SmallVectorImpl<unsigned> &Ops,
                                                   MachineInstr *LoadMI) const {
-  // Check switch flag 
+  // Check switch flag
   if (NoFusing) return NULL;
 
   if (!MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize))
@@ -2523,13 +2523,13 @@
 
 bool X86InstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
                                   const SmallVectorImpl<unsigned> &Ops) const {
-  // Check switch flag 
+  // Check switch flag
   if (NoFusing) return 0;
 
   if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
     switch (MI->getOpcode()) {
     default: return false;
-    case X86::TEST8rr: 
+    case X86::TEST8rr:
     case X86::TEST16rr:
     case X86::TEST32rr:
     case X86::TEST64rr:
@@ -2550,7 +2550,7 @@
   // instruction is different than folding it other places.  It requires
   // replacing the *two* registers with the memory location.
   const DenseMap<unsigned, std::pair<unsigned,unsigned> > *OpcodeTablePtr = 0;
-  if (isTwoAddr && NumOps >= 2 && OpNum < 2) { 
+  if (isTwoAddr && NumOps >= 2 && OpNum < 2) {
     OpcodeTablePtr = &RegOp2MemOpTable2Addr;
   } else if (OpNum == 0) { // If operand 0
     switch (Opc) {
@@ -2566,7 +2566,7 @@
   } else if (OpNum == 2) {
     OpcodeTablePtr = &RegOp2MemOpTable2;
   }
-  
+
   if (OpcodeTablePtr && OpcodeTablePtr->count(Opc))
     return true;
   return TargetInstrInfoImpl::canFoldMemoryOperand(MI, Ops);
@@ -2636,7 +2636,7 @@
   // Emit the data processing instruction.
   MachineInstr *DataMI = MF.CreateMachineInstr(TID, MI->getDebugLoc(), true);
   MachineInstrBuilder MIB(DataMI);
-  
+
   if (FoldedStore)
     MIB.addReg(Reg, RegState::Define);
   for (unsigned i = 0, e = BeforeOps.size(); i != e; ++i)
@@ -3156,11 +3156,11 @@
         PC = RegInfo.createVirtualRegister(X86::GR32RegisterClass);
       else
         PC = GlobalBaseReg;
-  
+
       // Operand of MovePCtoStack is completely ignored by asm printer. It's
       // only used in JIT code emission as displacement to pc.
       BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVPC32r), PC).addImm(0);
-  
+
       // If we're using vanilla 'GOT' PIC style, we should use relative addressing
       // not to pc, but to _GLOBAL_OFFSET_TABLE_ external.
       if (TM->getSubtarget<X86Subtarget>().isPICStyleGOT()) {

Modified: llvm/trunk/lib/Target/X86/X86InstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrInfo.td?rev=124270&r1=124269&r2=124270&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrInfo.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrInfo.td Tue Jan 25 20:03:37 2011
@@ -36,7 +36,7 @@
                                              SDTCisSameAs<0, 3>,
                                              SDTCisInt<0>, SDTCisVT<1, i32>]>;
 
-// SDTBinaryArithWithFlagsInOut - RES1, EFLAGS = op LHS, RHS, EFLAGS 
+// SDTBinaryArithWithFlagsInOut - RES1, EFLAGS = op LHS, RHS, EFLAGS
 def SDTBinaryArithWithFlagsInOut : SDTypeProfile<2, 3,
                                             [SDTCisSameAs<0, 2>,
                                              SDTCisSameAs<0, 3>,
@@ -1612,4 +1612,3 @@
 def : InstAlias<"xchgw $mem, $val", (XCHG16rm GR16:$val, i16mem:$mem)>;
 def : InstAlias<"xchgl $mem, $val", (XCHG32rm GR32:$val, i32mem:$mem)>;
 def : InstAlias<"xchgq $mem, $val", (XCHG64rm GR64:$val, i64mem:$mem)>;
-

Modified: llvm/trunk/lib/Target/X86/X86RegisterInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86RegisterInfo.td?rev=124270&r1=124269&r2=124270&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86RegisterInfo.td (original)
+++ llvm/trunk/lib/Target/X86/X86RegisterInfo.td Tue Jan 25 20:03:37 2011
@@ -1,10 +1,10 @@
 //===- X86RegisterInfo.td - Describe the X86 Register File --*- tablegen -*-==//
-// 
+//
 //                     The LLVM Compiler Infrastructure
 //
 // This file is distributed under the University of Illinois Open Source
 // License. See LICENSE.TXT for details.
-// 
+//
 //===----------------------------------------------------------------------===//
 //
 // This file describes the X86 Register file, defining the registers themselves,
@@ -34,8 +34,8 @@
   // because the register file generator is smart enough to figure out that
   // AL aliases AX if we tell it that AX aliased AL (for example).
 
-  // Dwarf numbering is different for 32-bit and 64-bit, and there are 
-  // variations by target as well. Currently the first entry is for X86-64, 
+  // Dwarf numbering is different for 32-bit and 64-bit, and there are
+  // variations by target as well. Currently the first entry is for X86-64,
   // second - for EH on X86-32/Darwin and third is 'generic' one (X86-32/Linux
   // and debug information on X86-32/Darwin)
 
@@ -81,7 +81,7 @@
   def SP : RegisterWithSubRegs<"sp", [SPL]>, DwarfRegNum<[7, 5, 4]>;
   }
   def IP : Register<"ip">, DwarfRegNum<[16]>;
-  
+
   // X86-64 only
   let SubRegIndices = [sub_8bit] in {
   def R8W  : RegisterWithSubRegs<"r8w", [R8B]>, DwarfRegNum<[8, -2, -2]>;
@@ -103,8 +103,8 @@
   def EDI : RegisterWithSubRegs<"edi", [DI]>, DwarfRegNum<[5, 7, 7]>;
   def EBP : RegisterWithSubRegs<"ebp", [BP]>, DwarfRegNum<[6, 4, 5]>;
   def ESP : RegisterWithSubRegs<"esp", [SP]>, DwarfRegNum<[7, 5, 4]>;
-  def EIP : RegisterWithSubRegs<"eip", [IP]>, DwarfRegNum<[16, 8, 8]>;  
-  
+  def EIP : RegisterWithSubRegs<"eip", [IP]>, DwarfRegNum<[16, 8, 8]>;
+
   // X86-64 only
   def R8D  : RegisterWithSubRegs<"r8d", [R8W]>, DwarfRegNum<[8, -2, -2]>;
   def R9D  : RegisterWithSubRegs<"r9d", [R9W]>, DwarfRegNum<[9, -2, -2]>;
@@ -208,7 +208,7 @@
   def ST4 : Register<"st(4)">, DwarfRegNum<[37, 16, 15]>;
   def ST5 : Register<"st(5)">, DwarfRegNum<[38, 17, 16]>;
   def ST6 : Register<"st(6)">, DwarfRegNum<[39, 18, 17]>;
-  def ST7 : Register<"st(7)">, DwarfRegNum<[40, 19, 18]>; 
+  def ST7 : Register<"st(7)">, DwarfRegNum<[40, 19, 18]>;
 
   // Status flags register
   def EFLAGS : Register<"flags">;
@@ -220,7 +220,7 @@
   def ES : Register<"es">;
   def FS : Register<"fs">;
   def GS : Register<"gs">;
-  
+
   // Debug registers
   def DR0 : Register<"dr0">;
   def DR1 : Register<"dr1">;
@@ -230,7 +230,7 @@
   def DR5 : Register<"dr5">;
   def DR6 : Register<"dr6">;
   def DR7 : Register<"dr7">;
-  
+
   // Control registers
   def CR0 : Register<"cr0">;
   def CR1 : Register<"cr1">;
@@ -261,10 +261,10 @@
 // implicitly defined to be the register allocation order.
 //
 
-// List call-clobbered registers before callee-save registers. RBX, RBP, (and 
+// List call-clobbered registers before callee-save registers. RBX, RBP, (and
 // R12, R13, R14, and R15 for X86-64) are callee-save registers.
 // In 64-mode, there are 12 additional i8 registers, SIL, DIL, BPL, SPL, and
-// R8B, ... R15B. 
+// R8B, ... R15B.
 // Allocate R12 and R13 last, as these require an extra byte when
 // encoded in x86_64 instructions.
 // FIXME: Allow AH, CH, DH, BH to be used as general-purpose registers in

Modified: llvm/trunk/test/CodeGen/X86/tailcallstack64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/tailcallstack64.ll?rev=124270&r1=124269&r2=124270&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/tailcallstack64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/tailcallstack64.ll Tue Jan 25 20:03:37 2011
@@ -22,4 +22,3 @@
         %retval = tail call fastcc i32 @tailcallee(i32 %p1, i32 %p2, i32 %p3, i32 %p4, i32 %p5, i32 %p6, i32 %in2,i32 %tmp)
         ret i32 %retval
 }
-

Modified: llvm/trunk/utils/TableGen/CodeGenInstruction.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/utils/TableGen/CodeGenInstruction.cpp?rev=124270&r1=124269&r2=124270&view=diff
==============================================================================
--- llvm/trunk/utils/TableGen/CodeGenInstruction.cpp (original)
+++ llvm/trunk/utils/TableGen/CodeGenInstruction.cpp Tue Jan 25 20:03:37 2011
@@ -28,15 +28,15 @@
   isPredicable = false;
   hasOptionalDef = false;
   isVariadic = false;
-  
+
   DagInit *OutDI = R->getValueAsDag("OutOperandList");
-  
+
   if (DefInit *Init = dynamic_cast<DefInit*>(OutDI->getOperator())) {
     if (Init->getDef()->getName() != "outs")
       throw R->getName() + ": invalid def name for output list: use 'outs'";
   } else
     throw R->getName() + ": invalid output list: use 'outs'";
-  
+
   NumDefs = OutDI->getNumArgs();
 
   DagInit *InDI = R->getValueAsDag("InOperandList");
@@ -45,7 +45,7 @@
       throw R->getName() + ": invalid def name for input list: use 'ins'";
   } else
     throw R->getName() + ": invalid input list: use 'ins'";
-  
+
   unsigned MIOperandNo = 0;
   std::set<std::string> OperandNames;
   for (unsigned i = 0, e = InDI->getNumArgs()+OutDI->getNumArgs(); i != e; ++i){
@@ -58,11 +58,11 @@
       ArgInit = InDI->getArg(i-NumDefs);
       ArgName = InDI->getArgName(i-NumDefs);
     }
-    
+
     DefInit *Arg = dynamic_cast<DefInit*>(ArgInit);
     if (!Arg)
       throw "Illegal operand for the '" + R->getName() + "' instruction!";
-    
+
     Record *Rec = Arg->getDef();
     std::string PrintMethod = "printOperand";
     std::string EncoderMethod;
@@ -73,19 +73,19 @@
       // If there is an explicit encoder method, use it.
       EncoderMethod = Rec->getValueAsString("EncoderMethod");
       MIOpInfo = Rec->getValueAsDag("MIOperandInfo");
-      
+
       // Verify that MIOpInfo has an 'ops' root value.
       if (!dynamic_cast<DefInit*>(MIOpInfo->getOperator()) ||
           dynamic_cast<DefInit*>(MIOpInfo->getOperator())
           ->getDef()->getName() != "ops")
         throw "Bad value for MIOperandInfo in operand '" + Rec->getName() +
         "'\n";
-      
+
       // If we have MIOpInfo, then we have #operands equal to number of entries
       // in MIOperandInfo.
       if (unsigned NumArgs = MIOpInfo->getNumArgs())
         NumOps = NumArgs;
-      
+
       if (Rec->isSubClassOf("PredicateOperand"))
         isPredicable = true;
       else if (Rec->isSubClassOf("OptionalDefOperand"))
@@ -97,7 +97,7 @@
                Rec->getName() != "ptr_rc" && Rec->getName() != "unknown")
       throw "Unknown operand class '" + Rec->getName() +
       "' in '" + R->getName() + "' instruction!";
-    
+
     // Check that the operand has a name and that it's unique.
     if (ArgName.empty())
       throw "In instruction '" + R->getName() + "', operand #" + utostr(i) +
@@ -105,13 +105,13 @@
     if (!OperandNames.insert(ArgName).second)
       throw "In instruction '" + R->getName() + "', operand #" + utostr(i) +
       " has the same name as a previous operand!";
-    
+
     OperandList.push_back(OperandInfo(Rec, ArgName, PrintMethod, EncoderMethod,
                                       MIOperandNo, NumOps, MIOpInfo));
     MIOperandNo += NumOps;
   }
-  
-  
+
+
   // Make sure the constraints list for each operand is large enough to hold
   // constraint info, even if none is present.
   for (unsigned i = 0, e = OperandList.size(); i != e; ++i)
@@ -126,7 +126,7 @@
 unsigned CGIOperandList::getOperandNamed(StringRef Name) const {
   unsigned OpIdx;
   if (hasOperandNamed(Name, OpIdx)) return OpIdx;
-  throw "'" + TheDef->getName() + "' does not have an operand named '$" + 
+  throw "'" + TheDef->getName() + "' does not have an operand named '$" +
     Name.str() + "'!";
 }
 
@@ -147,10 +147,10 @@
 CGIOperandList::ParseOperandName(const std::string &Op, bool AllowWholeOp) {
   if (Op.empty() || Op[0] != '$')
     throw TheDef->getName() + ": Illegal operand name: '" + Op + "'";
-  
+
   std::string OpName = Op.substr(1);
   std::string SubOpName;
-  
+
   // Check to see if this is $foo.bar.
   std::string::size_type DotIdx = OpName.find_first_of(".");
   if (DotIdx != std::string::npos) {
@@ -159,30 +159,30 @@
       throw TheDef->getName() + ": illegal empty suboperand name in '" +Op +"'";
     OpName = OpName.substr(0, DotIdx);
   }
-  
+
   unsigned OpIdx = getOperandNamed(OpName);
-  
+
   if (SubOpName.empty()) {  // If no suboperand name was specified:
     // If one was needed, throw.
     if (OperandList[OpIdx].MINumOperands > 1 && !AllowWholeOp &&
         SubOpName.empty())
       throw TheDef->getName() + ": Illegal to refer to"
       " whole operand part of complex operand '" + Op + "'";
-    
+
     // Otherwise, return the operand.
     return std::make_pair(OpIdx, 0U);
   }
-  
+
   // Find the suboperand number involved.
   DagInit *MIOpInfo = OperandList[OpIdx].MIOperandInfo;
   if (MIOpInfo == 0)
     throw TheDef->getName() + ": unknown suboperand name in '" + Op + "'";
-  
+
   // Find the operand with the right name.
   for (unsigned i = 0, e = MIOpInfo->getNumArgs(); i != e; ++i)
     if (MIOpInfo->getArgName(i) == SubOpName)
       return std::make_pair(OpIdx, i);
-  
+
   // Otherwise, didn't find it!
   throw TheDef->getName() + ": unknown suboperand name in '" + Op + "'";
 }
@@ -199,7 +199,7 @@
       throw "Illegal format for @earlyclobber constraint: '" + CStr + "'";
     Name = Name.substr(wpos);
     std::pair<unsigned,unsigned> Op = Ops.ParseOperandName(Name, false);
-    
+
     // Build the string for the operand
     if (!Ops[Op.first].Constraints[Op.second].isNone())
       throw "Operand '" + Name + "' cannot have multiple constraints!";
@@ -207,33 +207,33 @@
     CGIOperandList::ConstraintInfo::getEarlyClobber();
     return;
   }
-  
+
   // Only other constraint is "TIED_TO" for now.
   std::string::size_type pos = CStr.find_first_of('=');
   assert(pos != std::string::npos && "Unrecognized constraint");
   start = CStr.find_first_not_of(" \t");
   std::string Name = CStr.substr(start, pos - start);
-  
+
   // TIED_TO: $src1 = $dst
   wpos = Name.find_first_of(" \t");
   if (wpos == std::string::npos)
     throw "Illegal format for tied-to constraint: '" + CStr + "'";
   std::string DestOpName = Name.substr(0, wpos);
   std::pair<unsigned,unsigned> DestOp = Ops.ParseOperandName(DestOpName, false);
-  
+
   Name = CStr.substr(pos+1);
   wpos = Name.find_first_not_of(" \t");
   if (wpos == std::string::npos)
     throw "Illegal format for tied-to constraint: '" + CStr + "'";
-  
+
   std::pair<unsigned,unsigned> SrcOp =
   Ops.ParseOperandName(Name.substr(wpos), false);
   if (SrcOp > DestOp)
     throw "Illegal tied-to operand constraint '" + CStr + "'";
-  
-  
+
+
   unsigned FlatOpNo = Ops.getFlattenedOperandNumber(SrcOp);
-  
+
   if (!Ops[DestOp.first].Constraints[DestOp.second].isNone())
     throw "Operand '" + DestOpName + "' cannot have multiple constraints!";
   Ops[DestOp.first].Constraints[DestOp.second] =
@@ -242,16 +242,16 @@
 
 static void ParseConstraints(const std::string &CStr, CGIOperandList &Ops) {
   if (CStr.empty()) return;
-  
+
   const std::string delims(",");
   std::string::size_type bidx, eidx;
-  
+
   bidx = CStr.find_first_not_of(delims);
   while (bidx != std::string::npos) {
     eidx = CStr.find_first_of(delims, bidx);
     if (eidx == std::string::npos)
       eidx = CStr.length();
-    
+
     ParseConstraint(CStr.substr(bidx, eidx - bidx), Ops);
     bidx = CStr.find_first_not_of(delims, eidx);
   }
@@ -262,16 +262,16 @@
     std::string OpName;
     tie(OpName, DisableEncoding) = getToken(DisableEncoding, " ,\t");
     if (OpName.empty()) break;
-    
+
     // Figure out which operand this is.
     std::pair<unsigned,unsigned> Op = ParseOperandName(OpName, false);
-    
+
     // Mark the operand as not-to-be encoded.
     if (Op.second >= OperandList[Op.first].DoNotEncode.size())
       OperandList[Op.first].DoNotEncode.resize(Op.second+1);
     OperandList[Op.first].DoNotEncode[Op.second] = true;
   }
-  
+
 }
 
 //===----------------------------------------------------------------------===//
@@ -325,11 +325,11 @@
 MVT::SimpleValueType CodeGenInstruction::
 HasOneImplicitDefWithKnownVT(const CodeGenTarget &TargetInfo) const {
   if (ImplicitDefs.empty()) return MVT::Other;
-  
+
   // Check to see if the first implicit def has a resolvable type.
   Record *FirstImplicitDef = ImplicitDefs[0];
   assert(FirstImplicitDef->isSubClassOf("Register"));
-  const std::vector<MVT::SimpleValueType> &RegVTs = 
+  const std::vector<MVT::SimpleValueType> &RegVTs =
     TargetInfo.getRegisterVTs(FirstImplicitDef);
   if (RegVTs.size() == 1)
     return RegVTs[0];
@@ -342,7 +342,7 @@
 std::string CodeGenInstruction::
 FlattenAsmStringVariants(StringRef Cur, unsigned Variant) {
   std::string Res = "";
-  
+
   for (;;) {
     // Find the start of the next variant string.
     size_t VariantsStart = 0;
@@ -351,14 +351,14 @@
           (VariantsStart == 0 || (Cur[VariantsStart-1] != '$' &&
                                   Cur[VariantsStart-1] != '\\')))
         break;
-    
+
     // Add the prefix to the result.
     Res += Cur.slice(0, VariantsStart);
     if (VariantsStart == Cur.size())
       break;
-    
+
     ++VariantsStart; // Skip the '{'.
-    
+
     // Scan to the end of the variants string.
     size_t VariantsEnd = VariantsStart;
     unsigned NestedBraces = 1;
@@ -369,18 +369,18 @@
       } else if (Cur[VariantsEnd] == '{')
         ++NestedBraces;
     }
-    
+
     // Select the Nth variant (or empty).
     StringRef Selection = Cur.slice(VariantsStart, VariantsEnd);
     for (unsigned i = 0; i != Variant; ++i)
       Selection = Selection.split('|').second;
     Res += Selection.split('|').first;
-    
+
     assert(VariantsEnd != Cur.size() &&
            "Unterminated variants in assembly string!");
     Cur = Cur.substr(VariantsEnd + 1);
   }
-  
+
   return Res;
 }
 
@@ -399,7 +399,7 @@
     throw TGError(R->getLoc(), "result of inst alias should be an instruction");
 
   ResultInst = &T.getInstruction(DI->getDef());
-  
+
   // NameClass - If argument names are repeated, we need to verify they have
   // the same class.
   StringMap<Record*> NameClass;
@@ -417,7 +417,7 @@
                     ADI->getDef()->getName() + "!");
     Entry = ADI->getDef();
   }
-    
+
   // Decode and validate the arguments of the result.
   unsigned AliasOpNo = 0;
   for (unsigned i = 0, e = ResultInst->Operands.size(); i != e; ++i) {
@@ -430,8 +430,8 @@
                     " arguments, but " + ResultInst->TheDef->getName() +
                     " instruction expects " +
                     utostr(ResultInst->Operands.size()) + " operands!");
-    
-    
+
+
     Init *Arg = Result->getArg(AliasOpNo);
     Record *ResultOpRec = ResultInst->Operands[i].Rec;
 
@@ -441,16 +441,16 @@
         if (!Result->getArgName(AliasOpNo).empty())
           throw TGError(R->getLoc(), "result fixed register argument must "
                         "not have a name!");
-        
+
         if (!ResultOpRec->isSubClassOf("RegisterClass"))
           throw TGError(R->getLoc(), "result fixed register argument is not "
                         "passed to a RegisterClass operand!");
-        
+
         if (!T.getRegisterClass(ResultOpRec).containsRegister(ADI->getDef()))
           throw TGError(R->getLoc(), "fixed register " +ADI->getDef()->getName()
                         + " is not a member of the " + ResultOpRec->getName() +
                         " register class!");
-                                                                              
+
         // Now that it is validated, add it.
         ResultOperands.push_back(ResultOperand(ADI->getDef()));
         ResultInstOperandIndex.push_back(i);
@@ -474,7 +474,7 @@
         continue;
       }
     }
-    
+
     // If the operand is a record, it must have a name, and the record type must
     // match up with the instruction's argument type.
     if (DefInit *ADI = dynamic_cast<DefInit*>(Arg)) {
@@ -485,9 +485,9 @@
       if (ADI->getDef() != ResultOpRec)
         throw TGError(R->getLoc(), "result argument #" + utostr(AliasOpNo) +
                       " declared with class " + ADI->getDef()->getName() +
-                      ", instruction operand is class " + 
+                      ", instruction operand is class " +
                       ResultOpRec->getName());
-      
+
       // Now that it is validated, add it.
       ResultOperands.push_back(ResultOperand(Result->getArgName(AliasOpNo),
                                              ADI->getDef()));
@@ -495,7 +495,7 @@
       ++AliasOpNo;
       continue;
     }
-    
+
     if (IntInit *II = dynamic_cast<IntInit*>(Arg)) {
       // Integer arguments can't have names.
       if (!Result->getArgName(AliasOpNo).empty())
@@ -503,7 +503,7 @@
                       " must not have a name!");
       if (ResultInst->Operands[i].MINumOperands != 1 ||
           !ResultOpRec->isSubClassOf("Operand"))
-        throw TGError(R->getLoc(), "invalid argument class " + 
+        throw TGError(R->getLoc(), "invalid argument class " +
                       ResultOpRec->getName() +
                       " for integer result operand!");
       ResultOperands.push_back(ResultOperand(II->getValue()));
@@ -514,7 +514,7 @@
 
     throw TGError(R->getLoc(), "result of inst alias has unknown operand type");
   }
-  
+
   if (AliasOpNo != Result->getNumArgs())
     throw TGError(R->getLoc(), "result has " + utostr(Result->getNumArgs()) +
                   " arguments, but " + ResultInst->TheDef->getName() +
