[llvm-branch-commits] [llvm-branch] r107653 - in /llvm/branches/wendling/eh: ./ docs/ include/llvm/ include/llvm/Support/ include/llvm/Target/ lib/CodeGen/ lib/CodeGen/SelectionDAG/ lib/Target/CellSPU/ lib/Target/X86/ lib/Transforms/Scalar/ test/CodeGen/Generic/ test/CodeGen/PowerPC/ test/CodeGen/X86/
Bill Wendling
isanbard at gmail.com
Tue Jul 6 04:42:41 PDT 2010
Author: void
Date: Tue Jul 6 06:42:41 2010
New Revision: 107653
URL: http://llvm.org/viewvc/llvm-project?rev=107653&view=rev
Log:
Up-to-date merge with trunk (through r107652).
Added:
llvm/branches/wendling/eh/test/CodeGen/PowerPC/stack-protector.ll
- copied unchanged from r107652, llvm/trunk/test/CodeGen/PowerPC/stack-protector.ll
llvm/branches/wendling/eh/test/CodeGen/X86/stack-protector-linux.ll
- copied unchanged from r107652, llvm/trunk/test/CodeGen/X86/stack-protector-linux.ll
llvm/branches/wendling/eh/test/CodeGen/X86/v2f32.ll
- copied unchanged from r107652, llvm/trunk/test/CodeGen/X86/v2f32.ll
Removed:
llvm/branches/wendling/eh/test/CodeGen/Generic/stack-protector.ll
Modified:
llvm/branches/wendling/eh/ (props changed)
llvm/branches/wendling/eh/docs/Passes.html
llvm/branches/wendling/eh/include/llvm/Instructions.h
llvm/branches/wendling/eh/include/llvm/Support/CallSite.h
llvm/branches/wendling/eh/include/llvm/Target/TargetLowering.h
llvm/branches/wendling/eh/lib/CodeGen/MachineFunction.cpp
llvm/branches/wendling/eh/lib/CodeGen/MachineInstr.cpp
llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/TargetLowering.cpp
llvm/branches/wendling/eh/lib/CodeGen/StackProtector.cpp
llvm/branches/wendling/eh/lib/Target/CellSPU/SPUFrameInfo.h
llvm/branches/wendling/eh/lib/Target/CellSPU/SPUISelLowering.cpp
llvm/branches/wendling/eh/lib/Target/X86/README-SSE.txt
llvm/branches/wendling/eh/lib/Target/X86/X86CallingConv.td
llvm/branches/wendling/eh/lib/Target/X86/X86ISelLowering.cpp
llvm/branches/wendling/eh/lib/Target/X86/X86ISelLowering.h
llvm/branches/wendling/eh/lib/Target/X86/X86InstrMMX.td
llvm/branches/wendling/eh/lib/Target/X86/X86InstrSSE.td
llvm/branches/wendling/eh/lib/Target/X86/X86MCCodeEmitter.cpp
llvm/branches/wendling/eh/lib/Target/X86/X86RegisterInfo.td
llvm/branches/wendling/eh/lib/Target/X86/X86Subtarget.cpp
llvm/branches/wendling/eh/lib/Target/X86/X86Subtarget.h
llvm/branches/wendling/eh/lib/Transforms/Scalar/SimplifyLibCalls.cpp
Propchange: llvm/branches/wendling/eh/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Jul 6 06:42:41 2010
@@ -1 +1 @@
-/llvm/trunk:104459-107580
+/llvm/trunk:104459-107652
Modified: llvm/branches/wendling/eh/docs/Passes.html
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/docs/Passes.html?rev=107653&r1=107652&r2=107653&view=diff
==============================================================================
--- llvm/branches/wendling/eh/docs/Passes.html (original)
+++ llvm/branches/wendling/eh/docs/Passes.html Tue Jul 6 06:42:41 2010
@@ -128,7 +128,6 @@
<tr><td><a href="#globaldce">-globaldce</a></td><td>Dead Global Elimination</td></tr>
<tr><td><a href="#globalopt">-globalopt</a></td><td>Global Variable Optimizer</td></tr>
<tr><td><a href="#gvn">-gvn</a></td><td>Global Value Numbering</td></tr>
-<tr><td><a href="#indmemrem">-indmemrem</a></td><td>Indirect Malloc and Free Removal</td></tr>
<tr><td><a href="#indvars">-indvars</a></td><td>Canonicalize Induction Variables</td></tr>
<tr><td><a href="#inline">-inline</a></td><td>Function Integration/Inlining</td></tr>
<tr><td><a href="#insert-block-profiling">-insert-block-profiling</a></td><td>Insert instrumentation for block profiling</td></tr>
@@ -152,7 +151,6 @@
<tr><td><a href="#loop-unroll">-loop-unroll</a></td><td>Unroll loops</td></tr>
<tr><td><a href="#loop-unswitch">-loop-unswitch</a></td><td>Unswitch loops</td></tr>
<tr><td><a href="#loopsimplify">-loopsimplify</a></td><td>Canonicalize natural loops</td></tr>
-<tr><td><a href="#lowerallocs">-lowerallocs</a></td><td>Lower allocations from instructions to calls</td></tr>
<tr><td><a href="#lowerinvoke">-lowerinvoke</a></td><td>Lower invoke and unwind, for unwindless code generators</td></tr>
<tr><td><a href="#lowersetjmp">-lowersetjmp</a></td><td>Lower Set Jump</td></tr>
<tr><td><a href="#lowerswitch">-lowerswitch</a></td><td>Lower SwitchInst's to branches</td></tr>
@@ -831,23 +829,6 @@
<!-------------------------------------------------------------------------- -->
<div class="doc_subsection">
- <a name="indmemrem">Indirect Malloc and Free Removal</a>
-</div>
-<div class="doc_text">
- <p>
- This pass finds places where memory allocation functions may escape into
- indirect land. Some transforms are much easier (aka possible) only if free
- or malloc are not called indirectly.
- </p>
-
- <p>
- Thus find places where the address of memory functions are taken and construct
- bounce functions with direct calls of those functions.
- </p>
-</div>
-
-<!-------------------------------------------------------------------------- -->
-<div class="doc_subsection">
<a name="indvars">Canonicalize Induction Variables</a>
</div>
<div class="doc_text">
@@ -1329,22 +1310,6 @@
<!-------------------------------------------------------------------------- -->
<div class="doc_subsection">
- <a name="lowerallocs">Lower allocations from instructions to calls</a>
-</div>
-<div class="doc_text">
- <p>
- Turn <tt>malloc</tt> and <tt>free</tt> instructions into <tt>@malloc</tt> and
- <tt>@free</tt> calls.
- </p>
-
- <p>
- This is a target-dependent tranformation because it depends on the size of
- data types and alignment constraints.
- </p>
-</div>
-
-<!-------------------------------------------------------------------------- -->
-<div class="doc_subsection">
<a name="lowerinvoke">Lower invoke and unwind, for unwindless code generators</a>
</div>
<div class="doc_text">
Modified: llvm/branches/wendling/eh/include/llvm/Instructions.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Instructions.h?rev=107653&r1=107652&r2=107653&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Instructions.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Instructions.h Tue Jul 6 06:42:41 2010
@@ -940,24 +940,8 @@
unsigned(isTC));
}
- /// @deprecated these "define hacks" will go away soon
- /// @brief coerce out-of-tree code to abandon the low-level interfaces
- /// @detail see below comments and update your code to high-level interfaces
- /// in LLVM v2.8-only code
- /// - getOperand(N+1) ---> getArgOperand(N)
- /// - setOperand(N+1, V) ---> setArgOperand(N, V)
- /// - getNumOperands() ---> getNumArgOperands()+1 // note the "+1"!
- ///
- /// in backward compatible code please consult llvm/Support/CallSite.h,
- /// you should create a callsite using the CallInst pointer and call its methods
- ///
-# define public private
-# define protected private
/// Provide fast operand accessors
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
-# undef public
-# undef protected
-public:
enum { ArgOffset = 1 }; ///< temporary, do not use for new code!
unsigned getNumArgOperands() const { return getNumOperands() - 1; }
@@ -967,7 +951,7 @@
/// Provide compile-time errors for accessing operand 0
/// @deprecated these will go away soon
/// @detail see below comments and update your code to high-level interfaces
- /// - getOperand(0) ---> getCalledValue(), or possibly getCalledFunction()
+ /// - getOperand(0) ---> getCalledValue()
/// - setOperand(0, V) ---> setCalledFunction(V)
///
private:
@@ -1009,7 +993,7 @@
/// @brief Return true if the call should not be inlined.
bool isNoInline() const { return paramHasAttr(~0, Attribute::NoInline); }
- void setIsNoInline(bool Value) {
+ void setIsNoInline(bool Value = true) {
if (Value) addAttribute(~0, Attribute::NoInline);
else removeAttribute(~0, Attribute::NoInline);
}
@@ -1033,18 +1017,14 @@
}
/// @brief Determine if the call cannot return.
- bool doesNotReturn() const {
- return paramHasAttr(~0, Attribute::NoReturn);
- }
+ bool doesNotReturn() const { return paramHasAttr(~0, Attribute::NoReturn); }
void setDoesNotReturn(bool DoesNotReturn = true) {
if (DoesNotReturn) addAttribute(~0, Attribute::NoReturn);
else removeAttribute(~0, Attribute::NoReturn);
}
/// @brief Determine if the call cannot unwind.
- bool doesNotThrow() const {
- return paramHasAttr(~0, Attribute::NoUnwind);
- }
+ bool doesNotThrow() const { return paramHasAttr(~0, Attribute::NoUnwind); }
void setDoesNotThrow(bool DoesNotThrow = true) {
if (DoesNotThrow) addAttribute(~0, Attribute::NoUnwind);
else removeAttribute(~0, Attribute::NoUnwind);
@@ -1123,10 +1103,6 @@
typename std::iterator_traits<InputIterator>::iterator_category());
}
-
-// Note: if you get compile errors about private methods then
-// please update your code to use the high-level operand
-// interfaces. See line 943 above.
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CallInst, Value)
//===----------------------------------------------------------------------===//
@@ -2530,11 +2506,11 @@
/// @brief Return true if the call should not be inlined.
bool isNoInline() const { return paramHasAttr(~0, Attribute::NoInline); }
- void setIsNoInline(bool Value) {
+ void setIsNoInline(bool Value = true) {
if (Value) addAttribute(~0, Attribute::NoInline);
else removeAttribute(~0, Attribute::NoInline);
}
-
+
/// @brief Determine if the call does not access memory.
bool doesNotAccessMemory() const {
return paramHasAttr(~0, Attribute::ReadNone);
@@ -2554,18 +2530,14 @@
}
/// @brief Determine if the call cannot return.
- bool doesNotReturn() const {
- return paramHasAttr(~0, Attribute::NoReturn);
- }
+ bool doesNotReturn() const { return paramHasAttr(~0, Attribute::NoReturn); }
void setDoesNotReturn(bool DoesNotReturn = true) {
if (DoesNotReturn) addAttribute(~0, Attribute::NoReturn);
else removeAttribute(~0, Attribute::NoReturn);
}
/// @brief Determine if the call cannot unwind.
- bool doesNotThrow() const {
- return paramHasAttr(~0, Attribute::NoUnwind);
- }
+ bool doesNotThrow() const { return paramHasAttr(~0, Attribute::NoUnwind); }
void setDoesNotThrow(bool DoesNotThrow = true) {
if (DoesNotThrow) addAttribute(~0, Attribute::NoUnwind);
else removeAttribute(~0, Attribute::NoUnwind);
Modified: llvm/branches/wendling/eh/include/llvm/Support/CallSite.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Support/CallSite.h?rev=107653&r1=107652&r2=107653&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Support/CallSite.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Support/CallSite.h Tue Jul 6 06:42:41 2010
@@ -204,9 +204,9 @@
CALLSITE_DELEGATE_GETTER(isNoInline());
}
void setIsNoInline(bool Value = true) {
- CALLSITE_DELEGATE_GETTER(setIsNoInline(Value));
+ CALLSITE_DELEGATE_SETTER(setIsNoInline(Value));
}
-
+
/// @brief Determine if the call does not access memory.
bool doesNotAccessMemory() const {
CALLSITE_DELEGATE_GETTER(doesNotAccessMemory());
Modified: llvm/branches/wendling/eh/include/llvm/Target/TargetLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Target/TargetLowering.h?rev=107653&r1=107652&r2=107653&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Target/TargetLowering.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Target/TargetLowering.h Tue Jul 6 06:42:41 2010
@@ -749,6 +749,14 @@
/// getFunctionAlignment - Return the Log2 alignment of this function.
virtual unsigned getFunctionAlignment(const Function *) const = 0;
+ /// getStackCookieLocation - Return true if the target stores stack
+ /// protector cookies at a fixed offset in some non-standard address
+ /// space, and populates the address space and offset as
+ /// appropriate.
+ virtual bool getStackCookieLocation(unsigned &AddressSpace, unsigned &Offset) const {
+ return false;
+ }
+
//===--------------------------------------------------------------------===//
// TargetLowering Optimization Methods
//
Modified: llvm/branches/wendling/eh/lib/CodeGen/MachineFunction.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/MachineFunction.cpp?rev=107653&r1=107652&r2=107653&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/MachineFunction.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/MachineFunction.cpp Tue Jul 6 06:42:41 2010
@@ -457,7 +457,13 @@
int MachineFrameInfo::CreateFixedObject(uint64_t Size, int64_t SPOffset,
bool Immutable) {
assert(Size != 0 && "Cannot allocate zero size fixed stack objects!");
- Objects.insert(Objects.begin(), StackObject(Size, 1, SPOffset, Immutable,
+ // The alignment of the frame index can be determined from its offset from
+ // the incoming frame position. If the frame object is at offset 32 and
+ // the stack is guaranteed to be 16-byte aligned, then we know that the
+ // object is 16-byte aligned.
+ unsigned StackAlign = TFI.getStackAlignment();
+ unsigned Align = MinAlign(SPOffset, StackAlign);
+ Objects.insert(Objects.begin(), StackObject(Size, Align, SPOffset, Immutable,
/*isSS*/false));
return -++NumFixedObjects;
}
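
[Editor's note, not part of the patch: the comment above only gestures at how the alignment falls out of the offset. CreateFixedObject now derives a conservative alignment via MinAlign(SPOffset, StackAlign). A minimal standalone sketch, assuming the helper behaves like llvm::MinAlign from Support/MathExtras.h (reimplemented here purely for illustration):]

#include <stdio.h>

// Sketch only: assumed to match llvm::MinAlign, i.e. the largest power of
// two that divides both arguments -- the alignment we can still guarantee
// for an object at offset SPOffset on a StackAlign-aligned stack.
static unsigned MinAlign(unsigned A, unsigned B) {
  return (A | B) & (~(A | B) + 1);   // lowest set bit of A|B
}

int main() {
  // With a 16-byte aligned stack (StackAlign == 16):
  printf("%u\n", MinAlign(32, 16));  // 16: offset 32 keeps 16-byte alignment
  printf("%u\n", MinAlign(12, 16));  // 4:  offset 12 only guarantees 4 bytes
  printf("%u\n", MinAlign(0, 16));   // 16: offset 0 is as aligned as the stack
  return 0;
}
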
Modified: llvm/branches/wendling/eh/lib/CodeGen/MachineInstr.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/MachineInstr.cpp?rev=107653&r1=107652&r2=107653&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/MachineInstr.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/MachineInstr.cpp Tue Jul 6 06:42:41 2010
@@ -1305,6 +1305,8 @@
OS << "!\"" << MDS->getString() << '\"';
else
MO.print(OS, TM);
+ } else if (TM && (isInsertSubreg() || isRegSequence()) && MO.isImm()) {
+ OS << TM->getRegisterInfo()->getSubRegIndexName(MO.getImm());
} else
MO.print(OS, TM);
}
Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAG.cpp?rev=107653&r1=107652&r2=107653&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAG.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAG.cpp Tue Jul 6 06:42:41 2010
@@ -6262,23 +6262,6 @@
const MachineFrameInfo &MFI = *getMachineFunction().getFrameInfo();
unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
FrameOffset);
- if (MFI.isFixedObjectIndex(FrameIdx)) {
- int64_t ObjectOffset = MFI.getObjectOffset(FrameIdx) + FrameOffset;
-
- // The alignment of the frame index can be determined from its offset from
- // the incoming frame position. If the frame object is at offset 32 and
- // the stack is guaranteed to be 16-byte aligned, then we know that the
- // object is 16-byte aligned.
- unsigned StackAlign = getTarget().getFrameInfo()->getStackAlignment();
- unsigned Align = MinAlign(ObjectOffset, StackAlign);
-
- // Finally, the frame object itself may have a known alignment. Factor
- // the alignment + offset into a new alignment. For example, if we know
- // the FI is 8 byte aligned, but the pointer is 4 off, we really have a
- // 4-byte alignment of the resultant pointer. Likewise align 4 + 4-byte
- // offset = 4-byte alignment, align 4 + 1-byte offset = align 1, etc.
- return std::max(Align, FIInfoAlign);
- }
return FIInfoAlign;
}
Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp?rev=107653&r1=107652&r2=107653&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp Tue Jul 6 06:42:41 2010
@@ -4882,11 +4882,16 @@
void SelectionDAGBuilder::visitCall(const CallInst &I) {
+ // Handle inline assembly differently.
+ if (isa<InlineAsm>(I.getCalledValue())) {
+ visitInlineAsm(&I);
+ return;
+ }
+
const char *RenameFn = 0;
if (Function *F = I.getCalledFunction()) {
if (F->isDeclaration()) {
- const TargetIntrinsicInfo *II = TM.getIntrinsicInfo();
- if (II) {
+ if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo()) {
if (unsigned IID = II->getIntrinsicID(F)) {
RenameFn = visitIntrinsicCall(I, IID);
if (!RenameFn)
@@ -4959,11 +4964,8 @@
return;
}
}
- } else if (isa<InlineAsm>(I.getCalledValue())) {
- visitInlineAsm(&I);
- return;
}
-
+
SDValue Callee;
if (!RenameFn)
Callee = getValue(I.getCalledValue());
Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp?rev=107653&r1=107652&r2=107653&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp Tue Jul 6 06:42:41 2010
@@ -1586,7 +1586,7 @@
SDValue(Res, ResNumResults-1));
if ((EmitNodeInfo & OPFL_FlagOutput) != 0)
- --ResNumResults;
+ --ResNumResults;
// Move the chain reference if needed.
if ((EmitNodeInfo & OPFL_Chain) && OldChainResultNo != -1 &&
Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/TargetLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/TargetLowering.cpp?rev=107653&r1=107652&r2=107653&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/TargetLowering.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/TargetLowering.cpp Tue Jul 6 06:42:41 2010
@@ -609,9 +609,9 @@
static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
- unsigned &NumIntermediates,
- EVT &RegisterVT,
- TargetLowering* TLI) {
+ unsigned &NumIntermediates,
+ EVT &RegisterVT,
+ TargetLowering *TLI) {
// Figure out the right, legal destination reg to copy into.
unsigned NumElts = VT.getVectorNumElements();
MVT EltTy = VT.getVectorElementType();
@@ -641,16 +641,12 @@
EVT DestVT = TLI->getRegisterType(NewVT);
RegisterVT = DestVT;
- if (EVT(DestVT).bitsLT(NewVT)) {
- // Value is expanded, e.g. i64 -> i16.
+ if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
return NumVectorRegs*(NewVT.getSizeInBits()/DestVT.getSizeInBits());
- } else {
- // Otherwise, promotion or legal types use the same number of registers as
- // the vector decimated to the appropriate level.
- return NumVectorRegs;
- }
- return 1;
+ // Otherwise, promotion or legal types use the same number of registers as
+ // the vector decimated to the appropriate level.
+ return NumVectorRegs;
}
/// computeRegisterProperties - Once all of the register classes are added,
@@ -736,39 +732,39 @@
for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
MVT VT = (MVT::SimpleValueType)i;
- if (!isTypeLegal(VT)) {
- MVT IntermediateVT;
- EVT RegisterVT;
- unsigned NumIntermediates;
- NumRegistersForVT[i] =
- getVectorTypeBreakdownMVT(VT, IntermediateVT, NumIntermediates,
- RegisterVT, this);
- RegisterTypeForVT[i] = RegisterVT;
-
- // Determine if there is a legal wider type.
- bool IsLegalWiderType = false;
- EVT EltVT = VT.getVectorElementType();
- unsigned NElts = VT.getVectorNumElements();
- for (unsigned nVT = i+1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
- EVT SVT = (MVT::SimpleValueType)nVT;
- if (isTypeSynthesizable(SVT) && SVT.getVectorElementType() == EltVT &&
- SVT.getVectorNumElements() > NElts && NElts != 1) {
- TransformToType[i] = SVT;
- ValueTypeActions.setTypeAction(VT, Promote);
- IsLegalWiderType = true;
- break;
- }
+ if (isTypeLegal(VT)) continue;
+
+ MVT IntermediateVT;
+ EVT RegisterVT;
+ unsigned NumIntermediates;
+ NumRegistersForVT[i] =
+ getVectorTypeBreakdownMVT(VT, IntermediateVT, NumIntermediates,
+ RegisterVT, this);
+ RegisterTypeForVT[i] = RegisterVT;
+
+ // Determine if there is a legal wider type.
+ bool IsLegalWiderType = false;
+ EVT EltVT = VT.getVectorElementType();
+ unsigned NElts = VT.getVectorNumElements();
+ for (unsigned nVT = i+1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
+ EVT SVT = (MVT::SimpleValueType)nVT;
+ if (isTypeSynthesizable(SVT) && SVT.getVectorElementType() == EltVT &&
+ SVT.getVectorNumElements() > NElts && NElts != 1) {
+ TransformToType[i] = SVT;
+ ValueTypeActions.setTypeAction(VT, Promote);
+ IsLegalWiderType = true;
+ break;
}
- if (!IsLegalWiderType) {
- EVT NVT = VT.getPow2VectorType();
- if (NVT == VT) {
- // Type is already a power of 2. The default action is to split.
- TransformToType[i] = MVT::Other;
- ValueTypeActions.setTypeAction(VT, Expand);
- } else {
- TransformToType[i] = NVT;
- ValueTypeActions.setTypeAction(VT, Promote);
- }
+ }
+ if (!IsLegalWiderType) {
+ EVT NVT = VT.getPow2VectorType();
+ if (NVT == VT) {
+ // Type is already a power of 2. The default action is to split.
+ TransformToType[i] = MVT::Other;
+ ValueTypeActions.setTypeAction(VT, Expand);
+ } else {
+ TransformToType[i] = NVT;
+ ValueTypeActions.setTypeAction(VT, Promote);
}
}
}
Modified: llvm/branches/wendling/eh/lib/CodeGen/StackProtector.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/StackProtector.cpp?rev=107653&r1=107652&r2=107653&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/StackProtector.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/StackProtector.cpp Tue Jul 6 06:42:41 2010
@@ -136,7 +136,7 @@
bool StackProtector::InsertStackProtectors() {
BasicBlock *FailBB = 0; // The basic block to jump to if check fails.
AllocaInst *AI = 0; // Place on stack that stores the stack guard.
- Constant *StackGuardVar = 0; // The stack guard variable.
+ Value *StackGuardVar = 0; // The stack guard variable.
for (Function::iterator I = F->begin(), E = F->end(); I != E; ) {
BasicBlock *BB = I++;
@@ -155,7 +155,20 @@
//
PointerType *PtrTy = PointerType::getUnqual(
Type::getInt8Ty(RI->getContext()));
- StackGuardVar = M->getOrInsertGlobal("__stack_chk_guard", PtrTy);
+
+ unsigned AddressSpace, Offset;
+ if (TLI->getStackCookieLocation(AddressSpace, Offset)) {
+ Constant *ASPtr = Constant::getNullValue(
+ PointerType::get(Type::getInt8Ty(RI->getContext()), AddressSpace));
+ APInt OffsetInt(32, Offset);
+ Constant *OffsetVal = Constant::getIntegerValue(
+ Type::getInt32Ty(RI->getContext()), OffsetInt);
+ StackGuardVar = ConstantExpr::getPointerCast(
+ ConstantExpr::getGetElementPtr(ASPtr, &OffsetVal, 1),
+ PointerType::get(PtrTy, AddressSpace));
+ } else {
+ StackGuardVar = M->getOrInsertGlobal("__stack_chk_guard", PtrTy);
+ }
BasicBlock &Entry = F->getEntryBlock();
Instruction *InsPt = &Entry.front();
Modified: llvm/branches/wendling/eh/lib/Target/CellSPU/SPUFrameInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/CellSPU/SPUFrameInfo.h?rev=107653&r1=107652&r2=107653&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/CellSPU/SPUFrameInfo.h (original)
+++ llvm/branches/wendling/eh/lib/Target/CellSPU/SPUFrameInfo.h Tue Jul 6 06:42:41 2010
@@ -53,10 +53,6 @@
static int minStackSize() {
return (2 * stackSlotSize());
}
- //! Frame size required to spill all registers plus frame info
- static int fullSpillSize() {
- return (SPURegisterInfo::getNumArgRegs() * stackSlotSize());
- }
//! Convert frame index to stack offset
static int FItoStackOffset(int frame_index) {
return frame_index * stackSlotSize();
Modified: llvm/branches/wendling/eh/lib/Target/CellSPU/SPUISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/CellSPU/SPUISelLowering.cpp?rev=107653&r1=107652&r2=107653&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/CellSPU/SPUISelLowering.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/CellSPU/SPUISelLowering.cpp Tue Jul 6 06:42:41 2010
@@ -1179,24 +1179,8 @@
case MVT::i32:
case MVT::i64:
case MVT::i128:
- if (ArgRegIdx != NumArgRegs) {
- RegsToPass.push_back(std::make_pair(ArgRegs[ArgRegIdx++], Arg));
- } else {
- MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, NULL, 0,
- false, false, 0));
- ArgOffset += StackSlotSize;
- }
- break;
case MVT::f32:
case MVT::f64:
- if (ArgRegIdx != NumArgRegs) {
- RegsToPass.push_back(std::make_pair(ArgRegs[ArgRegIdx++], Arg));
- } else {
- MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, NULL, 0,
- false, false, 0));
- ArgOffset += StackSlotSize;
- }
- break;
case MVT::v2i64:
case MVT::v2f64:
case MVT::v4f32:
Modified: llvm/branches/wendling/eh/lib/Target/X86/README-SSE.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/README-SSE.txt?rev=107653&r1=107652&r2=107653&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/README-SSE.txt (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/README-SSE.txt Tue Jul 6 06:42:41 2010
@@ -846,3 +846,34 @@
doing a shuffle from v[1] to v[0] then a float store.
//===---------------------------------------------------------------------===//
+
+On SSE4 machines, we compile this code:
+
+define <2 x float> @test2(<2 x float> %Q, <2 x float> %R,
+ <2 x float> *%P) nounwind {
+ %Z = fadd <2 x float> %Q, %R
+
+ store <2 x float> %Z, <2 x float> *%P
+ ret <2 x float> %Z
+}
+
+into:
+
+_test2: ## @test2
+## BB#0:
+ insertps $0, %xmm2, %xmm2
+ insertps $16, %xmm3, %xmm2
+ insertps $0, %xmm0, %xmm3
+ insertps $16, %xmm1, %xmm3
+ addps %xmm2, %xmm3
+ movq %xmm3, (%rdi)
+ movaps %xmm3, %xmm0
+ pshufd $1, %xmm3, %xmm1
+ ## kill: XMM1<def> XMM1<kill>
+ ret
+
+The insertps's of $0 are pointless complex copies.
+
+//===---------------------------------------------------------------------===//
+
+
Modified: llvm/branches/wendling/eh/lib/Target/X86/X86CallingConv.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/X86CallingConv.td?rev=107653&r1=107652&r2=107653&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/X86CallingConv.td (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/X86CallingConv.td Tue Jul 6 06:42:41 2010
@@ -42,7 +42,7 @@
// MMX vector types are always returned in MM0. If the target doesn't have
// MM0, it doesn't support these vector types.
- CCIfType<[v8i8, v4i16, v2i32, v1i64, v2f32], CCAssignToReg<[MM0]>>,
+ CCIfType<[v8i8, v4i16, v2i32, v1i64], CCAssignToReg<[MM0]>>,
// Long double types are always returned in ST0 (even with SSE).
CCIfType<[f80], CCAssignToReg<[ST0, ST1]>>
@@ -89,7 +89,7 @@
// returned in RAX. This disagrees with ABI documentation but is bug
// compatible with gcc.
CCIfType<[v1i64], CCAssignToReg<[RAX]>>,
- CCIfType<[v8i8, v4i16, v2i32, v2f32], CCAssignToReg<[XMM0, XMM1]>>,
+ CCIfType<[v8i8, v4i16, v2i32], CCAssignToReg<[XMM0, XMM1]>>,
CCDelegateTo<RetCC_X86Common>
]>;
@@ -155,7 +155,7 @@
// The first 8 MMX (except for v1i64) vector arguments are passed in XMM
// registers on Darwin.
- CCIfType<[v8i8, v4i16, v2i32, v2f32],
+ CCIfType<[v8i8, v4i16, v2i32],
CCIfSubtarget<"isTargetDarwin()",
CCIfSubtarget<"hasSSE2()",
CCPromoteToType<v2i64>>>>,
@@ -177,7 +177,7 @@
CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>,
// __m64 vectors get 8-byte stack slots that are 8-byte aligned.
- CCIfType<[v8i8, v4i16, v2i32, v1i64, v2f32], CCAssignToStack<8, 8>>
+ CCIfType<[v8i8, v4i16, v2i32, v1i64], CCAssignToStack<8, 8>>
]>;
// Calling convention used on Win64
@@ -195,7 +195,7 @@
CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCPassIndirect<i64>>,
// The first 4 MMX vector arguments are passed in GPRs.
- CCIfType<[v8i8, v4i16, v2i32, v1i64, v2f32],
+ CCIfType<[v8i8, v4i16, v2i32, v1i64],
CCBitConvertToType<i64>>,
// The first 4 integer arguments are passed in integer registers.
@@ -254,7 +254,7 @@
// The first 3 __m64 (except for v1i64) vector arguments are passed in mmx
// registers if the call is not a vararg call.
- CCIfNotVarArg<CCIfType<[v8i8, v4i16, v2i32, v2f32],
+ CCIfNotVarArg<CCIfType<[v8i8, v4i16, v2i32],
CCAssignToReg<[MM0, MM1, MM2]>>>,
// Integer/Float values get stored in stack slots that are 4 bytes in
Modified: llvm/branches/wendling/eh/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/X86ISelLowering.cpp?rev=107653&r1=107652&r2=107653&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/X86ISelLowering.cpp Tue Jul 6 06:42:41 2010
@@ -62,21 +62,19 @@
SDValue V2);
static TargetLoweringObjectFile *createTLOF(X86TargetMachine &TM) {
- switch (TM.getSubtarget<X86Subtarget>().TargetType) {
- default: llvm_unreachable("unknown subtarget type");
- case X86Subtarget::isDarwin:
- if (TM.getSubtarget<X86Subtarget>().is64Bit())
- return new X8664_MachoTargetObjectFile();
+
+ bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();
+
+ if (TM.getSubtarget<X86Subtarget>().isTargetDarwin()) {
+ if (is64Bit) return new X8664_MachoTargetObjectFile();
return new TargetLoweringObjectFileMachO();
- case X86Subtarget::isELF:
- if (TM.getSubtarget<X86Subtarget>().is64Bit())
- return new X8664_ELFTargetObjectFile(TM);
+ } else if (TM.getSubtarget<X86Subtarget>().isTargetELF() ){
+ if (is64Bit) return new X8664_ELFTargetObjectFile(TM);
return new X8632_ELFTargetObjectFile(TM);
- case X86Subtarget::isMingw:
- case X86Subtarget::isCygwin:
- case X86Subtarget::isWindows:
+ } else if (TM.getSubtarget<X86Subtarget>().isTargetCOFF()) {
return new TargetLoweringObjectFileCOFF();
- }
+ }
+ llvm_unreachable("unknown subtarget type");
}
X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
@@ -617,7 +615,7 @@
addRegisterClass(MVT::v8i8, X86::VR64RegisterClass, false);
addRegisterClass(MVT::v4i16, X86::VR64RegisterClass, false);
addRegisterClass(MVT::v2i32, X86::VR64RegisterClass, false);
- addRegisterClass(MVT::v2f32, X86::VR64RegisterClass, false);
+
addRegisterClass(MVT::v1i64, X86::VR64RegisterClass, false);
setOperationAction(ISD::ADD, MVT::v8i8, Legal);
@@ -663,14 +661,11 @@
AddPromotedToType (ISD::LOAD, MVT::v4i16, MVT::v1i64);
setOperationAction(ISD::LOAD, MVT::v2i32, Promote);
AddPromotedToType (ISD::LOAD, MVT::v2i32, MVT::v1i64);
- setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
- AddPromotedToType (ISD::LOAD, MVT::v2f32, MVT::v1i64);
setOperationAction(ISD::LOAD, MVT::v1i64, Legal);
setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Custom);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v2f32, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8, Custom);
@@ -678,7 +673,6 @@
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i32, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1i64, Custom);
- setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f32, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Custom);
@@ -697,7 +691,6 @@
setOperationAction(ISD::BIT_CONVERT, MVT::v8i8, Custom);
setOperationAction(ISD::BIT_CONVERT, MVT::v4i16, Custom);
setOperationAction(ISD::BIT_CONVERT, MVT::v2i32, Custom);
- setOperationAction(ISD::BIT_CONVERT, MVT::v2f32, Custom);
setOperationAction(ISD::BIT_CONVERT, MVT::v1i64, Custom);
}
}
@@ -798,9 +791,8 @@
EVT VT = SVT;
// Do not attempt to promote non-128-bit vectors
- if (!VT.is128BitVector()) {
+ if (!VT.is128BitVector())
continue;
- }
setOperationAction(ISD::AND, SVT, Promote);
AddPromotedToType (ISD::AND, SVT, MVT::v2i64);
@@ -1197,6 +1189,27 @@
return F->hasFnAttr(Attribute::OptimizeForSize) ? 0 : 4;
}
+bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
+ unsigned &Offset) const {
+ if (!Subtarget->isTargetLinux())
+ return false;
+
+ if (Subtarget->is64Bit()) {
+ // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs:
+ Offset = 0x28;
+ if (getTargetMachine().getCodeModel() == CodeModel::Kernel)
+ AddressSpace = 256;
+ else
+ AddressSpace = 257;
+ } else {
+ // %gs:0x14 on i386
+ Offset = 0x14;
+ AddressSpace = 256;
+ }
+ return true;
+}
+
+
//===----------------------------------------------------------------------===//
// Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//
@@ -4443,7 +4456,7 @@
}
/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
-/// ones, or rewriting v4i32 / v2f32 as 2 wide ones if possible. This can be
+/// ones, or rewriting v4i32 / v2i32 as 2 wide ones if possible. This can be
/// done when every pair / quad of shuffle mask elements point to elements in
/// the right sequence. e.g.
/// vector_shuffle <>, <>, < 3, 4, | 10, 11, | 0, 1, | 14, 15>
@@ -5068,13 +5081,9 @@
SDValue
X86TargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const {
DebugLoc dl = Op.getDebugLoc();
- if (Op.getValueType() == MVT::v2f32)
- return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f32,
- DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i32,
- DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32,
- Op.getOperand(0))));
-
- if (Op.getValueType() == MVT::v1i64 && Op.getOperand(0).getValueType() == MVT::i64)
+
+ if (Op.getValueType() == MVT::v1i64 &&
+ Op.getOperand(0).getValueType() == MVT::i64)
return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0));
SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
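
[Editor's note, not part of the patch: the X86TargetLowering::getStackCookieLocation override added above reports the glibc TLS stack-guard slot -- %fs:0x28 on x86-64 Linux (LLVM address space 257, or %gs / address space 256 under the kernel code model) and %gs:0x14 on i386 (address space 256). A small standalone sketch that reads the same slot with GCC/Clang inline assembly on a Linux host, offered only to show what the generated guard load ends up referencing:]

#include <stdio.h>

int main() {
  unsigned long Guard = 0;
#if defined(__x86_64__)
  // Address space 257 is LLVM's x86 encoding of the %fs segment.
  asm("movq %%fs:0x28, %0" : "=r"(Guard));
#elif defined(__i386__)
  // Address space 256 is LLVM's x86 encoding of the %gs segment.
  asm("movl %%gs:0x14, %0" : "=r"(Guard));
#endif
  printf("stack guard: %#lx\n", Guard);
  return 0;
}
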
Modified: llvm/branches/wendling/eh/lib/Target/X86/X86ISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/X86ISelLowering.h?rev=107653&r1=107652&r2=107653&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/X86ISelLowering.h (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/X86ISelLowering.h Tue Jul 6 06:42:41 2010
@@ -593,6 +593,12 @@
/// getFunctionAlignment - Return the Log2 alignment of this function.
virtual unsigned getFunctionAlignment(const Function *F) const;
+ /// getStackCookieLocation - Return true if the target stores stack
+ /// protector cookies at a fixed offset in some non-standard address
+ /// space, and populates the address space and offset as
+ /// appropriate.
+ virtual bool getStackCookieLocation(unsigned &AddressSpace, unsigned &Offset) const;
+
private:
/// Subtarget - Keep a pointer to the X86Subtarget around so that we can
/// make the right decision when generating code for different targets.
Modified: llvm/branches/wendling/eh/lib/Target/X86/X86InstrMMX.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/X86InstrMMX.td?rev=107653&r1=107652&r2=107653&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/X86InstrMMX.td (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/X86InstrMMX.td Tue Jul 6 06:42:41 2010
@@ -513,30 +513,20 @@
(MMX_MOVQ64mr addr:$dst, VR64:$src)>;
def : Pat<(store (v2i32 VR64:$src), addr:$dst),
(MMX_MOVQ64mr addr:$dst, VR64:$src)>;
-def : Pat<(store (v2f32 VR64:$src), addr:$dst),
- (MMX_MOVQ64mr addr:$dst, VR64:$src)>;
def : Pat<(store (v1i64 VR64:$src), addr:$dst),
(MMX_MOVQ64mr addr:$dst, VR64:$src)>;
// Bit convert.
def : Pat<(v8i8 (bitconvert (v1i64 VR64:$src))), (v8i8 VR64:$src)>;
def : Pat<(v8i8 (bitconvert (v2i32 VR64:$src))), (v8i8 VR64:$src)>;
-def : Pat<(v8i8 (bitconvert (v2f32 VR64:$src))), (v8i8 VR64:$src)>;
def : Pat<(v8i8 (bitconvert (v4i16 VR64:$src))), (v8i8 VR64:$src)>;
def : Pat<(v4i16 (bitconvert (v1i64 VR64:$src))), (v4i16 VR64:$src)>;
def : Pat<(v4i16 (bitconvert (v2i32 VR64:$src))), (v4i16 VR64:$src)>;
-def : Pat<(v4i16 (bitconvert (v2f32 VR64:$src))), (v4i16 VR64:$src)>;
def : Pat<(v4i16 (bitconvert (v8i8 VR64:$src))), (v4i16 VR64:$src)>;
def : Pat<(v2i32 (bitconvert (v1i64 VR64:$src))), (v2i32 VR64:$src)>;
-def : Pat<(v2i32 (bitconvert (v2f32 VR64:$src))), (v2i32 VR64:$src)>;
def : Pat<(v2i32 (bitconvert (v4i16 VR64:$src))), (v2i32 VR64:$src)>;
def : Pat<(v2i32 (bitconvert (v8i8 VR64:$src))), (v2i32 VR64:$src)>;
-def : Pat<(v2f32 (bitconvert (v1i64 VR64:$src))), (v2f32 VR64:$src)>;
-def : Pat<(v2f32 (bitconvert (v2i32 VR64:$src))), (v2f32 VR64:$src)>;
-def : Pat<(v2f32 (bitconvert (v4i16 VR64:$src))), (v2f32 VR64:$src)>;
-def : Pat<(v2f32 (bitconvert (v8i8 VR64:$src))), (v2f32 VR64:$src)>;
def : Pat<(v1i64 (bitconvert (v2i32 VR64:$src))), (v1i64 VR64:$src)>;
-def : Pat<(v1i64 (bitconvert (v2f32 VR64:$src))), (v1i64 VR64:$src)>;
def : Pat<(v1i64 (bitconvert (v4i16 VR64:$src))), (v1i64 VR64:$src)>;
def : Pat<(v1i64 (bitconvert (v8i8 VR64:$src))), (v1i64 VR64:$src)>;
@@ -545,8 +535,6 @@
(MMX_MOVD64to64rr GR64:$src)>;
def : Pat<(v2i32 (bitconvert (i64 GR64:$src))),
(MMX_MOVD64to64rr GR64:$src)>;
-def : Pat<(v2f32 (bitconvert (i64 GR64:$src))),
- (MMX_MOVD64to64rr GR64:$src)>;
def : Pat<(v4i16 (bitconvert (i64 GR64:$src))),
(MMX_MOVD64to64rr GR64:$src)>;
def : Pat<(v8i8 (bitconvert (i64 GR64:$src))),
@@ -555,8 +543,6 @@
(MMX_MOVD64from64rr VR64:$src)>;
def : Pat<(i64 (bitconvert (v2i32 VR64:$src))),
(MMX_MOVD64from64rr VR64:$src)>;
-def : Pat<(i64 (bitconvert (v2f32 VR64:$src))),
- (MMX_MOVD64from64rr VR64:$src)>;
def : Pat<(i64 (bitconvert (v4i16 VR64:$src))),
(MMX_MOVD64from64rr VR64:$src)>;
def : Pat<(i64 (bitconvert (v8i8 VR64:$src))),
Modified: llvm/branches/wendling/eh/lib/Target/X86/X86InstrSSE.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/X86InstrSSE.td?rev=107653&r1=107652&r2=107653&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/X86InstrSSE.td (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/X86InstrSSE.td Tue Jul 6 06:42:41 2010
@@ -3584,10 +3584,6 @@
(PALIGNR64rr VR64:$src2, VR64:$src1,
(SHUFFLE_get_palign_imm VR64:$src3))>,
Requires<[HasSSSE3]>;
-def : Pat<(v2f32 (palign:$src3 VR64:$src1, VR64:$src2)),
- (PALIGNR64rr VR64:$src2, VR64:$src1,
- (SHUFFLE_get_palign_imm VR64:$src3))>,
- Requires<[HasSSSE3]>;
def : Pat<(v4i16 (palign:$src3 VR64:$src1, VR64:$src2)),
(PALIGNR64rr VR64:$src2, VR64:$src1,
(SHUFFLE_get_palign_imm VR64:$src3))>,
Modified: llvm/branches/wendling/eh/lib/Target/X86/X86MCCodeEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/X86MCCodeEmitter.cpp?rev=107653&r1=107652&r2=107653&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/X86MCCodeEmitter.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/X86MCCodeEmitter.cpp Tue Jul 6 06:42:41 2010
@@ -655,8 +655,9 @@
/// EmitOpcodePrefix - Emit all instruction prefixes prior to the opcode.
void X86MCCodeEmitter::EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
- const MCInst &MI, const TargetInstrDesc &Desc,
- raw_ostream &OS) const {
+ const MCInst &MI,
+ const TargetInstrDesc &Desc,
+ raw_ostream &OS) const {
// Emit the lock opcode prefix as needed.
if (TSFlags & X86II::LOCK)
Modified: llvm/branches/wendling/eh/lib/Target/X86/X86RegisterInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/X86RegisterInfo.td?rev=107653&r1=107652&r2=107653&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/X86RegisterInfo.td (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/X86RegisterInfo.td Tue Jul 6 06:42:41 2010
@@ -780,7 +780,7 @@
}
// Generic vector registers: VR64 and VR128.
-def VR64 : RegisterClass<"X86", [v8i8, v4i16, v2i32, v1i64, v2f32], 64,
+def VR64 : RegisterClass<"X86", [v8i8, v4i16, v2i32, v1i64], 64,
[MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7]>;
def VR128 : RegisterClass<"X86", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],128,
[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
Modified: llvm/branches/wendling/eh/lib/Target/X86/X86Subtarget.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/X86Subtarget.cpp?rev=107653&r1=107652&r2=107653&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/X86Subtarget.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/X86Subtarget.cpp Tue Jul 6 06:42:41 2010
@@ -296,12 +296,11 @@
, IsBTMemSlow(false)
, IsUAMemFast(false)
, HasVectorUAMem(false)
- , DarwinVers(0)
, stackAlignment(8)
// FIXME: this is a known good value for Yonah. How about others?
, MaxInlineSizeThreshold(128)
- , Is64Bit(is64Bit)
- , TargetType(isELF) { // Default to ELF unless otherwise specified.
+ , TargetTriple(TT)
+ , Is64Bit(is64Bit) {
// default to hard float ABI
if (FloatABIType == FloatABI::Default)
@@ -331,45 +330,15 @@
HasCMov = true;
}
-
DEBUG(dbgs() << "Subtarget features: SSELevel " << X86SSELevel
<< ", 3DNowLevel " << X863DNowLevel
<< ", 64bit " << HasX86_64 << "\n");
assert((!Is64Bit || HasX86_64) &&
"64-bit code requested on a subtarget that doesn't support it!");
- // Set the boolean corresponding to the current target triple, or the default
- // if one cannot be determined, to true.
- if (TT.length() > 5) {
- size_t Pos;
- if ((Pos = TT.find("-darwin")) != std::string::npos) {
- TargetType = isDarwin;
-
- // Compute the darwin version number.
- if (isdigit(TT[Pos+7]))
- DarwinVers = atoi(&TT[Pos+7]);
- else
- DarwinVers = 8; // Minimum supported darwin is Tiger.
- } else if (TT.find("linux") != std::string::npos) {
- // Linux doesn't imply ELF, but we don't currently support anything else.
- TargetType = isELF;
- } else if (TT.find("cygwin") != std::string::npos) {
- TargetType = isCygwin;
- } else if (TT.find("mingw") != std::string::npos) {
- TargetType = isMingw;
- } else if (TT.find("win32") != std::string::npos) {
- TargetType = isWindows;
- } else if (TT.find("windows") != std::string::npos) {
- TargetType = isWindows;
- } else if (TT.find("-cl") != std::string::npos) {
- TargetType = isDarwin;
- DarwinVers = 9;
- }
- }
-
// Stack alignment is 16 bytes on Darwin (both 32 and 64 bit) and for all 64
// bit targets.
- if (TargetType == isDarwin || Is64Bit)
+ if (isTargetDarwin() || Is64Bit)
stackAlignment = 16;
if (StackAlignment)
Modified: llvm/branches/wendling/eh/lib/Target/X86/X86Subtarget.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/X86Subtarget.h?rev=107653&r1=107652&r2=107653&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/X86Subtarget.h (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/X86Subtarget.h Tue Jul 6 06:42:41 2010
@@ -14,6 +14,7 @@
#ifndef X86SUBTARGET_H
#define X86SUBTARGET_H
+#include "llvm/ADT/Triple.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/CallingConv.h"
#include <string>
@@ -89,10 +90,6 @@
/// operands. This may require setting a feature bit in the processor.
bool HasVectorUAMem;
- /// DarwinVers - Nonzero if this is a darwin platform: the numeric
- /// version of the platform, e.g. 8 = 10.4 (Tiger), 9 = 10.5 (Leopard), etc.
- unsigned char DarwinVers; // Is any darwin-x86 platform.
-
/// stackAlignment - The minimum alignment known to hold of the stack frame on
/// entry to the function and which must be maintained by every function.
unsigned stackAlignment;
@@ -100,6 +97,9 @@
/// Max. memset / memcpy size that is turned into rep/movs, rep/stos ops.
///
unsigned MaxInlineSizeThreshold;
+
+ /// TargetTriple - What processor and OS we're targeting.
+ Triple TargetTriple;
private:
/// Is64Bit - True if the processor supports 64-bit instructions and
@@ -107,9 +107,6 @@
bool Is64Bit;
public:
- enum {
- isELF, isCygwin, isDarwin, isWindows, isMingw
- } TargetType;
/// This constructor initializes the data members to match that
/// of the specified triple.
@@ -158,24 +155,31 @@
bool isUnalignedMemAccessFast() const { return IsUAMemFast; }
bool hasVectorUAMem() const { return HasVectorUAMem; }
- bool isTargetDarwin() const { return TargetType == isDarwin; }
- bool isTargetELF() const { return TargetType == isELF; }
-
- bool isTargetWindows() const { return TargetType == isWindows; }
- bool isTargetMingw() const { return TargetType == isMingw; }
- bool isTargetCygwin() const { return TargetType == isCygwin; }
+ bool isTargetDarwin() const { return TargetTriple.getOS() == Triple::Darwin; }
+
+ // ELF is a reasonably sane default and the only other X86 targets we
+ // support are Darwin and Windows. Just use "not those".
+ bool isTargetELF() const {
+ return !isTargetDarwin() && !isTargetWindows() && !isTargetCygMing();
+ }
+ bool isTargetLinux() const { return TargetTriple.getOS() == Triple::Linux; }
+
+ bool isTargetWindows() const { return TargetTriple.getOS() == Triple::Win32; }
+ bool isTargetMingw() const {
+ return TargetTriple.getOS() == Triple::MinGW32 ||
+ TargetTriple.getOS() == Triple::MinGW64; }
+ bool isTargetCygwin() const { return TargetTriple.getOS() == Triple::Cygwin; }
bool isTargetCygMing() const {
- return TargetType == isMingw || TargetType == isCygwin;
+ return isTargetMingw() || isTargetCygwin();
}
-
+
/// isTargetCOFF - Return true if this is any COFF/Windows target variant.
bool isTargetCOFF() const {
- return TargetType == isMingw || TargetType == isCygwin ||
- TargetType == isWindows;
+ return isTargetMingw() || isTargetCygwin() || isTargetWindows();
}
bool isTargetWin64() const {
- return Is64Bit && (TargetType == isMingw || TargetType == isWindows);
+ return Is64Bit && (isTargetMingw() || isTargetWindows());
}
std::string getDataLayout() const {
@@ -209,7 +213,10 @@
/// getDarwinVers - Return the darwin version number, 8 = Tiger, 9 = Leopard,
/// 10 = Snow Leopard, etc.
- unsigned getDarwinVers() const { return DarwinVers; }
+ unsigned getDarwinVers() const {
+ if (isTargetDarwin()) return TargetTriple.getDarwinMajorNumber();
+ return 0;
+ }
/// ClassifyGlobalReference - Classify a global variable reference for the
/// current subtarget according to how we should reference it in a non-pcrel
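
[Editor's note, not part of the patch: the X86Subtarget rewrite above drops the ad-hoc string matching and the TargetType enum in favor of an llvm::Triple member, so the isTarget*() predicates now just inspect Triple::getOS(). A minimal standalone sketch of how those queries resolve -- the two triples are arbitrary examples, it assumes the Triple API as of this revision, and it must be linked against LLVM's support libraries:]

#include "llvm/ADT/Triple.h"
#include <stdio.h>
using namespace llvm;

int main() {
  Triple Darwin("x86_64-apple-darwin10");   // example Darwin triple
  Triple Linux("i686-pc-linux-gnu");        // example Linux triple

  printf("isTargetDarwin: %d / %d\n",
         Darwin.getOS() == Triple::Darwin, Linux.getOS() == Triple::Darwin);
  printf("isTargetLinux:  %d / %d\n",
         Darwin.getOS() == Triple::Linux, Linux.getOS() == Triple::Linux);
  // getDarwinVers() now defers to the triple as well.
  printf("Darwin major version: %u\n", Darwin.getDarwinMajorNumber());
  return 0;
}
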
Modified: llvm/branches/wendling/eh/lib/Transforms/Scalar/SimplifyLibCalls.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Transforms/Scalar/SimplifyLibCalls.cpp?rev=107653&r1=107652&r2=107653&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Transforms/Scalar/SimplifyLibCalls.cpp (original)
+++ llvm/branches/wendling/eh/lib/Transforms/Scalar/SimplifyLibCalls.cpp Tue Jul 6 06:42:41 2010
@@ -279,7 +279,7 @@
// Verify the "strcmp" function prototype.
const FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 2 ||
- !FT->getReturnType()->isIntegerTy(32) ||
+ !FT->getReturnType()->isIntegerTy(32) ||
FT->getParamType(0) != FT->getParamType(1) ||
FT->getParamType(0) != Type::getInt8PtrTy(*Context))
return 0;
@@ -327,7 +327,7 @@
// Verify the "strncmp" function prototype.
const FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 3 ||
- !FT->getReturnType()->isIntegerTy(32) ||
+ !FT->getReturnType()->isIntegerTy(32) ||
FT->getParamType(0) != FT->getParamType(1) ||
FT->getParamType(0) != Type::getInt8PtrTy(*Context) ||
!FT->getParamType(2)->isIntegerTy())
@@ -682,7 +682,7 @@
// memset(p, v, n) -> llvm.memset(p, v, n, 1)
Value *Val = B.CreateIntCast(CI->getArgOperand(1), Type::getInt8Ty(*Context),
- false);
+ false);
EmitMemSet(CI->getArgOperand(0), Val, CI->getArgOperand(2), false, B, TD);
return CI->getArgOperand(0);
}
@@ -766,11 +766,11 @@
if (SIToFPInst *OpC = dyn_cast<SIToFPInst>(Op)) {
if (OpC->getOperand(0)->getType()->getPrimitiveSizeInBits() <= 32)
LdExpArg = B.CreateSExt(OpC->getOperand(0),
- Type::getInt32Ty(*Context), "tmp");
+ Type::getInt32Ty(*Context), "tmp");
} else if (UIToFPInst *OpC = dyn_cast<UIToFPInst>(Op)) {
if (OpC->getOperand(0)->getType()->getPrimitiveSizeInBits() < 32)
LdExpArg = B.CreateZExt(OpC->getOperand(0),
- Type::getInt32Ty(*Context), "tmp");
+ Type::getInt32Ty(*Context), "tmp");
}
if (LdExpArg) {
@@ -836,7 +836,7 @@
// Just make sure this has 2 arguments of the same FP type, which match the
// result type.
if (FT->getNumParams() != 1 ||
- !FT->getReturnType()->isIntegerTy(32) ||
+ !FT->getReturnType()->isIntegerTy(32) ||
!FT->getParamType(0)->isIntegerTy())
return 0;
@@ -860,7 +860,7 @@
Value *Cond = B.CreateICmpNE(Op, Constant::getNullValue(ArgType), "tmp");
return B.CreateSelect(Cond, V,
- ConstantInt::get(Type::getInt32Ty(*Context), 0));
+ ConstantInt::get(Type::getInt32Ty(*Context), 0));
}
};
Removed: llvm/branches/wendling/eh/test/CodeGen/Generic/stack-protector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Generic/stack-protector.ll?rev=107652&view=auto
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Generic/stack-protector.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Generic/stack-protector.ll (removed)
@@ -1,25 +0,0 @@
-; RUN: llc < %s -o - | grep {__stack_chk_guard}
-; RUN: llc < %s -o - | grep {__stack_chk_fail}
-
-@"\01LC" = internal constant [11 x i8] c"buf == %s\0A\00" ; <[11 x i8]*> [#uses=1]
-
-define void @test(i8* %a) nounwind ssp {
-entry:
- %a_addr = alloca i8* ; <i8**> [#uses=2]
- %buf = alloca [8 x i8] ; <[8 x i8]*> [#uses=2]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store i8* %a, i8** %a_addr
- %buf1 = bitcast [8 x i8]* %buf to i8* ; <i8*> [#uses=1]
- %0 = load i8** %a_addr, align 4 ; <i8*> [#uses=1]
- %1 = call i8* @strcpy(i8* %buf1, i8* %0) nounwind ; <i8*> [#uses=0]
- %buf2 = bitcast [8 x i8]* %buf to i8* ; <i8*> [#uses=1]
- %2 = call i32 (i8*, ...)* @printf(i8* getelementptr ([11 x i8]* @"\01LC", i32 0, i32 0), i8* %buf2) nounwind ; <i32> [#uses=0]
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-declare i8* @strcpy(i8*, i8*) nounwind
-
-declare i32 @printf(i8*, ...) nounwind