[llvm-commits] [llvm] r66377 - in /llvm/branches/Apple/Dib: include/llvm/IntrinsicInst.h lib/Transforms/IPO/GlobalOpt.cpp lib/Transforms/Scalar/InstructionCombining.cpp lib/Transforms/Scalar/MemCpyOptimizer.cpp lib/Transforms/Scalar/ScalarReplAggregates.cpp lib/VMCore/Constants.cpp test/Transforms/ScalarRepl/2008-06-22-LargeArray.ll test/Transforms/ScalarRepl/vector_memcpy.ll
Bill Wendling
isanbard at gmail.com
Sun Mar 8 00:31:17 PST 2009
Author: void
Date: Sun Mar 8 03:31:16 2009
New Revision: 66377
URL: http://llvm.org/viewvc/llvm-project?rev=66377&view=rev
Log:
Merging r66361 into Dib
U include/llvm/IntrinsicInst.h
U lib/Transforms/Scalar/InstructionCombining.cpp
U lib/Transforms/Scalar/ScalarReplAggregates.cpp
Merging r66362 into Dib
U lib/Transforms/IPO/GlobalOpt.cpp
Merging r66364 into Dib
G include/llvm/IntrinsicInst.h
G lib/Transforms/Scalar/InstructionCombining.cpp
C lib/Transforms/Scalar/ScalarReplAggregates.cpp
U lib/Transforms/Scalar/MemCpyOptimizer.cpp
Merging r66366 into Dib
A test/Transforms/ScalarRepl/vector_memcpy.ll
U test/Transforms/ScalarRepl/2008-06-22-LargeArray.ll
C lib/Transforms/Scalar/ScalarReplAggregates.cpp
Merging r66367 into Dib
U lib/VMCore/Constants.cpp
Merging r66368 into Dib
U test/Transforms/ScalarRepl/vector_memcpy.ll
G lib/Transforms/Scalar/ScalarReplAggregates.cpp
Added:
llvm/branches/Apple/Dib/test/Transforms/ScalarRepl/vector_memcpy.ll
- copied, changed from r66366, llvm/trunk/test/Transforms/ScalarRepl/vector_memcpy.ll
Modified:
llvm/branches/Apple/Dib/include/llvm/IntrinsicInst.h
llvm/branches/Apple/Dib/lib/Transforms/IPO/GlobalOpt.cpp
llvm/branches/Apple/Dib/lib/Transforms/Scalar/InstructionCombining.cpp
llvm/branches/Apple/Dib/lib/Transforms/Scalar/MemCpyOptimizer.cpp
llvm/branches/Apple/Dib/lib/Transforms/Scalar/ScalarReplAggregates.cpp
llvm/branches/Apple/Dib/lib/VMCore/Constants.cpp
llvm/branches/Apple/Dib/test/Transforms/ScalarRepl/2008-06-22-LargeArray.ll
Modified: llvm/branches/Apple/Dib/include/llvm/IntrinsicInst.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/include/llvm/IntrinsicInst.h?rev=66377&r1=66376&r2=66377&view=diff
==============================================================================
--- llvm/branches/Apple/Dib/include/llvm/IntrinsicInst.h (original)
+++ llvm/branches/Apple/Dib/include/llvm/IntrinsicInst.h Sun Mar 8 03:31:16 2009
@@ -176,9 +176,13 @@
Value *getRawDest() const { return const_cast<Value*>(getOperand(1)); }
Value *getLength() const { return const_cast<Value*>(getOperand(3)); }
- ConstantInt *getAlignment() const {
+ ConstantInt *getAlignmentCst() const {
return cast<ConstantInt>(const_cast<Value*>(getOperand(4)));
}
+
+ unsigned getAlignment() const {
+ return getAlignmentCst()->getZExtValue();
+ }
/// getDest - This is just like getRawDest, but it strips off any cast
/// instructions that feed it, giving the original input. The returned
@@ -198,12 +202,11 @@
"setLength called with value of wrong type!");
setOperand(3, L);
}
- void setAlignment(ConstantInt *A) {
- assert(getAlignment()->getType() == A->getType() &&
- "setAlignment called with value of wrong type!");
- setOperand(4, A);
+ void setAlignment(unsigned A) {
+ const Type *Int32Ty = getOperand(4)->getType();
+ setOperand(4, ConstantInt::get(Int32Ty, A));
}
-
+
// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const MemIntrinsic *) { return true; }
static inline bool classof(const IntrinsicInst *I) {
@@ -220,81 +223,79 @@
}
};
-
- /// MemCpyInst - This class wraps the llvm.memcpy intrinsic.
+ /// MemSetInst - This class wraps the llvm.memset intrinsic.
///
- struct MemCpyInst : public MemIntrinsic {
+ struct MemSetInst : public MemIntrinsic {
/// get* - Return the arguments to the instruction.
///
- Value *getRawSource() const { return const_cast<Value*>(getOperand(2)); }
-
- /// getSource - This is just like getRawSource, but it strips off any cast
- /// instructions that feed it, giving the original input. The returned
- /// value is guaranteed to be a pointer.
- Value *getSource() const { return getRawSource()->stripPointerCasts(); }
-
-
- void setSource(Value *Ptr) {
- assert(getRawSource()->getType() == Ptr->getType() &&
+ Value *getValue() const { return const_cast<Value*>(getOperand(2)); }
+
+ void setValue(Value *Val) {
+ assert(getValue()->getType() == Val->getType() &&
"setSource called with pointer of wrong type!");
- setOperand(2, Ptr);
+ setOperand(2, Val);
}
-
+
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const MemCpyInst *) { return true; }
+ static inline bool classof(const MemSetInst *) { return true; }
static inline bool classof(const IntrinsicInst *I) {
- return I->getIntrinsicID() == Intrinsic::memcpy;
+ return I->getIntrinsicID() == Intrinsic::memset;
}
static inline bool classof(const Value *V) {
return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
}
};
-
- /// MemMoveInst - This class wraps the llvm.memmove intrinsic.
+
+ /// MemTransferInst - This class wraps the llvm.memcpy/memmove intrinsics.
///
- struct MemMoveInst : public MemIntrinsic {
+ struct MemTransferInst : public MemIntrinsic {
/// get* - Return the arguments to the instruction.
///
Value *getRawSource() const { return const_cast<Value*>(getOperand(2)); }
-
+
/// getSource - This is just like getRawSource, but it strips off any cast
/// instructions that feed it, giving the original input. The returned
/// value is guaranteed to be a pointer.
Value *getSource() const { return getRawSource()->stripPointerCasts(); }
-
+
void setSource(Value *Ptr) {
assert(getRawSource()->getType() == Ptr->getType() &&
"setSource called with pointer of wrong type!");
setOperand(2, Ptr);
}
-
+
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const MemMoveInst *) { return true; }
+ static inline bool classof(const MemTransferInst *) { return true; }
static inline bool classof(const IntrinsicInst *I) {
- return I->getIntrinsicID() == Intrinsic::memmove;
+ return I->getIntrinsicID() == Intrinsic::memcpy ||
+ I->getIntrinsicID() == Intrinsic::memmove;
}
static inline bool classof(const Value *V) {
return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
}
};
-
- /// MemSetInst - This class wraps the llvm.memset intrinsic.
+
+
+ /// MemCpyInst - This class wraps the llvm.memcpy intrinsic.
///
- struct MemSetInst : public MemIntrinsic {
- /// get* - Return the arguments to the instruction.
- ///
- Value *getValue() const { return const_cast<Value*>(getOperand(2)); }
-
- void setValue(Value *Val) {
- assert(getValue()->getType() == Val->getType() &&
- "setSource called with pointer of wrong type!");
- setOperand(2, Val);
+ struct MemCpyInst : public MemTransferInst {
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const MemCpyInst *) { return true; }
+ static inline bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::memcpy;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
}
+ };
+ /// MemMoveInst - This class wraps the llvm.memmove intrinsic.
+ ///
+ struct MemMoveInst : public MemTransferInst {
// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const MemSetInst *) { return true; }
+ static inline bool classof(const MemMoveInst *) { return true; }
static inline bool classof(const IntrinsicInst *I) {
- return I->getIntrinsicID() == Intrinsic::memset;
+ return I->getIntrinsicID() == Intrinsic::memmove;
}
static inline bool classof(const Value *V) {
return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
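[Editor's note, not part of the patch: a minimal sketch of client code against the reworked IntrinsicInst.h interface above, assuming only the usual include; the helper function name is hypothetical.]

  #include "llvm/IntrinsicInst.h"
  using namespace llvm;

  // Raise the alignment recorded on a memcpy/memmove once a larger alignment
  // of its pointers becomes known.
  static bool raiseTransferAlignment(IntrinsicInst *I, unsigned KnownAlign) {
    // MemTransferInst matches both llvm.memcpy and llvm.memmove, replacing the
    // old "isa<MemCpyInst>(I) || isa<MemMoveInst>(I)" pattern.
    MemTransferInst *MTI = dyn_cast<MemTransferInst>(I);
    if (!MTI)
      return false;
    // getAlignment() now returns a plain unsigned; the underlying ConstantInt
    // operand is still reachable through getAlignmentCst().
    if (MTI->getAlignment() >= KnownAlign)
      return false;
    MTI->setAlignment(KnownAlign);   // setAlignment now takes an unsigned.
    return true;
  }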
Modified: llvm/branches/Apple/Dib/lib/Transforms/IPO/GlobalOpt.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/lib/Transforms/IPO/GlobalOpt.cpp?rev=66377&r1=66376&r2=66377&view=diff
==============================================================================
--- llvm/branches/Apple/Dib/lib/Transforms/IPO/GlobalOpt.cpp (original)
+++ llvm/branches/Apple/Dib/lib/Transforms/IPO/GlobalOpt.cpp Sun Mar 8 03:31:16 2009
@@ -217,7 +217,7 @@
if (AnalyzeGlobal(I, GS, PHIUsers)) return true;
GS.HasPHIUser = true;
} else if (isa<CmpInst>(I)) {
- } else if (isa<MemCpyInst>(I) || isa<MemMoveInst>(I)) {
+ } else if (isa<MemTransferInst>(I)) {
if (I->getOperand(1) == V)
GS.StoredType = GlobalStatus::isStored;
if (I->getOperand(2) == V)
Modified: llvm/branches/Apple/Dib/lib/Transforms/Scalar/InstructionCombining.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/lib/Transforms/Scalar/InstructionCombining.cpp?rev=66377&r1=66376&r2=66377&view=diff
==============================================================================
--- llvm/branches/Apple/Dib/lib/Transforms/Scalar/InstructionCombining.cpp (original)
+++ llvm/branches/Apple/Dib/lib/Transforms/Scalar/InstructionCombining.cpp Sun Mar 8 03:31:16 2009
@@ -9290,10 +9290,10 @@
unsigned DstAlign = GetOrEnforceKnownAlignment(MI->getOperand(1));
unsigned SrcAlign = GetOrEnforceKnownAlignment(MI->getOperand(2), DstAlign);
unsigned MinAlign = std::min(DstAlign, SrcAlign);
- unsigned CopyAlign = MI->getAlignment()->getZExtValue();
+ unsigned CopyAlign = MI->getAlignment();
if (CopyAlign < MinAlign) {
- MI->setAlignment(ConstantInt::get(Type::Int32Ty, MinAlign));
+ MI->setAlignment(MinAlign);
return MI;
}
@@ -9365,8 +9365,8 @@
Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
unsigned Alignment = GetOrEnforceKnownAlignment(MI->getDest());
- if (MI->getAlignment()->getZExtValue() < Alignment) {
- MI->setAlignment(ConstantInt::get(Type::Int32Ty, Alignment));
+ if (MI->getAlignment() < Alignment) {
+ MI->setAlignment(Alignment);
return MI;
}
@@ -9376,7 +9376,7 @@
if (!LenC || !FillC || FillC->getType() != Type::Int8Ty)
return 0;
uint64_t Len = LenC->getZExtValue();
- Alignment = MI->getAlignment()->getZExtValue();
+ Alignment = MI->getAlignment();
// If the length is zero, this is a no-op
if (Len == 0) return MI; // memset(d,c,0,a) -> noop
@@ -9452,7 +9452,7 @@
// If we can determine a pointer alignment that is bigger than currently
// set, update the alignment.
- if (isa<MemCpyInst>(MI) || isa<MemMoveInst>(MI)) {
+ if (isa<MemTransferInst>(MI)) {
if (Instruction *I = SimplifyMemTransfer(MI))
return I;
} else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
Modified: llvm/branches/Apple/Dib/lib/Transforms/Scalar/MemCpyOptimizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/lib/Transforms/Scalar/MemCpyOptimizer.cpp?rev=66377&r1=66376&r2=66377&view=diff
==============================================================================
--- llvm/branches/Apple/Dib/lib/Transforms/Scalar/MemCpyOptimizer.cpp (original)
+++ llvm/branches/Apple/Dib/lib/Transforms/Scalar/MemCpyOptimizer.cpp Sun Mar 8 03:31:16 2009
@@ -678,13 +678,11 @@
M->getParent()->getParent()->getParent(),
M->getIntrinsicID(), Tys, 1);
- std::vector<Value*> args;
- args.push_back(M->getRawDest());
- args.push_back(MDep->getRawSource());
- args.push_back(M->getLength());
- args.push_back(M->getAlignment());
+ Value *Args[4] = {
+ M->getRawDest(), MDep->getRawSource(), M->getLength(), M->getAlignmentCst()
+ };
- CallInst* C = CallInst::Create(MemCpyFun, args.begin(), args.end(), "", M);
+ CallInst* C = CallInst::Create(MemCpyFun, Args, Args+4, "", M);
// If C and M don't interfere, then this is a valid transformation. If they
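[Editor's note, not part of the patch: the hunk above swaps the std::vector for a fixed-size Value* array, which avoids a heap allocation for the four operands; the alignment is passed through getAlignmentCst() because the intrinsic call still expects a ConstantInt operand. A minimal sketch of the same construction, with the variable names taken from the surrounding code:]

  Value *Args[4] = {
    M->getRawDest(),        // i8* destination
    MDep->getRawSource(),   // i8* source, taken from the dependent memcpy
    M->getLength(),         // length operand, unchanged
    M->getAlignmentCst()    // alignment stays a ConstantInt operand
  };
  CallInst *C = CallInst::Create(MemCpyFun, Args, Args + 4, "", M);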
Modified: llvm/branches/Apple/Dib/lib/Transforms/Scalar/ScalarReplAggregates.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/lib/Transforms/Scalar/ScalarReplAggregates.cpp?rev=66377&r1=66376&r2=66377&view=diff
==============================================================================
--- llvm/branches/Apple/Dib/lib/Transforms/Scalar/ScalarReplAggregates.cpp (original)
+++ llvm/branches/Apple/Dib/lib/Transforms/Scalar/ScalarReplAggregates.cpp Sun Mar 8 03:31:16 2009
@@ -605,7 +605,7 @@
return MarkUnsafe(Info);
// We only know about memcpy/memset/memmove.
- if (!isa<MemCpyInst>(MI) && !isa<MemSetInst>(MI) && !isa<MemMoveInst>(MI))
+ if (!isa<MemIntrinsic>(MI))
return MarkUnsafe(Info);
// Otherwise, we can transform it. Determine whether this is a memcpy/set
@@ -721,21 +721,17 @@
SmallVector<AllocaInst*, 32> &NewElts) {
// If this is a memcpy/memmove, construct the other pointer as the
- // appropriate type.
+ // appropriate type. The "Other" pointer is the pointer that goes to memory
+ // that doesn't have anything to do with the alloca that we are promoting. For
+ // memset, this Value* stays null.
Value *OtherPtr = 0;
- if (MemCpyInst *MCI = dyn_cast<MemCpyInst>(MI)) {
- if (BCInst == MCI->getRawDest())
- OtherPtr = MCI->getRawSource();
+ unsigned MemAlignment = MI->getAlignment();
+ if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) { // memmove/memcopy
+ if (BCInst == MTI->getRawDest())
+ OtherPtr = MTI->getRawSource();
else {
- assert(BCInst == MCI->getRawSource());
- OtherPtr = MCI->getRawDest();
- }
- } else if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
- if (BCInst == MMI->getRawDest())
- OtherPtr = MMI->getRawSource();
- else {
- assert(BCInst == MMI->getRawSource());
- OtherPtr = MMI->getRawDest();
+ assert(BCInst == MTI->getRawSource());
+ OtherPtr = MTI->getRawDest();
}
}
@@ -771,22 +767,47 @@
for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
// If this is a memcpy/memmove, emit a GEP of the other element address.
Value *OtherElt = 0;
+ unsigned OtherEltAlign = MemAlignment;
+
if (OtherPtr) {
Value *Idx[2] = { Zero, ConstantInt::get(Type::Int32Ty, i) };
OtherElt = GetElementPtrInst::Create(OtherPtr, Idx, Idx + 2,
OtherPtr->getNameStr()+"."+utostr(i),
MI);
+ uint64_t EltOffset;
+ const PointerType *OtherPtrTy = cast<PointerType>(OtherPtr->getType());
+ if (const StructType *ST =
+ dyn_cast<StructType>(OtherPtrTy->getElementType())) {
+ EltOffset = TD->getStructLayout(ST)->getElementOffset(i);
+ } else {
+ const Type *EltTy =
+ cast<SequentialType>(OtherPtr->getType())->getElementType();
+ EltOffset = TD->getTypePaddedSize(EltTy)*i;
+ }
+
+ // The alignment of the other pointer is the guaranteed alignment of the
+ // element, which is affected by both the known alignment of the whole
+ // mem intrinsic and the alignment of the element. If the alignment of
+ // the memcpy (f.e.) is 32 but the element is at a 4-byte offset, then the
+ // known alignment is just 4 bytes.
+ OtherEltAlign = (unsigned)MinAlign(OtherEltAlign, EltOffset);
}
Value *EltPtr = NewElts[i];
- const Type *EltTy =cast<PointerType>(EltPtr->getType())->getElementType();
+ const Type *EltTy = cast<PointerType>(EltPtr->getType())->getElementType();
// If we got down to a scalar, insert a load or store as appropriate.
if (EltTy->isSingleValueType()) {
- if (isa<MemCpyInst>(MI) || isa<MemMoveInst>(MI)) {
- Value *Elt = new LoadInst(SROADest ? OtherElt : EltPtr, "tmp",
- MI);
- new StoreInst(Elt, SROADest ? EltPtr : OtherElt, MI);
+ if (isa<MemTransferInst>(MI)) {
+ if (SROADest) {
+ // From Other to Alloca.
+ Value *Elt = new LoadInst(OtherElt, "tmp", false, OtherEltAlign, MI);
+ new StoreInst(Elt, EltPtr, MI);
+ } else {
+ // From Alloca to Other.
+ Value *Elt = new LoadInst(EltPtr, "tmp", MI);
+ new StoreInst(Elt, OtherElt, false, OtherEltAlign, MI);
+ }
continue;
}
assert(isa<MemSetInst>(MI));
@@ -847,12 +868,12 @@
unsigned EltSize = TD->getTypePaddedSize(EltTy);
// Finally, insert the meminst for this element.
- if (isa<MemCpyInst>(MI) || isa<MemMoveInst>(MI)) {
+ if (isa<MemTransferInst>(MI)) {
Value *Ops[] = {
SROADest ? EltPtr : OtherElt, // Dest ptr
SROADest ? OtherElt : EltPtr, // Src ptr
ConstantInt::get(MI->getOperand(3)->getType(), EltSize), // Size
- Zero // Align
+ ConstantInt::get(Type::Int32Ty, OtherEltAlign) // Align
};
CallInst::Create(TheFn, Ops, Ops + 4, "", MI);
} else {
@@ -1322,19 +1343,32 @@
IsNotTrivial = true;
continue;
}
-
+
// If this is a constant sized memset of a constant value (e.g. 0) we can
// handle it.
- if (isa<MemSetInst>(User) &&
- // Store of constant value.
- isa<ConstantInt>(User->getOperand(2)) &&
- // Store with constant size.
- isa<ConstantInt>(User->getOperand(3))) {
- VecTy = Type::VoidTy;
- IsNotTrivial = true;
- continue;
+ if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) {
+ // Store of constant value and constant size.
+ if (isa<ConstantInt>(MSI->getValue()) &&
+ isa<ConstantInt>(MSI->getLength())) {
+ IsNotTrivial = true;
+ continue;
+ }
+ }
+
+ // If this is a memcpy or memmove into or out of the whole allocation, we
+ // can handle it like a load or store of the scalar type.
+ if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(User)) {
+ if (ConstantInt *Len = dyn_cast<ConstantInt>(MTI->getLength()))
+ if (Len->getZExtValue() == AllocaSize && Offset == 0) {
+ IsNotTrivial = true;
+ continue;
+ }
}
+ // Ignore dbg intrinsic.
+ if (isa<DbgInfoIntrinsic>(User))
+ continue;
+
// Otherwise, we cannot handle this!
return false;
}
@@ -1414,8 +1448,51 @@
MSI->eraseFromParent();
continue;
}
+
+ // If this is a memcpy or memmove into or out of the whole allocation, we
+ // can handle it like a load or store of the scalar type.
+ if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(User)) {
+ assert(Offset == 0 && "must be store to start of alloca");
+
+ // If the source and destination are both to the same alloca, then this is
+ // a noop copy-to-self, just delete it. Otherwise, emit a load and store
+ // as appropriate.
+ AllocaInst *OrigAI = cast<AllocaInst>(Ptr->getUnderlyingObject());
+
+ if (MTI->getSource()->getUnderlyingObject() != OrigAI) {
+ // Dest must be OrigAI, change this to be a load from the original
+ // pointer (bitcasted), then a store to our new alloca.
+ assert(MTI->getRawDest() == Ptr && "Neither use is of pointer?");
+ Value *SrcPtr = MTI->getSource();
+ SrcPtr = Builder.CreateBitCast(SrcPtr, NewAI->getType());
+ LoadInst *SrcVal = Builder.CreateLoad(SrcPtr, "srcval");
+ SrcVal->setAlignment(MTI->getAlignment());
+ Builder.CreateStore(SrcVal, NewAI);
+ } else if (MTI->getDest()->getUnderlyingObject() != OrigAI) {
+ // Src must be OrigAI, change this to be a load from NewAI then a store
+ // through the original dest pointer (bitcasted).
+ assert(MTI->getRawSource() == Ptr && "Neither use is of pointer?");
+ LoadInst *SrcVal = Builder.CreateLoad(NewAI, "srcval");
+
+ Value *DstPtr = Builder.CreateBitCast(MTI->getDest(), NewAI->getType());
+ StoreInst *NewStore = Builder.CreateStore(SrcVal, DstPtr);
+ NewStore->setAlignment(MTI->getAlignment());
+ } else {
+ // Noop transfer. Src == Dst
+ }
+
+
+ MTI->eraseFromParent();
+ continue;
+ }
+ // If user is a dbg info intrinsic then it is safe to remove it.
+ if (isa<DbgInfoIntrinsic>(User)) {
+ User->eraseFromParent();
+ continue;
+ }
+
assert(0 && "Unsupported operation!");
abort();
}
@@ -1549,21 +1626,25 @@
const Type *AllocaType = Old->getType();
if (const VectorType *VTy = dyn_cast<VectorType>(AllocaType)) {
- // If the result alloca is a vector type, this is either an element
- // access or a bitcast to another vector type.
- if (isa<VectorType>(SV->getType())) {
- SV = Builder.CreateBitCast(SV, AllocaType, "tmp");
- } else {
- // Must be an element insertion.
- unsigned Elt = Offset/TD->getTypePaddedSizeInBits(VTy->getElementType());
-
- if (SV->getType() != VTy->getElementType())
- SV = Builder.CreateBitCast(SV, VTy->getElementType(), "tmp");
-
- SV = Builder.CreateInsertElement(Old, SV,
- ConstantInt::get(Type::Int32Ty, Elt),
- "tmp");
- }
+ uint64_t VecSize = TD->getTypePaddedSizeInBits(VTy);
+ uint64_t ValSize = TD->getTypePaddedSizeInBits(SV->getType());
+
+ // Changing the whole vector with memset or with an access of a different
+ // vector type?
+ if (ValSize == VecSize)
+ return Builder.CreateBitCast(SV, AllocaType, "tmp");
+
+ uint64_t EltSize = TD->getTypePaddedSizeInBits(VTy->getElementType());
+
+ // Must be an element insertion.
+ unsigned Elt = Offset/EltSize;
+
+ if (SV->getType() != VTy->getElementType())
+ SV = Builder.CreateBitCast(SV, VTy->getElementType(), "tmp");
+
+ SV = Builder.CreateInsertElement(Old, SV,
+ ConstantInt::get(Type::Int32Ty, Elt),
+ "tmp");
return SV;
}
@@ -1694,7 +1775,7 @@
// If this is isn't our memcpy/memmove, reject it as something we can't
// handle.
- if (!isa<MemCpyInst>(*UI) && !isa<MemMoveInst>(*UI))
+ if (!isa<MemTransferInst>(*UI))
return false;
// If we already have seen a copy, reject the second one.
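[Editor's note, not part of the patch: a sketch of the per-element alignment computation introduced in the SRoA hunks above, using the same 2009-era TargetData API; the free function is hypothetical.]

  #include "llvm/Target/TargetData.h"
  #include "llvm/DerivedTypes.h"
  #include "llvm/Support/MathExtras.h"
  using namespace llvm;

  // The alignment SRoA may claim for element Idx of the "other" pointer of a
  // memcpy/memmove is limited both by the alignment of the whole intrinsic
  // and by the element's byte offset within the aggregate.
  static unsigned elementAlign(const TargetData *TD, const Type *AggTy,
                               unsigned Idx, unsigned MemAlignment) {
    uint64_t EltOffset;
    if (const StructType *ST = dyn_cast<StructType>(AggTy)) {
      // Struct elements sit at the offsets recorded in the struct layout.
      EltOffset = TD->getStructLayout(ST)->getElementOffset(Idx);
    } else {
      // Arrays/vectors: element Idx starts at Idx * padded element size.
      const Type *EltTy = cast<SequentialType>(AggTy)->getElementType();
      EltOffset = TD->getTypePaddedSize(EltTy) * Idx;
    }
    // E.g. a 32-byte-aligned memcpy touching an element at offset 4 can only
    // guarantee 4-byte alignment for that element.
    return (unsigned)MinAlign(MemAlignment, EltOffset);
  }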
Modified: llvm/branches/Apple/Dib/lib/VMCore/Constants.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/lib/VMCore/Constants.cpp?rev=66377&r1=66376&r2=66377&view=diff
==============================================================================
--- llvm/branches/Apple/Dib/lib/VMCore/Constants.cpp (original)
+++ llvm/branches/Apple/Dib/lib/VMCore/Constants.cpp Sun Mar 8 03:31:16 2009
@@ -1995,7 +1995,7 @@
unsigned SrcBitSize = SrcTy->getPrimitiveSizeInBits();
unsigned DstBitSize = DstTy->getPrimitiveSizeInBits();
#endif
- assert(SrcBitSize == DstBitSize && "BitCast requies types of same width");
+ assert(SrcBitSize == DstBitSize && "BitCast requires types of same width");
return getFoldedCast(Instruction::BitCast, C, DstTy);
}
Modified: llvm/branches/Apple/Dib/test/Transforms/ScalarRepl/2008-06-22-LargeArray.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/test/Transforms/ScalarRepl/2008-06-22-LargeArray.ll?rev=66377&r1=66376&r2=66377&view=diff
==============================================================================
--- llvm/branches/Apple/Dib/test/Transforms/ScalarRepl/2008-06-22-LargeArray.ll (original)
+++ llvm/branches/Apple/Dib/test/Transforms/ScalarRepl/2008-06-22-LargeArray.ll Sun Mar 8 03:31:16 2009
@@ -6,12 +6,11 @@
define void @memtest1(i8* %dst, i8* %src) nounwind {
entry:
- %temp = alloca [100 x i8] ; <[100 x i8]*> [#uses=2]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %temp1 = bitcast [100 x i8]* %temp to i8* ; <i8*> [#uses=1]
- call void @llvm.memcpy.i32( i8* %temp1, i8* %src, i32 100, i32 1 )
- %temp3 = bitcast [100 x i8]* %temp to i8* ; <i8*> [#uses=1]
- call void @llvm.memcpy.i32( i8* %dst, i8* %temp3, i32 100, i32 1 )
+ %temp = alloca [200 x i8] ; <[100 x i8]*> [#uses=2]
+ %temp1 = bitcast [200 x i8]* %temp to i8* ; <i8*> [#uses=1]
+ call void @llvm.memcpy.i32( i8* %temp1, i8* %src, i32 200, i32 1 )
+ %temp3 = bitcast [200 x i8]* %temp to i8* ; <i8*> [#uses=1]
+ call void @llvm.memcpy.i32( i8* %dst, i8* %temp3, i32 200, i32 1 )
ret void
}
Copied: llvm/branches/Apple/Dib/test/Transforms/ScalarRepl/vector_memcpy.ll (from r66366, llvm/trunk/test/Transforms/ScalarRepl/vector_memcpy.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Dib/test/Transforms/ScalarRepl/vector_memcpy.ll?p2=llvm/branches/Apple/Dib/test/Transforms/ScalarRepl/vector_memcpy.ll&p1=llvm/trunk/test/Transforms/ScalarRepl/vector_memcpy.ll&r1=66366&r2=66377&rev=66377&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ScalarRepl/vector_memcpy.ll (original)
+++ llvm/branches/Apple/Dib/test/Transforms/ScalarRepl/vector_memcpy.ll Sun Mar 8 03:31:16 2009
@@ -1,4 +1,7 @@
-; RUN: llvm-as < %s | opt -scalarrepl | llvm-dis | grep {ret <16 x float> %A}
+; RUN: llvm-as < %s | opt -scalarrepl | llvm-dis > %t
+; RUN: grep {ret <16 x float> %A} %t
+; RUN: grep {ret <16 x float> zeroinitializer} %t
+
define <16 x float> @foo(<16 x float> %A) nounwind {
%tmp = alloca <16 x float>, align 16
%tmp2 = alloca <16 x float>, align 16
@@ -11,5 +14,16 @@
ret <16 x float> %R
}
-declare void @llvm.memcpy.i64(i8* nocapture, i8* nocapture, i64, i32) nounwind
+define <16 x float> @foo2(<16 x float> %A) nounwind {
+ %tmp2 = alloca <16 x float>, align 16
+ %s2 = bitcast <16 x float>* %tmp2 to i8*
+ call void @llvm.memset.i64(i8* %s2, i8 0, i64 64, i32 16)
+
+ %R = load <16 x float>* %tmp2
+ ret <16 x float> %R
+}
+
+
+declare void @llvm.memcpy.i64(i8* nocapture, i8* nocapture, i64, i32) nounwind
+declare void @llvm.memset.i64(i8* nocapture, i8, i64, i32) nounwind