[llvm-branch-commits] [llvm-branch] r81693 - /llvm/branches/release_26/lib/Transforms/Scalar/MemCpyOptimizer.cpp
Tanya Lattner
tonic at nondot.org
Sun Sep 13 12:00:13 PDT 2009
Author: tbrethou
Date: Sun Sep 13 14:00:12 2009
New Revision: 81693
URL: http://llvm.org/viewvc/llvm-project?rev=81693&view=rev
Log:
Merge 81175 from mainline.
Fix PR4882, by making MemCpyOpt not dereference removed stores to get the
context for the newly created operations.
Modified:
llvm/branches/release_26/lib/Transforms/Scalar/MemCpyOptimizer.cpp
Modified: llvm/branches/release_26/lib/Transforms/Scalar/MemCpyOptimizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/release_26/lib/Transforms/Scalar/MemCpyOptimizer.cpp?rev=81693&r1=81692&r2=81693&view=diff
==============================================================================
--- llvm/branches/release_26/lib/Transforms/Scalar/MemCpyOptimizer.cpp (original)
+++ llvm/branches/release_26/lib/Transforms/Scalar/MemCpyOptimizer.cpp Sun Sep 13 14:00:12 2009
@@ -338,13 +338,15 @@
 bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator& BBI) {
   if (SI->isVolatile()) return false;
 
+  LLVMContext &Context = SI->getContext();
+
   // There are two cases that are interesting for this code to handle: memcpy
   // and memset.  Right now we only handle memset.
 
   // Ensure that the value being stored is something that can be memset'able a
   // byte at a time like "0" or "-1" or any width, as well as things like
   // 0xA0A0A0A0 and 0.0.
-  Value *ByteVal = isBytewiseValue(SI->getOperand(0), SI->getContext());
+  Value *ByteVal = isBytewiseValue(SI->getOperand(0), Context);
   if (!ByteVal)
     return false;
 
@@ -385,8 +387,7 @@
     if (NextStore->isVolatile()) break;
 
     // Check to see if this stored value is of the same byte-splattable value.
-    if (ByteVal != isBytewiseValue(NextStore->getOperand(0),
-                                   NextStore->getContext()))
+    if (ByteVal != isBytewiseValue(NextStore->getOperand(0), Context))
       break;
 
     // Check to see if this store is to a constant offset from the start ptr.
@@ -406,7 +407,6 @@
   // store as well.  We try to avoid this unless there is at least something
   // interesting as a small compile-time optimization.
   Ranges.addStore(0, SI);
-
 
   Function *MemSetF = 0;
 
@@ -430,17 +430,15 @@
     BasicBlock::iterator InsertPt = BI;
 
     if (MemSetF == 0) {
-      const Type *Tys[] = {Type::getInt64Ty(SI->getContext())};
-      MemSetF = Intrinsic::getDeclaration(M, Intrinsic::memset,
-                                          Tys, 1);
-    }
+      const Type *Ty = Type::getInt64Ty(Context);
+      MemSetF = Intrinsic::getDeclaration(M, Intrinsic::memset, &Ty, 1);
+    }
 
     // Get the starting pointer of the block.
     StartPtr = Range.StartPtr;
 
     // Cast the start ptr to be i8* as memset requires.
-    const Type *i8Ptr =
-      PointerType::getUnqual(Type::getInt8Ty(SI->getContext()));
+    const Type *i8Ptr = PointerType::getUnqual(Type::getInt8Ty(Context));
     if (StartPtr->getType() != i8Ptr)
       StartPtr = new BitCastInst(StartPtr, i8Ptr, StartPtr->getName(),
                                  InsertPt);
@@ -448,10 +446,9 @@
     Value *Ops[] = {
       StartPtr, ByteVal,  // Start, value
       // size
-      ConstantInt::get(Type::getInt64Ty(SI->getContext()),
-                       Range.End-Range.Start),
+      ConstantInt::get(Type::getInt64Ty(Context), Range.End-Range.Start),
       // align
-      ConstantInt::get(Type::getInt32Ty(SI->getContext()), Range.Alignment)
+      ConstantInt::get(Type::getInt32Ty(Context), Range.Alignment)
     };
     Value *C = CallInst::Create(MemSetF, Ops, Ops+4, "", InsertPt);
     DEBUG(cerr << "Replace stores:\n";
@@ -463,7 +460,8 @@
     BBI = BI;
 
     // Zap all the stores.
-    for (SmallVector<StoreInst*, 16>::const_iterator SI = Range.TheStores.begin(),
+    for (SmallVector<StoreInst*, 16>::const_iterator
+         SI = Range.TheStores.begin(),
          SE = Range.TheStores.end(); SI != SE; ++SI)
       (*SI)->eraseFromParent();
     ++NumMemSetInfer;
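As an aside on the "byte-splattable" test the comments describe: a value qualifies when every one of its bytes is identical, so a single memset byte can reproduce it. A simplified, standalone sketch of that check follows; LLVM's real isBytewiseValue also handles floating-point values like 0.0 and other cases.

    #include <cstdint>
    #include <optional>

    // Returns the splat byte if all 'Bytes' low-order bytes of V are equal
    // (e.g. 0 -> 0x00, -1 -> 0xFF, 0xA0A0A0A0 -> 0xA0), else nullopt.
    std::optional<uint8_t> bytewiseValue(uint64_t V, unsigned Bytes) {
      uint8_t B = static_cast<uint8_t>(V & 0xFF);
      for (unsigned i = 1; i != Bytes; ++i)
        if (((V >> (8 * i)) & 0xFF) != B)
          return std::nullopt;
      return B;
    }

With this, 0x12345678 is rejected while 0xA0A0A0A0 splats to 0xA0, which is why the pass can merge a run of such stores into one memset call.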