[llvm-commits] [llvm-gcc-4.0] r44114 - in /llvm-gcc-4.0/trunk/gcc: config/i386/llvm-i386.cpp config/rs6000/llvm-rs6000.cpp llvm-convert.cpp llvm-internal.h
Duncan Sands
baldrick at free.fr
Wed Nov 14 05:26:03 PST 2007
Author: baldrick
Date: Wed Nov 14 07:26:03 2007
New Revision: 44114
URL: http://llvm.org/viewvc/llvm-project?rev=44114&view=rev
Log:
Backport from 4.2 by Bill Wendling of
http://lists.cs.uiuc.edu/pipermail/llvm-commits/Week-of-Mon-20071105/055332.html
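In outline: the bare Value *DestLoc destination pointer that was threaded through
the aggregate expression emitters is replaced everywhere by a const MemRef *. A
MemRef (defined in llvm-internal.h below) bundles the destination pointer with its
alignment and volatility, so the emitters no longer have to pass alignment 1 to
memcpy/memmove/memset or fake volatile stores through temporaries. A minimal
standalone sketch of the idea, using hypothetical stand-ins (Value, emit) rather
than the real llvm-gcc types:

  #include <cassert>
  #include <cstdio>

  // Hypothetical stand-in for llvm::Value.
  struct Value { const char *Name; };

  // Mirrors the MemRef struct added to llvm-internal.h below.
  struct MemRef {
    Value *Ptr;
    unsigned Alignment;
    bool Volatile;
    MemRef() : Ptr(0), Alignment(0), Volatile(false) {}
    MemRef(Value *P, unsigned A, bool V) : Ptr(P), Alignment(A), Volatile(V) {
      assert(A && !(A & (A - 1)) && "Alignment not a power of 2!");
    }
  };

  // Before the patch an emitter only saw the raw pointer:
  //   void emit(Value *DestLoc);
  // After it, alignment and volatility travel along with it:
  void emit(const MemRef *DestLoc) {
    if (!DestLoc) return; // scalar result, no destination buffer
    std::printf("store to %s, align %u%s\n", DestLoc->Ptr->Name,
                DestLoc->Alignment, DestLoc->Volatile ? ", volatile" : "");
  }

  int main() {
    Value Buf = { "memtmp" };
    MemRef Loc(&Buf, 8, false);
    emit(&Loc);
    return 0;
  }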
Modified:
llvm-gcc-4.0/trunk/gcc/config/i386/llvm-i386.cpp
llvm-gcc-4.0/trunk/gcc/config/rs6000/llvm-rs6000.cpp
llvm-gcc-4.0/trunk/gcc/llvm-convert.cpp
llvm-gcc-4.0/trunk/gcc/llvm-internal.h
Modified: llvm-gcc-4.0/trunk/gcc/config/i386/llvm-i386.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.0/trunk/gcc/config/i386/llvm-i386.cpp?rev=44114&r1=44113&r2=44114&view=diff
==============================================================================
--- llvm-gcc-4.0/trunk/gcc/config/i386/llvm-i386.cpp (original)
+++ llvm-gcc-4.0/trunk/gcc/config/i386/llvm-i386.cpp Wed Nov 14 07:26:03 2007
@@ -40,7 +40,7 @@
*/
bool TreeToLLVM::TargetIntrinsicLower(tree exp,
unsigned FnCode,
- Value *DestLoc,
+ const MemRef *DestLoc,
Value *&Result,
const Type *ResultType,
std::vector<Value*> &Ops) {
Modified: llvm-gcc-4.0/trunk/gcc/config/rs6000/llvm-rs6000.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.0/trunk/gcc/config/rs6000/llvm-rs6000.cpp?rev=44114&r1=44113&r2=44114&view=diff
==============================================================================
--- llvm-gcc-4.0/trunk/gcc/config/rs6000/llvm-rs6000.cpp (original)
+++ llvm-gcc-4.0/trunk/gcc/config/rs6000/llvm-rs6000.cpp Wed Nov 14 07:26:03 2007
@@ -80,7 +80,7 @@
//
bool TreeToLLVM::TargetIntrinsicLower(tree exp,
unsigned FnCode,
- Value *DestLoc,
+ const MemRef *DestLoc,
Value *&Result,
const Type *ResultType,
std::vector<Value*> &Ops) {
Modified: llvm-gcc-4.0/trunk/gcc/llvm-convert.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.0/trunk/gcc/llvm-convert.cpp?rev=44114&r1=44113&r2=44114&view=diff
==============================================================================
--- llvm-gcc-4.0/trunk/gcc/llvm-convert.cpp (original)
+++ llvm-gcc-4.0/trunk/gcc/llvm-convert.cpp Wed Nov 14 07:26:03 2007
@@ -35,6 +35,7 @@
#include "llvm/Instructions.h"
#include "llvm/Module.h"
#include "llvm/Analysis/ConstantFolding.h"
+#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetAsmInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
@@ -758,7 +759,7 @@
}
-Value *TreeToLLVM::Emit(tree exp, Value *DestLoc) {
+Value *TreeToLLVM::Emit(tree exp, const MemRef *DestLoc) {
assert((isAggregateTreeType(TREE_TYPE(exp)) == (DestLoc != 0) ||
TREE_CODE(exp) == MODIFY_EXPR) &&
"Didn't pass DestLoc to an aggregate expr, or passed it to scalar!");
@@ -1108,6 +1109,15 @@
return new AllocaInst(Ty, 0, "memtmp", AllocaInsertionPoint);
}
+/// CreateTempLoc - Like CreateTemporary, but returns a MemRef.
+MemRef TreeToLLVM::CreateTempLoc(const Type *Ty) {
+ AllocaInst *AI = CreateTemporary(Ty);
+ // MemRefs do not allow alignment 0.
+ if (!AI->getAlignment())
+ AI->setAlignment(TD.getPrefTypeAlignment(Ty));
+ return MemRef(AI, AI->getAlignment(), false);
+}
+
/// EmitBlock - Add the specified basic block to the end of the function. If
/// the previous block falls through into it, add an explicit branch. Also,
/// manage fixups for EH info.
@@ -1147,42 +1157,46 @@
/// CopyAggregate - Recursively traverse the potentially aggregate src/dest
/// ptrs, copying all of the elements.
-static void CopyAggregate(Value *DestPtr, Value *SrcPtr,
- bool isDstVolatile, bool isSrcVolatile,
- unsigned Alignment, LLVMBuilder &Builder) {
- assert(DestPtr->getType() == SrcPtr->getType() &&
+static void CopyAggregate(MemRef DestLoc, MemRef SrcLoc, LLVMBuilder &Builder) {
+ assert(DestLoc.Ptr->getType() == SrcLoc.Ptr->getType() &&
"Cannot copy between two pointers of different type!");
- const Type *ElTy = cast<PointerType>(DestPtr->getType())->getElementType();
+ const Type *ElTy =
+ cast<PointerType>(DestLoc.Ptr->getType())->getElementType();
- unsigned TypeAlign = getTargetData().getABITypeAlignment(ElTy);
- Alignment = MIN(Alignment, TypeAlign);
+ unsigned Alignment = std::min(DestLoc.Alignment, SrcLoc.Alignment);
if (ElTy->isFirstClassType()) {
- LoadInst *V = Builder.CreateLoad(SrcPtr, isSrcVolatile, "tmp");
- StoreInst *S = Builder.CreateStore(V, DestPtr, isDstVolatile);
+ LoadInst *V = Builder.CreateLoad(SrcLoc.Ptr, SrcLoc.Volatile, "tmp");
+ StoreInst *S = Builder.CreateStore(V, DestLoc.Ptr, DestLoc.Volatile);
V->setAlignment(Alignment);
S->setAlignment(Alignment);
} else if (const StructType *STy = dyn_cast<StructType>(ElTy)) {
+ const StructLayout *SL = getTargetData().getStructLayout(STy);
Constant *Zero = ConstantInt::get(Type::Int32Ty, 0);
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
if (isPaddingElement(STy, i))
continue;
Constant *Idx = ConstantInt::get(Type::Int32Ty, i);
Value *Idxs[2] = { Zero, Idx };
- Value *DElPtr = Builder.CreateGEP(DestPtr, Idxs, Idxs + 2, "tmp");
- Value *SElPtr = Builder.CreateGEP(SrcPtr, Idxs, Idxs + 2, "tmp");
- CopyAggregate(DElPtr, SElPtr, isDstVolatile, isSrcVolatile, Alignment,
+ Value *DElPtr = Builder.CreateGEP(DestLoc.Ptr, Idxs, Idxs + 2, "tmp");
+ Value *SElPtr = Builder.CreateGEP(SrcLoc.Ptr, Idxs, Idxs + 2, "tmp");
+ unsigned Align = MinAlign(Alignment, SL->getElementOffset(i));
+ CopyAggregate(MemRef(DElPtr, Align, DestLoc.Volatile),
+ MemRef(SElPtr, Align, SrcLoc.Volatile),
Builder);
}
} else {
const ArrayType *ATy = cast<ArrayType>(ElTy);
Constant *Zero = ConstantInt::get(Type::Int32Ty, 0);
+ unsigned EltSize = getTargetData().getABITypeSize(ATy->getElementType());
for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
Constant *Idx = ConstantInt::get(Type::Int32Ty, i);
Value *Idxs[2] = { Zero, Idx };
- Value *DElPtr = Builder.CreateGEP(DestPtr, Idxs, Idxs + 2, "tmp");
- Value *SElPtr = Builder.CreateGEP(SrcPtr, Idxs, Idxs + 2, "tmp");
- CopyAggregate(DElPtr, SElPtr, isDstVolatile, isSrcVolatile, Alignment,
+ Value *DElPtr = Builder.CreateGEP(DestLoc.Ptr, Idxs, Idxs + 2, "tmp");
+ Value *SElPtr = Builder.CreateGEP(SrcLoc.Ptr, Idxs, Idxs + 2, "tmp");
+ unsigned Align = MinAlign(Alignment, i * EltSize);
+ CopyAggregate(MemRef(DElPtr, Align, DestLoc.Volatile),
+ MemRef(SElPtr, Align, SrcLoc.Volatile),
Builder);
}
}
@@ -1208,12 +1222,10 @@
#define TARGET_LLVM_MIN_BYTES_COPY_BY_MEMCPY 64
#endif
-/// EmitAggregateCopy - Copy the elements from SrcPtr to DestPtr, using the
+/// EmitAggregateCopy - Copy the elements from SrcLoc to DestLoc, using the
/// GCC type specified by GCCType to know which elements to copy.
-void TreeToLLVM::EmitAggregateCopy(Value *DestPtr, Value *SrcPtr, tree type,
- bool isDstVolatile, bool isSrcVolatile,
- unsigned Alignment) {
- if (DestPtr == SrcPtr && !isDstVolatile && !isSrcVolatile)
+void TreeToLLVM::EmitAggregateCopy(MemRef DestLoc, MemRef SrcLoc, tree type) {
+ if (DestLoc.Ptr == SrcLoc.Ptr && !DestLoc.Volatile && !SrcLoc.Volatile)
return; // noop copy.
// If the type is small, copy the elements instead of using a block copy.
@@ -1227,66 +1239,76 @@
if (!TheTypeConverter->GCCTypeOverlapsWithLLVMTypePadding(type, LLVMTy) &&
// Don't copy tons of tiny elements.
CountAggregateElements(LLVMTy) <= 8) {
- DestPtr = CastToType(Instruction::BitCast, DestPtr,
- PointerType::get(LLVMTy));
- SrcPtr = CastToType(Instruction::BitCast, SrcPtr,
- PointerType::get(LLVMTy));
- CopyAggregate(DestPtr, SrcPtr, isDstVolatile, isSrcVolatile, Alignment,
- Builder);
+ DestLoc.Ptr = CastToType(Instruction::BitCast, DestLoc.Ptr,
+ PointerType::get(LLVMTy));
+ SrcLoc.Ptr = CastToType(Instruction::BitCast, SrcLoc.Ptr,
+ PointerType::get(LLVMTy));
+ CopyAggregate(DestLoc, SrcLoc, Builder);
return;
}
}
Value *TypeSize = Emit(TYPE_SIZE_UNIT(type), 0);
- EmitMemCpy(DestPtr, SrcPtr, TypeSize, Alignment);
+ EmitMemCpy(DestLoc.Ptr, SrcLoc.Ptr, TypeSize,
+ std::min(DestLoc.Alignment, SrcLoc.Alignment));
}
-/// ZeroAggregate - Recursively traverse the potientially aggregate dest
-/// ptr, zero'ing all of the elements.
-static void ZeroAggregate(Value *DestPtr, LLVMBuilder &Builder) {
- const Type *ElTy = cast<PointerType>(DestPtr->getType())->getElementType();
+/// ZeroAggregate - Recursively traverse the potentially aggregate DestLoc,
+/// zero'ing all of the elements.
+static void ZeroAggregate(MemRef DestLoc, LLVMBuilder &Builder) {
+ const Type *ElTy =
+ cast<PointerType>(DestLoc.Ptr->getType())->getElementType();
if (ElTy->isFirstClassType()) {
- Builder.CreateStore(Constant::getNullValue(ElTy), DestPtr);
+ StoreInst *St = Builder.CreateStore(Constant::getNullValue(ElTy),
+ DestLoc.Ptr, DestLoc.Volatile);
+ St->setAlignment(DestLoc.Alignment);
} else if (const StructType *STy = dyn_cast<StructType>(ElTy)) {
+ const StructLayout *SL = getTargetData().getStructLayout(STy);
Constant *Zero = ConstantInt::get(Type::Int32Ty, 0);
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
Constant *Idx = ConstantInt::get(Type::Int32Ty, i);
Value *Idxs[2] = { Zero, Idx };
- ZeroAggregate(Builder.CreateGEP(DestPtr, Idxs, Idxs + 2, "tmp"),
- Builder);
+ Value *Ptr = Builder.CreateGEP(DestLoc.Ptr, Idxs, Idxs + 2, "tmp");
+ unsigned Alignment = MinAlign(DestLoc.Alignment, SL->getElementOffset(i));
+ ZeroAggregate(MemRef(Ptr, Alignment, DestLoc.Volatile), Builder);
}
} else {
const ArrayType *ATy = cast<ArrayType>(ElTy);
Constant *Zero = ConstantInt::get(Type::Int32Ty, 0);
+ unsigned EltSize = getTargetData().getABITypeSize(ATy->getElementType());
for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
Constant *Idx = ConstantInt::get(Type::Int32Ty, i);
Value *Idxs[2] = { Zero, Idx };
- ZeroAggregate(Builder.CreateGEP(DestPtr, Idxs, Idxs + 2, "tmp"),
- Builder);
+ Value *Ptr = Builder.CreateGEP(DestLoc.Ptr, Idxs, Idxs + 2, "tmp");
+ unsigned Alignment = MinAlign(DestLoc.Alignment, i * EltSize);
+ ZeroAggregate(MemRef(Ptr, Alignment, DestLoc.Volatile), Builder);
}
}
}
/// EmitAggregateZero - Zero the elements of DestPtr.
///
-void TreeToLLVM::EmitAggregateZero(Value *DestPtr, tree type) {
+void TreeToLLVM::EmitAggregateZero(MemRef DestLoc, tree type) {
// If the type is small, copy the elements instead of using a block copy.
if (TREE_CODE(TYPE_SIZE(type)) == INTEGER_CST &&
TREE_INT_CST_LOW(TYPE_SIZE_UNIT(type)) < 128) {
const Type *LLVMTy = ConvertType(type);
- DestPtr = CastToType(Instruction::BitCast, DestPtr,
- PointerType::get(LLVMTy));
-
- // FIXME: Is this always safe? The LLVM type might theoretically have holes
- // or might be suboptimal to copy this way. It may be better to copy the
- // structure by the GCCType's fields.
- ZeroAggregate(DestPtr, Builder);
- return;
+
+ // If the GCC type is not fully covered by the LLVM type, use memset. This
+ // can occur with unions etc.
+ if (!TheTypeConverter->GCCTypeOverlapsWithLLVMTypePadding(type, LLVMTy) &&
+ // Don't zero tons of tiny elements.
+ CountAggregateElements(LLVMTy) <= 8) {
+ DestLoc.Ptr = CastToType(Instruction::BitCast, DestLoc.Ptr,
+ PointerType::get(LLVMTy));
+
+ ZeroAggregate(DestLoc, Builder);
+ return;
+ }
}
- unsigned Alignment = TYPE_ALIGN_OK(type) ? (TYPE_ALIGN_UNIT(type) & ~0U) : 0;
- EmitMemSet(DestPtr, ConstantInt::get(Type::Int8Ty, 0),
- Emit(TYPE_SIZE_UNIT(type), 0), Alignment);
+ EmitMemSet(DestLoc.Ptr, ConstantInt::get(Type::Int8Ty, 0),
+ Emit(TYPE_SIZE_UNIT(type), 0), DestLoc.Alignment);
}
void TreeToLLVM::EmitMemCpy(Value *DestPtr, Value *SrcPtr, Value *Size,
@@ -1297,10 +1319,7 @@
CastToType(Instruction::BitCast, DestPtr, SBP),
CastToType(Instruction::BitCast, SrcPtr, SBP),
CastToSIntType(Size, IntPtr),
- // FIXME: (PR1781) The alignment of DestPtr and SrcPtr may be different. We
- // want the alignment of memcpy to be the minimal of the two. This isn't
- // currently possible. Setting to 1 for the meantime.
- ConstantInt::get(Type::Int32Ty, 1)
+ ConstantInt::get(Type::Int32Ty, Align)
};
Intrinsic::ID IID =
@@ -1316,10 +1335,7 @@
CastToType(Instruction::BitCast, DestPtr, SBP),
CastToType(Instruction::BitCast, SrcPtr, SBP),
CastToSIntType(Size, IntPtr),
- // FIXME: (PR1781) The alignment of DestPtr and SrcPtr may be different. We
- // want the alignment of memmove to be the minimal of the two. This isn't
- // currently possible. Setting to 1 for the meantime.
- ConstantInt::get(Type::Int32Ty, 1)
+ ConstantInt::get(Type::Int32Ty, Align)
};
Intrinsic::ID IID =
@@ -1335,10 +1351,7 @@
CastToType(Instruction::BitCast, DestPtr, SBP),
CastToSIntType(SrcVal, Type::Int8Ty),
CastToSIntType(Size, IntPtr),
- // FIXME: (PR1781) The alignment of DestPtr and SrcPtr may be different. We
- // want the alignment of memset to be the minimal of the two. This isn't
- // currently possible. Setting to 1 for the meantime.
- ConstantInt::get(Type::Int32Ty, 1)
+ ConstantInt::get(Type::Int32Ty, Align)
};
Intrinsic::ID IID =
@@ -1641,7 +1654,7 @@
}
}
-Value *TreeToLLVM::EmitBIND_EXPR(tree exp, Value *DestLoc) {
+Value *TreeToLLVM::EmitBIND_EXPR(tree exp, const MemRef *DestLoc) {
// Start region only if not top level.
if (TheDebugInfo && DECL_SAVED_TREE(FnDecl) != exp)
TheDebugInfo->EmitRegionStart(Fn, Builder.GetInsertBlock());
@@ -1684,21 +1697,21 @@
return Result;
}
-Value *TreeToLLVM::EmitSTATEMENT_LIST(tree exp, Value *DestLoc) {
+Value *TreeToLLVM::EmitSTATEMENT_LIST(tree exp, const MemRef *DestLoc) {
assert(DestLoc == 0 && "Does not return a value!");
// Convert each statement.
for (tree_stmt_iterator I = tsi_start(exp); !tsi_end_p(I); tsi_next(&I)) {
tree stmt = tsi_stmt(I);
- Value *DestLoc = 0;
+ MemRef DestLoc;
// If this stmt returns an aggregate value (e.g. a call whose result is
// ignored), create a temporary to receive the value. Note that we don't
// do this for MODIFY_EXPRs as an efficiency hack.
if (isAggregateTreeType(TREE_TYPE(stmt)) && TREE_CODE(stmt) != MODIFY_EXPR)
- DestLoc = CreateTemporary(ConvertType(TREE_TYPE(stmt)));
+ DestLoc = CreateTempLoc(ConvertType(TREE_TYPE(stmt)));
- Emit(stmt, DestLoc);
+ Emit(stmt, DestLoc.Ptr ? &DestLoc : NULL);
}
return 0;
}
@@ -1793,16 +1806,16 @@
}
-Value *TreeToLLVM::EmitRETURN_EXPR(tree exp, Value *DestLoc) {
+Value *TreeToLLVM::EmitRETURN_EXPR(tree exp, const MemRef *DestLoc) {
assert(DestLoc == 0 && "Does not return a value!");
if (TREE_OPERAND(exp, 0)) {
// Emit the expression, including the assignment to RESULT_DECL. If the
// operand is an aggregate value, create a temporary to evaluate it into.
- Value *DestLoc = 0;
+ MemRef DestLoc;
const Type *DestTy = ConvertType(TREE_TYPE(TREE_OPERAND(exp, 0)));
if (!DestTy->isFirstClassType() && TREE_CODE(exp) != MODIFY_EXPR)
- DestLoc = CreateTemporary(DestTy);
- Emit(TREE_OPERAND(exp, 0), DestLoc);
+ DestLoc = CreateTempLoc(DestTy);
+ Emit(TREE_OPERAND(exp, 0), DestLoc.Ptr ? &DestLoc : NULL);
}
// Emit a branch to the exit label.
@@ -2520,20 +2533,20 @@
/// FIXME: When merged with mainline, remove this code. The C++ front-end has
/// been fixed.
///
-void TreeToLLVM::EmitINTEGER_CST_Aggregate(tree exp, Value *DestLoc) {
+void TreeToLLVM::EmitINTEGER_CST_Aggregate(tree exp, const MemRef *DestLoc) {
tree type = TREE_TYPE(exp);
#ifndef NDEBUG
assert(TREE_CODE(type) == RECORD_TYPE && "Not an empty class!");
for (tree F = TYPE_FIELDS(type); F; F = TREE_CHAIN(F))
assert(TREE_CODE(F) != FIELD_DECL && "Not an empty struct/class!");
#endif
- EmitAggregateZero(DestLoc, type);
+ EmitAggregateZero(*DestLoc, type);
}
/// EmitLoadOfLValue - When an l-value expression is used in a context that
/// requires an r-value, this method emits the lvalue computation, then loads
/// the result.
-Value *TreeToLLVM::EmitLoadOfLValue(tree exp, Value *DestLoc) {
+Value *TreeToLLVM::EmitLoadOfLValue(tree exp, const MemRef *DestLoc) {
// If this is an SSA value, don't emit a load, just use the result.
if (isGCC_SSA_Temporary(exp)) {
assert(DECL_LLVM_SET_P(exp) && "Definition not found before use!");
@@ -2560,8 +2573,8 @@
LI->setAlignment(Alignment);
return LI;
} else {
- EmitAggregateCopy(DestLoc, LV.Ptr, TREE_TYPE(exp), false, isVolatile,
- Alignment);
+ EmitAggregateCopy(*DestLoc, MemRef(LV.Ptr, Alignment, isVolatile),
+ TREE_TYPE(exp));
return 0;
}
} else {
@@ -2622,7 +2635,7 @@
ConvertType(TREE_TYPE(exp)));
}
-Value *TreeToLLVM::EmitCALL_EXPR(tree exp, Value *DestLoc) {
+Value *TreeToLLVM::EmitCALL_EXPR(tree exp, const MemRef *DestLoc) {
// Check for a built-in function call. If we can lower it directly, do so
// now.
tree fndecl = get_callee_fndecl(exp);
@@ -2674,12 +2687,12 @@
CallingConv::ID &CallingConvention;
bool isStructRet;
LLVMBuilder &Builder;
- Value *DestLoc;
+ const MemRef *DestLoc;
std::vector<Value*> LocStack;
FunctionCallArgumentConversion(tree exp, SmallVector<Value*, 16> &ops,
CallingConv::ID &cc,
- LLVMBuilder &b, Value *destloc)
+ LLVMBuilder &b, const MemRef *destloc)
: CallExpression(exp), CallOperands(ops), CallingConvention(cc),
Builder(b), DestLoc(destloc) {
CallingConvention = CallingConv::C;
@@ -2740,12 +2753,16 @@
// Otherwise, we need to pass a buffer to return into. If the caller uses
// the result, DestLoc will be set. If it ignores it, it could be unset,
// in which case we need to create a dummy buffer.
- if (DestLoc == 0)
- DestLoc = TheTreeToLLVM->CreateTemporary(PtrArgTy->getElementType());
- else
- assert(PtrArgTy == DestLoc->getType());
- CallOperands.push_back(DestLoc);
- }
+ // FIXME: The alignment and volatility of the buffer are being ignored!
+ Value *DestPtr;
+ if (DestLoc == 0) {
+ DestPtr = TheTreeToLLVM->CreateTemporary(PtrArgTy->getElementType());
+ } else {
+ DestPtr = DestLoc->Ptr;
+ assert(PtrArgTy == DestPtr->getType());
+ }
+ CallOperands.push_back(DestPtr);
+ }
void HandleScalarArgument(const llvm::Type *LLVMTy, tree type) {
assert(!LocStack.empty());
@@ -2778,7 +2795,7 @@
/// EmitCallOf - Emit a call to the specified callee with the operands specified
/// in the CALL_EXP 'exp'. If the result of the call is a scalar, return the
/// result, otherwise store it in DestLoc.
-Value *TreeToLLVM::EmitCallOf(Value *Callee, tree exp, Value *DestLoc) {
+Value *TreeToLLVM::EmitCallOf(Value *Callee, tree exp, const MemRef *DestLoc) {
// Determine if we need to generate an invoke instruction (instead of a simple
// call) and if so, what the exception destination will be.
BasicBlock *UnwindBlock = 0;
@@ -2906,8 +2923,9 @@
if (!DestLoc)
return Call; // Normal scalar return.
- DestLoc = BitCastToType(DestLoc, PointerType::get(Call->getType()));
- Builder.CreateStore(Call, DestLoc);
+ Value *Ptr = BitCastToType(DestLoc->Ptr, PointerType::get(Call->getType()));
+ StoreInst *St = Builder.CreateStore(Call, Ptr, DestLoc->Volatile);
+ St->setAlignment(DestLoc->Alignment);
return 0;
}
@@ -2972,7 +2990,7 @@
/// EmitMODIFY_EXPR - Note that MODIFY_EXPRs are rvalues only!
///
-Value *TreeToLLVM::EmitMODIFY_EXPR(tree exp, Value *DestLoc) {
+Value *TreeToLLVM::EmitMODIFY_EXPR(tree exp, const MemRef *DestLoc) {
// If this is the definition of an SSA variable, set its DECL_LLVM to the
// RHS.
bool Op0Signed = !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp, 0)));
@@ -3025,27 +3043,21 @@
}
// Non-bitfield aggregate value.
+ MemRef NewLoc(LV.Ptr, Alignment, isVolatile);
+
if (DestLoc) {
- Emit(TREE_OPERAND(exp, 1), LV.Ptr);
- EmitAggregateCopy(DestLoc, LV.Ptr, TREE_TYPE(exp), isVolatile, false,
- Alignment);
- } else if (!isVolatile && TREE_CODE(TREE_OPERAND(exp, 0)) != RESULT_DECL) {
- Emit(TREE_OPERAND(exp, 1), LV.Ptr);
+ Emit(TREE_OPERAND(exp, 1), &NewLoc);
+ EmitAggregateCopy(*DestLoc, NewLoc, TREE_TYPE(exp));
+ } else if (TREE_CODE(TREE_OPERAND(exp, 0)) != RESULT_DECL) {
+ Emit(TREE_OPERAND(exp, 1), &NewLoc);
} else {
- // Need to do a volatile store into TREE_OPERAND(exp, 1). To do this, we
- // emit it into a temporary memory location, then do a volatile copy into
- // the real destination. This is probably suboptimal in some cases, but
- // it gets the volatile memory access right. It would be better if the
- // destloc pointer of 'Emit' had a flag that indicated it should be
- // volatile.
// We do this for stores into RESULT_DECL because it is possible for that
// memory area to overlap with the object being stored into it; see
// gcc.c-torture/execute/20010124-1.c.
- Value *Tmp = CreateTemporary(ConvertType(TREE_TYPE(TREE_OPERAND(exp,1))));
- Emit(TREE_OPERAND(exp, 1), Tmp);
- EmitAggregateCopy(LV.Ptr, Tmp, TREE_TYPE(TREE_OPERAND(exp,1)),
- isVolatile, false, Alignment);
+ MemRef Tmp = CreateTempLoc(ConvertType(TREE_TYPE(TREE_OPERAND(exp,1))));
+ Emit(TREE_OPERAND(exp, 1), &Tmp);
+ EmitAggregateCopy(NewLoc, Tmp, TREE_TYPE(TREE_OPERAND(exp,1)));
}
return 0;
}
@@ -3088,7 +3100,7 @@
return RetVal;
}
-Value *TreeToLLVM::EmitNOP_EXPR(tree exp, Value *DestLoc) {
+Value *TreeToLLVM::EmitNOP_EXPR(tree exp, const MemRef *DestLoc) {
if (TREE_CODE(TREE_TYPE(exp)) == VOID_TYPE && // deleted statement.
TREE_CODE(TREE_OPERAND(exp, 0)) == INTEGER_CST)
return 0;
@@ -3105,21 +3117,24 @@
return CastToAnyType(OpVal, OpIsSigned, Ty, ExpIsSigned);
} else if (isAggregateTreeType(TREE_TYPE(Op))) {
// Aggregate to aggregate copy.
- DestLoc = CastToType(Instruction::BitCast, DestLoc, PointerType::get(Ty));
- Value *OpVal = Emit(Op, DestLoc);
+ MemRef NewLoc = *DestLoc;
+ NewLoc.Ptr =
+ CastToType(Instruction::BitCast, DestLoc->Ptr, PointerType::get(Ty));
+ Value *OpVal = Emit(Op, &NewLoc);
assert(OpVal == 0 && "Shouldn't cast scalar to aggregate!");
return 0;
}
// Scalar to aggregate copy.
Value *OpVal = Emit(Op, 0);
- DestLoc = CastToType(Instruction::BitCast, DestLoc,
- PointerType::get(OpVal->getType()));
- Builder.CreateStore(OpVal, DestLoc);
+ Value *Ptr = CastToType(Instruction::BitCast, DestLoc->Ptr,
+ PointerType::get(OpVal->getType()));
+ StoreInst *St = Builder.CreateStore(OpVal, Ptr, DestLoc->Volatile);
+ St->setAlignment(DestLoc->Alignment);
return 0;
}
-Value *TreeToLLVM::EmitCONVERT_EXPR(tree exp, Value *DestLoc) {
+Value *TreeToLLVM::EmitCONVERT_EXPR(tree exp, const MemRef *DestLoc) {
assert(!DestLoc && "Cannot handle aggregate casts!");
Value *Op = Emit(TREE_OPERAND(exp, 0), 0);
bool OpIsSigned = !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp, 0)));
@@ -3127,20 +3142,25 @@
return CastToAnyType(Op, OpIsSigned, ConvertType(TREE_TYPE(exp)),ExpIsSigned);
}
-Value *TreeToLLVM::EmitVIEW_CONVERT_EXPR(tree exp, Value *DestLoc) {
+Value *TreeToLLVM::EmitVIEW_CONVERT_EXPR(tree exp, const MemRef *DestLoc) {
tree Op = TREE_OPERAND(exp, 0);
if (isAggregateTreeType(TREE_TYPE(Op))) {
const Type *OpTy = ConvertType(TREE_TYPE(Op));
- Value *Target = DestLoc ?
+ MemRef Target;
+ if (DestLoc) {
// This is an aggregate-to-agg VIEW_CONVERT_EXPR, just evaluate in place.
- CastToType(Instruction::BitCast, DestLoc, PointerType::get(OpTy)) :
+ Target = *DestLoc;
+ Target.Ptr =
+ CastToType(Instruction::BitCast, DestLoc->Ptr, PointerType::get(OpTy));
+ } else {
// This is an aggregate-to-scalar VIEW_CONVERT_EXPR, evaluate, then load.
- CreateTemporary(OpTy);
+ Target = CreateTempLoc(OpTy);
+ }
switch (TREE_CODE(Op)) {
default: {
- Value *OpVal = Emit(Op, Target);
+ Value *OpVal = Emit(Op, &Target);
assert(OpVal == 0 && "Expected an aggregate operand!");
break;
}
@@ -3164,16 +3184,17 @@
bool isVolatile = TREE_THIS_VOLATILE(Op);
unsigned Alignment = expr_align(Op) / 8;
- EmitAggregateCopy(Target, LV.Ptr, TREE_TYPE(exp), false, isVolatile,
- Alignment);
+ EmitAggregateCopy(Target, MemRef(LV.Ptr, Alignment, isVolatile),
+ TREE_TYPE(exp));
break;
}
if (DestLoc)
return 0;
+ // Target holds the temporary created above.
const Type *ExpTy = ConvertType(TREE_TYPE(exp));
- return Builder.CreateLoad(CastToType(Instruction::BitCast, Target,
+ return Builder.CreateLoad(CastToType(Instruction::BitCast, Target.Ptr,
PointerType::get(ExpTy)), "tmp");
}
@@ -3182,9 +3203,10 @@
// then store into DestLoc.
Value *OpVal = Emit(Op, 0);
assert(OpVal && "Expected a scalar result!");
- DestLoc = CastToType(Instruction::BitCast, DestLoc,
- PointerType::get(OpVal->getType()));
- Builder.CreateStore(OpVal, DestLoc);
+ Value *Ptr = CastToType(Instruction::BitCast, DestLoc->Ptr,
+ PointerType::get(OpVal->getType()));
+ StoreInst *St = Builder.CreateStore(OpVal, Ptr, DestLoc->Volatile);
+ St->setAlignment(DestLoc->Alignment);
return 0;
}
@@ -3210,7 +3232,7 @@
return Builder.CreateBitCast(OpVal, DestTy, "tmp");
}
-Value *TreeToLLVM::EmitNEGATE_EXPR(tree exp, Value *DestLoc) {
+Value *TreeToLLVM::EmitNEGATE_EXPR(tree exp, const MemRef *DestLoc) {
if (!DestLoc) {
Value *V = Emit(TREE_OPERAND(exp, 0), 0);
if (!isa<PointerType>(V->getType()))
@@ -3224,31 +3246,33 @@
}
// Emit the operand to a temporary.
- const Type *ComplexTy=cast<PointerType>(DestLoc->getType())->getElementType();
- Value *Tmp = CreateTemporary(ComplexTy);
- Emit(TREE_OPERAND(exp, 0), Tmp);
+ const Type *ComplexTy =
+ cast<PointerType>(DestLoc->Ptr->getType())->getElementType();
+ MemRef Tmp = CreateTempLoc(ComplexTy);
+ Emit(TREE_OPERAND(exp, 0), &Tmp);
// Handle complex numbers: -(a+ib) = -a + i*-b
Value *R, *I;
- EmitLoadFromComplex(R, I, Tmp, TREE_THIS_VOLATILE(TREE_OPERAND(exp, 0)));
+ EmitLoadFromComplex(R, I, Tmp);
R = Builder.CreateNeg(R, "tmp");
I = Builder.CreateNeg(I, "tmp");
- EmitStoreToComplex(DestLoc, R, I, false);
+ EmitStoreToComplex(*DestLoc, R, I);
return 0;
}
-Value *TreeToLLVM::EmitCONJ_EXPR(tree exp, Value *DestLoc) {
+Value *TreeToLLVM::EmitCONJ_EXPR(tree exp, const MemRef *DestLoc) {
assert(DestLoc && "CONJ_EXPR only applies to complex numbers.");
// Emit the operand to a temporary.
- const Type *ComplexTy=cast<PointerType>(DestLoc->getType())->getElementType();
- Value *Tmp = CreateTemporary(ComplexTy);
- Emit(TREE_OPERAND(exp, 0), Tmp);
+ const Type *ComplexTy =
+ cast<PointerType>(DestLoc->Ptr->getType())->getElementType();
+ MemRef Tmp = CreateTempLoc(ComplexTy);
+ Emit(TREE_OPERAND(exp, 0), &Tmp);
// Handle complex numbers: ~(a+ib) = a + i*-b
Value *R, *I;
- EmitLoadFromComplex(R, I, Tmp, TREE_THIS_VOLATILE(TREE_OPERAND(exp, 0)));
+ EmitLoadFromComplex(R, I, Tmp);
I = Builder.CreateNeg(I, "tmp");
- EmitStoreToComplex(DestLoc, R, I, false);
+ EmitStoreToComplex(*DestLoc, R, I);
return 0;
}
@@ -3329,7 +3353,7 @@
/// EmitBinOp - 'exp' is a binary operator.
///
-Value *TreeToLLVM::EmitBinOp(tree exp, Value *DestLoc, unsigned Opc) {
+Value *TreeToLLVM::EmitBinOp(tree exp, const MemRef *DestLoc, unsigned Opc) {
const Type *Ty = ConvertType(TREE_TYPE(exp));
if (isa<PointerType>(Ty))
return EmitPtrBinOp(exp, Opc); // Pointer arithmetic!
@@ -3364,7 +3388,7 @@
TREE_CODE(TREE_OPERAND(exp, 1)) == INTEGER_CST) {
int64_t Offset = getINTEGER_CSTVal(TREE_OPERAND(exp, 1));
- // If POINTER_SIZE is 32-bits and the offset is signed, sign extend the offset.
+ // If POINTER_SIZE is 32-bits and the offset is signed, sign extend it.
if (POINTER_SIZE == 32 && !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp, 1))))
Offset = (Offset << 32) >> 32;
@@ -3420,7 +3444,7 @@
}
-Value *TreeToLLVM::EmitShiftOp(tree exp, Value *DestLoc, unsigned Opc) {
+Value *TreeToLLVM::EmitShiftOp(tree exp, const MemRef *DestLoc, unsigned Opc) {
assert(DestLoc == 0 && "aggregate shift?");
const Type *Ty = ConvertType(TREE_TYPE(exp));
assert(!isa<PointerType>(Ty) && "Pointer arithmetic!?");
@@ -3486,7 +3510,7 @@
TREE_CODE(exp) == MAX_EXPR ? "max" : "min");
}
-Value *TreeToLLVM::EmitEXACT_DIV_EXPR(tree exp, Value *DestLoc) {
+Value *TreeToLLVM::EmitEXACT_DIV_EXPR(tree exp, const MemRef *DestLoc) {
// Unsigned EXACT_DIV_EXPR -> normal udiv.
if (TYPE_UNSIGNED(TREE_TYPE(exp)))
return EmitBinOp(exp, DestLoc, Instruction::UDiv);
@@ -3510,7 +3534,7 @@
return EmitBinOp(exp, DestLoc, Instruction::SDiv);
}
-Value *TreeToLLVM::EmitFLOOR_MOD_EXPR(tree exp, Value *DestLoc) {
+Value *TreeToLLVM::EmitFLOOR_MOD_EXPR(tree exp, const MemRef *DestLoc) {
// Notation: FLOOR_MOD_EXPR <-> Mod, TRUNC_MOD_EXPR <-> Rem.
// We express Mod in terms of Rem as follows: if RHS exactly divides LHS,
@@ -3700,7 +3724,7 @@
/// Reads from register variables are handled by emitting an inline asm node
/// that copies the value out of the specified register.
-Value *TreeToLLVM::EmitReadOfRegisterVariable(tree decl, Value *DestLoc) {
+Value *TreeToLLVM::EmitReadOfRegisterVariable(tree decl, const MemRef *DestLoc){
const Type *Ty = ConvertType(TREE_TYPE(decl));
// If there was an error, return something bogus.
@@ -4253,7 +4277,8 @@
/// This method returns true if the builtin is handled, otherwise false.
///
bool TreeToLLVM::EmitFrontendExpandedBuiltinCall(tree exp, tree fndecl,
- Value *DestLoc,Value *&Result){
+ const MemRef *DestLoc,
+ Value *&Result){
#ifdef LLVM_TARGET_INTRINSIC_LOWER
// Get the result type and operands in an easy to consume format.
const Type *ResultType = ConvertType(TREE_TYPE(TREE_TYPE(fndecl)));
@@ -4280,7 +4305,7 @@
/// the call in a special way, setting Result to the scalar result if necessary.
/// If we can't handle the builtin, return false, otherwise return true.
bool TreeToLLVM::EmitBuiltinCall(tree exp, tree fndecl,
- Value *DestLoc, Value *&Result) {
+ const MemRef *DestLoc, Value *&Result) {
if (DECL_BUILT_IN_CLASS(fndecl) == BUILT_IN_MD) {
unsigned FnCode = DECL_FUNCTION_CODE(fndecl);
if (TargetBuiltinCache.size() <= FnCode)
@@ -4943,7 +4968,8 @@
return true;
}
-bool TreeToLLVM::EmitBuiltinExpect(tree exp, Value *DestLoc, Value *&Result) {
+bool TreeToLLVM::EmitBuiltinExpect(tree exp, const MemRef *DestLoc,
+ Value *&Result) {
// Ignore the hint for now, just expand the expr. This is safe, but not
// optimal.
tree arglist = TREE_OPERAND(exp, 1);
@@ -5009,16 +5035,18 @@
// If the target has aggregate valists, emit the srcval directly into a
// temporary.
const Type *VAListTy = cast<PointerType>(Arg1->getType())->getElementType();
- Arg2 = CreateTemporary(VAListTy);
- Emit(Arg2T, Arg2);
+ MemRef DestLoc = CreateTempLoc(VAListTy);
+ Emit(Arg2T, &DestLoc);
+ Arg2 = DestLoc.Ptr;
}
static const Type *VPTy = PointerType::get(Type::Int8Ty);
+ // FIXME: This ignores alignment and volatility of the arguments.
SmallVector<Value *, 2> Args;
Args.push_back(CastToType(Instruction::BitCast, Arg1, VPTy));
Args.push_back(CastToType(Instruction::BitCast, Arg2, VPTy));
-
+
Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::vacopy),
Args.begin(), Args.end());
return true;
@@ -5054,62 +5082,69 @@
//===----------------------------------------------------------------------===//
void TreeToLLVM::EmitLoadFromComplex(Value *&Real, Value *&Imag,
- Value *SrcComplex, bool isVolatile) {
+ MemRef SrcComplex) {
Value *I0 = ConstantInt::get(Type::Int32Ty, 0);
Value *I1 = ConstantInt::get(Type::Int32Ty, 1);
Value *Idxs[2] = { I0, I0 };
- Value *RealPtr = Builder.CreateGEP(SrcComplex, Idxs, Idxs + 2, "real");
- Real = Builder.CreateLoad(RealPtr, isVolatile, "real");
+ Value *RealPtr = Builder.CreateGEP(SrcComplex.Ptr, Idxs, Idxs + 2, "real");
+ Real = Builder.CreateLoad(RealPtr, SrcComplex.Volatile, "real");
+ cast<LoadInst>(Real)->setAlignment(SrcComplex.Alignment);
Idxs[1] = I1;
- Value *ImagPtr = Builder.CreateGEP(SrcComplex, Idxs, Idxs + 2, "real");
- Imag = Builder.CreateLoad(ImagPtr, isVolatile, "imag");
+ Value *ImagPtr = Builder.CreateGEP(SrcComplex.Ptr, Idxs, Idxs + 2, "real");
+ Imag = Builder.CreateLoad(ImagPtr, SrcComplex.Volatile, "imag");
+ cast<LoadInst>(Imag)->setAlignment(
+ MinAlign(SrcComplex.Alignment, TD.getABITypeSize(Real->getType()))
+ );
}
-void TreeToLLVM::EmitStoreToComplex(Value *DestComplex, Value *Real,
- Value *Imag, bool isVolatile) {
+void TreeToLLVM::EmitStoreToComplex(MemRef DestComplex, Value *Real,
+ Value *Imag) {
Value *I0 = ConstantInt::get(Type::Int32Ty, 0);
Value *I1 = ConstantInt::get(Type::Int32Ty, 1);
Value *Idxs[2] = { I0, I0 };
+ StoreInst *St;
- Value *RealPtr = Builder.CreateGEP(DestComplex, Idxs, Idxs + 2, "real");
- Builder.CreateStore(Real, RealPtr, isVolatile);
+ Value *RealPtr = Builder.CreateGEP(DestComplex.Ptr, Idxs, Idxs + 2, "real");
+ St = Builder.CreateStore(Real, RealPtr, DestComplex.Volatile);
+ St->setAlignment(DestComplex.Alignment);
Idxs[1] = I1;
- Value *ImagPtr = Builder.CreateGEP(DestComplex, Idxs, Idxs + 2, "real");
- Builder.CreateStore(Imag, ImagPtr, isVolatile);
+ Value *ImagPtr = Builder.CreateGEP(DestComplex.Ptr, Idxs, Idxs + 2, "real");
+ St = Builder.CreateStore(Imag, ImagPtr, DestComplex.Volatile);
+ St->setAlignment(
+ MinAlign(DestComplex.Alignment, TD.getABITypeSize(Real->getType()))
+ );
}
-void TreeToLLVM::EmitCOMPLEX_EXPR(tree exp, Value *DestLoc) {
+void TreeToLLVM::EmitCOMPLEX_EXPR(tree exp, const MemRef *DestLoc) {
Value *Real = Emit(TREE_OPERAND(exp, 0), 0);
Value *Imag = Emit(TREE_OPERAND(exp, 1), 0);
- EmitStoreToComplex(DestLoc, Real, Imag, false);
+ EmitStoreToComplex(*DestLoc, Real, Imag);
}
-void TreeToLLVM::EmitCOMPLEX_CST(tree exp, Value *DestLoc) {
+void TreeToLLVM::EmitCOMPLEX_CST(tree exp, const MemRef *DestLoc) {
Value *Real = Emit(TREE_REALPART(exp), 0);
Value *Imag = Emit(TREE_IMAGPART(exp), 0);
- EmitStoreToComplex(DestLoc, Real, Imag, false);
+ EmitStoreToComplex(*DestLoc, Real, Imag);
}
// EmitComplexBinOp - Note that this operates on binops like ==/!=, which return
// a bool, not a complex value.
-Value *TreeToLLVM::EmitComplexBinOp(tree exp, Value *DestLoc) {
+Value *TreeToLLVM::EmitComplexBinOp(tree exp, const MemRef *DestLoc) {
const Type *ComplexTy = ConvertType(TREE_TYPE(TREE_OPERAND(exp, 0)));
-
- Value *LHSTmp = CreateTemporary(ComplexTy);
- Value *RHSTmp = CreateTemporary(ComplexTy);
- Emit(TREE_OPERAND(exp, 0), LHSTmp);
- Emit(TREE_OPERAND(exp, 1), RHSTmp);
-
+
+ MemRef LHSTmp = CreateTempLoc(ComplexTy);
+ MemRef RHSTmp = CreateTempLoc(ComplexTy);
+ Emit(TREE_OPERAND(exp, 0), &LHSTmp);
+ Emit(TREE_OPERAND(exp, 1), &RHSTmp);
+
Value *LHSr, *LHSi;
- EmitLoadFromComplex(LHSr, LHSi, LHSTmp,
- TREE_THIS_VOLATILE(TREE_OPERAND(exp, 0)));
+ EmitLoadFromComplex(LHSr, LHSi, LHSTmp);
Value *RHSr, *RHSi;
- EmitLoadFromComplex(RHSr, RHSi, RHSTmp,
- TREE_THIS_VOLATILE(TREE_OPERAND(exp, 1)));
+ EmitLoadFromComplex(RHSr, RHSi, RHSTmp);
Value *DSTr, *DSTi;
switch (TREE_CODE(exp)) {
@@ -5169,7 +5204,7 @@
return Builder.CreateOr(DSTr, DSTi, "tmp");
}
- EmitStoreToComplex(DestLoc, DSTr, DSTi, false);
+ EmitStoreToComplex(*DestLoc, DSTr, DSTi);
return 0;
}
@@ -5563,7 +5598,7 @@
/// EmitCONSTRUCTOR - emit the constructor into the location specified by
/// DestLoc.
-Value *TreeToLLVM::EmitCONSTRUCTOR(tree exp, Value *DestLoc) {
+Value *TreeToLLVM::EmitCONSTRUCTOR(tree exp, const MemRef *DestLoc) {
tree type = TREE_TYPE(exp);
const Type *Ty = ConvertType(type);
if (const VectorType *PTy = dyn_cast<VectorType>(Ty)) {
@@ -5592,7 +5627,7 @@
assert(!Ty->isFirstClassType() && "Constructor for scalar type??");
// Start out with the value zero'd out.
- EmitAggregateZero(DestLoc, type);
+ EmitAggregateZero(*DestLoc, type);
tree elt = CONSTRUCTOR_ELTS(exp);
switch (TREE_CODE(TREE_TYPE(exp))) {
@@ -5617,9 +5652,10 @@
} else {
// Scalar value. Evaluate to a register, then do the store.
Value *V = Emit(TREE_VALUE(elt), 0);
- DestLoc = CastToType(Instruction::BitCast, DestLoc,
- PointerType::get(V->getType()));
- Builder.CreateStore(V, DestLoc);
+ Value *Ptr = CastToType(Instruction::BitCast, DestLoc->Ptr,
+ PointerType::get(V->getType()));
+ StoreInst *St = Builder.CreateStore(V, Ptr, DestLoc->Volatile);
+ St->setAlignment(DestLoc->Alignment);
}
break;
}
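Note on the alignment arithmetic above: the recursive CopyAggregate/ZeroAggregate
routines compute each element's alignment as MinAlign(ParentAlignment,
ElementOffset), where MinAlign (hence the new include of
llvm/Support/MathExtras.h) returns the largest power of two dividing both
arguments. A small standalone sketch of that computation (minAlign here is a
hypothetical reimplementation for illustration, not the LLVM header):

  #include <cstdio>

  // Hypothetical reimplementation of llvm::MinAlign from MathExtras.h:
  // the lowest set bit of A|B is the largest power of two dividing both.
  static unsigned minAlign(unsigned A, unsigned B) {
    return (A | B) & (~(A | B) + 1);
  }

  int main() {
    // In an 8-byte-aligned struct, the element at offset 4 is only
    // guaranteed 4-byte alignment, while the one at offset 16 keeps 8.
    std::printf("%u\n", minAlign(8, 4));  // 4
    std::printf("%u\n", minAlign(8, 16)); // 8
    std::printf("%u\n", minAlign(8, 0));  // 8 (offset 0 keeps full alignment)
    return 0;
  }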
Modified: llvm-gcc-4.0/trunk/gcc/llvm-internal.h
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.0/trunk/gcc/llvm-internal.h?rev=44114&r1=44113&r2=44114&view=diff
==============================================================================
--- llvm-gcc-4.0/trunk/gcc/llvm-internal.h (original)
+++ llvm-gcc-4.0/trunk/gcc/llvm-internal.h Wed Nov 14 07:26:03 2007
@@ -202,6 +202,21 @@
/// false.
bool ValidateRegisterVariable(tree_node *decl);
+/// MemRef - This struct holds the information needed for a memory access: a
+/// pointer to the memory, its alignment and whether the access is volatile.
+struct MemRef {
+ Value *Ptr;
+ unsigned Alignment;
+ bool Volatile;
+
+ MemRef() : Ptr(0), Alignment(0), Volatile(false) {}
+ MemRef(Value *P, unsigned A, bool V)
+ : Ptr(P), Alignment(A), Volatile(V) {
+ // Allowing alignment 0 would complicate calculations, so forbid it.
+ assert(A && !(A & (A - 1)) && "Alignment not a power of 2!");
+ }
+};
+
/// LValue - This struct represents an lvalue in the program. In particular,
/// the Ptr member indicates the memory that the lvalue lives in. If this is
/// a bitfield reference, BitStart indicates the first bit in the memory that
@@ -375,7 +390,7 @@
/// expression that fits into an LLVM scalar value, the result is returned. If
/// the result is an aggregate, it is stored into the location specified by
/// DestLoc.
- Value *Emit(tree_node *exp, Value *DestLoc);
+ Value *Emit(tree_node *exp, const MemRef *DestLoc);
/// EmitLV - Convert the specified l-value tree node to LLVM code, returning
/// the address of the result.
@@ -437,14 +452,13 @@
/// manage fixups for EH info.
void EmitBlock(BasicBlock *BB);
- /// EmitAggregateCopy - Copy the elements from SrcPtr to DestPtr, using the
+ /// EmitAggregateCopy - Copy the elements from SrcPtr to DestLoc, using the
/// GCC type specified by GCCType to know which elements to copy.
- void EmitAggregateCopy(Value *DestPtr, Value *SrcPtr, tree_node *GCCType,
- bool isDstVolatile, bool isSrcVolatile,
- unsigned Alignment);
- /// EmitAggregateZero - Zero the elements of DestPtr.
+ void EmitAggregateCopy(MemRef DestLoc, MemRef SrcLoc, tree_node *GCCType);
+
+ /// EmitAggregateZero - Zero the elements of DestLoc.
///
- void EmitAggregateZero(Value *DestPtr, tree_node *GCCType);
+ void EmitAggregateZero(MemRef DestLoc, tree_node *GCCType);
/// EmitMemCpy/EmitMemMove/EmitMemSet - Emit an llvm.memcpy/llvm.memmove or
/// llvm.memset call with the specified operands.
@@ -462,6 +476,9 @@
/// that works.
void AddBranchFixup(BranchInst *BI, bool isExceptionEdge);
private:
+ /// CreateTempLoc - Like CreateTemporary, but returns a MemRef.
+ MemRef CreateTempLoc(const Type *Ty);
+
void EmitAutomaticVariableDecl(tree_node *decl);
/// isNoopCast - Return true if a cast from V to Ty does not change any bits.
@@ -492,8 +509,8 @@
// characteristics.
// Basic lists and binding scopes.
- Value *EmitBIND_EXPR(tree_node *exp, Value *DestLoc);
- Value *EmitSTATEMENT_LIST(tree_node *exp, Value *DestLoc);
+ Value *EmitBIND_EXPR(tree_node *exp, const MemRef *DestLoc);
+ Value *EmitSTATEMENT_LIST(tree_node *exp, const MemRef *DestLoc);
// Helpers for exception handling.
void EmitProtectedCleanups(tree_node *cleanups);
@@ -502,7 +519,7 @@
// Control flow.
Value *EmitLABEL_EXPR(tree_node *exp);
Value *EmitGOTO_EXPR(tree_node *exp);
- Value *EmitRETURN_EXPR(tree_node *exp, Value *DestLoc);
+ Value *EmitRETURN_EXPR(tree_node *exp, const MemRef *DestLoc);
Value *EmitCOND_EXPR(tree_node *exp);
Value *EmitSWITCH_EXPR(tree_node *exp);
Value *EmitTRY_EXPR(tree_node *exp);
@@ -511,39 +528,39 @@
Value *EmitEH_FILTER_EXPR(tree_node *exp);
// Expressions.
- void EmitINTEGER_CST_Aggregate(tree_node *exp, Value *DestLoc);
- Value *EmitLoadOfLValue(tree_node *exp, Value *DestLoc);
- Value *EmitOBJ_TYPE_REF(tree_node *exp, Value *DestLoc);
+ void EmitINTEGER_CST_Aggregate(tree_node *exp, const MemRef *DestLoc);
+ Value *EmitLoadOfLValue(tree_node *exp, const MemRef *DestLoc);
+ Value *EmitOBJ_TYPE_REF(tree_node *exp, const MemRef *DestLoc);
Value *EmitADDR_EXPR(tree_node *exp);
Value *EmitOBJ_TYPE_REF(tree_node *exp);
- Value *EmitCALL_EXPR(tree_node *exp, Value *DestLoc);
- Value *EmitCallOf(Value *Callee, tree_node *exp, Value *DestLoc);
- Value *EmitMODIFY_EXPR(tree_node *exp, Value *DestLoc);
- Value *EmitNOP_EXPR(tree_node *exp, Value *DestLoc);
- Value *EmitCONVERT_EXPR(tree_node *exp, Value *DestLoc);
- Value *EmitVIEW_CONVERT_EXPR(tree_node *exp, Value *DestLoc);
- Value *EmitNEGATE_EXPR(tree_node *exp, Value *DestLoc);
- Value *EmitCONJ_EXPR(tree_node *exp, Value *DestLoc);
+ Value *EmitCALL_EXPR(tree_node *exp, const MemRef *DestLoc);
+ Value *EmitCallOf(Value *Callee, tree_node *exp, const MemRef *DestLoc);
+ Value *EmitMODIFY_EXPR(tree_node *exp, const MemRef *DestLoc);
+ Value *EmitNOP_EXPR(tree_node *exp, const MemRef *DestLoc);
+ Value *EmitCONVERT_EXPR(tree_node *exp, const MemRef *DestLoc);
+ Value *EmitVIEW_CONVERT_EXPR(tree_node *exp, const MemRef *DestLoc);
+ Value *EmitNEGATE_EXPR(tree_node *exp, const MemRef *DestLoc);
+ Value *EmitCONJ_EXPR(tree_node *exp, const MemRef *DestLoc);
Value *EmitABS_EXPR(tree_node *exp);
Value *EmitBIT_NOT_EXPR(tree_node *exp);
Value *EmitTRUTH_NOT_EXPR(tree_node *exp);
- Value *EmitEXACT_DIV_EXPR(tree_node *exp, Value *DestLoc);
+ Value *EmitEXACT_DIV_EXPR(tree_node *exp, const MemRef *DestLoc);
Value *EmitCompare(tree_node *exp, unsigned UIPred, unsigned SIPred,
unsigned FPOpc);
- Value *EmitBinOp(tree_node *exp, Value *DestLoc, unsigned Opc);
+ Value *EmitBinOp(tree_node *exp, const MemRef *DestLoc, unsigned Opc);
Value *EmitPtrBinOp(tree_node *exp, unsigned Opc);
Value *EmitTruthOp(tree_node *exp, unsigned Opc);
- Value *EmitShiftOp(tree_node *exp, Value *DestLoc, unsigned Opc);
+ Value *EmitShiftOp(tree_node *exp, const MemRef *DestLoc, unsigned Opc);
Value *EmitRotateOp(tree_node *exp, unsigned Opc1, unsigned Opc2);
Value *EmitMinMaxExpr(tree_node *exp, unsigned UIPred, unsigned SIPred,
unsigned Opc);
- Value *EmitFLOOR_MOD_EXPR(tree_node *exp, Value *DestLoc);
+ Value *EmitFLOOR_MOD_EXPR(tree_node *exp, const MemRef *DestLoc);
Value *EmitCEIL_DIV_EXPR(tree_node *exp);
Value *EmitROUND_DIV_EXPR(tree_node *exp);
// Inline Assembly and Register Variables.
Value *EmitASM_EXPR(tree_node *exp);
- Value *EmitReadOfRegisterVariable(tree_node *vardecl, Value *DestLoc);
+ Value *EmitReadOfRegisterVariable(tree_node *vardecl, const MemRef *DestLoc);
void EmitModifyOfRegisterVariable(tree_node *vardecl, Value *RHS);
// Helpers for Builtin Function Expansion.
@@ -553,9 +570,9 @@
// Builtin Function Expansion.
bool EmitBuiltinCall(tree_node *exp, tree_node *fndecl,
- Value *DestLoc, Value *&Result);
+ const MemRef *DestLoc, Value *&Result);
bool EmitFrontendExpandedBuiltinCall(tree_node *exp, tree_node *fndecl,
- Value *DestLoc, Value *&Result);
+ const MemRef *DestLoc, Value *&Result);
bool EmitBuiltinUnaryIntOp(Value *InVal, Value *&Result, Intrinsic::ID Id);
Value *EmitBuiltinUnaryFPOp(Value *InVal, const char *F32Name,
const char *F64Name, const char *LongDoubleName);
@@ -564,7 +581,7 @@
bool EmitBuiltinConstantP(tree_node *exp, Value *&Result);
bool EmitBuiltinAlloca(tree_node *exp, Value *&Result);
- bool EmitBuiltinExpect(tree_node *exp, Value *DestLoc, Value *&Result);
+ bool EmitBuiltinExpect(tree_node *exp, const MemRef *DestLoc, Value *&Result);
bool EmitBuiltinExtendPointer(tree_node *exp, Value *&Result);
bool EmitBuiltinVAStart(tree_node *exp);
bool EmitBuiltinVAEnd(tree_node *exp);
@@ -587,13 +604,11 @@
bool EmitBuiltinInitTrampoline(tree_node *exp, Value *&Result);
// Complex Math Expressions.
- void EmitLoadFromComplex(Value *&Real, Value *&Imag, Value *SrcComplex,
- bool isVolatile);
- void EmitStoreToComplex(Value *DestComplex, Value *Real, Value *Imag,
- bool isVolatile);
- void EmitCOMPLEX_CST(tree_node *exp, Value *DestLoc);
- void EmitCOMPLEX_EXPR(tree_node *exp, Value *DestLoc);
- Value *EmitComplexBinOp(tree_node *exp, Value *DestLoc);
+ void EmitLoadFromComplex(Value *&Real, Value *&Imag, MemRef SrcComplex);
+ void EmitStoreToComplex(MemRef DestComplex, Value *Real, Value *Imag);
+ void EmitCOMPLEX_CST(tree_node *exp, const MemRef *DestLoc);
+ void EmitCOMPLEX_EXPR(tree_node *exp, const MemRef *DestLoc);
+ Value *EmitComplexBinOp(tree_node *exp, const MemRef *DestLoc);
// L-Value Expressions.
LValue EmitLV_DECL(tree_node *exp);
@@ -606,12 +621,12 @@
// Constant Expressions.
Value *EmitINTEGER_CST(tree_node *exp);
Value *EmitREAL_CST(tree_node *exp);
- Value *EmitCONSTRUCTOR(tree_node *exp, Value *DestLoc);
+ Value *EmitCONSTRUCTOR(tree_node *exp, const MemRef *DestLoc);
// Optional target defined builtin intrinsic expanding function.
bool TargetIntrinsicLower(tree_node *exp,
unsigned FnCode,
- Value *DestLoc,
+ const MemRef *DestLoc,
Value *&Result,
const Type *ResultType,
std::vector<Value*> &Ops);
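A recurring caller-side pattern in the patch: a default-constructed MemRef (null
Ptr, alignment 0) acts as "no destination", and its address is passed down only
when a real temporary was created, as in EmitSTATEMENT_LIST and EmitRETURN_EXPR
above. Sketched with the same hypothetical stand-ins as before:

  #include <cassert>
  #include <cstddef>

  struct Value { int Dummy; };

  struct MemRef { // as added to llvm-internal.h above
    Value *Ptr;
    unsigned Alignment;
    bool Volatile;
    MemRef() : Ptr(0), Alignment(0), Volatile(false) {}
    MemRef(Value *P, unsigned A, bool V) : Ptr(P), Alignment(A), Volatile(V) {
      assert(A && !(A & (A - 1)) && "Alignment not a power of 2!");
    }
  };

  static Value *emit(const MemRef *DestLoc, Value *Scalar) {
    // Aggregate results go to *DestLoc; scalar results are returned.
    return DestLoc ? 0 : Scalar;
  }

  int main() {
    Value Tmp = { 0 }, Scalar = { 1 };
    bool isAggregate = true;            // stands in for isAggregateTreeType(...)
    MemRef DestLoc;                     // default: "no destination" sentinel
    if (isAggregate)
      DestLoc = MemRef(&Tmp, 8, false); // stands in for CreateTempLoc(Ty)
    // Only pass the MemRef down if a temporary was actually created:
    Value *Result = emit(DestLoc.Ptr ? &DestLoc : NULL, &Scalar);
    (void)Result;
    return 0;
  }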