[llvm-commits] [gcc-plugin] r82144 - in /gcc-plugin/trunk: llvm-convert.cpp llvm-internal.h
Duncan Sands
baldrick at free.fr
Thu Sep 17 07:01:40 PDT 2009
Author: baldrick
Date: Thu Sep 17 09:01:39 2009
New Revision: 82144
URL: http://llvm.org/viewvc/llvm-project?rev=82144&view=rev
Log:
Handle GIMPLE_ASM directly, rather than going via trees. Unfortunately we
still need to muck around with lists (and create a tree for the asm string)
because we use some GCC helpers that require this.
Modified:
gcc-plugin/trunk/llvm-convert.cpp
gcc-plugin/trunk/llvm-internal.h
Modified: gcc-plugin/trunk/llvm-convert.cpp
URL: http://llvm.org/viewvc/llvm-project/gcc-plugin/trunk/llvm-convert.cpp?rev=82144&r1=82143&r2=82144&view=diff
==============================================================================
--- gcc-plugin/trunk/llvm-convert.cpp (original)
+++ gcc-plugin/trunk/llvm-convert.cpp Thu Sep 17 09:01:39 2009
@@ -949,7 +949,6 @@
switch (gimple_code(gimple_stmt)) {
case GIMPLE_ASSIGN:
- case GIMPLE_ASM:
case GIMPLE_CALL: {
// TODO Handle gimple directly, rather than converting to a tree.
tree stmt = gimple_to_tree(gimple_stmt);
@@ -968,6 +967,10 @@
break;
}
+ case GIMPLE_ASM:
+ RenderGIMPLE_ASM(gimple_stmt);
+ break;
+
case GIMPLE_COND:
RenderGIMPLE_COND(gimple_stmt);
break;
@@ -1080,7 +1083,6 @@
case CALL_EXPR: Result = EmitCALL_EXPR(exp, DestLoc); break;
case INIT_EXPR:
case MODIFY_EXPR: Result = EmitMODIFY_EXPR(exp, DestLoc); break;
- case ASM_EXPR: Result = EmitASM_EXPR(exp); break;
case NON_LVALUE_EXPR: Result = Emit(TREE_OPERAND(exp, 0), DestLoc); break;
// Unary Operators
@@ -3857,15 +3859,15 @@
/// punctuation.
/// Other %xN expressions are turned into LLVM ${N:x} operands.
///
-static std::string ConvertInlineAsmStr(tree exp, unsigned NumOperands) {
-
- tree str = ASM_STRING(exp);
- if (TREE_CODE(str) == ADDR_EXPR) str = TREE_OPERAND(str, 0);
-
- // ASM_INPUT_P - This flag is set if this is a non-extended ASM, which means
- // that the asm string should not be interpreted, other than to escape $'s.
- if (ASM_INPUT_P(exp)) {
- const char *InStr = TREE_STRING_POINTER(str);
+static std::string ConvertInlineAsmStr(gimple stmt, tree outputs, tree inputs,
+ unsigned NumOperands) {
+ const char *AsmStr = gimple_asm_string(stmt);
+
+ // gimple_asm_input_p - This flag is set if this is a non-extended ASM,
+ // which means that the asm string should not be interpreted, other than
+ // to escape $'s.
+ if (gimple_asm_input_p(stmt)) {
+ const char *InStr = AsmStr;
std::string Result;
while (1) {
switch (*InStr++) {
@@ -3877,7 +3879,8 @@
}
// Expand [name] symbolic operand names.
- str = resolve_asm_operand_names(str, ASM_OUTPUTS(exp), ASM_INPUTS(exp));
+ tree str = resolve_asm_operand_names(build_string (strlen (AsmStr), AsmStr),
+ outputs, inputs);
const char *InStr = TREE_STRING_POINTER(str);
@@ -3911,10 +3914,11 @@
unsigned long OpNum = strtoul(InStr, &EndPtr, 10);
if (InStr == EndPtr) {
- error_at(EXPR_LOCATION(exp),"operand number missing after %%-letter");
+ error_at(gimple_location(stmt),
+ "operand number missing after %%-letter");
return Result;
} else if (OpNum >= NumOperands) {
- error_at(EXPR_LOCATION(exp), "operand number out of range");
+ error_at(gimple_location(stmt), "operand number out of range");
return Result;
}
Result += "${" + utostr(OpNum) + ":" + EscapedChar + "}";
@@ -4098,9 +4102,9 @@
/// is performed after things like SROA, not before. At the moment we are
/// just trying to pick one that will work. This may get refined.
static void
-ChooseConstraintTuple (const char **Constraints, tree exp, unsigned NumInputs,
- unsigned NumOutputs, unsigned NumChoices,
- const char **ReplacementStrings)
+ChooseConstraintTuple(const char **Constraints, gimple stmt, tree outputs,
+ tree inputs, unsigned NumOutputs, unsigned NumInputs,
+ unsigned NumChoices, const char **ReplacementStrings)
{
int MaxWeight = -1;
unsigned int CommasToSkip = 0;
@@ -4116,7 +4120,7 @@
for (unsigned int i=0; i<NumChoices; i++) {
Weights[i] = 0;
unsigned int j = 0;
- for (tree Output = ASM_OUTPUTS(exp); j<NumOutputs;
+ for (tree Output = outputs; j<NumOutputs;
j++, Output = TREE_CHAIN(Output)) {
if (i==0)
RunningConstraints[j]++; // skip leading =
@@ -4143,7 +4147,7 @@
RunningConstraints[j] = p;
}
assert(j==NumOutputs);
- for (tree Input = ASM_INPUTS(exp); j<NumInputs+NumOutputs;
+ for (tree Input = inputs; j<NumInputs+NumOutputs;
j++, Input = TREE_CHAIN(Input)) {
const char* p = RunningConstraints[j];
if (Weights[i] != -1) {
@@ -4221,517 +4225,124 @@
#endif
}
-Value *TreeToLLVM::EmitASM_EXPR(tree exp) {
- unsigned NumInputs = list_length(ASM_INPUTS(exp));
- unsigned NumOutputs = list_length(ASM_OUTPUTS(exp));
- unsigned NumInOut = 0;
-
- // Look for multiple alternative constraints: multiple alternatives separated
- // by commas.
- unsigned NumChoices = 0; // sentinal; real value is always at least 1.
- const char* p;
- for (tree t = ASM_INPUTS(exp); t; t = TREE_CHAIN(t)) {
- unsigned NumInputChoices = 1;
- for (p = TREE_STRING_POINTER(TREE_VALUE(TREE_PURPOSE(t))); *p; p++) {
- if (*p == ',')
- NumInputChoices++;
- }
- if (NumChoices==0)
- NumChoices = NumInputChoices;
- else if (NumChoices != NumInputChoices)
- abort(); // invalid constraints
- }
- for (tree t = ASM_OUTPUTS(exp); t; t = TREE_CHAIN(t)) {
- unsigned NumOutputChoices = 1;
- for (p = TREE_STRING_POINTER(TREE_VALUE(TREE_PURPOSE(t))); *p; p++) {
- if (*p == ',')
- NumOutputChoices++;
- }
- if (NumChoices==0)
- NumChoices = NumOutputChoices;
- else if (NumChoices != NumOutputChoices)
- abort(); // invalid constraints
- }
-
- /// Constraints - The output/input constraints, concatenated together in array
- /// form instead of list form.
- const char **Constraints =
- (const char **)alloca((NumOutputs + NumInputs) * sizeof(const char *));
- // Process outputs.
- int ValNum = 0;
- for (tree Output = ASM_OUTPUTS(exp); Output;
- Output = TREE_CHAIN(Output), ++ValNum) {
- tree Operand = TREE_VALUE(Output);
- tree type = TREE_TYPE(Operand);
- // If there's an erroneous arg, emit no insn.
- if (type == error_mark_node) return 0;
+//===----------------------------------------------------------------------===//
+// ... Helpers for Builtin Function Expansion ...
+//===----------------------------------------------------------------------===//
- // Parse the output constraint.
- const char *Constraint =
- TREE_STRING_POINTER(TREE_VALUE(TREE_PURPOSE(Output)));
- Constraints[ValNum] = Constraint;
- }
- // Process inputs.
- for (tree Input = ASM_INPUTS(exp); Input; Input = TREE_CHAIN(Input),++ValNum){
- tree Val = TREE_VALUE(Input);
- tree type = TREE_TYPE(Val);
- // If there's an erroneous arg, emit no insn.
- if (type == error_mark_node) return 0;
+Value *TreeToLLVM::BuildVector(const std::vector<Value*> &Ops) {
+ assert((Ops.size() & (Ops.size()-1)) == 0 &&
+ "Not a power-of-two sized vector!");
+ bool AllConstants = true;
+ for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i)
+ AllConstants &= isa<Constant>(Ops[i]);
- const char *Constraint =
- TREE_STRING_POINTER(TREE_VALUE(TREE_PURPOSE(Input)));
- Constraints[ValNum] = Constraint;
+ // If this is a constant vector, create a ConstantVector.
+ if (AllConstants) {
+ std::vector<Constant*> CstOps;
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i)
+ CstOps.push_back(cast<Constant>(Ops[i]));
+ return ConstantVector::get(CstOps);
}
- // If there are multiple constraint tuples, pick one. Constraints is
- // altered to point to shorter strings (which are malloc'ed), and everything
- // below Just Works as in the NumChoices==1 case.
- const char** ReplacementStrings = 0;
- if (NumChoices>1) {
- ReplacementStrings =
- (const char **)alloca((NumOutputs + NumInputs) * sizeof(const char *));
- ChooseConstraintTuple(Constraints, exp, NumInputs, NumOutputs, NumChoices,
- ReplacementStrings);
- }
+ // Otherwise, insertelement the values to build the vector.
+ Value *Result =
+ UndefValue::get(VectorType::get(Ops[0]->getType(), Ops.size()));
- std::vector<Value*> CallOps;
- std::vector<const Type*> CallArgTypes;
- std::string NewAsmStr = ConvertInlineAsmStr(exp, NumOutputs+NumInputs);
- std::string ConstraintStr;
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i)
+ Result = Builder.CreateInsertElement(Result, Ops[i],
+ ConstantInt::get(Type::getInt32Ty(Context), i));
- // StoreCallResultAddr - The pointer to store the result of the call through.
- SmallVector<Value *, 4> StoreCallResultAddrs;
- SmallVector<const Type *, 4> CallResultTypes;
- SmallVector<bool, 4> CallResultIsSigned;
- SmallVector<tree, 4> CallResultSSANames;
- SmallVector<Value *, 4> CallResultSSATemps;
+ return Result;
+}
- // Process outputs.
- ValNum = 0;
- for (tree Output = ASM_OUTPUTS(exp); Output;
- Output = TREE_CHAIN(Output), ++ValNum) {
- tree Operand = TREE_VALUE(Output);
+/// BuildVector - This varargs function builds a literal vector ({} syntax) with
+/// the specified null-terminated list of elements. The elements must be all
+/// the same element type and there must be a power of two of them.
+Value *TreeToLLVM::BuildVector(Value *Elt, ...) {
+ std::vector<Value*> Ops;
+ va_list VA;
+ va_start(VA, Elt);
- // Parse the output constraint.
- const char *Constraint = Constraints[ValNum];
- bool IsInOut, AllowsReg, AllowsMem;
- if (!parse_output_constraint(&Constraint, ValNum, NumInputs, NumOutputs,
- &AllowsMem, &AllowsReg, &IsInOut)) {
- if (NumChoices>1)
- FreeConstTupleStrings(ReplacementStrings, NumInputs+NumOutputs);
- return 0;
- }
- assert(Constraint[0] == '=' && "Not an output constraint?");
+ Ops.push_back(Elt);
+ while (Value *Arg = va_arg(VA, Value *))
+ Ops.push_back(Arg);
+ va_end(VA);
- // Output constraints must be addressable if they aren't simple register
- // constraints (this emits "address of register var" errors, etc).
- if (!AllowsReg && (AllowsMem || IsInOut))
- mark_addressable(Operand);
+ return BuildVector(Ops);
+}
- // Count the number of "+" constraints.
- if (IsInOut)
- ++NumInOut, ++NumInputs;
+/// BuildVectorShuffle - Given two vectors and a variable length list of int
+/// constants, create a shuffle of the elements of the inputs, where each dest
+/// is specified by the indexes. The int constant list must be as long as the
+/// number of elements in the input vector.
+///
+/// Undef values may be specified by passing in -1 as the result value.
+///
+Value *TreeToLLVM::BuildVectorShuffle(Value *InVec1, Value *InVec2, ...) {
+ assert(isa<VectorType>(InVec1->getType()) &&
+ InVec1->getType() == InVec2->getType() && "Invalid shuffle!");
+ unsigned NumElements = cast<VectorType>(InVec1->getType())->getNumElements();
- std::string SimplifiedConstraint;
- // If this output register is pinned to a machine register, use that machine
- // register instead of the specified constraint.
- if (TREE_CODE(Operand) == VAR_DECL && DECL_HARD_REGISTER(Operand)) {
- const char* RegName = extractRegisterName(Operand);
- int RegNum = decode_reg_name(RegName);
- if (RegNum >= 0) {
- RegName = getConstraintRegNameFromGccTables(RegName, RegNum);
- unsigned RegNameLen = strlen(RegName);
- char *NewConstraint = (char*)alloca(RegNameLen+4);
- NewConstraint[0] = '=';
- NewConstraint[1] = '{';
- memcpy(NewConstraint+2, RegName, RegNameLen);
- NewConstraint[RegNameLen+2] = '}';
- NewConstraint[RegNameLen+3] = 0;
- SimplifiedConstraint = NewConstraint;
- // We should no longer consider mem constraints.
- AllowsMem = false;
- } else {
- // If we can simplify the constraint into something else, do so now.
- // This avoids LLVM having to know about all the (redundant) GCC
- // constraints.
- SimplifiedConstraint = CanonicalizeConstraint(Constraint+1);
- }
- } else {
- SimplifiedConstraint = CanonicalizeConstraint(Constraint+1);
+ // Get all the indexes from varargs.
+ std::vector<Constant*> Idxs;
+ va_list VA;
+ va_start(VA, InVec2);
+ for (unsigned i = 0; i != NumElements; ++i) {
+ int idx = va_arg(VA, int);
+ if (idx == -1)
+ Idxs.push_back(UndefValue::get(Type::getInt32Ty(Context)));
+ else {
+ assert((unsigned)idx < 2*NumElements && "Element index out of range!");
+ Idxs.push_back(ConstantInt::get(Type::getInt32Ty(Context), idx));
}
+ }
+ va_end(VA);
- LValue Dest;
- const Type *DestValTy;
- if (TREE_CODE(Operand) == SSA_NAME) {
- // The ASM is defining an ssa name. Store the output to a temporary, then
- // load it out again later as the ssa name.
- DestValTy = ConvertType(TREE_TYPE(Operand));
- Dest.Ptr = CreateTemporary(DestValTy);
- CallResultSSANames.push_back(Operand);
- CallResultSSATemps.push_back(Dest.Ptr);
- } else {
- Dest = EmitLV(Operand);
- DestValTy = cast<PointerType>(Dest.Ptr->getType())->getElementType();
- }
+ // Turn this into the appropriate shuffle operation.
+ return Builder.CreateShuffleVector(InVec1, InVec2,
+ ConstantVector::get(Idxs));
+}
- assert(!Dest.isBitfield() && "Cannot assign into a bitfield!");
- if (!AllowsMem && DestValTy->isSingleValueType()) {// Reg dest -> asm return
- StoreCallResultAddrs.push_back(Dest.Ptr);
- ConstraintStr += ",=";
- ConstraintStr += SimplifiedConstraint;
- CallResultTypes.push_back(DestValTy);
- CallResultIsSigned.push_back(!TYPE_UNSIGNED(TREE_TYPE(Operand)));
+//===----------------------------------------------------------------------===//
+// ... Builtin Function Expansion ...
+//===----------------------------------------------------------------------===//
+
+/// EmitFrontendExpandedBuiltinCall - For MD builtins that do not have a
+/// directly corresponding LLVM intrinsic, we allow the target to do some amount
+/// of lowering. This allows us to avoid having intrinsics for operations that
+/// directly correspond to LLVM constructs.
+///
+/// This method returns true if the builtin is handled, otherwise false.
+///
+bool TreeToLLVM::EmitFrontendExpandedBuiltinCall(tree exp, tree fndecl,
+ const MemRef *DestLoc,
+ Value *&Result) {
+#ifdef LLVM_TARGET_INTRINSIC_LOWER
+ // Get the result type and operand line in an easy to consume format.
+ const Type *ResultType = ConvertType(TREE_TYPE(TREE_TYPE(fndecl)));
+ std::vector<Value*> Operands;
+ for (tree Op = CALL_EXPR_ARGS(exp); Op; Op = TREE_CHAIN(Op)) {
+ tree OpVal = TREE_VALUE(Op);
+ if (isAggregateTreeType(TREE_TYPE(OpVal))) {
+ MemRef OpLoc = CreateTempLoc(ConvertType(TREE_TYPE(OpVal)));
+ Emit(OpVal, &OpLoc);
+ Operands.push_back(Builder.CreateLoad(OpLoc.Ptr));
} else {
- ConstraintStr += ",=*";
- ConstraintStr += SimplifiedConstraint;
- CallOps.push_back(Dest.Ptr);
- CallArgTypes.push_back(Dest.Ptr->getType());
+ Operands.push_back(Emit(OpVal, NULL));
}
}
- // Process inputs.
- for (tree Input = ASM_INPUTS(exp); Input; Input = TREE_CHAIN(Input),++ValNum){
- tree Val = TREE_VALUE(Input);
- tree type = TREE_TYPE(Val);
-
- const char *Constraint = Constraints[ValNum];
+ unsigned FnCode = DECL_FUNCTION_CODE(fndecl);
+ return LLVM_TARGET_INTRINSIC_LOWER(exp, FnCode, DestLoc, Result, ResultType,
+ Operands);
+#endif
+ return false;
+}
- bool AllowsReg, AllowsMem;
- if (!parse_input_constraint(Constraints+ValNum, ValNum-NumOutputs,
- NumInputs, NumOutputs, NumInOut,
- Constraints, &AllowsMem, &AllowsReg)) {
- if (NumChoices>1)
- FreeConstTupleStrings(ReplacementStrings, NumInputs+NumOutputs);
- return 0;
- }
- bool isIndirect = false;
- if (AllowsReg || !AllowsMem) { // Register operand.
- const Type *LLVMTy = ConvertType(type);
-
- Value *Op = 0;
- if (LLVMTy->isSingleValueType()) {
- if (TREE_CODE(Val)==ADDR_EXPR &&
- TREE_CODE(TREE_OPERAND(Val,0))==LABEL_DECL) {
- // Emit the label, but do not assume it is going to be the target
- // of an indirect branch. Having this logic here is a hack; there
- // should be a bit in the label identifying it as in an asm.
- Op = getLabelDeclBlock(TREE_OPERAND(Val, 0));
- } else
- Op = Emit(Val, 0);
- } else {
- LValue LV = EmitLV(Val);
- assert(!LV.isBitfield() && "Inline asm can't have bitfield operand");
-
- // Structs and unions are permitted here, as long as they're the
- // same size as a register.
- uint64_t TySize = TD.getTypeSizeInBits(LLVMTy);
- if (TySize == 1 || TySize == 8 || TySize == 16 ||
- TySize == 32 || TySize == 64) {
- LLVMTy = IntegerType::get(Context, TySize);
- Op = Builder.CreateLoad(BitCastToType(LV.Ptr,
- PointerType::getUnqual(LLVMTy)));
- } else {
- // Otherwise, emit our value as a lvalue and let the codegen deal with
- // it.
- isIndirect = true;
- Op = LV.Ptr;
- }
- }
-
- const Type *OpTy = Op->getType();
- // If this input operand is matching an output operand, e.g. '0', check if
- // this is something that llvm supports. If the operand types are
- // different, then emit an error if 1) one of the types is not integer or
- // pointer, 2) if size of input type is larger than the output type. If
- // the size of the integer input size is smaller than the integer output
- // type, then cast it to the larger type and shift the value if the target
- // is big endian.
- if (ISDIGIT(Constraint[0])) {
- unsigned Match = atoi(Constraint);
- const Type *OTy = (Match < CallResultTypes.size())
- ? CallResultTypes[Match] : 0;
- if (OTy && OTy != OpTy) {
- if (!(isa<IntegerType>(OTy) || isa<PointerType>(OTy)) ||
- !(isa<IntegerType>(OpTy) || isa<PointerType>(OpTy))) {
- error_at(EXPR_LOCATION(exp),
- "unsupported inline asm: input constraint with a matching "
- "output constraint of incompatible type!");
- if (NumChoices>1)
- FreeConstTupleStrings(ReplacementStrings, NumInputs+NumOutputs);
- return 0;
- }
- unsigned OTyBits = TD.getTypeSizeInBits(OTy);
- unsigned OpTyBits = TD.getTypeSizeInBits(OpTy);
- if (OTyBits == 0 || OpTyBits == 0 || OTyBits < OpTyBits) {
- // It's tempting to implement the OTyBits < OpTyBits case by truncating
- // Op down to OTy, however that breaks in the case of an inline asm
- // constraint that corresponds to a single register, because the
- // user can write code that assumes the whole register is defined,
- // despite the output operand being only a subset of the register. For
- // example:
- //
- // asm ("sarl $10, %%eax" : "=a"(c) : "0"(1000000));
- //
- // The expected behavior is for %eax to be fully defined with the value
- // 1000000 immediately before the asm.
- error_at(EXPR_LOCATION(exp),
- "unsupported inline asm: input constraint with a matching "
- "output constraint of incompatible type!");
- return 0;
- } else if (OTyBits > OpTyBits) {
- Op = CastToAnyType(Op, !TYPE_UNSIGNED(type),
- OTy, CallResultIsSigned[Match]);
- if (BYTES_BIG_ENDIAN) {
- Constant *ShAmt = ConstantInt::get(Op->getType(),
- OTyBits-OpTyBits);
- Op = Builder.CreateLShr(Op, ShAmt);
- }
- OpTy = Op->getType();
- }
- }
- }
-
- CallOps.push_back(Op);
- CallArgTypes.push_back(OpTy);
- } else { // Memory operand.
- mark_addressable(TREE_VALUE(Input));
- isIndirect = true;
- LValue Src = EmitLV(Val);
- assert(!Src.isBitfield() && "Cannot read from a bitfield!");
- CallOps.push_back(Src.Ptr);
- CallArgTypes.push_back(Src.Ptr->getType());
- }
-
- ConstraintStr += ',';
- if (isIndirect)
- ConstraintStr += '*';
-
- // If this output register is pinned to a machine register, use that machine
- // register instead of the specified constraint.
- if (TREE_CODE(Val) == VAR_DECL && DECL_HARD_REGISTER(Val)) {
- const char *RegName = extractRegisterName(Val);
- int RegNum = decode_reg_name(RegName);
- if (RegNum >= 0) {
- RegName = getConstraintRegNameFromGccTables(RegName, RegNum);
- ConstraintStr += '{';
- ConstraintStr += RegName;
- ConstraintStr += '}';
- continue;
- }
- }
-
- // If there is a simpler form for the register constraint, use it.
- std::string Simplified = CanonicalizeConstraint(Constraint);
- ConstraintStr += Simplified;
- }
-
- // Process clobbers.
-
- // Some targets automatically clobber registers across an asm.
- tree Clobbers = targetm.md_asm_clobbers(ASM_OUTPUTS(exp), ASM_INPUTS(exp),
- ASM_CLOBBERS(exp));
- for (; Clobbers; Clobbers = TREE_CHAIN(Clobbers)) {
- const char *RegName = TREE_STRING_POINTER(TREE_VALUE(Clobbers));
- int RegCode = decode_reg_name(RegName);
-
- switch (RegCode) {
- case -1: // Nothing specified?
- case -2: // Invalid.
- error_at(EXPR_LOCATION(exp), "unknown register name %qs in %<asm%>",
- RegName);
- if (NumChoices>1)
- FreeConstTupleStrings(ReplacementStrings, NumInputs+NumOutputs);
- return 0;
- case -3: // cc
- ConstraintStr += ",~{cc}";
- break;
- case -4: // memory
- ConstraintStr += ",~{memory}";
- break;
- default: // Normal register name.
- RegName = getConstraintRegNameFromGccTables(RegName, RegCode);
- ConstraintStr += ",~{";
- ConstraintStr += RegName;
- ConstraintStr += "}";
- break;
- }
- }
-
- const Type *CallResultType;
- switch (CallResultTypes.size()) {
- case 0: CallResultType = Type::getVoidTy(Context); break;
- case 1: CallResultType = CallResultTypes[0]; break;
- default:
- std::vector<const Type*> TmpVec(CallResultTypes.begin(),
- CallResultTypes.end());
- CallResultType = StructType::get(Context, TmpVec);
- break;
- }
-
- const FunctionType *FTy =
- FunctionType::get(CallResultType, CallArgTypes, false);
-
- // Remove the leading comma if we have operands.
- if (!ConstraintStr.empty())
- ConstraintStr.erase(ConstraintStr.begin());
-
- // Make sure we're created a valid inline asm expression.
- if (!InlineAsm::Verify(FTy, ConstraintStr)) {
- error_at(EXPR_LOCATION(exp), "Invalid or unsupported inline assembly!");
- if (NumChoices>1)
- FreeConstTupleStrings(ReplacementStrings, NumInputs+NumOutputs);
- return 0;
- }
-
- Value *Asm = InlineAsm::get(FTy, NewAsmStr, ConstraintStr,
- ASM_VOLATILE_P(exp) || !ASM_OUTPUTS(exp));
- CallInst *CV = Builder.CreateCall(Asm, CallOps.begin(), CallOps.end(),
- CallResultTypes.empty() ? "" : "asmtmp");
- CV->setDoesNotThrow();
-
- // If the call produces a value, store it into the destination.
- if (StoreCallResultAddrs.size() == 1)
- Builder.CreateStore(CV, StoreCallResultAddrs[0]);
- else if (unsigned NumResults = StoreCallResultAddrs.size()) {
- for (unsigned i = 0; i != NumResults; ++i) {
- Value *ValI = Builder.CreateExtractValue(CV, i, "asmresult");
- Builder.CreateStore(ValI, StoreCallResultAddrs[i]);
- }
- }
-
- // If the call defined any ssa names, associate them with their value.
- for (unsigned i = 0, e = CallResultSSANames.size(); i != e; ++i)
- SSANames[CallResultSSANames[i]] = Builder.CreateLoad(CallResultSSATemps[i]);
-
- // Give the backend a chance to upgrade the inline asm to LLVM code. This
- // handles some common cases that LLVM has intrinsics for, e.g. x86 bswap ->
- // llvm.bswap.
- if (const TargetLowering *TLI = TheTarget->getTargetLowering())
- TLI->ExpandInlineAsm(CV);
-
- if (NumChoices>1)
- FreeConstTupleStrings(ReplacementStrings, NumInputs+NumOutputs);
- return 0;
-}
-
-//===----------------------------------------------------------------------===//
-// ... Helpers for Builtin Function Expansion ...
-//===----------------------------------------------------------------------===//
-
-Value *TreeToLLVM::BuildVector(const std::vector<Value*> &Ops) {
- assert((Ops.size() & (Ops.size()-1)) == 0 &&
- "Not a power-of-two sized vector!");
- bool AllConstants = true;
- for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i)
- AllConstants &= isa<Constant>(Ops[i]);
-
- // If this is a constant vector, create a ConstantVector.
- if (AllConstants) {
- std::vector<Constant*> CstOps;
- for (unsigned i = 0, e = Ops.size(); i != e; ++i)
- CstOps.push_back(cast<Constant>(Ops[i]));
- return ConstantVector::get(CstOps);
- }
-
- // Otherwise, insertelement the values to build the vector.
- Value *Result =
- UndefValue::get(VectorType::get(Ops[0]->getType(), Ops.size()));
-
- for (unsigned i = 0, e = Ops.size(); i != e; ++i)
- Result = Builder.CreateInsertElement(Result, Ops[i],
- ConstantInt::get(Type::getInt32Ty(Context), i));
-
- return Result;
-}
-
-/// BuildVector - This varargs function builds a literal vector ({} syntax) with
-/// the specified null-terminated list of elements. The elements must be all
-/// the same element type and there must be a power of two of them.
-Value *TreeToLLVM::BuildVector(Value *Elt, ...) {
- std::vector<Value*> Ops;
- va_list VA;
- va_start(VA, Elt);
-
- Ops.push_back(Elt);
- while (Value *Arg = va_arg(VA, Value *))
- Ops.push_back(Arg);
- va_end(VA);
-
- return BuildVector(Ops);
-}
-
-/// BuildVectorShuffle - Given two vectors and a variable length list of int
-/// constants, create a shuffle of the elements of the inputs, where each dest
-/// is specified by the indexes. The int constant list must be as long as the
-/// number of elements in the input vector.
-///
-/// Undef values may be specified by passing in -1 as the result value.
-///
-Value *TreeToLLVM::BuildVectorShuffle(Value *InVec1, Value *InVec2, ...) {
- assert(isa<VectorType>(InVec1->getType()) &&
- InVec1->getType() == InVec2->getType() && "Invalid shuffle!");
- unsigned NumElements = cast<VectorType>(InVec1->getType())->getNumElements();
-
- // Get all the indexes from varargs.
- std::vector<Constant*> Idxs;
- va_list VA;
- va_start(VA, InVec2);
- for (unsigned i = 0; i != NumElements; ++i) {
- int idx = va_arg(VA, int);
- if (idx == -1)
- Idxs.push_back(UndefValue::get(Type::getInt32Ty(Context)));
- else {
- assert((unsigned)idx < 2*NumElements && "Element index out of range!");
- Idxs.push_back(ConstantInt::get(Type::getInt32Ty(Context), idx));
- }
- }
- va_end(VA);
-
- // Turn this into the appropriate shuffle operation.
- return Builder.CreateShuffleVector(InVec1, InVec2,
- ConstantVector::get(Idxs));
-}
-
-//===----------------------------------------------------------------------===//
-// ... Builtin Function Expansion ...
-//===----------------------------------------------------------------------===//
-
-/// EmitFrontendExpandedBuiltinCall - For MD builtins that do not have a
-/// directly corresponding LLVM intrinsic, we allow the target to do some amount
-/// of lowering. This allows us to avoid having intrinsics for operations that
-/// directly correspond to LLVM constructs.
-///
-/// This method returns true if the builtin is handled, otherwise false.
-///
-bool TreeToLLVM::EmitFrontendExpandedBuiltinCall(tree exp, tree fndecl,
- const MemRef *DestLoc,
- Value *&Result) {
-#ifdef LLVM_TARGET_INTRINSIC_LOWER
- // Get the result type and operand line in an easy to consume format.
- const Type *ResultType = ConvertType(TREE_TYPE(TREE_TYPE(fndecl)));
- std::vector<Value*> Operands;
- for (tree Op = CALL_EXPR_ARGS(exp); Op; Op = TREE_CHAIN(Op)) {
- tree OpVal = TREE_VALUE(Op);
- if (isAggregateTreeType(TREE_TYPE(OpVal))) {
- MemRef OpLoc = CreateTempLoc(ConvertType(TREE_TYPE(OpVal)));
- Emit(OpVal, &OpLoc);
- Operands.push_back(Builder.CreateLoad(OpLoc.Ptr));
- } else {
- Operands.push_back(Emit(OpVal, NULL));
- }
- }
-
- unsigned FnCode = DECL_FUNCTION_CODE(fndecl);
- return LLVM_TARGET_INTRINSIC_LOWER(exp, FnCode, DestLoc, Result, ResultType,
- Operands);
-#endif
- return false;
-}
-
-/// TargetBuiltinCache - A cache of builtin intrinsics indexed by the GCC
-/// builtin number.
-static std::vector<Constant*> TargetBuiltinCache;
+/// TargetBuiltinCache - A cache of builtin intrinsics indexed by the GCC
+/// builtin number.
+static std::vector<Constant*> TargetBuiltinCache;
void clearTargetBuiltinCache() {
TargetBuiltinCache.clear();
@@ -7812,206 +7423,627 @@
break;
}
- // Check that the type of the lvalue is indeed that of a pointer to the tree
- // node. Since LLVM has no void* type, don't insist that void* be converted
- // to a specific LLVM type.
- assert((VOID_TYPE_P(TREE_TYPE(exp)) ||
- LV->getType() == ConvertType(TREE_TYPE(exp))->getPointerTo()) &&
- "LValue of constant has wrong type!");
+ // Check that the type of the lvalue is indeed that of a pointer to the tree
+ // node. Since LLVM has no void* type, don't insist that void* be converted
+ // to a specific LLVM type.
+ assert((VOID_TYPE_P(TREE_TYPE(exp)) ||
+ LV->getType() == ConvertType(TREE_TYPE(exp))->getPointerTo()) &&
+ "LValue of constant has wrong type!");
+
+ return LV;
+}
+
+Constant *TreeConstantToLLVM::EmitLV_Decl(tree exp) {
+ GlobalValue *Val = cast<GlobalValue>(DECL_LLVM(exp));
+
+ // Ensure variable marked as used even if it doesn't go through a parser. If
+ // it hasn't been used yet, write out an external definition.
+ if (!TREE_USED(exp)) {
+ assemble_external(exp);
+ TREE_USED(exp) = 1;
+ Val = cast<GlobalValue>(DECL_LLVM(exp));
+ }
+
+ // If this is an aggregate, emit it to LLVM now. GCC happens to
+ // get this case right by forcing the initializer into memory.
+ if (TREE_CODE(exp) == CONST_DECL || TREE_CODE(exp) == VAR_DECL) {
+ if ((DECL_INITIAL(exp) || !TREE_PUBLIC(exp)) && !DECL_EXTERNAL(exp) &&
+ Val->isDeclaration() &&
+ !BOGUS_CTOR(exp)) {
+ emit_global_to_llvm(exp);
+ // Decl could have changed if it changed type.
+ Val = cast<GlobalValue>(DECL_LLVM(exp));
+ }
+ } else {
+ // Otherwise, inform cgraph that we used the global.
+ mark_decl_referenced(exp);
+ if (tree ID = DECL_ASSEMBLER_NAME(exp))
+ mark_referenced(ID);
+ }
+
+ // The type of the global value output for exp need not match that of exp.
+ // For example if the global's initializer has a different type to the global
+ // itself (allowed in GCC but not in LLVM) then the global is changed to have
+ // the type of the initializer. Correct for this now.
+ const Type *Ty = ConvertType(TREE_TYPE(exp));
+ if (Ty == Type::getVoidTy(Context)) Ty = Type::getInt8Ty(Context); // void* -> i8*.
+
+ return TheFolder->CreateBitCast(Val, Ty->getPointerTo());
+}
+
+/// EmitLV_LABEL_DECL - Someone took the address of a label.
+Constant *TreeConstantToLLVM::EmitLV_LABEL_DECL(tree exp) {
+ assert(TheTreeToLLVM &&
+ "taking the address of a label while not compiling the function!");
+
+ // Figure out which function this is for, verify it's the one we're compiling.
+ if (DECL_CONTEXT(exp)) {
+ assert(TREE_CODE(DECL_CONTEXT(exp)) == FUNCTION_DECL &&
+ "Address of label in nested function?");
+ assert(TheTreeToLLVM->getFUNCTION_DECL() == DECL_CONTEXT(exp) &&
+ "Taking the address of a label that isn't in the current fn!?");
+ }
+
+ BasicBlock *BB = TheTreeToLLVM->getLabelDeclBlock(exp);
+ Constant *C = TheTreeToLLVM->getIndirectGotoBlockNumber(BB);
+ return
+ TheFolder->CreateIntToPtr(C, PointerType::getUnqual(Type::getInt8Ty(Context)));
+}
+
+Constant *TreeConstantToLLVM::EmitLV_COMPLEX_CST(tree exp) {
+ Constant *Init = TreeConstantToLLVM::ConvertCOMPLEX_CST(exp);
+
+ // Cache the constants to avoid making obvious duplicates that have to be
+ // folded by the optimizer.
+ static std::map<Constant*, GlobalVariable*> ComplexCSTCache;
+ GlobalVariable *&Slot = ComplexCSTCache[Init];
+ if (Slot) return Slot;
+
+ // Create a new complex global.
+ Slot = new GlobalVariable(*TheModule, Init->getType(), true,
+ GlobalVariable::PrivateLinkage, Init, ".cpx");
+ return Slot;
+}
+
+Constant *TreeConstantToLLVM::EmitLV_STRING_CST(tree exp) {
+ Constant *Init = TreeConstantToLLVM::ConvertSTRING_CST(exp);
+
+ GlobalVariable **SlotP = 0;
+
+ // Cache the string constants to avoid making obvious duplicate strings that
+ // have to be folded by the optimizer.
+ static std::map<Constant*, GlobalVariable*> StringCSTCache;
+ GlobalVariable *&Slot = StringCSTCache[Init];
+ if (Slot) return Slot;
+ SlotP = &Slot;
+
+ // Create a new string global.
+ GlobalVariable *GV = new GlobalVariable(*TheModule, Init->getType(), true,
+ GlobalVariable::PrivateLinkage, Init,
+ ".str");
+
+ GV->setAlignment(TYPE_ALIGN(TREE_TYPE(exp)) / 8);
+
+ if (SlotP) *SlotP = GV;
+ return GV;
+}
+
+Constant *TreeConstantToLLVM::EmitLV_ARRAY_REF(tree exp) {
+ tree Array = TREE_OPERAND(exp, 0);
+ tree ArrayType = TREE_TYPE(Array);
+ tree Index = TREE_OPERAND(exp, 1);
+ tree IndexType = TREE_TYPE(Index);
+ assert(TREE_CODE(ArrayType) == ARRAY_TYPE && "Unknown ARRAY_REF!");
+
+ // Check for variable sized reference.
+ // FIXME: add support for array types where the size doesn't fit into 64 bits
+ assert(isSequentialCompatible(ArrayType) && "Global with variable size?");
+
+ Constant *ArrayAddr;
+ // First subtract the lower bound, if any, in the type of the index.
+ tree LowerBound = array_ref_low_bound(exp);
+ if (!integer_zerop(LowerBound))
+ Index = fold(build2(MINUS_EXPR, IndexType, Index, LowerBound));
+ ArrayAddr = EmitLV(Array);
+
+ Constant *IndexVal = Convert(Index);
+
+ const Type *IntPtrTy = getTargetData().getIntPtrType(Context);
+ if (IndexVal->getType() != IntPtrTy)
+ IndexVal = TheFolder->CreateIntCast(IndexVal, IntPtrTy,
+ !TYPE_UNSIGNED(IndexType));
+
+ Value *Idx[2];
+ Idx[0] = ConstantInt::get(IntPtrTy, 0);
+ Idx[1] = IndexVal;
+
+ return TheFolder->CreateGetElementPtr(ArrayAddr, Idx, 2);
+}
+
+Constant *TreeConstantToLLVM::EmitLV_COMPONENT_REF(tree exp) {
+ Constant *StructAddrLV = EmitLV(TREE_OPERAND(exp, 0));
+
+ // Ensure that the struct type has been converted, so that the fielddecls
+ // are laid out.
+ const Type *StructTy = ConvertType(TREE_TYPE(TREE_OPERAND(exp, 0)));
+
+ tree FieldDecl = TREE_OPERAND(exp, 1);
+
+ StructAddrLV = TheFolder->CreateBitCast(StructAddrLV,
+ PointerType::getUnqual(StructTy));
+ const Type *FieldTy = ConvertType(getDeclaredType(FieldDecl));
+
+ // BitStart - This is the actual offset of the field from the start of the
+ // struct, in bits. For bitfields this may be on a non-byte boundary.
+ unsigned BitStart = getComponentRefOffsetInBits(exp);
+ Constant *FieldPtr;
+ const TargetData &TD = getTargetData();
+
+ tree field_offset = component_ref_field_offset (exp);
+ // If this is a normal field at a fixed offset from the start, handle it.
+ if (TREE_CODE(field_offset) == INTEGER_CST) {
+ unsigned int MemberIndex = GetFieldIndex(FieldDecl);
+
+ Constant *Ops[] = {
+ StructAddrLV,
+ Constant::getNullValue(Type::getInt32Ty(Context)),
+ ConstantInt::get(Type::getInt32Ty(Context), MemberIndex)
+ };
+ FieldPtr = TheFolder->CreateGetElementPtr(StructAddrLV, Ops+1, 2);
+
+ FieldPtr = ConstantFoldInstOperands(Instruction::GetElementPtr,
+ FieldPtr->getType(), Ops,
+ 3, Context, &TD);
+
+ // Now that we did an offset from the start of the struct, subtract off
+ // the offset from BitStart.
+ if (MemberIndex) {
+ const StructLayout *SL = TD.getStructLayout(cast<StructType>(StructTy));
+ BitStart -= SL->getElementOffset(MemberIndex) * 8;
+ }
+
+ } else {
+ Constant *Offset = Convert(field_offset);
+ Constant *Ptr = TheFolder->CreatePtrToInt(StructAddrLV, Offset->getType());
+ Ptr = TheFolder->CreateAdd(Ptr, Offset);
+ FieldPtr = TheFolder->CreateIntToPtr(Ptr,
+ PointerType::getUnqual(FieldTy));
+ }
+
+ // Make sure we return a result of the right type.
+ if (PointerType::getUnqual(FieldTy) != FieldPtr->getType())
+ FieldPtr = TheFolder->CreateBitCast(FieldPtr,
+ PointerType::getUnqual(FieldTy));
- return LV;
+ assert(BitStart == 0 &&
+ "It's a bitfield reference or we didn't get to the field!");
+ return FieldPtr;
}
-Constant *TreeConstantToLLVM::EmitLV_Decl(tree exp) {
- GlobalValue *Val = cast<GlobalValue>(DECL_LLVM(exp));
+//===----------------------------------------------------------------------===//
+// ... Convert GIMPLE to LLVM ...
+//===----------------------------------------------------------------------===//
- // Ensure variable marked as used even if it doesn't go through a parser. If
- // it hasn't been used yet, write out an external definition.
- if (!TREE_USED(exp)) {
- assemble_external(exp);
- TREE_USED(exp) = 1;
- Val = cast<GlobalValue>(DECL_LLVM(exp));
+void TreeToLLVM::RenderGIMPLE_ASM(gimple stmt) {
+ // Some of the GCC utilities we use still want lists and not gimple, so create
+ // input, output and clobber lists for their benefit.
+ unsigned NumOutputs = gimple_asm_noutputs (stmt);
+ tree outputs = NULL_TREE;
+ if (NumOutputs) {
+ tree t = outputs = gimple_asm_output_op (stmt, 0);
+ for (unsigned i = 1; i < NumOutputs; i++) {
+ TREE_CHAIN (t) = gimple_asm_output_op (stmt, i);
+ t = gimple_asm_output_op (stmt, i);
+ }
}
- // If this is an aggregate, emit it to LLVM now. GCC happens to
- // get this case right by forcing the initializer into memory.
- if (TREE_CODE(exp) == CONST_DECL || TREE_CODE(exp) == VAR_DECL) {
- if ((DECL_INITIAL(exp) || !TREE_PUBLIC(exp)) && !DECL_EXTERNAL(exp) &&
- Val->isDeclaration() &&
- !BOGUS_CTOR(exp)) {
- emit_global_to_llvm(exp);
- // Decl could have change if it changed type.
- Val = cast<GlobalValue>(DECL_LLVM(exp));
+ unsigned NumInputs = gimple_asm_ninputs(stmt);
+ tree inputs = NULL_TREE;
+ if (NumInputs) {
+ tree t = inputs = gimple_asm_input_op (stmt, 0);
+ for (unsigned i = 1; i < NumInputs; i++) {
+ TREE_CHAIN (t) = gimple_asm_input_op (stmt, i);
+ t = gimple_asm_input_op (stmt, i);
}
- } else {
- // Otherwise, inform cgraph that we used the global.
- mark_decl_referenced(exp);
- if (tree ID = DECL_ASSEMBLER_NAME(exp))
- mark_referenced(ID);
}
- // The type of the global value output for exp need not match that of exp.
- // For example if the global's initializer has a different type to the global
- // itself (allowed in GCC but not in LLVM) then the global is changed to have
- // the type of the initializer. Correct for this now.
- const Type *Ty = ConvertType(TREE_TYPE(exp));
- if (Ty == Type::getVoidTy(Context)) Ty = Type::getInt8Ty(Context); // void* -> i8*.
+ unsigned NumClobbers = gimple_asm_nclobbers (stmt);
+ tree clobbers = NULL_TREE;
+ if (NumClobbers) {
+ tree t = clobbers = gimple_asm_clobber_op (stmt, 0);
+ for (unsigned i = 1; i < NumClobbers; i++) {
+ TREE_CHAIN (t) = gimple_asm_clobber_op (stmt, i);
+ t = gimple_asm_clobber_op (stmt, i);
+ }
+ }
- return TheFolder->CreateBitCast(Val, Ty->getPointerTo());
-}
+ unsigned NumInOut = 0;
-/// EmitLV_LABEL_DECL - Someone took the address of a label.
-Constant *TreeConstantToLLVM::EmitLV_LABEL_DECL(tree exp) {
- assert(TheTreeToLLVM &&
- "taking the address of a label while not compiling the function!");
+ // Look for multiple alternative constraints: multiple alternatives separated
+ // by commas.
+ unsigned NumChoices = 0; // sentinel; real value is always at least 1.
+ const char* p;
+ for (tree t = inputs; t; t = TREE_CHAIN(t)) {
+ unsigned NumInputChoices = 1;
+ for (p = TREE_STRING_POINTER(TREE_VALUE(TREE_PURPOSE(t))); *p; p++) {
+ if (*p == ',')
+ NumInputChoices++;
+ }
+ if (NumChoices==0)
+ NumChoices = NumInputChoices;
+ else if (NumChoices != NumInputChoices)
+ abort(); // invalid constraints
+ }
+ for (tree t = outputs; t; t = TREE_CHAIN(t)) {
+ unsigned NumOutputChoices = 1;
+ for (p = TREE_STRING_POINTER(TREE_VALUE(TREE_PURPOSE(t))); *p; p++) {
+ if (*p == ',')
+ NumOutputChoices++;
+ }
+ if (NumChoices==0)
+ NumChoices = NumOutputChoices;
+ else if (NumChoices != NumOutputChoices)
+ abort(); // invalid constraints
+ }
- // Figure out which function this is for, verify it's the one we're compiling.
- if (DECL_CONTEXT(exp)) {
- assert(TREE_CODE(DECL_CONTEXT(exp)) == FUNCTION_DECL &&
- "Address of label in nested function?");
- assert(TheTreeToLLVM->getFUNCTION_DECL() == DECL_CONTEXT(exp) &&
- "Taking the address of a label that isn't in the current fn!?");
+ /// Constraints - The output/input constraints, concatenated together in array
+ /// form instead of list form.
+ const char **Constraints =
+ (const char **)alloca((NumOutputs + NumInputs) * sizeof(const char *));
+
+ // Process outputs.
+ int ValNum = 0;
+ for (tree Output = outputs; Output; Output = TREE_CHAIN(Output), ++ValNum) {
+ tree Operand = TREE_VALUE(Output);
+ tree type = TREE_TYPE(Operand);
+ // If there's an erroneous arg, emit no insn.
+ if (type == error_mark_node) return;
+
+ // Parse the output constraint.
+ const char *Constraint =
+ TREE_STRING_POINTER(TREE_VALUE(TREE_PURPOSE(Output)));
+ Constraints[ValNum] = Constraint;
}
+ // Process inputs.
+ for (tree Input = inputs; Input; Input = TREE_CHAIN(Input),++ValNum) {
+ tree Val = TREE_VALUE(Input);
+ tree type = TREE_TYPE(Val);
+ // If there's an erroneous arg, emit no insn.
+ if (type == error_mark_node) return;
- BasicBlock *BB = TheTreeToLLVM->getLabelDeclBlock(exp);
- Constant *C = TheTreeToLLVM->getIndirectGotoBlockNumber(BB);
- return
- TheFolder->CreateIntToPtr(C, PointerType::getUnqual(Type::getInt8Ty(Context)));
-}
+ const char *Constraint =
+ TREE_STRING_POINTER(TREE_VALUE(TREE_PURPOSE(Input)));
+ Constraints[ValNum] = Constraint;
+ }
-Constant *TreeConstantToLLVM::EmitLV_COMPLEX_CST(tree exp) {
- Constant *Init = TreeConstantToLLVM::ConvertCOMPLEX_CST(exp);
+ // If there are multiple constraint tuples, pick one. Constraints is
+ // altered to point to shorter strings (which are malloc'ed), and everything
+ // below Just Works as in the NumChoices==1 case.
+ const char** ReplacementStrings = 0;
+ if (NumChoices>1) {
+ ReplacementStrings =
+ (const char **)alloca((NumOutputs + NumInputs) * sizeof(const char *));
+ ChooseConstraintTuple(Constraints, stmt, outputs, inputs, NumOutputs,
+ NumInputs, NumChoices, ReplacementStrings);
+ }
- // Cache the constants to avoid making obvious duplicates that have to be
- // folded by the optimizer.
- static std::map<Constant*, GlobalVariable*> ComplexCSTCache;
- GlobalVariable *&Slot = ComplexCSTCache[Init];
- if (Slot) return Slot;
+ std::vector<Value*> CallOps;
+ std::vector<const Type*> CallArgTypes;
+ std::string NewAsmStr = ConvertInlineAsmStr(stmt, outputs, inputs,
+ NumOutputs+NumInputs);
+ std::string ConstraintStr;
- // Create a new complex global.
- Slot = new GlobalVariable(*TheModule, Init->getType(), true,
- GlobalVariable::PrivateLinkage, Init, ".cpx");
- return Slot;
-}
+ // StoreCallResultAddr - The pointer to store the result of the call through.
+ SmallVector<Value *, 4> StoreCallResultAddrs;
+ SmallVector<const Type *, 4> CallResultTypes;
+ SmallVector<bool, 4> CallResultIsSigned;
+ SmallVector<tree, 4> CallResultSSANames;
+ SmallVector<Value *, 4> CallResultSSATemps;
-Constant *TreeConstantToLLVM::EmitLV_STRING_CST(tree exp) {
- Constant *Init = TreeConstantToLLVM::ConvertSTRING_CST(exp);
+ // Process outputs.
+ ValNum = 0;
+ for (tree Output = outputs; Output; Output = TREE_CHAIN(Output), ++ValNum) {
+ tree Operand = TREE_VALUE(Output);
- GlobalVariable **SlotP = 0;
+ // Parse the output constraint.
+ const char *Constraint = Constraints[ValNum];
+ bool IsInOut, AllowsReg, AllowsMem;
+ if (!parse_output_constraint(&Constraint, ValNum, NumInputs, NumOutputs,
+ &AllowsMem, &AllowsReg, &IsInOut)) {
+ if (NumChoices>1)
+ FreeConstTupleStrings(ReplacementStrings, NumInputs+NumOutputs);
+ return;
+ }
+ assert(Constraint[0] == '=' && "Not an output constraint?");
- // Cache the string constants to avoid making obvious duplicate strings that
- // have to be folded by the optimizer.
- static std::map<Constant*, GlobalVariable*> StringCSTCache;
- GlobalVariable *&Slot = StringCSTCache[Init];
- if (Slot) return Slot;
- SlotP = &Slot;
+ // Output constraints must be addressable if they aren't simple register
+ // constraints (this emits "address of register var" errors, etc).
+ if (!AllowsReg && (AllowsMem || IsInOut))
+ mark_addressable(Operand);
- // Create a new string global.
- GlobalVariable *GV = new GlobalVariable(*TheModule, Init->getType(), true,
- GlobalVariable::PrivateLinkage, Init,
- ".str");
+ // Count the number of "+" constraints.
+ if (IsInOut)
+ ++NumInOut, ++NumInputs;
- GV->setAlignment(TYPE_ALIGN(TREE_TYPE(exp)) / 8);
+ std::string SimplifiedConstraint;
+ // If this output register is pinned to a machine register, use that machine
+ // register instead of the specified constraint.
+ if (TREE_CODE(Operand) == VAR_DECL && DECL_HARD_REGISTER(Operand)) {
+ const char* RegName = extractRegisterName(Operand);
+ int RegNum = decode_reg_name(RegName);
+ if (RegNum >= 0) {
+ RegName = getConstraintRegNameFromGccTables(RegName, RegNum);
+ unsigned RegNameLen = strlen(RegName);
+ char *NewConstraint = (char*)alloca(RegNameLen+4);
+ NewConstraint[0] = '=';
+ NewConstraint[1] = '{';
+ memcpy(NewConstraint+2, RegName, RegNameLen);
+ NewConstraint[RegNameLen+2] = '}';
+ NewConstraint[RegNameLen+3] = 0;
+ SimplifiedConstraint = NewConstraint;
+ // We should no longer consider mem constraints.
+ AllowsMem = false;
+ } else {
+ // If we can simplify the constraint into something else, do so now.
+ // This avoids LLVM having to know about all the (redundant) GCC
+ // constraints.
+ SimplifiedConstraint = CanonicalizeConstraint(Constraint+1);
+ }
+ } else {
+ SimplifiedConstraint = CanonicalizeConstraint(Constraint+1);
+ }
- if (SlotP) *SlotP = GV;
- return GV;
-}
+ LValue Dest;
+ const Type *DestValTy;
+ if (TREE_CODE(Operand) == SSA_NAME) {
+ // The ASM is defining an ssa name. Store the output to a temporary, then
+ // load it out again later as the ssa name.
+ DestValTy = ConvertType(TREE_TYPE(Operand));
+ Dest.Ptr = CreateTemporary(DestValTy);
+ CallResultSSANames.push_back(Operand);
+ CallResultSSATemps.push_back(Dest.Ptr);
+ } else {
+ Dest = EmitLV(Operand);
+ DestValTy = cast<PointerType>(Dest.Ptr->getType())->getElementType();
+ }
+
+ assert(!Dest.isBitfield() && "Cannot assign into a bitfield!");
+ if (!AllowsMem && DestValTy->isSingleValueType()) {// Reg dest -> asm return
+ StoreCallResultAddrs.push_back(Dest.Ptr);
+ ConstraintStr += ",=";
+ ConstraintStr += SimplifiedConstraint;
+ CallResultTypes.push_back(DestValTy);
+ CallResultIsSigned.push_back(!TYPE_UNSIGNED(TREE_TYPE(Operand)));
+ } else {
+ ConstraintStr += ",=*";
+ ConstraintStr += SimplifiedConstraint;
+ CallOps.push_back(Dest.Ptr);
+ CallArgTypes.push_back(Dest.Ptr->getType());
+ }
+ }
+
+ // Process inputs.
+ for (tree Input = inputs; Input; Input = TREE_CHAIN(Input),++ValNum) {
+ tree Val = TREE_VALUE(Input);
+ tree type = TREE_TYPE(Val);
+
+ const char *Constraint = Constraints[ValNum];
+
+ bool AllowsReg, AllowsMem;
+ if (!parse_input_constraint(Constraints+ValNum, ValNum-NumOutputs,
+ NumInputs, NumOutputs, NumInOut,
+ Constraints, &AllowsMem, &AllowsReg)) {
+ if (NumChoices>1)
+ FreeConstTupleStrings(ReplacementStrings, NumInputs+NumOutputs);
+ return;
+ }
+ bool isIndirect = false;
+ if (AllowsReg || !AllowsMem) { // Register operand.
+ const Type *LLVMTy = ConvertType(type);
-Constant *TreeConstantToLLVM::EmitLV_ARRAY_REF(tree exp) {
- tree Array = TREE_OPERAND(exp, 0);
- tree ArrayType = TREE_TYPE(Array);
- tree Index = TREE_OPERAND(exp, 1);
- tree IndexType = TREE_TYPE(Index);
- assert(TREE_CODE(ArrayType) == ARRAY_TYPE && "Unknown ARRAY_REF!");
+ Value *Op = 0;
+ if (LLVMTy->isSingleValueType()) {
+ if (TREE_CODE(Val)==ADDR_EXPR &&
+ TREE_CODE(TREE_OPERAND(Val,0))==LABEL_DECL) {
+ // Emit the label, but do not assume it is going to be the target
+ // of an indirect branch. Having this logic here is a hack; there
+ // should be a bit in the label identifying it as in an asm.
+ Op = getLabelDeclBlock(TREE_OPERAND(Val, 0));
+ } else
+ Op = Emit(Val, 0);
+ } else {
+ LValue LV = EmitLV(Val);
+ assert(!LV.isBitfield() && "Inline asm can't have bitfield operand");
- // Check for variable sized reference.
- // FIXME: add support for array types where the size doesn't fit into 64 bits
- assert(isSequentialCompatible(ArrayType) && "Global with variable size?");
+ // Structs and unions are permitted here, as long as they're the
+ // same size as a register.
+ uint64_t TySize = TD.getTypeSizeInBits(LLVMTy);
+ if (TySize == 1 || TySize == 8 || TySize == 16 ||
+ TySize == 32 || TySize == 64) {
+ LLVMTy = IntegerType::get(Context, TySize);
+ Op = Builder.CreateLoad(BitCastToType(LV.Ptr,
+ PointerType::getUnqual(LLVMTy)));
+ } else {
+ // Otherwise, emit our value as a lvalue and let the codegen deal with
+ // it.
+ isIndirect = true;
+ Op = LV.Ptr;
+ }
+ }
- Constant *ArrayAddr;
- // First subtract the lower bound, if any, in the type of the index.
- tree LowerBound = array_ref_low_bound(exp);
- if (!integer_zerop(LowerBound))
- Index = fold(build2(MINUS_EXPR, IndexType, Index, LowerBound));
- ArrayAddr = EmitLV(Array);
+ const Type *OpTy = Op->getType();
+ // If this input operand is matching an output operand, e.g. '0', check if
+ // this is something that llvm supports. If the operand types are
+ // different, then emit an error if 1) one of the types is not integer or
+ // pointer, 2) if size of input type is larger than the output type. If
+ // the size of the integer input size is smaller than the integer output
+ // type, then cast it to the larger type and shift the value if the target
+ // is big endian.
+ if (ISDIGIT(Constraint[0])) {
+ unsigned Match = atoi(Constraint);
+ const Type *OTy = (Match < CallResultTypes.size())
+ ? CallResultTypes[Match] : 0;
+ if (OTy && OTy != OpTy) {
+ if (!(isa<IntegerType>(OTy) || isa<PointerType>(OTy)) ||
+ !(isa<IntegerType>(OpTy) || isa<PointerType>(OpTy))) {
+ error_at(gimple_location(stmt),
+ "unsupported inline asm: input constraint with a matching "
+ "output constraint of incompatible type!");
+ if (NumChoices>1)
+ FreeConstTupleStrings(ReplacementStrings, NumInputs+NumOutputs);
+ return;
+ }
+ unsigned OTyBits = TD.getTypeSizeInBits(OTy);
+ unsigned OpTyBits = TD.getTypeSizeInBits(OpTy);
+ if (OTyBits == 0 || OpTyBits == 0 || OTyBits < OpTyBits) {
+ // It's tempting to implement the OTyBits < OpTyBits case by truncating
+ // Op down to OTy, however that breaks in the case of an inline asm
+ // constraint that corresponds to a single register, because the
+ // user can write code that assumes the whole register is defined,
+ // despite the output operand being only a subset of the register. For
+ // example:
+ //
+ // asm ("sarl $10, %%eax" : "=a"(c) : "0"(1000000));
+ //
+ // The expected behavior is for %eax to be fully defined with the value
+ // 1000000 immediately before the asm.
+ error_at(gimple_location(stmt),
+ "unsupported inline asm: input constraint with a matching "
+ "output constraint of incompatible type!");
+ return;
+ } else if (OTyBits > OpTyBits) {
+ Op = CastToAnyType(Op, !TYPE_UNSIGNED(type),
+ OTy, CallResultIsSigned[Match]);
+ if (BYTES_BIG_ENDIAN) {
+ Constant *ShAmt = ConstantInt::get(Op->getType(),
+ OTyBits-OpTyBits);
+ Op = Builder.CreateLShr(Op, ShAmt);
+ }
+ OpTy = Op->getType();
+ }
+ }
+ }
- Constant *IndexVal = Convert(Index);
+ CallOps.push_back(Op);
+ CallArgTypes.push_back(OpTy);
+ } else { // Memory operand.
+ mark_addressable(TREE_VALUE(Input));
+ isIndirect = true;
+ LValue Src = EmitLV(Val);
+ assert(!Src.isBitfield() && "Cannot read from a bitfield!");
+ CallOps.push_back(Src.Ptr);
+ CallArgTypes.push_back(Src.Ptr->getType());
+ }
- const Type *IntPtrTy = getTargetData().getIntPtrType(Context);
- if (IndexVal->getType() != IntPtrTy)
- IndexVal = TheFolder->CreateIntCast(IndexVal, IntPtrTy,
- !TYPE_UNSIGNED(IndexType));
+ ConstraintStr += ',';
+ if (isIndirect)
+ ConstraintStr += '*';
- Value *Idx[2];
- Idx[0] = ConstantInt::get(IntPtrTy, 0);
- Idx[1] = IndexVal;
+ // If this output register is pinned to a machine register, use that machine
+ // register instead of the specified constraint.
+ if (TREE_CODE(Val) == VAR_DECL && DECL_HARD_REGISTER(Val)) {
+ const char *RegName = extractRegisterName(Val);
+ int RegNum = decode_reg_name(RegName);
+ if (RegNum >= 0) {
+ RegName = getConstraintRegNameFromGccTables(RegName, RegNum);
+ ConstraintStr += '{';
+ ConstraintStr += RegName;
+ ConstraintStr += '}';
+ continue;
+ }
+ }
- return TheFolder->CreateGetElementPtr(ArrayAddr, Idx, 2);
-}
+ // If there is a simpler form for the register constraint, use it.
+ std::string Simplified = CanonicalizeConstraint(Constraint);
+ ConstraintStr += Simplified;
+ }
-Constant *TreeConstantToLLVM::EmitLV_COMPONENT_REF(tree exp) {
- Constant *StructAddrLV = EmitLV(TREE_OPERAND(exp, 0));
+ // Process clobbers.
- // Ensure that the struct type has been converted, so that the fielddecls
- // are laid out.
- const Type *StructTy = ConvertType(TREE_TYPE(TREE_OPERAND(exp, 0)));
+ // Some targets automatically clobber registers across an asm.
+ tree Clobbers = targetm.md_asm_clobbers(outputs, inputs, clobbers);
+ for (; Clobbers; Clobbers = TREE_CHAIN(Clobbers)) {
+ const char *RegName = TREE_STRING_POINTER(TREE_VALUE(Clobbers));
+ int RegCode = decode_reg_name(RegName);
- tree FieldDecl = TREE_OPERAND(exp, 1);
+ switch (RegCode) {
+ case -1: // Nothing specified?
+ case -2: // Invalid.
+ error_at(gimple_location(stmt), "unknown register name %qs in %<asm%>",
+ RegName);
+ if (NumChoices>1)
+ FreeConstTupleStrings(ReplacementStrings, NumInputs+NumOutputs);
+ return;
+ case -3: // cc
+ ConstraintStr += ",~{cc}";
+ break;
+ case -4: // memory
+ ConstraintStr += ",~{memory}";
+ break;
+ default: // Normal register name.
+ RegName = getConstraintRegNameFromGccTables(RegName, RegCode);
+ ConstraintStr += ",~{";
+ ConstraintStr += RegName;
+ ConstraintStr += "}";
+ break;
+ }
+ }
- StructAddrLV = TheFolder->CreateBitCast(StructAddrLV,
- PointerType::getUnqual(StructTy));
- const Type *FieldTy = ConvertType(getDeclaredType(FieldDecl));
+ const Type *CallResultType;
+ switch (CallResultTypes.size()) {
+ case 0: CallResultType = Type::getVoidTy(Context); break;
+ case 1: CallResultType = CallResultTypes[0]; break;
+ default:
+ std::vector<const Type*> TmpVec(CallResultTypes.begin(),
+ CallResultTypes.end());
+ CallResultType = StructType::get(Context, TmpVec);
+ break;
+ }
- // BitStart - This is the actual offset of the field from the start of the
- // struct, in bits. For bitfields this may be on a non-byte boundary.
- unsigned BitStart = getComponentRefOffsetInBits(exp);
- Constant *FieldPtr;
- const TargetData &TD = getTargetData();
+ const FunctionType *FTy =
+ FunctionType::get(CallResultType, CallArgTypes, false);
- tree field_offset = component_ref_field_offset (exp);
- // If this is a normal field at a fixed offset from the start, handle it.
- if (TREE_CODE(field_offset) == INTEGER_CST) {
- unsigned int MemberIndex = GetFieldIndex(FieldDecl);
+ // Remove the leading comma if we have operands.
+ if (!ConstraintStr.empty())
+ ConstraintStr.erase(ConstraintStr.begin());
- Constant *Ops[] = {
- StructAddrLV,
- Constant::getNullValue(Type::getInt32Ty(Context)),
- ConstantInt::get(Type::getInt32Ty(Context), MemberIndex)
- };
- FieldPtr = TheFolder->CreateGetElementPtr(StructAddrLV, Ops+1, 2);
+ // Make sure we've created a valid inline asm expression.
+ if (!InlineAsm::Verify(FTy, ConstraintStr)) {
+ error_at(gimple_location(stmt), "Invalid or unsupported inline assembly!");
+ if (NumChoices>1)
+ FreeConstTupleStrings(ReplacementStrings, NumInputs+NumOutputs);
+ return;
+ }
- FieldPtr = ConstantFoldInstOperands(Instruction::GetElementPtr,
- FieldPtr->getType(), Ops,
- 3, Context, &TD);
+ Value *Asm = InlineAsm::get(FTy, NewAsmStr, ConstraintStr,
+ gimple_asm_volatile_p(stmt) || !outputs);
+ CallInst *CV = Builder.CreateCall(Asm, CallOps.begin(), CallOps.end(),
+ CallResultTypes.empty() ? "" : "asmtmp");
+ CV->setDoesNotThrow();
- // Now that we did an offset from the start of the struct, subtract off
- // the offset from BitStart.
- if (MemberIndex) {
- const StructLayout *SL = TD.getStructLayout(cast<StructType>(StructTy));
- BitStart -= SL->getElementOffset(MemberIndex) * 8;
+ // If the call produces a value, store it into the destination.
+ if (StoreCallResultAddrs.size() == 1)
+ Builder.CreateStore(CV, StoreCallResultAddrs[0]);
+ else if (unsigned NumResults = StoreCallResultAddrs.size()) {
+ for (unsigned i = 0; i != NumResults; ++i) {
+ Value *ValI = Builder.CreateExtractValue(CV, i, "asmresult");
+ Builder.CreateStore(ValI, StoreCallResultAddrs[i]);
}
-
- } else {
- Constant *Offset = Convert(field_offset);
- Constant *Ptr = TheFolder->CreatePtrToInt(StructAddrLV, Offset->getType());
- Ptr = TheFolder->CreateAdd(Ptr, Offset);
- FieldPtr = TheFolder->CreateIntToPtr(Ptr,
- PointerType::getUnqual(FieldTy));
}
- // Make sure we return a result of the right type.
- if (PointerType::getUnqual(FieldTy) != FieldPtr->getType())
- FieldPtr = TheFolder->CreateBitCast(FieldPtr,
- PointerType::getUnqual(FieldTy));
+ // If the call defined any ssa names, associate them with their value.
+ for (unsigned i = 0, e = CallResultSSANames.size(); i != e; ++i)
+ SSANames[CallResultSSANames[i]] = Builder.CreateLoad(CallResultSSATemps[i]);
- assert(BitStart == 0 &&
- "It's a bitfield reference or we didn't get to the field!");
- return FieldPtr;
-}
+ // Give the backend a chance to upgrade the inline asm to LLVM code. This
+ // handles some common cases that LLVM has intrinsics for, e.g. x86 bswap ->
+ // llvm.bswap.
+ if (const TargetLowering *TLI = TheTarget->getTargetLowering())
+ TLI->ExpandInlineAsm(CV);
-//===----------------------------------------------------------------------===//
-// ... Convert GIMPLE to LLVM ...
-//===----------------------------------------------------------------------===//
+ if (NumChoices>1)
+ FreeConstTupleStrings(ReplacementStrings, NumInputs+NumOutputs);
+}
void TreeToLLVM::RenderGIMPLE_COND(gimple stmt) {
// Emit the comparison.
Modified: gcc-plugin/trunk/llvm-internal.h
URL: http://llvm.org/viewvc/llvm-project/gcc-plugin/trunk/llvm-internal.h?rev=82144&r1=82143&r2=82144&view=diff
==============================================================================
--- gcc-plugin/trunk/llvm-internal.h (original)
+++ gcc-plugin/trunk/llvm-internal.h Thu Sep 17 09:01:39 2009
@@ -519,6 +519,7 @@
private:
// Render* - Convert GIMPLE to LLVM.
+ void RenderGIMPLE_ASM(gimple_statement_d *);
void RenderGIMPLE_COND(gimple_statement_d *);
void RenderGIMPLE_GOTO(gimple_statement_d *);
void RenderGIMPLE_RESX(gimple_statement_d *);
@@ -582,7 +583,6 @@
Value *EmitFILTER_EXPR(tree_node *exp);
// Inline Assembly and Register Variables.
- Value *EmitASM_EXPR(tree_node *exp);
Value *EmitReadOfRegisterVariable(tree_node *vardecl, const MemRef *DestLoc);
void EmitModifyOfRegisterVariable(tree_node *vardecl, Value *RHS);
More information about the llvm-commits
mailing list