[llvm-commits] [gcc-plugin] r83061 - /gcc-plugin/trunk/llvm-convert.cpp

Duncan Sands baldrick at free.fr
Tue Sep 29 01:01:23 PDT 2009


Author: baldrick
Date: Tue Sep 29 03:01:22 2009
New Revision: 83061

URL: http://llvm.org/viewvc/llvm-project?rev=83061&view=rev
Log:
Move gimple rendering after expression emission and before
constant conversion.

Modified:
    gcc-plugin/trunk/llvm-convert.cpp

Modified: gcc-plugin/trunk/llvm-convert.cpp
URL: http://llvm.org/viewvc/llvm-project/gcc-plugin/trunk/llvm-convert.cpp?rev=83061&r1=83060&r2=83061&view=diff

==============================================================================
--- gcc-plugin/trunk/llvm-convert.cpp (original)
+++ gcc-plugin/trunk/llvm-convert.cpp Tue Sep 29 03:01:22 2009
@@ -6215,1927 +6215,1927 @@
 
 
 //===----------------------------------------------------------------------===//
-//                       ... Constant Expressions ...
+//                      ... Convert GIMPLE to LLVM ...
 //===----------------------------------------------------------------------===//
 
-/// EmitCONSTRUCTOR - emit the constructor into the location specified by
-/// DestLoc.
-Value *TreeToLLVM::EmitCONSTRUCTOR(tree exp, const MemRef *DestLoc) {
-  tree type = TREE_TYPE(exp);
-  const Type *Ty = ConvertType(type);
-  if (const VectorType *PTy = dyn_cast<VectorType>(Ty)) {
-    assert(DestLoc == 0 && "Dest location for packed value?");
-
-    std::vector<Value *> BuildVecOps;
-
-    // Insert zero initializers for any uninitialized values.
-    Constant *Zero = Constant::getNullValue(PTy->getElementType());
-    BuildVecOps.resize(cast<VectorType>(Ty)->getNumElements(), Zero);
-
-    // Insert all of the elements here.
-    unsigned HOST_WIDE_INT ix;
-    tree purpose, value;
-    FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp), ix, purpose, value) {
-      if (!purpose) continue;  // Not actually initialized?
-
-      unsigned FieldNo = TREE_INT_CST_LOW(purpose);
-
-      // Update the element.
-      if (FieldNo < BuildVecOps.size())
-        BuildVecOps[FieldNo] = Emit(value, 0);
+void TreeToLLVM::RenderGIMPLE_ASM(gimple stmt) {
+  // Some of the GCC utilities we use still want lists and not gimple, so
+  // create input, output, clobber and label lists for their benefit.
+  unsigned NumOutputs = gimple_asm_noutputs (stmt);
+  tree outputs = NULL_TREE;
+  if (NumOutputs) {
+    tree t = outputs = gimple_asm_output_op (stmt, 0);
+    for (unsigned i = 1; i < NumOutputs; i++) {
+      TREE_CHAIN (t) = gimple_asm_output_op (stmt, i);
+      t = gimple_asm_output_op (stmt, i);
     }
+  }
 
-    return BuildVector(BuildVecOps);
+  unsigned NumInputs = gimple_asm_ninputs(stmt);
+  tree inputs = NULL_TREE;
+  if (NumInputs) {
+    tree t = inputs = gimple_asm_input_op (stmt, 0);
+    for (unsigned i = 1; i < NumInputs; i++) {
+      TREE_CHAIN (t) = gimple_asm_input_op (stmt, i);
+      t = gimple_asm_input_op (stmt, i);
+    }
   }
 
-  assert(!Ty->isSingleValueType() && "Constructor for scalar type??");
+  unsigned NumClobbers = gimple_asm_nclobbers (stmt);
+  tree clobbers = NULL_TREE;
+  if (NumClobbers) {
+    tree t = clobbers = gimple_asm_clobber_op (stmt, 0);
+    for (unsigned i = 1; i < NumClobbers; i++) {
+      TREE_CHAIN (t) = gimple_asm_clobber_op (stmt, i);
+      t = gimple_asm_clobber_op (stmt, i);
+    }
+  }
 
-  // Start out with the value zero'd out.
-  EmitAggregateZero(*DestLoc, type);
+  // TODO: Understand what these labels are about, and handle them properly.
+  unsigned NumLabels = gimple_asm_nlabels (stmt);
+  tree labels = NULL_TREE;
+  if (NumLabels) {
+    tree t = labels = gimple_asm_label_op (stmt, 0);
+    for (unsigned i = 1; i < NumLabels; i++) {
+      TREE_CHAIN (t) = gimple_asm_label_op (stmt, i);
+      t = gimple_asm_label_op (stmt, i);
+    }
+  }
 
-  VEC(constructor_elt, gc) *elt = CONSTRUCTOR_ELTS(exp);
-  switch (TREE_CODE(TREE_TYPE(exp))) {
-  case ARRAY_TYPE:
-  case RECORD_TYPE:
-  default:
-    if (elt && VEC_length(constructor_elt, elt)) {
-      // We don't handle elements yet.
+  unsigned NumInOut = 0;
 
-      TODO(exp);
+  // Look for multiple alternative constraints: multiple alternatives separated
+  // by commas.
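+  // For example, asm("..." : "=r,m"(x) : "g,r"(y)) has two alternatives for
+  // each constraint, so NumChoices ends up as 2.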
+  unsigned NumChoices = 0;    // sentinel; real value is always at least 1.
+  const char* p;
+  for (tree t = inputs; t; t = TREE_CHAIN(t)) {
+    unsigned NumInputChoices = 1;
+    for (p = TREE_STRING_POINTER(TREE_VALUE(TREE_PURPOSE(t))); *p; p++) {
+      if (*p == ',')
+        NumInputChoices++;
     }
-    return 0;
-  case QUAL_UNION_TYPE:
-  case UNION_TYPE:
-    // Store each element of the constructor into the corresponding field of
-    // DEST.
-    if (!elt || VEC_empty(constructor_elt, elt)) return 0;  // no elements
-    assert(VEC_length(constructor_elt, elt) == 1
-           && "Union CONSTRUCTOR should have one element!");
-    tree tree_purpose = VEC_index(constructor_elt, elt, 0)->index;
-    tree tree_value   = VEC_index(constructor_elt, elt, 0)->value;
-    if (!tree_purpose)
-      return 0;  // Not actually initialized?
-
-    if (!ConvertType(TREE_TYPE(tree_purpose))->isSingleValueType()) {
-      Value *V = Emit(tree_value, DestLoc);
-      (void)V;
-      assert(V == 0 && "Aggregate value returned in a register?");
-    } else {
-      // Scalar value.  Evaluate to a register, then do the store.
-      Value *V = Emit(tree_value, 0);
-      Value *Ptr = Builder.CreateBitCast(DestLoc->Ptr,
-                                         PointerType::getUnqual(V->getType()));
-      StoreInst *St = Builder.CreateStore(V, Ptr, DestLoc->Volatile);
-      St->setAlignment(DestLoc->getAlignment());
+    if (NumChoices==0)
+      NumChoices = NumInputChoices;
+    else if (NumChoices != NumInputChoices)
+      abort();      // invalid constraints
+  }
+  for (tree t = outputs; t; t = TREE_CHAIN(t)) {
+    unsigned NumOutputChoices = 1;
+    for (p = TREE_STRING_POINTER(TREE_VALUE(TREE_PURPOSE(t))); *p; p++) {
+      if (*p == ',')
+        NumOutputChoices++;
     }
-    break;
+    if (NumChoices==0)
+      NumChoices = NumOutputChoices;
+    else if (NumChoices != NumOutputChoices)
+      abort();      // invalid constraints
   }
-  return 0;
-}
 
-Constant *TreeConstantToLLVM::Convert(tree exp) {
-  assert((TREE_CONSTANT(exp) || TREE_CODE(exp) == STRING_CST) &&
-         "Isn't a constant!");
-  switch (TREE_CODE(exp)) {
-  case FDESC_EXPR:    // Needed on itanium
-  default:
-    debug_tree(exp);
-    assert(0 && "Unknown constant to convert!");
-    abort();
-  case INTEGER_CST:   return ConvertINTEGER_CST(exp);
-  case REAL_CST:      return ConvertREAL_CST(exp);
-  case VECTOR_CST:    return ConvertVECTOR_CST(exp);
-  case STRING_CST:    return ConvertSTRING_CST(exp);
-  case COMPLEX_CST:   return ConvertCOMPLEX_CST(exp);
-  case NOP_EXPR:      return ConvertNOP_EXPR(exp);
-  case CONVERT_EXPR:  return ConvertCONVERT_EXPR(exp);
-  case PLUS_EXPR:
-  case MINUS_EXPR:    return ConvertBinOp_CST(exp);
-  case CONSTRUCTOR:   return ConvertCONSTRUCTOR(exp);
-  case VIEW_CONVERT_EXPR: return Convert(TREE_OPERAND(exp, 0));
-  case POINTER_PLUS_EXPR: return ConvertPOINTER_PLUS_EXPR(exp);
-  case ADDR_EXPR:
-    return TheFolder->CreateBitCast(EmitLV(TREE_OPERAND(exp, 0)),
-                                    ConvertType(TREE_TYPE(exp)));
-  }
-}
+  /// Constraints - The output/input constraints, concatenated together in array
+  /// form instead of list form.
+  const char **Constraints =
+    (const char **)alloca((NumOutputs + NumInputs) * sizeof(const char *));
 
-Constant *TreeConstantToLLVM::ConvertINTEGER_CST(tree exp) {
-  const Type *Ty = ConvertType(TREE_TYPE(exp));
+  // Process outputs.
+  int ValNum = 0;
+  for (tree Output = outputs; Output; Output = TREE_CHAIN(Output), ++ValNum) {
+    tree Operand = TREE_VALUE(Output);
+    tree type = TREE_TYPE(Operand);
+    // If there's an erroneous arg, emit no insn.
+    if (type == error_mark_node) return;
 
-  // Handle i128 specially.
-  if (const IntegerType *IT = dyn_cast<IntegerType>(Ty)) {
-    if (IT->getBitWidth() == 128) {
-      // GCC only supports i128 on 64-bit systems.
-      assert(HOST_BITS_PER_WIDE_INT == 64 &&
-             "i128 only supported on 64-bit system");
-      uint64_t Bits[] = { TREE_INT_CST_LOW(exp), TREE_INT_CST_HIGH(exp) };
-      return ConstantInt::get(Context, APInt(128, 2, Bits));
-    }
+    // Parse the output constraint.
+    const char *Constraint =
+      TREE_STRING_POINTER(TREE_VALUE(TREE_PURPOSE(Output)));
+    Constraints[ValNum] = Constraint;
   }
+  // Process inputs.
+  for (tree Input = inputs; Input; Input = TREE_CHAIN(Input),++ValNum) {
+    tree Val = TREE_VALUE(Input);
+    tree type = TREE_TYPE(Val);
+    // If there's an erroneous arg, emit no insn.
+    if (type == error_mark_node) return;
 
-  // Build the value as a ulong constant, then constant fold it to the right
-  // type.  This handles overflow and other things appropriately.
-  uint64_t IntValue = getINTEGER_CSTVal(exp);
-  ConstantInt *C = ConstantInt::get(Type::getInt64Ty(Context), IntValue);
-  // The destination type can be a pointer, integer or floating point
-  // so we need a generalized cast here
-  Instruction::CastOps opcode = CastInst::getCastOpcode(C, false, Ty,
-      !TYPE_UNSIGNED(TREE_TYPE(exp)));
-  return TheFolder->CreateCast(opcode, C, Ty);
-}
+    const char *Constraint =
+      TREE_STRING_POINTER(TREE_VALUE(TREE_PURPOSE(Input)));
+    Constraints[ValNum] = Constraint;
+  }
 
-Constant *TreeConstantToLLVM::ConvertREAL_CST(tree exp) {
-  const Type *Ty = ConvertType(TREE_TYPE(exp));
-  assert(Ty->isFloatingPoint() && "Integer REAL_CST?");
-  long RealArr[2];
-  union {
-    int UArr[2];
-    double V;
-  };
-  if (Ty==Type::getFloatTy(Context) || Ty==Type::getDoubleTy(Context)) {
-    REAL_VALUE_TO_TARGET_DOUBLE(TREE_REAL_CST(exp), RealArr);
+  // If there are multiple constraint tuples, pick one.  Constraints is
+  // altered to point to shorter strings (which are malloc'ed), and everything
+  // below Just Works as in the NumChoices==1 case.
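+  // For example, if the first alternative of "=r,m" is chosen, the constraint
+  // is replaced by just "=r".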
+  const char** ReplacementStrings = 0;
+  if (NumChoices>1) {
+    ReplacementStrings =
+      (const char **)alloca((NumOutputs + NumInputs) * sizeof(const char *));
+    ChooseConstraintTuple(Constraints, stmt, outputs, inputs, NumOutputs,
+                          NumInputs, NumChoices, ReplacementStrings);
+  }
 
-    // Here's how this works:
-    // REAL_VALUE_TO_TARGET_DOUBLE() will generate the floating point number
-    // as an array of integers in the target's representation.  Each integer
-    // in the array will hold 32 bits of the result REGARDLESS OF THE HOST'S
-    // INTEGER SIZE.
-    //
-    // This, then, makes the conversion pretty simple.  The tricky part is
-    // getting the byte ordering correct and make sure you don't print any
-    // more than 32 bits per integer on platforms with ints > 32 bits.
-    //
-    // We want to switch the words of UArr if host and target endianness
-    // do not match.  FLOAT_WORDS_BIG_ENDIAN describes the target endianness.
-    // The host's used to be available in HOST_WORDS_BIG_ENDIAN, but the gcc
-    // maintainers removed this in a fit of cleanliness between 4.0
-    // and 4.2. llvm::sys has a substitute.
+  std::vector<Value*> CallOps;
+  std::vector<const Type*> CallArgTypes;
+  std::string NewAsmStr = ConvertInlineAsmStr(stmt, outputs, inputs, labels,
+                                              NumOutputs+NumInputs);
+  std::string ConstraintStr;
 
-    UArr[0] = RealArr[0];   // Long -> int convert
-    UArr[1] = RealArr[1];
+  // StoreCallResultAddrs - The pointers to store the results of the call
+  // through.
+  SmallVector<Value *, 4> StoreCallResultAddrs;
+  SmallVector<const Type *, 4> CallResultTypes;
+  SmallVector<bool, 4> CallResultIsSigned;
+  SmallVector<tree, 4> CallResultSSANames;
+  SmallVector<Value *, 4> CallResultSSATemps;
 
-    if (llvm::sys::isBigEndianHost() != FLOAT_WORDS_BIG_ENDIAN)
-      std::swap(UArr[0], UArr[1]);
+  // Process outputs.
+  ValNum = 0;
+  for (tree Output = outputs; Output; Output = TREE_CHAIN(Output), ++ValNum) {
+    tree Operand = TREE_VALUE(Output);
 
-    return
-      ConstantFP::get(Context, Ty==Type::getFloatTy(Context) ?
-                      APFloat((float)V) : APFloat(V));
-  } else if (Ty==Type::getX86_FP80Ty(Context)) {
-    long RealArr[4];
-    uint64_t UArr[2];
-    REAL_VALUE_TO_TARGET_LONG_DOUBLE(TREE_REAL_CST(exp), RealArr);
-    UArr[0] = ((uint64_t)((uint32_t)RealArr[0])) |
-              ((uint64_t)((uint32_t)RealArr[1]) << 32);
-    UArr[1] = (uint16_t)RealArr[2];
-    return ConstantFP::get(Context, APFloat(APInt(80, 2, UArr)));
-  } else if (Ty==Type::getPPC_FP128Ty(Context)) {
-    long RealArr[4];
-    uint64_t UArr[2];
-    REAL_VALUE_TO_TARGET_LONG_DOUBLE(TREE_REAL_CST(exp), RealArr);
+    // Parse the output constraint.
+    const char *Constraint = Constraints[ValNum];
+    bool IsInOut, AllowsReg, AllowsMem;
+    if (!parse_output_constraint(&Constraint, ValNum, NumInputs, NumOutputs,
+                                 &AllowsMem, &AllowsReg, &IsInOut)) {
+      if (NumChoices>1)
+        FreeConstTupleStrings(ReplacementStrings, NumInputs+NumOutputs);
+      return;
+    }
+    assert(Constraint[0] == '=' && "Not an output constraint?");
 
-    UArr[0] = ((uint64_t)((uint32_t)RealArr[0]) << 32) |
-              ((uint64_t)((uint32_t)RealArr[1]));
-    UArr[1] = ((uint64_t)((uint32_t)RealArr[2]) << 32) |
-              ((uint64_t)((uint32_t)RealArr[3]));
-    return ConstantFP::get(Context, APFloat(APInt(128, 2, UArr)));
-  }
-  assert(0 && "Floating point type not handled yet");
-  return 0;   // outwit compiler warning
-}
+    // Output constraints must be addressable if they aren't simple register
+    // constraints (this emits "address of register var" errors, etc).
+    if (!AllowsReg && (AllowsMem || IsInOut))
+      mark_addressable(Operand);
 
-Constant *TreeConstantToLLVM::ConvertVECTOR_CST(tree exp) {
-  if (!TREE_VECTOR_CST_ELTS(exp))
-    return Constant::getNullValue(ConvertType(TREE_TYPE(exp)));
+    // Count the number of "+" constraints.
+    if (IsInOut)
+      ++NumInOut, ++NumInputs;
 
-  std::vector<Constant*> Elts;
-  for (tree elt = TREE_VECTOR_CST_ELTS(exp); elt; elt = TREE_CHAIN(elt))
-    Elts.push_back(Convert(TREE_VALUE(elt)));
+    std::string SimplifiedConstraint;
+    // If this output register is pinned to a machine register, use that machine
+    // register instead of the specified constraint.
+    if (TREE_CODE(Operand) == VAR_DECL && DECL_HARD_REGISTER(Operand)) {
+      const char* RegName = extractRegisterName(Operand);
+      int RegNum = decode_reg_name(RegName);
+      if (RegNum >= 0) {
+        RegName = getConstraintRegNameFromGccTables(RegName, RegNum);
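+        // Wrap the register name in LLVM's explicit register constraint
+        // syntax, e.g. "={eax}" for a variable pinned to eax.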
+        unsigned RegNameLen = strlen(RegName);
+        char *NewConstraint = (char*)alloca(RegNameLen+4);
+        NewConstraint[0] = '=';
+        NewConstraint[1] = '{';
+        memcpy(NewConstraint+2, RegName, RegNameLen);
+        NewConstraint[RegNameLen+2] = '}';
+        NewConstraint[RegNameLen+3] = 0;
+        SimplifiedConstraint = NewConstraint;
+        // We should no longer consider mem constraints.
+        AllowsMem = false;
+      } else {
+        // If we can simplify the constraint into something else, do so now.
+        // This avoids LLVM having to know about all the (redundant) GCC
+        // constraints.
+        SimplifiedConstraint = CanonicalizeConstraint(Constraint+1);
+      }
+    } else {
+      SimplifiedConstraint = CanonicalizeConstraint(Constraint+1);
+    }
 
-  // The vector should be zero filled if insufficient elements are provided.
-  if (Elts.size() < TYPE_VECTOR_SUBPARTS(TREE_TYPE(exp))) {
-    tree EltType = TREE_TYPE(TREE_TYPE(exp));
-    Constant *Zero = Constant::getNullValue(ConvertType(EltType));
-    while (Elts.size() < TYPE_VECTOR_SUBPARTS(TREE_TYPE(exp)))
-      Elts.push_back(Zero);
+    LValue Dest;
+    const Type *DestValTy;
+    if (TREE_CODE(Operand) == SSA_NAME) {
+      // The ASM is defining an ssa name.  Store the output to a temporary, then
+      // load it out again later as the ssa name.
+      DestValTy = ConvertType(TREE_TYPE(Operand));
+      Dest.Ptr = CreateTemporary(DestValTy);
+      CallResultSSANames.push_back(Operand);
+      CallResultSSATemps.push_back(Dest.Ptr);
+    } else {
+      Dest = EmitLV(Operand);
+      DestValTy = cast<PointerType>(Dest.Ptr->getType())->getElementType();
+    }
+
+    assert(!Dest.isBitfield() && "Cannot assign into a bitfield!");
+    if (!AllowsMem && DestValTy->isSingleValueType()) {// Reg dest -> asm return
+      StoreCallResultAddrs.push_back(Dest.Ptr);
+      ConstraintStr += ",=";
+      ConstraintStr += SimplifiedConstraint;
+      CallResultTypes.push_back(DestValTy);
+      CallResultIsSigned.push_back(!TYPE_UNSIGNED(TREE_TYPE(Operand)));
+    } else {
+      ConstraintStr += ",=*";
+      ConstraintStr += SimplifiedConstraint;
+      CallOps.push_back(Dest.Ptr);
+      CallArgTypes.push_back(Dest.Ptr->getType());
+    }
   }
 
-  return ConstantVector::get(Elts);
-}
+  // Process inputs.
+  for (tree Input = inputs; Input; Input = TREE_CHAIN(Input),++ValNum) {
+    tree Val = TREE_VALUE(Input);
+    tree type = TREE_TYPE(Val);
 
-Constant *TreeConstantToLLVM::ConvertSTRING_CST(tree exp) {
-  const ArrayType *StrTy = cast<ArrayType>(ConvertType(TREE_TYPE(exp)));
-  const Type *ElTy = StrTy->getElementType();
+    const char *Constraint = Constraints[ValNum];
 
-  unsigned Len = (unsigned)TREE_STRING_LENGTH(exp);
+    bool AllowsReg, AllowsMem;
+    if (!parse_input_constraint(Constraints+ValNum, ValNum-NumOutputs,
+                                NumInputs, NumOutputs, NumInOut,
+                                Constraints, &AllowsMem, &AllowsReg)) {
+      if (NumChoices>1)
+        FreeConstTupleStrings(ReplacementStrings, NumInputs+NumOutputs);
+      return;
+    }
+    bool isIndirect = false;
+    if (AllowsReg || !AllowsMem) {    // Register operand.
+      const Type *LLVMTy = ConvertType(type);
 
-  std::vector<Constant*> Elts;
-  if (ElTy == Type::getInt8Ty(Context)) {
-    const unsigned char *InStr =(const unsigned char *)TREE_STRING_POINTER(exp);
-    for (unsigned i = 0; i != Len; ++i)
-      Elts.push_back(ConstantInt::get(Type::getInt8Ty(Context), InStr[i]));
-  } else if (ElTy == Type::getInt16Ty(Context)) {
-    assert((Len&1) == 0 &&
-           "Length in bytes should be a multiple of element size");
-    const uint16_t *InStr =
-      (const unsigned short *)TREE_STRING_POINTER(exp);
-    for (unsigned i = 0; i != Len/2; ++i) {
-      // gcc has constructed the initializer elements in the target endianness,
-      // but we're going to treat them as ordinary shorts from here, with
-      // host endianness.  Adjust if necessary.
-      if (llvm::sys::isBigEndianHost() == BYTES_BIG_ENDIAN)
-        Elts.push_back(ConstantInt::get(Type::getInt16Ty(Context), InStr[i]));
-      else
-        Elts.push_back(ConstantInt::get(Type::getInt16Ty(Context), ByteSwap_16(InStr[i])));
+      Value *Op = 0;
+      if (LLVMTy->isSingleValueType()) {
+        if (TREE_CODE(Val)==ADDR_EXPR &&
+            TREE_CODE(TREE_OPERAND(Val,0))==LABEL_DECL) {
+          // Emit the label, but do not assume it is going to be the target
+          // of an indirect branch.  Having this logic here is a hack; there
+          // should be a bit in the label identifying it as in an asm.
+          Op = getLabelDeclBlock(TREE_OPERAND(Val, 0));
+        } else
+          Op = Emit(Val, 0);
+      } else {
+        LValue LV = EmitLV(Val);
+        assert(!LV.isBitfield() && "Inline asm can't have bitfield operand");
+
+        // Structs and unions are permitted here, as long as they're the
+        // same size as a register.
+        uint64_t TySize = TD.getTypeSizeInBits(LLVMTy);
+        if (TySize == 1 || TySize == 8 || TySize == 16 ||
+            TySize == 32 || TySize == 64) {
+          LLVMTy = IntegerType::get(Context, TySize);
+          Op = Builder.CreateLoad(Builder.CreateBitCast(LV.Ptr,
+                                               PointerType::getUnqual(LLVMTy)));
+        } else {
+          // Otherwise, emit our value as an lvalue and let the codegen deal
+          // with it.
+          isIndirect = true;
+          Op = LV.Ptr;
+        }
+      }
+
+      const Type *OpTy = Op->getType();
+      // If this input operand is matching an output operand, e.g. '0', check if
+      // this is something that llvm supports. If the operand types are
+      // different, then emit an error if 1) one of the types is not integer or
+      // pointer, 2) if size of input type is larger than the output type. If
+      // the size of the integer input size is smaller than the integer output
+      // type, then cast it to the larger type and shift the value if the target
+      // is big endian.
+      if (ISDIGIT(Constraint[0])) {
+        unsigned Match = atoi(Constraint);
+        const Type *OTy = (Match < CallResultTypes.size())
+          ? CallResultTypes[Match] : 0;
+        if (OTy && OTy != OpTy) {
+          if (!(isa<IntegerType>(OTy) || isa<PointerType>(OTy)) ||
+              !(isa<IntegerType>(OpTy) || isa<PointerType>(OpTy))) {
+            error_at(gimple_location(stmt),
+                     "unsupported inline asm: input constraint with a matching "
+                     "output constraint of incompatible type!");
+            if (NumChoices>1)
+              FreeConstTupleStrings(ReplacementStrings, NumInputs+NumOutputs);
+            return;
+          }
+          unsigned OTyBits = TD.getTypeSizeInBits(OTy);
+          unsigned OpTyBits = TD.getTypeSizeInBits(OpTy);
+          if (OTyBits == 0 || OpTyBits == 0 || OTyBits < OpTyBits) {
+            // It's tempting to implement the OTyBits < OpTyBits case by
+            // truncating Op down to OTy, however that breaks in the case of
+            // an inline asm constraint that corresponds to a single register,
+            // because the user can write code that assumes the whole register
+            // is defined, despite the output operand being only a subset of
+            // the register. For example:
+            //
+            //   asm ("sarl $10, %%eax" : "=a"(c) : "0"(1000000));
+            //
+            // The expected behavior is for %eax to be fully defined with the
+            // value 1000000 immediately before the asm.
+            error_at(gimple_location(stmt),
+                     "unsupported inline asm: input constraint with a matching "
+                     "output constraint of incompatible type!");
+            if (NumChoices>1)
+              FreeConstTupleStrings(ReplacementStrings, NumInputs+NumOutputs);
+            return;
+          } else if (OTyBits > OpTyBits) {
+            Op = CastToAnyType(Op, !TYPE_UNSIGNED(type),
+                               OTy, CallResultIsSigned[Match]);
+            if (BYTES_BIG_ENDIAN) {
+              Constant *ShAmt = ConstantInt::get(Op->getType(),
+                                                 OTyBits-OpTyBits);
+              Op = Builder.CreateLShr(Op, ShAmt);
+            }
+            OpTy = Op->getType();
+          }
+        }
+      }
+
+      CallOps.push_back(Op);
+      CallArgTypes.push_back(OpTy);
+    } else {                          // Memory operand.
+      mark_addressable(TREE_VALUE(Input));
+      isIndirect = true;
+      LValue Src = EmitLV(Val);
+      assert(!Src.isBitfield() && "Cannot read from a bitfield!");
+      CallOps.push_back(Src.Ptr);
+      CallArgTypes.push_back(Src.Ptr->getType());
     }
-  } else if (ElTy == Type::getInt32Ty(Context)) {
-    assert((Len&3) == 0 &&
-           "Length in bytes should be a multiple of element size");
-    const uint32_t *InStr = (const uint32_t *)TREE_STRING_POINTER(exp);
-    for (unsigned i = 0; i != Len/4; ++i) {
-      // gcc has constructed the initializer elements in the target endianness,
-      // but we're going to treat them as ordinary ints from here, with
-      // host endianness.  Adjust if necessary.
-      if (llvm::sys::isBigEndianHost() == BYTES_BIG_ENDIAN)
-        Elts.push_back(ConstantInt::get(Type::getInt32Ty(Context), InStr[i]));
-      else
-        Elts.push_back(ConstantInt::get(Type::getInt32Ty(Context), ByteSwap_32(InStr[i])));
+
+    ConstraintStr += ',';
+    if (isIndirect)
+      ConstraintStr += '*';
+
+    // If this output register is pinned to a machine register, use that machine
+    // register instead of the specified constraint.
+    if (TREE_CODE(Val) == VAR_DECL && DECL_HARD_REGISTER(Val)) {
+      const char *RegName = extractRegisterName(Val);
+      int RegNum = decode_reg_name(RegName);
+      if (RegNum >= 0) {
+        RegName = getConstraintRegNameFromGccTables(RegName, RegNum);
+        ConstraintStr += '{';
+        ConstraintStr += RegName;
+        ConstraintStr += '}';
+        continue;
+      }
     }
-  } else {
-    assert(0 && "Unknown character type!");
+
+    // If there is a simpler form for the register constraint, use it.
+    std::string Simplified = CanonicalizeConstraint(Constraint);
+    ConstraintStr += Simplified;
   }
 
-  unsigned LenInElts = Len /
-          TREE_INT_CST_LOW(TYPE_SIZE_UNIT(TREE_TYPE(TREE_TYPE(exp))));
-  unsigned ConstantSize = StrTy->getNumElements();
+  // Process clobbers.
 
-  if (LenInElts != ConstantSize) {
-    // If this is a variable sized array type, set the length to LenInElts.
-    if (ConstantSize == 0) {
-      tree Domain = TYPE_DOMAIN(TREE_TYPE(exp));
-      if (!Domain || !TYPE_MAX_VALUE(Domain)) {
-        ConstantSize = LenInElts;
-        StrTy = ArrayType::get(ElTy, LenInElts);
-      }
+  // Some targets automatically clobber registers across an asm.
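+  // On x86, for example, the condition flags are in this category.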
+  tree Clobbers = targetm.md_asm_clobbers(outputs, inputs, clobbers);
+  for (; Clobbers; Clobbers = TREE_CHAIN(Clobbers)) {
+    const char *RegName = TREE_STRING_POINTER(TREE_VALUE(Clobbers));
+    int RegCode = decode_reg_name(RegName);
+
+    switch (RegCode) {
+    case -1:     // Nothing specified?
+    case -2:     // Invalid.
+      error_at(gimple_location(stmt), "unknown register name %qs in %<asm%>",
+               RegName);
+      if (NumChoices>1)
+        FreeConstTupleStrings(ReplacementStrings, NumInputs+NumOutputs);
+      return;
+    case -3:     // cc
+      ConstraintStr += ",~{cc}";
+      break;
+    case -4:     // memory
+      ConstraintStr += ",~{memory}";
+      break;
+    default:     // Normal register name.
+      RegName = getConstraintRegNameFromGccTables(RegName, RegCode);
+      ConstraintStr += ",~{";
+      ConstraintStr += RegName;
+      ConstraintStr += "}";
+      break;
     }
+  }
 
-    if (ConstantSize < LenInElts) {
-      // Only some chars are being used, truncate the string: char X[2] = "foo";
-      Elts.resize(ConstantSize);
-    } else {
-      // Fill the end of the string with nulls.
-      Constant *C = Constant::getNullValue(ElTy);
-      for (; LenInElts != ConstantSize; ++LenInElts)
-        Elts.push_back(C);
+  const Type *CallResultType;
+  switch (CallResultTypes.size()) {
+  case 0: CallResultType = Type::getVoidTy(Context); break;
+  case 1: CallResultType = CallResultTypes[0]; break;
+  default:
+    std::vector<const Type*> TmpVec(CallResultTypes.begin(),
+                                    CallResultTypes.end());
+    CallResultType = StructType::get(Context, TmpVec);
+    break;
+  }
+
+  const FunctionType *FTy =
+    FunctionType::get(CallResultType, CallArgTypes, false);
+
+  // Remove the leading comma if we have operands.
+  if (!ConstraintStr.empty())
+    ConstraintStr.erase(ConstraintStr.begin());
+
+  // Make sure we've created a valid inline asm expression.
+  if (!InlineAsm::Verify(FTy, ConstraintStr)) {
+    error_at(gimple_location(stmt), "Invalid or unsupported inline assembly!");
+    if (NumChoices>1)
+      FreeConstTupleStrings(ReplacementStrings, NumInputs+NumOutputs);
+    return;
+  }
+
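+  // Note that an asm with no output operands is implicitly treated as
+  // volatile, since it is presumably executed for its side effects.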
+  Value *Asm = InlineAsm::get(FTy, NewAsmStr, ConstraintStr,
+                              gimple_asm_volatile_p(stmt) || !outputs);
+  CallInst *CV = Builder.CreateCall(Asm, CallOps.begin(), CallOps.end(),
+                                    CallResultTypes.empty() ? "" : "asmtmp");
+  CV->setDoesNotThrow();
+
+  // If the call produces a value, store it into the destination.
+  if (StoreCallResultAddrs.size() == 1)
+    Builder.CreateStore(CV, StoreCallResultAddrs[0]);
+  else if (unsigned NumResults = StoreCallResultAddrs.size()) {
+    for (unsigned i = 0; i != NumResults; ++i) {
+      Value *ValI = Builder.CreateExtractValue(CV, i, "asmresult");
+      Builder.CreateStore(ValI, StoreCallResultAddrs[i]);
     }
   }
-  return ConstantArray::get(StrTy, Elts);
-}
 
-Constant *TreeConstantToLLVM::ConvertCOMPLEX_CST(tree exp) {
-  Constant *Elts[2] = {
-    Convert(TREE_REALPART(exp)),
-    Convert(TREE_IMAGPART(exp))
-  };
-  return ConstantStruct::get(Context, Elts, 2, false);
-}
+  // If the call defined any ssa names, associate them with their value.
+  for (unsigned i = 0, e = CallResultSSANames.size(); i != e; ++i)
+    SSANames[CallResultSSANames[i]] = Builder.CreateLoad(CallResultSSATemps[i]);
 
-Constant *TreeConstantToLLVM::ConvertNOP_EXPR(tree exp) {
-  Constant *Elt = Convert(TREE_OPERAND(exp, 0));
-  const Type *Ty = ConvertType(TREE_TYPE(exp));
-  bool EltIsSigned = !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp, 0)));
-  bool TyIsSigned = !TYPE_UNSIGNED(TREE_TYPE(exp));
+  // Give the backend a chance to upgrade the inline asm to LLVM code.  This
+  // handles some common cases that LLVM has intrinsics for, e.g. x86 bswap ->
+  // llvm.bswap.
+  if (const TargetLowering *TLI = TheTarget->getTargetLowering())
+    TLI->ExpandInlineAsm(CV);
 
-  // If this is a structure-to-structure cast, just return the uncasted value.
-  if (!Elt->getType()->isSingleValueType() || !Ty->isSingleValueType())
-    return Elt;
+  if (NumChoices>1)
+    FreeConstTupleStrings(ReplacementStrings, NumInputs+NumOutputs);
+}
 
-  // Elt and Ty can be integer, float or pointer here: need generalized cast
-  Instruction::CastOps opcode = CastInst::getCastOpcode(Elt, EltIsSigned,
-                                                        Ty, TyIsSigned);
-  return TheFolder->CreateCast(opcode, Elt, Ty);
+void TreeToLLVM::RenderGIMPLE_ASSIGN(gimple stmt) {
+  tree lhs = gimple_assign_lhs(stmt);
+  if (AGGREGATE_TYPE_P(TREE_TYPE(lhs))) {
+    LValue LV = EmitLV(lhs);
+    MemRef NewLoc(LV.Ptr, LV.getAlignment(), TREE_THIS_VOLATILE(lhs));
+    // TODO: This case can presumably only happen with special gimple
+    // assign right-hand-sides.  Try to simplify by exploiting this.
+    EmitGimpleAssignRHS(stmt, &NewLoc);
+    return;
+  }
+  WriteScalarToLHS(lhs, EmitGimpleAssignRHS(stmt, 0));
 }
 
-Constant *TreeConstantToLLVM::ConvertCONVERT_EXPR(tree exp) {
-  Constant *Elt = Convert(TREE_OPERAND(exp, 0));
-  bool EltIsSigned = !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp, 0)));
-  const Type *Ty = ConvertType(TREE_TYPE(exp));
-  bool TyIsSigned = !TYPE_UNSIGNED(TREE_TYPE(exp));
-  Instruction::CastOps opcode = CastInst::getCastOpcode(Elt, EltIsSigned, Ty,
-                                                        TyIsSigned);
-  return TheFolder->CreateCast(opcode, Elt, Ty);
+void TreeToLLVM::RenderGIMPLE_CALL(gimple stmt) {
+  tree lhs = gimple_call_lhs(stmt);
+  if (!lhs) {
+    // The returned value is not used.
+    if (!AGGREGATE_TYPE_P(gimple_call_return_type(stmt))) {
+      EmitGimpleCallRHS(stmt, 0);
+      return;
+    }
+    // Create a temporary to hold the returned value.
+    // TODO: Figure out how to avoid creating this temporary and the
+    // associated useless code that stores the returned value into it.
+    MemRef Loc = CreateTempLoc(ConvertType(gimple_call_return_type(stmt)));
+    EmitGimpleCallRHS(stmt, &Loc);
+    return;
+  }
+
+  if (AGGREGATE_TYPE_P(TREE_TYPE(lhs))) {
+    LValue LV = EmitLV(lhs);
+    MemRef NewLoc(LV.Ptr, LV.getAlignment(), TREE_THIS_VOLATILE(lhs));
+    EmitGimpleCallRHS(stmt, &NewLoc);
+    return;
+  }
+  WriteScalarToLHS(lhs, EmitGimpleCallRHS(stmt, 0));
 }
 
-Constant *TreeConstantToLLVM::ConvertPOINTER_PLUS_EXPR(tree exp) {
-  Constant *Ptr = Convert(TREE_OPERAND(exp, 0)); // The pointer.
-  Constant *Idx = Convert(TREE_OPERAND(exp, 1)); // The offset in bytes.
+void TreeToLLVM::RenderGIMPLE_COND(gimple stmt) {
+  // Emit the comparison.
+  Value *Cond = EmitCompare(gimple_cond_lhs(stmt), gimple_cond_rhs(stmt),
+                            gimple_cond_code(stmt));
 
-  // Convert the pointer into an i8* and add the offset to it.
-  Ptr = TheFolder->CreateBitCast(Ptr, Type::getInt8Ty(Context)->getPointerTo());
-  Constant *GEP = POINTER_TYPE_OVERFLOW_UNDEFINED ?
-    TheFolder->CreateInBoundsGetElementPtr(Ptr, &Idx, 1) :
-    TheFolder->CreateGetElementPtr(Ptr, &Idx, 1);
+  // Extract the target basic blocks.
+  edge true_edge, false_edge;
+  extract_true_false_edges_from_block(gimple_bb(stmt), &true_edge, &false_edge);
+  BasicBlock *IfTrue = getBasicBlock(true_edge->dest);
+  BasicBlock *IfFalse = getBasicBlock(false_edge->dest);
 
-  // The result may be of a different pointer type.
-  return TheFolder->CreateBitCast(GEP, ConvertType(TREE_TYPE(exp)));
+  // Branch based on the condition.
+  Builder.CreateCondBr(Cond, IfTrue, IfFalse);
 }
 
-Constant *TreeConstantToLLVM::ConvertBinOp_CST(tree exp) {
-  Constant *LHS = Convert(TREE_OPERAND(exp, 0));
-  bool LHSIsSigned = !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp,0)));
-  Constant *RHS = Convert(TREE_OPERAND(exp, 1));
-  bool RHSIsSigned = !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp,1)));
-  Instruction::CastOps opcode;
-  if (isa<PointerType>(LHS->getType())) {
-    const Type *IntPtrTy = getTargetData().getIntPtrType(Context);
-    opcode = CastInst::getCastOpcode(LHS, LHSIsSigned, IntPtrTy, false);
-    LHS = TheFolder->CreateCast(opcode, LHS, IntPtrTy);
-    opcode = CastInst::getCastOpcode(RHS, RHSIsSigned, IntPtrTy, false);
-    RHS = TheFolder->CreateCast(opcode, RHS, IntPtrTy);
-  }
+void TreeToLLVM::RenderGIMPLE_GOTO(gimple stmt) {
+  tree dest = gimple_goto_dest(stmt);
 
-  Constant *Result;
-  switch (TREE_CODE(exp)) {
-  default: assert(0 && "Unexpected case!");
-  case PLUS_EXPR:   Result = TheFolder->CreateAdd(LHS, RHS); break;
-  case MINUS_EXPR:  Result = TheFolder->CreateSub(LHS, RHS); break;
+  if (TREE_CODE(dest) == LABEL_DECL) {
+    // Direct branch.
+    Builder.CreateBr(getLabelDeclBlock(dest));
+    return;
   }
 
-  const Type *Ty = ConvertType(TREE_TYPE(exp));
-  bool TyIsSigned = !TYPE_UNSIGNED(TREE_TYPE(exp));
-  opcode = CastInst::getCastOpcode(Result, LHSIsSigned, Ty, TyIsSigned);
-  return TheFolder->CreateCast(opcode, Result, Ty);
-}
+  // Otherwise we have an indirect goto.
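+  // These come from computed gotos such as "goto *ptr;" in GNU C.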
+  BasicBlock *DestBB = getIndirectGotoBlock();
 
-Constant *TreeConstantToLLVM::ConvertCONSTRUCTOR(tree exp) {
-  // Please note, that we can have empty ctor, even if array is non-trivial (has
-  // nonzero number of entries). This situation is typical for static ctors,
-  // when array is filled during program initialization.
-  if (CONSTRUCTOR_ELTS(exp) == 0 ||
-      VEC_length(constructor_elt, CONSTRUCTOR_ELTS(exp)) == 0)  // All zeros?
-    return Constant::getNullValue(ConvertType(TREE_TYPE(exp)));
+  // Store the destination block to the GotoValue alloca.
+  Value *V = Builder.CreatePtrToInt(Emit(dest, 0), TD.getIntPtrType(Context));
+  Builder.CreateStore(V, IndirectGotoValue);
 
-  switch (TREE_CODE(TREE_TYPE(exp))) {
-  default:
-    debug_tree(exp);
-    assert(0 && "Unknown ctor!");
-  case VECTOR_TYPE:
-  case ARRAY_TYPE:  return ConvertArrayCONSTRUCTOR(exp);
-  case RECORD_TYPE: return ConvertRecordCONSTRUCTOR(exp);
-  case QUAL_UNION_TYPE:
-  case UNION_TYPE:  return ConvertUnionCONSTRUCTOR(exp);
-  }
+  // FIXME: This is HORRIBLY INCORRECT in the presence of exception handlers.
+  // There should be one collector block per cleanup level!
+  Builder.CreateBr(DestBB);
 }
 
-Constant *TreeConstantToLLVM::ConvertArrayCONSTRUCTOR(tree exp) {
-  // Vectors are like arrays, but the domain is stored via an array
-  // type indirectly.
-
-  // If we have a lower bound for the range of the type, get it.
-  tree InitType = TREE_TYPE(exp);
-  tree min_element = size_zero_node;
-  std::vector<Constant*> ResultElts;
-
-  if (TREE_CODE(InitType) == VECTOR_TYPE) {
-    ResultElts.resize(TYPE_VECTOR_SUBPARTS(InitType));
-  } else {
-    assert(TREE_CODE(InitType) == ARRAY_TYPE && "Unknown type for init");
-    tree Domain = TYPE_DOMAIN(InitType);
-    if (Domain && TYPE_MIN_VALUE(Domain))
-      min_element = fold_convert(sizetype, TYPE_MIN_VALUE(Domain));
+void TreeToLLVM::RenderGIMPLE_RESX(gimple stmt) {
+  abort();
+//FIXME  int RegionNo = gimple_resx_region(stmt);
+//FIXME  std::vector<eh_region> Handlers;
+//FIXME
+//FIXME  foreach_reachable_handler(RegionNo, true, false, AddHandler, &Handlers);
+//FIXME
+//FIXME  if (!Handlers.empty()) {
+//FIXME    for (std::vector<eh_region>::iterator I = Handlers.begin(),
+//FIXME         E = Handlers.end(); I != E; ++I)
+//FIXME      // Create a post landing pad for the handler.
+//FIXME      getPostPad(get_eh_region_number(*I));
+//FIXME
+//FIXME    Builder.CreateBr(getPostPad(get_eh_region_number(*Handlers.begin())));
+//FIXME  } else {
+//FIXME    assert(can_throw_external_1(RegionNo, true, false) &&
+//FIXME           "Must-not-throw region handled by runtime?");
+//FIXME    // Unwinding continues in the caller.
+//FIXME    if (!UnwindBB)
+//FIXME      UnwindBB = BasicBlock::Create(Context, "Unwind");
+//FIXME    Builder.CreateBr(UnwindBB);
+//FIXME  }
+}
 
-    if (Domain && TYPE_MAX_VALUE(Domain)) {
-      tree max_element = fold_convert(sizetype, TYPE_MAX_VALUE(Domain));
-      tree size = size_binop (MINUS_EXPR, max_element, min_element);
-      size = size_binop (PLUS_EXPR, size, size_one_node);
+void TreeToLLVM::RenderGIMPLE_RETURN(gimple stmt) {
+  tree retval = gimple_return_retval(stmt);
+  tree result = DECL_RESULT(current_function_decl);
 
-      if (host_integerp(size, 1))
-        ResultElts.resize(tree_low_cst(size, 1));
+  if (retval && retval != error_mark_node && retval != result) {
+    // Store the return value to the function's DECL_RESULT.
+    if (AGGREGATE_TYPE_P(TREE_TYPE(result))) {
+      MemRef DestLoc(DECL_LOCAL(result), 1, false); // FIXME: What alignment?
+      Emit(retval, &DestLoc);
+    } else {
+      Value *Val = Builder.CreateBitCast(Emit(retval, 0),
+                                         ConvertType(TREE_TYPE(result)));
+      Builder.CreateStore(Val, DECL_LOCAL(result));
     }
   }
 
-  unsigned NextFieldToFill = 0;
-  unsigned HOST_WIDE_INT ix;
-  tree elt_index, elt_value;
-  Constant *SomeVal = 0;
-  FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp), ix, elt_index, elt_value) {
-    // Find and decode the constructor's value.
-    Constant *Val = Convert(elt_value);
-    SomeVal = Val;
-
-    // Get the index position of the element within the array.  Note that this
-    // can be NULL_TREE, which means that it belongs in the next available slot.
-    tree index = elt_index;
+  // Emit a branch to the exit label.
+  Builder.CreateBr(ReturnBB);
+}
 
-    // The first and last field to fill in, inclusive.
-    unsigned FieldOffset, FieldLastOffset;
-    if (index && TREE_CODE(index) == RANGE_EXPR) {
-      tree first = fold_convert (sizetype, TREE_OPERAND(index, 0));
-      tree last  = fold_convert (sizetype, TREE_OPERAND(index, 1));
+void TreeToLLVM::RenderGIMPLE_SWITCH(gimple stmt) {
+  // Emit the condition.
+  Value *Index = Emit(gimple_switch_index(stmt), 0);
+  bool IndexIsSigned = !TYPE_UNSIGNED(TREE_TYPE(gimple_switch_index(stmt)));
 
-      first = size_binop (MINUS_EXPR, first, min_element);
-      last  = size_binop (MINUS_EXPR, last, min_element);
+  // Create the switch instruction.
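+  // In a gimple switch the label at index 0 is the default destination.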
+  tree default_label = CASE_LABEL(gimple_switch_label(stmt, 0));
+  SwitchInst *SI = Builder.CreateSwitch(Index, getLabelDeclBlock(default_label),
+                                        gimple_switch_num_labels(stmt));
 
-      assert(host_integerp(first, 1) && host_integerp(last, 1) &&
-             "Unknown range_expr!");
-      FieldOffset     = tree_low_cst(first, 1);
-      FieldLastOffset = tree_low_cst(last, 1);
-    } else if (index) {
-      index = size_binop (MINUS_EXPR, fold_convert (sizetype, index),
-                          min_element);
-      assert(host_integerp(index, 1));
-      FieldOffset = tree_low_cst(index, 1);
-      FieldLastOffset = FieldOffset;
-    } else {
-      FieldOffset = NextFieldToFill;
-      FieldLastOffset = FieldOffset;
-    }
+  // Add the switch cases.
+  BasicBlock *IfBlock = 0; // Set if a range was output as an "if".
+  for (size_t i = 1, e = gimple_switch_num_labels(stmt); i != e; ++i) {
+    tree label = gimple_switch_label(stmt, i);
+    BasicBlock *Dest = getLabelDeclBlock(CASE_LABEL(label));
 
-    // Process all of the elements in the range.
-    for (--FieldOffset; FieldOffset != FieldLastOffset; ) {
-      ++FieldOffset;
-      if (FieldOffset == ResultElts.size())
-        ResultElts.push_back(Val);
-      else {
-        if (FieldOffset >= ResultElts.size())
-          ResultElts.resize(FieldOffset+1);
-        ResultElts[FieldOffset] = Val;
-      }
+    // Convert the integer to the right type.
+    Value *Val = Emit(CASE_LOW(label), 0);
+    Val = CastToAnyType(Val, !TYPE_UNSIGNED(TREE_TYPE(CASE_LOW(label))),
+                        Index->getType(), IndexIsSigned);
+    ConstantInt *LowC = cast<ConstantInt>(Val);
 
-      NextFieldToFill = FieldOffset+1;
+    if (!CASE_HIGH(label)) {
+      SI->addCase(LowC, Dest); // Single destination.
+      continue;
     }
-  }
 
-  // Zero length array.
-  if (ResultElts.empty())
-    return ConstantArray::get(
-      cast<ArrayType>(ConvertType(TREE_TYPE(exp))), ResultElts);
-  assert(SomeVal && "If we had some initializer, we should have some value!");
+    // Otherwise, we have a range, like 'case 1 ... 17'.
+    Val = Emit(CASE_HIGH(label), 0);
+    // Make sure the case value is the same type as the switch expression
+    Val = CastToAnyType(Val, !TYPE_UNSIGNED(TREE_TYPE(CASE_HIGH(label))),
+                        Index->getType(), IndexIsSigned);
+    ConstantInt *HighC = cast<ConstantInt>(Val);
 
-  // Do a post-pass over all of the elements.  We're taking care of two things
-  // here:
-  //   #1. If any elements did not have initializers specified, provide them
-  //       with a null init.
-  //   #2. If any of the elements have different types, return a struct instead
-  //       of an array.  This can occur in cases where we have an array of
-  //       unions, and the various unions had different pieces init'd.
-  const Type *ElTy = SomeVal->getType();
-  Constant *Filler = Constant::getNullValue(ElTy);
-  bool AllEltsSameType = true;
-  for (unsigned i = 0, e = ResultElts.size(); i != e; ++i) {
-    if (ResultElts[i] == 0)
-      ResultElts[i] = Filler;
-    else if (ResultElts[i]->getType() != ElTy)
-      AllEltsSameType = false;
+    APInt Range = HighC->getValue() - LowC->getValue();
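+    // Ranges spanning at most 64 values are expanded into individual cases;
+    // anything larger is emitted as an explicit comparison below.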
+    if (Range.ult(APInt(Range.getBitWidth(), 64))) {
+      // Add all of the necessary successors to the switch.
+      APInt CurrentValue = LowC->getValue();
+      while (1) {
+        SI->addCase(LowC, Dest);
+        if (LowC == HighC) break;  // Emitted the last one.
+        CurrentValue++;
+        LowC = ConstantInt::get(Context, CurrentValue);
+      }
+    } else {
+      // The range is too big to add to the switch - emit an "if".
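+      // Use the usual unsigned trick: Index is in [Low, High] exactly when
+      // Index - Low <=u High - Low.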
+      if (!IfBlock) {
+        IfBlock = BasicBlock::Create(Context);
+        EmitBlock(IfBlock);
+      }
+      Value *Diff = Builder.CreateSub(Index, LowC);
+      Value *Cond = Builder.CreateICmpULE(Diff,
+                                          ConstantInt::get(Context, Range));
+      BasicBlock *False_Block = BasicBlock::Create(Context);
+      Builder.CreateCondBr(Cond, Dest, False_Block);
+      EmitBlock(False_Block);
+    }
   }
 
-  if (TREE_CODE(InitType) == VECTOR_TYPE) {
-    assert(AllEltsSameType && "Vector of heterogeneous element types?");
-    return ConstantVector::get(ResultElts);
+  if (IfBlock) {
+    Builder.CreateBr(SI->getDefaultDest());
+    SI->setSuccessor(0, IfBlock);
   }
-
-  if (AllEltsSameType)
-    return ConstantArray::get(
-      ArrayType::get(ElTy, ResultElts.size()), ResultElts);
-  return ConstantStruct::get(Context, ResultElts, false);
 }
 
 
-namespace {
-/// ConstantLayoutInfo - A helper class used by ConvertRecordCONSTRUCTOR to
-/// lay out struct inits.
-struct ConstantLayoutInfo {
-  const TargetData &TD;
+//===----------------------------------------------------------------------===//
+//                       ... Constant Expressions ...
+//===----------------------------------------------------------------------===//
 
-  /// ResultElts - The initializer elements so far.
-  std::vector<Constant*> ResultElts;
+/// EmitCONSTRUCTOR - emit the constructor into the location specified by
+/// DestLoc.
+Value *TreeToLLVM::EmitCONSTRUCTOR(tree exp, const MemRef *DestLoc) {
+  tree type = TREE_TYPE(exp);
+  const Type *Ty = ConvertType(type);
+  if (const VectorType *PTy = dyn_cast<VectorType>(Ty)) {
+    assert(DestLoc == 0 && "Dest location for packed value?");
 
-  /// StructIsPacked - This is set to true if we find out that we have to emit
-  /// the ConstantStruct as a Packed LLVM struct type (because the LLVM
-  /// alignment rules would prevent laying out the struct correctly).
-  bool StructIsPacked;
+    std::vector<Value *> BuildVecOps;
 
-  /// NextFieldByteStart - This field indicates the *byte* that the next field
-  /// will start at.  Put another way, this is the size of the struct as
-  /// currently laid out, but without any tail padding considered.
-  uint64_t NextFieldByteStart;
+    // Insert zero initializers for any uninitialized values.
+    Constant *Zero = Constant::getNullValue(PTy->getElementType());
+    BuildVecOps.resize(cast<VectorType>(Ty)->getNumElements(), Zero);
 
-  /// MaxLLVMFieldAlignment - This is the largest alignment of any IR field,
-  /// which is the alignment that the ConstantStruct will get.
-  unsigned MaxLLVMFieldAlignment;
+    // Insert all of the elements here.
+    unsigned HOST_WIDE_INT ix;
+    tree purpose, value;
+    FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp), ix, purpose, value) {
+      if (!purpose) continue;  // Not actually initialized?
 
+      unsigned FieldNo = TREE_INT_CST_LOW(purpose);
 
-  ConstantLayoutInfo(const TargetData &TD) : TD(TD) {
-    StructIsPacked = false;
-    NextFieldByteStart = 0;
-    MaxLLVMFieldAlignment = 1;
-  }
+      // Update the element.
+      if (FieldNo < BuildVecOps.size())
+        BuildVecOps[FieldNo] = Emit(value, 0);
+    }
 
-  void ConvertToPacked();
-  void AddFieldToRecordConstant(Constant *Val, uint64_t GCCFieldOffsetInBits);
-  void AddBitFieldToRecordConstant(ConstantInt *Val,
-                                   uint64_t GCCFieldOffsetInBits);
-  void HandleTailPadding(uint64_t GCCStructBitSize);
-};
+    return BuildVector(BuildVecOps);
+  }
 
-}
+  assert(!Ty->isSingleValueType() && "Constructor for scalar type??");
 
-/// ConvertToPacked - Given a partially constructed initializer for a LLVM
-/// struct constant, change it to make all the implicit padding between elements
-/// be fully explicit.
-void ConstantLayoutInfo::ConvertToPacked() {
-  assert(!StructIsPacked && "Struct is already packed");
-  uint64_t EltOffs = 0;
-  for (unsigned i = 0, e = ResultElts.size(); i != e; ++i) {
-    Constant *Val = ResultElts[i];
+  // Start out with the value zero'd out.
+  EmitAggregateZero(*DestLoc, type);
 
-    // Check to see if this element has an alignment that would cause it to get
-    // offset.  If so, insert explicit padding for the offset.
-    unsigned ValAlign = TD.getABITypeAlignment(Val->getType());
-    uint64_t AlignedEltOffs = TargetData::RoundUpAlignment(EltOffs, ValAlign);
+  VEC(constructor_elt, gc) *elt = CONSTRUCTOR_ELTS(exp);
+  switch (TREE_CODE(TREE_TYPE(exp))) {
+  case ARRAY_TYPE:
+  case RECORD_TYPE:
+  default:
+    if (elt && VEC_length(constructor_elt, elt)) {
+      // We don't handle elements yet.
 
-    // If the alignment doesn't affect the element offset, then the value is ok.
-    // Accept the field and keep moving.
-    if (AlignedEltOffs == EltOffs) {
-      EltOffs += TD.getTypeAllocSize(Val->getType());
-      continue;
+      TODO(exp);
     }
+    return 0;
+  case QUAL_UNION_TYPE:
+  case UNION_TYPE:
+    // Store each element of the constructor into the corresponding field of
+    // DEST.
+    if (!elt || VEC_empty(constructor_elt, elt)) return 0;  // no elements
+    assert(VEC_length(constructor_elt, elt) == 1
+           && "Union CONSTRUCTOR should have one element!");
+    tree tree_purpose = VEC_index(constructor_elt, elt, 0)->index;
+    tree tree_value   = VEC_index(constructor_elt, elt, 0)->value;
+    if (!tree_purpose)
+      return 0;  // Not actually initialized?
 
-    // Otherwise, there is padding here.  Insert explicit zeros.
-    const Type *PadTy = Type::getInt8Ty(Context);
-    if (AlignedEltOffs-EltOffs != 1)
-      PadTy = ArrayType::get(PadTy, AlignedEltOffs-EltOffs);
-    ResultElts.insert(ResultElts.begin()+i,
-                      Constant::getNullValue(PadTy));
-
-    // The padding is now element "i" and just bumped us up to "AlignedEltOffs".
-    EltOffs = AlignedEltOffs;
-    ++e;  // One extra element to scan.
+    if (!ConvertType(TREE_TYPE(tree_purpose))->isSingleValueType()) {
+      Value *V = Emit(tree_value, DestLoc);
+      (void)V;
+      assert(V == 0 && "Aggregate value returned in a register?");
+    } else {
+      // Scalar value.  Evaluate to a register, then do the store.
+      Value *V = Emit(tree_value, 0);
+      Value *Ptr = Builder.CreateBitCast(DestLoc->Ptr,
+                                         PointerType::getUnqual(V->getType()));
+      StoreInst *St = Builder.CreateStore(V, Ptr, DestLoc->Volatile);
+      St->setAlignment(DestLoc->getAlignment());
+    }
+    break;
   }
+  return 0;
+}
 
-  // Packed now!
-  MaxLLVMFieldAlignment = 1;
-  StructIsPacked = true;
+Constant *TreeConstantToLLVM::Convert(tree exp) {
+  assert((TREE_CONSTANT(exp) || TREE_CODE(exp) == STRING_CST) &&
+         "Isn't a constant!");
+  switch (TREE_CODE(exp)) {
+  case FDESC_EXPR:    // Needed on Itanium
+  default:
+    debug_tree(exp);
+    assert(0 && "Unknown constant to convert!");
+    abort();
+  case INTEGER_CST:   return ConvertINTEGER_CST(exp);
+  case REAL_CST:      return ConvertREAL_CST(exp);
+  case VECTOR_CST:    return ConvertVECTOR_CST(exp);
+  case STRING_CST:    return ConvertSTRING_CST(exp);
+  case COMPLEX_CST:   return ConvertCOMPLEX_CST(exp);
+  case NOP_EXPR:      return ConvertNOP_EXPR(exp);
+  case CONVERT_EXPR:  return ConvertCONVERT_EXPR(exp);
+  case PLUS_EXPR:
+  case MINUS_EXPR:    return ConvertBinOp_CST(exp);
+  case CONSTRUCTOR:   return ConvertCONSTRUCTOR(exp);
+  case VIEW_CONVERT_EXPR: return Convert(TREE_OPERAND(exp, 0));
+  case POINTER_PLUS_EXPR: return ConvertPOINTER_PLUS_EXPR(exp);
+  case ADDR_EXPR:
+    return TheFolder->CreateBitCast(EmitLV(TREE_OPERAND(exp, 0)),
+                                    ConvertType(TREE_TYPE(exp)));
+  }
 }
 
+Constant *TreeConstantToLLVM::ConvertINTEGER_CST(tree exp) {
+  const Type *Ty = ConvertType(TREE_TYPE(exp));
 
-/// AddFieldToRecordConstant - As ConvertRecordCONSTRUCTOR builds up an LLVM
-/// constant to represent a GCC CONSTRUCTOR node, it calls this method to add
-/// fields.  The design of this is that it adds leading/trailing padding as
-/// needed to make the piece fit together and honor the GCC layout.  This does
-/// not handle bitfields.
-///
-/// The arguments are:
-///   Val: The value to add to the struct, with a size that matches the size of
-///        the corresponding GCC field.
-///   GCCFieldOffsetInBits: The offset that we have to put Val in the result.
-///
-void ConstantLayoutInfo::
-AddFieldToRecordConstant(Constant *Val, uint64_t GCCFieldOffsetInBits) {
-  // Figure out how to add this non-bitfield value to our constant struct so
-  // that it ends up at the right offset.  There are four cases we have to
-  // think about:
-  //   1. We may be able to just slap it onto the end of our struct and have
-  //      everything be ok.
-  //   2. We may have to insert explicit padding into the LLVM struct to get
-  //      the initializer over into the right space.  This is needed when the
-  //      GCC field has a larger alignment than the LLVM field.
-  //   3. The LLVM field may be too far over and we may be forced to convert
-  //      this to an LLVM packed struct.  This is required when the LLVM
-  //      alignment is larger than the GCC alignment.
-  //   4. We may have a bitfield that needs to be merged into a previous
-  //      field.
-  // Start by determining which case we have by looking at where LLVM and GCC
-  // would place the field.
+  // Handle i128 specially.
+  if (const IntegerType *IT = dyn_cast<IntegerType>(Ty)) {
+    if (IT->getBitWidth() == 128) {
+      // GCC only supports i128 on 64-bit systems.
+      assert(HOST_BITS_PER_WIDE_INT == 64 &&
+             "i128 only supported on 64-bit system");
+      uint64_t Bits[] = { TREE_INT_CST_LOW(exp), TREE_INT_CST_HIGH(exp) };
+      return ConstantInt::get(Context, APInt(128, 2, Bits));
+    }
+  }
 
-  // Verified that we haven't already laid out bytes that will overlap with
-  // this new field.
-  assert(NextFieldByteStart*8 <= GCCFieldOffsetInBits &&
-         "Overlapping LLVM fields!");
+  // Build the value as a ulong constant, then constant fold it to the right
+  // type.  This handles overflow and other things appropriately.
+  uint64_t IntValue = getINTEGER_CSTVal(exp);
+  ConstantInt *C = ConstantInt::get(Type::getInt64Ty(Context), IntValue);
+  // The destination type can be a pointer, integer or floating point
+  // so we need a generalized cast here
+  Instruction::CastOps opcode = CastInst::getCastOpcode(C, false, Ty,
+      !TYPE_UNSIGNED(TREE_TYPE(exp)));
+  return TheFolder->CreateCast(opcode, C, Ty);
+}
 
-  // Compute the offset the field would get if we just stuck 'Val' onto the
-  // end of our structure right now.  It is NextFieldByteStart rounded up to
-  // the LLVM alignment of Val's type.
-  unsigned ValLLVMAlign = 1;
+Constant *TreeConstantToLLVM::ConvertREAL_CST(tree exp) {
+  const Type *Ty = ConvertType(TREE_TYPE(exp));
+  assert(Ty->isFloatingPoint() && "Integer REAL_CST?");
+  long RealArr[2];
+  union {
+    int UArr[2];
+    double V;
+  };
+  if (Ty==Type::getFloatTy(Context) || Ty==Type::getDoubleTy(Context)) {
+    REAL_VALUE_TO_TARGET_DOUBLE(TREE_REAL_CST(exp), RealArr);
 
-  if (!StructIsPacked) { // Packed structs ignore the alignment of members.
-    ValLLVMAlign = TD.getABITypeAlignment(Val->getType());
-    MaxLLVMFieldAlignment = std::max(MaxLLVMFieldAlignment, ValLLVMAlign);
-  }
+    // Here's how this works:
+    // REAL_VALUE_TO_TARGET_DOUBLE() will generate the floating point number
+    // as an array of integers in the target's representation.  Each integer
+    // in the array will hold 32 bits of the result REGARDLESS OF THE HOST'S
+    // INTEGER SIZE.
+    //
+    // This, then, makes the conversion pretty simple.  The tricky part is
+    // getting the byte ordering correct and making sure you don't print any
+    // more than 32 bits per integer on platforms with ints > 32 bits.
+    //
+    // We want to switch the words of UArr if host and target endianness
+    // do not match.  FLOAT_WORDS_BIG_ENDIAN describes the target endianness.
+    // The host's used to be available in HOST_WORDS_BIG_ENDIAN, but the gcc
+    // maintainers removed this in a fit of cleanliness between 4.0
+    // and 4.2. llvm::sys has a substitute.
 
-  // LLVMNaturalByteOffset - This is where LLVM would drop the field if we
-  // slap it onto the end of the struct.
-  uint64_t LLVMNaturalByteOffset
-    = TargetData::RoundUpAlignment(NextFieldByteStart, ValLLVMAlign);
+    UArr[0] = RealArr[0];   // Long -> int convert
+    UArr[1] = RealArr[1];
 
-  // If adding the LLVM field would push it over too far, then we must have a
-  // case that requires the LLVM struct to be packed.  Do it now if so.
-  if (LLVMNaturalByteOffset*8 > GCCFieldOffsetInBits) {
-    // Switch to packed.
-    ConvertToPacked();
-    assert(NextFieldByteStart*8 <= GCCFieldOffsetInBits &&
-           "Packing didn't fix the problem!");
+    if (llvm::sys::isBigEndianHost() != FLOAT_WORDS_BIG_ENDIAN)
+      std::swap(UArr[0], UArr[1]);
 
-    // Recurse to add the field after converting to packed.
-    return AddFieldToRecordConstant(Val, GCCFieldOffsetInBits);
+    return
+      ConstantFP::get(Context, Ty==Type::getFloatTy(Context) ?
+                      APFloat((float)V) : APFloat(V));
+  } else if (Ty==Type::getX86_FP80Ty(Context)) {
+    long RealArr[4];
+    uint64_t UArr[2];
+    REAL_VALUE_TO_TARGET_LONG_DOUBLE(TREE_REAL_CST(exp), RealArr);
+    UArr[0] = ((uint64_t)((uint32_t)RealArr[0])) |
+              ((uint64_t)((uint32_t)RealArr[1]) << 32);
+    UArr[1] = (uint16_t)RealArr[2];
+    return ConstantFP::get(Context, APFloat(APInt(80, 2, UArr)));
+  } else if (Ty==Type::getPPC_FP128Ty(Context)) {
+    long RealArr[4];
+    uint64_t UArr[2];
+    REAL_VALUE_TO_TARGET_LONG_DOUBLE(TREE_REAL_CST(exp), RealArr);
+
+    UArr[0] = ((uint64_t)((uint32_t)RealArr[0]) << 32) |
+              ((uint64_t)((uint32_t)RealArr[1]));
+    UArr[1] = ((uint64_t)((uint32_t)RealArr[2]) << 32) |
+              ((uint64_t)((uint32_t)RealArr[3]));
+    return ConstantFP::get(Context, APFloat(APInt(128, 2, UArr)));
   }
+  assert(0 && "Floating point type not handled yet");
+  return 0;   // outwit compiler warning
+}
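
A minimal standalone sketch of the word-swap step, assuming an IEEE double whose two 32-bit words arrive in target word order (function name hypothetical):

    #include <algorithm>
    #include <cstdint>
    #include <cstring>

    // 1.0 is 0x3FF0000000000000; a target with big-endian float words
    // supplies { 0x3FF00000, 0x00000000 }, so a little-endian host must
    // swap the words before reading the buffer back as a double.
    double wordsToDouble(uint32_t W0, uint32_t W1,
                         bool HostBigEndian, bool TargetWordsBigEndian) {
      uint32_t U[2] = { W0, W1 };
      if (HostBigEndian != TargetWordsBigEndian)
        std::swap(U[0], U[1]);          // mirrors the swap above
      double V;
      std::memcpy(&V, U, sizeof(V));    // type-pun safely via memcpy
      return V;
    }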
 
-  // If the LLVM offset is not large enough, we need to insert explicit
-  // padding in the LLVM struct between the fields.
-  if (LLVMNaturalByteOffset*8 < GCCFieldOffsetInBits) {
-    // Insert enough padding to fully fill in the hole.  Insert padding from
-    // NextFieldByteStart (not LLVMNaturalByteOffset) because the padding will
-    // not get the same alignment as "Val".
-    const Type *FillTy = Type::getInt8Ty(Context);
-    if (GCCFieldOffsetInBits/8-NextFieldByteStart != 1)
-      FillTy = ArrayType::get(FillTy,
-                              GCCFieldOffsetInBits/8-NextFieldByteStart);
-    ResultElts.push_back(Constant::getNullValue(FillTy));
+Constant *TreeConstantToLLVM::ConvertVECTOR_CST(tree exp) {
+  if (!TREE_VECTOR_CST_ELTS(exp))
+    return Constant::getNullValue(ConvertType(TREE_TYPE(exp)));
 
-    NextFieldByteStart = GCCFieldOffsetInBits/8;
+  std::vector<Constant*> Elts;
+  for (tree elt = TREE_VECTOR_CST_ELTS(exp); elt; elt = TREE_CHAIN(elt))
+    Elts.push_back(Convert(TREE_VALUE(elt)));
 
-    // Recurse to add the field.  This handles the case when the LLVM struct
-    // needs to be converted to packed after inserting tail padding.
-    return AddFieldToRecordConstant(Val, GCCFieldOffsetInBits);
+  // The vector should be zero filled if insufficient elements are provided.
+  if (Elts.size() < TYPE_VECTOR_SUBPARTS(TREE_TYPE(exp))) {
+    tree EltType = TREE_TYPE(TREE_TYPE(exp));
+    Constant *Zero = Constant::getNullValue(ConvertType(EltType));
+    while (Elts.size() < TYPE_VECTOR_SUBPARTS(TREE_TYPE(exp)))
+      Elts.push_back(Zero);
   }
 
-  // Slap 'Val' onto the end of our ConstantStruct, it must be known to land
-  // at the right offset now.
-  assert(LLVMNaturalByteOffset*8 == GCCFieldOffsetInBits);
-  ResultElts.push_back(Val);
-  NextFieldByteStart = LLVMNaturalByteOffset;
-  NextFieldByteStart += TD.getTypeAllocSize(Val->getType());
+  return ConstantVector::get(Elts);
 }
 
-/// AddBitFieldToRecordConstant - Bitfields can span multiple LLVM fields and
-/// have other annoying properties, thus requiring extra layout rules.  This
-/// routine handles the extra complexity and then forwards to
-/// AddFieldToRecordConstant.
-void ConstantLayoutInfo::
-AddBitFieldToRecordConstant(ConstantInt *ValC, uint64_t GCCFieldOffsetInBits) {
-  // If the GCC field starts after our current LLVM field then there must have
-  // been an anonymous bitfield or other thing that shoved it over.  No matter,
-  // just insert some i8 padding until there are bits to fill in.
-  while (GCCFieldOffsetInBits > NextFieldByteStart*8) {
-    ResultElts.push_back(ConstantInt::get(Type::getInt8Ty(Context), 0));
-    ++NextFieldByteStart;
-  }
-
-  // If the field is a bitfield, it could partially go in a previously
-  // laid out structure member, and may add elements to the end of the currently
-  // laid out structure.
-  //
-  // Since bitfields can only partially overlap other bitfields, because we
-  // always emit components of bitfields as i8, and because we never emit tail
-  // padding until we know it exists, this boils down to merging pieces of the
-  // bitfield values into i8's.  This is also simplified by the fact that
-  // bitfields can only be initialized by ConstantInts.  An interesting case is
-  // sharing of tail padding in C++ structures.  Because this can only happen
-  // in inheritance cases, and those are non-POD, we should never see them here.
+Constant *TreeConstantToLLVM::ConvertSTRING_CST(tree exp) {
+  const ArrayType *StrTy = cast<ArrayType>(ConvertType(TREE_TYPE(exp)));
+  const Type *ElTy = StrTy->getElementType();
 
-  // First handle any part of Val that overlaps an already laid out field by
-  // merging it into that field.  By the above invariants, it is an i8 that
-  // we are merging into.  Note that we may be inserting *all* of Val into the
-  // previous field.
-  if (GCCFieldOffsetInBits < NextFieldByteStart*8) {
-    unsigned ValBitSize = ValC->getBitWidth();
-    assert(!ResultElts.empty() && "Bitfield starts before first element?");
-    assert(ResultElts.back()->getType() == Type::getInt8Ty(Context) &&
-           isa<ConstantInt>(ResultElts.back()) &&
-           "Merging bitfield with non-bitfield value?");
-    assert(NextFieldByteStart*8 - GCCFieldOffsetInBits < 8 &&
-           "Bitfield overlaps backwards more than one field?");
+  unsigned Len = (unsigned)TREE_STRING_LENGTH(exp);
 
-    // Figure out how many bits can fit into the previous field given the
-    // starting point in that field.
-    unsigned BitsInPreviousField =
-      unsigned(NextFieldByteStart*8 - GCCFieldOffsetInBits);
-    assert(BitsInPreviousField != 0 && "Previous field should not be null!");
+  std::vector<Constant*> Elts;
+  if (ElTy == Type::getInt8Ty(Context)) {
+    const unsigned char *InStr =(const unsigned char *)TREE_STRING_POINTER(exp);
+    for (unsigned i = 0; i != Len; ++i)
+      Elts.push_back(ConstantInt::get(Type::getInt8Ty(Context), InStr[i]));
+  } else if (ElTy == Type::getInt16Ty(Context)) {
+    assert((Len&1) == 0 &&
+           "Length in bytes should be a multiple of element size");
+    const uint16_t *InStr = (const uint16_t *)TREE_STRING_POINTER(exp);
+    for (unsigned i = 0; i != Len/2; ++i) {
+      // gcc has constructed the initializer elements in the target endianness,
+      // but we're going to treat them as ordinary shorts from here, with
+      // host endianness.  Adjust if necessary.
+      if (llvm::sys::isBigEndianHost() == BYTES_BIG_ENDIAN)
+        Elts.push_back(ConstantInt::get(Type::getInt16Ty(Context), InStr[i]));
+      else
+        Elts.push_back(ConstantInt::get(Type::getInt16Ty(Context),
+                                        ByteSwap_16(InStr[i])));
+    }
+  } else if (ElTy == Type::getInt32Ty(Context)) {
+    assert((Len&3) == 0 &&
+           "Length in bytes should be a multiple of element size");
+    const uint32_t *InStr = (const uint32_t *)TREE_STRING_POINTER(exp);
+    for (unsigned i = 0; i != Len/4; ++i) {
+      // gcc has constructed the initializer elements in the target endianness,
+      // but we're going to treat them as ordinary ints from here, with
+      // host endianness.  Adjust if necessary.
+      if (llvm::sys::isBigEndianHost() == BYTES_BIG_ENDIAN)
+        Elts.push_back(ConstantInt::get(Type::getInt32Ty(Context), InStr[i]));
+      else
+        Elts.push_back(ConstantInt::get(Type::getInt32Ty(Context),
+                                        ByteSwap_32(InStr[i])));
+    }
+  } else {
+    assert(0 && "Unknown character type!");
+  }
 
-    // Split the bits that will be inserted into the previous element out of
-    // Val into a new constant.  If Val is completely contained in the previous
-    // element, this sets Val to null, otherwise we shrink Val to contain the
-    // bits to insert in the next element.
-    APInt ValForPrevField(ValC->getValue());
-    if (BitsInPreviousField >= ValBitSize) {
-      // The whole field fits into the previous field.
-      ValC = 0;
-    } else if (!BYTES_BIG_ENDIAN) {
-      // Little endian, take bits from the bottom of the field value.
-      ValForPrevField.trunc(BitsInPreviousField);
-      APInt Tmp = ValC->getValue();
-      Tmp = Tmp.lshr(BitsInPreviousField);
-      Tmp = Tmp.trunc(ValBitSize-BitsInPreviousField);
-      ValC = ConstantInt::get(Context, Tmp);
-    } else {
-      // Big endian, take bits from the top of the field value.
-      ValForPrevField = ValForPrevField.lshr(ValBitSize-BitsInPreviousField);
-      ValForPrevField.trunc(BitsInPreviousField);
+  unsigned LenInElts = Len /
+          TREE_INT_CST_LOW(TYPE_SIZE_UNIT(TREE_TYPE(TREE_TYPE(exp))));
+  unsigned ConstantSize = StrTy->getNumElements();
 
-      APInt Tmp = ValC->getValue();
-      Tmp = Tmp.trunc(ValBitSize-BitsInPreviousField);
-      ValC = ConstantInt::get(Context, Tmp);
+  if (LenInElts != ConstantSize) {
+    // If this is a variable sized array type, set the length to LenInElts.
+    if (ConstantSize == 0) {
+      tree Domain = TYPE_DOMAIN(TREE_TYPE(exp));
+      if (!Domain || !TYPE_MAX_VALUE(Domain)) {
+        ConstantSize = LenInElts;
+        StrTy = ArrayType::get(ElTy, LenInElts);
+      }
     }
 
-    // Okay, we're going to insert ValForPrevField into the previous i8, extend
-    // it and shift into place.
-    ValForPrevField.zext(8);
-    if (!BYTES_BIG_ENDIAN) {
-      ValForPrevField = ValForPrevField.shl(8-BitsInPreviousField);
+    if (ConstantSize < LenInElts) {
+      // Only some chars are being used, truncate the string: char X[2] = "foo";
+      Elts.resize(ConstantSize);
     } else {
-      // On big endian, if the entire field fits into the remaining space, shift
-      // over to not take part of the next field's bits.
-      if (BitsInPreviousField > ValBitSize)
-        ValForPrevField = ValForPrevField.shl(BitsInPreviousField-ValBitSize);
+      // Fill the end of the string with nulls.
+      Constant *C = Constant::getNullValue(ElTy);
+      for (; LenInElts != ConstantSize; ++LenInElts)
+        Elts.push_back(C);
     }
-
-    // "or" in the previous value and install it.
-    const APInt &LastElt = cast<ConstantInt>(ResultElts.back())->getValue();
-    ResultElts.back() = ConstantInt::get(Context, ValForPrevField | LastElt);
-
-    // If the whole bit-field fit into the previous field, we're done.
-    if (ValC == 0) return;
-    GCCFieldOffsetInBits = NextFieldByteStart*8;
   }
+  return ConstantArray::get(StrTy, Elts);
+}
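
Two concrete cases of the length fixup, on the i8 element path (recall that TREE_STRING_LENGTH includes the terminating NUL):

    char X[2] = "foo";  // Len = 4 but the type holds 2 elements:
                        // truncated to the constant c"fo"
    char Y[6] = "foo";  // Len = 4 but the type holds 6 elements:
                        // padded to the constant c"foo\00\00\00"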
 
-  APInt Val = ValC->getValue();
-
-  // Okay, we know that we're plopping bytes onto the end of the struct.
-  // Iterate while there is stuff to do.
-  while (1) {
-    ConstantInt *ValToAppend;
-    if (Val.getBitWidth() > 8) {
-      if (!BYTES_BIG_ENDIAN) {
-        // Little endian lays out low bits first.
-        APInt Tmp = Val;
-        Tmp.trunc(8);
-        ValToAppend = ConstantInt::get(Context, Tmp);
+Constant *TreeConstantToLLVM::ConvertCOMPLEX_CST(tree exp) {
+  Constant *Elts[2] = {
+    Convert(TREE_REALPART(exp)),
+    Convert(TREE_IMAGPART(exp))
+  };
+  return ConstantStruct::get(Context, Elts, 2, false);
+}
 
-        Val = Val.lshr(8);
-      } else {
-        // Big endian lays out high bits first.
-        APInt Tmp = Val;
-        Tmp = Tmp.lshr(Tmp.getBitWidth()-8);
-        Tmp.trunc(8);
-        ValToAppend = ConstantInt::get(Context, Tmp);
-      }
-    } else if (Val.getBitWidth() == 8) {
-      ValToAppend = ConstantInt::get(Context, Val);
-    } else {
-      APInt Tmp = Val;
-      Tmp.zext(8);
+Constant *TreeConstantToLLVM::ConvertNOP_EXPR(tree exp) {
+  Constant *Elt = Convert(TREE_OPERAND(exp, 0));
+  const Type *Ty = ConvertType(TREE_TYPE(exp));
+  bool EltIsSigned = !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp, 0)));
+  bool TyIsSigned = !TYPE_UNSIGNED(TREE_TYPE(exp));
 
-      if (BYTES_BIG_ENDIAN)
-        Tmp = Tmp << (8 - Val.getBitWidth());
-      ValToAppend = ConstantInt::get(Context, Tmp);
-    }
+  // If this is a structure-to-structure cast, just return the uncasted value.
+  if (!Elt->getType()->isSingleValueType() || !Ty->isSingleValueType())
+    return Elt;
 
-    ResultElts.push_back(ValToAppend);
-    ++NextFieldByteStart;
+  // Elt and Ty can be integer, float or pointer here: need generalized cast
+  Instruction::CastOps opcode = CastInst::getCastOpcode(Elt, EltIsSigned,
+                                                        Ty, TyIsSigned);
+  return TheFolder->CreateCast(opcode, Elt, Ty);
+}
 
-    if (Val.getBitWidth() <= 8)
-      break;
-    Val.trunc(Val.getBitWidth()-8);
-  }
+Constant *TreeConstantToLLVM::ConvertCONVERT_EXPR(tree exp) {
+  Constant *Elt = Convert(TREE_OPERAND(exp, 0));
+  bool EltIsSigned = !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp, 0)));
+  const Type *Ty = ConvertType(TREE_TYPE(exp));
+  bool TyIsSigned = !TYPE_UNSIGNED(TREE_TYPE(exp));
+  Instruction::CastOps opcode = CastInst::getCastOpcode(Elt, EltIsSigned, Ty,
+                                                        TyIsSigned);
+  return TheFolder->CreateCast(opcode, Elt, Ty);
 }
 
+Constant *TreeConstantToLLVM::ConvertPOINTER_PLUS_EXPR(tree exp) {
+  Constant *Ptr = Convert(TREE_OPERAND(exp, 0)); // The pointer.
+  Constant *Idx = Convert(TREE_OPERAND(exp, 1)); // The offset in bytes.
 
-/// HandleTailPadding - Check to see if the struct fields, as laid out so far,
-/// will be large enough to make the generated constant struct have the right
-/// size.  If not, add explicit tail padding.  If rounding up based on the LLVM
-/// IR alignment would make the struct too large, convert it to a packed LLVM
-/// struct.
-void ConstantLayoutInfo::HandleTailPadding(uint64_t GCCStructBitSize) {
-  uint64_t GCCStructSize = (GCCStructBitSize+7)/8;
-  uint64_t LLVMNaturalSize =
-    TargetData::RoundUpAlignment(NextFieldByteStart, MaxLLVMFieldAlignment);
+  // Convert the pointer into an i8* and add the offset to it.
+  Ptr = TheFolder->CreateBitCast(Ptr, Type::getInt8Ty(Context)->getPointerTo());
+  Constant *GEP = POINTER_TYPE_OVERFLOW_UNDEFINED ?
+    TheFolder->CreateInBoundsGetElementPtr(Ptr, &Idx, 1) :
+    TheFolder->CreateGetElementPtr(Ptr, &Idx, 1);
 
-  // If the total size of the laid out data is within the size of the GCC type
-  // but the rounded-up size (including the tail padding induced by LLVM
-  // alignment) is too big, convert to a packed struct type.  We don't do this
-  // if the size of the laid out fields is too large because initializers like
-  //
-  //    struct X { int A; char C[]; } x = { 4, "foo" };
-  //
-  // can occur and no amount of packing will help.
-  if (NextFieldByteStart <= GCCStructSize &&   // Not flexible init case.
-      LLVMNaturalSize > GCCStructSize) {       // Tail pad will overflow type.
-    assert(!StructIsPacked && "LLVM Struct type overflow!");
+  // The result may be of a different pointer type.
+  return TheFolder->CreateBitCast(GEP, ConvertType(TREE_TYPE(exp)));
+}
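
For example, a constant byte offset from a global folds into a single ConstantExpr; a sketch of the result (assuming POINTER_TYPE_OVERFLOW_UNDEFINED, hence the inbounds marker):

    int g[4];
    int *p = (int *)((char *)g + 8);  // POINTER_PLUS_EXPR, offset 8
    // Folds to roughly:
    //   bitcast (i8* getelementptr inbounds
    //            (i8* bitcast ([4 x i32]* @g to i8*), i64 8) to i32*)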
 
-    // Switch to packed.
-    ConvertToPacked();
-    LLVMNaturalSize = NextFieldByteStart;
+Constant *TreeConstantToLLVM::ConvertBinOp_CST(tree exp) {
+  Constant *LHS = Convert(TREE_OPERAND(exp, 0));
+  bool LHSIsSigned = !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp,0)));
+  Constant *RHS = Convert(TREE_OPERAND(exp, 1));
+  bool RHSIsSigned = !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp,1)));
+  Instruction::CastOps opcode;
+  if (isa<PointerType>(LHS->getType())) {
+    const Type *IntPtrTy = getTargetData().getIntPtrType(Context);
+    opcode = CastInst::getCastOpcode(LHS, LHSIsSigned, IntPtrTy, false);
+    LHS = TheFolder->CreateCast(opcode, LHS, IntPtrTy);
+    opcode = CastInst::getCastOpcode(RHS, RHSIsSigned, IntPtrTy, false);
+    RHS = TheFolder->CreateCast(opcode, RHS, IntPtrTy);
+  }
 
-    // Verify that packing solved the problem.
-    assert(LLVMNaturalSize <= GCCStructSize &&
-           "Oversized should be handled by packing");
+  Constant *Result;
+  switch (TREE_CODE(exp)) {
+  default: assert(0 && "Unexpected case!");
+  case PLUS_EXPR:   Result = TheFolder->CreateAdd(LHS, RHS); break;
+  case MINUS_EXPR:  Result = TheFolder->CreateSub(LHS, RHS); break;
   }
 
-  // If the LLVM Size is too small, add some tail padding to fill it in.
-  if (LLVMNaturalSize < GCCStructSize) {
-    const Type *FillTy = Type::getInt8Ty(Context);
-    if (GCCStructSize - NextFieldByteStart != 1)
-      FillTy = ArrayType::get(FillTy, GCCStructSize - NextFieldByteStart);
-    ResultElts.push_back(Constant::getNullValue(FillTy));
-    NextFieldByteStart = GCCStructSize;
+  const Type *Ty = ConvertType(TREE_TYPE(exp));
+  bool TyIsSigned = !TYPE_UNSIGNED(TREE_TYPE(exp));
+  opcode = CastInst::getCastOpcode(Result, LHSIsSigned, Ty, TyIsSigned);
+  return TheFolder->CreateCast(opcode, Result, Ty);
+}
 
-    // At this point, we know that our struct should have the right size.
-    // However, if the size of the struct is not a multiple of the largest
-    // element alignment, the rounding could bump up the struct more.  In this
-    // case, we have to convert the struct to being packed.
-    LLVMNaturalSize =
-      TargetData::RoundUpAlignment(NextFieldByteStart, MaxLLVMFieldAlignment);
+Constant *TreeConstantToLLVM::ConvertCONSTRUCTOR(tree exp) {
+  // Note that the constructor can be empty even if the array is non-trivial
+  // (i.e. has a nonzero number of entries).  This is typical for static
+  // constructors, where the array is filled in during program initialization.
+  if (CONSTRUCTOR_ELTS(exp) == 0 ||
+      VEC_length(constructor_elt, CONSTRUCTOR_ELTS(exp)) == 0)  // All zeros?
+    return Constant::getNullValue(ConvertType(TREE_TYPE(exp)));
 
-    // If the alignment will make the struct too big, convert it to being
-    // packed.
-    if (LLVMNaturalSize > GCCStructSize) {
-      assert(!StructIsPacked && "LLVM Struct type overflow!");
-      ConvertToPacked();
-    }
+  switch (TREE_CODE(TREE_TYPE(exp))) {
+  default:
+    debug_tree(exp);
+    assert(0 && "Unknown ctor!");
+  case VECTOR_TYPE:
+  case ARRAY_TYPE:  return ConvertArrayCONSTRUCTOR(exp);
+  case RECORD_TYPE: return ConvertRecordCONSTRUCTOR(exp);
+  case QUAL_UNION_TYPE:
+  case UNION_TYPE:  return ConvertUnionCONSTRUCTOR(exp);
   }
 }
 
-Constant *TreeConstantToLLVM::ConvertRecordCONSTRUCTOR(tree exp) {
-  ConstantLayoutInfo LayoutInfo(getTargetData());
-
-  tree NextField = TYPE_FIELDS(TREE_TYPE(exp));
-  unsigned HOST_WIDE_INT CtorIndex;
-  tree FieldValue;
-  tree Field; // The FIELD_DECL for the field.
-  FOR_EACH_CONSTRUCTOR_ELT(CONSTRUCTOR_ELTS(exp), CtorIndex, Field, FieldValue){
-    // If an explicit field is specified, use it.
-    if (Field == 0) {
-      Field = NextField;
-      // Advance to the next FIELD_DECL, skipping over other structure members
-      // (e.g. enums).
-      while (1) {
-        assert(Field && "Fell off end of record!");
-        if (TREE_CODE(Field) == FIELD_DECL) break;
-        Field = TREE_CHAIN(Field);
-      }
-    }
-
-    // Decode the field's value.
-    Constant *Val = Convert(FieldValue);
-
-    // GCCFieldOffsetInBits is where GCC is telling us to put the current field.
-    uint64_t GCCFieldOffsetInBits = getFieldOffsetInBits(Field);
-    NextField = TREE_CHAIN(Field);
+Constant *TreeConstantToLLVM::ConvertArrayCONSTRUCTOR(tree exp) {
+  // Vectors are handled like arrays, except that there is no index domain:
+  // the number of elements comes from the vector type itself.
 
+  // If we have a lower bound for the range of the type, get it.
+  tree InitType = TREE_TYPE(exp);
+  tree min_element = size_zero_node;
+  std::vector<Constant*> ResultElts;
 
-    // If this is a non-bitfield value, just slap it onto the end of the struct
-    // with the appropriate padding etc.  If it is a bitfield, we have more
-    // processing to do.
-    if (!isBitfield(Field))
-      LayoutInfo.AddFieldToRecordConstant(Val, GCCFieldOffsetInBits);
-    else {
-      // Bitfields can only be initialized with constants (integer constant
-      // expressions).
-      ConstantInt *ValC = cast<ConstantInt>(Val);
-      uint64_t FieldSizeInBits = getInt64(DECL_SIZE(Field), true);
-      uint64_t ValueSizeInBits = Val->getType()->getPrimitiveSizeInBits();
+  if (TREE_CODE(InitType) == VECTOR_TYPE) {
+    ResultElts.resize(TYPE_VECTOR_SUBPARTS(InitType));
+  } else {
+    assert(TREE_CODE(InitType) == ARRAY_TYPE && "Unknown type for init");
+    tree Domain = TYPE_DOMAIN(InitType);
+    if (Domain && TYPE_MIN_VALUE(Domain))
+      min_element = fold_convert(sizetype, TYPE_MIN_VALUE(Domain));
 
-      // G++ has various bugs handling {} initializers where it doesn't
-      // synthesize a zero node of the right type.  Instead of figuring out G++,
-      // just hack around it by special casing zero and allowing it to be the
-      // wrong size.
-      if (ValueSizeInBits < FieldSizeInBits && ValC->isZero()) {
-        APInt ValAsInt = ValC->getValue();
-        ValC = ConstantInt::get(Context, ValAsInt.zext(FieldSizeInBits));
-        ValueSizeInBits = FieldSizeInBits;
-      }
+    if (Domain && TYPE_MAX_VALUE(Domain)) {
+      tree max_element = fold_convert(sizetype, TYPE_MAX_VALUE(Domain));
+      tree size = size_binop (MINUS_EXPR, max_element, min_element);
+      size = size_binop (PLUS_EXPR, size, size_one_node);
 
-      assert(ValueSizeInBits >= FieldSizeInBits &&
-             "disagreement between LLVM and GCC on bitfield size");
-      if (ValueSizeInBits != FieldSizeInBits) {
-        // Fields are allowed to be smaller than their type.  Simply discard
-        // the unwanted upper bits in the field value.
-        APInt ValAsInt = ValC->getValue();
-        ValC = ConstantInt::get(Context, ValAsInt.trunc(FieldSizeInBits));
-      }
-      LayoutInfo.AddBitFieldToRecordConstant(ValC, GCCFieldOffsetInBits);
+      if (host_integerp(size, 1))
+        ResultElts.resize(tree_low_cst(size, 1));
     }
   }
 
-  // Check to see if the struct fields, as laid out so far, will be large enough
-  // to make the generated constant struct have the right size.  If not, add
-  // explicit tail padding.  If rounding up based on the LLVM IR alignment would
-  // make the struct too large, convert it to a packed LLVM struct.
-  tree StructTypeSizeTree = TYPE_SIZE(TREE_TYPE(exp));
-  if (StructTypeSizeTree && TREE_CODE(StructTypeSizeTree) == INTEGER_CST)
-    LayoutInfo.HandleTailPadding(getInt64(StructTypeSizeTree, true));
+  unsigned NextFieldToFill = 0;
+  unsigned HOST_WIDE_INT ix;
+  tree elt_index, elt_value;
+  Constant *SomeVal = 0;
+  FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp), ix, elt_index, elt_value) {
+    // Find and decode the constructor's value.
+    Constant *Val = Convert(elt_value);
+    SomeVal = Val;
 
-  // Okay, we're done, return the computed elements.
-  return ConstantStruct::get(Context, LayoutInfo.ResultElts,
-                             LayoutInfo.StructIsPacked);
-}
+    // Get the index position of the element within the array.  Note that this
+    // can be NULL_TREE, which means that it belongs in the next available slot.
+    tree index = elt_index;
 
-Constant *TreeConstantToLLVM::ConvertUnionCONSTRUCTOR(tree exp) {
-  assert(!VEC_empty(constructor_elt, CONSTRUCTOR_ELTS(exp))
-         && "Union CONSTRUCTOR has no elements? Zero?");
+    // The first and last field to fill in, inclusive.
+    unsigned FieldOffset, FieldLastOffset;
+    if (index && TREE_CODE(index) == RANGE_EXPR) {
+      tree first = fold_convert (sizetype, TREE_OPERAND(index, 0));
+      tree last  = fold_convert (sizetype, TREE_OPERAND(index, 1));
 
-  VEC(constructor_elt, gc) *elt = CONSTRUCTOR_ELTS(exp);
-  assert(VEC_length(constructor_elt, elt) == 1
-         && "Union CONSTRUCTOR with multiple elements?");
+      first = size_binop (MINUS_EXPR, first, min_element);
+      last  = size_binop (MINUS_EXPR, last, min_element);
 
-  std::vector<Constant*> Elts;
-  // Convert the constant itself.
-  Elts.push_back(Convert(VEC_index(constructor_elt, elt, 0)->value));
+      assert(host_integerp(first, 1) && host_integerp(last, 1) &&
+             "Unknown range_expr!");
+      FieldOffset     = tree_low_cst(first, 1);
+      FieldLastOffset = tree_low_cst(last, 1);
+    } else if (index) {
+      index = size_binop (MINUS_EXPR, fold_convert (sizetype, index),
+                          min_element);
+      assert(host_integerp(index, 1));
+      FieldOffset = tree_low_cst(index, 1);
+      FieldLastOffset = FieldOffset;
+    } else {
+      FieldOffset = NextFieldToFill;
+      FieldLastOffset = FieldOffset;
+    }
 
-  // If the union has a fixed size, and if the value we converted isn't large
-  // enough to fill all the bits, add a zero initialized array at the end to pad
-  // it out.
-  tree UnionType = TREE_TYPE(exp);
-  if (TYPE_SIZE(UnionType) && TREE_CODE(TYPE_SIZE(UnionType)) == INTEGER_CST) {
-    uint64_t UnionSize = ((uint64_t)TREE_INT_CST_LOW(TYPE_SIZE(UnionType))+7)/8;
-    uint64_t InitSize = getTargetData().getTypeAllocSize(Elts[0]->getType());
-    if (UnionSize != InitSize) {
-      const Type *FillTy;
-      assert(UnionSize > InitSize && "Init shouldn't be larger than union!");
-      if (UnionSize - InitSize == 1)
-        FillTy = Type::getInt8Ty(Context);
-      else
-        FillTy = ArrayType::get(Type::getInt8Ty(Context), UnionSize - InitSize);
-      Elts.push_back(Constant::getNullValue(FillTy));
+    // Process all of the elements in the range, endpoints inclusive.  The
+    // decrement/increment dance runs the loop test after each slot is
+    // filled, so the inclusive upper bound cannot make the unsigned
+    // counter overflow.
+    for (--FieldOffset; FieldOffset != FieldLastOffset; ) {
+      ++FieldOffset;
+      if (FieldOffset == ResultElts.size())
+        ResultElts.push_back(Val);
+      else {
+        if (FieldOffset >= ResultElts.size())
+          ResultElts.resize(FieldOffset+1);
+        ResultElts[FieldOffset] = Val;
+      }
+
+      NextFieldToFill = FieldOffset+1;
     }
   }
-  return ConstantStruct::get(Context, Elts, false);
-}
-
-//===----------------------------------------------------------------------===//
-//                  ... Constant Expressions L-Values ...
-//===----------------------------------------------------------------------===//
 
-Constant *TreeConstantToLLVM::EmitLV(tree exp) {
-  Constant *LV;
+  // Zero length array.
+  if (ResultElts.empty())
+    return ConstantArray::get(
+      cast<ArrayType>(ConvertType(TREE_TYPE(exp))), ResultElts);
+  assert(SomeVal && "If we had some initializer, we should have some value!");
 
-  switch (TREE_CODE(exp)) {
-  default:
-    debug_tree(exp);
-    assert(0 && "Unknown constant lvalue to convert!");
-    abort();
-  case FUNCTION_DECL:
-  case CONST_DECL:
-  case VAR_DECL:
-    LV = EmitLV_Decl(exp);
-    break;
-  case LABEL_DECL:
-    LV = EmitLV_LABEL_DECL(exp);
-    break;
-  case COMPLEX_CST:
-    LV = EmitLV_COMPLEX_CST(exp);
-    break;
-  case STRING_CST:
-    LV = EmitLV_STRING_CST(exp);
-    break;
-  case COMPONENT_REF:
-    LV = EmitLV_COMPONENT_REF(exp);
-    break;
-  case ARRAY_RANGE_REF:
-  case ARRAY_REF:
-    LV = EmitLV_ARRAY_REF(exp);
-    break;
-  case INDIRECT_REF:
-    // The lvalue is just the address.
-    LV = Convert(TREE_OPERAND(exp, 0));
-    break;
-  case COMPOUND_LITERAL_EXPR: // FIXME: not gimple - defined by C front-end
-    /* This used to read
-       return EmitLV(COMPOUND_LITERAL_EXPR_DECL(exp));
-       but gcc warns about that and there doesn't seem to be any way to stop it
-       with casts or the like.  The following is equivalent with no checking
-       (since we know TREE_CODE(exp) is COMPOUND_LITERAL_EXPR the checking
-       doesn't accomplish anything anyway). */
-    LV = EmitLV(DECL_EXPR_DECL (TREE_OPERAND (exp, 0)));
-    break;
+  // Do a post-pass over all of the elements.  We're taking care of two things
+  // here:
+  //   #1. If any elements did not have initializers specified, provide them
+  //       with a null init.
+  //   #2. If any of the elements have different types, return a struct instead
+  //       of an array.  This can occur in cases where we have an array of
+  //       unions, and the various unions had different pieces init'd.
+  const Type *ElTy = SomeVal->getType();
+  Constant *Filler = Constant::getNullValue(ElTy);
+  bool AllEltsSameType = true;
+  for (unsigned i = 0, e = ResultElts.size(); i != e; ++i) {
+    if (ResultElts[i] == 0)
+      ResultElts[i] = Filler;
+    else if (ResultElts[i]->getType() != ElTy)
+      AllEltsSameType = false;
   }
 
-  // Check that the type of the lvalue is indeed that of a pointer to the tree
-  // node.  Since LLVM has no void* type, don't insist that void* be converted
-  // to a specific LLVM type.
-  assert((VOID_TYPE_P(TREE_TYPE(exp)) ||
-          LV->getType() == ConvertType(TREE_TYPE(exp))->getPointerTo()) &&
-         "LValue of constant has wrong type!");
+  if (TREE_CODE(InitType) == VECTOR_TYPE) {
+    assert(AllEltsSameType && "Vector of heterogeneous element types?");
+    return ConstantVector::get(ResultElts);
+  }
 
-  return LV;
+  if (AllEltsSameType)
+    return ConstantArray::get(
+      ArrayType::get(ElTy, ResultElts.size()), ResultElts);
+  return ConstantStruct::get(Context, ResultElts, false);
 }
 
-Constant *TreeConstantToLLVM::EmitLV_Decl(tree exp) {
-  GlobalValue *Val = cast<GlobalValue>(DECL_LLVM(exp));
-
-  // Ensure variable marked as used even if it doesn't go through a parser.  If
-  // it hasn't been used yet, write out an external definition.
-  if (!TREE_USED(exp)) {
-    assemble_external(exp);
-    TREE_USED(exp) = 1;
-    Val = cast<GlobalValue>(DECL_LLVM(exp));
-  }
 
-  // If this is an aggregate, emit it to LLVM now.  GCC happens to
-  // get this case right by forcing the initializer into memory.
-  if (TREE_CODE(exp) == CONST_DECL || TREE_CODE(exp) == VAR_DECL) {
-    if ((DECL_INITIAL(exp) || !TREE_PUBLIC(exp)) && !DECL_EXTERNAL(exp) &&
-        Val->isDeclaration() &&
-        !BOGUS_CTOR(exp)) {
-      emit_global_to_llvm(exp);
-      // Decl could have changed if it changed type.
-      Val = cast<GlobalValue>(DECL_LLVM(exp));
-    }
-  } else {
-    // Otherwise, inform cgraph that we used the global.
-    mark_decl_referenced(exp);
-    if (tree ID = DECL_ASSEMBLER_NAME(exp))
-      mark_referenced(ID);
-  }
+namespace {
+/// ConstantLayoutInfo - A helper class used by ConvertRecordCONSTRUCTOR to
+/// lay out struct inits.
+struct ConstantLayoutInfo {
+  const TargetData &TD;
 
-  // The type of the global value output for exp need not match that of exp.
-  // For example if the global's initializer has a different type to the global
-  // itself (allowed in GCC but not in LLVM) then the global is changed to have
-  // the type of the initializer.  Correct for this now.
-  const Type *Ty = ConvertType(TREE_TYPE(exp));
-  if (Ty == Type::getVoidTy(Context)) Ty = Type::getInt8Ty(Context);  // void* -> i8*.
+  /// ResultElts - The initializer elements so far.
+  std::vector<Constant*> ResultElts;
 
-  return TheFolder->CreateBitCast(Val, Ty->getPointerTo());
-}
+  /// StructIsPacked - This is set to true if we find out that we have to emit
+  /// the ConstantStruct as a Packed LLVM struct type (because the LLVM
+  /// alignment rules would prevent laying out the struct correctly).
+  bool StructIsPacked;
 
-/// EmitLV_LABEL_DECL - Someone took the address of a label.
-Constant *TreeConstantToLLVM::EmitLV_LABEL_DECL(tree exp) {
-  assert(TheTreeToLLVM &&
-         "taking the address of a label while not compiling the function!");
+  /// NextFieldByteStart - This field indicates the *byte* that the next field
+  /// will start at.  Put another way, this is the size of the struct as
+  /// currently laid out, but without any tail padding considered.
+  uint64_t NextFieldByteStart;
 
-  // Figure out which function this is for, verify it's the one we're compiling.
-  if (DECL_CONTEXT(exp)) {
-    assert(TREE_CODE(DECL_CONTEXT(exp)) == FUNCTION_DECL &&
-           "Address of label in nested function?");
-    assert(TheTreeToLLVM->getFUNCTION_DECL() == DECL_CONTEXT(exp) &&
-           "Taking the address of a label that isn't in the current fn!?");
-  }
+  /// MaxLLVMFieldAlignment - This is the largest alignment of any IR field,
+  /// which is the alignment that the ConstantStruct will get.
+  unsigned MaxLLVMFieldAlignment;
 
-  BasicBlock *BB = TheTreeToLLVM->getLabelDeclBlock(exp);
-  Constant *C = TheTreeToLLVM->getIndirectGotoBlockNumber(BB);
-  return
-       TheFolder->CreateIntToPtr(C, PointerType::getUnqual(Type::getInt8Ty(Context)));
-}
 
-Constant *TreeConstantToLLVM::EmitLV_COMPLEX_CST(tree exp) {
-  Constant *Init = TreeConstantToLLVM::ConvertCOMPLEX_CST(exp);
+  ConstantLayoutInfo(const TargetData &TD) : TD(TD) {
+    StructIsPacked = false;
+    NextFieldByteStart = 0;
+    MaxLLVMFieldAlignment = 1;
+  }
 
-  // Cache the constants to avoid making obvious duplicates that have to be
-  // folded by the optimizer.
-  static std::map<Constant*, GlobalVariable*> ComplexCSTCache;
-  GlobalVariable *&Slot = ComplexCSTCache[Init];
-  if (Slot) return Slot;
+  void ConvertToPacked();
+  void AddFieldToRecordConstant(Constant *Val, uint64_t GCCFieldOffsetInBits);
+  void AddBitFieldToRecordConstant(ConstantInt *Val,
+                                   uint64_t GCCFieldOffsetInBits);
+  void HandleTailPadding(uint64_t GCCStructBitSize);
+};
 
-  // Create a new complex global.
-  Slot = new GlobalVariable(*TheModule, Init->getType(), true,
-                            GlobalVariable::PrivateLinkage, Init, ".cpx");
-  return Slot;
 }
 
-Constant *TreeConstantToLLVM::EmitLV_STRING_CST(tree exp) {
-  Constant *Init = TreeConstantToLLVM::ConvertSTRING_CST(exp);
+/// ConvertToPacked - Given a partially constructed initializer for a LLVM
+/// struct constant, change it to make all the implicit padding between elements
+/// be fully explicit.
+void ConstantLayoutInfo::ConvertToPacked() {
+  assert(!StructIsPacked && "Struct is already packed");
+  uint64_t EltOffs = 0;
+  for (unsigned i = 0, e = ResultElts.size(); i != e; ++i) {
+    Constant *Val = ResultElts[i];
 
-  GlobalVariable **SlotP = 0;
+    // Check to see if this element has an alignment that would cause it to be
+    // placed at a higher offset.  If so, insert explicit padding for the gap.
+    unsigned ValAlign = TD.getABITypeAlignment(Val->getType());
+    uint64_t AlignedEltOffs = TargetData::RoundUpAlignment(EltOffs, ValAlign);
 
-  // Cache the string constants to avoid making obvious duplicate strings that
-  // have to be folded by the optimizer.
-  static std::map<Constant*, GlobalVariable*> StringCSTCache;
-  GlobalVariable *&Slot = StringCSTCache[Init];
-  if (Slot) return Slot;
-  SlotP = &Slot;
+    // If the alignment doesn't affect the element offset, then the value is ok.
+    // Accept the field and keep moving.
+    if (AlignedEltOffs == EltOffs) {
+      EltOffs += TD.getTypeAllocSize(Val->getType());
+      continue;
+    }
 
-  // Create a new string global.
-  GlobalVariable *GV = new GlobalVariable(*TheModule, Init->getType(), true,
-                                          GlobalVariable::PrivateLinkage, Init,
-                                          ".str");
+    // Otherwise, there is padding here.  Insert explicit zeros.
+    const Type *PadTy = Type::getInt8Ty(Context);
+    if (AlignedEltOffs-EltOffs != 1)
+      PadTy = ArrayType::get(PadTy, AlignedEltOffs-EltOffs);
+    ResultElts.insert(ResultElts.begin()+i,
+                      Constant::getNullValue(PadTy));
 
-  GV->setAlignment(TYPE_ALIGN(TREE_TYPE(exp)) / 8);
+    // The padding is now element "i" and just bumped us up to "AlignedEltOffs".
+    EltOffs = AlignedEltOffs;
+    ++e;  // One extra element to scan.
+  }
 
-  if (SlotP) *SlotP = GV;
-  return GV;
+  // Packed now!
+  MaxLLVMFieldAlignment = 1;
+  StructIsPacked = true;
 }
 
-Constant *TreeConstantToLLVM::EmitLV_ARRAY_REF(tree exp) {
-  tree Array = TREE_OPERAND(exp, 0);
-  tree Index = TREE_OPERAND(exp, 1);
-  tree IndexType = TREE_TYPE(Index);
-  assert(TREE_CODE(TREE_TYPE(Array)) == ARRAY_TYPE && "Unknown ARRAY_REF!");
 
-  // Check for variable sized reference.
-  // FIXME: add support for array types where the size doesn't fit into 64 bits
-  assert(isSequentialCompatible(TREE_TYPE(Array)) &&
-         "Global with variable size?");
+/// AddFieldToRecordConstant - As ConvertRecordCONSTRUCTOR builds up an LLVM
+/// constant to represent a GCC CONSTRUCTOR node, it calls this method to add
+/// fields.  The design of this is that it adds leading/trailing padding as
+/// needed to make the pieces fit together and honor the GCC layout.  This does
+/// not handle bitfields.
+///
+/// The arguments are:
+///   Val: The value to add to the struct, with a size that matches the size of
+///        the corresponding GCC field.
+///   GCCFieldOffsetInBits: The offset that we have to put Val in the result.
+///
+void ConstantLayoutInfo::
+AddFieldToRecordConstant(Constant *Val, uint64_t GCCFieldOffsetInBits) {
+  // Figure out how to add this non-bitfield value to our constant struct so
+  // that it ends up at the right offset.  There are four cases we have to
+  // think about:
+  //   1. We may be able to just slap it onto the end of our struct and have
+  //      everything be ok.
+  //   2. We may have to insert explicit padding into the LLVM struct to get
+  //      the initializer over into the right space.  This is needed when the
+  //      GCC field has a larger alignment than the LLVM field.
+  //   3. The LLVM field may be too far over and we may be forced to convert
+  //      this to an LLVM packed struct.  This is required when the LLVM
+  //      alignment is larger than the GCC alignment.
+  //   4. We may have a bitfield that needs to be merged into a previous
+  //      field.
+  // Start by determining which case we have by looking at where LLVM and GCC
+  // would place the field.
+
+  // Verify that we haven't already laid out bytes that will overlap with
+  // this new field.
+  assert(NextFieldByteStart*8 <= GCCFieldOffsetInBits &&
+         "Overlapping LLVM fields!");
+
+  // Compute the offset the field would get if we just stuck 'Val' onto the
+  // end of our structure right now.  It is NextFieldByteStart rounded up to
+  // the LLVM alignment of Val's type.
+  unsigned ValLLVMAlign = 1;
 
-  Constant *ArrayAddr;
+  if (!StructIsPacked) { // Packed structs ignore the alignment of members.
+    ValLLVMAlign = TD.getABITypeAlignment(Val->getType());
+    MaxLLVMFieldAlignment = std::max(MaxLLVMFieldAlignment, ValLLVMAlign);
+  }
 
-  // First subtract the lower bound, if any, in the type of the index.
-  Constant *IndexVal = Convert(Index);
-  tree LowerBound = array_ref_low_bound(exp);
-  if (!integer_zerop(LowerBound))
-    IndexVal = TYPE_UNSIGNED(TREE_TYPE(Index)) ?
-      TheFolder->CreateSub(IndexVal, Convert(LowerBound)) :
-      TheFolder->CreateNSWSub(IndexVal, Convert(LowerBound));
+  // LLVMNaturalByteOffset - This is where LLVM would drop the field if we
+  // slap it onto the end of the struct.
+  uint64_t LLVMNaturalByteOffset
+    = TargetData::RoundUpAlignment(NextFieldByteStart, ValLLVMAlign);
 
-  ArrayAddr = EmitLV(Array);
+  // If adding the LLVM field would push it over too far, then we must have a
+  // case that requires the LLVM struct to be packed.  Do it now if so.
+  if (LLVMNaturalByteOffset*8 > GCCFieldOffsetInBits) {
+    // Switch to packed.
+    ConvertToPacked();
+    assert(NextFieldByteStart*8 <= GCCFieldOffsetInBits &&
+           "Packing didn't fix the problem!");
 
-  const Type *IntPtrTy = getTargetData().getIntPtrType(Context);
-  if (IndexVal->getType() != IntPtrTy)
-    IndexVal = TheFolder->CreateIntCast(IndexVal, IntPtrTy,
-                                        !TYPE_UNSIGNED(IndexType));
+    // Recurse to add the field after converting to packed.
+    return AddFieldToRecordConstant(Val, GCCFieldOffsetInBits);
+  }
 
-  Value *Idx[2];
-  Idx[0] = ConstantInt::get(IntPtrTy, 0);
-  Idx[1] = IndexVal;
+  // If the LLVM offset is not large enough, we need to insert explicit
+  // padding in the LLVM struct between the fields.
+  if (LLVMNaturalByteOffset*8 < GCCFieldOffsetInBits) {
+    // Insert enough padding to fully fill in the hole.  Insert padding from
+    // NextFieldByteStart (not LLVMNaturalByteOffset) because the padding will
+    // not get the same alignment as "Val".
+    const Type *FillTy = Type::getInt8Ty(Context);
+    if (GCCFieldOffsetInBits/8-NextFieldByteStart != 1)
+      FillTy = ArrayType::get(FillTy,
+                              GCCFieldOffsetInBits/8-NextFieldByteStart);
+    ResultElts.push_back(Constant::getNullValue(FillTy));
 
-  return TheFolder->CreateGetElementPtr(ArrayAddr, Idx, 2);
-}
+    NextFieldByteStart = GCCFieldOffsetInBits/8;
 
-Constant *TreeConstantToLLVM::EmitLV_COMPONENT_REF(tree exp) {
-  Constant *StructAddrLV = EmitLV(TREE_OPERAND(exp, 0));
+    // Recurse to add the field.  This handles the case when the LLVM struct
+    // needs to be converted to packed after inserting tail padding.
+    return AddFieldToRecordConstant(Val, GCCFieldOffsetInBits);
+  }
 
-  // Ensure that the struct type has been converted, so that the fielddecls
-  // are laid out.
-  const Type *StructTy = ConvertType(TREE_TYPE(TREE_OPERAND(exp, 0)));
+  // Slap 'Val' onto the end of our ConstantStruct, it must be known to land
+  // at the right offset now.
+  assert(LLVMNaturalByteOffset*8 == GCCFieldOffsetInBits);
+  ResultElts.push_back(Val);
+  NextFieldByteStart = LLVMNaturalByteOffset;
+  NextFieldByteStart += TD.getTypeAllocSize(Val->getType());
+}
 
-  tree FieldDecl = TREE_OPERAND(exp, 1);
+/// AddBitFieldToRecordConstant - Bitfields can span multiple LLVM fields and
+/// have other annoying properties, thus requiring extra layout rules.  This
+/// routine handles the extra complexity and then forwards to
+/// AddFieldToRecordConstant.
+void ConstantLayoutInfo::
+AddBitFieldToRecordConstant(ConstantInt *ValC, uint64_t GCCFieldOffsetInBits) {
+  // If the GCC field starts after our current LLVM field then there must have
+  // been an anonymous bitfield or other thing that shoved it over.  No matter,
+  // just insert some i8 padding until there are bits to fill in.
+  while (GCCFieldOffsetInBits > NextFieldByteStart*8) {
+    ResultElts.push_back(ConstantInt::get(Type::getInt8Ty(Context), 0));
+    ++NextFieldByteStart;
+  }
 
-  StructAddrLV = TheFolder->CreateBitCast(StructAddrLV,
-                                          PointerType::getUnqual(StructTy));
-  const Type *FieldTy = ConvertType(getDeclaredType(FieldDecl));
+  // If the field is a bitfield, it could partially go in a previously
+  // laid out structure member, and may add elements to the end of the currently
+  // laid out structure.
+  //
+  // Since bitfields can only partially overlap other bitfields, because we
+  // always emit components of bitfields as i8, and because we never emit tail
+  // padding until we know it exists, this boils down to merging pieces of the
+  // bitfield values into i8's.  This is also simplified by the fact that
+  // bitfields can only be initialized by ConstantInts.  An interesting case is
+  // sharing of tail padding in C++ structures.  Because this can only happen
+  // in inheritance cases, and those are non-POD, we should never see them here.
 
-  // BitStart - This is the actual offset of the field from the start of the
-  // struct, in bits.  For bitfields this may be on a non-byte boundary.
-  unsigned BitStart = getComponentRefOffsetInBits(exp);
-  Constant *FieldPtr;
-  const TargetData &TD = getTargetData();
+  // First handle any part of Val that overlaps an already laid out field by
+  // merging it into that field.  By the above invariants, it is an i8 that
+  // we are merging into.  Note that we may be inserting *all* of Val into the
+  // previous field.
+  if (GCCFieldOffsetInBits < NextFieldByteStart*8) {
+    unsigned ValBitSize = ValC->getBitWidth();
+    assert(!ResultElts.empty() && "Bitfield starts before first element?");
+    assert(ResultElts.back()->getType() == Type::getInt8Ty(Context) &&
+           isa<ConstantInt>(ResultElts.back()) &&
+           "Merging bitfield with non-bitfield value?");
+    assert(NextFieldByteStart*8 - GCCFieldOffsetInBits < 8 &&
+           "Bitfield overlaps backwards more than one field?");
 
-  tree field_offset = component_ref_field_offset (exp);
-  // If this is a normal field at a fixed offset from the start, handle it.
-  if (TREE_CODE(field_offset) == INTEGER_CST) {
-    unsigned int MemberIndex = GetFieldIndex(FieldDecl);
+    // Figure out how many bits can fit into the previous field given the
+    // starting point in that field.
+    unsigned BitsInPreviousField =
+      unsigned(NextFieldByteStart*8 - GCCFieldOffsetInBits);
+    assert(BitsInPreviousField != 0 && "Previous field should not be null!");
 
-    Constant *Ops[] = {
-      StructAddrLV,
-      Constant::getNullValue(Type::getInt32Ty(Context)),
-      ConstantInt::get(Type::getInt32Ty(Context), MemberIndex)
-    };
-    FieldPtr = TheFolder->CreateGetElementPtr(StructAddrLV, Ops+1, 2);
+    // Split the bits that will be inserted into the previous element out of
+    // Val into a new constant.  If Val is completely contained in the previous
+    // element, this sets Val to null, otherwise we shrink Val to contain the
+    // bits to insert in the next element.
+    APInt ValForPrevField(ValC->getValue());
+    if (BitsInPreviousField >= ValBitSize) {
+      // The whole field fits into the previous field.
+      ValC = 0;
+    } else if (!BYTES_BIG_ENDIAN) {
+      // Little endian, take bits from the bottom of the field value.
+      ValForPrevField.trunc(BitsInPreviousField);
+      APInt Tmp = ValC->getValue();
+      Tmp = Tmp.lshr(BitsInPreviousField);
+      Tmp = Tmp.trunc(ValBitSize-BitsInPreviousField);
+      ValC = ConstantInt::get(Context, Tmp);
+    } else {
+      // Big endian, take bits from the top of the field value.
+      ValForPrevField = ValForPrevField.lshr(ValBitSize-BitsInPreviousField);
+      ValForPrevField.trunc(BitsInPreviousField);
 
-    FieldPtr = ConstantFoldInstOperands(Instruction::GetElementPtr,
-                                        FieldPtr->getType(), Ops,
-                                        3, Context, &TD);
+      APInt Tmp = ValC->getValue();
+      Tmp = Tmp.trunc(ValBitSize-BitsInPreviousField);
+      ValC = ConstantInt::get(Context, Tmp);
+    }
 
-    // Now that we did an offset from the start of the struct, subtract off
-    // the offset from BitStart.
-    if (MemberIndex) {
-      const StructLayout *SL = TD.getStructLayout(cast<StructType>(StructTy));
-      BitStart -= SL->getElementOffset(MemberIndex) * 8;
+    // Okay, we're going to insert ValForPrevField into the previous i8, extend
+    // it and shift into place.
+    ValForPrevField.zext(8);
+    if (!BYTES_BIG_ENDIAN) {
+      ValForPrevField = ValForPrevField.shl(8-BitsInPreviousField);
+    } else {
+      // On big endian, if the entire field fits into the remaining space, shift
+      // over to not take part of the next field's bits.
+      if (BitsInPreviousField > ValBitSize)
+        ValForPrevField = ValForPrevField.shl(BitsInPreviousField-ValBitSize);
     }
 
-  } else {
-    Constant *Offset = Convert(field_offset);
-    Constant *Ptr = TheFolder->CreatePtrToInt(StructAddrLV, Offset->getType());
-    Ptr = TheFolder->CreateAdd(Ptr, Offset);
-    FieldPtr = TheFolder->CreateIntToPtr(Ptr,
-                                         PointerType::getUnqual(FieldTy));
-  }
+    // "or" in the previous value and install it.
+    const APInt &LastElt = cast<ConstantInt>(ResultElts.back())->getValue();
+    ResultElts.back() = ConstantInt::get(Context, ValForPrevField | LastElt);
 
-  // Make sure we return a result of the right type.
-  if (PointerType::getUnqual(FieldTy) != FieldPtr->getType())
-    FieldPtr = TheFolder->CreateBitCast(FieldPtr,
-                                        PointerType::getUnqual(FieldTy));
+    // If the whole bit-field fit into the previous field, we're done.
+    if (ValC == 0) return;
+    GCCFieldOffsetInBits = NextFieldByteStart*8;
+  }
 
-  assert(BitStart == 0 &&
-         "It's a bitfield reference or we didn't get to the field!");
-  return FieldPtr;
-}
+  APInt Val = ValC->getValue();
 
-//===----------------------------------------------------------------------===//
-//                    ... GIMPLE conversion helpers ...
-//===----------------------------------------------------------------------===//
+  // Okay, we know that we're plopping bytes onto the end of the struct.
+  // Iterate while there is stuff to do.
+  while (1) {
+    ConstantInt *ValToAppend;
+    if (Val.getBitWidth() > 8) {
+      if (!BYTES_BIG_ENDIAN) {
+        // Little endian lays out low bits first.
+        APInt Tmp = Val;
+        Tmp.trunc(8);
+        ValToAppend = ConstantInt::get(Context, Tmp);
 
-/// WriteScalarToLHS - Store RHS, a non-aggregate value, into the given LHS.
-void TreeToLLVM::WriteScalarToLHS(tree lhs, Value *RHS) {
-  // Perform a useless type conversion (useless_type_conversion_p).
-  RHS = Builder.CreateBitCast(RHS, ConvertType(TREE_TYPE(lhs)));
+        Val = Val.lshr(8);
+      } else {
+        // Big endian lays out high bits first.
+        APInt Tmp = Val;
+        Tmp = Tmp.lshr(Tmp.getBitWidth()-8);
+        Tmp.trunc(8);
+        ValToAppend = ConstantInt::get(Context, Tmp);
+      }
+    } else if (Val.getBitWidth() == 8) {
+      ValToAppend = ConstantInt::get(Context, Val);
+    } else {
+      APInt Tmp = Val;
+      Tmp.zext(8);
 
-  // If this is the definition of an ssa name, record it in the SSANames map.
-  if (TREE_CODE(lhs) == SSA_NAME) {
-    assert(SSANames.find(lhs) == SSANames.end() &&"Multiply defined SSA name!");
-    SSANames[lhs] = RHS;
-    return;
-  }
+      if (BYTES_BIG_ENDIAN)
+        Tmp = Tmp << (8 - Val.getBitWidth());
+      ValToAppend = ConstantInt::get(Context, Tmp);
+    }
 
-  if (canEmitRegisterVariable(lhs)) {
-    // If this is a store to a register variable, EmitLV can't handle the dest
-    // (there is no l-value of a register variable).  Emit an inline asm node
-    // that copies the value into the specified register.
-    EmitModifyOfRegisterVariable(lhs, RHS);
-    return;
+    ResultElts.push_back(ValToAppend);
+    ++NextFieldByteStart;
+
+    if (Val.getBitWidth() <= 8)
+      break;
+    Val.trunc(Val.getBitWidth()-8);
   }
+}
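
A worked little-endian example of the merging (assuming the usual ABI where 'a' starts at bit 0):

    struct BF { unsigned a : 3; unsigned b : 13; };
    struct BF x = { 5, 0x1234 };
    // 'a' is emitted as the single byte i8 5.  'b' starts at bit 3, so
    // BitsInPreviousField = 5: its low 5 bits (0b10100) are shifted left
    // by 3 and OR'd into that byte (5 | 0xA0 = 0xA5), while the other
    // 8 bits (0x1234 >> 5 = 0x91) become the next byte.  Final constant:
    // { i8 0xA5, i8 0x91 }, i.e. the 16-bit value 0x91A5.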
 
-  LValue LV = EmitLV(lhs);
-  bool isVolatile = TREE_THIS_VOLATILE(lhs);
-  unsigned Alignment = LV.getAlignment();
 
-  if (!LV.isBitfield()) {
-    // Non-bitfield, scalar value.  Just emit a store.
-    StoreInst *SI = Builder.CreateStore(RHS, LV.Ptr, isVolatile);
-    SI->setAlignment(Alignment);
-    return;
+/// HandleTailPadding - Check to see if the struct fields, as laid out so far,
+/// will be large enough to make the generated constant struct have the right
+/// size.  If not, add explicit tail padding.  If rounding up based on the LLVM
+/// IR alignment would make the struct too large, convert it to a packed LLVM
+/// struct.
+void ConstantLayoutInfo::HandleTailPadding(uint64_t GCCStructBitSize) {
+  uint64_t GCCStructSize = (GCCStructBitSize+7)/8;
+  uint64_t LLVMNaturalSize =
+    TargetData::RoundUpAlignment(NextFieldByteStart, MaxLLVMFieldAlignment);
+
+  // If the total size of the laid out data is within the size of the GCC type
+  // but the rounded-up size (including the tail padding induced by LLVM
+  // alignment) is too big, convert to a packed struct type.  We don't do this
+  // if the size of the laid out fields is too large because initializers like
+  //
+  //    struct X { int A; char C[]; } x = { 4, "foo" };
+  //
+  // can occur and no amount of packing will help.
+  if (NextFieldByteStart <= GCCStructSize &&   // Not flexible init case.
+      LLVMNaturalSize > GCCStructSize) {       // Tail pad will overflow type.
+    assert(!StructIsPacked && "LLVM Struct type overflow!");
+
+    // Switch to packed.
+    ConvertToPacked();
+    LLVMNaturalSize = NextFieldByteStart;
+
+    // Verify that packing solved the problem.
+    assert(LLVMNaturalSize <= GCCStructSize &&
+           "Oversized should be handled by packing");
   }
 
-  // Last case, this is a store to a bitfield, so we have to emit a
-  // read/modify/write sequence.
+  // If the LLVM Size is too small, add some tail padding to fill it in.
+  if (LLVMNaturalSize < GCCStructSize) {
+    const Type *FillTy = Type::getInt8Ty(Context);
+    if (GCCStructSize - NextFieldByteStart != 1)
+      FillTy = ArrayType::get(FillTy, GCCStructSize - NextFieldByteStart);
+    ResultElts.push_back(Constant::getNullValue(FillTy));
+    NextFieldByteStart = GCCStructSize;
 
-  if (!LV.BitSize)
-    return;
+    // At this point, we know that our struct should have the right size.
+    // However, if the size of the struct is not a multiple of the largest
+    // element alignment, the rounding could bump up the struct more.  In this
+    // case, we have to convert the struct to being packed.
+    LLVMNaturalSize =
+      TargetData::RoundUpAlignment(NextFieldByteStart, MaxLLVMFieldAlignment);
 
-  const Type *ValTy = cast<PointerType>(LV.Ptr->getType())->getElementType();
-  unsigned ValSizeInBits = ValTy->getPrimitiveSizeInBits();
+    // If the alignment will make the struct too big, convert it to being
+    // packed.
+    if (LLVMNaturalSize > GCCStructSize) {
+      assert(!StructIsPacked && "LLVM Struct type overflow!");
+      ConvertToPacked();
+    }
+  }
+}
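
For example, an over-aligned struct needs explicit tail padding, since field layout alone cannot reach the GCC size:

    struct Tail { char c; } __attribute__((aligned(8)));
    // The lone i8 field ends at byte 1 while GCC reports a size of 8;
    // rounding by MaxLLVMFieldAlignment (1 for i8) still gives 1, so a
    // [7 x i8] zero filler is appended: { i8 C, [7 x i8] }.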
 
-  // The number of stores needed to write the entire bitfield.
-  unsigned Strides = 1 + (LV.BitStart + LV.BitSize - 1) / ValSizeInBits;
+Constant *TreeConstantToLLVM::ConvertRecordCONSTRUCTOR(tree exp) {
+  ConstantLayoutInfo LayoutInfo(getTargetData());
 
-  assert(ValTy->isInteger() && "Invalid bitfield lvalue!");
-  assert(ValSizeInBits > LV.BitStart && "Bad bitfield lvalue!");
-  assert(ValSizeInBits >= LV.BitSize && "Bad bitfield lvalue!");
-  assert(2*ValSizeInBits > LV.BitSize+LV.BitStart && "Bad bitfield lvalue!");
+  tree NextField = TYPE_FIELDS(TREE_TYPE(exp));
+  unsigned HOST_WIDE_INT CtorIndex;
+  tree FieldValue;
+  tree Field; // The FIELD_DECL for the field.
+  FOR_EACH_CONSTRUCTOR_ELT(CONSTRUCTOR_ELTS(exp), CtorIndex, Field, FieldValue){
+    // If an explicit field is specified, use it.
+    if (Field == 0) {
+      Field = NextField;
+      // Advance to the next FIELD_DECL, skipping over other structure members
+      // (e.g. enums).
+      while (1) {
+        assert(Field && "Fell off end of record!");
+        if (TREE_CODE(Field) == FIELD_DECL) break;
+        Field = TREE_CHAIN(Field);
+      }
+    }
 
-  bool Signed = !TYPE_UNSIGNED(TREE_TYPE(lhs));
-  RHS = CastToAnyType(RHS, Signed, ValTy, Signed);
+    // Decode the field's value.
+    Constant *Val = Convert(FieldValue);
 
-  for (unsigned I = 0; I < Strides; I++) {
-    unsigned Index = BYTES_BIG_ENDIAN ? Strides - I - 1 : I; // LSB first
-    unsigned ThisFirstBit = Index * ValSizeInBits;
-    unsigned ThisLastBitPlusOne = ThisFirstBit + ValSizeInBits;
-    if (ThisFirstBit < LV.BitStart)
-      ThisFirstBit = LV.BitStart;
-    if (ThisLastBitPlusOne > LV.BitStart+LV.BitSize)
-      ThisLastBitPlusOne = LV.BitStart+LV.BitSize;
+    // GCCFieldOffsetInBits is where GCC is telling us to put the current field.
+    uint64_t GCCFieldOffsetInBits = getFieldOffsetInBits(Field);
+    NextField = TREE_CHAIN(Field);
 
-    Value *Ptr = Index ?
-      Builder.CreateGEP(LV.Ptr, ConstantInt::get(Type::getInt32Ty(Context), Index)) :
-      LV.Ptr;
-    LoadInst *LI = Builder.CreateLoad(Ptr, isVolatile);
-    LI->setAlignment(Alignment);
-    Value *OldVal = LI;
-    Value *NewVal = RHS;
 
-    unsigned BitsInVal = ThisLastBitPlusOne - ThisFirstBit;
-    unsigned FirstBitInVal = ThisFirstBit % ValSizeInBits;
+    // If this is a non-bitfield value, just slap it onto the end of the struct
+    // with the appropriate padding etc.  If it is a bitfield, we have more
+    // processing to do.
+    if (!isBitfield(Field))
+      LayoutInfo.AddFieldToRecordConstant(Val, GCCFieldOffsetInBits);
+    else {
+      // Bitfields can only be initialized with constants (integer constant
+      // expressions).
+      ConstantInt *ValC = cast<ConstantInt>(Val);
+      uint64_t FieldSizeInBits = getInt64(DECL_SIZE(Field), true);
+      uint64_t ValueSizeInBits = Val->getType()->getPrimitiveSizeInBits();
 
-    if (BYTES_BIG_ENDIAN)
-      FirstBitInVal = ValSizeInBits-FirstBitInVal-BitsInVal;
+      // G++ has various bugs handling {} initializers where it doesn't
+      // synthesize a zero node of the right type.  Instead of figuring out G++,
+      // just hack around it by special casing zero and allowing it to be the
+      // wrong size.
+      if (ValueSizeInBits < FieldSizeInBits && ValC->isZero()) {
+        APInt ValAsInt = ValC->getValue();
+        ValC = ConstantInt::get(Context, ValAsInt.zext(FieldSizeInBits));
+        ValueSizeInBits = FieldSizeInBits;
+      }
 
-    // If not storing into the zero'th bit, shift the Src value to the left.
-    if (FirstBitInVal) {
-      Value *ShAmt = ConstantInt::get(ValTy, FirstBitInVal);
-      NewVal = Builder.CreateShl(NewVal, ShAmt);
+      assert(ValueSizeInBits >= FieldSizeInBits &&
+             "disagreement between LLVM and GCC on bitfield size");
+      if (ValueSizeInBits != FieldSizeInBits) {
+        // Fields are allowed to be smaller than their type.  Simply discard
+        // the unwanted upper bits in the field value.
+        APInt ValAsInt = ValC->getValue();
+        ValC = ConstantInt::get(Context, ValAsInt.trunc(FieldSizeInBits));
+      }
+      LayoutInfo.AddBitFieldToRecordConstant(ValC, GCCFieldOffsetInBits);
     }
+  }
 
-    // Next, if this doesn't touch the top bit, mask out any bits that shouldn't
-    // be set in the result.
-    uint64_t MaskVal = ((1ULL << BitsInVal)-1) << FirstBitInVal;
-    Constant *Mask = ConstantInt::get(Type::getInt64Ty(Context), MaskVal);
-    Mask = Builder.getFolder().CreateTruncOrBitCast(Mask, ValTy);
+  // Check to see if the struct fields, as laid out so far, will be large enough
+  // to make the generated constant struct have the right size.  If not, add
+  // explicit tail padding.  If rounding up based on the LLVM IR alignment would
+  // make the struct too large, convert it to a packed LLVM struct.
+  tree StructTypeSizeTree = TYPE_SIZE(TREE_TYPE(exp));
+  if (StructTypeSizeTree && TREE_CODE(StructTypeSizeTree) == INTEGER_CST)
+    LayoutInfo.HandleTailPadding(getInt64(StructTypeSizeTree, true));
 
-    if (FirstBitInVal+BitsInVal != ValSizeInBits)
-      NewVal = Builder.CreateAnd(NewVal, Mask);
+  // Okay, we're done, return the computed elements.
+  return ConstantStruct::get(Context, LayoutInfo.ResultElts,
+                             LayoutInfo.StructIsPacked);
+}
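
As a rough illustration (hypothetical code, not part of this patch), an
initializer like the following exercises both paths above: the plain field
goes through AddFieldToRecordConstant, while the bitfield values are
truncated to their declared widths and added via
AddBitFieldToRecordConstant:

    /* Hypothetical C input.  The constants 7 and 1 arrive as 32-bit
       integers and are truncated to 3 and 5 bits respectively. */
    struct S {
      int a;           /* ordinary field at a fixed byte offset */
      unsigned b : 3;  /* bitfield */
      unsigned c : 5;  /* bitfield */
    };
    static struct S s = { 42, 7, 1 };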
 
-    // Next, mask out the bits this bit-field should include from the old value.
-    Mask = Builder.getFolder().CreateNot(Mask);
-    OldVal = Builder.CreateAnd(OldVal, Mask);
+Constant *TreeConstantToLLVM::ConvertUnionCONSTRUCTOR(tree exp) {
+  assert(!VEC_empty(constructor_elt, CONSTRUCTOR_ELTS(exp))
+         && "Union CONSTRUCTOR has no elements? Zero?");
 
-    // Finally, merge the two together and store it.
-    NewVal = Builder.CreateOr(OldVal, NewVal);
+  VEC(constructor_elt, gc) *elt = CONSTRUCTOR_ELTS(exp);
+  assert(VEC_length(constructor_elt, elt) == 1
+         && "Union CONSTRUCTOR with multiple elements?");
 
-    StoreInst *SI = Builder.CreateStore(NewVal, Ptr, isVolatile);
-    SI->setAlignment(Alignment);
+  std::vector<Constant*> Elts;
+  // Convert the constant itself.
+  Elts.push_back(Convert(VEC_index(constructor_elt, elt, 0)->value));
 
-    if (I + 1 < Strides) {
-      Value *ShAmt = ConstantInt::get(ValTy, BitsInVal);
-      RHS = Builder.CreateLShr(RHS, ShAmt);
+  // If the union has a fixed size, and if the value we converted isn't large
+  // enough to fill all the bits, add a zero initialized array at the end to pad
+  // it out.
+  tree UnionType = TREE_TYPE(exp);
+  if (TYPE_SIZE(UnionType) && TREE_CODE(TYPE_SIZE(UnionType)) == INTEGER_CST) {
+    uint64_t UnionSize = ((uint64_t)TREE_INT_CST_LOW(TYPE_SIZE(UnionType))+7)/8;
+    uint64_t InitSize = getTargetData().getTypeAllocSize(Elts[0]->getType());
+    if (UnionSize != InitSize) {
+      const Type *FillTy;
+      assert(UnionSize > InitSize && "Init shouldn't be larger than union!");
+      if (UnionSize - InitSize == 1)
+        FillTy = Type::getInt8Ty(Context);
+      else
+        FillTy = ArrayType::get(Type::getInt8Ty(Context), UnionSize - InitSize);
+      Elts.push_back(Constant::getNullValue(FillTy));
     }
   }
+  return ConstantStruct::get(Context, Elts, false);
 }
 
-
 //===----------------------------------------------------------------------===//
-//                      ... Convert GIMPLE to LLVM ...
+//                  ... Constant Expression L-Values ...
 //===----------------------------------------------------------------------===//
 
-void TreeToLLVM::RenderGIMPLE_ASM(gimple stmt) {
-  // Some of the GCC utilities we use still want lists and not gimple, so create
-  // input, output and clobber lists for their benefit.
-  unsigned NumOutputs = gimple_asm_noutputs (stmt);
-  tree outputs = NULL_TREE;
-  if (NumOutputs) {
-    tree t = outputs = gimple_asm_output_op (stmt, 0);
-    for (unsigned i = 1; i < NumOutputs; i++) {
-      TREE_CHAIN (t) = gimple_asm_output_op (stmt, i);
-      t = gimple_asm_output_op (stmt, i);
-    }
-  }
+Constant *TreeConstantToLLVM::EmitLV(tree exp) {
+  Constant *LV;
 
-  unsigned NumInputs = gimple_asm_ninputs(stmt);
-  tree inputs = NULL_TREE;
-  if (NumInputs) {
-    tree t = inputs = gimple_asm_input_op (stmt, 0);
-    for (unsigned i = 1; i < NumInputs; i++) {
-      TREE_CHAIN (t) = gimple_asm_input_op (stmt, i);
-      t = gimple_asm_input_op (stmt, i);
-    }
+  switch (TREE_CODE(exp)) {
+  default:
+    debug_tree(exp);
+    assert(0 && "Unknown constant lvalue to convert!");
+    abort();
+  case FUNCTION_DECL:
+  case CONST_DECL:
+  case VAR_DECL:
+    LV = EmitLV_Decl(exp);
+    break;
+  case LABEL_DECL:
+    LV = EmitLV_LABEL_DECL(exp);
+    break;
+  case COMPLEX_CST:
+    LV = EmitLV_COMPLEX_CST(exp);
+    break;
+  case STRING_CST:
+    LV = EmitLV_STRING_CST(exp);
+    break;
+  case COMPONENT_REF:
+    LV = EmitLV_COMPONENT_REF(exp);
+    break;
+  case ARRAY_RANGE_REF:
+  case ARRAY_REF:
+    LV = EmitLV_ARRAY_REF(exp);
+    break;
+  case INDIRECT_REF:
+    // The lvalue is just the address.
+    LV = Convert(TREE_OPERAND(exp, 0));
+    break;
+  case COMPOUND_LITERAL_EXPR: // FIXME: not gimple - defined by C front-end
+    /* This used to read
+       return EmitLV(COMPOUND_LITERAL_EXPR_DECL(exp));
+       but gcc warns about that and there doesn't seem to be any way to stop it
+       with casts or the like.  The following is equivalent with no checking
+       (since we know TREE_CODE(exp) is COMPOUND_LITERAL_EXPR the checking
+       doesn't accomplish anything anyway). */
+    LV = EmitLV(DECL_EXPR_DECL (TREE_OPERAND (exp, 0)));
+    break;
   }
 
-  unsigned NumClobbers = gimple_asm_nclobbers (stmt);
-  tree clobbers = NULL_TREE;
-  if (NumClobbers) {
-    tree t = clobbers = gimple_asm_clobber_op (stmt, 0);
-    for (unsigned i = 1; i < NumClobbers; i++) {
-      TREE_CHAIN (t) = gimple_asm_clobber_op (stmt, i);
-      t = gimple_asm_clobber_op (stmt, i);
-    }
-  }
+  // Check that the type of the lvalue is indeed that of a pointer to the tree
+  // node.  Since LLVM has no void* type, don't insist that void* be converted
+  // to a specific LLVM type.
+  assert((VOID_TYPE_P(TREE_TYPE(exp)) ||
+          LV->getType() == ConvertType(TREE_TYPE(exp))->getPointerTo()) &&
+         "LValue of constant has wrong type!");
 
-  // TODO: Understand what these labels are about, and handle them properly.
-  unsigned NumLabels = gimple_asm_nlabels (stmt);
-  tree labels = NULL_TREE;
-  if (NumLabels) {
-    tree t = labels = gimple_asm_label_op (stmt, 0);
-    for (unsigned i = 1; i < NumLabels; i++) {
-      TREE_CHAIN (t) = gimple_asm_label_op (stmt, i);
-      t = gimple_asm_label_op (stmt, i);
-    }
-  }
+  return LV;
+}
 
-  unsigned NumInOut = 0;
+Constant *TreeConstantToLLVM::EmitLV_Decl(tree exp) {
+  GlobalValue *Val = cast<GlobalValue>(DECL_LLVM(exp));
 
-  // Look for multiple alternative constraints: multiple alternatives separated
-  // by commas.
-  unsigned NumChoices = 0;    // sentinel; real value is always at least 1.
-  const char* p;
-  for (tree t = inputs; t; t = TREE_CHAIN(t)) {
-    unsigned NumInputChoices = 1;
-    for (p = TREE_STRING_POINTER(TREE_VALUE(TREE_PURPOSE(t))); *p; p++) {
-      if (*p == ',')
-        NumInputChoices++;
-    }
-    if (NumChoices==0)
-      NumChoices = NumInputChoices;
-    else if (NumChoices != NumInputChoices)
-      abort();      // invalid constraints
+  // Ensure the variable is marked as used even if it doesn't go through a
+  // parser.  If it hasn't been used yet, write out an external definition.
+  if (!TREE_USED(exp)) {
+    assemble_external(exp);
+    TREE_USED(exp) = 1;
+    Val = cast<GlobalValue>(DECL_LLVM(exp));
   }
-  for (tree t = outputs; t; t = TREE_CHAIN(t)) {
-    unsigned NumOutputChoices = 1;
-    for (p = TREE_STRING_POINTER(TREE_VALUE(TREE_PURPOSE(t))); *p; p++) {
-      if (*p == ',')
-        NumOutputChoices++;
+
+  // If this is an aggregate, emit it to LLVM now.  GCC happens to
+  // get this case right by forcing the initializer into memory.
+  if (TREE_CODE(exp) == CONST_DECL || TREE_CODE(exp) == VAR_DECL) {
+    if ((DECL_INITIAL(exp) || !TREE_PUBLIC(exp)) && !DECL_EXTERNAL(exp) &&
+        Val->isDeclaration() &&
+        !BOGUS_CTOR(exp)) {
+      emit_global_to_llvm(exp);
+      // Decl could have changed if it changed type.
+      Val = cast<GlobalValue>(DECL_LLVM(exp));
     }
-    if (NumChoices==0)
-      NumChoices = NumOutputChoices;
-    else if (NumChoices != NumOutputChoices)
-      abort();      // invalid constraints
+  } else {
+    // Otherwise, inform cgraph that we used the global.
+    mark_decl_referenced(exp);
+    if (tree ID = DECL_ASSEMBLER_NAME(exp))
+      mark_referenced(ID);
   }
 
-  /// Constraints - The output/input constraints, concatenated together in array
-  /// form instead of list form.
-  const char **Constraints =
-    (const char **)alloca((NumOutputs + NumInputs) * sizeof(const char *));
-
-  // Process outputs.
-  int ValNum = 0;
-  for (tree Output = outputs; Output; Output = TREE_CHAIN(Output), ++ValNum) {
-    tree Operand = TREE_VALUE(Output);
-    tree type = TREE_TYPE(Operand);
-    // If there's an erroneous arg, emit no insn.
-    if (type == error_mark_node) return;
+  // The type of the global value output for exp need not match that of exp.
+  // For example if the global's initializer has a different type to the global
+  // itself (allowed in GCC but not in LLVM) then the global is changed to have
+  // the type of the initializer.  Correct for this now.
+  const Type *Ty = ConvertType(TREE_TYPE(exp));
+  if (Ty == Type::getVoidTy(Context)) Ty = Type::getInt8Ty(Context);  // void* -> i8*.
 
-    // Parse the output constraint.
-    const char *Constraint =
-      TREE_STRING_POINTER(TREE_VALUE(TREE_PURPOSE(Output)));
-    Constraints[ValNum] = Constraint;
-  }
-  // Process inputs.
-  for (tree Input = inputs; Input; Input = TREE_CHAIN(Input),++ValNum) {
-    tree Val = TREE_VALUE(Input);
-    tree type = TREE_TYPE(Val);
-    // If there's an erroneous arg, emit no insn.
-    if (type == error_mark_node) return;
+  return TheFolder->CreateBitCast(Val, Ty->getPointerTo());
+}
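
For orientation, a minimal hypothetical example of what reaches this
function: the address of a global used inside another static initializer
has to be emitted as a constant, possibly forcing the global itself to be
emitted first via emit_global_to_llvm:

    static int counter;                  /* VAR_DECL */
    static int *counter_ptr = &counter;  /* constant lvalue of counter */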
 
-    const char *Constraint =
-      TREE_STRING_POINTER(TREE_VALUE(TREE_PURPOSE(Input)));
-    Constraints[ValNum] = Constraint;
-  }
+/// EmitLV_LABEL_DECL - Someone took the address of a label.
+Constant *TreeConstantToLLVM::EmitLV_LABEL_DECL(tree exp) {
+  assert(TheTreeToLLVM &&
+         "taking the address of a label while not compiling the function!");
 
-  // If there are multiple constraint tuples, pick one.  Constraints is
-  // altered to point to shorter strings (which are malloc'ed), and everything
-  // below Just Works as in the NumChoices==1 case.
-  const char** ReplacementStrings = 0;
-  if (NumChoices>1) {
-    ReplacementStrings =
-      (const char **)alloca((NumOutputs + NumInputs) * sizeof(const char *));
-    ChooseConstraintTuple(Constraints, stmt, outputs, inputs, NumOutputs,
-                          NumInputs, NumChoices, ReplacementStrings);
+  // Figure out which function this is for, verify it's the one we're compiling.
+  if (DECL_CONTEXT(exp)) {
+    assert(TREE_CODE(DECL_CONTEXT(exp)) == FUNCTION_DECL &&
+           "Address of label in nested function?");
+    assert(TheTreeToLLVM->getFUNCTION_DECL() == DECL_CONTEXT(exp) &&
+           "Taking the address of a label that isn't in the current fn!?");
   }
 
-  std::vector<Value*> CallOps;
-  std::vector<const Type*> CallArgTypes;
-  std::string NewAsmStr = ConvertInlineAsmStr(stmt, outputs, inputs, labels,
-                                              NumOutputs+NumInputs);
-  std::string ConstraintStr;
-
-  // StoreCallResultAddr - The pointer to store the result of the call through.
-  SmallVector<Value *, 4> StoreCallResultAddrs;
-  SmallVector<const Type *, 4> CallResultTypes;
-  SmallVector<bool, 4> CallResultIsSigned;
-  SmallVector<tree, 4> CallResultSSANames;
-  SmallVector<Value *, 4> CallResultSSATemps;
+  BasicBlock *BB = TheTreeToLLVM->getLabelDeclBlock(exp);
+  Constant *C = TheTreeToLLVM->getIndirectGotoBlockNumber(BB);
+  return TheFolder->CreateIntToPtr(C,
+                     PointerType::getUnqual(Type::getInt8Ty(Context)));
+}
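
This path is reached through the GNU "labels as values" extension; a
minimal hypothetical computed-goto table shows constant label lvalues in a
static initializer:

    int dispatch(int i) {
      /* &&lab0 and &&lab1 are constant label addresses (GNU C). */
      static void *targets[] = { &&lab0, &&lab1 };
      goto *targets[i & 1];
    lab0: return 0;
    lab1: return 1;
    }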
 
-  // Process outputs.
-  ValNum = 0;
-  for (tree Output = outputs; Output; Output = TREE_CHAIN(Output), ++ValNum) {
-    tree Operand = TREE_VALUE(Output);
+Constant *TreeConstantToLLVM::EmitLV_COMPLEX_CST(tree exp) {
+  Constant *Init = TreeConstantToLLVM::ConvertCOMPLEX_CST(exp);
 
-    // Parse the output constraint.
-    const char *Constraint = Constraints[ValNum];
-    bool IsInOut, AllowsReg, AllowsMem;
-    if (!parse_output_constraint(&Constraint, ValNum, NumInputs, NumOutputs,
-                                 &AllowsMem, &AllowsReg, &IsInOut)) {
-      if (NumChoices>1)
-        FreeConstTupleStrings(ReplacementStrings, NumInputs+NumOutputs);
-      return;
-    }
-    assert(Constraint[0] == '=' && "Not an output constraint?");
+  // Cache the constants to avoid making obvious duplicates that have to be
+  // folded by the optimizer.
+  static std::map<Constant*, GlobalVariable*> ComplexCSTCache;
+  GlobalVariable *&Slot = ComplexCSTCache[Init];
+  if (Slot) return Slot;
 
-    // Output constraints must be addressable if they aren't simple register
-    // constraints (this emits "address of register var" errors, etc).
-    if (!AllowsReg && (AllowsMem || IsInOut))
-      mark_addressable(Operand);
+  // Create a new complex global.
+  Slot = new GlobalVariable(*TheModule, Init->getType(), true,
+                            GlobalVariable::PrivateLinkage, Init, ".cpx");
+  return Slot;
+}
 
-    // Count the number of "+" constraints.
-    if (IsInOut)
-      ++NumInOut, ++NumInputs;
+Constant *TreeConstantToLLVM::EmitLV_STRING_CST(tree exp) {
+  Constant *Init = TreeConstantToLLVM::ConvertSTRING_CST(exp);
 
-    std::string SimplifiedConstraint;
-    // If this output register is pinned to a machine register, use that machine
-    // register instead of the specified constraint.
-    if (TREE_CODE(Operand) == VAR_DECL && DECL_HARD_REGISTER(Operand)) {
-      const char* RegName = extractRegisterName(Operand);
-      int RegNum = decode_reg_name(RegName);
-      if (RegNum >= 0) {
-        RegName = getConstraintRegNameFromGccTables(RegName, RegNum);
-        unsigned RegNameLen = strlen(RegName);
-        char *NewConstraint = (char*)alloca(RegNameLen+4);
-        NewConstraint[0] = '=';
-        NewConstraint[1] = '{';
-        memcpy(NewConstraint+2, RegName, RegNameLen);
-        NewConstraint[RegNameLen+2] = '}';
-        NewConstraint[RegNameLen+3] = 0;
-        SimplifiedConstraint = NewConstraint;
-        // We should no longer consider mem constraints.
-        AllowsMem = false;
-      } else {
-        // If we can simplify the constraint into something else, do so now.
-        // This avoids LLVM having to know about all the (redundant) GCC
-        // constraints.
-        SimplifiedConstraint = CanonicalizeConstraint(Constraint+1);
-      }
-    } else {
-      SimplifiedConstraint = CanonicalizeConstraint(Constraint+1);
-    }
 
-    LValue Dest;
-    const Type *DestValTy;
-    if (TREE_CODE(Operand) == SSA_NAME) {
-      // The ASM is defining an ssa name.  Store the output to a temporary, then
-      // load it out again later as the ssa name.
-      DestValTy = ConvertType(TREE_TYPE(Operand));
-      Dest.Ptr = CreateTemporary(DestValTy);
-      CallResultSSANames.push_back(Operand);
-      CallResultSSATemps.push_back(Dest.Ptr);
-    } else {
-      Dest = EmitLV(Operand);
-      DestValTy = cast<PointerType>(Dest.Ptr->getType())->getElementType();
-    }
+  // Cache the string constants to avoid making obvious duplicate strings that
+  // have to be folded by the optimizer.
+  static std::map<Constant*, GlobalVariable*> StringCSTCache;
+  GlobalVariable *&Slot = StringCSTCache[Init];
+  if (Slot) return Slot;
 
-    assert(!Dest.isBitfield() && "Cannot assign into a bitfield!");
-    if (!AllowsMem && DestValTy->isSingleValueType()) {// Reg dest -> asm return
-      StoreCallResultAddrs.push_back(Dest.Ptr);
-      ConstraintStr += ",=";
-      ConstraintStr += SimplifiedConstraint;
-      CallResultTypes.push_back(DestValTy);
-      CallResultIsSigned.push_back(!TYPE_UNSIGNED(TREE_TYPE(Operand)));
-    } else {
-      ConstraintStr += ",=*";
-      ConstraintStr += SimplifiedConstraint;
-      CallOps.push_back(Dest.Ptr);
-      CallArgTypes.push_back(Dest.Ptr->getType());
-    }
-  }
+  // Create a new string global.
+  GlobalVariable *GV = new GlobalVariable(*TheModule, Init->getType(), true,
+                                          GlobalVariable::PrivateLinkage, Init,
+                                          ".str");
 
-  // Process inputs.
-  for (tree Input = inputs; Input; Input = TREE_CHAIN(Input),++ValNum) {
-    tree Val = TREE_VALUE(Input);
-    tree type = TREE_TYPE(Val);
+  GV->setAlignment(TYPE_ALIGN(TREE_TYPE(exp)) / 8);
 
-    const char *Constraint = Constraints[ValNum];
+  Slot = GV;
+  return GV;
+}
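
The cache matters because LLVM uniques the converted initializer: in a
hypothetical translation unit like the one below, both pointers end up
referring to the same private ".str" global instead of two copies:

    static const char *a = "hello";
    static const char *b = "hello";  /* shares a's ".str" global */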
 
-    bool AllowsReg, AllowsMem;
-    if (!parse_input_constraint(Constraints+ValNum, ValNum-NumOutputs,
-                                NumInputs, NumOutputs, NumInOut,
-                                Constraints, &AllowsMem, &AllowsReg)) {
-      if (NumChoices>1)
-        FreeConstTupleStrings(ReplacementStrings, NumInputs+NumOutputs);
-      return;
-    }
-    bool isIndirect = false;
-    if (AllowsReg || !AllowsMem) {    // Register operand.
-      const Type *LLVMTy = ConvertType(type);
+Constant *TreeConstantToLLVM::EmitLV_ARRAY_REF(tree exp) {
+  tree Array = TREE_OPERAND(exp, 0);
+  tree Index = TREE_OPERAND(exp, 1);
+  tree IndexType = TREE_TYPE(Index);
+  assert(TREE_CODE(TREE_TYPE(Array)) == ARRAY_TYPE && "Unknown ARRAY_REF!");
 
-      Value *Op = 0;
-      if (LLVMTy->isSingleValueType()) {
-        if (TREE_CODE(Val)==ADDR_EXPR &&
-            TREE_CODE(TREE_OPERAND(Val,0))==LABEL_DECL) {
-          // Emit the label, but do not assume it is going to be the target
-          // of an indirect branch.  Having this logic here is a hack; there
-          // should be a bit in the label identifying it as in an asm.
-          Op = getLabelDeclBlock(TREE_OPERAND(Val, 0));
-        } else
-          Op = Emit(Val, 0);
-      } else {
-        LValue LV = EmitLV(Val);
-        assert(!LV.isBitfield() && "Inline asm can't have bitfield operand");
+  // Check for variable sized reference.
+  // FIXME: add support for array types where the size doesn't fit into 64 bits
+  assert(isSequentialCompatible(TREE_TYPE(Array)) &&
+         "Global with variable size?");
 
-        // Structs and unions are permitted here, as long as they're the
-        // same size as a register.
-        uint64_t TySize = TD.getTypeSizeInBits(LLVMTy);
-        if (TySize == 1 || TySize == 8 || TySize == 16 ||
-            TySize == 32 || TySize == 64) {
-          LLVMTy = IntegerType::get(Context, TySize);
-          Op = Builder.CreateLoad(Builder.CreateBitCast(LV.Ptr,
-                                               PointerType::getUnqual(LLVMTy)));
-        } else {
-          // Otherwise, emit our value as an lvalue and let the codegen deal with
-          // it.
-          isIndirect = true;
-          Op = LV.Ptr;
-        }
-      }
+  Constant *ArrayAddr;
 
-      const Type *OpTy = Op->getType();
-      // If this input operand is matching an output operand, e.g. '0', check if
-      // this is something that llvm supports. If the operand types are
-      // different, then emit an error if 1) one of the types is not integer or
-      // pointer, 2) if size of input type is larger than the output type. If
-      // the size of the integer input size is smaller than the integer output
-      // type, then cast it to the larger type and shift the value if the target
-      // is big endian.
-      if (ISDIGIT(Constraint[0])) {
-        unsigned Match = atoi(Constraint);
-        const Type *OTy = (Match < CallResultTypes.size())
-          ? CallResultTypes[Match] : 0;
-        if (OTy && OTy != OpTy) {
-          if (!(isa<IntegerType>(OTy) || isa<PointerType>(OTy)) ||
-              !(isa<IntegerType>(OpTy) || isa<PointerType>(OpTy))) {
-            error_at(gimple_location(stmt),
-                     "unsupported inline asm: input constraint with a matching "
-                     "output constraint of incompatible type!");
-            if (NumChoices>1)
-              FreeConstTupleStrings(ReplacementStrings, NumInputs+NumOutputs);
-            return;
-          }
-          unsigned OTyBits = TD.getTypeSizeInBits(OTy);
-          unsigned OpTyBits = TD.getTypeSizeInBits(OpTy);
-          if (OTyBits == 0 || OpTyBits == 0 || OTyBits < OpTyBits) {
-            // It's tempting to implement the OTyBits < OpTyBits case by truncating
-            // Op down to OTy, however that breaks in the case of an inline asm
-            // constraint that corresponds to a single register, because the
-            // user can write code that assumes the whole register is defined,
-            // despite the output operand being only a subset of the register. For
-            // example:
-            //
-            //   asm ("sarl $10, %%eax" : "=a"(c) : "0"(1000000));
-            //
-            // The expected behavior is for %eax to be fully defined with the value
-            // 1000000 immediately before the asm.
-            error_at(gimple_location(stmt),
-                     "unsupported inline asm: input constraint with a matching "
-                     "output constraint of incompatible type!");
-            return;
-          } else if (OTyBits > OpTyBits) {
-            Op = CastToAnyType(Op, !TYPE_UNSIGNED(type),
-                               OTy, CallResultIsSigned[Match]);
-            if (BYTES_BIG_ENDIAN) {
-              Constant *ShAmt = ConstantInt::get(Op->getType(),
-                                                 OTyBits-OpTyBits);
-              Op = Builder.CreateLShr(Op, ShAmt);
-            }
-            OpTy = Op->getType();
-          }
-        }
-      }
+  // First subtract the lower bound, if any, in the type of the index.
+  Constant *IndexVal = Convert(Index);
+  tree LowerBound = array_ref_low_bound(exp);
+  if (!integer_zerop(LowerBound))
+    IndexVal = TYPE_UNSIGNED(TREE_TYPE(Index)) ?
+      TheFolder->CreateSub(IndexVal, Convert(LowerBound)) :
+      TheFolder->CreateNSWSub(IndexVal, Convert(LowerBound));
 
-      CallOps.push_back(Op);
-      CallArgTypes.push_back(OpTy);
-    } else {                          // Memory operand.
-      mark_addressable(TREE_VALUE(Input));
-      isIndirect = true;
-      LValue Src = EmitLV(Val);
-      assert(!Src.isBitfield() && "Cannot read from a bitfield!");
-      CallOps.push_back(Src.Ptr);
-      CallArgTypes.push_back(Src.Ptr->getType());
-    }
+  ArrayAddr = EmitLV(Array);
 
-    ConstraintStr += ',';
-    if (isIndirect)
-      ConstraintStr += '*';
+  const Type *IntPtrTy = getTargetData().getIntPtrType(Context);
+  if (IndexVal->getType() != IntPtrTy)
+    IndexVal = TheFolder->CreateIntCast(IndexVal, IntPtrTy,
+                                        !TYPE_UNSIGNED(IndexType));
 
-    // If this output register is pinned to a machine register, use that machine
-    // register instead of the specified constraint.
-    if (TREE_CODE(Val) == VAR_DECL && DECL_HARD_REGISTER(Val)) {
-      const char *RegName = extractRegisterName(Val);
-      int RegNum = decode_reg_name(RegName);
-      if (RegNum >= 0) {
-        RegName = getConstraintRegNameFromGccTables(RegName, RegNum);
-        ConstraintStr += '{';
-        ConstraintStr += RegName;
-        ConstraintStr += '}';
-        continue;
-      }
-    }
+  Value *Idx[2];
+  Idx[0] = ConstantInt::get(IntPtrTy, 0);
+  Idx[1] = IndexVal;
 
-    // If there is a simpler form for the register constraint, use it.
-    std::string Simplified = CanonicalizeConstraint(Constraint);
-    ConstraintStr += Simplified;
-  }
+  return TheFolder->CreateGetElementPtr(ArrayAddr, Idx, 2);
+}
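
A minimal hypothetical input for this routine: taking the address of an
element of a global array, which folds to a constant getelementptr with
indices { 0, IndexVal }:

    static int table[4] = { 1, 2, 3, 4 };
    static int *third = &table[2];  /* constant GEP, indices {0, 2} */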
 
-  // Process clobbers.
+Constant *TreeConstantToLLVM::EmitLV_COMPONENT_REF(tree exp) {
+  Constant *StructAddrLV = EmitLV(TREE_OPERAND(exp, 0));
 
-  // Some targets automatically clobber registers across an asm.
-  tree Clobbers = targetm.md_asm_clobbers(outputs, inputs, clobbers);
-  for (; Clobbers; Clobbers = TREE_CHAIN(Clobbers)) {
-    const char *RegName = TREE_STRING_POINTER(TREE_VALUE(Clobbers));
-    int RegCode = decode_reg_name(RegName);
+  // Ensure that the struct type has been converted, so that its FIELD_DECLs
+  // are laid out.
+  const Type *StructTy = ConvertType(TREE_TYPE(TREE_OPERAND(exp, 0)));
 
-    switch (RegCode) {
-    case -1:     // Nothing specified?
-    case -2:     // Invalid.
-      error_at(gimple_location(stmt), "unknown register name %qs in %<asm%>",
-               RegName);
-      if (NumChoices>1)
-        FreeConstTupleStrings(ReplacementStrings, NumInputs+NumOutputs);
-      return;
-    case -3:     // cc
-      ConstraintStr += ",~{cc}";
-      break;
-    case -4:     // memory
-      ConstraintStr += ",~{memory}";
-      break;
-    default:     // Normal register name.
-      RegName = getConstraintRegNameFromGccTables(RegName, RegCode);
-      ConstraintStr += ",~{";
-      ConstraintStr += RegName;
-      ConstraintStr += "}";
-      break;
-    }
-  }
+  tree FieldDecl = TREE_OPERAND(exp, 1);
 
-  const Type *CallResultType;
-  switch (CallResultTypes.size()) {
-  case 0: CallResultType = Type::getVoidTy(Context); break;
-  case 1: CallResultType = CallResultTypes[0]; break;
-  default:
-    std::vector<const Type*> TmpVec(CallResultTypes.begin(),
-                                    CallResultTypes.end());
-    CallResultType = StructType::get(Context, TmpVec);
-    break;
-  }
+  StructAddrLV = TheFolder->CreateBitCast(StructAddrLV,
+                                          PointerType::getUnqual(StructTy));
+  const Type *FieldTy = ConvertType(getDeclaredType(FieldDecl));
 
-  const FunctionType *FTy =
-    FunctionType::get(CallResultType, CallArgTypes, false);
+  // BitStart - This is the actual offset of the field from the start of the
+  // struct, in bits.  For bitfields this may be on a non-byte boundary.
+  unsigned BitStart = getComponentRefOffsetInBits(exp);
+  Constant *FieldPtr;
+  const TargetData &TD = getTargetData();
 
-  // Remove the leading comma if we have operands.
-  if (!ConstraintStr.empty())
-    ConstraintStr.erase(ConstraintStr.begin());
+  tree field_offset = component_ref_field_offset (exp);
+  // If this is a normal field at a fixed offset from the start, handle it.
+  if (TREE_CODE(field_offset) == INTEGER_CST) {
+    unsigned int MemberIndex = GetFieldIndex(FieldDecl);
 
-  // Make sure we've created a valid inline asm expression.
-  if (!InlineAsm::Verify(FTy, ConstraintStr)) {
-    error_at(gimple_location(stmt), "Invalid or unsupported inline assembly!");
-    if (NumChoices>1)
-      FreeConstTupleStrings(ReplacementStrings, NumInputs+NumOutputs);
-    return;
-  }
+    Constant *Ops[] = {
+      StructAddrLV,
+      Constant::getNullValue(Type::getInt32Ty(Context)),
+      ConstantInt::get(Type::getInt32Ty(Context), MemberIndex)
+    };
+    FieldPtr = TheFolder->CreateGetElementPtr(StructAddrLV, Ops+1, 2);
 
-  Value *Asm = InlineAsm::get(FTy, NewAsmStr, ConstraintStr,
-                              gimple_asm_volatile_p(stmt) || !outputs);
-  CallInst *CV = Builder.CreateCall(Asm, CallOps.begin(), CallOps.end(),
-                                    CallResultTypes.empty() ? "" : "asmtmp");
-  CV->setDoesNotThrow();
+    FieldPtr = ConstantFoldInstOperands(Instruction::GetElementPtr,
+                                        FieldPtr->getType(), Ops,
+                                        3, Context, &TD);
 
-  // If the call produces a value, store it into the destination.
-  if (StoreCallResultAddrs.size() == 1)
-    Builder.CreateStore(CV, StoreCallResultAddrs[0]);
-  else if (unsigned NumResults = StoreCallResultAddrs.size()) {
-    for (unsigned i = 0; i != NumResults; ++i) {
-      Value *ValI = Builder.CreateExtractValue(CV, i, "asmresult");
-      Builder.CreateStore(ValI, StoreCallResultAddrs[i]);
+    // The GEP above indexes from the start of the struct; subtract the
+    // field's offset so that BitStart becomes relative to the field.
+    if (MemberIndex) {
+      const StructLayout *SL = TD.getStructLayout(cast<StructType>(StructTy));
+      BitStart -= SL->getElementOffset(MemberIndex) * 8;
     }
+
+  } else {
+    Constant *Offset = Convert(field_offset);
+    Constant *Ptr = TheFolder->CreatePtrToInt(StructAddrLV, Offset->getType());
+    Ptr = TheFolder->CreateAdd(Ptr, Offset);
+    FieldPtr = TheFolder->CreateIntToPtr(Ptr,
+                                         PointerType::getUnqual(FieldTy));
   }
 
-  // If the call defined any ssa names, associate them with their value.
-  for (unsigned i = 0, e = CallResultSSANames.size(); i != e; ++i)
-    SSANames[CallResultSSANames[i]] = Builder.CreateLoad(CallResultSSATemps[i]);
-
-  // Give the backend a chance to upgrade the inline asm to LLVM code.  This
-  // handles some common cases that LLVM has intrinsics for, e.g. x86 bswap ->
-  // llvm.bswap.
-  if (const TargetLowering *TLI = TheTarget->getTargetLowering())
-    TLI->ExpandInlineAsm(CV);
+  // Make sure we return a result of the right type.
+  if (PointerType::getUnqual(FieldTy) != FieldPtr->getType())
+    FieldPtr = TheFolder->CreateBitCast(FieldPtr,
+                                        PointerType::getUnqual(FieldTy));
 
-  if (NumChoices>1)
-    FreeConstTupleStrings(ReplacementStrings, NumInputs+NumOutputs);
+  assert(BitStart == 0 &&
+         "It's a bitfield reference or we didn't get to the field!");
+  return FieldPtr;
 }
 
-void TreeToLLVM::RenderGIMPLE_ASSIGN(gimple stmt) {
-  tree lhs = gimple_assign_lhs(stmt);
-  if (AGGREGATE_TYPE_P(TREE_TYPE(lhs))) {
-    LValue LV = EmitLV(lhs);
-    MemRef NewLoc(LV.Ptr, LV.getAlignment(), TREE_THIS_VOLATILE(lhs));
-    // TODO: This case can presumably only happen with special gimple
-    // assign right-hand-sides.  Try to simplify by exploiting this.
-    EmitGimpleAssignRHS(stmt, &NewLoc);
+//===----------------------------------------------------------------------===//
+//                    ... GIMPLE conversion helpers ...
+//===----------------------------------------------------------------------===//
+
+/// WriteScalarToLHS - Store RHS, a non-aggregate value, into the given LHS.
+void TreeToLLVM::WriteScalarToLHS(tree lhs, Value *RHS) {
+  // The RHS may differ from the LHS type by a trivial conversion (cf.
+  // useless_type_conversion_p); coerce it with a bitcast.
+  RHS = Builder.CreateBitCast(RHS, ConvertType(TREE_TYPE(lhs)));
+
+  // If this is the definition of an ssa name, record it in the SSANames map.
+  if (TREE_CODE(lhs) == SSA_NAME) {
+    assert(SSANames.find(lhs) == SSANames.end() &&"Multiply defined SSA name!");
+    SSANames[lhs] = RHS;
     return;
   }
-  WriteScalarToLHS(lhs, EmitGimpleAssignRHS(stmt, 0));
-}
 
-void TreeToLLVM::RenderGIMPLE_CALL(gimple stmt) {
-  tree lhs = gimple_call_lhs(stmt);
-  if (!lhs) {
-    // The returned value is not used.
-    if (!AGGREGATE_TYPE_P(gimple_call_return_type(stmt))) {
-      EmitGimpleCallRHS(stmt, 0);
-      return;
-    }
-    // Create a temporary to hold the returned value.
-    // TODO: Figure out how to avoid creating this temporary and the
-    // associated useless code that stores the returned value into it.
-    MemRef Loc = CreateTempLoc(ConvertType(gimple_call_return_type(stmt)));
-    EmitGimpleCallRHS(stmt, &Loc);
+  if (canEmitRegisterVariable(lhs)) {
+    // If this is a store to a register variable, EmitLV can't handle the dest
+    // (there is no l-value of a register variable).  Emit an inline asm node
+    // that copies the value into the specified register.
+    EmitModifyOfRegisterVariable(lhs, RHS);
     return;
   }
 
-  if (AGGREGATE_TYPE_P(TREE_TYPE(lhs))) {
-    LValue LV = EmitLV(lhs);
-    MemRef NewLoc(LV.Ptr, LV.getAlignment(), TREE_THIS_VOLATILE(lhs));
-    EmitGimpleCallRHS(stmt, &NewLoc);
+  LValue LV = EmitLV(lhs);
+  bool isVolatile = TREE_THIS_VOLATILE(lhs);
+  unsigned Alignment = LV.getAlignment();
+
+  if (!LV.isBitfield()) {
+    // Non-bitfield, scalar value.  Just emit a store.
+    StoreInst *SI = Builder.CreateStore(RHS, LV.Ptr, isVolatile);
+    SI->setAlignment(Alignment);
     return;
   }
-  WriteScalarToLHS(lhs, EmitGimpleCallRHS(stmt, 0));
-}
 
-void TreeToLLVM::RenderGIMPLE_COND(gimple stmt) {
-  // Emit the comparison.
-  Value *Cond = EmitCompare(gimple_cond_lhs(stmt), gimple_cond_rhs(stmt),
-                            gimple_cond_code(stmt));
+  // Last case, this is a store to a bitfield, so we have to emit a
+  // read/modify/write sequence.
 
-  // Extract the target basic blocks.
-  edge true_edge, false_edge;
-  extract_true_false_edges_from_block(gimple_bb(stmt), &true_edge, &false_edge);
-  BasicBlock *IfTrue = getBasicBlock(true_edge->dest);
-  BasicBlock *IfFalse = getBasicBlock(false_edge->dest);
+  if (!LV.BitSize)
+    return;
 
-  // Branch based on the condition.
-  Builder.CreateCondBr(Cond, IfTrue, IfFalse);
-}
+  const Type *ValTy = cast<PointerType>(LV.Ptr->getType())->getElementType();
+  unsigned ValSizeInBits = ValTy->getPrimitiveSizeInBits();
 
-void TreeToLLVM::RenderGIMPLE_GOTO(gimple stmt) {
-  tree dest = gimple_goto_dest(stmt);
+  // The number of stores needed to write the entire bitfield.
+  unsigned Strides = 1 + (LV.BitStart + LV.BitSize - 1) / ValSizeInBits;
 
-  if (TREE_CODE(dest) == LABEL_DECL) {
-    // Direct branch.
-    Builder.CreateBr(getLabelDeclBlock(dest));
-    return;
-  }
+  assert(ValTy->isInteger() && "Invalid bitfield lvalue!");
+  assert(ValSizeInBits > LV.BitStart && "Bad bitfield lvalue!");
+  assert(ValSizeInBits >= LV.BitSize && "Bad bitfield lvalue!");
+  assert(2*ValSizeInBits > LV.BitSize+LV.BitStart && "Bad bitfield lvalue!");
 
-  // Otherwise we have an indirect goto.
-  BasicBlock *DestBB = getIndirectGotoBlock();
+  bool Signed = !TYPE_UNSIGNED(TREE_TYPE(lhs));
+  RHS = CastToAnyType(RHS, Signed, ValTy, Signed);
 
-  // Store the destination block to the GotoValue alloca.
-  Value *V = Builder.CreatePtrToInt(Emit(dest, 0), TD.getIntPtrType(Context));
-  Builder.CreateStore(V, IndirectGotoValue);
+  for (unsigned I = 0; I < Strides; I++) {
+    unsigned Index = BYTES_BIG_ENDIAN ? Strides - I - 1 : I; // LSB first
+    unsigned ThisFirstBit = Index * ValSizeInBits;
+    unsigned ThisLastBitPlusOne = ThisFirstBit + ValSizeInBits;
+    if (ThisFirstBit < LV.BitStart)
+      ThisFirstBit = LV.BitStart;
+    if (ThisLastBitPlusOne > LV.BitStart+LV.BitSize)
+      ThisLastBitPlusOne = LV.BitStart+LV.BitSize;
 
-  // FIXME: This is HORRIBLY INCORRECT in the presence of exception handlers.
-  // There should be one collector block per cleanup level!
-  Builder.CreateBr(DestBB);
-}
+    Value *Ptr = Index ?
+      Builder.CreateGEP(LV.Ptr, ConstantInt::get(Type::getInt32Ty(Context), Index)) :
+      LV.Ptr;
+    LoadInst *LI = Builder.CreateLoad(Ptr, isVolatile);
+    LI->setAlignment(Alignment);
+    Value *OldVal = LI;
+    Value *NewVal = RHS;
 
-void TreeToLLVM::RenderGIMPLE_RESX(gimple stmt) {
-abort();
-//FIXME  int RegionNo = gimple_resx_region(stmt);
-//FIXME  std::vector<eh_region> Handlers;
-//FIXME
-//FIXME  foreach_reachable_handler(RegionNo, true, false, AddHandler, &Handlers);
-//FIXME
-//FIXME  if (!Handlers.empty()) {
-//FIXME    for (std::vector<eh_region>::iterator I = Handlers.begin(),
-//FIXME         E = Handlers.end(); I != E; ++I)
-//FIXME      // Create a post landing pad for the handler.
-//FIXME      getPostPad(get_eh_region_number(*I));
-//FIXME
-//FIXME    Builder.CreateBr(getPostPad(get_eh_region_number(*Handlers.begin())));
-//FIXME  } else {
-//FIXME    assert(can_throw_external_1(RegionNo, true, false) &&
-//FIXME           "Must-not-throw region handled by runtime?");
-//FIXME    // Unwinding continues in the caller.
-//FIXME    if (!UnwindBB)
-//FIXME      UnwindBB = BasicBlock::Create(Context, "Unwind");
-//FIXME    Builder.CreateBr(UnwindBB);
-//FIXME  }
-}
+    unsigned BitsInVal = ThisLastBitPlusOne - ThisFirstBit;
+    unsigned FirstBitInVal = ThisFirstBit % ValSizeInBits;
 
-void TreeToLLVM::RenderGIMPLE_RETURN(gimple stmt) {
-  tree retval = gimple_return_retval(stmt);
-  tree result = DECL_RESULT(current_function_decl);
+    if (BYTES_BIG_ENDIAN)
+      FirstBitInVal = ValSizeInBits-FirstBitInVal-BitsInVal;
 
-  if (retval && retval != error_mark_node && retval != result) {
-    // Store the return value to the function's DECL_RESULT.
-    if (AGGREGATE_TYPE_P(TREE_TYPE(result))) {
-      MemRef DestLoc(DECL_LOCAL(result), 1, false); // FIXME: What alignment?
-      Emit(retval, &DestLoc);
-    } else {
-      Value *Val = Builder.CreateBitCast(Emit(retval, 0),
-                                         ConvertType(TREE_TYPE(result)));
-      Builder.CreateStore(Val, DECL_LOCAL(result));
+    // If not storing into the zeroth bit, shift the source value to the left.
+    if (FirstBitInVal) {
+      Value *ShAmt = ConstantInt::get(ValTy, FirstBitInVal);
+      NewVal = Builder.CreateShl(NewVal, ShAmt);
     }
-  }
-
-  // Emit a branch to the exit label.
-  Builder.CreateBr(ReturnBB);
-}
-
-void TreeToLLVM::RenderGIMPLE_SWITCH(gimple stmt) {
-  // Emit the condition.
-  Value *Index = Emit(gimple_switch_index(stmt), 0);
-  bool IndexIsSigned = !TYPE_UNSIGNED(TREE_TYPE(gimple_switch_index(stmt)));
 
-  // Create the switch instruction.
-  tree default_label = CASE_LABEL(gimple_switch_label(stmt, 0));
-  SwitchInst *SI = Builder.CreateSwitch(Index, getLabelDeclBlock(default_label),
-                                        gimple_switch_num_labels(stmt));
+    // Next, if this doesn't touch the top bit, mask out any bits that shouldn't
+    // be set in the result.
+    uint64_t MaskVal = ((1ULL << BitsInVal)-1) << FirstBitInVal;
+    Constant *Mask = ConstantInt::get(Type::getInt64Ty(Context), MaskVal);
+    Mask = Builder.getFolder().CreateTruncOrBitCast(Mask, ValTy);
 
-  // Add the switch cases.
-  BasicBlock *IfBlock = 0; // Set if a range was output as an "if".
-  for (size_t i = 1, e = gimple_switch_num_labels(stmt); i != e; ++i) {
-    tree label = gimple_switch_label(stmt, i);
-    BasicBlock *Dest = getLabelDeclBlock(CASE_LABEL(label));
+    if (FirstBitInVal+BitsInVal != ValSizeInBits)
+      NewVal = Builder.CreateAnd(NewVal, Mask);
 
-    // Convert the integer to the right type.
-    Value *Val = Emit(CASE_LOW(label), 0);
-    Val = CastToAnyType(Val, !TYPE_UNSIGNED(TREE_TYPE(CASE_LOW(label))),
-                        Index->getType(), IndexIsSigned);
-    ConstantInt *LowC = cast<ConstantInt>(Val);
+    // Next, mask out the bits this bit-field should include from the old value.
+    Mask = Builder.getFolder().CreateNot(Mask);
+    OldVal = Builder.CreateAnd(OldVal, Mask);
 
-    if (!CASE_HIGH(label)) {
-      SI->addCase(LowC, Dest); // Single destination.
-      continue;
-    }
+    // Finally, merge the two together and store it.
+    NewVal = Builder.CreateOr(OldVal, NewVal);
 
-    // Otherwise, we have a range, like 'case 1 ... 17'.
-    Val = Emit(CASE_HIGH(label), 0);
-    // Make sure the case value is the same type as the switch expression
-    Val = CastToAnyType(Val, !TYPE_UNSIGNED(TREE_TYPE(CASE_HIGH(label))),
-                        Index->getType(), IndexIsSigned);
-    ConstantInt *HighC = cast<ConstantInt>(Val);
+    StoreInst *SI = Builder.CreateStore(NewVal, Ptr, isVolatile);
+    SI->setAlignment(Alignment);
 
-    APInt Range = HighC->getValue() - LowC->getValue();
-    if (Range.ult(APInt(Range.getBitWidth(), 64))) {
-      // Add all of the necessary successors to the switch.
-      APInt CurrentValue = LowC->getValue();
-      while (1) {
-        SI->addCase(LowC, Dest);
-        if (LowC == HighC) break;  // Emitted the last one.
-        CurrentValue++;
-        LowC = ConstantInt::get(Context, CurrentValue);
-      }
-    } else {
-      // The range is too big to add to the switch - emit an "if".
-      if (!IfBlock) {
-        IfBlock = BasicBlock::Create(Context);
-        EmitBlock(IfBlock);
-      }
-      Value *Diff = Builder.CreateSub(Index, LowC);
-      Value *Cond = Builder.CreateICmpULE(Diff,
-                                          ConstantInt::get(Context, Range));
-      BasicBlock *False_Block = BasicBlock::Create(Context);
-      Builder.CreateCondBr(Cond, Dest, False_Block);
-      EmitBlock(False_Block);
+    if (I + 1 < Strides) {
+      Value *ShAmt = ConstantInt::get(ValTy, BitsInVal);
+      RHS = Builder.CreateLShr(RHS, ShAmt);
     }
   }
-
-  if (IfBlock) {
-    Builder.CreateBr(SI->getDefaultDest());
-    SI->setSuccessor(0, IfBlock);
-  }
 }
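
To make the read-modify-write sequence concrete, consider a hypothetical
store to a bitfield: a plain StoreInst would clobber the neighbouring
bits, so the containing word is loaded, masked and merged instead:

    struct B {
      unsigned pad : 5;
      unsigned f   : 11;  /* shares a word with 'pad' */
    };
    void set_f(struct B *b, unsigned v) {
      b->f = v;  /* load word, mask out f's 11 bits, or in v, store */
    }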




