[llvm-commits] [llvm-gcc-4.2] r135372 - in /llvm-gcc-4.2/trunk/gcc: config/arm/llvm-arm.cpp config/i386/llvm-i386.cpp config/rs6000/llvm-rs6000.cpp llvm-backend.cpp llvm-convert.cpp llvm-internal.h llvm-types.cpp

Chris Lattner <sabre at nondot.org>
Sun Jul 17 21:44:09 PDT 2011


Author: lattner
Date: Sun Jul 17 23:44:09 2011
New Revision: 135372

URL: http://llvm.org/viewvc/llvm-project?rev=135372&view=rev
Log:
spend more of my life updating the dead compiler, this time
deconstifying types.
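
(Illustration only, not part of the patch: the recurring change below is dropping the
const qualifier from LLVM type pointers, since the LLVM 3.0-era API no longer handles
Type through const pointers.  A minimal sketch of the before/after pattern, using a
hypothetical helper named SplatValue and assuming the usual llvm-gcc globals
(Context, LLVMBuilder) from llvm-internal.h:)

    // Before: const Type *ResultTy, dyn_cast<const VectorType>, const Type *ElTy.
    // After: all const qualifiers on type pointers are gone.
    static Value *SplatValue(Type *ResultTy, Value *Val, LLVMBuilder &Builder) {
      // dyn_cast<VectorType> instead of dyn_cast<const VectorType>.
      VectorType *VTy = dyn_cast<VectorType>(ResultTy);
      assert(VTy && "expected a vector type");
      // getElementType() now yields Type*, not const Type*.
      Type *ElTy = VTy->getElementType();
      // GCC may have promoted the scalar; cast it back to the element type.
      Value *Elt = (Val->getType() == ElTy)
                       ? Val
                       : Builder.CreateIntCast(Val, ElTy, /*isSigned=*/false);
      // Build a vector with the scalar in lane 0 (a splat would then shuffle it).
      Value *Vec = UndefValue::get(VTy);
      return Builder.CreateInsertElement(
          Vec, Elt, ConstantInt::get(Type::getInt32Ty(Context), 0));
    }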

Modified:
    llvm-gcc-4.2/trunk/gcc/config/arm/llvm-arm.cpp
    llvm-gcc-4.2/trunk/gcc/config/i386/llvm-i386.cpp
    llvm-gcc-4.2/trunk/gcc/config/rs6000/llvm-rs6000.cpp
    llvm-gcc-4.2/trunk/gcc/llvm-backend.cpp
    llvm-gcc-4.2/trunk/gcc/llvm-convert.cpp
    llvm-gcc-4.2/trunk/gcc/llvm-internal.h
    llvm-gcc-4.2/trunk/gcc/llvm-types.cpp

Modified: llvm-gcc-4.2/trunk/gcc/config/arm/llvm-arm.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/config/arm/llvm-arm.cpp?rev=135372&r1=135371&r2=135372&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/config/arm/llvm-arm.cpp (original)
+++ llvm-gcc-4.2/trunk/gcc/config/arm/llvm-arm.cpp Sun Jul 17 23:44:09 2011
@@ -133,12 +133,12 @@
 
 /// BuildDup - Build a splat operation to duplicate a value into every
 /// element of a vector.
-static Value *BuildDup(const Type *ResultType, Value *Val,
+static Value *BuildDup(Type *ResultType, Value *Val,
                        LLVMBuilder &Builder) {
   // GCC may promote the scalar argument; cast it back.
-  const VectorType *VTy = dyn_cast<const VectorType>(ResultType);
+  VectorType *VTy = dyn_cast<VectorType>(ResultType);
   assert(VTy && "expected a vector type");
-  const Type *ElTy = VTy->getElementType();
+  Type *ElTy = VTy->getElementType();
   if (Val->getType() != ElTy) {
     assert(!ElTy->isFloatingPointTy() &&
            "only integer types expected to be promoted");
@@ -1681,7 +1681,7 @@
     // GCC may promote the scalar argument; cast it back.
     const VectorType *VTy = dyn_cast<const VectorType>(Ops[1]->getType());
     assert(VTy && "expected a vector type for vset_lane vector operand");
-    const Type *ElTy = VTy->getElementType();
+    Type *ElTy = VTy->getElementType();
     if (Ops[0]->getType() != ElTy) {
       assert(!ElTy->isFloatingPointTy() &&
              "only integer types expected to be promoted");
@@ -1721,7 +1721,7 @@
 
   case NEON_BUILTIN_vget_high:
   case NEON_BUILTIN_vget_low: {
-    const Type *v2f64Ty = VectorType::get(Type::getDoubleTy(Context), 2);
+    Type *v2f64Ty = VectorType::get(Type::getDoubleTy(Context), 2);
     unsigned Idx = (neon_code == NEON_BUILTIN_vget_low ? 0 : 1);
     Result = Builder.CreateBitCast(Ops[0], v2f64Ty);
     Result = Builder.CreateExtractElement(Result, getInt32Const(Idx));
@@ -1800,7 +1800,7 @@
     }
     const VectorType *VTy = dyn_cast<const VectorType>(ResultType);
     assert(VTy && "expected a vector type");
-    const Type *ElTy = VTy->getElementType();
+    Type *ElTy = VTy->getElementType();
     unsigned ChunkElts = ChunkBits / ElTy->getPrimitiveSizeInBits();
 
     // Translate to a vector shuffle.
@@ -2521,7 +2521,7 @@
 // Walk over an LLVM Type that we know is a homogeneous aggregate and
 // push the proper LLVM Types that represent the register types to pass
 // that struct member in.
-static void push_elts(const Type *Ty, std::vector<Type*> &Elts)
+static void push_elts(Type *Ty, std::vector<Type*> &Elts)
 {
   for (Type::subtype_iterator I = Ty->subtype_begin(), E = Ty->subtype_end();
        I != E; ++I) {
@@ -2553,7 +2553,7 @@
 static unsigned count_num_words(std::vector<Type*> &ScalarElts) {
   unsigned NumWords = 0;
   for (unsigned i = 0, e = ScalarElts.size(); i != e; ++i) {
-    const Type *Ty = ScalarElts[i];
+    Type *Ty = ScalarElts[i];
     if (Ty->isPointerTy()) {
       NumWords++;
     } else if (Ty->isIntegerTy()) {
@@ -2585,7 +2585,7 @@
 
   if (TARGET_HARD_FLOAT_ABI)
     return false;
-  const Type *Ty = ConvertType(type);
+  Type *Ty = ConvertType(type);
   if (Ty->isPointerTy())
     return false;
 
@@ -2689,7 +2689,7 @@
 static bool count_num_registers_uses(std::vector<Type*> &ScalarElts,
                                      bool *SPRs) {
   for (unsigned i = 0, e = ScalarElts.size(); i != e; ++i) {
-    const Type *Ty = ScalarElts[i];
+    Type *Ty = ScalarElts[i];
     if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
       switch (VTy->getBitWidth())
       {
@@ -2764,7 +2764,7 @@
   // Walk Ty and push LLVM types corresponding to register types onto
   // Elts.
   std::vector<Type*> Elts;
-  const Type *Ty = ConvertType(TreeType);
+  Type *Ty = ConvertType(TreeType);
   push_elts(Ty, Elts);
 
   return StructType::get(Context, Elts, false);
@@ -2817,7 +2817,7 @@
 
   while (SNO < NumElements) {
 
-    const Type *DestElemType = DestTy->getElementType(DNO);
+    Type *DestElemType = DestTy->getElementType(DNO);
 
     // Directly access first class values.
     if (DestElemType->isSingleValueType()) {

Modified: llvm-gcc-4.2/trunk/gcc/config/i386/llvm-i386.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/config/i386/llvm-i386.cpp?rev=135372&r1=135371&r2=135372&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/config/i386/llvm-i386.cpp (original)
+++ llvm-gcc-4.2/trunk/gcc/config/i386/llvm-i386.cpp Sun Jul 17 23:44:09 2011
@@ -44,7 +44,7 @@
  * type.
  */
 static Value *ConvertToX86_MMXTy(Value *Val, LLVMBuilder &Builder) {
-  static const Type *MMXTy = Type::getX86_MMXTy(Context);
+  static Type *MMXTy = Type::getX86_MMXTy(Context);
   if (Val->getType() == MMXTy) return Val;
   return Builder.CreateBitCast(Val, MMXTy, "mmx_var");
 }
@@ -64,7 +64,7 @@
                                    unsigned EncodePattern,
                                    LLVMBuilder &Builder) {
   unsigned NumOps = Ops.size();
-  static const Type *MMXTy = Type::getX86_MMXTy(Context);
+  static Type *MMXTy = Type::getX86_MMXTy(Context);
   Function *Func = Intrinsic::getDeclaration(TheModule, IntID);
 
   Value *Arg0 = 0, *Arg1 = 0;
@@ -428,10 +428,10 @@
                            Builder);
     return true;
   case IX86_BUILTIN_MOVNTQ: {
-    static const Type *MMXTy = Type::getX86_MMXTy(Context);
+    static Type *MMXTy = Type::getX86_MMXTy(Context);
     Function *Func = Intrinsic::getDeclaration(TheModule,
                                                Intrinsic::x86_mmx_movnt_dq);
-    const PointerType *PTy = cast<PointerType>(Ops[0]->getType());
+    PointerType *PTy = cast<PointerType>(Ops[0]->getType());
     Value *Arg0 = 0;
     if (PTy->getElementType() == MMXTy)
       Arg0 = Ops[0];
@@ -461,7 +461,7 @@
     return SI;
   }
   case IX86_BUILTIN_PALIGNR: {
-    static const Type *MMXTy = Type::getX86_MMXTy(Context);
+    static Type *MMXTy = Type::getX86_MMXTy(Context);
     Function *Func = Intrinsic::getDeclaration(TheModule,
                                                Intrinsic::x86_mmx_palignr_b);
     Value *Arg0 = ConvertToX86_MMXTy(Ops[0], Builder);
@@ -707,7 +707,7 @@
     return true;
   }
   case IX86_BUILTIN_LOADQ: {
-    const PointerType *i64Ptr = Type::getInt64PtrTy(Context);
+    PointerType *i64Ptr = Type::getInt64PtrTy(Context);
     Ops[0] = Builder.CreateBitCast(Ops[0], i64Ptr);
     Ops[0] = Builder.CreateLoad(Ops[0]);
     Value *Zero = ConstantInt::get(Type::getInt64Ty(Context), 0);
@@ -719,7 +719,7 @@
   }
   case IX86_BUILTIN_LOADUPS: {
     VectorType *v4f32 = VectorType::get(Type::getFloatTy(Context), 4);
-    const PointerType *v4f32Ptr = v4f32->getPointerTo();
+    PointerType *v4f32Ptr = v4f32->getPointerTo();
     Value *BC = Builder.CreateBitCast(Ops[0], v4f32Ptr);
     LoadInst *LI = Builder.CreateLoad(BC);
     LI->setAlignment(1);
@@ -728,7 +728,7 @@
   }
   case IX86_BUILTIN_LOADUPD: {
     VectorType *v2f64 = VectorType::get(Type::getDoubleTy(Context), 2);
-    const PointerType *v2f64Ptr = v2f64->getPointerTo();
+    PointerType *v2f64Ptr = v2f64->getPointerTo();
     Value *BC = Builder.CreateBitCast(Ops[0], v2f64Ptr);
     LoadInst *LI = Builder.CreateLoad(BC);
     LI->setAlignment(1);
@@ -737,7 +737,7 @@
   }
   case IX86_BUILTIN_LOADDQU: {
     VectorType *v16i8 = VectorType::get(Type::getInt8Ty(Context), 16);
-    const PointerType *v16i8Ptr = v16i8->getPointerTo();
+    PointerType *v16i8Ptr = v16i8->getPointerTo();
     Value *BC = Builder.CreateBitCast(Ops[0], v16i8Ptr);
     LoadInst *LI = Builder.CreateLoad(BC);
     LI->setAlignment(1);
@@ -746,7 +746,7 @@
   }
   case IX86_BUILTIN_STOREUPS: {
     VectorType *v4f32 = VectorType::get(Type::getFloatTy(Context), 4);
-    const PointerType *v4f32Ptr = v4f32->getPointerTo();
+    PointerType *v4f32Ptr = v4f32->getPointerTo();
     Value *BC = Builder.CreateBitCast(Ops[0], v4f32Ptr);
     StoreInst *SI = Builder.CreateStore(Ops[1], BC);
     SI->setAlignment(1);
@@ -754,7 +754,7 @@
   }
   case IX86_BUILTIN_STOREUPD: {
     VectorType *v2f64 = VectorType::get(Type::getDoubleTy(Context), 2);
-    const PointerType *v2f64Ptr = v2f64->getPointerTo();
+    PointerType *v2f64Ptr = v2f64->getPointerTo();
     Value *BC = Builder.CreateBitCast(Ops[0], v2f64Ptr);
     StoreInst *SI = Builder.CreateStore(Ops[1], BC);
     SI->setAlignment(1);
@@ -762,14 +762,14 @@
   }
   case IX86_BUILTIN_STOREDQU: {
     VectorType *v16i8 = VectorType::get(Type::getInt8Ty(Context), 16);
-    const PointerType *v16i8Ptr = v16i8->getPointerTo();
+    PointerType *v16i8Ptr = v16i8->getPointerTo();
     Value *BC = Builder.CreateBitCast(Ops[0], v16i8Ptr);
     StoreInst *SI = Builder.CreateStore(Ops[1], BC);
     SI->setAlignment(1);
     return true;
   }
   case IX86_BUILTIN_LOADHPS: {
-    const PointerType *f64Ptr = Type::getDoublePtrTy(Context);
+    PointerType *f64Ptr = Type::getDoublePtrTy(Context);
     Ops[1] = Builder.CreateBitCast(Ops[1], f64Ptr);
     Value *Load = Builder.CreateLoad(Ops[1]);
     Ops[1] = BuildVector(Load, UndefValue::get(Type::getDoubleTy(Context)), NULL);
@@ -779,7 +779,7 @@
     return true;
   }
   case IX86_BUILTIN_LOADLPS: {
-    const PointerType *f64Ptr = Type::getDoublePtrTy(Context);
+    PointerType *f64Ptr = Type::getDoublePtrTy(Context);
     Ops[1] = Builder.CreateBitCast(Ops[1], f64Ptr);
     Value *Load = Builder.CreateLoad(Ops[1]);
     Ops[1] = BuildVector(Load, UndefValue::get(Type::getDoubleTy(Context)), NULL);
@@ -806,7 +806,7 @@
   }
   case IX86_BUILTIN_STOREHPS: {
     VectorType *v2f64 = VectorType::get(Type::getDoubleTy(Context), 2);
-    const PointerType *f64Ptr = Type::getDoublePtrTy(Context);
+    PointerType *f64Ptr = Type::getDoublePtrTy(Context);
     Ops[0] = Builder.CreateBitCast(Ops[0], f64Ptr);
     Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), 1);
     Ops[1] = Builder.CreateBitCast(Ops[1], v2f64);
@@ -816,7 +816,7 @@
   }
   case IX86_BUILTIN_STORELPS: {
     VectorType *v2f64 = VectorType::get(Type::getDoubleTy(Context), 2);
-    const PointerType *f64Ptr = Type::getDoublePtrTy(Context);
+    PointerType *f64Ptr = Type::getDoublePtrTy(Context);
     Ops[0] = Builder.CreateBitCast(Ops[0], f64Ptr);
     Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), 0);
     Ops[1] = Builder.CreateBitCast(Ops[1], v2f64);
@@ -1023,9 +1023,9 @@
       // If palignr is shifting the pair of input vectors less than 17 bytes,
       // emit a shuffle instruction.
       if (shiftVal <= 16) {
-        const llvm::Type *IntTy = Type::getInt32Ty(Context);
-        const llvm::Type *EltTy = Type::getInt8Ty(Context);
-        const llvm::Type *VecTy = VectorType::get(EltTy, 16);
+        Type *IntTy = Type::getInt32Ty(Context);
+        Type *EltTy = Type::getInt8Ty(Context);
+        Type *VecTy = VectorType::get(EltTy, 16);
         
         Ops[1] = Builder.CreateBitCast(Ops[1], VecTy);
         Ops[0] = Builder.CreateBitCast(Ops[0], VecTy);
@@ -1042,9 +1042,9 @@
       // If palignr is shifting the pair of input vectors more than 16 but less
       // than 32 bytes, emit a logical right shift of the destination.
       if (shiftVal < 32) {
-        const llvm::Type *EltTy = Type::getInt64Ty(Context);
-        const llvm::Type *VecTy = VectorType::get(EltTy, 2);
-        const llvm::Type *IntTy = Type::getInt32Ty(Context);
+        Type *EltTy = Type::getInt64Ty(Context);
+        Type *VecTy = VectorType::get(EltTy, 2);
+        Type *IntTy = Type::getInt32Ty(Context);
 
         Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
         Ops[1] = ConstantInt::get(IntTy, (shiftVal-16) * 8);
@@ -1094,10 +1094,10 @@
 }
 
 /* Returns true if all elements of the type are integer types. */
-static bool llvm_x86_is_all_integer_types(const Type *Ty) {
+static bool llvm_x86_is_all_integer_types(Type *Ty) {
   for (Type::subtype_iterator I = Ty->subtype_begin(), E = Ty->subtype_end();
        I != E; ++I) {
-    const Type *STy = *I;
+    Type *STy = *I;
     if (!STy->isIntOrIntVectorTy() && !STy->isPointerTy())
       return false;
   }
@@ -1122,7 +1122,7 @@
   // Note that we can't support passing all structs this way.  For example,
   // {i16, i16} should be passed in on 32-bit unit, which is not how "i16, i16"
   // would be passed as stand-alone arguments.
-  const StructType *STy = dyn_cast<StructType>(Ty);
+  StructType *STy = dyn_cast<StructType>(Ty);
   if (!STy || STy->isPacked()) return false;
 
   for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
@@ -1151,13 +1151,13 @@
 bool llvm_x86_should_pass_aggregate_as_fca(tree type, Type *Ty) {
   if (TREE_CODE(type) != COMPLEX_TYPE)
     return false;
-  const StructType *STy = dyn_cast<StructType>(Ty);
+  StructType *STy = dyn_cast<StructType>(Ty);
   if (!STy || STy->isPacked()) return false;
 
   // FIXME: Currently codegen isn't lowering most _Complex types in a way that
   // makes it ABI compatible for x86-64. Same for _Complex char and _Complex
   // short in 32-bit.
-  const Type *EltTy = STy->getElementType(0);
+  Type *EltTy = STy->getElementType(0);
   return !((TARGET_64BIT && (EltTy->isIntegerTy() ||
                              EltTy->isFloatTy() ||
                              EltTy->isDoubleTy())) ||
@@ -1190,7 +1190,7 @@
 static void count_num_registers_uses(std::vector<Type*> &ScalarElts,
                                      unsigned &NumGPRs, unsigned &NumXMMs) {
   for (unsigned i = 0, e = ScalarElts.size(); i != e; ++i) {
-    const Type *Ty = ScalarElts[i];
+    Type *Ty = ScalarElts[i];
     if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
       if (!TARGET_MACHO)
         continue;
@@ -1314,8 +1314,8 @@
           assert(0 && "Not yet handled!");
       } else if ((NumClasses-i) == 2) {
         if (Class[i+1] == X86_64_SSEUP_CLASS) {
-          const Type *Ty = ConvertType(TreeType);
-          if (const StructType *STy = dyn_cast<StructType>(Ty))
+          Type *Ty = ConvertType(TreeType);
+          if (StructType *STy = dyn_cast<StructType>(Ty))
             // Look pass the struct wrapper.
             if (STy->getNumElements() == 1)
               Ty = STy->getElementType(0);
@@ -1507,13 +1507,13 @@
 
 // llvm_suitable_multiple_ret_value_type - Return TRUE if return value 
 // of type TY should be returned using multiple value return instruction.
-static bool llvm_suitable_multiple_ret_value_type(const Type *Ty,
+static bool llvm_suitable_multiple_ret_value_type(Type *Ty,
                                                   tree TreeType) {
 
   if (!TARGET_64BIT)
     return false;
 
-  const StructType *STy = dyn_cast<StructType>(Ty);
+  StructType *STy = dyn_cast<StructType>(Ty);
   if (!STy)
     return false;
 
@@ -1547,7 +1547,7 @@
 // can be returned as a scalar, otherwise return NULL.
 Type *llvm_x86_scalar_type_for_struct_return(tree type, unsigned *Offset) {
   *Offset = 0;
-  const Type *Ty = ConvertType(type);
+  Type *Ty = ConvertType(type);
   unsigned Size = getTargetData().getTypeAllocSize(Ty);
   if (Size == 0)
     return Type::getVoidTy(Context);
@@ -1633,7 +1633,7 @@
 /// The original implementation of this routine is based on 
 /// llvm_x86_64_should_pass_aggregate_in_mixed_regs code.
 void
-llvm_x86_64_get_multiple_return_reg_classes(tree TreeType, const Type *Ty,
+llvm_x86_64_get_multiple_return_reg_classes(tree TreeType, Type *Ty,
                                             std::vector<Type*> &Elts){
   enum x86_64_reg_class Class[MAX_CLASSES];
   enum machine_mode Mode = ix86_getNaturalModeForType(TreeType);
@@ -1690,8 +1690,8 @@
           assert(0 && "Not yet handled!");
       } else if ((NumClasses-i) == 2) {
         if (Class[i+1] == X86_64_SSEUP_CLASS) {
-          const Type *Ty = ConvertType(TreeType);
-          if (const StructType *STy = dyn_cast<StructType>(Ty))
+          Type *Ty = ConvertType(TreeType);
+          if (StructType *STy = dyn_cast<StructType>(Ty))
             // Look pass the struct wrapper.
             if (STy->getNumElements() == 1)
               Ty = STy->getElementType(0);
@@ -1770,11 +1770,11 @@
 // Return LLVM Type if TYPE can be returned as an aggregate, 
 // otherwise return NULL.
 Type *llvm_x86_aggr_type_for_struct_return(tree type) {
-  const Type *Ty = ConvertType(type);
+  Type *Ty = ConvertType(type);
   if (!llvm_suitable_multiple_ret_value_type(Ty, type))
     return NULL;
 
-  const StructType *STy = cast<StructType>(Ty);
+  StructType *STy = cast<StructType>(Ty);
   std::vector<Type *> ElementTypes;
 
   // Special handling for _Complex.
@@ -1804,11 +1804,11 @@
                                                LLVMBuilder &Builder,
                                                bool isVolatile) {
   Value *EVI = Builder.CreateExtractValue(Src, SrcFieldNo, "mrv_gr");
-  const StructType *STy = cast<StructType>(Src->getType());
+  StructType *STy = cast<StructType>(Src->getType());
   llvm::Value *Idxs[3];
-  Idxs[0] = ConstantInt::get(llvm::Type::getInt32Ty(Context), 0);
-  Idxs[1] = ConstantInt::get(llvm::Type::getInt32Ty(Context), DestFieldNo);
-  Idxs[2] = ConstantInt::get(llvm::Type::getInt32Ty(Context), DestElemNo);
+  Idxs[0] = ConstantInt::get(Type::getInt32Ty(Context), 0);
+  Idxs[1] = ConstantInt::get(Type::getInt32Ty(Context), DestFieldNo);
+  Idxs[2] = ConstantInt::get(Type::getInt32Ty(Context), DestElemNo);
   Value *GEP = Builder.CreateGEP(Dest, Idxs, Idxs+3, "mrv_gep");
   if (STy->getElementType(SrcFieldNo)->isVectorTy()) {
     Value *ElemIndex = ConstantInt::get(Type::getInt32Ty(Context), SrcElemNo);
@@ -1826,11 +1826,11 @@
                                             bool isVolatile,
                                             LLVMBuilder &Builder) {
   
-  const StructType *STy = cast<StructType>(Src->getType());
+  StructType *STy = cast<StructType>(Src->getType());
   unsigned NumElements = STy->getNumElements();
 
-  const PointerType *PTy = cast<PointerType>(Dest->getType());
-  const StructType *DestTy = cast<StructType>(PTy->getElementType());
+  PointerType *PTy = cast<PointerType>(Dest->getType());
+  StructType *DestTy = cast<StructType>(PTy->getElementType());
 
   unsigned SNO = 0;
   unsigned DNO = 0;
@@ -1861,8 +1861,7 @@
   }
 
   while (SNO < NumElements) {
-
-    const Type *DestElemType = DestTy->getElementType(DNO);
+    Type *DestElemType = DestTy->getElementType(DNO);
 
     // Directly access first class values using getresult.
     if (DestElemType->isSingleValueType()) {
@@ -1876,10 +1875,10 @@
     // Special treatement for _Complex.
     if (DestElemType->isStructTy()) {
       llvm::Value *Idxs[3];
-      Idxs[0] = ConstantInt::get(llvm::Type::getInt32Ty(Context), 0);
-      Idxs[1] = ConstantInt::get(llvm::Type::getInt32Ty(Context), DNO);
+      Idxs[0] = ConstantInt::get(Type::getInt32Ty(Context), 0);
+      Idxs[1] = ConstantInt::get(Type::getInt32Ty(Context), DNO);
 
-      Idxs[2] = ConstantInt::get(llvm::Type::getInt32Ty(Context), 0);
+      Idxs[2] = ConstantInt::get(Type::getInt32Ty(Context), 0);
       Value *GEP = Builder.CreateGEP(Dest, Idxs, Idxs+3, "mrv_gep");
       Value *EVI = Builder.CreateExtractValue(Src, 0, "mrv_gr");
       Builder.CreateStore(EVI, GEP, isVolatile);
@@ -1895,7 +1894,7 @@
     
     // Access array elements individually. Note, Src and Dest type may
     // not match. For example { <2 x float>, float } and { float[3]; }
-    const ArrayType *ATy = cast<ArrayType>(DestElemType);
+    ArrayType *ATy = cast<ArrayType>(DestElemType);
     unsigned ArraySize = ATy->getNumElements();
     unsigned DElemNo = 0; // DestTy's DNO field's element number
     while (DElemNo < ArraySize) {

Modified: llvm-gcc-4.2/trunk/gcc/config/rs6000/llvm-rs6000.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/config/rs6000/llvm-rs6000.cpp?rev=135372&r1=135371&r2=135372&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/config/rs6000/llvm-rs6000.cpp (original)
+++ llvm-gcc-4.2/trunk/gcc/config/rs6000/llvm-rs6000.cpp Sun Jul 17 23:44:09 2011
@@ -53,10 +53,10 @@
 // name of the resultant intrinsic.
 static void MergeIntPtrOperand(TreeToLLVM *TTL,
                                unsigned OpNum, Intrinsic::ID IID,
-                               const Type *ResultType,
+                               Type *ResultType,
                                std::vector<Value*> &Ops,
                                LLVMBuilder &Builder, Value *&Result) {
-  const Type *VoidPtrTy = PointerType::getUnqual(Type::getInt8Ty(Context));
+  Type *VoidPtrTy = PointerType::getUnqual(Type::getInt8Ty(Context));
   
   Function *IntFn = Intrinsic::getDeclaration(TheModule, IID);
   
@@ -79,7 +79,7 @@
 
 // GetAltivecTypeNumFromType - Given an LLVM type, return a unique ID for
 // the type in the range 0-3.
-static int GetAltivecTypeNumFromType(const Type *Ty) {
+static int GetAltivecTypeNumFromType(Type *Ty) {
   return (Ty->isIntegerTy(32) ? 0 : \
           (Ty->isIntegerTy(16) ? 1 : \
            (Ty->isIntegerTy(8) ? 2 : \
@@ -360,7 +360,7 @@
   case ALTIVEC_BUILTIN_VPERM_8HI:
   case ALTIVEC_BUILTIN_VPERM_16QI: {
     // Operation is identical on all types; we have a single intrinsic.
-    const Type *VecTy = VectorType::get(Type::getInt32Ty(Context), 4);
+    Type *VecTy = VectorType::get(Type::getInt32Ty(Context), 4);
     Value *Op0 = CastToType(Instruction::BitCast, Ops[0], VecTy);
     Value *Op1 = CastToType(Instruction::BitCast, Ops[1], VecTy);
     Value *ActualOps[] = { Op0, Op1, Ops[2]};
@@ -375,7 +375,7 @@
   case ALTIVEC_BUILTIN_VSEL_8HI:
   case ALTIVEC_BUILTIN_VSEL_16QI: {
     // Operation is identical on all types; we have a single intrinsic.
-    const Type *VecTy = VectorType::get(Type::getInt32Ty(Context), 4);
+    Type *VecTy = VectorType::get(Type::getInt32Ty(Context), 4);
     Value *Op0 = CastToType(Instruction::BitCast, Ops[0], VecTy);
     Value *Op1 = CastToType(Instruction::BitCast, Ops[1], VecTy);
     Value *Op2 = CastToType(Instruction::BitCast, Ops[2], VecTy);
@@ -396,7 +396,7 @@
   for (unsigned i = 0, e = ScalarElts.size(); i != e; ++i) {
     if (NumGPRs >= 8)
       break;
-    const Type *Ty = ScalarElts[i];
+    Type *Ty = ScalarElts[i];
     if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
       abort();
     } else if (Ty->isPointerTy()) {
@@ -534,7 +534,7 @@
 
   // ppc32 passes aggregates by copying, either in int registers or on the 
   // stack.
-  const StructType *STy = dyn_cast<StructType>(Ty);
+  StructType *STy = dyn_cast<StructType>(Ty);
   if (!STy) return true;
 
   // A struct containing only a float, double or vector field, possibly with
@@ -608,7 +608,7 @@
   if (SrcSize <= 0 || SrcSize > 16)
     return false;
 
-  const StructType *STy = dyn_cast<StructType>(Ty);
+  StructType *STy = dyn_cast<StructType>(Ty);
   if (!STy) return false;
 
   // A struct containing only a float, double or Altivec field, possibly with

Modified: llvm-gcc-4.2/trunk/gcc/llvm-backend.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/llvm-backend.cpp?rev=135372&r1=135371&r2=135372&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/llvm-backend.cpp (original)
+++ llvm-gcc-4.2/trunk/gcc/llvm-backend.cpp Sun Jul 17 23:44:09 2011
@@ -916,7 +916,7 @@
   
   LLVMContext &Context = getGlobalContext();
   
-  const Type *FPTy =
+  Type *FPTy =
     FunctionType::get(Type::getVoidTy(Context), std::vector<Type*>(), false);
   FPTy = FPTy->getPointerTo();
   
@@ -999,7 +999,7 @@
 
   if (!AttributeUsedGlobals.empty()) {
     std::vector<Constant *> AUGs;
-    const Type *SBP= Type::getInt8PtrTy(Context);
+    Type *SBP= Type::getInt8PtrTy(Context);
     for (SmallSetVector<Constant *,32>::iterator
            AI = AttributeUsedGlobals.begin(),
            AE = AttributeUsedGlobals.end(); AI != AE; ++AI) {
@@ -1019,7 +1019,7 @@
 
   if (!AttributeCompilerUsedGlobals.empty()) {
     std::vector<Constant *> ACUGs;
-    const Type *SBP= Type::getInt8PtrTy(Context);
+    Type *SBP= Type::getInt8PtrTy(Context);
     for (SmallSetVector<Constant *,32>::iterator
            AI = AttributeCompilerUsedGlobals.begin(),
            AE = AttributeCompilerUsedGlobals.end(); AI != AE; ++AI) {
@@ -1312,7 +1312,7 @@
   Constant *lineNo = ConstantInt::get(Type::getInt32Ty(Context),
                                       DECL_SOURCE_LINE(decl));
   Constant *file = ConvertMetadataStringToGV(DECL_SOURCE_FILE(decl));
-  const Type *SBP= Type::getInt8PtrTy(Context);
+  Type *SBP= Type::getInt8PtrTy(Context);
   file = TheFolder->CreateBitCast(file, SBP);
  
   // There may be multiple annotate attributes. Pass return of lookup_attr 
@@ -1642,7 +1642,7 @@
 /// false.
 bool ValidateRegisterVariable(tree decl) {
   int RegNumber = decode_reg_name(extractRegisterName(decl));
-  const Type *Ty = ConvertType(TREE_TYPE(decl));
+  Type *Ty = ConvertType(TREE_TYPE(decl));
 
   if (errorcount || sorrycount)
     return true;  // Do not process broken code.
@@ -1757,7 +1757,7 @@
     if (FnEntry == 0) {
       CallingConv::ID CC;
       AttrListPtr PAL;
-      const FunctionType *Ty = 
+      FunctionType *Ty = 
         TheTypeConverter->ConvertFunctionType(TREE_TYPE(decl), decl, NULL,
                                               CC, PAL);
       FnEntry = Function::Create(Ty, Function::ExternalLinkage, Name, TheModule);
@@ -1799,7 +1799,7 @@
   } else {
     assert((TREE_CODE(decl) == VAR_DECL ||
             TREE_CODE(decl) == CONST_DECL) && "Not a function or var decl?");
-    const Type *Ty = ConvertType(TREE_TYPE(decl));
+    Type *Ty = ConvertType(TREE_TYPE(decl));
     GlobalVariable *GV ;
 
     // If we have "extern void foo", make the global have type {} instead of
@@ -1990,7 +1990,7 @@
   // adaptor which would be simpler and more efficient.  In the meantime, just
   // adapt the adaptor.
   raw_os_ostream RO(FS);
-  RO << *(const Type*)LLVM;
+  RO << *(Type*)LLVM;
 }
 
 /// extractRegisterName - Get a register name given its decl. In 4.2 unlike 4.0

Modified: llvm-gcc-4.2/trunk/gcc/llvm-convert.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/llvm-convert.cpp?rev=135372&r1=135371&r2=135372&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/llvm-convert.cpp (original)
+++ llvm-gcc-4.2/trunk/gcc/llvm-convert.cpp Sun Jul 17 23:44:09 2011
@@ -222,7 +222,7 @@
 /// llvm_store_scalar_argument - Store scalar argument ARGVAL of type
 /// LLVMTY at location LOC.
 static void llvm_store_scalar_argument(Value *Loc, Value *ArgVal,
-                                       const llvm::Type *LLVMTy,
+                                       Type *LLVMTy,
                                        unsigned RealSize,
                                        LLVMBuilder &Builder) {
   if (RealSize) {
@@ -230,7 +230,7 @@
     assert(!BYTES_BIG_ENDIAN && "Unsupported case - please report");
     // Do byte wise store because actual argument type does not match LLVMTy.
     assert(ArgVal->getType()->isIntegerTy() && "Expected an integer value!");
-    const Type *StoreType = IntegerType::get(Context, RealSize * 8);
+    Type *StoreType = IntegerType::get(Context, RealSize * 8);
     Loc = Builder.CreateBitCast(Loc, StoreType->getPointerTo());
     if (ArgVal->getType()->getPrimitiveSizeInBits() >=
         StoreType->getPrimitiveSizeInBits())
@@ -278,7 +278,7 @@
     /// getCallingConv - This provides the desired CallingConv for the function.
     CallingConv::ID& getCallingConv(void) { return CallingConv; }
 
-    void HandlePad(llvm::Type *LLVMTy) {
+    void HandlePad(Type *LLVMTy) {
       ++AI;
     }
 
@@ -341,7 +341,7 @@
       ++AI;
     }
 
-    void HandleScalarArgument(llvm::Type *LLVMTy, tree type,
+    void HandleScalarArgument(Type *LLVMTy, tree type,
                               unsigned RealSize = 0) {
       Value *ArgVal = AI;
       LLVMTy = LLVM_ADJUST_MMX_PARAMETER_TYPE(LLVMTy);
@@ -371,7 +371,7 @@
       ++AI;
     }
 
-    void HandleByValArgument(llvm::Type *LLVMTy, tree type) {
+    void HandleByValArgument(Type *LLVMTy, tree type) {
       if (LLVM_BYVAL_ALIGNMENT_TOO_SMALL(type)) {
         // Incoming object on stack is insufficiently aligned for the type.
         // Make a correctly aligned copy.
@@ -402,7 +402,7 @@
       ++AI;
     }
 
-    void HandleFCAArgument(llvm::Type *LLVMTy, tree /*type*/) {
+    void HandleFCAArgument(Type *LLVMTy, tree /*type*/) {
       // Store the FCA argument into alloca.
       assert(!LocStack.empty());
       Value *Loc = LocStack.back();
@@ -415,7 +415,7 @@
       this->Offset = Offset;
     }
 
-    void EnterField(unsigned FieldNo, llvm::Type *StructTy) {
+    void EnterField(unsigned FieldNo, Type *StructTy) {
       NameStack.push_back(NameStack.back()+"."+utostr(FieldNo));
 
       Value *Loc = LocStack.back();
@@ -497,7 +497,7 @@
 
   // Determine the FunctionType and calling convention for this function.
   tree static_chain = cfun->static_chain_decl;
-  const FunctionType *FTy;
+  FunctionType *FTy;
   CallingConv::ID CallingConv;
   AttrListPtr PAL;
 
@@ -796,13 +796,13 @@
       RetVals.push_back(RetVal);
     } else {
       Value *RetVal = DECL_LLVM(DECL_RESULT(FnDecl));
-      if (const StructType *STy = dyn_cast<StructType>(Fn->getReturnType())) {
+      if (StructType *STy = dyn_cast<StructType>(Fn->getReturnType())) {
         Value *R1 = BitCastToType(RetVal, STy->getPointerTo());
 
-        llvm::Value *Idxs[2];
-        Idxs[0] = ConstantInt::get(llvm::Type::getInt32Ty(Context), 0);
+        Value *Idxs[2];
+        Idxs[0] = ConstantInt::get(Type::getInt32Ty(Context), 0);
         for (unsigned ri = 0; ri < STy->getNumElements(); ++ri) {
-          Idxs[1] = ConstantInt::get(llvm::Type::getInt32Ty(Context), ri);
+          Idxs[1] = ConstantInt::get(Type::getInt32Ty(Context), ri);
           Value *GEP = Builder.CreateGEP(R1, Idxs, Idxs+2, "mrv_gep");
           Value *E = Builder.CreateLoad(GEP, "mrv");
           RetVals.push_back(E);
@@ -1345,7 +1345,7 @@
 
 /// CastToType - Cast the specified value to the specified type if it is
 /// not already that type.
-Value *TreeToLLVM::CastToType(unsigned opcode, Value *V, const Type* Ty) {
+Value *TreeToLLVM::CastToType(unsigned opcode, Value *V, Type* Ty) {
   // Handle 'trunc (zext i1 X to T2) to i1' as X, because this occurs all over
   // the place.
   if (ZExtInst *CI = dyn_cast<ZExtInst>(V))
@@ -1359,7 +1359,7 @@
 /// CastToAnyType - Cast the specified value to the specified type making no
 /// assumptions about the types of the arguments. This creates an inferred cast.
 Value *TreeToLLVM::CastToAnyType(Value *V, bool VisSigned,
-                                 const Type* Ty, bool TyIsSigned) {
+                                 Type* Ty, bool TyIsSigned) {
   // Eliminate useless casts of a type to itself.
   if (V->getType() == Ty)
     return V;
@@ -1375,7 +1375,7 @@
 
 /// CastToUIntType - Cast the specified value to the specified type assuming
 /// that the value and type are unsigned integer types.
-Value *TreeToLLVM::CastToUIntType(Value *V, const Type* Ty) {
+Value *TreeToLLVM::CastToUIntType(Value *V, Type* Ty) {
   // Eliminate useless casts of a type to itself.
   if (V->getType() == Ty)
     return V;
@@ -1391,7 +1391,7 @@
 
 /// CastToSIntType - Cast the specified value to the specified type assuming
 /// that the value and type are signed integer types.
-Value *TreeToLLVM::CastToSIntType(Value *V, const Type* Ty) {
+Value *TreeToLLVM::CastToSIntType(Value *V, Type* Ty) {
   // Eliminate useless casts of a type to itself.
   if (V->getType() == Ty)
     return V;
@@ -1407,7 +1407,7 @@
 
 /// CastToFPType - Cast the specified value to the specified type assuming
 /// that the value and type are floating point.
-Value *TreeToLLVM::CastToFPType(Value *V, const Type* Ty) {
+Value *TreeToLLVM::CastToFPType(Value *V, Type* Ty) {
   unsigned SrcBits = V->getType()->getPrimitiveSizeInBits();
   unsigned DstBits = Ty->getPrimitiveSizeInBits();
   if (SrcBits == DstBits)
@@ -1419,14 +1419,14 @@
 
 /// BitCastToType - Insert a BitCast from V to Ty if needed. This is just a
 /// shorthand convenience function for CastToType(Instruction::BitCast,V,Ty).
-Value *TreeToLLVM::BitCastToType(Value *V, const Type *Ty) {
+Value *TreeToLLVM::BitCastToType(Value *V, Type *Ty) {
   return CastToType(Instruction::BitCast, V, Ty);
 }
 
 /// CreateTemporary - Create a new alloca instruction of the specified type,
 /// inserting it into the entry block and returning it.  The resulting
 /// instruction's type is a pointer to the specified type.
-AllocaInst *TreeToLLVM::CreateTemporary(const Type *Ty, unsigned align) {
+AllocaInst *TreeToLLVM::CreateTemporary(Type *Ty, unsigned align) {
   if (AllocaInsertionPoint == 0) {
     // Create a dummy instruction in the entry block as a marker to insert new
     // alloc instructions before.  It doesn't matter what this instruction is,
@@ -1446,7 +1446,7 @@
 }
 
 /// CreateTempLoc - Like CreateTemporary, but returns a MemRef.
-MemRef TreeToLLVM::CreateTempLoc(const Type *Ty) {
+MemRef TreeToLLVM::CreateTempLoc(Type *Ty) {
   AllocaInst *AI = CreateTemporary(Ty);
   // MemRefs do not allow alignment 0.
   if (!AI->getAlignment())
@@ -1480,7 +1480,7 @@
                           LLVMBuilder &Builder, tree gccType){
   assert(DestLoc.Ptr->getType() == SrcLoc.Ptr->getType() &&
          "Cannot copy between two pointers of different type!");
-  const Type *ElTy =
+  Type *ElTy =
     cast<PointerType>(DestLoc.Ptr->getType())->getElementType();
 
   unsigned Alignment = std::min(DestLoc.getAlignment(), SrcLoc.getAlignment());
@@ -1490,7 +1490,7 @@
     StoreInst *S = Builder.CreateStore(V, DestLoc.Ptr, DestLoc.Volatile);
     V->setAlignment(Alignment);
     S->setAlignment(Alignment);
-  } else if (const StructType *STy = dyn_cast<StructType>(ElTy)) {
+  } else if (StructType *STy = dyn_cast<StructType>(ElTy)) {
     const StructLayout *SL = getTargetData().getStructLayout(STy);
     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
       if (gccType && isPaddingElement(gccType, i))
@@ -1503,7 +1503,7 @@
                     Builder, 0);
     }
   } else {
-    const ArrayType *ATy = cast<ArrayType>(ElTy);
+    ArrayType *ATy = cast<ArrayType>(ElTy);
     unsigned EltSize = getTargetData().getTypeAllocSize(ATy->getElementType());
     for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
       Value *DElPtr = Builder.CreateStructGEP(DestLoc.Ptr, i);
@@ -1518,16 +1518,16 @@
 
 /// CountAggregateElements - Return the number of elements in the specified type
 /// that will need to be loaded/stored if we copy this by explicit accesses.
-static unsigned CountAggregateElements(const Type *Ty) {
+static unsigned CountAggregateElements(Type *Ty) {
   if (Ty->isSingleValueType()) return 1;
 
-  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
+  if (StructType *STy = dyn_cast<StructType>(Ty)) {
     unsigned NumElts = 0;
     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
       NumElts += CountAggregateElements(STy->getElementType(i));
     return NumElts;
   } else {
-    const ArrayType *ATy = cast<ArrayType>(Ty);
+    ArrayType *ATy = cast<ArrayType>(Ty);
     return ATy->getNumElements()*CountAggregateElements(ATy->getElementType());
   }
 }
@@ -1535,14 +1535,14 @@
 /// containsFPField - indicates whether the given LLVM type
 /// contains any floating point elements.
 
-static bool containsFPField(const Type *LLVMTy) {
+static bool containsFPField(Type *LLVMTy) {
   if (LLVMTy->isFloatingPointTy())
     return true;
-  const StructType* STy = dyn_cast<StructType>(LLVMTy);
+  StructType* STy = dyn_cast<StructType>(LLVMTy);
   if (STy) {
     for (StructType::element_iterator I = STy->element_begin(),
                                       E = STy->element_end(); I != E; I++) {
-      const Type *Ty = *I;
+      Type *Ty = *I;
       if (Ty->isFloatingPointTy())
         return true;
       if (Ty->isStructTy() && containsFPField(Ty))
@@ -1569,7 +1569,7 @@
     return;  // noop copy.
 
   // If the type is small, copy the elements instead of using a block copy.
-  const Type *LLVMTy = ConvertType(type);
+  Type *LLVMTy = ConvertType(type);
   unsigned NumElts = CountAggregateElements(LLVMTy);
   if (TREE_CODE(TYPE_SIZE(type)) == INTEGER_CST &&
       (NumElts == 1 ||
@@ -1604,13 +1604,13 @@
 /// ZeroAggregate - Recursively traverse the potentially aggregate DestLoc,
 /// zero'ing all of the elements.
 static void ZeroAggregate(MemRef DestLoc, LLVMBuilder &Builder) {
-  const Type *ElTy =
+  Type *ElTy =
     cast<PointerType>(DestLoc.Ptr->getType())->getElementType();
   if (ElTy->isSingleValueType()) {
     StoreInst *St = Builder.CreateStore(Constant::getNullValue(ElTy),
                                         DestLoc.Ptr, DestLoc.Volatile);
     St->setAlignment(DestLoc.getAlignment());
-  } else if (const StructType *STy = dyn_cast<StructType>(ElTy)) {
+  } else if (StructType *STy = dyn_cast<StructType>(ElTy)) {
     const StructLayout *SL = getTargetData().getStructLayout(STy);
     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
       Value *Ptr = Builder.CreateStructGEP(DestLoc.Ptr, i);
@@ -1619,7 +1619,7 @@
       ZeroAggregate(MemRef(Ptr, Alignment, DestLoc.Volatile), Builder);
     }
   } else {
-    const ArrayType *ATy = cast<ArrayType>(ElTy);
+    ArrayType *ATy = cast<ArrayType>(ElTy);
     unsigned EltSize = getTargetData().getTypeAllocSize(ATy->getElementType());
     for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
       Value *Ptr = Builder.CreateStructGEP(DestLoc.Ptr, i);
@@ -1635,7 +1635,7 @@
   // If the type is small, copy the elements instead of using a block copy.
   if (TREE_CODE(TYPE_SIZE(type)) == INTEGER_CST &&
       TREE_INT_CST_LOW(TYPE_SIZE_UNIT(type)) < 128) {
-    const Type *LLVMTy = ConvertType(type);
+    Type *LLVMTy = ConvertType(type);
 
     // If the GCC type is not fully covered by the LLVM type, use memset. This
     // can occur with unions etc.
@@ -1718,7 +1718,7 @@
 
   // The idea is that it's a pointer to type "Value"
   // which is opaque* but the routine expects i8** and i8*.
-  const PointerType *Ty = Type::getInt8PtrTy(Context);
+  PointerType *Ty = Type::getInt8PtrTy(Context);
   V = Builder.CreateBitCast(V, Ty->getPointerTo());
 
   Value *Ops[2] = {
@@ -1745,7 +1745,7 @@
   Constant *lineNo =
     ConstantInt::get(Type::getInt32Ty(Context), DECL_SOURCE_LINE(decl));
   Constant *file = ConvertMetadataStringToGV(DECL_SOURCE_FILE(decl));
-  const Type *SBP= Type::getInt8PtrTy(Context);
+  Type *SBP= Type::getInt8PtrTy(Context);
   file = Builder.getFolder().CreateBitCast(file, SBP);
 
   // There may be multiple annotate attributes. Pass return of lookup_attr
@@ -1765,7 +1765,7 @@
       // Assert its a string, and then get that string.
       assert(TREE_CODE(val) == STRING_CST &&
              "Annotate attribute arg should always be a string");
-      const Type *SBP = Type::getInt8PtrTy(Context);
+      Type *SBP = Type::getInt8PtrTy(Context);
       Constant *strGV = TreeConstantToLLVM::EmitLV_STRING_CST(val);
       Value *Ops[4] = {
         BitCastToType(V, SBP),
@@ -1830,7 +1830,7 @@
   if (isGimpleTemporary(decl))
     return;
 
-  const Type *Ty;  // Type to allocate
+  Type *Ty;  // Type to allocate
   Value *Size = 0; // Amount to alloca (null for 1)
 
   if (DECL_SIZE(decl) == 0) {    // Variable with incomplete type.
@@ -1923,7 +1923,7 @@
     {
       // We should null out local variables so that a stack crawl
       // before initialization doesn't get garbage results to follow.
-      const Type *T = cast<PointerType>(AI->getType())->getElementType();
+      Type *T = cast<PointerType>(AI->getType())->getElementType();
       EmitTypeGcroot(AI, decl);
       Builder.CreateStore(Constant::getNullValue(T), AI);
     }
@@ -2517,7 +2517,7 @@
 
   LValue LV = EmitLV(exp);
   bool isVolatile = TREE_THIS_VOLATILE(exp);
-  const Type *Ty = ConvertType(TREE_TYPE(exp));
+  Type *Ty = ConvertType(TREE_TYPE(exp));
   if (LLVM_IS_DECL_MMX_REGISTER(exp))
     Ty = Type::getX86_MMXTy(Context);
   unsigned Alignment = LV.getAlignment();
@@ -2543,7 +2543,7 @@
     if (!LV.BitSize)
       return Constant::getNullValue(Ty);
 
-    const Type *ValTy = cast<PointerType>(LV.Ptr->getType())->getElementType();
+    Type *ValTy = cast<PointerType>(LV.Ptr->getType())->getElementType();
     unsigned ValSizeInBits = ValTy->getPrimitiveSizeInBits();
 
     // The number of loads needed to read the entire bitfield.
@@ -2650,10 +2650,9 @@
   CallingConv::ID CallingConv;
   AttrListPtr PAL;
 
-  const Type *Ty = TheTypeConverter->ConvertFunctionType(function_type,
-                                                         fndecl,
-                                                         TREE_OPERAND(exp, 2),
-                                                         CallingConv, PAL);
+  Type *Ty = TheTypeConverter->ConvertFunctionType(function_type, fndecl,
+                                                   TREE_OPERAND(exp, 2),
+                                                   CallingConv, PAL);
 
   // If this is a direct call to a function using a static chain then we need
   // to ensure the function type is the one just calculated: it has an extra
@@ -2712,7 +2711,7 @@
 
 /// llvm_load_scalar_argument - Load value located at LOC.
 static Value *llvm_load_scalar_argument(Value *L,
-                                        const llvm::Type *LLVMTy,
+                                        Type *LLVMTy,
                                         unsigned RealSize,
                                         LLVMBuilder &Builder) {
   if (!RealSize)
@@ -2721,7 +2720,7 @@
   // Not clear what this is supposed to do on big endian machines...
   assert(!BYTES_BIG_ENDIAN && "Unsupported case - please report");
   assert(LLVMTy->isIntegerTy() && "Expected an integer value!");
-  const Type *LoadType = IntegerType::get(Context, RealSize * 8);
+  Type *LoadType = IntegerType::get(Context, RealSize * 8);
   L = Builder.CreateBitCast(L, LoadType->getPointerTo());
   Value *Val = Builder.CreateLoad(L);
   if (LoadType->getPrimitiveSizeInBits() >= LLVMTy->getPrimitiveSizeInBits())
@@ -2794,7 +2793,7 @@
     }
 
     // Get the value of the current location (of type Ty).
-    Value *getValue(const Type *Ty) {
+    Value *getValue(Type *Ty) {
       assert(!LocStack.empty());
       Value *Loc = LocStack.back();
       if (Loc) {
@@ -2888,7 +2887,7 @@
       isShadowRet = true;
     }
 
-    void HandlePad(llvm::Type *LLVMTy) {
+    void HandlePad(Type *LLVMTy) {
       CallOperands.push_back(UndefValue::get(LLVMTy));
     }
 
@@ -2910,7 +2909,7 @@
 
     /// HandleScalarArgument - This is the primary callback that specifies an
     /// LLVM argument to pass.  It is only used for first class types.
-    void HandleScalarArgument(llvm::Type *LLVMTy, tree type,
+    void HandleScalarArgument(Type *LLVMTy, tree type,
                               unsigned RealSize = 0) {
       Value *Loc = NULL;
       if (RealSize) {
@@ -2921,7 +2920,7 @@
 
       // Perform any implicit type conversions.
       if (CallOperands.size() < FTy->getNumParams()) {
-        const Type *CalledTy= FTy->getParamType(CallOperands.size());
+        Type *CalledTy= FTy->getParamType(CallOperands.size());
         if (Loc->getType() != CalledTy) {
           assert(type && "Inconsistent parameter types?");
           bool isSigned = !TYPE_UNSIGNED(type);
@@ -2935,7 +2934,7 @@
     /// HandleByInvisibleReferenceArgument - This callback is invoked if a
     /// pointer (of type PtrTy) to the argument is passed rather than the
     /// argument itself.
-    void HandleByInvisibleReferenceArgument(llvm::Type *PtrTy, tree type){
+    void HandleByInvisibleReferenceArgument(Type *PtrTy, tree type){
       Value *Loc = getAddress();
       Loc = Builder.CreateBitCast(Loc, PtrTy);
       CallOperands.push_back(Loc);
@@ -2944,7 +2943,7 @@
     /// HandleByValArgument - This callback is invoked if the aggregate function
     /// argument is passed by value. It is lowered to a parameter passed by
     /// reference with an additional parameter attribute "ByVal".
-    void HandleByValArgument(llvm::Type *LLVMTy, tree type) {
+    void HandleByValArgument(Type *LLVMTy, tree type) {
       Value *Loc = getAddress();
       assert(LLVMTy->getPointerTo() == Loc->getType());
       CallOperands.push_back(Loc);
@@ -2952,7 +2951,7 @@
 
     /// HandleFCAArgument - This callback is invoked if the aggregate function
     /// argument is passed as a first class aggregate.
-    void HandleFCAArgument(llvm::Type *LLVMTy, tree /*type*/) {
+    void HandleFCAArgument(Type *LLVMTy, tree /*type*/) {
       Value *Loc = getAddress();
       assert(LLVMTy->getPointerTo() == Loc->getType());
       CallOperands.push_back(Builder.CreateLoad(Loc));
@@ -2961,7 +2960,7 @@
     /// EnterField - Called when we're about the enter the field of a struct
     /// or union.  FieldNo is the number of the element we are entering in the
     /// LLVM Struct, StructTy is the LLVM type of the struct we are entering.
-    void EnterField(unsigned FieldNo, llvm::Type *StructTy) {
+    void EnterField(unsigned FieldNo, Type *StructTy) {
       Value *Loc = getAddress();
       Loc = Builder.CreateBitCast(Loc, StructTy->getPointerTo());
       pushAddress(Builder.CreateStructGEP(Loc, FieldNo, "elt"));
@@ -3288,7 +3287,7 @@
       // There's a side-effect; alloc a temporary to receive the
       // value, if any.  Do not store into lhs; we must not
       // reference it.
-      const Type *RHSTy = ConvertType(TREE_TYPE(rhs));
+      Type *RHSTy = ConvertType(TREE_TYPE(rhs));
       MemRef dest = CreateTempLoc(RHSTy);
       return Emit(rhs, &dest);
     } else
@@ -3306,7 +3305,7 @@
       return EmitMODIFY_EXPR(exp, DestLoc);
     }
     Value *RHS = Emit(rhs, 0);
-    const Type *LHSTy = ConvertType(TREE_TYPE(lhs));
+    Type *LHSTy = ConvertType(TREE_TYPE(lhs));
     // The value may need to be replaced later if this temporary is multiply
     // defined - ensure it can be uniquely identified by not folding the cast.
     Instruction::CastOps opc = CastInst::getCastOpcode(RHS, RHSSigned,
@@ -3333,7 +3332,7 @@
   unsigned Alignment = LV.getAlignment();
 
   if (!LV.isBitfield()) {
-    const Type *ValTy = ConvertType(TREE_TYPE(rhs));
+    Type *ValTy = ConvertType(TREE_TYPE(rhs));
     if (ValTy->isSingleValueType()) {
       // Non-bitfield, scalar value.  Just emit a store.
       Value *RHS = Emit(rhs, 0);
@@ -3370,7 +3369,7 @@
   if (!LV.BitSize)
     return RHS;
 
-  const Type *ValTy = cast<PointerType>(LV.Ptr->getType())->getElementType();
+  Type *ValTy = cast<PointerType>(LV.Ptr->getType())->getElementType();
   unsigned ValSizeInBits = ValTy->getPrimitiveSizeInBits();
 
   // The number of stores needed to write the entire bitfield.
@@ -3469,7 +3468,7 @@
       TREE_CODE(TREE_OPERAND(exp, 0)) == INTEGER_CST)
     return 0;
   tree Op = TREE_OPERAND(exp, 0);
-  const Type *Ty = ConvertType(TREE_TYPE(exp));
+  Type *Ty = ConvertType(TREE_TYPE(exp));
   bool OpIsSigned = !TYPE_UNSIGNED(TREE_TYPE(Op));
   bool ExpIsSigned = !TYPE_UNSIGNED(TREE_TYPE(exp));
   if (DestLoc == 0) {
@@ -3528,7 +3527,7 @@
       Target = CreateTempLoc(ConvertType(TREE_TYPE(exp)));
 
     // Make the destination look like the source type.
-    const Type *OpTy = ConvertType(TREE_TYPE(Op));
+    Type *OpTy = ConvertType(TREE_TYPE(Op));
     Target.Ptr = BitCastToType(Target.Ptr, OpTy->getPointerTo());
 
     // Needs to be in sync with EmitLV.
@@ -3567,7 +3566,7 @@
       return 0;
 
     // Target holds the temporary created above.
-    const Type *ExpTy = ConvertType(TREE_TYPE(exp));
+    Type *ExpTy = ConvertType(TREE_TYPE(exp));
     return Builder.CreateLoad(BitCastToType(Target.Ptr,
                                           ExpTy->getPointerTo()));
   }
@@ -3586,7 +3585,7 @@
   // Otherwise, this is a scalar to scalar conversion.
   Value *OpVal = Emit(Op, 0);
   assert(OpVal && "Expected a scalar result!");
-  const Type *DestTy = ConvertType(TREE_TYPE(exp));
+  Type *DestTy = ConvertType(TREE_TYPE(exp));
 
   // If the source is a pointer, use ptrtoint to get it to something
   // bitcast'able.  This supports things like v_c_e(foo*, float).
@@ -3623,7 +3622,7 @@
   }
 
   // Emit the operand to a temporary.
-  const Type *ComplexTy =
+  Type *ComplexTy =
     cast<PointerType>(DestLoc->Ptr->getType())->getElementType();
   MemRef Tmp = CreateTempLoc(ComplexTy);
   Emit(TREE_OPERAND(exp, 0), &Tmp);
@@ -3645,7 +3644,7 @@
 Value *TreeToLLVM::EmitCONJ_EXPR(tree exp, const MemRef *DestLoc) {
   assert(DestLoc && "CONJ_EXPR only applies to complex numbers.");
   // Emit the operand to a temporary.
-  const Type *ComplexTy =
+  Type *ComplexTy =
     cast<PointerType>(DestLoc->Ptr->getType())->getElementType();
   MemRef Tmp = CreateTempLoc(ComplexTy);
   Emit(TREE_OPERAND(exp, 0), &Tmp);
@@ -3715,10 +3714,10 @@
 /// getSuitableBitCastIntType - Given Ty is a floating point type or a vector
 /// type with floating point elements, return an integer type to bitcast to.
 /// e.g. 4 x float -> 4 x i32
-static const Type *getSuitableBitCastIntType(const Type *Ty) {
+static Type *getSuitableBitCastIntType(Type *Ty) {
   if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
     unsigned NumElements = VTy->getNumElements();
-    const Type *EltTy = VTy->getElementType();
+    Type *EltTy = VTy->getElementType();
     return VectorType::get(
       IntegerType::get(Context, EltTy->getPrimitiveSizeInBits()), NumElements);
   }
@@ -3727,7 +3726,7 @@
 
 Value *TreeToLLVM::EmitBIT_NOT_EXPR(tree exp) {
   Value *Op = Emit(TREE_OPERAND(exp, 0), 0);
-  const Type *Ty = Op->getType();
+  Type *Ty = Op->getType();
   if (Ty->isPointerTy()) {
     assert (TREE_CODE(TREE_TYPE(exp)) == INTEGER_TYPE &&
             "Expected integer type here");
@@ -3760,7 +3759,7 @@
 /// integer type.  Otherwise, return the expression as whatever TREE_TYPE(exp)
 /// corresponds to.
 Value *TreeToLLVM::EmitCompare(tree exp, unsigned UIOpc, unsigned SIOpc,
-                               unsigned FPPred, const Type *DestTy) {
+                               unsigned FPPred, Type *DestTy) {
   // Get the type of the operands
   tree lhsty = TREE_TYPE(TREE_OPERAND(exp, 0));
   tree rhsty = TREE_TYPE(TREE_OPERAND(exp, 1));
@@ -3817,7 +3816,7 @@
 ///
 Value *TreeToLLVM::EmitBinOp(tree exp, const MemRef *DestLoc, unsigned Opc) {
   tree expty = TREE_TYPE(exp);
-  const Type *Ty = ConvertType(expty);
+  Type *Ty = ConvertType(expty);
   if (Ty->isPointerTy())
     return EmitPtrBinOp(exp, Opc);   // Pointer arithmetic!
   if (Ty->isStructTy())
@@ -3854,7 +3853,7 @@
   // integer types first.
   bool isLogicalOp = Opc == Instruction::And || Opc == Instruction::Or ||
     Opc == Instruction::Xor;
-  const Type *ResTy = Ty;
+  Type *ResTy = Ty;
   if (isLogicalOp &&
       (Ty->isFloatingPointTy() ||
        (Ty->isVectorTy() &&
@@ -3901,7 +3900,7 @@
       Offset = (Offset << 32) >> 32;
 
     // Figure out how large the element pointed to is.
-    const Type *ElTy = cast<PointerType>(LHS->getType())->getElementType();
+    Type *ElTy = cast<PointerType>(LHS->getType())->getElementType();
     // We can't get the type size (and thus convert to using a GEP instr) from
     // pointers to opaque structs if the type isn't abstract.
     if (ElTy->isSized()) {
@@ -3926,7 +3925,7 @@
 
   Value *RHS = Emit(TREE_OPERAND(exp, 1), 0);
 
-  const Type *IntPtrTy = TD.getIntPtrType(Context);
+  Type *IntPtrTy = TD.getIntPtrType(Context);
   bool LHSIsSigned = !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp, 0)));
   bool RHSIsSigned = !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp, 1)));
   LHS = CastToAnyType(LHS, LHSIsSigned, IntPtrTy, false);
@@ -3956,7 +3955,7 @@
 
 Value *TreeToLLVM::EmitShiftOp(tree exp, const MemRef *DestLoc, unsigned Opc) {
   assert(DestLoc == 0 && "aggregate shift?");
-  const Type *Ty = ConvertType(TREE_TYPE(exp));
+  Type *Ty = ConvertType(TREE_TYPE(exp));
   assert(!Ty->isPointerTy() && "Pointer arithmetic!?");
 
   Value *LHS = Emit(TREE_OPERAND(exp, 0), 0);
@@ -3973,7 +3972,7 @@
   Value *Amt = Emit(TREE_OPERAND(exp, 1), 0);
 
   if (In->getType()->isPointerTy()) {
-    const Type *Ty =
+    Type *Ty =
       IntegerType::get(Context,
                        TYPE_PRECISION(TREE_TYPE (TREE_OPERAND (exp, 0))));
     In = Builder.CreatePtrToInt(In, Ty,
@@ -4003,7 +4002,7 @@
   Value *LHS = Emit(TREE_OPERAND(exp, 0), 0);
   Value *RHS = Emit(TREE_OPERAND(exp, 1), 0);
 
-  const Type *Ty = ConvertType(TREE_TYPE(exp));
+  Type *Ty = ConvertType(TREE_TYPE(exp));
 
   // The LHS, RHS and Ty could be integer, floating or pointer typed. We need
   // to convert the LHS and RHS into the destination type before doing the
@@ -4049,7 +4048,7 @@
     // LHS and RHS values must have the same sign if their type is unsigned.
     return EmitBinOp(exp, DestLoc, Instruction::URem);
 
-  const Type *Ty = ConvertType(TREE_TYPE(exp));
+  Type *Ty = ConvertType(TREE_TYPE(exp));
   Constant *Zero = ConstantInt::get(Ty, 0);
 
   Value *LHS = Emit(TREE_OPERAND(exp, 0), 0);
@@ -4080,7 +4079,7 @@
   //   LHS CDiv RHS = (LHS - Sign(RHS)) Div RHS + 1
   // otherwise.
 
-  const Type *Ty = ConvertType(TREE_TYPE(exp));
+  Type *Ty = ConvertType(TREE_TYPE(exp));
   Constant *Zero = ConstantInt::get(Ty, 0);
   Constant *One = ConstantInt::get(Ty, 1);
   Constant *MinusOne = Constant::getAllOnesValue(Ty);
@@ -4152,7 +4151,7 @@
     // same sign, so FDiv is the same as Div.
     return Builder.CreateUDiv(LHS, RHS, "fdiv");
 
-  const Type *Ty = ConvertType(TREE_TYPE(exp));
+  Type *Ty = ConvertType(TREE_TYPE(exp));
   Constant *Zero = ConstantInt::get(Ty, 0);
   Constant *One = ConstantInt::get(Ty, 1);
   Constant *MinusOne = Constant::getAllOnesValue(Ty);
@@ -4198,7 +4197,7 @@
   // required to ensure correct results.  The details depend on whether
   // we are doing signed or unsigned arithmetic.
 
-  const Type *Ty = ConvertType(TREE_TYPE(exp));
+  Type *Ty = ConvertType(TREE_TYPE(exp));
   Constant *Zero = ConstantInt::get(Ty, 0);
   Constant *Two = ConstantInt::get(Ty, 2);
 
@@ -4340,7 +4339,7 @@
  /// asm node that copies the value out of the specified register.
 Value *TreeToLLVM::EmitReadOfRegisterVariable(tree decl,
                                               const MemRef *DestLoc) {
-  const Type *Ty = ConvertType(TREE_TYPE(decl));
+  Type *Ty = ConvertType(TREE_TYPE(decl));
   if (LLVM_IS_DECL_MMX_REGISTER(decl))
     Ty = Type::getX86_MMXTy(Context);
 
@@ -4972,13 +4971,13 @@
     }
     bool isIndirect = false;
     if (AllowsReg || !AllowsMem) {    // Register operand.
-      const Type *LLVMTy = ConvertType(type);
+      Type *LLVMTy = ConvertType(type);
 
       Value *Op = 0;
       if (LLVM_IS_DECL_MMX_REGISTER(Val))
         LLVMTy = Type::getX86_MMXTy(Context);
-      const Type *OpTy = LLVMTy;
-      const StructType *STy = dyn_cast<StructType>(OpTy);
+      Type *OpTy = LLVMTy;
+      StructType *STy = dyn_cast<StructType>(OpTy);
       if (LLVMTy->isSingleValueType() ||
           (STy && STy->getNumElements() == 1)) {
         if (TREE_CODE(Val)==ADDR_EXPR &&
@@ -5023,7 +5022,7 @@
         unsigned Match = atoi(Constraint);
         // This output might have gotten put in either CallResult or CallArg
         // depending whether it's a register or not.  Find its type.
-        const Type *OTy = 0;
+        Type *OTy = 0;
         if (Match < OutputLocations.size()) {
           // Indices here known to be within range.
           if (OutputLocations[Match].first)
@@ -5150,7 +5149,7 @@
     }
   }
 
-  const Type *CallResultType;
+  Type *CallResultType;
   switch (CallResultTypes.size()) {
   case 0: CallResultType = Type::getVoidTy(Context); break;
   case 1: CallResultType = CallResultTypes[0]; break;
@@ -5159,7 +5158,7 @@
     break;
   }
 
-  const FunctionType *FTy =
+  FunctionType *FTy =
     FunctionType::get(CallResultType, CallArgTypes, false);
 
   // Remove the leading comma if we have operands.
@@ -5183,14 +5182,14 @@
   // If the call produces a value, store it into the destination.
   if (StoreCallResultAddrs.size() == 1) {
     Value *V = CV;
-    const Type *DestValTy =
+    Type *DestValTy =
       cast<PointerType>(StoreCallResultAddrs[0]->getType())->getElementType();
     if (CV->getType() != DestValTy)
       V = BitCastToType(CV, DestValTy);
     Builder.CreateStore(V, StoreCallResultAddrs[0]);
   } else if (unsigned NumResults = StoreCallResultAddrs.size()) {
     for (unsigned i = 0; i != NumResults; ++i) {
-      const Type *DestValTy =
+      Type *DestValTy =
         cast<PointerType>(StoreCallResultAddrs[i]->getType())->getElementType();
       Value *ValI = Builder.CreateExtractValue(CV, i, "asmresult");
       if (ValI->getType() != DestValTy)
@@ -5460,7 +5459,7 @@
 
         error("%Hunsupported target builtin %<%s%> used", &EXPR_LOCATION(exp),
               BuiltinName);
-        const Type *ResTy = ConvertType(TREE_TYPE(exp));
+        Type *ResTy = ConvertType(TREE_TYPE(exp));
         if (ResTy->isSingleValueType())
           Result = UndefValue::get(ResTy);
         return true;
@@ -5586,7 +5585,7 @@
   case BUILT_IN_CLZLL: {
     Value *Amt = Emit(TREE_VALUE(TREE_OPERAND(exp, 1)), 0);
     EmitBuiltinUnaryOp(Amt, Result, Intrinsic::ctlz);
-    const Type *DestTy = ConvertType(TREE_TYPE(exp));
+    Type *DestTy = ConvertType(TREE_TYPE(exp));
     Result = Builder.CreateIntCast(Result, DestTy, 
                                    !TYPE_UNSIGNED(TREE_TYPE(exp)),
                                    "cast");
@@ -5597,7 +5596,7 @@
   case BUILT_IN_CTZLL: {
     Value *Amt = Emit(TREE_VALUE(TREE_OPERAND(exp, 1)), 0);
     EmitBuiltinUnaryOp(Amt, Result, Intrinsic::cttz);
-    const Type *DestTy = ConvertType(TREE_TYPE(exp));
+    Type *DestTy = ConvertType(TREE_TYPE(exp));
     Result = Builder.CreateIntCast(Result, DestTy,
                                    !TYPE_UNSIGNED(TREE_TYPE(exp)),
                                    "cast");
@@ -5610,7 +5609,7 @@
     EmitBuiltinUnaryOp(Amt, Result, Intrinsic::ctpop);
     Result = Builder.CreateBinOp(Instruction::And, Result,
                                  ConstantInt::get(Result->getType(), 1));
-    const Type *DestTy = ConvertType(TREE_TYPE(exp));
+    Type *DestTy = ConvertType(TREE_TYPE(exp));
     Result = Builder.CreateIntCast(Result, DestTy,
                                    !TYPE_UNSIGNED(TREE_TYPE(exp)),
                                    "cast");
@@ -5621,7 +5620,7 @@
   case BUILT_IN_POPCOUNTLL: {
     Value *Amt = Emit(TREE_VALUE(TREE_OPERAND(exp, 1)), 0);
     EmitBuiltinUnaryOp(Amt, Result, Intrinsic::ctpop);
-    const Type *DestTy = ConvertType(TREE_TYPE(exp));
+    Type *DestTy = ConvertType(TREE_TYPE(exp));
     Result = Builder.CreateIntCast(Result, DestTy,
                                    !TYPE_UNSIGNED(TREE_TYPE(exp)),
                                    "cast");
@@ -5631,7 +5630,7 @@
   case BUILT_IN_BSWAP64: {
     Value *Amt = Emit(TREE_VALUE(TREE_OPERAND(exp, 1)), 0);
     EmitBuiltinUnaryOp(Amt, Result, Intrinsic::bswap);
-    const Type *DestTy = ConvertType(TREE_TYPE(exp));
+    Type *DestTy = ConvertType(TREE_TYPE(exp));
     Result = Builder.CreateIntCast(Result, DestTy,
                                    !TYPE_UNSIGNED(TREE_TYPE(exp)),
                                    "cast");
@@ -5753,7 +5752,7 @@
     location_t locus = EXPR_LOCATION (exp);
     Constant *lineNo = ConstantInt::get(Type::getInt32Ty(Context), locus.line);
     Constant *file = ConvertMetadataStringToGV(locus.file);
-    const Type *SBP= Type::getInt8PtrTy(Context);
+    Type *SBP= Type::getInt8PtrTy(Context);
     file = Builder.getFolder().CreateBitCast(file, SBP);
 
     // Get arguments.
@@ -6268,7 +6267,7 @@
 
     // FIXME: HACK: Just ignore these.
     {
-      const Type *Ty = ConvertType(TREE_TYPE(exp));
+      Type *Ty = ConvertType(TREE_TYPE(exp));
       if (Ty != Type::getVoidTy(Context))
         Result = Constant::getNullValue(Ty);
       return true;
@@ -6663,7 +6662,7 @@
   if (!validate_arglist(arglist, INTEGER_TYPE, POINTER_TYPE, VOID_TYPE))
     return false;
 
-  const Type *IntPtr = TD.getIntPtrType(Context);
+  Type *IntPtr = TD.getIntPtrType(Context);
   Value *Offset = Emit(TREE_VALUE(arglist), 0);
   Value *Handler = Emit(TREE_VALUE(TREE_CHAIN(arglist)), 0);
 
@@ -6857,7 +6856,7 @@
     Arg2 = Emit(Arg2T, 0);
   }
 
-  static const Type *VPTy = Type::getInt8PtrTy(Context);
+  static Type *VPTy = Type::getInt8PtrTy(Context);
 
   // FIXME: This ignores alignment and volatility of the arguments.
   SmallVector<Value *, 2> Args;
@@ -6875,7 +6874,7 @@
                          VOID_TYPE))
     return false;
 
-  static const Type *VPTy = Type::getInt8PtrTy(Context);
+  static Type *VPTy = Type::getInt8PtrTy(Context);
 
   Value *Tramp = Emit(TREE_VALUE(arglist), 0);
   Tramp = BitCastToType(Tramp, VPTy);
@@ -6942,7 +6941,7 @@
 // EmitComplexBinOp - Note that this operates on binops like ==/!=, which return
 // a bool, not a complex value.
 Value *TreeToLLVM::EmitComplexBinOp(tree exp, const MemRef *DestLoc) {
-  const Type *ComplexTy = ConvertType(TREE_TYPE(TREE_OPERAND(exp, 0)));
+  Type *ComplexTy = ConvertType(TREE_TYPE(TREE_OPERAND(exp, 0)));
 
   MemRef LHSTmp = CreateTempLoc(ComplexTy);
   MemRef RHSTmp = CreateTempLoc(ComplexTy);
@@ -7115,7 +7114,7 @@
         File,  LineNo
       };
 
-      const Type* FieldPtrType = FieldPtr->getType();
+      Type* FieldPtrType = FieldPtr->getType();
       FieldPtr = Builder.CreateCall(Fn, Ops);
       FieldPtr = BitCastToType(FieldPtr, FieldPtrType);
     }
@@ -7169,7 +7168,7 @@
 
   Value *IndexVal = Emit(Index, 0);
 
-  const Type *IntPtrTy = getTargetData().getIntPtrType(Context);
+  Type *IntPtrTy = getTargetData().getIntPtrType(Context);
   if (TYPE_UNSIGNED(IndexType)) // if the index is unsigned
     // ZExt it to retain its value in the larger type
     IndexVal = CastToUIntType(IndexVal, IntPtrTy);
@@ -7187,7 +7186,7 @@
       Builder.CreateGEP(ArrayAddr, Idx.begin(), Idx.end()) :
       Builder.CreateInBoundsGEP(ArrayAddr, Idx.begin(), Idx.end());
 
-    const Type *ElementTy = ConvertType(ElementType);
+    Type *ElementTy = ConvertType(ElementType);
     unsigned Alignment = MinAlign(ArrayAlign, TD.getABITypeAlignment(ElementTy));
     return LValue(BitCastToType(Ptr,
                                 ConvertType(TREE_TYPE(exp))->getPointerTo()),
@@ -7227,7 +7226,7 @@
 
   unsigned BitStart = (unsigned)TREE_INT_CST_LOW(TREE_OPERAND(exp, 2));
   unsigned BitSize = (unsigned)TREE_INT_CST_LOW(TREE_OPERAND(exp, 1));
-  const Type *ValTy = ConvertType(TREE_TYPE(exp));
+  Type *ValTy = ConvertType(TREE_TYPE(exp));
 
   unsigned ValueSizeInBits = TD.getTypeSizeInBits(ValTy);
   assert(BitSize <= ValueSizeInBits &&
@@ -7273,14 +7272,14 @@
   // are laid out.  Note that we convert to the context of the Field, not to the
   // type of Operand #0, because GCC doesn't always have the field match up with
   // operand #0's type.
-  const Type *StructTy = ConvertType(DECL_CONTEXT(FieldDecl));
+  Type *StructTy = ConvertType(DECL_CONTEXT(FieldDecl));
 
   assert((!StructAddrLV.isBitfield() ||
           StructAddrLV.BitStart == 0) && "structs cannot be bitfields!");
 
   StructAddrLV.Ptr = BitCastToType(StructAddrLV.Ptr,
                                    StructTy->getPointerTo());
-  const Type *FieldTy = ConvertType(getDeclaredType(FieldDecl));
+  Type *FieldTy = ConvertType(getDeclaredType(FieldDecl));
 
   // BitStart - This is the actual offset of the field from the start of the
   // struct, in bits.  For bitfields this may be on a non-byte boundary.
@@ -7374,7 +7373,7 @@
            "Variable sized bitfield?");
     unsigned BitfieldSize = TREE_INT_CST_LOW(DECL_SIZE(FieldDecl));
 
-    const Type *LLVMFieldTy =
+    Type *LLVMFieldTy =
       cast<PointerType>(FieldPtr->getType())->getElementType();
 
     // 'piecemeal' will be true if the fetch-type we wish to use will
@@ -7481,7 +7480,7 @@
     
   } else {
     // Make sure we return a pointer to the right type.
-    const Type *EltTy = ConvertType(TREE_TYPE(exp));
+    Type *EltTy = ConvertType(TREE_TYPE(exp));
     FieldPtr = BitCastToType(FieldPtr, EltTy->getPointerTo());
   }
 
@@ -7522,8 +7521,8 @@
   Value *Decl = DECL_LLVM(exp);
   if (Decl == 0) {
     if (errorcount || sorrycount) {
-      const Type *Ty = ConvertType(TREE_TYPE(exp));
-      const PointerType *PTy = Ty->getPointerTo();
+      Type *Ty = ConvertType(TREE_TYPE(exp));
+      PointerType *PTy = Ty->getPointerTo();
       LValue LV(ConstantPointerNull::get(PTy), 1);
       return LV;
     }
@@ -7557,13 +7556,13 @@
     }
   }
 
-  const Type *Ty = ConvertType(TREE_TYPE(exp));
+  Type *Ty = ConvertType(TREE_TYPE(exp));
   // If we have "extern void foo", make the global have type {} instead of
   // type void.
   if (Ty->isVoidTy()) Ty = StructType::get(Context);
   if (LLVM_IS_DECL_MMX_REGISTER(exp))
     Ty = Type::getX86_MMXTy(Context);
-  const PointerType *PTy = Ty->getPointerTo();
+  PointerType *PTy = Ty->getPointerTo();
   unsigned Alignment = Ty->isSized() ? TD.getABITypeAlignment(Ty) : 1;
   if (DECL_ALIGN(exp)) {
     if (DECL_USER_ALIGN(exp) || 8 * Alignment < (unsigned)DECL_ALIGN(exp))
@@ -7658,7 +7657,7 @@
 /// DestLoc.
 Value *TreeToLLVM::EmitCONSTRUCTOR(tree exp, const MemRef *DestLoc) {
   tree type = TREE_TYPE(exp);
-  const Type *Ty = ConvertType(type);
+  Type *Ty = ConvertType(type);
   if (const VectorType *PTy = dyn_cast<VectorType>(Ty)) {
     assert(DestLoc == 0 && "Dest location for packed value?");
 
@@ -7759,7 +7758,7 @@
 }
 
 Constant *TreeConstantToLLVM::ConvertINTEGER_CST(tree exp) {
-  const Type *Ty = ConvertType(TREE_TYPE(exp));
+  Type *Ty = ConvertType(TREE_TYPE(exp));
 
   // Handle i128 specially.
   if (const IntegerType *IT = dyn_cast<IntegerType>(Ty)) {
@@ -7784,7 +7783,7 @@
 }
 
 Constant *TreeConstantToLLVM::ConvertREAL_CST(tree exp) {
-  const Type *Ty = ConvertType(TREE_TYPE(exp));
+  Type *Ty = ConvertType(TREE_TYPE(exp));
   assert((Ty->isFloatingPointTy() ||
 	  Ty->isIntegerTy(16)) && "Integer REAL_CST?");
   long RealArr[2];
@@ -7869,8 +7868,8 @@
 }
 
 Constant *TreeConstantToLLVM::ConvertSTRING_CST(tree exp) {
-  const ArrayType *StrTy = cast<ArrayType>(ConvertType(TREE_TYPE(exp)));
-  const Type *ElTy = StrTy->getElementType();
+  ArrayType *StrTy = cast<ArrayType>(ConvertType(TREE_TYPE(exp)));
+  Type *ElTy = StrTy->getElementType();
 
   unsigned Len = (unsigned)TREE_STRING_LENGTH(exp);
 
@@ -7948,7 +7947,7 @@
 
 Constant *TreeConstantToLLVM::ConvertNOP_EXPR(tree exp) {
   Constant *Elt = Convert(TREE_OPERAND(exp, 0));
-  const Type *Ty = ConvertType(TREE_TYPE(exp));
+  Type *Ty = ConvertType(TREE_TYPE(exp));
   bool EltIsSigned = !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp, 0)));
   bool TyIsSigned = !TYPE_UNSIGNED(TREE_TYPE(exp));
 
@@ -7965,7 +7964,7 @@
 Constant *TreeConstantToLLVM::ConvertCONVERT_EXPR(tree exp) {
   Constant *Elt = Convert(TREE_OPERAND(exp, 0));
   bool EltIsSigned = !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp, 0)));
-  const Type *Ty = ConvertType(TREE_TYPE(exp));
+  Type *Ty = ConvertType(TREE_TYPE(exp));
   bool TyIsSigned = !TYPE_UNSIGNED(TREE_TYPE(exp));
   Instruction::CastOps opcode = CastInst::getCastOpcode(Elt, EltIsSigned, Ty,
                                                         TyIsSigned);
@@ -7979,7 +7978,7 @@
   bool RHSIsSigned = !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp,1)));
   Instruction::CastOps opcode;
   if (LHS->getType()->isPointerTy()) {
-    const Type *IntPtrTy = getTargetData().getIntPtrType(Context);
+    Type *IntPtrTy = getTargetData().getIntPtrType(Context);
     opcode = CastInst::getCastOpcode(LHS, LHSIsSigned, IntPtrTy, false);
     LHS = TheFolder->CreateCast(opcode, LHS, IntPtrTy);
     opcode = CastInst::getCastOpcode(RHS, RHSIsSigned, IntPtrTy, false);
@@ -7993,7 +7992,7 @@
   case MINUS_EXPR:  Result = TheFolder->CreateSub(LHS, RHS); break;
   }
 
-  const Type *Ty = ConvertType(TREE_TYPE(exp));
+  Type *Ty = ConvertType(TREE_TYPE(exp));
   bool TyIsSigned = !TYPE_UNSIGNED(TREE_TYPE(exp));
   opcode = CastInst::getCastOpcode(Result, LHSIsSigned, Ty, TyIsSigned);
   return TheFolder->CreateCast(opcode, Result, Ty);
@@ -8111,7 +8110,7 @@
   //   #2. If any of the elements have different types, return a struct instead
   //       of an array.  This can occur in cases where we have an array of
   //       unions, and the various unions had different pieces init'd.
-  const Type *ElTy = SomeVal->getType();
+  Type *ElTy = SomeVal->getType();
   Constant *Filler = Constant::getNullValue(ElTy);
   bool AllEltsSameType = true;
   for (unsigned i = 0, e = ResultElts.size(); i != e; ++i) {
@@ -8194,7 +8193,7 @@
     }
 
     // Otherwise, there is padding here.  Insert explicit zeros.
-    const Type *PadTy = Type::getInt8Ty(Context);
+    Type *PadTy = Type::getInt8Ty(Context);
     if (AlignedEltOffs-EltOffs != 1)
       PadTy = ArrayType::get(PadTy, AlignedEltOffs-EltOffs);
     ResultElts.insert(ResultElts.begin()+i,
@@ -8278,7 +8277,7 @@
     // Insert enough padding to fully fill in the hole.  Insert padding from
     // NextFieldByteStart (not LLVMNaturalByteOffset) because the padding will
     // not get the same alignment as "Val".
-    const Type *FillTy = Type::getInt8Ty(Context);
+    Type *FillTy = Type::getInt8Ty(Context);
     if (GCCFieldOffsetInBits/8-NextFieldByteStart != 1)
       FillTy = ArrayType::get(FillTy,
                               GCCFieldOffsetInBits/8-NextFieldByteStart);
@@ -8461,7 +8460,7 @@
 
   // If the LLVM Size is too small, add some tail padding to fill it in.
   if (LLVMNaturalSize < GCCStructSize) {
-    const Type *FillTy = Type::getInt8Ty(Context);
+    Type *FillTy = Type::getInt8Ty(Context);
     if (GCCStructSize - NextFieldByteStart != 1)
       FillTy = ArrayType::get(FillTy, GCCStructSize - NextFieldByteStart);
     ResultElts.push_back(Constant::getNullValue(FillTy));
@@ -8576,7 +8575,7 @@
                                              LayoutInfo.StructIsPacked);
 
   // This is a hack for brokenness in the objc frontend.
-  const StructType *LLVMTy = dyn_cast<StructType>(ConvertType(TREE_TYPE(exp)));
+  StructType *LLVMTy = dyn_cast<StructType>(ConvertType(TREE_TYPE(exp)));
   if (LLVMTy && !LLVMTy->isAnonymous() &&
       cast<StructType>(Result->getType())->isLayoutIdentical(LLVMTy))
     Result = ConstantStruct::get(LLVMTy, LayoutInfo.ResultElts);
@@ -8742,7 +8741,7 @@
   // For example if the global's initializer has a different type to the global
   // itself (allowed in GCC but not in LLVM) then the global is changed to have
   // the type of the initializer.  Correct for this now.
-  const Type *Ty = ConvertType(TREE_TYPE(exp));
+  Type *Ty = ConvertType(TREE_TYPE(exp));
   if (Ty->isVoidTy()) Ty = Type::getInt8Ty(Context);  // void* -> i8*.
 
   return TheFolder->CreateBitCast(Val, Ty->getPointerTo());
@@ -8857,7 +8856,7 @@
 
   Constant *IndexVal = Convert(Index);
 
-  const Type *IntPtrTy = getTargetData().getIntPtrType(Context);
+  Type *IntPtrTy = getTargetData().getIntPtrType(Context);
   if (IndexVal->getType() != IntPtrTy)
     IndexVal = TheFolder->CreateIntCast(IndexVal, IntPtrTy,
                                         !TYPE_UNSIGNED(IndexType));
@@ -8875,13 +8874,13 @@
 
   // Ensure that the struct type has been converted, so that the fielddecls
   // are laid out.
-  const Type *StructTy = ConvertType(TREE_TYPE(TREE_OPERAND(exp, 0)));
+  Type *StructTy = ConvertType(TREE_TYPE(TREE_OPERAND(exp, 0)));
 
   tree FieldDecl = TREE_OPERAND(exp, 1);
 
   StructAddrLV = TheFolder->CreateBitCast(StructAddrLV,
                                       StructTy->getPointerTo());
-  const Type *FieldTy = ConvertType(getDeclaredType(FieldDecl));
+  Type *FieldTy = ConvertType(getDeclaredType(FieldDecl));
 
   // BitStart - This is the actual offset of the field from the start of the
   // struct, in bits.  For bitfields this may be on a non-byte boundary.
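
For reference, a minimal stand-alone sketch (not part of this patch) of the
pattern the llvm-convert.cpp hunks above apply: type-query results such as
cast<PointerType>(Ptr->getType())->getElementType() are now held in plain
Type* locals and handed straight back to the builder, with no const to strip.
The helper name is invented and the header paths assume a roughly 3.0-era
LLVM layout.

  #include "llvm/DerivedTypes.h"
  #include "llvm/Support/Casting.h"
  #include "llvm/Support/IRBuilder.h"
  using namespace llvm;

  // Hypothetical helper mirroring the bitfield hunks above: recover the
  // pointee type of a field pointer and load through it.
  static Value *DemoLoadThroughFieldType(IRBuilder<> &Builder,
                                         Value *FieldPtr) {
    // Previously "const Type *"; now a plain Type* the builder accepts as-is.
    Type *LLVMFieldTy =
        cast<PointerType>(FieldPtr->getType())->getElementType();
    Value *Ptr = Builder.CreateBitCast(FieldPtr, LLVMFieldTy->getPointerTo());
    return Builder.CreateLoad(Ptr, "field");
  }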

Modified: llvm-gcc-4.2/trunk/gcc/llvm-internal.h
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/llvm-internal.h?rev=135372&r1=135371&r2=135372&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/llvm-internal.h (original)
+++ llvm-gcc-4.2/trunk/gcc/llvm-internal.h Sun Jul 17 23:44:09 2011
@@ -134,7 +134,7 @@
   /// GCCTypeOverlapsWithLLVMTypePadding - Return true if the specified GCC type
   /// has any data that overlaps with structure padding in the specified LLVM
   /// type.
-  static bool GCCTypeOverlapsWithLLVMTypePadding(tree_node *t, const Type *Ty);
+  static bool GCCTypeOverlapsWithLLVMTypePadding(tree_node *t, Type *Ty);
   
   
   /// ConvertFunctionType - Convert the specified FUNCTION_TYPE or METHOD_TYPE
@@ -149,11 +149,11 @@
   /// ConvertArgListToFnType - Given a DECL_ARGUMENTS list on an GCC tree,
   /// return the LLVM type corresponding to the function.  This is useful for
   /// turning "T foo(...)" functions into "T foo(void)" functions.
-  const FunctionType *ConvertArgListToFnType(tree_node *type,
-                                             tree_node *arglist,
-                                             tree_node *static_chain,
-                                             CallingConv::ID &CallingConv,
-                                             AttrListPtr &PAL);
+  FunctionType *ConvertArgListToFnType(tree_node *type,
+                                       tree_node *arglist,
+                                       tree_node *static_chain,
+                                       CallingConv::ID &CallingConv,
+                                       AttrListPtr &PAL);
   
 private:
   Type *ConvertRECORD(tree_node *type, tree_node *orig_type);
@@ -359,41 +359,41 @@
   
   /// CastToType - Cast the specified value to the specified type if it is
   /// not already that type.
-  Value *CastToType(unsigned opcode, Value *V, const Type *Ty);
+  Value *CastToType(unsigned opcode, Value *V, Type *Ty);
   Value *CastToType(unsigned opcode, Value *V, tree_node *type) {
     return CastToType(opcode, V, ConvertType(type));
   }
 
   /// CastToAnyType - Cast the specified value to the specified type regardless
   /// of the types involved. This is an inferred cast.
-  Value *CastToAnyType (Value *V, bool VSigned, const Type* Ty, bool TySigned);
+  Value *CastToAnyType (Value *V, bool VSigned, Type* Ty, bool TySigned);
 
   /// CastToUIntType - Cast the specified value to the specified type assuming
   /// that V's type and Ty are integral types. This arbitrates between BitCast,
   /// Trunc and ZExt.
-  Value *CastToUIntType(Value *V, const Type* Ty);
+  Value *CastToUIntType(Value *V, Type* Ty);
 
   /// CastToSIntType - Cast the specified value to the specified type assuming
   /// that V's type and Ty are integral types. This arbitrates between BitCast,
   /// Trunc and SExt.
-  Value *CastToSIntType(Value *V, const Type* Ty);
+  Value *CastToSIntType(Value *V, Type* Ty);
 
   /// CastToFPType - Cast the specified value to the specified type assuming
   /// that V's type and Ty are floating point types. This arbitrates between
   /// BitCast, FPTrunc and FPExt.
-  Value *CastToFPType(Value *V, const Type* Ty);
+  Value *CastToFPType(Value *V, Type* Ty);
 
   /// NOOPCastToType - Insert a BitCast from V to Ty if needed. This is just a
   /// convenience function for CastToType(Instruction::BitCast, V, Ty);
-  Value *BitCastToType(Value *V, const Type *Ty);
+  Value *BitCastToType(Value *V, Type *Ty);
 
   /// CreateTemporary - Create a new alloca instruction of the specified type,
   /// inserting it into the entry block and returning it.  The resulting
   /// instruction's type is a pointer to the specified type.
-  AllocaInst *CreateTemporary(const Type *Ty, unsigned align=0);
+  AllocaInst *CreateTemporary(Type *Ty, unsigned align=0);
 
   /// CreateTempLoc - Like CreateTemporary, but returns a MemRef.
-  MemRef CreateTempLoc(const Type *Ty);
+  MemRef CreateTempLoc(Type *Ty);
 
   /// EmitAggregateCopy - Copy the elements from SrcLoc to DestLoc, using the
   /// GCC type specified by GCCType to know which elements to copy.
@@ -476,7 +476,7 @@
 
   /// isNoopCast - Return true if a cast from V to Ty does not change any bits.
   ///
-  static bool isNoopCast(Value *V, const Type *Ty);
+  static bool isNoopCast(Value *V, Type *Ty);
 
   void HandleMultiplyDefinedGimpleTemporary(tree_node *var);
   
@@ -517,7 +517,7 @@
   Value *EmitTRUTH_NOT_EXPR(tree_node *exp);
   Value *EmitEXACT_DIV_EXPR(tree_node *exp, const MemRef *DestLoc);
   Value *EmitCompare(tree_node *exp, unsigned UIPred, unsigned SIPred, 
-                     unsigned FPPred, const Type *DestTy = 0);
+                     unsigned FPPred, Type *DestTy = 0);
   Value *EmitBinOp(tree_node *exp, const MemRef *DestLoc, unsigned Opc);
   Value *EmitPtrBinOp(tree_node *exp, unsigned Opc);
   Value *EmitTruthOp(tree_node *exp, unsigned Opc);
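
These llvm-internal.h declaration changes have to land together with the
matching definitions in llvm-types.cpp below: a return type that loses const
in only one of the two files is a mismatched redeclaration and will not
compile.  A small sketch of the shape of the updated interface, mirroring the
FunctionType::get call in the llvm-convert.cpp hunks above (the class and
function names are invented for illustration; the header path assumes a
roughly 3.0-era LLVM layout).

  #include <vector>
  #include "llvm/DerivedTypes.h"
  using namespace llvm;

  struct DemoConverter {
    // Declaration: returns a plain FunctionType*, as in the header above.
    FunctionType *MakeNullaryFnType(Type *RetTy);
  };

  // Definition: must drop const in the same commit or the return types clash.
  FunctionType *DemoConverter::MakeNullaryFnType(Type *RetTy) {
    std::vector<Type*> NoArgs;  // e.g. turning "T foo(...)" into "T foo(void)"
    return FunctionType::get(RetTy, NoArgs, /*isVarArg=*/false);
  }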

Modified: llvm-gcc-4.2/trunk/gcc/llvm-types.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/llvm-types.cpp?rev=135372&r1=135371&r2=135372&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/llvm-types.cpp (original)
+++ llvm-gcc-4.2/trunk/gcc/llvm-types.cpp Sun Jul 17 23:44:09 2011
@@ -140,7 +140,7 @@
   if (!V)
     return;
 
-  const StructType *STy = cast<StructType>(V->getType()->getElementType());
+  StructType *STy = cast<StructType>(V->getType()->getElementType());
 
   for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
     if (const PointerType *PTy = dyn_cast<PointerType>(STy->getElementType(i)))
@@ -320,9 +320,9 @@
 
 /// FindLLVMTypePadding - If the specified struct has any inter-element padding,
 /// add it to the Padding array.
-static void FindLLVMTypePadding(const Type *Ty, tree type, uint64_t BitOffset,
+static void FindLLVMTypePadding(Type *Ty, tree type, uint64_t BitOffset,
                        SmallVector<std::pair<uint64_t,uint64_t>, 16> &Padding) {
-  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
+  if (StructType *STy = dyn_cast<StructType>(Ty)) {
     const TargetData &TD = getTargetData();
     const StructLayout *SL = TD.getStructLayout(STy);
     uint64_t PrevFieldEnd = 0;
@@ -470,7 +470,7 @@
 }
 
 bool TypeConverter::GCCTypeOverlapsWithLLVMTypePadding(tree type, 
-                                                       const Type *Ty) {
+                                                       Type *Ty) {
   
   // Start by finding all of the padding in the LLVM Type.
   SmallVector<std::pair<uint64_t,uint64_t>, 16> StructPadding;
@@ -601,7 +601,7 @@
       return Ty;
 
     uint64_t ElementSize;
-    const Type *ElementTy;
+    Type *ElementTy;
     if (isSequentialCompatible(type)) {
       // The gcc element type maps to an LLVM type of the same size.
       // Convert to an LLVM array of the converted element type.
@@ -793,7 +793,7 @@
 /// for the function.  This method takes the DECL_ARGUMENTS list (Args), and
 /// fills in Result with the argument types for the function.  It returns the
 /// specified result type for the function.
-const FunctionType *TypeConverter::
+FunctionType *TypeConverter::
 ConvertArgListToFnType(tree type, tree Args, tree static_chain,
                        CallingConv::ID &CallingConv, AttrListPtr &PAL) {
   tree ReturnType = TREE_TYPE(type);
@@ -1100,13 +1100,13 @@
   
   /// getTypeAlignment - Return the alignment of the specified type in bytes.
   ///
-  unsigned getTypeAlignment(const Type *Ty) const {
+  unsigned getTypeAlignment(Type *Ty) const {
     return Packed ? 1 : TD.getABITypeAlignment(Ty);
   }
   
   /// getTypeSize - Return the size of the specified type in bytes.
   ///
-  uint64_t getTypeSize(const Type *Ty) const {
+  uint64_t getTypeSize(Type *Ty) const {
     return TD.getTypeAllocSize(Ty);
   }
   
@@ -1150,7 +1150,7 @@
     if (NoOfBytesToRemove == 0)
       return;
 
-    const Type *LastType = Elements.back();
+    Type *LastType = Elements.back();
     unsigned PadBytes = 0;
 
     if (LastType->isIntegerTy(8))
@@ -1181,7 +1181,7 @@
   /// layout is sized properly. Return false if unable to handle ByteOffset.
   /// In this case caller should redo this struct as a packed structure.
   bool ResizeLastElementIfOverlapsWith(uint64_t ByteOffset, tree Field,
-                                       const Type *Ty) {
+                                       Type *Ty) {
     Type *SavedTy = NULL;
 
     if (!Elements.empty()) {
@@ -1266,7 +1266,7 @@
   
   /// addElement - Add an element to the structure with the specified type,
   /// offset and size.
-  void addElement(const Type *Ty, uint64_t Offset, uint64_t Size,
+  void addElement(Type *Ty, uint64_t Offset, uint64_t Size,
                   bool ExtraPadding = false) {
     Elements.push_back((Type*)Ty);
     ElementOffsetInBytes.push_back(Offset);
@@ -1818,7 +1818,7 @@
       PadBytes = StartOffsetInBits/8-FirstUnallocatedByte;
 
     if (PadBytes) {
-      const Type *Pad = Type::getInt8Ty(Context);
+      Type *Pad = Type::getInt8Ty(Context);
       if (PadBytes != 1)
         Pad = ArrayType::get(Pad, PadBytes);
       Info.addElement(Pad, FirstUnallocatedByte, PadBytes);
@@ -1852,7 +1852,7 @@
 /// then we will add padding later on anyway to match union size.
 void TypeConverter::SelectUnionMember(tree type,
                                       StructTypeConversionInfo &Info) {
-  const Type *UnionTy = 0;
+  Type *UnionTy = 0;
   tree GccUnionTy = 0;
   tree UnionField = 0;
   unsigned MaxAlignSize = 0, MaxAlign = 0;
@@ -2030,12 +2030,12 @@
              Info->getTypeAlignment(Type::getInt32Ty(Context))) == 0) {
           // Insert array of i32.
           unsigned Int32ArraySize = (GCCTypeSize-LLVMStructSize) / 4;
-          const Type *PadTy =
+          Type *PadTy =
             ArrayType::get(Type::getInt32Ty(Context), Int32ArraySize);
           Info->addElement(PadTy, GCCTypeSize - LLVMLastElementEnd,
                            Int32ArraySize, true /* Padding Element */);
         } else {
-          const Type *PadTy = ArrayType::get(Type::getInt8Ty(Context),
+          Type *PadTy = ArrayType::get(Type::getInt8Ty(Context),
                                              GCCTypeSize-LLVMStructSize);
           Info->addElement(PadTy, GCCTypeSize - LLVMLastElementEnd,
                            GCCTypeSize - LLVMLastElementEnd,
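
The llvm-types.cpp hunks follow the same recipe: cast<> and dyn_cast<> return
a pointer with the constness of their operand, so starting from a plain Type*
keeps the whole chain of StructType*/PointerType* results non-const.  A
stand-alone sketch of that propagation, modeled on the StructType walk near
the top of this file's diff (the helper name is invented; header paths assume
a roughly 3.0-era LLVM layout).

  #include "llvm/DerivedTypes.h"
  #include "llvm/Support/Casting.h"
  using namespace llvm;

  // Hypothetical helper: count the pointer-typed fields of a struct type.
  static unsigned DemoCountPointerFields(Type *Ty) {
    unsigned Count = 0;
    // Non-const operand in, non-const StructType* out -- nothing to const_cast.
    if (StructType *STy = dyn_cast<StructType>(Ty))
      for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
        if (isa<PointerType>(STy->getElementType(i)))
          ++Count;
    return Count;
  }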
