[llvm-commits] CVS: llvm/lib/CodeGen/AsmPrinter.cpp IntrinsicLowering.cpp MachineDebugInfo.cpp

Reid Spencer reid at x10sys.com
Sun Nov 26 17:06:26 PST 2006



Changes in directory llvm/lib/CodeGen:

AsmPrinter.cpp updated: 1.117 -> 1.118
IntrinsicLowering.cpp updated: 1.46 -> 1.47
MachineDebugInfo.cpp updated: 1.59 -> 1.60
---
Log message:

For PR950 (http://llvm.org/PR950):
The long-awaited CAST patch. This introduces 12 new instructions into LLVM
to replace the cast instruction. Corresponding changes throughout LLVM are
provided. This passes llvm-test, llvm/test, and SPEC CPUINT2000 with the
exception of 175.vpr, which fails only due to a slight floating-point output
difference.
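
For context, the twelve new cast opcodes are Trunc, ZExt, SExt, FPTrunc,
FPExt, UIToFP, SIToFP, FPToUI, FPToSI, PtrToInt, IntToPtr and BitCast.
Here is a minimal sketch of what the change means for code that used to
build a generic cast (my illustration, not part of the patch; the header
paths and the helper name are assumptions, while the CastInst calls are the
ones used in the diffs below):

  #include "llvm/Instructions.h"   // CastInst, Instruction::CastOps
  #include "llvm/Type.h"           // Type::IntTy
  using namespace llvm;

  // Hypothetical helper: convert a value to int, inserting before InsertPt.
  Value *widenToInt(Value *V, Instruction *InsertPt) {
    // Before this patch: one untyped cast instruction did every conversion.
    //   return new CastInst(V, Type::IntTy, V->getName(), InsertPt);

    // After this patch: either spell the conversion out explicitly ...
    return CastInst::create(Instruction::SExt, V, Type::IntTy,
                            V->getName(), InsertPt);
    // ... or let LLVM infer the opcode from the source/destination types:
    //   return CastInst::createInferredCast(V, Type::IntTy, V->getName(),
    //                                       InsertPt);
  }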


---
Diffs of the changes:  (+95 -24)

 AsmPrinter.cpp        |   15 +++++++
 IntrinsicLowering.cpp |  100 +++++++++++++++++++++++++++++++++++++++-----------
 MachineDebugInfo.cpp  |    4 +-
 3 files changed, 95 insertions(+), 24 deletions(-)


Index: llvm/lib/CodeGen/AsmPrinter.cpp
diff -u llvm/lib/CodeGen/AsmPrinter.cpp:1.117 llvm/lib/CodeGen/AsmPrinter.cpp:1.118
--- llvm/lib/CodeGen/AsmPrinter.cpp:1.117	Mon Nov 20 14:29:06 2006
+++ llvm/lib/CodeGen/AsmPrinter.cpp	Sun Nov 26 19:05:09 2006
@@ -423,7 +423,20 @@
       }
       break;
     }
-    case Instruction::Cast: {
+    case Instruction::Trunc:
+    case Instruction::ZExt:
+    case Instruction::SExt:
+    case Instruction::FPTrunc:
+    case Instruction::FPExt:
+    case Instruction::UIToFP:
+    case Instruction::SIToFP:
+    case Instruction::FPToUI:
+    case Instruction::FPToSI:
+      assert(0 && "FIXME: Don't yet support this kind of constant cast expr");
+      break;
+    case Instruction::IntToPtr:
+    case Instruction::PtrToInt:
+    case Instruction::BitCast: {
       // Support only foldable casts to/from pointers that can be eliminated by
       // changing the pointer to the appropriately sized integer type.
       Constant *Op = CE->getOperand(0);
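
In other words, AsmPrinter's constant-expression folder now dispatches on a
specific cast opcode instead of a single Instruction::Cast, and only the
pointer-sized reinterpreting casts remain foldable. A rough illustration of
that dispatch (mine, not from the patch; the header paths are assumptions,
the opcode names are those in the hunk above):

  #include "llvm/Constants.h"     // ConstantExpr
  #include "llvm/Instruction.h"   // Instruction::PtrToInt, ...
  using namespace llvm;

  // True for the cast constant-exprs the printer can still fold by treating
  // the pointer as an integer of the appropriate size.
  static bool isFoldableCastExpr(const ConstantExpr *CE) {
    switch (CE->getOpcode()) {
    case Instruction::IntToPtr:
    case Instruction::PtrToInt:
    case Instruction::BitCast:
      return true;
    default:
      return false;   // Trunc/ZExt/SExt and the FP casts are rejected here.
    }
  }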


Index: llvm/lib/CodeGen/IntrinsicLowering.cpp
diff -u llvm/lib/CodeGen/IntrinsicLowering.cpp:1.46 llvm/lib/CodeGen/IntrinsicLowering.cpp:1.47
--- llvm/lib/CodeGen/IntrinsicLowering.cpp:1.46	Wed Nov 15 12:00:10 2006
+++ llvm/lib/CodeGen/IntrinsicLowering.cpp	Sun Nov 26 19:05:09 2006
@@ -40,6 +40,7 @@
 template <class ArgIt>
 static CallInst *ReplaceCallWith(const char *NewFn, CallInst *CI,
                                  ArgIt ArgBegin, ArgIt ArgEnd,
+                                 const unsigned *castOpcodes,
                                  const Type *RetTy, Function *&FCache) {
   if (!FCache) {
     // If we haven't already looked up this function, check to see if the
@@ -63,7 +64,12 @@
        ++I, ++ArgNo) {
     Value *Arg = *I;
     if (Arg->getType() != FT->getParamType(ArgNo))
-      Arg = new CastInst(Arg, FT->getParamType(ArgNo), Arg->getName(), CI);
+      if (castOpcodes[ArgNo])
+        Arg = CastInst::create(Instruction::CastOps(castOpcodes[ArgNo]),
+          Arg, FT->getParamType(ArgNo), Arg->getName(), CI);
+      else
+        Arg = CastInst::createInferredCast(Arg, FT->getParamType(ArgNo), 
+                                           Arg->getName(), CI);
     Operands.push_back(Arg);
   }
   // Pass nulls into any additional arguments...
@@ -76,7 +82,7 @@
   if (!CI->use_empty()) {
     Value *V = NewCI;
     if (CI->getType() != NewCI->getType())
-      V = new CastInst(NewCI, CI->getType(), Name, CI);
+      V = CastInst::createInferredCast(NewCI, CI->getType(), Name, CI);
     CI->replaceAllUsesWith(V);
   }
   return NewCI;
@@ -283,8 +289,9 @@
     // convert the call to an explicit setjmp or longjmp call.
   case Intrinsic::setjmp: {
     static Function *SetjmpFCache = 0;
+    static const unsigned castOpcodes[] = { Instruction::BitCast };
     Value *V = ReplaceCallWith("setjmp", CI, CI->op_begin()+1, CI->op_end(),
-                               Type::IntTy, SetjmpFCache);
+                               castOpcodes, Type::IntTy, SetjmpFCache);
     if (CI->getType() != Type::VoidTy)
       CI->replaceAllUsesWith(V);
     break;
@@ -296,16 +303,20 @@
 
   case Intrinsic::longjmp: {
     static Function *LongjmpFCache = 0;
+    static const unsigned castOpcodes[] = 
+      { Instruction::BitCast, 0 };
     ReplaceCallWith("longjmp", CI, CI->op_begin()+1, CI->op_end(),
-                    Type::VoidTy, LongjmpFCache);
+                    castOpcodes, Type::VoidTy, LongjmpFCache);
     break;
   }
 
   case Intrinsic::siglongjmp: {
     // Insert the call to abort
     static Function *AbortFCache = 0;
-    ReplaceCallWith("abort", CI, CI->op_end(), CI->op_end(), Type::VoidTy,
-                    AbortFCache);
+    static const unsigned castOpcodes[] =
+      { Instruction::BitCast, 0 };
+    ReplaceCallWith("abort", CI, CI->op_end(), CI->op_end(), 
+                    castOpcodes, Type::VoidTy, AbortFCache);
     break;
   }
   case Intrinsic::ctpop_i8:
@@ -383,31 +394,76 @@
   case Intrinsic::dbg_declare:
     break;    // Simply strip out debugging intrinsics
 
-  case Intrinsic::memcpy_i32:
-  case Intrinsic::memcpy_i64: {
+  case Intrinsic::memcpy_i32: {
    // The memcpy intrinsic takes an extra alignment argument that the memcpy
     // libc function does not.
+    static unsigned opcodes[] = 
+      { Instruction::BitCast, Instruction::BitCast, Instruction::BitCast };
+    // FIXME:
+    // if (target_is_64_bit) opcodes[2] = Instruction::ZExt;
+    // else opcodes[2] = Instruction::BitCast;
+    static Function *MemcpyFCache = 0;
+    ReplaceCallWith("memcpy", CI, CI->op_begin()+1, CI->op_end()-1,
+                    opcodes, (*(CI->op_begin()+1))->getType(), MemcpyFCache);
+    break;
+  }
+  case Intrinsic::memcpy_i64: {
+    static unsigned opcodes[] = 
+      { Instruction::BitCast, Instruction::BitCast, Instruction::Trunc };
+    // FIXME:
+    // if (target_is_64_bit) opcodes[2] = Instruction::BitCast;
+    // else opcodes[2] = Instruction::Trunc;
     static Function *MemcpyFCache = 0;
     ReplaceCallWith("memcpy", CI, CI->op_begin()+1, CI->op_end()-1,
-                    (*(CI->op_begin()+1))->getType(), MemcpyFCache);
+                     opcodes, (*(CI->op_begin()+1))->getType(), MemcpyFCache);
+    break;
+  }
+  case Intrinsic::memmove_i32: {
+    // The memmove intrinsic takes an extra alignment argument that the memmove
+    // libc function does not.
+    static unsigned opcodes[] = 
+      { Instruction::BitCast, Instruction::BitCast, Instruction::BitCast };
+    // FIXME:
+    // if (target_is_64_bit) opcodes[2] = Instruction::ZExt;
+    // else opcodes[2] = Instruction::BitCast;
+    static Function *MemmoveFCache = 0;
+    ReplaceCallWith("memmove", CI, CI->op_begin()+1, CI->op_end()-1,
+                    opcodes, (*(CI->op_begin()+1))->getType(), MemmoveFCache);
     break;
   }
-  case Intrinsic::memmove_i32: 
   case Intrinsic::memmove_i64: {
    // The memmove intrinsic takes an extra alignment argument that the memmove
     // libc function does not.
+    static const unsigned opcodes[] = 
+      { Instruction::BitCast, Instruction::BitCast, Instruction::Trunc };
+    // if (target_is_64_bit) opcodes[2] = Instruction::BitCast;
+    // else opcodes[2] = Instruction::Trunc;
     static Function *MemmoveFCache = 0;
     ReplaceCallWith("memmove", CI, CI->op_begin()+1, CI->op_end()-1,
-                    (*(CI->op_begin()+1))->getType(), MemmoveFCache);
+                    opcodes, (*(CI->op_begin()+1))->getType(), MemmoveFCache);
     break;
   }
-  case Intrinsic::memset_i32:
+  case Intrinsic::memset_i32: {
+    // The memset intrinsic takes an extra alignment argument that the memset
+    // libc function does not.
+    static const unsigned opcodes[] = 
+      { Instruction::BitCast, Instruction::ZExt, Instruction::ZExt, 0 };
+    // if (target_is_64_bit) opcodes[2] = Instruction::BitCast;
+    // else opcodes[2] = Instruction::ZExt;
+    static Function *MemsetFCache = 0;
+    ReplaceCallWith("memset", CI, CI->op_begin()+1, CI->op_end()-1,
+                    opcodes, (*(CI->op_begin()+1))->getType(), MemsetFCache);
+  }
   case Intrinsic::memset_i64: {
    // The memset intrinsic takes an extra alignment argument that the memset
     // libc function does not.
+    static const unsigned opcodes[] = 
+      { Instruction::BitCast, Instruction::ZExt, Instruction::Trunc, 0 };
+    // if (target_is_64_bit) opcodes[2] = Instruction::BitCast;
+    // else opcodes[2] = Instruction::Trunc;
     static Function *MemsetFCache = 0;
     ReplaceCallWith("memset", CI, CI->op_begin()+1, CI->op_end()-1,
-                    (*(CI->op_begin()+1))->getType(), MemsetFCache);
+                    opcodes, (*(CI->op_begin()+1))->getType(), MemsetFCache);
     break;
   }
   case Intrinsic::isunordered_f32:
@@ -422,16 +478,18 @@
                              "isunordered", CI));
     break;
   }
-  case Intrinsic::sqrt_f32:
+  case Intrinsic::sqrt_f32: {
+    static const unsigned opcodes[] = { 0 };
+    static Function *sqrtfFCache = 0;
+    ReplaceCallWith("sqrtf", CI, CI->op_begin()+1, CI->op_end(),
+                    opcodes, Type::FloatTy, sqrtfFCache);
+    break;
+  }
   case Intrinsic::sqrt_f64: {
+    static const unsigned opcodes[] =  { 0 };
     static Function *sqrtFCache = 0;
-    static Function *sqrtfFCache = 0;
-    if(CI->getType() == Type::FloatTy)
-      ReplaceCallWith("sqrtf", CI, CI->op_begin()+1, CI->op_end(),
-                      Type::FloatTy, sqrtfFCache);
-    else
-      ReplaceCallWith("sqrt", CI, CI->op_begin()+1, CI->op_end(),
-                      Type::DoubleTy, sqrtFCache);
+    ReplaceCallWith("sqrt", CI, CI->op_begin()+1, CI->op_end(),
+                    opcodes, Type::DoubleTy, sqrtFCache);
     break;
   }
   }
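
The key to reading the IntrinsicLowering changes is the new castOpcodes
parameter of ReplaceCallWith: one entry per argument of the replacement
libcall, where a nonzero entry forces that cast opcode and 0 falls back to
CastInst::createInferredCast. A sketch of what a new case in
LowerIntrinsicCall would look like under this convention (my reading of the
diff; the callee name, the helper and the cache variable are hypothetical):

  #include "llvm/Instructions.h"   // CallInst, Instruction::BitCast
  #include "llvm/Function.h"
  #include "llvm/Type.h"           // Type::IntTy
  using namespace llvm;

  // Hypothetical lowering of some intrinsic to a libc call named "example".
  static void lowerToExampleLibcall(CallInst *CI) {
    static const unsigned castOpcodes[] = {
      Instruction::BitCast,   // 1st libcall argument: force a pointer bitcast
      0                       // 2nd libcall argument: infer the cast opcode
    };
    static Function *ExampleFCache = 0;   // looked up/created on first use
    ReplaceCallWith("example", CI, CI->op_begin()+1, CI->op_end(),
                    castOpcodes, Type::IntTy, ExampleFCache);
  }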


Index: llvm/lib/CodeGen/MachineDebugInfo.cpp
diff -u llvm/lib/CodeGen/MachineDebugInfo.cpp:1.59 llvm/lib/CodeGen/MachineDebugInfo.cpp:1.60
--- llvm/lib/CodeGen/MachineDebugInfo.cpp:1.59	Wed Nov  8 08:17:45 2006
+++ llvm/lib/CodeGen/MachineDebugInfo.cpp	Sun Nov 26 19:05:09 2006
@@ -102,7 +102,7 @@
   if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
     return GV;
   } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
-    if (CE->getOpcode() == Instruction::Cast) {
+    if (CE->getOpcode() == Instruction::BitCast) {
       return dyn_cast<GlobalVariable>(CE->getOperand(0));
     }
   }
@@ -115,7 +115,7 @@
   if (isa<GlobalVariable>(V) || isa<ConstantPointerNull>(V)) {
     return true;
   } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
-    if (CE->getOpcode() == Instruction::Cast) {
+    if (CE->getOpcode() == Instruction::BitCast) {
       return isa<GlobalVariable>(CE->getOperand(0));
     }
   }





