[llvm-commits] [llvm] r68887 - /llvm/trunk/lib/Target/X86/X86FastISel.cpp
Chris Lattner
sabre at nondot.org
Sun Apr 12 00:36:02 PDT 2009
Author: lattner
Date: Sun Apr 12 02:36:01 2009
New Revision: 68887
URL: http://llvm.org/viewvc/llvm-project?rev=68887&view=rev
Log:
Simplify code by using IntrinsicInst.
Modified:
llvm/trunk/lib/Target/X86/X86FastISel.cpp
Modified: llvm/trunk/lib/Target/X86/X86FastISel.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86FastISel.cpp?rev=68887&r1=68886&r2=68887&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86FastISel.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86FastISel.cpp Sun Apr 12 02:36:01 2009
@@ -23,7 +23,7 @@
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
-#include "llvm/Intrinsics.h"
+#include "llvm/IntrinsicInst.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
@@ -112,7 +112,7 @@
bool X86SelectExtractValue(Instruction *I);
- bool X86VisitIntrinsicCall(CallInst &I, unsigned Intrinsic);
+ bool X86VisitIntrinsicCall(IntrinsicInst &I);
bool X86SelectCall(Instruction *I);
CCAssignFn *CCAssignFnForCall(unsigned CC, bool isTailCall = false);
@@ -777,55 +777,45 @@
// looking for the SETO/SETB instruction. If an instruction modifies the
// EFLAGS register before we reach the SETO/SETB instruction, then we can't
// convert the branch into a JO/JB instruction.
-
- Value *Agg = EI->getAggregateOperand();
-
- if (CallInst *CI = dyn_cast<CallInst>(Agg)) {
- Function *F = CI->getCalledFunction();
-
- if (F && F->isDeclaration()) {
- switch (F->getIntrinsicID()) {
- default: break;
- case Intrinsic::sadd_with_overflow:
- case Intrinsic::uadd_with_overflow: {
- const MachineInstr *SetMI = 0;
- unsigned Reg = lookUpRegForValue(EI);
-
- for (MachineBasicBlock::const_reverse_iterator
- RI = MBB->rbegin(), RE = MBB->rend(); RI != RE; ++RI) {
- const MachineInstr &MI = *RI;
-
- if (MI.modifiesRegister(Reg)) {
- unsigned Src, Dst, SrcSR, DstSR;
-
- if (getInstrInfo()->isMoveInstr(MI, Src, Dst, SrcSR, DstSR)) {
- Reg = Src;
- continue;
- }
-
- SetMI = &MI;
- break;
+ if (IntrinsicInst *CI = dyn_cast<IntrinsicInst>(EI->getAggregateOperand())){
+ if (CI->getIntrinsicID() == Intrinsic::sadd_with_overflow ||
+ CI->getIntrinsicID() == Intrinsic::uadd_with_overflow) {
+ const MachineInstr *SetMI = 0;
+ unsigned Reg = lookUpRegForValue(EI);
+
+ for (MachineBasicBlock::const_reverse_iterator
+ RI = MBB->rbegin(), RE = MBB->rend(); RI != RE; ++RI) {
+ const MachineInstr &MI = *RI;
+
+ if (MI.modifiesRegister(Reg)) {
+ unsigned Src, Dst, SrcSR, DstSR;
+
+ if (getInstrInfo()->isMoveInstr(MI, Src, Dst, SrcSR, DstSR)) {
+ Reg = Src;
+ continue;
}
- const TargetInstrDesc &TID = MI.getDesc();
- if (TID.hasUnmodeledSideEffects() ||
- TID.hasImplicitDefOfPhysReg(X86::EFLAGS))
- break;
+ SetMI = &MI;
+ break;
}
- if (SetMI) {
- unsigned OpCode = SetMI->getOpcode();
+ const TargetInstrDesc &TID = MI.getDesc();
+ if (TID.hasUnmodeledSideEffects() ||
+ TID.hasImplicitDefOfPhysReg(X86::EFLAGS))
+ break;
+ }
- if (OpCode == X86::SETOr || OpCode == X86::SETBr) {
- BuildMI(MBB, DL, TII.get((OpCode == X86::SETOr) ?
- X86::JO : X86::JB)).addMBB(TrueMBB);
- FastEmitBranch(FalseMBB);
- MBB->addSuccessor(TrueMBB);
- return true;
- }
+ if (SetMI) {
+ unsigned OpCode = SetMI->getOpcode();
+
+ if (OpCode == X86::SETOr || OpCode == X86::SETBr) {
+ BuildMI(MBB, DL, TII.get((OpCode == X86::SETOr) ?
+ X86::JO : X86::JB)).addMBB(TrueMBB);
+ FastEmitBranch(FalseMBB);
+ MBB->addSuccessor(TrueMBB);
+ return true;
}
}
- }
}
}
}
@@ -1027,30 +1017,26 @@
ExtractValueInst *EI = cast<ExtractValueInst>(I);
Value *Agg = EI->getAggregateOperand();
- if (CallInst *CI = dyn_cast<CallInst>(Agg)) {
- Function *F = CI->getCalledFunction();
-
- if (F && F->isDeclaration()) {
- switch (F->getIntrinsicID()) {
- default: break;
- case Intrinsic::sadd_with_overflow:
- case Intrinsic::uadd_with_overflow:
- // Cheat a little. We know that the registers for "add" and "seto" are
- // allocated sequentially. However, we only keep track of the register
- // for "add" in the value map. Use extractvalue's index to get the
- // correct register for "seto".
- UpdateValueMap(I, lookUpRegForValue(Agg) + *EI->idx_begin());
- return true;
- }
+ if (IntrinsicInst *CI = dyn_cast<IntrinsicInst>(Agg)) {
+ switch (CI->getIntrinsicID()) {
+ default: break;
+ case Intrinsic::sadd_with_overflow:
+ case Intrinsic::uadd_with_overflow:
+ // Cheat a little. We know that the registers for "add" and "seto" are
+ // allocated sequentially. However, we only keep track of the register
+ // for "add" in the value map. Use extractvalue's index to get the
+ // correct register for "seto".
+ UpdateValueMap(I, lookUpRegForValue(Agg) + *EI->idx_begin());
+ return true;
}
}
return false;
}
-bool X86FastISel::X86VisitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
+bool X86FastISel::X86VisitIntrinsicCall(IntrinsicInst &I) {
// FIXME: Handle more intrinsics.
- switch (Intrinsic) {
+ switch (I.getIntrinsicID()) {
default: return false;
case Intrinsic::sadd_with_overflow:
case Intrinsic::uadd_with_overflow: {
@@ -1059,11 +1045,11 @@
// instructions are encountered, we use the fact that two registers were
// created sequentially to get the correct registers for the "sum" and the
// "overflow bit".
- MVT VT;
const Function *Callee = I.getCalledFunction();
const Type *RetTy =
cast<StructType>(Callee->getReturnType())->getTypeAtIndex(unsigned(0));
+ MVT VT;
if (!isTypeLegal(RetTy, VT))
return false;
@@ -1077,7 +1063,6 @@
return false;
unsigned OpC = 0;
-
if (VT == MVT::i32)
OpC = X86::ADD32rr;
else if (VT == MVT::i64)
@@ -1090,8 +1075,10 @@
UpdateValueMap(&I, ResultReg);
ResultReg = createResultReg(TLI.getRegClassFor(MVT::i8));
- BuildMI(MBB, DL, TII.get((Intrinsic == Intrinsic::sadd_with_overflow) ?
- X86::SETOr : X86::SETBr), ResultReg);
+ unsigned Opc = X86::SETBr;
+ if (I.getIntrinsicID() == Intrinsic::sadd_with_overflow)
+ Opc = X86::SETOr;
+ BuildMI(MBB, DL, TII.get(Opc), ResultReg);
return true;
}
}
@@ -1106,10 +1093,8 @@
return false;
// Handle intrinsic calls.
- if (Function *F = CI->getCalledFunction())
- if (F->isDeclaration())
- if (unsigned IID = F->getIntrinsicID())
- return X86VisitIntrinsicCall(*CI, IID);
+ if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI))
+ return X86VisitIntrinsicCall(*II);
// Handle only C and fastcc calling conventions for now.
CallSite CS(CI);
More information about the llvm-commits
mailing list