[llvm] r213050 - [FastISel][X86] Implement the FastLowerIntrinsicCall hook.

Eric Christopher echristo at gmail.com
Wed Jul 16 16:05:15 PDT 2014


Mind replying to that one and saying what changed?

Thanks!
On Jul 16, 2014 3:35 PM, "Juergen Ributzka" <juergen at apple.com> wrote:

> Sorry about that - I will remember it for next time.
> This commit itself never changed; it depended on another commit that was
> broken.
>
> Cheers,
> Juergen
>
> On Jul 15, 2014, at 3:57 PM, Eric Christopher <echristo at gmail.com> wrote:
>
> > So you again reapplied these without a comment as to what you fixed
> > or changed.
> >
> > Please remember to comment on the patches with what changed, if
> > anything. And next time, please note in the commit message that you
> > are reapplying something you had reverted, together with the fix or a
> > comment that it didn't break anything, so that everyone else knows
> > what's going on.
> >
> > Thanks.
> >
> > -eric
> >
> > On Mon, Jul 14, 2014 at 11:35 PM, Juergen Ributzka <juergen at apple.com> wrote:
> >> Author: ributzka
> >> Date: Tue Jul 15 01:35:50 2014
> >> New Revision: 213050
> >>
> >> URL: http://llvm.org/viewvc/llvm-project?rev=213050&view=rev
> >> Log:
> >> [FastISel][X86] Implement the FastLowerIntrinsicCall hook.
> >>
> >> Rename X86VisitIntrinsicCall -> FastLowerIntrinsicCall, which effectively
> >> implements the target hook.
> >>
> >> Modified:
> >>    llvm/trunk/lib/Target/X86/X86FastISel.cpp
> >>
> >> Modified: llvm/trunk/lib/Target/X86/X86FastISel.cpp
> >> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86FastISel.cpp?rev=213050&r1=213049&r2=213050&view=diff
> >> ==============================================================================
> >> --- llvm/trunk/lib/Target/X86/X86FastISel.cpp (original)
> >> +++ llvm/trunk/lib/Target/X86/X86FastISel.cpp Tue Jul 15 01:35:50 2014
> >> @@ -75,6 +75,7 @@ public:
> >>
> >>   bool FastLowerArguments() override;
> >>   bool FastLowerCall(CallLoweringInfo &CLI) override;
> >> +  bool FastLowerIntrinsicCall(const IntrinsicInst *II) override;
> >>
> >> #include "X86GenFastISel.inc"
> >>
> >> @@ -125,7 +126,6 @@ private:
> >>   bool X86SelectFPExt(const Instruction *I);
> >>   bool X86SelectFPTrunc(const Instruction *I);
> >>
> >> -  bool X86VisitIntrinsicCall(const IntrinsicInst &I);
> >>   bool X86SelectCall(const Instruction *I);
> >>
> >>   bool DoSelectCall(const Instruction *I, const char *MemIntName);
> >> @@ -2167,8 +2167,8 @@ bool X86FastISel::TryEmitSmallMemcpy(X86
> >>   return true;
> >> }
> >>
> >> -static bool isCommutativeIntrinsic(IntrinsicInst const &I) {
> >> -  switch (I.getIntrinsicID()) {
> >> +static bool isCommutativeIntrinsic(IntrinsicInst const *II) {
> >> +  switch (II->getIntrinsicID()) {
> >>   case Intrinsic::sadd_with_overflow:
> >>   case Intrinsic::uadd_with_overflow:
> >>   case Intrinsic::smul_with_overflow:
> >> @@ -2179,12 +2179,12 @@ static bool isCommutativeIntrinsic(Intri
> >>   }
> >> }
> >>
> >> -bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
> >> +bool X86FastISel::FastLowerIntrinsicCall(const IntrinsicInst *II) {
> >>   // FIXME: Handle more intrinsics.
> >> -  switch (I.getIntrinsicID()) {
> >> +  switch (II->getIntrinsicID()) {
> >>   default: return false;
> >>   case Intrinsic::frameaddress: {
> >> -    Type *RetTy = I.getCalledFunction()->getReturnType();
> >> +    Type *RetTy = II->getCalledFunction()->getReturnType();
> >>
> >>     MVT VT;
> >>     if (!isTypeLegal(RetTy, VT))
> >> @@ -2224,7 +2224,7 @@ bool X86FastISel::X86VisitIntrinsicCall(
> >>     // movq (%rax), %rax
> >>     // ...
> >>     unsigned DestReg;
> >> -    unsigned Depth = cast<ConstantInt>(I.getOperand(0))->getZExtValue();
> >> +    unsigned Depth = cast<ConstantInt>(II->getOperand(0))->getZExtValue();
> >>     while (Depth--) {
> >>       DestReg = createResultReg(RC);
> >>       addDirectMem(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
> >> @@ -2232,23 +2232,23 @@ bool X86FastISel::X86VisitIntrinsicCall(
> >>       SrcReg = DestReg;
> >>     }
> >>
> >> -    UpdateValueMap(&I, SrcReg);
> >> +    UpdateValueMap(II, SrcReg);
> >>     return true;
> >>   }
> >>   case Intrinsic::memcpy: {
> >> -    const MemCpyInst &MCI = cast<MemCpyInst>(I);
> >> +    const MemCpyInst *MCI = cast<MemCpyInst>(II);
> >>     // Don't handle volatile or variable length memcpys.
> >> -    if (MCI.isVolatile())
> >> +    if (MCI->isVolatile())
> >>       return false;
> >>
> >> -    if (isa<ConstantInt>(MCI.getLength())) {
> >> +    if (isa<ConstantInt>(MCI->getLength())) {
> >>       // Small memcpy's are common enough that we want to do them
> >>       // without a call if possible.
> >> -      uint64_t Len = cast<ConstantInt>(MCI.getLength())->getZExtValue();
> >> +      uint64_t Len = cast<ConstantInt>(MCI->getLength())->getZExtValue();
> >>       if (IsMemcpySmall(Len)) {
> >>         X86AddressMode DestAM, SrcAM;
> >> -        if (!X86SelectAddress(MCI.getRawDest(), DestAM) ||
> >> -            !X86SelectAddress(MCI.getRawSource(), SrcAM))
> >> +        if (!X86SelectAddress(MCI->getRawDest(), DestAM) ||
> >> +            !X86SelectAddress(MCI->getRawSource(), SrcAM))
> >>           return false;
> >>         TryEmitSmallMemcpy(DestAM, SrcAM, Len);
> >>         return true;
> >> @@ -2256,35 +2256,35 @@ bool X86FastISel::X86VisitIntrinsicCall(
> >>     }
> >>
> >>     unsigned SizeWidth = Subtarget->is64Bit() ? 64 : 32;
> >> -    if (!MCI.getLength()->getType()->isIntegerTy(SizeWidth))
> >> +    if (!MCI->getLength()->getType()->isIntegerTy(SizeWidth))
> >>       return false;
> >>
> >> -    if (MCI.getSourceAddressSpace() > 255 || MCI.getDestAddressSpace() > 255)
> >> +    if (MCI->getSourceAddressSpace() > 255 || MCI->getDestAddressSpace() > 255)
> >>       return false;
> >>
> >> -    return DoSelectCall(&I, "memcpy");
> >> +    return LowerCallTo(II, "memcpy", II->getNumArgOperands() - 2);
> >>   }
> >>   case Intrinsic::memset: {
> >> -    const MemSetInst &MSI = cast<MemSetInst>(I);
> >> +    const MemSetInst *MSI = cast<MemSetInst>(II);
> >>
> >> -    if (MSI.isVolatile())
> >> +    if (MSI->isVolatile())
> >>       return false;
> >>
> >>     unsigned SizeWidth = Subtarget->is64Bit() ? 64 : 32;
> >> -    if (!MSI.getLength()->getType()->isIntegerTy(SizeWidth))
> >> +    if (!MSI->getLength()->getType()->isIntegerTy(SizeWidth))
> >>       return false;
> >>
> >> -    if (MSI.getDestAddressSpace() > 255)
> >> +    if (MSI->getDestAddressSpace() > 255)
> >>       return false;
> >>
> >> -    return DoSelectCall(&I, "memset");
> >> +    return LowerCallTo(II, "memset", II->getNumArgOperands() - 2);
> >>   }
> >>   case Intrinsic::stackprotector: {
> >>     // Emit code to store the stack guard onto the stack.
> >>     EVT PtrTy = TLI.getPointerTy();
> >>
> >> -    const Value *Op1 = I.getArgOperand(0); // The guard's value.
> >> -    const AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
> >> +    const Value *Op1 = II->getArgOperand(0); // The guard's value.
> >> +    const AllocaInst *Slot = cast<AllocaInst>(II->getArgOperand(1));
> >>
> >>     MFI.setStackProtectorIndex(FuncInfo.StaticAllocaMap[Slot]);
> >>
> >> @@ -2295,7 +2295,7 @@ bool X86FastISel::X86VisitIntrinsicCall(
> >>     return true;
> >>   }
> >>   case Intrinsic::dbg_declare: {
> >> -    const DbgDeclareInst *DI = cast<DbgDeclareInst>(&I);
> >> +    const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
> >>     X86AddressMode AM;
> >>     assert(DI->getAddress() && "Null address should be checked earlier!");
> >>     if (!X86SelectAddress(DI->getAddress(), AM))
> >> @@ -2315,7 +2315,7 @@ bool X86FastISel::X86VisitIntrinsicCall(
> >>     if (!Subtarget->hasSSE1())
> >>       return false;
> >>
> >> -    Type *RetTy = I.getCalledFunction()->getReturnType();
> >> +    Type *RetTy = II->getCalledFunction()->getReturnType();
> >>
> >>     MVT VT;
> >>     if (!isTypeLegal(RetTy, VT))
> >> @@ -2337,7 +2337,7 @@ bool X86FastISel::X86VisitIntrinsicCall(
> >>     case MVT::f64: Opc = SqrtOpc[1][HasAVX]; RC = &X86::FR64RegClass; break;
> >>     }
> >>
> >> -    const Value *SrcVal = I.getArgOperand(0);
> >> +    const Value *SrcVal = II->getArgOperand(0);
> >>     unsigned SrcReg = getRegForValue(SrcVal);
> >>
> >>     if (SrcReg == 0)
> >> @@ -2360,7 +2360,7 @@ bool X86FastISel::X86VisitIntrinsicCall(
> >>
> >>     MIB.addReg(SrcReg);
> >>
> >> -    UpdateValueMap(&I, ResultReg);
> >> +    UpdateValueMap(II, ResultReg);
> >>     return true;
> >>   }
> >>   case Intrinsic::sadd_with_overflow:
> >> @@ -2371,7 +2371,7 @@ bool X86FastISel::X86VisitIntrinsicCall(
> >>   case Intrinsic::umul_with_overflow: {
> >>     // This implements the basic lowering of the xalu with overflow intrinsics
> >>     // into add/sub/mul followed by either seto or setb.
> >> -    const Function *Callee = I.getCalledFunction();
> >> +    const Function *Callee = II->getCalledFunction();
> >>     auto *Ty = cast<StructType>(Callee->getReturnType());
> >>     Type *RetTy = Ty->getTypeAtIndex(0U);
> >>     Type *CondTy = Ty->getTypeAtIndex(1);
> >> @@ -2383,16 +2383,16 @@ bool X86FastISel::X86VisitIntrinsicCall(
> >>     if (VT < MVT::i8 || VT > MVT::i64)
> >>       return false;
> >>
> >> -    const Value *LHS = I.getArgOperand(0);
> >> -    const Value *RHS = I.getArgOperand(1);
> >> +    const Value *LHS = II->getArgOperand(0);
> >> +    const Value *RHS = II->getArgOperand(1);
> >>
> >>     // Canonicalize immediate to the RHS.
> >>     if (isa<ConstantInt>(LHS) && !isa<ConstantInt>(RHS) &&
> >> -        isCommutativeIntrinsic(I))
> >> +        isCommutativeIntrinsic(II))
> >>       std::swap(LHS, RHS);
> >>
> >>     unsigned BaseOpc, CondOpc;
> >> -    switch (I.getIntrinsicID()) {
> >> +    switch (II->getIntrinsicID()) {
> >>     default: llvm_unreachable("Unexpected intrinsic!");
> >>     case Intrinsic::sadd_with_overflow:
> >>       BaseOpc = ISD::ADD; CondOpc = X86::SETOr; break;
> >> @@ -2469,7 +2469,7 @@ bool X86FastISel::X86VisitIntrinsicCall(
> >>     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CondOpc),
> >>             ResultReg2);
> >>
> >> -    UpdateValueMap(&I, ResultReg, 2);
> >> +    UpdateValueMap(II, ResultReg, 2);
> >>     return true;
> >>   }
> >>   case Intrinsic::x86_sse_cvttss2si:
> >> @@ -2477,7 +2477,7 @@ bool X86FastISel::X86VisitIntrinsicCall(
> >>   case Intrinsic::x86_sse2_cvttsd2si:
> >>   case Intrinsic::x86_sse2_cvttsd2si64: {
> >>     bool IsInputDouble;
> >> -    switch (I.getIntrinsicID()) {
> >> +    switch (II->getIntrinsicID()) {
> >>     default: llvm_unreachable("Unexpected intrinsic.");
> >>     case Intrinsic::x86_sse_cvttss2si:
> >>     case Intrinsic::x86_sse_cvttss2si64:
> >> @@ -2493,7 +2493,7 @@ bool X86FastISel::X86VisitIntrinsicCall(
> >>       break;
> >>     }
> >>
> >> -    Type *RetTy = I.getCalledFunction()->getReturnType();
> >> +    Type *RetTy = II->getCalledFunction()->getReturnType();
> >>     MVT VT;
> >>     if (!isTypeLegal(RetTy, VT))
> >>       return false;
> >> @@ -2513,7 +2513,7 @@ bool X86FastISel::X86VisitIntrinsicCall(
> >>     }
> >>
> >>     // Check if we can fold insertelement instructions into the convert.
> >> -    const Value *Op = I.getArgOperand(0);
> >> +    const Value *Op = II->getArgOperand(0);
> >>     while (auto *IE = dyn_cast<InsertElementInst>(Op)) {
> >>       const Value *Index = IE->getOperand(2);
> >>       if (!isa<ConstantInt>(Index))
> >> @@ -2535,7 +2535,7 @@ bool X86FastISel::X86VisitIntrinsicCall(
> >>     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
> >>       .addReg(Reg);
> >>
> >> -    UpdateValueMap(&I, ResultReg);
> >> +    UpdateValueMap(II, ResultReg);
> >>     return true;
> >>   }
> >>   }
> >> @@ -2644,9 +2644,9 @@ bool X86FastISel::X86SelectCall(const In
> >>   if (isa<InlineAsm>(Callee))
> >>     return false;
> >>
> >> -  // Handle intrinsic calls.
> >> -  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI))
> >> -    return X86VisitIntrinsicCall(*II);
> >> +  // Skip intrinsic calls - we already handled these.
> >> +  if (isa<IntrinsicInst>(CI))
> >> +    return false;
> >>
> >>   // Allow SelectionDAG isel to handle tail calls.
> >>   if (cast<CallInst>(I)->isTailCall())
> >>
> >>
> >> _______________________________________________
> >> llvm-commits mailing list
> >> llvm-commits at cs.uiuc.edu
> >> http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits
>
>
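
For context, the hook this patch wires up follows FastISel's usual
two-level pattern: the target-independent FastISel class declares a
virtual FastLowerIntrinsicCall that returns false by default, and a
target override claims the intrinsics it can lower directly; anything it
declines falls back to the slower SelectionDAG path. A minimal,
self-contained sketch of that dispatch (the types below are stand-ins,
not LLVM's own):

    #include <cstdio>

    // Stand-in for llvm::IntrinsicInst; the real class carries operands.
    struct IntrinsicInst {};

    class FastISel {
    public:
      virtual ~FastISel() {}
      // Target hook: return true if the intrinsic was fast-path lowered.
      // The default declines, deferring to SelectionDAG.
      virtual bool FastLowerIntrinsicCall(const IntrinsicInst *) {
        return false;
      }
    };

    class X86FastISel : public FastISel {
    public:
      // r213050 renames X86VisitIntrinsicCall into this override, so the
      // generic selector reaches it through the base-class hook.
      bool FastLowerIntrinsicCall(const IntrinsicInst *II) override {
        (void)II;    // the real code switches on II->getIntrinsicID()
        return true; // pretend X86 handled it
      }
    };

    int main() {
      IntrinsicInst II;
      X86FastISel X86ISel;
      FastISel &FI = X86ISel; // dispatch through the base-class hook
      std::printf("handled: %d\n", FI.FastLowerIntrinsicCall(&II));
      return 0;
    }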
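
Likewise, replacing DoSelectCall(&I, "memcpy") with
LowerCallTo(II, "memcpy", II->getNumArgOperands() - 2) relies on the
memcpy/memset intrinsics carrying two trailing operands (alignment and
the isvolatile flag, in the IR form of that era) that are not parameters
of the C library function, so only the leading operands are forwarded to
the libcall. A hypothetical illustration of that trimming, again with
stand-in types rather than LLVM's:

    #include <cstdio>
    #include <string>
    #include <vector>

    // Stand-in for an intrinsic call site and its operand list.
    struct Call { std::vector<std::string> Ops; };

    // Forward only the first NumArgs operands to the named libcall,
    // mirroring LowerCallTo(II, "memcpy", II->getNumArgOperands() - 2):
    // llvm.memcpy's trailing alignment and isvolatile operands are not
    // arguments of the C memcpy function.
    std::vector<std::string> libcallArgs(const Call &C, unsigned NumArgs) {
      return std::vector<std::string>(C.Ops.begin(),
                                      C.Ops.begin() + NumArgs);
    }

    int main() {
      // dst, src, len, align, isvolatile: the 2014-era operand layout.
      Call Memcpy{{"dst", "src", "len", "align", "isvolatile"}};
      unsigned NumArgs = Memcpy.Ops.size() - 2;
      for (const std::string &A : libcallArgs(Memcpy, NumArgs))
        std::printf("%s\n", A.c_str()); // prints dst, src, len
      return 0;
    }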
