[llvm-commits] [llvm] r157755 - in /llvm/trunk: include/llvm/Target/TargetInstrInfo.h lib/CodeGen/PeepholeOptimizer.cpp lib/Target/X86/X86InstrInfo.cpp lib/Target/X86/X86InstrInfo.h test/CodeGen/X86/jump_sign.ll

Rotem, Nadav nadav.rotem at intel.com
Sat Jun 2 13:01:49 PDT 2012


Hi Manman,

Why did you decide to implement this optimization as a peephole pass and not as a DAGCombine or even InstCombine optimization?

Thanks,
Nadav

-----Original Message-----
From: llvm-commits-bounces at cs.uiuc.edu [mailto:llvm-commits-bounces at cs.uiuc.edu] On Behalf Of Manman Ren
Sent: Thursday, May 31, 2012 20:20
To: llvm-commits at cs.uiuc.edu
Subject: [llvm-commits] [llvm] r157755 - in /llvm/trunk: include/llvm/Target/TargetInstrInfo.h lib/CodeGen/PeepholeOptimizer.cpp lib/Target/X86/X86InstrInfo.cpp lib/Target/X86/X86InstrInfo.h test/CodeGen/X86/jump_sign.ll

Author: mren
Date: Thu May 31 12:20:29 2012
New Revision: 157755

URL: http://llvm.org/viewvc/llvm-project?rev=157755&view=rev
Log:
X86: replace SUB with CMP if possible

This patch will optimize the following
        movq    %rdi, %rax
        subq    %rsi, %rax
        cmovsq  %rsi, %rdi
        movq    %rdi, %rax
to
        cmpq    %rsi, %rdi
        cmovsq  %rsi, %rdi
        movq    %rdi, %rax

This optimization is performed only when the actual result of the SUB is not used, i.e. only the flags it sets are consumed.
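
For context, one source-level shape that can reach the backend with this pattern (a hypothetical illustration, not the test case added below) is a signed-max style select: the subtraction exists only to feed the sign check, so its register result is dead once the select chooses between the original operands. SUB and CMP set the same EFLAGS; the only difference is that CMP does not write a destination register.

    // Hypothetical C++ illustration, assuming the explicit subtraction survives
    // into the backend: the value of x - y is never stored or returned, only its
    // sign is consumed, so the SUB can become a CMP feeding the CMOV.
    long pick(long x, long y) {
      return (x - y < 0) ? y : x;
    }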

rdar: 11540023

Modified:
    llvm/trunk/include/llvm/Target/TargetInstrInfo.h
    llvm/trunk/lib/CodeGen/PeepholeOptimizer.cpp
    llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
    llvm/trunk/lib/Target/X86/X86InstrInfo.h
    llvm/trunk/test/CodeGen/X86/jump_sign.ll

Modified: llvm/trunk/include/llvm/Target/TargetInstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Target/TargetInstrInfo.h?rev=157755&r1=157754&r2=157755&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Target/TargetInstrInfo.h (original)
+++ llvm/trunk/include/llvm/Target/TargetInstrInfo.h Thu May 31 12:20:29 2012
@@ -640,6 +640,14 @@
     return false;
   }
 
+  /// OptimizeSubInstr - See if the SUB instruction can be converted into
+  /// something more efficient. E.g., on X86, we can replace SUB with CMP
+  /// if the actual result of SUB is not used.
+  virtual bool OptimizeSubInstr(MachineInstr *SubInstr,
+                                const MachineRegisterInfo *MRI) const {
+    return false;
+  }
+
   /// FoldImmediate - 'Reg' is known to be defined by a move immediate
   /// instruction, try to fold the immediate into the use instruction.
   virtual bool FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,

Modified: llvm/trunk/lib/CodeGen/PeepholeOptimizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/PeepholeOptimizer.cpp?rev=157755&r1=157754&r2=157755&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/PeepholeOptimizer.cpp (original)
+++ llvm/trunk/lib/CodeGen/PeepholeOptimizer.cpp Thu May 31 12:20:29 2012
@@ -472,6 +472,7 @@
         if (SeenMoveImm)
           Changed |= foldImmediate(MI, MBB, ImmDefRegs, ImmDefMIs);
       }
+      Changed |= TII->OptimizeSubInstr(MI, MRI);
 
       First = false;
       PMII = MII;

Modified: llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrInfo.cpp?rev=157755&r1=157754&r2=157755&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86InstrInfo.cpp Thu May 31 12:20:29 2012
@@ -2709,6 +2709,44 @@
   NewMIs.push_back(MIB);
 }
 
+bool X86InstrInfo::
+OptimizeSubInstr(MachineInstr *SubInstr, const MachineRegisterInfo *MRI) const {
+  // Only handle register/register and register/immediate forms of SUB; if the
+  // destination is a memory operand, do not perform this optimization.
+  if ((SubInstr->getOpcode() != X86::SUB64rr) &&
+      (SubInstr->getOpcode() != X86::SUB32rr) &&
+      (SubInstr->getOpcode() != X86::SUB16rr) &&
+      (SubInstr->getOpcode() != X86::SUB8rr) &&
+      (SubInstr->getOpcode() != X86::SUB64ri32) &&
+      (SubInstr->getOpcode() != X86::SUB64ri8) &&
+      (SubInstr->getOpcode() != X86::SUB32ri) &&
+      (SubInstr->getOpcode() != X86::SUB32ri8) &&
+      (SubInstr->getOpcode() != X86::SUB16ri) &&
+      (SubInstr->getOpcode() != X86::SUB16ri8) &&
+      (SubInstr->getOpcode() != X86::SUB8ri))
+    return false;
+  unsigned DestReg = SubInstr->getOperand(0).getReg();
+  if (MRI->use_begin(DestReg) != MRI->use_end())
+    return false;
+
+  // There is no use of the destination register; we can replace SUB with CMP.
+  switch (SubInstr->getOpcode()) {
+    default: break;
+    case X86::SUB64rr:   SubInstr->setDesc(get(X86::CMP64rr));   break;
+    case X86::SUB32rr:   SubInstr->setDesc(get(X86::CMP32rr));   break;
+    case X86::SUB16rr:   SubInstr->setDesc(get(X86::CMP16rr));   break;
+    case X86::SUB8rr:    SubInstr->setDesc(get(X86::CMP8rr));    break;
+    case X86::SUB64ri32: SubInstr->setDesc(get(X86::CMP64ri32)); break;
+    case X86::SUB64ri8:  SubInstr->setDesc(get(X86::CMP64ri8));  break;
+    case X86::SUB32ri:   SubInstr->setDesc(get(X86::CMP32ri));   break;
+    case X86::SUB32ri8:  SubInstr->setDesc(get(X86::CMP32ri8));  break;
+    case X86::SUB16ri:   SubInstr->setDesc(get(X86::CMP16ri));   break;
+    case X86::SUB16ri8:  SubInstr->setDesc(get(X86::CMP16ri8));  break;
+    case X86::SUB8ri:    SubInstr->setDesc(get(X86::CMP8ri));    break;
+  }
+  SubInstr->RemoveOperand(0);
+  return true;
+}
+
 /// Expand2AddrUndef - Expand a single-def pseudo instruction to a two-addr
 /// instruction with two undef reads of the register being defined.  This is
 /// used for mapping:
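
For readers scanning the opcode checks above: the rewrite is a one-to-one SUBxx -> CMPxx mapping over the register/register and register/immediate forms, after which the dead destination operand is removed. A compact way to express the same mapping (a hypothetical helper sketch, not part of this commit) would be:

    // Hypothetical sketch, not in r157755: map a handled SUB opcode to the CMP
    // opcode with the same operand encoding, or 0 if the form is not handled.
    static unsigned getCmpOpcodeForSub(unsigned Opc) {
      switch (Opc) {
      default:             return 0;
      case X86::SUB64rr:   return X86::CMP64rr;
      case X86::SUB32rr:   return X86::CMP32rr;
      case X86::SUB16rr:   return X86::CMP16rr;
      case X86::SUB8rr:    return X86::CMP8rr;
      case X86::SUB64ri32: return X86::CMP64ri32;
      case X86::SUB64ri8:  return X86::CMP64ri8;
      case X86::SUB32ri:   return X86::CMP32ri;
      case X86::SUB32ri8:  return X86::CMP32ri8;
      case X86::SUB16ri:   return X86::CMP16ri;
      case X86::SUB16ri8:  return X86::CMP16ri8;
      case X86::SUB8ri:    return X86::CMP8ri;
      }
    }

Setting the new descriptor and then dropping operand 0, as the committed code does per case, completes the SUB -> CMP rewrite.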

Modified: llvm/trunk/lib/Target/X86/X86InstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrInfo.h?rev=157755&r1=157754&r2=157755&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrInfo.h (original)
+++ llvm/trunk/lib/Target/X86/X86InstrInfo.h Thu May 31 12:20:29 2012
@@ -365,6 +365,9 @@
                              const MachineInstr *DefMI, unsigned DefIdx,
                              const MachineInstr *UseMI, unsigned UseIdx) const;
 
+  virtual bool OptimizeSubInstr(MachineInstr *SubInstr,
+                                const MachineRegisterInfo *MRI) const;
+
 private:
   MachineInstr * convertToThreeAddressWithLEA(unsigned MIOpc,
                                               MachineFunction::iterator &MFI,

Modified: llvm/trunk/test/CodeGen/X86/jump_sign.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/jump_sign.ll?rev=157755&r1=157754&r2=157755&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/jump_sign.ll (original)
+++ llvm/trunk/test/CodeGen/X86/jump_sign.ll Thu May 31 12:20:29 2012
@@ -83,3 +83,14 @@
   %cond = select i1 %cmp, i32 %sub, i32 0
   ret i32 %cond
 }
+; rdar://11540023
+define i64 @n(i64 %x, i64 %y) nounwind {
+entry:
+; CHECK: n:
+; CHECK-NOT: sub
+; CHECK: cmp
+  %sub = sub nsw i64 %x, %y
+  %cmp = icmp slt i64 %sub, 0
+  %y.x = select i1 %cmp, i64 %y, i64 %x
+  ret i64 %y.x
+}


_______________________________________________
llvm-commits mailing list
llvm-commits at cs.uiuc.edu
http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits