[llvm-commits] [llvm] r52670 - in /llvm/trunk: lib/Target/X86/X86InstrInfo.cpp test/CodeGen/X86/remat-mov0.ll

Evan Cheng evan.cheng at apple.com
Tue Jun 24 00:10:51 PDT 2008


Author: evancheng
Date: Tue Jun 24 02:10:51 2008
New Revision: 52670

URL: http://llvm.org/viewvc/llvm-project?rev=52670&view=rev
Log:
If it's determined safe, remat MOV32r0 (i.e. xor r, r) and others as it is instead of using the longer MOV32ri instruction.

Added:
    llvm/trunk/test/CodeGen/X86/remat-mov0.ll
Modified:
    llvm/trunk/lib/Target/X86/X86InstrInfo.cpp

Modified: llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrInfo.cpp?rev=52670&r1=52669&r2=52670&view=diff

==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86InstrInfo.cpp Tue Jun 24 02:10:51 2008
@@ -832,6 +832,40 @@
   return true;
 }
 
+/// isSafeToClobberEFLAGS - Return true if it's safe to insert an instruction that
+/// would clobber the EFLAGS condition register. Note the result may be
+/// conservative. If it cannot definitely determine the safety after visiting
+/// two instructions it assumes it's not safe.
+static bool isSafeToClobberEFLAGS(MachineBasicBlock &MBB,
+                                  MachineBasicBlock::iterator I) {
+  // For compile time consideration, if we are not able to determine the
+  // safety after visiting 2 instructions, we will assume it's not safe.
+  for (unsigned i = 0; i < 2; ++i) {
+    if (I == MBB.end())
+      // Reached end of block, it's safe.
+      return true;
+    bool SeenDef = false;
+    for (unsigned j = 0, e = I->getNumOperands(); j != e; ++j) {
+      MachineOperand &MO = I->getOperand(j);
+      if (!MO.isRegister())
+        continue;
+      if (MO.getReg() == X86::EFLAGS) {
+        if (MO.isUse())
+          return false;
+        SeenDef = true;
+      }
+    }
+
+    if (SeenDef)
+      // This instruction defines EFLAGS, no need to look any further.
+      return true;
+    ++I;
+  }
+
+  // Conservative answer.
+  return false;
+}
+
 void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator I,
                                  unsigned DestReg,
@@ -846,25 +880,33 @@
 
   // MOV32r0 etc. are implemented with xor which clobbers condition code.
   // Re-materialize them as movri instructions to avoid side effects.
+  bool Emitted = false;
   switch (Orig->getOpcode()) {
+  default: break;
   case X86::MOV8r0:
-    BuildMI(MBB, I, get(X86::MOV8ri), DestReg).addImm(0);
-    break;
   case X86::MOV16r0:
-    BuildMI(MBB, I, get(X86::MOV16ri), DestReg).addImm(0);
-    break;
   case X86::MOV32r0:
-    BuildMI(MBB, I, get(X86::MOV32ri), DestReg).addImm(0);
-    break;
-  case X86::MOV64r0:
-    BuildMI(MBB, I, get(X86::MOV64ri32), DestReg).addImm(0);
+  case X86::MOV64r0: {
+    if (!isSafeToClobberEFLAGS(MBB, I)) {
+      unsigned Opc = 0;
+      switch (Orig->getOpcode()) {
+      default: break;
+      case X86::MOV8r0:  Opc = X86::MOV8ri;  break;
+      case X86::MOV16r0: Opc = X86::MOV16ri; break;
+      case X86::MOV32r0: Opc = X86::MOV32ri; break;
+      case X86::MOV64r0: Opc = X86::MOV64ri32; break;
+      }
+      BuildMI(MBB, I, get(Opc), DestReg).addImm(0);
+      Emitted = true;
+    }
     break;
-  default: {
+  }
+  }
+
+  if (!Emitted) {
     MachineInstr *MI = Orig->clone();
     MI->getOperand(0).setReg(DestReg);
     MBB.insert(I, MI);
-    break;
-  }
   }
 
   if (ChangeSubIdx) {

Added: llvm/trunk/test/CodeGen/X86/remat-mov0.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/remat-mov0.ll?rev=52670&view=auto

==============================================================================
--- llvm/trunk/test/CodeGen/X86/remat-mov0.ll (added)
+++ llvm/trunk/test/CodeGen/X86/remat-mov0.ll Tue Jun 24 02:10:51 2008
@@ -0,0 +1,45 @@
+; RUN: llvm-as < %s | llc -march=x86 | grep xor | count 3
+
+	%struct.FILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.__sbuf, %struct.__sFILEX*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
+	%struct.ImgT = type { i8, i8*, i8*, %struct.FILE*, i32, i32, i32, i32, i8*, double*, float*, float*, float*, i32*, double, double, i32*, double*, i32*, i32* }
+	%struct._CompT = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, float, float, i8, %struct._PixT*, %struct._CompT*, i8, %struct._CompT* }
+	%struct._PixT = type { i32, i32, %struct._PixT* }
+	%struct.__sFILEX = type opaque
+	%struct.__sbuf = type { i8*, i32 }
+
+declare fastcc void @MergeComponents(%struct._CompT*, %struct._CompT*, %struct._CompT*, %struct._CompT**, %struct.ImgT*) nounwind 
+
+define fastcc void @MergeToLeft(%struct._CompT* %comp, %struct._CompT** %head, %struct.ImgT* %img) nounwind  {
+entry:
+	br label %bb208
+
+bb105:		; preds = %bb200
+	br i1 false, label %bb197, label %bb149
+
+bb149:		; preds = %bb105
+	%tmp151 = getelementptr %struct._CompT* null, i32 0, i32 0		; <i32*> [#uses=1]
+	br i1 false, label %bb184, label %bb193
+
+bb184:		; preds = %bb149
+	tail call fastcc void @MergeComponents( %struct._CompT* %comp, %struct._CompT* null, %struct._CompT* null, %struct._CompT** %head, %struct.ImgT* %img ) nounwind 
+	tail call fastcc void @MergeToLeft( %struct._CompT* %comp, %struct._CompT** %head, %struct.ImgT* %img ) nounwind 
+	br label %bb193
+
+bb193:		; preds = %bb184, %bb149
+	%tmp196 = load i32* %tmp151, align 4		; <i32> [#uses=1]
+	br label %bb197
+
+bb197:		; preds = %bb193, %bb105
+	%last_comp.0 = phi i32 [ %tmp196, %bb193 ], [ 0, %bb105 ]		; <i32> [#uses=0]
+	%indvar.next = add i32 %indvar, 1		; <i32> [#uses=1]
+	br label %bb200
+
+bb200:		; preds = %bb208, %bb197
+	%indvar = phi i32 [ 0, %bb208 ], [ %indvar.next, %bb197 ]		; <i32> [#uses=2]
+	%xm.0 = sub i32 %indvar, 0		; <i32> [#uses=1]
+	%tmp202 = icmp slt i32 %xm.0, 1		; <i1> [#uses=1]
+	br i1 %tmp202, label %bb105, label %bb208
+
+bb208:		; preds = %bb200, %entry
+	br label %bb200
+}





More information about the llvm-commits mailing list