[llvm-commits] [llvm] r161207 - in /llvm/trunk: include/llvm/Target/TargetInstrInfo.h lib/CodeGen/PeepholeOptimizer.cpp lib/Target/X86/X86InstrInfo.h test/CodeGen/X86/sse-minmax.ll test/CodeGen/X86/vec_compare.ll
Manman Ren
mren at apple.com
Thu Aug 2 12:37:32 PDT 2012
Author: mren
Date: Thu Aug 2 14:37:32 2012
New Revision: 161207
URL: http://llvm.org/viewvc/llvm-project?rev=161207&view=rev
Log:
X86 Peephole: fold loads to the source register operand if possible.
Add more comments and use early returns to reduce nesting in isLoadFoldable.
Also disable folding for V_SET0 to avoid introducing a const pool entry and
a const pool load.
rdar://10554090 and rdar://11873276
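For readers skimming the patch: below is a minimal sketch of the predicate this change tightens. It assumes, per the log message, that V_SET0 carries the canFoldAsLoad flag but performs no real memory access, so additionally requiring mayLoad() keeps it from being folded into a constant-pool entry plus a constant-pool load. The helper name `looksFoldable` is illustrative, not LLVM API.

```cpp
#include "llvm/CodeGen/MachineInstr.h"

// Sketch of the tightened guard, mirroring the PeepholeOptimizer diff below.
// V_SET0 zeroes a register without touching memory; demanding mayLoad()
// rejects it even though canFoldAsLoad() is set.
static bool looksFoldable(const llvm::MachineInstr *MI) {
  return MI->canFoldAsLoad() && MI->mayLoad();
}
```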
Modified:
llvm/trunk/include/llvm/Target/TargetInstrInfo.h
llvm/trunk/lib/CodeGen/PeepholeOptimizer.cpp
llvm/trunk/lib/Target/X86/X86InstrInfo.h
llvm/trunk/test/CodeGen/X86/sse-minmax.ll
llvm/trunk/test/CodeGen/X86/vec_compare.ll
Modified: llvm/trunk/include/llvm/Target/TargetInstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Target/TargetInstrInfo.h?rev=161207&r1=161206&r2=161207&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Target/TargetInstrInfo.h (original)
+++ llvm/trunk/include/llvm/Target/TargetInstrInfo.h Thu Aug 2 14:37:32 2012
@@ -696,7 +696,11 @@
/// optimizeLoadInstr - Try to remove the load by folding it to a register
/// operand at the use. We fold the load instructions if and only if the
- /// def and use are in the same BB.
+ /// def and use are in the same BB. We only look at one load and see
+ /// whether it can be folded into MI. FoldAsLoadDefReg is the virtual register
+ /// defined by the load we are trying to fold. DefMI returns the machine
+ /// instruction that defines FoldAsLoadDefReg, and the function returns
+ /// the machine instruction generated due to folding.
virtual MachineInstr* optimizeLoadInstr(MachineInstr *MI,
const MachineRegisterInfo *MRI,
unsigned &FoldAsLoadDefReg,
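The expanded comment fixes the contract: FoldAsLoadDefReg names the virtual register defined by the candidate load, DefMI receives the instruction that defines it, and the return value is the machine instruction produced by folding (or null if nothing was folded). A minimal caller sketch under that contract follows; the helper name `tryFoldLoad`, the erase order, and the reset of FoldAsLoadDefReg are illustrative assumptions, not the actual pass code.

```cpp
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"

// Hypothetical caller following the documented contract of
// optimizeLoadInstr. Returns true if the load was folded into MI.
bool tryFoldLoad(llvm::MachineInstr *MI, const llvm::MachineRegisterInfo *MRI,
                 const llvm::TargetInstrInfo *TII,
                 unsigned &FoldAsLoadDefReg) {
  llvm::MachineInstr *DefMI = 0;
  llvm::MachineInstr *FoldMI =
      TII->optimizeLoadInstr(MI, MRI, FoldAsLoadDefReg, DefMI);
  if (!FoldMI)
    return false;
  // Folding succeeded: FoldMI replaces both the use and the load, so the
  // originals are dead.
  MI->eraseFromParent();
  DefMI->eraseFromParent();
  FoldAsLoadDefReg = 0; // Candidate consumed (an assumption of this sketch).
  return true;
}
```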
Modified: llvm/trunk/lib/CodeGen/PeepholeOptimizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/PeepholeOptimizer.cpp?rev=161207&r1=161206&r2=161207&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/PeepholeOptimizer.cpp (original)
+++ llvm/trunk/lib/CodeGen/PeepholeOptimizer.cpp Thu Aug 2 14:37:32 2012
@@ -391,20 +391,21 @@
/// register defined has a single use.
bool PeepholeOptimizer::isLoadFoldable(MachineInstr *MI,
unsigned &FoldAsLoadDefReg) {
- if (MI->canFoldAsLoad()) {
- const MCInstrDesc &MCID = MI->getDesc();
- if (MCID.getNumDefs() == 1) {
- unsigned Reg = MI->getOperand(0).getReg();
- // To reduce compilation time, we check MRI->hasOneUse when inserting
- // loads. It should be checked when processing uses of the load, since
- // uses can be removed during peephole.
- if (!MI->getOperand(0).getSubReg() &&
- TargetRegisterInfo::isVirtualRegister(Reg) &&
- MRI->hasOneUse(Reg)) {
- FoldAsLoadDefReg = Reg;
- return true;
- }
- }
+ if (!MI->canFoldAsLoad() || !MI->mayLoad())
+ return false;
+ const MCInstrDesc &MCID = MI->getDesc();
+ if (MCID.getNumDefs() != 1)
+ return false;
+
+ unsigned Reg = MI->getOperand(0).getReg();
+ // To reduce compilation time, we check MRI->hasOneUse when inserting
+ // loads. It should be checked when processing uses of the load, since
+ // uses can be removed during peephole.
+ if (!MI->getOperand(0).getSubReg() &&
+ TargetRegisterInfo::isVirtualRegister(Reg) &&
+ MRI->hasOneUse(Reg)) {
+ FoldAsLoadDefReg = Reg;
+ return true;
}
return false;
}
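Putting the two pieces together: a basic-block walk can use isLoadFoldable to remember a candidate load's def and then attempt the fold at a later instruction that reads that register. The loop below is a sketch built on the `tryFoldLoad` helper from the earlier sketch, not a copy of PeepholeOptimizer::runOnMachineFunction; isLoadFoldable is declared as a free function here purely to keep the example self-contained.

```cpp
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"

// As in the diff above (there a PeepholeOptimizer member function).
bool isLoadFoldable(llvm::MachineInstr *MI, unsigned &FoldAsLoadDefReg);
// From the earlier sketch.
bool tryFoldLoad(llvm::MachineInstr *MI, const llvm::MachineRegisterInfo *MRI,
                 const llvm::TargetInstrInfo *TII, unsigned &FoldAsLoadDefReg);

void walkBlock(llvm::MachineBasicBlock &MBB,
               const llvm::MachineRegisterInfo *MRI,
               const llvm::TargetInstrInfo *TII) {
  unsigned FoldAsLoadDefReg = 0; // 0 = no candidate load recorded yet.
  for (llvm::MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
       I != E;) {
    llvm::MachineInstr *MI = &*I;
    ++I; // Advance first; a successful fold erases MI.
    if (isLoadFoldable(MI, FoldAsLoadDefReg))
      continue; // Remember this load; a later use may absorb it.
    if (FoldAsLoadDefReg && MI->readsRegister(FoldAsLoadDefReg))
      tryFoldLoad(MI, MRI, TII, FoldAsLoadDefReg);
  }
}
```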
Modified: llvm/trunk/lib/Target/X86/X86InstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrInfo.h?rev=161207&r1=161206&r2=161207&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrInfo.h (original)
+++ llvm/trunk/lib/Target/X86/X86InstrInfo.h Thu Aug 2 14:37:32 2012
@@ -389,7 +389,11 @@
/// optimizeLoadInstr - Try to remove the load by folding it to a register
/// operand at the use. We fold the load instructions if and only if the
- /// def and use are in the same BB.
+ /// def and use are in the same BB. We only look at one load and see
+ /// whether it can be folded into MI. FoldAsLoadDefReg is the virtual register
+ /// defined by the load we are trying to fold. DefMI returns the machine
+ /// instruction that defines FoldAsLoadDefReg, and the function returns
+ /// the machine instruction generated due to folding.
virtual MachineInstr* optimizeLoadInstr(MachineInstr *MI,
const MachineRegisterInfo *MRI,
unsigned &FoldAsLoadDefReg,
Modified: llvm/trunk/test/CodeGen/X86/sse-minmax.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse-minmax.ll?rev=161207&r1=161206&r2=161207&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse-minmax.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse-minmax.ll Thu Aug 2 14:37:32 2012
@@ -137,13 +137,16 @@
}
; CHECK: ogt_x:
-; CHECK-NEXT: maxsd LCP{{.*}}(%rip), %xmm0
+; CHECK-NEXT: xorp{{[sd]}} %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm1, %xmm0
; CHECK-NEXT: ret
; UNSAFE: ogt_x:
-; UNSAFE-NEXT: maxsd LCP{{.*}}(%rip), %xmm0
+; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
+; UNSAFE-NEXT: maxsd %xmm1, %xmm0
; UNSAFE-NEXT: ret
; FINITE: ogt_x:
-; FINITE-NEXT: maxsd LCP{{.*}}(%rip), %xmm0
+; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
+; FINITE-NEXT: maxsd %xmm1, %xmm0
; FINITE-NEXT: ret
define double @ogt_x(double %x) nounwind {
%c = fcmp ogt double %x, 0.000000e+00
@@ -152,13 +155,16 @@
}
; CHECK: olt_x:
-; CHECK-NEXT: minsd LCP{{.*}}(%rip), %xmm0
+; CHECK-NEXT: xorp{{[sd]}} %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm1, %xmm0
; CHECK-NEXT: ret
; UNSAFE: olt_x:
-; UNSAFE-NEXT: minsd LCP{{.*}}(%rip), %xmm0
+; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
+; UNSAFE-NEXT: minsd %xmm1, %xmm0
; UNSAFE-NEXT: ret
; FINITE: olt_x:
-; FINITE-NEXT: minsd LCP{{.*}}(%rip), %xmm0
+; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
+; FINITE-NEXT: minsd %xmm1, %xmm0
; FINITE-NEXT: ret
define double @olt_x(double %x) nounwind {
%c = fcmp olt double %x, 0.000000e+00
@@ -211,10 +217,12 @@
; CHECK: oge_x:
; CHECK: ucomisd %xmm1, %xmm0
; UNSAFE: oge_x:
-; UNSAFE-NEXT: maxsd LCP{{.*}}(%rip), %xmm0
+; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
+; UNSAFE-NEXT: maxsd %xmm1, %xmm0
; UNSAFE-NEXT: ret
; FINITE: oge_x:
-; FINITE-NEXT: maxsd LCP{{.*}}(%rip), %xmm0
+; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
+; FINITE-NEXT: maxsd %xmm1, %xmm0
; FINITE-NEXT: ret
define double @oge_x(double %x) nounwind {
%c = fcmp oge double %x, 0.000000e+00
@@ -225,10 +233,12 @@
; CHECK: ole_x:
; CHECK: ucomisd %xmm0, %xmm1
; UNSAFE: ole_x:
-; UNSAFE-NEXT: minsd LCP{{.*}}(%rip), %xmm0
+; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
+; UNSAFE-NEXT: minsd %xmm1, %xmm0
; UNSAFE-NEXT: ret
; FINITE: ole_x:
-; FINITE-NEXT: minsd LCP{{.*}}(%rip), %xmm0
+; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
+; FINITE-NEXT: minsd %xmm1, %xmm0
; FINITE-NEXT: ret
define double @ole_x(double %x) nounwind {
%c = fcmp ole double %x, 0.000000e+00
@@ -401,10 +411,12 @@
; CHECK: ugt_x:
; CHECK: ucomisd %xmm0, %xmm1
; UNSAFE: ugt_x:
-; UNSAFE-NEXT: maxsd LCP{{.*}}(%rip), %xmm0
+; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
+; UNSAFE-NEXT: maxsd %xmm1, %xmm0
; UNSAFE-NEXT: ret
; FINITE: ugt_x:
-; FINITE-NEXT: maxsd LCP{{.*}}(%rip), %xmm0
+; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
+; FINITE-NEXT: maxsd %xmm1, %xmm0
; FINITE-NEXT: ret
define double @ugt_x(double %x) nounwind {
%c = fcmp ugt double %x, 0.000000e+00
@@ -415,10 +427,12 @@
; CHECK: ult_x:
; CHECK: ucomisd %xmm1, %xmm0
; UNSAFE: ult_x:
-; UNSAFE-NEXT: minsd LCP{{.*}}(%rip), %xmm0
+; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
+; UNSAFE-NEXT: minsd %xmm1, %xmm0
; UNSAFE-NEXT: ret
; FINITE: ult_x:
-; FINITE-NEXT: minsd LCP{{.*}}(%rip), %xmm0
+; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
+; FINITE-NEXT: minsd %xmm1, %xmm0
; FINITE-NEXT: ret
define double @ult_x(double %x) nounwind {
%c = fcmp ult double %x, 0.000000e+00
@@ -468,10 +482,12 @@
; CHECK-NEXT: movap{{[sd]}} %xmm1, %xmm0
; CHECK-NEXT: ret
; UNSAFE: uge_x:
-; UNSAFE-NEXT: maxsd LCP{{.*}}(%rip), %xmm0
+; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
+; UNSAFE-NEXT: maxsd %xmm1, %xmm0
; UNSAFE-NEXT: ret
; FINITE: uge_x:
-; FINITE-NEXT: maxsd LCP{{.*}}(%rip), %xmm0
+; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
+; FINITE-NEXT: maxsd %xmm1, %xmm0
; FINITE-NEXT: ret
define double @uge_x(double %x) nounwind {
%c = fcmp uge double %x, 0.000000e+00
@@ -485,10 +501,12 @@
; CHECK-NEXT: movap{{[sd]}} %xmm1, %xmm0
; CHECK-NEXT: ret
; UNSAFE: ule_x:
-; UNSAFE-NEXT: minsd LCP{{.*}}(%rip), %xmm0
+; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
+; UNSAFE-NEXT: minsd %xmm1, %xmm0
; UNSAFE-NEXT: ret
; FINITE: ule_x:
-; FINITE-NEXT: minsd LCP{{.*}}(%rip), %xmm0
+; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
+; FINITE-NEXT: minsd %xmm1, %xmm0
; FINITE-NEXT: ret
define double @ule_x(double %x) nounwind {
%c = fcmp ule double %x, 0.000000e+00
@@ -497,7 +515,8 @@
}
; CHECK: uge_inverse_x:
-; CHECK-NEXT: minsd LCP{{.*}}(%rip), %xmm0
+; CHECK-NEXT: xorp{{[sd]}} %xmm1, %xmm1
+; CHECK-NEXT: minsd %xmm1, %xmm0
; CHECK-NEXT: ret
; UNSAFE: uge_inverse_x:
; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
@@ -516,7 +535,8 @@
}
; CHECK: ule_inverse_x:
-; CHECK-NEXT: maxsd LCP{{.*}}(%rip), %xmm0
+; CHECK-NEXT: xorp{{[sd]}} %xmm1, %xmm1
+; CHECK-NEXT: maxsd %xmm1, %xmm0
; CHECK-NEXT: ret
; UNSAFE: ule_inverse_x:
; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
Modified: llvm/trunk/test/CodeGen/X86/vec_compare.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vec_compare.ll?rev=161207&r1=161206&r2=161207&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vec_compare.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vec_compare.ll Thu Aug 2 14:37:32 2012
@@ -14,8 +14,8 @@
define <4 x i32> @test2(<4 x i32> %A, <4 x i32> %B) nounwind {
; CHECK: test2:
; CHECK: pcmp
-; CHECK: pxor LCP
-; CHECK: movdqa
+; CHECK: pcmp
+; CHECK: pxor
; CHECK: ret
%C = icmp sge <4 x i32> %A, %B
%D = sext <4 x i1> %C to <4 x i32>