[llvm] r294636 - X86: Introduce relocImm-based patterns for cmp.
Peter Collingbourne via llvm-commits
llvm-commits@lists.llvm.org
Thu Feb 9 14:02:29 PST 2017
Author: pcc
Date: Thu Feb 9 16:02:28 2017
New Revision: 294636
URL: http://llvm.org/viewvc/llvm-project?rev=294636&view=rev
Log:
X86: Introduce relocImm-based patterns for cmp.
Differential Revision: https://reviews.llvm.org/D28690
Added:
llvm/trunk/test/CodeGen/X86/absolute-cmp.ll
Modified:
llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp
llvm/trunk/lib/Target/X86/X86InstrCompiler.td
llvm/trunk/lib/Target/X86/X86InstrInfo.td
Modified: llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp?rev=294636&r1=294635&r2=294636&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp Thu Feb 9 16:02:28 2017
@@ -384,6 +384,16 @@ namespace {
bool ComplexPatternFuncMutatesDAG() const override {
return true;
}
+
+ bool isSExtAbsoluteSymbolRef(unsigned Width, SDNode *N) const;
+
+ /// Returns whether this is a relocatable immediate in the range
+ /// [-2^Width .. 2^Width-1].
+ template <unsigned Width> bool isSExtRelocImm(SDNode *N) const {
+ if (auto *CN = dyn_cast<ConstantSDNode>(N))
+ return isInt<Width>(CN->getSExtValue());
+ return isSExtAbsoluteSymbolRef(Width, N);
+ }
};
}
@@ -1789,6 +1799,21 @@ SDNode *X86DAGToDAGISel::getGlobalBaseRe
return CurDAG->getRegister(GlobalBaseReg, TLI->getPointerTy(DL)).getNode();
}
+bool X86DAGToDAGISel::isSExtAbsoluteSymbolRef(unsigned Width, SDNode *N) const {
+ if (N->getOpcode() == ISD::TRUNCATE)
+ N = N->getOperand(0).getNode();
+ if (N->getOpcode() != X86ISD::Wrapper)
+ return false;
+
+ auto *GA = dyn_cast<GlobalAddressSDNode>(N->getOperand(0));
+ if (!GA)
+ return false;
+
+ Optional<ConstantRange> CR = GA->getGlobal()->getAbsoluteSymbolRange();
+ return CR && CR->getSignedMin().sge(-1ull << Width) &&
+ CR->getSignedMax().slt(1ull << Width);
+}
+
/// Test whether the given X86ISD::CMP node has any uses which require the SF
/// or OF bits to be accurate.
static bool hasNoSignedComparisonUses(SDNode *N) {
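
For readers following the predicate logic, here is a rough standalone sketch (not part of the commit) of the window test isSExtAbsoluteSymbolRef applies, with plain int64_t bounds standing in for llvm::ConstantRange; the function name and signature are illustrative only:

  #include <cstdint>

  // Mirrors the committed check: accept a symbol whose entire absolute
  // range lies inside [-2^Width, 2^Width - 1], matching the comment on
  // isSExtRelocImm above. Assumes Width < 63 so the shifts cannot overflow.
  bool fitsSExtWindow(unsigned Width, int64_t SignedMin, int64_t SignedMax) {
    int64_t Lo = -(int64_t(1) << Width); // CR->getSignedMin().sge(-1ull << Width)
    int64_t Hi = int64_t(1) << Width;    // exclusive: CR->getSignedMax().slt(1ull << Width)
    return SignedMin >= Lo && SignedMax < Hi;
  }
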
Modified: llvm/trunk/lib/Target/X86/X86InstrCompiler.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrCompiler.td?rev=294636&r1=294635&r2=294636&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrCompiler.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrCompiler.td Thu Feb 9 16:02:28 2017
@@ -1743,6 +1743,12 @@ def : Pat<(X86sub_flag 0, GR16:$src), (N
def : Pat<(X86sub_flag 0, GR32:$src), (NEG32r GR32:$src)>;
def : Pat<(X86sub_flag 0, GR64:$src), (NEG64r GR64:$src)>;
+// sub reg, relocImm
+def : Pat<(X86sub_flag GR64:$src1, i64relocImmSExt8_su:$src2),
+ (SUB64ri8 GR64:$src1, i64relocImmSExt8_su:$src2)>;
+def : Pat<(X86sub_flag GR64:$src1, i64relocImmSExt32_su:$src2),
+ (SUB64ri32 GR64:$src1, i64relocImmSExt32_su:$src2)>;
+
// mul reg, reg
def : Pat<(mul GR16:$src1, GR16:$src2),
(IMUL16rr GR16:$src1, GR16:$src2)>;
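
These patterns give cmp (lowered as X86sub_flag) the same two immediate encodings that plain sub already has: SUB64ri8 carries a sign-extended 8-bit immediate and SUB64ri32 a sign-extended 32-bit one, while anything wider must be materialized in a register. A hedged sketch of that encoding choice (the enum and function below are hypothetical, not LLVM API):

  #include <cstdint>

  enum class SubForm { Sub64ri8, Sub64ri32, NeedsRegister };

  // Illustrative only: choose the smallest encoding the immediate fits in.
  SubForm pickSubForm(int64_t Imm) {
    if (Imm >= INT8_MIN && Imm <= INT8_MAX)
      return SubForm::Sub64ri8;    // ri8 form: 1 immediate byte
    if (Imm >= INT32_MIN && Imm <= INT32_MAX)
      return SubForm::Sub64ri32;   // ri32 form: 4 immediate bytes
    return SubForm::NeedsRegister; // 64-bit imm must be moved to a reg first
  }
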
Modified: llvm/trunk/lib/Target/X86/X86InstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrInfo.td?rev=294636&r1=294635&r2=294636&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrInfo.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrInfo.td Thu Feb 9 16:02:28 2017
@@ -933,6 +933,15 @@ def i32immSExt8 : ImmLeaf<i32, [{ retur
def i64immSExt8 : ImmLeaf<i64, [{ return isInt<8>(Imm); }]>;
def i64immSExt32 : ImmLeaf<i64, [{ return isInt<32>(Imm); }]>;
+// FIXME: Ideally we would just replace the above i*immSExt* matchers with
+// relocImm-based matchers, but then FastISel would be unable to use them.
+def i64relocImmSExt8 : PatLeaf<(i64 relocImm), [{
+ return isSExtRelocImm<8>(N);
+}]>;
+def i64relocImmSExt32 : PatLeaf<(i64 relocImm), [{
+ return isSExtRelocImm<32>(N);
+}]>;
+
// If we have multiple users of an immediate, it's much smaller to reuse
// the register, rather than encode the immediate in every instruction.
// This has the risk of increasing register pressure from stretched live
@@ -973,6 +982,13 @@ def i64immSExt8_su : PatLeaf<(i64immSExt
return !shouldAvoidImmediateInstFormsForSize(N);
}]>;
+def i64relocImmSExt8_su : PatLeaf<(i64relocImmSExt8), [{
+ return !shouldAvoidImmediateInstFormsForSize(N);
+}]>;
+def i64relocImmSExt32_su : PatLeaf<(i64relocImmSExt32), [{
+ return !shouldAvoidImmediateInstFormsForSize(N);
+}]>;
+
// i64immZExt32 predicate - True if the 64-bit immediate fits in a 32-bit
// unsigned field.
def i64immZExt32 : ImmLeaf<i64, [{ return isUInt<32>(Imm); }]>;
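
The _su variants gate the new leaves on the same code-size heuristic as the existing immediate leaves. The sketch below is hypothetical and only illustrates the trade-off described in the comment above; the real shouldAvoidImmediateInstFormsForSize inspects the DAG node and subtarget rather than taking parameters like these:

  // Hypothetical illustration: under -Os/-Oz an immediate with several
  // uses is smaller to materialize in a register once than to re-encode
  // in every user instruction.
  bool preferRegisterForImm(bool OptForSize, unsigned NumUses,
                            unsigned ImmBytes) {
    // Each immediate-form use pays ImmBytes of encoding; the register
    // form pays it once, at the materializing MOV.
    return OptForSize && NumUses > 1 && ImmBytes >= 2;
  }
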
Added: llvm/trunk/test/CodeGen/X86/absolute-cmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/absolute-cmp.ll?rev=294636&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/absolute-cmp.ll (added)
+++ llvm/trunk/test/CodeGen/X86/absolute-cmp.ll Thu Feb 9 16:02:28 2017
@@ -0,0 +1,39 @@
+; RUN: llc < %s | FileCheck %s
+; RUN: llc -relocation-model=pic < %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@cmp8 = external hidden global i8, !absolute_symbol !0
+@cmp32 = external hidden global i8, !absolute_symbol !1
+
+declare void @f()
+
+define void @foo8(i64 %val) {
+ ; CHECK: cmpq $cmp8@ABS8, %rdi
+ %cmp = icmp ule i64 %val, ptrtoint (i8* @cmp8 to i64)
+ br i1 %cmp, label %t, label %f
+
+t:
+ call void @f()
+ ret void
+
+f:
+ ret void
+}
+
+define void @foo32(i64 %val) {
+ ; CHECK: cmpq $cmp32, %rdi
+ %cmp = icmp ule i64 %val, ptrtoint (i8* @cmp32 to i64)
+ br i1 %cmp, label %t, label %f
+
+t:
+ call void @f()
+ ret void
+
+f:
+ ret void
+}
+
+!0 = !{i64 0, i64 128}
+!1 = !{i64 0, i64 2147483648}
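
As a cross-check (again illustrative, not part of the tree), the two !absolute_symbol ranges line up with those sign-extension windows; the ranges are half-open [Lo, Hi), so !0 covers [0, 128) and !1 covers [0, 2^31):

  #include <cassert>
  #include <cstdint>

  // Same illustrative window as before: [-2^Width, 2^Width - 1]. For a
  // non-wrapping range, SignedMax == HiExcl - 1, so the committed strict
  // comparison becomes <= here.
  static bool rangeFitsSExt(unsigned Width, int64_t Lo, int64_t HiExcl) {
    return Lo >= -(int64_t(1) << Width) && HiExcl <= (int64_t(1) << Width);
  }

  int main() {
    assert(rangeFitsSExt(8, 0, 128));         // !0: selected as cmpq $cmp8@ABS8
    assert(rangeFitsSExt(32, 0, 2147483648)); // !1: selected as a plain imm32
    assert(!rangeFitsSExt(8, 0, 2147483648)); // too wide for the imm8 form
    return 0;
  }
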