[llvm] r346170 - [X86] Don't turn any_extend from a mask register into a sign_extend during lowering. Add patterns to match any_extend during isel instead.
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Mon Nov 5 14:08:17 PST 2018
Author: ctopper
Date: Mon Nov 5 14:08:17 2018
New Revision: 346170
URL: http://llvm.org/viewvc/llvm-project?rev=346170&view=rev
Log:
[X86] Don't turn any_extend from a mask register into a sign_extend during lowering. Add patterns to match any_extend during isel instead.
SimplifyDemandedBits can turn the sign_extend back into an any_extend and trigger an infinite loop. So instead, legalize it the same way as a sign_extend but preserve the opcode, and then pattern match it the same way as sign_extend during isel.
I don't have a reduced test case for such an infinite loop yet.
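A minimal sketch of the idea, written as a hypothetical standalone helper (the real change is inside LowerSIGN_EXTEND_Mask in the diff below): reuse the node's own opcode when building the widened extend instead of hard-coding ISD::SIGN_EXTEND.

    // Sketch only (hypothetical helper, not the actual LLVM function):
    // extend a vXi1 mask value to a wide integer vector while preserving the
    // original extension opcode.  Forcing ISD::SIGN_EXTEND here would let
    // SimplifyDemandedBits turn the node back into ANY_EXTEND and re-enter
    // lowering; keeping Op.getOpcode() leaves the node stable so an ordinary
    // isel pattern can match it later.
    static SDValue extendMaskPreservingOpcode(SDValue Op, const SDLoc &DL,
                                              MVT WideVT, SelectionDAG &DAG) {
      SDValue Mask = Op.getOperand(0);              // vXi1 input mask
      unsigned ExtOpc = Op.getOpcode();             // SIGN_EXTEND or ANY_EXTEND
      return DAG.getNode(ExtOpc, DL, WideVT, Mask); // opcode unchanged
    }

The isel side then only needs the extra anyext patterns added to X86InstrAVX512.td below, since the DAG node now survives with its original opcode.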
Modified:
llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
llvm/trunk/lib/Target/X86/X86InstrAVX512.td
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=346170&r1=346169&r2=346170&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Mon Nov 5 14:08:17 2018
@@ -19697,7 +19697,7 @@ static SDValue LowerSIGN_EXTEND_Mask(SDV
if (!Subtarget.hasBWI() && VTElt.getSizeInBits() <= 16) {
// If v16i32 is to be avoided, we'll need to split and concatenate.
if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
- return SplitAndExtendv16i1(ISD::SIGN_EXTEND, VT, In, dl, DAG);
+ return SplitAndExtendv16i1(Op.getOpcode(), VT, In, dl, DAG);
ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
}
@@ -19716,7 +19716,7 @@ static SDValue LowerSIGN_EXTEND_Mask(SDV
MVT WideEltVT = WideVT.getVectorElementType();
if ((Subtarget.hasDQI() && WideEltVT.getSizeInBits() >= 32) ||
(Subtarget.hasBWI() && WideEltVT.getSizeInBits() <= 16)) {
- V = DAG.getNode(ISD::SIGN_EXTEND, dl, WideVT, In);
+ V = DAG.getNode(Op.getOpcode(), dl, WideVT, In);
} else {
SDValue NegOne = getOnesVector(WideVT, DAG, dl);
SDValue Zero = getZeroVector(WideVT, Subtarget, DAG, dl);
Modified: llvm/trunk/lib/Target/X86/X86InstrAVX512.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrAVX512.td?rev=346170&r1=346169&r2=346170&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrAVX512.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrAVX512.td Mon Nov 5 14:08:17 2018
@@ -9958,6 +9958,10 @@ def rr : AVX512XS8I<opc, MRMSrcReg, (out
!strconcat(OpcodeStr##Vec.Suffix, "\t{$src, $dst|$dst, $src}"),
[(set Vec.RC:$dst, (Vec.VT (sext Vec.KRC:$src)))]>,
EVEX, Sched<[WriteMove]>; // TODO - WriteVecTrunc?
+
+// Also need a pattern for anyextend.
+def : Pat<(Vec.VT (anyext Vec.KRC:$src)),
+ (!cast<Instruction>(NAME#"rr") Vec.KRC:$src)>;
}
multiclass cvt_mask_by_elt_width<bits<8> opc, AVX512VLVectorVTInfo VTInfo,
@@ -10031,11 +10035,19 @@ let Predicates = [HasDQI, NoBWI] in {
(VPMOVDBZrr (v16i32 (VPMOVM2DZrr VK16:$src)))>;
def : Pat<(v16i16 (sext (v16i1 VK16:$src))),
(VPMOVDWZrr (v16i32 (VPMOVM2DZrr VK16:$src)))>;
+
+ def : Pat<(v16i8 (anyext (v16i1 VK16:$src))),
+ (VPMOVDBZrr (v16i32 (VPMOVM2DZrr VK16:$src)))>;
+ def : Pat<(v16i16 (anyext (v16i1 VK16:$src))),
+ (VPMOVDWZrr (v16i32 (VPMOVM2DZrr VK16:$src)))>;
}
let Predicates = [HasDQI, NoBWI, HasVLX] in {
def : Pat<(v8i16 (sext (v8i1 VK8:$src))),
(VPMOVDWZ256rr (v8i32 (VPMOVM2DZ256rr VK8:$src)))>;
+
+ def : Pat<(v8i16 (anyext (v8i1 VK8:$src))),
+ (VPMOVDWZ256rr (v8i32 (VPMOVM2DZ256rr VK8:$src)))>;
}
//===----------------------------------------------------------------------===//