[llvm] r350038 - [X86] Remove the ANDN check from EmitTest.
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Sun Dec 23 17:10:14 PST 2018
Author: ctopper
Date: Sun Dec 23 17:10:13 2018
New Revision: 350038
URL: http://llvm.org/viewvc/llvm-project?rev=350038&view=rev
Log:
[X86] Remove the ANDN check from EmitTest.
Remove the TESTmr isel patterns and add another postprocessing combine for TESTrr+ANDrm->TESTmr. We already have a postprocessing combine for TESTrr+ANDrr->TESTrr. With this, ANDN gets a chance to match first, and we clean it up during postprocessing if we ended up with just a regular AND.
This is another step towards my plan to gut EmitTest and do more flag handling during isel matching or by using optimizeCompare.
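To make the motivation concrete, here is a hypothetical pair of flag-only AND uses (illustrative C++, not taken from the patch or its tests; function names are invented): the intent is that with BMI the first is free to select ANDN and reuse its flags, while the second is initially selected as AND32rm + TEST32rr and folded back into TEST32mr by the new postprocessing combine.

    // Illustrative only; the codegen described in the comments is the intent
    // of this patch, not a guaranteed contract.
    bool andn_candidate(unsigned x, unsigned y) {
      return (~x & y) == 0;   // (~x & y): ANDN candidate when BMI is available.
    }
    bool test_mr_candidate(unsigned x, const unsigned *p) {
      return (x & *p) == 0;   // and-with-load: AND32rm + TEST32rr -> TEST32mr.
    }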
Modified:
llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp
llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
llvm/trunk/lib/Target/X86/X86InstrArithmetic.td
Modified: llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp?rev=350038&r1=350037&r2=350038&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp Sun Dec 23 17:10:13 2018
@@ -921,6 +921,30 @@ void X86DAGToDAGISel::PostprocessISelDAG
MadeChange = true;
continue;
}
+ if (N0Opc == X86::AND8rm || N0Opc == X86::AND16rm ||
+ N0Opc == X86::AND32rm || N0Opc == X86::AND64rm) {
+ unsigned NewOpc;
+ switch (N0Opc) {
+ case X86::AND8rm: NewOpc = X86::TEST8mr; break;
+ case X86::AND16rm: NewOpc = X86::TEST16mr; break;
+ case X86::AND32rm: NewOpc = X86::TEST32mr; break;
+ case X86::AND64rm: NewOpc = X86::TEST64mr; break;
+ }
+
+ // Need to swap the memory and register operand.
+ SDValue Ops[] = { And.getOperand(1),
+ And.getOperand(2),
+ And.getOperand(3),
+ And.getOperand(4),
+ And.getOperand(5),
+ And.getOperand(0),
+ And.getOperand(6) /* Chain */ };
+ MachineSDNode *Test = CurDAG->getMachineNode(NewOpc, SDLoc(N),
+ MVT::i32, MVT::Other, Ops);
+ ReplaceUses(N, Test);
+ MadeChange = true;
+ continue;
+ }
}
// Attempt to remove vectors moves that were inserted to zero upper bits.
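For reference, a minimal sketch (not LLVM code) of the operand shuffle the new combine performs, assuming the usual x86 five-operand memory reference (base, scale, index, displacement, segment) plus a chain, as reflected in the Ops[] array above:

    #include <array>

    // AND32rm machine node operands: { reg, base, scale, index, disp, segment,
    // chain }; TEST32mr wants { base, scale, index, disp, segment, reg, chain },
    // so the register operand is rotated behind the five memory operands.
    template <typename Op>
    std::array<Op, 7> andRMToTestMROperands(const std::array<Op, 7> &AndOps) {
      return {AndOps[1], AndOps[2], AndOps[3], AndOps[4],
              AndOps[5], AndOps[0], AndOps[6] /* chain */};
    }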
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=350038&r1=350037&r2=350038&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Sun Dec 23 17:10:13 2018
@@ -18696,57 +18696,52 @@ static SDValue EmitTest(SDValue Op, unsi
SDValue Op0 = ArithOp->getOperand(0);
SDValue Op1 = ArithOp->getOperand(1);
EVT VT = ArithOp.getValueType();
- bool isAndn = isBitwiseNot(Op0) || isBitwiseNot(Op1);
- bool isLegalAndnType = VT == MVT::i32 || VT == MVT::i64;
- bool isProperAndn = isAndn && isLegalAndnType && Subtarget.hasBMI();
-
- // If we cannot select an ANDN instruction, check if we can replace
- // AND+IMM64 with a shift before giving up. This is possible for masks
- // like 0xFF000000 or 0x00FFFFFF and if we care only about the zero flag.
- if (!isProperAndn) {
- if (!ZeroCheck)
- break;
-
- // And with cosntant should be canonicalized unless we're dealing
- // with opaque constants.
- assert((!isa<ConstantSDNode>(Op0) ||
- (isa<ConstantSDNode>(Op1) &&
- (cast<ConstantSDNode>(Op0)->isOpaque() ||
- cast<ConstantSDNode>(Op1)->isOpaque()))) &&
- "AND node isn't canonicalized");
- auto *CN = dyn_cast<ConstantSDNode>(Op1);
- if (!CN)
- break;
-
- const APInt &Mask = CN->getAPIntValue();
- if (Mask.isSignedIntN(ShiftToAndMaxMaskWidth))
- break; // Prefer TEST instruction.
-
- unsigned BitWidth = Mask.getBitWidth();
- unsigned LeadingOnes = Mask.countLeadingOnes();
- unsigned TrailingZeros = Mask.countTrailingZeros();
-
- if (LeadingOnes + TrailingZeros == BitWidth) {
- assert(TrailingZeros < VT.getSizeInBits() &&
- "Shift amount should be less than the type width");
- SDValue ShAmt = DAG.getConstant(TrailingZeros, dl, MVT::i8);
- Op = DAG.getNode(ISD::SRL, dl, VT, Op0, ShAmt);
- break;
- }
-
- unsigned LeadingZeros = Mask.countLeadingZeros();
- unsigned TrailingOnes = Mask.countTrailingOnes();
-
- if (LeadingZeros + TrailingOnes == BitWidth) {
- assert(LeadingZeros < VT.getSizeInBits() &&
- "Shift amount should be less than the type width");
- SDValue ShAmt = DAG.getConstant(LeadingZeros, dl, MVT::i8);
- Op = DAG.getNode(ISD::SHL, dl, VT, Op0, ShAmt);
- break;
- }
+ // Check if we can replace AND+IMM64 with a shift before giving up. This
+ // is possible for masks like 0xFF000000 or 0x00FFFFFF and if we care
+ // only about the zero flag.
+ if (!ZeroCheck)
+ break;
+
+ // And with constant should be canonicalized unless we're dealing
+ // with opaque constants.
+ assert((!isa<ConstantSDNode>(Op0) ||
+ (isa<ConstantSDNode>(Op1) &&
+ (cast<ConstantSDNode>(Op0)->isOpaque() ||
+ cast<ConstantSDNode>(Op1)->isOpaque()))) &&
+ "AND node isn't canonicalized");
+ auto *CN = dyn_cast<ConstantSDNode>(Op1);
+ if (!CN)
+ break;
+
+ const APInt &Mask = CN->getAPIntValue();
+ if (Mask.isSignedIntN(ShiftToAndMaxMaskWidth))
+ break; // Prefer TEST instruction.
+
+ unsigned BitWidth = Mask.getBitWidth();
+ unsigned LeadingOnes = Mask.countLeadingOnes();
+ unsigned TrailingZeros = Mask.countTrailingZeros();
+
+ if (LeadingOnes + TrailingZeros == BitWidth) {
+ assert(TrailingZeros < VT.getSizeInBits() &&
+ "Shift amount should be less than the type width");
+ SDValue ShAmt = DAG.getConstant(TrailingZeros, dl, MVT::i8);
+ Op = DAG.getNode(ISD::SRL, dl, VT, Op0, ShAmt);
break;
}
+
+ unsigned LeadingZeros = Mask.countLeadingZeros();
+ unsigned TrailingOnes = Mask.countTrailingOnes();
+
+ if (LeadingZeros + TrailingOnes == BitWidth) {
+ assert(LeadingZeros < VT.getSizeInBits() &&
+ "Shift amount should be less than the type width");
+ SDValue ShAmt = DAG.getConstant(LeadingZeros, dl, MVT::i8);
+ Op = DAG.getNode(ISD::SHL, dl, VT, Op0, ShAmt);
+ break;
+ }
+
+ break;
}
LLVM_FALLTHROUGH;
case ISD::SUB:
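As a worked example of the mask-to-shift rewrite kept (and de-indented) above, here is a small standalone check; it is illustrative C++ using assumed 64-bit masks too wide for TEST's signed 32-bit immediate, and the equivalence only holds when the sole consumer is the zero flag:

    #include <cassert>
    #include <cstdint>

    int main() {
      // Ones-at-the-top mask: leading ones + trailing zeros == bit width,
      // so (x & mask) == 0 can become (x >> trailing_zeros) == 0.
      // Ones-at-the-bottom mask: leading zeros + trailing ones == bit width,
      // so (x & mask) == 0 can become (x << leading_zeros) == 0.
      for (uint64_t x : {UINT64_C(0), UINT64_C(1), UINT64_C(0xFF),
                         UINT64_C(0xFF00000000000000),
                         UINT64_C(0x0123456789ABCDEF)}) {
        assert(((x & UINT64_C(0xFF00000000000000)) == 0) == ((x >> 56) == 0));
        assert(((x & UINT64_C(0x00000000FFFFFFFF)) == 0) == ((x << 32) == 0));
      }
      return 0;
    }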
Modified: llvm/trunk/lib/Target/X86/X86InstrArithmetic.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrArithmetic.td?rev=350038&r1=350037&r2=350038&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrArithmetic.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrArithmetic.td Sun Dec 23 17:10:13 2018
@@ -1221,10 +1221,12 @@ let isCompare = 1 in {
def TEST64rr : BinOpRR_F<0x84, "test", Xi64, null_frag>;
} // isCommutable
- def TEST8mr : BinOpMR_F<0x84, "test", Xi8 , X86testpat>;
- def TEST16mr : BinOpMR_F<0x84, "test", Xi16, X86testpat>;
- def TEST32mr : BinOpMR_F<0x84, "test", Xi32, X86testpat>;
- def TEST64mr : BinOpMR_F<0x84, "test", Xi64, X86testpat>;
+ let hasSideEffects = 0, mayLoad = 1 in {
+ def TEST8mr : BinOpMR_F<0x84, "test", Xi8 , null_frag>;
+ def TEST16mr : BinOpMR_F<0x84, "test", Xi16, null_frag>;
+ def TEST32mr : BinOpMR_F<0x84, "test", Xi32, null_frag>;
+ def TEST64mr : BinOpMR_F<0x84, "test", Xi64, null_frag>;
+ }
def TEST8ri : BinOpRI_F<0xF6, "test", Xi8 , X86testpat, MRM0r>;
def TEST16ri : BinOpRI_F<0xF6, "test", Xi16, X86testpat, MRM0r>;