[llvm-branch-commits] [llvm-branch] r101451 - in /llvm/branches/Apple/Morbo: ./ lib/CodeGen/SelectionDAG/DAGCombiner.cpp lib/Target/README.txt test/CodeGen/X86/store-narrow.ll
Bill Wendling
isanbard at gmail.com
Fri Apr 16 00:41:59 PDT 2010
Author: void
Date: Fri Apr 16 02:41:59 2010
New Revision: 101451
URL: http://llvm.org/viewvc/llvm-project?rev=101451&view=rev
Log:
$ svn merge -c 101343 https://llvm.org/svn/llvm-project/llvm/trunk
--- Merging r101343 into '.':
A test/CodeGen/X86/store-narrow.ll
U lib/CodeGen/SelectionDAG/DAGCombiner.cpp
U lib/Target/README.txt
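What r101343 adds: DAGCombiner::ReduceLoadOpStoreWidth now recognizes
"store (or (and (load P), mask), val), P" where the mask clears a contiguous,
byte-aligned run of bytes and val supplies only those bytes, and rewrites the
sequence as a single narrower store, leaving the load dead.  Below is a
minimal standalone sketch of the byte-mask analysis the new CheckForMaskedLoad
helper performs, worked through with a 32-bit mask that keeps the low three
bytes (the same shape as the README entry this merge removes).  It is not the
patch's code: the names, the GCC/Clang count-zero builtins, and the already
canonicalized mask value are assumptions for illustration.

  #include <cstdint>
  #include <cstdio>

  int main() {
    const unsigned BitWidth = 32;                       // i32 store value
    int64_t SExtMask = (int32_t)0x00FFFFFFu;            // getSExtValue() of the AND mask
    uint64_t NotMask = ~(uint64_t)SExtMask;             // 0xFFFFFFFFFF000000
    unsigned LZ = __builtin_clzll(NotMask);             // 0
    unsigned TZ = __builtin_ctzll(NotMask);             // 24
    // Length of the contiguous run of ones starting at bit TZ (well defined
    // here because NotMask >> TZ is not all ones for this mask).
    unsigned Ones = __builtin_ctzll(~(NotMask >> TZ));  // 40
    if (LZ % 8 == 0 && TZ % 8 == 0 && LZ != 64 && Ones + TZ + LZ == 64) {
      if (LZ) LZ -= 64 - BitWidth;                      // rescale from i64 down to i32
      unsigned MaskedBytes = (BitWidth - LZ - TZ) / 8;  // 1
      unsigned ByteShift = TZ / 8;                      // 3
      std::printf("narrow to %u byte(s) at byte offset %u\n", MaskedBytes, ByteShift);
    }
    return 0;
  }

For this mask the helper would report one masked byte at byte offset 3, which
is exactly what the narrowed store built further down needs.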
Added:
llvm/branches/Apple/Morbo/test/CodeGen/X86/store-narrow.ll
- copied unchanged from r101343, llvm/trunk/test/CodeGen/X86/store-narrow.ll
Modified:
llvm/branches/Apple/Morbo/ (props changed)
llvm/branches/Apple/Morbo/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
llvm/branches/Apple/Morbo/lib/Target/README.txt
Propchange: llvm/branches/Apple/Morbo/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Apr 16 02:41:59 2010
@@ -1,2 +1,2 @@
/llvm/branches/Apple/Hermes:96832,96835,96858,96870,96876,96879
-/llvm/trunk:98602,98604,98612,98615-98616,98675,98686,98743-98744,98768,98773,98778,98780,98810,98835,98839,98845,98855,98862,98881,98920,98977,99032-99033,99043,99196,99223,99263,99282-99284,99306,99319-99321,99324,99336,99378,99418,99423,99429,99455,99463,99465,99469,99484,99490,99492-99494,99507,99524,99537,99539-99540,99544,99570,99575,99598,99620,99629-99630,99636,99671,99692,99695,99697,99699,99722,99816,99836,99845-99846,99848,99850,99855,99879,99881-99883,99895,99899,99910,99916,99919,99952-99954,99957,99959,99974-99975,99982,99984-99986,99988,99992-99993,99995,99997-99999,100016,100035,100037-100038,100042,100044,100056,100072,100074,100081-100090,100092,100094-100095,100116,100134,100184,100209,100214-100218,100220-100221,100223-100225,100257,100261,100304,100332,100353,100384,100454-100455,100457,100478,100480,100487,100494,100497,100505,100521,100553,100568,100584,100592,100609-100610,100636,100710,100736,100742,100751,100768-100769,100771,100781,100797,100804,10
0837,100867,100892,100936-100937,101011,101023,101075,101077,101079,101081,101085,101154,101158,101162,101165,101181,101190,101202,101282,101303,101314-101315,101317,101383,101392,101420
+/llvm/trunk:98602,98604,98612,98615-98616,98675,98686,98743-98744,98768,98773,98778,98780,98810,98835,98839,98845,98855,98862,98881,98920,98977,99032-99033,99043,99196,99223,99263,99282-99284,99306,99319-99321,99324,99336,99378,99418,99423,99429,99455,99463,99465,99469,99484,99490,99492-99494,99507,99524,99537,99539-99540,99544,99570,99575,99598,99620,99629-99630,99636,99671,99692,99695,99697,99699,99722,99816,99836,99845-99846,99848,99850,99855,99879,99881-99883,99895,99899,99910,99916,99919,99952-99954,99957,99959,99974-99975,99982,99984-99986,99988,99992-99993,99995,99997-99999,100016,100035,100037-100038,100042,100044,100056,100072,100074,100081-100090,100092,100094-100095,100116,100134,100184,100209,100214-100218,100220-100221,100223-100225,100257,100261,100304,100332,100353,100384,100454-100455,100457,100478,100480,100487,100494,100497,100505,100521,100553,100568,100584,100592,100609-100610,100636,100710,100736,100742,100751,100768-100769,100771,100781,100797,100804,10
0837,100867,100892,100936-100937,101011,101023,101075,101077,101079,101081,101085,101154,101158,101162,101165,101181,101190,101202,101282,101303,101314-101315,101317,101343,101383,101392,101420
Modified: llvm/branches/Apple/Morbo/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Morbo/lib/CodeGen/SelectionDAG/DAGCombiner.cpp?rev=101451&r1=101450&r2=101451&view=diff
==============================================================================
--- llvm/branches/Apple/Morbo/lib/CodeGen/SelectionDAG/DAGCombiner.cpp (original)
+++ llvm/branches/Apple/Morbo/lib/CodeGen/SelectionDAG/DAGCombiner.cpp Fri Apr 16 02:41:59 2010
@@ -254,24 +254,28 @@
/// looking for a better chain (aliasing node.)
SDValue FindBetterChain(SDNode *N, SDValue Chain);
- /// getShiftAmountTy - Returns a type large enough to hold any valid
- /// shift amount - before type legalization these can be huge.
- EVT getShiftAmountTy() {
- return LegalTypes ? TLI.getShiftAmountTy() : TLI.getPointerTy();
- }
-
-public:
+ public:
DAGCombiner(SelectionDAG &D, AliasAnalysis &A, CodeGenOpt::Level OL)
- : DAG(D),
- TLI(D.getTargetLoweringInfo()),
- Level(Unrestricted),
- OptLevel(OL),
- LegalOperations(false),
- LegalTypes(false),
- AA(A) {}
+ : DAG(D), TLI(D.getTargetLoweringInfo()), Level(Unrestricted),
+ OptLevel(OL), LegalOperations(false), LegalTypes(false), AA(A) {}
/// Run - runs the dag combiner on all nodes in the work list
void Run(CombineLevel AtLevel);
+
+ SelectionDAG &getDAG() const { return DAG; }
+
+ /// getShiftAmountTy - Returns a type large enough to hold any valid
+ /// shift amount - before type legalization these can be huge.
+ EVT getShiftAmountTy() {
+ return LegalTypes ? TLI.getShiftAmountTy() : TLI.getPointerTy();
+ }
+
+ /// isTypeLegal - This method returns true if we are running before type
+ /// legalization or if the specified VT is legal.
+ bool isTypeLegal(const EVT &VT) {
+ if (!LegalTypes) return true;
+ return TLI.isTypeLegal(VT);
+ }
};
}
@@ -3949,7 +3953,7 @@
VT.isInteger() && !VT.isVector()) {
unsigned OrigXWidth = N0.getOperand(1).getValueType().getSizeInBits();
EVT IntXVT = EVT::getIntegerVT(*DAG.getContext(), OrigXWidth);
- if (TLI.isTypeLegal(IntXVT) || !LegalTypes) {
+ if (isTypeLegal(IntXVT)) {
SDValue X = DAG.getNode(ISD::BIT_CONVERT, N0.getDebugLoc(),
IntXVT, N0.getOperand(1));
AddToWorkList(X.getNode());
@@ -4464,7 +4468,7 @@
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
// fold (fp_round_inreg c1fp) -> c1fp
- if (N0CFP && (TLI.isTypeLegal(EVT) || !LegalTypes)) {
+ if (N0CFP && isTypeLegal(EVT)) {
SDValue Round = DAG.getConstantFP(*N0CFP->getConstantFPValue(), EVT);
return DAG.getNode(ISD::FP_EXTEND, N->getDebugLoc(), VT, Round);
}
@@ -5145,6 +5149,123 @@
return SDValue();
}
+/// CheckForMaskedLoad - Check to see if V is (and load (ptr), imm), where the
+/// load is having specific bytes cleared out. If so, return the byte size
+/// being masked out and the shift amount.
+static std::pair<unsigned, unsigned>
+CheckForMaskedLoad(SDValue V, SDValue Ptr, SDValue Chain) {
+ std::pair<unsigned, unsigned> Result(0, 0);
+
+ // Check for the structure we're looking for.
+ if (V->getOpcode() != ISD::AND ||
+ !isa<ConstantSDNode>(V->getOperand(1)) ||
+ !ISD::isNormalLoad(V->getOperand(0).getNode()))
+ return Result;
+
+ // Check the chain and pointer. The store should be chained directly to the
+ // load (TODO: Or through a TF node!) since it's to the same address.
+ LoadSDNode *LD = cast<LoadSDNode>(V->getOperand(0));
+ if (LD->getBasePtr() != Ptr ||
+ V->getOperand(0).getNode() != Chain.getNode())
+ return Result;
+
+ // This only handles simple types.
+ if (V.getValueType() != MVT::i16 &&
+ V.getValueType() != MVT::i32 &&
+ V.getValueType() != MVT::i64)
+ return Result;
+
+ // Check the constant mask. Invert it so that the bits being masked out are
+ // 0 and the bits being kept are 1. Use getSExtValue so that leading bits
+ // follow the sign bit for uniformity.
+ uint64_t NotMask = ~cast<ConstantSDNode>(V->getOperand(1))->getSExtValue();
+ unsigned NotMaskLZ = CountLeadingZeros_64(NotMask);
+ if (NotMaskLZ & 7) return Result; // Must be multiple of a byte.
+ unsigned NotMaskTZ = CountTrailingZeros_64(NotMask);
+ if (NotMaskTZ & 7) return Result; // Must be multiple of a byte.
+ if (NotMaskLZ == 64) return Result; // All zero mask.
+
+ // See if we have a continuous run of bits. If so, we have 0*1+0*
+ if (CountTrailingOnes_64(NotMask >> NotMaskTZ)+NotMaskTZ+NotMaskLZ != 64)
+ return Result;
+
+ // Adjust NotMaskLZ down to be from the actual size of the int instead of i64.
+ if (V.getValueType() != MVT::i64 && NotMaskLZ)
+ NotMaskLZ -= 64-V.getValueSizeInBits();
+
+ unsigned MaskedBytes = (V.getValueSizeInBits()-NotMaskLZ-NotMaskTZ)/8;
+ switch (MaskedBytes) {
+ case 1:
+ case 2:
+ case 4: break;
+ default: return Result; // All one mask, or 5-byte mask.
+ }
+
+ // Verify that the first bit starts at a multiple of mask so that the access
+ // is aligned the same as the access width.
+ if (NotMaskTZ && NotMaskTZ/8 % MaskedBytes) return Result;
+
+ Result.first = MaskedBytes;
+ Result.second = NotMaskTZ/8;
+ return Result;
+}
+
+
+/// ShrinkLoadReplaceStoreWithStore - Check to see if IVal is something that
+/// provides a value as specified by MaskInfo. If so, replace the specified
+/// store with a narrower store of truncated IVal.
+static SDNode *
+ShrinkLoadReplaceStoreWithStore(const std::pair<unsigned, unsigned> &MaskInfo,
+ SDValue IVal, StoreSDNode *St,
+ DAGCombiner *DC) {
+ unsigned NumBytes = MaskInfo.first;
+ unsigned ByteShift = MaskInfo.second;
+ SelectionDAG &DAG = DC->getDAG();
+
+ // Check to see if IVal is all zeros in the part being masked in by the 'or'
+ // that uses this. If not, this is not a replacement.
+ APInt Mask = ~APInt::getBitsSet(IVal.getValueSizeInBits(),
+ ByteShift*8, (ByteShift+NumBytes)*8);
+ if (!DAG.MaskedValueIsZero(IVal, Mask)) return 0;
+
+ // Check that it is legal on the target to do this. It is legal if the new
+ // VT we're shrinking to (i8/i16/i32) is legal or we're still before type
+ // legalization.
+ MVT VT = MVT::getIntegerVT(NumBytes*8);
+ if (!DC->isTypeLegal(VT))
+ return 0;
+
+ // Okay, we can do this! Replace the 'St' store with a store of IVal that is
+ // shifted by ByteShift and truncated down to NumBytes.
+ if (ByteShift)
+ IVal = DAG.getNode(ISD::SRL, IVal->getDebugLoc(), IVal.getValueType(), IVal,
+ DAG.getConstant(ByteShift*8, DC->getShiftAmountTy()));
+
+ // Figure out the offset for the store and the alignment of the access.
+ unsigned StOffset;
+ unsigned NewAlign = St->getAlignment();
+
+ if (DAG.getTargetLoweringInfo().isLittleEndian())
+ StOffset = ByteShift;
+ else
+ StOffset = IVal.getValueType().getStoreSize() - ByteShift - NumBytes;
+
+ SDValue Ptr = St->getBasePtr();
+ if (StOffset) {
+ Ptr = DAG.getNode(ISD::ADD, IVal->getDebugLoc(), Ptr.getValueType(),
+ Ptr, DAG.getConstant(StOffset, Ptr.getValueType()));
+ NewAlign = MinAlign(NewAlign, StOffset);
+ }
+
+ // Truncate down to the new size.
+ IVal = DAG.getNode(ISD::TRUNCATE, IVal->getDebugLoc(), VT, IVal);
+
+ ++OpsNarrowed;
+ return DAG.getStore(St->getChain(), St->getDebugLoc(), IVal, Ptr,
+ St->getSrcValue(), St->getSrcValueOffset()+StOffset,
+ false, false, NewAlign).getNode();
+}
+
/// ReduceLoadOpStoreWidth - Look for sequence of load / op / store where op is
/// one of 'or', 'xor', and 'and' of immediates. If 'op' is only touching some
@@ -5164,6 +5285,28 @@
return SDValue();
unsigned Opc = Value.getOpcode();
+
+ // If this is "store (or X, Y), P" and X is "(and (load P), cst)", where cst
+ // is a byte mask indicating a consecutive number of bytes, check to see if
+ // Y is known to provide just those bytes. If so, we try to replace the
+ // load + replace + store sequence with a single (narrower) store, which makes
+ // the load dead.
+ if (Opc == ISD::OR) {
+ std::pair<unsigned, unsigned> MaskedLoad;
+ MaskedLoad = CheckForMaskedLoad(Value.getOperand(0), Ptr, Chain);
+ if (MaskedLoad.first)
+ if (SDNode *NewST = ShrinkLoadReplaceStoreWithStore(MaskedLoad,
+ Value.getOperand(1), ST,this))
+ return SDValue(NewST, 0);
+
+ // Or is commutative, so try swapping X and Y.
+ MaskedLoad = CheckForMaskedLoad(Value.getOperand(1), Ptr, Chain);
+ if (MaskedLoad.first)
+ if (SDNode *NewST = ShrinkLoadReplaceStoreWithStore(MaskedLoad,
+ Value.getOperand(0), ST,this))
+ return SDValue(NewST, 0);
+ }
+
if ((Opc != ISD::OR && Opc != ISD::XOR && Opc != ISD::AND) ||
Value.getOperand(1).getOpcode() != ISD::Constant)
return SDValue();
@@ -5211,8 +5354,8 @@
PtrOff = (BitWidth + 7 - NewBW) / 8 - PtrOff;
unsigned NewAlign = MinAlign(LD->getAlignment(), PtrOff);
- if (NewAlign <
- TLI.getTargetData()->getABITypeAlignment(NewVT.getTypeForEVT(*DAG.getContext())))
+ const Type *NewVTTy = NewVT.getTypeForEVT(*DAG.getContext());
+ if (NewAlign < TLI.getTargetData()->getABITypeAlignment(NewVTTy))
return SDValue();
SDValue NewPtr = DAG.getNode(ISD::ADD, LD->getDebugLoc(),
@@ -5282,8 +5425,7 @@
case MVT::ppcf128:
break;
case MVT::f32:
- if (((TLI.isTypeLegal(MVT::i32) || !LegalTypes) && !LegalOperations &&
- !ST->isVolatile()) ||
+ if ((isTypeLegal(MVT::i32) && !LegalOperations && !ST->isVolatile()) ||
TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i32)) {
Tmp = DAG.getConstant((uint32_t)CFP->getValueAPF().
bitcastToAPInt().getZExtValue(), MVT::i32);
@@ -5294,7 +5436,7 @@
}
break;
case MVT::f64:
- if (((TLI.isTypeLegal(MVT::i64) || !LegalTypes) && !LegalOperations &&
+ if ((TLI.isTypeLegal(MVT::i64) && !LegalOperations &&
!ST->isVolatile()) ||
TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i64)) {
Tmp = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt().
@@ -5659,7 +5801,7 @@
}
// Add count and size info.
- if (!TLI.isTypeLegal(VT) && LegalTypes)
+ if (!isTypeLegal(VT))
return SDValue();
// Return the new VECTOR_SHUFFLE node.
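Given that analysis (MaskedBytes = 1, ByteShift = 3),
ShrinkLoadReplaceStoreWithStore shifts the value being or'd in right by
ByteShift*8, truncates it to the narrow type, and stores it at the
byte-adjusted address, choosing the offset by endianness.  A rough plain-C++
equivalent for the constant 0xC5000000 from the README entry below, assuming a
little-endian target (the function name is illustrative, not part of the
patch):

  #include <cstdint>

  void narrowed_store(uint32_t *P) {
    const unsigned ByteShift = 3;                         // from the mask analysis
    const uint32_t IVal = 0xC5000000u;                    // value being or'd in
    // srl by ByteShift*8, then truncate to the one-byte type (MaskedBytes == 1).
    uint8_t Narrow = (uint8_t)(IVal >> (ByteShift * 8));  // 0xC5
    // On a little-endian target the narrow store lands at BasePtr + ByteShift.
    ((uint8_t *)P)[ByteShift] = Narrow;
  }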
Modified: llvm/branches/Apple/Morbo/lib/Target/README.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/Apple/Morbo/lib/Target/README.txt?rev=101451&r1=101450&r2=101451&view=diff
==============================================================================
--- llvm/branches/Apple/Morbo/lib/Target/README.txt (original)
+++ llvm/branches/Apple/Morbo/lib/Target/README.txt Fri Apr 16 02:41:59 2010
@@ -263,19 +263,6 @@
//===---------------------------------------------------------------------===//
-Turn this into a single byte store with no load (the other 3 bytes are
-unmodified):
-
-define void @test(i32* %P) {
- %tmp = load i32* %P
- %tmp14 = or i32 %tmp, 3305111552
- %tmp15 = and i32 %tmp14, 3321888767
- store i32 %tmp15, i32* %P
- ret void
-}
-
-//===---------------------------------------------------------------------===//
-
quantum_sigma_x in 462.libquantum contains the following loop:
for(i=0; i<reg->size; i++)
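The README entry removed above is precisely the case the new combine handles.
In source form it amounts to the snippet below, which should now lower to a
single one-byte store of 0xC5 at offset 3 rather than a load/or/and/store
sequence (on little-endian x86; the exact instructions emitted are an
expectation here, and the new test/CodeGen/X86/store-narrow.ll covers them):

  #include <cstdint>

  // Same constants as the removed IR: 3305111552 = 0xC5000000,
  // 3321888767 = 0xC5FFFFFF.  Net effect: only the top byte of *P changes.
  void test(uint32_t *P) {
    *P = (*P | 0xC5000000u) & 0xC5FFFFFFu;
  }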