[llvm-commits] [llvm] r78255 - in /llvm/trunk/lib/Target/X86: X86ISelDAGToDAG.cpp X86ISelLowering.cpp X86ISelLowering.h
Marius Wachtler
malloc at inode.at
Fri Aug 21 08:32:04 PDT 2009
Hello
I think I found a typo while looking over your diff.
In X86ISelDAGToDAG.cpp
"(M == CodeModel::Small || CodeModel::Kernel)"
should be
"(M == CodeModel::Small || M == CodeModel::Kernel)"
IMHO.
Otherwise the test is always true, since CodeModel::Kernel has a non-zero value.
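For illustration, here is a minimal standalone sketch of the problem (the enum below is only a stand-in with made-up values, not LLVM's actual CodeModel declaration; the only property that matters is that CodeModel::Kernel is a non-zero enumerator):

#include <iostream>

namespace CodeModel {
  // Illustrative stand-in for the real enum -- all that matters here
  // is that Kernel is a non-zero value.
  enum Model { Default, Small, Kernel, Medium, Large };
}

int main() {
  CodeModel::Model M = CodeModel::Large;

  // As committed: the right operand of || is the enumerator itself, which
  // converts to a non-zero integer, so the test is true for every code model.
  bool asCommitted = (M == CodeModel::Small || CodeModel::Kernel);

  // As presumably intended: compare M against both code models.
  bool asIntended  = (M == CodeModel::Small || M == CodeModel::Kernel);

  std::cout << asCommitted << ' ' << asIntended << '\n';  // prints "1 0"
  return 0;
}

So, if I read it right, the code-model part of that condition currently filters nothing out on x86-64.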
Marius Wachtler
On Thu, Aug 6, 2009 at 1:01 AM, Anton Korobeynikov <asl at math.spbu.ru> wrote:
> Author: asl
> Date: Wed Aug 5 18:01:26 2009
> New Revision: 78255
>
> URL: http://llvm.org/viewvc/llvm-project?rev=78255&view=rev
> Log:
> Better handle kernel code model. Also, generalize the things and fix one
> subtle bug with small code model.
>
> Modified:
> llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp
> llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
> llvm/trunk/lib/Target/X86/X86ISelLowering.h
>
> Modified: llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp?rev=78255&r1=78254&r2=78255&view=diff
>
>
> ==============================================================================
> --- llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp (original)
> +++ llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp Wed Aug 5 18:01:26 2009
> @@ -705,7 +705,7 @@
> /// MatchWrapper - Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes
> /// into an addressing mode. These wrap things that will resolve down into a
> /// symbol reference. If no match is possible, this returns true, otherwise it
> -/// returns false.
> +/// returns false.
> bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
> // If the addressing mode already has a symbol as the displacement, we can
> // never match another symbol.
> @@ -713,28 +713,27 @@
> return true;
>
> SDValue N0 = N.getOperand(0);
> -
> + CodeModel::Model M = TM.getCodeModel();
> +
> // Handle X86-64 rip-relative addresses. We check this before checking direct
> // folding because RIP is preferable to non-RIP accesses.
> if (Subtarget->is64Bit() &&
> // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
> // they cannot be folded into immediate fields.
> // FIXME: This can be improved for kernel and other models?
> - TM.getCodeModel() == CodeModel::Small &&
> -
> + (M == CodeModel::Small || CodeModel::Kernel) &&
> // Base and index reg must be 0 in order to use %rip as base and lowering
> // must allow RIP.
> !AM.hasBaseOrIndexReg() && N.getOpcode() == X86ISD::WrapperRIP) {
> -
> if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
> int64_t Offset = AM.Disp + G->getOffset();
> - if (!isInt32(Offset)) return true;
> + if (!X86::isOffsetSuitableForCodeModel(Offset, M)) return true;
> AM.GV = G->getGlobal();
> AM.Disp = Offset;
> AM.SymbolFlags = G->getTargetFlags();
> } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
> int64_t Offset = AM.Disp + CP->getOffset();
> - if (!isInt32(Offset)) return true;
> + if (!X86::isOffsetSuitableForCodeModel(Offset, M)) return true;
> AM.CP = CP->getConstVal();
> AM.Align = CP->getAlignment();
> AM.Disp = Offset;
> @@ -747,7 +746,7 @@
> AM.JT = J->getIndex();
> AM.SymbolFlags = J->getTargetFlags();
> }
> -
> +
> if (N.getOpcode() == X86ISD::WrapperRIP)
> AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
> return false;
> @@ -757,7 +756,7 @@
> // X86-32 always and X86-64 when in -static -mcmodel=small mode. In 64-bit
> // mode, this results in a non-RIP-relative computation.
> if (!Subtarget->is64Bit() ||
> - (TM.getCodeModel() == CodeModel::Small &&
> + ((M == CodeModel::Small || M == CodeModel::Kernel) &&
> TM.getRelocationModel() == Reloc::Static)) {
> if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
> AM.GV = G->getGlobal();
> @@ -809,7 +808,9 @@
> // Limit recursion.
> if (Depth > 5)
> return MatchAddressBase(N, AM);
> -
> +
> + CodeModel::Model M = TM.getCodeModel();
> +
> // If this is already a %rip relative address, we can only merge immediates
> // into it. Instead of handling this in every case, we handle it here.
> // RIP relative addressing: %rip + 32-bit displacement!
> @@ -818,10 +819,11 @@
> // displacements. It isn't very important, but this should be fixed for
> // consistency.
> if (!AM.ES && AM.JT != -1) return true;
> -
> +
> if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N)) {
> int64_t Val = AM.Disp + Cst->getSExtValue();
> - if (isInt32(Val)) {
> + if (X86::isOffsetSuitableForCodeModel(Val, M,
> + AM.hasSymbolicDisplacement())) {
> AM.Disp = Val;
> return false;
> }
> @@ -833,7 +835,9 @@
> default: break;
> case ISD::Constant: {
> uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
> - if (!is64Bit || isInt32(AM.Disp + Val)) {
> + if (!is64Bit ||
> + X86::isOffsetSuitableForCodeModel(AM.Disp + Val, M,
> + AM.hasSymbolicDisplacement())) {
> AM.Disp += Val;
> return false;
> }
> @@ -889,7 +893,9 @@
> ConstantSDNode *AddVal =
> cast<ConstantSDNode>(ShVal.getNode()->getOperand(1));
> uint64_t Disp = AM.Disp + (AddVal->getSExtValue() << Val);
> - if (!is64Bit || isInt32(Disp))
> + if (!is64Bit ||
> + X86::isOffsetSuitableForCodeModel(Disp, M,
> + AM.hasSymbolicDisplacement()))
> AM.Disp = Disp;
> else
> AM.IndexReg = ShVal;
> @@ -931,7 +937,9 @@
> cast<ConstantSDNode>(MulVal.getNode()->getOperand(1));
> uint64_t Disp = AM.Disp + AddVal->getSExtValue() *
> CN->getZExtValue();
> - if (!is64Bit || isInt32(Disp))
> + if (!is64Bit ||
> + X86::isOffsetSuitableForCodeModel(Disp, M,
> + AM.hasSymbolicDisplacement()))
> AM.Disp = Disp;
> else
> Reg = N.getNode()->getOperand(0);
> @@ -1050,7 +1058,9 @@
> // Address could not have picked a GV address for the displacement.
> AM.GV == NULL &&
> // On x86-64, the resultant disp must fit in 32-bits.
> - (!is64Bit || isInt32(AM.Disp + Offset)) &&
> + (!is64Bit ||
> + X86::isOffsetSuitableForCodeModel(AM.Disp + Offset, M,
> + AM.hasSymbolicDisplacement())) &&
> // Check to see if the LHS & C is zero.
> CurDAG->MaskedValueIsZero(N.getOperand(0), CN->getAPIntValue())) {
> AM.Disp += Offset;
>
> Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=78255&r1=78254&r2=78255&view=diff
>
>
> ==============================================================================
> --- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
> +++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Wed Aug 5 18:01:26 2009
> @@ -2126,6 +2126,36 @@
> }
>
>
> +bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
> + bool hasSymbolicDisplacement) {
> + // Offset should fit into 32 bit immediate field.
> + if (!isInt32(Offset))
> + return false;
> +
> + // If we don't have a symbolic displacement - we don't have any extra
> + // restrictions.
> + if (!hasSymbolicDisplacement)
> + return true;
> +
> + // FIXME: Some tweaks might be needed for medium code model.
> + if (M != CodeModel::Small && M != CodeModel::Kernel)
> + return false;
> +
> + // For small code model we assume that latest object is 16MB before end of 31
> + // bits boundary. We may also accept pretty large negative constants knowing
> + // that all objects are in the positive half of address space.
> + if (M == CodeModel::Small && Offset < 16*1024*1024)
> + return true;
> +
> + // For kernel code model we know that all object resist in the negative half
> + // of 32bits address space. We may not accept negative offsets, since they may
> + // be just off and we may accept pretty large positive ones.
> + if (M == CodeModel::Kernel && Offset > 0)
> + return true;
> +
> + return false;
> +}
> +
> /// TranslateX86CC - do a one to one translation of a ISD::CondCode to the X86
> /// specific condition code, returning the condition code and the LHS/RHS of the
> /// comparison to make.
> @@ -4440,9 +4470,10 @@
> // global base reg.
> unsigned char OpFlag = 0;
> unsigned WrapperKind = X86ISD::Wrapper;
> -
> + CodeModel::Model M = getTargetMachine().getCodeModel();
> +
> if (Subtarget->isPICStyleRIPRel() &&
> - getTargetMachine().getCodeModel() == CodeModel::Small)
> + (M == CodeModel::Small || M == CodeModel::Kernel))
> WrapperKind = X86ISD::WrapperRIP;
> else if (Subtarget->isPICStyleGOT())
> OpFlag = X86II::MO_GOTOFF;
> @@ -4472,9 +4503,10 @@
> // global base reg.
> unsigned char OpFlag = 0;
> unsigned WrapperKind = X86ISD::Wrapper;
> -
> + CodeModel::Model M = getTargetMachine().getCodeModel();
> +
> if (Subtarget->isPICStyleRIPRel() &&
> - getTargetMachine().getCodeModel() == CodeModel::Small)
> + (M == CodeModel::Small || M == CodeModel::Kernel))
> WrapperKind = X86ISD::WrapperRIP;
> else if (Subtarget->isPICStyleGOT())
> OpFlag = X86II::MO_GOTOFF;
> @@ -4505,8 +4537,10 @@
> // global base reg.
> unsigned char OpFlag = 0;
> unsigned WrapperKind = X86ISD::Wrapper;
> + CodeModel::Model M = getTargetMachine().getCodeModel();
> +
> if (Subtarget->isPICStyleRIPRel() &&
> - getTargetMachine().getCodeModel() == CodeModel::Small)
> + (M == CodeModel::Small || M == CodeModel::Kernel))
> WrapperKind = X86ISD::WrapperRIP;
> else if (Subtarget->isPICStyleGOT())
> OpFlag = X86II::MO_GOTOFF;
> @@ -4540,8 +4574,10 @@
> // offset if it is legal.
> unsigned char OpFlags =
> Subtarget->ClassifyGlobalReference(GV, getTargetMachine());
> + CodeModel::Model M = getTargetMachine().getCodeModel();
> SDValue Result;
> - if (OpFlags == X86II::MO_NO_FLAG && isInt32(Offset)) {
> + if (OpFlags == X86II::MO_NO_FLAG &&
> + X86::isOffsetSuitableForCodeModel(Offset, M)) {
> // A direct static reference to a global.
> Result = DAG.getTargetGlobalAddress(GV, getPointerTy(), Offset);
> Offset = 0;
> @@ -4550,7 +4586,7 @@
> }
>
> if (Subtarget->isPICStyleRIPRel() &&
> - getTargetMachine().getCodeModel() == CodeModel::Small)
> + (M == CodeModel::Small || M == CodeModel::Kernel))
> Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
> else
> Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
> @@ -7049,32 +7085,28 @@
> bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
> const Type *Ty) const {
> // X86 supports extremely general addressing modes.
> + CodeModel::Model M = getTargetMachine().getCodeModel();
>
> // X86 allows a sign-extended 32-bit immediate field as a displacement.
> - if (AM.BaseOffs <= -(1LL << 32) || AM.BaseOffs >= (1LL << 32)-1)
> + if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != NULL))
> return false;
>
> if (AM.BaseGV) {
> unsigned GVFlags =
> Subtarget->ClassifyGlobalReference(AM.BaseGV, getTargetMachine());
> -
> +
> // If a reference to this global requires an extra load, we can't fold it.
> if (isGlobalStubReference(GVFlags))
> return false;
> -
> +
> // If BaseGV requires a register for the PIC base, we cannot also have a
> // BaseReg specified.
> if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
> return false;
>
> - // X86-64 only supports addr of globals in small code model.
> - if (Subtarget->is64Bit()) {
> - if (getTargetMachine().getCodeModel() != CodeModel::Small)
> - return false;
> - // If lower 4G is not available, then we must use rip-relative addressing.
> - if (AM.BaseOffs || AM.Scale > 1)
> - return false;
> - }
> + // If lower 4G is not available, then we must use rip-relative addressing.
> + if (Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1))
> + return false;
> }
>
> switch (AM.Scale) {
>
> Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.h
> URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.h?rev=78255&r1=78254&r2=78255&view=diff
>
>
> ==============================================================================
> --- llvm/trunk/lib/Target/X86/X86ISelLowering.h (original)
> +++ llvm/trunk/lib/Target/X86/X86ISelLowering.h Wed Aug 5 18:01:26 2009
> @@ -336,6 +336,11 @@
> /// isZeroNode - Returns true if Elt is a constant zero or a floating point
> /// constant +0.0.
> bool isZeroNode(SDValue Elt);
> +
> + /// isOffsetSuitableForCodeModel - Returns true of the given offset can be
> + /// fit into displacement field of the instruction.
> + bool isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
> + bool hasSymbolicDisplacement = true);
> }
>
>
> //===--------------------------------------------------------------------===//
>
>
> _______________________________________________
> llvm-commits mailing list
> llvm-commits at cs.uiuc.edu
> http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits
>