[llvm] r260399 - AMDGPU: Fix indentation and variable names
Matt Arsenault via llvm-commits
llvm-commits at lists.llvm.org
Wed Feb 10 10:21:45 PST 2016
Author: arsenm
Date: Wed Feb 10 12:21:45 2016
New Revision: 260399
URL: http://llvm.org/viewvc/llvm-project?rev=260399&view=rev
Log:
AMDGPU: Fix indentation and variable names
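In functional terms, the commit renames VT to MemVT and converts the nested
vector check in LowerLOAD into an early-return guard, pulling the vector
handling up one indentation level. A minimal standalone sketch of that
before/after shape (toy types standing in for SDValue, not the committed code):

#include <optional>

struct Result {};                      // stand-in for SDValue
using MaybeResult = std::optional<Result>;

// Before: the interesting logic is nested one level deep, with the
// shared failure return after the block.
MaybeResult lowerBefore(bool isVector) {
  if (isVector) {
    // ...vector legalization decisions here...
  }
  return std::nullopt;                 // SDValue(): no custom lowering
}

// After: guard clause first, then the same logic un-nested.
MaybeResult lowerAfter(bool isVector) {
  if (!isVector)
    return std::nullopt;
  // ...vector legalization decisions here, one level shallower...
  return std::nullopt;
}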
Modified:
llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp
Modified: llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp?rev=260399&r1=260398&r2=260399&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp Wed Feb 10 12:21:45 2016
@@ -1600,10 +1600,10 @@ SDValue SITargetLowering::LowerLOAD(SDVa
   SDLoc DL(Op);
   LoadSDNode *Load = cast<LoadSDNode>(Op);
   ISD::LoadExtType ExtType = Load->getExtensionType();
-  EVT VT = Load->getMemoryVT();
+  EVT MemVT = Load->getMemoryVT();
 
-  if (ExtType == ISD::NON_EXTLOAD && VT.getSizeInBits() < 32) {
-    assert(VT == MVT::i1 && "Only i1 non-extloads expected");
+  if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) {
+    assert(MemVT == MVT::i1 && "Only i1 non-extloads expected");
     // FIXME: Copied from PPC
     // First, load into 32 bits, then truncate to 1 bit.
 
@@ -1615,45 +1615,42 @@ SDValue SITargetLowering::LowerLOAD(SDVa
                                    BasePtr, MVT::i8, MMO);
 
     SDValue Ops[] = {
-      DAG.getNode(ISD::TRUNCATE, DL, VT, NewLD),
+      DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD),
       NewLD.getValue(1)
     };
 
     return DAG.getMergeValues(Ops, DL);
   }
 
-  if (Op.getValueType().isVector()) {
-    assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
-           "Custom lowering for non-i32 vectors hasn't been implemented.");
-    unsigned NumElements = Op.getValueType().getVectorNumElements();
-    assert(NumElements != 2 && "v2 loads are supported for all address spaces.");
-
-    switch (Load->getAddressSpace()) {
-      default: break;
-      case AMDGPUAS::CONSTANT_ADDRESS:
-        if (isMemOpUniform(Load))
-          break;
-        // Non-uniform loads will be selected to MUBUF instructions, so they
-        // have the same legalization requirements as global and private
-        // loads.
-        //
-        // Fall-through
-      case AMDGPUAS::GLOBAL_ADDRESS:
-      case AMDGPUAS::PRIVATE_ADDRESS:
-        if (NumElements >= 8)
-          return SplitVectorLoad(Op, DAG);
-
-        // v4 loads are supported for private and global memory.
-        if (NumElements <= 4)
-          break;
-        // fall-through
-      case AMDGPUAS::LOCAL_ADDRESS:
-        // If properly aligned, we might be able to use ds_read_b64 after splitting.
-        return SplitVectorLoad(Op, DAG);
-    }
-  }
+  if (!MemVT.isVector())
+    return SDValue();
 
-  return SDValue();
+  assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
+         "Custom lowering for non-i32 vectors hasn't been implemented.");
+  unsigned NumElements = MemVT.getVectorNumElements();
+  assert(NumElements != 2 && "v2 loads are supported for all address spaces.");
+
+  switch (Load->getAddressSpace()) {
+  case AMDGPUAS::CONSTANT_ADDRESS:
+    if (isMemOpUniform(Load))
+      return SDValue();
+    // Non-uniform loads will be selected to MUBUF instructions, so they
+    // have the same legalization requirements as global and private
+    // loads.
+    //
+    // Fall-through
+  case AMDGPUAS::GLOBAL_ADDRESS:
+  case AMDGPUAS::PRIVATE_ADDRESS:
+    if (NumElements >= 8)
+      return SplitVectorLoad(Op, DAG);
+    // v4 loads are supported for private and global memory.
+    return SDValue();
+  case AMDGPUAS::LOCAL_ADDRESS:
+    // If properly aligned, we might be able to use ds_read_b64 after splitting.
+    return SplitVectorLoad(Op, DAG);
+  default:
+    return SDValue();
+  }
 }
 
SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
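To make the intentional fall-through in the new switch easy to check at a
glance, here is a self-contained sketch of the dispatch it implements; the
enum, Action, and function names below are stand-ins, not LLVM's types. A
non-uniform CONSTANT_ADDRESS load deliberately falls through to the
GLOBAL/PRIVATE cases, since it will be selected to MUBUF instructions and
shares their legalization requirements:

#include <cassert>
#include <cstdio>

enum AddressSpace { CONSTANT_ADDRESS, GLOBAL_ADDRESS, PRIVATE_ADDRESS,
                    LOCAL_ADDRESS, OTHER };

// Stand-ins for "return SDValue()" and "return SplitVectorLoad(Op, DAG)".
enum Action { NoCustomLowering, Split };

Action lowerVectorLoad(AddressSpace AS, unsigned NumElements, bool Uniform) {
  assert(NumElements != 2 && "v2 loads are supported for all address spaces.");
  switch (AS) {
  case CONSTANT_ADDRESS:
    if (Uniform)
      return NoCustomLowering;
    // Non-uniform constant loads behave like global/private loads.
    // Fall-through (intentional).
  case GLOBAL_ADDRESS:
  case PRIVATE_ADDRESS:
    if (NumElements >= 8)
      return Split;
    return NoCustomLowering;  // v4 and smaller are supported directly.
  case LOCAL_ADDRESS:
    return Split;             // splitting may enable ds_read_b64
  default:
    return NoCustomLowering;
  }
}

int main() {
  // Non-uniform constant v8 load: falls through, gets split (prints 1).
  std::printf("%d\n", lowerVectorLoad(CONSTANT_ADDRESS, 8, /*Uniform=*/false));
  // Global v4 load: supported as-is (prints 0).
  std::printf("%d\n", lowerVectorLoad(GLOBAL_ADDRESS, 4, /*Uniform=*/false));
}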