[llvm] r337021 - AMDGPU: Fix handling of alignment padding in DAG argument lowering
Evgenii Stepanov via llvm-commits
llvm-commits at lists.llvm.org
Fri Jul 13 17:39:40 PDT 2018
Hi,
MSan is not happy about this change, but the report is unfortunately
truncated:
http://lab.llvm.org:8011/builders/sanitizer-x86_64-linux-bootstrap-msan/builds/5926/steps/check-llvm%20msan/logs/stdio
On Fri, Jul 13, 2018 at 9:40 AM, Matt Arsenault via llvm-commits <llvm-commits at lists.llvm.org> wrote:
> Author: arsenm
> Date: Fri Jul 13 09:40:25 2018
> New Revision: 337021
>
> URL: http://llvm.org/viewvc/llvm-project?rev=337021&view=rev
> Log:
> AMDGPU: Fix handling of alignment padding in DAG argument lowering
>
> This was completely broken if there was ever a struct argument, as
> the alignment padding information is thrown away during the argument analysis.
>
> The offsets as passed in to LowerFormalArguments are not useful,
> as they partially depend on the legalized result register type,
> and they don't consider the alignment in the first place.
>
> Ignore the Ins array, and instead figure out from the raw IR type
> what we need to do. This seems to fix the padding computation
> if the DAG lowering is forced (and stops breaking arguments
> following padded arguments if the arguments were only partially
> lowered in the IR).
>
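> In essence, the recomputation is the following (a condensed sketch; the
> helper name here is hypothetical, and the real logic lives in
> analyzeFormalArgumentsCompute and getExplicitKernArgSize below):
>
>   static uint64_t computeExplicitArgBytes(const Function &Fn) {
>     const DataLayout &DL = Fn.getParent()->getDataLayout();
>     uint64_t ExplicitArgOffset = 0;
>     for (const Argument &Arg : Fn.args()) {
>       Type *Ty = Arg.getType();
>       unsigned Align = DL.getABITypeAlignment(Ty);
>       // Pad to the argument's ABI alignment before placing it; this is
>       // the step that is lost when only the Ins array is consulted.
>       uint64_t ArgOffset = alignTo(ExplicitArgOffset, Align);
>       ExplicitArgOffset = ArgOffset + DL.getTypeAllocSize(Ty);
>       // ComputeValueVTs() then assigns each legalized piece an offset
>       // relative to ArgOffset.
>     }
>     return ExplicitArgOffset;
>   }
>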
> Added:
> llvm/trunk/test/CodeGen/AMDGPU/kernel-argument-dag-lowering.ll
> Modified:
> llvm/trunk/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
> llvm/trunk/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
> llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
> llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.h
> llvm/trunk/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
> llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp
> llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineFunction.h
> llvm/trunk/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
> llvm/trunk/lib/Target/AMDGPU/AMDGPUSubtarget.h
> llvm/trunk/lib/Target/AMDGPU/R600.td
> llvm/trunk/lib/Target/AMDGPU/R600ISelLowering.cpp
> llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp
> llvm/trunk/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
> llvm/trunk/test/CodeGen/AMDGPU/kernel-args.ll
> llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.implicitarg.ptr.ll
> llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.kernarg.segment.ptr.ll
>
> Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp?rev=337021&r1=337020&r2=337021&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp (original)
> +++ llvm/trunk/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp Fri Jul 13 09:40:25 2018
> @@ -1128,6 +1128,13 @@ static amd_element_byte_size_t getElemen
> void AMDGPUAsmPrinter::getAmdKernelCode(amd_kernel_code_t &Out,
> const SIProgramInfo &CurrentProgramInfo,
> const MachineFunction &MF) const {
> + const Function &F = MF.getFunction();
> +
> + // Avoid asserting on erroneous cases.
> + if (F.getCallingConv() != CallingConv::AMDGPU_KERNEL &&
> + F.getCallingConv() != CallingConv::SPIR_KERNEL)
> + return;
> +
> const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
> const GCNSubtarget &STM = MF.getSubtarget<GCNSubtarget>();
>
> @@ -1174,9 +1181,8 @@ void AMDGPUAsmPrinter::getAmdKernelCode(
> if (STM.isXNACKEnabled())
> Out.code_properties |= AMD_CODE_PROPERTY_IS_XNACK_SUPPORTED;
>
> - // FIXME: Should use getKernArgSize
> - Out.kernarg_segment_byte_size =
> - STM.getKernArgSegmentSize(MF.getFunction(), MFI->getExplicitKernArgSize());
> + unsigned MaxKernArgAlign;
> + Out.kernarg_segment_byte_size = STM.getKernArgSegmentSize(F, MaxKernArgAlign);
> Out.wavefront_sgpr_count = CurrentProgramInfo.NumSGPR;
> Out.workitem_vgpr_count = CurrentProgramInfo.NumVGPR;
> Out.workitem_private_segment_byte_size = CurrentProgramInfo.ScratchSize;
> @@ -1185,7 +1191,7 @@ void AMDGPUAsmPrinter::getAmdKernelCode(
> // These alignment values are specified in powers of two, so alignment =
> // 2^n. The minimum alignment is 2^4 = 16.
> Out.kernarg_segment_alignment = std::max((size_t)4,
> - countTrailingZeros(MFI->getMaxKernArgAlign()));
> + countTrailingZeros(MaxKernArgAlign));
>
> if (STM.debuggerEmitPrologue()) {
> Out.debug_wavefront_private_segment_offset_sgpr =
>
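> (Worked example of the encoding above: a MaxKernArgAlign of 16 gives
> countTrailingZeros(16) == 4, so the emitted field value 4 encodes
> 2^4 = 16-byte alignment -- the "kernarg_segment_alignment = 4" that the
> updated tests below check for.)
>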
> Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp?rev=337021&r1=337020&r2=337021&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp (original)
> +++ llvm/trunk/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp Fri Jul 13 09:40:25 2018
> @@ -209,15 +209,16 @@ Kernel::CodeProps::Metadata MetadataStre
> const Function &F = MF.getFunction();
>
> // Avoid asserting on erroneous cases.
> - if (F.getCallingConv() != CallingConv::AMDGPU_KERNEL)
> + if (F.getCallingConv() != CallingConv::AMDGPU_KERNEL &&
> + F.getCallingConv() != CallingConv::SPIR_KERNEL)
> return HSACodeProps;
>
> - HSACodeProps.mKernargSegmentSize =
> - STM.getKernArgSegmentSize(F, MFI.getExplicitKernArgSize());
> + unsigned MaxKernArgAlign;
> + HSACodeProps.mKernargSegmentSize = STM.getKernArgSegmentSize(F,
> + MaxKernArgAlign);
> HSACodeProps.mGroupSegmentFixedSize = ProgramInfo.LDSSize;
> HSACodeProps.mPrivateSegmentFixedSize = ProgramInfo.ScratchSize;
> - HSACodeProps.mKernargSegmentAlign =
> - std::max(uint32_t(4), MFI.getMaxKernArgAlign());
> + HSACodeProps.mKernargSegmentAlign = std::max(MaxKernArgAlign, 4u);
> HSACodeProps.mWavefrontSize = STM.getWavefrontSize();
> HSACodeProps.mNumSGPRs = ProgramInfo.NumSGPR;
> HSACodeProps.mNumVGPRs = ProgramInfo.NumVGPR;
>
> Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp?rev=337021&r1=337020&r2=337021&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp (original)
> +++ llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp Fri Jul 13 09:40:25 2018
> @@ -30,6 +30,7 @@
> #include "SIInstrInfo.h"
> #include "SIMachineFunctionInfo.h"
> #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
> +#include "llvm/CodeGen/Analysis.h"
> #include "llvm/CodeGen/CallingConvLower.h"
> #include "llvm/CodeGen/MachineFunction.h"
> #include "llvm/CodeGen/MachineRegisterInfo.h"
> @@ -40,18 +41,6 @@
> #include "llvm/Support/KnownBits.h"
> using namespace llvm;
>
> -static bool allocateKernArg(unsigned ValNo, MVT ValVT, MVT LocVT,
> - CCValAssign::LocInfo LocInfo,
> - ISD::ArgFlagsTy ArgFlags, CCState &State) {
> - MachineFunction &MF = State.getMachineFunction();
> - AMDGPUMachineFunction *MFI = MF.getInfo<AMDGPUMachineFunction>();
> -
> - uint64_t Offset = MFI->allocateKernArg(LocVT.getStoreSize(),
> - ArgFlags.getOrigAlign());
> - State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT, LocInfo));
> - return true;
> -}
> -
> static bool allocateCCRegs(unsigned ValNo, MVT ValVT, MVT LocVT,
> CCValAssign::LocInfo LocInfo,
> ISD::ArgFlagsTy ArgFlags, CCState &State,
> @@ -910,74 +899,118 @@ CCAssignFn *AMDGPUCallLowering::CCAssign
> /// for each individual part is i8. We pass the memory type as LocVT to the
> /// calling convention analysis function and the register type (Ins[x].VT) as
> /// the ValVT.
> -void AMDGPUTargetLowering::analyzeFormalArgumentsCompute(CCState &State,
> - const SmallVectorImpl<ISD::InputArg> &Ins) const {
> - for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
> - const ISD::InputArg &In = Ins[i];
> - EVT MemVT;
> -
> - unsigned NumRegs = getNumRegisters(State.getContext(), In.ArgVT);
> -
> - if (!Subtarget->isAmdHsaOS() &&
> - (In.ArgVT == MVT::i16 || In.ArgVT == MVT::i8 || In.ArgVT == MVT::f16)) {
> - // The ABI says the caller will extend these values to 32-bits.
> - MemVT = In.ArgVT.isInteger() ? MVT::i32 : MVT::f32;
> - } else if (NumRegs == 1) {
> - // This argument is not split, so the IR type is the memory type.
> - assert(!In.Flags.isSplit());
> - if (In.ArgVT.isExtended()) {
> - // We have an extended type, like i24, so we should just use the register type
> - MemVT = In.VT;
> +void AMDGPUTargetLowering::analyzeFormalArgumentsCompute(
> + CCState &State,
> + const SmallVectorImpl<ISD::InputArg> &Ins) const {
> + const MachineFunction &MF = State.getMachineFunction();
> + const Function &Fn = MF.getFunction();
> + LLVMContext &Ctx = Fn.getParent()->getContext();
> + const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(MF);
> + const unsigned ExplicitOffset = ST.getExplicitKernelArgOffset(Fn);
> +
> + unsigned MaxAlign = 1;
> + uint64_t ExplicitArgOffset = 0;
> + const DataLayout &DL = Fn.getParent()->getDataLayout();
> +
> + unsigned InIndex = 0;
> +
> + for (const Argument &Arg : Fn.args()) {
> + Type *BaseArgTy = Arg.getType();
> + unsigned Align = DL.getABITypeAlignment(BaseArgTy);
> + MaxAlign = std::max(Align, MaxAlign);
> + unsigned AllocSize = DL.getTypeAllocSize(BaseArgTy);
> +
> + uint64_t ArgOffset = alignTo(ExplicitArgOffset, Align) + ExplicitOffset;
> + ExplicitArgOffset = alignTo(ExplicitArgOffset, Align) + AllocSize;
> +
> + // We're basically throwing away everything passed into us and starting over
> + // to get accurate in-memory offsets. The "PartOffset" is completely useless
> + // to us as computed in Ins.
> + //
> + // We also need to figure out what type legalization is trying to do to get
> + // the correct memory offsets.
> +
> + SmallVector<EVT, 16> ValueVTs;
> + SmallVector<uint64_t, 16> Offsets;
> + ComputeValueVTs(*this, DL, BaseArgTy, ValueVTs, &Offsets, ArgOffset);
> +
> + for (unsigned Value = 0, NumValues = ValueVTs.size();
> + Value != NumValues; ++Value) {
> + uint64_t BasePartOffset = Offsets[Value];
> +
> + EVT ArgVT = ValueVTs[Value];
> + EVT MemVT = ArgVT;
> + MVT RegisterVT =
> + getRegisterTypeForCallingConv(Ctx, ArgVT);
> + unsigned NumRegs =
> + getNumRegistersForCallingConv(Ctx, ArgVT);
> +
> + if (!Subtarget->isAmdHsaOS() &&
> + (ArgVT == MVT::i16 || ArgVT == MVT::i8 || ArgVT == MVT::f16)) {
> + // The ABI says the caller will extend these values to 32-bits.
> + MemVT = ArgVT.isInteger() ? MVT::i32 : MVT::f32;
> + } else if (NumRegs == 1) {
> + // This argument is not split, so the IR type is the memory type.
> + if (ArgVT.isExtended()) {
> + // We have an extended type, like i24, so we should just use the
> + // register type.
> + MemVT = RegisterVT;
> + } else {
> + MemVT = ArgVT;
> + }
> + } else if (ArgVT.isVector() && RegisterVT.isVector() &&
> + ArgVT.getScalarType() == RegisterVT.getScalarType()) {
> + assert(ArgVT.getVectorNumElements() > RegisterVT.getVectorNumElements());
> + // We have a vector value which has been split into a vector with
> + // the same scalar type, but fewer elements. This should handle
> + // all the floating-point vector types.
> + MemVT = RegisterVT;
> + } else if (ArgVT.isVector() &&
> + ArgVT.getVectorNumElements() == NumRegs) {
> + // This arg has been split so that each element is stored in a separate
> + // register.
> + MemVT = ArgVT.getScalarType();
> + } else if (ArgVT.isExtended()) {
> + // We have an extended type, like i65.
> + MemVT = RegisterVT;
> } else {
> - MemVT = In.ArgVT;
> + unsigned MemoryBits = ArgVT.getStoreSizeInBits() / NumRegs;
> + assert(ArgVT.getStoreSizeInBits() % NumRegs == 0);
> + if (RegisterVT.isInteger()) {
> + MemVT = EVT::getIntegerVT(State.getContext(), MemoryBits);
> + } else if (RegisterVT.isVector()) {
> + assert(!RegisterVT.getScalarType().isFloatingPoint());
> + unsigned NumElements = RegisterVT.getVectorNumElements();
> + assert(MemoryBits % NumElements == 0);
> + // This vector type has been split into another vector type with
> + // a different elements size.
> + EVT ScalarVT = EVT::getIntegerVT(State.getContext(),
> + MemoryBits / NumElements);
> + MemVT = EVT::getVectorVT(State.getContext(), ScalarVT, NumElements);
> + } else {
> + llvm_unreachable("cannot deduce memory type.");
> + }
> }
> - } else if (In.ArgVT.isVector() && In.VT.isVector() &&
> - In.ArgVT.getScalarType() == In.VT.getScalarType()) {
> - assert(In.ArgVT.getVectorNumElements() > In.VT.getVectorNumElements());
> - // We have a vector value which has been split into a vector with
> - // the same scalar type, but fewer elements. This should handle
> - // all the floating-point vector types.
> - MemVT = In.VT;
> - } else if (In.ArgVT.isVector() &&
> - In.ArgVT.getVectorNumElements() == NumRegs) {
> - // This arg has been split so that each element is stored in a separate
> - // register.
> - MemVT = In.ArgVT.getScalarType();
> - } else if (In.ArgVT.isExtended()) {
> - // We have an extended type, like i65.
> - MemVT = In.VT;
> - } else {
> - unsigned MemoryBits = In.ArgVT.getStoreSizeInBits() / NumRegs;
> - assert(In.ArgVT.getStoreSizeInBits() % NumRegs == 0);
> - if (In.VT.isInteger()) {
> - MemVT = EVT::getIntegerVT(State.getContext(), MemoryBits);
> - } else if (In.VT.isVector()) {
> - assert(!In.VT.getScalarType().isFloatingPoint());
> - unsigned NumElements = In.VT.getVectorNumElements();
> - assert(MemoryBits % NumElements == 0);
> - // This vector type has been split into another vector type with
> - // a different elements size.
> - EVT ScalarVT = EVT::getIntegerVT(State.getContext(),
> - MemoryBits / NumElements);
> - MemVT = EVT::getVectorVT(State.getContext(), ScalarVT, NumElements);
> - } else {
> - llvm_unreachable("cannot deduce memory type.");
> +
> + // Convert one element vectors to scalar.
> + if (MemVT.isVector() && MemVT.getVectorNumElements() == 1)
> + MemVT = MemVT.getScalarType();
> +
> + if (MemVT.isExtended()) {
> + // This should really only happen if we have vec3 arguments
> + assert(MemVT.isVector() && MemVT.getVectorNumElements() == 3);
> + MemVT = MemVT.getPow2VectorType(State.getContext());
> }
> - }
>
> - // Convert one element vectors to scalar.
> - if (MemVT.isVector() && MemVT.getVectorNumElements() == 1)
> - MemVT = MemVT.getScalarType();
> -
> - if (MemVT.isExtended()) {
> - // This should really only happen if we have vec3 arguments
> - assert(MemVT.isVector() && MemVT.getVectorNumElements() == 3);
> - MemVT = MemVT.getPow2VectorType(State.getContext());
> + unsigned PartOffset = 0;
> + for (unsigned i = 0; i != NumRegs; ++i) {
> + State.addLoc(CCValAssign::getCustomMem(InIndex++, RegisterVT,
> + BasePartOffset + PartOffset,
> + MemVT.getSimpleVT(),
> + CCValAssign::Full));
> + PartOffset += MemVT.getStoreSize();
> + }
> }
> -
> - assert(MemVT.isSimple());
> - allocateKernArg(i, In.VT, MemVT.getSimpleVT(), CCValAssign::Full, In.Flags,
> - State);
> }
> }
>
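> To make the padding problem concrete, take the struct_argument_alignment
> test below, ({i32, i64} %arg0, i8, {i32, i64} %arg1). Walking the raw IR
> types with their ABI alignments gives:
>
>   {i32, i64} %arg0: align 8, size 16 -> offset 0  (i32 at 0x0, i64 at 0x8)
>   i8:               align 1, size 1  -> offset 16
>   {i32, i64} %arg1: align 8, size 16 -> offset 24 (i32 at 0x18, i64 at 0x20)
>
> which matches the s_load offsets the updated tests expect, and a total
> explicit size of 40 bytes.
>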
>
> Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.h
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.h?rev=337021&r1=337020&r2=337021&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.h (original)
> +++ llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.h Fri Jul 13 09:40:25 2018
> @@ -122,8 +122,11 @@ protected:
> SDValue LowerDIVREM24(SDValue Op, SelectionDAG &DAG, bool sign) const;
> void LowerUDIVREM64(SDValue Op, SelectionDAG &DAG,
> SmallVectorImpl<SDValue> &Results) const;
> - void analyzeFormalArgumentsCompute(CCState &State,
> - const SmallVectorImpl<ISD::InputArg> &Ins) const;
> +
> + void analyzeFormalArgumentsCompute(
> + CCState &State,
> + const SmallVectorImpl<ISD::InputArg> &Ins) const;
> +
> public:
> AMDGPUTargetLowering(const TargetMachine &TM, const AMDGPUSubtarget &STI);
>
>
> Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp?rev=337021&r1=337020&r2=337021&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp (original)
> +++ llvm/trunk/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp Fri Jul 13 09:40:25 2018
> @@ -77,8 +77,9 @@ bool AMDGPULowerKernelArguments::runOnFu
> const unsigned KernArgBaseAlign = 16; // FIXME: Increase if necessary
> const uint64_t BaseOffset = ST.getExplicitKernelArgOffset(F);
>
> + unsigned MaxAlign;
> // FIXME: Alignment is broken broken with explicit arg offset.;
> - const uint64_t TotalKernArgSize = ST.getKernArgSegmentSize(F);
> + const uint64_t TotalKernArgSize = ST.getKernArgSegmentSize(F, MaxAlign);
> if (TotalKernArgSize == 0)
> return false;
>
> @@ -91,13 +92,11 @@ bool AMDGPULowerKernelArguments::runOnFu
> Attribute::getWithDereferenceableBytes(Ctx, TotalKernArgSize));
>
> unsigned AS = KernArgSegment->getType()->getPointerAddressSpace();
> - unsigned MaxAlign = 1;
> uint64_t ExplicitArgOffset = 0;
>
> for (Argument &Arg : F.args()) {
> Type *ArgTy = Arg.getType();
> unsigned Align = DL.getABITypeAlignment(ArgTy);
> - MaxAlign = std::max(Align, MaxAlign);
> unsigned Size = DL.getTypeSizeInBits(ArgTy);
> unsigned AllocSize = DL.getTypeAllocSize(ArgTy);
>
>
> Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp?rev=337021&r1=337020&r2=337021&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp (original)
> +++ llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp Fri Jul 13 09:40:25 2018
> @@ -24,16 +24,23 @@ AMDGPUMachineFunction::AMDGPUMachineFunc
> NoSignedZerosFPMath(MF.getTarget().Options.NoSignedZerosFPMath),
> MemoryBound(false),
> WaveLimiter(false) {
> + const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(MF);
> +
> // FIXME: Should initialize KernArgSize based on ExplicitKernelArgOffset,
> // except reserved size is not correctly aligned.
> + const Function &F = MF.getFunction();
>
> if (auto *Resolver = MF.getMMI().getResolver()) {
> if (AMDGPUPerfHintAnalysis *PHA = static_cast<AMDGPUPerfHintAnalysis*>(
> Resolver->getAnalysisIfAvailable(&AMDGPUPerfHintAnalysisID, true))) {
> - MemoryBound = PHA->isMemoryBound(&MF.getFunction());
> - WaveLimiter = PHA->needsWaveLimiter(&MF.getFunction());
> + MemoryBound = PHA->isMemoryBound(&F);
> + WaveLimiter = PHA->needsWaveLimiter(&F);
> }
> }
> +
> + CallingConv::ID CC = F.getCallingConv();
> + if (CC == CallingConv::AMDGPU_KERNEL || CC == CallingConv::SPIR_KERNEL)
> + ExplicitKernArgSize = ST.getExplicitKernArgSize(F, MaxKernArgAlign);
> }
>
> unsigned AMDGPUMachineFunction::allocateLDSGlobal(const DataLayout &DL,
>
> Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineFunction.h
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineFunction.h?rev=337021&r1=337020&r2=337021&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineFunction.h (original)
> +++ llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineFunction.h Fri Jul 13 09:40:25 2018
> @@ -23,8 +23,8 @@ class AMDGPUMachineFunction : public Mac
> SmallDenseMap<const GlobalValue *, unsigned, 4> LocalMemoryObjects;
>
> protected:
> - uint64_t ExplicitKernArgSize;
> - unsigned MaxKernArgAlign;
> + uint64_t ExplicitKernArgSize; // Cache for this.
> + unsigned MaxKernArgAlign; // Cache for this.
>
> /// Number of bytes in the LDS that are being used.
> unsigned LDSSize;
> @@ -44,17 +44,6 @@ protected:
> public:
> AMDGPUMachineFunction(const MachineFunction &MF);
>
> - uint64_t allocateKernArg(uint64_t Size, unsigned Align) {
> - assert(isPowerOf2_32(Align));
> - ExplicitKernArgSize = alignTo(ExplicitKernArgSize, Align);
> -
> - uint64_t Result = ExplicitKernArgSize;
> - ExplicitKernArgSize += Size;
> -
> - MaxKernArgAlign = std::max(Align, MaxKernArgAlign);
> - return Result;
> - }
> -
> uint64_t getExplicitKernArgSize() const {
> return ExplicitKernArgSize;
> }
>
> Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUSubtarget.cpp?rev=337021&r1=337020&r2=337021&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Target/AMDGPU/AMDGPUSubtarget.cpp (original)
> +++ llvm/trunk/lib/Target/AMDGPU/AMDGPUSubtarget.cpp Fri Jul 13 09:40:25 2018
> @@ -209,7 +209,7 @@ GCNSubtarget::GCNSubtarget(const Triple
>
> FeatureDisable(false),
> InstrInfo(initializeSubtargetDependencies(TT, GPU, FS)),
> - TLInfo(TM, *this),
> + TLInfo(TM, *this),
> FrameLowering(TargetFrameLowering::StackGrowsUp,
> getStackAlignment(), 0) {
> AS = AMDGPU::getAMDGPUAS(TT);
> CallLoweringInfo.reset(new AMDGPUCallLowering(*getTargetLowering()));
> @@ -406,6 +406,44 @@ bool AMDGPUSubtarget::makeLIDRangeMetada
> return true;
> }
>
> +uint64_t AMDGPUSubtarget::getExplicitKernArgSize(const Function &F,
> + unsigned &MaxAlign) const {
> + assert(F.getCallingConv() == CallingConv::AMDGPU_KERNEL ||
> + F.getCallingConv() == CallingConv::SPIR_KERNEL);
> +
> + const DataLayout &DL = F.getParent()->getDataLayout();
> + uint64_t ExplicitArgBytes = 0;
> + MaxAlign = 1;
> +
> + for (const Argument &Arg : F.args()) {
> + Type *ArgTy = Arg.getType();
> +
> + unsigned Align = DL.getABITypeAlignment(ArgTy);
> + uint64_t AllocSize = DL.getTypeAllocSize(ArgTy);
> + ExplicitArgBytes = alignTo(ExplicitArgBytes, Align) + AllocSize;
> + MaxAlign = std::max(MaxAlign, Align);
> + }
> +
> + return ExplicitArgBytes;
> +}
> +
> +unsigned AMDGPUSubtarget::getKernArgSegmentSize(const Function &F,
> + unsigned &MaxAlign) const {
> + uint64_t ExplicitArgBytes = getExplicitKernArgSize(F, MaxAlign);
> +
> + unsigned ExplicitOffset = getExplicitKernelArgOffset(F);
> +
> + uint64_t TotalSize = ExplicitOffset + ExplicitArgBytes;
> + unsigned ImplicitBytes = getImplicitArgNumBytes(F);
> + if (ImplicitBytes != 0) {
> + unsigned Alignment = getAlignmentForImplicitArgPtr();
> + TotalSize = alignTo(ExplicitArgBytes, Alignment) + ImplicitBytes;
> + }
> +
> + // Being able to dereference past the end is useful for emitting scalar loads.
> + return alignTo(TotalSize, 4);
> +}
> +
> R600Subtarget::R600Subtarget(const Triple &TT, StringRef GPU, StringRef FS,
> const TargetMachine &TM) :
> R600GenSubtargetInfo(TT, GPU, FS),
> @@ -446,40 +484,6 @@ bool GCNSubtarget::isVGPRSpillingEnabled
> return EnableVGPRSpilling || !AMDGPU::isShader(F.getCallingConv());
> }
>
> -uint64_t GCNSubtarget::getExplicitKernArgSize(const Function &F) const {
> - assert(F.getCallingConv() == CallingConv::AMDGPU_KERNEL);
> -
> - const DataLayout &DL = F.getParent()->getDataLayout();
> - uint64_t ExplicitArgBytes = 0;
> - for (const Argument &Arg : F.args()) {
> - Type *ArgTy = Arg.getType();
> -
> - unsigned Align = DL.getABITypeAlignment(ArgTy);
> - uint64_t AllocSize = DL.getTypeAllocSize(ArgTy);
> - ExplicitArgBytes = alignTo(ExplicitArgBytes, Align) + AllocSize;
> - }
> -
> - return ExplicitArgBytes;
> -}
> -
> -unsigned GCNSubtarget::getKernArgSegmentSize(const Function &F,
> - int64_t ExplicitArgBytes) const {
> - if (ExplicitArgBytes == -1)
> - ExplicitArgBytes = getExplicitKernArgSize(F);
> -
> - unsigned ExplicitOffset = getExplicitKernelArgOffset(F);
> -
> - uint64_t TotalSize = ExplicitOffset + ExplicitArgBytes;
> - unsigned ImplicitBytes = getImplicitArgNumBytes(F);
> - if (ImplicitBytes != 0) {
> - unsigned Alignment = getAlignmentForImplicitArgPtr();
> - TotalSize = alignTo(ExplicitArgBytes, Alignment) + ImplicitBytes;
> - }
> -
> - // Being able to dereference past the end is useful for emitting scalar loads.
> - return alignTo(TotalSize, 4);
> -}
> -
> unsigned GCNSubtarget::getOccupancyWithNumSGPRs(unsigned SGPRs) const {
> if (getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
> if (SGPRs <= 80)
>
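> As a worked example of getKernArgSegmentSize above, take the i65_arg test:
> assuming the HSA explicit offset of 0, the i65 addrspace(1)* out pointer
> occupies bytes 0-7, and the i65 (alloc size 16, align 8) sits at offset 8,
> so alignTo(24, 4) yields the expected kernarg_segment_byte_size = 24.
>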
> Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUSubtarget.h
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUSubtarget.h?rev=337021&r1=337020&r2=337021&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Target/AMDGPU/AMDGPUSubtarget.h (original)
> +++ llvm/trunk/lib/Target/AMDGPU/AMDGPUSubtarget.h Fri Jul 13 09:40:25 2018
> @@ -51,7 +51,7 @@ public:
> enum Generation {
> R600 = 0,
> R700 = 1,
> - EVERGREEN = 2,
> + EVERGREEN = 2,
> NORTHERN_ISLANDS = 3,
> SOUTHERN_ISLANDS = 4,
> SEA_ISLANDS = 5,
> @@ -82,7 +82,7 @@ public:
>
> static const AMDGPUSubtarget &get(const MachineFunction &MF);
> static const AMDGPUSubtarget &get(const TargetMachine &TM,
> - const Function &F);
> + const Function &F);
>
> /// \returns Default range flat work group size for a calling convention.
> std::pair<unsigned, unsigned> getDefaultFlatWorkGroupSize(CallingConv::ID CC) const;
> @@ -231,6 +231,18 @@ public:
> /// Creates value range metadata on an workitemid.* inrinsic call or load.
> bool makeLIDRangeMetadata(Instruction *I) const;
>
> + /// \returns Number of bytes of arguments that are passed to a shader or
> + /// kernel in addition to the explicit ones declared for the function.
> + unsigned getImplicitArgNumBytes(const Function &F) const {
> + if (isMesaKernel(F))
> + return 16;
> + return AMDGPU::getIntegerAttribute(F, "amdgpu-implicitarg-num-bytes", 0);
> + }
> + uint64_t getExplicitKernArgSize(const Function &F,
> + unsigned &MaxAlign) const;
> + unsigned getKernArgSegmentSize(const Function &F,
> + unsigned &MaxAlign) const;
> +
> virtual ~AMDGPUSubtarget() {}
> };
>
> @@ -669,14 +681,6 @@ public:
> return D16PreservesUnusedBits;
> }
>
> - /// \returns Number of bytes of arguments that are passed to a shader or
> - /// kernel in addition to the explicit ones declared for the function.
> - unsigned getImplicitArgNumBytes(const Function &F) const {
> - if (isMesaKernel(F))
> - return 16;
> - return AMDGPU::getIntegerAttribute(F, "amdgpu-implicitarg-num-bytes", 0);
> - }
> -
> // Scratch is allocated in 256 dword per wave blocks for the entire
> // wavefront. When viewed from the perspecive of an arbitrary workitem, this
> // is 4-byte aligned.
> @@ -825,10 +829,6 @@ public:
> return getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS;
> }
>
> - uint64_t getExplicitKernArgSize(const Function &F) const;
> - unsigned getKernArgSegmentSize(const Function &F,
> - int64_t ExplicitArgBytes = -1) const;
> -
> /// Return the maximum number of waves per SIMD for kernels using \p SGPRs
> /// SGPRs
> unsigned getOccupancyWithNumSGPRs(unsigned SGPRs) const;
>
> Modified: llvm/trunk/lib/Target/AMDGPU/R600.td
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/R600.td?rev=337021&r1=337020&r2=337021&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Target/AMDGPU/R600.td (original)
> +++ llvm/trunk/lib/Target/AMDGPU/R600.td Fri Jul 13 09:40:25 2018
> @@ -52,8 +52,3 @@ def CC_R600 : CallingConv<[
> T30_XYZW, T31_XYZW, T32_XYZW
> ]>>>
> ]>;
> -
> -// Calling convention for compute kernels
> -def CC_R600_Kernel : CallingConv<[
> - CCCustom<"allocateKernArg">
> -]>;
>
> Modified: llvm/trunk/lib/Target/AMDGPU/R600ISelLowering.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/R600ISelLowering.cpp?rev=337021&r1=337020&r2=337021&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Target/AMDGPU/R600ISelLowering.cpp (original)
> +++ llvm/trunk/lib/Target/AMDGPU/R600ISelLowering.cpp Fri Jul 13 09:40:25 2018
> @@ -50,18 +50,6 @@
>
> using namespace llvm;
>
> -static bool allocateKernArg(unsigned ValNo, MVT ValVT, MVT LocVT,
> - CCValAssign::LocInfo LocInfo,
> - ISD::ArgFlagsTy ArgFlags, CCState &State) {
> - MachineFunction &MF = State.getMachineFunction();
> - AMDGPUMachineFunction *MFI = MF.getInfo<AMDGPUMachineFunction>();
> -
> - uint64_t Offset = MFI->allocateKernArg(LocVT.getStoreSize(),
> - ArgFlags.getOrigAlign());
> - State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT, LocInfo));
> - return true;
> -}
> -
> #include "R600GenCallingConv.inc"
>
> R600TargetLowering::R600TargetLowering(const TargetMachine &TM,
> @@ -234,7 +222,7 @@ R600TargetLowering::R600TargetLowering(c
> setOperationAction(ISD::FMA, MVT::f32, Expand);
> setOperationAction(ISD::FMA, MVT::f64, Expand);
> }
> -
> +
> // FIXME: This was moved from AMDGPUTargetLowering, I'm not sure if we
> // need it for R600.
> if (!Subtarget->hasFP32Denormals())
> @@ -1583,7 +1571,7 @@ CCAssignFn *R600TargetLowering::CCAssign
> case CallingConv::C:
> case CallingConv::Fast:
> case CallingConv::Cold:
> - return CC_R600_Kernel;
> + llvm_unreachable("kernels should not be handled here");
> case CallingConv::AMDGPU_VS:
> case CallingConv::AMDGPU_GS:
> case CallingConv::AMDGPU_PS:
> @@ -1658,13 +1646,12 @@ SDValue R600TargetLowering::LowerFormalA
>
> unsigned ValBase = ArgLocs[In.getOrigArgIndex()].getLocMemOffset();
> unsigned PartOffset = VA.getLocMemOffset();
> - unsigned Offset = Subtarget->getExplicitKernelArgOffset(MF.getFunction()) +
> - VA.getLocMemOffset();
>
> MachinePointerInfo PtrInfo(UndefValue::get(PtrTy), PartOffset - ValBase);
> SDValue Arg = DAG.getLoad(
> ISD::UNINDEXED, Ext, VT, DL, Chain,
> - DAG.getConstant(Offset, DL, MVT::i32), DAG.getUNDEF(MVT::i32),
> PtrInfo,
> + DAG.getConstant(PartOffset, DL, MVT::i32), DAG.getUNDEF(MVT::i32),
> + PtrInfo,
> MemVT, /* Alignment = */ 4, MachineMemOperand::MONonTemporal |
> MachineMemOperand::MODereferenceable |
> MachineMemOperand::MOInvariant);
>
> Modified: llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp?rev=337021&r1=337020&r2=337021&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp (original)
> +++ llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp Fri Jul 13 09:40:25 2018
> @@ -1164,8 +1164,8 @@ SDValue SITargetLowering::lowerKernargMe
> // Try to avoid using an extload by loading earlier than the argument address,
> // and extracting the relevant bits. The load should hopefully be merged with
> // the previous argument.
> - if (Align < 4) {
> - assert(MemVT.getStoreSize() < 4);
> + if (MemVT.getStoreSize() < 4 && Align < 4) {
> + // TODO: Handle align < 4 and size >= 4 (can happen with packed structs).
> int64_t AlignDownOffset = alignDown(Offset, 4);
> int64_t OffsetDiff = Offset - AlignDownOffset;
>
> @@ -1781,7 +1781,6 @@ SDValue SITargetLowering::LowerFormalArg
> // FIXME: Alignment of explicit arguments totally broken with non-0 explicit
> // kern arg offset.
> const unsigned KernelArgBaseAlign = 16;
> - const unsigned ExplicitOffset = Subtarget->getExplicitKernelArgOffset(Fn);
>
> for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {
> const ISD::InputArg &Arg = Ins[i];
> @@ -1797,11 +1796,9 @@ SDValue SITargetLowering::LowerFormalArg
> VT = Ins[i].VT;
> EVT MemVT = VA.getLocVT();
>
> - const uint64_t Offset = ExplicitOffset + VA.getLocMemOffset();
> + const uint64_t Offset = VA.getLocMemOffset();
> unsigned Align = MinAlign(KernelArgBaseAlign, Offset);
>
> - // The first 36 bytes of the input buffer contains information about
> - // thread group and global sizes for clover.
> SDValue Arg = lowerKernargMemParameter(
> DAG, VT, MemVT, DL, Chain, Offset, Align, Ins[i].Flags.isSExt(),
> &Ins[i]);
> Chains.push_back(Arg.getValue(1));
>
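> A worked example of the widened lowerKernargMemParameter path above (my
> reading; offsets illustrative): for a 1-byte argument at Offset 9,
> AlignDownOffset = alignDown(9, 4) = 8 and OffsetDiff = 1, so a full dword
> is loaded at offset 8 and the byte is extracted from it, instead of
> emitting a separate sub-dword extending load.
>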
> Modified: llvm/trunk/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp?rev=337021&r1=337020&r2=337021&view=diff
> ==============================================================================
> --- llvm/trunk/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp (original)
> +++ llvm/trunk/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp Fri Jul 13 09:40:25 2018
> @@ -54,6 +54,16 @@ SIMachineFunctionInfo::SIMachineFunction
>
> Occupancy = getMaxWavesPerEU();
> limitOccupancy(MF);
> + CallingConv::ID CC = F.getCallingConv();
> +
> + if (CC == CallingConv::AMDGPU_KERNEL || CC == CallingConv::SPIR_KERNEL) {
> + if (!F.arg_empty())
> + KernargSegmentPtr = true;
> + WorkGroupIDX = true;
> + WorkItemIDX = true;
> + } else if (CC == CallingConv::AMDGPU_PS) {
> + PSInputAddr = AMDGPU::getInitialPSInputAddr(F);
> + }
>
> if (!isEntryFunction()) {
> // Non-entry functions have no special inputs for now, other registers
> @@ -73,21 +83,11 @@ SIMachineFunctionInfo::SIMachineFunction
> } else {
> if (F.hasFnAttribute("amdgpu-implicitarg-ptr")) {
> KernargSegmentPtr = true;
> - assert(MaxKernArgAlign == 0);
> - MaxKernArgAlign = ST.getAlignmentForImplicitArgPtr();
> + MaxKernArgAlign = std::max(ST.getAlignmentForImplicitArgPtr(),
> + MaxKernArgAlign);
> }
> }
>
> - CallingConv::ID CC = F.getCallingConv();
> - if (CC == CallingConv::AMDGPU_KERNEL || CC == CallingConv::SPIR_KERNEL) {
> - if (!F.arg_empty())
> - KernargSegmentPtr = true;
> - WorkGroupIDX = true;
> - WorkItemIDX = true;
> - } else if (CC == CallingConv::AMDGPU_PS) {
> - PSInputAddr = AMDGPU::getInitialPSInputAddr(F);
> - }
> -
> if (ST.debuggerEmitPrologue()) {
> // Enable everything.
> WorkGroupIDX = true;
>
> Modified: llvm/trunk/test/CodeGen/AMDGPU/kernel-args.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/kernel-args.ll?rev=337021&r1=337020&r2=337021&view=diff
> ==============================================================================
> --- llvm/trunk/test/CodeGen/AMDGPU/kernel-args.ll (original)
> +++ llvm/trunk/test/CodeGen/AMDGPU/kernel-args.ll Fri Jul 13 09:40:25 2018
> @@ -589,6 +589,17 @@ entry:
> ; ret void
> ; }
>
> +; FUNC-LABEL: {{^}}i65_arg:
> +; HSA-VI: kernarg_segment_byte_size = 24
> +; HSA-VI: kernarg_segment_alignment = 4
> +; HSA-VI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[4:5], 0x0
> +; HSA-VI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[4:5], 0x8
> +define amdgpu_kernel void @i65_arg(i65 addrspace(1)* nocapture %out, i65 %in) nounwind {
> +entry:
> + store i65 %in, i65 addrspace(1)* %out, align 4
> + ret void
> +}
> +
> ; FUNC-LABEL: {{^}}i1_arg:
> ; HSA-VI: kernarg_segment_byte_size = 12
> ; HSA-VI: kernarg_segment_alignment = 4
> @@ -651,7 +662,7 @@ define amdgpu_kernel void @i1_arg_sext_i
> }
>
> ; FUNC-LABEL: {{^}}empty_struct_arg:
> -; HSA: kernarg_segment_byte_size = 0
> +; HSA-VI: kernarg_segment_byte_size = 0
> define amdgpu_kernel void @empty_struct_arg({} %in) nounwind {
> ret void
> }
> @@ -667,11 +678,11 @@ define amdgpu_kernel void @empty_struct_
>
> ; FIXME: Total argument size is computed wrong
> ; FUNC-LABEL: {{^}}struct_argument_alignment:
> -; HSA: kernarg_segment_byte_size = 40
> -; HSA: s_load_dword s{{[0-9]+}}, s[4:5], 0x0
> -; HSA: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[4:5], 0x8
> -; HSA: s_load_dword s{{[0-9]+}}, s[4:5], 0x18
> -; HSA: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[4:5], 0x20
> +; HSA-VI: kernarg_segment_byte_size = 40
> +; HSA-VI: s_load_dword s{{[0-9]+}}, s[4:5], 0x0
> +; HSA-VI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[4:5], 0x8
> +; HSA-VI: s_load_dword s{{[0-9]+}}, s[4:5], 0x18
> +; HSA-VI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[4:5], 0x20
> define amdgpu_kernel void @struct_argument_alignment({i32, i64} %arg0, i8, {i32, i64} %arg1) {
> %val0 = extractvalue {i32, i64} %arg0, 0
> %val1 = extractvalue {i32, i64} %arg0, 1
> @@ -687,11 +698,11 @@ define amdgpu_kernel void @struct_argume
> ; No padding between i8 and next struct, but round up at end to 4 byte
> ; multiple.
> ; FUNC-LABEL: {{^}}packed_struct_argument_alignment:
> -; HSA: kernarg_segment_byte_size = 28
> -; HSA: s_load_dword s{{[0-9]+}}, s[4:5], 0x0
> -; HSA: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[4:5], 0x4
> -; HSA: s_load_dword s{{[0-9]+}}, s[4:5], 0xc
> -; HSA: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[4:5], 0x10
> +; HSA-VI: kernarg_segment_byte_size = 28
> +; HSA-VI: s_load_dword s{{[0-9]+}}, s[4:5], 0x0
> +; HSA-VI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[4:5], 0x4
> +; HSA-VI: s_load_dword s{{[0-9]+}}, s[4:5], 0xc
> +; HSA-VI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[4:5], 0x10
> define amdgpu_kernel void @packed_struct_argument_alignment(<{i32, i64}> %arg0, i8, <{i32, i64}> %arg1) {
> %val0 = extractvalue <{i32, i64}> %arg0, 0
> %val1 = extractvalue <{i32, i64}> %arg0, 1
> @@ -703,3 +714,47 @@ define amdgpu_kernel void @packed_struct
> store volatile i64 %val3, i64 addrspace(1)* null
> ret void
> }
> +
> +; GCN-LABEL: {{^}}struct_argument_alignment_after:
> +; HSA-VI: kernarg_segment_byte_size = 64
> +; HSA-VI: s_load_dword s{{[0-9]+}}, s[4:5], 0x0
> +; HSA-VI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[4:5], 0x8
> +; HSA-VI: s_load_dword s{{[0-9]+}}, s[4:5], 0x18
> +; HSA-VI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[4:5], 0x20
> +; HSA-VI: s_load_dwordx4 s{{\[[0-9]+:[0-9]+\]}}, s[4:5], 0x30
> +define amdgpu_kernel void @struct_argument_alignment_after({i32, i64} %arg0, i8, {i32, i64} %arg2, i8, <4 x i32> %arg4) {
> + %val0 = extractvalue {i32, i64} %arg0, 0
> + %val1 = extractvalue {i32, i64} %arg0, 1
> + %val2 = extractvalue {i32, i64} %arg2, 0
> + %val3 = extractvalue {i32, i64} %arg2, 1
> + store volatile i32 %val0, i32 addrspace(1)* null
> + store volatile i64 %val1, i64 addrspace(1)* null
> + store volatile i32 %val2, i32 addrspace(1)* null
> + store volatile i64 %val3, i64 addrspace(1)* null
> + store volatile <4 x i32> %arg4, <4 x i32> addrspace(1)* null
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}array_3xi32:
> +; HSA-VI: s_load_dword s{{[0-9]+}}, s[4:5], 0x0
> +; HSA-VI: s_load_dword s{{[0-9]+}}, s[4:5], 0x4
> +; HSA-VI: s_load_dword s{{[0-9]+}}, s[4:5], 0x8
> +; HSA-VI: s_load_dword s{{[0-9]+}}, s[4:5], 0xc
> +define amdgpu_kernel void @array_3xi32(i16 %arg0, [3 x i32] %arg1) {
> + store volatile i16 %arg0, i16 addrspace(1)* undef
> + store volatile [3 x i32] %arg1, [3 x i32] addrspace(1)* undef
> + ret void
> +}
> +
> +; FIXME: Why not all scalar loads?
> +; GCN-LABEL: {{^}}array_3xi16:
> +; HSA-VI: s_add_u32 s{{[0-9]+}}, s4, 2
> +; HSA-VI: s_addc_u32 s{{[0-9]+}}, s5, 0
> +; HSA-VI: flat_load_ushort
> +; HSA-VI: s_load_dword s{{[0-9]+}}, s[4:5], 0x0
> +; HSA-VI: s_load_dword s{{[0-9]+}}, s[4:5], 0x4
> +define amdgpu_kernel void @array_3xi16(i8 %arg0, [3 x i16] %arg1) {
> + store volatile i8 %arg0, i8 addrspace(1)* undef
> + store volatile [3 x i16] %arg1, [3 x i16] addrspace(1)* undef
> + ret void
> +}
>
> Added: llvm/trunk/test/CodeGen/AMDGPU/kernel-argument-dag-lowering.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/kernel-argument-dag-lowering.ll?rev=337021&view=auto
> ==============================================================================
> --- llvm/trunk/test/CodeGen/AMDGPU/kernel-argument-dag-lowering.ll (added)
> +++ llvm/trunk/test/CodeGen/AMDGPU/kernel-argument-dag-lowering.ll Fri Jul 13 09:40:25 2018
> @@ -0,0 +1,132 @@
> +; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx900 -amdgpu-ir-lower-kernel-arguments=0 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=VI,GCN,HSA-VI,FUNC %s
> +
> +; Repeat of some problematic tests in kernel-args.ll, with the IR
> +; argument lowering pass disabled. Struct padding needs to be
> +; accounted for, as well as legalization of types changing offsets.
> +
> +; FUNC-LABEL: {{^}}i1_arg:
> +; HSA-VI: kernarg_segment_byte_size = 12
> +; HSA-VI: kernarg_segment_alignment = 4
> +
> +; GCN: s_load_dword s
> +; GCN: s_and_b32
> +define amdgpu_kernel void @i1_arg(i1 addrspace(1)* %out, i1 %x) nounwind {
> + store i1 %x, i1 addrspace(1)* %out, align 1
> + ret void
> +}
> +
> +; FUNC-LABEL: {{^}}v3i8_arg:
> +; HSA-VI: kernarg_segment_byte_size = 12
> +; HSA-VI: kernarg_segment_alignment = 4
> +; HSA-VI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[4:5], 0x0
> +; HSA-VI: s_load_dword s{{[0-9]+}}, s[4:5], 0x8
> +define amdgpu_kernel void @v3i8_arg(<3 x i8> addrspace(1)* nocapture %out, <3 x i8> %in) nounwind {
> +entry:
> + store <3 x i8> %in, <3 x i8> addrspace(1)* %out, align 4
> + ret void
> +}
> +
> +; FUNC-LABEL: {{^}}i65_arg:
> +; HSA-VI: kernarg_segment_byte_size = 24
> +; HSA-VI: kernarg_segment_alignment = 4
> +; HSA-VI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[4:5], 0x0
> +; HSA-VI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[4:5], 0x8
> +define amdgpu_kernel void @i65_arg(i65 addrspace(1)* nocapture %out, i65 %in) nounwind {
> +entry:
> + store i65 %in, i65 addrspace(1)* %out, align 4
> + ret void
> +}
> +
> +; FUNC-LABEL: {{^}}empty_struct_arg:
> +; HSA-VI: kernarg_segment_byte_size = 0
> +define amdgpu_kernel void @empty_struct_arg({} %in) nounwind {
> + ret void
> +}
> +
> +; The correct load offsets for these:
> +; load 4 from 0,
> +; load 8 from 8
> +; load 4 from 24
> +; load 8 from 32
> +
> +; With the SelectionDAG argument lowering, the alignments for the
> +; struct members is not properly considered, making these wrong.
> +
> +; FIXME: Total argument size is computed wrong
> +; FUNC-LABEL: {{^}}struct_argument_alignment:
> +; HSA-VI: kernarg_segment_byte_size = 40
> +; HSA-VI: s_load_dword s{{[0-9]+}}, s[4:5], 0x0
> +; HSA-VI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[4:5], 0x8
> +; HSA-VI: s_load_dword s{{[0-9]+}}, s[4:5], 0x18
> +; HSA-VI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[4:5], 0x20
> +define amdgpu_kernel void @struct_argument_alignment({i32, i64} %arg0, i8, {i32, i64} %arg1) {
> + %val0 = extractvalue {i32, i64} %arg0, 0
> + %val1 = extractvalue {i32, i64} %arg0, 1
> + %val2 = extractvalue {i32, i64} %arg1, 0
> + %val3 = extractvalue {i32, i64} %arg1, 1
> + store volatile i32 %val0, i32 addrspace(1)* null
> + store volatile i64 %val1, i64 addrspace(1)* null
> + store volatile i32 %val2, i32 addrspace(1)* null
> + store volatile i64 %val3, i64 addrspace(1)* null
> + ret void
> +}
> +
> +; No padding between i8 and next struct, but round up at end to 4 byte
> +; multiple.
> +; FUNC-LABEL: {{^}}packed_struct_argument_alignment:
> +; HSA-VI: kernarg_segment_byte_size = 28
> +; HSA-VI: global_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, off offset:13
> +; HSA-VI: global_load_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, off offset:17
> +; HSA-VI: s_load_dword s{{[0-9]+}}, s[4:5], 0x0
> +; HSA-VI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[4:5], 0x4
> +define amdgpu_kernel void @packed_struct_argument_alignment(<{i32, i64}> %arg0, i8, <{i32, i64}> %arg1) {
> + %val0 = extractvalue <{i32, i64}> %arg0, 0
> + %val1 = extractvalue <{i32, i64}> %arg0, 1
> + %val2 = extractvalue <{i32, i64}> %arg1, 0
> + %val3 = extractvalue <{i32, i64}> %arg1, 1
> + store volatile i32 %val0, i32 addrspace(1)* null
> + store volatile i64 %val1, i64 addrspace(1)* null
> + store volatile i32 %val2, i32 addrspace(1)* null
> + store volatile i64 %val3, i64 addrspace(1)* null
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}struct_argument_alignment_after:
> +; HSA-VI: kernarg_segment_byte_size = 64
> +; HSA-VI: s_load_dword s{{[0-9]+}}, s[4:5], 0x0
> +; HSA-VI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[4:5], 0x8
> +; HSA-VI: s_load_dword s{{[0-9]+}}, s[4:5], 0x18
> +; HSA-VI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[4:5], 0x20
> +; HSA-VI: s_load_dwordx4 s{{\[[0-9]+:[0-9]+\]}}, s[4:5], 0x30
> +define amdgpu_kernel void @struct_argument_alignment_after({i32, i64} %arg0, i8, {i32, i64} %arg2, i8, <4 x i32> %arg4) {
> + %val0 = extractvalue {i32, i64} %arg0, 0
> + %val1 = extractvalue {i32, i64} %arg0, 1
> + %val2 = extractvalue {i32, i64} %arg2, 0
> + %val3 = extractvalue {i32, i64} %arg2, 1
> + store volatile i32 %val0, i32 addrspace(1)* null
> + store volatile i64 %val1, i64 addrspace(1)* null
> + store volatile i32 %val2, i32 addrspace(1)* null
> + store volatile i64 %val3, i64 addrspace(1)* null
> + store volatile <4 x i32> %arg4, <4 x i32> addrspace(1)* null
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}array_3xi32:
> +; HSA-VI: s_load_dword s{{[0-9]+}}, s[4:5], 0x0
> +; HSA-VI: s_load_dword s{{[0-9]+}}, s[4:5], 0x4
> +; HSA-VI: s_load_dword s{{[0-9]+}}, s[4:5], 0x8
> +; HSA-VI: s_load_dword s{{[0-9]+}}, s[4:5], 0xc
> +define amdgpu_kernel void @array_3xi32(i16 %arg0, [3 x i32] %arg1) {
> + store volatile i16 %arg0, i16 addrspace(1)* undef
> + store volatile [3 x i32] %arg1, [3 x i32] addrspace(1)* undef
> + ret void
> +}
> +
> +; GCN-LABEL: {{^}}array_3xi16:
> +; HSA-VI: s_load_dword s{{[0-9]+}}, s[4:5], 0x0
> +; HSA-VI: s_load_dword s{{[0-9]+}}, s[4:5], 0x4
> +define amdgpu_kernel void @array_3xi16(i8 %arg0, [3 x i16] %arg1) {
> + store volatile i8 %arg0, i8 addrspace(1)* undef
> + store volatile [3 x i16] %arg1, [3 x i16] addrspace(1)* undef
> + ret void
> +}
>
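> The packed_struct_argument_alignment case in the new file also shows why
> the sub-dword handling matters: with the packed layout the second struct
> starts at byte 13, putting its i32 at offset 13 and its i64 at offset 17.
> Neither is 4-byte aligned, so they cannot be fetched with scalar loads and
> are instead emitted as the global_load at offset:13 and offset:17 checked
> above.
>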
> Modified: llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.implicitarg.ptr.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.implicitarg.ptr.ll?rev=337021&r1=337020&r2=337021&view=diff
> ==============================================================================
> --- llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.implicitarg.ptr.ll (original)
> +++ llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.implicitarg.ptr.ll Fri Jul 13 09:40:25 2018
> @@ -33,7 +33,7 @@ define amdgpu_kernel void @opencl_kernel
> ; GCN: enable_sgpr_kernarg_segment_ptr = 1
>
> ; HSA: kernarg_segment_byte_size = 112
> -; MESA: kernarg_segment_byte_size = 464
> +; MESA: kernarg_segment_byte_size = 128
>
> ; HSA: s_load_dword s0, s[4:5], 0x1c
> define amdgpu_kernel void @kernel_implicitarg_ptr([112 x i8]) #0 {
> @@ -47,7 +47,7 @@ define amdgpu_kernel void @kernel_implic
> ; GCN: enable_sgpr_kernarg_segment_ptr = 1
>
> ; HSA: kernarg_segment_byte_size = 160
> -; MESA: kernarg_segment_byte_size = 464
> +; MESA: kernarg_segment_byte_size = 128
>
> ; HSA: s_load_dword s0, s[4:5], 0x1c
> define amdgpu_kernel void @opencl_kernel_implicitarg_ptr([112 x i8]) #1 {
> @@ -118,10 +118,10 @@ define amdgpu_kernel void @opencl_kernel
> ; GCN-LABEL: {{^}}kernel_call_implicitarg_ptr_func:
> ; GCN: enable_sgpr_kernarg_segment_ptr = 1
> ; HSA: kernarg_segment_byte_size = 112
> -; MESA: kernarg_segment_byte_size = 464
> +; MESA: kernarg_segment_byte_size = 128
>
> ; HSA: s_add_u32 s6, s4, 0x70
> -; MESA: s_add_u32 s6, s4, 0x1c0
> +; MESA: s_add_u32 s6, s4, 0x70
>
> ; GCN: s_addc_u32 s7, s5, 0{{$}}
> ; GCN: s_swappc_b64
> @@ -133,10 +133,9 @@ define amdgpu_kernel void @kernel_call_i
> ; GCN-LABEL: {{^}}opencl_kernel_call_implicitarg_ptr_func:
> ; GCN: enable_sgpr_kernarg_segment_ptr = 1
> ; HSA: kernarg_segment_byte_size = 160
> -; MESA: kernarg_segment_byte_size = 464
> +; MESA: kernarg_segment_byte_size = 128
>
> -; HSA: s_add_u32 s6, s4, 0x70
> -; MESA: s_add_u32 s6, s4, 0x1c0
> +; GCN: s_add_u32 s6, s4, 0x70
>
> ; GCN: s_addc_u32 s7, s5, 0{{$}}
> ; GCN: s_swappc_b64
> @@ -219,8 +218,7 @@ define void @opencl_func_kernarg_implici
>
> ; GCN-LABEL: {{^}}kernel_call_kernarg_implicitarg_ptr_func:
> ; GCN: s_mov_b64 s[6:7], s[4:5]
> -; HSA: s_add_u32 s8, s6, 0x70
> -; MESA: s_add_u32 s8, s6, 0x1c0
> +; GCN: s_add_u32 s8, s6, 0x70
> ; GCN: s_addc_u32 s9, s7, 0
> ; GCN: s_swappc_b64
> define amdgpu_kernel void @kernel_call_kernarg_implicitarg_ptr_func([112 x i8]) #0 {
>
> Modified: llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.kernarg.segment.ptr.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.kernarg.segment.ptr.ll?rev=337021&r1=337020&r2=337021&view=diff
> ==============================================================================
> --- llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.kernarg.segment.ptr.ll (original)
> +++ llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.kernarg.segment.ptr.ll Fri Jul 13 09:40:25 2018
> @@ -79,7 +79,7 @@ define amdgpu_kernel void @opencl_test_i
> ; CO-V2: enable_sgpr_kernarg_segment_ptr = 1
> ; HSA: kernarg_segment_byte_size = 0
> ; OS-MESA3D: kernarg_segment_byte_size = 16
> -; CO-V2: kernarg_segment_alignment = 32
> +; CO-V2: kernarg_segment_alignment = 4
>
> ; HSA: s_load_dword s{{[0-9]+}}, s[4:5]
> define amdgpu_kernel void @test_no_kernargs() #1 {
>
>
> _______________________________________________
> llvm-commits mailing list
> llvm-commits at lists.llvm.org
> http://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-commits
>