[llvm] r341413 - [AMDGPU] Legalize VGPR Rsrc operands for MUBUF instructions

Galina Kistanova via llvm-commits llvm-commits at lists.llvm.org
Thu Sep 6 11:03:25 PDT 2018


Ping?

Failing Tests (2):
    LLVM :: CodeGen/AMDGPU/mubuf-legalize-operands.ll
    LLVM :: CodeGen/AMDGPU/mubuf-legalize-operands.mir

Thanks

Galina

On Wed, Sep 5, 2018 at 1:54 PM Galina Kistanova <gkistanova at gmail.com>
wrote:

> Hello Scott,
>
> This commit added a couple of broken tests to one of our builders:
>
> http://lab.llvm.org:8011/builders/llvm-clang-x86_64-expensive-checks-win/builds/12238
>
> . . .
> Failing Tests (3):
>     Clang :: Driver/print-multi-directory.c
>     LLVM :: CodeGen/AMDGPU/mubuf-legalize-operands.ll
>     LLVM :: CodeGen/AMDGPU/mubuf-legalize-operands.mir
>
> Please have a look?
> The builder was already red and did not send notifications on this.
>
> Thanks
>
> Galina
>
> On Tue, Sep 4, 2018 at 2:51 PM Scott Linder via llvm-commits <
> llvm-commits at lists.llvm.org> wrote:
>
>> Author: scott.linder
>> Date: Tue Sep  4 14:50:47 2018
>> New Revision: 341413
>>
>> URL: http://llvm.org/viewvc/llvm-project?rev=341413&view=rev
>> Log:
>> [AMDGPU] Legalize VGPR Rsrc operands for MUBUF instructions
>>
>> Emit a waterfall loop in the general case for a potentially-divergent Rsrc
>> operand. When practical, avoid this by using Addr64 instructions.
>>
>> Differential Revision: https://reviews.llvm.org/D50982
>>
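>> As a back-of-the-envelope model (an editorial illustration, not part of this
>> patch): each iteration retires every lane whose Rsrc matches the value read
>> by v_readfirstlane, so the trip count equals the number of distinct Rsrc
>> values across the wavefront's 64 lanes. In C++ terms:
>>
>>   #include <array>
>>   #include <cstdint>
>>   #include <unordered_set>
>>
>>   // Host-side model of one 64-lane wavefront; RsrcPerLane[i] stands in for
>>   // the 128-bit descriptor held by lane i (collapsed to 64 bits for
>>   // brevity). Best case all lanes agree: 1 iteration. Worst case: 64.
>>   int waterfallIterations(const std::array<uint64_t, 64> &RsrcPerLane) {
>>     std::unordered_set<uint64_t> Unique(RsrcPerLane.begin(),
>>                                         RsrcPerLane.end());
>>     return static_cast<int>(Unique.size());
>>   }
>>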
>> Added:
>>     llvm/trunk/test/CodeGen/AMDGPU/mubuf-legalize-operands.ll
>>     llvm/trunk/test/CodeGen/AMDGPU/mubuf-legalize-operands.mir
>> Modified:
>>     llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp
>>     llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.h
>>     llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td
>>
>> Modified: llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp
>> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp?rev=341413&r1=341412&r2=341413&view=diff
>>
>> ==============================================================================
>> --- llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp (original)
>> +++ llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp Tue Sep  4 14:50:47 2018
>> @@ -3579,6 +3579,177 @@ void SIInstrInfo::legalizeGenericOperand
>>      FoldImmediate(*Copy, *Def, OpReg, &MRI);
>>  }
>>
>> +// Emit the actual waterfall loop, executing the wrapped instruction for each
>> +// unique value of \p Rsrc across all lanes. In the best case we execute 1
>> +// iteration, in the worst case we execute 64 (once per lane).
>> +static void emitLoadSRsrcFromVGPRLoop(const SIInstrInfo &TII,
>> +                                      MachineRegisterInfo &MRI,
>> +                                      MachineBasicBlock &OrigBB,
>> +                                      MachineBasicBlock &LoopBB,
>> +                                      const DebugLoc &DL,
>> +                                      MachineOperand &Rsrc) {
>> +  MachineBasicBlock::iterator I = LoopBB.begin();
>> +
>> +  unsigned VRsrc = Rsrc.getReg();
>> +  unsigned VRsrcUndef = getUndefRegState(Rsrc.isUndef());
>> +
>> +  unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
>> +  unsigned CondReg0 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
>> +  unsigned CondReg1 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
>> +  unsigned AndCond = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
>> +  unsigned SRsrcSub0 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
>> +  unsigned SRsrcSub1 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
>> +  unsigned SRsrcSub2 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
>> +  unsigned SRsrcSub3 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
>> +  unsigned SRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);
>> +
>> +  // Beginning of the loop, read the next Rsrc variant.
>> +  BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub0)
>> +    .addReg(VRsrc, VRsrcUndef, AMDGPU::sub0);
>> +  BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub1)
>> +    .addReg(VRsrc, VRsrcUndef, AMDGPU::sub1);
>> +  BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub2)
>> +    .addReg(VRsrc, VRsrcUndef, AMDGPU::sub2);
>> +  BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub3)
>> +    .addReg(VRsrc, VRsrcUndef, AMDGPU::sub3);
>> +
>> +  BuildMI(LoopBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), SRsrc)
>> +    .addReg(SRsrcSub0)
>> +    .addImm(AMDGPU::sub0)
>> +    .addReg(SRsrcSub1)
>> +    .addImm(AMDGPU::sub1)
>> +    .addReg(SRsrcSub2)
>> +    .addImm(AMDGPU::sub2)
>> +    .addReg(SRsrcSub3)
>> +    .addImm(AMDGPU::sub3);
>> +
>> +  // Update Rsrc operand to use the SGPR Rsrc.
>> +  Rsrc.setReg(SRsrc);
>> +  Rsrc.setIsKill(true);
>> +
>> +  // Identify all lanes with identical Rsrc operands in their VGPRs.
>> +  BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U64_e64), CondReg0)
>> +    .addReg(SRsrc, 0, AMDGPU::sub0_sub1)
>> +    .addReg(VRsrc, 0, AMDGPU::sub0_sub1);
>> +  BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U64_e64), CondReg1)
>> +    .addReg(SRsrc, 0, AMDGPU::sub2_sub3)
>> +    .addReg(VRsrc, 0, AMDGPU::sub2_sub3);
>> +  BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_AND_B64), AndCond)
>> +    .addReg(CondReg0)
>> +    .addReg(CondReg1);
>> +
>> +  MRI.setSimpleHint(SaveExec, AndCond);
>> +
>> +  // Update EXEC to matching lanes, saving original to SaveExec.
>> +  BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_AND_SAVEEXEC_B64), SaveExec)
>> +    .addReg(AndCond, RegState::Kill);
>> +
>> +  // The original instruction is here; we insert the terminators after it.
>> +  I = LoopBB.end();
>> +
>> +  // Update EXEC, switch all done bits to 0 and all todo bits to 1.
>> +  BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_XOR_B64_term), AMDGPU::EXEC)
>> +    .addReg(AMDGPU::EXEC)
>> +    .addReg(SaveExec);
>> +  BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_CBRANCH_EXECNZ))
>> +    .addMBB(&LoopBB);
>> +}
>> +
>> +// Build a waterfall loop around \p MI, replacing the VGPR \p Rsrc register
>> +// with SGPRs by iterating over all unique values across all lanes.
>> +static void loadSRsrcFromVGPR(const SIInstrInfo &TII,
>> +                              MachineInstr &MI,
>> +                              MachineOperand &Rsrc) {
>> +  MachineBasicBlock &MBB = *MI.getParent();
>> +  MachineFunction &MF = *MBB.getParent();
>> +  MachineRegisterInfo &MRI = MF.getRegInfo();
>> +  MachineBasicBlock::iterator I(&MI);
>> +  const DebugLoc &DL = MI.getDebugLoc();
>> +
>> +  unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
>> +
>> +  // Save the EXEC mask
>> +  BuildMI(MBB, I, DL, TII.get(AMDGPU::S_MOV_B64), SaveExec)
>> +    .addReg(AMDGPU::EXEC);
>> +
>> +  // Killed uses in the instruction we are waterfalling around will be
>> +  // incorrect due to the added control-flow.
>> +  for (auto &MO : MI.uses())
>> +    if (MO.isReg() && MO.isUse())
>> +      MRI.clearKillFlags(MO.getReg());
>> +
>> +  // To insert the loop we need to split the block. Move everything after this
>> +  // point to a new block, and insert a new empty block between the two.
>> +  MachineBasicBlock *LoopBB = MF.CreateMachineBasicBlock();
>> +  MachineBasicBlock *RemainderBB = MF.CreateMachineBasicBlock();
>> +  MachineFunction::iterator MBBI(MBB);
>> +  ++MBBI;
>> +
>> +  MF.insert(MBBI, LoopBB);
>> +  MF.insert(MBBI, RemainderBB);
>> +
>> +  LoopBB->addSuccessor(LoopBB);
>> +  LoopBB->addSuccessor(RemainderBB);
>> +
>> +  // Move MI to the LoopBB, and the remainder of the block to RemainderBB.
>> +  MachineBasicBlock::iterator J = I++;
>> +  RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
>> +  RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end());
>> +  LoopBB->splice(LoopBB->begin(), &MBB, J);
>> +
>> +  MBB.addSuccessor(LoopBB);
>> +
>> +  emitLoadSRsrcFromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, Rsrc);
>> +
>> +  // Restore the EXEC mask
>> +  MachineBasicBlock::iterator First = RemainderBB->begin();
>> +  BuildMI(*RemainderBB, First, DL, TII.get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
>> +    .addReg(SaveExec);
>> +}
>> +
>> +// Extract pointer from Rsrc and return a zero-value Rsrc replacement.
>> +static std::tuple<unsigned, unsigned> extractRsrcPtr(const SIInstrInfo &TII,
>> +                                                     MachineInstr &MI,
>> +                                                     MachineOperand &Rsrc) {
>> +  MachineBasicBlock &MBB = *MI.getParent();
>> +  MachineFunction &MF = *MBB.getParent();
>> +  MachineRegisterInfo &MRI = MF.getRegInfo();
>> +
>> +  // Extract the ptr from the resource descriptor.
>> +  unsigned RsrcPtr = TII.buildExtractSubReg(MI, MRI, Rsrc,
>> +    &AMDGPU::VReg_128RegClass, AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass);
>> +
>> +  // Create an empty resource descriptor
>> +  unsigned Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
>> +  unsigned SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
>> +  unsigned SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
>> +  unsigned NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);
>> +  uint64_t RsrcDataFormat = TII.getDefaultRsrcDataFormat();
>> +
>> +  // Zero64 = 0
>> +  BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B64), Zero64)
>> +      .addImm(0);
>> +
>> +  // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
>> +  BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatLo)
>> +      .addImm(RsrcDataFormat & 0xFFFFFFFF);
>> +
>> +  // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
>> +  BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatHi)
>> +      .addImm(RsrcDataFormat >> 32);
>> +
>> +  // NewSRsrc = {Zero64, SRsrcFormat}
>> +  BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::REG_SEQUENCE), NewSRsrc)
>> +      .addReg(Zero64)
>> +      .addImm(AMDGPU::sub0_sub1)
>> +      .addReg(SRsrcFormatLo)
>> +      .addImm(AMDGPU::sub2)
>> +      .addReg(SRsrcFormatHi)
>> +      .addImm(AMDGPU::sub3);
>> +
>> +  return std::tie(RsrcPtr, NewSRsrc);
>> +}
>> +
>>  void SIInstrInfo::legalizeOperands(MachineInstr &MI) const {
>>    MachineFunction &MF = *MI.getParent()->getParent();
>>    MachineRegisterInfo &MRI = MF.getRegInfo();
>> @@ -3721,74 +3892,55 @@ void SIInstrInfo::legalizeOperands(Machi
>>      return;
>>    }
>>
>> -  // Legalize MUBUF* instructions by converting to addr64 form.
>> -  // FIXME: If we start using the non-addr64 instructions for compute, we
>> -  // may need to legalize them as above. This especially applies to the
>> -  // buffer_load_format_* variants and variants with idxen (or bothen).
>> -  int SRsrcIdx =
>> +  // Legalize MUBUF* instructions.
>> +  int RsrcIdx =
>>        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
>> -  if (SRsrcIdx != -1) {
>> +  if (RsrcIdx != -1) {
>>      // We have an MUBUF instruction
>> -    MachineOperand *SRsrc = &MI.getOperand(SRsrcIdx);
>> -    unsigned SRsrcRC = get(MI.getOpcode()).OpInfo[SRsrcIdx].RegClass;
>> -    if (RI.getCommonSubClass(MRI.getRegClass(SRsrc->getReg()),
>> -                                             RI.getRegClass(SRsrcRC))) {
>> +    MachineOperand *Rsrc = &MI.getOperand(RsrcIdx);
>> +    unsigned RsrcRC = get(MI.getOpcode()).OpInfo[RsrcIdx].RegClass;
>> +    if (RI.getCommonSubClass(MRI.getRegClass(Rsrc->getReg()),
>> +                                             RI.getRegClass(RsrcRC))) {
>>        // The operands are legal.
>>        // FIXME: We may need to legalize operands besides srsrc.
>>        return;
>>      }
>>
>> -    MachineBasicBlock &MBB = *MI.getParent();
>> +    // Legalize a VGPR Rsrc.
>> +    //
>> +    // If the instruction is _ADDR64, we can avoid a waterfall by extracting
>> +    // the base pointer from the VGPR Rsrc, adding it to the VAddr, then using
>> +    // a zero-value SRsrc.
>> +    //
>> +    // If the instruction is _OFFSET (both idxen and offen disabled), and we
>> +    // support ADDR64 instructions, we can convert to ADDR64 and do the same as
>> +    // above.
>> +    //
>> +    // Otherwise we are on non-ADDR64 hardware, and/or we have
>> +    // idxen/offen/bothen and we fall back to a waterfall loop.
>>
>> -    // Extract the ptr from the resource descriptor.
>> -    unsigned SRsrcPtr = buildExtractSubReg(MI, MRI, *SRsrc,
>> -      &AMDGPU::VReg_128RegClass, AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass);
>> -
>> -    // Create an empty resource descriptor
>> -    unsigned Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
>> -    unsigned SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
>> -    unsigned SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
>> -    unsigned NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);
>> -    uint64_t RsrcDataFormat = getDefaultRsrcDataFormat();
>> -
>> -    // Zero64 = 0
>> -    BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::S_MOV_B64), Zero64)
>> -        .addImm(0);
>> -
>> -    // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
>> -    BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::S_MOV_B32), SRsrcFormatLo)
>> -        .addImm(RsrcDataFormat & 0xFFFFFFFF);
>> -
>> -    // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
>> -    BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::S_MOV_B32), SRsrcFormatHi)
>> -        .addImm(RsrcDataFormat >> 32);
>> -
>> -    // NewSRsrc = {Zero64, SRsrcFormat}
>> -    BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewSRsrc)
>> -        .addReg(Zero64)
>> -        .addImm(AMDGPU::sub0_sub1)
>> -        .addReg(SRsrcFormatLo)
>> -        .addImm(AMDGPU::sub2)
>> -        .addReg(SRsrcFormatHi)
>> -        .addImm(AMDGPU::sub3);
>> +    MachineBasicBlock &MBB = *MI.getParent();
>>
>>      MachineOperand *VAddr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
>> -    unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
>> -    if (VAddr) {
>> +    if (VAddr && AMDGPU::getIfAddr64Inst(MI.getOpcode()) != -1) {
>>        // This is already an ADDR64 instruction so we need to add the pointer
>>        // extracted from the resource descriptor to the current value of VAddr.
>>        unsigned NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
>>        unsigned NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
>> +      unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
>>
>> -      // NewVaddrLo = SRsrcPtr:sub0 + VAddr:sub0
>> +      unsigned RsrcPtr, NewSRsrc;
>> +      std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);
>> +
>> +      // NewVaddrLo = RsrcPtr:sub0 + VAddr:sub0
>>        DebugLoc DL = MI.getDebugLoc();
>>        BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), NewVAddrLo)
>> -        .addReg(SRsrcPtr, 0, AMDGPU::sub0)
>> +        .addReg(RsrcPtr, 0, AMDGPU::sub0)
>>          .addReg(VAddr->getReg(), 0, AMDGPU::sub0);
>>
>> -      // NewVaddrHi = SRsrcPtr:sub1 + VAddr:sub1
>> +      // NewVaddrHi = RsrcPtr:sub1 + VAddr:sub1
>>        BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e32), NewVAddrHi)
>> -        .addReg(SRsrcPtr, 0, AMDGPU::sub1)
>> +        .addReg(RsrcPtr, 0, AMDGPU::sub1)
>>          .addReg(VAddr->getReg(), 0, AMDGPU::sub1);
>>
>>        // NewVaddr = {NewVaddrHi, NewVaddrLo}
>> @@ -3797,13 +3949,20 @@ void SIInstrInfo::legalizeOperands(Machi
>>            .addImm(AMDGPU::sub0)
>>            .addReg(NewVAddrHi)
>>            .addImm(AMDGPU::sub1);
>> -    } else {
>> +
>> +      VAddr->setReg(NewVAddr);
>> +      Rsrc->setReg(NewSRsrc);
>> +    } else if (!VAddr && ST.hasAddr64()) {
>>        // This instruction is the _OFFSET variant, so we need to convert it to
>>        // ADDR64.
>>
>>        assert(MBB.getParent()->getSubtarget<GCNSubtarget>().getGeneration()
>>               < AMDGPUSubtarget::VOLCANIC_ISLANDS &&
>>               "FIXME: Need to emit flat atomics here");
>>
>> +      unsigned RsrcPtr, NewSRsrc;
>> +      std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);
>> +
>> +      unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
>>        MachineOperand *VData = getNamedOperand(MI, AMDGPU::OpName::vdata);
>>        MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset);
>>        MachineOperand *SOffset = getNamedOperand(MI, AMDGPU::OpName::soffset);
>> @@ -3819,10 +3978,8 @@ void SIInstrInfo::legalizeOperands(Machi
>>          MachineInstrBuilder MIB =
>>              BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
>>                  .add(*VData)
>> -                .addReg(AMDGPU::NoRegister) // Dummy value for vaddr.
>> -                // This will be replaced later
>> -                // with the new value of vaddr.
>> -                .add(*SRsrc)
>> +                .addReg(NewVAddr)
>> +                .addReg(NewSRsrc)
>>                  .add(*SOffset)
>>                  .add(*Offset);
>>
>> @@ -3846,10 +4003,8 @@ void SIInstrInfo::legalizeOperands(Machi
>>          Addr64 = BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
>>                       .add(*VData)
>>                       .add(*VDataIn)
>> -                     .addReg(AMDGPU::NoRegister) // Dummy value for vaddr.
>> -                     // This will be replaced later
>> -                     // with the new value of vaddr.
>> -                     .add(*SRsrc)
>> +                     .addReg(NewVAddr)
>> +                     .addReg(NewSRsrc)
>>                       .add(*SOffset)
>>                       .add(*Offset)
>>                       .addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc))
>> @@ -3861,19 +4016,15 @@ void SIInstrInfo::legalizeOperands(Machi
>>        // NewVaddr = {NewVaddrHi, NewVaddrLo}
>>        BuildMI(MBB, Addr64, Addr64->getDebugLoc(), get(AMDGPU::REG_SEQUENCE),
>>                NewVAddr)
>> -          .addReg(SRsrcPtr, 0, AMDGPU::sub0)
>> +          .addReg(RsrcPtr, 0, AMDGPU::sub0)
>>            .addImm(AMDGPU::sub0)
>> -          .addReg(SRsrcPtr, 0, AMDGPU::sub1)
>> +          .addReg(RsrcPtr, 0, AMDGPU::sub1)
>>            .addImm(AMDGPU::sub1);
>> -
>> -      VAddr = getNamedOperand(*Addr64, AMDGPU::OpName::vaddr);
>> -      SRsrc = getNamedOperand(*Addr64, AMDGPU::OpName::srsrc);
>> +    } else {
>> +      // This is another variant; legalize Rsrc with waterfall loop from VGPRs
>> +      // to SGPRs.
>> +      loadSRsrcFromVGPR(*this, MI, *Rsrc);
>>      }
>> -
>> -    // Update the instruction to use NewVaddr
>> -    VAddr->setReg(NewVAddr);
>> -    // Update the instruction to use NewSRsrc
>> -    SRsrc->setReg(NewSRsrc);
>>    }
>>  }
>>
>>
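>> To summarize the three paths above in one place (an editorial sketch, not
>> code from this patch; the enum and function names are invented for
>> illustration):
>>
>>   // Which legalization path a MUBUF instruction with a VGPR Rsrc takes,
>>   // mirroring the branch structure of legalizeOperands() above.
>>   enum class MUBUFKind { Addr64, Offset, IdxenOffenBothen };
>>   enum class Path { FoldPtrIntoVAddr, ConvertToAddr64ThenFold, WaterfallLoop };
>>
>>   Path legalizationPath(MUBUFKind Kind, bool SubtargetHasAddr64) {
>>     if (Kind == MUBUFKind::Addr64)          // _ADDR64: fold ptr into VAddr
>>       return Path::FoldPtrIntoVAddr;
>>     if (Kind == MUBUFKind::Offset && SubtargetHasAddr64)
>>       return Path::ConvertToAddr64ThenFold; // rewrite _OFFSET to _ADDR64 first
>>     return Path::WaterfallLoop;             // idxen/offen/bothen or no ADDR64
>>   }
>>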
>> Modified: llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.h
>> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.h?rev=341413&r1=341412&r2=341413&view=diff
>>
>> ==============================================================================
>> --- llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.h (original)
>> +++ llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.h Tue Sep  4 14:50:47 2018
>> @@ -919,6 +919,12 @@ namespace AMDGPU {
>>    LLVM_READONLY
>>    int getAddr64Inst(uint16_t Opcode);
>>
>> +  /// Check if \p Opcode is an Addr64 opcode.
>> +  ///
>> +  /// \returns \p Opcode if it is an Addr64 opcode, otherwise -1.
>> +  LLVM_READONLY
>> +  int getIfAddr64Inst(uint16_t Opcode);
>> +
>>    LLVM_READONLY
>>    int getMUBUFNoLdsInst(uint16_t Opcode);
>>
>>
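>> As a usage note (editorial; the wrapper below is hypothetical, but the call
>> matches how the patch uses the new mapping in legalizeOperands):
>>
>>   // getIfAddr64Inst maps an Addr64 opcode to itself and everything else to
>>   // -1, so a simple sentinel check answers whether an opcode is an Addr64
>>   // variant.
>>   static bool isAddr64Variant(const MachineInstr &MI) {
>>     return AMDGPU::getIfAddr64Inst(MI.getOpcode()) != -1;
>>   }
>>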
>> Modified: llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td
>> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td?rev=341413&r1=341412&r2=341413&view=diff
>>
>> ==============================================================================
>> --- llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td (original)
>> +++ llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td Tue Sep  4 14:50:47 2018
>> @@ -1986,6 +1986,14 @@ def getAddr64Inst : InstrMapping {
>>    let ValueCols = [["1"]];
>>  }
>>
>> +def getIfAddr64Inst : InstrMapping {
>> +  let FilterClass = "MUBUFAddr64Table";
>> +  let RowFields = ["OpName"];
>> +  let ColFields = ["IsAddr64"];
>> +  let KeyCol = ["1"];
>> +  let ValueCols = [["1"]];
>> +}
>> +
>>  def getMUBUFNoLdsInst : InstrMapping {
>>    let FilterClass = "MUBUFLdsTable";
>>    let RowFields = ["OpName"];
>>
>> Added: llvm/trunk/test/CodeGen/AMDGPU/mubuf-legalize-operands.ll
>> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/mubuf-legalize-operands.ll?rev=341413&view=auto
>>
>> ==============================================================================
>> --- llvm/trunk/test/CodeGen/AMDGPU/mubuf-legalize-operands.ll (added)
>> +++ llvm/trunk/test/CodeGen/AMDGPU/mubuf-legalize-operands.ll Tue Sep  4 14:50:47 2018
>> @@ -0,0 +1,230 @@
>> +; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck %s
>> +; RUN: llc -O0 -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck %s --check-prefix=CHECK-O0
>> +
>> +; Test that we correctly legalize VGPR Rsrc operands in MUBUF instructions.
>> +
>> +; CHECK-LABEL: mubuf_vgpr
>> +; CHECK: s_mov_b64 [[SAVEEXEC:s\[[0-9]+:[0-9]+\]]], exec
>> +; CHECK: [[LOOPBB:BB[0-9]+_[0-9]+]]:
>> +; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC0:[0-9]+]], v0
>> +; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC1:[0-9]+]], v1
>> +; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC2:[0-9]+]], v2
>> +; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC3:[0-9]+]], v3
>> +; CHECK: v_cmp_eq_u64_e32 vcc, s{{\[}}[[SRSRC0]]:[[SRSRC1]]{{\]}}, v[0:1]
>> +; CHECK: v_cmp_eq_u64_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], s{{\[}}[[SRSRC2]]:[[SRSRC3]]{{\]}}, v[2:3]
>> +; CHECK: s_and_b64 [[CMP:s\[[0-9]+:[0-9]+\]]], vcc, [[CMP0]]
>> +; CHECK: s_and_saveexec_b64 [[CMP]], [[CMP]]
>> +; CHECK: s_waitcnt vmcnt(0)
>> +; CHECK: buffer_load_format_x [[RES:v[0-9]+]], v4, s{{\[}}[[SRSRC0]]:[[SRSRC3]]{{\]}}, 0 idxen
>> +; CHECK: s_xor_b64 exec, exec, [[CMP]]
>> +; CHECK: s_cbranch_execnz [[LOOPBB]]
>> +; CHECK: s_mov_b64 exec, [[SAVEEXEC]]
>> +; CHECK: v_mov_b32_e32 v0, [[RES]]
>> +define float @mubuf_vgpr(<4 x i32> %i, i32 %c) #0 {
>> +  %call = call float @llvm.amdgcn.buffer.load.format.f32(<4 x i32> %i, i32 %c, i32 0, i1 zeroext false, i1 zeroext false) #1
>> +  ret float %call
>> +}
>> +
>> +; CHECK-LABEL: mubuf_vgpr_adjacent_in_block
>> +
>> +; CHECK: s_mov_b64 [[SAVEEXEC:s\[[0-9]+:[0-9]+\]]], exec
>> +; CHECK: [[LOOPBB0:BB[0-9]+_[0-9]+]]:
>> +; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC0:[0-9]+]], v0
>> +; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC1:[0-9]+]], v1
>> +; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC2:[0-9]+]], v2
>> +; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC3:[0-9]+]], v3
>> +; CHECK: v_cmp_eq_u64_e32 vcc, s{{\[}}[[SRSRC0]]:[[SRSRC1]]{{\]}}, v[0:1]
>> +; CHECK: v_cmp_eq_u64_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], s{{\[}}[[SRSRC2]]:[[SRSRC3]]{{\]}}, v[2:3]
>> +; CHECK: s_and_b64 [[CMP:s\[[0-9]+:[0-9]+\]]], vcc, [[CMP0]]
>> +; CHECK: s_and_saveexec_b64 [[CMP]], [[CMP]]
>> +; CHECK: s_waitcnt vmcnt(0)
>> +; CHECK: buffer_load_format_x [[RES0:v[0-9]+]], v8, s{{\[}}[[SRSRC0]]:[[SRSRC3]]{{\]}}, 0 idxen
>> +; CHECK: s_xor_b64 exec, exec, [[CMP]]
>> +; CHECK: s_cbranch_execnz [[LOOPBB0]]
>> +
>> +; CHECK: s_mov_b64 exec, [[SAVEEXEC]]
>> +; FIXME: redundant s_mov
>> +; CHECK: s_mov_b64 [[SAVEEXEC:s\[[0-9]+:[0-9]+\]]], exec
>> +
>> +; CHECK: [[LOOPBB1:BB[0-9]+_[0-9]+]]:
>> +; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC0:[0-9]+]], v4
>> +; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC1:[0-9]+]], v5
>> +; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC2:[0-9]+]], v6
>> +; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC3:[0-9]+]], v7
>> +; CHECK: v_cmp_eq_u64_e32 vcc, s{{\[}}[[SRSRC0]]:[[SRSRC1]]{{\]}}, v[4:5]
>> +; CHECK: v_cmp_eq_u64_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], s{{\[}}[[SRSRC2]]:[[SRSRC3]]{{\]}}, v[6:7]
>> +; CHECK: s_and_b64 [[CMP:s\[[0-9]+:[0-9]+\]]], vcc, [[CMP0]]
>> +; CHECK: s_and_saveexec_b64 [[CMP]], [[CMP]]
>> +; CHECK: s_waitcnt vmcnt(0)
>> +; CHECK: buffer_load_format_x [[RES1:v[0-9]+]], v8, s{{\[}}[[SRSRC0]]:[[SRSRC3]]{{\]}}, 0 idxen
>> +; CHECK: s_xor_b64 exec, exec, [[CMP]]
>> +; CHECK: s_cbranch_execnz [[LOOPBB1]]
>> +
>> +; CHECK: s_mov_b64 exec, [[SAVEEXEC]]
>> +; CHECK-DAG: global_store_dword v[9:10], [[RES0]], off
>> +; CHECK-DAG: global_store_dword v[11:12], [[RES1]], off
>> +
>> +define void @mubuf_vgpr_adjacent_in_block(<4 x i32> %i, <4 x i32> %j, i32 %c, float addrspace(1)* %out0, float addrspace(1)* %out1) #0 {
>> +entry:
>> +  %val0 = call float @llvm.amdgcn.buffer.load.format.f32(<4 x i32> %i, i32 %c, i32 0, i1 zeroext false, i1 zeroext false) #1
>> +  %val1 = call float @llvm.amdgcn.buffer.load.format.f32(<4 x i32> %j, i32 %c, i32 0, i1 zeroext false, i1 zeroext false) #1
>> +  store volatile float %val0, float addrspace(1)* %out0
>> +  store volatile float %val1, float addrspace(1)* %out1
>> +  ret void
>> +}
>> +
>> +; CHECK-LABEL: mubuf_vgpr_outside_entry
>> +
>> +; CHECK-DAG: v_mov_b32_e32 [[IDX:v[0-9]+]], s4
>> +; CHECK-DAG: s_mov_b64 [[SAVEEXEC:s\[[0-9]+:[0-9]+\]]], exec
>> +
>> +; CHECK: [[LOOPBB0:BB[0-9]+_[0-9]+]]:
>> +; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC0:[0-9]+]], v0
>> +; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC1:[0-9]+]], v1
>> +; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC2:[0-9]+]], v2
>> +; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC3:[0-9]+]], v3
>> +; CHECK: v_cmp_eq_u64_e32 vcc, s{{\[}}[[SRSRC0]]:[[SRSRC1]]{{\]}}, v[0:1]
>> +; CHECK: v_cmp_eq_u64_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], s{{\[}}[[SRSRC2]]:[[SRSRC3]]{{\]}}, v[2:3]
>> +; CHECK: s_and_b64 [[CMP:s\[[0-9]+:[0-9]+\]]], vcc, [[CMP0]]
>> +; CHECK: s_and_saveexec_b64 [[CMP]], [[CMP]]
>> +; CHECK: s_waitcnt vmcnt(0)
>> +; CHECK: buffer_load_format_x [[RES:v[0-9]+]], [[IDX]], s{{\[}}[[SRSRC0]]:[[SRSRC3]]{{\]}}, 0 idxen
>> +; CHECK: s_xor_b64 exec, exec, [[CMP]]
>> +; CHECK: s_cbranch_execnz [[LOOPBB0]]
>> +
>> +; CHECK: s_mov_b64 exec, [[SAVEEXEC]]
>> +; CHECK: s_cbranch_execz [[TERMBB:BB[0-9]+_[0-9]+]]
>> +
>> +; CHECK: BB{{[0-9]+_[0-9]+}}:
>> +; CHECK-DAG: v_mov_b32_e32 [[IDX:v[0-9]+]], s4
>> +; CHECK-DAG: s_mov_b64 [[SAVEEXEC:s\[[0-9]+:[0-9]+\]]], exec
>> +
>> +; CHECK: [[LOOPBB1:BB[0-9]+_[0-9]+]]:
>> +; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC0:[0-9]+]], v4
>> +; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC1:[0-9]+]], v5
>> +; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC2:[0-9]+]], v6
>> +; CHECK-DAG: v_readfirstlane_b32 s[[SRSRC3:[0-9]+]], v7
>> +; CHECK: v_cmp_eq_u64_e32 vcc, s{{\[}}[[SRSRC0]]:[[SRSRC1]]{{\]}}, v[4:5]
>> +; CHECK: v_cmp_eq_u64_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], s{{\[}}[[SRSRC2]]:[[SRSRC3]]{{\]}}, v[6:7]
>> +; CHECK: s_and_b64 [[CMP:s\[[0-9]+:[0-9]+\]]], vcc, [[CMP0]]
>> +; CHECK: s_and_saveexec_b64 [[CMP]], [[CMP]]
>> +; CHECK: s_waitcnt vmcnt(0)
>> +; CHECK: buffer_load_format_x [[RES]], [[IDX]], s{{\[}}[[SRSRC0]]:[[SRSRC3]]{{\]}}, 0 idxen
>> +; CHECK: s_xor_b64 exec, exec, [[CMP]]
>> +; CHECK: s_cbranch_execnz [[LOOPBB1]]
>> +
>> +; CHECK: s_mov_b64 exec, [[SAVEEXEC]]
>> +
>> +; CHECK: [[TERMBB]]:
>> +; CHECK: global_store_dword v[11:12], [[RES]], off
>> +
>> +; Confirm spills do not occur between the XOR and branch that terminate the
>> +; waterfall loop BBs.
>> +
>> +; CHECK-O0-LABEL: mubuf_vgpr_outside_entry
>> +
>> +; CHECK-O0-DAG: s_mov_b32 [[IDX_S:s[0-9]+]], s4
>> +; CHECK-O0-DAG: v_mov_b32_e32 [[IDX_V:v[0-9]+]], [[IDX_S]]
>> +; CHECK-O0-DAG: s_mov_b64 [[SAVEEXEC:s\[[0-9]+:[0-9]+\]]], exec
>> +; CHECK-O0-DAG: buffer_store_dword [[IDX_V]], off, s[0:3], s5 offset:[[IDX_OFF:[0-9]+]] ; 4-byte Folded Spill
>> +
>> +; CHECK-O0: [[LOOPBB0:BB[0-9]+_[0-9]+]]:
>> +; CHECK-O0: buffer_load_dword v[[VRSRC0:[0-9]+]], {{.*}} ; 4-byte Folded Reload
>> +; CHECK-O0: s_waitcnt vmcnt(0)
>> +; CHECK-O0: buffer_load_dword v[[VRSRC1:[0-9]+]], {{.*}} ; 4-byte Folded Reload
>> +; CHECK-O0: s_waitcnt vmcnt(0)
>> +; CHECK-O0: buffer_load_dword v[[VRSRC2:[0-9]+]], {{.*}} ; 4-byte Folded Reload
>> +; CHECK-O0: s_waitcnt vmcnt(0)
>> +; CHECK-O0: buffer_load_dword v[[VRSRC3:[0-9]+]], {{.*}} ; 4-byte Folded Reload
>> +; CHECK-O0: s_waitcnt vmcnt(0)
>> +; CHECK-O0-DAG: v_readfirstlane_b32 s[[SRSRCTMP0:[0-9]+]], v[[VRSRC0]]
>> +; CHECK-O0-DAG: v_readfirstlane_b32 s[[SRSRCTMP1:[0-9]+]], v[[VRSRC1]]
>> +; CHECK-O0-DAG: v_readfirstlane_b32 s[[SRSRCTMP2:[0-9]+]], v[[VRSRC2]]
>> +; CHECK-O0-DAG: v_readfirstlane_b32 s[[SRSRCTMP3:[0-9]+]], v[[VRSRC3]]
>> +; CHECK-O0-DAG: s_mov_b32 s[[SRSRC0:[0-9]+]], s[[SRSRCTMP0]]
>> +; CHECK-O0-DAG: s_mov_b32 s[[SRSRC1:[0-9]+]], s[[SRSRCTMP1]]
>> +; CHECK-O0-DAG: s_mov_b32 s[[SRSRC2:[0-9]+]], s[[SRSRCTMP2]]
>> +; CHECK-O0-DAG: s_mov_b32 s[[SRSRC3:[0-9]+]], s[[SRSRCTMP3]]
>> +; CHECK-O0: v_cmp_eq_u64_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], s{{\[}}[[SRSRC0]]:[[SRSRC1]]{{\]}}, v{{\[}}[[VRSRC0]]:[[VRSRC1]]{{\]}}
>> +; CHECK-O0: v_cmp_eq_u64_e64 [[CMP1:s\[[0-9]+:[0-9]+\]]], s{{\[}}[[SRSRC2]]:[[SRSRC3]]{{\]}}, v{{\[}}[[VRSRC2]]:[[VRSRC3]]{{\]}}
>> +; CHECK-O0: s_and_b64 [[CMP:s\[[0-9]+:[0-9]+\]]], [[CMP0]], [[CMP1]]
>> +; CHECK-O0: s_and_saveexec_b64 [[CMP]], [[CMP]]
>> +; CHECK-O0: buffer_load_dword [[IDX:v[0-9]+]], off, s[0:3], s5 offset:[[IDX_OFF]] ; 4-byte Folded Reload
>> +; CHECK-O0: buffer_load_format_x [[RES:v[0-9]+]], [[IDX]], s{{\[}}[[SRSRC0]]:[[SRSRC3]]{{\]}}, {{.*}} idxen
>> +; CHECK-O0: s_waitcnt vmcnt(0)
>> +; CHECK-O0: buffer_store_dword [[RES]], off, s[0:3], s5 offset:[[RES_OFF_TMP:[0-9]+]] ; 4-byte Folded Spill
>> +; CHECK-O0: s_xor_b64 exec, exec, [[CMP]]
>> +; CHECK-O0-NEXT: s_cbranch_execnz [[LOOPBB0]]
>> +
>> +; CHECK-O0: s_mov_b64 exec, [[SAVEEXEC]]
>> +; CHECK-O0: buffer_load_dword [[RES:v[0-9]+]], off, s[0:3], s5 offset:[[RES_OFF_TMP]] ; 4-byte Folded Reload
>> +; CHECK-O0: buffer_store_dword [[RES]], off, s[0:3], s5 offset:[[RES_OFF:[0-9]+]] ; 4-byte Folded Spill
>> +; CHECK-O0: s_cbranch_execz [[TERMBB:BB[0-9]+_[0-9]+]]
>> +
>> +; CHECK-O0: BB{{[0-9]+_[0-9]+}}:
>> +; CHECK-O0-DAG: s_mov_b64 s{{\[}}[[SAVEEXEC0:[0-9]+]]:[[SAVEEXEC1:[0-9]+]]{{\]}}, exec
>> +; CHECK-O0-DAG: buffer_store_dword {{v[0-9]+}}, off, s[0:3], s5 offset:[[IDX_OFF:[0-9]+]] ; 4-byte Folded Spill
>> +; CHECK-O0: v_writelane_b32 [[VSAVEEXEC:v[0-9]+]], s[[SAVEEXEC0]], [[SAVEEXEC_IDX0:[0-9]+]]
>> +; CHECK-O0: v_writelane_b32 [[VSAVEEXEC:v[0-9]+]], s[[SAVEEXEC1]], [[SAVEEXEC_IDX1:[0-9]+]]
>> +
>> +; CHECK-O0: [[LOOPBB1:BB[0-9]+_[0-9]+]]:
>> +; CHECK-O0: buffer_load_dword v[[VRSRC0:[0-9]+]], {{.*}} ; 4-byte Folded Reload
>> +; CHECK-O0: s_waitcnt vmcnt(0)
>> +; CHECK-O0: buffer_load_dword v[[VRSRC1:[0-9]+]], {{.*}} ; 4-byte Folded Reload
>> +; CHECK-O0: s_waitcnt vmcnt(0)
>> +; CHECK-O0: buffer_load_dword v[[VRSRC2:[0-9]+]], {{.*}} ; 4-byte Folded Reload
>> +; CHECK-O0: s_waitcnt vmcnt(0)
>> +; CHECK-O0: buffer_load_dword v[[VRSRC3:[0-9]+]], {{.*}} ; 4-byte Folded Reload
>> +; CHECK-O0: s_waitcnt vmcnt(0)
>> +; CHECK-O0-DAG: v_readfirstlane_b32 s[[SRSRCTMP0:[0-9]+]], v[[VRSRC0]]
>> +; CHECK-O0-DAG: v_readfirstlane_b32 s[[SRSRCTMP1:[0-9]+]], v[[VRSRC1]]
>> +; CHECK-O0-DAG: v_readfirstlane_b32 s[[SRSRCTMP2:[0-9]+]], v[[VRSRC2]]
>> +; CHECK-O0-DAG: v_readfirstlane_b32 s[[SRSRCTMP3:[0-9]+]], v[[VRSRC3]]
>> +; CHECK-O0-DAG: s_mov_b32 s[[SRSRC0:[0-9]+]], s[[SRSRCTMP0]]
>> +; CHECK-O0-DAG: s_mov_b32 s[[SRSRC1:[0-9]+]], s[[SRSRCTMP1]]
>> +; CHECK-O0-DAG: s_mov_b32 s[[SRSRC2:[0-9]+]], s[[SRSRCTMP2]]
>> +; CHECK-O0-DAG: s_mov_b32 s[[SRSRC3:[0-9]+]], s[[SRSRCTMP3]]
>> +; CHECK-O0: v_cmp_eq_u64_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], s{{\[}}[[SRSRC0]]:[[SRSRC1]]{{\]}}, v{{\[}}[[VRSRC0]]:[[VRSRC1]]{{\]}}
>> +; CHECK-O0: v_cmp_eq_u64_e64 [[CMP1:s\[[0-9]+:[0-9]+\]]], s{{\[}}[[SRSRC2]]:[[SRSRC3]]{{\]}}, v{{\[}}[[VRSRC2]]:[[VRSRC3]]{{\]}}
>> +; CHECK-O0: s_and_b64 [[CMP:s\[[0-9]+:[0-9]+\]]], [[CMP0]], [[CMP1]]
>> +; CHECK-O0: s_and_saveexec_b64 [[CMP]], [[CMP]]
>> +; CHECK-O0: buffer_load_dword [[IDX:v[0-9]+]], off, s[0:3], s5 offset:[[IDX_OFF]] ; 4-byte Folded Reload
>> +; CHECK-O0: buffer_load_format_x [[RES:v[0-9]+]], [[IDX]], s{{\[}}[[SRSRC0]]:[[SRSRC3]]{{\]}}, {{.*}} idxen
>> +; CHECK-O0: s_waitcnt vmcnt(0)
>> +; CHECK-O0: buffer_store_dword [[RES]], off, s[0:3], s5 offset:[[RES_OFF_TMP:[0-9]+]] ; 4-byte Folded Spill
>> +; CHECK-O0: s_xor_b64 exec, exec, [[CMP]]
>> +; CHECK-O0-NEXT: s_cbranch_execnz [[LOOPBB1]]
>> +
>> +; CHECK-O0: v_readlane_b32 s[[SAVEEXEC0:[0-9]+]], [[VSAVEEXEC]], [[SAVEEXEC_IDX0]]
>> +; CHECK-O0: v_readlane_b32 s[[SAVEEXEC1:[0-9]+]], [[VSAVEEXEC]], [[SAVEEXEC_IDX1]]
>> +; CHECK-O0: s_mov_b64 exec, s{{\[}}[[SAVEEXEC0]]:[[SAVEEXEC1]]{{\]}}
>> +; CHECK-O0: buffer_load_dword [[RES:v[0-9]+]], off, s[0:3], s5 offset:[[RES_OFF_TMP]] ; 4-byte Folded Reload
>> +; CHECK-O0: buffer_store_dword [[RES]], off, s[0:3], s5 offset:[[RES_OFF]] ; 4-byte Folded Spill
>> +
>> +; CHECK-O0: [[TERMBB]]:
>> +; CHECK-O0: buffer_load_dword [[RES:v[0-9]+]], off, s[0:3], s5 offset:[[RES_OFF]] ; 4-byte Folded Reload
>> +; CHECK-O0: global_store_dword v[{{[0-9]+:[0-9]+}}], [[RES]], off
>> +
>> +define void @mubuf_vgpr_outside_entry(<4 x i32> %i, <4 x i32> %j, i32 %c, float addrspace(1)* %in, float addrspace(1)* %out) #0 {
>> +entry:
>> +  %live.out.reg = call i32 asm sideeffect "s_mov_b32 $0, 17", "={s4}" ()
>> +  %val0 = call float @llvm.amdgcn.buffer.load.format.f32(<4 x i32> %i, i32 %live.out.reg, i32 0, i1 zeroext false, i1 zeroext false) #1
>> +  %idx = call i32 @llvm.amdgcn.workitem.id.x() #1
>> +  %cmp = icmp eq i32 %idx, 0
>> +  br i1 %cmp, label %bb1, label %bb2
>> +
>> +bb1:
>> +  %val1 = call float @llvm.amdgcn.buffer.load.format.f32(<4 x i32> %j, i32 %live.out.reg, i32 0, i1 zeroext false, i1 zeroext false) #1
>> +  br label %bb2
>> +
>> +bb2:
>> +  %val = phi float [ %val0, %entry ], [ %val1, %bb1 ]
>> +  store volatile float %val, float addrspace(1)* %out
>> +  ret void
>> +}
>> +
>> +declare i32 @llvm.amdgcn.workitem.id.x() #1
>> +declare float @llvm.amdgcn.buffer.load.format.f32(<4 x i32>, i32, i32, i1, i1) #1
>> +
>> +attributes #0 = { nounwind }
>> +attributes #1 = { nounwind readnone }
>>
>> Added: llvm/trunk/test/CodeGen/AMDGPU/mubuf-legalize-operands.mir
>> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/mubuf-legalize-operands.mir?rev=341413&view=auto
>>
>> ==============================================================================
>> --- llvm/trunk/test/CodeGen/AMDGPU/mubuf-legalize-operands.mir (added)
>> +++ llvm/trunk/test/CodeGen/AMDGPU/mubuf-legalize-operands.mir Tue Sep  4 14:50:47 2018
>> @@ -0,0 +1,239 @@
>> +# RUN: llc -march=amdgcn -mcpu=gfx700 -verify-machineinstrs --run-pass=si-fix-sgpr-copies -o - %s | FileCheck %s --check-prefixes=COMMON,ADDR64
>> +# RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs --run-pass=si-fix-sgpr-copies -o - %s | FileCheck %s --check-prefixes=COMMON,NO-ADDR64
>> +
>> +# Test that we correctly legalize VGPR Rsrc operands in MUBUF instructions.
>> +#
>> +# On ADDR64 hardware we optimize the _ADDR64 and _OFFSET cases to avoid
>> +# needing a waterfall. For all other instruction variants, and when we are
>> +# on non-ADDR64 hardware, we emit a waterfall loop.
>> +
>> +# COMMON-LABEL: name: idxen
>> +# COMMON-LABEL:  bb.0:
>> +# COMMON-NEXT: successors: %bb.1({{.*}})
>> +# COMMON: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
>> +# COMMON: [[SAVEEXEC:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 $exec
>> +# COMMON-LABEL: bb.1:
>> +# COMMON-NEXT: successors: %bb.1({{.*}}), %bb.2({{.*}})
>> +# COMMON: [[SRSRC0:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub0, implicit $exec
>> +# COMMON: [[SRSRC1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub1, implicit $exec
>> +# COMMON: [[SRSRC2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub2, implicit $exec
>> +# COMMON: [[SRSRC3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub3, implicit $exec
>> +# COMMON: [[SRSRC:%[0-9]+]]:sreg_128 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1, [[SRSRC2]], %subreg.sub2, [[SRSRC3]], %subreg.sub3
>> +# COMMON: [[CMP0:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub0_sub1, [[VRSRC]].sub0_sub1, implicit $exec
>> +# COMMON: [[CMP1:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub2_sub3, [[VRSRC]].sub2_sub3, implicit $exec
>> +# COMMON: [[CMP:%[0-9]+]]:sreg_64 = S_AND_B64 [[CMP0]], [[CMP1]], implicit-def $scc
>> +# COMMON: [[TMPEXEC:%[0-9]+]]:sreg_64 = S_AND_SAVEEXEC_B64 killed [[CMP]], implicit-def $exec, implicit-def $scc, implicit $exec
>> +# COMMON: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN %4, killed [[SRSRC]], 0, 0, 0, 0, 0, implicit $exec
>> +# COMMON: $exec = S_XOR_B64_term $exec, [[TMPEXEC]], implicit-def $scc
>> +# COMMON: S_CBRANCH_EXECNZ %bb.1, implicit $exec
>> +# COMMON-LABEL: bb.2:
>> +# COMMON: $exec = S_MOV_B64 [[SAVEEXEC]]
>> +---
>> +name:            idxen
>> +liveins:
>> +  - { reg: '$vgpr0', virtual-reg: '%0' }
>> +  - { reg: '$vgpr1', virtual-reg: '%1' }
>> +  - { reg: '$vgpr2', virtual-reg: '%2' }
>> +  - { reg: '$vgpr3', virtual-reg: '%3' }
>> +  - { reg: '$vgpr4', virtual-reg: '%4' }
>> +  - { reg: '$sgpr30_sgpr31', virtual-reg: '%5' }
>> +body:             |
>> +  bb.0:
>> +    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $sgpr30_sgpr31
>> +    %5:sreg_64 = COPY $sgpr30_sgpr31
>> +    %4:vgpr_32 = COPY $vgpr4
>> +    %3:vgpr_32 = COPY $vgpr3
>> +    %2:vgpr_32 = COPY $vgpr2
>> +    %1:vgpr_32 = COPY $vgpr1
>> +    %0:vgpr_32 = COPY $vgpr0
>> +    %6:sreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
>> +    %7:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN %4, killed %6, 0, 0, 0, 0, 0, implicit $exec
>> +    $sgpr30_sgpr31 = COPY %5
>> +    $vgpr0 = COPY %7
>> +    S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
>> +...
>> +
>> +# COMMON-LABEL: name: offen
>> +# COMMON-LABEL:  bb.0:
>> +# COMMON-NEXT: successors: %bb.1({{.*}})
>> +# COMMON: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
>> +# COMMON: [[SAVEEXEC:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 $exec
>> +# COMMON-LABEL: bb.1:
>> +# COMMON-NEXT: successors: %bb.1({{.*}}), %bb.2({{.*}})
>> +# COMMON: [[SRSRC0:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub0, implicit $exec
>> +# COMMON: [[SRSRC1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub1, implicit $exec
>> +# COMMON: [[SRSRC2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub2, implicit $exec
>> +# COMMON: [[SRSRC3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub3, implicit $exec
>> +# COMMON: [[SRSRC:%[0-9]+]]:sreg_128 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1, [[SRSRC2]], %subreg.sub2, [[SRSRC3]], %subreg.sub3
>> +# COMMON: [[CMP0:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub0_sub1, [[VRSRC]].sub0_sub1, implicit $exec
>> +# COMMON: [[CMP1:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub2_sub3, [[VRSRC]].sub2_sub3, implicit $exec
>> +# COMMON: [[CMP:%[0-9]+]]:sreg_64 = S_AND_B64 [[CMP0]], [[CMP1]], implicit-def $scc
>> +# COMMON: [[TMPEXEC:%[0-9]+]]:sreg_64 = S_AND_SAVEEXEC_B64 killed [[CMP]], implicit-def $exec, implicit-def $scc, implicit $exec
>> +# COMMON: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_OFFEN %4, killed [[SRSRC]], 0, 0, 0, 0, 0, implicit $exec
>> +# COMMON: $exec = S_XOR_B64_term $exec, [[TMPEXEC]], implicit-def $scc
>> +# COMMON: S_CBRANCH_EXECNZ %bb.1, implicit $exec
>> +# COMMON-LABEL: bb.2:
>> +# COMMON: $exec = S_MOV_B64 [[SAVEEXEC]]
>> +---
>> +name:            offen
>> +liveins:
>> +  - { reg: '$vgpr0', virtual-reg: '%0' }
>> +  - { reg: '$vgpr1', virtual-reg: '%1' }
>> +  - { reg: '$vgpr2', virtual-reg: '%2' }
>> +  - { reg: '$vgpr3', virtual-reg: '%3' }
>> +  - { reg: '$vgpr4', virtual-reg: '%4' }
>> +  - { reg: '$sgpr30_sgpr31', virtual-reg: '%5' }
>> +body:             |
>> +  bb.0:
>> +    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $sgpr30_sgpr31
>> +    %5:sreg_64 = COPY $sgpr30_sgpr31
>> +    %4:vgpr_32 = COPY $vgpr4
>> +    %3:vgpr_32 = COPY $vgpr3
>> +    %2:vgpr_32 = COPY $vgpr2
>> +    %1:vgpr_32 = COPY $vgpr1
>> +    %0:vgpr_32 = COPY $vgpr0
>> +    %6:sreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
>> +    %7:vgpr_32 = BUFFER_LOAD_FORMAT_X_OFFEN %4, killed %6, 0, 0, 0, 0, 0, implicit $exec
>> +    $sgpr30_sgpr31 = COPY %5
>> +    $vgpr0 = COPY %7
>> +    S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
>> +...
>> +
>> +# COMMON-LABEL: name: bothen
>> +# COMMON-LABEL:  bb.0:
>> +# COMMON-NEXT: successors: %bb.1({{.*}})
>> +# COMMON: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
>> +# COMMON: [[SAVEEXEC:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 $exec
>> +# COMMON-LABEL: bb.1:
>> +# COMMON-NEXT: successors: %bb.1({{.*}}), %bb.2({{.*}})
>> +# COMMON: [[SRSRC0:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub0, implicit $exec
>> +# COMMON: [[SRSRC1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub1, implicit $exec
>> +# COMMON: [[SRSRC2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub2, implicit $exec
>> +# COMMON: [[SRSRC3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub3, implicit $exec
>> +# COMMON: [[SRSRC:%[0-9]+]]:sreg_128 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1, [[SRSRC2]], %subreg.sub2, [[SRSRC3]], %subreg.sub3
>> +# COMMON: [[CMP0:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub0_sub1, [[VRSRC]].sub0_sub1, implicit $exec
>> +# COMMON: [[CMP1:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub2_sub3, [[VRSRC]].sub2_sub3, implicit $exec
>> +# COMMON: [[CMP:%[0-9]+]]:sreg_64 = S_AND_B64 [[CMP0]], [[CMP1]], implicit-def $scc
>> +# COMMON: [[TMPEXEC:%[0-9]+]]:sreg_64 = S_AND_SAVEEXEC_B64 killed [[CMP]], implicit-def $exec, implicit-def $scc, implicit $exec
>> +# COMMON: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_BOTHEN %4, killed [[SRSRC]], 0, 0, 0, 0, 0, implicit $exec
>> +# COMMON: $exec = S_XOR_B64_term $exec, [[TMPEXEC]], implicit-def $scc
>> +# COMMON: S_CBRANCH_EXECNZ %bb.1, implicit $exec
>> +# COMMON-LABEL: bb.2:
>> +# COMMON: $exec = S_MOV_B64 [[SAVEEXEC]]
>> +---
>> +name:            bothen
>> +liveins:
>> +  - { reg: '$vgpr0', virtual-reg: '%0' }
>> +  - { reg: '$vgpr1', virtual-reg: '%1' }
>> +  - { reg: '$vgpr2', virtual-reg: '%2' }
>> +  - { reg: '$vgpr3', virtual-reg: '%3' }
>> +  - { reg: '$vgpr4_vgpr5', virtual-reg: '%4' }
>> +  - { reg: '$sgpr30_sgpr31', virtual-reg: '%5' }
>> +body:             |
>> +  bb.0:
>> +    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $sgpr30_sgpr31
>> +    %5:sreg_64 = COPY $sgpr30_sgpr31
>> +    %4:vreg_64 = COPY $vgpr4_vgpr5
>> +    %3:vgpr_32 = COPY $vgpr3
>> +    %2:vgpr_32 = COPY $vgpr2
>> +    %1:vgpr_32 = COPY $vgpr1
>> +    %0:vgpr_32 = COPY $vgpr0
>> +    %6:sreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
>> +    %7:vgpr_32 = BUFFER_LOAD_FORMAT_X_BOTHEN %4, killed %6, 0, 0, 0, 0, 0, implicit $exec
>> +    $sgpr30_sgpr31 = COPY %5
>> +    $vgpr0 = COPY %7
>> +    S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
>> +...
>> +
>> +# COMMON-LABEL: name: addr64
>> +# COMMON-LABEL:  bb.0:
>> +# COMMON: %12:vreg_64 = COPY %8.sub0_sub1
>> +# COMMON: %13:sreg_64 = S_MOV_B64 0
>> +# COMMON: %14:sgpr_32 = S_MOV_B32 0
>> +# COMMON: %15:sgpr_32 = S_MOV_B32 61440
>> +# COMMON: %16:sreg_128 = REG_SEQUENCE %13, %subreg.sub0_sub1, %14, %subreg.sub2, %15, %subreg.sub3
>> +# COMMON: %9:vgpr_32 = V_ADD_I32_e32 %12.sub0, %4.sub0, implicit-def $vcc, implicit $exec
>> +# COMMON: %10:vgpr_32 = V_ADDC_U32_e32 %12.sub1, %4.sub1, implicit-def $vcc, implicit $vcc, implicit $exec
>> +# COMMON: %11:vreg_64 = REG_SEQUENCE %9, %subreg.sub0, %10, %subreg.sub1
>> +# COMMON: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_ADDR64 %11, killed %16, 0, 0, 0, 0, 0, implicit $exec
>> +---
>> +name:            addr64
>> +liveins:
>> +  - { reg: '$vgpr0', virtual-reg: '%0' }
>> +  - { reg: '$vgpr1', virtual-reg: '%1' }
>> +  - { reg: '$vgpr2', virtual-reg: '%2' }
>> +  - { reg: '$vgpr3', virtual-reg: '%3' }
>> +  - { reg: '$vgpr4_vgpr5', virtual-reg: '%4' }
>> +  - { reg: '$sgpr30_sgpr31', virtual-reg: '%5' }
>> +body:             |
>> +  bb.0:
>> +    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $sgpr30_sgpr31
>> +    %5:sreg_64 = COPY $sgpr30_sgpr31
>> +    %4:vreg_64 = COPY $vgpr4_vgpr5
>> +    %3:vgpr_32 = COPY $vgpr3
>> +    %2:vgpr_32 = COPY $vgpr2
>> +    %1:vgpr_32 = COPY $vgpr1
>> +    %0:vgpr_32 = COPY $vgpr0
>> +    %6:sreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
>> +    %7:vgpr_32 = BUFFER_LOAD_FORMAT_X_ADDR64 %4, killed %6, 0, 0, 0, 0, 0, implicit $exec
>> +    $sgpr30_sgpr31 = COPY %5
>> +    $vgpr0 = COPY %7
>> +    S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
>> +...
>> +
>> +# COMMON-LABEL: name: offset
>> +# COMMON-LABEL:  bb.0:
>> +
>> +# NO-ADDR64-NEXT: successors: %bb.1({{.*}})
>> +# NO-ADDR64: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
>> +# NO-ADDR64: [[SAVEEXEC:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 $exec
>> +# NO-ADDR64-LABEL: bb.1:
>> +# NO-ADDR64-NEXT: successors: %bb.1({{.*}}), %bb.2({{.*}})
>> +# NO-ADDR64: [[SRSRC0:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub0, implicit $exec
>> +# NO-ADDR64: [[SRSRC1:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub1, implicit $exec
>> +# NO-ADDR64: [[SRSRC2:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub2, implicit $exec
>> +# NO-ADDR64: [[SRSRC3:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub3, implicit $exec
>> +# NO-ADDR64: [[SRSRC:%[0-9]+]]:sreg_128 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1, [[SRSRC2]], %subreg.sub2, [[SRSRC3]], %subreg.sub3
>> +# NO-ADDR64: [[CMP0:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub0_sub1, [[VRSRC]].sub0_sub1, implicit $exec
>> +# NO-ADDR64: [[CMP1:%[0-9]+]]:sreg_64 = V_CMP_EQ_U64_e64 [[SRSRC]].sub2_sub3, [[VRSRC]].sub2_sub3, implicit $exec
>> +# NO-ADDR64: [[CMP:%[0-9]+]]:sreg_64 = S_AND_B64 [[CMP0]], [[CMP1]], implicit-def $scc
>> +# NO-ADDR64: [[TMPEXEC:%[0-9]+]]:sreg_64 = S_AND_SAVEEXEC_B64 killed [[CMP]], implicit-def $exec, implicit-def $scc, implicit $exec
>> +# NO-ADDR64: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_OFFSET killed [[SRSRC]], 0, 0, 0, 0, 0, implicit $exec
>> +# NO-ADDR64: $exec = S_XOR_B64_term $exec, [[TMPEXEC]], implicit-def $scc
>> +# NO-ADDR64: S_CBRANCH_EXECNZ %bb.1, implicit $exec
>> +# NO-ADDR64-LABEL: bb.2:
>> +# NO-ADDR64: $exec = S_MOV_B64 [[SAVEEXEC]]
>> +
>> +# ADDR64: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
>> +# ADDR64: [[RSRCPTR:%[0-9]+]]:vreg_64 = COPY [[VRSRC]].sub0_sub1
>> +# ADDR64: [[ZERO64:%[0-9]+]]:sreg_64 = S_MOV_B64 0
>> +# ADDR64: [[RSRCFMTLO:%[0-9]+]]:sgpr_32 = S_MOV_B32 0
>> +# ADDR64: [[RSRCFMTHI:%[0-9]+]]:sgpr_32 = S_MOV_B32 61440
>> +# ADDR64: [[ZERORSRC:%[0-9]+]]:sreg_128 = REG_SEQUENCE [[ZERO64]], %subreg.sub0_sub1, [[RSRCFMTLO]], %subreg.sub2, [[RSRCFMTHI]], %subreg.sub3
>> +# ADDR64: [[VADDR64:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[RSRCPTR]].sub0, %subreg.sub0, [[RSRCPTR]].sub1, %subreg.sub1
>> +# ADDR64: {{[0-9]+}}:vgpr_32 = BUFFER_LOAD_FORMAT_X_ADDR64 [[VADDR64]], [[ZERORSRC]], 0, 0, 0, 0, 0, implicit $exec
>> +
>> +---
>> +name:            offset
>> +liveins:
>> +  - { reg: '$vgpr0', virtual-reg: '%0' }
>> +  - { reg: '$vgpr1', virtual-reg: '%1' }
>> +  - { reg: '$vgpr2', virtual-reg: '%2' }
>> +  - { reg: '$vgpr3', virtual-reg: '%3' }
>> +  - { reg: '$vgpr4_vgpr5', virtual-reg: '%4' }
>> +  - { reg: '$sgpr30_sgpr31', virtual-reg: '%5' }
>> +body:             |
>> +  bb.0:
>> +    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $sgpr30_sgpr31
>> +    %5:sreg_64 = COPY $sgpr30_sgpr31
>> +    %4:vreg_64 = COPY $vgpr4_vgpr5
>> +    %3:vgpr_32 = COPY $vgpr3
>> +    %2:vgpr_32 = COPY $vgpr2
>> +    %1:vgpr_32 = COPY $vgpr1
>> +    %0:vgpr_32 = COPY $vgpr0
>> +    %6:sreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub2, %3, %subreg.sub3
>> +    %7:vgpr_32 = BUFFER_LOAD_FORMAT_X_OFFSET killed %6, 0, 0, 0, 0, 0, implicit $exec
>> +    $sgpr30_sgpr31 = COPY %5
>> +    $vgpr0 = COPY %7
>> +    S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
>> +...
>>
>>