[llvm] r341068 - [AMDGPU] Preliminary patch for divergence driven instruction selection. Operands Folding 1.
Alexander Timofeev via llvm-commits
llvm-commits at lists.llvm.org
Thu Aug 30 06:55:05 PDT 2018
Author: alex-t
Date: Thu Aug 30 06:55:04 2018
New Revision: 341068
URL: http://llvm.org/viewvc/llvm-project?rev=341068&view=rev
Log:
[AMDGPU] Preliminary patch for divergence driven instruction selection. Operands Folding 1.
Reviewers: rampitec
Differential Revision: https://reviews.llvm.org/D51316
Added:
llvm/trunk/test/CodeGen/AMDGPU/fold-imm-copy.mir
Modified:
llvm/trunk/lib/Target/AMDGPU/SIFoldOperands.cpp
Modified: llvm/trunk/lib/Target/AMDGPU/SIFoldOperands.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIFoldOperands.cpp?rev=341068&r1=341067&r2=341068&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIFoldOperands.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIFoldOperands.cpp Thu Aug 30 06:55:04 2018
@@ -438,8 +438,6 @@ void SIFoldOperands::foldOperand(
 
   bool FoldingImm = OpToFold.isImm();
 
-  // In order to fold immediates into copies, we need to change the
-  // copy to a MOV.
   if (FoldingImm && UseMI->isCopy()) {
     unsigned DestReg = UseMI->getOperand(0).getReg();
     const TargetRegisterClass *DestRC
@@ -447,6 +445,31 @@ void SIFoldOperands::foldOperand(
       MRI->getRegClass(DestReg) :
       TRI->getPhysRegClass(DestReg);
 
+    unsigned SrcReg = UseMI->getOperand(1).getReg();
+    if (TargetRegisterInfo::isVirtualRegister(DestReg) &&
+        TargetRegisterInfo::isVirtualRegister(SrcReg)) {
+      const TargetRegisterClass *SrcRC = MRI->getRegClass(SrcReg);
+      if (TRI->isSGPRClass(SrcRC) && TRI->hasVGPRs(DestRC)) {
+        MachineRegisterInfo::use_iterator NextUse;
+        SmallVector<FoldCandidate, 4> CopyUses;
+        for (MachineRegisterInfo::use_iterator
+               Use = MRI->use_begin(DestReg), E = MRI->use_end();
+             Use != E; Use = NextUse) {
+          NextUse = std::next(Use);
+          FoldCandidate FC = FoldCandidate(Use->getParent(),
+              Use.getOperandNo(), &UseMI->getOperand(1));
+          CopyUses.push_back(FC);
+        }
+        for (auto &F : CopyUses) {
+          foldOperand(*F.OpToFold, F.UseMI, F.UseOpNo,
+                      FoldList, CopiesToReplace);
+        }
+      }
+    }
+
+    // In order to fold immediates into copies, we need to change the
+    // copy to a MOV.
+
     unsigned MovOp = TII->getMovOpcode(DestRC);
     if (MovOp == AMDGPU::COPY)
       return;
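
In effect, the new block teaches the pass to look through an SGPR-to-VGPR COPY: when both registers are virtual, the source class is SGPR, and the destination class can hold VGPRs, each use of the VGPR destination is collected as a FoldCandidate and re-fed to foldOperand with the copy's SGPR source, so those users can be rewritten to read the scalar register directly and the COPY can go dead. A minimal before/after MIR sketch of the intended effect (hand-written for illustration with made-up register numbers, not output from the commit):

    # Before si-fold-operands: %1 copies an SGPR into a VGPR, and the
    # VALU user reads the VGPR copy (%3 stands for some other VGPR value).
    %0:sreg_32_xm0 = S_MOV_B32 65535
    %1:vgpr_32 = COPY %0
    %2:vgpr_32 = V_AND_B32_e32 %3, %1, implicit $exec

    # After: the use reads the SGPR directly, with the operands commuted so
    # the scalar lands in src0 (the only VOP2 slot that may read an SGPR),
    # and the COPY is left dead for later cleanup.
    %0:sreg_32_xm0 = S_MOV_B32 65535
    %2:vgpr_32 = V_AND_B32_e32 %0, %3, implicit $exec

The new fold-imm-copy.mir test below checks exactly this pattern.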
Added: llvm/trunk/test/CodeGen/AMDGPU/fold-imm-copy.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fold-imm-copy.mir?rev=341068&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/fold-imm-copy.mir (added)
+++ llvm/trunk/test/CodeGen/AMDGPU/fold-imm-copy.mir Thu Aug 30 06:55:04 2018
@@ -0,0 +1,22 @@
+# RUN: llc -march=amdgcn -run-pass si-fold-operands %s -o - | FileCheck -check-prefix=GCN %s
+
+# GCN-LABEL: name: fold-imm-copy
+# GCN: [[SREG:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 65535
+# GCN: V_AND_B32_e32 [[SREG]]
+
+name: fold-imm-copy
+body: |
+  bb.0:
+    liveins: $vgpr0, $sgpr0_sgpr1
+    %0:vgpr_32 = COPY $vgpr0
+    %1:sgpr_64 = COPY $sgpr0_sgpr1
+    %2:sreg_128 = S_LOAD_DWORDX4_IMM %1, 9, 0
+    %3:sreg_32_xm0 = S_MOV_B32 2
+    %4:vgpr_32 = V_LSHLREV_B32_e64 killed %3, %0, implicit $exec
+    %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+    %6:vreg_64 = REG_SEQUENCE killed %4, %subreg.sub0, killed %5, %subreg.sub1
+    %7:vgpr_32 = BUFFER_LOAD_DWORD_ADDR64 %6, %2, 0, 4, 0, 0, 0, implicit $exec
+    %8:sreg_32_xm0 = S_MOV_B32 65535
+    %9:vgpr_32 = COPY %8
+    %10:vgpr_32 = V_AND_B32_e32 %7, %9, implicit $exec
+...
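
The pair to watch here is %8/%9: %8 materializes the scalar 65535 and %9 is its VGPR copy feeding V_AND_B32_e32. After si-fold-operands, the AND is expected to consume [[SREG]] (%8) directly, along the lines of (hand-abbreviated sketch, not verbatim pass output):

    %8:sreg_32_xm0 = S_MOV_B32 65535
    %10:vgpr_32 = V_AND_B32_e32 %8, %7, implicit $exec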