[llvm] r311278 - [GlobalISel][X86] Support asymmetric copy from/to GPR physical register.
Igor Breger via llvm-commits
llvm-commits at lists.llvm.org
Sun Aug 20 00:14:40 PDT 2017
Author: ibreger
Date: Sun Aug 20 00:14:40 2017
New Revision: 311278
URL: http://llvm.org/viewvc/llvm-project?rev=311278&view=rev
Log:
[GlobalISel][X86] Support asymmetric copy from/to GPR physical register.
Usually this case is generated by ABI lowering; it requires performing a truncate/anyext.
Added:
llvm/trunk/test/CodeGen/X86/GlobalISel/select-copy.mir
Modified:
llvm/trunk/lib/Target/X86/X86InstructionSelector.cpp
Modified: llvm/trunk/lib/Target/X86/X86InstructionSelector.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstructionSelector.cpp?rev=311278&r1=311277&r2=311278&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstructionSelector.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86InstructionSelector.cpp Sun Aug 20 00:14:40 2017
@@ -167,21 +167,72 @@ X86InstructionSelector::getRegClass(LLT
return getRegClass(Ty, RegBank);
}
+unsigned getSubRegIndex(const TargetRegisterClass *RC) {
+ unsigned SubIdx = X86::NoSubRegister;
+ if (RC == &X86::GR32RegClass) {
+ SubIdx = X86::sub_32bit;
+ } else if (RC == &X86::GR16RegClass) {
+ SubIdx = X86::sub_16bit;
+ } else if (RC == &X86::GR8RegClass) {
+ SubIdx = X86::sub_8bit;
+ }
+
+ return SubIdx;
+}
+
+const TargetRegisterClass *getRegClassFromGRPhysReg(unsigned Reg) {
+ assert(TargetRegisterInfo::isPhysicalRegister(Reg));
+ if (X86::GR64RegClass.contains(Reg))
+ return &X86::GR64RegClass;
+ if (X86::GR32RegClass.contains(Reg))
+ return &X86::GR32RegClass;
+ if (X86::GR16RegClass.contains(Reg))
+ return &X86::GR16RegClass;
+ if (X86::GR8RegClass.contains(Reg))
+ return &X86::GR8RegClass;
+
+ llvm_unreachable("Unknown RegClass for PhysReg!");
+}
+
// Set X86 Opcode and constrain DestReg.
bool X86InstructionSelector::selectCopy(MachineInstr &I,
MachineRegisterInfo &MRI) const {
unsigned DstReg = I.getOperand(0).getReg();
+ const unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
+ const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);
+
+ unsigned SrcReg = I.getOperand(1).getReg();
+ const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
+ const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
+
if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
assert(I.isCopy() && "Generic operators do not allow physical registers");
+
+ if (DstSize > SrcSize && SrcRegBank.getID() == X86::GPRRegBankID &&
+ DstRegBank.getID() == X86::GPRRegBankID) {
+
+ const TargetRegisterClass *SrcRC =
+ getRegClass(MRI.getType(SrcReg), SrcRegBank);
+ const TargetRegisterClass *DstRC = getRegClassFromGRPhysReg(DstReg);
+
+ if (SrcRC != DstRC) {
+ // This case can be generated by ABI lowering, performe anyext
+ unsigned ExtSrc = MRI.createVirtualRegister(DstRC);
+ BuildMI(*I.getParent(), I, I.getDebugLoc(),
+ TII.get(TargetOpcode::SUBREG_TO_REG))
+ .addDef(ExtSrc)
+ .addImm(0)
+ .addReg(SrcReg)
+ .addImm(getSubRegIndex(SrcRC));
+
+ I.getOperand(1).setReg(ExtSrc);
+ }
+ }
+
return true;
}
- const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
- const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();
- unsigned SrcReg = I.getOperand(1).getReg();
- const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
-
assert((!TargetRegisterInfo::isPhysicalRegister(SrcReg) || I.isCopy()) &&
"No phys reg on generic operators");
assert((DstSize == SrcSize ||
@@ -191,38 +242,28 @@ bool X86InstructionSelector::selectCopy(
DstSize <= RBI.getSizeInBits(SrcReg, MRI, TRI))) &&
"Copy with different width?!");
- const TargetRegisterClass *RC = nullptr;
+ const TargetRegisterClass *DstRC =
+ getRegClass(MRI.getType(DstReg), DstRegBank);
+
+ if (SrcRegBank.getID() == X86::GPRRegBankID &&
+ DstRegBank.getID() == X86::GPRRegBankID && SrcSize > DstSize &&
+ TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
+ // Change the physical register to performe truncate.
- switch (RegBank.getID()) {
- case X86::GPRRegBankID:
- assert((DstSize <= 64) && "GPRs cannot get more than 64-bit width values.");
- RC = getRegClass(MRI.getType(DstReg), RegBank);
-
- // Change the physical register
- if (SrcSize > DstSize && TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
- if (RC == &X86::GR32RegClass)
- I.getOperand(1).setSubReg(X86::sub_32bit);
- else if (RC == &X86::GR16RegClass)
- I.getOperand(1).setSubReg(X86::sub_16bit);
- else if (RC == &X86::GR8RegClass)
- I.getOperand(1).setSubReg(X86::sub_8bit);
+ const TargetRegisterClass *SrcRC = getRegClassFromGRPhysReg(SrcReg);
+ if (DstRC != SrcRC) {
+ I.getOperand(1).setSubReg(getSubRegIndex(DstRC));
I.getOperand(1).substPhysReg(SrcReg, TRI);
}
- break;
- case X86::VECRRegBankID:
- RC = getRegClass(MRI.getType(DstReg), RegBank);
- break;
- default:
- llvm_unreachable("Unknown RegBank!");
}
// No need to constrain SrcReg. It will get constrained when
// we hit another of its use or its defs.
// Copies do not have constraints.
const TargetRegisterClass *OldRC = MRI.getRegClassOrNull(DstReg);
- if (!OldRC || !RC->hasSubClassEq(OldRC)) {
- if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
+ if (!OldRC || !DstRC->hasSubClassEq(OldRC)) {
+ if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
<< " operand\n");
return false;
Added: llvm/trunk/test/CodeGen/X86/GlobalISel/select-copy.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-copy.mir?rev=311278&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-copy.mir (added)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-copy.mir Sun Aug 20 00:14:40 2017
@@ -0,0 +1,185 @@
+# RUN: llc -mtriple=i386-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X32
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
+
+--- |
+
+ define void @test_copy() {
+ ret void
+ }
+
+ define void @test_copy2() {
+ ret void
+ }
+
+ define void @test_copy3() {
+ ret void
+ }
+
+ define void @test_copy4() {
+ ret void
+ }
+
+ define void @test_copy5() {
+ ret void
+ }
+
+ define void @test_copy6() {
+ ret void
+ }
+
+...
+---
+name: test_copy
+# ALL-LABEL: name: test_copy
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr8, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: gr32, preferred-register: '' }
+# ALL-NEXT: - { id: 2, class: gr32, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+# ALL: %0 = COPY %al
+# ALL-NEXT: %2 = SUBREG_TO_REG 0, %0, 1
+# ALL-NEXT: %1 = AND32ri8 %2, 1, implicit-def %eflags
+# ALL-NEXT: %eax = COPY %1
+# ALL-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %eax
+
+ %0(s1) = COPY %al
+ %1(s32) = G_ZEXT %0(s1)
+ %eax = COPY %1(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: test_copy2
+# ALL-LABEL: name: test_copy2
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr8, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: gr32, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+# ALL: %0 = COPY %al
+# ALL-NEXT: %1 = MOVZX32rr8 %0
+# ALL-NEXT: %eax = COPY %1
+# ALL-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %eax
+
+ %0(s8) = COPY %al
+ %1(s32) = G_ZEXT %0(s8)
+ %eax = COPY %1(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: test_copy3
+# ALL-LABEL: name: test_copy3
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr8, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: gr32, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+# ALL: %0 = COPY %al
+# ALL-NEXT: %1 = MOVZX32rr8 %0
+# ALL-NEXT: %eax = COPY %1
+# ALL-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %eax
+
+ %0(s8) = COPY %ax
+ %1(s32) = G_ZEXT %0(s8)
+ %eax = COPY %1(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: test_copy4
+# ALL-LABEL: name: test_copy4
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr16, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: gr32, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+# ALL: %0 = COPY %ax
+# ALL-NEXT: %1 = MOVZX32rr16 %0
+# ALL-NEXT: %eax = COPY %1
+# ALL-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %eax
+
+ %0(s16) = COPY %eax
+ %1(s32) = G_ZEXT %0(s16)
+ %eax = COPY %1(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: test_copy5
+# ALL-LABEL: name: test_copy5
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr8, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: gr32, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr, preferred-register: '' }
+# ALL: %0 = COPY %dl
+# ALL-NEXT: %1 = SUBREG_TO_REG 0, %0, 1
+# ALL-NEXT: %eax = COPY %1
+# ALL-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %eax,%edx
+
+ %0(s8) = COPY %edx
+ %eax = COPY %0(s8)
+ RET 0, implicit %eax
+
+...
+---
+name: test_copy6
+# ALL-LABEL: name: test_copy6
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr16, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: gr32, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr, preferred-register: '' }
+# ALL: %0 = COPY %dx
+# ALL-NEXT: %1 = SUBREG_TO_REG 0, %0, 3
+# ALL-NEXT: %eax = COPY %1
+# ALL-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %eax,%edx
+
+ %0(s16) = COPY %edx
+ %eax = COPY %0(s16)
+ RET 0, implicit %eax
+
+...
+
More information about the llvm-commits
mailing list