[llvm] r324665 - [GlobalISel][X86] Fixing failures after https://reviews.llvm.org/D37775
Alexander Ivchenko via llvm-commits
llvm-commits at lists.llvm.org
Thu Feb 8 14:41:47 PST 2018
Author: aivchenk
Date: Thu Feb 8 14:41:47 2018
New Revision: 324665
URL: http://llvm.org/viewvc/llvm-project?rev=324665&view=rev
Log:
[GlobalISel][X86] Fixing failures after https://reviews.llvm.org/D37775
The patch makes sure that X86CallLowering adds the proper
G_COPY/G_TRUNC and G_ANYEXT/G_COPY sequences when lowering
arguments/returns for floating-point values passed in registers.
Tests are updated accordingly.
Reviewed By: qcolombet
Differential Revision: https://reviews.llvm.org/D42287
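
For a concrete picture of the new sequences, here is the X64 IRTranslator
output for a function returning its second float argument, distilled from
the updated irtranslator-callingconv.ll checks below (value numbering is
illustrative):

  %2:_(s128) = COPY $xmm0        ; arguments arrive as the full 128-bit register
  %0:_(s32) = G_TRUNC %2(s128)   ; narrow to the actual f32 value
  %3:_(s128) = COPY $xmm1
  %1:_(s32) = G_TRUNC %3(s128)
  %5:_(s128) = G_ANYEXT %1(s32)  ; widen again before handing back the return value
  $xmm0 = COPY %5(s128)
  RET 0, implicit $xmm0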
Modified:
llvm/trunk/lib/Target/X86/X86CallLowering.cpp
llvm/trunk/lib/Target/X86/X86InstructionSelector.cpp
llvm/trunk/lib/Target/X86/X86LegalizerInfo.cpp
llvm/trunk/lib/Target/X86/X86RegisterBankInfo.cpp
llvm/trunk/test/CodeGen/X86/GlobalISel/callingconv.ll
llvm/trunk/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll
llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fadd-scalar.mir
llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fdiv-scalar.mir
llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fmul-scalar.mir
llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fpext-scalar.mir
llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fsub-scalar.mir
llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-phi.mir
llvm/trunk/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir
llvm/trunk/test/CodeGen/X86/GlobalISel/select-fadd-scalar.mir
llvm/trunk/test/CodeGen/X86/GlobalISel/select-fconstant.mir
llvm/trunk/test/CodeGen/X86/GlobalISel/select-fdiv-scalar.mir
llvm/trunk/test/CodeGen/X86/GlobalISel/select-fmul-scalar.mir
llvm/trunk/test/CodeGen/X86/GlobalISel/select-fpext-scalar.mir
llvm/trunk/test/CodeGen/X86/GlobalISel/select-fsub-scalar.mir
llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir
llvm/trunk/test/CodeGen/X86/GlobalISel/select-phi.mir
Modified: llvm/trunk/lib/Target/X86/X86CallLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86CallLowering.cpp?rev=324665&r1=324664&r2=324665&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86CallLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86CallLowering.cpp Thu Feb 8 14:41:47 2018
@@ -126,7 +126,25 @@ struct OutgoingValueHandler : public Cal
void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
CCValAssign &VA) override {
MIB.addUse(PhysReg, RegState::Implicit);
- unsigned ExtReg = extendRegister(ValVReg, VA);
+
+ unsigned ExtReg;
+ // If we are copying the value to a physical register whose size is
+ // larger than the size of the value itself, build a G_ANYEXT to the
+ // size of the register first and only then do the copy. An example
+ // would be copying from s32 to xmm0, in which case
+ // ValVT == LocVT == MVT::f32. If LocSize and ValSize are not equal,
+ // we expect the normal extendRegister mechanism to handle it.
+ unsigned PhysRegSize =
+ MRI.getTargetRegisterInfo()->getRegSizeInBits(PhysReg, MRI);
+ unsigned ValSize = VA.getValVT().getSizeInBits();
+ unsigned LocSize = VA.getLocVT().getSizeInBits();
+ if (PhysRegSize > ValSize && LocSize == ValSize) {
+ assert((PhysRegSize == 128 || PhysRegSize == 80) && "We expect that to be 128 or 80 bit");
+ auto MIB = MIRBuilder.buildAnyExt(LLT::scalar(PhysRegSize), ValVReg);
+ ExtReg = MIB->getOperand(0).getReg();
+ } else
+ ExtReg = extendRegister(ValVReg, VA);
+
MIRBuilder.buildCopy(PhysReg, ExtReg);
}
@@ -229,10 +247,28 @@ struct IncomingValueHandler : public Cal
void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
CCValAssign &VA) override {
markPhysRegUsed(PhysReg);
+
switch (VA.getLocInfo()) {
- default:
+ default: {
+ // If we are copying the value from a physical register whose size is
+ // larger than the size of the value itself, build the copy of the
+ // phys reg first and then truncate that copy. An example would be
+ // copying from xmm0 to s32, in which case ValVT == LocVT == MVT::f32.
+ // If LocSize and ValSize are not equal, we expect this to be handled
+ // by the SExt/ZExt/AExt cases below.
+ unsigned PhysRegSize =
+ MRI.getTargetRegisterInfo()->getRegSizeInBits(PhysReg, MRI);
+ unsigned ValSize = VA.getValVT().getSizeInBits();
+ unsigned LocSize = VA.getLocVT().getSizeInBits();
+ if (PhysRegSize > ValSize && LocSize == ValSize) {
+ auto Copy = MIRBuilder.buildCopy(LLT::scalar(PhysRegSize), PhysReg);
+ MIRBuilder.buildTrunc(ValVReg, Copy);
+ return;
+ }
+
MIRBuilder.buildCopy(ValVReg, PhysReg);
break;
+ }
case CCValAssign::LocInfo::SExt:
case CCValAssign::LocInfo::ZExt:
case CCValAssign::LocInfo::AExt: {
Modified: llvm/trunk/lib/Target/X86/X86InstructionSelector.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstructionSelector.cpp?rev=324665&r1=324664&r2=324665&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstructionSelector.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86InstructionSelector.cpp Thu Feb 8 14:41:47 2018
@@ -104,6 +104,11 @@ private:
MachineFunction &MF) const;
bool selectCondBranch(MachineInstr &I, MachineRegisterInfo &MRI,
MachineFunction &MF) const;
+ bool selectTurnIntoCOPY(MachineInstr &I, MachineRegisterInfo &MRI,
+ const unsigned DstReg,
+ const TargetRegisterClass *DstRC,
+ const unsigned SrcReg,
+ const TargetRegisterClass *SrcRC) const;
bool materializeFP(MachineInstr &I, MachineRegisterInfo &MRI,
MachineFunction &MF) const;
bool selectImplicitDefOrPHI(MachineInstr &I, MachineRegisterInfo &MRI) const;
@@ -640,6 +645,31 @@ bool X86InstructionSelector::selectConst
return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
+// Helper function for selectTrunc and selectAnyext.
+// Returns true if DstRC is a floating-point register class and
+// SrcRC is a 128-bit vector register class.
+static bool canTurnIntoCOPY(const TargetRegisterClass *DstRC,
+ const TargetRegisterClass *SrcRC) {
+ return (DstRC == &X86::FR32RegClass || DstRC == &X86::FR32XRegClass ||
+ DstRC == &X86::FR64RegClass || DstRC == &X86::FR64XRegClass) &&
+ (SrcRC == &X86::VR128RegClass || SrcRC == &X86::VR128XRegClass);
+}
+
+bool X86InstructionSelector::selectTurnIntoCOPY(
+ MachineInstr &I, MachineRegisterInfo &MRI, const unsigned DstReg,
+ const TargetRegisterClass *DstRC, const unsigned SrcReg,
+ const TargetRegisterClass *SrcRC) const {
+
+ if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
+ !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
+ DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
+ << " operand\n");
+ return false;
+ }
+ I.setDesc(TII.get(X86::COPY));
+ return true;
+}
+
bool X86InstructionSelector::selectTrunc(MachineInstr &I,
MachineRegisterInfo &MRI,
MachineFunction &MF) const {
@@ -659,15 +689,19 @@ bool X86InstructionSelector::selectTrunc
return false;
}
- if (DstRB.getID() != X86::GPRRegBankID)
- return false;
-
const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
- if (!DstRC)
+ const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);
+
+ if (!DstRC || !SrcRC)
return false;
- const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);
- if (!SrcRC)
+ // If this is a truncation of a value that lives in a vector register class
+ // and goes into a floating-point register class, just replace it with a
+ // copy, as we are able to select it as a regular move.
+ if (canTurnIntoCOPY(DstRC, SrcRC))
+ return selectTurnIntoCOPY(I, MRI, DstReg, DstRC, SrcReg, SrcRC);
+
+ if (DstRB.getID() != X86::GPRRegBankID)
return false;
unsigned SubIdx;
@@ -765,12 +799,18 @@ bool X86InstructionSelector::selectAnyex
assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
"G_ANYEXT incorrect operand size");
- if (DstRB.getID() != X86::GPRRegBankID)
- return false;
-
const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);
+ // If this is an ANY_EXT of a value that lives in a floating-point register
+ // class and goes into a vector register class, just replace it with a copy,
+ // as we are able to select it as a regular move.
+ if (canTurnIntoCOPY(SrcRC, DstRC))
+ return selectTurnIntoCOPY(I, MRI, SrcReg, SrcRC, DstReg, DstRC);
+
+ if (DstRB.getID() != X86::GPRRegBankID)
+ return false;
+
if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
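
With these hooks in place, the s128 G_TRUNC/G_ANYEXT pair around an FP
operation is selected into plain register-class copies. A sketch of the
selected form, following the updated select-fadd-scalar.mir SSE checks
below (value numbering is illustrative):

  %1:vr128 = COPY $xmm0
  %2:fr32 = COPY %1          ; former G_TRUNC, now a cross-class COPY
  %3:vr128 = COPY $xmm1
  %4:fr32 = COPY %3
  %5:fr32 = ADDSSrr %2, %4
  %6:vr128 = COPY %5         ; former G_ANYEXT, also a COPY
  $xmm0 = COPY %6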
Modified: llvm/trunk/lib/Target/X86/X86LegalizerInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86LegalizerInfo.cpp?rev=324665&r1=324664&r2=324665&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86LegalizerInfo.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86LegalizerInfo.cpp Thu Feb 8 14:41:47 2018
@@ -92,6 +92,7 @@ void X86LegalizerInfo::setLegalizerInfo3
const LLT s16 = LLT::scalar(16);
const LLT s32 = LLT::scalar(32);
const LLT s64 = LLT::scalar(64);
+ const LLT s128 = LLT::scalar(128);
for (auto Ty : {p0, s1, s8, s16, s32})
setAction({G_IMPLICIT_DEF, Ty}, Legal);
@@ -136,6 +137,7 @@ void X86LegalizerInfo::setLegalizerInfo3
setAction({G_SEXT, Ty}, Legal);
setAction({G_ANYEXT, Ty}, Legal);
}
+ setAction({G_ANYEXT, s128}, Legal);
// Comparison
setAction({G_ICMP, s1}, Legal);
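
The new s128 G_ANYEXT action is the small enabler here: without it the
legalizer would have no rule for the extension that call lowering now
emits. The sequence below (mirroring the legalize-fadd-scalar.mir checks
that follow) passes through legalization untouched:

  %2:_(s128) = COPY $xmm0
  %0:_(s32) = G_TRUNC %2(s128)   ; already legal
  %5:_(s128) = G_ANYEXT %4(s32)  ; newly marked legal for s128
  $xmm0 = COPY %5(s128)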
Modified: llvm/trunk/lib/Target/X86/X86RegisterBankInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86RegisterBankInfo.cpp?rev=324665&r1=324664&r2=324665&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86RegisterBankInfo.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86RegisterBankInfo.cpp Thu Feb 8 14:41:47 2018
@@ -73,6 +73,8 @@ X86GenRegisterBankInfo::getPartialMappin
return PMI_GPR32;
case 64:
return PMI_GPR64;
+ case 128:
+ return PMI_VEC128;
break;
default:
llvm_unreachable("Unsupported register size.");
@@ -83,6 +85,8 @@ X86GenRegisterBankInfo::getPartialMappin
return PMI_FP32;
case 64:
return PMI_FP64;
+ case 128:
+ return PMI_VEC128;
default:
llvm_unreachable("Unsupported register size.");
}
@@ -190,6 +194,23 @@ X86RegisterBankInfo::getInstrMapping(con
// Instruction having only floating-point operands (all scalars in VECRReg)
getInstrPartialMappingIdxs(MI, MRI, /* isFP */ true, OpRegBankIdx);
break;
+ case TargetOpcode::G_TRUNC:
+ case TargetOpcode::G_ANYEXT: {
+ auto &Op0 = MI.getOperand(0);
+ auto &Op1 = MI.getOperand(1);
+ const LLT Ty0 = MRI.getType(Op0.getReg());
+ const LLT Ty1 = MRI.getType(Op1.getReg());
+
+ bool isFPTrunc = (Ty0.getSizeInBits() == 32 || Ty0.getSizeInBits() == 64) &&
+ Ty1.getSizeInBits() == 128 && Opc == TargetOpcode::G_TRUNC;
+ bool isFPAnyExt =
+ Ty0.getSizeInBits() == 128 &&
+ (Ty1.getSizeInBits() == 32 || Ty1.getSizeInBits() == 64) &&
+ Opc == TargetOpcode::G_ANYEXT;
+
+ getInstrPartialMappingIdxs(MI, MRI, /* isFP */ isFPTrunc || isFPAnyExt,
+ OpRegBankIdx);
+ } break;
default:
// Track the bank of each register, use NotFP mapping (all scalars in GPRs)
getInstrPartialMappingIdxs(MI, MRI, /* isFP */ false, OpRegBankIdx);
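
The mapping logic only routes G_TRUNC/G_ANYEXT to the vector bank when the
instruction matches the FP hand-off shape (a 32- or 64-bit scalar paired
with a 128-bit value); any other trunc/ext keeps the default GPR mapping.
The mapped form, matching the regbankselect-X86_64.mir checks below:

  %0:vecr(s128) = COPY $xmm0
  %1:vecr(s32) = G_TRUNC %0(s128)    ; s128 operand puts both sides in vecr
  %2:vecr(s128) = G_ANYEXT %1(s32)   ; likewise for the widening direction
  $xmm0 = COPY %2(s128)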
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/callingconv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/callingconv.ll?rev=324665&r1=324664&r2=324665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/callingconv.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/callingconv.ll Thu Feb 8 14:41:47 2018
@@ -400,9 +400,9 @@ define void @test_variadic_call_2(i8** %
; X64-NEXT: pushq %rax
; X64-NEXT: .cfi_def_cfa_offset 16
; X64-NEXT: movq (%rdi), %rdi
-; X64-NEXT: movq (%rsi), %rcx
+; X64-NEXT: movq (%rsi), %rax
+; X64-NEXT: movq %rax, %xmm0
; X64-NEXT: movb $1, %al
-; X64-NEXT: movq %rcx, %xmm0
; X64-NEXT: callq variadic_callee
; X64-NEXT: popq %rax
; X64-NEXT: retq
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll?rev=324665&r1=324664&r2=324665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll Thu Feb 8 14:41:47 2018
@@ -222,14 +222,18 @@ define float @test_float_args(float %arg
; X32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.1, align 0)
; X32: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
- ; X32: $fp0 = COPY [[LOAD1]](s32)
+ ; X32: [[ANYEXT:%[0-9]+]]:_(s80) = G_ANYEXT [[LOAD1]](s32)
+ ; X32: $fp0 = COPY [[ANYEXT]](s80)
; X32: RET 0, implicit $fp0
; X64-LABEL: name: test_float_args
; X64: bb.1 (%ir-block.0):
; X64: liveins: $xmm0, $xmm1
- ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY $xmm0
- ; X64: [[COPY1:%[0-9]+]]:_(s32) = COPY $xmm1
- ; X64: $xmm0 = COPY [[COPY1]](s32)
+ ; X64: [[COPY:%[0-9]+]]:_(s128) = COPY $xmm0
+ ; X64: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s128)
+ ; X64: [[COPY1:%[0-9]+]]:_(s128) = COPY $xmm1
+ ; X64: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s128)
+ ; X64: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[TRUNC1]](s32)
+ ; X64: $xmm0 = COPY [[ANYEXT]](s128)
; X64: RET 0, implicit $xmm0
ret float %arg2
}
@@ -241,14 +245,18 @@ define double @test_double_args(double %
; X32: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 8 from %fixed-stack.1, align 0)
; X32: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X32: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 8 from %fixed-stack.0, align 0)
- ; X32: $fp0 = COPY [[LOAD1]](s64)
+ ; X32: [[ANYEXT:%[0-9]+]]:_(s80) = G_ANYEXT [[LOAD1]](s64)
+ ; X32: $fp0 = COPY [[ANYEXT]](s80)
; X32: RET 0, implicit $fp0
; X64-LABEL: name: test_double_args
; X64: bb.1 (%ir-block.0):
; X64: liveins: $xmm0, $xmm1
- ; X64: [[COPY:%[0-9]+]]:_(s64) = COPY $xmm0
- ; X64: [[COPY1:%[0-9]+]]:_(s64) = COPY $xmm1
- ; X64: $xmm0 = COPY [[COPY1]](s64)
+ ; X64: [[COPY:%[0-9]+]]:_(s128) = COPY $xmm0
+ ; X64: [[TRUNC:%[0-9]+]]:_(s64) = G_TRUNC [[COPY]](s128)
+ ; X64: [[COPY1:%[0-9]+]]:_(s128) = COPY $xmm1
+ ; X64: [[TRUNC1:%[0-9]+]]:_(s64) = G_TRUNC [[COPY1]](s128)
+ ; X64: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[TRUNC1]](s64)
+ ; X64: $xmm0 = COPY [[ANYEXT]](s128)
; X64: RET 0, implicit $xmm0
ret double %arg2
}
@@ -684,7 +692,8 @@ define void @test_variadic_call_2(i8** %
; X64: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[COPY1]](p0) :: (load 8 from %ir.val_ptr)
; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: $rdi = COPY [[LOAD]](p0)
- ; X64: $xmm0 = COPY [[LOAD1]](s64)
+ ; X64: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[LOAD1]](s64)
+ ; X64: $xmm0 = COPY [[ANYEXT]](s128)
; X64: $al = MOV8ri 1
; X64: CALL64pcrel32 @variadic_callee, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $xmm0, implicit $al
; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fadd-scalar.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fadd-scalar.mir?rev=324665&r1=324664&r2=324665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fadd-scalar.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fadd-scalar.mir Thu Feb 8 14:41:47 2018
@@ -22,6 +22,9 @@ registers:
- { id: 0, class: _, preferred-register: '' }
- { id: 1, class: _, preferred-register: '' }
- { id: 2, class: _, preferred-register: '' }
+ - { id: 3, class: _, preferred-register: '' }
+ - { id: 4, class: _, preferred-register: '' }
+ - { id: 5, class: _, preferred-register: '' }
liveins:
fixedStack:
stack:
@@ -31,15 +34,21 @@ body: |
liveins: $xmm0, $xmm1
; CHECK-LABEL: name: test_fadd_float
- ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $xmm0
- ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $xmm1
- ; CHECK: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[COPY]], [[COPY1]]
- ; CHECK: $xmm0 = COPY [[FADD]](s32)
+ ; CHECK: [[COPY:%[0-9]+]]:_(s128) = COPY $xmm0
+ ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s128)
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s128) = COPY $xmm1
+ ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s128)
+ ; CHECK: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[TRUNC]], [[TRUNC1]]
+ ; CHECK: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[FADD]](s32)
+ ; CHECK: $xmm0 = COPY [[ANYEXT]](s128)
; CHECK: RET 0, implicit $xmm0
- %0(s32) = COPY $xmm0
- %1(s32) = COPY $xmm1
- %2(s32) = G_FADD %0, %1
- $xmm0 = COPY %2(s32)
+ %2:_(s128) = COPY $xmm0
+ %0:_(s32) = G_TRUNC %2(s128)
+ %3:_(s128) = COPY $xmm1
+ %1:_(s32) = G_TRUNC %3(s128)
+ %4:_(s32) = G_FADD %0, %1
+ %5:_(s128) = G_ANYEXT %4(s32)
+ $xmm0 = COPY %5(s128)
RET 0, implicit $xmm0
...
@@ -52,6 +61,9 @@ registers:
- { id: 0, class: _, preferred-register: '' }
- { id: 1, class: _, preferred-register: '' }
- { id: 2, class: _, preferred-register: '' }
+ - { id: 3, class: _, preferred-register: '' }
+ - { id: 4, class: _, preferred-register: '' }
+ - { id: 5, class: _, preferred-register: '' }
liveins:
fixedStack:
stack:
@@ -61,15 +73,21 @@ body: |
liveins: $xmm0, $xmm1
; CHECK-LABEL: name: test_fadd_double
- ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $xmm0
- ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $xmm1
- ; CHECK: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[COPY]], [[COPY1]]
- ; CHECK: $xmm0 = COPY [[FADD]](s64)
+ ; CHECK: [[COPY:%[0-9]+]]:_(s128) = COPY $xmm0
+ ; CHECK: [[TRUNC:%[0-9]+]]:_(s64) = G_TRUNC [[COPY]](s128)
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s128) = COPY $xmm1
+ ; CHECK: [[TRUNC1:%[0-9]+]]:_(s64) = G_TRUNC [[COPY1]](s128)
+ ; CHECK: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[TRUNC]], [[TRUNC1]]
+ ; CHECK: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[FADD]](s64)
+ ; CHECK: $xmm0 = COPY [[ANYEXT]](s128)
; CHECK: RET 0, implicit $xmm0
- %0(s64) = COPY $xmm0
- %1(s64) = COPY $xmm1
- %2(s64) = G_FADD %0, %1
- $xmm0 = COPY %2(s64)
+ %2:_(s128) = COPY $xmm0
+ %0:_(s64) = G_TRUNC %2(s128)
+ %3:_(s128) = COPY $xmm1
+ %1:_(s64) = G_TRUNC %3(s128)
+ %4:_(s64) = G_FADD %0, %1
+ %5:_(s128) = G_ANYEXT %4(s64)
+ $xmm0 = COPY %5(s128)
RET 0, implicit $xmm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fdiv-scalar.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fdiv-scalar.mir?rev=324665&r1=324664&r2=324665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fdiv-scalar.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fdiv-scalar.mir Thu Feb 8 14:41:47 2018
@@ -22,6 +22,9 @@ registers:
- { id: 0, class: _, preferred-register: '' }
- { id: 1, class: _, preferred-register: '' }
- { id: 2, class: _, preferred-register: '' }
+ - { id: 3, class: _, preferred-register: '' }
+ - { id: 4, class: _, preferred-register: '' }
+ - { id: 5, class: _, preferred-register: '' }
liveins:
fixedStack:
stack:
@@ -31,15 +34,21 @@ body: |
liveins: $xmm0, $xmm1
; CHECK-LABEL: name: test_fdiv_float
- ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $xmm0
- ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $xmm1
- ; CHECK: [[FDIV:%[0-9]+]]:_(s32) = G_FDIV [[COPY]], [[COPY1]]
- ; CHECK: $xmm0 = COPY [[FDIV]](s32)
+ ; CHECK: [[COPY:%[0-9]+]]:_(s128) = COPY $xmm0
+ ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s128)
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s128) = COPY $xmm1
+ ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s128)
+ ; CHECK: [[FDIV:%[0-9]+]]:_(s32) = G_FDIV [[TRUNC]], [[TRUNC1]]
+ ; CHECK: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[FDIV]](s32)
+ ; CHECK: $xmm0 = COPY [[ANYEXT]](s128)
; CHECK: RET 0, implicit $xmm0
- %0(s32) = COPY $xmm0
- %1(s32) = COPY $xmm1
- %2(s32) = G_FDIV %0, %1
- $xmm0 = COPY %2(s32)
+ %2:_(s128) = COPY $xmm0
+ %0:_(s32) = G_TRUNC %2(s128)
+ %3:_(s128) = COPY $xmm1
+ %1:_(s32) = G_TRUNC %3(s128)
+ %4:_(s32) = G_FDIV %0, %1
+ %5:_(s128) = G_ANYEXT %4(s32)
+ $xmm0 = COPY %5(s128)
RET 0, implicit $xmm0
...
@@ -52,6 +61,9 @@ registers:
- { id: 0, class: _, preferred-register: '' }
- { id: 1, class: _, preferred-register: '' }
- { id: 2, class: _, preferred-register: '' }
+ - { id: 3, class: _, preferred-register: '' }
+ - { id: 4, class: _, preferred-register: '' }
+ - { id: 5, class: _, preferred-register: '' }
liveins:
fixedStack:
stack:
@@ -61,15 +73,21 @@ body: |
liveins: $xmm0, $xmm1
; CHECK-LABEL: name: test_fdiv_double
- ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $xmm0
- ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $xmm1
- ; CHECK: [[FDIV:%[0-9]+]]:_(s64) = G_FDIV [[COPY]], [[COPY1]]
- ; CHECK: $xmm0 = COPY [[FDIV]](s64)
+ ; CHECK: [[COPY:%[0-9]+]]:_(s128) = COPY $xmm0
+ ; CHECK: [[TRUNC:%[0-9]+]]:_(s64) = G_TRUNC [[COPY]](s128)
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s128) = COPY $xmm1
+ ; CHECK: [[TRUNC1:%[0-9]+]]:_(s64) = G_TRUNC [[COPY1]](s128)
+ ; CHECK: [[FDIV:%[0-9]+]]:_(s64) = G_FDIV [[TRUNC]], [[TRUNC1]]
+ ; CHECK: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[FDIV]](s64)
+ ; CHECK: $xmm0 = COPY [[ANYEXT]](s128)
; CHECK: RET 0, implicit $xmm0
- %0(s64) = COPY $xmm0
- %1(s64) = COPY $xmm1
- %2(s64) = G_FDIV %0, %1
- $xmm0 = COPY %2(s64)
+ %2:_(s128) = COPY $xmm0
+ %0:_(s64) = G_TRUNC %2(s128)
+ %3:_(s128) = COPY $xmm1
+ %1:_(s64) = G_TRUNC %3(s128)
+ %4:_(s64) = G_FDIV %0, %1
+ %5:_(s128) = G_ANYEXT %4(s64)
+ $xmm0 = COPY %5(s128)
RET 0, implicit $xmm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fmul-scalar.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fmul-scalar.mir?rev=324665&r1=324664&r2=324665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fmul-scalar.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fmul-scalar.mir Thu Feb 8 14:41:47 2018
@@ -22,6 +22,9 @@ registers:
- { id: 0, class: _, preferred-register: '' }
- { id: 1, class: _, preferred-register: '' }
- { id: 2, class: _, preferred-register: '' }
+ - { id: 3, class: _, preferred-register: '' }
+ - { id: 4, class: _, preferred-register: '' }
+ - { id: 5, class: _, preferred-register: '' }
liveins:
fixedStack:
stack:
@@ -31,15 +34,21 @@ body: |
liveins: $xmm0, $xmm1
; CHECK-LABEL: name: test_fmul_float
- ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $xmm0
- ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $xmm1
- ; CHECK: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[COPY1]]
- ; CHECK: $xmm0 = COPY [[FMUL]](s32)
+ ; CHECK: [[COPY:%[0-9]+]]:_(s128) = COPY $xmm0
+ ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s128)
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s128) = COPY $xmm1
+ ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s128)
+ ; CHECK: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[TRUNC]], [[TRUNC1]]
+ ; CHECK: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[FMUL]](s32)
+ ; CHECK: $xmm0 = COPY [[ANYEXT]](s128)
; CHECK: RET 0, implicit $xmm0
- %0(s32) = COPY $xmm0
- %1(s32) = COPY $xmm1
- %2(s32) = G_FMUL %0, %1
- $xmm0 = COPY %2(s32)
+ %2:_(s128) = COPY $xmm0
+ %0:_(s32) = G_TRUNC %2(s128)
+ %3:_(s128) = COPY $xmm1
+ %1:_(s32) = G_TRUNC %3(s128)
+ %4:_(s32) = G_FMUL %0, %1
+ %5:_(s128) = G_ANYEXT %4(s32)
+ $xmm0 = COPY %5(s128)
RET 0, implicit $xmm0
...
@@ -52,6 +61,9 @@ registers:
- { id: 0, class: _, preferred-register: '' }
- { id: 1, class: _, preferred-register: '' }
- { id: 2, class: _, preferred-register: '' }
+ - { id: 3, class: _, preferred-register: '' }
+ - { id: 4, class: _, preferred-register: '' }
+ - { id: 5, class: _, preferred-register: '' }
liveins:
fixedStack:
stack:
@@ -61,15 +73,21 @@ body: |
liveins: $xmm0, $xmm1
; CHECK-LABEL: name: test_fmul_double
- ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $xmm0
- ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $xmm1
- ; CHECK: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[COPY]], [[COPY1]]
- ; CHECK: $xmm0 = COPY [[FMUL]](s64)
+ ; CHECK: [[COPY:%[0-9]+]]:_(s128) = COPY $xmm0
+ ; CHECK: [[TRUNC:%[0-9]+]]:_(s64) = G_TRUNC [[COPY]](s128)
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s128) = COPY $xmm1
+ ; CHECK: [[TRUNC1:%[0-9]+]]:_(s64) = G_TRUNC [[COPY1]](s128)
+ ; CHECK: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[TRUNC]], [[TRUNC1]]
+ ; CHECK: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[FMUL]](s64)
+ ; CHECK: $xmm0 = COPY [[ANYEXT]](s128)
; CHECK: RET 0, implicit $xmm0
- %0(s64) = COPY $xmm0
- %1(s64) = COPY $xmm1
- %2(s64) = G_FMUL %0, %1
- $xmm0 = COPY %2(s64)
+ %2:_(s128) = COPY $xmm0
+ %0:_(s64) = G_TRUNC %2(s128)
+ %3:_(s128) = COPY $xmm1
+ %1:_(s64) = G_TRUNC %3(s128)
+ %4:_(s64) = G_FMUL %0, %1
+ %5:_(s128) = G_ANYEXT %4(s64)
+ $xmm0 = COPY %5(s128)
RET 0, implicit $xmm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fpext-scalar.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fpext-scalar.mir?rev=324665&r1=324664&r2=324665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fpext-scalar.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fpext-scalar.mir Thu Feb 8 14:41:47 2018
@@ -17,18 +17,24 @@ regBankSelected: false
registers:
- { id: 0, class: _, preferred-register: '' }
- { id: 1, class: _, preferred-register: '' }
+ - { id: 2, class: _, preferred-register: '' }
+ - { id: 3, class: _, preferred-register: '' }
body: |
bb.1.entry:
liveins: $xmm0
; ALL-LABEL: name: test
- ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY $xmm0
- ; ALL: [[FPEXT:%[0-9]+]]:_(s64) = G_FPEXT [[COPY]](s32)
- ; ALL: $xmm0 = COPY [[FPEXT]](s64)
+ ; ALL: [[COPY:%[0-9]+]]:_(s128) = COPY $xmm0
+ ; ALL: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s128)
+ ; ALL: [[FPEXT:%[0-9]+]]:_(s64) = G_FPEXT [[TRUNC]](s32)
+ ; ALL: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[FPEXT]](s64)
+ ; ALL: $xmm0 = COPY [[ANYEXT]](s128)
; ALL: RET 0, implicit $xmm0
- %0(s32) = COPY $xmm0
- %1(s64) = G_FPEXT %0(s32)
- $xmm0 = COPY %1(s64)
+ %1:_(s128) = COPY $xmm0
+ %0:_(s32) = G_TRUNC %1(s128)
+ %2:_(s64) = G_FPEXT %0(s32)
+ %3:_(s128) = G_ANYEXT %2(s64)
+ $xmm0 = COPY %3(s128)
RET 0, implicit $xmm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fsub-scalar.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fsub-scalar.mir?rev=324665&r1=324664&r2=324665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fsub-scalar.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fsub-scalar.mir Thu Feb 8 14:41:47 2018
@@ -22,6 +22,9 @@ registers:
- { id: 0, class: _, preferred-register: '' }
- { id: 1, class: _, preferred-register: '' }
- { id: 2, class: _, preferred-register: '' }
+ - { id: 3, class: _, preferred-register: '' }
+ - { id: 4, class: _, preferred-register: '' }
+ - { id: 5, class: _, preferred-register: '' }
liveins:
fixedStack:
stack:
@@ -31,15 +34,21 @@ body: |
liveins: $xmm0, $xmm1
; CHECK-LABEL: name: test_fsub_float
- ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $xmm0
- ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $xmm1
- ; CHECK: [[FSUB:%[0-9]+]]:_(s32) = G_FSUB [[COPY]], [[COPY1]]
- ; CHECK: $xmm0 = COPY [[FSUB]](s32)
+ ; CHECK: [[COPY:%[0-9]+]]:_(s128) = COPY $xmm0
+ ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s128)
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s128) = COPY $xmm1
+ ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s128)
+ ; CHECK: [[FSUB:%[0-9]+]]:_(s32) = G_FSUB [[TRUNC]], [[TRUNC1]]
+ ; CHECK: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[FSUB]](s32)
+ ; CHECK: $xmm0 = COPY [[ANYEXT]](s128)
; CHECK: RET 0, implicit $xmm0
- %0(s32) = COPY $xmm0
- %1(s32) = COPY $xmm1
- %2(s32) = G_FSUB %0, %1
- $xmm0 = COPY %2(s32)
+ %2:_(s128) = COPY $xmm0
+ %0:_(s32) = G_TRUNC %2(s128)
+ %3:_(s128) = COPY $xmm1
+ %1:_(s32) = G_TRUNC %3(s128)
+ %4:_(s32) = G_FSUB %0, %1
+ %5:_(s128) = G_ANYEXT %4(s32)
+ $xmm0 = COPY %5(s128)
RET 0, implicit $xmm0
...
@@ -52,6 +61,9 @@ registers:
- { id: 0, class: _, preferred-register: '' }
- { id: 1, class: _, preferred-register: '' }
- { id: 2, class: _, preferred-register: '' }
+ - { id: 3, class: _, preferred-register: '' }
+ - { id: 4, class: _, preferred-register: '' }
+ - { id: 5, class: _, preferred-register: '' }
liveins:
fixedStack:
stack:
@@ -61,15 +73,21 @@ body: |
liveins: $xmm0, $xmm1
; CHECK-LABEL: name: test_fsub_double
- ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $xmm0
- ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $xmm1
- ; CHECK: [[FSUB:%[0-9]+]]:_(s64) = G_FSUB [[COPY]], [[COPY1]]
- ; CHECK: $xmm0 = COPY [[FSUB]](s64)
+ ; CHECK: [[COPY:%[0-9]+]]:_(s128) = COPY $xmm0
+ ; CHECK: [[TRUNC:%[0-9]+]]:_(s64) = G_TRUNC [[COPY]](s128)
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s128) = COPY $xmm1
+ ; CHECK: [[TRUNC1:%[0-9]+]]:_(s64) = G_TRUNC [[COPY1]](s128)
+ ; CHECK: [[FSUB:%[0-9]+]]:_(s64) = G_FSUB [[TRUNC]], [[TRUNC1]]
+ ; CHECK: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[FSUB]](s64)
+ ; CHECK: $xmm0 = COPY [[ANYEXT]](s128)
; CHECK: RET 0, implicit $xmm0
- %0(s64) = COPY $xmm0
- %1(s64) = COPY $xmm1
- %2(s64) = G_FSUB %0, %1
- $xmm0 = COPY %2(s64)
+ %2:_(s128) = COPY $xmm0
+ %0:_(s64) = G_TRUNC %2(s128)
+ %3:_(s128) = COPY $xmm1
+ %1:_(s64) = G_TRUNC %3(s128)
+ %4:_(s64) = G_FSUB %0, %1
+ %5:_(s128) = G_ANYEXT %4(s64)
+ $xmm0 = COPY %5(s128)
RET 0, implicit $xmm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-phi.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-phi.mir?rev=324665&r1=324664&r2=324665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-phi.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-phi.mir Thu Feb 8 14:41:47 2018
@@ -473,49 +473,45 @@ constants:
body: |
; ALL-LABEL: name: test_float
- ; ALL: bb.0.{{[a-zA-Z0-9]+}}:
- ; ALL: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; ALL: bb.0.entry:
+ ; ALL: successors: %bb.2(0x40000000), %bb.1(0x40000000)
; ALL: liveins: $edi, $xmm0, $xmm1
; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
- ; ALL: [[COPY1:%[0-9]+]]:_(s32) = COPY $xmm0
- ; ALL: [[COPY2:%[0-9]+]]:_(s32) = COPY $xmm1
+ ; ALL: [[COPY1:%[0-9]+]]:_(s128) = COPY $xmm0
+ ; ALL: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s128)
+ ; ALL: [[COPY2:%[0-9]+]]:_(s128) = COPY $xmm1
+ ; ALL: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY2]](s128)
; ALL: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; ALL: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
- ; ALL: G_BRCOND [[ICMP]](s1), %bb.1
- ; ALL: G_BR %bb.2
- ; ALL: bb.1.cond.true:
- ; ALL: successors: %bb.3(0x80000000)
- ; ALL: G_BR %bb.3
- ; ALL: bb.2.cond.false:
- ; ALL: successors: %bb.3(0x80000000)
- ; ALL: bb.3.cond.end:
- ; ALL: [[PHI:%[0-9]+]]:_(s32) = G_PHI [[COPY1]](s32), %bb.1, [[COPY2]](s32), %bb.2
- ; ALL: $xmm0 = COPY [[PHI]](s32)
+ ; ALL: G_BRCOND [[ICMP]](s1), %bb.2
+ ; ALL: bb.1.cond.false:
+ ; ALL: successors: %bb.2(0x80000000)
+ ; ALL: bb.2.cond.end:
+ ; ALL: [[PHI:%[0-9]+]]:_(s32) = G_PHI [[TRUNC1]](s32), %bb.1, [[TRUNC]](s32), %bb.0
+ ; ALL: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[PHI]](s32)
+ ; ALL: $xmm0 = COPY [[ANYEXT]](s128)
; ALL: RET 0, implicit $xmm0
bb.1.entry:
- successors: %bb.2(0x40000000), %bb.3(0x40000000)
+ successors: %bb.3(0x40000000), %bb.2(0x40000000)
liveins: $edi, $xmm0, $xmm1
- %0(s32) = COPY $edi
- %1(s32) = COPY $xmm0
- %2(s32) = COPY $xmm1
- %3(s32) = G_CONSTANT i32 0
- %4(s1) = G_ICMP intpred(sgt), %0(s32), %3
- G_BRCOND %4(s1), %bb.2
- G_BR %bb.3
-
- bb.2.cond.true:
- successors: %bb.4(0x80000000)
-
- G_BR %bb.4
-
- bb.3.cond.false:
- successors: %bb.4(0x80000000)
-
-
- bb.4.cond.end:
- %5(s32) = G_PHI %1(s32), %bb.2, %2(s32), %bb.3
- $xmm0 = COPY %5(s32)
+ %0:_(s32) = COPY $edi
+ %3:_(s128) = COPY $xmm0
+ %1:_(s32) = G_TRUNC %3(s128)
+ %4:_(s128) = COPY $xmm1
+ %2:_(s32) = G_TRUNC %4(s128)
+ %5:_(s32) = G_CONSTANT i32 0
+ %6:_(s1) = G_ICMP intpred(sgt), %0(s32), %5
+ G_BRCOND %6(s1), %bb.3
+
+ bb.2.cond.false:
+ successors: %bb.3(0x80000000)
+
+
+ bb.3.cond.end:
+ %7:_(s32) = G_PHI %2(s32), %bb.2, %1(s32), %bb.1
+ %8:_(s128) = G_ANYEXT %7(s32)
+ $xmm0 = COPY %8(s128)
RET 0, implicit $xmm0
...
@@ -532,6 +528,9 @@ registers:
- { id: 3, class: _, preferred-register: '' }
- { id: 4, class: _, preferred-register: '' }
- { id: 5, class: _, preferred-register: '' }
+ - { id: 6, class: _, preferred-register: '' }
+ - { id: 7, class: _, preferred-register: '' }
+ - { id: 8, class: _, preferred-register: '' }
liveins:
fixedStack:
stack:
@@ -541,49 +540,45 @@ constants:
body: |
; ALL-LABEL: name: test_double
- ; ALL: bb.0.{{[a-zA-Z0-9]+}}:
- ; ALL: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; ALL: bb.0.entry:
+ ; ALL: successors: %bb.2(0x40000000), %bb.1(0x40000000)
; ALL: liveins: $edi, $xmm0, $xmm1
; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
- ; ALL: [[COPY1:%[0-9]+]]:_(s64) = COPY $xmm0
- ; ALL: [[COPY2:%[0-9]+]]:_(s64) = COPY $xmm1
+ ; ALL: [[COPY1:%[0-9]+]]:_(s128) = COPY $xmm0
+ ; ALL: [[TRUNC:%[0-9]+]]:_(s64) = G_TRUNC [[COPY1]](s128)
+ ; ALL: [[COPY2:%[0-9]+]]:_(s128) = COPY $xmm1
+ ; ALL: [[TRUNC1:%[0-9]+]]:_(s64) = G_TRUNC [[COPY2]](s128)
; ALL: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; ALL: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
- ; ALL: G_BRCOND [[ICMP]](s1), %bb.1
- ; ALL: G_BR %bb.2
- ; ALL: bb.1.cond.true:
- ; ALL: successors: %bb.3(0x80000000)
- ; ALL: G_BR %bb.3
- ; ALL: bb.2.cond.false:
- ; ALL: successors: %bb.3(0x80000000)
- ; ALL: bb.3.cond.end:
- ; ALL: [[PHI:%[0-9]+]]:_(s64) = G_PHI [[COPY1]](s64), %bb.1, [[COPY2]](s64), %bb.2
- ; ALL: $xmm0 = COPY [[PHI]](s64)
+ ; ALL: G_BRCOND [[ICMP]](s1), %bb.2
+ ; ALL: bb.1.cond.false:
+ ; ALL: successors: %bb.2(0x80000000)
+ ; ALL: bb.2.cond.end:
+ ; ALL: [[PHI:%[0-9]+]]:_(s64) = G_PHI [[TRUNC1]](s64), %bb.1, [[TRUNC]](s64), %bb.0
+ ; ALL: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[PHI]](s64)
+ ; ALL: $xmm0 = COPY [[ANYEXT]](s128)
; ALL: RET 0, implicit $xmm0
bb.1.entry:
- successors: %bb.2(0x40000000), %bb.3(0x40000000)
+ successors: %bb.3(0x40000000), %bb.2(0x40000000)
liveins: $edi, $xmm0, $xmm1
- %0(s32) = COPY $edi
- %1(s64) = COPY $xmm0
- %2(s64) = COPY $xmm1
- %3(s32) = G_CONSTANT i32 0
- %4(s1) = G_ICMP intpred(sgt), %0(s32), %3
- G_BRCOND %4(s1), %bb.2
- G_BR %bb.3
-
- bb.2.cond.true:
- successors: %bb.4(0x80000000)
-
- G_BR %bb.4
-
- bb.3.cond.false:
- successors: %bb.4(0x80000000)
-
-
- bb.4.cond.end:
- %5(s64) = G_PHI %1(s64), %bb.2, %2(s64), %bb.3
- $xmm0 = COPY %5(s64)
+ %0:_(s32) = COPY $edi
+ %3:_(s128) = COPY $xmm0
+ %1:_(s64) = G_TRUNC %3(s128)
+ %4:_(s128) = COPY $xmm1
+ %2:_(s64) = G_TRUNC %4(s128)
+ %5:_(s32) = G_CONSTANT i32 0
+ %6:_(s1) = G_ICMP intpred(sgt), %0(s32), %5
+ G_BRCOND %6(s1), %bb.3
+
+ bb.2.cond.false:
+ successors: %bb.3(0x80000000)
+
+
+ bb.3.cond.end:
+ %7:_(s64) = G_PHI %2(s64), %bb.2, %1(s64), %bb.1
+ %8:_(s128) = G_ANYEXT %7(s64)
+ $xmm0 = COPY %8(s128)
RET 0, implicit $xmm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir?rev=324665&r1=324664&r2=324665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir Thu Feb 8 14:41:47 2018
@@ -447,28 +447,40 @@ registers:
- { id: 0, class: _ }
- { id: 1, class: _ }
- { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
body: |
bb.1 (%ir-block.0):
liveins: $xmm0, $xmm1
; FAST-LABEL: name: test_add_float
; FAST: liveins: $xmm0, $xmm1
- ; FAST: [[COPY:%[0-9]+]]:vecr(s32) = COPY $xmm0
- ; FAST: [[COPY1:%[0-9]+]]:vecr(s32) = COPY $xmm1
- ; FAST: [[FADD:%[0-9]+]]:vecr(s32) = G_FADD [[COPY]], [[COPY1]]
- ; FAST: $xmm0 = COPY [[FADD]](s32)
+ ; FAST: [[COPY:%[0-9]+]]:vecr(s128) = COPY $xmm0
+ ; FAST: [[TRUNC:%[0-9]+]]:vecr(s32) = G_TRUNC [[COPY]](s128)
+ ; FAST: [[COPY1:%[0-9]+]]:vecr(s128) = COPY $xmm1
+ ; FAST: [[TRUNC1:%[0-9]+]]:vecr(s32) = G_TRUNC [[COPY1]](s128)
+ ; FAST: [[FADD:%[0-9]+]]:vecr(s32) = G_FADD [[TRUNC]], [[TRUNC1]]
+ ; FAST: [[ANYEXT:%[0-9]+]]:vecr(s128) = G_ANYEXT [[FADD]](s32)
+ ; FAST: $xmm0 = COPY [[ANYEXT]](s128)
; FAST: RET 0, implicit $xmm0
; GREEDY-LABEL: name: test_add_float
; GREEDY: liveins: $xmm0, $xmm1
- ; GREEDY: [[COPY:%[0-9]+]]:vecr(s32) = COPY $xmm0
- ; GREEDY: [[COPY1:%[0-9]+]]:vecr(s32) = COPY $xmm1
- ; GREEDY: [[FADD:%[0-9]+]]:vecr(s32) = G_FADD [[COPY]], [[COPY1]]
- ; GREEDY: $xmm0 = COPY [[FADD]](s32)
+ ; GREEDY: [[COPY:%[0-9]+]]:vecr(s128) = COPY $xmm0
+ ; GREEDY: [[TRUNC:%[0-9]+]]:vecr(s32) = G_TRUNC [[COPY]](s128)
+ ; GREEDY: [[COPY1:%[0-9]+]]:vecr(s128) = COPY $xmm1
+ ; GREEDY: [[TRUNC1:%[0-9]+]]:vecr(s32) = G_TRUNC [[COPY1]](s128)
+ ; GREEDY: [[FADD:%[0-9]+]]:vecr(s32) = G_FADD [[TRUNC]], [[TRUNC1]]
+ ; GREEDY: [[ANYEXT:%[0-9]+]]:vecr(s128) = G_ANYEXT [[FADD]](s32)
+ ; GREEDY: $xmm0 = COPY [[ANYEXT]](s128)
; GREEDY: RET 0, implicit $xmm0
- %0(s32) = COPY $xmm0
- %1(s32) = COPY $xmm1
- %2(s32) = G_FADD %0, %1
- $xmm0 = COPY %2(s32)
+ %2:_(s128) = COPY $xmm0
+ %0:_(s32) = G_TRUNC %2(s128)
+ %3:_(s128) = COPY $xmm1
+ %1:_(s32) = G_TRUNC %3(s128)
+ %4:_(s32) = G_FADD %0, %1
+ %5:_(s128) = G_ANYEXT %4(s32)
+ $xmm0 = COPY %5(s128)
RET 0, implicit $xmm0
...
@@ -483,28 +495,40 @@ registers:
- { id: 0, class: _ }
- { id: 1, class: _ }
- { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
body: |
bb.1 (%ir-block.0):
liveins: $xmm0, $xmm1
; FAST-LABEL: name: test_add_double
; FAST: liveins: $xmm0, $xmm1
- ; FAST: [[COPY:%[0-9]+]]:vecr(s64) = COPY $xmm0
- ; FAST: [[COPY1:%[0-9]+]]:vecr(s64) = COPY $xmm1
- ; FAST: [[FADD:%[0-9]+]]:vecr(s64) = G_FADD [[COPY]], [[COPY1]]
- ; FAST: $xmm0 = COPY [[FADD]](s64)
+ ; FAST: [[COPY:%[0-9]+]]:vecr(s128) = COPY $xmm0
+ ; FAST: [[TRUNC:%[0-9]+]]:vecr(s64) = G_TRUNC [[COPY]](s128)
+ ; FAST: [[COPY1:%[0-9]+]]:vecr(s128) = COPY $xmm1
+ ; FAST: [[TRUNC1:%[0-9]+]]:vecr(s64) = G_TRUNC [[COPY1]](s128)
+ ; FAST: [[FADD:%[0-9]+]]:vecr(s64) = G_FADD [[TRUNC]], [[TRUNC1]]
+ ; FAST: [[ANYEXT:%[0-9]+]]:vecr(s128) = G_ANYEXT [[FADD]](s64)
+ ; FAST: $xmm0 = COPY [[ANYEXT]](s128)
; FAST: RET 0, implicit $xmm0
; GREEDY-LABEL: name: test_add_double
; GREEDY: liveins: $xmm0, $xmm1
- ; GREEDY: [[COPY:%[0-9]+]]:vecr(s64) = COPY $xmm0
- ; GREEDY: [[COPY1:%[0-9]+]]:vecr(s64) = COPY $xmm1
- ; GREEDY: [[FADD:%[0-9]+]]:vecr(s64) = G_FADD [[COPY]], [[COPY1]]
- ; GREEDY: $xmm0 = COPY [[FADD]](s64)
+ ; GREEDY: [[COPY:%[0-9]+]]:vecr(s128) = COPY $xmm0
+ ; GREEDY: [[TRUNC:%[0-9]+]]:vecr(s64) = G_TRUNC [[COPY]](s128)
+ ; GREEDY: [[COPY1:%[0-9]+]]:vecr(s128) = COPY $xmm1
+ ; GREEDY: [[TRUNC1:%[0-9]+]]:vecr(s64) = G_TRUNC [[COPY1]](s128)
+ ; GREEDY: [[FADD:%[0-9]+]]:vecr(s64) = G_FADD [[TRUNC]], [[TRUNC1]]
+ ; GREEDY: [[ANYEXT:%[0-9]+]]:vecr(s128) = G_ANYEXT [[FADD]](s64)
+ ; GREEDY: $xmm0 = COPY [[ANYEXT]](s128)
; GREEDY: RET 0, implicit $xmm0
- %0(s64) = COPY $xmm0
- %1(s64) = COPY $xmm1
- %2(s64) = G_FADD %0, %1
- $xmm0 = COPY %2(s64)
+ %2:_(s128) = COPY $xmm0
+ %0:_(s64) = G_TRUNC %2(s128)
+ %3:_(s128) = COPY $xmm1
+ %1:_(s64) = G_TRUNC %3(s128)
+ %4:_(s64) = G_FADD %0, %1
+ %5:_(s128) = G_ANYEXT %4(s64)
+ $xmm0 = COPY %5(s128)
RET 0, implicit $xmm0
...
@@ -842,16 +866,21 @@ body: |
; FAST-LABEL: name: test_load_float
; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
; FAST: [[LOAD:%[0-9]+]]:gpr(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.p1)
- ; FAST: $xmm0 = COPY [[LOAD]](s32)
+ ; FAST: [[COPY1:%[0-9]+]]:vecr(s32) = COPY [[LOAD]](s32)
+ ; FAST: [[ANYEXT:%[0-9]+]]:vecr(s128) = G_ANYEXT [[COPY1]](s32)
+ ; FAST: $xmm0 = COPY [[ANYEXT]](s128)
; FAST: RET 0, implicit $xmm0
; GREEDY-LABEL: name: test_load_float
; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
; GREEDY: [[LOAD:%[0-9]+]]:gpr(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.p1)
- ; GREEDY: $xmm0 = COPY [[LOAD]](s32)
+ ; GREEDY: [[COPY1:%[0-9]+]]:vecr(s32) = COPY [[LOAD]](s32)
+ ; GREEDY: [[ANYEXT:%[0-9]+]]:vecr(s128) = G_ANYEXT [[COPY1]](s32)
+ ; GREEDY: $xmm0 = COPY [[ANYEXT]](s128)
; GREEDY: RET 0, implicit $xmm0
- %0(p0) = COPY $rdi
- %1(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p1)
- $xmm0 = COPY %1(s32)
+ %0:_(p0) = COPY $rdi
+ %1:_(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p1)
+ %2:_(s128) = G_ANYEXT %1(s32)
+ $xmm0 = COPY %2(s128)
RET 0, implicit $xmm0
...
@@ -871,16 +900,21 @@ body: |
; FAST-LABEL: name: test_load_double
; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
; FAST: [[LOAD:%[0-9]+]]:gpr(s64) = G_LOAD [[COPY]](p0) :: (load 8 from %ir.p1)
- ; FAST: $xmm0 = COPY [[LOAD]](s64)
+ ; FAST: [[COPY1:%[0-9]+]]:vecr(s64) = COPY [[LOAD]](s64)
+ ; FAST: [[ANYEXT:%[0-9]+]]:vecr(s128) = G_ANYEXT [[COPY1]](s64)
+ ; FAST: $xmm0 = COPY [[ANYEXT]](s128)
; FAST: RET 0, implicit $xmm0
; GREEDY-LABEL: name: test_load_double
; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
; GREEDY: [[LOAD:%[0-9]+]]:gpr(s64) = G_LOAD [[COPY]](p0) :: (load 8 from %ir.p1)
- ; GREEDY: $xmm0 = COPY [[LOAD]](s64)
+ ; GREEDY: [[COPY1:%[0-9]+]]:vecr(s64) = COPY [[LOAD]](s64)
+ ; GREEDY: [[ANYEXT:%[0-9]+]]:vecr(s128) = G_ANYEXT [[COPY1]](s64)
+ ; GREEDY: $xmm0 = COPY [[ANYEXT]](s128)
; GREEDY: RET 0, implicit $xmm0
- %0(p0) = COPY $rdi
- %1(s64) = G_LOAD %0(p0) :: (load 8 from %ir.p1)
- $xmm0 = COPY %1(s64)
+ %0:_(p0) = COPY $rdi
+ %1:_(s64) = G_LOAD %0(p0) :: (load 8 from %ir.p1)
+ %2:_(s128) = G_ANYEXT %1(s64)
+ $xmm0 = COPY %2(s128)
RET 0, implicit $xmm0
...
@@ -994,23 +1028,23 @@ body: |
liveins: $rdi, $xmm0
; FAST-LABEL: name: test_store_float
- ; FAST: [[COPY:%[0-9]+]]:vecr(s32) = COPY $xmm0
+ ; FAST: [[COPY:%[0-9]+]]:vecr(s128) = COPY $xmm0
+ ; FAST: [[TRUNC:%[0-9]+]]:vecr(s32) = G_TRUNC [[COPY]](s128)
; FAST: [[COPY1:%[0-9]+]]:gpr(p0) = COPY $rdi
- ; FAST: [[COPY2:%[0-9]+]]:gpr(s32) = COPY [[COPY]](s32)
+ ; FAST: [[COPY2:%[0-9]+]]:gpr(s32) = COPY [[TRUNC]](s32)
; FAST: G_STORE [[COPY2]](s32), [[COPY1]](p0) :: (store 4 into %ir.p1)
; FAST: $rax = COPY [[COPY1]](p0)
; FAST: RET 0, implicit $rax
; GREEDY-LABEL: name: test_store_float
- ; GREEDY: [[COPY:%[0-9]+]]:vecr(s32) = COPY $xmm0
+ ; GREEDY: [[COPY:%[0-9]+]]:vecr(s128) = COPY $xmm0
+ ; GREEDY: [[TRUNC:%[0-9]+]]:vecr(s32) = G_TRUNC [[COPY]](s128)
; GREEDY: [[COPY1:%[0-9]+]]:gpr(p0) = COPY $rdi
- ; GREEDY: G_STORE [[COPY]](s32), [[COPY1]](p0) :: (store 4 into %ir.p1)
+ ; GREEDY: G_STORE [[TRUNC]](s32), [[COPY1]](p0) :: (store 4 into %ir.p1)
; GREEDY: $rax = COPY [[COPY1]](p0)
; GREEDY: RET 0, implicit $rax
- %0(s32) = COPY $xmm0
- %1(p0) = COPY $rdi
-
-
-
+ %2:_(s128) = COPY $xmm0
+ %0:_(s32) = G_TRUNC %2(s128)
+ %1:_(p0) = COPY $rdi
G_STORE %0(s32), %1(p0) :: (store 4 into %ir.p1)
$rax = COPY %1(p0)
RET 0, implicit $rax
@@ -1033,24 +1067,23 @@ body: |
liveins: $rdi, $xmm0
; FAST-LABEL: name: test_store_double
- ; FAST: [[COPY:%[0-9]+]]:vecr(s64) = COPY $xmm0
+ ; FAST: [[COPY:%[0-9]+]]:vecr(s128) = COPY $xmm0
+ ; FAST: [[TRUNC:%[0-9]+]]:vecr(s64) = G_TRUNC [[COPY]](s128)
; FAST: [[COPY1:%[0-9]+]]:gpr(p0) = COPY $rdi
- ; FAST: [[COPY2:%[0-9]+]]:gpr(s64) = COPY [[COPY]](s64)
+ ; FAST: [[COPY2:%[0-9]+]]:gpr(s64) = COPY [[TRUNC]](s64)
; FAST: G_STORE [[COPY2]](s64), [[COPY1]](p0) :: (store 8 into %ir.p1)
; FAST: $rax = COPY [[COPY1]](p0)
; FAST: RET 0, implicit $rax
; GREEDY-LABEL: name: test_store_double
- ; GREEDY: [[COPY:%[0-9]+]]:vecr(s64) = COPY $xmm0
+ ; GREEDY: [[COPY:%[0-9]+]]:vecr(s128) = COPY $xmm0
+ ; GREEDY: [[TRUNC:%[0-9]+]]:vecr(s64) = G_TRUNC [[COPY]](s128)
; GREEDY: [[COPY1:%[0-9]+]]:gpr(p0) = COPY $rdi
- ; GREEDY: G_STORE [[COPY]](s64), [[COPY1]](p0) :: (store 8 into %ir.p1)
+ ; GREEDY: G_STORE [[TRUNC]](s64), [[COPY1]](p0) :: (store 8 into %ir.p1)
; GREEDY: $rax = COPY [[COPY1]](p0)
; GREEDY: RET 0, implicit $rax
- %0(s64) = COPY $xmm0
- %1(p0) = COPY $rdi
-
-
-
-
+ %2:_(s128) = COPY $xmm0
+ %0:_(s64) = G_TRUNC %2(s128)
+ %1:_(p0) = COPY $rdi
G_STORE %0(s64), %1(p0) :: (store 8 into %ir.p1)
$rax = COPY %1(p0)
RET 0, implicit $rax
@@ -1511,7 +1544,7 @@ alignment: 4
legalized: true
regBankSelected: false
registers:
- - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
liveins:
fixedStack:
stack:
@@ -1519,15 +1552,15 @@ constants:
body: |
bb.1 (%ir-block.0):
; FAST-LABEL: name: test_undef3
- ; FAST: [[DEF:%[0-9]+]]:gpr(s32) = G_IMPLICIT_DEF
- ; FAST: $xmm0 = COPY [[DEF]](s32)
+ ; FAST: [[DEF:%[0-9]+]]:vecr(s128) = G_IMPLICIT_DEF
+ ; FAST: $xmm0 = COPY [[DEF]](s128)
; FAST: RET 0, implicit $xmm0
; GREEDY-LABEL: name: test_undef3
- ; GREEDY: [[DEF:%[0-9]+]]:gpr(s32) = G_IMPLICIT_DEF
- ; GREEDY: $xmm0 = COPY [[DEF]](s32)
+ ; GREEDY: [[DEF:%[0-9]+]]:vecr(s128) = G_IMPLICIT_DEF
+ ; GREEDY: $xmm0 = COPY [[DEF]](s128)
; GREEDY: RET 0, implicit $xmm0
- %0(s32) = G_IMPLICIT_DEF
- $xmm0 = COPY %0(s32)
+ %1(s128) = G_IMPLICIT_DEF
+ $xmm0 = COPY %1(s128)
RET 0, implicit $xmm0
...
@@ -1540,6 +1573,8 @@ registers:
- { id: 0, class: _, preferred-register: '' }
- { id: 1, class: _, preferred-register: '' }
- { id: 2, class: _, preferred-register: '' }
+ - { id: 3, class: _, preferred-register: '' }
+ - { id: 4, class: _, preferred-register: '' }
liveins:
fixedStack:
stack:
@@ -1549,23 +1584,29 @@ body: |
liveins: $xmm0
; FAST-LABEL: name: test_undef4
- ; FAST: [[COPY:%[0-9]+]]:vecr(s32) = COPY $xmm0
+ ; FAST: [[COPY:%[0-9]+]]:vecr(s128) = COPY $xmm0
+ ; FAST: [[TRUNC:%[0-9]+]]:vecr(s32) = G_TRUNC [[COPY]](s128)
; FAST: [[DEF:%[0-9]+]]:gpr(s32) = G_IMPLICIT_DEF
; FAST: [[COPY1:%[0-9]+]]:vecr(s32) = COPY [[DEF]](s32)
- ; FAST: [[FADD:%[0-9]+]]:vecr(s32) = G_FADD [[COPY]], [[COPY1]]
- ; FAST: $xmm0 = COPY [[FADD]](s32)
+ ; FAST: [[FADD:%[0-9]+]]:vecr(s32) = G_FADD [[TRUNC]], [[COPY1]]
+ ; FAST: [[ANYEXT:%[0-9]+]]:vecr(s128) = G_ANYEXT [[FADD]](s32)
+ ; FAST: $xmm0 = COPY [[ANYEXT]](s128)
; FAST: RET 0, implicit $xmm0
; GREEDY-LABEL: name: test_undef4
- ; GREEDY: [[COPY:%[0-9]+]]:vecr(s32) = COPY $xmm0
+ ; GREEDY: [[COPY:%[0-9]+]]:vecr(s128) = COPY $xmm0
+ ; GREEDY: [[TRUNC:%[0-9]+]]:vecr(s32) = G_TRUNC [[COPY]](s128)
; GREEDY: [[DEF:%[0-9]+]]:gpr(s32) = G_IMPLICIT_DEF
; GREEDY: [[COPY1:%[0-9]+]]:vecr(s32) = COPY [[DEF]](s32)
- ; GREEDY: [[FADD:%[0-9]+]]:vecr(s32) = G_FADD [[COPY]], [[COPY1]]
- ; GREEDY: $xmm0 = COPY [[FADD]](s32)
+ ; GREEDY: [[FADD:%[0-9]+]]:vecr(s32) = G_FADD [[TRUNC]], [[COPY1]]
+ ; GREEDY: [[ANYEXT:%[0-9]+]]:vecr(s128) = G_ANYEXT [[FADD]](s32)
+ ; GREEDY: $xmm0 = COPY [[ANYEXT]](s128)
; GREEDY: RET 0, implicit $xmm0
- %0(s32) = COPY $xmm0
- %1(s32) = G_IMPLICIT_DEF
- %2(s32) = G_FADD %0, %1
- $xmm0 = COPY %2(s32)
+ %1:_(s128) = COPY $xmm0
+ %0:_(s32) = G_TRUNC %1(s128)
+ %2:_(s32) = G_IMPLICIT_DEF
+ %3:_(s32) = G_FADD %0, %2
+ %4:_(s128) = G_ANYEXT %3(s32)
+ $xmm0 = COPY %4(s128)
RET 0, implicit $xmm0
...
@@ -1665,67 +1706,63 @@ registers:
body: |
; FAST-LABEL: name: test_float
; FAST: bb.0.entry:
- ; FAST: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; FAST: successors: %bb.2(0x40000000), %bb.1(0x40000000)
; FAST: liveins: $edi, $xmm0, $xmm1
; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi
- ; FAST: [[COPY1:%[0-9]+]]:vecr(s32) = COPY $xmm0
- ; FAST: [[COPY2:%[0-9]+]]:vecr(s32) = COPY $xmm1
+ ; FAST: [[COPY1:%[0-9]+]]:vecr(s128) = COPY $xmm0
+ ; FAST: [[TRUNC:%[0-9]+]]:vecr(s32) = G_TRUNC [[COPY1]](s128)
+ ; FAST: [[COPY2:%[0-9]+]]:vecr(s128) = COPY $xmm1
+ ; FAST: [[TRUNC1:%[0-9]+]]:vecr(s32) = G_TRUNC [[COPY2]](s128)
; FAST: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 0
; FAST: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
- ; FAST: G_BRCOND [[ICMP]](s1), %bb.1
- ; FAST: G_BR %bb.2
- ; FAST: bb.1.cond.true:
- ; FAST: successors: %bb.3(0x80000000)
- ; FAST: G_BR %bb.3
- ; FAST: bb.2.cond.false:
- ; FAST: successors: %bb.3(0x80000000)
- ; FAST: bb.3.cond.end:
- ; FAST: [[PHI:%[0-9]+]]:vecr(s32) = G_PHI [[COPY1]](s32), %bb.1, [[COPY2]](s32), %bb.2
- ; FAST: $xmm0 = COPY [[PHI]](s32)
+ ; FAST: G_BRCOND [[ICMP]](s1), %bb.2
+ ; FAST: bb.1.cond.false:
+ ; FAST: successors: %bb.2(0x80000000)
+ ; FAST: bb.2.cond.end:
+ ; FAST: [[PHI:%[0-9]+]]:vecr(s32) = G_PHI [[TRUNC1]](s32), %bb.1, [[TRUNC]](s32), %bb.0
+ ; FAST: [[ANYEXT:%[0-9]+]]:vecr(s128) = G_ANYEXT [[PHI]](s32)
+ ; FAST: $xmm0 = COPY [[ANYEXT]](s128)
; FAST: RET 0, implicit $xmm0
; GREEDY-LABEL: name: test_float
; GREEDY: bb.0.entry:
- ; GREEDY: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; GREEDY: successors: %bb.2(0x40000000), %bb.1(0x40000000)
; GREEDY: liveins: $edi, $xmm0, $xmm1
; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi
- ; GREEDY: [[COPY1:%[0-9]+]]:vecr(s32) = COPY $xmm0
- ; GREEDY: [[COPY2:%[0-9]+]]:vecr(s32) = COPY $xmm1
+ ; GREEDY: [[COPY1:%[0-9]+]]:vecr(s128) = COPY $xmm0
+ ; GREEDY: [[TRUNC:%[0-9]+]]:vecr(s32) = G_TRUNC [[COPY1]](s128)
+ ; GREEDY: [[COPY2:%[0-9]+]]:vecr(s128) = COPY $xmm1
+ ; GREEDY: [[TRUNC1:%[0-9]+]]:vecr(s32) = G_TRUNC [[COPY2]](s128)
; GREEDY: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 0
; GREEDY: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
- ; GREEDY: G_BRCOND [[ICMP]](s1), %bb.1
- ; GREEDY: G_BR %bb.2
- ; GREEDY: bb.1.cond.true:
- ; GREEDY: successors: %bb.3(0x80000000)
- ; GREEDY: G_BR %bb.3
- ; GREEDY: bb.2.cond.false:
- ; GREEDY: successors: %bb.3(0x80000000)
- ; GREEDY: bb.3.cond.end:
- ; GREEDY: [[PHI:%[0-9]+]]:vecr(s32) = G_PHI [[COPY1]](s32), %bb.1, [[COPY2]](s32), %bb.2
- ; GREEDY: $xmm0 = COPY [[PHI]](s32)
+ ; GREEDY: G_BRCOND [[ICMP]](s1), %bb.2
+ ; GREEDY: bb.1.cond.false:
+ ; GREEDY: successors: %bb.2(0x80000000)
+ ; GREEDY: bb.2.cond.end:
+ ; GREEDY: [[PHI:%[0-9]+]]:vecr(s32) = G_PHI [[TRUNC1]](s32), %bb.1, [[TRUNC]](s32), %bb.0
+ ; GREEDY: [[ANYEXT:%[0-9]+]]:vecr(s128) = G_ANYEXT [[PHI]](s32)
+ ; GREEDY: $xmm0 = COPY [[ANYEXT]](s128)
; GREEDY: RET 0, implicit $xmm0
- bb.0.entry:
- successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ bb.1.entry:
+ successors: %bb.3(0x40000000), %bb.2(0x40000000)
liveins: $edi, $xmm0, $xmm1
- %0(s32) = COPY $edi
- %1(s32) = COPY $xmm0
- %2(s32) = COPY $xmm1
- %3(s32) = G_CONSTANT i32 0
- %4(s1) = G_ICMP intpred(sgt), %0(s32), %3
- G_BRCOND %4(s1), %bb.1
- G_BR %bb.2
-
- bb.1.cond.true:
- successors: %bb.3(0x80000000)
-
- G_BR %bb.3
+ %0:_(s32) = COPY $edi
+ %3:_(s128) = COPY $xmm0
+ %1:_(s32) = G_TRUNC %3(s128)
+ %4:_(s128) = COPY $xmm1
+ %2:_(s32) = G_TRUNC %4(s128)
+ %5:_(s32) = G_CONSTANT i32 0
+ %6:_(s1) = G_ICMP intpred(sgt), %0(s32), %5
+ G_BRCOND %6(s1), %bb.3
bb.2.cond.false:
successors: %bb.3(0x80000000)
+
bb.3.cond.end:
- %5(s32) = G_PHI %1(s32), %bb.1, %2(s32), %bb.2
- $xmm0 = COPY %5(s32)
+ %7:_(s32) = G_PHI %2(s32), %bb.2, %1(s32), %bb.1
+ %8:_(s128) = G_ANYEXT %7(s32)
+ $xmm0 = COPY %8(s128)
RET 0, implicit $xmm0
...
@@ -1742,18 +1779,24 @@ body: |
liveins: $xmm0
; FAST-LABEL: name: test_fpext
- ; FAST: [[COPY:%[0-9]+]]:vecr(s32) = COPY $xmm0
- ; FAST: [[FPEXT:%[0-9]+]]:vecr(s64) = G_FPEXT [[COPY]](s32)
- ; FAST: $xmm0 = COPY [[FPEXT]](s64)
+ ; FAST: [[COPY:%[0-9]+]]:vecr(s128) = COPY $xmm0
+ ; FAST: [[TRUNC:%[0-9]+]]:vecr(s32) = G_TRUNC [[COPY]](s128)
+ ; FAST: [[FPEXT:%[0-9]+]]:vecr(s64) = G_FPEXT [[TRUNC]](s32)
+ ; FAST: [[ANYEXT:%[0-9]+]]:vecr(s128) = G_ANYEXT [[FPEXT]](s64)
+ ; FAST: $xmm0 = COPY [[ANYEXT]](s128)
; FAST: RET 0, implicit $xmm0
; GREEDY-LABEL: name: test_fpext
- ; GREEDY: [[COPY:%[0-9]+]]:vecr(s32) = COPY $xmm0
- ; GREEDY: [[FPEXT:%[0-9]+]]:vecr(s64) = G_FPEXT [[COPY]](s32)
- ; GREEDY: $xmm0 = COPY [[FPEXT]](s64)
+ ; GREEDY: [[COPY:%[0-9]+]]:vecr(s128) = COPY $xmm0
+ ; GREEDY: [[TRUNC:%[0-9]+]]:vecr(s32) = G_TRUNC [[COPY]](s128)
+ ; GREEDY: [[FPEXT:%[0-9]+]]:vecr(s64) = G_FPEXT [[TRUNC]](s32)
+ ; GREEDY: [[ANYEXT:%[0-9]+]]:vecr(s128) = G_ANYEXT [[FPEXT]](s64)
+ ; GREEDY: $xmm0 = COPY [[ANYEXT]](s128)
; GREEDY: RET 0, implicit $xmm0
- %0(s32) = COPY $xmm0
- %1(s64) = G_FPEXT %0(s32)
- $xmm0 = COPY %1(s64)
+ %1:_(s128) = COPY $xmm0
+ %0:_(s32) = G_TRUNC %1(s128)
+ %2:_(s64) = G_FPEXT %0(s32)
+ %3:_(s128) = G_ANYEXT %2(s64)
+ $xmm0 = COPY %3(s128)
RET 0, implicit $xmm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-fadd-scalar.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-fadd-scalar.mir?rev=324665&r1=324664&r2=324665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-fadd-scalar.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-fadd-scalar.mir Thu Feb 8 14:41:47 2018
@@ -26,6 +26,9 @@ registers:
- { id: 0, class: vecr, preferred-register: '' }
- { id: 1, class: vecr, preferred-register: '' }
- { id: 2, class: vecr, preferred-register: '' }
+ - { id: 3, class: vecr, preferred-register: '' }
+ - { id: 4, class: vecr, preferred-register: '' }
+ - { id: 5, class: vecr, preferred-register: '' }
liveins:
fixedStack:
stack:
@@ -37,33 +40,48 @@ body: |
liveins: $xmm0, $xmm1
; SSE-LABEL: name: test_fadd_float
- ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
- ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY $xmm1
- ; SSE: [[ADDSSrr:%[0-9]+]]:fr32 = ADDSSrr [[COPY]], [[COPY1]]
- ; SSE: $xmm0 = COPY [[ADDSSrr]]
+ ; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+ ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
+ ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
+ ; SSE: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
+ ; SSE: [[ADDSSrr:%[0-9]+]]:fr32 = ADDSSrr [[COPY1]], [[COPY3]]
+ ; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY [[ADDSSrr]]
+ ; SSE: $xmm0 = COPY [[COPY4]]
; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_fadd_float
- ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
- ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY $xmm1
- ; AVX: [[VADDSSrr:%[0-9]+]]:fr32 = VADDSSrr [[COPY]], [[COPY1]]
- ; AVX: $xmm0 = COPY [[VADDSSrr]]
+ ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+ ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
+ ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
+ ; AVX: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
+ ; AVX: [[VADDSSrr:%[0-9]+]]:fr32 = VADDSSrr [[COPY1]], [[COPY3]]
+ ; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY [[VADDSSrr]]
+ ; AVX: $xmm0 = COPY [[COPY4]]
; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_fadd_float
- ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY $xmm1
- ; AVX512F: [[VADDSSZrr:%[0-9]+]]:fr32x = VADDSSZrr [[COPY]], [[COPY1]]
- ; AVX512F: $xmm0 = COPY [[VADDSSZrr]]
+ ; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
+ ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
+ ; AVX512F: [[COPY3:%[0-9]+]]:fr32x = COPY [[COPY2]]
+ ; AVX512F: [[VADDSSZrr:%[0-9]+]]:fr32x = VADDSSZrr [[COPY1]], [[COPY3]]
+ ; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY [[VADDSSZrr]]
+ ; AVX512F: $xmm0 = COPY [[COPY4]]
; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_fadd_float
- ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
- ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY $xmm1
- ; AVX512VL: [[VADDSSZrr:%[0-9]+]]:fr32x = VADDSSZrr [[COPY]], [[COPY1]]
- ; AVX512VL: $xmm0 = COPY [[VADDSSZrr]]
+ ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+ ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
+ ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
+ ; AVX512VL: [[COPY3:%[0-9]+]]:fr32x = COPY [[COPY2]]
+ ; AVX512VL: [[VADDSSZrr:%[0-9]+]]:fr32x = VADDSSZrr [[COPY1]], [[COPY3]]
+ ; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY [[VADDSSZrr]]
+ ; AVX512VL: $xmm0 = COPY [[COPY4]]
; AVX512VL: RET 0, implicit $xmm0
- %0(s32) = COPY $xmm0
- %1(s32) = COPY $xmm1
- %2(s32) = G_FADD %0, %1
- $xmm0 = COPY %2(s32)
+ %2:vecr(s128) = COPY $xmm0
+ %0:vecr(s32) = G_TRUNC %2(s128)
+ %3:vecr(s128) = COPY $xmm1
+ %1:vecr(s32) = G_TRUNC %3(s128)
+ %4:vecr(s32) = G_FADD %0, %1
+ %5:vecr(s128) = G_ANYEXT %4(s32)
+ $xmm0 = COPY %5(s128)
RET 0, implicit $xmm0
...
@@ -77,6 +95,9 @@ registers:
- { id: 0, class: vecr, preferred-register: '' }
- { id: 1, class: vecr, preferred-register: '' }
- { id: 2, class: vecr, preferred-register: '' }
+ - { id: 3, class: vecr, preferred-register: '' }
+ - { id: 4, class: vecr, preferred-register: '' }
+ - { id: 5, class: vecr, preferred-register: '' }
liveins:
fixedStack:
stack:
@@ -88,33 +109,48 @@ body: |
liveins: $xmm0, $xmm1
; SSE-LABEL: name: test_fadd_double
- ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
- ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY $xmm1
- ; SSE: [[ADDSDrr:%[0-9]+]]:fr64 = ADDSDrr [[COPY]], [[COPY1]]
- ; SSE: $xmm0 = COPY [[ADDSDrr]]
+ ; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+ ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
+ ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
+ ; SSE: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
+ ; SSE: [[ADDSDrr:%[0-9]+]]:fr64 = ADDSDrr [[COPY1]], [[COPY3]]
+ ; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY [[ADDSDrr]]
+ ; SSE: $xmm0 = COPY [[COPY4]]
; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_fadd_double
- ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
- ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY $xmm1
- ; AVX: [[VADDSDrr:%[0-9]+]]:fr64 = VADDSDrr [[COPY]], [[COPY1]]
- ; AVX: $xmm0 = COPY [[VADDSDrr]]
+ ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+ ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
+ ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
+ ; AVX: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
+ ; AVX: [[VADDSDrr:%[0-9]+]]:fr64 = VADDSDrr [[COPY1]], [[COPY3]]
+ ; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY [[VADDSDrr]]
+ ; AVX: $xmm0 = COPY [[COPY4]]
; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_fadd_double
- ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY $xmm1
- ; AVX512F: [[VADDSDZrr:%[0-9]+]]:fr64x = VADDSDZrr [[COPY]], [[COPY1]]
- ; AVX512F: $xmm0 = COPY [[VADDSDZrr]]
+ ; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
+ ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
+ ; AVX512F: [[COPY3:%[0-9]+]]:fr64x = COPY [[COPY2]]
+ ; AVX512F: [[VADDSDZrr:%[0-9]+]]:fr64x = VADDSDZrr [[COPY1]], [[COPY3]]
+ ; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY [[VADDSDZrr]]
+ ; AVX512F: $xmm0 = COPY [[COPY4]]
; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_fadd_double
- ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
- ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY $xmm1
- ; AVX512VL: [[VADDSDZrr:%[0-9]+]]:fr64x = VADDSDZrr [[COPY]], [[COPY1]]
- ; AVX512VL: $xmm0 = COPY [[VADDSDZrr]]
+ ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+ ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
+ ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
+ ; AVX512VL: [[COPY3:%[0-9]+]]:fr64x = COPY [[COPY2]]
+ ; AVX512VL: [[VADDSDZrr:%[0-9]+]]:fr64x = VADDSDZrr [[COPY1]], [[COPY3]]
+ ; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY [[VADDSDZrr]]
+ ; AVX512VL: $xmm0 = COPY [[COPY4]]
; AVX512VL: RET 0, implicit $xmm0
- %0(s64) = COPY $xmm0
- %1(s64) = COPY $xmm1
- %2(s64) = G_FADD %0, %1
- $xmm0 = COPY %2(s64)
+ %2:vecr(s128) = COPY $xmm0
+ %0:vecr(s64) = G_TRUNC %2(s128)
+ %3:vecr(s128) = COPY $xmm1
+ %1:vecr(s64) = G_TRUNC %3(s128)
+ %4:vecr(s64) = G_FADD %0, %1
+ %5:vecr(s128) = G_ANYEXT %4(s64)
+ $xmm0 = COPY %5(s128)
RET 0, implicit $xmm0
...
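
[Editor's note: the reshaping above repeats in every scalar select-* test in this patch. Each $xmm argument is first copied into a 128-bit vecr virtual register, G_TRUNC'ed down to the actual scalar width before the operation, and the result is G_ANYEXT'ed back to s128 before the final copy into $xmm0 -- matching the G_COPY/G_TRUNC and G_ANYEXT/G_COPY pairs the call lowering now emits. For orientation, the test_fadd_float MIR corresponds to IR along these lines (a sketch inferred from the test name and check lines; value names are assumed, not the literal .ll source):

    ; sketch only: names assumed, not the test's literal .ll source
    define float @test_fadd_float(float %arg1, float %arg2) {
      %ret = fadd float %arg1, %arg2
      ret float %ret
    }
]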
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-fconstant.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-fconstant.mir?rev=324665&r1=324664&r2=324665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-fconstant.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-fconstant.mir Thu Feb 8 14:41:47 2018
@@ -30,27 +30,33 @@ body: |
bb.1.entry:
; CHECK_NOPIC64-LABEL: name: test_float
; CHECK_NOPIC64: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm $rip, 1, $noreg, %const.0, $noreg
- ; CHECK_NOPIC64: $xmm0 = COPY [[MOVSSrm]]
+ ; CHECK_NOPIC64: [[COPY:%[0-9]+]]:vr128 = COPY [[MOVSSrm]]
+ ; CHECK_NOPIC64: $xmm0 = COPY [[COPY]]
; CHECK_NOPIC64: RET 0, implicit $xmm0
; CHECK_LARGE64-LABEL: name: test_float
; CHECK_LARGE64: [[MOV64ri:%[0-9]+]]:gr64 = MOV64ri %const.0
; CHECK_LARGE64: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm [[MOV64ri]], 1, $noreg, 0, $noreg :: (load 8 from constant-pool, align 32)
- ; CHECK_LARGE64: $xmm0 = COPY [[MOVSSrm]]
+ ; CHECK_LARGE64: [[COPY:%[0-9]+]]:vr128 = COPY [[MOVSSrm]]
+ ; CHECK_LARGE64: $xmm0 = COPY [[COPY]]
; CHECK_LARGE64: RET 0, implicit $xmm0
; CHECK_SMALL32-LABEL: name: test_float
; CHECK_SMALL32: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm $noreg, 1, $noreg, %const.0, $noreg
- ; CHECK_SMALL32: $xmm0 = COPY [[MOVSSrm]]
+ ; CHECK_SMALL32: [[COPY:%[0-9]+]]:vr128 = COPY [[MOVSSrm]]
+ ; CHECK_SMALL32: $xmm0 = COPY [[COPY]]
; CHECK_SMALL32: RET 0, implicit $xmm0
; CHECK_LARGE32-LABEL: name: test_float
; CHECK_LARGE32: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm $noreg, 1, $noreg, %const.0, $noreg
- ; CHECK_LARGE32: $xmm0 = COPY [[MOVSSrm]]
+ ; CHECK_LARGE32: [[COPY:%[0-9]+]]:vr128 = COPY [[MOVSSrm]]
+ ; CHECK_LARGE32: $xmm0 = COPY [[COPY]]
; CHECK_LARGE32: RET 0, implicit $xmm0
; CHECK_PIC64-LABEL: name: test_float
; CHECK_PIC64: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm $rip, 1, $noreg, %const.0, $noreg
- ; CHECK_PIC64: $xmm0 = COPY [[MOVSSrm]]
+ ; CHECK_PIC64: [[COPY:%[0-9]+]]:vr128 = COPY [[MOVSSrm]]
+ ; CHECK_PIC64: $xmm0 = COPY [[COPY]]
; CHECK_PIC64: RET 0, implicit $xmm0
- %0(s32) = G_FCONSTANT float 5.500000e+00
- $xmm0 = COPY %0(s32)
+ %0:vecr(s32) = G_FCONSTANT float 5.500000e+00
+ %1:vecr(s128) = G_ANYEXT %0(s32)
+ $xmm0 = COPY %1(s128)
RET 0, implicit $xmm0
...
@@ -71,27 +77,33 @@ body: |
bb.1.entry:
; CHECK_NOPIC64-LABEL: name: test_double
; CHECK_NOPIC64: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm $rip, 1, $noreg, %const.0, $noreg
- ; CHECK_NOPIC64: $xmm0 = COPY [[MOVSDrm]]
+ ; CHECK_NOPIC64: [[COPY:%[0-9]+]]:vr128 = COPY [[MOVSDrm]]
+ ; CHECK_NOPIC64: $xmm0 = COPY [[COPY]]
; CHECK_NOPIC64: RET 0, implicit $xmm0
; CHECK_LARGE64-LABEL: name: test_double
; CHECK_LARGE64: [[MOV64ri:%[0-9]+]]:gr64 = MOV64ri %const.0
; CHECK_LARGE64: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm [[MOV64ri]], 1, $noreg, 0, $noreg :: (load 8 from constant-pool, align 64)
- ; CHECK_LARGE64: $xmm0 = COPY [[MOVSDrm]]
+ ; CHECK_LARGE64: [[COPY:%[0-9]+]]:vr128 = COPY [[MOVSDrm]]
+ ; CHECK_LARGE64: $xmm0 = COPY [[COPY]]
; CHECK_LARGE64: RET 0, implicit $xmm0
; CHECK_SMALL32-LABEL: name: test_double
; CHECK_SMALL32: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm $noreg, 1, $noreg, %const.0, $noreg
- ; CHECK_SMALL32: $xmm0 = COPY [[MOVSDrm]]
+ ; CHECK_SMALL32: [[COPY:%[0-9]+]]:vr128 = COPY [[MOVSDrm]]
+ ; CHECK_SMALL32: $xmm0 = COPY [[COPY]]
; CHECK_SMALL32: RET 0, implicit $xmm0
; CHECK_LARGE32-LABEL: name: test_double
; CHECK_LARGE32: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm $noreg, 1, $noreg, %const.0, $noreg
- ; CHECK_LARGE32: $xmm0 = COPY [[MOVSDrm]]
+ ; CHECK_LARGE32: [[COPY:%[0-9]+]]:vr128 = COPY [[MOVSDrm]]
+ ; CHECK_LARGE32: $xmm0 = COPY [[COPY]]
; CHECK_LARGE32: RET 0, implicit $xmm0
; CHECK_PIC64-LABEL: name: test_double
; CHECK_PIC64: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm $rip, 1, $noreg, %const.0, $noreg
- ; CHECK_PIC64: $xmm0 = COPY [[MOVSDrm]]
+ ; CHECK_PIC64: [[COPY:%[0-9]+]]:vr128 = COPY [[MOVSDrm]]
+ ; CHECK_PIC64: $xmm0 = COPY [[COPY]]
; CHECK_PIC64: RET 0, implicit $xmm0
- %0(s64) = G_FCONSTANT double 5.500000e+00
- $xmm0 = COPY %0(s64)
+ %0:vecr(s64) = G_FCONSTANT double 5.500000e+00
+ %1:vecr(s128) = G_ANYEXT %0(s64)
+ $xmm0 = COPY %1(s128)
RET 0, implicit $xmm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-fdiv-scalar.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-fdiv-scalar.mir?rev=324665&r1=324664&r2=324665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-fdiv-scalar.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-fdiv-scalar.mir Thu Feb 8 14:41:47 2018
@@ -26,6 +26,9 @@ registers:
- { id: 0, class: vecr, preferred-register: '' }
- { id: 1, class: vecr, preferred-register: '' }
- { id: 2, class: vecr, preferred-register: '' }
+ - { id: 3, class: vecr, preferred-register: '' }
+ - { id: 4, class: vecr, preferred-register: '' }
+ - { id: 5, class: vecr, preferred-register: '' }
liveins:
fixedStack:
stack:
@@ -37,33 +40,48 @@ body: |
liveins: $xmm0, $xmm1
; SSE-LABEL: name: test_fdiv_float
- ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
- ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY $xmm1
- ; SSE: [[DIVSSrr:%[0-9]+]]:fr32 = DIVSSrr [[COPY]], [[COPY1]]
- ; SSE: $xmm0 = COPY [[DIVSSrr]]
+ ; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+ ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
+ ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
+ ; SSE: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
+ ; SSE: [[DIVSSrr:%[0-9]+]]:fr32 = DIVSSrr [[COPY1]], [[COPY3]]
+ ; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY [[DIVSSrr]]
+ ; SSE: $xmm0 = COPY [[COPY4]]
; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_fdiv_float
- ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
- ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY $xmm1
- ; AVX: [[VDIVSSrr:%[0-9]+]]:fr32 = VDIVSSrr [[COPY]], [[COPY1]]
- ; AVX: $xmm0 = COPY [[VDIVSSrr]]
+ ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+ ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
+ ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
+ ; AVX: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
+ ; AVX: [[VDIVSSrr:%[0-9]+]]:fr32 = VDIVSSrr [[COPY1]], [[COPY3]]
+ ; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY [[VDIVSSrr]]
+ ; AVX: $xmm0 = COPY [[COPY4]]
; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_fdiv_float
- ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY $xmm1
- ; AVX512F: [[VDIVSSZrr:%[0-9]+]]:fr32x = VDIVSSZrr [[COPY]], [[COPY1]]
- ; AVX512F: $xmm0 = COPY [[VDIVSSZrr]]
+ ; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
+ ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
+ ; AVX512F: [[COPY3:%[0-9]+]]:fr32x = COPY [[COPY2]]
+ ; AVX512F: [[VDIVSSZrr:%[0-9]+]]:fr32x = VDIVSSZrr [[COPY1]], [[COPY3]]
+ ; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY [[VDIVSSZrr]]
+ ; AVX512F: $xmm0 = COPY [[COPY4]]
; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_fdiv_float
- ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
- ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY $xmm1
- ; AVX512VL: [[VDIVSSZrr:%[0-9]+]]:fr32x = VDIVSSZrr [[COPY]], [[COPY1]]
- ; AVX512VL: $xmm0 = COPY [[VDIVSSZrr]]
+ ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+ ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
+ ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
+ ; AVX512VL: [[COPY3:%[0-9]+]]:fr32x = COPY [[COPY2]]
+ ; AVX512VL: [[VDIVSSZrr:%[0-9]+]]:fr32x = VDIVSSZrr [[COPY1]], [[COPY3]]
+ ; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY [[VDIVSSZrr]]
+ ; AVX512VL: $xmm0 = COPY [[COPY4]]
; AVX512VL: RET 0, implicit $xmm0
- %0(s32) = COPY $xmm0
- %1(s32) = COPY $xmm1
- %2(s32) = G_FDIV %0, %1
- $xmm0 = COPY %2(s32)
+ %2:vecr(s128) = COPY $xmm0
+ %0:vecr(s32) = G_TRUNC %2(s128)
+ %3:vecr(s128) = COPY $xmm1
+ %1:vecr(s32) = G_TRUNC %3(s128)
+ %4:vecr(s32) = G_FDIV %0, %1
+ %5:vecr(s128) = G_ANYEXT %4(s32)
+ $xmm0 = COPY %5(s128)
RET 0, implicit $xmm0
...
@@ -77,6 +95,9 @@ registers:
- { id: 0, class: vecr, preferred-register: '' }
- { id: 1, class: vecr, preferred-register: '' }
- { id: 2, class: vecr, preferred-register: '' }
+ - { id: 3, class: vecr, preferred-register: '' }
+ - { id: 4, class: vecr, preferred-register: '' }
+ - { id: 5, class: vecr, preferred-register: '' }
liveins:
fixedStack:
stack:
@@ -88,33 +109,48 @@ body: |
liveins: $xmm0, $xmm1
; SSE-LABEL: name: test_fdiv_double
- ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
- ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY $xmm1
- ; SSE: [[DIVSDrr:%[0-9]+]]:fr64 = DIVSDrr [[COPY]], [[COPY1]]
- ; SSE: $xmm0 = COPY [[DIVSDrr]]
+ ; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+ ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
+ ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
+ ; SSE: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
+ ; SSE: [[DIVSDrr:%[0-9]+]]:fr64 = DIVSDrr [[COPY1]], [[COPY3]]
+ ; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY [[DIVSDrr]]
+ ; SSE: $xmm0 = COPY [[COPY4]]
; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_fdiv_double
- ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
- ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY $xmm1
- ; AVX: [[VDIVSDrr:%[0-9]+]]:fr64 = VDIVSDrr [[COPY]], [[COPY1]]
- ; AVX: $xmm0 = COPY [[VDIVSDrr]]
+ ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+ ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
+ ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
+ ; AVX: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
+ ; AVX: [[VDIVSDrr:%[0-9]+]]:fr64 = VDIVSDrr [[COPY1]], [[COPY3]]
+ ; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY [[VDIVSDrr]]
+ ; AVX: $xmm0 = COPY [[COPY4]]
; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_fdiv_double
- ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY $xmm1
- ; AVX512F: [[VDIVSDZrr:%[0-9]+]]:fr64x = VDIVSDZrr [[COPY]], [[COPY1]]
- ; AVX512F: $xmm0 = COPY [[VDIVSDZrr]]
+ ; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
+ ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
+ ; AVX512F: [[COPY3:%[0-9]+]]:fr64x = COPY [[COPY2]]
+ ; AVX512F: [[VDIVSDZrr:%[0-9]+]]:fr64x = VDIVSDZrr [[COPY1]], [[COPY3]]
+ ; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY [[VDIVSDZrr]]
+ ; AVX512F: $xmm0 = COPY [[COPY4]]
; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_fdiv_double
- ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
- ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY $xmm1
- ; AVX512VL: [[VDIVSDZrr:%[0-9]+]]:fr64x = VDIVSDZrr [[COPY]], [[COPY1]]
- ; AVX512VL: $xmm0 = COPY [[VDIVSDZrr]]
+ ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+ ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
+ ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
+ ; AVX512VL: [[COPY3:%[0-9]+]]:fr64x = COPY [[COPY2]]
+ ; AVX512VL: [[VDIVSDZrr:%[0-9]+]]:fr64x = VDIVSDZrr [[COPY1]], [[COPY3]]
+ ; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY [[VDIVSDZrr]]
+ ; AVX512VL: $xmm0 = COPY [[COPY4]]
; AVX512VL: RET 0, implicit $xmm0
- %0(s64) = COPY $xmm0
- %1(s64) = COPY $xmm1
- %2(s64) = G_FDIV %0, %1
- $xmm0 = COPY %2(s64)
+ %2:vecr(s128) = COPY $xmm0
+ %0:vecr(s64) = G_TRUNC %2(s128)
+ %3:vecr(s128) = COPY $xmm1
+ %1:vecr(s64) = G_TRUNC %3(s128)
+ %4:vecr(s64) = G_FDIV %0, %1
+ %5:vecr(s128) = G_ANYEXT %4(s64)
+ $xmm0 = COPY %5(s128)
RET 0, implicit $xmm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-fmul-scalar.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-fmul-scalar.mir?rev=324665&r1=324664&r2=324665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-fmul-scalar.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-fmul-scalar.mir Thu Feb 8 14:41:47 2018
@@ -26,6 +26,9 @@ registers:
- { id: 0, class: vecr, preferred-register: '' }
- { id: 1, class: vecr, preferred-register: '' }
- { id: 2, class: vecr, preferred-register: '' }
+ - { id: 3, class: vecr, preferred-register: '' }
+ - { id: 4, class: vecr, preferred-register: '' }
+ - { id: 5, class: vecr, preferred-register: '' }
liveins:
fixedStack:
stack:
@@ -37,33 +40,48 @@ body: |
liveins: $xmm0, $xmm1
; SSE-LABEL: name: test_fmul_float
- ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
- ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY $xmm1
- ; SSE: [[MULSSrr:%[0-9]+]]:fr32 = MULSSrr [[COPY]], [[COPY1]]
- ; SSE: $xmm0 = COPY [[MULSSrr]]
+ ; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+ ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
+ ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
+ ; SSE: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
+ ; SSE: [[MULSSrr:%[0-9]+]]:fr32 = MULSSrr [[COPY1]], [[COPY3]]
+ ; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY [[MULSSrr]]
+ ; SSE: $xmm0 = COPY [[COPY4]]
; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_fmul_float
- ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
- ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY $xmm1
- ; AVX: [[VMULSSrr:%[0-9]+]]:fr32 = VMULSSrr [[COPY]], [[COPY1]]
- ; AVX: $xmm0 = COPY [[VMULSSrr]]
+ ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+ ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
+ ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
+ ; AVX: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
+ ; AVX: [[VMULSSrr:%[0-9]+]]:fr32 = VMULSSrr [[COPY1]], [[COPY3]]
+ ; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY [[VMULSSrr]]
+ ; AVX: $xmm0 = COPY [[COPY4]]
; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_fmul_float
- ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY $xmm1
- ; AVX512F: [[VMULSSZrr:%[0-9]+]]:fr32x = VMULSSZrr [[COPY]], [[COPY1]]
- ; AVX512F: $xmm0 = COPY [[VMULSSZrr]]
+ ; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
+ ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
+ ; AVX512F: [[COPY3:%[0-9]+]]:fr32x = COPY [[COPY2]]
+ ; AVX512F: [[VMULSSZrr:%[0-9]+]]:fr32x = VMULSSZrr [[COPY1]], [[COPY3]]
+ ; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY [[VMULSSZrr]]
+ ; AVX512F: $xmm0 = COPY [[COPY4]]
; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_fmul_float
- ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
- ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY $xmm1
- ; AVX512VL: [[VMULSSZrr:%[0-9]+]]:fr32x = VMULSSZrr [[COPY]], [[COPY1]]
- ; AVX512VL: $xmm0 = COPY [[VMULSSZrr]]
+ ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+ ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
+ ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
+ ; AVX512VL: [[COPY3:%[0-9]+]]:fr32x = COPY [[COPY2]]
+ ; AVX512VL: [[VMULSSZrr:%[0-9]+]]:fr32x = VMULSSZrr [[COPY1]], [[COPY3]]
+ ; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY [[VMULSSZrr]]
+ ; AVX512VL: $xmm0 = COPY [[COPY4]]
; AVX512VL: RET 0, implicit $xmm0
- %0(s32) = COPY $xmm0
- %1(s32) = COPY $xmm1
- %2(s32) = G_FMUL %0, %1
- $xmm0 = COPY %2(s32)
+ %2:vecr(s128) = COPY $xmm0
+ %0:vecr(s32) = G_TRUNC %2(s128)
+ %3:vecr(s128) = COPY $xmm1
+ %1:vecr(s32) = G_TRUNC %3(s128)
+ %4:vecr(s32) = G_FMUL %0, %1
+ %5:vecr(s128) = G_ANYEXT %4(s32)
+ $xmm0 = COPY %5(s128)
RET 0, implicit $xmm0
...
@@ -77,6 +95,9 @@ registers:
- { id: 0, class: vecr, preferred-register: '' }
- { id: 1, class: vecr, preferred-register: '' }
- { id: 2, class: vecr, preferred-register: '' }
+ - { id: 3, class: vecr, preferred-register: '' }
+ - { id: 4, class: vecr, preferred-register: '' }
+ - { id: 5, class: vecr, preferred-register: '' }
liveins:
fixedStack:
stack:
@@ -88,33 +109,48 @@ body: |
liveins: $xmm0, $xmm1
; SSE-LABEL: name: test_fmul_double
- ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
- ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY $xmm1
- ; SSE: [[MULSDrr:%[0-9]+]]:fr64 = MULSDrr [[COPY]], [[COPY1]]
- ; SSE: $xmm0 = COPY [[MULSDrr]]
+ ; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+ ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
+ ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
+ ; SSE: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
+ ; SSE: [[MULSDrr:%[0-9]+]]:fr64 = MULSDrr [[COPY1]], [[COPY3]]
+ ; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY [[MULSDrr]]
+ ; SSE: $xmm0 = COPY [[COPY4]]
; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_fmul_double
- ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
- ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY $xmm1
- ; AVX: [[VMULSDrr:%[0-9]+]]:fr64 = VMULSDrr [[COPY]], [[COPY1]]
- ; AVX: $xmm0 = COPY [[VMULSDrr]]
+ ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+ ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
+ ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
+ ; AVX: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
+ ; AVX: [[VMULSDrr:%[0-9]+]]:fr64 = VMULSDrr [[COPY1]], [[COPY3]]
+ ; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY [[VMULSDrr]]
+ ; AVX: $xmm0 = COPY [[COPY4]]
; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_fmul_double
- ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY $xmm1
- ; AVX512F: [[VMULSDZrr:%[0-9]+]]:fr64x = VMULSDZrr [[COPY]], [[COPY1]]
- ; AVX512F: $xmm0 = COPY [[VMULSDZrr]]
+ ; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
+ ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
+ ; AVX512F: [[COPY3:%[0-9]+]]:fr64x = COPY [[COPY2]]
+ ; AVX512F: [[VMULSDZrr:%[0-9]+]]:fr64x = VMULSDZrr [[COPY1]], [[COPY3]]
+ ; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY [[VMULSDZrr]]
+ ; AVX512F: $xmm0 = COPY [[COPY4]]
; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_fmul_double
- ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
- ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY $xmm1
- ; AVX512VL: [[VMULSDZrr:%[0-9]+]]:fr64x = VMULSDZrr [[COPY]], [[COPY1]]
- ; AVX512VL: $xmm0 = COPY [[VMULSDZrr]]
+ ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+ ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
+ ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
+ ; AVX512VL: [[COPY3:%[0-9]+]]:fr64x = COPY [[COPY2]]
+ ; AVX512VL: [[VMULSDZrr:%[0-9]+]]:fr64x = VMULSDZrr [[COPY1]], [[COPY3]]
+ ; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY [[VMULSDZrr]]
+ ; AVX512VL: $xmm0 = COPY [[COPY4]]
; AVX512VL: RET 0, implicit $xmm0
- %0(s64) = COPY $xmm0
- %1(s64) = COPY $xmm1
- %2(s64) = G_FMUL %0, %1
- $xmm0 = COPY %2(s64)
+ %2:vecr(s128) = COPY $xmm0
+ %0:vecr(s64) = G_TRUNC %2(s128)
+ %3:vecr(s128) = COPY $xmm1
+ %1:vecr(s64) = G_TRUNC %3(s128)
+ %4:vecr(s64) = G_FMUL %0, %1
+ %5:vecr(s128) = G_ANYEXT %4(s64)
+ $xmm0 = COPY %5(s128)
RET 0, implicit $xmm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-fpext-scalar.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-fpext-scalar.mir?rev=324665&r1=324664&r2=324665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-fpext-scalar.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-fpext-scalar.mir Thu Feb 8 14:41:47 2018
@@ -17,6 +17,8 @@ regBankSelected: true
registers:
- { id: 0, class: vecr, preferred-register: '' }
- { id: 1, class: vecr, preferred-register: '' }
+ - { id: 2, class: vecr, preferred-register: '' }
+ - { id: 3, class: vecr, preferred-register: '' }
liveins:
fixedStack:
stack:
@@ -26,13 +28,17 @@ body: |
liveins: $xmm0
; ALL-LABEL: name: test
- ; ALL: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
- ; ALL: [[CVTSS2SDrr:%[0-9]+]]:fr64 = CVTSS2SDrr [[COPY]]
- ; ALL: $xmm0 = COPY [[CVTSS2SDrr]]
+ ; ALL: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+ ; ALL: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
+ ; ALL: [[CVTSS2SDrr:%[0-9]+]]:fr64 = CVTSS2SDrr [[COPY1]]
+ ; ALL: [[COPY2:%[0-9]+]]:vr128 = COPY [[CVTSS2SDrr]]
+ ; ALL: $xmm0 = COPY [[COPY2]]
; ALL: RET 0, implicit $xmm0
- %0(s32) = COPY $xmm0
- %1(s64) = G_FPEXT %0(s32)
- $xmm0 = COPY %1(s64)
+ %1:vecr(s128) = COPY $xmm0
+ %0:vecr(s32) = G_TRUNC %1(s128)
+ %2:vecr(s64) = G_FPEXT %0(s32)
+ %3:vecr(s128) = G_ANYEXT %2(s64)
+ $xmm0 = COPY %3(s128)
RET 0, implicit $xmm0
...
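
[Editor's note: the fpext test gets the same s128 wrapping, while the conversion itself is still selected to CVTSS2SDrr. Roughly the IR below (again a sketch inferred from the check lines; names are assumed):

    ; sketch only: names assumed
    define double @test(float %a) {
      %ext = fpext float %a to double
      ret double %ext
    }
]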
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-fsub-scalar.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-fsub-scalar.mir?rev=324665&r1=324664&r2=324665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-fsub-scalar.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-fsub-scalar.mir Thu Feb 8 14:41:47 2018
@@ -26,6 +26,9 @@ registers:
- { id: 0, class: vecr, preferred-register: '' }
- { id: 1, class: vecr, preferred-register: '' }
- { id: 2, class: vecr, preferred-register: '' }
+ - { id: 3, class: vecr, preferred-register: '' }
+ - { id: 4, class: vecr, preferred-register: '' }
+ - { id: 5, class: vecr, preferred-register: '' }
liveins:
fixedStack:
stack:
@@ -37,33 +40,48 @@ body: |
liveins: $xmm0, $xmm1
; SSE-LABEL: name: test_fsub_float
- ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
- ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY $xmm1
- ; SSE: [[SUBSSrr:%[0-9]+]]:fr32 = SUBSSrr [[COPY]], [[COPY1]]
- ; SSE: $xmm0 = COPY [[SUBSSrr]]
+ ; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+ ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
+ ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
+ ; SSE: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
+ ; SSE: [[SUBSSrr:%[0-9]+]]:fr32 = SUBSSrr [[COPY1]], [[COPY3]]
+ ; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY [[SUBSSrr]]
+ ; SSE: $xmm0 = COPY [[COPY4]]
; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_fsub_float
- ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
- ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY $xmm1
- ; AVX: [[VSUBSSrr:%[0-9]+]]:fr32 = VSUBSSrr [[COPY]], [[COPY1]]
- ; AVX: $xmm0 = COPY [[VSUBSSrr]]
+ ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+ ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
+ ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
+ ; AVX: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
+ ; AVX: [[VSUBSSrr:%[0-9]+]]:fr32 = VSUBSSrr [[COPY1]], [[COPY3]]
+ ; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY [[VSUBSSrr]]
+ ; AVX: $xmm0 = COPY [[COPY4]]
; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_fsub_float
- ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY $xmm1
- ; AVX512F: [[VSUBSSZrr:%[0-9]+]]:fr32x = VSUBSSZrr [[COPY]], [[COPY1]]
- ; AVX512F: $xmm0 = COPY [[VSUBSSZrr]]
+ ; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
+ ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
+ ; AVX512F: [[COPY3:%[0-9]+]]:fr32x = COPY [[COPY2]]
+ ; AVX512F: [[VSUBSSZrr:%[0-9]+]]:fr32x = VSUBSSZrr [[COPY1]], [[COPY3]]
+ ; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY [[VSUBSSZrr]]
+ ; AVX512F: $xmm0 = COPY [[COPY4]]
; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_fsub_float
- ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
- ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY $xmm1
- ; AVX512VL: [[VSUBSSZrr:%[0-9]+]]:fr32x = VSUBSSZrr [[COPY]], [[COPY1]]
- ; AVX512VL: $xmm0 = COPY [[VSUBSSZrr]]
+ ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+ ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
+ ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
+ ; AVX512VL: [[COPY3:%[0-9]+]]:fr32x = COPY [[COPY2]]
+ ; AVX512VL: [[VSUBSSZrr:%[0-9]+]]:fr32x = VSUBSSZrr [[COPY1]], [[COPY3]]
+ ; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY [[VSUBSSZrr]]
+ ; AVX512VL: $xmm0 = COPY [[COPY4]]
; AVX512VL: RET 0, implicit $xmm0
- %0(s32) = COPY $xmm0
- %1(s32) = COPY $xmm1
- %2(s32) = G_FSUB %0, %1
- $xmm0 = COPY %2(s32)
+ %2:vecr(s128) = COPY $xmm0
+ %0:vecr(s32) = G_TRUNC %2(s128)
+ %3:vecr(s128) = COPY $xmm1
+ %1:vecr(s32) = G_TRUNC %3(s128)
+ %4:vecr(s32) = G_FSUB %0, %1
+ %5:vecr(s128) = G_ANYEXT %4(s32)
+ $xmm0 = COPY %5(s128)
RET 0, implicit $xmm0
...
@@ -77,6 +95,9 @@ registers:
- { id: 0, class: vecr, preferred-register: '' }
- { id: 1, class: vecr, preferred-register: '' }
- { id: 2, class: vecr, preferred-register: '' }
+ - { id: 3, class: vecr, preferred-register: '' }
+ - { id: 4, class: vecr, preferred-register: '' }
+ - { id: 5, class: vecr, preferred-register: '' }
liveins:
fixedStack:
stack:
@@ -88,33 +109,48 @@ body: |
liveins: $xmm0, $xmm1
; SSE-LABEL: name: test_fsub_double
- ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
- ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY $xmm1
- ; SSE: [[SUBSDrr:%[0-9]+]]:fr64 = SUBSDrr [[COPY]], [[COPY1]]
- ; SSE: $xmm0 = COPY [[SUBSDrr]]
+ ; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+ ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
+ ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
+ ; SSE: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
+ ; SSE: [[SUBSDrr:%[0-9]+]]:fr64 = SUBSDrr [[COPY1]], [[COPY3]]
+ ; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY [[SUBSDrr]]
+ ; SSE: $xmm0 = COPY [[COPY4]]
; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_fsub_double
- ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
- ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY $xmm1
- ; AVX: [[VSUBSDrr:%[0-9]+]]:fr64 = VSUBSDrr [[COPY]], [[COPY1]]
- ; AVX: $xmm0 = COPY [[VSUBSDrr]]
+ ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+ ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
+ ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
+ ; AVX: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
+ ; AVX: [[VSUBSDrr:%[0-9]+]]:fr64 = VSUBSDrr [[COPY1]], [[COPY3]]
+ ; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY [[VSUBSDrr]]
+ ; AVX: $xmm0 = COPY [[COPY4]]
; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_fsub_double
- ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY $xmm1
- ; AVX512F: [[VSUBSDZrr:%[0-9]+]]:fr64x = VSUBSDZrr [[COPY]], [[COPY1]]
- ; AVX512F: $xmm0 = COPY [[VSUBSDZrr]]
+ ; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
+ ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
+ ; AVX512F: [[COPY3:%[0-9]+]]:fr64x = COPY [[COPY2]]
+ ; AVX512F: [[VSUBSDZrr:%[0-9]+]]:fr64x = VSUBSDZrr [[COPY1]], [[COPY3]]
+ ; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY [[VSUBSDZrr]]
+ ; AVX512F: $xmm0 = COPY [[COPY4]]
; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_fsub_double
- ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
- ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY $xmm1
- ; AVX512VL: [[VSUBSDZrr:%[0-9]+]]:fr64x = VSUBSDZrr [[COPY]], [[COPY1]]
- ; AVX512VL: $xmm0 = COPY [[VSUBSDZrr]]
+ ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+ ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
+ ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
+ ; AVX512VL: [[COPY3:%[0-9]+]]:fr64x = COPY [[COPY2]]
+ ; AVX512VL: [[VSUBSDZrr:%[0-9]+]]:fr64x = VSUBSDZrr [[COPY1]], [[COPY3]]
+ ; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY [[VSUBSDZrr]]
+ ; AVX512VL: $xmm0 = COPY [[COPY4]]
; AVX512VL: RET 0, implicit $xmm0
- %0(s64) = COPY $xmm0
- %1(s64) = COPY $xmm1
- %2(s64) = G_FSUB %0, %1
- $xmm0 = COPY %2(s64)
+ %2:vecr(s128) = COPY $xmm0
+ %0:vecr(s64) = G_TRUNC %2(s128)
+ %3:vecr(s128) = COPY $xmm1
+ %1:vecr(s64) = G_TRUNC %3(s128)
+ %4:vecr(s64) = G_FSUB %0, %1
+ %5:vecr(s128) = G_ANYEXT %4(s64)
+ $xmm0 = COPY %5(s128)
RET 0, implicit $xmm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir?rev=324665&r1=324664&r2=324665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir Thu Feb 8 14:41:47 2018
@@ -257,8 +257,10 @@ alignment: 4
legalized: true
regBankSelected: true
registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
+ - { id: 0, class: gpr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+ - { id: 2, class: vecr, preferred-register: '' }
+ - { id: 3, class: vecr, preferred-register: '' }
body: |
bb.1 (%ir-block.0):
liveins: $rdi
@@ -266,26 +268,36 @@ body: |
; SSE-LABEL: name: test_load_float
; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
- ; SSE: $xmm0 = COPY [[MOV32rm]]
+ ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[MOV32rm]]
+ ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
+ ; SSE: $xmm0 = COPY [[COPY2]]
; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_load_float
; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
- ; AVX: $xmm0 = COPY [[MOV32rm]]
+ ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[MOV32rm]]
+ ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
+ ; AVX: $xmm0 = COPY [[COPY2]]
; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_load_float
; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
- ; AVX512F: $xmm0 = COPY [[MOV32rm]]
+ ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[MOV32rm]]
+ ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
+ ; AVX512F: $xmm0 = COPY [[COPY2]]
; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_load_float
; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
- ; AVX512VL: $xmm0 = COPY [[MOV32rm]]
+ ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[MOV32rm]]
+ ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
+ ; AVX512VL: $xmm0 = COPY [[COPY2]]
; AVX512VL: RET 0, implicit $xmm0
- %0(p0) = COPY $rdi
- %1(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p1)
- $xmm0 = COPY %1(s32)
+ %0:gpr(p0) = COPY $rdi
+ %1:gpr(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p1)
+ %3:vecr(s32) = COPY %1(s32)
+ %2:vecr(s128) = G_ANYEXT %3(s32)
+ $xmm0 = COPY %2(s128)
RET 0, implicit $xmm0
...
@@ -295,35 +307,47 @@ alignment: 4
legalized: true
regBankSelected: true
registers:
- - { id: 0, class: gpr }
- - { id: 1, class: vecr }
+ - { id: 0, class: gpr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+ - { id: 2, class: vecr, preferred-register: '' }
+ - { id: 3, class: vecr, preferred-register: '' }
body: |
bb.1 (%ir-block.0):
liveins: $rdi
; SSE-LABEL: name: test_load_float_vecreg
; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
- ; SSE: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
- ; SSE: $xmm0 = COPY [[MOVSSrm]]
+ ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
+ ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[MOV32rm]]
+ ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
+ ; SSE: $xmm0 = COPY [[COPY2]]
; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_load_float_vecreg
; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
- ; AVX: [[VMOVSSrm:%[0-9]+]]:fr32 = VMOVSSrm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
- ; AVX: $xmm0 = COPY [[VMOVSSrm]]
+ ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
+ ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[MOV32rm]]
+ ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
+ ; AVX: $xmm0 = COPY [[COPY2]]
; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_load_float_vecreg
; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
- ; AVX512F: [[VMOVSSZrm:%[0-9]+]]:fr32x = VMOVSSZrm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
- ; AVX512F: $xmm0 = COPY [[VMOVSSZrm]]
+ ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
+ ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[MOV32rm]]
+ ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
+ ; AVX512F: $xmm0 = COPY [[COPY2]]
; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_load_float_vecreg
; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
- ; AVX512VL: [[VMOVSSZrm:%[0-9]+]]:fr32x = VMOVSSZrm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
- ; AVX512VL: $xmm0 = COPY [[VMOVSSZrm]]
+ ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
+ ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[MOV32rm]]
+ ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
+ ; AVX512VL: $xmm0 = COPY [[COPY2]]
; AVX512VL: RET 0, implicit $xmm0
- %0(p0) = COPY $rdi
- %1(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p1)
- $xmm0 = COPY %1(s32)
+ %0:gpr(p0) = COPY $rdi
+ %1:gpr(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p1)
+ %3:vecr(s32) = COPY %1(s32)
+ %2:vecr(s128) = G_ANYEXT %3(s32)
+ $xmm0 = COPY %2(s128)
RET 0, implicit $xmm0
...
@@ -333,8 +357,10 @@ alignment: 4
legalized: true
regBankSelected: true
registers:
- - { id: 0, class: gpr }
- - { id: 1, class: gpr }
+ - { id: 0, class: gpr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+ - { id: 2, class: vecr, preferred-register: '' }
+ - { id: 3, class: vecr, preferred-register: '' }
body: |
bb.1 (%ir-block.0):
liveins: $rdi
@@ -342,26 +368,36 @@ body: |
; SSE-LABEL: name: test_load_double
; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
- ; SSE: $xmm0 = COPY [[MOV64rm]]
+ ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[MOV64rm]]
+ ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
+ ; SSE: $xmm0 = COPY [[COPY2]]
; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_load_double
; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
- ; AVX: $xmm0 = COPY [[MOV64rm]]
+ ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[MOV64rm]]
+ ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
+ ; AVX: $xmm0 = COPY [[COPY2]]
; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_load_double
; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
- ; AVX512F: $xmm0 = COPY [[MOV64rm]]
+ ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[MOV64rm]]
+ ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
+ ; AVX512F: $xmm0 = COPY [[COPY2]]
; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_load_double
; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
- ; AVX512VL: $xmm0 = COPY [[MOV64rm]]
+ ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[MOV64rm]]
+ ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
+ ; AVX512VL: $xmm0 = COPY [[COPY2]]
; AVX512VL: RET 0, implicit $xmm0
- %0(p0) = COPY $rdi
- %1(s64) = G_LOAD %0(p0) :: (load 8 from %ir.p1)
- $xmm0 = COPY %1(s64)
+ %0:gpr(p0) = COPY $rdi
+ %1:gpr(s64) = G_LOAD %0(p0) :: (load 8 from %ir.p1)
+ %3:vecr(s64) = COPY %1(s64)
+ %2:vecr(s128) = G_ANYEXT %3(s64)
+ $xmm0 = COPY %2(s128)
RET 0, implicit $xmm0
...
@@ -371,35 +407,47 @@ alignment: 4
legalized: true
regBankSelected: true
registers:
- - { id: 0, class: gpr }
- - { id: 1, class: vecr }
+ - { id: 0, class: gpr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+ - { id: 2, class: vecr, preferred-register: '' }
+ - { id: 3, class: vecr, preferred-register: '' }
body: |
bb.1 (%ir-block.0):
liveins: $rdi
; SSE-LABEL: name: test_load_double_vecreg
; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
- ; SSE: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
- ; SSE: $xmm0 = COPY [[MOVSDrm]]
+ ; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
+ ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[MOV64rm]]
+ ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
+ ; SSE: $xmm0 = COPY [[COPY2]]
; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_load_double_vecreg
; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
- ; AVX: [[VMOVSDrm:%[0-9]+]]:fr64 = VMOVSDrm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
- ; AVX: $xmm0 = COPY [[VMOVSDrm]]
+ ; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
+ ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[MOV64rm]]
+ ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
+ ; AVX: $xmm0 = COPY [[COPY2]]
; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_load_double_vecreg
; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
- ; AVX512F: [[VMOVSDZrm:%[0-9]+]]:fr64x = VMOVSDZrm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
- ; AVX512F: $xmm0 = COPY [[VMOVSDZrm]]
+ ; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
+ ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[MOV64rm]]
+ ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
+ ; AVX512F: $xmm0 = COPY [[COPY2]]
; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_load_double_vecreg
; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
- ; AVX512VL: [[VMOVSDZrm:%[0-9]+]]:fr64x = VMOVSDZrm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
- ; AVX512VL: $xmm0 = COPY [[VMOVSDZrm]]
+ ; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
+ ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[MOV64rm]]
+ ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
+ ; AVX512VL: $xmm0 = COPY [[COPY2]]
; AVX512VL: RET 0, implicit $xmm0
- %0(p0) = COPY $rdi
- %1(s64) = G_LOAD %0(p0) :: (load 8 from %ir.p1)
- $xmm0 = COPY %1(s64)
+ %0:gpr(p0) = COPY $rdi
+ %1:gpr(s64) = G_LOAD %0(p0) :: (load 8 from %ir.p1)
+ %3:vecr(s64) = COPY %1(s64)
+ %2:vecr(s128) = G_ANYEXT %3(s64)
+ $xmm0 = COPY %2(s128)
RET 0, implicit $xmm0
...
@@ -495,45 +543,51 @@ alignment: 4
legalized: true
regBankSelected: true
registers:
- - { id: 0, class: vecr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
+ - { id: 0, class: vecr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+ - { id: 2, class: vecr, preferred-register: '' }
+ - { id: 3, class: gpr, preferred-register: '' }
body: |
bb.1 (%ir-block.0):
liveins: $rdi, $xmm0
; SSE-LABEL: name: test_store_float
- ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
- ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
- ; SSE: [[COPY2:%[0-9]+]]:gr32 = COPY [[COPY]]
- ; SSE: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 4 into %ir.p1)
- ; SSE: $rax = COPY [[COPY1]]
+ ; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+ ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
+ ; SSE: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
+ ; SSE: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
+ ; SSE: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 4 into %ir.p1)
+ ; SSE: $rax = COPY [[COPY2]]
; SSE: RET 0, implicit $rax
; AVX-LABEL: name: test_store_float
- ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
- ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
- ; AVX: [[COPY2:%[0-9]+]]:gr32 = COPY [[COPY]]
- ; AVX: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 4 into %ir.p1)
- ; AVX: $rax = COPY [[COPY1]]
+ ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+ ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
+ ; AVX: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
+ ; AVX: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 4 into %ir.p1)
+ ; AVX: $rax = COPY [[COPY2]]
; AVX: RET 0, implicit $rax
; AVX512F-LABEL: name: test_store_float
- ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
- ; AVX512F: [[COPY2:%[0-9]+]]:gr32 = COPY [[COPY]]
- ; AVX512F: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 4 into %ir.p1)
- ; AVX512F: $rax = COPY [[COPY1]]
+ ; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
+ ; AVX512F: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
+ ; AVX512F: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 4 into %ir.p1)
+ ; AVX512F: $rax = COPY [[COPY2]]
; AVX512F: RET 0, implicit $rax
; AVX512VL-LABEL: name: test_store_float
- ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
- ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
- ; AVX512VL: [[COPY2:%[0-9]+]]:gr32 = COPY [[COPY]]
- ; AVX512VL: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 4 into %ir.p1)
- ; AVX512VL: $rax = COPY [[COPY1]]
+ ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+ ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
+ ; AVX512VL: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512VL: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
+ ; AVX512VL: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 4 into %ir.p1)
+ ; AVX512VL: $rax = COPY [[COPY2]]
; AVX512VL: RET 0, implicit $rax
- %0(s32) = COPY $xmm0
- %1(p0) = COPY $rdi
- %2(s32) = COPY %0(s32)
- G_STORE %2(s32), %1(p0) :: (store 4 into %ir.p1)
+ %2:vecr(s128) = COPY $xmm0
+ %0:vecr(s32) = G_TRUNC %2(s128)
+ %1:gpr(p0) = COPY $rdi
+ %3:gpr(s32) = COPY %0(s32)
+ G_STORE %3(s32), %1(p0) :: (store 4 into %ir.p1)
$rax = COPY %1(p0)
RET 0, implicit $rax
@@ -544,39 +598,51 @@ alignment: 4
legalized: true
regBankSelected: true
registers:
- - { id: 0, class: vecr }
- - { id: 1, class: gpr }
+ - { id: 0, class: vecr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+ - { id: 2, class: vecr, preferred-register: '' }
+ - { id: 3, class: gpr, preferred-register: '' }
body: |
bb.1 (%ir-block.0):
liveins: $rdi, $xmm0
; SSE-LABEL: name: test_store_float_vec
- ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
- ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
- ; SSE: MOVSSmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1)
- ; SSE: $rax = COPY [[COPY1]]
+ ; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+ ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
+ ; SSE: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
+ ; SSE: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
+ ; SSE: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 4 into %ir.p1)
+ ; SSE: $rax = COPY [[COPY2]]
; SSE: RET 0, implicit $rax
; AVX-LABEL: name: test_store_float_vec
- ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
- ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
- ; AVX: VMOVSSmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1)
- ; AVX: $rax = COPY [[COPY1]]
+ ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+ ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
+ ; AVX: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
+ ; AVX: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 4 into %ir.p1)
+ ; AVX: $rax = COPY [[COPY2]]
; AVX: RET 0, implicit $rax
; AVX512F-LABEL: name: test_store_float_vec
- ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
- ; AVX512F: VMOVSSZmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1)
- ; AVX512F: $rax = COPY [[COPY1]]
+ ; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
+ ; AVX512F: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
+ ; AVX512F: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 4 into %ir.p1)
+ ; AVX512F: $rax = COPY [[COPY2]]
; AVX512F: RET 0, implicit $rax
; AVX512VL-LABEL: name: test_store_float_vec
- ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
- ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
- ; AVX512VL: VMOVSSZmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1)
- ; AVX512VL: $rax = COPY [[COPY1]]
+ ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+ ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
+ ; AVX512VL: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512VL: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
+ ; AVX512VL: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 4 into %ir.p1)
+ ; AVX512VL: $rax = COPY [[COPY2]]
; AVX512VL: RET 0, implicit $rax
- %0(s32) = COPY $xmm0
- %1(p0) = COPY $rdi
- G_STORE %0(s32), %1(p0) :: (store 4 into %ir.p1)
+ %2:vecr(s128) = COPY $xmm0
+ %0:vecr(s32) = G_TRUNC %2(s128)
+ %1:gpr(p0) = COPY $rdi
+ %3:gpr(s32) = COPY %0(s32)
+ G_STORE %3(s32), %1(p0) :: (store 4 into %ir.p1)
$rax = COPY %1(p0)
RET 0, implicit $rax
@@ -587,46 +653,52 @@ alignment: 4
legalized: true
regBankSelected: true
registers:
- - { id: 0, class: vecr }
- - { id: 1, class: gpr }
- - { id: 2, class: gpr }
+ - { id: 0, class: vecr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+ - { id: 2, class: vecr, preferred-register: '' }
+ - { id: 3, class: gpr, preferred-register: '' }
# NO_AVX512X: %0:fr64 = COPY $xmm0
body: |
bb.1 (%ir-block.0):
liveins: $rdi, $xmm0
; SSE-LABEL: name: test_store_double
- ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
- ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
- ; SSE: [[COPY2:%[0-9]+]]:gr64 = COPY [[COPY]]
- ; SSE: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 8 into %ir.p1)
- ; SSE: $rax = COPY [[COPY1]]
+ ; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+ ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
+ ; SSE: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
+ ; SSE: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
+ ; SSE: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 8 into %ir.p1)
+ ; SSE: $rax = COPY [[COPY2]]
; SSE: RET 0, implicit $rax
; AVX-LABEL: name: test_store_double
- ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
- ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
- ; AVX: [[COPY2:%[0-9]+]]:gr64 = COPY [[COPY]]
- ; AVX: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 8 into %ir.p1)
- ; AVX: $rax = COPY [[COPY1]]
+ ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+ ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
+ ; AVX: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
+ ; AVX: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 8 into %ir.p1)
+ ; AVX: $rax = COPY [[COPY2]]
; AVX: RET 0, implicit $rax
; AVX512F-LABEL: name: test_store_double
- ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
- ; AVX512F: [[COPY2:%[0-9]+]]:gr64 = COPY [[COPY]]
- ; AVX512F: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 8 into %ir.p1)
- ; AVX512F: $rax = COPY [[COPY1]]
+ ; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
+ ; AVX512F: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
+ ; AVX512F: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 8 into %ir.p1)
+ ; AVX512F: $rax = COPY [[COPY2]]
; AVX512F: RET 0, implicit $rax
; AVX512VL-LABEL: name: test_store_double
- ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
- ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
- ; AVX512VL: [[COPY2:%[0-9]+]]:gr64 = COPY [[COPY]]
- ; AVX512VL: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 8 into %ir.p1)
- ; AVX512VL: $rax = COPY [[COPY1]]
+ ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+ ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
+ ; AVX512VL: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512VL: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
+ ; AVX512VL: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 8 into %ir.p1)
+ ; AVX512VL: $rax = COPY [[COPY2]]
; AVX512VL: RET 0, implicit $rax
- %0(s64) = COPY $xmm0
- %1(p0) = COPY $rdi
- %2(s64) = COPY %0(s64)
- G_STORE %2(s64), %1(p0) :: (store 8 into %ir.p1)
+ %2:vecr(s128) = COPY $xmm0
+ %0:vecr(s64) = G_TRUNC %2(s128)
+ %1:gpr(p0) = COPY $rdi
+ %3:gpr(s64) = COPY %0(s64)
+ G_STORE %3(s64), %1(p0) :: (store 8 into %ir.p1)
$rax = COPY %1(p0)
RET 0, implicit $rax
@@ -637,39 +709,51 @@ alignment: 4
legalized: true
regBankSelected: true
registers:
- - { id: 0, class: vecr }
- - { id: 1, class: gpr }
+ - { id: 0, class: vecr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+ - { id: 2, class: vecr, preferred-register: '' }
+ - { id: 3, class: gpr, preferred-register: '' }
body: |
bb.1 (%ir-block.0):
liveins: $rdi, $xmm0
; SSE-LABEL: name: test_store_double_vec
- ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
- ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
- ; SSE: MOVSDmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1)
- ; SSE: $rax = COPY [[COPY1]]
+ ; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+ ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
+ ; SSE: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
+ ; SSE: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
+ ; SSE: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 8 into %ir.p1)
+ ; SSE: $rax = COPY [[COPY2]]
; SSE: RET 0, implicit $rax
; AVX-LABEL: name: test_store_double_vec
- ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
- ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
- ; AVX: VMOVSDmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1)
- ; AVX: $rax = COPY [[COPY1]]
+ ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+ ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
+ ; AVX: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
+ ; AVX: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 8 into %ir.p1)
+ ; AVX: $rax = COPY [[COPY2]]
; AVX: RET 0, implicit $rax
; AVX512F-LABEL: name: test_store_double_vec
- ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
- ; AVX512F: VMOVSDZmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1)
- ; AVX512F: $rax = COPY [[COPY1]]
+ ; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
+ ; AVX512F: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
+ ; AVX512F: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 8 into %ir.p1)
+ ; AVX512F: $rax = COPY [[COPY2]]
; AVX512F: RET 0, implicit $rax
; AVX512VL-LABEL: name: test_store_double_vec
- ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
- ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
- ; AVX512VL: VMOVSDZmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1)
- ; AVX512VL: $rax = COPY [[COPY1]]
+ ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+ ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
+ ; AVX512VL: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512VL: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
+ ; AVX512VL: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 8 into %ir.p1)
+ ; AVX512VL: $rax = COPY [[COPY2]]
; AVX512VL: RET 0, implicit $rax
- %0(s64) = COPY $xmm0
- %1(p0) = COPY $rdi
- G_STORE %0(s64), %1(p0) :: (store 8 into %ir.p1)
+ %2:vecr(s128) = COPY $xmm0
+ %0:vecr(s64) = G_TRUNC %2(s128)
+ %1:gpr(p0) = COPY $rdi
+ %3:gpr(s64) = COPY %0(s64)
+ G_STORE %3(s64), %1(p0) :: (store 8 into %ir.p1)
$rax = COPY %1(p0)
RET 0, implicit $rax
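
[Editor's note: two things change in the memop tests. First, the same trunc/anyext wrappers appear around the $xmm copies. Second, the *_vecreg/_vec variants now select plain integer MOV32rm/MOV64rm loads and MOV32mr/MOV64mr stores through cross-bank copies instead of the earlier MOVSS/MOVSD forms, since the G_LOAD/G_STORE itself stays on the gpr bank and only the wrapper copies live on vecr. A sketch of the IR behind test_load_float (assumed from the check lines and memory operands, not the literal .ll source):

    ; sketch only: names assumed
    define float @test_load_float(float* %p1) {
      %r = load float, float* %p1
      ret float %r
    }
]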
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-phi.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-phi.mir?rev=324665&r1=324664&r2=324665&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-phi.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-phi.mir Thu Feb 8 14:41:47 2018
@@ -346,111 +346,127 @@ body: |
...
---
name: test_float
-# ALL-LABEL: name: test_float
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
-# ALL: registers:
-# ALL-NEXT: - { id: 0, class: gr32, preferred-register: '' }
-# ALL-NEXT: - { id: 1, class: fr32, preferred-register: '' }
-# ALL-NEXT: - { id: 2, class: fr32, preferred-register: '' }
-# ALL-NEXT: - { id: 3, class: gr32, preferred-register: '' }
-# ALL-NEXT: - { id: 4, class: gr8, preferred-register: '' }
-# ALL-NEXT: - { id: 5, class: fr32, preferred-register: '' }
registers:
- { id: 0, class: gpr, preferred-register: '' }
- { id: 1, class: vecr, preferred-register: '' }
- { id: 2, class: vecr, preferred-register: '' }
- - { id: 3, class: gpr, preferred-register: '' }
- - { id: 4, class: gpr, preferred-register: '' }
- - { id: 5, class: vecr, preferred-register: '' }
+ - { id: 3, class: vecr, preferred-register: '' }
+ - { id: 4, class: vecr, preferred-register: '' }
+ - { id: 5, class: gpr, preferred-register: '' }
+ - { id: 6, class: gpr, preferred-register: '' }
+ - { id: 7, class: vecr, preferred-register: '' }
+ - { id: 8, class: vecr, preferred-register: '' }
liveins:
fixedStack:
stack:
constants:
-# ALL-LABEL: bb.3.cond.end:
-# ALL: %5:fr32 = PHI %1, %bb.1, %2, %bb.2
-# ALL-NEXT: $xmm0 = COPY %5
-# ALL-NEXT: RET 0, implicit $xmm0
body: |
+ ; ALL-LABEL: name: test_float
+ ; ALL: bb.0.entry:
+ ; ALL: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; ALL: liveins: $edi, $xmm0, $xmm1
+ ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; ALL: [[COPY1:%[0-9]+]]:vr128 = COPY $xmm0
+ ; ALL: [[COPY2:%[0-9]+]]:fr32 = COPY [[COPY1]]
+ ; ALL: [[COPY3:%[0-9]+]]:vr128 = COPY $xmm1
+ ; ALL: [[COPY4:%[0-9]+]]:fr32 = COPY [[COPY3]]
+ ; ALL: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags
+ ; ALL: CMP32rr [[COPY]], [[MOV32r0_]], implicit-def $eflags
+ ; ALL: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit $eflags
+ ; ALL: TEST8ri [[SETGr]], 1, implicit-def $eflags
+ ; ALL: JNE_1 %bb.2, implicit $eflags
+ ; ALL: bb.1.cond.false:
+ ; ALL: successors: %bb.2(0x80000000)
+ ; ALL: bb.2.cond.end:
+ ; ALL: [[PHI:%[0-9]+]]:fr32 = PHI [[COPY4]], %bb.1, [[COPY2]], %bb.0
+ ; ALL: [[COPY5:%[0-9]+]]:vr128 = COPY [[PHI]]
+ ; ALL: $xmm0 = COPY [[COPY5]]
+ ; ALL: RET 0, implicit $xmm0
bb.1.entry:
- successors: %bb.2(0x40000000), %bb.3(0x40000000)
+ successors: %bb.3(0x40000000), %bb.2(0x40000000)
liveins: $edi, $xmm0, $xmm1
- %0(s32) = COPY $edi
- %1(s32) = COPY $xmm0
- %2(s32) = COPY $xmm1
- %3(s32) = G_CONSTANT i32 0
- %4(s1) = G_ICMP intpred(sgt), %0(s32), %3
- G_BRCOND %4(s1), %bb.2
- G_BR %bb.3
-
- bb.2.cond.true:
- successors: %bb.4(0x80000000)
-
- G_BR %bb.4
-
- bb.3.cond.false:
- successors: %bb.4(0x80000000)
-
-
- bb.4.cond.end:
- %5(s32) = G_PHI %1(s32), %bb.2, %2(s32), %bb.3
- $xmm0 = COPY %5(s32)
+ %0:gpr(s32) = COPY $edi
+ %3:vecr(s128) = COPY $xmm0
+ %1:vecr(s32) = G_TRUNC %3(s128)
+ %4:vecr(s128) = COPY $xmm1
+ %2:vecr(s32) = G_TRUNC %4(s128)
+ %5:gpr(s32) = G_CONSTANT i32 0
+ %6:gpr(s1) = G_ICMP intpred(sgt), %0(s32), %5
+ G_BRCOND %6(s1), %bb.3
+
+ bb.2.cond.false:
+ successors: %bb.3(0x80000000)
+
+ bb.3.cond.end:
+ %7:vecr(s32) = G_PHI %2(s32), %bb.2, %1(s32), %bb.1
+ %8:vecr(s128) = G_ANYEXT %7(s32)
+ $xmm0 = COPY %8(s128)
RET 0, implicit $xmm0
...
---
name: test_double
-# ALL-LABEL: name: test_double
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
-# ALL: registers:
-# ALL-NEXT: - { id: 0, class: gr32, preferred-register: '' }
-# ALL-NEXT: - { id: 1, class: fr64, preferred-register: '' }
-# ALL-NEXT: - { id: 2, class: fr64, preferred-register: '' }
-# ALL-NEXT: - { id: 3, class: gr32, preferred-register: '' }
-# ALL-NEXT: - { id: 4, class: gr8, preferred-register: '' }
-# ALL-NEXT: - { id: 5, class: fr64, preferred-register: '' }
registers:
- { id: 0, class: gpr, preferred-register: '' }
- { id: 1, class: vecr, preferred-register: '' }
- { id: 2, class: vecr, preferred-register: '' }
- - { id: 3, class: gpr, preferred-register: '' }
- - { id: 4, class: gpr, preferred-register: '' }
- - { id: 5, class: vecr, preferred-register: '' }
-# ALL-LABEL: bb.3.cond.end:
-# ALL: %5:fr64 = PHI %1, %bb.1, %2, %bb.2
-# ALL-NEXT: $xmm0 = COPY %5
-# ALL-NEXT: RET 0, implicit $xmm0
+ - { id: 3, class: vecr, preferred-register: '' }
+ - { id: 4, class: vecr, preferred-register: '' }
+ - { id: 5, class: gpr, preferred-register: '' }
+ - { id: 6, class: gpr, preferred-register: '' }
+ - { id: 7, class: vecr, preferred-register: '' }
+ - { id: 8, class: vecr, preferred-register: '' }
body: |
+ ; ALL-LABEL: name: test_double
+ ; ALL: bb.0.entry:
+ ; ALL: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; ALL: liveins: $edi, $xmm0, $xmm1
+ ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; ALL: [[COPY1:%[0-9]+]]:vr128 = COPY $xmm0
+ ; ALL: [[COPY2:%[0-9]+]]:fr64 = COPY [[COPY1]]
+ ; ALL: [[COPY3:%[0-9]+]]:vr128 = COPY $xmm1
+ ; ALL: [[COPY4:%[0-9]+]]:fr64 = COPY [[COPY3]]
+ ; ALL: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags
+ ; ALL: CMP32rr [[COPY]], [[MOV32r0_]], implicit-def $eflags
+ ; ALL: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit $eflags
+ ; ALL: TEST8ri [[SETGr]], 1, implicit-def $eflags
+ ; ALL: JNE_1 %bb.2, implicit $eflags
+ ; ALL: bb.1.cond.false:
+ ; ALL: successors: %bb.2(0x80000000)
+ ; ALL: bb.2.cond.end:
+ ; ALL: [[PHI:%[0-9]+]]:fr64 = PHI [[COPY4]], %bb.1, [[COPY2]], %bb.0
+ ; ALL: [[COPY5:%[0-9]+]]:vr128 = COPY [[PHI]]
+ ; ALL: $xmm0 = COPY [[COPY5]]
+ ; ALL: RET 0, implicit $xmm0
bb.1.entry:
- successors: %bb.2(0x40000000), %bb.3(0x40000000)
+ successors: %bb.3(0x40000000), %bb.2(0x40000000)
liveins: $edi, $xmm0, $xmm1
- %0(s32) = COPY $edi
- %1(s64) = COPY $xmm0
- %2(s64) = COPY $xmm1
- %3(s32) = G_CONSTANT i32 0
- %4(s1) = G_ICMP intpred(sgt), %0(s32), %3
- G_BRCOND %4(s1), %bb.2
- G_BR %bb.3
-
- bb.2.cond.true:
- successors: %bb.4(0x80000000)
-
- G_BR %bb.4
-
- bb.3.cond.false:
- successors: %bb.4(0x80000000)
-
-
- bb.4.cond.end:
- %5(s64) = G_PHI %1(s64), %bb.2, %2(s64), %bb.3
- $xmm0 = COPY %5(s64)
+ %0:gpr(s32) = COPY $edi
+ %3:vecr(s128) = COPY $xmm0
+ %1:vecr(s64) = G_TRUNC %3(s128)
+ %4:vecr(s128) = COPY $xmm1
+ %2:vecr(s64) = G_TRUNC %4(s128)
+ %5:gpr(s32) = G_CONSTANT i32 0
+ %6:gpr(s1) = G_ICMP intpred(sgt), %0(s32), %5
+ G_BRCOND %6(s1), %bb.3
+
+ bb.2.cond.false:
+ successors: %bb.3(0x80000000)
+
+ bb.3.cond.end:
+ %7:vecr(s64) = G_PHI %2(s64), %bb.2, %1(s64), %bb.1
+ %8:vecr(s128) = G_ANYEXT %7(s64)
+ $xmm0 = COPY %8(s128)
RET 0, implicit $xmm0
...
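
[Editor's note: the phi tests additionally switch from the hand-written '# ALL' comment checks to full-body '; ALL:' lines in the format produced by llvm/utils/update_mir_test_checks.py, and the empty cond.true block is folded away, so the PHI now merges values coming straight from entry and cond.false. The underlying IR is roughly the following (a sketch; block and value names are assumed):

    ; sketch only: names assumed, cond.true elided as in the new MIR
    define float @test_float(i32 %n, float %a, float %b) {
    entry:
      %cmp = icmp sgt i32 %n, 0
      br i1 %cmp, label %cond.end, label %cond.false

    cond.false:
      br label %cond.end

    cond.end:
      %r = phi float [ %a, %entry ], [ %b, %cond.false ]
      ret float %r
    }
]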