[llvm] r371800 - AMDGPU/GlobalISel: Legalize G_FMAD
Matt Arsenault via llvm-commits
llvm-commits at lists.llvm.org
Thu Sep 12 17:44:35 PDT 2019
Author: arsenm
Date: Thu Sep 12 17:44:35 2019
New Revision: 371800
URL: http://llvm.org/viewvc/llvm-project?rev=371800&view=rev
Log:
AMDGPU/GlobalISel: Legalize G_FMAD
Unlike SelectionDAG, treat this as a normally legalizable operation.
In SelectionDAG this is supposed to only ever be formed if it's legal,
but I've found that to be restrictive. For AMDGPU this is contextually
legal depending on whether denormal flushing is allowed in the use
function.
Technically we currently treat the denormal mode as a subtarget
feature, so custom lowering could be avoided. However, I consider this
to be a defect, and this should be contextually dependent on the
controllable rounding mode of the parent function.
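
For reference, the expansion this patch adds is
G_FMAD a, b, c -> G_FADD (G_FMUL a, b), c. A condensed sketch of the new
LegalizerHelper::lowerFMad (not new API, just the lowering from the diff
below restated as a free function, with the MI fast-math flags forwarded
to both new instructions):

  #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
  #include "llvm/CodeGen/MachineRegisterInfo.h"
  using namespace llvm;

  // Expand G_FMAD a, b, c into G_FADD (G_FMUL a, b), c, preserving the
  // MI flags (e.g. nnan) on both of the newly built instructions.
  static void expandFMad(MachineInstr &MI, MachineRegisterInfo &MRI,
                         MachineIRBuilder &B) {
    Register Dst = MI.getOperand(0).getReg();
    LLT Ty = MRI.getType(Dst);
    unsigned Flags = MI.getFlags();

    auto Mul = B.buildFMul(Ty, MI.getOperand(1), MI.getOperand(2), Flags);
    B.buildFAdd(Dst, Mul, MI.getOperand(3), Flags);
    MI.eraseFromParent();
  }
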
Added:
llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-fmad.mir
llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-fmad.s16.mir
Modified:
llvm/trunk/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
llvm/trunk/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
llvm/trunk/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
llvm/trunk/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
llvm/trunk/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
llvm/trunk/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
Modified: llvm/trunk/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h?rev=371800&r1=371799&r2=371800&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h (original)
+++ llvm/trunk/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h Thu Sep 12 17:44:35 2019
@@ -226,6 +226,7 @@ public:
LegalizeResult lowerMinMax(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
LegalizeResult lowerFCopySign(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
LegalizeResult lowerFMinNumMaxNum(MachineInstr &MI);
+ LegalizeResult lowerFMad(MachineInstr &MI);
LegalizeResult lowerUnmergeValues(MachineInstr &MI);
LegalizeResult lowerShuffleVector(MachineInstr &MI);
LegalizeResult lowerDynStackAlloc(MachineInstr &MI);
Modified: llvm/trunk/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h?rev=371800&r1=371799&r2=371800&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h (original)
+++ llvm/trunk/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h Thu Sep 12 17:44:35 2019
@@ -1361,8 +1361,9 @@ public:
/// Build and insert \p Res = G_FADD \p Op0, \p Op1
MachineInstrBuilder buildFAdd(const DstOp &Dst, const SrcOp &Src0,
- const SrcOp &Src1) {
- return buildInstr(TargetOpcode::G_FADD, {Dst}, {Src0, Src1});
+ const SrcOp &Src1,
+ Optional<unsigned> Flags = None) {
+ return buildInstr(TargetOpcode::G_FADD, {Dst}, {Src0, Src1}, Flags);
}
/// Build and insert \p Res = G_FSUB \p Op0, \p Op1
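
The new optional Flags parameter on buildFAdd simply forwards
MachineInstr MIFlag bits to the created G_FADD, the same way the
underlying buildInstr overload already does. A hypothetical call site
(B, Dst, X, Y are assumed, not from this patch):

  // Build Dst = nnan G_FADD X, Y, tagging the result with the nnan flag.
  auto Add = B.buildFAdd(Dst, X, Y, MachineInstr::FmNoNans);
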
Modified: llvm/trunk/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/GlobalISel/LegalizerHelper.cpp?rev=371800&r1=371799&r2=371800&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/GlobalISel/LegalizerHelper.cpp (original)
+++ llvm/trunk/lib/CodeGen/GlobalISel/LegalizerHelper.cpp Thu Sep 12 17:44:35 2019
@@ -1949,6 +1949,8 @@ LegalizerHelper::lower(MachineInstr &MI,
MI.eraseFromParent();
return Legalized;
}
+ case TargetOpcode::G_FMAD:
+ return lowerFMad(MI);
case TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS: {
Register OldValRes = MI.getOperand(0).getReg();
Register SuccessRes = MI.getOperand(1).getReg();
@@ -3913,6 +3915,19 @@ LegalizerHelper::lowerFMinNumMaxNum(Mach
MI.eraseFromParent();
return Legalized;
}
+
+LegalizerHelper::LegalizeResult LegalizerHelper::lowerFMad(MachineInstr &MI) {
+ // Expand G_FMAD a, b, c -> G_FADD (G_FMUL a, b), c
+ Register DstReg = MI.getOperand(0).getReg();
+ LLT Ty = MRI.getType(DstReg);
+ unsigned Flags = MI.getFlags();
+
+ auto Mul = MIRBuilder.buildFMul(Ty, MI.getOperand(1), MI.getOperand(2),
+ Flags);
+ MIRBuilder.buildFAdd(DstReg, Mul, MI.getOperand(3), Flags);
+ MI.eraseFromParent();
+ return Legalized;
+}
LegalizerHelper::LegalizeResult
LegalizerHelper::lowerUnmergeValues(MachineInstr &MI) {
Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp?rev=371800&r1=371799&r2=371800&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp Thu Sep 12 17:44:35 2019
@@ -397,6 +397,15 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo
.scalarize(0)
.clampScalar(0, S32, S64);
+ // Whether this is legal depends on the floating point mode for the function.
+ auto &FMad = getActionDefinitionsBuilder(G_FMAD);
+ if (ST.hasMadF16())
+ FMad.customFor({S32, S16});
+ else
+ FMad.customFor({S32});
+ FMad.scalarize(0)
+ .lower();
+
getActionDefinitionsBuilder({G_SEXT, G_ZEXT, G_ANYEXT})
.legalFor({{S64, S32}, {S32, S16}, {S64, S16},
{S32, S1}, {S64, S1}, {S16, S1},
@@ -1050,6 +1059,8 @@ bool AMDGPULegalizerInfo::legalizeCustom
return legalizeGlobalValue(MI, MRI, B);
case TargetOpcode::G_LOAD:
return legalizeLoad(MI, MRI, B, Observer);
+ case TargetOpcode::G_FMAD:
+ return legalizeFMad(MI, MRI, B);
default:
return false;
}
@@ -1546,6 +1557,27 @@ bool AMDGPULegalizerInfo::legalizeLoad(
return true;
}
+bool AMDGPULegalizerInfo::legalizeFMad(
+ MachineInstr &MI, MachineRegisterInfo &MRI,
+ MachineIRBuilder &B) const {
+ LLT Ty = MRI.getType(MI.getOperand(0).getReg());
+ assert(Ty.isScalar());
+
+ // TODO: Always legal with future ftz flag.
+ if (Ty == LLT::scalar(32) && !ST.hasFP32Denormals())
+ return true;
+ if (Ty == LLT::scalar(16) && !ST.hasFP16Denormals())
+ return true;
+
+ MachineFunction &MF = B.getMF();
+
+ MachineIRBuilder HelperBuilder(MI);
+ GISelObserverWrapper DummyObserver;
+ LegalizerHelper Helper(MF, DummyObserver, HelperBuilder);
+ HelperBuilder.setMBB(*MI.getParent());
+ return Helper.lowerFMad(MI) == LegalizerHelper::Legalized;
+}
+
// Return the use branch instruction, otherwise null if the usage is invalid.
static MachineInstr *verifyCFIntrinsic(MachineInstr &MI,
MachineRegisterInfo &MRI) {
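
In other words, the contextual rule for AMDGPU is: keep G_FMAD only when
the matching denormal mode is flush-to-zero, and expand it in place
otherwise. A condensed restatement of legalizeFMad above (the two
booleans stand in for the subtarget's hasFP32Denormals /
hasFP16Denormals queries; other types such as s64 always take the
generic lower() path):

  #include "llvm/Support/LowLevelTypeImpl.h"

  // True -> G_FMAD is kept; false -> it is expanded to G_FMUL + G_FADD.
  static bool keepFMad(llvm::LLT Ty, bool HasFP32Denormals,
                       bool HasFP16Denormals) {
    if (Ty == llvm::LLT::scalar(32))
      return !HasFP32Denormals;
    if (Ty == llvm::LLT::scalar(16))
      return !HasFP16Denormals;
    return false; // e.g. s64: never custom, always lowered.
  }
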
Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPULegalizerInfo.h?rev=371800&r1=371799&r2=371800&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPULegalizerInfo.h (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPULegalizerInfo.h Thu Sep 12 17:44:35 2019
@@ -64,6 +64,9 @@ public:
MachineIRBuilder &B,
GISelChangeObserver &Observer) const;
+ bool legalizeFMad(MachineInstr &MI, MachineRegisterInfo &MRI,
+ MachineIRBuilder &B) const;
+
Register getLiveInRegister(MachineRegisterInfo &MRI,
Register Reg, LLT Ty) const;
Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp?rev=371800&r1=371799&r2=371800&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp Thu Sep 12 17:44:35 2019
@@ -1780,6 +1780,7 @@ AMDGPURegisterBankInfo::getInstrMapping(
case AMDGPU::G_FPTOUI:
case AMDGPU::G_FMUL:
case AMDGPU::G_FMA:
+ case AMDGPU::G_FMAD:
case AMDGPU::G_FSQRT:
case AMDGPU::G_SITOFP:
case AMDGPU::G_UITOFP:
Added: llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-fmad.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-fmad.mir?rev=371800&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-fmad.mir (added)
+++ llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-fmad.mir Thu Sep 12 17:44:35 2019
@@ -0,0 +1,181 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -mattr=+fp32-denormals -run-pass=legalizer %s -o - | FileCheck -check-prefix=F32DENORM %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -mattr=-fp32-denormals -run-pass=legalizer %s -o - | FileCheck -check-prefix=F32FLUSH %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -mattr=+fp32-denormals -run-pass=legalizer %s -o - | FileCheck -check-prefix=F32DENORM %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -mattr=-fp32-denormals -run-pass=legalizer %s -o - | FileCheck -check-prefix=F32FLUSH %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -mattr=+fp32-denormals -run-pass=legalizer %s -o - | FileCheck -check-prefix=F32DENORM %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -mattr=-fp32-denormals -run-pass=legalizer %s -o - | FileCheck -check-prefix=F32FLUSH %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx10 -mattr=+fp32-denormals -run-pass=legalizer %s -o - | FileCheck -check-prefix=F32DENORM %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx10 -mattr=-fp32-denormals -run-pass=legalizer %s -o - | FileCheck -check-prefix=F32FLUSH %s
+
+---
+name: test_fmad_s32
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1, $vgpr2
+
+ ; F32DENORM-LABEL: name: test_fmad_s32
+ ; F32DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; F32DENORM: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; F32DENORM: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
+ ; F32DENORM: $vgpr0 = COPY %3(s32)
+ ; F32DENORM: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[COPY1]]
+ ; F32DENORM: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[FMUL]], [[COPY2]]
+ ; F32FLUSH-LABEL: name: test_fmad_s32
+ ; F32FLUSH: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; F32FLUSH: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; F32FLUSH: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
+ ; F32FLUSH: [[FMAD:%[0-9]+]]:_(s32) = G_FMAD [[COPY]], [[COPY1]], [[COPY2]]
+ ; F32FLUSH: $vgpr0 = COPY [[FMAD]](s32)
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s32) = COPY $vgpr1
+ %2:_(s32) = COPY $vgpr2
+ %3:_(s32) = G_FMAD %0, %1, %2
+ $vgpr0 = COPY %3
+...
+
+---
+name: test_fmad_s32_flags
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1, $vgpr2
+
+ ; F32DENORM-LABEL: name: test_fmad_s32_flags
+ ; F32DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; F32DENORM: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; F32DENORM: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
+ ; F32DENORM: $vgpr0 = COPY %3(s32)
+ ; F32DENORM: %4:_(s32) = nnan G_FMUL [[COPY]], [[COPY1]]
+ ; F32DENORM: %3:_(s32) = nnan G_FADD %4, [[COPY2]]
+ ; F32FLUSH-LABEL: name: test_fmad_s32_flags
+ ; F32FLUSH: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; F32FLUSH: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; F32FLUSH: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
+ ; F32FLUSH: %3:_(s32) = nnan G_FMAD [[COPY]], [[COPY1]], [[COPY2]]
+ ; F32FLUSH: $vgpr0 = COPY %3(s32)
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s32) = COPY $vgpr1
+ %2:_(s32) = COPY $vgpr2
+ %3:_(s32) = nnan G_FMAD %0, %1, %2
+ $vgpr0 = COPY %3
+...
+
+---
+name: test_fmad_v2s32
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $vgpr4_vgpr5
+
+ ; F32DENORM-LABEL: name: test_fmad_v2s32
+ ; F32DENORM: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+ ; F32DENORM: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
+ ; F32DENORM: [[COPY2:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr4_vgpr5
+ ; F32DENORM: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
+ ; F32DENORM: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
+ ; F32DENORM: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY2]](<2 x s32>)
+ ; F32DENORM: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR %10(s32), %11(s32)
+ ; F32DENORM: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
+ ; F32DENORM: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[UV1]], [[UV3]]
+ ; F32DENORM: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[FMUL]], [[UV5]]
+ ; F32DENORM: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[UV]], [[UV2]]
+ ; F32DENORM: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[FMUL1]], [[UV4]]
+ ; F32FLUSH-LABEL: name: test_fmad_v2s32
+ ; F32FLUSH: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+ ; F32FLUSH: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
+ ; F32FLUSH: [[COPY2:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr4_vgpr5
+ ; F32FLUSH: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
+ ; F32FLUSH: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
+ ; F32FLUSH: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY2]](<2 x s32>)
+ ; F32FLUSH: [[FMAD:%[0-9]+]]:_(s32) = G_FMAD [[UV]], [[UV2]], [[UV4]]
+ ; F32FLUSH: [[FMAD1:%[0-9]+]]:_(s32) = G_FMAD [[UV1]], [[UV3]], [[UV5]]
+ ; F32FLUSH: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[FMAD]](s32), [[FMAD1]](s32)
+ ; F32FLUSH: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
+ %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
+ %1:_(<2 x s32>) = COPY $vgpr2_vgpr3
+ %2:_(<2 x s32>) = COPY $vgpr4_vgpr5
+ %3:_(<2 x s32>) = G_FMAD %0, %1, %2
+ $vgpr0_vgpr1 = COPY %3
+...
+
+---
+name: test_fmad_v3s32
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2, $vgpr3_vgpr4_vgpr5, $vgpr6_vgpr7_vgpr8
+
+ ; F32DENORM-LABEL: name: test_fmad_v3s32
+ ; F32DENORM: [[COPY:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
+ ; F32DENORM: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr3_vgpr4_vgpr5
+ ; F32DENORM: [[COPY2:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr6_vgpr7_vgpr8
+ ; F32DENORM: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<3 x s32>)
+ ; F32DENORM: [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
+ ; F32DENORM: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY2]](<3 x s32>)
+ ; F32DENORM: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR %13(s32), %14(s32), %15(s32)
+ ; F32DENORM: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
+ ; F32DENORM: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[UV2]], [[UV5]]
+ ; F32DENORM: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[FMUL]], [[UV8]]
+ ; F32DENORM: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[UV1]], [[UV4]]
+ ; F32DENORM: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[FMUL1]], [[UV7]]
+ ; F32DENORM: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[UV]], [[UV3]]
+ ; F32DENORM: [[FADD2:%[0-9]+]]:_(s32) = G_FADD [[FMUL2]], [[UV6]]
+ ; F32FLUSH-LABEL: name: test_fmad_v3s32
+ ; F32FLUSH: [[COPY:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
+ ; F32FLUSH: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr3_vgpr4_vgpr5
+ ; F32FLUSH: [[COPY2:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr6_vgpr7_vgpr8
+ ; F32FLUSH: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<3 x s32>)
+ ; F32FLUSH: [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
+ ; F32FLUSH: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY2]](<3 x s32>)
+ ; F32FLUSH: [[FMAD:%[0-9]+]]:_(s32) = G_FMAD [[UV]], [[UV3]], [[UV6]]
+ ; F32FLUSH: [[FMAD1:%[0-9]+]]:_(s32) = G_FMAD [[UV1]], [[UV4]], [[UV7]]
+ ; F32FLUSH: [[FMAD2:%[0-9]+]]:_(s32) = G_FMAD [[UV2]], [[UV5]], [[UV8]]
+ ; F32FLUSH: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[FMAD]](s32), [[FMAD1]](s32), [[FMAD2]](s32)
+ ; F32FLUSH: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
+ %0:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
+ %1:_(<3 x s32>) = COPY $vgpr3_vgpr4_vgpr5
+ %2:_(<3 x s32>) = COPY $vgpr6_vgpr7_vgpr8
+ %3:_(<3 x s32>) = G_FMAD %0, %1, %2
+ $vgpr0_vgpr1_vgpr2 = COPY %3
+...
+
+---
+name: test_fmad_v4s32
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5_vgpr6_vgpr7, $vgpr8_vgpr9_vgpr10_vgpr11
+
+ ; F32DENORM-LABEL: name: test_fmad_v4s32
+ ; F32DENORM: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; F32DENORM: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
+ ; F32DENORM: [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr8_vgpr9_vgpr10_vgpr11
+ ; F32DENORM: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<4 x s32>)
+ ; F32DENORM: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<4 x s32>)
+ ; F32DENORM: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY2]](<4 x s32>)
+ ; F32DENORM: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR %16(s32), %17(s32), %18(s32), %19(s32)
+ ; F32DENORM: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
+ ; F32DENORM: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[UV3]], [[UV7]]
+ ; F32DENORM: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[FMUL]], [[UV11]]
+ ; F32DENORM: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[UV2]], [[UV6]]
+ ; F32DENORM: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[FMUL1]], [[UV10]]
+ ; F32DENORM: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[UV1]], [[UV5]]
+ ; F32DENORM: [[FADD2:%[0-9]+]]:_(s32) = G_FADD [[FMUL2]], [[UV9]]
+ ; F32DENORM: [[FMUL3:%[0-9]+]]:_(s32) = G_FMUL [[UV]], [[UV4]]
+ ; F32DENORM: [[FADD3:%[0-9]+]]:_(s32) = G_FADD [[FMUL3]], [[UV8]]
+ ; F32FLUSH-LABEL: name: test_fmad_v4s32
+ ; F32FLUSH: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; F32FLUSH: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
+ ; F32FLUSH: [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr8_vgpr9_vgpr10_vgpr11
+ ; F32FLUSH: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<4 x s32>)
+ ; F32FLUSH: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<4 x s32>)
+ ; F32FLUSH: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY2]](<4 x s32>)
+ ; F32FLUSH: [[FMAD:%[0-9]+]]:_(s32) = G_FMAD [[UV]], [[UV4]], [[UV8]]
+ ; F32FLUSH: [[FMAD1:%[0-9]+]]:_(s32) = G_FMAD [[UV1]], [[UV5]], [[UV9]]
+ ; F32FLUSH: [[FMAD2:%[0-9]+]]:_(s32) = G_FMAD [[UV2]], [[UV6]], [[UV10]]
+ ; F32FLUSH: [[FMAD3:%[0-9]+]]:_(s32) = G_FMAD [[UV3]], [[UV7]], [[UV11]]
+ ; F32FLUSH: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[FMAD]](s32), [[FMAD1]](s32), [[FMAD2]](s32), [[FMAD3]](s32)
+ ; F32FLUSH: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
+ %0:_(<4 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ %1:_(<4 x s32>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
+ %2:_(<4 x s32>) = COPY $vgpr8_vgpr9_vgpr10_vgpr11
+ %3:_(<4 x s32>) = G_FMAD %0, %1, %2
+ $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %3
+...
Added: llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-fmad.s16.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-fmad.s16.mir?rev=371800&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-fmad.s16.mir (added)
+++ llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-fmad.s16.mir Thu Sep 12 17:44:35 2019
@@ -0,0 +1,636 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -mattr=+fp64-fp16-denormals -run-pass=legalizer %s -o - | FileCheck -check-prefix=SI-F16DENORM %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -mattr=-fp64-fp16-denormals -run-pass=legalizer %s -o - | FileCheck -check-prefix=SI-F16FLUSH %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -mattr=+fp64-fp16-denormals -run-pass=legalizer %s -o - | FileCheck -check-prefix=VI-F16DENORM %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -mattr=-fp64-fp16-denormals -run-pass=legalizer %s -o - | FileCheck -check-prefix=VI-F16FLUSH %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -mattr=+fp64-fp16-denormals -run-pass=legalizer %s -o - | FileCheck -check-prefix=VI-F16DENORM %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -mattr=-fp64-fp16-denormals -run-pass=legalizer %s -o - | FileCheck -check-prefix=VI-F16FLUSH %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -mattr=+fp64-fp16-denormals -run-pass=legalizer %s -o - | FileCheck -check-prefix=GFX10 %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -mattr=-fp64-fp16-denormals -run-pass=legalizer %s -o - | FileCheck -check-prefix=GFX10 %s
+
+---
+name: test_fmad_s16
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1, $vgpr2
+
+ ; SI-F16DENORM-LABEL: name: test_fmad_s16
+ ; SI-F16DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; SI-F16DENORM: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; SI-F16DENORM: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; SI-F16DENORM: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; SI-F16DENORM: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; SI-F16DENORM: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
+ ; SI-F16DENORM: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
+ ; SI-F16DENORM: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC1]](s16)
+ ; SI-F16DENORM: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[FPEXT1]]
+ ; SI-F16DENORM: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMUL]](s32)
+ ; SI-F16DENORM: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[FPTRUNC]](s16)
+ ; SI-F16DENORM: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC2]](s16)
+ ; SI-F16DENORM: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[FPEXT2]], [[FPEXT3]]
+ ; SI-F16DENORM: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD]](s32)
+ ; SI-F16DENORM: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FPTRUNC1]](s16)
+ ; SI-F16DENORM: $vgpr0 = COPY [[ANYEXT]](s32)
+ ; SI-F16FLUSH-LABEL: name: test_fmad_s16
+ ; SI-F16FLUSH: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; SI-F16FLUSH: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; SI-F16FLUSH: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; SI-F16FLUSH: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; SI-F16FLUSH: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; SI-F16FLUSH: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
+ ; SI-F16FLUSH: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
+ ; SI-F16FLUSH: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC1]](s16)
+ ; SI-F16FLUSH: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[FPEXT1]]
+ ; SI-F16FLUSH: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMUL]](s32)
+ ; SI-F16FLUSH: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[FPTRUNC]](s16)
+ ; SI-F16FLUSH: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC2]](s16)
+ ; SI-F16FLUSH: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[FPEXT2]], [[FPEXT3]]
+ ; SI-F16FLUSH: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD]](s32)
+ ; SI-F16FLUSH: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FPTRUNC1]](s16)
+ ; SI-F16FLUSH: $vgpr0 = COPY [[ANYEXT]](s32)
+ ; VI-F16DENORM-LABEL: name: test_fmad_s16
+ ; VI-F16DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; VI-F16DENORM: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; VI-F16DENORM: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; VI-F16DENORM: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; VI-F16DENORM: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; VI-F16DENORM: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
+ ; VI-F16DENORM: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT %6(s16)
+ ; VI-F16DENORM: $vgpr0 = COPY [[ANYEXT]](s32)
+ ; VI-F16DENORM: [[FMUL:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC]], [[TRUNC1]]
+ ; VI-F16DENORM: [[FADD:%[0-9]+]]:_(s16) = G_FADD [[FMUL]], [[TRUNC2]]
+ ; VI-F16FLUSH-LABEL: name: test_fmad_s16
+ ; VI-F16FLUSH: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; VI-F16FLUSH: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; VI-F16FLUSH: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; VI-F16FLUSH: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; VI-F16FLUSH: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; VI-F16FLUSH: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
+ ; VI-F16FLUSH: [[FMAD:%[0-9]+]]:_(s16) = G_FMAD [[TRUNC]], [[TRUNC1]], [[TRUNC2]]
+ ; VI-F16FLUSH: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMAD]](s16)
+ ; VI-F16FLUSH: $vgpr0 = COPY [[ANYEXT]](s32)
+ ; GFX10-LABEL: name: test_fmad_s16
+ ; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX10: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX10: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX10: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX10: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; GFX10: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
+ ; GFX10: [[FMUL:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC]], [[TRUNC1]]
+ ; GFX10: [[FADD:%[0-9]+]]:_(s16) = G_FADD [[FMUL]], [[TRUNC2]]
+ ; GFX10: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FADD]](s16)
+ ; GFX10: $vgpr0 = COPY [[ANYEXT]](s32)
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s32) = COPY $vgpr1
+ %2:_(s32) = COPY $vgpr1
+ %3:_(s16) = G_TRUNC %0
+ %4:_(s16) = G_TRUNC %1
+ %5:_(s16) = G_TRUNC %2
+
+ %6:_(s16) = G_FMAD %3, %4, %5
+ %7:_(s32) = G_ANYEXT %6
+ $vgpr0 = COPY %7
+...
+
+---
+name: test_fmad_v2s16
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1, $vgpr2
+
+ ; SI-F16DENORM-LABEL: name: test_fmad_v2s16
+ ; SI-F16DENORM: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+ ; SI-F16DENORM: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+ ; SI-F16DENORM: [[COPY2:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr2
+ ; SI-F16DENORM: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY]](<2 x s16>)
+ ; SI-F16DENORM: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; SI-F16DENORM: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; SI-F16DENORM: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; SI-F16DENORM: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; SI-F16DENORM: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY1]](<2 x s16>)
+ ; SI-F16DENORM: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; SI-F16DENORM: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; SI-F16DENORM: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
+ ; SI-F16DENORM: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY2]](<2 x s16>)
+ ; SI-F16DENORM: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32)
+ ; SI-F16DENORM: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; SI-F16DENORM: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32)
+ ; SI-F16DENORM: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
+ ; SI-F16DENORM: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC2]](s16)
+ ; SI-F16DENORM: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[FPEXT1]]
+ ; SI-F16DENORM: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMUL]](s32)
+ ; SI-F16DENORM: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[FPTRUNC]](s16)
+ ; SI-F16DENORM: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC4]](s16)
+ ; SI-F16DENORM: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[FPEXT2]], [[FPEXT3]]
+ ; SI-F16DENORM: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD]](s32)
+ ; SI-F16DENORM: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC1]](s16)
+ ; SI-F16DENORM: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC3]](s16)
+ ; SI-F16DENORM: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT4]], [[FPEXT5]]
+ ; SI-F16DENORM: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMUL1]](s32)
+ ; SI-F16DENORM: [[FPEXT6:%[0-9]+]]:_(s32) = G_FPEXT [[FPTRUNC2]](s16)
+ ; SI-F16DENORM: [[FPEXT7:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC5]](s16)
+ ; SI-F16DENORM: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[FPEXT6]], [[FPEXT7]]
+ ; SI-F16DENORM: [[FPTRUNC3:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD1]](s32)
+ ; SI-F16DENORM: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[FPTRUNC1]](s16), [[FPTRUNC3]](s16)
+ ; SI-F16DENORM: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
+ ; SI-F16FLUSH-LABEL: name: test_fmad_v2s16
+ ; SI-F16FLUSH: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+ ; SI-F16FLUSH: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+ ; SI-F16FLUSH: [[COPY2:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr2
+ ; SI-F16FLUSH: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY]](<2 x s16>)
+ ; SI-F16FLUSH: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; SI-F16FLUSH: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; SI-F16FLUSH: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; SI-F16FLUSH: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; SI-F16FLUSH: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY1]](<2 x s16>)
+ ; SI-F16FLUSH: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; SI-F16FLUSH: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; SI-F16FLUSH: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
+ ; SI-F16FLUSH: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY2]](<2 x s16>)
+ ; SI-F16FLUSH: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32)
+ ; SI-F16FLUSH: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; SI-F16FLUSH: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32)
+ ; SI-F16FLUSH: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
+ ; SI-F16FLUSH: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC2]](s16)
+ ; SI-F16FLUSH: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[FPEXT1]]
+ ; SI-F16FLUSH: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMUL]](s32)
+ ; SI-F16FLUSH: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[FPTRUNC]](s16)
+ ; SI-F16FLUSH: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC4]](s16)
+ ; SI-F16FLUSH: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[FPEXT2]], [[FPEXT3]]
+ ; SI-F16FLUSH: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD]](s32)
+ ; SI-F16FLUSH: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC1]](s16)
+ ; SI-F16FLUSH: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC3]](s16)
+ ; SI-F16FLUSH: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT4]], [[FPEXT5]]
+ ; SI-F16FLUSH: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMUL1]](s32)
+ ; SI-F16FLUSH: [[FPEXT6:%[0-9]+]]:_(s32) = G_FPEXT [[FPTRUNC2]](s16)
+ ; SI-F16FLUSH: [[FPEXT7:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC5]](s16)
+ ; SI-F16FLUSH: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[FPEXT6]], [[FPEXT7]]
+ ; SI-F16FLUSH: [[FPTRUNC3:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD1]](s32)
+ ; SI-F16FLUSH: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[FPTRUNC1]](s16), [[FPTRUNC3]](s16)
+ ; SI-F16FLUSH: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
+ ; VI-F16DENORM-LABEL: name: test_fmad_v2s16
+ ; VI-F16DENORM: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+ ; VI-F16DENORM: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+ ; VI-F16DENORM: [[COPY2:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr2
+ ; VI-F16DENORM: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY]](<2 x s16>)
+ ; VI-F16DENORM: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; VI-F16DENORM: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; VI-F16DENORM: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; VI-F16DENORM: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; VI-F16DENORM: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY1]](<2 x s16>)
+ ; VI-F16DENORM: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; VI-F16DENORM: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; VI-F16DENORM: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
+ ; VI-F16DENORM: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY2]](<2 x s16>)
+ ; VI-F16DENORM: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32)
+ ; VI-F16DENORM: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; VI-F16DENORM: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32)
+ ; VI-F16DENORM: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR %10(s16), %11(s16)
+ ; VI-F16DENORM: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
+ ; VI-F16DENORM: [[FMUL:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC1]], [[TRUNC3]]
+ ; VI-F16DENORM: [[FADD:%[0-9]+]]:_(s16) = G_FADD [[FMUL]], [[TRUNC5]]
+ ; VI-F16DENORM: [[FMUL1:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC]], [[TRUNC2]]
+ ; VI-F16DENORM: [[FADD1:%[0-9]+]]:_(s16) = G_FADD [[FMUL1]], [[TRUNC4]]
+ ; VI-F16FLUSH-LABEL: name: test_fmad_v2s16
+ ; VI-F16FLUSH: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+ ; VI-F16FLUSH: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+ ; VI-F16FLUSH: [[COPY2:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr2
+ ; VI-F16FLUSH: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY]](<2 x s16>)
+ ; VI-F16FLUSH: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; VI-F16FLUSH: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; VI-F16FLUSH: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; VI-F16FLUSH: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; VI-F16FLUSH: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY1]](<2 x s16>)
+ ; VI-F16FLUSH: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; VI-F16FLUSH: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; VI-F16FLUSH: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
+ ; VI-F16FLUSH: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY2]](<2 x s16>)
+ ; VI-F16FLUSH: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32)
+ ; VI-F16FLUSH: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; VI-F16FLUSH: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32)
+ ; VI-F16FLUSH: [[FMAD:%[0-9]+]]:_(s16) = G_FMAD [[TRUNC]], [[TRUNC2]], [[TRUNC4]]
+ ; VI-F16FLUSH: [[FMAD1:%[0-9]+]]:_(s16) = G_FMAD [[TRUNC1]], [[TRUNC3]], [[TRUNC5]]
+ ; VI-F16FLUSH: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[FMAD]](s16), [[FMAD1]](s16)
+ ; VI-F16FLUSH: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
+ ; GFX10-LABEL: name: test_fmad_v2s16
+ ; GFX10: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+ ; GFX10: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+ ; GFX10: [[COPY2:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr2
+ ; GFX10: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY]](<2 x s16>)
+ ; GFX10: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; GFX10: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX10: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX10: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX10: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY1]](<2 x s16>)
+ ; GFX10: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; GFX10: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX10: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
+ ; GFX10: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY2]](<2 x s16>)
+ ; GFX10: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32)
+ ; GFX10: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; GFX10: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32)
+ ; GFX10: [[FMUL:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC]], [[TRUNC2]]
+ ; GFX10: [[FADD:%[0-9]+]]:_(s16) = G_FADD [[FMUL]], [[TRUNC4]]
+ ; GFX10: [[FMUL1:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC1]], [[TRUNC3]]
+ ; GFX10: [[FADD1:%[0-9]+]]:_(s16) = G_FADD [[FMUL1]], [[TRUNC5]]
+ ; GFX10: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[FADD]](s16), [[FADD1]](s16)
+ ; GFX10: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
+ %0:_(<2 x s16>) = COPY $vgpr0
+ %1:_(<2 x s16>) = COPY $vgpr1
+ %2:_(<2 x s16>) = COPY $vgpr2
+ %3:_(<2 x s16>) = G_FMAD %0, %1, %2
+ $vgpr0 = COPY %3
+...
+
+---
+name: test_fmad_v4s16
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $vgpr4_vgpr5
+
+ ; SI-F16DENORM-LABEL: name: test_fmad_v4s16
+ ; SI-F16DENORM: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
+ ; SI-F16DENORM: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3
+ ; SI-F16DENORM: [[COPY2:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr4_vgpr5
+ ; SI-F16DENORM: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+ ; SI-F16DENORM: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
+ ; SI-F16DENORM: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; SI-F16DENORM: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; SI-F16DENORM: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; SI-F16DENORM: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; SI-F16DENORM: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
+ ; SI-F16DENORM: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; SI-F16DENORM: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; SI-F16DENORM: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
+ ; SI-F16DENORM: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
+ ; SI-F16DENORM: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
+ ; SI-F16DENORM: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32)
+ ; SI-F16DENORM: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; SI-F16DENORM: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32)
+ ; SI-F16DENORM: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
+ ; SI-F16DENORM: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32)
+ ; SI-F16DENORM: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+ ; SI-F16DENORM: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
+ ; SI-F16DENORM: [[UV4:%[0-9]+]]:_(<2 x s16>), [[UV5:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY2]](<4 x s16>)
+ ; SI-F16DENORM: [[BITCAST4:%[0-9]+]]:_(s32) = G_BITCAST [[UV4]](<2 x s16>)
+ ; SI-F16DENORM: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST4]](s32)
+ ; SI-F16DENORM: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST4]], [[C]](s32)
+ ; SI-F16DENORM: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR4]](s32)
+ ; SI-F16DENORM: [[BITCAST5:%[0-9]+]]:_(s32) = G_BITCAST [[UV5]](<2 x s16>)
+ ; SI-F16DENORM: [[TRUNC10:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST5]](s32)
+ ; SI-F16DENORM: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST5]], [[C]](s32)
+ ; SI-F16DENORM: [[TRUNC11:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR5]](s32)
+ ; SI-F16DENORM: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
+ ; SI-F16DENORM: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC4]](s16)
+ ; SI-F16DENORM: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[FPEXT1]]
+ ; SI-F16DENORM: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMUL]](s32)
+ ; SI-F16DENORM: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[FPTRUNC]](s16)
+ ; SI-F16DENORM: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC8]](s16)
+ ; SI-F16DENORM: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[FPEXT2]], [[FPEXT3]]
+ ; SI-F16DENORM: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD]](s32)
+ ; SI-F16DENORM: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC1]](s16)
+ ; SI-F16DENORM: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC5]](s16)
+ ; SI-F16DENORM: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT4]], [[FPEXT5]]
+ ; SI-F16DENORM: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMUL1]](s32)
+ ; SI-F16DENORM: [[FPEXT6:%[0-9]+]]:_(s32) = G_FPEXT [[FPTRUNC2]](s16)
+ ; SI-F16DENORM: [[FPEXT7:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC9]](s16)
+ ; SI-F16DENORM: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[FPEXT6]], [[FPEXT7]]
+ ; SI-F16DENORM: [[FPTRUNC3:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD1]](s32)
+ ; SI-F16DENORM: [[FPEXT8:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC2]](s16)
+ ; SI-F16DENORM: [[FPEXT9:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC6]](s16)
+ ; SI-F16DENORM: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT8]], [[FPEXT9]]
+ ; SI-F16DENORM: [[FPTRUNC4:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMUL2]](s32)
+ ; SI-F16DENORM: [[FPEXT10:%[0-9]+]]:_(s32) = G_FPEXT [[FPTRUNC4]](s16)
+ ; SI-F16DENORM: [[FPEXT11:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC10]](s16)
+ ; SI-F16DENORM: [[FADD2:%[0-9]+]]:_(s32) = G_FADD [[FPEXT10]], [[FPEXT11]]
+ ; SI-F16DENORM: [[FPTRUNC5:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD2]](s32)
+ ; SI-F16DENORM: [[FPEXT12:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC3]](s16)
+ ; SI-F16DENORM: [[FPEXT13:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC7]](s16)
+ ; SI-F16DENORM: [[FMUL3:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT12]], [[FPEXT13]]
+ ; SI-F16DENORM: [[FPTRUNC6:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMUL3]](s32)
+ ; SI-F16DENORM: [[FPEXT14:%[0-9]+]]:_(s32) = G_FPEXT [[FPTRUNC6]](s16)
+ ; SI-F16DENORM: [[FPEXT15:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC11]](s16)
+ ; SI-F16DENORM: [[FADD3:%[0-9]+]]:_(s32) = G_FADD [[FPEXT14]], [[FPEXT15]]
+ ; SI-F16DENORM: [[FPTRUNC7:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD3]](s32)
+ ; SI-F16DENORM: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[FPTRUNC1]](s16), [[FPTRUNC3]](s16), [[FPTRUNC5]](s16), [[FPTRUNC7]](s16)
+ ; SI-F16DENORM: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<4 x s16>)
+ ; SI-F16FLUSH-LABEL: name: test_fmad_v4s16
+ ; SI-F16FLUSH: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
+ ; SI-F16FLUSH: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3
+ ; SI-F16FLUSH: [[COPY2:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr4_vgpr5
+ ; SI-F16FLUSH: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+ ; SI-F16FLUSH: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
+ ; SI-F16FLUSH: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; SI-F16FLUSH: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; SI-F16FLUSH: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; SI-F16FLUSH: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; SI-F16FLUSH: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
+ ; SI-F16FLUSH: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; SI-F16FLUSH: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; SI-F16FLUSH: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
+ ; SI-F16FLUSH: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
+ ; SI-F16FLUSH: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
+ ; SI-F16FLUSH: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32)
+ ; SI-F16FLUSH: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; SI-F16FLUSH: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32)
+ ; SI-F16FLUSH: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
+ ; SI-F16FLUSH: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32)
+ ; SI-F16FLUSH: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+ ; SI-F16FLUSH: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
+ ; SI-F16FLUSH: [[UV4:%[0-9]+]]:_(<2 x s16>), [[UV5:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY2]](<4 x s16>)
+ ; SI-F16FLUSH: [[BITCAST4:%[0-9]+]]:_(s32) = G_BITCAST [[UV4]](<2 x s16>)
+ ; SI-F16FLUSH: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST4]](s32)
+ ; SI-F16FLUSH: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST4]], [[C]](s32)
+ ; SI-F16FLUSH: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR4]](s32)
+ ; SI-F16FLUSH: [[BITCAST5:%[0-9]+]]:_(s32) = G_BITCAST [[UV5]](<2 x s16>)
+ ; SI-F16FLUSH: [[TRUNC10:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST5]](s32)
+ ; SI-F16FLUSH: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST5]], [[C]](s32)
+ ; SI-F16FLUSH: [[TRUNC11:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR5]](s32)
+ ; SI-F16FLUSH: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
+ ; SI-F16FLUSH: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC4]](s16)
+ ; SI-F16FLUSH: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[FPEXT1]]
+ ; SI-F16FLUSH: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMUL]](s32)
+ ; SI-F16FLUSH: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[FPTRUNC]](s16)
+ ; SI-F16FLUSH: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC8]](s16)
+ ; SI-F16FLUSH: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[FPEXT2]], [[FPEXT3]]
+ ; SI-F16FLUSH: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD]](s32)
+ ; SI-F16FLUSH: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC1]](s16)
+ ; SI-F16FLUSH: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC5]](s16)
+ ; SI-F16FLUSH: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT4]], [[FPEXT5]]
+ ; SI-F16FLUSH: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMUL1]](s32)
+ ; SI-F16FLUSH: [[FPEXT6:%[0-9]+]]:_(s32) = G_FPEXT [[FPTRUNC2]](s16)
+ ; SI-F16FLUSH: [[FPEXT7:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC9]](s16)
+ ; SI-F16FLUSH: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[FPEXT6]], [[FPEXT7]]
+ ; SI-F16FLUSH: [[FPTRUNC3:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD1]](s32)
+ ; SI-F16FLUSH: [[FPEXT8:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC2]](s16)
+ ; SI-F16FLUSH: [[FPEXT9:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC6]](s16)
+ ; SI-F16FLUSH: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT8]], [[FPEXT9]]
+ ; SI-F16FLUSH: [[FPTRUNC4:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMUL2]](s32)
+ ; SI-F16FLUSH: [[FPEXT10:%[0-9]+]]:_(s32) = G_FPEXT [[FPTRUNC4]](s16)
+ ; SI-F16FLUSH: [[FPEXT11:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC10]](s16)
+ ; SI-F16FLUSH: [[FADD2:%[0-9]+]]:_(s32) = G_FADD [[FPEXT10]], [[FPEXT11]]
+ ; SI-F16FLUSH: [[FPTRUNC5:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD2]](s32)
+ ; SI-F16FLUSH: [[FPEXT12:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC3]](s16)
+ ; SI-F16FLUSH: [[FPEXT13:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC7]](s16)
+ ; SI-F16FLUSH: [[FMUL3:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT12]], [[FPEXT13]]
+ ; SI-F16FLUSH: [[FPTRUNC6:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMUL3]](s32)
+ ; SI-F16FLUSH: [[FPEXT14:%[0-9]+]]:_(s32) = G_FPEXT [[FPTRUNC6]](s16)
+ ; SI-F16FLUSH: [[FPEXT15:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC11]](s16)
+ ; SI-F16FLUSH: [[FADD3:%[0-9]+]]:_(s32) = G_FADD [[FPEXT14]], [[FPEXT15]]
+ ; SI-F16FLUSH: [[FPTRUNC7:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD3]](s32)
+ ; SI-F16FLUSH: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[FPTRUNC1]](s16), [[FPTRUNC3]](s16), [[FPTRUNC5]](s16), [[FPTRUNC7]](s16)
+ ; SI-F16FLUSH: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<4 x s16>)
+ ; VI-F16DENORM-LABEL: name: test_fmad_v4s16
+ ; VI-F16DENORM: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
+ ; VI-F16DENORM: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3
+ ; VI-F16DENORM: [[COPY2:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr4_vgpr5
+ ; VI-F16DENORM: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+ ; VI-F16DENORM: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
+ ; VI-F16DENORM: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; VI-F16DENORM: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; VI-F16DENORM: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; VI-F16DENORM: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; VI-F16DENORM: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
+ ; VI-F16DENORM: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; VI-F16DENORM: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; VI-F16DENORM: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
+ ; VI-F16DENORM: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
+ ; VI-F16DENORM: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
+ ; VI-F16DENORM: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32)
+ ; VI-F16DENORM: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; VI-F16DENORM: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32)
+ ; VI-F16DENORM: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
+ ; VI-F16DENORM: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32)
+ ; VI-F16DENORM: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+ ; VI-F16DENORM: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
+ ; VI-F16DENORM: [[UV4:%[0-9]+]]:_(<2 x s16>), [[UV5:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY2]](<4 x s16>)
+ ; VI-F16DENORM: [[BITCAST4:%[0-9]+]]:_(s32) = G_BITCAST [[UV4]](<2 x s16>)
+ ; VI-F16DENORM: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST4]](s32)
+ ; VI-F16DENORM: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST4]], [[C]](s32)
+ ; VI-F16DENORM: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR4]](s32)
+ ; VI-F16DENORM: [[BITCAST5:%[0-9]+]]:_(s32) = G_BITCAST [[UV5]](<2 x s16>)
+ ; VI-F16DENORM: [[TRUNC10:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST5]](s32)
+ ; VI-F16DENORM: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST5]], [[C]](s32)
+ ; VI-F16DENORM: [[TRUNC11:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR5]](s32)
+ ; VI-F16DENORM: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR %16(s16), %17(s16), %18(s16), %19(s16)
+ ; VI-F16DENORM: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<4 x s16>)
+ ; VI-F16DENORM: [[FMUL:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC3]], [[TRUNC7]]
+ ; VI-F16DENORM: [[FADD:%[0-9]+]]:_(s16) = G_FADD [[FMUL]], [[TRUNC11]]
+ ; VI-F16DENORM: [[FMUL1:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC2]], [[TRUNC6]]
+ ; VI-F16DENORM: [[FADD1:%[0-9]+]]:_(s16) = G_FADD [[FMUL1]], [[TRUNC10]]
+ ; VI-F16DENORM: [[FMUL2:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC1]], [[TRUNC5]]
+ ; VI-F16DENORM: [[FADD2:%[0-9]+]]:_(s16) = G_FADD [[FMUL2]], [[TRUNC9]]
+ ; VI-F16DENORM: [[FMUL3:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC]], [[TRUNC4]]
+ ; VI-F16DENORM: [[FADD3:%[0-9]+]]:_(s16) = G_FADD [[FMUL3]], [[TRUNC8]]
+ ; VI-F16FLUSH-LABEL: name: test_fmad_v4s16
+ ; VI-F16FLUSH: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
+ ; VI-F16FLUSH: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3
+ ; VI-F16FLUSH: [[COPY2:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr4_vgpr5
+ ; VI-F16FLUSH: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+ ; VI-F16FLUSH: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
+ ; VI-F16FLUSH: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; VI-F16FLUSH: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; VI-F16FLUSH: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; VI-F16FLUSH: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; VI-F16FLUSH: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
+ ; VI-F16FLUSH: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; VI-F16FLUSH: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; VI-F16FLUSH: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
+ ; VI-F16FLUSH: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
+ ; VI-F16FLUSH: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
+ ; VI-F16FLUSH: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32)
+ ; VI-F16FLUSH: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; VI-F16FLUSH: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32)
+ ; VI-F16FLUSH: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
+ ; VI-F16FLUSH: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32)
+ ; VI-F16FLUSH: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+ ; VI-F16FLUSH: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
+ ; VI-F16FLUSH: [[UV4:%[0-9]+]]:_(<2 x s16>), [[UV5:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY2]](<4 x s16>)
+ ; VI-F16FLUSH: [[BITCAST4:%[0-9]+]]:_(s32) = G_BITCAST [[UV4]](<2 x s16>)
+ ; VI-F16FLUSH: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST4]](s32)
+ ; VI-F16FLUSH: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST4]], [[C]](s32)
+ ; VI-F16FLUSH: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR4]](s32)
+ ; VI-F16FLUSH: [[BITCAST5:%[0-9]+]]:_(s32) = G_BITCAST [[UV5]](<2 x s16>)
+ ; VI-F16FLUSH: [[TRUNC10:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST5]](s32)
+ ; VI-F16FLUSH: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST5]], [[C]](s32)
+ ; VI-F16FLUSH: [[TRUNC11:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR5]](s32)
+ ; VI-F16FLUSH: [[FMAD:%[0-9]+]]:_(s16) = G_FMAD [[TRUNC]], [[TRUNC4]], [[TRUNC8]]
+ ; VI-F16FLUSH: [[FMAD1:%[0-9]+]]:_(s16) = G_FMAD [[TRUNC1]], [[TRUNC5]], [[TRUNC9]]
+ ; VI-F16FLUSH: [[FMAD2:%[0-9]+]]:_(s16) = G_FMAD [[TRUNC2]], [[TRUNC6]], [[TRUNC10]]
+ ; VI-F16FLUSH: [[FMAD3:%[0-9]+]]:_(s16) = G_FMAD [[TRUNC3]], [[TRUNC7]], [[TRUNC11]]
+ ; VI-F16FLUSH: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[FMAD]](s16), [[FMAD1]](s16), [[FMAD2]](s16), [[FMAD3]](s16)
+ ; VI-F16FLUSH: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<4 x s16>)
+ ; GFX10-LABEL: name: test_fmad_v4s16
+ ; GFX10: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
+ ; GFX10: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3
+ ; GFX10: [[COPY2:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr4_vgpr5
+ ; GFX10: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+ ; GFX10: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
+ ; GFX10: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; GFX10: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX10: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX10: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX10: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
+ ; GFX10: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; GFX10: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX10: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
+ ; GFX10: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
+ ; GFX10: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
+ ; GFX10: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32)
+ ; GFX10: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; GFX10: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32)
+ ; GFX10: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
+ ; GFX10: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32)
+ ; GFX10: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+ ; GFX10: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
+ ; GFX10: [[UV4:%[0-9]+]]:_(<2 x s16>), [[UV5:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY2]](<4 x s16>)
+ ; GFX10: [[BITCAST4:%[0-9]+]]:_(s32) = G_BITCAST [[UV4]](<2 x s16>)
+ ; GFX10: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST4]](s32)
+ ; GFX10: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST4]], [[C]](s32)
+ ; GFX10: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR4]](s32)
+ ; GFX10: [[BITCAST5:%[0-9]+]]:_(s32) = G_BITCAST [[UV5]](<2 x s16>)
+ ; GFX10: [[TRUNC10:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST5]](s32)
+ ; GFX10: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST5]], [[C]](s32)
+ ; GFX10: [[TRUNC11:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR5]](s32)
+ ; GFX10: [[FMUL:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC]], [[TRUNC4]]
+ ; GFX10: [[FADD:%[0-9]+]]:_(s16) = G_FADD [[FMUL]], [[TRUNC8]]
+ ; GFX10: [[FMUL1:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC1]], [[TRUNC5]]
+ ; GFX10: [[FADD1:%[0-9]+]]:_(s16) = G_FADD [[FMUL1]], [[TRUNC9]]
+ ; GFX10: [[FMUL2:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC2]], [[TRUNC6]]
+ ; GFX10: [[FADD2:%[0-9]+]]:_(s16) = G_FADD [[FMUL2]], [[TRUNC10]]
+ ; GFX10: [[FMUL3:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC3]], [[TRUNC7]]
+ ; GFX10: [[FADD3:%[0-9]+]]:_(s16) = G_FADD [[FMUL3]], [[TRUNC11]]
+ ; GFX10: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[FADD]](s16), [[FADD1]](s16), [[FADD2]](s16), [[FADD3]](s16)
+ ; GFX10: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<4 x s16>)
+ %0:_(<4 x s16>) = COPY $vgpr0_vgpr1
+ %1:_(<4 x s16>) = COPY $vgpr2_vgpr3
+ %2:_(<4 x s16>) = COPY $vgpr4_vgpr5
+ %3:_(<4 x s16>) = G_FMAD %0, %1, %2
+ $vgpr0_vgpr1 = COPY %3
+...
+
+---
+name: test_fmad_s64
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $vgpr4_vgpr5
+
+ ; SI-F16DENORM-LABEL: name: test_fmad_s64
+ ; SI-F16DENORM: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+ ; SI-F16DENORM: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
+ ; SI-F16DENORM: [[COPY2:%[0-9]+]]:_(s64) = COPY $vgpr4_vgpr5
+ ; SI-F16DENORM: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[COPY]], [[COPY1]]
+ ; SI-F16DENORM: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[FMUL]], [[COPY2]]
+ ; SI-F16DENORM: $vgpr0_vgpr1 = COPY [[FADD]](s64)
+ ; SI-F16FLUSH-LABEL: name: test_fmad_s64
+ ; SI-F16FLUSH: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+ ; SI-F16FLUSH: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
+ ; SI-F16FLUSH: [[COPY2:%[0-9]+]]:_(s64) = COPY $vgpr4_vgpr5
+ ; SI-F16FLUSH: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[COPY]], [[COPY1]]
+ ; SI-F16FLUSH: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[FMUL]], [[COPY2]]
+ ; SI-F16FLUSH: $vgpr0_vgpr1 = COPY [[FADD]](s64)
+ ; VI-F16DENORM-LABEL: name: test_fmad_s64
+ ; VI-F16DENORM: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+ ; VI-F16DENORM: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
+ ; VI-F16DENORM: [[COPY2:%[0-9]+]]:_(s64) = COPY $vgpr4_vgpr5
+ ; VI-F16DENORM: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[COPY]], [[COPY1]]
+ ; VI-F16DENORM: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[FMUL]], [[COPY2]]
+ ; VI-F16DENORM: $vgpr0_vgpr1 = COPY [[FADD]](s64)
+ ; VI-F16FLUSH-LABEL: name: test_fmad_s64
+ ; VI-F16FLUSH: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+ ; VI-F16FLUSH: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
+ ; VI-F16FLUSH: [[COPY2:%[0-9]+]]:_(s64) = COPY $vgpr4_vgpr5
+ ; VI-F16FLUSH: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[COPY]], [[COPY1]]
+ ; VI-F16FLUSH: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[FMUL]], [[COPY2]]
+ ; VI-F16FLUSH: $vgpr0_vgpr1 = COPY [[FADD]](s64)
+ ; GFX10-LABEL: name: test_fmad_s64
+ ; GFX10: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+ ; GFX10: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
+ ; GFX10: [[COPY2:%[0-9]+]]:_(s64) = COPY $vgpr4_vgpr5
+ ; GFX10: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[COPY]], [[COPY1]]
+ ; GFX10: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[FMUL]], [[COPY2]]
+ ; GFX10: $vgpr0_vgpr1 = COPY [[FADD]](s64)
+ %0:_(s64) = COPY $vgpr0_vgpr1
+ %1:_(s64) = COPY $vgpr2_vgpr3
+ %2:_(s64) = COPY $vgpr4_vgpr5
+ %3:_(s64) = G_FMAD %0, %1, %2
+ $vgpr0_vgpr1 = COPY %3
+...
+
+---
+name: test_fmad_v2s64
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5_vgpr6_vgpr7, $vgpr8_vgpr9_vgpr10_vgpr11
+
+ ; SI-F16DENORM-LABEL: name: test_fmad_v2s64
+ ; SI-F16DENORM: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; SI-F16DENORM: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
+ ; SI-F16DENORM: [[COPY2:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr8_vgpr9_vgpr10_vgpr11
+ ; SI-F16DENORM: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
+ ; SI-F16DENORM: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
+ ; SI-F16DENORM: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY2]](<2 x s64>)
+ ; SI-F16DENORM: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[UV]], [[UV2]]
+ ; SI-F16DENORM: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[FMUL]], [[UV4]]
+ ; SI-F16DENORM: [[FMUL1:%[0-9]+]]:_(s64) = G_FMUL [[UV1]], [[UV3]]
+ ; SI-F16DENORM: [[FADD1:%[0-9]+]]:_(s64) = G_FADD [[FMUL1]], [[UV5]]
+ ; SI-F16DENORM: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[FADD]](s64), [[FADD1]](s64)
+ ; SI-F16DENORM: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
+ ; SI-F16FLUSH-LABEL: name: test_fmad_v2s64
+ ; SI-F16FLUSH: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; SI-F16FLUSH: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
+ ; SI-F16FLUSH: [[COPY2:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr8_vgpr9_vgpr10_vgpr11
+ ; SI-F16FLUSH: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
+ ; SI-F16FLUSH: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
+ ; SI-F16FLUSH: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY2]](<2 x s64>)
+ ; SI-F16FLUSH: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[UV]], [[UV2]]
+ ; SI-F16FLUSH: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[FMUL]], [[UV4]]
+ ; SI-F16FLUSH: [[FMUL1:%[0-9]+]]:_(s64) = G_FMUL [[UV1]], [[UV3]]
+ ; SI-F16FLUSH: [[FADD1:%[0-9]+]]:_(s64) = G_FADD [[FMUL1]], [[UV5]]
+ ; SI-F16FLUSH: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[FADD]](s64), [[FADD1]](s64)
+ ; SI-F16FLUSH: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
+ ; VI-F16DENORM-LABEL: name: test_fmad_v2s64
+ ; VI-F16DENORM: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; VI-F16DENORM: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
+ ; VI-F16DENORM: [[COPY2:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr8_vgpr9_vgpr10_vgpr11
+ ; VI-F16DENORM: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
+ ; VI-F16DENORM: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
+ ; VI-F16DENORM: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY2]](<2 x s64>)
+ ; VI-F16DENORM: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[UV]], [[UV2]]
+ ; VI-F16DENORM: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[FMUL]], [[UV4]]
+ ; VI-F16DENORM: [[FMUL1:%[0-9]+]]:_(s64) = G_FMUL [[UV1]], [[UV3]]
+ ; VI-F16DENORM: [[FADD1:%[0-9]+]]:_(s64) = G_FADD [[FMUL1]], [[UV5]]
+ ; VI-F16DENORM: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[FADD]](s64), [[FADD1]](s64)
+ ; VI-F16DENORM: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
+ ; VI-F16FLUSH-LABEL: name: test_fmad_v2s64
+ ; VI-F16FLUSH: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; VI-F16FLUSH: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
+ ; VI-F16FLUSH: [[COPY2:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr8_vgpr9_vgpr10_vgpr11
+ ; VI-F16FLUSH: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
+ ; VI-F16FLUSH: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
+ ; VI-F16FLUSH: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY2]](<2 x s64>)
+ ; VI-F16FLUSH: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[UV]], [[UV2]]
+ ; VI-F16FLUSH: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[FMUL]], [[UV4]]
+ ; VI-F16FLUSH: [[FMUL1:%[0-9]+]]:_(s64) = G_FMUL [[UV1]], [[UV3]]
+ ; VI-F16FLUSH: [[FADD1:%[0-9]+]]:_(s64) = G_FADD [[FMUL1]], [[UV5]]
+ ; VI-F16FLUSH: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[FADD]](s64), [[FADD1]](s64)
+ ; VI-F16FLUSH: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
+ ; GFX10-LABEL: name: test_fmad_v2s64
+ ; GFX10: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; GFX10: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
+ ; GFX10: [[COPY2:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr8_vgpr9_vgpr10_vgpr11
+ ; GFX10: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
+ ; GFX10: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
+ ; GFX10: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY2]](<2 x s64>)
+ ; GFX10: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[UV]], [[UV2]]
+ ; GFX10: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[FMUL]], [[UV4]]
+ ; GFX10: [[FMUL1:%[0-9]+]]:_(s64) = G_FMUL [[UV1]], [[UV3]]
+ ; GFX10: [[FADD1:%[0-9]+]]:_(s64) = G_FADD [[FMUL1]], [[UV5]]
+ ; GFX10: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[FADD]](s64), [[FADD1]](s64)
+ ; GFX10: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
+ %0:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ %1:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
+ %2:_(<2 x s64>) = COPY $vgpr8_vgpr9_vgpr10_vgpr11
+ %3:_(<2 x s64>) = G_FMAD %0, %1, %2
+ $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %3
+...